diff --git "a/699.jsonl" "b/699.jsonl" new file mode 100644--- /dev/null +++ "b/699.jsonl" @@ -0,0 +1,775 @@ +{"seq_id":"257278980","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 25 10:28:48 2017\ncreating and controlling a fleet of water heaters\n@author: Chuck Booten (NREL), Jeff Maguire (NREL)\n\"\"\"\n\n# code needed for GLOBAL fleet_interface\nimport sys\nfrom os.path import dirname, abspath, join\n\nsys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))\n# sys.path.insert(0,'C:\\\\battery_interface_jmaguire\\\\src\\\\fleets\\\\water_heater_fleet')\n################################################################\nfrom configparser import ConfigParser\n# import datetime\n# from datetime import timedelta\n\nfrom datetime import datetime, timedelta\n\nfrom fleet_interface import FleetInterface\nfrom fleet_response import FleetResponse\n# from fleets.water_heater_fleet.load_config import LoadConfig\nfrom fleets.water_heater_fleet.load_config import LoadConfig\nfrom frequency_droop import FrequencyDroop\nfrom fleets.water_heater_fleet.wh import WaterHeater\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport time\nimport csv\nimport os\n\n\nclass WaterHeaterFleet(FleetInterface): # FleetInterface\n \"\"\"\n This class implements FleetInterface so that it can communicate with a fleet\n \"\"\"\n\n def __init__(self, GridInfo, ts, s_step): # add , sim_step later\n \"\"\"\n for battery def __init__(self, GridInfo,**kwargs):\n old: (self, Steps = 100, Timestep = 10, P_request = 0, Q_request = 0, forecast = 0, StartHr = 40)\n ts: Timestamp in simulation loop: datetime\n sim_step: Simulation time step: timedelta object\n \"\"\"\n # Location of working path\n self.base_path = dirname(abspath(__file__))\n\n # Read config file\n config = ConfigParser()\n config.read(join(self.base_path, 'config.ini'))\n\n # Load config file and store data in a dataframe\n LC = LoadConfig(config)\n 
self.df_WHModels = LC.get_config_models()\n\n # Run baseline power to store baseline power and SOC if parameters\n # of the fleet are changed. ONLY ASSIGN TRUE IF YOU CHANGE THE\n # PARAMETERS OF THE FLEET AS IT WILL DRAMATICALLY INCREASE THE CPU TIME\n self.run_baseline = LC.get_run_baseline()\n # Establish the properties of the grid on which the fleet is connected on\n self.grid = GridInfo\n # Get cur directory\n self.base_path = dirname(abspath(__file__))\n\n # Input data for water heaters (probably should be moved to config.ini)\n self.numWH = 500 # number of water heaters to be simulated to represent the entire fleet\n # addshedTimestep NOTE, MUST BE A DIVISOR OF 60. Acceptable numbers are: 0.1, 0.2, 0.5, 1,2,3,4,5,6,10,12,15,20,30, 60, etc.\n # self.MaxNumAnnualConditions = 20 #max # of annual conditions to calculate, if more WHs than this just reuse some of the conditions and water draw profiles\n\n \" TODO: update this sim_step in main interface \"\n\n self.sim_step = s_step.total_seconds() # 1 * 10 # in seconds\n # self.sim_step = 10 #1 * 10 # in seconds\n\n # #%% Frequency-watt parameters\n # FrequencyWatt='Frequency Watt'\n # self.db_UF=float(LC.get(FW,'db_UF'))\n # self.db_OF=float(LC.get(FW,'db_OF'))\n # self.k_UF=float(self.config.get(FrequencyWatt,'k_UF'))\n # self.k_OF=float(self.config.get(FrequencyWatt,'k_OF'))\n\n # Location for frequency response (doesn't necessarily correspond with WH location)\n # TODO: sync this location with the climate zone\n self.location = np.random.randint(0, 1, self.numWH)\n\n # How to calculate effective fleet rating: this is going to be poorly\n # met because it does not consider random availability of the fleet.\n # However this seems to be the best approximation\n self.fleet_rating = (self.numWH * 4.5) # unit power is 4.5 kw\n\n # Weight used to scale the service request\n self.service_weight = LC.get_service_weight()\n\n # Fleet configuration variables\n self.is_P_priority = LC.get_fleet_config()[0]\n 
self.is_autonomous = LC.get_fleet_config()[1]\n\n # Autonomous operation\n fw_21 = LC.get_FW()\n self.FW21_Enabled = fw_21[0]\n if self.FW21_Enabled == True:\n # Single-sided deadband value for low-frequency, in Hz\n self.db_UF = fw_21[1]\n # Single-sided deadband value for high-frequency, in Hz\n self.db_OF = fw_21[2]\n # Per-unit frequency change corresponding to 1 per-unit power output change (frequency droop), dimensionless\n self.k_UF = fw_21[3]\n # Per-unit frequency change corresponding to 1 per-unit power output change (frequency droop), dimensionless\n self.k_OF = fw_21[4]\n # Available active power, in p.u. of the DER rating\n self.P_avl = fw_21[5]\n # Minimum active power output due to DER prime mover constraints, in p.u. of the DER rating\n self.P_min = fw_21[6]\n self.P_pre = fw_21[7]\n\n # Randomization of discrete devices: deadbands must be randomize to provide a continuous response\n self.db_UF_subfleet = np.random.uniform(low=self.db_UF[0], high=self.db_UF[1], size=(self.numWH,))\n self.db_OF_subfleet = np.random.uniform(low=self.db_OF[0], high=self.db_OF[1], size=(self.numWH,))\n\n # Impact metrics of the fleet\n metrics = LC.get_impact_metrics_params()\n\n # Aveage tank baseline\n self.ave_Tinb = metrics[0]\n # Aveage tank temperature under grid service\n self.ave_Tin = metrics[1]\n # Cylces in baseline\n self.cycle_basee = metrics[2]\n # Cylces in grid operation\n self.cycle_grid = metrics[3]\n # State of Charge of the battery equivalent model under baseline\n self.SOCb_metric = metrics[4]\n # State of Charge of the battery equivalent model\n self.SOC_metric = metrics[5]\n # Unmet hours of the fleet\n self.unmet_hours = metrics[6]\n\n # P_togrid/P_baseline\n self.ratio_P_togrid_P_base = 1.\n # Energy impacts of providing the grid service\n self.energy_impacts = 0.\n\n ########### initial tank temperatures and setpoint temperature\n # for capacity, type, location and max. 
number of service calls need to specify discrete values and randomly sample to get a desired distribution\n self.MaxNumAnnualConditions = 20\n self.CapacityMasterList = [50, 50, 50, 50, 50, 50, 50, 50, 40, 40, 80] # 70% 50 gal, 20% 40 gal, 10% 80 gal\n self.TypeMasterList = ['ER', 'ER', 'ER', 'ER', 'ER', 'ER', 'ER', 'ER', 'ER',\n 'HP'] # elec_resis 90% and HPWH 10%\n self.LocationMasterList = ['living', 'living', 'living', 'living',\n 'unfinished basement'] # 80% living, 20% unfinished basement for now\n self.MaxServiceCallMasterList = [100, 80, 80, 200, 150, 110, 50, 75,\n 100] # this is the max number of annual service calls for load add/shed.\n self.TtankInitialMean = 123 # deg F\n self.TtankInitialStddev = 9.7 # deg F\n self.TsetInitialMean = 123 # deg F\n self.TsetInitialStddev = 9.7 # deg F\n self.minSOC = 0.2 # minimum SoC for aggregator to call for shed service\n self.maxSOC = 0.8 # minimum SoC for aggregator to call for add service\n self.minCapacityAdd = 350 # W-hr, minimum add capacity to be eligible for add service\n self.minCapacityShed = 150 # W-hr, minimum shed capacity to be eligible for shed service\n\n ########################################################################\n\n # Initialize timestamps and local times of the class for future calculations\n self.initial_ts = ts # time stamp to start the simulation\n self.ts = ts\n self.initial_time = self.get_time_of_the_day(ts)\n self.time = self.get_time_of_the_day(ts)\n self.dt = 60 # time step (in seconds)\n\n ##################################\n # Initializing lists to be saved to track indivisual water heater performance over each timestep\n\n # Randomly set up and mix the water heater fleet\n # Random seed for regenerating the same result\n self.seed = 1\n np.random.seed(self.seed)\n\n # generate distribution of initial water heater fleet states.\n # random initial temperatures (with a distribution based on field research by Lutz)\n\n self.TtankInitial = 
np.random.normal(self.TtankInitialMean, self.TtankInitialStddev, self.numWH)\n self.TsetInitial = np.random.normal(self.TsetInitialMean, self.TsetInitialStddev, self.numWH)\n for x in range(self.numWH):\n self.TsetInitial[x] = max(self.TsetInitial[x], 110)\n if self.TtankInitial[x] > self.TsetInitial[x]:\n self.TtankInitial[x] = self.TsetInitial[x]\n\n self.TtankInitial_b = self.TtankInitial\n\n Capacity = [np.random.choice(self.CapacityMasterList) for n in range(self.numWH)]\n # Capacity_fleet_ave = sum(Capacity)/self.numWH\n self.Type = [np.random.choice(self.TypeMasterList) for n in range(self.numWH)]\n Location = [np.random.choice(self.LocationMasterList) for n in range(self.numWH)]\n self.MaxServiceCalls = [np.random.choice(self.MaxServiceCallMasterList) for n in range(self.numWH)]\n\n climate_location = 'Denver' # only allowable climate for now since the pre-run water draw profile generator has only been run for this climate\n # 10 different profiles for each number of bedrooms, bedrooms can be 1-5, gives 50 different draw profiles, can shift profiles by 0-364 days,gives 365*50 = 18250 different water draw profiles for each climate\n self.Tamb = []\n self.RHamb = []\n self.Tmains = []\n self.hot_draw = []\n self.mixed_draw = []\n self.draw = []\n input_param = [0] * self.numWH\n\n for a in range(self.numWH):\n if a <= (\n self.MaxNumAnnualConditions - 1): # if self.numWH > MaxNumAnnualConditions just start reusing older conditions to save computational time\n numbeds = np.random.randint(1, 5)\n shift = np.random.randint(0, 364)\n unit = np.random.randint(0, 9)\n input_param[a] = [a, numbeds, shift, unit]\n (tamb, rhamb, tmains, hotdraw, mixeddraw) = get_annual_conditions(climate_location, Location[a], shift,\n numbeds, unit, self.dt, self.ts)\n\n self.Tamb.append(tamb) # have a Tamb for each step for each water heater being simulated\n self.RHamb.append(rhamb)\n self.Tmains.append(tmains)\n self.hot_draw.append(hotdraw)\n self.mixed_draw.append(mixeddraw)\n 
self.draw.append(\n hotdraw + 0.3 * mixeddraw) # 0.3 is so you don't need to know the exact hot/cold mixture for mixed draws, just assume 70% hot is needed for mixed\n\n\n else: # start re-using conditions\n self.Tamb.append(self.Tamb[a % self.MaxNumAnnualConditions][:])\n self.RHamb.append(self.RHamb[a % self.MaxNumAnnualConditions][:])\n self.Tmains.append(self.Tmains[a % self.MaxNumAnnualConditions][:])\n self.hot_draw.append(self.hot_draw[a % self.MaxNumAnnualConditions][:])\n self.mixed_draw.append(self.mixed_draw[a % self.MaxNumAnnualConditions][:])\n self.draw.append(self.hot_draw[a - self.MaxNumAnnualConditions][:] + 0.3 * self.mixed_draw[\n a - self.MaxNumAnnualConditions][\n :])\n\n # print('len Tamb',len(Tamb[0]), len(Tamb))\n # print('len hotdraw',len(hot_draw[0]), len(hot_draw))\n # print('Tamb',Tamb)\n\n draw_fleet = sum(\n self.draw) # this sums all rows, where each row is a WH, so gives the fleet sum of hot draw at each step\n self.draw_fleet_ave = draw_fleet / self.numWH # this averages all rows, where each row is a WH, so gives the fleet average of hot draw at each step\n self.element_on_last = [0 for x in range(self.numWH)]\n\n self.MaxServiceCalls = [np.random.choice(self.MaxServiceCallMasterList) for n in range(self.numWH)]\n self.AvailableCapacityAdd = [0 for x in range(self.numWH)]\n self.AvailableCapacityShed = [0 for x in range(self.numWH)]\n self.ServiceCallsAccepted = [0 for x in range(self.numWH)]\n self.ServiceProvided = [0 for x in range(self.numWH)]\n\n self.IsAvailableAdd = np.random.randint(2, size=self.numWH + 1)\n self.IsAvailableShed = np.random.randint(2, size=self.numWH + 1)\n\n self.elementOnB = np.random.randint(2, size=self.numWH)\n self.elementOn = np.random.randint(2, size=self.numWH)\n\n self.cycle_off_base = [0 for x in range(self.numWH)]\n self.cycle_on_base = [0 for x in range(self.numWH)]\n self.cycle_off_grid = [0 for x in range(self.numWH)]\n self.cycle_on_grid = [0 for x in range(self.numWH)]\n\n # 
MaxServiceAddedPerTimeStep = [0 for y in range(ServiceRequest.Steps)]\n # MaxServiceShedPerTimeStep = [0 for y in range(ServiceRequest.Steps)]\n\n self.TotalServiceCallsAcceptedPerWH = [0 for y in range(self.numWH)]\n\n self.SoCInit = [0.8 for y in range(self.numWH)]\n self.SOC = self.SoCInit\n self.SOCb = self.SoCInit\n\n self.AvailableCapacityAddInit = [0 for y in range(self.numWH)]\n self.AvailableCapacityShedInit = [0 for y in range(self.numWH)]\n\n self.IsAvailableAddInit = np.random.randint(2, size=self.numWH + 1)\n self.IsAvailableShedInit = np.random.randint(2, size=self.numWH + 1)\n\n self.step = 0\n\n ##############################################################################################################\n\n # Initializing the WH models\n\n self.whs = [WaterHeater(self.Tamb[0], self.RHamb[0], self.Tmains[0], 0, 0, Capacity[number], self.Type[number],\n Location[number], 0, self.MaxServiceCalls[number]) for number in range(self.numWH)]\n\n def get_time_of_the_day(self, ts):\n \"\"\" Method to calculate the time of the day in seconds for the simulation of the fleets \"\"\"\n h, m, s = ts.hour, ts.minute, ts.second\n # Convert the hours, minutes, and seconds to seconds: referenced to 0 AM\n t = int(h) * 3600 + int(m) * 60 + int(s)\n if t >= 0:\n return t\n else:\n return t + 24 * 3600\n\n def process_request(self, fleet_request):\n \"\"\"\n This function takes the fleet request and repackages it for the integral run function\n :param fleet_request: an instance of FleetRequest\n :return fleet_response: an instance of resp\n ## Follow the example of EV\n \"\"\"\n ts = fleet_request.ts_req # starting time\n self.dt = int(\n fleet_request.sim_step.total_seconds()) # fleet_request.sim_step # Sim_step is how long a simulation time step\n # dt in timedelta format\n p_req = fleet_request.P_req\n q_req = fleet_request.Q_req\n\n # call run function with proper inputs\n resp = self.run(p_req, q_req, self.SOC, self.time, self.dt, ts)\n\n return resp\n\n # Example 
code for Frequency Watt Function\n\n def frequency_watt(self, p_req=0, p_prev=0, ts=datetime.utcnow(), location=0, db_UF=0.05, db_OF=0.05): # datetime.\n \"\"\"\n This function takes the requested power, date, time, and location\n and modifys the requested power according to the configured FW21\n :param p_req: real power requested, ts:datetime opject,\n location: numerical designation for the location of the BESS\n :return p_mod: modifyed real power based on FW21 function\n \"\"\"\n f = self.grid.get_frequency(ts, location)\n\n if (f < 60 - db_UF).any():\n p_mod = 0\n elif (f > 60 + db_OF).any():\n p_mod = p_req\n else:\n p_mod = p_prev\n\n return p_mod\n\n def update_soc_due_to_frequency_droop(self, initSOC, p_fleet, dt):\n \"\"\"\n This method returns the modified state of charge of each subfleet\n due to frequency droop in the grid\n \"\"\"\n charge_rate = p_fleet / (self.numWH * 4.5) * 0.1 # Heuristic soc change rate\n SOC_update = initSOC + [charge_rate]\n\n if max(SOC_update) > 1:\n p_fleet = 0\n SOC_update = initSOC\n\n return p_fleet, SOC_update\n\n # Example code for VVO\n '''\n Electric resistance water heaters consume no reactive power\n TODO: Update when HPWHs are added\n '''\n\n def run(self, P_req, Q_req, initSOC, t, dt, ts):\n # ExecuteFleet(self, Steps, Timestep, P_request, Q_request, forecast):\n # run(self, P_req=[0], Q_req=[0], ts=datetime.utcnow(), del_t=timedelta(hours=1)):\n\n # Give the code the capability to respond to None requests\n\n if P_req == None:\n P_req = 0\n if Q_req == None:\n Q_req = 0\n\n P_togrid = 0\n P_service = 0\n P_base = 0\n P_service_max = 0\n\n self.P_request_perWH = P_req / self.numWH # this is only for the first step\n\n number = 0 # index for running through all the water heaters\n\n NumDevicesToCall = 0\n P_service_max0 = 0\n\n # decision making about which water heater to call on for service, check if available at last step, if so then\n # check for SoC > self.minSOC and Soc < self.maxSOC\n\n for n in 
range(self.numWH):\n if self.P_request_perWH < 0 and self.IsAvailableAdd[n] > 0 and self.SOC[n] < self.maxSOC:\n NumDevicesToCall += 1\n elif self.P_request_perWH > 0 and self.IsAvailableShed[n] > 0 and self.SOC[n] > self.minSOC:\n NumDevicesToCall += 1\n\n if P_req != None:\n self.P_request_perWH = P_req / max(NumDevicesToCall,\n 1) # divide the fleet request by the number of devices that can be called upon\n\n # create a .csv outputfile with each water heater's metrics\n # outputfilename = join(self.base_path,\"WH_fleet_outputs.csv\")\n # self.outputfile = open(outputfilename,\"w\")\n # self.outputfile.write(\"Timestep,\")\n\n # create a .csv outputfile with P_service, P_togrid, and P_base\n # outputfilename = join(self.base_path,\"WH_fleet_outputs.csv\")\n\n #################################\n for wh in self.whs: # loop through all water heaters\n\n if P_req == None:\n response = wh.execute(self.TtankInitial[number], self.TtankInitial_b[number], self.TsetInitial[number],\n self.Tamb[number][0], self.RHamb[number][0], self.Tmains[number][0],\n self.draw[number][0], 0, self.Type, self.dt, self.draw_fleet_ave[0],\n self.element_on_last)\n P_service = 0\n if P_req < 0 and self.IsAvailableAdd[number] > 0:\n response = wh.execute(self.TtankInitial[number], self.TtankInitial_b[number], self.TsetInitial[number],\n self.Tamb[number][0], self.RHamb[number][0], self.Tmains[number][0],\n self.draw[number][0], P_req, self.Type, self.dt, self.draw_fleet_ave[0],\n self.element_on_last)\n P_req = P_req - response.Eservice\n P_service += response.Eservice\n elif P_req > 0 and self.IsAvailableShed[number] > 0:\n response = wh.execute(self.TtankInitial[number], self.TtankInitial_b[number], self.TsetInitial[number],\n self.Tamb[number][0], self.RHamb[number][0], self.Tmains[number][0],\n self.draw[number][0], P_req, self.Type, self.dt, self.draw_fleet_ave[0],\n self.element_on_last)\n P_req = P_req + response.Eservice\n P_service -= response.Eservice\n #print(\"P_req = {}, 
P_service = {}, Eservice = {}\".format(P_req,P_service,response.Eservice))\n else:\n response = wh.execute(self.TtankInitial[number], self.TtankInitial_b[number], self.TsetInitial[number],\n self.Tamb[number][0], self.RHamb[number][0], self.Tmains[number][0],\n self.draw[number][0], 0, self.Type, self.dt, self.draw_fleet_ave[0],\n self.element_on_last)\n # print('P_req = {}'.format(P_req))\n # assign returned parameters to associated lists to be recorded\n self.element_on_last[number] = response.ElementOn\n self.TtankInitial[number] = response.Ttank\n self.TtankInitial_b[number] = response.Ttank_b\n\n self.SOC[number] = response.SOC\n self.SOCb[number] = response.SOC_b\n self.IsAvailableAdd[number] = response.IsAvailableAdd\n self.IsAvailableShed[number] = response.IsAvailableShed\n\n self.AvailableCapacityAdd[number] = response.AvailableCapacityAdd\n self.AvailableCapacityShed[number] = response.AvailableCapacityShed\n self.ServiceCallsAccepted[number] = response.ServiceCallsAccepted\n\n self.ServiceProvided[number] = response.Eservice\n\n '''\n P_togrid -= response.Eused\n P_base -= response.Pbase\n\n if P_req <0 or P_req > 0:\n P_response = (P_togrid - P_base)\n else:\n P_response = 0\n '''\n P_togrid -= response.Eused\n P_base -= response.Pbase\n \n \n\n # self.outputfile.write(str(response.Ttank) +\",\" + str(self.TsetInitial[number]) + \",\" + str(response.Eused) + \",\" + str(response.PusedMax) + \",\" + str(response.Eloss) + \",\" + str(response.ElementOn) + \",\" + str(response.Eservice) + \",\" + str(response.SOC) + \",\" + str(response.AvailableCapacityAdd) + \",\" + str(response.AvailableCapacityShed) + \",\" + str(response.ServiceCallsAccepted) + \",\" + str(response.IsAvailableAdd) + \",\" + str(response.IsAvailableShed) + \",\" + str(self.draw[number][0]) + \",\" + str(response.Edel) + \",\")\n\n # resp.sim_step = response.sim_step\n number += 1 # go to next device\n\n if P_req <= 0:\n P_service_max += response.AvailableCapacityShed # NOTE THIS 
ASSUMES THE MAX SERVICE IS LOAD SHED\n else:\n P_service_max0 += response.AvailableCapacityAdd\n P_service_max = -1.0 * P_service_max0\n\n # self.outputfile.write(\"\\n\")\n\n self.step += 1 # To advance the step by step in the disturbance file\n\n # Output Fleet Response\n\n resp = FleetResponse()\n\n resp.P_service = []\n resp.P_service_max = []\n resp.P_service_min = []\n resp.P_togrid = []\n resp.P_togrid_max = []\n resp.P_togrid_min = []\n resp.P_forecast = []\n resp.P_base = []\n resp.E = []\n resp.C = []\n resp.ts = ts\n resp.sim_step = dt\n\n # resp.P_dot_up = resp.P_togrid_max / ServiceRequest.Timestep.seconds\n\n resp.P_service_max = P_service_max\n resp.P_service = P_service\n resp.P_base = P_base\n resp.P_togrid = P_togrid\n # if P_service != 0:\n # print(\"resp.P_base = {}\".format(resp.P_base))\n # print(\"Pbase = {}\".format(resp.P_base))\n # print(\"Ptogrid = {}\".format(resp.P_togrid))\n\n # Available Energy stored at the end of the most recent timestep\n # resp.E += response.Estored\n resp.E = 0\n resp.C += response.SOC / (self.numWH)\n\n resp.Q_togrid = 'NA'\n resp.Q_service = 'NA'\n resp.Q_service_max = 'NA'\n resp.Q_service_min = 'NA'\n resp.Q_togrid_min = 'NA'\n resp.Q_togrid_max = 'NA'\n\n resp.Q_dot_up = 'NA'\n resp.Q_dot_down = 'NA'\n resp.P_dot_up = 0\n resp.P_dot_down = 0\n\n resp.Eff_charge = 1.0 # TODO: change this if we ever use a HPWH to use the HP efficiency\n resp.Eff_discharge = 1.0 # always equal to 1 for this device\n\n resp.P_dot = resp.P_togrid / dt\n resp.P_service_min = 0\n\n resp.dT_hold_limit = 'NA'\n resp.T_restore = 'NA'\n resp.Strike_price = 'NA'\n resp.SOC_cost = 'NA'\n\n # TotalServiceProvidedPerTimeStep[step] = -1.0*self.P_service # per time step for all hvacs -1.0*\n \"\"\"\n Modify the power and SOC of the different subfeets according \n to the frequency droop regulation according to IEEE standard\n \"\"\"\n\n if self.FW21_Enabled and self.is_autonomous:\n power_ac = resp.P_service_max\n p_prev = resp.P_togrid\n 
power_fleet = self.frequency_watt(power_ac, p_prev, self.ts, self.location, self.db_UF_subfleet,\n self.db_OF_subfleet)\n power_fleet, SOC_step = self.update_soc_due_to_frequency_droop(initSOC, power_fleet, dt)\n\n self.time = t + dt\n self.ts = self.ts + timedelta(seconds=dt)\n # Restart time if it surpasses 24 hours\n if self.time > 24 * 3600:\n self.time = self.time - 24 * 3600\n\n # Impact Metrics\n # Update the metrics\n '''\n Ttotal = 0\n SOCTotal = 0\n\n self.cycle_basee = np.sum(self.cycle_off_base)\n self.cycle_basee += np.sum(self.cycle_on_base)\n self.cycle_grid = np.sum(self.cycle_off_grid)\n self.cycle_grid += np.sum(self.cycle_on_grid)\n '''\n\n for number in range(self.numWH):\n if response.Ttank <= self.TsetInitial[number] - 10: # assume 10F deadband (consistent with wh.py)\n self.unmet_hours += 1 * self.sim_step / 3600.0\n\n if resp.P_base == 0 and resp.P_togrid == 0:\n self.ratio_P_togrid_P_base = 1.0\n elif resp.P_base == 0 and resp.P_togrid != 0:\n self.ratio_P_togrid_P_base = 'NA'\n else:\n self.ratio_P_togrid_P_base = resp.P_togrid / (resp.P_base)\n self.energy_impacts += abs(resp.P_service) * (self.sim_step / 3600)\n\n return resp\n\n #################################################\n\n def forecast(self, requests):\n \"\"\"\n This function repackages the list of fleet requests passed to it into the interal run function.\n In order for this to be a forecast, and therfore not change the state variables of the fleet, the\n fleets state variables are saved before calling the run function and then the states are restored\n to their initial values after the forecast simulation is complete.\n :param fleet_requests: list of fleet requests\n :return res: list of service responses\n \"\"\"\n responses = []\n for number in range(self.numWH):\n TtankInitial = self.TtankInitial[number]\n TsetInitial = self.TsetInitial[number]\n Tamb = self.Tamb[number][0]\n RHamb = self.RHamb[number][0]\n Tmains = self.Tmains[number][0]\n draw = self.draw[number][0]\n 
Type = self.Type\n draw_fleet_ave = self.draw_fleet_ave[0]\n SOC = self.SOC[number]\n\n # Iterate and process each request in fleet_requests\n for req in requests:\n ts = req.ts_req\n dt = int(req.sim_step.total_seconds())\n p_req = req.P_req\n q_req = req.Q_req\n res = self.run(p_req, q_req, self.SOC, self.time, dt, ts)\n\n return res\n\n # reset the model\n for number in range(self.numWH): # loop through all HVACs\n self.TtankInitial[number] = TtankInitial[number]\n self.TsetInitial[number] = TsetInitial[number]\n self.Tamb[number][0] = Tamb[number]\n self.RHamb[number][0] = RHamb[number]\n self.Tmains[number][0] = Tmains[number]\n self.draw[number][0] = draw[number]\n self.Type = Type[number]\n self.draw_fleet_ave[0] = draw_fleet_ave[number]\n self.SOC[number] = SOC[number]\n\n return responses\n\n def run_baseline_simulation(self):\n \"\"\"\n Method to run baseline simulation and store power level and SOC of\n the sub fleets.\n \"\"\"\n self.n_days_base = 1 # Only consider 1 day simulation, self.n_days_base\n sim_time = self.n_days_base * self.sim_step\n\n print(\"Running day-ahead baseline simulation ...\")\n print(\"Running baseline right away charging strategy ...\")\n baseline_soc, baseline_power, baseline_cycles, baseline_Ttank = self.run_baseline_right_away(self.n_days_base,\n sim_time)\n\n print(\"Exported baseline soc, Temperatures, power and HVAC cycles ...\")\n\n # Already saved inside the right away function\n # baseline_soc.to_csv(join(path, r'SOC_baseline.csv'), index = False)\n # baseline_power.to_csv(join(path, r'power_baseline.csv'), index = False)\n # baseline_Tin.to_csv(join(path, r'Tin_baseline.csv'), index = False)\n # baseline_Tin_max.to_csv(join(path, r'Tin_max_baseline.csv'), index = False)\n # baseline_Tin_min.to_csv(join(path, r'Tin_min_baseline.csv'), index = False)\n print(\"Exported\")\n\n def run_baseline_right_away(self, n_days_base, sim_time):\n # Run a baseline simulation with (P_request = None)\n\n # initialize df for baseline 
results\n baseline_power = np.zeros([sim_time, ])\n baseline_cycles = np.zeros([sim_time, ])\n baseline_Ttank = np.zeros([sim_time, ])\n baseline_soc = np.zeros([sim_time, ])\n\n # main simulation loop\n for i in range(sim_time):\n for j in self.numWH:\n response = wh.execute(self.TtankInitial[j], self.TtankInitial_b[j], self.TsetInitial[j],\n self.Tamb[j][0], self.RHamb[j][0], self.Tmains[j][0], self.draw[j][0], None,\n self.Type, self.dt, self.draw_fleet_ave[0], self.element_on_last)\n self.TtankInitial[j] = response.Ttank\n self.element_on_last[i, j] = response.ElementOn\n baseline_power.iloc[i, j] = response.Eused_ts\n baseline_cycles.iloc[i, j] = response.cycles_b\n baseline_Ttank.iloc[i, j] = response.Ttank_b\n baseline_soc.iloc[i, j] = response.SOC_b\n\n return baseline_soc, baseline_power, baseline_cycles, baseline_Ttank\n\n def output_impact_metrics(self):\n impact_metrics_DATA = [[\"Impact Metrics File\"],\n [ \"ave_Tin\", \"Cycle_base\", \"Cycle_service\", \"SOC_base\", \"SOC_service\",\n \"Unmet Hours\"]]\n\n impact_metrics_DATA.append(\n [str(self.ave_Tin), str(self.cycle_basee), str(self.cycle_grid), str(self.SOCb_metric),\n str(self.SOC_metric), str(self.unmet_hours)])\n impact_metrics_DATA.append([\"P_togrid/P_base ratio:\", self.ratio_P_togrid_P_base])\n impact_metrics_DATA.append([\"Energy Impacts (kWh):\", self.energy_impacts])\n\n with open('impact_metrics.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(impact_metrics_DATA)\n\n # pass\n\n def change_config(self, fleet_config):\n # \"\"\"\n # This function updates the fleet configuration settings programatically.\n # :param fleet_config: an instance of FleetConfig\n # \"\"\"\n\n # change config\n self.is_P_priority = fleet_config.is_P_priority\n self.is_autonomous = fleet_config.is_autonomous\n self.FW_Param = fleet_config.FW_Param # FW_Param=[db_UF,db_OF,k_UF,k_OF]\n self.fw_function.db_UF = self.FW_Param[0]\n self.fw_function.db_OF = self.FW_Param[1]\n 
self.fw_function.k_UF = self.FW_Param[2]\n self.fw_function.k_OF = self.FW_Param[3]\n self.autonomous_threshold = fleet_config.autonomous_threshold\n\n # self.Vset = fleet_config.v_thresholds\n\n def assigned_service_kW(self):\n \"\"\"\n This function allows weight to be passed to the service model.\n Scale the service to the size of the fleet\n \"\"\"\n return self.service_weight * self.fleet_rating\n\n def print_performance_info(self):\n \"\"\"\n This function is to dump the performance metrics either to screen or file or both\n :return:\n \"\"\"\n pass\n ###############################################################################\n\n\ndef get_annual_conditions(climate_location, installation_location, days_shift, n_br, unit, timestep_sec, start_time):\n # reads from 8760 (or 8760 * 60) input files for ambient air temp, RH, mains temp, and draw profile and loads data into arrays for future use\n timestep_min = timestep_sec / 60.\n # Decompose utc timestamp to get the starting hour\n startmonthindex = [[1, 0], [2, 31], [3, 59], [4, 90], [5, 120], [6, 151], [7, 181], [8, 212], [9, 243], [10, 273],\n [11, 304], [12, 334]]\n start_month = start_time.month\n start_day = start_time.day\n start_hour = start_time.hour\n for m in startmonthindex:\n if start_month == m[0]:\n start_day += m[1]\n break\n start_hr = (start_day - 1) * 24. + start_hour\n\n num_steps_per_hr = int(\n np.ceil((60. / float(timestep_min)))) # how many hourly steps do you need to take if timestep is in minutes\n num_steps = 31 * 24 * 60 # TODO: what if someone wants to simulate longer than a month, or the simulation wraps over the end of the year?\n num_hrs = int(np.ceil(float(num_steps) / float(num_steps_per_hr)))\n num_mins = int(np.ceil(float(num_steps) * float(timestep_min)))\n # print('num_mins',num_mins)\n steps_per_min = int(np.ceil(1. / float(timestep_min)))\n Tamb = []\n RHamb = []\n Tmains = []\n if climate_location != 'Denver':\n raise NameError(\n \"Error! 
Only allowing Denver as a run location for now. Eventually we'll allow different locations and load different files based on the location.\")\n if installation_location == 'living':\n amb_temp_column = 1\n amb_rh_column = 2\n elif installation_location == 'unfinished basement':\n amb_temp_column = 3\n amb_rh_column = 4\n elif installation_location == 'garage':\n amb_temp_column = 5\n amb_rh_column = 6\n elif installation_location == 'unifinished attic':\n amb_temp_column = 7\n amb_rh_column = 8\n else:\n raise NameError(\n \"Error! Only allowed installation locations are living, unfinished basement, garage, unfinished attic. Change the installation location to a valid location\")\n mains_temp_column = 9\n\n linenum = 0\n\n ambient_cond_file = open((os.path.join(os.path.dirname(__file__), 'data_files', 'denver_conditions.csv')),\n 'r') # hourly ambient air temperature and RH\n for line in ambient_cond_file:\n if linenum > start_hr and linenum <= (\n start_hr + num_hrs): # skip header all the way to the start hour but only go as many steps as are needed\n items = line.strip().split(',')\n for b in range(min(num_steps_per_hr, num_steps)): # repeat for however many steps there are in an hr\n Tamb.append([float(items[amb_temp_column])])\n RHamb.append([float(items[amb_rh_column])])\n Tmains.append([float(items[mains_temp_column])])\n b += 1\n linenum += 1\n ambient_cond_file.close()\n\n # Read in max and average values for the draw profiles\n linenum = 0\n n_beds = 0\n n_unit = 0\n\n # Total gal/day draw numbers based on BA HSP\n sh_hsp_tot = 14.0 + 4.67 * float(n_br)\n s_hsp_tot = 12.5 + 4.16 * float(n_br)\n cw_hsp_tot = 2.35 + 0.78 * float(n_br)\n dw_hsp_tot = 2.26 + 0.75 * float(n_br)\n b_hsp_tot = 3.50 + 1.17 * float(n_br)\n\n sh_max = np.zeros((5, 10))\n s_max = np.zeros((5, 10))\n b_max = np.zeros((5, 10))\n cw_max = np.zeros((5, 10))\n dw_max = np.zeros((5, 10))\n sh_sum = np.zeros((5, 10))\n s_sum = np.zeros((5, 10))\n b_sum = np.zeros((5, 10))\n cw_sum = 
np.zeros((5, 10))\n dw_sum = np.zeros((5, 10))\n\n sum_max_flows_file = open(\n (os.path.join(os.path.dirname(__file__), 'data_files', 'DrawProfiles', 'MinuteDrawProfilesMaxFlows.csv')),\n 'r') # sum and max flows for all units and # of bedrooms\n for line in sum_max_flows_file:\n if linenum > 0: # this linenum is in min, not hours\n items = line.strip().split(',')\n n_beds = int(items[0]) - 1\n n_unit = int(items[1]) - 1\n # column is unit number, row is # of bedrooms. Taken directly from BEopt\n sh_max[n_beds, n_unit] = float(items[2])\n s_max[n_beds, n_unit] = float(items[3])\n b_max[n_beds, n_unit] = float(items[4])\n cw_max[n_beds, n_unit] = float(items[5])\n dw_max[n_beds, n_unit] = float(items[6])\n sh_sum[n_beds, n_unit] = float(items[7])\n s_sum[n_beds, n_unit] = float(items[8])\n b_sum[n_beds, n_unit] = float(items[9])\n cw_sum[n_beds, n_unit] = float(items[10])\n dw_sum[n_beds, n_unit] = float(items[11])\n linenum += 1\n sum_max_flows_file.close()\n\n linenum = 0\n # Read in individual draw profiles\n # steps_per_year = int(np.ceil(60 * 24 * 365 / timestep_min))\n hot_draw = np.zeros((num_steps, 1)) # steps_per_year\n mixed_draw = np.zeros((num_steps, 1)) # steps_per_year\n # take into account days shifted\n draw_idx = 60 * 24 * days_shift\n if num_steps <= draw_idx: # if there aren't enough steps being simulated to account for the offset period then just ignore it\n offset = 0\n else:\n offset = draw_idx\n\n draw_profile_file = open((os.path.join(os.path.dirname(__file__), 'data_files', 'DrawProfiles',\n 'DHWDrawSchedule_{}bed_unit{}_1min_fraction.csv'.format(n_br, unit))),\n 'r') # minutely draw profile (shower, sink, CW, DW, bath)\n agghotflow = 0.0\n aggmixflow = 0.0\n nbr = n_br - 1 # go back to starting index at zero for python internal calcs\n lineidx = 0\n for line in draw_profile_file:\n if linenum > start_hr * 60 and linenum <= start_hr * 60 + num_mins: # this linenum is in min\n\n items = line.strip().split(',')\n hot_flow = 0.0\n mixed_flow = 
0.0\n\n if items[0] != '':\n sh_draw = float(items[0]) * sh_max[nbr, unit] * (sh_hsp_tot / sh_sum[nbr, unit])\n mixed_flow += sh_draw\n if items[1] != '':\n s_draw = float(items[1]) * s_max[nbr, unit] * (s_hsp_tot / s_sum[nbr, unit])\n mixed_flow += s_draw\n if items[2] != '':\n cw_draw = float(items[2]) * cw_max[nbr, unit] * (cw_hsp_tot / cw_sum[nbr, unit])\n hot_flow += cw_draw\n if items[3] != '':\n dw_draw = float(items[3]) * dw_max[nbr, unit] * (dw_hsp_tot / dw_sum[nbr, unit])\n hot_flow += dw_draw\n if items[4] != '':\n b_draw = float(items[4]) * b_max[nbr, unit] * (b_hsp_tot / b_sum[nbr, unit])\n mixed_flow += b_draw\n agghotflow += hot_flow\n aggmixflow += mixed_flow\n\n # aggregate whenever the linenum is a multiple of timestep_min. Each increment in lineum represents one minute. Timestep_min is the number of minutes per timestep\n if timestep_min >= 1: # aggregate if timesteps are >= 1 minute\n if linenum % timestep_min == 0:\n hot_draw[lineidx] += agghotflow\n mixed_draw[lineidx] += aggmixflow\n agghotflow = 0\n aggmixflow = 0\n draw_idx += 1\n elif timestep_min < 1: # repeat the value if timesteps are < 1 minute\n # print('len draws',len(hot_draw))\n # if linenum == 1:\n # hot_draw[offset] = hot_flow #assume hot_draw = 0 up until draw_idx timestep\n # mixed_draw[offset] = mixed_flow #assume mixed_draw = 0 up until draw_idx timestep\n\n for c in range(min(steps_per_min, num_steps)): # repeat for however many steps there are in a minute\n # hot_draw = np.append(hot_draw,hot_flow)\n # mixed_draw = np.append(mixed_draw,mixed_flow)\n hot_draw[lineidx + c] = hot_flow # assume hot_draw = 0 up until draw_idx timestep\n mixed_draw[lineidx + c] = mixed_flow\n c += 1\n # print('len hot_draw', len(hot_draw))\n else:\n hot_draw[lineidx] = agghotflow\n mixed_draw[lineidx] = aggmixflow\n lineidx += 1\n linenum += 1\n\n # if draw_idx >= steps_per_year:\n # draw_idx = 0\n draw_profile_file.close()\n return Tamb, RHamb, Tmains, hot_draw, 
mixed_draw\n","sub_path":"src/fleets/water_heater_fleet/wh_fleet.py","file_name":"wh_fleet.py","file_ext":"py","file_size_in_byte":41876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"48918674","text":"\"\"\"Environment used in training the Djikstra approach.\nThis simple environment is very different from the Pomme environments.\n1. It's an NxN grid - a single agent starts at A and has to reach a target B.\n2. Both A and B are generated randomly. There should be an option to include\n M rigid walls as well (positioned randomly) into which the agent can't move.\n3. The \"expert\" will be Djikstra.\n4. The agent gets +1 for reaching the goal within max timesteps (dependent on\n N, the grid size) and -1 for not reaching the goal.\nObservations should be:\n1. The agent's position.\n2. The goal's position.\n3. The positions of the rigid walls.\nAs input, this can be three feature maps and num_stack = 1. The agent will not\nneed to have an LSTM.\nOutput should be a single discrete action representing [Up, Down, Left, Right].\n\"\"\"\nfrom collections import defaultdict\nimport json\nimport os\nimport queue\nimport random\nimport time\n\nfrom gym import spaces\nimport numpy as np\n\nfrom .. import constants\nfrom .. 
import utility\nfrom .v0 import Pomme as PommeV0\n\n\nclass Grid(PommeV0):\n\n def _set_action_space(self):\n self.action_space = spaces.Discrete(5)\n\n def _set_observation_space(self):\n # TODO: do we need to explicitly have the agent's posiiton?\n # what about the goal's position\n \"\"\"The Observation Space for the single agent.\n There are a total of board_size^2 + 2 observations:\n - all of the board (board_size^2)\n - agent's position (2)\n \"\"\"\n bss = self._board_size**2\n min_obs = [0] * bss + [0] * 2\n max_obs = [len(constants.GridItem)] * bss + [self._board_size] * 2\n self.observation_space = spaces.Box(\n np.array(min_obs), np.array(max_obs))\n\n def set_reward_shaping(self, step_loss=0.0, bomb_reward=None,\n item_reward=None, use_second_place=False):\n self._step_loss = step_loss\n\n def set_florensa_starts(self, starts):\n self._florensa_starts = starts\n\n def get_florensa_start(self):\n return self._florensa_start_id\n\n def make_board(self):\n self._board = utility.make_board_grid(\n size=self._board_size, num_rigid=self._num_rigid, min_length=10)\n\n def get_observations(self):\n self.observations = self.model.get_observations_grid(\n self._board, self._agents, self._max_steps,\n step_count=self._step_count)\n return self.observations\n\n def _get_rewards(self):\n \"\"\"\n The agent receives reward +1 for reaching the goal and\n penalty -0.1 for each step it takes in the environment.\n \"\"\"\n agent_pos = self.observations[0]['position']\n goal_pos = self.observations[0]['goal_position']\n rewards = self.model.get_rewards_grid(agent_pos, goal_pos)\n rewards = [r - self._step_loss for r in rewards]\n return rewards\n\n def _get_done(self):\n agent_pos = self.observations[0]['position']\n goal_pos = self.observations[0]['goal_position']\n return self.model.get_done_grid(agent_pos, goal_pos,\n self._step_count, self._max_steps)\n\n def _get_info(self, done, rewards):\n agent_pos = self.observations[0]['position']\n goal_pos = 
self.observations[0]['goal_position']\n ret = self.model.get_info_grid(done, agent_pos, goal_pos)\n ret['step_count'] = self._step_count\n ret['optimal_num_steps'] = self._optimal_num_steps\n try:\n ret['game_state_file'] = self._game_state_file\n except AttributeError:\n pass\n if hasattr(self, '_game_state_step_start'):\n ret['game_state_step_start'] = self._game_state_step_start\n ret['game_state_step_start_beg'] = self._game_state_step_start_beg\n\n if hasattr(self, '_florensa_start_id'):\n ret['florensa_start_id'] = self._florensa_start_id\n\n return ret\n\n def change_game_state_distribution(self):\n self._game_state_distribution = 'genesis'\n\n def reset(self):\n self._optimal_num_steps = 0\n assert (self._agents is not None)\n\n def get_game_state_step(step_count):\n # TODO: the game_state_distribution types will need\n # some adjustment to the simple grid env\n if self._game_state_distribution == 'uniform':\n # Pick a random game state to start from.\n step = random.choice(range(step_count))\n elif self._game_state_distribution == 'genesis':\n step = 0\n elif self._game_state_distribution.startswith('uniformBounds'):\n # (0, 32), (24, 64), (56, 128), (120, 256), (248, 512), (504, 1024), (1016, 2048)\n # --> (504, 1024) --> (0, step_count - 504)\n # --> (1016, 2048) --> (0, 1)\n lb = self._uniform_v\n if self._uniform_v < 40:\n ub = 1\n else:\n ub = int(lb / 2) - 8\n\n # at lb == 512, minrange is either 0 or step_count - 512\n # then maxrange is either 1, step_count - 511, or step_count - 244\n # step_count - 244 > step_count - 511, so it can't be the latter.\n # thus it's either 1 or step_count - 244.\n # step is then either 0, random(0, step_count - 244) if step_count > 244,\n # or random(step_count - 512, step_count - 244) if step_count > 512.\n minrange = max(0, step_count - lb)\n maxrange = max(minrange + 1, step_count - ub)\n step = random.choice(range(minrange, maxrange))\n elif self._game_state_distribution.startswith('grUniformBounds'):\n # This is 
the Grid version of uniformBounds. The max value here is 47.\n # (0, 4), (4, 8), (8, 16), (16, 32), (32, 64), (max, max)\n lb = self._uniform_v\n if self._uniform_v < 5:\n ub = 1\n else:\n ub = lb // 2\n\n minrange = max(0, step_count - lb)\n maxrange = max(minrange + 1, step_count - ub)\n step = random.choice(range(minrange, maxrange))\n elif utility.is_int(self._game_state_distribution):\n game_state_int = int(self._game_state_distribution)\n min_range = max(0, step_count - game_state_int - 5)\n max_range = min(max(0, step_count - game_state_int) + 5,\n step_count)\n step = random.choice(range(min_range, max_range))\n else:\n raise\n return step\n\n if self._online_backplay:\n # Run a game until completion, saving the states at each step.\n # Then, pick from the right set of steps.\n board, agent_pos, goal_pos, num_inaccess = utility.make_board_grid(\n size=self._board_size, num_rigid=self._num_rigid,\n extra=True)\n path = self._compute_path_json(board, agent_pos, goal_pos)\n counter = 1\n while len(path) < 35:\n board, agent_pos, goal_pos, inaccess_counter = utility.make_board_grid(\n size=self._board_size, num_rigid=self._num_rigid,\n extra=True)\n path = self._compute_path_json(board, agent_pos, goal_pos)\n counter += 1\n num_inaccess += inaccess_counter\n # self._num_make.append(counter)\n # self._num_inac.append(num_inaccess)\n # if self.rank == 0:\n # print(\"Avg num make / num inac: %d / %.3f / %.3f\" % (\n # len(self._num_make), np.mean(self._num_make),\n # np.mean(self._num_inac)))\n\n step = get_game_state_step(step_count=len(path))\n self._game_state_step_start = len(path) - step + 1\n self._game_state_step_start_beg = step\n info = path[step]\n self._board = info['board'].astype(np.uint8)\n self._board_size = info['board_size']\n self._step_count = info['step_count']\n agent = self._agents[0]\n agent.set_start_position(info['position'])\n agent.set_goal_position(info['goal'])\n agent.reset(info['step_count'])\n self._optimal_num_steps = len(path)\n 
elif self._game_state_distribution == 'florensa':\n if not hasattr(self, '_florensa_starts'):\n self._florensa_starts = None\n\n if self._florensa_starts:\n self._florensa_start_id = random.choice(list(self._florensa_starts.keys()))\n self.set_json_info(self._florensa_starts[self._florensa_start_id]['json'])\n else:\n self._step_count = 0\n self.make_board()\n for agent_id, agent in enumerate(self._agents):\n pos_agent = np.where(self._board == constants.GridItem.Agent.value)\n row_agent = pos_agent[0][0]\n col_agent = pos_agent[1][0]\n agent.set_start_position((row_agent, col_agent))\n\n pos_goal = np.where(self._board == constants.GridItem.Goal.value)\n row_goal = pos_goal[0][0]\n col_goal = pos_goal[1][0]\n agent.set_goal_position((row_goal, col_goal))\n\n agent.reset()\n self._optimal_num_steps = self._compute_optimal(\n self._board, self._agents[0].position, self._agents[0].goal_position)\n elif hasattr(self, '_applicable_games') and self._applicable_games:\n directory, step_count = random.choice(self._applicable_games)\n counter = 0\n while True:\n if counter == 5:\n raise\n step = get_game_state_step(step_count)\n game_state_file = os.path.join(directory, '%03d.json' % step)\n counter += 1\n try:\n while not os.path.exists(game_state_file):\n step = get_game_state_step(step_count)\n game_state_file = os.path.join(directory, '%03d.json' % step)\n self._game_state_step_start = step_count - step + 1\n self._game_state_step_start_beg = step\n self._game_state_file = game_state_file\n with open(game_state_file, 'r') as f:\n self.set_json_info(json.loads(f.read()))\n break\n except json.decoder.JSONDecodeError as e:\n print(\"PR --> GSF: %s / sc: %d / step: %d...\" %\n (game_state_file, step_count, step))\n logging.warn(\"LOG --> GSF: %s / sc: %d / step: %d...\" %\n (game_state_file, step_count, step))\n if directory not in self._optimal_num_steps_directory:\n optimal_num_steps = self._compute_optimal(\n self._board, self._agents[0].position, 
self._agents[0].goal_position)\n self._optimal_num_steps_directory[directory] = optimal_num_steps\n self._optimal_num_steps = self._optimal_num_steps_directory[directory]\n elif self._init_game_state is not None:\n self.set_json_info()\n else:\n self._step_count = 0\n self.make_board()\n for agent_id, agent in enumerate(self._agents):\n pos_agent = np.where(self._board == constants.GridItem.Agent.value)\n row_agent = pos_agent[0][0]\n col_agent = pos_agent[1][0]\n agent.set_start_position((row_agent, col_agent))\n\n pos_goal = np.where(self._board == constants.GridItem.Goal.value)\n row_goal = pos_goal[0][0]\n col_goal = pos_goal[1][0]\n agent.set_goal_position((row_goal, col_goal))\n\n agent.reset()\n self._optimal_num_steps = self._compute_optimal(\n self._board, self._agents[0].position, self._agents[0].goal_position)\n return self.get_observations()\n\n @staticmethod\n def _compute_optimal(board, start, end):\n seen = set()\n dist = defaultdict(lambda: 1000000)\n dist[start] = 0\n Q = queue.PriorityQueue()\n Q.put((dist[start], start))\n while not Q.empty():\n d, position = Q.get()\n x, y = position\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n if x + row >= len(board) or x + row < 0:\n continue\n if y + col >= len(board) or y + col < 0:\n continue\n\n new_position = (x + row, y + col)\n if board[new_position] == 1:\n continue\n\n val = d + 1\n if val < dist[new_position]:\n dist[new_position] = val\n\n if new_position == end:\n return dist[new_position]\n\n if new_position not in seen:\n seen.add(new_position)\n Q.put((dist[new_position], new_position))\n return None\n\n def set_json_info(self, game_state=None):\n game_state = game_state or self._init_game_state\n\n board_size = int(game_state['board_size'])\n self._board_size = board_size\n self._step_count = int(game_state['step_count'])\n\n board_array = json.loads(game_state['board'])\n self._board = np.ones((board_size, board_size)).astype(np.uint8)\n self._board *= 
constants.Item.Passage.value\n for x in range(self._board_size):\n for y in range(self._board_size):\n self._board[x, y] = board_array[x][y]\n\n # NOTE: We assume there is just one agent.\n agent_array = json.loads(game_state['agents'])\n agent = self._agents[0]\n agent.set_start_position(tuple(agent_array[0]['position']))\n agent.set_goal_position(tuple(agent_array[0]['goal_position']))\n agent.reset(self._step_count)\n self._board[agent.position] = constants.GridItem.Agent.value\n self._board[agent.goal_position] = constants.GridItem.Goal.value\n\n def act(self, obs, acting_agent_ids=[], ex_agent_ids=None):\n if ex_agent_ids is not None:\n agents = [agent for agent in self._agents \\\n if agent.agent_id not in ex_agent_ids]\n else:\n agents = [agent for agent in self._agents \\\n if agent.agent_id not in self.training_agents]\n # TODO: Replace this hack with something more reasonable.\n agents = [agent for agent in agents if \\\n agent.agent_id not in acting_agent_ids]\n return self.model.act_grid(agents, obs, self.action_space)\n\n def step(self, actions):\n results = self.model.step_grid(actions, self._board, self._agents)\n self._board, self._agents = results[:2]\n\n # NOTE: this should be above calling the below functions since they\n # take the step_count to change obs etc., so step_count should be\n # updated before\n self._step_count += 1\n\n # NOTE: get_observations needs to be called before\n # the others to change obs state!!\n obs = self.get_observations()\n done = self._get_done()\n reward = self._get_rewards()\n info = self._get_info(done, reward)\n\n return obs, reward, done, info\n\n @staticmethod\n def featurize(obs):\n board = obs[\"board\"].reshape(-1).astype(np.float32)\n position = utility.make_np_float(obs[\"position\"])\n goal_position = utility.make_np_float(obs[\"goal_position\"])\n ret = np.concatenate(board, agent_position, goal_position)\n return ret\n\n def get_json_info(self):\n \"\"\"Returns a json snapshot of the current game 
state.\"\"\"\n ret = {\n 'board_size': self._board_size,\n 'step_count': self._step_count,\n 'board': self._board,\n 'agents': self._agents,\n }\n for key, value in ret.items():\n ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)\n return ret\n\n def render(self,\n mode=None,\n close=False,\n record_pngs_dir=None,\n record_json_dir=None,\n do_sleep=True\n ):\n if close:\n self.close()\n return\n\n print(\"Step %d / Optimal %d.\" % (self._step_count,\n self._optimal_num_steps))\n print(self._board)\n print(\"\\n\")\n time.sleep(1.0 / self.render_fps)\n\n @staticmethod\n def _compute_path_json(board, start, end):\n seen = set()\n prev = {}\n dist = defaultdict(lambda: 1000000)\n dist[start] = 0\n Q = queue.PriorityQueue()\n Q.put((dist[start], start))\n\n found = False\n while not Q.empty() and not found:\n d, position = Q.get()\n x, y = position\n for row, col in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n new_position = (x + row, y + col)\n if new_position in seen:\n continue\n elif x + row >= len(board) or x + row < 0:\n continue\n elif y + col >= len(board) or y + col < 0:\n continue\n elif board[new_position] == 1:\n continue\n\n val = d + 1\n if val < dist[new_position]:\n dist[new_position] = val\n prev[new_position] = position\n\n seen.add(new_position)\n Q.put((dist[new_position], new_position))\n\n if new_position == end:\n found = True\n break\n\n path = []\n curr = end\n while prev.get(curr):\n curr = prev[curr]\n curr_board = board.copy()\n curr_board[start] = constants.GridItem.Passage.value\n curr_board[curr] = constants.GridItem.Agent.value\n path.append({\n 'goal': end,\n 'position': curr,\n 'board': curr_board,\n 'board_size': len(curr_board),\n 'step_count': dist[curr]\n })\n\n path = list(reversed(path))\n return path\n","sub_path":"pommerman/envs/v4.py","file_name":"v4.py","file_ext":"py","file_size_in_byte":18469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"165710523","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport random\r\ndigits = [\"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\"]\r\ninds = [0,6,5,4,3,2,8,7,9,1]\r\nrandom.shuffle(inds)\r\ndef solve(s):\r\n s = [c for c in s]\r\n d = {}\r\n for c in s:\r\n if c not in d:\r\n d[c] = 0\r\n d[c] += 1\r\n ans = []\r\n i = 0\r\n # print s\r\n while any(d[c] > 0 for c in d.keys()):\r\n digit = digits[inds[i]]\r\n dd = {}\r\n for c in digit:\r\n if c not in dd:\r\n dd[c] = 0\r\n dd[c] += 1\r\n if all(c in d.keys() for c in dd.keys()) and all(dd[c] <= d[c] for c in dd.keys()):\r\n ans.append(inds[i])\r\n for c in dd.keys():\r\n d[c] -= dd[c]\r\n # print \"found \", inds[i], \" \", d\r\n else:\r\n i += 1\r\n return \"\".join([str(c) for c in sorted(ans)])\r\n\r\nif __name__ == \"__main__\":\r\n testcases = input()\r\n\r\n for caseNr in xrange(1, testcases + 1):\r\n s = str(raw_input())\r\n print(\"Case #%i: %s\" % (caseNr, solve(s)))\r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_2/16_2_1_sharno_1B_A.py","file_name":"16_2_1_sharno_1B_A.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"490927725","text":"\"\"\"\n编写初始化日志的函数\n\"\"\"\nimport logging\nfrom logging import handlers\nimport os\n\n\"\"\"全局变量的封装\"\"\"\n# 绝对路径的封装\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n# url的封装\nHOST = \"http://182.92.81.159\"\n# headers的封装\nHEADERS = {\"Content-Type\": \"application/json\"}\n# 登陆后ID的封装\nEMP_ID = \"\"\n\n\"\"\"日志器函数封装\"\"\"\n\n\ndef init_logging():\n # 创建日志器\n logger = logging.getLogger()\n # 设置日志等级\n logger.setLevel(logging.INFO)\n # 设置处理器\n # 设置控制台处理器\n sh = logging.StreamHandler()\n # 设置文件处理器\n # TimedRotatingFileHandler 可以用来帮助我们切分日志:\n # 按时间来设置日志\n filename = BASE_DIR + \"/log/ihrm.log\"\n fh = logging.handlers.TimedRotatingFileHandler(filename,when='m', interval=1, 
backupCount=7)\n # 设置格式化器\n fmt = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s'\n formatter = logging.Formatter(fmt)\n # 将格式化器添加到处理器中\n # 添加到控制台\n sh.setFormatter(formatter)\n # 添加到文件处理器\n fh.setFormatter(formatter)\n # 将处理器添加到日志器\n logger.addHandler(fh)\n logger.addHandler(sh)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"205765657","text":"from apscheduler.executors.gevent import GeventExecutor\nfrom requests import get\nfrom .system import free_memory\nfrom gevent import sleep\nfrom logging import getLogger\nfrom random import random\nimport consul\nimport iso8601\nfrom datetime import timedelta, datetime\nfrom apscheduler.schedulers.gevent import GeventScheduler\nfrom gevent.subprocess import check_call, CalledProcessError\n\nfrom uuid import uuid4\n\nLOCK_RETRIES = 6\nSLEEP_BETWEEN_TRIES_LOCK = 10\nWORKER_TIME_RUN = 16 * 60\n\nAWS_META_DATA_URL = 'http://169.254.169.254/latest/meta-data/instance-id'\nSERVER_NAME_PREFIX = 'AUCTION_WORKER_{}'\n\nMIN_AUCTION_START_TIME_RESERV = timedelta(seconds=60)\nMAX_AUCTION_START_TIME_RESERV = timedelta(seconds=15 * 60)\n\n\ndef get_server_name():\n try:\n r = get(AWS_META_DATA_URL, timeout=10)\n suffix = r.body_string()\n except Exception:\n suffix = uuid4().hex\n return SERVER_NAME_PREFIX.format(suffix)\n\n\nclass AuctionExecutor(GeventExecutor):\n\n def start(self, scheduler, alias):\n return super(AuctionExecutor, self).start(scheduler, alias)\n\n def shutdown(self, wait=True):\n \"\"\"\n Shuts down this executor.\n\n :param bool wait: ``True`` to wait until all submitted jobs\n have been executed\n \"\"\"\n while len(self._instances) > 0:\n sleep(1)\n\n def _run_job_success(self, job_id, events):\n \"\"\"Called by the executor with the list of generated events when `run_job` has been successfully called.\"\"\"\n 
super(GeventExecutor, self)._run_job_success(job_id, events)\n self.cleanup_jobs_instances(job_id)\n\n def _run_job_error(self, job_id, exc, traceback=None):\n \"\"\"Called by the executor with the exception if there is an error calling `run_job`.\"\"\"\n super(GeventExecutor, self)._run_job_error(job_id, exc, traceback=traceback)\n self.cleanup_jobs_instances(job_id)\n\n def cleanup_jobs_instances(self, job_id):\n with self._lock:\n if self._instances[job_id] == 0:\n del self._instances[job_id]\n\n\nclass AuctionScheduler(GeventScheduler):\n def __init__(self, server_name, config,\n limit_auctions=500,\n limit_free_memory=0.15,\n logger=getLogger(__name__),\n *args, **kwargs):\n super(AuctionScheduler, self).__init__(*args, **kwargs)\n self.server_name = server_name\n self.config = config\n self.execution_stopped = False\n self.use_consul = self.config.get('main', {}).get('use_consul', True)\n if self.use_consul:\n self.consul = consul.Consul()\n self.logger = logger\n self._limit_pool_lock = self._create_lock()\n self._limit_auctions = self.config['main'].get('limit_auctions', int(limit_auctions))\n self._limit_free_memory = self.config['main'].get('limit_free_memory', float(limit_free_memory))\n self._count_auctions = 0\n self.exit = False\n\n def _create_default_executor(self):\n return AuctionExecutor()\n\n def convert_datetime(self, datetime_stamp):\n return iso8601.parse_date(datetime_stamp).astimezone(self.timezone)\n\n def get_auction_worker_configuration_path(self, view_value, key='api_version'):\n value = view_value.get(key, '')\n if value:\n return self.config['main'].get(\n 'auction_worker_config_for_{}_{}'.format(key, value), self.config['main']['auction_worker_config']\n )\n\n return self.config['main']['auction_worker_config']\n\n def shutdown(self, *args, **kwargs):\n self.exit = True\n response = super(AuctionScheduler, self).shutdown(*args, **kwargs)\n self.execution_stopped = True\n return response\n\n def _auction_fucn(self, args):\n try:\n 
check_call(args)\n except CalledProcessError:\n self.logger.error(\"Exit with error {}\".format(args[0]))\n\n def run_auction_func(self, args, ttl=WORKER_TIME_RUN):\n if self._count_auctions >= self._limit_auctions:\n self.logger.info(\"Limited by count\")\n return\n\n if free_memory() <= self._limit_free_memory:\n self.logger.info(\"Limited by memory\")\n return\n\n document_id = args[0]\n sleep(random())\n if self.use_consul:\n i = LOCK_RETRIES\n session = self.consul.session.create(behavior='delete', ttl=WORKER_TIME_RUN)\n while i > 0:\n if self.consul.kv.put(\"auction_{}\".format(document_id), self.server_name, acquire=session):\n self.logger.info(\"Run worker for document {}\".format(document_id))\n with self._limit_pool_lock:\n self._count_auctions += 1\n\n self._auction_fucn(args)\n\n self.logger.info(\"Finished {}\".format(document_id))\n self.consul.session.destroy(session)\n with self._limit_pool_lock:\n self._count_auctions -= 1\n return\n sleep(SLEEP_BETWEEN_TRIES_LOCK)\n i -= 1\n\n self.logger.debug(\"Locked on other server\")\n self.consul.session.destroy(session)\n else:\n self._auction_fucn(args)\n\n def schedule_auction(self, document_id, view_value, args):\n auction_start_date = self.convert_datetime(view_value['start'])\n if self._executors['default']._instances.get(document_id):\n return\n job = self.get_job(document_id)\n if job:\n job_auction_start_date = job.args[2]['start'] # job.args[2] view_value\n if job_auction_start_date == auction_start_date:\n return\n self.logger.warning(\"Changed start date: {}\".format(document_id))\n\n now = datetime.now(self.timezone)\n if auction_start_date - now > MAX_AUCTION_START_TIME_RESERV:\n AW_date = auction_start_date - MAX_AUCTION_START_TIME_RESERV\n elif auction_start_date - now > MIN_AUCTION_START_TIME_RESERV:\n self.logger.warning('Planned auction\\'s starts date in the past')\n AW_date = now\n else:\n return\n self.logger.info('Scedule start of {} at {} ({})'.format(document_id,\n AW_date,\n 
view_value['start']))\n\n self.add_job(self.run_auction_func, kwargs=dict(args=args),\n misfire_grace_time=60,\n next_run_time=AW_date,\n id=document_id,\n replace_existing=True)\n","sub_path":"openprocurement/auction/helpers/chronograph.py","file_name":"chronograph.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"580122806","text":"import roslib;\nimport rospy\nimport smach\nimport smach_ros\n\nfrom std_msgs.msg import Bool\nfrom geometry_msgs.msg import Twist,Vector3\n\n#For testing:\n#rostopic pub -1 /manipulation_result std_msgs/Bool \"success\"\n\n# define state Manipulation:\nclass Manipulation(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['success','home','collision','error'],\n input_keys=['point','after_gripper'],output_keys=['after_gripper'])\n self.answer_resived=False\n self.answer=''\n rospy.Subscriber(\"manipulation_result\", Bool, self.answer_collback)\n\n def execute(self, userdata):\n rospy.loginfo('Executing state Manipulation:')\n \n self.send_task(userdata.point)\n #self.wait_for_answer()\n rospy.sleep(1)\n self.answer=True\n \n if (self.answer ==True):\n if(userdata.after_gripper==True):\n userdata.after_gripper=False\n return 'home'\n return 'success'\n else:\n if self.answer==False:\n return 'collision'\n else:\n return 'error'\n \n #Send data to Manipulation mudule:\n def send_task(self,point):\n rospy.loginfo('Send task to Manipualator module')\n #generate message:\n #msg=Twist(Vector3(point[0],point[1],point[2]),Vector3(point[3],point[4],int(gripper_open)))\n msg=Twist(Vector3(1,2,2),Vector3(1,2,1))\n \n pub = rospy.Publisher('manipulator', Twist, queue_size=10)\n pub.publish(msg)\n return 0\n def wait_for_answer(self):\n while (True):\n if (self.answer_resived):\n self.answer_resived=False\n return 0\n return 0\n def answer_collback(self,data):\n rospy.loginfo('Data of state Manipulation callback:')\n 
rospy.loginfo(data)\n self.answer_resived=True\n self.answer=data.data\n return 0","sub_path":"src/Manipulation.py","file_name":"Manipulation.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"335338868","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\ntorch.manual_seed(1)\r\n\r\nx_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2] ]\r\ny_data = [[0],[0],[0],[1],[1],[1]]\r\nx_train = torch.FloatTensor(x_data)\r\ny_train = torch.FloatTensor(y_data)\r\n\r\n#x_Data 와 y_Data 정의를 동시해 해주고 FloatTensor를 통해 넘파이 배열로 만들어줌\r\n\r\nW = torch.zeros((2,1), requires_grad=True)\r\nb = torch.zeros(1,requires_grad=True)\r\n\r\n\r\n#가중치와 편향을 각각 벡터로 0으로 만들어준다.\r\noptimizer = optim.SGD([W, b], lr=1)\r\n# 경사하강법을 통해서 가중치와 편향을 가지고 학습률 1퍼센트로 설정한 후 학습을 한다.\r\nnb_epochs = 1000\r\n#1000번 학습\r\nfor epoch in range(nb_epochs + 1):\r\n\r\n # Cost 계산\r\n hypothesis = torch.sigmoid(x_train.matmul(W) + b)\r\n #가설을 시그모이드 함수를 통해서 매개변수에 Wx + b를 넣어준다.\r\n cost = -(y_train * torch.log(hypothesis) +\r\n (1 - y_train) * torch.log(1 - hypothesis)).mean()\r\n #비용함수 식을 적는다. 
식은 로그함수를 구현\r\n # cost로 H(x) 개선\r\n optimizer.zero_grad()\r\n cost.backward()\r\n optimizer.step()\r\n\r\n # 100번마다 로그 출력\r\n if epoch % 100 == 0:\r\n print('Epoch {:4d}/{} Cost: {:.6f}'.format(\r\n epoch, nb_epochs, cost.item()\r\n ))\r\n\r\nhypothesis = torch.sigmoid(x_train.matmul(W) + b)\r\nprint(hypothesis)\r\n#학습을 한 후에 출력한다.\r\nprediction = hypothesis >= torch.FloatTensor([0.5])\r\nprint(prediction)\r\n#각각의 인덱스가 0.5를 넘으면 True 안넘으면 0.5이하면 False\r\nprint(W)\r\n\r\n#가중치를 출력한다.\r\nprint(b)\r\n\r\n#편향을 출력한다.","sub_path":"Logistic Regression/Logistic Regression.py","file_name":"Logistic Regression.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418855762","text":"import logging\nimport logging.config\nfrom os import stat\nimport fire\nfrom typing import Dict, List\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.linear_model import LinearRegression\nimport alphashape\n\nfrom ism_model import config as cfg\nfrom ism_model.utils.dataset import Data\n\n\nclass Model():\n def __init__(self, L_0: float, Y_0: float):\n logging.info(\"Started the ISM model ...\")\n\n self.L_0 = L_0\n self.Y_0 = Y_0\n\n self.sigma = 0.0\n self.delta = 0.0\n self.rho = 0.0\n\n self.a = 0.0\n self.gamma = 0.0\n self.alpha_k = 0.0\n self.n = 0.0\n self.s = 0.0\n self.delta = 0.0\n\n @staticmethod\n def create_params_set(coeffs: Dict[str, str], n_splits: int) -> pd.DataFrame:\n parsed_coeffs = {\n param: np.linspace(\n start=float(coeffs[param].split(\";\")[0]),\n stop=float(coeffs[param].split(\";\")[1]),\n num=n_splits\n ) for param in coeffs\n }\n grid_df = pd.DataFrame(\n data=np.vstack(np.meshgrid(*parsed_coeffs.values())).reshape(len(parsed_coeffs), -1).T,\n columns=parsed_coeffs.keys()\n )\n return grid_df\n\n @staticmethod\n def calc_gdp(Y_0: float, a: float, b: float, L: float, L_0: float, K: float, K_0: float) -> float:\n labour = (L/L_0)**(-b+1e-8)\n capital = 
(K/K_0)**(-b+1e-8)\n return Y_0*((a*labour + (1-a)*capital)**(-1/(b+1e-8)))\n\n @staticmethod\n def calc_capital(J: float, mu: float, K: float) -> float:\n return J + (1-mu)*K\n\n @staticmethod\n def calc_export(delta: float, Y: float, pi_e: float) -> float:\n return delta*Y/pi_e\n\n @staticmethod\n def calc_import(rho: float, delta: float, Y: float, pi_i: float) -> float:\n return rho*(1-delta)*Y/pi_i\n\n @staticmethod\n def calc_investments(sigma: float, rho: float, delta: float, Y: float, pi_j: float) -> float:\n return sigma*(1 + rho*(1 - delta))*Y/pi_j\n\n @staticmethod\n def calc_sigma(pi_j_list: List[float], J_list: List[float],\n Y_list: List[float], pi_i_list: List[float], Imp_list: List[float]):\n sigma = [\n (pi_j*J)/(Y + pi_i*Imp) for pi_j, J, Y, pi_i, Imp in zip(pi_j_list, J_list, Y_list, pi_i_list, Imp_list)\n ]\n return sigma\n\n @staticmethod\n def calc_delta(pi_e_list: List[float], E_list: List[float], Y_list: List[float]):\n delta = [\n pi_e*E/Y for pi_e, E, Y in zip(pi_e_list, E_list, Y_list)\n ]\n return delta\n\n @staticmethod\n def calc_rho(pi_i_list: List[float], Imp_list: List[float],\n Y_list: List[float], pi_e_list: List[float], E_list: List[float]):\n rho = [\n (pi_i*Imp)/(Y - pi_e*E) for pi_i, Imp, Y, pi_e, E in zip(pi_i_list, Imp_list, Y_list, pi_e_list, E_list)\n ]\n return rho\n\n @staticmethod\n def fit_coeffs(data: List[float]):\n LinReg = LinearRegression(n_jobs=-1)\n y = np.array(data)\n X = np.arange(len(data)).reshape(-1, 1)\n LinReg.fit(X, y)\n pred = LinReg.predict(X)\n return pred\n\n @staticmethod\n def fit_labour(self, data: List[float]):\n LinReg = LinearRegression(n_jobs=-1)\n y = np.log(np.array(data))\n X = np.arange(len(data)).reshape(-1, 1)\n LinReg.fit(X, y)\n return math.exp(LinReg.intercept_), LinReg.coef_[0]\n\n @staticmethod\n def fit_exponential(data: List[float]):\n LinReg = LinearRegression(n_jobs=-1)\n y = np.log(np.array(data))\n X = np.arange(len(data)).reshape(-1, 1)\n LinReg.fit(X, y)\n return 
math.exp(LinReg.intercept_), LinReg.coef_[0]\n\n @staticmethod\n def fit_polinominal(data: List[float], power: int):\n LinReg = LinearRegression(n_jobs=-1)\n y = np.array(data)\n X = np.array(\n [np.power(\n np.arange(len(data)),\n pow\n ) for pow in range(1, power+1)]\n ).T\n LinReg.fit(X, y)\n pred = LinReg.predict(X)\n return pred\n\n @staticmethod\n def create_shell(points: np.array, alpha: float):\n hull = alphashape.alphashape(points, alpha)\n hull_pts = hull.exterior.coords.xy\n return hull_pts\n\n @staticmethod\n def calc_pareto_front(points: np.array, return_mask: bool = True):\n is_efficient = np.arange(points.shape[0])\n n_points = points.shape[0]\n next_point_index = 0 # Next index in the is_efficient array to search for\n while next_point_index < len(points):\n nondominated_point_mask = np.any(points < points[next_point_index], axis=1)\n nondominated_point_mask[next_point_index] = True\n is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points\n points = points[nondominated_point_mask]\n next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1\n if return_mask:\n is_efficient_mask = np.zeros(n_points, dtype=bool)\n is_efficient_mask[is_efficient] = True\n return is_efficient_mask\n else:\n return is_efficient\n\n def set_static_params(self, sigma: float, delta: float, rho: float) -> None:\n self.sigma = sigma\n self.delta = delta\n self.rho = rho\n\n def load_data(self, source_file: str):\n self.dataset = Data(source_file=source_file).load()\n self.dataset = self.dataset[[\n cfg.GDP_COL,\n cfg.LABOUR_COL\n ]]\n\n def save_data(self):\n pass\n\n def compute_metrics(self):\n pass\n\n def run(self):\n pass\n\n\nif __name__ == \"__main__\":\n fire.Fire(Model)\n","sub_path":"Master/Coursework (first grade)/ism-model/ism_model/model/ism.py","file_name":"ism.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"179285741","text":"import json\nimport sys\nfrom os.path import join\n\nmodels = [\n \"bert_base_seq2seq\",\n \"bert_large_seq2seq\",\n \"elmo_seq2seq\",\n \"openai_seq2seq\",\n \"seq2seq\"\n]\n\niterations = [str(i) for i in range(1,18)] + [\"Full\"]\n\ndef main(argv):\n if len(argv) != 2:\n print(\"Usage: python -m scripts.accumulate_fine_tune_evaluation dir\")\n return\n\n directory = argv[1]\n for model in models:\n seq_acc = []\n BLEU = []\n loss = []\n for iteration in iterations:\n path = join(directory, model + \"_\" + iteration + \".json\")\n fd = open(path, \"r\")\n json_data = json.loads(fd.read())\n fd.close()\n seq_acc.append(json_data[\"seq_acc\"])\n BLEU.append(json_data[\"BLEU\"])\n loss.append(json_data[\"loss\"])\n fd = open(\"fine_tune_evaluation_acc.json\", \"a+\")\n fd.write(json.dumps({\"model_name\": model, \"seq_acc\": seq_acc, \"BLEU\": BLEU, \"loss\": loss}) + \"\\n\")\n fd.close()\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/accumulate_fine_tune_evaluation.py","file_name":"accumulate_fine_tune_evaluation.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"392574599","text":"\"\"\"\nThis function takes an array and does stuff\n\"\"\"\n__author__ = \"Wyatt\"\n\n\nimport numpy as np\n\n\ndef quadratic_fit(array):\n \n x_values = array[0, :] \n y_values = array[1, :] \n\n quadratic_coefficients = np.polyfit(x_values, y_values, 2)\n\n return quadratic_coefficients\n","sub_path":"Final Stuff/quadratic_fit.py","file_name":"quadratic_fit.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"220967491","text":"# Funcao Start Stop para uso com servico EC2 AWS em conjunto com servico Cloud Watch AWS e Lambda AWS\n# Biclioteca AWS\n# 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_ChangingAttributesWhileInstanceStopped\n# https://aws.amazon.com/pt/premiumsupport/knowledge-center/start-stop-lambda-cloudwatch/\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#client\n\nimport boto3\n\nec2 = boto3.client('ec2')\n\ndef lambda_handler(event, context):\n\n try:\n start_stop_ec2_instances(event, context)\n \n start_stop_rds_instances(event, context)\n \n except Exception as e:\n displayException(e)\n traceback.print_exc()\n \ndef start_stop_ec2_instances(event, context):\n \n # Get action parametro para event\n action = event.get('action')\n \n if action is None:\n action = ''\n\n # Checando action\n if action.lower() not in ['start', 'stop']:\n print (\"A Funcao nao esta operando e sera abortada\")\n else:\n # Get ec2 com filter conditions\n filtered_ec2 = ec2.describe_instances(\n Filters=[\n {'Name': 'tag-key', 'Values': ['Auto-StartStop-Enabled', 'auto-startstop-enabled']},\n {'Name': 'instance-state-name', 'Values': ['running', 'stopped']}\n ] # Condicoes adicionadas nas TAG's dos servicos AWS - Trigger \n ).get(\n 'Reservations', []\n )\n \n # Convertendo array of array para uma flat array\n instances_ec2 = sum(\n [\n [i for i in r['Instances']]\n for r in filtered_ec2\n ], [])\n \n print (\"Encontrada \" + str(len(instances_ec2)) + \" EC2 instances started/stopped\")\n \n # Loop through instances\n for instance_ec2 in instances_ec2:\n\n try:\n instance_id = instance_ec2['InstanceId']\n\n # ec2 instance TAG name\n for tag in instance_ec2['Tags']:\n if tag['Key'] == 'Name':\n instance_name = tag['Value']\n print (\"instance_name: \" + instance_name + \" instance_id: \" + instance_id)\n continue\n \n # ec2 instance STATUS\n instance_state = instance_ec2['State']['Name']\n print (\"STATUS: %s\" % instance_state)\n \n # Start or stop ec2 instance\n if instance_state == 'running' and action == 'stop':\n ec2.stop_instances(\n 
InstanceIds=[\n instance_id\n ],\n # DryRun = True\n )\n print (\"Instance %s comes to stop\" % instance_id)\n \n elif instance_state == 'stopped' and action == 'start':\n ec2.start_instances(\n InstanceIds=[\n instance_id\n ],\n # Se funcionamento ok = true\n )\n print (\"Instance %s\" % instance_id)\n \n else: #Não esta pronta para funcao\n print (\"Instance %s(%s) Funcao não pronta\" % (instance_id, instance_name))\n \n except Exception as e:\n displayException(e)\n\n","sub_path":"StartStopInstancesEC2.py","file_name":"StartStopInstancesEC2.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229667008","text":"\"\"\"\"\nCreated Thr Feb 16\n\n@author: Juan Jimenez\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport csv\nfrom textwrap import wrap\nsource = 'TristateCampusHate.csv'\n\ndef graph1():\n state_graph = [['NY', 0],['CT', 0],['MA',0],['NJ',0],['PA',0]]\n\n try:\n s = open(source, 'r')\n except:\n print('this file cannot be opened')\n else:\n reader = csv.reader(s)\n for line in reader:\n for state in state_graph:\n\n if (state[0] == line[1]):\n state[1] = state[1] + int(line[5])\n else:\n continue\n states = []\n y = []\n for state in state_graph:\n states.append(state[0])\n y.append(state[1])\n print (states)\n print(y)\n x = [1,2,3,4,5]\n plt.bar(x,y,align='center',width = 0.7)\n plt.xticks(x,states)\n plt.ylabel('Hate crimes reported')\n plt.xlabel('States')\n plt.title('Hate on college campuses in the Tri-state')\n\n plt.show()\n\n\ndef graph2():\n sex_graph = [['Female', 0], ['Male', 0]]\n\n try:\n s = open(source, 'r')\n except:\n print('this file cannot be opened')\n else:\n reader = csv.reader(s)\n for line in reader:\n for sex in sex_graph:\n if line[4].isnumeric() == False:\n continue\n elif (sex[0] == 'Female'):\n sex[1] = sex[1] + int(line[4])\n else:\n sex[1] = sex[1] + int(line[3])\n xlabels = []\n y = []\n x = [1,2]\n for sex in sex_graph:\n 
xlabels.append(sex[0])\n y.append(sex[1])\n\n explode =(0,0)\n\n plt.pie(y, explode = explode, labels=xlabels, autopct='%1.1f%%', shadow=True, startangle=90)\n plt.title('Reported Hate Crimes on Campus by Sex')\n plt.axis('equal')\n plt.show()\n\n\n\ndef graph3():\n try:\n s = open(source, 'r')\n except:\n print('this file cannot be opened')\n else:\n firstLine = 0\n school_info_list = []\n reader = csv.reader(s)\n for row in reader:\n found = 0\n if (firstLine == 0):\n school_info_list.append([row[2],row[5]])\n firstLine += 1\n else:\n for entry in school_info_list:\n if (entry[0] == row[2]):\n entry[1] = int(entry[1]) + int(row[5])\n found += 1\n else:\n continue\n if(found == 0):\n school_info_list.append([row[2],int(row[5])])\n else:\n continue\n xlabes =[]\n y = []\n x = [1,2,3,4,5,6,7,8,9]\n\n for entry in school_info_list:\n xlabes.append(entry[0])\n y.append(entry[1])\n\n plt.bar(x,y,align='center',width=0.5)\n plt.xticks(x,xlabes, rotation=45, wrap=True )\n plt.ylabel('Hate crimes reported')\n plt.xlabel('University Type')\n plt.title('Hate Crimes Reported on Private vs Public University')\n\n plt.show()\n\n\ndef graph4():\n try:\n s = open(source,'r')\n except:\n print('this file cannot be read')\n else:\n uni_list = []\n h = 'Harvard University'\n y = 'Yale University'\n c = 'Columbia University in the City of New York'\n n = 'New York University'\n b = 'Boston University'\n bc = 'Boston College'\n reader = csv.reader(s)\n for line in reader:\n if ((line[0] == h) or (line[0] == y) or (line[0] == c) or (line[0] == n) or (line[0] == b) or (line[0] == bc)):\n uni_list.append([line[0],int(line[5])])\n else:\n continue\n x =[1,2,3,4,5,6]\n xlabes=[]\n y=[]\n for uni in uni_list:\n xlabes.append(uni[0])\n y.append(uni[1])\n plt.bar(x,y,align='center',width = 0.3)\n plt.rc('xtick', labelsize = 5)\n plt.xticks(x,xlabes, rotation = 45, wrap = True)\n plt.ylabel('Hate crimes reported')\n plt.xlabel('University')\n plt.title('Hate Crimes Reported on Prestigous 
Universities')\n\n plt.show()\n\ngraph1()\ngraph2()\ngraph3()\ngraph4()\n","sub_path":"Assignment_3/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"471791283","text":"#!/usr/bin/env python\n#########################################################################\n# Reinforcement Learning with FEM on the CartPoleEnvironment \n#\n# Requirements: pylab (for plotting only). If not available, comment the\n# last 3 lines out\n#########################################################################\n\n__author__ = \"Thomas Rueckstiess, Frank Sehnke\"\n\n\nfrom pybrain.tools.example_tools import ExTools\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask\nfrom pybrain.rl.agents import OptimizationAgent\nfrom pybrain.optimization import FEM\nfrom pybrain.rl.experiments import EpisodicExperiment\n\nbatch=2 #number of samples per learning step\nprnts=100 #number of learning steps after results are printed\nepis=4000/batch/prnts #number of roleouts\nnumbExp=10 #number of experiments\net = ExTools(batch, prnts) #tool for printing and plotting\n\nfor runs in range(numbExp):\n # create environment\n env = CartPoleEnvironment() \n # create task\n task = BalanceTask(env, 200, desiredValue=None)\n # create controller network\n net = buildNetwork(4, 1, bias=False)\n # create agent with controller and learner (and its options)\n agent = OptimizationAgent(net, FEM(storeAllEvaluations = True))\n et.agent = agent\n # create the experiment\n experiment = EpisodicExperiment(task, agent)\n\n #Do the experiment\n for updates in range(epis):\n for i in range(prnts):\n experiment.doEpisodes(batch)\n #print \"Epsilon : \", agent.learner.sigma\n et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)\n 
et.addExps()\net.showExps()\n","sub_path":"examples/rl/environments/cartpole/cart_fem.py","file_name":"cart_fem.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"90660812","text":"from .Downloader import Downloader\n\n\"\"\"\n\tCome funziona lo scarico dei dati dell'affluenza\n\t1) si imposta la classe (con i percorsi, gli header base ecc)\n\t\n\t2) periodicamente (while True: time.sleep()) scarico gli enti pervenuti\n\t3) calcolo quali sono gli enti pervenuti (facendo la differenza tra gli ultimi due file)\n\t4) calcolo regione e provincia dei comuni pervenuti , perche' per l'affluenza non devo\n\t\tscaricare le pagine dei singoli comuni, ma mi basta la pagina della provincia,\n\t\tdove cosi' prendo anche tutti gli altri comuni a gratis e faccio meno richieste\n\t\t\n\t\thttps://eleapi.interno.gov.it/siel/PX/votanti/TE/01\t\t\t\t\t\t\t\t\t\titalia\n\t\thttps://eleapi.interno.gov.it/siel/PX/votanti/TE/01/RE/08\t\t\t\t3080000000\t\tregione\n\t\thttps://eleapi.interno.gov.it/siel/PX/votanti/TE/01/PR/042\t\t\t\t3080420000\t\tprov\n\t\thttps://eleapi.interno.gov.it/siel/PX/scrutiniG/TE/08/PR/042/CM/0220\t3080420220\t\tcomune\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\txxx\t\t\t\tregione\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t xxx\t\t\tprovincia\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t xxxx\t\tcomune\n\t5) scarico i file e li salvo\n\n\"\"\"\n\nclass DownloaderAffluenza(Downloader):\n\t\n\t# https://eleapi.interno.gov.it/siel/PX/votanti/TE/01/RE/08\n\t# https://elezioni.interno.gov.it/europee/votanti/20190526/votantiEI08000\n\n\t# https://eleapi.interno.gov.it/siel/PX/votanti/TE/01/PR/042\n\t# https://elezioni.interno.gov.it/europee/votanti/20190526/votantiEI08042\n\n\tdef calcola_urls_enti_superiori(self, list_codici_comuni):\n\t\tpad0 = lambda s, n: s + '0' * (n - len(s))\n\t\n\t\tlist_urls = []\n\t\ttemplate_url = 
[\n\t\t\t\"https://eleapi.interno.gov.it/siel/PX/votanti/TE/01/{tipo_ente}/{cod_ente}\", \n\t\t\t\"{base_folder}/votanti/votanti_{tipo_ente}_{cod_ente}_-{timestamp}-.json\",\n\t\t\t[[\"Referer\", \"https://elezioni.interno.gov.it/europee/votanti/20190526/votantiEI{}\"]]\n\t\t]\n\t\t\n\t\tfor cod_com in list_codici_comuni:\n\t\t\tfor ente_i in self.calc_enti_sup_da_comune(cod_com):\n\t\t\t\turl_i = [\n\t\t\t\t\ttemplate_url[0].format(**ente_i),\n\t\t\t\t\ttemplate_url[1].format(**ente_i),\n\t\t\t\t\t[[\n\t\t\t\t\t\t\"Referer\", \n\t\t\t\t\t\t\"https://elezioni.interno.gov.it/europee/votanti/20190526/votantiEI{}\".format(\n\t\t\t\t\t\t\tpad0(ente_i['cod_ente'], 5)\n\t\t\t\t\t\t)\n\t\t\t\t\t]]\n\t\t\t\t]\n\t\t\t\n\t\t\t\tif url_i not in list_urls:\n\t\t\t\t\tlist_urls.append(url_i)\n\t\t\t\t\n\t\treturn list_urls\n\t\t\n\tdef calc_enti_sup_da_comune(self, cod_com):\n\t\t# dato il codice di un comune estraggo il codice della provincia e regione associata\n\t\t\n\t\t#\tcurl 'https://eleapi.interno.gov.it/siel/PX/votanti/TE/08/PR/052' \n\t\t#\t-H 'Referer: https://elezioni.interno.gov.it/comunali/votanti/20190526/votantiGI08052'\n\t\t\n\t\t# ritorno\n\t\t#yield {'tipo_ente': 'RE', 'cod_ente': cod_com[0:3], 'timestamp': '{timestamp}', 'base_folder': '{base_folder}'}\n\t\tyield {'tipo_ente': 'PR', 'cod_ente': cod_com[3:6], 'timestamp': '{timestamp}', 'base_folder': '{base_folder}'}\n\t\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","sub_path":"europee_italia_20190526/eligendo/DownloaderAffluenza.py","file_name":"DownloaderAffluenza.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"341033880","text":"# <문제> 두 배열의 원소 교체: 문제 설명\n# 동빈이는 두 개의 배열 A와 B를 가지고 있습니다.\n# 두 배열은 N개의 원소로 구성되어 있으며, 배열의 원소는 모두 자연수입니다.\n# 동빈이는 최대 K번의 바꿔치기 연산을 수행할 수 있는데, 바꿔치기 연산이란 배열 A에 있는 원소 하나와\n# 배열 B에 있는 원소 하나를 골라서 두 원소를 서로 바꾸는 것을 말합니다.\n# 동빈이의 최종 목표는 배열 A의 모든 원소의 합이 최대가 되도록 하는 것이며, 여러분은 동빈이를 
도와야 합니다.\n# N, K 그리고 배열 A와 B의 정보가 주어졌을 때,\n# 최대 K번의 바꿔치기 연산을 수행하여 만들 수 있는 배열 A의 모든 원소의 합의 최댓값을 출력하는 프로그램을 작성하세요.\n\nn, k = 5, 3\narr_a = [1, 2, 5, 4, 3]\narr_b = [5, 5, 6, 6, 5]\narr_a.sort()\narr_b.sort(reverse=True)\nfor i in range(k):\n if arr_a[i] < arr_b[i]:\n arr_a[i], arr_b[i] = arr_b[i], arr_a[i]\n else:\n break\nprint(sum(arr_a))\n\n# [문제 해결 아이디어]\n# 핵심 아이디어: 매번 배열 A에서 가장 작은 원소를 골라서, 배열 B에서 가장 큰 원소와 교체합니다.\n# 가장 먼저 배열 A와 B가 주어지면 A에 대하여 오름차순 정렬하고, B에 대하여 내림차순 정렬합니다.\n# 이후에 두 배열의 원소를 첫 번째 인덱스부터 차례로 확인하면서 A의 원소가 B의 원소보다 작을 때에만 교체를 수행합니다.\n# 이 문제에서는 두 배열의 원소가 최대 100,000개까지 입력될 수 있으므로,\n# 최악의 경우 O(NlongN)을 보장하는 정렬 알고리즘을 이용해야 합니다.\n\nn, k = 5, 3\na = [1, 2, 5, 4, 3]\nb = [5, 5, 6, 6, 5]\na.sort()\nb.sort(reverse=True)\nfor i in range(k):\n if a[i] < b[i]:\n a[i], b[i] = b[i], a[i]\n else:\n break\nprint(sum(a))","sub_path":"03_정렬/05_문제_원소교체.py","file_name":"05_문제_원소교체.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"255803151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module exposes a single class :class:`ga4gh.dos.client.Client`, which\nexposes the HTTP methods of the Data Object Service as named Python functions.\n\nThis makes it easy to access resources that are described following these\nschemas, and uses bravado to dynamically generate the client functions\nfollowing the OpenAPI schema.\n\nIt currently assumes that the service also hosts the swagger.json, in a style\nsimilar to the demonstration server, :mod:`ga4gh.dos.server`.\n\"\"\"\nfrom bravado.client import SwaggerClient\nfrom bravado.swagger_model import Loader\nfrom bravado.requests_client import RequestsClient\nfrom bravado_core.exception import SwaggerValidationError\nfrom bravado_core.formatter import SwaggerFormat\n\nDEFAULT_CONFIG = {\n 'validate_requests': True,\n 'validate_responses': True\n}\n\n\ndef validate_int64(test):\n \"\"\"\n Accepts an int64 and checks for numerality. 
Throws a Swagger Validation\n exception when failing the test.\n\n :param test:\n :return:\n :raises SwaggerValidationError:\n \"\"\"\n if str(test) != test:\n raise SwaggerValidationError('int64 are serialized as strings')\n\n\n# This is to support serializing int64 as strings on the wire. JavaScript\n# only supports up to 2^53.\nint64_format = SwaggerFormat(\n format='int64',\n to_wire=lambda i: str(i),\n to_python=lambda i: int(i),\n validate=validate_int64, # jsonschema validates integer\n description=\"Converts [wire]str:int64 <=> python long\"\n)\n\n\nclass Client:\n \"\"\"\n This class is the instantiated to create a new connection to a DOS. It\n connects to the service to download the swagger.json and returns a client\n in the DataObjectService namespace.\n\n ::\n\n from ga4gh.dos.client import Client\n client = Client(\"http://localhost:8000/ga4gh/dos/v1\")\n\n models = client.models\n c = client.client\n\n # Will return a Data Object by identifier\n c.GetDataObject(data_object_id=\"abc\").result()\n\n # To access models in the Data Object Service namespace:\n ListDataObjectRequest = models.get_model('ListDataObjectsRequest')\n\n # And then instantiate a request with our own query:\n my_request = ListDataObjectsRequest(alias=\"doi:10.0.1.1/1234\")\n\n # Finally, send the request to the service and evaluate the response.\n c.ListDataObjects(body=my_request).result()\n\n\n The class accepts a configuration dictionary that maps directly to the\n bravado configuration.\n\n For more information on configuring the client, see\n `bravado documentation\n `_.\n \"\"\"\n def __init__(self, url, config=DEFAULT_CONFIG, http_client=None, request_headers=None):\n swagger_path = '{}/swagger.json'.format(url.rstrip(\"/\"))\n config['formats'] = [int64_format]\n self._config = config\n self.models = SwaggerClient.from_url(swagger_path,\n config=config,\n http_client=http_client,\n request_headers=request_headers)\n self.client = self.models.DataObjectService\n\n 
@classmethod\n def config(cls, url, http_client=None, request_headers=None):\n \"\"\"\n Accepts an optionally configured requests client with authentication\n details set.\n\n :param url: The URL of the service to connect to\n :param http_client: The http_client to use, \\\n defaults to :func:`RequestsClient`\n :param request_headers: The headers to set on each request.\n :return:\n \"\"\"\n swagger_path = '{}/swagger.json'.format(url.rstrip('/'))\n http_client = http_client or RequestsClient()\n loader = Loader(http_client, request_headers=request_headers)\n spec_dict = loader.load_spec(swagger_path)\n return spec_dict\n\n\ndef main():\n print('client')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/ga4gh/dos/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"501963369","text":"import sys\ndef absmaj(parties):\n senNum = sum(parties)\n for part in parties:\n if part > senNum/2:\n return True\n return False\n\ndef argmax(t, exc=None):\n m = 0\n for i in range(len(t)):\n if t[i] >= t[m] and (exc == None or exc != i):\n m = i\n return m\n\ndef partName(x):\n return chr(x+ord('A'))\n \ndef solve():\n nbPart = int(input())\n parties = [ int(x) for x in input().strip().split(' ') ]\n \n plan = []\n while sum(parties) > 0:\n ev1 = argmax(parties)\n ev2 = argmax(parties, ev1)\n parties[ev1] -= 1\n parties[ev2] -= 1\n if absmaj(parties):\n parties[ev2] += 1\n if absmaj(parties):\n print(\"ERROR: strategy not working.\")\n sys.exit(1)\n else:\n plan.append(partName(ev1))\n else:\n plan.append(partName(ev1)+partName(ev2))\n \n out = ''\n for x in plan:\n out += x+' '\n return out[:-1]\n\nT = int(input())\nfor c in range(T):\n print(\"Case #{}: 
{}\".format(c+1,solve()))\n","sub_path":"solutions_5753053697277952_1/Python/Tobast/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"482486707","text":"from data.VehicleData import VEHICLE_DATA_VERSION, VEHICLE_DATA_SERIES\nfrom data.VehicleEntity import VEHICLE_VERSION_MAP_KEY, VEHICLE_VERSION_MAP_VALUE, VEHICLE_SERIES_MAP_NAME, VEHICLE_SERIES_MAP_VALUE\nfrom data.VehicleTreeSql import VEHICLE_SQL\nfrom data.VehicleTree import VEHICLE_TREE\n\nfrom data.VehicleLibrary import getKey\n\n\ndef writeFile(path, content):\n with open(path, 'w+', encoding='utf-8') as f:\n f.write(content)\n f.close()\n return 'DONE';\n\n\npath = \"./import-vehicle.sql\";\ncontent = VEHICLE_SQL;\nwriteFile(path, content);\n\n# 官网数据\n###############################################################################\nprint(\"官网数据:\".format())\nprint(\"\\t车系总共{!s}个\".format(len(VEHICLE_DATA_SERIES)))\nprint(\"\\t车系版本总共{!s}个\".format(len(VEHICLE_DATA_VERSION)))\n\n# 导入数据\n###############################################################################\ndata_import = {\n \"total_series\": 0,\n \"total_version\": 0,\n \"total_attribute\": 0,\n \"missing_series_output\": 0,\n \"missing_series_input\": 0,\n}\n\n\ndef line():\n print(\"------------------------------------------------------\")\n\n\ndef analyse(tree):\n line()\n # total\n for skey in tree.keys():\n data_import[\"total_series\"] += 1;\n for vkey in tree[skey][\"versions\"].keys():\n data_import[\"total_version\"] += 1;\n for pkey in tree[skey][\"versions\"][vkey][\"properties\"].keys():\n data_import[\"total_attribute\"] += 1;\n\n # 官网车系文件中,多出来的数据\n for series in VEHICLE_DATA_SERIES:\n if not findCode(tree, 1, series[\"car_code\"]):\n print(\"\\t缺失配置数据的车系: {!s} {!s}\".format(series[\"name\"], series[\"value\"]))\n\n # 官网版本文件中,多出来的数据\n for version in VEHICLE_DATA_VERSION:\n if not findCode(tree, 2, 
version[\"value\"]):\n print(\"\\t缺失配置数据的版本: {!s} {!s}\".format(version[\"name\"], version[\"value\"]))\n\n # 找不到车系的版本\n for version in VEHICLE_DATA_VERSION:\n if version['parent'] not in VEHICLE_SERIES_MAP_VALUE.keys():\n print(\"\\t没有车系的版本: {!s} {!s}\".format(version[\"name\"], version[\"value\"]))\n\n line()\n return;\n\n\ndef findName(tree, type, value):\n for skey in tree.keys():\n if type == 1 and getKey(value) == getKey(skey): return True;\n for vkey in tree[skey][\"versions\"].keys():\n if type == 2 and getKey(value) == getKey(vkey): return True;\n for pkey in tree[skey][\"versions\"][vkey][\"properties\"].keys():\n if type == 3 and getKey(value) == getKey(pkey): return True;\n\n return False;\n\n\ndef findCode(tree, type, value):\n for skey in tree.keys():\n so = tree[skey][\"entity\"]\n if type == 1 and getKey(value) == getKey(so['code']): return True;\n for vkey in tree[skey][\"versions\"].keys():\n vo = tree[skey][\"versions\"][vkey][\"entity\"]\n if type == 2 and getKey(value) == getKey(vo['version_code']): return True;\n\n return False;\n\n\nanalyse(VEHICLE_TREE);\n\nprint(\"需要导入的数据:\".format())\nprint(\"\\t车品牌总共1个\".format())\nprint(\"\\t车系总共{!s}个\".format(data_import[\"total_series\"]))\nprint(\"\\t车系版本总共{!s}个\".format(data_import[\"total_version\"]))\nprint(\"\\t车系版本属性总共{!s}个\".format(data_import[\"total_attribute\"]))\n","sub_path":"data/Vehicle.py","file_name":"Vehicle.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"508423908","text":"from riskGame.classes.heuristic.heuristic import Heuristic\n\n\nclass PlaceBonusAI(Heuristic):\n\n def make_decision(self, states, move):\n\n sorted_states = []\n\n for state in states:\n bonus_node = state.get_bonus_node\n val = bonus_node.max_loss_attack()\n sorted_states.append((bonus_node, val))\n\n sorted(sorted_states, key=lambda x: x[1])\n move.set_bonus_hold_node(sorted_states[0][0])\n return 
move\n","sub_path":"riskGame/classes/heuristic/bonus/place_bonus_ai.py","file_name":"place_bonus_ai.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"213378123","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn import datasets, svm \r\nfrom sklearn.model_selection import train_test_split\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import matthews_corrcoef\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn import preprocessing\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.exceptions import DataConversionWarning\r\n\r\n\r\n\r\ndf=pd.read_csv(r\"C:\\Users\\kamal\\Desktop\\DA\\train_upd.csv\")\r\nprint(\"data readed\")\r\n#df=df.iloc[1:10000,:]\r\ndef enc(k):\r\n\r\n if k=='4G_RAN_CONGESTION':\r\n return 1\r\n\r\n elif k=='4G_BACKHAUL_CONGESTION':\r\n return 2\r\n\r\n elif k=='3G_BACKHAUL_CONGESTION':\r\n return 3\r\n\r\n else :\r\n return 0\r\ndf['Congestion_Type'] = df['Congestion_Type'].apply(enc)\r\n\r\ny=df['Congestion_Type']\r\n\r\n#df.drop(['Congestion_Type'],axis=1)\r\nX=df.drop(['cell_name','Congestion_Type','par_year','par_month'],axis=1)\r\n#X=df.drop([],axis=1)\r\nprint(type(X))\r\ndummies=pd.get_dummies(X['ran_vendor'],prefix='ran_vendor')\r\nX=pd.concat([X,dummies],axis=1)\r\nX.drop(['ran_vendor'],axis=1,inplace=True)\r\n\r\n#print(X.head())\r\n#print(y.head())\r\n\r\n\r\n\r\nscaler = StandardScaler()\r\nscaler.fit(X)\r\nX=scaler.transform(X)\r\n\r\n#print(X.head())\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, 
random_state=42)\r\n\r\npca=PCA(n_components=10)\r\npca.fit(X_train)\r\nX_train=pca.transform(X_train)\r\nX_test=pca.transform(X_test)\r\nprint(X_train.shape)\r\nvariance=pca.explained_variance_ratio_\r\nprint(\"retained data = \"+str(sum(variance*100)))\r\ndef mat(model):\r\n y_pred = model.predict(X_test)\r\n test=pd.Series.tolist(y_test)\r\n pred=np.ndarray.tolist(y_pred)\r\n #print(test)\r\n p=matthews_corrcoef(test,pred)\r\n return p\r\n\r\n\r\n\r\n \r\n","sub_path":"svm2.py","file_name":"svm2.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"253433122","text":"# coding=UTF-8\nfrom transformers import Wav2Vec2CTCTokenizer\nfrom transformers import Wav2Vec2FeatureExtractor\nfrom transformers import Wav2Vec2Processor\nimport torchaudio\nimport os\nimport librosa\nimport numpy as np\nfrom datasets.arrow_dataset import Dataset\n\nclass Audio_Processor():\n def __init__(self, vocab_path=\"./vocab.json\", original_sample_rate = 48000, target_sample_rate = 16000, processor_save_path = None):\n\n if processor_save_path is None:\n self.tokenizer = Wav2Vec2CTCTokenizer(vocab_path, unk_token=\"[UNK]\", pad_token=\"[PAD]\",\n word_delimiter_token=\"|\")\n self.feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0,\n do_normalize=True, return_attention_mask=True)\n self.processor = Wav2Vec2Processor(feature_extractor=self.feature_extractor, tokenizer=self.tokenizer)\n\n else:\n self.load_processor(processor_save_path)\n\n self.original_sample_rate = original_sample_rate\n self.target_sample_rate = target_sample_rate\n def process_audio(self, datasets,path = None):\n def speech_file_to_array_fn(batch):\n speech_array, sampling_rate = torchaudio.load(batch[\"path\"])\n batch[\"speech\"] = speech_array[0].numpy()\n batch[\"sampling_rate\"] = sampling_rate\n batch[\"target_text\"] = batch[\"sentence\"]\n return batch\n\n def 
resample(batch):\n batch[\"speech\"] = librosa.resample(np.asarray(batch[\"speech\"]), 48_000, 16_000)\n batch[\"sampling_rate\"] = 16_000\n return batch\n def prepare_dataset(batch):\n # check that all files have the correct sampling rate\n assert (\n len(set(batch[\"sampling_rate\"])) == 1\n ), f\"Make sure all inputs have the same sampling rate of {self.processor.feature_extractor.sampling_rate}.\"\n\n batch[\"input_values\"] = self.processor(batch[\"speech\"], sampling_rate=batch[\"sampling_rate\"][0]).input_values\n\n with self.processor.as_target_processor():\n batch[\"labels\"] = self.processor(batch[\"target_text\"]).input_ids\n return batch\n def save_audio_datasets(datasets,path):\n datasets.save_to_disk(path)\n datasets = Dataset.from_dict(datasets[12:14]).map(speech_file_to_array_fn, remove_columns=datasets.column_names)\n datasets = datasets.map(resample, num_proc=4)\n datasets = datasets.map(prepare_dataset, remove_columns = datasets.column_names, batch_size=8, num_proc=4, batched=True)\n if path is not None:\n save_audio_datasets(datasets,path)\n\n return datasets\n\n def save_processor(self,processor_save_dir):\n self.processor.save_pretrained(processor_save_dir)\n\n def load_processor(self,processor_save_dir):\n self.processor = Wav2Vec2Processor.from_pretrained(processor_save_dir)\n self.tokenizer = self.processor.tokenizer\n self.feature_extractor = self.processor.feature_extractor","sub_path":"src/Audio_Processor.py","file_name":"Audio_Processor.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"181339218","text":"import os\nimport sys\nfrom datetime import datetime\nimport logging\n\nfrom flask import Flask, g, current_app\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager, current_user\nfrom flask_bootstrap import Bootstrap\nfrom flask_mail import Mail\nfrom flask_moment import Moment\nimport pandas as pd\n\nfrom 
.oauth import OAuthSignIn\nfrom .config import config\n\n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\napp = Flask(__name__)\napp.config.from_object(config[os.getenv('FLASK_ENV') or 'default'])\n\ndb = SQLAlchemy(app)\n\nbootstrap = Bootstrap()\nbootstrap.init_app(app)\n\nlm = LoginManager(app)\nlm.login_view = 'index'\n\nmail = Mail()\nmail.init_app(app)\n\nmoment = Moment(app)\n\nMEMBERS_DICT = {}\nTABLE_DICT = {}\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nDATA_DIR = os.path.join(basedir, 'data')\n\n\n@app.before_request\ndef before_request():\n update_g()\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n # first-request data reload might be necessary for auto-restart development\n current_user.in_cgem = current_user.social_id in g.members_dict\n db.session.commit()\n\n\ndef update_g():\n g.members_dict = MEMBERS_DICT\n g.cal = TABLE_DICT['cal']\n g.recent_docs = TABLE_DICT['recent_docs']\n g.statuses = TABLE_DICT['statuses']\n # g.review = TABLE_DICT['review']\n\n\ndef update_members_dict():\n \"\"\"Update members dictionary\"\"\"\n global MEMBERS_DICT\n MEMBERS_DICT.clear()\n with app.app_context():\n from .admin import get_members_dict\n new_dict = get_members_dict()\n MEMBERS_DICT.update(new_dict)\n\n\ndef update_table_dict():\n global TABLE_DICT\n with app.app_context():\n from .admin import Calendar, RecentDocs, StatusTable, ReviewTable\n TABLE_DICT.clear()\n TABLE_DICT.update({\n 'cal': Calendar(),\n 'recent_docs': RecentDocs(),\n 'statuses': StatusTable(db.engine),\n })\n\n\ndef _load_name_dict():\n \"\"\"Load email: name dictionary from local file.\"\"\"\n email_names = pd.read_table(os.path.join(DATA_DIR, 'email_names.tsv'), sep='\\t',\n header=None, names=['email', 'full_name'])\n name_dict = email_names.set_index('email')['full_name'].to_dict()\n return name_dict\n\n\ndef update_name_dict():\n global name_dict\n name_dict.clear()\n new_dict = _load_name_dict()\n 
name_dict.update(new_dict)\n\n\nname_dict = _load_name_dict()\nupdate_members_dict()\nupdate_table_dict()\n\nfrom app import models\nfrom app import routes\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"612350112","text":"import string\n\n\ndef count_words(text):\n d = {}\n texts = text.split()\n for s in texts:\n try:\n d[s] += 1.\n except KeyError:\n d[s] = 1.\n return d\n\n\ndef remove_punctuation(text):\n text = str(text)\n\n ascii_index = list(map(lambda x: ord(x), string.punctuation))\n trans_list = {}\n for char in ascii_index:\n trans_list.setdefault(char)\n\n return text.translate(trans_list)\n\n\ndef gen_word_vector(text_data):\n pass\n\n\ndef dickeys2set(dic, t_set):\n keys = list(dic.keys())\n for key in keys:\n t_set.add(key)\n return t_set\n\n\ndef set2dic(t_set):\n dic = {}\n for item in t_set:\n dic[item] = 0.\n return dic\n\n\n\n","sub_path":"text_analytics.py","file_name":"text_analytics.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"460978833","text":"#!/usr/bin/env python\n\"\"\"Implements steps #3, #4 and #5 from https://getpocket.com/developer/docs/authentication\"\"\"\nfrom os import environ as env\nimport webbrowser\n\nimport requests\n\n\ndef authorize_pocket_app():\n resp = requests.post(\n url=\"https://getpocket.com/v3/oauth/request\",\n data={\n \"consumer_key\": env['POCKET_CONSUMER_KEY'],\n \"redirect_uri\": env['POCKET_APP_NAME']\n })\n\n code = resp.text.split(\"=\")[1]\n webbrowser.open(f\"https://getpocket.com/auth/authorize?request_token={code}\"\n \"&redirect_uri=https://duckduckgo.com\")\n input(\"Authorize %s app in the browser, then click enter\" % env['POCKET_APP_NAME'])\n get_token(code)\n\n\ndef get_token(code):\n resp = requests.post(\n 
url=\"https://getpocket.com/v3/oauth/authorize\",\n data={\n \"consumer_key\": env[\"POCKET_CONSUMER_KEY\"],\n \"code\": code,\n })\n\n token = resp.text.split(\"&\")[0].split(\"=\")[1]\n print(\"Secret token:\", token)\n\n\nif __name__ == \"__main__\":\n authorize_pocket_app()\n","sub_path":"lib/stacks/pocket_to_kindle/authorize_app.py","file_name":"authorize_app.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"368050106","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\na = np.loadtxt(\"entropy.log\")\nprint(a.shape)\n\nplt.figure()\nplt.title(\"entropy\")\nplt.plot(np.arange(a.shape[0]), a)\nplt.show()\n\n","sub_path":"code/ConnectX/javascript_combined_game__CONNECTX/Model/PolicyValueNet_from_junxiaosong/10_6/6-25-20.4101975129837334/ploting.py","file_name":"ploting.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"603557683","text":"import time\nimport random\nclass HC:\n def __init__(self, dimension):\n self.dimension = dimension\n self.initialState = State([0] * dimension)\n self.currentState = self.initialState\n self.count = 0\n\n def reset(self):\n self.currentState = self.initialState \n\n def random_start(self):\n while True:\n self.initialState.board = [random.randint(0, self.dimension - 1) for i in range(0,self.dimension)]\n result = self.start()\n if result.cost == 0:\n return result\n else:\n self.reset()\n\n def start(self):\n while True:\n if self.currentState.cost == 0:\n return self.currentState\n\n nextState = self.currentState\n for i in range(0, self.dimension):\n for j in range(0, self.dimension):\n if self.currentState.board[i] != j:\n board = self.currentState.board.copy()\n board[i] = j\n neigbour = State(board)\n if nextState.cost > neigbour.cost:\n nextState = neigbour\n\n if nextState.cost < self.currentState.cost:\n self.count 
+= 1\n nextState.print()\n self.currentState = nextState\n else:\n return self.currentState\n\nclass State:\n def __init__(self, board):\n self.board = board # board[x] = y if the queen on row x is on column y\n self.dimension = len(self.board)\n self.cost = self.computeCost()\n\n def computeCost(self):\n cost = 0\n for i in range(0, self.dimension):\n for j in range(i+1, self.dimension):\n if self.isThreatened(i, self.board[i], j, self.board[j]):\n cost += 1\n return cost\n\n @staticmethod\n def isThreatened(r1, c1, r2, c2):\n return r1 == r2 or c1 == c2 or abs(r1-r2) == abs(c1-c2)\n\n def print(self):\n print(\"Board: [\", end=\" \")\n for i in self.board:\n print(i, end=\" \")\n print(\"] , Cost: \" + str(self.cost))\n\n\nsolver = HC(20)\nstart = time.time()\ngoal = solver.random_start()\nend = time.time()\nprint(solver.count)\nprint(end-start)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"140822154","text":"import discord\r\nimport os\r\n\r\nclient = discord.Client()\r\n\r\nonline_players = []\r\nchannel_exists = True\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"BOT IS ONLINE\")\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n\r\n if message.content == \".msb\":\r\n\r\n await message.channel.send(\r\n \"```Available commands:\\n.msb -> Commands List\\n.on -> Set yourself as connected in the minecraft server\\n.off -> Set yourself as disconnected in the minecraft server\\n.players -> Check who is currently connected to the server\\n.start -> Gives you the link to the aternos Control Panel\\n.clear -> Clear all recent messages```\")\r\n\r\n elif message.content == \".on\":\r\n\r\n if message.author.name not in online_players:\r\n online_players.append(message.author.name)\r\n await message.channel.send(\r\n \"```{} connected!\\n{} connected 
players.```\".format(message.author.name, len(online_players)))\r\n\r\n else:\r\n await message.author.send(\"You are connected already!\")\r\n\r\n elif message.content == \".off\":\r\n if message.author.name in online_players:\r\n online_players.remove(message.author.name)\r\n await message.channel.send(\r\n \"```{} disconnected!\\n{} connected players.```\".format(message.author.name, len(online_players)))\r\n else:\r\n await message.author.send(\"You are disconnected already!\")\r\n\r\n elif message.content == \".players\":\r\n await message.channel.send(\"```Connected Players:```\")\r\n for i in range(len(online_players)):\r\n await message.channel.send(\"``` - {}```\".format(online_players[i]))\r\n\r\n elif message.content == \".start\":\r\n await message.author.send(\"If u don't know the credentials i'm not telling u :))\\nhttps://aternos.org/go/\")\r\n await message.channel.send(\"```Sent to you on private...```\")\r\n elif message.content == \".clear\":\r\n with message.channel.typing():\r\n await message.delete()\r\n await message.channel.purge(limit=100)\r\n\r\n await message.delete()\r\n\r\n\r\nclient.run(os.getenv('TOKEN'))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"185414918","text":"from django.shortcuts import render\n\n# Create your views here.\n\n# 定义视图\nfrom django.http import HttpResponse\n\ndef index(request):\n context = {'title':'测试模板'}\n\n # return HttpResponse('cuitingting')\n return render(request,'book/index.html',context)\n\nfrom book.models import PeopleInfo,BookInfo\n\n# 1.增加数据\n# 方法一:\n# 通过实例对象\nbook=BookInfo()\nbook.name='django3'\nbook.readcount=100\nbook.commentcount=200\nbook.pub_date='2020-12-29'\nbook.save()\n# 方法二:\n# 通过create方法增加\nnew_book=BookInfo.objects.create(\n name='python03',\n readcount=200,\n commentcount=300,\n pub_date='2020-12-29'\n)\n\n# 2.更新数据\n# 
方法一:通过实例对象\nbook=BookInfo.objects.get(id=14)\nbook.name='django14'\nbook.save()\n\n# 方法二:\n# 直接更新用update方法\nBookInfo.objects.filter(id=14).update(\n name='django014'\n)\n\n# 3.删除数据\n# 方法一:通过实例对象\nbook=BookInfo.objects.filter(id=13)\nbook.delete()\n\n# 方法二:\n# filter和get获取id\nBookInfo.objects.filter(id=12).delete()\n\n# 4.查询操作\n# 基础查询\nBookInfo.objects.get(id=2)\n\nBookInfo.objects.filter(id=2)\n\nBookInfo.objects.all()\n\nBookInfo.objects.count()\n\n# 过滤查询\nBookInfo.objects.filter(name__contains='湖')\n\nBookInfo.objects.filter(name__endswith='部')\n\nBookInfo.objects.filter(name__isnull=True)\n\nBookInfo.objects.filter(id__in=[1,2])\n\nBookInfo.objects.filter(id__gt=5)\n\nBookInfo.objects.exclude(id=3)\n\nfrom django.db.models import F, Q\n\n# F对象\nBookInfo.objects.filter(readcount__gte=F('commentcount'))\n\n# Q对象\n\nBookInfo.objects.filter(id__gt=2).filter(readcount__gt=20)\nBookInfo.objects.filter(id__gt=2,readcount__gt=20)\n\n# 与\nBookInfo.objects.filter(Q(id__gt=2)&Q(readcount__gte=20))\n# 或\nBookInfo.objects.filter(Q(id__gt=2)|Q(readcount__gte=20))\n# 非\nBookInfo.objects.filter(~Q(id=2))\n\n# 聚合函数 min max count avg sum\nfrom django.db.models import Sum\nBookInfo.objects.aggregate(Sum('readcount'))\n\n# 排序\nBookInfo.objects.all().order_by('readcount')\nBookInfo.objects.order_by('-readcount')\n\n# 关联查询\n# 由一到多\nbook=BookInfo.objects.get(id=3)\nbook.peopleinfo_set.all()\n\n# 由多到一\nperson=PeopleInfo.objects.get(id=6)\nprint(person)\nperson.book\n\n# 关联过滤查询\n# 由多查一\nBookInfo.objects.filter(peopleinfo__description__contains='八')\n\nBookInfo.objects.filter(peopleinfo__name='郭靖')\n\n# 由一查多\nPeopleInfo.objects.filter(book__name='天龙八部')\n\nPeopleInfo.objects.filter(book__readcount__gt=30)\n\n# 查询集QuerySet\nbook=BookInfo.objects.all()\nbook\n\n# 限制查询集\nPeopleInfo.objects.all()[3]\n\nPeopleInfo.objects.all()[:4]\n\n# 分页\nfrom django.core.paginator import 
Paginator\npeople=PeopleInfo.objects.all()\npaginator=Paginator(object_list=people,per_page=2)\npersons=paginator.page(1)\npaginator.num_pages\n\n\ndef book(request,cat_id,detail_id):\n # GET请求-url路径参数http://127.0.0.1/1/100/\n # print(cat_id,detail_id)\n # GET请求 QueryString http://ip:port/?key=value&key2=value2\n # 一键一值\n query_string=request.GET\n # a=query_string.get('a')\n # b=query_string.get('b')\n # print(a,b)\n # 一键多值\n alist=query_string.getlist('a')\n b=query_string.get('b')\n print(alist,b)\n return HttpResponse('看书')\n\n# POST表单请求\ndef login(request):\n body=request.POST\n print(body)\n return HttpResponse('login')\n\n\n# POST非表单请求\ndef weibo(request):\n body=request.body\n body_str=body.decode()\n import json\n data=json.loads(body_str)\n print(data)\n return HttpResponse('weibo json')\n\n# 自定义转换器\ndef site_register(request,mobile):\n return HttpResponse('OK')\n\n# JsonResponse\nfrom django.http.response import JsonResponse\ndef res_json(request):\n data={\n 'name':'itcast',\n 'age':15\n }\n return JsonResponse(data)\n data不管是不是字典数据,我们自己改的safe自己负责,不管data是不是字典都可以\n # return JsonResponse(data,safe=False)\n\n\n# cookie保持在客户端\ndef set_cookie(request):\n name=request.GET.get('name')\n response=HttpResponse('set_cookie')\n response.set_cookie(key='name',value=name)\n return response\n\ndef get_cookie(request):\n cookie=request.COOKIES\n name=cookie.get('name')\n return HttpResponse('get_cookie')\n\n# session保存在服务器,依赖于cookie\ndef set_session(request):\n request.session['name']='abc'\n return HttpResponse('set_session')\n\ndef get_session(request):\n name=request.session('name')\n print(name)\n return HttpResponse('get_session')\n\nfrom django.views import View\nclass JDLogin(View):\n def get(self,request):\n return HttpResponse('jd - login - get')\n def post(self,request):\n # self.abc(request)\n return HttpResponse('jd - login - post')\n # def abc(self,request):\n # return HttpResponse('abc')\n\n\"\"\"\nCenterView.__mro__\nMRO的顺序\n\"\"\"\n\nfrom 
django.contrib.auth.mixins import LoginRequiredMixin\n# class CenterView(View,LoginRequiredMixin):\nclass CenterView(LoginRequiredMixin,View):\n def get(self,request):\n return HttpResponse('center get')\n def post(self,request):\n return HttpResponse('Center post')\n\n\n\n","sub_path":"bookmanager5/book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"329529115","text":"from Repository.database import cnxn\nfrom Model.Report import Report\nfrom Util.AbrirArquivos import AbrirArquivos\n\ndef obterListaDeContratos():\n \n cursor = cnxn.cursor()\n\n query = AbrirArquivos.obterQuery(\"listacontratos\")\n\n cursor.execute(query)\n row = cursor.fetchone() \n\n num_fields = len(cursor.description)\n field_names = [i[0] for i in cursor.description] \n\n print(field_names)\n\n classeNova = Report()\n listaReports = []\n\n while row:\n \n classeNova = Report()\n\n count = 0 \n\n for j in row:\n setattr(classeNova, field_names[count] , str(j))\n count += 1\n \n listaReports.append(classeNova)\n\n row = cursor.fetchone()\n\n return listaReports","sub_path":"ContratoDAO.py","file_name":"ContratoDAO.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"370182737","text":"# -*- coding: utf-8 -*-\n\nfrom dateutil.relativedelta import relativedelta\nimport logging\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.exceptions import UserError\nfrom odoo.addons import decimal_precision as dp\n\n_logger = logging.getLogger(__name__)\n\nSPLIT_METHOD = [\n ('equal', 'Equal'),\n ('by_quantity', 'By Quantity'),\n ('by_current_cost_price', 'By Current Cost'),\n ('by_weight', 'By Weight'),\n ('by_volume', 'By Volume'),\n]\n\n\nclass AccountTax(models.Model):\n _inherit = 'account.tax'\n\n discount_account_id = fields.Many2one('account.account', 
domain=[('deprecated', '=', False)], string='Discount Account', ondelete='restrict')\n as400_tax_key = fields.Char(string=\"AS400 Tax Key\")\n\n def get_grouping_key_aok(self, invoice_tax_val, group_by='group_by_tax'):\n \"\"\" Returns a string that will be used to group account.invoice.tax sharing the same properties\"\"\"\n self.ensure_one()\n if group_by == 'group_by_account':\n return str(invoice_tax_val['line_account_id']) + '-' + str(invoice_tax_val['tax_id']) + '-' + str(invoice_tax_val['account_id']) + '-' + str(invoice_tax_val['account_analytic_id'])\n elif group_by == 'group_by_tax':\n return str(invoice_tax_val['tax_id']) + '-' + str(invoice_tax_val['account_id']) + '-' + str(invoice_tax_val['account_analytic_id'])\n\n\nclass AccountPaymentTerm(models.Model):\n _inherit = \"account.payment.term\"\n\n one_due_amount = fields.Boolean('Create one due amount', default=True)\n as400_payment_term_code = fields.Char(\"AS400 Payment Term Code\")\n\n @api.one\n def compute(self, value, date_ref=False):\n date_ref = date_ref or fields.Date.today()\n amount = value\n sign = value < 0 and -1 or 1\n result = []\n if self.env.context.get('currency_id'):\n currency = self.env['res.currency'].browse(self.env.context['currency_id'])\n else:\n currency = self.env.user.company_id.currency_id\n for line in self.line_ids:\n if line.value == 'fixed':\n amt = sign * currency.round(line.value_amount)\n elif line.value == 'percent':\n amt = currency.round(value * (line.value_amount / 100.0))\n elif line.value == 'balance':\n amt = currency.round(amount)\n if amt:\n next_date = fields.Date.from_string(date_ref)\n if line.option == 'day_after_invoice_date':\n next_date += relativedelta(days=line.days)\n elif line.option == 'fix_day_following_month':\n next_first_date = next_date + relativedelta(day=1, months=1) # Getting 1st of next month\n next_date = next_first_date + relativedelta(days=line.days - 1)\n elif line.option == 'last_day_following_month':\n next_date += 
relativedelta(day=31, months=1) # Getting last day of next month\n elif line.option == 'last_day_current_month':\n next_date += relativedelta(day=31, months=0) # Getting last day of next month\n result.append((fields.Date.to_string(next_date), amt))\n amount -= amt\n amount = sum(amt for _, amt in result)\n dist = currency.round(value - amount)\n if dist:\n last_date = result and result[-1][0] or fields.Date.today()\n result.append((last_date, dist))\n if self.one_due_amount:\n return [(result[-1][0], value)]\n return result\n\n @api.one\n def compute_payment_term_date(self, value, date_ref=False):\n date_ref = date_ref or fields.Date.today()\n amount = value\n sign = value < 0 and -1 or 1\n result = []\n if self.env.context.get('currency_id'):\n currency = self.env['res.currency'].browse(self.env.context['currency_id'])\n else:\n currency = self.env.user.company_id.currency_id\n for line in self.line_ids.sorted():\n if line.value == 'fixed':\n amt = sign * currency.round(line.value_amount)\n elif line.value == 'percent':\n amt = currency.round(value * (line.value_amount / 100.0))\n elif line.value == 'balance':\n amt = currency.round(amount)\n if amt:\n next_date = fields.Date.from_string(date_ref)\n if line.option == 'day_after_invoice_date':\n next_date += relativedelta(days=line.days)\n elif line.option == 'fix_day_following_month':\n next_first_date = next_date + relativedelta(day=1, months=1) # Getting 1st of next month\n next_date = next_first_date + relativedelta(days=line.days - 1)\n elif line.option == 'last_day_following_month':\n next_date += relativedelta(day=31, months=1) # Getting last day of next month\n elif line.option == 'last_day_current_month':\n next_date += relativedelta(day=31, months=0) # Getting last day of next month\n result.append((fields.Date.to_string(next_date), amt, line))\n amount -= amt\n amount = sum(amt for _, amt, _ in result)\n dist = currency.round(value - amount)\n if dist:\n last_date = result and result[-1][0] or 
fields.Date.today()\n result.append((last_date, dist, line))\n# if self.one_due_amount:\n# return [(result[-1][0], value, line)]\n return result\n\n\nclass AccountPaymentMode(models.Model):\n _inherit = \"account.payment.mode\"\n\n consider_payment_discount = fields.Boolean(\"Consider Payment Discount\", default=True)\n as400_payment_mode = fields.Char(\"AS400 Payment Mode\")\n\n\nclass AccountPaymentLine(models.Model):\n _inherit = 'account.payment.line'\n\n def _compute_all(self):\n for line in self:\n sum = 0.0\n dates = []\n for discount in line.payment_line_discount_ids:\n sum += discount.payment_discount\n dates.append(discount.discount_due_date)\n line.payment_discount = sum\n line.discount_due_date = min(dates) if dates else False\n line.discounted_amount = line.amount_currency - sum\n\n payment_line_discount_ids = fields.One2many('account.payment.line.discount', 'payment_line_id', string=\"Payment Order Line Discount\")\n payment_discount = fields.Monetary(compute=\"_compute_all\", string=\"Payment Discount\", currency_field='currency_id')\n deduct_discount = fields.Boolean(\"Deduct Discount\")\n discount_due_date = fields.Date(compute=\"_compute_all\", string=\"Discount Due Date\")\n discounted_amount = fields.Monetary(compute=\"_compute_all\", string=\"Discounted Amount\", currency_field='currency_id')\n\n\nclass AccountPaymentLineDiscount(models.Model):\n _name = 'account.payment.line.discount'\n _description = 'Payment Line Discount'\n\n def _compute_all(self):\n for record in self:\n invoice = record.payment_line_id.move_line_id.invoice_id\n record.invoice_amount = record.invoice_tax_id.base + record.invoice_tax_id.amount_total\n\n date_invoice = invoice.date_invoice\n if not date_invoice:\n date_invoice = fields.Date.context_today(self)\n\n pterm = invoice.payment_term_id\n pterm_list = pterm.with_context(currency_id=invoice.company_id.currency_id.id).compute_payment_term_date(value=record.invoice_amount, date_ref=date_invoice)[0]\n discount_date = 
payment_discount = False\n payment_discount_amount = 0.0\n for line in pterm_list:\n if line[2].value == 'percent' and fields.Date.from_string(line[0]) >= fields.Date.from_string(fields.Date.context_today(self)):\n discount_date = line[0]\n payment_discount = line[2].value_amount\n payment_discount_amount = line[1]\n break\n record.discount_due_date = discount_date\n record.payment_discount_perc = payment_discount\n record.payment_discount = payment_discount_amount\n\n payment_line_id = fields.Many2one(\"account.payment.line\", string=\"Payment Line\")\n currency_id = fields.Many2one(\n 'res.currency', string='Currency of the Payment Transaction',\n required=True,\n default=lambda self: self.env.user.company_id.currency_id)\n invoice_amount = fields.Monetary(compute=\"_compute_all\", string=\"Invoice Amount\", currency_field='currency_id')\n discount_due_date = fields.Date(compute=\"_compute_all\", string=\"Discount Due Date\")\n payment_discount_perc = fields.Float(compute=\"_compute_all\", string=\"Payment Discount %\")\n payment_discount = fields.Monetary(compute=\"_compute_all\", string=\"Payment Discount\", currency_field='currency_id')\n tax_id = fields.Many2one(\"account.tax\", string=\"Tax\")\n account_id = fields.Many2one(\"account.account\", string=\"Account\")\n invoice_tax_id = fields.Many2one('account.invoice.tax.aok', string=\"Account Invoice Tax\")\n\n\nclass AccountPaymentOrder(models.Model):\n _inherit = 'account.payment.order'\n\n consider_payment_discount = fields.Boolean(related=\"payment_mode_id.consider_payment_discount\", string=\"Consider Payment Discount\")\n\n @api.multi\n def draft2open(self):\n AccountPaymentLineDiscount = self.env['account.payment.line.discount']\n AccountInvoiceTax = self.env['account.invoice.tax.aok']\n for order in self:\n for line in order.payment_line_ids:\n if line.move_line_id:\n invoice = line.move_line_id.invoice_id\n # Unlink the existing invoice tax records.\n account_invoice_tax = 
AccountInvoiceTax.search([('invoice_id', '=', invoice.id)])\n account_invoice_tax.unlink()\n lines_by_account = invoice.compute_taxes_aok('group_by_account')\n for tax_line in lines_by_account:\n AccountPaymentLineDiscount.create({'payment_line_id': line.id, 'tax_id': tax_line.tax_id.id, 'account_id': tax_line.line_account_id.id, 'invoice_tax_id': tax_line.id})\n lines_by_tax = invoice.compute_taxes_aok('group_by_tax')\n for tax_line in lines_by_tax:\n AccountPaymentLineDiscount.create({'payment_line_id': line.id, 'tax_id': tax_line.tax_id.id, 'account_id': tax_line.tax_id.discount_account_id.id, 'invoice_tax_id': tax_line.id})\n return super(AccountPaymentOrder, self).draft2open()\n\n @api.multi\n def generate_move(self):\n \"\"\"\n Create the moves that pay off the move lines from\n the payment/debit order.\n \"\"\"\n self.ensure_one()\n am_obj = self.env['account.move']\n post_move = self.payment_mode_id.post_move\n # prepare a dict \"trfmoves\" that can be used when\n # self.payment_mode_id.move_option = date or line\n # key = unique identifier (date or True or line.id)\n # value = bank_pay_lines (recordset that can have several entries)\n trfmoves = {}\n for bline in self.bank_line_ids:\n hashcode = bline.move_line_offsetting_account_hashcode()\n if hashcode in trfmoves:\n trfmoves[hashcode] += bline\n else:\n trfmoves[hashcode] = bline\n\n for hashcode, blines in trfmoves.items():\n mvals = self._prepare_move(blines)\n total_company_currency = total_payment_currency = 0\n for bline in blines:\n total_company_currency += bline.amount_company_currency\n total_payment_currency += bline.amount_currency\n partner_ml_vals = self._prepare_move_line_partner_account(\n bline)\n mvals['line_ids'].append((0, 0, partner_ml_vals))\n trf_ml_vals = self._prepare_move_line_offsetting_account(\n total_company_currency, total_payment_currency, blines)\n split_vals = self._split_lines(trf_ml_vals)\n for vals in split_vals:\n mvals['line_ids'].append(vals)\n move = 
am_obj.create(mvals)\n blines.reconcile_payment_lines()\n if post_move:\n move.post()\n\n @api.multi\n def _split_lines(self, trf_ml_vals=None):\n self.ensure_one()\n if trf_ml_vals is None:\n trf_ml_vals = {}\n list1 = []\n for payment_line in self.payment_line_ids:\n total_discount = 0.0\n for discount in payment_line.payment_line_discount_ids:\n invoice_tax = discount.invoice_tax_id\n if invoice_tax:\n base_discount = (invoice_tax.base * discount.payment_discount_perc / 100)\n tax_discount = (invoice_tax.amount_total * discount.payment_discount_perc / 100)\n total_discount += base_discount + tax_discount\n discount_account = invoice_tax.tax_id.discount_account_id\n list1.append((0, 0, {'credit': base_discount, 'name': 'Payment Discount', 'debit': 0.0, 'partner_id': trf_ml_vals.get('partner_id'), 'date': trf_ml_vals.get('date'), 'account_id': invoice_tax.account_id.id}))\n list1.append((0, 0, {'credit': tax_discount, 'name': 'Payment Tax Discount', 'debit': 0.0, 'partner_id': trf_ml_vals.get('partner_id'), 'date': trf_ml_vals.get('date'), 'account_id': discount_account.id}))\n list1.append((0, 0, {'credit': trf_ml_vals.get('credit') - total_discount, 'name': trf_ml_vals.get('name'), 'debit': 0.0, 'partner_id': trf_ml_vals.get('partner_id'), 'date': trf_ml_vals.get('date'), 'account_id': trf_ml_vals.get('account_id')}))\n return list1\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n\n @api.multi\n def post(self):\n invoice = self._context.get('invoice', False)\n self._post_validate()\n for move in self:\n move.line_ids.create_analytic_lines()\n if move.name == '/':\n new_name = False\n journal = move.journal_id\n\n if invoice and invoice.move_name and invoice.move_name != '/':\n new_name = invoice.move_name\n else:\n if journal.sequence_id:\n # If invoice is actually refund and journal has a refund_sequence then use that one or use the regular one\n sequence = journal.sequence_id\n if invoice and invoice.type in ['out_refund', 'in_refund'] and 
journal.refund_sequence:\n if not journal.refund_sequence_id:\n raise UserError(_('Please define a sequence for the credit notes'))\n sequence = journal.refund_sequence_id\n\n new_name = sequence.with_context(ir_sequence_date=move.date).next_by_id()\n else:\n raise UserError(_('Please define a sequence on the journal.'))\n\n if new_name and invoice and invoice.type != 'in_invoice':\n move.name = new_name\n if new_name and invoice and invoice.type == 'in_invoice':\n move.name = invoice.number or new_name\n return self.write({'state': 'posted'})\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n landed_cost_ok = fields.Boolean('Is a Landed Cost')\n split_method = fields.Selection(\n selection=SPLIT_METHOD, string='Split Method', default='equal',\n help=\"Equal : Cost will be equally divided.\\n\"\n \"By Quantity : Cost will be divided according to product's quantity.\\n\"\n \"By Current cost : Cost will be divided according to product's current cost.\\n\"\n \"By Weight : Cost will be divided depending on its weight.\\n\"\n \"By Volume : Cost will be divided depending on its volume.\")\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n\n number = fields.Char(related='move_id.name', store=True, copy=False,\n readonly=True, states={'draft': [('readonly', False)]})\n cost_lines = fields.One2many(\n 'account.invoice.cost.lines', 'invoice_id', 'Cost Lines',\n copy=True)\n valuation_adjustment_lines = fields.One2many(\n 'account.invoice.adjustment.lines', 'invoice_id', 'Valuation Adjustments')\n\n @api.model\n def create(self, vals):\n res = super(AccountInvoice, self).create(vals)\n if vals.get('number'):\n res.write({'number': vals.get('number')})\n return res\n\n def get_valuation_lines(self):\n lines = []\n\n for line in self.mapped('invoice_line_ids'):\n # # it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost\n # if move.product_id.valuation != 
'real_time' or move.product_id.cost_method != 'fifo':\n # continue\n vals = {\n 'product_id': line.product_id.id,\n 'invoice_line_id': line.id,\n 'quantity': line.quantity,\n 'former_cost': 0.0, # line.value\n 'weight': line.product_id.weight * line.quantity,\n 'volume': line.product_id.volume * line.quantity\n }\n lines.append(vals)\n\n if not lines and self:\n raise UserError(_('The selected invoice does not contain any invoice line that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))\n return lines\n\n @api.multi\n def compute_landed_cost(self):\n AdjustementLines = self.env['account.invoice.adjustment.lines']\n AdjustementLines.search([('invoice_id', 'in', self.ids)]).unlink()\n\n digits = dp.get_precision('Product Price')(self._cr)\n towrite_dict = {}\n for cost in self.filtered(lambda cost: cost.invoice_line_ids):\n total_qty = 0.0\n total_cost = 0.0\n total_weight = 0.0\n total_volume = 0.0\n total_line = 0.0\n all_val_line_values = cost.get_valuation_lines()\n for val_line_values in all_val_line_values:\n for cost_line in cost.cost_lines:\n val_line_values.update({'invoice_id': cost.id, 'cost_line_id': cost_line.id})\n AdjustementLines.create(val_line_values)\n total_qty += val_line_values.get('quantity', 0.0)\n total_weight += val_line_values.get('weight', 0.0)\n total_volume += val_line_values.get('volume', 0.0)\n\n former_cost = val_line_values.get('former_cost', 0.0)\n # round this because former_cost on the valuation lines is also rounded\n total_cost += tools.float_round(former_cost, precision_digits=digits[1]) if digits else former_cost\n\n total_line += 1\n\n for line in cost.cost_lines:\n value_split = 0.0\n for valuation in cost.valuation_adjustment_lines:\n value = 0.0\n if valuation.cost_line_id and valuation.cost_line_id.id == line.id:\n if line.split_method == 'by_quantity' and 
total_qty:\n per_unit = (line.price_unit / total_qty)\n value = valuation.quantity * per_unit\n elif line.split_method == 'by_weight' and total_weight:\n per_unit = (line.price_unit / total_weight)\n value = valuation.weight * per_unit\n elif line.split_method == 'by_volume' and total_volume:\n per_unit = (line.price_unit / total_volume)\n value = valuation.volume * per_unit\n elif line.split_method == 'equal':\n value = (line.price_unit / total_line)\n elif line.split_method == 'by_current_cost_price' and total_cost:\n per_unit = (line.price_unit / total_cost)\n value = valuation.former_cost * per_unit\n else:\n value = (line.price_unit / total_line)\n\n if digits:\n value = tools.float_round(value, precision_digits=digits[1], rounding_method='UP')\n fnc = min if line.price_unit > 0 else max\n value = fnc(value, line.price_unit - value_split)\n value_split += value\n\n if valuation.id not in towrite_dict:\n towrite_dict[valuation.id] = value\n else:\n towrite_dict[valuation.id] += value\n for key, value in towrite_dict.items():\n AdjustementLines.browse(key).write({'additional_landed_cost': value})\n\n # Add additional landed cost to invoice line.\n for invoice in self.filtered(lambda invoice: invoice.invoice_line_ids):\n for line in invoice.invoice_line_ids:\n line.landed_cost = sum(invoice.valuation_adjustment_lines.filtered(lambda rec: rec.invoice_line_id.id == line.id).mapped('additional_landed_cost'))\n return True\n\n @api.multi\n def action_invoice_open(self):\n self.compute_landed_cost()\n return super(AccountInvoice, self).action_invoice_open()\n\n def _prepare_tax_line_vals_aok(self, line, tax):\n \"\"\" Prepare values to create an account.invoice.tax line\n\n The line parameter is an account.invoice.line, and the\n tax parameter is the output of account.tax.compute_all().\n \"\"\"\n vals = {\n 'invoice_id': self.id,\n 'name': tax['name'],\n 'tax_id': tax['id'],\n 'amount': tax['amount'],\n 'base': tax['base'],\n 'manual': False,\n 'sequence': 
tax['sequence'],\n 'account_analytic_id': tax['analytic'] and line.account_analytic_id.id or False,\n 'account_id': self.type in ('out_invoice', 'in_invoice') and (tax['account_id'] or line.account_id.id) or (tax['refund_account_id'] or line.account_id.id),\n 'line_account_id': line.account_id.id,\n }\n\n # If the taxes generate moves on the same financial account as the invoice line,\n # propagate the analytic account from the invoice line to the tax line.\n # This is necessary in situations were (part of) the taxes cannot be reclaimed,\n # to ensure the tax move is allocated to the proper analytic account.\n if not vals.get('account_analytic_id') and line.account_analytic_id and vals['account_id'] == line.account_id.id:\n vals['account_analytic_id'] = line.account_analytic_id.id\n\n return vals\n\n @api.multi\n def get_taxes_values_aok(self, group_by='group_by_tax'):\n tax_grouped = {}\n lines = self.env['account.invoice.line']\n lines_by_account = self.invoice_line_ids.filtered(lambda l: l.account_id.user_type_id.name in ('Current Assets', 'Non-current Assets', 'Fixed Assets'))\n lines_by_tax = self.invoice_line_ids - lines_by_account\n if group_by == 'group_by_account':\n lines = lines_by_account\n elif group_by == 'group_by_tax':\n lines = lines_by_tax\n for line in lines:\n price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.invoice_line_tax_ids.compute_all(price_unit, self.currency_id, line.quantity, line.product_id, self.partner_id)['taxes']\n for tax in taxes:\n val = self._prepare_tax_line_vals_aok(line, tax)\n key = self.env['account.tax'].browse(tax['id']).get_grouping_key_aok(val, group_by=group_by)\n\n if key not in tax_grouped:\n tax_grouped[key] = val\n else:\n tax_grouped[key]['amount'] += val['amount']\n tax_grouped[key]['base'] += val['base']\n return tax_grouped\n\n @api.multi\n def compute_taxes_aok(self, group_by='group_by_tax'):\n \"\"\"Function used in other module to compute the taxes on a fresh invoice 
created (onchanges did not applied)\"\"\"\n account_invoice_tax = self.env['account.invoice.tax.aok']\n self.ensure_one()\n # Generate one tax line per tax, however many invoice lines it's applied to\n tax_grouped = self.get_taxes_values_aok(group_by=group_by)\n # Create new tax lines\n for tax in tax_grouped.values():\n account_invoice_tax |= account_invoice_tax.create(tax)\n return account_invoice_tax\n\n\nclass AccountInvoiceCostLine(models.Model):\n _name = 'account.invoice.cost.lines'\n _description = 'Account Invoice Cost Lines'\n\n name = fields.Char('Description')\n invoice_id = fields.Many2one(\n 'account.invoice', 'Account Invoice',\n required=True, ondelete='cascade')\n product_id = fields.Many2one('product.product', 'Product', required=True)\n price_unit = fields.Float('Cost', digits=dp.get_precision('Product Price'), required=True)\n split_method = fields.Selection(selection=SPLIT_METHOD, string='Split Method', required=True)\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n if not self.product_id:\n self.quantity = 0.0\n self.name = self.product_id.name or ''\n self.split_method = self.product_id.split_method or 'equal'\n self.price_unit = self.product_id.standard_price or 0.0\n\n\nclass AccountInvoiceAdjustmentLines(models.Model):\n _name = 'account.invoice.adjustment.lines'\n _description = 'Account Invoice Adjustment Lines'\n\n name = fields.Char(\n 'Description', compute='_compute_name', store=True)\n invoice_id = fields.Many2one(\n 'account.invoice', 'Invoice',\n ondelete='cascade', required=True)\n invoice_line_id = fields.Many2one(\n 'account.invoice.line', 'Invoice Line', readonly=True)\n cost_line_id = fields.Many2one('account.invoice.cost.lines', 'Cost Line')\n move_id = fields.Many2one('stock.move', 'Stock Move', readonly=True)\n product_id = fields.Many2one('product.product', 'Product', required=True)\n quantity = fields.Float(\n 'Quantity', default=1.0,\n digits=0, required=True)\n weight = fields.Float(\n 'Weight', 
default=1.0,\n digits=dp.get_precision('Stock Weight'))\n volume = fields.Float(\n 'Volume', default=1.0)\n former_cost = fields.Float(\n 'Former Cost', digits=dp.get_precision('Product Price'))\n former_cost_per_unit = fields.Float(\n 'Former Cost(Per Unit)', compute='_compute_former_cost_per_unit',\n digits=0, store=True)\n additional_landed_cost = fields.Float(\n 'Additional Landed Cost',\n digits=dp.get_precision('Product Price'))\n final_cost = fields.Float(\n 'Final Cost', compute='_compute_final_cost',\n digits=0, store=True)\n\n @api.one\n @api.depends('invoice_line_id.name', 'product_id.code', 'product_id.name')\n def _compute_name(self):\n name = '%s - ' % (self.invoice_line_id.name if self.invoice_line_id else '')\n self.name = name + (self.product_id.code or self.product_id.name or '')\n\n @api.one\n @api.depends('former_cost', 'quantity')\n def _compute_former_cost_per_unit(self):\n self.former_cost_per_unit = self.former_cost / (self.quantity or 1.0)\n\n @api.one\n @api.depends('former_cost', 'additional_landed_cost')\n def _compute_final_cost(self):\n self.final_cost = self.former_cost + self.additional_landed_cost\n\n\nclass ProductCategory(models.Model):\n _inherit = \"product.category\"\n\n analytic_account_id = fields.Many2one('account.analytic.account', string='Analytic Account', required=True)\n\n\nclass ProductProduct(models.Model):\n _inherit = \"product.product\"\n\n analytic_tag_ids = fields.Many2many(\"account.analytic.tag\", string=\"Analytic Tags\")\n\n\nclass SaleOrderLine(models.Model):\n _inherit = \"sale.order.line\"\n\n @api.multi\n @api.onchange('product_id')\n def product_id_change(self):\n result = super(SaleOrderLine, self).product_id_change()\n self.analytic_tag_ids = self.product_id.analytic_tag_ids\n return result\n\n @api.multi\n def _prepare_invoice_line(self, qty):\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n res['account_analytic_id'] = self.product_id.categ_id.analytic_account_id.id\n return 
res\n\nclass PurchaseOrderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n result = super(PurchaseOrderLine, self).onchange_product_id()\n self.analytic_tag_ids = self.product_id.analytic_tag_ids\n self.account_analytic_id = self.product_id.categ_id.analytic_account_id\n return result\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = \"account.invoice.line\"\n\n landed_cost = fields.Float(string='Landed Cost', digits=dp.get_precision('Product Price'))\n price_subtotal = fields.Monetary(string='Amount',\n store=True, readonly=True, compute='_compute_price', help=\"Total amount without taxes\")\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n result = super(AccountInvoiceLine, self)._onchange_product_id()\n self.analytic_tag_ids = self.product_id.analytic_tag_ids\n self.account_analytic_id = self.product_id.categ_id.analytic_account_id\n return result\n\n @api.one\n @api.depends('price_unit', 'discount', 'invoice_line_tax_ids', 'quantity',\n 'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id', 'invoice_id.company_id',\n 'invoice_id.date_invoice', 'invoice_id.date', 'landed_cost')\n def _compute_price(self):\n currency = self.invoice_id and self.invoice_id.currency_id or None\n price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)\n taxes = False\n if self.invoice_line_tax_ids:\n taxes = self.invoice_line_tax_ids.compute_all(price, currency, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)\n self.price_subtotal = price_subtotal_signed = taxes['total_excluded'] + self.landed_cost if taxes else self.quantity * price + self.landed_cost\n self.price_total = taxes['total_included'] if taxes else self.price_subtotal\n if self.invoice_id.currency_id and self.invoice_id.currency_id != self.invoice_id.company_id.currency_id:\n price_subtotal_signed = 
self.invoice_id.currency_id.with_context(date=self.invoice_id._get_currency_rate_date()).compute(price_subtotal_signed, self.invoice_id.company_id.currency_id)\n sign = self.invoice_id.type in ['in_refund', 'out_refund'] and -1 or 1\n self.price_subtotal_signed = price_subtotal_signed * sign\n\n\nclass AccountInvoiceTaxAOK(models.Model):\n _name = \"account.invoice.tax.aok\"\n _description = \"Invoice Tax AOK\"\n _order = 'sequence'\n\n invoice_id = fields.Many2one('account.invoice', string='Invoice', ondelete='cascade', index=True)\n name = fields.Char(string='Tax Description', required=True)\n tax_id = fields.Many2one('account.tax', string='Tax', ondelete='restrict')\n account_id = fields.Many2one('account.account', string='Tax Account', required=True, domain=[('deprecated', '=', False)])\n account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic account')\n amount = fields.Monetary()\n amount_rounding = fields.Monetary()\n amount_total = fields.Monetary(string=\"Amount\", compute='_compute_amount_total')\n manual = fields.Boolean(default=True)\n sequence = fields.Integer(help=\"Gives the sequence order when displaying a list of invoice tax.\")\n company_id = fields.Many2one('res.company', string='Company', related='account_id.company_id', store=True, readonly=True)\n currency_id = fields.Many2one('res.currency', related='invoice_id.currency_id', store=True, readonly=True)\n base = fields.Monetary(string='Base', store=True)\n line_account_id = fields.Many2one('account.account', string='Tax Account', domain=[('deprecated', '=', False)])\n\n @api.depends('amount', 'amount_rounding')\n def _compute_amount_total(self):\n for tax_line in self:\n tax_line.amount_total = tax_line.amount + tax_line.amount_rounding\n","sub_path":"aok_account/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":32661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} 
+{"seq_id":"368869593","text":"''' Author : Ana Paulina Bucki based on NN.py by mehdirezaie\n Date : May 2018\n'''\n\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nclass preprocess(object):\n def __init__(self, data):\n self.X = data['features']\n self.Y = data['label'][:, np.newaxis]\n self.P = data['hpix']\n self.W = data['fracgood'][:, np.newaxis]\n if len(self.X.shape) == 1:\n self.X = self.X[:, np.newaxis]\n \nclass Netregression(object): # class for general regression\n def __init__(self, train, valid, test):\n # train\n self.train = preprocess(train)\n # test\n self.test = preprocess(test)\n # validation\n self.valid = preprocess(valid)\n #\n self.nfeatures = self.train.X.shape[1]\n \n def train_evaluate(self, learning_rate=0.001, batchsize=100, \n nepoch=10, nchain=5, Units=[5,5,5,5]):\n #\n nfeature = self.nfeatures\n nclass = 1\n #\n x = tf.placeholder(tf.float32, [None, nfeature])\n #\n if Units == 'Lin': # linear\n y = tf.layers.dense(x, units=nclass, activation=None)\n elif len(Units) == 1: # 1 hidden layer\n y0 = tf.layers.dense(x, units=Units[0], activation=tf.nn.relu)\n y = tf.layers.dense(y0, units=nclass, activation=None)\n elif len(Units) == 2: # 2 hidden layers\n y0 = tf.layers.dense(x, units=Units[0], activation=tf.nn.relu)\n y1 = tf.layers.dense(y0, units=Units[1], activation=tf.nn.relu)\n y = tf.layers.dense(y1, units=nclass, activation=None)\n elif len(Units) == 3:\n y0 = tf.layers.dense(x, units=Units[0], activation=tf.nn.relu)\n y1 = tf.layers.dense(y0, units=Units[1], activation=tf.nn.relu)\n y2 = tf.layers.dense(y1, units=Units[2], activation=tf.nn.relu)\n y = tf.layers.dense(y2, units=nclass, activation=None)\n elif len(Units) == 4:\n y0 = tf.layers.dense(x, units=Units[0], activation=tf.nn.relu)\n y1 = tf.layers.dense(y0, units=Units[1], activation=tf.nn.relu)\n y2 = tf.layers.dense(y1, units=Units[2], activation=tf.nn.relu)\n y3 = tf.layers.dense(y2, units=Units[3], activation=tf.nn.relu)\n y = tf.layers.dense(y3, 
units=nclass, activation=None)\n else:\n raise ValueError('Units should be either None, [M], [M,N] ...')\n #\n y_ = tf.placeholder(tf.float32, [None, nclass])\n w = tf.placeholder(tf.float32, [None, nclass])\n #\n mse = tf.losses.mean_squared_error(y_, y, weights=w)\n #\n global_step = tf.Variable(0, name='global_step', trainable=False)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_step = optimizer.minimize(mse, global_step=global_step)\n #\n train = self.train\n valid = self.valid\n test = self.test\n #\n train_size = train.X.shape[0]\n #\n # to normalization\n #\n meanX = np.mean(train.X, axis=0)\n stdX = np.std(train.X, axis=0)\n meanY = np.mean(train.Y, axis=0)\n stdY = np.std(train.Y, axis=0)\n self.Xstat = (meanX, stdX)\n self.Ystat = (meanY, stdY)\n #\n train.X = (train.X - meanX) / stdX\n train.Y = (trian.Y - meanY) / stdY\n test.x = (test.X - meanX) / stdX\n test.Y = (test.Y - meanY) / stdY\n valid.X = (valid.X - meanX) / stdX\n valid.Y = (valid.Y - meanY) / stdY\n #\n # to compute the number of training epochs (stops when RMSE normalizes?)\n #\n if np.mod(train_size, batchsize) == 0:\n nep = (train_size // batchsize)\n else:\n nep = (train_size // batchsize) + 1\n #\n # storing MSE\n #\n self.epoch_MSEs = []\n self.chain_y = []\n for ii in range(nchain):\n print('chain ',ii)\n mse_list = []\n #\n # initializing NN\n #\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n for i in range(nepoch):\n #\n train_loss = mse.eval(feed_dict={x:train.X, y_:train.Y, w:train.W})\n valid_loss = mse.eval(feed_dict={x:valid.X, y_:valid.Y, w:valid.W})\n #\n mse_list.append([i, train_loss, valid_loss])\n #\n for k in range(nep):\n j = k*batchsize\n if j+batchsize > train_size:\n batch_xs, batch_ys, batch_ws = train.X[j:-1], train.Y[j:-1], train.W[j:-1]\n else:\n batch_xs, batch_ys, batch_ws = train.X[j:j+batchsize], train.Y[j:j+batchsize], train.W[j:j+batchsize]\n #\n sess.run(train_step, feed_dict={x: batch_xs, y_:batch_ys, 
w:batch_ws})\n #\n y_mse, y_pred = sess.run((mse.y),feed_dict={x: test.X, y_: test.Y, x:test.W})\n self.chain_y.append([ii, y_pred])\n self.epoch_MSEs.append([ii, y_mse, np.array(mse_list)])\n \n baselineY = np.mean(train.Y)\n assert np.abs(baselineY) < 1.e-6, 'check normalization'\n baseline_testmse = np.mean(test.W * test.Y**2)\n baseline_validmse = np.mean(valid.W * valid.Y**2)\n baseline_trainmse = np.mean(train.W * train.Y**2)\n #\n self.optionsdic = {}\n self.optionsdic['baselineMSE'] = (baseline_trainmse, baseline_validmse, baseline_testmse)\n self.optionsdic['learning_rate'] = learning_rate\n self.optionsdic['batchsize'] = batchsize\n self.optionsdic['nepoch'] = nepoch\n self.optionsdic['nchain'] = nchain\n self.optionsdic['Units'] = Units\n self.optionsdic['stats'] = {'xstat':self.Xstat, 'ystat':self.Ystat}\n \n def savez(self, indir='./', name='regression_2hl_5chain_10epoch'):\n output = {}\n output['train'] = self.train.P, self.train.X, self.train.Y, self.train.W\n output['test'] = self.test.P, self.test.X, self.test.Y, self.test.W\n output['valid'] = self.valid.P, self.valid.X, self.valid.Y, self.valid.W\n output['epoch_MSEs'] = self.epoch_MSEs\n output['chain_y'] = self.chain_y\n output['options'] = self.optionsdic\n #\n if indir[-1] != '/':\n indir += '/'\n if not os.path.exists(indir):\n os.makedirs(indir)\n #\n np.savez(indir+name, output)\n #\n print ('output is saved as {} under {}'.format(name, indir))\n \ndef run_nchainlearning(indir, *arrays, **options):\n n_arrays = len(arrays)\n if n_arrays != 3:\n raise ValuseError(\"Three arrays for train and test are required\")\n net = Netregression(*arrays)\n net.train_evaluate(**options)\n #\n batchsize = options.pop('batchsize', 100)\n nepoch = options.pop('nepoch', 10)\n nchain = options.pop('nchain', 5)\n Units = options.pop('Units', [5,5,5,5])\n Lrate = options.pop('learning_rate', 0.001)\n units = ''.join([str(l) for l in Units])\n #\n ouname = 'reg-nepoch'+str(nepoch)+'-nchain'+str(nchain)\n 
ouname += '-batchsize'+str(batchsize)+'units'+units\n ouname += '-Lrate'+str(Lrate)\n #\n net.savez(indir=indir, name=ouname)\n\ndef read_NNfolds(files):\n \n p_true = []\n x_true = []\n y_true = []\n y_pred = []\n y_base = []\n weights = []\n #\n for j,file_i in enumerate(files):\n d = np.load(file_i)\n out = d['arr_0'].item()\n p_true.append(out['test'][0])\n x_true.append(out['test'][1])\n y_true.append(out['test'][2].squeeze())\n weights.append(out['test'][3].squeeze())\n #\n y_avg = []\n #\n for i in range(len(out['chain_y'])):\n y_avg.append(out['chain_y'][i][1].squeeze().tolist())\n meanY, std_Y = out['options']['stats']['ystat']\n #\n y_base.append(np.ones(out['test'][2].shape[0])*meanY)\n y_pred.append(stdY*np.mean(np.array(y_avg), axis=0) + meanY)\n\n # Combining folds\n Ptrue = np.concatenate(p_true)\n Xtrue = np.concatenate(x_true)\n Ytrue = np.concatenate(y_true)\n Ypred = np.concatenate(y_pred)\n Ybase = np.concatenate(y_base) \n Weights = np.concatenate(weights) \n #\n return Ptrue, Xtrue, Ytrue, Ypred, Ybase, Weights\n\nif __name__== '__main__':\n from mpi4py import MPI\n #\n comm = MPI.COMM_WOLRD\n size = comm.Get_size()\n rank = comm.Get_rank()\n #\n if rank == 0:\n from argparse import ArgumentParser\n ap = ArgumentParser(description='Neural Net regression')\n ap.add_argument('--path', default='/global/cscratch1/sd/mehdi/dr5_anand/eboss/')\n ap.add_argument('--input', default='test_train_eboss_dr5-masked.npy')\n ap.add_argument('--output', default='/global/cscratch1/sd/mehdi/dr5_anand/eboss/regression/')\n ap.add_argument('--nchain', type=int, default=10)\n ap.add_argument('--nepoch', type=int, default=1000)\n ap.add_argument('--batchsize', type=int, default=8000)\n ap.add_argument('--units', nargs='*', type=int, default=[10,10])\n ap.add_argument('--learning_rate', type=float, default=0.01)\n ns = ap.parse_args()\n #\n data = np.load(ns.path+ns.input).item()\n config = {'nchain':ns.nchain,\n 'nepoch':ns.nepoch,\n 'batchsize':ns.batchsize,\n 
'Units':ns.units,\n 'learning_rate':ns.learning_rate}\n oupath = ns.output\n else:\n oupath = None\n data = None\n config = None\n #\n data = comm.bcast(data, root=0)\n config = comm.bcast(config, root=0)\n oupath = comm.bcast(oupath, root=0)\n #\n # run\n if rank == 0:\n print(\"bcast finished\")\n if rank in [0, 1, 2, 3, 4]:\n print(\"config on rank %d is: \"%comm.rank, config)\n fold = 'fold'+str(rank)\n print(fold, ' is being processed')\n run_nchainlearning(oupath+fold+'/',\n data['train'][fold],\n data['test'][fold],\n **config)\n# end \n","sub_path":"NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":10345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"399142799","text":"#!/usr/bin/env python\nimport argparse\nimport os\nimport random\nimport re\nimport urllib\nfrom collections import deque\n\nimport requests\n\nfrom bruteforcers.amazon import Amazon\nfrom bruteforcers.twitter import Twitter\nfrom constants import Colors, Action, PHONE_NUMBER\nfrom core.proxy import Proxy\nfrom core.user_agents import UserAgentsCycle\nfrom scrapers.ebay import Ebay\nfrom scrapers.lastpass import LastPass\nfrom scrapers.paypal import PayPal\nfrom settings import Settings\nfrom suppliers.agnostic_supplier import AgnosticSupplier\n\nfrom itertools import product\nfrom bs4 import BeautifulSoup\nimport logging\n\n# Basic configuration for logging\nlogging.basicConfig(format='%(message)s', level=logging.INFO)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef bruteforce(args, colors, user_agents_instance, proxy_instance, settings):\n if args.email and not re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", args.email):\n exit(colors.RED + \"Email is invalid\" + colors.ENDC)\n if (args.mask and args.file) or (not args.mask and not args.file):\n exit(colors.RED + \"You need to provide a masked number or a file with numbers to try\" + 
colors.ENDC)\n if args.mask and not re.match(\"^[0-9X]{10}\", args.mask): exit(\n colors.RED + \"You need to pass a 10-digit US phone number masked as in: 555XXX1234\" + colors.ENDC)\n if args.file and not os.path.isfile(args.file): exit(colors.RED + \"You need to pass a valid file path\" + colors.ENDC)\n print(f\"Looking for the phone number associated to {args.email}...\")\n\n supplier_class = AgnosticSupplier.get_supplier(PHONE_NUMBER)\n phonenumber_supplier = supplier_class(settings, user_agents_instance, proxy_instance, colors, args.mask)\n if args.mask:\n possible_phone_numbers = phonenumber_supplier.get()\n else:\n possible_phone_numbers = phonenumber_supplier.get_from_dump(args.file)\n\n bruteforcers = get_bruteforcers(args, possible_phone_numbers, user_agents_instance, proxy_instance)\n deque(map(lambda b: b.bruteforce(), bruteforcers))\n\n\ndef get_bruteforcers(args, possible_phone_numbers, user_agents_instance, proxy_instance) -> list:\n bruteforcers_parameters = dict(possiblePhoneNumbers=possible_phone_numbers,\n email=args.email,\n verbose=args.verbose,\n user_agents_instance=user_agents_instance,\n proxy_instance=proxy_instance)\n twitter = Twitter(**bruteforcers_parameters)\n if args.quiet:\n return [twitter]\n return [twitter, Amazon(**bruteforcers_parameters)]\n\n\n#TODO: move this also to agnostic class like suppliers\ndef start_scraping(email, quiet_mode, user_agents_instance, proxy_instance, colors):\n scrapers = get_scrapers(email, \n quiet_mode, \n user_agents_instance, \n proxy_instance,\n colors)\n deque(map(lambda s: s.scrape(), scrapers))\n\n\ndef get_scrapers(email, quiet_mode, user_agents_instance, proxy_instance, colors):\n scraper_parameters = dict(email=email, \n user_agents=user_agents_instance, \n proxy=proxy_instance, \n colors=colors)\n if quiet_mode:\n return [\n PayPal(**scraper_parameters)\n ]\n return [\n Ebay(**scraper_parameters),\n LastPass(**scraper_parameters)\n ]\n\n\ndef parse_arguments():\n parser = 
argparse.ArgumentParser(description='An OSINT tool to find phone numbers associated to email addresses')\n subparsers = parser.add_subparsers(help='commands', dest='action')\n subparsers.required = True # python3 compatibility, will generate slightly different error massage then python2\n \n scrape_parser = subparsers.add_parser(Action.SCRAPE, help='scrape online services for phone number digits')\n scrape_parser.add_argument(\"-e\", required=True, metavar=\"EMAIL\", dest=\"email\", help=\"victim's email address\")\n scrape_parser.add_argument(\"-p\", metavar=\"PROXYLIST\", dest=\"proxies\",\n help=\"a file with a list of https proxies to use. Format: https://127.0.0.1:8080\")\n scrape_parser.add_argument(\"-q\", dest=\"quiet\", action=\"store_true\",\n help=\"scrape services that do not alert the victim\")\n \n generator_parser = subparsers.add_parser(Action.GENERATE,\n help=\"generate all valid phone numbers based on NANPA's public records\")\n generator_parser.add_argument(\"-m\", required=True, metavar=\"MASK\", dest=\"mask\",\n help=\"a masked 10-digit US phone number as in: 555XXX1234\")\n generator_parser.add_argument(\"-o\", metavar=\"FILE\", dest=\"file\", help=\"outputs the list to a dictionary\")\n generator_parser.add_argument(\"-q\", dest=\"quiet\", action=\"store_true\",\n help=\"use services that do not alert the victim\")\n generator_parser.add_argument(\"-p\", metavar=\"PROXYLIST\", dest=\"proxies\",\n help=\"a file with a list of https proxies to use. 
Format: https://127.0.0.1:8080\")\n generator_parser.add_argument(\"-r\", metavar=\"REGION\", dest=\"region\", help=\"region, default region is US\")\n \n bruteforce_parser = subparsers.add_parser(Action.BRUTE_FORCE,\n help='bruteforce using online services to find the phone number')\n bruteforce_parser.add_argument(\"-e\", required=True, metavar=\"EMAIL\", dest=\"email\", help=\"victim's email address\")\n bruteforce_parser.add_argument(\"-m\", metavar=\"MASK\", dest=\"mask\",\n help=\"a masked, 10-digit US phone number as in: 555XXX1234\")\n bruteforce_parser.add_argument(\"-d\", metavar=\"DICTIONARY\", dest=\"file\", help=\"a file with a list of numbers to try\")\n bruteforce_parser.add_argument(\"-p\", metavar=\"PROXYLIST\", dest=\"proxies\",\n help=\"a file with a list of HTTPS proxies to use. Format: https://127.0.0.1:8080\")\n bruteforce_parser.add_argument(\"-q\", dest=\"quiet\", action=\"store_true\",\n help=\"use services that do not alert the victim\")\n bruteforce_parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output\")\n \n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n settings = Settings(args)\n colors = Colors()\n proxy_instance = Proxy(settings, colors)\n user_agents_instance = UserAgentsCycle(settings)\n\n if args.action == Action.SCRAPE:\n start_scraping(args.email, args.quiet, user_agents_instance, proxy_instance, colors)\n elif args.action == Action.GENERATE:\n phonenumber_supplier = AgnosticSupplier.get_supplier(PHONE_NUMBER)(settings,\n user_agents_instance,\n proxy_instance,\n colors,\n args.mask)\n possible_phone_numbers = phonenumber_supplier.get()\n phonenumber_supplier.dump_supplied_phones(args.file, possible_phone_numbers)\n elif args.action == Action.BRUTE_FORCE:\n bruteforce(args, colors, user_agents_instance, proxy_instance, 
settings)","sub_path":"email2phonenumber.py","file_name":"email2phonenumber.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"501735092","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nBasic Cointegration Mean Reversions Unit Root Testing\r\nFix\r\n- Check if the log difference is better to use than the absolute value of the series\r\n\"\"\"\r\n\r\n\r\nimport datetime\r\nimport MySQLdb as mdb\r\nimport numpy as np\r\nimport pandas as pd\r\nimport statsmodels.tsa.stattools as ts\r\n\r\n\r\ndef connect_sec_db():\r\n # Database connection to MySQL instance\r\n db_host = \"localhost\"\r\n db_user = \"root\"\r\n db_pass = \"password\"\r\n db_name = \"securities_master\"\r\n con = mdb.connect(host=db_host, user=db_user, passwd=db_pass, db=db_name, \r\n autocommit=True)\r\n \r\n return con\r\n\r\n\r\ncon = connect_sec_db()\r\n\r\n\r\nprice_data = pd.read_sql(\"select * from daily_price;\", con=con)\r\n\r\nprice_data = price_data[price_data.checks == 1]\r\n\r\n\r\nunique_symbols = price_data.symbol_id.unique().tolist()\r\n\r\npresults = pd.DataFrame(columns = ['symbol_id','test type', 'p-value'])\r\nrow = 1\r\n\r\n## Test all datasets for a unit root\r\nfor t in unique_symbols: \r\n \r\n data = price_data[price_data.symbol_id == t]\r\n #data['lreturn'] = np.log(price_data.adj_close_price) - np.log(price_data.adj_close_price).shift(1)\r\n #data = data.iloc[1:]\r\n \r\n ## ADF test statistic on data\r\n ## First value is test-statistic, second value is p-value\r\n ## Fourth value is data points in the sample\r\n ## Fifth dict contains critival values at different percentages 1%, 5% & 10%\r\n ## If test stat is larger than critical value then cannot reject null gamma = 0\r\n ## and unlikely to have found mean reverting time series\r\n ## If test stat is less than critical value then reject null that gamma = 0\r\n ## and have found mean reverting time series.\r\n ## p-value gives 
probability of not rejecting null.\r\n for ttype in ['nc', 'c', 'ct']:\r\n adfuller_res = ts.adfuller_res = ts.adfuller(data['adj_close_price'],\r\n regression = ttype)\r\n \r\n ## We are testing for a unit root so we want the data at level to be \r\n ## non-stationary, so we want to not reject the null. Hence if the p-value\r\n ## is greater than 5% we do not reject the null of non-stationarity\r\n if adfuller_res[1] > 0.05: \r\n presults.loc[row] = [t, ttype, adfuller_res[1]]\r\n row += 1\r\n \r\ndpresults = pd.DataFrame(columns = ['symbol_id','test type', 'p-value'])\r\nunique_symbols = presults.symbol_id.unique().tolist()\r\n\r\nfor t in unique_symbols:\r\n data = price_data[price_data.symbol_id == t]\r\n data['dlreturn'] = price_data.adj_close_price - price_data.adj_close_price.shift(1)\r\n data = data.iloc[1:]\r\n for ttype in ['nc', 'c', 'ct']:\r\n adfuller_res = ts.adfuller_res = ts.adfuller(data['dlreturn'],\r\n regression = ttype)\r\n \r\n ## We are testing for a unit root so now we want to reject the null of\r\n ## non-stationarity after taking the difference of the data. So we want\r\n ## a low p-value less than 5%\r\n if adfuller_res[1] < 0.05: \r\n dpresults.loc[row] = [t, ttype, adfuller_res[1]]\r\n row += 1\r\n \r\n\r\nunit_roots = dpresults.symbol_id.unique().tolist()\r\n ","sub_path":"A4. ADFtest.py","file_name":"A4. 
ADFtest.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"228306370","text":"import requests\nimport json\nimport unittest\n\nclass Mytest(unittest.TestCase):\n \"\"\"百度请求\"\"\"\n\n def setUp(self):\n print(\"Test start\")\n\n\n def tearDown(self):\n print(\"Test stop\")\n\n\n\nclass test_baidu_get(Mytest):\n\n def test_baidu_get(self):\n self.url='https://www.baidu.com'\n self.header={\"content-Type\":\"application/json\"}\n r=requests.get(url=self.url)\n\n print (r.text)\n print(r.status_code)\n\nif __name__=='__main__':\n unittest.main()\n","sub_path":"request/test_case/test_request_mmmoney.py","file_name":"test_request_mmmoney.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"426521820","text":"\n\n#load data\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\n\ndef warn(*args, **kwargs): pass\nimport warnings\nwarnings.warn = warn\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cross_validation import StratifiedShuffleSplit\n\ntrain = pd.read_csv('merged_train.csv',sep='\\t')\n#train = pd.read_csv('../train.csv')\ntest = pd.read_csv('../test.csv')\n\"\"\"\nclassifiers = [\n KNeighborsClassifier(3),\n SVC(kernel=\"rbf\", C=0.025, probability=True),\n NuSVC(probability=True),\n DecisionTreeClassifier(),\n 
RandomForestClassifier(),\n AdaBoostClassifier(),\n GradientBoostingClassifier(),\n GaussianNB(),\n LinearDiscriminantAnalysis(),\n QuadraticDiscriminantAnalysis()]\n\"\"\"\n\n\ndef encode(train, test):\n\tprint(train.head())\n\tprint(test.head())\n\tle = LabelEncoder().fit(train.species) #turn species to no\n\tlabels = le.transform(train.species) # encode species strings\n\tclasses = list(le.classes_) # save column names for submission\n\ttest_ids = test.id # sample id and index no\n\t#train = train.drop(['species', 'id'], axis=1)\n\ttrain = train.drop(['species', 'id'], axis=1)\n\ttest = test.drop(['id'], axis=1)\n\t\n\treturn train, labels, test, test_ids, classes\n\ntrain, labels, test, test_ids, classes = encode(train, test)\nsss = StratifiedShuffleSplit(labels, 10, test_size=0.2, random_state=23)\n\n\nfor train_index, test_index in sss: #shuffle. only last set is used\n#\tif (count!=0):\n#\t\tprint(\"2 append\")\n#\t\tX_train, X_test = np.concatenate((X_train, train.values[train_index]), axis=0), np.concatenate((X_test, train.values[test_index]), axis=0)\n#\t\ty_train, y_test = np.concatenate((y_train,labels[train_index]), axis=0), np.concatenate((y_test, labels[test_index]), axis=0)\n#\telse:\n\tX_train, X_test = train.values[train_index], train.values[test_index]\n\ty_train, y_test = labels[train_index], labels[test_index]\n\t\n\n#\tX_train, X_test = np.concatenate(X_train, train.values[train_index]), np.concatenate(X_test, train.values[test_index])\n#\ty_train, y_test = np.concatenate(y_train,labels[train_index]), np.concatenate(y_test, labels[test_index])\nprint(y_test.shape)\n\n#clf = KNeighborsClassifier(3)\nclf= LinearDiscriminantAnalysis()\nlog_cols=[\"Classifier\", \"Accuracy\", \"Log Loss\"]\nlog = pd.DataFrame(columns=log_cols)\nclf.fit(X_train, y_train)\nname = clf.__class__.__name__\n\nprint(\"=\"*30)\nprint(name)\n\nprint('****Results****')\ntrain_predictions = clf.predict(X_test)\n\nacc = accuracy_score(y_test, 
train_predictions)\n\nprint(\"Accuracy: {:.4%}\".format(acc))\n\ntrain_predictions = clf.predict_proba(X_test)\nll = log_loss(y_test, train_predictions)\nprint(\"Log Loss: {}\".format(ll))\n\nlog_entry = pd.DataFrame([[name, acc*100, ll]], columns=log_cols)\nlog = log.append(log_entry)\n\n\n","sub_path":"preprocessing/randomforesttest.py","file_name":"randomforesttest.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"487782499","text":"import numpy as np\nimport pandas as pd\n\n\ndef summarize_return_num(return_nums):\n \"\"\"\n Gets the number of returns by return number.\n\n :param return_nums: A :class:`pandas.Series` of return number that describes the return number of each point.\n :return: A :class:`pandas.Series` of return number counts by return number.\n \"\"\"\n\n return return_nums.groupby(return_nums).agg('count')\n\n\ndef summarize_percentiles(z, pct = (1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99)):\n \"\"\"\n :param z: A :class:`pandas.Series` of z values.\n \"\"\"\n\n return (np.percentile(z, pct), pct)\n\n\ndef standard_metrics(points, heightbreak=6):\n metrics = pd.DataFrame()\n\n # Some values used multiple times\n mean_z = np.mean(points.z)\n\n metrics['total_returns'] = [np.alen(points)]\n\n # Get number of returns by return number\n for i, num in enumerate(summarize_return_num(points.return_num)):\n metrics['r_{}'.format(i+1)] = [num]\n\n metrics['max_z'] = [np.max(points.z)]\n metrics['min_z'] = [np.min(points.z)]\n metrics['mean_z'] = [mean_z]\n metrics['median_z'] = [np.median(points.z)]\n metrics['stddev_z'] = [np.std(points.z)]\n metrics['var_z'] = [np.var(points.z)]\n\n for pct_z, pct in zip(*summarize_percentiles(points.z)):\n metrics['p_{}'.format(pct)] = [pct_z]\n\n # \"Cover metrics\"\n metrics['canopy_relief_ratio'] = (metrics['mean_z'] - metrics['min_z']) / (metrics['max_z'] - metrics['min_z'])\n 
metrics['pct_r_1_above_{}'.format(heightbreak)] = np.sum((points['return_num'] == 1) & (points['z'] > heightbreak)) / metrics['r_1']\n metrics['pct_r_1_above_mean'] = np.sum((points['return_num'] == 1) & (points['z'] > mean_z)) / metrics['r_1']\n metrics['pct_all_above_{}'.format(heightbreak)] = np.sum(points['z'] > heightbreak) / metrics['total_returns']\n metrics['pct_all_above_mean'] = np.sum(points['z'] > mean_z) / metrics['total_returns']\n\n return metrics\n","sub_path":"pyfor/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"203143883","text":"import os\nimport datetime\nfrom flask import Blueprint, jsonify, request, abort\nfrom firebase_admin import storage\nfrom services.realtime_collection import realtime_collections, Collections\n\nbp = Blueprint('reports.py', __name__, url_prefix='/reports.py')\n\nBUCKET = os.environ.get(\"DEFAULT_BUCKET\")\nFOLDER = 'reports'\n\ndef to_obj(doc):\n doc_copy = doc.copy()\n doc_copy['fileurl'] = [\n storage.bucket(BUCKET) \\\n .blob(f'{FOLDER}/{doc[\"filename\"]}') \\\n .generate_signed_url(\n expiration=datetime.timedelta(days=1),\n )\n ]\n\n return doc_copy\n\n@bp.route('/')\ndef index():\n project = request.args.get('project', 'global')\n docs = realtime_collections.collections[Collections.REPORTS].get()\n\n def docs_filter(doc):\n return doc.get('is_active') == True and doc.get('project') == project\n\n return jsonify(data=list(sorted(map(to_obj, filter(docs_filter, docs)), key=lambda doc: doc.get('order_position'))))\n\n\n@bp.route('/')\ndef show(key):\n doc = realtime_collections.collections[Collections.REPORTS].get(key)\n\n if doc is None or not doc.get('is_active'):\n abort(404)\n\n return 
jsonify(data=doc)\n","sub_path":"api/blueprints/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"37536345","text":"\nimport pywingchun\nfrom kungfu.wingchun.constants import *\n\ndef object_as_dict(obj):\n d = {}\n for attr in dir(obj):\n if not attr.startswith('__'):\n value = getattr(obj, attr)\n if type(value) in [InstrumentType, Side, Offset, OrderStatus, Direction, PriceType, VolumeCondition, TimeCondition, LedgerCategory]:\n d[attr] = int(value)\n else:\n d[attr] = value\n return d\n\ndef is_final_status(order_status):\n return int(order_status) in AllFinalOrderStatus\n\ndef get_position_effect(instrument_type, side, offset):\n if instrument_type == InstrumentType.Stock:\n return Direction.Long\n elif side == Side.Buy and offset == Offset.Open:\n return Direction.Long\n elif side == Side.Sell and (offset == Offset.Close or offset == Offset.CloseToday or offset == Offset.CloseYesterday):\n return Direction.Long\n elif side == Side.Sell and offset == Offset.Open:\n return Direction.Short\n elif side == Side.Buy and (offset == Offset.Close or offset == Offset.CloseToday or offset == Offset.CloseYesterday):\n return Direction.Short\n else:\n raise ValueError('could not find position effect for instrument_type {}, side {}, offset {}'.format(instrument_type, side, offset))\n\nget_instrument_type = pywingchun.utils.get_instrument_type\nis_valid_price = pywingchun.utils.is_valid_price\nget_symbol_id = pywingchun.utils.get_symbol_id\n","sub_path":"core/python/kungfu/wingchun/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"297089575","text":"from models import Sentence\n\n\ndef sent2features(sent: Sentence):\n return [word2features(sent, i) for i in range(sent.length)]\n\n\ndef word2features(sent: Sentence, 
i: int):\n features = get_mor_feature(sent.get(i))\n if i >= 2:\n features.extend(get_mor_feature(sent.get(i-2), '-2'))\n else:\n features.append('BOS')\n\n if i >= 1:\n features.extend(get_mor_feature(sent.get(i-1), '-1'))\n else:\n features.append('BOS')\n\n if i < sent.length - 1:\n features.extend(get_mor_feature(sent.get(i+1), '+1'))\n else:\n features.append('EOS')\n\n if i < sent.length - 2:\n features.extend(get_mor_feature(sent.get(i+2), '+2'))\n else:\n features.append('EOS')\n\n return features\n\n\ndef get_mor_feature(mor, pos=None):\n features = []\n if pos is None:\n prefix = ''\n features.append('bias')\n else:\n prefix = pos + ':'\n\n features.extend([\n prefix + 'word=' + mor.token,\n prefix + 'pos1=' + mor.pos1,\n prefix + 'pos2=' + mor.pos2,\n prefix + 'type=' + mor.type,\n ])\n return features\n","sub_path":"crf/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"88420112","text":"\"\"\"An individual treadmill spawn instance.\"\"\"\n\nimport logging\nimport os\nimport yaml\n\nfrom treadmill.spawn import utils as spawn_utils\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Instance(object):\n \"\"\"Treadmill spawn instance\"\"\"\n\n __slots__ = (\n 'id',\n 'proid',\n 'name',\n 'settings',\n 'manifest',\n 'manifest_path'\n )\n\n def __init__(self, manifest_path):\n self.manifest_path = manifest_path\n self.id = os.path.splitext(os.path.basename(self.manifest_path))[0]\n self.proid = spawn_utils.get_user_safe(self.manifest_path)\n self.settings = {\n 'name': self.id,\n 'stop': True,\n 'reconnect': False,\n 'reconnect_timeout': 0\n }\n self.manifest = None\n\n self._read_manifest_file()\n\n self.name = '{0}.{1}'.format(self.proid, self.settings['name'])\n\n def _read_manifest_file(self):\n \"\"\"Reads the YAML (manifest) file contents.\"\"\"\n docs = []\n\n try:\n stream = open(self.manifest_path, \"r\")\n 
manifest_contents = stream.read()\n generator = yaml.load_all(manifest_contents)\n\n for doc in generator:\n docs.append(doc)\n except (IOError, yaml.YAMLError) as ex:\n _LOGGER.error(ex)\n return\n\n if len(docs) < 2:\n _LOGGER.error(\"YAML file needs to contain 2 docs\")\n return\n\n self.settings.update(docs[0])\n self.manifest = docs[1]\n","sub_path":"treadmill/spawn/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"427090010","text":"\"\"\"\nAnalyze MCMC samples\nShiwei Lan @ U of Warwick, 2016\n\"\"\"\n\nimport os,pickle\nimport numpy as np\n# import matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport sys\nsys.path.append( \"../\" )\nfrom util.bayesianStats import effectiveSampleSize as ess\nfrom joblib import Parallel, delayed\n\nclass ana_samp(object):\n def __init__(self,algs,dir_name='',ext='.pckl',save_txt=True,PLOT=False,save_fig=False):\n self.algs=algs\n self.num_algs=len(algs)\n # locate the folder\n cwd=os.getcwd()\n self.savepath=os.path.join(cwd,dir_name)\n # scan files\n self.fnames=[f for f in os.listdir(self.savepath) if f.endswith(ext)]\n # some settings\n self.save_txt=save_txt\n self.PLOT=PLOT\n self.save_fig=save_fig\n\n def cal_ESS(self,samp):\n num_samp,dim=np.shape(samp)\n if dim==1:\n ESS=ess(samp)\n else:\n ESS=Parallel(n_jobs=4)(map(delayed(ess), np.transpose(samp)))\n return num_samp,ESS\n\n def plot_samp(self,samp,loglik,alg_no):\n coord=np.int_(samp[0,]); samp=samp[1:,]\n num_samp,dim=np.shape(samp)\n idx=np.floor(np.linspace(0,num_samp-1,np.min([1e4,num_samp]))).astype(int)\n# col=np.sort(np.random.choice(dim,np.min([4,dim]),False))\n# col=np.array([1,2,np.floor(dim/2),dim],dtype=np.int)-1\n col=np.arange(np.min([6,dim]),dtype=np.int)\n# mat4plot=samp[idx,]; mat4plot=mat4plot[:,col]; # samp[idx,col] doesn't work, seems very stupid~~\n 
mat4plot=samp[np.ix_(idx,col)]\n # figure 1: plot selected samples\n fig,axes = plt.subplots(nrows=1,ncols=2,num=alg_no*2,figsize=(10,6))\n [axes[0].plot(idx,samp[idx,d]) for d in col]\n axes[0].set_title('selected samples')\n axes[1].plot(loglik)\n axes[1].set_title('log-likelihood')\n if self.save_fig:\n fig.savefig(os.path.join(self.savepath,self.algs[alg_no]+'_traceplot.png'),dpi=fig.dpi)\n else:\n plt.show()\n # figure 2: pairwise distribution density contour\n from scipy import stats\n def corrfunc(x, y, **kws):\n r, _ = stats.pearsonr(x, y)\n ax = plt.gca()\n ax.annotate(\"r = {:.2f}\".format(r),\n xy=(.1, .9), xycoords=ax.transAxes)\n\n# fig = plt.figure(num=alg_no+self.num_algs,figsize=(8,8))\n df4plot = pd.DataFrame(mat4plot,columns=[r'$\\theta_{%d}$' % k for k in col])\n# pd.scatter_matrix(df4plot)\n# plt.figure(alg_no+self.num_algs)\n g = sns.PairGrid(df4plot)\n g.map_upper(plt.scatter)\n g.map_lower(sns.kdeplot, cmap=\"Blues_d\")\n g.map_diag(sns.kdeplot, lw=3, legend=False)\n g.map_lower(corrfunc)\n# if matplotlib.get_backend().lower() in ['agg', 'macosx']:\n# fig.set_tight_layout(True)\n# else:\n# fig.tight_layout()\n if self.save_fig:\n g.savefig(os.path.join(self.savepath,self.algs[alg_no]+'_distribution.png'))\n else:\n plt.show()\n\n def analyze(self):\n self.stepsize=np.zeros(self.num_algs)\n self.acptrate=np.zeros(self.num_algs)\n self.spiter=np.zeros(self.num_algs)\n self.ESS=np.zeros((self.num_algs,4))\n self.minESS_s=np.zeros(self.num_algs)\n self.spdup=np.zeros(self.num_algs)\n self.PDEsolns=np.zeros(self.num_algs)\n \n # calculate ESS of samples stored in h5 file separately\n ESS_fname=os.path.join(self.savepath,'sumry_ESS.txt')\n if not os.path.isfile(ESS_fname):\n import subprocess\n subprocess.call('./get_ESS.sh')\n else:\n# sumry_ESS=np.array(np.loadtxt(ESS_fname,dtype={'names':('method','ESS_min','ESS_med','ESS_max'),'formats':('|S10',np.float,np.float,np.float)},delimiter=','))[None,]\n 
sumry_ESS=np.array(np.genfromtxt(ESS_fname,dtype=np.dtype([('method','U11'),('ESS_min','\", methods=['GET', 'POST', 'DELETE', 'PUT'])\ndef users_actions(USER_ID):\n if request.method == 'GET':\n try:\n # Connect to DB\n conn, cursor = connect()\n\n cursor.execute(\"SELECT * FROM 42Oh3xFfiH.users_dateTime WHERE user_id = %s\", args=USER_ID)\n for row in cursor:\n name = row[1]\n\n\n disconnect(conn, cursor)\n\n return {\"status\": \"ok\", \"user_name\": name}, 200\n\n except Exception as err:\n return {\"status\": \"error\", \"reason\": \"no such id\"}, 500\n\n elif request.method == 'POST': # Check if the method given os POST\n try:\n # Prepared\n sql = \"INSERT INTO 42Oh3xFfiH.users_dateTime (user_id, user_name, creation_date) VALUES (%s, %s, %s)\"\n\n # Connect to DB\n conn, cursor = connect()\n\n data = request.json # Get data from json payload\n date = datetime.now() # Get current date and time for creation date field in users table\n cursor.execute(sql, args=(\n USER_ID, data.get(\"user_name\"), date.strftime(\"%Y-%m-%d %H:%M:%S\"))) # Execute the query\n\n # Disconnect from DB\n disconnect(conn, cursor)\n\n # If user generation succeeded\n return {\"status\": \"ok\", \"user_added\": data.get(\"user_name\")}, 200\n\n except Exception as err:\n return {\"status\": \"error\", \"reason\": \"id already exist\"}, 500\n\n elif request.method == 'PUT':\n try:\n # Connect to DB\n conn, cursor = connect()\n\n data = request.json\n cursor.execute(\"UPDATE 42Oh3xFfiH.users_dateTime SET user_name = %s WHERE user_id = %s\",\n args=(data.get(\"user_name\"), USER_ID))\n\n\n disconnect(conn, cursor)\n\n # Return json of success\n return {\"status\": \"ok\", \"user_updated\": data.get(\"user_name\")}, 200\n\n except Exception as err: # If error occurred\n return {\"status\": \"error\", \"reason\": \"no such id\"}, 500\n\n elif request.method == 'DELETE':\n try:\n # Connect to DB\n conn, cursor = connect()\n\n cursor.execute(\"DELETE from 42Oh3xFfiH.users_dateTime WHERE user_id 
= %s\", args=(USER_ID))\n\n # Disconnect from DB\n disconnect(conn, cursor)\n\n # Return json of success\n return {\"status\": \"ok\", \"user_deleted\": USER_ID}, 200\n\n except Exception as err: # If error occurred\n return {\"status\": \"error\", \"reason\": \"no such id\"}, 500\n\n\napp.run(host='127.0.0.1', debug=True, port=5000)\n","sub_path":"rest_app.py","file_name":"rest_app.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229437112","text":"import cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2gray\nfrom skimage.filters import gaussian\nfrom skimage.segmentation import active_contour\nfrom scipy import ndimage\nfrom scipy.misc import imsave\n\n# gets coordinate centroid array and contours\n\nframe_count = 136\ncoordinate_array = np.load('coordinate_array.npy')\nprint(coordinate_array[0])\n# ADD LOOPING\n# while(1):\ncurrent = coordinate_array[frame_count - 136]\nimg = ndimage.imread(\"mask_frames/fgmask\" + str(frame_count) + \".png\", mode = 'RGB')\nimg = rgb2gray(img)\n\n# circle for wings\ns = np.linspace(0, 2 * np.pi, 400)\nx = current[0] + 200 * np.cos(s)\ny = current[1] + 200 * np.sin(s)\ninit_circle = np.array([x, y]).T\nsnake = active_contour(gaussian(img, 2), init_circle, alpha=0.001, beta=0, gamma=0.001)\n\n# circle for tails\ns_tail = np.linspace(0, 2 * np.pi, 400)\nx_tail = 40 * np.cos(s_tail)\ny_tail = 120 * np.sin(s_tail)\nnew_x_tail = current[0] -50 + x_tail * np.cos(7 * np.pi / 4) + y_tail * np.sin(7 * np.pi / 4)\nnew_y_tail = current[1] + 100 - x_tail * np.sin(7 * np.pi / 4) + y_tail * np.cos(7 * np.pi / 4)\ninit_tail = np.array([new_x_tail, new_y_tail]).T\nsnake1 = active_contour(gaussian(img, 1), init_tail, alpha=0.001, beta=0, gamma=0.001)\n\nfig, ax = plt.subplots(figsize=(7, 7))\nax.imshow(img, cmap=plt.cm.gray)\nax.plot(init_circle[:, 0], init_circle[:, 1], '--r', 
lw=2)\nax.plot(init_tail[:, 0], init_tail[:, 1], '--g', lw=2)\nax.plot(snake[:, 0], snake[:, 1], '-r', lw=2)\nax.plot(snake1[:, 0], snake1[:, 1], '-g', lw=2)\nax.set_xticks([]), ax.set_yticks([])\nax.axis([0, img.shape[1], img.shape[0], 0])\nplt.show()\n# fig.savefig(\"contoured_results/contoured_result\" + str(frame_count) + \".png\")\n\nframe_count = frame_count + 1","sub_path":"imageContouring.py","file_name":"imageContouring.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"211272234","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom aria import workflow\nfrom aria.orchestrator import operation\nfrom aria.orchestrator.workflows.api.task import OperationTask\nfrom aria.orchestrator.runner import Runner\n\nfrom tests import mock\n\nimport pytest\n\n\nOPERATION_RESULTS = {}\n\n\n@operation\ndef mock_create_operation(ctx, key, value, **kwargs): # pylint: disable=unused-argument\n OPERATION_RESULTS[key] = value\n\n\n@pytest.fixture(autouse=True)\ndef cleanup():\n OPERATION_RESULTS.clear()\n\n\ndef test_runner_no_tasks():\n @workflow\n def workflow_fn(ctx, graph): # pylint: disable=unused-argument\n pass\n\n 
_test_runner(workflow_fn)\n\n\ndef test_runner_tasks():\n @workflow\n def workflow_fn(ctx, graph):\n for node_instance in ctx.model.node_instance.iter():\n graph.add_tasks(\n OperationTask.node_instance(instance=node_instance,\n name='tosca.interfaces.node.lifecycle.Standard.create'))\n\n _test_runner(workflow_fn)\n\n assert OPERATION_RESULTS.get('create') is True\n\n\ndef _initialize_model_storage_fn(model_storage):\n mock.topology.create_simple_topology_single_node(\n model_storage,\n 1,\n '%s.%s' % (__name__, mock_create_operation.__name__)\n )\n\n\ndef _test_runner(workflow_fn):\n runner = Runner(workflow_name='runner workflow',\n workflow_fn=workflow_fn,\n inputs={},\n initialize_model_storage_fn=_initialize_model_storage_fn,\n deployment_id=1)\n runner.run()\n","sub_path":"tests/orchestrator/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"260675896","text":"# (C) Datadog, Inc. 2018-2019\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nfrom __future__ import absolute_import\n\nimport json\nimport os\nfrom base64 import urlsafe_b64encode\n\nimport pytest\n\nfrom .._env import (\n AGENT_COLLECTOR_SEPARATOR,\n E2E_FIXTURE_NAME,\n E2E_PARENT_PYTHON,\n TESTING_PLUGIN,\n e2e_active,\n e2e_testing,\n format_config,\n get_env_vars,\n replay_check_run,\n serialize_data,\n)\n\n__aggregator = None\n__datadog_agent = None\n\n\n@pytest.fixture\ndef aggregator():\n \"\"\"This fixture returns a mocked Agent aggregator with state cleared.\"\"\"\n global __aggregator\n\n # Since this plugin is loaded before pytest-cov, we need to import lazily so coverage\n # can see imports, class/function definitions, etc. 
of the base package.\n if __aggregator is None:\n try:\n from datadog_checks.base.stubs import aggregator as __aggregator\n except ImportError:\n raise ImportError('datadog-checks-base is not installed!')\n\n __aggregator.reset()\n return __aggregator\n\n\n@pytest.fixture\ndef datadog_agent():\n global __datadog_agent\n\n if __datadog_agent is None:\n try:\n from datadog_checks.base.stubs import datadog_agent as __datadog_agent\n except ImportError:\n raise ImportError('datadog-checks-base is not installed!')\n\n __datadog_agent.reset()\n return __datadog_agent\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef dd_environment_runner(request):\n testing_plugin = os.getenv(TESTING_PLUGIN) == 'true'\n\n # Do nothing if no e2e action is triggered and continue with tests\n if not testing_plugin and not e2e_active(): # no cov\n return\n # If e2e tests are being run it means the environment has\n # already been spun up so we prevent another invocation\n elif e2e_testing(): # no cov\n # Since the scope is `session` there should only ever be one definition\n fixture_def = request._fixturemanager._arg2fixturedefs[E2E_FIXTURE_NAME][0]\n\n # Make the underlying function a no-op\n fixture_def.func = lambda: None\n return\n\n try:\n config = request.getfixturevalue(E2E_FIXTURE_NAME)\n except Exception as e:\n # pytest doesn't export this exception class so we have to do some introspection\n if e.__class__.__name__ == 'FixtureLookupError':\n # Make it explicit for our command\n pytest.exit('NO E2E FIXTURE AVAILABLE')\n\n raise\n\n metadata = {}\n\n # Environment fixture also returned some metadata\n if isinstance(config, tuple):\n config, possible_metadata = config\n\n # Support only defining the env_type for ease-of-use\n if isinstance(possible_metadata, str):\n metadata['env_type'] = possible_metadata\n else:\n metadata.update(possible_metadata)\n\n # Default to Docker as that is the most common\n metadata.setdefault('env_type', 'docker')\n\n # Save any environment 
variables\n metadata.setdefault('env_vars', {})\n metadata['env_vars'].update(get_env_vars(raw=True))\n\n data = {'config': config, 'metadata': metadata}\n\n message = serialize_data(data)\n\n message = 'DDEV_E2E_START_MESSAGE {} DDEV_E2E_END_MESSAGE'.format(message)\n\n if testing_plugin:\n return message\n else: # no cov\n # Exit testing and pass data back up to command\n pytest.exit(message)\n\n\n@pytest.fixture\ndef dd_agent_check(request, aggregator):\n if not e2e_testing():\n pytest.skip('Not running E2E tests')\n\n # Lazily import to reduce plugin load times for everyone\n from datadog_checks.dev import TempDir, run_command\n\n def run_check(config=None, **kwargs):\n root = os.path.dirname(request.module.__file__)\n while True:\n if os.path.isfile(os.path.join(root, 'setup.py')):\n check = os.path.basename(root)\n break\n\n new_root = os.path.dirname(root)\n if new_root == root:\n raise OSError('No Datadog Agent check found')\n\n root = new_root\n\n python_path = os.environ[E2E_PARENT_PYTHON]\n env = os.environ['TOX_ENV_NAME']\n\n check_command = [python_path, '-m', 'datadog_checks.dev', 'env', 'check', check, env, '--json']\n\n if config:\n config = format_config(config)\n config_file = os.path.join(temp_dir, '{}-{}-{}.json'.format(check, env, urlsafe_b64encode(os.urandom(6))))\n\n with open(config_file, 'wb') as f:\n output = json.dumps(config).encode('utf-8')\n f.write(output)\n check_command.extend(['--config', config_file])\n\n for key, value in kwargs.items():\n if value is not False:\n check_command.append('--{}'.format(key.replace('_', '-')))\n\n if value is not True:\n check_command.append(str(value))\n\n result = run_command(check_command, capture=True)\n if AGENT_COLLECTOR_SEPARATOR not in result.stdout:\n raise ValueError(\n '{}{}\\nCould find `{}` in the output'.format(result.stdout, result.stderr, AGENT_COLLECTOR_SEPARATOR)\n )\n\n _, _, collector_output = result.stdout.partition(AGENT_COLLECTOR_SEPARATOR)\n collector_output = 
collector_output.strip()\n if not collector_output.endswith(']'):\n # JMX needs some additional cleanup\n collector_output = collector_output[: collector_output.rfind(']') + 1]\n collector = json.loads(collector_output)\n\n replay_check_run(collector, aggregator)\n\n return aggregator\n\n # Give an explicit name so we don't shadow other uses\n with TempDir('dd_agent_check') as temp_dir:\n yield run_check\n\n\ndef pytest_configure(config):\n # pytest will emit warnings if these aren't registered ahead of time\n config.addinivalue_line('markers', 'unit: marker for unit tests')\n config.addinivalue_line('markers', 'integration: marker for integration tests')\n config.addinivalue_line('markers', 'e2e: marker for end-to-end Datadog Agent tests')\n","sub_path":"datadog_checks_dev/datadog_checks/dev/plugin/pytest.py","file_name":"pytest.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"524892376","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nresult = 0\n\nprint ('Добро пожаловать в Калькулятор')\n\nfirst = True\nwhile True:\n if first:\n number = input('Введите первое число: ')\n number_1 = input('Введите второе число: ')\n operation = input('Выберите операцию: \\n 1. + \\n 2. - \\n 3. * \\n 4. / \\n')\n while (operation < 1) | (operation > 4):\n print('Ошибка: Недопустимая операция')\n operation = input('Выберите операцию: \\n 1. + \\n 2. - \\n 3. * \\n 4. 
/ \\n')\n if operation == 1:\n result = number + number_1\n elif operation == 2:\n result = number - number_1\n elif operation == 3:\n result = number * number_1\n elif operation == 4:\n if number_1 == 0: print('Ошибка: Деление на 0 невозможно')\n else: result = number / number_1\n print(result)\n selection = input('Введите 0 для завершения программы \\nВведите 1 для продолжения расчетов: ')\n if selection == 0:\n break\n elif selection == 1:\n first = False\n number = result","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"585426470","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('article_title', models.CharField(max_length=200, verbose_name=b'\\xd0\\x9d\\xd0\\xb0\\xd0\\xb7\\xd0\\xb2\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5')),\n ('article_text', models.TextField(verbose_name=b'\\xd0\\x9e\\xd0\\xbf\\xd0\\xb8\\xd1\\x81\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5 \\xd0\\xbf\\xd1\\x80\\xd0\\xbe\\xd0\\xb4\\xd1\\x83\\xd0\\xba\\xd1\\x82\\xd0\\xb0')),\n ('article_date', models.DateTimeField(verbose_name=b'\\xd0\\x94\\xd0\\xb0\\xd1\\x82\\xd0\\xb0 \\xd0\\xb4\\xd0\\xbe\\xd0\\xb1\\xd0\\xb0\\xd0\\xb2\\xd0\\xbb\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd1\\x8f')),\n ('article_likes', models.IntegerField(default=0)),\n ('article_image', models.ImageField(default=b'images/None/no_image.png', upload_to=b'images/', verbose_name=b'\\xd0\\x98\\xd0\\xb7\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd0\\xb0\\xd0\\xb6\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5')),\n ],\n options={\n 'db_table': 'Articles',\n },\n bases=(models.Model,),\n ),\n 
migrations.CreateModel(\n name='Comments',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comments_text', models.TextField(verbose_name=b'\\xd0\\xa2\\xd0\\xb5\\xd0\\xba\\xd1\\x81\\xd1\\x82 \\xd0\\xba\\xd0\\xbe\\xd0\\xbc\\xd0\\xbc\\xd0\\xb5\\xd0\\xbd\\xd1\\x82\\xd0\\xb0:')),\n ('comments_article', models.ForeignKey(to='article.Article')),\n ],\n options={\n 'db_table': 'comments',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FirmWare',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fw_date', models.DateTimeField(verbose_name=b'\\xd0\\x94\\xd0\\xb0\\xd1\\x82\\xd0\\xb0')),\n ('fw_description', models.CharField(max_length=200, verbose_name=b'\\xd0\\x9e\\xd0\\xbf\\xd0\\xb8\\xd1\\x81\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5')),\n ('fw_files', models.FileField(default=b'no_files', upload_to=b'files/')),\n ('fw', models.ForeignKey(to='article.Article')),\n ],\n options={\n 'db_table': 'firmwares',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"djangoenv/bin/firstapp/article/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"650022409","text":"# from hello import app\nimport os\nimport main\nimport unittest\nimport tempfile\nimport json\n\n\nclass BuckybanTestCase(unittest.TestCase):\n def setUp(self):\n self.db_fd, main.app.config['DATABASE'] = tempfile.mkstemp()\n main.app.config['TESTING'] = True\n self.app = main.app.test_client()\n main.init_db()\n\n def tearDown(self):\n os.close(self.db_fd)\n os.unlink(main.app.config['DATABASE'])\n\n def test_post_get_scores(self):\n resp = self.app.post(\n '/api/v1/scores/243',\n method='POST',\n data=dict(\n moves=40,\n duration=2000,\n username='mario'\n ),\n follow_redirects=False\n )\n data = 
json.loads(resp.response.next())\n self.assertEqual(data, {u'result': True})\n \n resp = self.app.post(\n '/api/v1/scores/243',\n method='POST',\n data=dict(\n moves=40,\n duration=3000,\n username='wario'\n ),\n follow_redirects=False\n )\n data = json.loads(resp.response.next())\n self.assertEqual(data, {u'result': True})\n \n resp = self.app.post(\n '/api/v1/scores/243',\n method='POST',\n data=dict(\n moves=20,\n duration=6000,\n username='luigi'\n ),\n follow_redirects=True\n )\n data = json.loads(resp.response.next())\n self.assertEqual(data, {u'result': True})\n \n scores = self.app.get('/api/v1/scores/243')\n data = json.loads(scores.response.next())\n \n self.assertEqual(data[0]['username'], 'luigi')\n self.assertEqual(data[1]['username'], 'mario')\n self.assertEqual(data[2]['username'], 'wario')\n \n scores = self.app.get('/api/v1/scores/111')\n data = json.loads(scores.response.next())\n self.assertEqual(len(data), 0)\n\n def test_post_scores_fail(self):\n resp = self.app.post(\n '/api/v1/scores/243',\n method='POST',\n data=dict(\n moves=40,\n time=2000,\n ),\n follow_redirects=True\n )\n self.assertEqual(resp.status_code, 400)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/buckyban/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"225803414","text":"'''\nCreated on Jul 30, 2019\n\n@author: Daniel, Marcel\n'''\n\nimport SQLStatements\nimport mysql.connector as mariadb\n\nclass CreateDatabaseEntries:\n '''\n Creating databaseentries for kicker database\n needs cursor to maria-db connection\n '''\n global cursor\n global kickerDP \n kickerDP = \"https://www.kicker.de\"\n\n def __init__(self, theCursor):\n '''\n @theCursor mariadb.connect(...).cursor()\n '''\n global cursor\n cursor = theCursor\n for SQLStatements.createDB in cursor.execute(SQLStatements.createDB, multi=True):\n pass\n\n\n def createMatch 
(self, season, matchDayInt, home, homeGoals, away, awayGoals, score, analysis):\n \"\"\"\n create match statement for given attributes and execute\n \"\"\"\n cmStatement = SQLStatements.createMatch.format(season, matchDayInt, home, away, score, homeGoals, awayGoals, analysis)\n try:\n cursor.execute(cmStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n \n\n def createMatchFromAnalysis(self, matchLink):\n print(\"something\")\n\n def createClubs(self, clubs):\n \"\"\"\n @clubs dictionary with \n keys() = clubName\n values() = clubPage \n \"\"\"\n clubArray = []\n for clubName in clubs:\n clubArray.append(clubName)\n for clubName in clubArray:\n clubPage = clubs[clubName]\n try:\n dbStatement = SQLStatements.createClub.format(clubName, clubPage)\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n\n def createPlayer(self, playerID, firstName, lastName, birthday, number):\n dbStatement = SQLStatements.createPlayer.format(playerID, firstName, lastName, birthday, number)\n try:\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n \n def createPlayedMatchStatement(self, season, statisticsTable, playerID, \n playerDictionary):\n \n clubs = []\n for key in playerDictionary:\n clubs.append(key)\n\n clubs.remove(\"link\")\n \n i = 13\n currentClub = \"\"\n while i < len(statisticsTable):\n gameArray = []\n playedAt = \"\"\n #different competition\n if len(statisticsTable[i].select(\"a\")) == 0:\n break\n #not interested in games on bench\n elif statisticsTable[i+1].getText().strip() == \"ohne Einsatz im Kader\":\n i = i + 3\n else:\n gameArray = statisticsTable[i].getText().strip().replace(\"\\n\\n\", \"\").split(\"\\n\")\n \"\"\"\n statisticsTable\n 0 gameArray [0 spieltag - dd.mm.yyyy, 1hT, 2htShort, 3 homegoals, 4 :, 5awaygoals, 678 halftime, 9 awayTeam, 10 
awayTeamshort]\n 1 grade\n 2 goals (including penalties)\n 3 pen attempts/goals\n 4 assists\n 5 scorerpoints\n 6 subbedIn\n 7 subbedOut\n 8 cardY\n 9 cardYR\n 10 cardR\n 11 placeholder\n \"\"\"\n homeTeam = gameArray[1]\n awayTeam = gameArray[9]\n playedFor = \"\"\n playedAgainst = \"\"\n\n #need to find out which team the player played for\n if currentClub == \"\":\n if len(clubs) == 1:\n currentClub = clubs[0]\n #played for 2 teams in a season\n elif len(clubs) == 2:\n #both teams played each other on first match of player\n if (homeTeam == clubs[0] and awayTeam == clubs[1]) or (homeTeam == \n clubs[1] and awayTeam == clubs[0]):\n homeTeam2 = gameArray[12]\n awayTeam2 = gameArray[20]\n if homeTeam2 == clubs[0] or awayTeam2 == clubs[0]:\n currentClub = clubs[0]\n elif homeTeam2 == clubs[1] or awayTeam2 == clubs[1]:\n currentClub = clubs[1]\n else:\n if homeTeam == clubs[0]:\n currentClub = homeTeam\n if awayTeam == clubs[0]:\n currentClub = awayTeam\n if homeTeam == clubs[1]:\n currentClub = homeTeam\n if awayTeam == clubs[1]:\n currentClub = awayTeam\n #played for home or away team\n if homeTeam == currentClub:\n playedFor = currentClub\n playedAgainst = awayTeam\n playedAt = \"H\"\n if awayTeam == currentClub:\n playedFor = currentClub\n playedAgainst = homeTeam\n playedAt = \"A\"\n \n playerDictionary[currentClub] -= 1\n if playerDictionary[currentClub] == 0:\n clubs.remove(currentClub)\n currentClub = \"\"\n\n grade = formatGrade(statisticsTable[i+1])\n goals = replaceMinus(statisticsTable[i+2])\n penAttempts = 0\n penScored = 0\n if statisticsTable[i+3].getText().strip() != \"-\":\n penAttempts = statisticsTable[i+3].getText().strip().split(\"/\")[0]\n penScored = statisticsTable[i+3].getText().strip().split(\"/\")[1]\n assists = replaceMinus(statisticsTable[i+4])\n subbedIn = replaceMinus(statisticsTable[i+6])\n subbedOut = replaceMinus(statisticsTable[i+7])\n cardY = replaceMinus(statisticsTable[i+8])\n cardYR = replaceMinus(statisticsTable[i+9])\n cardR = 
replaceMinus(statisticsTable[i+10])\n\n try:\n dbStatement = SQLStatements.createPlayedMatch.format(\n playerID, season, playedFor, playedAgainst, playedAt,\n grade, goals, penAttempts, penScored, assists,\n subbedIn, subbedOut, cardR, cardYR, cardY)\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n gameArray = []\n i += 12\n\n def createPlayedMatchFromSchema(self, playerID, season, playedFor, playedAgainst, playedAt,\n grade, goals, penAttempts, penScored, assists,\n subbedIn, subbedOut, cardR, cardYR, cardY):\n try:\n grade = formatGrade(grade)\n dbStatement = SQLStatements.createPlayedMatch.format(\n playerID, season, playedFor, playedAgainst, playedAt,\n grade, goals, penAttempts, penScored, assists,\n subbedIn, subbedOut, cardR, cardYR, cardY)\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n\n\n def createReferee(self, refPage, refFirstName, refLastName):\n try:\n dbStatement = SQLStatements.createReferee.format(refPage, refFirstName, refLastName)\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n \n def createOfficiatedMatch(self, season, matchday, refPage, homeTeam, awayTeam, \n yellowCards, yellowRedCards, redCards, penalties):\n \n yellowCards = replaceMinus(yellowCards)\n yellowRedCards = replaceMinus(yellowRedCards)\n redCards = replaceMinus(redCards)\n penalties = replaceMinus(penalties)\n try:\n dbStatement = SQLStatements.createOfficiatedMatch.format(season, matchday, refPage,\n homeTeam, awayTeam, yellowCards, yellowRedCards, redCards, penalties)\n cursor.execute(dbStatement)\n except mariadb.Error as error:\n if error.args[0] != 1062:\n print(\"Error: {}\".format(error))\n\n def isPlayerInDB(self, playerID):\n \"\"\"\n @return YES/NO if player in db\n \"\"\"\n dbStatement = 
SQLStatements.isPlayerInDB.format(playerID)\n cursor.execute(dbStatement)\n for response in cursor:\n return response[0]\n\n def getMatchdays(self, season):\n dbStatement = SQLStatements.getMatchdays.format(season)\n cursor.execute(dbStatement)\n matchdays = {}\n for response in cursor:\n matchdays.update({response[0]:response[1]})\n return matchdays\n\n def getSeasons(self):\n \"\"\"\n @return list of seasons\n \"\"\"\n seasons = []\n dbStatement = SQLStatements.getSeasons\n cursor.execute(dbStatement)\n for response in cursor:\n seasons.append(response[0])\n return seasons\n\ndef replaceMinus(word):\n \"\"\"\n formatting numbers in games played table\n @return String of number\n \"\"\"\n if type(word) != str:\n word = word.getText().strip()\n if word == \"-\" or word == \"\":\n return 0\n else:\n return word.replace(\".\", \"\")\n\ndef formatGrade(grade):\n \"\"\"\n formatting grade of player\n @return \n NULL if played but no grade (<30min)\n or String of 3.5\n \"\"\"\n if type(grade) != str:\n grade = grade.getText().strip()\n if grade == \"-\" or grade == \"\":\n return \"NULL\"\n else:\n return grade.replace(\",\", \".\")\n \n","sub_path":"CreateDatabaseEntries.py","file_name":"CreateDatabaseEntries.py","file_ext":"py","file_size_in_byte":10216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"601498684","text":"from RelFinder import RelFinder\nfrom streamer.StreamCleaner import StreamCleaner\nimport time\nimport sys, signal\nimport argparse\n\n\n\ndef signal_handler(signal, frame):\n print(\"\\nEnding Analysis\")\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nif __name__ == \"__main__\":\n \n relf = RelFinder()\n rownum=\"{:<5}{:<20}{:<20}{:<15}{:<10}\"\n previous_results=()\n i=0\n\n parser = argparse.ArgumentParser(description='Program to list top relationships with a token') \n parser.add_argument('token', help='Token for similary estimation')\n 
parser.add_argument('-t','--threshold',default=100,type=int,help='Number of tokens to consider during IDF filtering')\n parser.add_argument('-s','--sleep', default=5,type=int,help='Number of seconds to sleep before doing next analysis')\n parser.add_argument('-nt','--limit',default=10,type=int, help='Top number of tokens to display')\n parser.add_argument('-l','--loops',default=5,type=int, help='Number loops to do analysis')\n\n args = vars(parser.parse_args())\n token = args ['token']\n threshold = args ['threshold']\n sleep = args['sleep']\n limit=args['limit']\n loops=args['loops']\n\n try:\n while i 0:\n if i==0:\n previous_results=results.copy() \n for j in range(limit):\n if results[j][1]==previous_results[j][1]:\n change=\"No change\"\n else:\n found=False\n for k in range(limit):\n if results[j][1]==previous_results[k][1]:\n change=\"{:+} before {} now {}\".format(previous_results[k][3]-results[j][3],previous_results[k][3]+1,results[j][3]+1)\n found=True\n if not found: \n change=\"New Token\"\n print (rownum.format(results[j][3]+1,results[j][0],results[j][1],round(results[j][2],7),change))\n print(\"-\"*80)\n previous_results=results.copy() \n time.sleep(sleep)\n i+=1\n else:\n print(\"No data found for that token\")\n exit (0)\n except KeyboardInterrupt:\n print('\\nProgram ended!')\n","sub_path":"TRelationsToken.py","file_name":"TRelationsToken.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"30524772","text":"# Judge does not like the solution, but by all intensive purposes it seems correct\nnumberOfLines = int(input())\nname = []\nage = []\nfor i in range(numberOfLines):\n text = input()\n name += [i[1:len(i) - 1] for i in text.split(' ') if i.startswith('@') and i.endswith('|')]\n age += [int(i[1:len(i) - 1]) for i in text.split(' ') if i.startswith('#') and i.endswith('*')]\n\nfor k, v in zip(name, age):\n print(f\"{k} is {v} years 
old.\")\n","sub_path":"Excercise Text Processing/More Excercises/1_Extract_Person_Information.py","file_name":"1_Extract_Person_Information.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493000405","text":"#These scripts are refer to \"https://github.com/carpedm20/DCGAN-tensorflow\"\nimport os\nimport scipy.misc\nimport numpy as np\nfrom model import MidiNet\nfrom utils import pp, to_json, generation_test\nfrom npy2midi_converter import write_piano_rolls_to_midi, set_piano_roll_to_instrument\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch\", 25, \"Epoch to train [20]\")\nflags.DEFINE_float(\"learning_rate\", 0.002, \"Learning rate of for adam [0.0002]\")\nflags.DEFINE_float(\"beta1\", 0.5, \"Momentum term of adam [0.5]\")\nflags.DEFINE_integer(\"batch_size\", 72, \"The size of batch [72]\")\nflags.DEFINE_integer(\"genre\", 1, \"The size of batch [72]\")\nflags.DEFINE_integer(\"num_program\", 0, \"The size of batch [72]\")\nflags.DEFINE_integer(\"output_w\", 16, \"The size of the output segs to produce [16]\")\nflags.DEFINE_integer(\"output_h\", 128, \"The size of the output note to produce [128]\")\nflags.DEFINE_integer(\"c_dim\", 1, \"Number of Midi track. [1]\")\nflags.DEFINE_integer(\"num_batches\", 200, \"Number of batches conctenated in a midi file. 
[1]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"/media/ashar/Data/lmd_genre/lpd_5/midinet_ckpts_relu_samplenew_lr/\",\n \"Directory for [checkpoint]\")\n\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"Directory name to save the image samples [samples]\")\nflags.DEFINE_string(\"dataset\", \"MidiNet_v1\", \"The name of dataset \")\nflags.DEFINE_boolean(\"is_train\", True, \"True for training, False for testing [False]\")\nflags.DEFINE_boolean(\"is_crop\", False, \"True for training, False for testing [False]\")\nflags.DEFINE_boolean(\"generation_test\", False, \"True for generation_test, False for nothing [False]\")\nflags.DEFINE_string(\"gen_dir\", \"gen\", \"Directory name to save the generate samples [samples]\")\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n pp.pprint(flags.FLAGS.__flags)\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n if not os.path.exists(FLAGS.sample_dir):\n os.makedirs(FLAGS.sample_dir)\n if not os.path.exists(FLAGS.gen_dir):\n os.makedirs(FLAGS.gen_dir)\n\n with tf.Session() as sess:\n if FLAGS.dataset == 'MidiNet_v1':\n model = MidiNet(sess, batch_size=FLAGS.batch_size, y_dim=4, output_w=FLAGS.output_w,\n output_h=FLAGS.output_h, c_dim=FLAGS.c_dim, dataset_name=FLAGS.dataset,\n is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir, sample_dir=FLAGS.sample_dir,\n gen_dir=FLAGS.gen_dir)\n\n model.load(FLAGS.checkpoint_dir)\n\n\n #For a song, we need to provide 100 batches of size 72, which would be concatenated to form a single song vector\n song_array = np.array([])\n genre_vec = np.zeros((FLAGS.batch_size, 4))\n\n # One hot for the genre\n genre_vec[:, 0:4] = [0, 0, 1, 0]\n\n for i in range(FLAGS.num_batches):\n batch_sample = generation_test(sess, model, FLAGS, genre_vec, option=0)\n song_array = np.concatenate([song_array, batch_sample], axis=0) if song_array.size else batch_sample\n\n song_array[np.isnan(song_array)] = 0\n song_array[song_array >= 0.8] = 1\n song_array[song_array < 0.7] = 
0\n\n piano_roll = np.expand_dims(song_array.squeeze(), axis=0)\n output_path = os.path.join(FLAGS.gen_dir, str(FLAGS.genre))\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n write_piano_rolls_to_midi(piano_roll, program_nums=[FLAGS.num_program], is_drum=[False],\n filename=output_path + '/testrelu.mid',\n velocity=70, tempo=100.0, beat_resolution=24)\n\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"v1/song_generation.py","file_name":"song_generation.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"246963356","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\n\n\ndef index(request):\n greeting = 'Привет, мир!'\n get_params = request.GET.items()\n post_params = request.POST.items()\n return render(request, 'index.html', {'greeting': greeting,\n 'get_params':get_params,\n 'post_params':post_params})\n\n\n\n\n","sub_path":"ask_dyudina/ask_dyudina/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"320794958","text":"import coin\r\n\r\n# The main function.\r\n\r\ndef main():\r\n # Create an object from the Coin class.\r\n my_coin = coin.Coin()\r\n\r\n # Display the side of the coin that is facing up.\r\n print('This side is up: ' , my_coin.get_sideup())\r\n\r\n # Toss the coin.\r\n print('I am tossing the coin...')\r\n my_coin.toss()\r\n\r\n # CHEATER, changing the side up attribute from the outside\r\n # The coin will always be Heads\r\n # This does not work anymore\r\n my_coin.__sideup = 'Heads'\r\n\r\n print('I\\'m going to toss the coin ten times: ')\r\n \r\n for count in range(10):\r\n my_coin.toss()\r\n print(my_coin.get_sideup())\r\n\r\n# Call the main function\r\nmain()\r\n\r\n","sub_path":"Chapter 11 objects 
programs/coin_demo3_433.py","file_name":"coin_demo3_433.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"235333890","text":"def _merge_list(l1, l2):\n i = j = 0; l = []\n while True:\n if l1[i] < l2[j]:\n l += [l1[i]]\n i += 1\n else:\n l += [l2[j]]\n j += 1\n \n if i == len(l1):\n l += l2[j:]\n break\n elif j == len(l2):\n l += l1[i:]\n break\n return l\n\ndef _merge(A):\n \"\"\"\n Params:\n A: {list[list]}\n \"\"\"\n n = len(A)\n T = []\n if n % 2:\n T += [A.pop(-1)]\n for i in range(n//2):\n T += [_merge_list(A[i*2], A[i*2+1])]\n return T\n\ndef msort(A, inverse=False):\n \"\"\"\n Params:\n A: {list}\n \"\"\"\n T = [[i] for i in A]\n\n while len(T) > 1:\n T = _merge(T)\n\n T = T[0]\n T = T[::-1] if inverse else T\n\n return T\n\nif __name__ == \"__main__\":\n A = list(range(4, 25))[::-1]\n print(A)\n A = msort(A)\n print(A)","sub_path":"Introduction to Algorithm/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"209637244","text":"\"\"\"\nAutomate the Boring Stuff with Python\nChapter 8 Reading and Writing Files - Project: Multiclipboard implementation\nDescription:\n - The command line argument for the keyword is checked.\n - If the argument is save, then the clipboard contents are saved to the keyword.\n - If the argument is list, then all the keywords are copied to the clipboard.\n - Otherwise, the text for the keyword is copied to the keyboard.\nModifications:\n - It has a delete command line argument that will delete a keyword from the shelf.\n - Add a delete command line argument that will delete all keywords.\n\"\"\"\n\nimport shelve, pyperclip, sys\n\nmcbShelf = shelve.open('mcb')\n\n# Save clipboard content.\nif len(sys.argv) == 3 and sys.argv[1].lower() == 'save':\n mcbShelf[sys.argv[2]] = pyperclip.paste()\n\nelif 
len(sys.argv) == 3 and sys.argv[1].lower() == 'delete':\n del mcbShelf[sys.argv[2]]\nelif len(sys.argv) == 2:\n # List keywords and load content.\n if sys.argv[1].lower() == 'list':\n pyperclip.copy(str(list(mcbShelf.keys())))\n if sys.argv[1].lower() == 'delete':\n mcbShelf.clear()\n elif sys.argv[1] in mcbShelf:\n pyperclip.copy(mcbShelf[sys.argv[1]])\n\nmcbShelf.close()","sub_path":"AutomateTheBoringStuffWithPython/Ch8_ReadingAndWritingFiles/mcb.pyw","file_name":"mcb.pyw","file_ext":"pyw","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"327135044","text":"def dv_loadingBar(count, total, task_part=None):\n \"\"\" Provides user with a loadingbar line. See following:\n 041/400 [== ] Subtask 793\n count/total [== ] 'task_part'\n Parameters\n ----------\n count : str, float or int\n Current task count. Easy to access throught 'enumerate()'\n total : str, float or int\n Maximal number of all tasks\n task_part : String | Optional\n If the task is divided in subtask and you want to keep track of\n your functions progress in detail pass your subtask in string format.\n Example\n -------\n array = np.linspace(1, 1000, 400)\n for p, i in enumerate(array):\n loadingBar(count=p, total=array.shape[0],\n task_part='Subtask')\n Returns\n -------\n stdout : Rewriteable String Output\n Generates a String Output for every of the progress steps\n \"\"\"\n if task_part is None:\n task_part = ''\n \n percent = float(count + 1) / float(total) * 100\n size = 2\n\n sys.stdout.write(\"\\r \"\n + str(int(count + 1)).rjust(3, '0')\n + \"/\" + str(int(total)).rjust(3, '0')\n + ' [' + '=' * int(percent / 10) * size\n + ' ' * (10 - int(percent / 10)) * size\n + '] %30s' % (task_part))\n if count + 1 == total:\n finish = '[done]'\n sys.stdout.write(\"\\r \"\n + str(int(count + 1)).rjust(3, '0')\n + \"/\" + str(int(total)).rjust(3, '0')\n + ' [' + '=' * int(percent / 10) * size\n + ' ' * (10 - int(percent / 10)) * 
size\n + '] %30s\\n' % (finish))\n\n\n return","sub_path":"dv_loadingBar.py","file_name":"dv_loadingBar.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"563007377","text":"import os\n\nclass Config():\n def __init__(self):\n super(Config).__init__()\n\n self.batch_size=128\n self.crop_size = 224\n self.data_name = 'charades_precomp'\n self.data_path = '/media/datasets/ld_data/Charades-STA/charades_c3d/CHARADES_C3D/'\n self.no_imgnorm = False\n self.vocab_path = './vocab/'\n self.vocab_size = 11755\n\n self.cnn_type='vgg19'\n self.margin = 0.1\n self.max_violation = False\n self.measure = 'cosine'\n self.embed_size=1024\n self.finetune=False\n self.grad_clip=2.0\n self.img_dim=4096\n self.word_dim = 300\n\n self.learning_rate=0.0001\n self.log_step=10\n self.lr_update = 15\n # self.num_epochs = 40\n self.num_layers = 1\n self.use_abs = False\n self.use_restval = False\n self.val_step = 500\n self.train_epoches = 100\n self.train_test = True\n\n self.no_val = True\n\n self.workers = 10\n self.cuda_devices = '0'\n self.logger_name='runs/test_cross_attn_charades_c3dvse7_slide2'\n self.base_path = '/media/datasets/ld_data/Weak_Supervised_Moment-20191023T033504Z-001/Weak_Supervised_Moment'\n self.resume=self.base_path + '/runs/reimplementation/'\n self.resume_developer_last = 'runs/reimplementation/'\n\n self.platform_lst = ['lab', 'developer', 'cluster']\n self.platform_option = 1\n\n\nif __name__ == \"__main__\":\n config = Config()\n print(config.__dict__)\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"496445562","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pickle\nimport csv\nimport numpy as np\nimport argparse\nimport math\nimport sys\nimport operator\nimport collections 
as coll\n\ndef getData(train_file):\n\twith open(train_file,\"rb\") as fp:\n\t\ttrain_data = pickle.load(fp)\n\treturn train_data\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"train_file\")\n\targs = parser.parse_args()\n\treturn args\n\ndef main():\n\targs = parse_args()\n\tprint(\"Loading data...\")\n\ttrain_data = getData(args.train_file)\n\tprint(\"Data loaded\")\n\tdata = []\n\tfor i in range(len(train_data)):\n\t\tdata += [word for word in train_data[i]]\n\ttrain_data = data\n\t# vocab = list(set(train_data))\n\tunigrams = coll.Counter(train_data)\n\tfreq = [v for k, v in unigrams.iteritems()]\n\tfreq = {}\n\tfor k, v in unigrams.iteritems():\n\t\tif(v in freq):\n\t\t\tfreq[v] += 1\n\t\telse:\n\t\t\tfreq[v] = 1\n\tsorted_x = sorted(freq.items(), key=operator.itemgetter(1), reverse=True)\n\tw = [x[0] for x in sorted_x]\n\tf = [x[1] for x in sorted_x]\n\tplt.semilogy(w,f,color='red',marker='+')\n\tplt.xlabel(\"frequency\")\n\tplt.ylabel(\"frequency of frequencies\")\n\tplt.savefig(\"./brown_freq_of_freq\")\n\t# plt.plot(freq)\n\t# plt.xlabel(\"words\")\n\t# plt.ylabel(\"frequencies\")\n\t# plt.savefig(\"./brown_freq\")\n\nif __name__ == '__main__':\n main()","sub_path":"code/brown_plot_words.py","file_name":"brown_plot_words.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"440731073","text":"from qgis.utils import iface\nfrom qgis import gui, core\n\nfrom Ferramentas_Producao.modules.qgis.mapFunctions.mapFunction import MapFunction\n\nclass SmoothLine(MapFunction):\n\n def __init__(self):\n super(SmoothLine, self).__init__()\n \n def isValidParameters(self, layer):\n if not layer:\n return (False, 'Selecione uma camada')\n if not(layer.crs().mapUnits() == core.QgsUnitTypes.DistanceMeters):\n return (False, 'A camada ativa deve ter sua unidade de distancia em metros')\n if not(layer.geometryType() == 
core.QgsWkbTypes.LineGeometry):\n return (False, 'A camada ativa deve ser do tipo \"LineGeometry\"')\n if not(layer.isEditable()):\n return (False, 'A camada ativa deve está no modo editável')\n if not(len(layer.selectedFeatures()) > 0):\n return (False, 'Selecione no mínimo uma feição')\n return (True, '')\n\n def run(self, layer):\n result = self.isValidParameters(layer)\n if not result[0]:\n return result\n for feat in layer.selectedFeatures():\n geom = feat.geometry()\n geom_smooth = geom.smooth(2, 0.3)\n feat.setGeometry(geom_smooth)\n layer.updateFeature(feat)\n iface.mapCanvas().refresh()\n return (True, '')\n","sub_path":"modules/qgis/mapFunctions/smoothLine.py","file_name":"smoothLine.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"29141694","text":"class _Nodes(object):\n def __init__(self, data = None):\n self.data = data\n self.next = None\n def __str__(self):\n return str(self.data)\n\nclass SLL(object) :\n def __init__(self):\n self.tail = None\n self.head = None\n self.size = 0\n\n def append(self, data) :\n node = _Nodes(data)\n if self.head:\n self.head.next = node\n self.head = node\n else:\n self.tail = node\n self.head = node\n self.size += 1\n\n\n def iter(self) :\n curr = self.tail\n while curr:\n val = curr.data\n curr= curr.next\n yield val\n\n def delete(self, data) :\n '''\n delete a node having this data\n '''\n curr = self.tail\n prev = self.tail\n while curr:\n if curr.data == data:\n if self.tail == curr:\n self.tail= curr.next\n else:\n prev.next = curr.next\n self.size -= 1\n #print(self.size)\n return\n prev = curr\n curr = curr.next\n\n\n def is_data_in_sll(self, data):\n curr = self.tail\n while curr:\n if curr.data == data:\n return True\n curr = curr.next\n return False\n\n def clear(self) :\n self.tail = None\n self.head = None\n\n\n\n\n\n\n\n\n\n\nsll = 
SLL()\nsll.append(7)\nsll.append(8)\nsll.append(112)\nsll.append(52)\nprint(sll.is_data_in_sll(112))\nsll.delete(112)\nprint(sll.is_data_in_sll(112))\nfor it in sll.iter():\n print(it)\nsll.clear()\nprint(sll.is_data_in_sll(7))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DSLearning/SLL.py","file_name":"SLL.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"225985014","text":"# Copyright (c) 2013, Alphamonak Solutions Ltd. Pvt. Ltd. \n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport redapp\n\ndef execute():\n\tcancelled_invoices = redapp.db.sql_list(\"\"\"select name from `tabSales Invoice` \n\t\twhere docstatus = 2 and ifnull(update_stock, 0) = 1\"\"\")\n\n\tif cancelled_invoices:\n\t\tredapp.db.sql(\"\"\"delete from `tabGL Entry` \n\t\t\twhere voucher_type = 'Sales Invoice' and voucher_no in (%s)\"\"\" \n\t\t\t% (', '.join(['%s']*len(cancelled_invoices))), tuple(cancelled_invoices))","sub_path":"redapple/patches/v4_2/delete_gl_entries_for_cancelled_invoices.py","file_name":"delete_gl_entries_for_cancelled_invoices.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"426992467","text":"age=int(input('请输入你的年龄:'))\nif age>=18:\n print('你已经成年了')\nelse:\n print('你还未成年')\n\n\n\nnum1=25\nnum2=1\nwhile(num1!=num2):\n num2=int(input('请输入你猜的字'))\n if num1>num2:\n print('你输入的值小了')\n elif num1\n\n \n \n \n\"\"\"\n\n# REGEX\n# -- Text folder search (*.html *.xhtml):\nHTML_FILES = re.compile('\\b\\w*(?:.x?html|xml)\\b')\n# -- Style folder search (*.css):\nCSS_FILES = re.compile('\\b\\w*(?:.css)\\b')\n\n\nclass EpubFile(ZipFile):\n \"\"\"Clase que representa un archivo epub.\"\"\"\n\n def __init__(self, epub_path, mode='r'):\n \"\"\"Abre un archivo epub.\n El parametro mode debe ser 'r' para leer un archivo existente, 'w' para\n 
truncar el archivo y crear uno nuevo y 'a' para modificar un archivo\n existente.\"\"\"\n super().__init__(epub_path, mode)\n\n if mode == 'w':\n self.__create_new_epub()\n elif mode == 'r':\n pass\n elif mode == 'a':\n pass\n else:\n raise ValueError()\n\n def __str__(self):\n return \"Epub: {0} \".format(self.filename)\n\n def is_epub(self):\n \"\"\"Verifica si el archivo es de tipo epub.\"\"\"\n if EPUB_MIME_FILE in self.namelist():\n buff = self.read(EPUB_MIME_FILE)\n return EPUB_MIMETYPE == buff.decode('utf-8')\n return False\n\n def __create_new_epub(self):\n \"\"\"Crea un nuevo archivo epub inicializado con la estructura basica.\"\"\"\n print(\"__create_new_epub()\")\n # Creamos el archivo mimetype\n self.writestr(\"mimetype\", EPUB_MIMETYPE)\n # Creamos el archivo META_INF/container.xml\n self.writestr(\"META_INF/container.xml\", EPUB_CONTAINER_HEADER)\n # Creamos el archivo OEBPS/content.opf\n self.writestr(\"OEBPS/content.opf\", \"\")\n self.writestr(\"OEBPS/toc.ncx\", \"\")\n\n def delete_member(self, member):\n \"\"\" \"\"\"\n if member not in self.namelist():\n print(\"Member not in epub\")\n return\n if member in (\"mimetype\", \"OEBPS/toc.ncx \", \"OEBPS/content.opf \",\n \"META_INF/container.xml\"):\n print(\"Imposible to delete: {0} member\".format(member))\n return\n\n dirname = os.path.dirname(self.filename)\n tmp_epub = os.path.join(dirname, 'tmp.epub')\n\n with EpubFile(tmp_epub, 'w') as new_epub:\n for item in self.infolist():\n buff = self.read(item.filename)\n if item.filename != member:\n new_epub.writestr(item, buff)\n\n print(\"New Epub\")\n print(new_epub.printdir())\n print()\n\n print(self.printdir())\n\n def insert_member(self, member_path):\n \"\"\" \"\"\"\n with open(member_path, 'r') as _file:\n buff = _file.read()\n #TODO: Utilizar expresiones regulares para saber tipo de archivo!\n if member_path.endswith('.html'):\n self.writestr(os.path.join(EPUB_TXT_DIR,\n os.path.basename(member_path)), 
buff)\n","sub_path":"core/epubfile.py","file_name":"epubfile.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"650044748","text":"import abc\nimport collections\nimport getpass\nimport math\nimport os\nimport subprocess\nimport sys\nimport textwrap\nimport time\nimport warnings\nfrom distutils.spawn import find_executable\nfrom typing import Dict, List\n\nimport adaptive_scheduler._mock_scheduler\nfrom adaptive_scheduler.utils import _progress, _RequireAttrsABCMeta\n\n\nclass BaseScheduler(metaclass=_RequireAttrsABCMeta):\n \"\"\"Base object for a Scheduler.\n\n Parameters\n ----------\n cores : int\n Number of cores per job (so per learner.)\n run_script : str\n Filename of the script that is run on the nodes. Inside this script we\n query the database and run the learner.\n python_executable : str, default: `sys.executable`\n The Python executable that should run the `run_script`. By default\n it uses the same Python as where this function is called.\n log_folder : str, default: \"\"\n The folder in which to put the log-files.\n mpiexec_executable : str, optional\n ``mpiexec`` executable. By default `mpiexec` will be\n used (so probably from ``conda``).\n executor_type : str, default: \"mpi4py\"\n The executor that is used, by default `mpi4py.futures.MPIPoolExecutor` is used.\n One can use ``\"ipyparallel\"``, ``\"dask-mpi\"``, ``\"mpi4py\"``, or ``\"process-pool\"``.\n num_threads : int, default 1\n ``MKL_NUM_THREADS``, ``OPENBLAS_NUM_THREADS``, and ``OMP_NUM_THREADS``\n will be set to this number.\n extra_scheduler : list, optional\n Extra ``#SLURM`` (depending on scheduler type)\n arguments, e.g. ``[\"--exclusive=user\", \"--time=1\"]``.\n extra_env_vars : list, optional\n Extra environment variables that are exported in the job\n script. e.g. 
``[\"TMPDIR='/scratch'\", \"PYTHONPATH='my_dir:$PYTHONPATH'\"]``.\n extra_script : str, optional\n Extra script that will be executed after any environment variables are set,\n but before the main scheduler is run.\n\n Returns\n -------\n `BaseScheduler` object.\n \"\"\"\n\n required_attributes = [\"_ext\", \"_submit_cmd\", \"_options_flag\", \"_cancel_cmd\"]\n\n def __init__(\n self,\n cores,\n run_script,\n python_executable,\n log_folder,\n mpiexec_executable,\n executor_type,\n num_threads,\n extra_scheduler,\n extra_env_vars,\n extra_script,\n ):\n self.cores = cores\n self.run_script = run_script\n self.python_executable = python_executable or sys.executable\n self.log_folder = log_folder\n self.mpiexec_executable = mpiexec_executable or \"mpiexec\"\n self.executor_type = executor_type\n self.num_threads = num_threads\n self._extra_scheduler = extra_scheduler\n self._extra_env_vars = extra_env_vars\n self._extra_script = extra_script\n self._JOB_ID_VARIABLE = \"${JOB_ID}\"\n\n @abc.abstractmethod\n def queue(self, me_only: bool) -> Dict[str, dict]:\n \"\"\"Get the current running and pending jobs.\n\n Parameters\n ----------\n me_only : bool, default: True\n Only see your jobs.\n\n Returns\n -------\n queue : dict\n Mapping of ``job_id`` -> `dict` with ``name`` and ``state``, for\n example ``{job_id: {\"job_name\": \"TEST_JOB-1\", \"state\": \"R\" or \"Q\"}}``.\n\n Notes\n -----\n This function might return extra information about the job, however\n this is not used elsewhere in this package.\n \"\"\"\n pass\n\n @property\n def ext(self) -> str:\n \"\"\"The extension of the job script.\"\"\"\n return self._ext\n\n @property\n def submit_cmd(self) -> str:\n \"\"\"Command to start a job, e.g. 
``qsub fname.batch`` or ``sbatch fname.sbatch``.\"\"\"\n return self._submit_cmd\n\n @abc.abstractmethod\n def job_script(self, name: str) -> str:\n \"\"\"Get a jobscript in string form.\n\n Returns\n -------\n job_script : str\n A job script that can be submitted to the scheduler.\n \"\"\"\n pass\n\n def batch_fname(self, name: str) -> str:\n \"\"\"The filename of the job script.\"\"\"\n return name + self.ext\n\n @staticmethod\n def sanatize_job_id(job_id):\n return job_id\n\n def cancel(\n self, job_names: List[str], with_progress_bar: bool = True, max_tries: int = 5\n ) -> None:\n \"\"\"Cancel all jobs in `job_names`.\n\n Parameters\n ----------\n job_names : list\n List of job names.\n with_progress_bar : bool, default: True\n Display a progress bar using `tqdm`.\n max_tries : int, default: 5\n Maximum number of attempts to cancel a job.\n \"\"\"\n\n def to_cancel(job_names):\n return [\n job_id\n for job_id, info in self.queue().items()\n if info[\"job_name\"] in job_names\n ]\n\n def cancel_jobs(job_ids):\n for job_id in _progress(job_ids, with_progress_bar, \"Canceling jobs\"):\n cmd = f\"{self._cancel_cmd} {job_id}\".split()\n returncode = subprocess.run(cmd, stderr=subprocess.PIPE).returncode\n if returncode != 0:\n warnings.warn(f\"Couldn't cancel '{job_id}'.\", UserWarning)\n\n job_names = set(job_names)\n for _ in range(max_tries):\n job_ids = to_cancel(job_names)\n if not job_ids:\n # no more running jobs\n break\n cancel_jobs(job_ids)\n time.sleep(0.5)\n\n def _mpi4py(self, name: str) -> str:\n log_fname = self.log_fname(name)\n return f\"{self.mpiexec_executable} -n {self.cores} {self.python_executable} -m mpi4py.futures {self.run_script} --log-fname {log_fname} --job-id {self._JOB_ID_VARIABLE} --name {name}\"\n\n def _dask_mpi(self, name: str) -> str:\n log_fname = self.log_fname(name)\n return f\"{self.mpiexec_executable} -n {self.cores} {self.python_executable} {self.run_script} --log-fname {log_fname} --job-id {self._JOB_ID_VARIABLE} --name 
{name}\"\n\n def _ipyparallel(self, name: str) -> str:\n log_fname = self.log_fname(name)\n job_id = self._JOB_ID_VARIABLE\n profile = \"${profile}\"\n return textwrap.dedent(\n f\"\"\"\\\n profile=adaptive_scheduler_{job_id}\n\n echo \"Creating profile {profile}\"\n ipython profile create {profile}\n\n echo \"Launching controller\"\n ipcontroller --ip=\"*\" --profile={profile} --log-to-file &\n sleep 10\n\n echo \"Launching engines\"\n {self.mpiexec_executable} -n {self.cores-1} ipengine --profile={profile} --mpi --cluster-id='' --log-to-file &\n\n echo \"Starting the Python script\"\n {self.python_executable} {self.run_script} --profile {profile} --n {self.cores-1} --log-fname {log_fname} --job-id {job_id} --name {name}\n \"\"\"\n )\n\n def _process_pool(self, name: str) -> str:\n log_fname = self.log_fname(name)\n return f\"{self.python_executable} {self.run_script} --n {self.cores} --log-fname {log_fname} --job-id {self._JOB_ID_VARIABLE} --name {name}\"\n\n def _executor_specific(self, name: str) -> str:\n if self.executor_type == \"mpi4py\":\n return self._mpi4py(name)\n elif self.executor_type == \"dask-mpi\":\n return self._dask_mpi(name)\n elif self.executor_type == \"ipyparallel\":\n if self.cores <= 1:\n raise ValueError(\n \"`ipyparalllel` uses 1 cores of the `adaptive.Runner` and\"\n \"the rest of the cores for the engines, so use more than 1 core.\"\n )\n return self._ipyparallel(name)\n elif self.executor_type == \"process-pool\":\n return self._process_pool(name)\n else:\n raise NotImplementedError(\n \"Use 'ipyparallel', 'dask-mpi', 'mpi4py' or 'process-pool'.\"\n )\n\n def log_fname(self, name: str) -> str:\n \"\"\"The filename of the log.\"\"\"\n if self.log_folder:\n os.makedirs(self.log_folder, exist_ok=True)\n return os.path.join(self.log_folder, f\"{name}-{self._JOB_ID_VARIABLE}.log\")\n\n def output_fnames(self, name: str) -> List[str]:\n log_fname = self.log_fname(name)\n return [log_fname.replace(\".log\", \".out\")]\n\n @property\n def 
extra_scheduler(self):\n \"\"\"Scheduler options that go in the job script.\"\"\"\n extra_scheduler = self._extra_scheduler or []\n return \"\\n\".join(f\"#{self._options_flag} {arg}\" for arg in extra_scheduler)\n\n @property\n def extra_env_vars(self):\n \"\"\"Environment variables that need to exist in the job script.\"\"\"\n extra_env_vars = self._extra_env_vars or []\n return \"\\n\".join(f\"export {arg}\" for arg in extra_env_vars)\n\n @property\n def extra_script(self):\n \"\"\"Script that will be run before the main scheduler.\"\"\"\n return str(self._extra_script) or \"\"\n\n def write_job_script(self, name: str) -> None:\n with open(self.batch_fname(name), \"w\") as f:\n job_script = self.job_script(name)\n f.write(job_script)\n\n def start_job(self, name: str) -> None:\n \"\"\"Writes a job script and submits it to the scheduler.\"\"\"\n self.write_job_script(name)\n returncode = None\n while returncode != 0:\n returncode = subprocess.run(\n f\"{self.submit_cmd} {self.batch_fname(name)}\".split(),\n stderr=subprocess.PIPE,\n ).returncode\n time.sleep(0.5)\n\n def __getstate__(self) -> dict:\n return dict(\n cores=self.cores,\n run_script=self.run_script,\n python_executable=self.python_executable,\n log_folder=self.log_folder,\n mpiexec_executable=self.mpiexec_executable,\n executor_type=self.executor_type,\n num_threads=self.num_threads,\n extra_scheduler=self._extra_scheduler,\n extra_env_vars=self._extra_env_vars,\n )\n\n def __setstate__(self, state):\n self.__init__(**state)\n\n\nclass PBS(BaseScheduler):\n def __init__(\n self,\n cores,\n run_script=\"run_learner.py\",\n python_executable=None,\n log_folder=\"\",\n mpiexec_executable=None,\n executor_type=\"mpi4py\",\n num_threads=1,\n extra_scheduler=None,\n extra_env_vars=None,\n extra_script=None,\n *,\n cores_per_node=None,\n ):\n super().__init__(\n cores,\n run_script,\n python_executable,\n log_folder,\n mpiexec_executable,\n executor_type,\n num_threads,\n extra_scheduler,\n 
extra_env_vars,\n extra_script,\n )\n # Attributes that all schedulers need to have\n self._ext = \".batch\"\n # the \"-k oe\" flags with \"qsub\" writes the log output to\n # files directly instead of at the end of the job. The downside\n # is that the logfiles are put in the homefolder.\n self._submit_cmd = \"qsub -k oe\"\n self._JOB_ID_VARIABLE = \"${PBS_JOBID}\"\n self._options_flag = \"PBS\"\n self._cancel_cmd = \"qdel\"\n\n # PBS specific\n self.cores_per_node = cores_per_node\n self._calculate_nnodes()\n if cores != self.cores:\n warnings.warn(f\"`self.cores` changed from {cores} to {self.cores}\")\n\n def __getstate__(self):\n # PBS has one different argument from the BaseScheduler\n return dict(**super().__getstate__(), cores_per_node=self.cores_per_node)\n\n @staticmethod\n def sanatize_job_id(job_id):\n \"\"\"Changes '91722.hpc05.hpc' into '91722'.\"\"\"\n return job_id.split(\".\")[0]\n\n def _calculate_nnodes(self):\n if self.cores_per_node is None:\n partial_msg = \"Use set `cores_per_node=...` before passing the scheduler.\"\n try:\n max_cores_per_node = self._guess_cores_per_node()\n self.nnodes = math.ceil(self.cores / max_cores_per_node)\n self.cores_per_node = round(self.cores / self.nnodes)\n msg = (\n f\"`#PBS -l nodes={self.nnodes}:ppn={self.cores_per_node}` is\"\n f\" guessed using the `qnodes` command, we set\"\n f\" `cores_per_node={self.cores_per_node}`.\"\n f\" You might want to change this. {partial_msg}\"\n )\n warnings.warn(msg)\n self.cores = self.nnodes * self.cores_per_node\n except Exception as e:\n msg = (\n f\"Got an error: {e}.\"\n \" Couldn't guess `cores_per_node`, this argument is required\"\n f\" for PBS. 
{partial_msg}\"\n \" We set `cores_per_node=1`!\"\n )\n warnings.warn(msg)\n self.nnodes = self.cores\n self.cores_per_nodes = 1\n else:\n self.nnodes = self.cores / self.cores_per_node\n if not float(self.nnodes).is_integer():\n raise ValueError(\"cores / cores_per_node must be an integer!\")\n else:\n self.nnodes = int(self.nnodes)\n\n def output_fnames(self, name: str) -> List[str]:\n # The \"-k oe\" flags with \"qsub\" writes the log output to\n # files directly instead of at the end of the job. The downside\n # is that the logfiles are put in the homefolder.\n home = os.path.expanduser(\"~/\")\n stdout, stderr = [\n os.path.join(home, f\"{name}.{x}{self._JOB_ID_VARIABLE}\") for x in \"oe\"\n ]\n return [stdout, stderr]\n\n def job_script(self, name: str) -> str:\n \"\"\"Get a jobscript in string form.\n\n Returns\n -------\n job_script : str\n A job script that can be submitted to PBS.\n \"\"\"\n\n job_script = textwrap.dedent(\n f\"\"\"\\\n #!/bin/sh\n #PBS -l nodes={self.nnodes}:ppn={self.cores_per_node}\n #PBS -V\n #PBS -N {name}\n #PBS -o /tmp/placeholder\n #PBS -e /tmp/placeholder\n {{extra_scheduler}}\n\n export MKL_NUM_THREADS={self.num_threads}\n export OPENBLAS_NUM_THREADS={self.num_threads}\n export OMP_NUM_THREADS={self.num_threads}\n {{extra_env_vars}}\n\n cd $PBS_O_WORKDIR\n\n {{extra_script}}\n\n {{executor_specific}}\n \"\"\"\n )\n\n job_script = job_script.format(\n extra_scheduler=self.extra_scheduler,\n extra_env_vars=self.extra_env_vars,\n extra_script=self.extra_script,\n executor_specific=self._executor_specific(name),\n job_id_variable=self._JOB_ID_VARIABLE,\n )\n\n return job_script\n\n @staticmethod\n def _split_by_job(lines):\n jobs = [[]]\n for line in lines:\n line = line.strip()\n if line:\n jobs[-1].append(line)\n else:\n jobs.append([])\n return [j for j in jobs if j]\n\n @staticmethod\n def _fix_line_cuts(raw_info):\n info = []\n for line in raw_info:\n if \" = \" in line:\n info.append(line)\n else:\n info[-1] += line\n return 
info\n\n def queue(self, me_only: bool = True) -> Dict[str, dict]:\n cmd = [\"qstat\", \"-f\"]\n\n proc = subprocess.run(\n cmd,\n text=True,\n capture_output=True,\n env=dict(os.environ, SGE_LONG_QNAMES=\"1000\"),\n )\n output = proc.stdout\n\n if proc.returncode != 0:\n raise RuntimeError(\"qstat is not responding.\")\n\n jobs = self._split_by_job(output.replace(\"\\n\\t\", \"\").split(\"\\n\"))\n\n running = {}\n for header, *raw_info in jobs:\n job_id = header.split(\"Job Id: \")[1]\n info = dict([line.split(\" = \") for line in self._fix_line_cuts(raw_info)])\n if info[\"job_state\"] in [\"R\", \"Q\"]:\n info[\"job_name\"] = info[\n \"Job_Name\"\n ] # used in `server_support.manage_jobs`\n info[\"state\"] = info[\"job_state\"] # used in `RunManager.live`\n running[job_id] = info\n\n if me_only:\n # We do this because the \"-u [username here]\" flag doesn't\n # work with \"-f\" on some clusters.\n username = getpass.getuser()\n running = {\n job_id: info\n for job_id, info in running.items()\n if username in info[\"Job_Owner\"]\n }\n\n return running\n\n def _qnodes(self):\n proc = subprocess.run([\"qnodes\"], text=True, capture_output=True)\n output = proc.stdout\n\n if proc.returncode != 0:\n raise RuntimeError(\"qnodes is not responding.\")\n\n jobs = self._split_by_job(output.replace(\"\\n\\t\", \"\").split(\"\\n\"))\n\n nodes = {\n node: dict([line.split(\" = \") for line in self._fix_line_cuts(raw_info)])\n for node, *raw_info in jobs\n }\n return nodes\n\n def _guess_cores_per_node(self):\n nodes = self._qnodes()\n cntr = collections.Counter([int(info[\"np\"]) for info in nodes.values()])\n ncores, freq = cntr.most_common(1)[0]\n return ncores\n\n\nclass SLURM(BaseScheduler):\n def __init__(\n self,\n cores,\n run_script=\"run_learner.py\",\n python_executable=None,\n log_folder=\"\",\n mpiexec_executable=None,\n executor_type=\"mpi4py\",\n num_threads=1,\n extra_scheduler=None,\n extra_env_vars=None,\n extra_script=None,\n ):\n super().__init__(\n 
cores,\n run_script,\n python_executable,\n log_folder,\n mpiexec_executable,\n executor_type,\n num_threads,\n extra_scheduler,\n extra_env_vars,\n extra_script,\n )\n # Attributes that all schedulers need to have\n self._ext = \".sbatch\"\n self._submit_cmd = \"sbatch\"\n self._JOB_ID_VARIABLE = \"${SLURM_JOB_ID}\"\n self._options_flag = \"SBATCH\"\n self._cancel_cmd = \"scancel\"\n\n # SLURM specific\n self.mpiexec_executable = mpiexec_executable or \"srun --mpi=pmi2\"\n\n def _ipyparallel(self, name: str) -> str:\n log_fname = self.log_fname(name)\n job_id = self._JOB_ID_VARIABLE\n profile = \"${profile}\"\n return textwrap.dedent(\n f\"\"\"\\\n profile=adaptive_scheduler_{job_id}\n\n echo \"Creating profile {profile}\"\n ipython profile create {profile}\n\n echo \"Launching controller\"\n ipcontroller --ip=\"*\" --profile={profile} --log-to-file &\n sleep 10\n\n echo \"Launching engines\"\n srun --ntasks {self.cores-1} ipengine --profile={profile} --cluster-id='' --log-to-file &\n\n echo \"Starting the Python script\"\n srun --ntasks 1 {self.python_executable} {self.run_script} --profile {profile} --n {self.cores-1} --log-fname {log_fname} --job-id {job_id} --name {name}\n \"\"\"\n )\n\n def job_script(self, name: str) -> str:\n \"\"\"Get a jobscript in string form.\n\n Returns\n -------\n job_script : str\n A job script that can be submitted to SLURM.\n \"\"\"\n output_fname = self.output_fnames(name)[0].replace(self._JOB_ID_VARIABLE, \"%A\")\n job_script = textwrap.dedent(\n f\"\"\"\\\n #!/bin/bash\n #SBATCH --job-name {name}\n #SBATCH --ntasks {self.cores}\n #SBATCH --no-requeue\n #SBATCH --output {output_fname}\n {{extra_scheduler}}\n\n export MKL_NUM_THREADS={self.num_threads}\n export OPENBLAS_NUM_THREADS={self.num_threads}\n export OMP_NUM_THREADS={self.num_threads}\n {{extra_env_vars}}\n\n {{extra_script}}\n\n {{executor_specific}}\n \"\"\"\n )\n\n job_script = job_script.format(\n extra_scheduler=self.extra_scheduler,\n 
extra_env_vars=self.extra_env_vars,\n extra_script=self.extra_script,\n executor_specific=self._executor_specific(name),\n )\n return job_script\n\n def queue(self, me_only: bool = True) -> Dict[str, Dict[str, str]]:\n python_format = {\n \"jobid\": 100,\n \"name\": 100,\n \"state\": 100,\n \"numnodes\": 100,\n \"reasonlist\": 4000,\n } # (key -> length) mapping\n\n slurm_format = \",\".join(f\"{k}:{v}\" for k, v in python_format.items())\n cmd = [\n \"/usr/bin/squeue\",\n rf'--Format=\",{slurm_format},\"',\n \"--noheader\",\n \"--array\",\n ]\n if me_only:\n username = getpass.getuser()\n cmd.append(f\"--user={username}\")\n proc = subprocess.run(cmd, text=True, capture_output=True)\n output = proc.stdout\n\n if (\n \"squeue: error\" in output\n or \"slurm_load_jobs error\" in output\n or proc.returncode != 0\n ):\n raise RuntimeError(\"SLURM is not responding.\")\n\n def line_to_dict(line):\n line = list(line)\n info = {}\n for k, v in python_format.items():\n info[k] = \"\".join(line[:v]).strip()\n line = line[v:]\n return info\n\n squeue = [line_to_dict(line) for line in output.split(\"\\n\")]\n states = (\"PENDING\", \"RUNNING\", \"CONFIGURING\")\n squeue = [info for info in squeue if info[\"state\"] in states]\n running = {info.pop(\"jobid\"): info for info in squeue}\n for info in running.values():\n info[\"job_name\"] = info.pop(\"name\")\n return running\n\n\nclass LocalMockScheduler(BaseScheduler):\n \"\"\"A scheduler that can be used for testing and runs locally.\n\n CANCELLING DOESN'T WORK ATM, ALSO LEAVES ZOMBIE PROCESSES!\n \"\"\"\n\n def __init__(\n self,\n cores,\n run_script=\"run_learner.py\",\n python_executable=None,\n log_folder=\"\",\n mpiexec_executable=None,\n executor_type=\"mpi4py\",\n num_threads=1,\n extra_scheduler=None,\n extra_env_vars=None,\n extra_script=None,\n *,\n mock_scheduler_kwargs=None,\n ):\n warnings.warn(\"The LocalMockScheduler currently doesn't work!\")\n super().__init__(\n cores,\n run_script,\n python_executable,\n 
log_folder,\n mpiexec_executable,\n executor_type,\n num_threads,\n extra_scheduler,\n extra_env_vars,\n extra_script,\n )\n # LocalMockScheduler specific\n self.mock_scheduler_kwargs = mock_scheduler_kwargs or {}\n self.mock_scheduler = adaptive_scheduler._mock_scheduler.MockScheduler(\n **self.mock_scheduler_kwargs\n )\n mock_scheduler_file = adaptive_scheduler._mock_scheduler.__file__\n self.base_cmd = f\"{self.python_executable} {mock_scheduler_file}\"\n\n # Attributes that all schedulers need to have\n self._ext = \".batch\"\n self._submit_cmd = f\"{self.base_cmd} --submit\"\n self._JOB_ID_VARIABLE = \"${JOB_ID}\"\n self._cancel_cmd = f\"{self.base_cmd} --cancel\"\n\n def __getstate__(self) -> dict:\n # LocalMockScheduler has one different argument from the BaseScheduler\n return dict(\n **super().__getstate__(), mock_scheduler_kwargs=self.mock_scheduler_kwargs\n )\n\n def job_script(self, name: str) -> str:\n \"\"\"Get a jobscript in string form.\n\n Returns\n -------\n job_script : str\n A job script that can be submitted to PBS.\n\n Notes\n -----\n Currenty there is a problem that this will not properly cleanup.\n for example `ipengine ... 
&` will be detached and go on,\n normally a scheduler will take care of this.\n \"\"\"\n\n job_script = textwrap.dedent(\n f\"\"\"\\\n #!/bin/sh\n\n export MKL_NUM_THREADS={self.num_threads}\n export OPENBLAS_NUM_THREADS={self.num_threads}\n export OMP_NUM_THREADS={self.num_threads}\n {{extra_env_vars}}\n\n {{extra_script}}\n\n {{executor_specific}}\n \"\"\"\n )\n\n job_script = job_script.format(\n extra_env_vars=self.extra_env_vars,\n executor_specific=self._executor_specific(name),\n extra_script=self.extra_script,\n job_id_variable=self._JOB_ID_VARIABLE,\n )\n\n return job_script\n\n def queue(self, me_only: bool = True) -> Dict[str, dict]:\n return self.mock_scheduler.queue()\n\n def start_job(self, name: str) -> None:\n self.write_job_script(name)\n returncode = None\n while returncode != 0:\n returncode = subprocess.run(\n f\"{self.submit_cmd} {name} {self.batch_fname(name)}\".split(),\n stderr=subprocess.PIPE,\n ).returncode\n time.sleep(0.5)\n\n @property\n def extra_scheduler(self):\n raise NotImplementedError(\"extra_scheduler is not implemented.\")\n\n\ndef _get_default_scheduler():\n \"\"\"Determine which scheduler system is being used.\n\n It tries to determine it by running both PBS and SLURM commands.\n\n If both are available then one needs to set an environment variable\n called 'SCHEDULER_SYSTEM' which is either 'PBS' or 'SLURM'.\n\n For example add the following to your `.bashrc`\n\n ```bash\n export SCHEDULER_SYSTEM=\"PBS\"\n ```\n\n By default it is \"SLURM\".\n \"\"\"\n\n has_pbs = bool(find_executable(\"qsub\")) and bool(find_executable(\"qstat\"))\n has_slurm = bool(find_executable(\"sbatch\")) and bool(find_executable(\"squeue\"))\n\n DEFAULT = SLURM\n default_msg = f\"We set DefaultScheduler to '{DEFAULT}'.\"\n scheduler_system = os.environ.get(\"SCHEDULER_SYSTEM\", \"\").upper()\n if scheduler_system:\n if scheduler_system not in (\"PBS\", \"SLURM\"):\n warnings.warn(\n f\"SCHEDULER_SYSTEM={scheduler_system} is not implemented.\"\n 
f\"Use SLURM or PBS. {default_msg}\"\n )\n return DEFAULT\n else:\n return {\"SLURM\": SLURM, \"PBS\": PBS}[scheduler_system]\n elif has_slurm and has_pbs:\n msg = f\"Both SLURM and PBS are detected. {default_msg}\"\n warnings.warn(msg)\n return DEFAULT\n elif has_pbs:\n return PBS\n elif has_slurm:\n return SLURM\n else:\n msg = f\"No scheduler system could be detected. {default_msg}\"\n warnings.warn(msg)\n return DEFAULT\n\n\nDefaultScheduler = _get_default_scheduler()\n","sub_path":"adaptive_scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":26893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"297804836","text":"#!/usr/bin/python3\n\nimport argparse\nimport xrdinfo\nimport sys\n\n# Default timeout for HTTP requests\nDEFAULT_TIMEOUT = 5.0\n\n\ndef print_error(content):\n \"\"\"Error printer.\"\"\"\n content = \"ERROR: {}\\n\".format(content)\n sys.stderr.write(content)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='List X-Road Subsystems that are attached to Security Server (registered).',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog='You need to provide either Security Server or Central Server address.\\n\\n'\n 'NB! 
Global configuration signature is not validated when using Central Server.\\n'\n 'Use local Security Server whenever possible.'\n )\n parser.add_argument(\n '-s', metavar='SECURITY_SERVER', help='DNS name/IP/URL of local Security Server')\n parser.add_argument(\n '-c', metavar='CENTRAL_SERVER',\n help='DNS name/IP/URL of Central Server/Configuration Proxy')\n parser.add_argument('-t', metavar='TIMEOUT', help='timeout for HTTP query', type=float)\n parser.add_argument(\n '--verify', metavar='CERT_PATH',\n help='validate peer TLS certificate using CA certificate file.')\n parser.add_argument(\n '--cert', metavar='CERT_PATH', help='use TLS certificate for HTTPS requests.')\n parser.add_argument('--key', metavar='KEY_PATH', help='private key for TLS certificate.')\n parser.add_argument(\n '--instance', metavar='INSTANCE',\n help='use this instance instead of local X-Road instance (works only with \"-s\" argument)')\n args = parser.parse_args()\n\n instance = None\n if args.instance:\n instance = args.instance\n\n timeout = DEFAULT_TIMEOUT\n if args.t:\n timeout = args.t\n\n verify = False\n if args.verify:\n verify = args.verify\n\n cert = None\n if args.cert and args.key:\n cert = (args.cert, args.key)\n\n shared_params = None\n if args.s:\n try:\n shared_params = xrdinfo.shared_params_ss(\n addr=args.s, instance=instance, timeout=timeout, verify=verify, cert=cert)\n except xrdinfo.XrdInfoError as e:\n print_error(e)\n exit(1)\n elif args.c:\n try:\n shared_params = xrdinfo.shared_params_cs(\n addr=args.c, timeout=timeout, verify=verify, cert=cert)\n except xrdinfo.XrdInfoError as e:\n print_error(e)\n exit(1)\n else:\n parser.print_help()\n exit(1)\n\n try:\n for subsystem in xrdinfo.registered_subsystems(shared_params):\n line = xrdinfo.identifier(subsystem)\n print(line)\n except xrdinfo.XrdInfoError as e:\n print_error(e)\n exit(1)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"xrdinfo/xrd_registered_subsystems.py","file_name":"xrd_registered_subsystems.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"142672055","text":"from refactorlib.tests.util import parametrize, get_output, assert_same_content\nfrom refactorlib.cheetah.parse import parse\n\ndef test_can_find_calls():\n\texample = parse('''\n\t\tfoo $foo() bar\n\t''')\n\n\tcalls = example.find_calls('foo')\n\tassert len(calls) == 1\n\tassert calls[0].totext() == '$foo()'\n\n@parametrize(get_output)\ndef test_can_remove_calls(example, output):\n\texample = open(example).read()\n\texample = parse(example)\n\n\tcalls = example.find_calls('foo')\n\tassert len(calls) == 5\n\n\tfor call in calls:\n\t\tcall.remove_call()\n\n\t# Check the text.\n\texample = example.totext()\n\tassert_same_content(output, example)\n\n","sub_path":"refactorlib/tests/cheetah/remove_call_test.py","file_name":"remove_call_test.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530680917","text":"import allure\nimport pytest\nimport yaml\n\n\n@allure.feature(\"计算器测试模块\")\nclass Test_Calc:\n\n def file_data():\n with open(\"./datas.yaml\") as f:\n datas = yaml.safe_load(f)\n add_data = datas[\"add_datas\"]\n sub_data = datas[\"sub_datas\"]\n mul_data = datas[\"mul_datas\"]\n div_data = datas[\"div_datas\"]\n return [add_data,sub_data,mul_data,div_data]\n\n # 加法测试用例,第一执行\n @allure.story(\"加法测试用例\")\n @pytest.mark.run(order=1)\n @pytest.mark.parametrize(\"a,b,expect\",file_data()[0])\n # @pytest.fixture()\n def test_add(self,meilanzi,a,b,expect):\n assert meilanzi.add(a,b) == expect\n\n # 减法测试用例,最后执行\n @allure.story(\"减法测试用例\")\n @pytest.mark.run(order=-1)\n @pytest.mark.parametrize(\"a,b,expect\",file_data()[1])\n def test_sub(self,meilanzi,a,b,expect):\n # self.calc = Calculator()\n assert 
round(meilanzi.sub(a,b),2) == expect\n\n # 乘法测试用例,第三执行\n @pytest.mark.run(order=3)\n @allure.story(\"乘法测试用例\")\n @pytest.mark.parametrize(\"a,b,expect\",file_data()[2])\n def test_mul(self,meilanzi,a,b,expect):\n assert meilanzi.mul(a,b) == expect\n #\n # 除法测试用例,第二执行\n @pytest.mark.run(order=2)\n @allure.story(\"除法测试用例\")\n @pytest.mark.parametrize(\"a,b,expect\", file_data()[3])\n def test_div(self,meilanzi,a,b,expect):\n assert round(meilanzi.div(a, b)) == expect\n\n","sub_path":"test_calc_allure.py","file_name":"test_calc_allure.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"603318356","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/roman.akopov@bixtrim.com/PycharmProjects/sepia/sepia/processor.py\n# Compiled at: 2018-05-02 04:06:58\n# Size of source mod 2**32: 3966 bytes\n\n\nclass IncludeHelper(object):\n\n def __init__(self, base_path):\n self.base_path = base_path\n\n def _get_file_path(self, *path_parts):\n from os.path import abspath\n from os.path import expanduser\n from os.path import expandvars\n from os.path import isfile\n from os.path import join\n from os.path import normpath\n if len(path_parts):\n if type(path_parts[0]) is str:\n return abspath(normpath(expandvars(expanduser(join(self.base_path, *path_parts)))))\n for path_parts_try in path_parts:\n path_try = abspath(normpath(expandvars(expanduser(join(self.base_path, *path_parts_try)))))\n if isfile(path_try):\n return path_try\n print('\"\\x1b[33m{0}\\x1b[0m\" was not found.'.format(path_try))\n\n return self.base_path\n\n def include_plain(self, *path_parts):\n with open((self._get_file_path)(*path_parts), 'r') as (file):\n return file.read()\n\n def include_markdown(self, *path_parts):\n from markdown import markdown\n with open((self._get_file_path)(*path_parts), 
'r') as (file):\n return markdown((file.read()),\n extensions=[\n 'markdown.extensions.abbr',\n 'markdown.extensions.attr_list',\n 'markdown.extensions.def_list',\n 'markdown.extensions.footnotes',\n 'markdown.extensions.tables',\n 'markdown.extensions.smart_strong'])\n\n\ndef include_stdout(*path_parts):\n from subprocess import PIPE\n from subprocess import run\n return run(*path_parts, **{'stdout': PIPE}).stdout\n\n\nclass Processor(object):\n\n def __init__(self, template_set, variable_set, output_path):\n self._template_set = template_set\n self._variable_set = variable_set\n self._output_path = output_path\n\n def execute(self):\n from jinja2 import FileSystemLoader\n from jinja2.sandbox import SandboxedEnvironment\n from os import makedirs\n from os.path import dirname\n from os.path import join\n from shutil import copy2\n from shutil import copystat\n for destination_name, source_info in self._template_set.constant.items():\n destination_path = join(self._output_path, destination_name)\n destination_dir = dirname(destination_path)\n makedirs(destination_dir, exist_ok=True)\n copy2(source_info.file_path, destination_path)\n\n for destination_name, source_info in self._template_set.variable.items():\n destination_path = join(self._output_path, destination_name)\n destination_dir = dirname(destination_path)\n makedirs(destination_dir, exist_ok=True)\n loader = FileSystemLoader(source_info.base_path)\n include_helper = IncludeHelper(source_info.base_path)\n environment = SandboxedEnvironment(trim_blocks=True,\n lstrip_blocks=True,\n keep_trailing_newline=False,\n autoescape=False,\n loader=loader)\n environment.globals.update(self._variable_set.data)\n environment.globals['include_plain'] = include_helper.include_plain\n environment.globals['include_markdown'] = include_helper.include_markdown\n environment.globals['include_stdout'] = include_stdout\n template_context = {}\n with open(source_info.file_path, 'r') as (source_file):\n source_text = 
source_file.read()\n template = environment.from_string(source_text)\n destination_text = template.render(template_context)\n with open(destination_path, 'w') as (destination_file):\n destination_file.write(destination_text)\n copystat(source_info.file_path, destination_path)","sub_path":"pycfiles/sepia-0.7.809-py3-none-any/processor.cpython-36.py","file_name":"processor.cpython-36.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"533931530","text":"\n# coding: utf-8\n\n# # Author : Pashanki Pandit\n\n# Data Science & Business Analytics Tasks\n\n# Task # 6 - Prediction using Decision Tree Algorithm\n\n# Graduate Rotational Internship Program(GRIP)\n# The Sparks Foundation\n\n# In this task I Create the Decision Tree classifier and visualize it graphically.\n\n# # Load the libraries in Python\n\n# In[162]:\n\n\nimport pandas as pd\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nimport missingno as mo\nfrom sklearn.utils import resample\n\n\n# # Load the data\n\n# In[163]:\n\n\ndf=pd.read_csv(\"C:/Users/abhijeet/Downloads/Iris (2).csv\")\n\n\n# In[164]:\n\n\ndf.head()\n\n\n# In[165]:\n\n\ndf.shape\n\n\n# # Preprocessing the Data\n\n# In[166]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\nle=LabelEncoder()\ndf[\"Species\"]=le.fit_transform(df.Species)\n\n\n# In[167]:\n\n\ndf.head()\n\n\n# # Delete the unwanted coumbs\n\n# In[168]:\n\n\ndf.columns\n\n\n# In[169]:\n\n\ndf = df.drop(['Id'],axis=1)\n\n\n# In[170]:\n\n\ndf.head()\n\n\n# In[171]:\n\n\ndf.info()\n\n\n# In[172]:\n\n\ndf.describe()\n\n\n# Insights\n# \n# - from above we get that the diffrence between mean and medium if it is more than (<5%) it means it is not normally distributed.\n# - from above there is all are normally distributed.\n\n# # Check the correletion\n\n# In[173]:\n\n\ndf.corr()\n\n\n# # We check the Missing values\n\n# In[174]:\n\n\ndf.isna().sum() ### all are zero means no missing 
values\n\n\n# # Divide categorical and continuos variables in 2 lists\n\n# In[175]:\n\n\ncat=[]\ncon=[]\nfor i in df.columns:\n if (df[i].dtype==\"object\"):\n cat.append(i)\n else:\n con.append(i)\n\n\n# In[176]:\n\n\ncat\n\n\n# In[177]:\n\n\ncon\n\n\n# In[178]:\n\n\n#EDA\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nfor i in df.columns:\n if (df[i].dtype==\"object\"):\n print(pd.crosstab(df.Species,df[i]))\n else:\n sb.boxplot(df.Species,df[i])\n plt.show()\n\n\n# Insights\n# \n# -We conlued fron above it is the linear data\n\n# # We create the DecisionTreeClassifier\n\n# In[128]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier\ndtc=DecisionTreeClassifier()\nmodel=dtc.fit(xtrain,ytrain)\npred=model.predict(xtest)\n\n\n# In[129]:\n\n\nfrom sklearn.tree import export_graphviz\nexport_graphviz(dtc,out_file=\"C:/Users/abhijeet/Desktop/dummy_work/e2\")\n\nimport pydotplus as pdp\ngraph=pdp.graph_from_dot_file(\"C:/Users/abhijeet/Desktop/dummy_work/e2\")\nfrom IPython.display import Image\nImage(graph.create_jpg())\n\n\n# # You can now feed any new/test data to this classifer and it would be able to predict the right class accordingly. 
We shown below\n\n# In[130]:\n\n\nY=df[['Species']]\nX=df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]\n\nfrom sklearn.model_selection import train_test_split\nxtrain,xtest,ytrain,ytest=train_test_split(X,Y,test_size=0.2,random_state=30)\n\nfrom sklearn.tree import DecisionTreeClassifier\ndtc=DecisionTreeClassifier()\nmodel=dtc.fit(xtrain,ytrain)\npred=model.predict(xtest)\n\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(ytest,pred))\n\n\n# Insights\n# \n# - we get the very good accuracy at the randome state=30 \n\n# In[131]:\n\n\nfrom sklearn.tree import export_graphviz\nexport_graphviz(dtc,out_file=\"C:/Users/abhijeet/Desktop/dummy_work/e2\")\n\nimport pydotplus as pdp\ngraph=pdp.graph_from_dot_file(\"C:/Users/abhijeet/Desktop/dummy_work/e2\")\nfrom IPython.display import Image\nImage(graph.create_jpg())\n\n","sub_path":"The spreks foundation task.py","file_name":"The spreks foundation task.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"150766391","text":"#!/usr/bin/env python\n\nclass Templater:\n \n def __init__(self, csspath):\n self.csspath = csspath\n \n def MkPageFromFile(self,filename, dict, replacestylelink=False):\n lines = open(filename).readlines()\n \n tmp = ''\n \n for x in lines:\n tmp +=x\n \n return self.MkPage( tmp , dict, replacestylelink)\n \n def MkPage(self,template,dict, replacestylelink):\n res = template#fix this\n\n if replacestylelink:#cut tag n paste file\n j = template.find(' -1:\n while template[i:i+6] != 'href=\"':\n i += 1\n if i != j:\n path = ''\n i += 6\n while template[i] != '\"':\n path += template[i]\n i += 1\n while template[i] != '>':\n i += 1\n lines = open(path).readlines()\n \n tmp = ''\n \n for x in lines:\n tmp +=x \n res = template[:j] + '' + template[i+1:]\n for k, v in dict.iteritems():\n res = res.replace('{$' + k + '}', v)\n \n return 
res\n","sub_path":"forumpkg/templater.py","file_name":"templater.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"268894223","text":"import sys\r\nfrom PyQt5.QtWidgets import QWidget, QGridLayout, QLineEdit, QPushButton, QLabel, QListWidget, QListWidgetItem, \\\r\n QApplication\r\nfrom PyQt5 import QtGui\r\n\r\n\r\nclass fileHunterWindow(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWidgets()\r\n\r\n def setWidgets(self):\r\n layout = QGridLayout()\r\n self.setLayout(layout)\r\n self.setWindowTitle(\"FileHunter\")\r\n\r\n # create widgets\r\n self.searchBox = QLineEdit(\"Enter a keyword\")\r\n searchButton = QPushButton('Search')\r\n keywordLabel = QLabel(\"Keyword:\")\r\n outputLabel = QLabel(\"Output:\")\r\n self.listView = QListWidget()\r\n openButton = QPushButton(\"Open\")\r\n exitButton = QPushButton(\"Exit\")\r\n\r\n # set icons\r\n self.word_icon = QtGui.QIcon('word.png')\r\n self.ppt_icon = QtGui.QIcon('ppt.png')\r\n self.excel_icon = QtGui.QIcon('excel.png')\r\n self.txt_icon = QtGui.QIcon('txt.png')\r\n open_icon = QtGui.QIcon('open.png')\r\n search_icon = QtGui.QIcon('search.png')\r\n\r\n # edit widgets\r\n openButton.setIcon(open_icon)\r\n searchButton.setIcon(search_icon)\r\n self.searchBox.selectAll() # highlights the default text\r\n\r\n # add widgets to layout\r\n layout.addWidget(keywordLabel, 0, 0)\r\n layout.addWidget(self.searchBox, 1, 0, 1, 5) # searchBox at row1 col0, rowSpan = 1, colSpan = 5\r\n layout.addWidget(searchButton, 1, 5)\r\n layout.addWidget(outputLabel, 2, 0)\r\n layout.addWidget(self.listView, 3, 0, 1, 6) # outputList at row3 col0, rowSpan = 1, colSpan = 6\r\n layout.addWidget(openButton, 6, 4)\r\n layout.addWidget(exitButton, 6, 5)\r\n\r\n self.show()\r\n\r\n # add clicked functions\r\n searchButton.clicked.connect(self.searchClicked)\r\n openButton.clicked.connect(self.openClicked)\r\n 
exitButton.clicked.connect(self.exitClicked)\r\n self.listView.itemClicked.connect(self.listClicked)\r\n\r\n # backend stuff here\r\n def searchClicked(self): # put target string from text box into a variable to run search function\r\n message = self.searchBox.text()\r\n print(message)\r\n # create test list items\r\n self.listView.clear() # clear the list when a new search is made\r\n for x in range(0, 2):\r\n item = QListWidgetItem(self.word_icon, (\"A Test Word File \" + str(x + 1)))\r\n self.listView.addItem(item)\r\n item = QListWidgetItem(self.ppt_icon, (\"A Test PowerPoint File \" + str(x + 1)))\r\n self.listView.addItem(item)\r\n item = QListWidgetItem(self.excel_icon, (\"A Test Excel File \" + str(x + 1)))\r\n self.listView.addItem(item)\r\n item = QListWidgetItem(self.txt_icon, (\"A Test Text File \" + str(x + 1)))\r\n self.listView.addItem(item)\r\n\r\n def openClicked(self):\r\n print(\"Open Button clicked\")\r\n\r\n def listClicked(self, item):\r\n listText = item.text() # add open function code in here\r\n print(listText)\r\n\r\n def exitClicked(self):\r\n self.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = fileHunterWindow()\r\n sys.exit(app.exec_())\r\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"324353721","text":"import os, sys, subprocess, json\n\n#from video_metadata import get_multimedia_metadata, find_executable\ndef get_metadata(file):\n\n video_metadata = get_multimedia_metadata(\"test_video.mov\", find_executable(\"ffprobe\"))\n \n vertical_pixels = video_metadata['streams'][1]['height']\n if vertical_pixels > 2159:\n quality = \"UHD\"\n elif vertical_pixels > 1079:\n quality = \"FHD\"\n else:\n quality = \"HD\"\n\n duration = int(video_metadata['streams'][1]['duration'])\n\n return quality, duration\n#\n\ndef is_executable(fname_abs):\n \"\"\"\n Checks 
if the given file name is a regular file and if it is an\n executable by the current user.\n \"\"\"\n result = False\n if os.path.isfile(fname_abs) and os.access(fname_abs, os.X_OK):\n result = True\n return result\n\ndef escape_file_name(fname):\n \"\"\"\n Helper function to safely convert the file name (a.k.a. escaping) with\n spaces which can cause issues when passing over to the command line.\n \"\"\"\n result = fname.replace('\\\"', '\\\\\"') # escapes double quotes first\n result = ['\"',result,'\"']\n return \"\".join(result)\n\n\ndef find_executable(executable, path=None):\n # cross-platofrm way to find executable\n # inspired by http://snippets.dzone.com/posts/show/6313\n # and\n # http://stackoverflow.com/questions/377017/\n \"\"\"\n Attempts to find executable file in the directories listed in 'path' (a\n string listing directories separated by 'os.pathsep'; defaults to\n os.environ['PATH']).\n Returns the complete filename or None if no such file is found.\n \"\"\"\n\n if path is None:\n path = os.environ['PATH']\n\n paths = path.split(os.pathsep)\n extlist = ['']\n if sys.platform == 'win32':\n # checks if the provided executable file name has an extension\n # and if not - then search for all possible extensions\n # in order as defined by the 'PATHEXT' environmental variable\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in pathext:\n extlist = pathext\n\n result = None\n for ext in extlist:\n execname = executable + ext\n abs_execname = os.path.abspath(execname)\n # checks if the file exists, is a normal file and can be executed\n if is_executable(abs_execname):\n result = abs_execname\n break\n else:\n for p in paths:\n f = os.path.join(p, execname)\n abs_f = os.path.abspath(f)\n if is_executable(abs_f):\n result = abs_f\n break\n\n return result\n\ndef get_multimedia_metadata(file_path, ffprobe_path):\n \"\"\"\n Get the infomation about multimedia (audio, video or even 
some\n image formats) from :program:`ffprobe` command (part of \n `ffmpeg `_).\n \n Requires providing the path to the *ffprobe* external binary with\n permissions to execute it. It then parses the output and converts it to\n Python dictionary. Raises :exc:`ExtractorException` if it cannot find\n the :program:`ffprobe` binary or the output is not as expected.\n \"\"\"\n if ffprobe_path: \n if not os.path.isfile(ffprobe_path):\n errmsg = \"Cannot find 'ffprobe' at: \" + str(ffprobe_path)\n raise GeneralException(errmsg)\n\n # work around the issue with invoking the executable under Windows/Linux\n # Linux - shell must be True otherwise Popen can't find the file\n # Windows - shell must be False to avoid unnecessary invoking the command\n # shell\n use_shell = True\n if sys.platform.startswith('win'):\n use_shell = False\n\n escaped_fpath = escape_file_name(file_path)\n ffprobe_pipe = subprocess.Popen(\n \" \".join([ffprobe_path,\n '-print_format json',\n '-show_format',\n '-show_streams',\n escaped_fpath]),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=use_shell\n )\n \n ffprobe_output = ffprobe_pipe.stdout.readlines()\n ffprobe_err = ffprobe_pipe.stderr.readlines()\n \n if ffprobe_output == []:\n err_msg = \"\".join(ffprobe_err)\n raise GeneralException(\"Error while invoking ffprobe:\\n\" + err_msg)\n \n ffprobe_json = []\n skip_line = True\n # cleanse any additional information that is not valid JSON\n for line in ffprobe_output:\n if line.strip() == '{':\n skip_line = False\n if not skip_line:\n ffprobe_json.append(line)\n \n result = json.loads(\"\".join(ffprobe_json))\n return result\n\n","sub_path":"video_metadata.py","file_name":"video_metadata.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"404317120","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, 
migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0002_product_menu'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product',\n name='html_keywords',\n field=models.CharField(verbose_name='html keywords', max_length=1000, blank=True, null=True),\n ),\n ]\n","sub_path":"shop/migrations/0003_auto_20150917_1955.py","file_name":"0003_auto_20150917_1955.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"594435763","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/4/27 16:35\n# @Author : Leanper\n\n# @File : gui_spinbox.py\n# @Software: PyCharm\n#@Desc:\n\nfrom tkinter import *\n\nfrom wx import MessageBox\n\n'''\nSpinbox(master=None, **options)(class) master 父控件\n\n'''\nroot = Tk()\n\nw = Spinbox(root, from_=0, to=10) # from与Python的关键字冲突,故加上个下划线。步进可以设置\nw.pack(fill=BOTH, expand=True)\n\ns = Spinbox(root, values=(1, 3, 5, 6), bg=\"#00ff00\", buttonbackground=\"#ff00ff\")\ns.pack(side=RIGHT, padx=10)\nmainloop()\n","sub_path":"python/study/gui/gui_tkinter/gui_spinbox.py","file_name":"gui_spinbox.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"562363556","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n for i in range(len(s) // 2):\n s[i], s[-i - 1] = s[-i - 1], s[i]\n return s\n\n\n def reverseString1(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n for i in range(len(s)//2):\n s[i],s[len(s)-i-1]=s[len(s)-i-1],s[i]\n return s\n\n\n def reverseString2(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n left = 0\n right = len(s)-1\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n 
right -=1\n return s","sub_path":"Leetcode_solutions/reverseString.py","file_name":"reverseString.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"497544898","text":"#Packages that are used in this script, including pandas for data manipulation, numpy (currently unused), pyodbc for odbc database connections and datetime for current date/time references\nimport pandas as pd\nimport numpy as np\nimport pyodbc as pydb\nimport datetime\n\n#Stores current Date/Time as a variable, used for Year and Month values in Dataframe and dynamic file naming for output csv\ntime = datetime.datetime.now()\n\n#Import the csv which contains Momentum calculations data.\ndatastream = pd.read_csv(r'C:\\Users\\hamzaamjad\\Dropbox (ASU)\\SIMF 2016 - 2017\\Data\\Datastream\\Momentum\\Datastream Momentum Ranking 11-4-2016.csv') #Filepath needs to be updated to latest version of Momentum calculations results\n\n#Take the existing datatypes from the Momentum calculations adata nd store as various dataypes more appropriate for the SQL Server\ndatastream['Ticker'] = datastream['Ticker'].astype('object')\ndatastream['ISIN'] = datastream['ISIN'].astype('object')\ndatastream['Date'] = datastream['Date'].astype('datetime64')\ndatastream['Return'] = datastream['Return'].astype('float64')\ndatastream['Return'] = datastream['Return'].apply(lambda x: '%.20f' % x) #Forces data to be stored as float vs scientific notation, I partially understand how this works. 
You may run into the same issue with Bloomberg data when you pull it, just apply this to the columns you run into issues with\ndatastream['Year'] = time.year\ndatastream['Month'] = time.month\n\n#Creates a filename, stored as a string, based on the current day-month-year\nfilename = r'C:\\Users\\hamzaamjad\\Dropbox (ASU)\\SIMF 2016 - 2017\\Data\\Datastream\\Momentum\\Datastream Momentum Cleaned ' + str(time.month) + '-' + str(time.day) + '-' + str(time.year) + '.csv' #This filepath needs to be updated\n\n#Outputs cleaned Momentum calculations data to a csv file, this file will be used for BULKINSERT SQL Query\ndatastream.to_csv((filename), index=False)\n\n#Create the ODBC connection to the SQL Server\nsimfund = pydb.connect(r'DSN=simfund', autocommit=True)\n\n#Create the cursor which will be used to execute SQL queries on the server\ncursor = simfund.cursor()\n\n\"\"\"\n#Drops existing Momentum2016 Table, creates new Momentum2016 table with specificed columns and then inserts intial dataset. This should only be used in the initial stages of seeding. 
Currently commented out.\ncursor.execute(\n'''\nUSE Datastream /*Selects the \"Datastream\" database from the existing SQL Schema*/\n\n/*DROP TABLE Momentum2016 Deletes the existing \"QuarterlyFinancials\" table*/\n\n/*Creates the new \"QuarterlyFinancials\" table, defining the column headers and the data type to be stored within that column*/\nCREATE TABLE Momentum2016\n(\n\t[Ticker] NVARCHAR(20), /*Ticker*/\n\t[ISIN] NVARCHAR(24) NOT NULL, /*ISIN*/\n\t[Date] DATETIME,\n\t[Return] NUMERIC(38,20),\n\t[Year] SMALLINT NOT NULL,\n\t[Month] TINYINT NOT NULL,\n\tPRIMARY KEY ([ISIN],[Year],[Month])\n)\n\nBULK INSERT Momentum2016\nFROM 'C:/Users/hamzaamjad/Dropbox (ASU)/SIMF 2016 - 2017/Data/Datastream/Momentum/Datastream Momentum Cleaned 12-18-2016.csv' /*This filepath needs to be updated each time manually...if you have an automated way to do this and show me how, I promise to be your personal bitch for a day*/\nWITH (FIRSTROW=2,FIELDTERMINATOR=',',ROWTERMINATOR='0x0a');\n'''\n )\n\"\"\"\n\n#Adds new data to the Momentum2016 Table\ncursor.execute(\n'''\nUSE Datastream /*Selects the \"Datastream\" database from the existing SQL Schema*/\n\nBULK INSERT Momentum2016\nFROM 'C:/Users/hamzaamjad/Dropbox (ASU)/SIMF 2016 - 2017/Data/Datastream/Momentum/Datastream Momentum Cleaned 12-18-2016.csv' /*This filepath needs to be updated each time manually...if you have an automated way to do this and show me how, I promise to be your personal bitch for a day*/\nWITH (FIRSTROW=2,FIELDTERMINATOR=',',ROWTERMINATOR='0x0a');\n'''\n )\n\n#Close the cursor\ncursor.close()\n\n#Close the ODBC Connection\nsimfund.close()","sub_path":"Datastream Momentum.py","file_name":"Datastream Momentum.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"448480280","text":"import requests\nimport app.config.config as config\nfrom random import choice\nfrom app import logger\n\n\n\nclass Wiki:\n \"\"\"\n return 
the info on the address from the wiki API\n \"\"\"\n\n def __init__(self):\n self.url_api = 'http://fr.wikipedia.org/w/api.php'\n self.response_for_api = config.dict_response_grandpy\n\n def _get_wiki_id_page(self, name) -> int:\n \"\"\"\n return the id_page wiki page for search name\n :param name: search name in api wiki\n :return: name id_page\n \"\"\"\n parameters = {\n \"action\": \"query\",\n \"list\": \"search\",\n \"srsearch\": name,\n \"format\": \"json\",\n }\n get_api = requests.get(self.url_api, params=parameters).json()\n page_id = get_api['query']['search'][0]['pageid']\n\n return page_id\n\n def get_wiki_address(self, name) -> str:\n \"\"\"\n return the id_page wiki page for search name\n :param name: search name in api wiki\n :return: research history (name)\n \"\"\"\n try:\n page_id = self._get_wiki_id_page(name=name)\n parameters_by_id = {\n \"format\": \"json\",\n \"action\": \"query\",\n \"prop\": \"extracts\",\n \"exintro\": 1,\n \"explaintext\": 1,\n \"exsentences\": 2,\n \"pageids\": page_id\n }\n get_api_by_id = requests.get(self.url_api, params=parameters_by_id).json()['query']['pages'][str(page_id)][\n 'extract']\n\n test_coherence = 0\n for word in name.split(\" \"):\n if word.lower() in get_api_by_id.lower():\n test_coherence += 1\n\n if ((test_coherence*100) / len(name.split(\" \"))) >= 60:\n return get_api_by_id\n else:\n bad_response = choice(self.response_for_api['api_wiki']['bad_response'])\n return bad_response\n\n except requests.exceptions.ConnectionError as e:\n logger.info(\"Probleme de connexion à l'API WIKI\")\n logger.info(e)\n choise_response_connection_error = choice(self.response_for_api['api_wiki']['connection_error'])\n\n return choise_response_connection_error\n except IndexError as e:\n logger.error(\"Aucune information dans l'API WIKI\")\n logger.error(e)\n bad_response = choice(self.response_for_api['api_wiki']['bad_response'])\n\n return 
bad_response\n","sub_path":"app/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"337965749","text":"def md_l293d(duty):\n import RPi.GPIO as GPIO\n import time\n from PWM import set_pwm\n \n # Select Raspi pin settings\n GPIO.setmode(GPIO.BOARD)\n\n # Suppress Warnings\n GPIO.setwarnings(False)\n\n # Define Pin Functions\n\n # Motor Left will have 2 pins i.e. Pin 38 and Pin 40\n LeftMotor_up = 38\n LeftMotor_down = 40\n\n\n # Motor right will have 2 pins i.e. Pin 32 and Pin 36\n RightMotor_up = 32\n RightMotor_down = 36\n\n\n # Set the proper GPIOS\n GPIO.setup(LeftMotor_up,GPIO.OUT)\n GPIO.setup(LeftMotor_down,GPIO.OUT)\n GPIO.setup(RightMotor_up,GPIO.OUT)\n GPIO.setup(RightMotor_down,GPIO.OUT)\n\n # Set the port pins for Motor\n GPIO.output(LeftMotor_up,1)\n GPIO.output(LeftMotor_down,0) \n GPIO.output(RightMotor_up,1)\n GPIO.output(RightMotor_down,0)\n\n set_pwm(duty)\n\n","sub_path":"l293d.py","file_name":"l293d.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"83832751","text":"import sys\nimport os\n\ndef addtwodimdict(thedict, key_a, key_b, val):\n ''' this is a function to add two dimetion dict '''\n if key_a in thedict:\n thedict[key_a].update({key_b: val})\n else:\n thedict.update({key_a: {key_b: val}})\n return thedict\n\n\ndef deal_freq_file(freq_file):\n adict = {}\n clonotype_list = []\n with open(freq_file, \"r\") as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n if line.startswith(\"CloneId\"):\n continue\n CloneId, Clonotype, Reads, Frequency, FreqLog10 = line.split(\"\\t\")\n if Clonotype not in adict:\n adict[Clonotype] = Frequency\n clonotype_list.append(Clonotype)\n return adict, clonotype_list\n\n\ndef main():\n sample_name_list = sys.argv[1:]\n a_big_list = []\n a_big_dict = {}\n for sample_name in 
sample_name_list:\n print(sample_name)\n freq_file = sample_name + '.filtered2reads.freq.xls'\n print(freq_file)\n if not os.path.exists(freq_file):\n sys.exit(\"file not exists!\")\n clonotype_dict, clonotype_list = deal_freq_file(freq_file)\n\n # merge dict \n for clonotype in clonotype_dict:\n addtwodimdict(a_big_dict, clonotype, sample_name, clonotype_dict[clonotype] )\n # merge list\n for clonotype in clonotype_list:\n a_big_list.append(clonotype)\n \n # iterate list\n output_file = open(\"_VS_\".join(sample_name_list) + '.freq.xls', \"w\")\n output_file.write(\"Clonetype\\t{}\\n\".format(\"\\t\".join(sample_name_list)))\n for clonotype in set(a_big_list):\n output_file.write(\"{}\".format(clonotype))\n for sample in sample_name_list:\n output_file.write(\"\\t{}\".format(a_big_dict[clonotype].get(sample, \"1e-10\")))\n output_file.write(\"\\n\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"TCR_Library_1_pipeline/scripts/compare_frequency_in_multi_patchs_result.py","file_name":"compare_frequency_in_multi_patchs_result.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"241512415","text":"# encoding: UTF-8\n\nimport time\nimport os\nimport instrument\nimport csv\nimport workdays\nimport json\nimport order\nfrom misc import *\nfrom eventEngine import *\nfrom vtConstant import *\n\n########################################################################\nclass Gateway(object):\n \"\"\"交易接口\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, agent, gatewayName = 'Gateway'):\n \"\"\"Constructor\"\"\"\n self.gatewayName = gatewayName\n self.agent = agent\n self.eventEngine = agent.eventEngine\n self.file_prefix = agent.folder + gatewayName + '_'\n self.qry_account = {}\n self.qry_pos = {}\n self.id2order = {}\n self.positions = {}\n self.instruments = [] # 已订阅合约代码\n self.eod_flag = False\n self.account_info = 
{'available': 0,\n 'locked_margin': 0,\n 'used_margin': 0,\n 'curr_capital': 1000000,\n 'prev_capital': 1000000,\n 'pnl_total': 0,\n 'yday_pnl': 0,\n 'tday_pnl': 0,\n }\t\t\t\t\t\t\t\n self.order_stats = {'total_submit': 0, 'total_failure': 0, 'total_cancel':0 }\n self.order_constraints = {\t'total_submit': 2000, 'total_cancel': 2000, 'total_failure':500, \\\n 'submit_limit': 200, 'cancel_limit': 200, 'failure_limit': 200 }\n #----------------------------------------------------------------------\n def initialize(self):\n pass\n\n def day_finalize(self, tday):\n self.save_local_positions(tday)\n eod_pos = {}\n for inst in self.positions:\n pos = self.positions[inst]\n eod_pos[inst] = [pos.curr_pos.long, pos.curr_pos.short]\n self.id2order = {}\n self.positions = {}\n self.order_stats = {'total_submit': 0, 'total_failure': 0, 'total_cancel':0 }\n for inst in self.instruments:\n self.order_stats[inst] = {'submit': 0, 'cancel':0, 'failure': 0, 'status': True }\n if sum(eod_pos[inst])>0:\n self.positions[inst] = order.Position(self.agent.instruments[inst], self)\n self.positions[inst].pos_yday.long = eod_pos[inst][0]\n self.positions[inst].pos_yday.short = eod_pos[inst][1]\n self.positions[inst].re_calc()\n self.account_info['prev_capital'] = self.account_info['curr_capital']\n\n def add_instrument(self, instID):\n if instID not in self.positions:\n self.positions[instID] = order.Position(self.agent.instruments[instID], self)\n if instID not in self.order_stats:\n self.order_stats[instID] = {'submit': 0, 'cancel':0, 'failure': 0, 'status': True }\n if instID not in self.qry_pos:\n self.qry_pos[instID] = {'tday': [0, 0], 'yday': [0, 0]}\n\n def event_subscribe(self):\n pass\n\n def onTick(self, tick):\n \"\"\"市场行情推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_TICK)\n event1.dict['data'] = tick\n self.eventEngine.put(event1)\n \n # 特定合约代码的事件\n event2 = Event(type=EVENT_TICK + tick.instID)\n event2.dict['data'] = tick\n self.eventEngine.put(event2)\n \n 
#----------------------------------------------------------------------\n def onTrade(self, trade):\n \"\"\"成交信息推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_TRADE)\n event1.dict['data'] = trade\n self.eventEngine.put(event1)\n \n # 特定合约的成交事件\n event2 = Event(type=EVENT_TRADE + trade.order_ref)\n event2.dict['data'] = trade\n self.eventEngine.put(event2) \n \n #----------------------------------------------------------------------\n def onOrder(self, order):\n \"\"\"订单变化推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_ORDER)\n event1.dict['data'] = order\n self.eventEngine.put(event1)\n \n # 特定订单编号的事件\n event2 = Event(type=EVENT_ORDER + order.order_ref)\n event2.dict['data'] = order\n self.eventEngine.put(event2)\n \n #----------------------------------------------------------------------\n def onPosition(self, position):\n \"\"\"持仓信息推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_POSITION)\n event1.dict['data'] = position\n self.eventEngine.put(event1)\n \n # 特定合约代码的事件\n event2 = Event(type=EVENT_POSITION+position.instID)\n event2.dict['data'] = position\n self.eventEngine.put(event2)\n \n #----------------------------------------------------------------------\n def onAccount(self, account):\n \"\"\"账户信息推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_ACCOUNT)\n event1.dict['data'] = account\n self.eventEngine.put(event1)\n \n # 特定合约代码的事件\n event2 = Event(type=EVENT_ACCOUNT+account.vtAccountID)\n event2.dict['data'] = account\n self.eventEngine.put(event2)\n \n #----------------------------------------------------------------------\n def onError(self, error):\n \"\"\"错误信息推送\"\"\"\n # 通用事件\n logContent = error.errorMsg\n self.onLog(logContent, level = logging.WARNING)\n \n #----------------------------------------------------------------------\n def onLog(self, log_content, level = logging.DEBUG):\n \"\"\"日志推送\"\"\"\n # 通用事件\n event = Event(type=EVENT_LOG)\n event.dict['data'] = log_content\n event.dict['gateway'] = self.gatewayName\n event.dict['level'] = level\n 
self.eventEngine.put(event)\n \n #----------------------------------------------------------------------\n def onContract(self, contract):\n \"\"\"合约基础信息推送\"\"\"\n # 通用事件\n event1 = Event(type=EVENT_CONTRACT)\n event1.dict['data'] = contract\n self.eventEngine.put(event1) \n \n def save_order_list(self, tday):\n order.save_order_list(tday, self.id2order, self.file_prefix)\n\n def load_order_list(self, tday):\n self.id2order = order.load_order_list(tday, self.file_prefix, self.positions)\n\n def load_local_positions(self, tday):\n pos_date = tday\n logfile = self.file_prefix + 'EODPos_' + pos_date.strftime('%y%m%d')+'.csv'\n if not os.path.isfile(logfile):\n pos_date = workdays.workday(pos_date, -1, CHN_Holidays)\n logfile = self.file_prefix + 'EODPos_' + pos_date.strftime('%y%m%d')+'.csv'\n if not os.path.isfile(logfile):\n logContent = \"no prior position file is found\"\n self.onLog(logContent, level = logging.INFO)\n return False\n else:\n self.eod_flag = True\n with open(logfile, 'rb') as f:\n reader = csv.reader(f)\n for idx, row in enumerate(reader):\n if row[0] == 'capital':\n self.account_info['prev_capital'] = float(row[1])\n elif row[0] == 'pos':\n inst = row[1]\n if inst in self.instruments:\n if inst not in self.positions:\n self.positions[inst] = order.Position(self.agent.instruments[inst], self)\n self.positions[inst].pos_yday.long = int(row[2])\n self.positions[inst].pos_yday.short = int(row[3])\n return True\n\n def save_local_positions(self, tday):\n file_prefix = self.file_prefix\n logfile = file_prefix + 'EODPos_' + tday.strftime('%y%m%d')+'.csv'\n if os.path.isfile(logfile):\n return False\n else:\n with open(logfile,'wb') as log_file:\n file_writer = csv.writer(log_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL);\n for inst in self.positions:\n pos = self.positions[inst]\n pos.re_calc()\n self.calc_margin()\n file_writer.writerow(['capital', self.account_info['curr_capital']])\n for inst in self.positions:\n pos = 
self.positions[inst]\n if abs(pos.curr_pos.long) + abs(pos.curr_pos.short) > 0:\n file_writer.writerow(['pos', inst, pos.curr_pos.long, pos.curr_pos.short])\n return True\n\n def calc_margin(self):\n locked_margin = 0\n used_margin = 0\n yday_pnl = 0\n tday_pnl = 0\n for instID in self.positions:\n inst = self.agent.instruments[instID]\n pos = self.positions[instID]\n under_price = 0.0\n if (inst.ptype == instrument.ProductType.Option):\n under_price = self.agent.instruments[inst.underlying].price\n locked_margin += pos.locked_pos.long * inst.calc_margin_amount(ORDER_BUY, under_price)\n locked_margin += pos.locked_pos.short * inst.calc_margin_amount(ORDER_SELL, under_price) \n used_margin += pos.curr_pos.long * inst.calc_margin_amount(ORDER_BUY, under_price)\n used_margin += pos.curr_pos.short * inst.calc_margin_amount(ORDER_SELL, under_price)\n yday_pnl += (pos.pos_yday.long - pos.pos_yday.short) * (inst.price - inst.prev_close) * inst.multiple\n tday_pnl += pos.tday_pos.long * (inst.price-pos.tday_avp.long) * inst.multiple\n tday_pnl -= pos.tday_pos.short * (inst.price-pos.tday_avp.short) * inst.multiple \n self.account_info['locked_margin'] = locked_margin\n self.account_info['used_margin'] = used_margin\n self.account_info['pnl_total'] = yday_pnl + tday_pnl\n self.account_info['curr_capital'] = self.account_info['prev_capital'] + self.account_info['pnl_total']\n self.account_info['available'] = self.account_info['curr_capital'] - self.account_info['locked_margin']\n\n #----------------------------------------------------------------------\n def connect(self):\n \"\"\"连接\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def subscribe(self, subscribeReq):\n \"\"\"订阅行情\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def sendOrder(self, orderReq):\n \"\"\"发单\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def cancelOrder(self, 
cancelOrderReq):\n \"\"\"撤单\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def qryAccount(self):\n \"\"\"查询账户资金\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def qryPosition(self):\n \"\"\"查询持仓\"\"\"\n pass\n \n #----------------------------------------------------------------------\n def close(self):\n \"\"\"关闭\"\"\"\n pass\n\n def register_event_handler(self):\n pass\n\n########################################################################\nclass VtBaseData(object):\n \"\"\"回调函数推送数据的基础类,其他数据类继承于此\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.gatewayName = EMPTY_STRING # Gateway名称 \n self.rawData = None # 原始数据\n \n \n########################################################################\nclass VtTickData(VtBaseData):\n \"\"\"Tick行情数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtTickData, self).__init__()\n \n # 代码相关\n self.symbol = EMPTY_STRING # 合约代码\n self.exchange = EMPTY_STRING # 交易所代码\n self.instID = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码\n \n # 成交数据\n self.lastPrice = EMPTY_FLOAT # 最新成交价\n self.lastVolume = EMPTY_INT # 最新成交量\n self.volume = EMPTY_INT # 今天总成交量\n self.openInterest = EMPTY_INT # 持仓量\n self.time = EMPTY_STRING # 时间 11:20:56.5\n self.date = EMPTY_STRING # 日期 20151009\n \n # 常规行情\n self.openPrice = EMPTY_FLOAT # 今日开盘价\n self.highPrice = EMPTY_FLOAT # 今日最高价\n self.lowPrice = EMPTY_FLOAT # 今日最低价\n self.preClosePrice = EMPTY_FLOAT\n \n self.upperLimit = EMPTY_FLOAT # 涨停价\n self.lowerLimit = EMPTY_FLOAT # 跌停价\n \n # 五档行情\n self.bidPrice1 = EMPTY_FLOAT\n self.bidPrice2 = EMPTY_FLOAT\n self.bidPrice3 = EMPTY_FLOAT\n self.bidPrice4 = EMPTY_FLOAT\n self.bidPrice5 = EMPTY_FLOAT\n \n self.askPrice1 = EMPTY_FLOAT\n self.askPrice2 = EMPTY_FLOAT\n self.askPrice3 = EMPTY_FLOAT\n 
self.askPrice4 = EMPTY_FLOAT\n self.askPrice5 = EMPTY_FLOAT \n \n self.bidVolume1 = EMPTY_INT\n self.bidVolume2 = EMPTY_INT\n self.bidVolume3 = EMPTY_INT\n self.bidVolume4 = EMPTY_INT\n self.bidVolume5 = EMPTY_INT\n \n self.askVolume1 = EMPTY_INT\n self.askVolume2 = EMPTY_INT\n self.askVolume3 = EMPTY_INT\n self.askVolume4 = EMPTY_INT\n self.askVolume5 = EMPTY_INT \n \n \n########################################################################\nclass VtTradeData(VtBaseData):\n \"\"\"成交数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtTradeData, self).__init__()\n \n # 代码编号相关\n self.symbol = EMPTY_STRING # 合约代码\n self.exchange = EMPTY_STRING # 交易所代码\n self.instID = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码\n \n self.tradeID = EMPTY_STRING # 成交编号\n self.vtTradeID = EMPTY_STRING # 成交在vt系统中的唯一编号,通常是 Gateway名.成交编号\n \n self.orderID = EMPTY_STRING # 订单编号\n self.order_ref = EMPTY_STRING # 订单在vt系统中的唯一编号,通常是 Gateway名.订单编号\n \n # 成交相关\n self.direction = EMPTY_UNICODE # 成交方向\n self.offset = EMPTY_UNICODE # 成交开平仓\n self.price = EMPTY_FLOAT # 成交价格\n self.volume = EMPTY_INT # 成交数量\n self.tradeTime = EMPTY_STRING # 成交时间\n \n\n########################################################################\nclass VtOrderData(VtBaseData):\n \"\"\"订单数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtOrderData, self).__init__()\n \n # 代码编号相关\n self.symbol = EMPTY_STRING # 合约代码\n self.exchange = EMPTY_STRING # 交易所代码\n self.instID = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码\n \n self.orderID = EMPTY_STRING # 订单编号 local order ID\n self.order_ref = EMPTY_STRING # for Order class object ID\n self.orderSysID = EMPTY_STRING\t\t\t# remote order ID\n\n # 报单相关\n self.direction = EMPTY_UNICODE # 报单方向\n self.offset = EMPTY_UNICODE # 报单开平仓\n self.price = EMPTY_FLOAT # 报单价格\n self.totalVolume = EMPTY_INT # 报单总数量\n 
self.tradedVolume = EMPTY_INT # 报单成交数量\n self.status = EMPTY_UNICODE # 报单状态\n \n self.orderTime = EMPTY_STRING # 发单时间\n self.cancelTime = EMPTY_STRING # 撤单时间\n \n # CTP/LTS相关\n self.frontID = EMPTY_INT # 前置机编号\n self.sessionID = EMPTY_INT # 连接编号\n\n \n########################################################################\nclass VtPositionData(VtBaseData):\n \"\"\"持仓数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtPositionData, self).__init__()\n \n # 代码编号相关\n self.symbol = EMPTY_STRING # 合约代码\n self.exchange = EMPTY_STRING # 交易所代码\n self.instID = EMPTY_STRING # 合约在vt系统中的唯一代码,合约代码.交易所代码 \n \n # 持仓相关\n self.direction = EMPTY_STRING # 持仓方向\n self.position = EMPTY_INT # 持仓量\n self.frozen = EMPTY_INT # 冻结数量\n self.price = EMPTY_FLOAT # 持仓均价\n self.vtPositionName = EMPTY_STRING # 持仓在vt系统中的唯一代码,通常是instID.方向\n \n # 20151020添加\n self.ydPosition = EMPTY_INT # 昨持仓\n\n\n########################################################################\nclass VtAccountData(VtBaseData):\n \"\"\"账户数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtAccountData, self).__init__()\n \n # 账号代码相关\n self.accountID = EMPTY_STRING # 账户代码\n self.vtAccountID = EMPTY_STRING # 账户在vt中的唯一代码,通常是 Gateway名.账户代码\n \n # 数值相关\n self.preBalance = EMPTY_FLOAT # 昨日账户结算净值\n self.balance = EMPTY_FLOAT # 账户净值\n self.available = EMPTY_FLOAT # 可用资金\n self.commission = EMPTY_FLOAT # 今日手续费\n self.margin = EMPTY_FLOAT # 保证金占用\n self.closeProfit = EMPTY_FLOAT # 平仓盈亏\n self.positionProfit = EMPTY_FLOAT # 持仓盈亏\n \n\n########################################################################\nclass VtErrorData(VtBaseData):\n \"\"\"错误数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtErrorData, self).__init__()\n \n self.errorID = EMPTY_STRING # 
错误代码\n self.errorMsg = EMPTY_UNICODE # 错误信息\n self.additionalInfo = EMPTY_UNICODE # 补充信息\n\n\n########################################################################\nclass VtLogData(VtBaseData):\n \"\"\"日志数据类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtLogData, self).__init__()\n \n self.logTime = time.strftime('%X', time.localtime()) # 日志生成时间\n self.logContent = EMPTY_UNICODE # 日志信息\n self.level = logging.DEBUG\n\n\n########################################################################\nclass VtContractData(VtBaseData):\n \"\"\"合约详细信息类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(VtBaseData, self).__init__()\n \n self.symbol = EMPTY_STRING # 代码\n self.exchange = EMPTY_STRING # 交易所代码\n self.instID = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码\n self.name = EMPTY_UNICODE # 合约中文名\n \n self.productClass = EMPTY_UNICODE # 合约类型\n self.size = EMPTY_INT # 合约大小\n self.priceTick = EMPTY_FLOAT # 合约最小价格TICK\n \n # 期权相关\n self.strikePrice = EMPTY_FLOAT # 期权行权价\n self.underlyingSymbol = EMPTY_STRING # 标的物合约代码\n self.optionType = EMPTY_UNICODE # 期权类型\n\n\n########################################################################\nclass VtSubscribeReq(object):\n \"\"\"订阅行情时传入的对象类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.symbol = EMPTY_STRING # 代码\n self.exchange = EMPTY_STRING # 交易所\n \n # 以下为IB相关\n self.productClass = EMPTY_UNICODE # 合约类型\n self.currency = EMPTY_STRING # 合约货币\n self.expiry = EMPTY_STRING # 到期日\n self.strikePrice = EMPTY_FLOAT # 行权价\n self.optionType = EMPTY_UNICODE # 期权类型\n\n\n########################################################################\nclass VtOrderReq(object):\n \"\"\"发单时传入的对象类\"\"\"\n\n #----------------------------------------------------------------------\n def 
__init__(self):\n \"\"\"Constructor\"\"\"\n self.symbol = EMPTY_STRING # 代码\n self.exchange = EMPTY_STRING # 交易所\n self.price = EMPTY_FLOAT # 价格\n self.volume = EMPTY_INT # 数量\n \n self.priceType = EMPTY_STRING # 价格类型\n self.direction = EMPTY_STRING # 买卖\n self.offset = EMPTY_STRING # 开平\n \n # 以下为IB相关\n self.productClass = EMPTY_UNICODE # 合约类型\n self.currency = EMPTY_STRING # 合约货币\n self.expiry = EMPTY_STRING # 到期日\n self.strikePrice = EMPTY_FLOAT # 行权价\n self.optionType = EMPTY_UNICODE # 期权类型 \n \n\n########################################################################\nclass VtCancelOrderReq(object):\n \"\"\"撤单时传入的对象类\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.symbol = EMPTY_STRING # 代码\n self.exchange = EMPTY_STRING # 交易所\n \n # 以下字段主要和CTP、LTS类接口相关\n self.orderID = EMPTY_STRING # 报单号\n self.frontID = EMPTY_STRING # 前置机号\n self.sessionID = EMPTY_STRING # 会话号\n","sub_path":"gateway.py","file_name":"gateway.py","file_ext":"py","file_size_in_byte":23422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"504298751","text":"import time\nfrom models import Model\nfrom models.user import User\nfrom utils import log\n\n\nclass Topic(Model):\n\n def __init__(self, form):\n self.id = None\n self.views = 0\n self.title = form.get('title', '')\n self.content = form.get('content', '')\n self.created_time = int(time.time())\n self.updated_time = self.created_time\n self.user_id = form.get('user_id', '')\n self.board_id = int(form.get('board_id', -1))\n\n @classmethod\n def get(cls, id):\n \"\"\"\n 覆写父类的 get 方法\n 用来统计浏览量\n \"\"\"\n topic = cls.find_by(id=id)\n topic.views += 1\n topic.save()\n return topic\n\n def replies(self):\n from .reply import Reply\n\n ms = Reply.find_all(topic_id=self.id)\n log('replies -> ms', ms)\n return ms\n\n def board(self):\n from .board import Board\n m = Board.find(self.board_id)\n return m\n\n 
def user(self):\n u = User.find(id=self.user_id)\n return u\n","sub_path":"web18-20/bbs/models/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"616317310","text":"import os\n\nfrom albumentations.augmentations.transforms import Cutout, HorizontalFlip, RandomBrightnessContrast, VerticalFlip\nfrom albumentations.core.composition import OneOf\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport sys\nimport pandas as pd\nimport numpy as np\n\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport random\nimport timm\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\n\nfrom torch.utils.data import Dataset, DataLoader\n\nimport argparse\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom sklearn.metrics import roc_auc_score\n\nproject_path = os.path.abspath('.')\ndata_path = os.path.join(project_path, 'input')\ntrain_path = os.path.join(data_path, 'train')\ntest_path = os.path.join(data_path, 'test')\ncsv_path = os.path.join(data_path, 'sample_submission.csv')\n\nparser = argparse.ArgumentParser()\n\nlabel_path = \"/datadisk/kg/seti/train_df.csv\"\nlabel_df = pd.read_csv(label_path)\ndevice = torch.device('cuda')\n\nbest_auc = .0\n\ncfg = {\n 'fold_num': 5,\n 'seed': 2021,\n 'model_arch': 'tf_efficientnet_b2',\n 'img_size': 512,\n 'epochs': 9,\n 'train_batch_size': 64,\n 'val_batch_size': 64,\n 'T_0': 3,\n 'T_mul': 1,\n 'lr': 1e-5,\n 'min_lr': 1e-6,\n 'accum_iter': 1,\n 'weight_decay': 1e-6,\n 'num_workers': 12,\n 'device': 'cuda',\n 'device_num': 1,\n 'skip_fold': 0\n}\n\ndef seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\ndef 
train_transforms():\n return A.Compose([\n # A.Resize(cfg['img_size'], cfg['img_size']),\n A.Resize(768, 768),\n A.RandomResizedCrop(cfg['img_size'], cfg['img_size'], scale=(0.9, 1.0)),\n A.HorizontalFlip(p=0.5),\n\n A.OneOf([\n A.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2), p=0.5),\n A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),\n ], p=0.6),\n \n OneOf([\n A.MotionBlur(blur_limit=5),\n A.MedianBlur(blur_limit=5),\n A.GaussianBlur(blur_limit=5),\n A.GaussNoise(var_limit=(5.0, 30.0)),\n ], p=0.3),\n\n OneOf([\n A.OpticalDistortion(distort_limit=0.8),\n A.GridDistortion(num_steps=5, distort_limit=0.8),\n A.ElasticTransform(alpha=3),\n ], p=0.3),\n \n A.Cutout(num_holes=5, max_h_size=int(0.08*cfg['img_size']), max_w_size=int(0.08*cfg['img_size']), p=0.4),\n ToTensorV2(),\n ])\n\ndef valid_transforms():\n return A.Compose([\n A.Resize(cfg['img_size'], cfg['img_size']),\n ToTensorV2(),\n ])\n\nclass LabelSmoothing(nn.Module):\n def __init__(self, smoothing = 0.02):\n super(LabelSmoothing, self).__init__()\n self.smoothing = smoothing \n\n def forward(self, logits, labels):\n labels[labels == 1] = 1 - self.smoothing \n labels[labels == 0] = self.smoothing \n return F.binary_cross_entropy_with_logits(logits, labels)\n\nclass SetiDataset(Dataset):\n def __init__(self, df, transform=None):\n super(SetiDataset, self).__init__()\n self.df = df\n self.file_names = df.file_path.values\n self.labels = df.target.values\n self.transform = transform\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n file_path = self.file_names[idx]\n image = np.load(file_path)\n image = image.astype(np.float32)\n image = np.vstack(image).transpose((1, 0))\n\n image = self.transform(image=image)['image']\n label = torch.tensor(self.labels[idx]).float()\n return image, label\n\nclass CustomModel(nn.Module):\n def __init__(self, pretrained=True):\n super(CustomModel, self).__init__()\n 
self.model = timm.create_model(cfg['model_arch'], pretrained=pretrained, in_chans=1)\n\n n_features = self.model.classifier.in_features\n self.model.global_pool = nn.Identity()\n self.model.classifier = nn.Identity()\n self.pooling = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(n_features, 1)\n\n def forward(self, x):\n features = self.model(x)\n pooled_features = self.pooling(features).view(x.size(0), -1)\n return self.fc(pooled_features)\n\ndef prepare_dataloader(fold):\n train_idx = label_df[label_df.fold != fold].index\n val_idx = label_df[label_df.fold == fold].index\n\n train_df = label_df.loc[train_idx].reset_index(drop=True)\n val_df = label_df.loc[val_idx].reset_index(drop=True)\n\n train_dataset = SetiDataset(train_df, transform=train_transforms())\n val_dataset = SetiDataset(val_df, transform=valid_transforms())\n train_loader = DataLoader(train_dataset, batch_size=cfg['train_batch_size'], shuffle=True,\n pin_memory=False, drop_last=True, num_workers=cfg['num_workers'])\n val_loader = DataLoader(val_dataset, batch_size=cfg['val_batch_size'], shuffle=False,\n pin_memory=False, drop_last=False, num_workers=cfg['num_workers'])\n return train_loader, val_loader, train_df, val_df\n\ndef train(train_loader, net, optimizer, scheduler, criterion):\n net.train()\n train_loss = 0.\n \n progress_bar = tqdm(enumerate(train_loader), total=len(train_loader))\n for batch_idx, (images, labels) in progress_bar:\n images, labels = images.to(device), labels.to(device)\n\n outputs = net(images)\n loss = criterion(outputs.view(-1), labels)\n loss /= cfg['accum_iter']\n loss.backward()\n if ((batch_idx+1) % cfg['accum_iter'] == 0 or batch_idx == len(train_loader)-1):\n optimizer.step()\n net.zero_grad()\n optimizer.zero_grad()\n\n train_loss += loss.item()\n description = 'Loss: %.3f' % (train_loss/(batch_idx+1))\n progress_bar.set_description(description)\n\n scheduler.step()\n\ndef save_model(net, fold, auc=.0, loss=.0):\n state = {\n 'net': net.state_dict(),\n 'auc': 
auc,\n 'loss': loss\n }\n print(f'Saving auc: {auc}...')\n model_arch = cfg['model_arch']\n img_size = cfg['img_size']\n torch.save(state, os.path.join(project_path, f'checkpoint/seti-{model_arch}-fold{fold}-{img_size}-auc.pth'))\n\ndef validate(val_loader, net, criterion, fold, val_labels):\n global best_auc\n net.eval()\n test_loss = 0.\n preds = []\n\n progress_bar = tqdm(enumerate(val_loader), total=len(val_loader))\n for batch_idx, (inputs, labels) in progress_bar:\n x, labels = inputs.to(device), labels.to(device)\n \n with torch.no_grad():\n outputs = net(x)\n preds.append(outputs.sigmoid().detach().cpu().numpy())\n loss = criterion(outputs.view(-1), labels)\n test_loss += loss.item()\n\n description = 'Loss: %.3f' % (test_loss/(batch_idx+1))\n progress_bar.set_description(description)\n\n preds = np.concatenate(preds)\n auc = roc_auc_score(val_labels, preds)\n\n test_loss /= len(val_loader)\n\n if auc > best_auc:\n best_auc = auc\n print(auc)\n save_model(net, fold, auc, test_loss)\n\ndef main_loop(resume):\n os.makedirs('checkpoint', exist_ok=True)\n seed_everything(cfg['seed'])\n torch.cuda.empty_cache()\n\n for fold in range(cfg['fold_num']):\n if fold < cfg['skip_fold']:\n continue\n print(f'{fold}th fold training starts')\n\n global best_auc\n best_auc = .0\n train_loader, val_loader, _, val_df = prepare_dataloader(fold)\n\n if resume:\n net = CustomModel(pretrained=False)\n checkpoint = torch.load(f\"checkpoint/seti-{cfg['model_arch']}-fold{fold}-{cfg['img_size']}-auc.pth\")\n net.load_state_dict(checkpoint['net'])\n best_auc = checkpoint['auc']\n else:\n net = CustomModel(pretrained=True)\n net = net.cuda()\n\n optimizer = optim.AdamW(net.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=cfg['T_0'], T_mult=cfg['T_mul'], eta_min=cfg['min_lr'], last_epoch=-1)\n\n for epoch in range(cfg['epochs']):\n print(f'\\nEpoch {epoch}')\n if epoch < 0:\n criterion = 
nn.BCEWithLogitsLoss()\n else:\n criterion = LabelSmoothing()\n\n train(train_loader, net, optimizer, scheduler, criterion)\n validate(val_loader, net, criterion, fold, val_df.target.values)\n \n del net, train_loader, val_loader, optimizer, scheduler\n torch.cuda.empty_cache()\n\nparser.add_argument('--resume', type=int, default=1)\nargs = parser.parse_args()\n\nmain_loop(args.resume)\n","sub_path":"train_effb2.py","file_name":"train_effb2.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418929734","text":"#!/usr/bin/python3\n# coding: utf-8 \n\n''' \nОпределение количества различных подстрок с использованием хеш-функции. \nПусть на вход функции дана строка. Требуется вернуть количество различных подстрок в этой строке.\nПримечание: в сумму не включаем пустую строку и строку целиком.\nПример работы функции:\n# >>> func(\"papa\")\n6\n# >>> func(\"sova\")\n9\n'''\ndef count_subs(string):\n result = set()\n\n for i in range(1, len(string)):\n for j in range(len(string) - i + 1):\n h = hash(string[j:i+j])\n result.add(h)\n #print(string[j:i+j])\n\n return len(result)\n\ns = input('Введите строку: ')\nprint(f'В данной строке {count_subs(s)} различных подстрок')\n","sub_path":"Lesson_9/Check/Lesson_9/les_9_task_1.py","file_name":"les_9_task_1.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"293386708","text":"\"\"\" postgresql, sqlalchemy PostgreSQL Provider.\n\nNotes on sqlalchemy Provider\n--------------------\nThis provider implements a basic set of funcionalities from aiopg\nsqlalchemy and use threads\n\"\"\"\n\nimport asyncio\nfrom threading import Thread\nimport logging\nimport aiopg\nfrom aiopg.sa import create_engine\nfrom psycopg2.extras import NamedTupleCursor\nfrom sqlalchemy.exc import (\n DatabaseError,\n OperationalError,\n SQLAlchemyError,\n)\n\nfrom 
..exceptions import (\n ConnectionTimeout,\n DataError,\n EmptyStatement,\n NoDataFound,\n ProviderError,\n StatementError,\n TooManyConnections,\n)\nfrom . import *\n\nfrom asyncdb.providers.sql import SQLProvider, baseCursor\n\n\nclass postgresqlCursor(baseCursor):\n _connection: aiopg.Connection = None\n\n async def __aenter__(self) -> \"postgresqlCursor\":\n # self._cursor = await self._connection.cursor(cursor_factory=NamedTupleCursor)\n self._cursor = await self._connection.execute(self._sentence, self._params)\n return self\n\n\nclass postgresql(SQLProvider, Thread):\n _provider = \"postgresql\"\n _syntax = \"sql\"\n _test_query = \"SELECT 1::integer as column\"\n _dsn = \"postgresql://{user}:{password}@{host}:{port}/{database}\"\n _loop = None\n _pool = None\n # _engine = None\n _connection = None\n _connected = False\n _initialized_on = None\n\n def __init__(self, dsn=\"\", loop=None, params={}):\n self._params = params\n if not dsn:\n self._dsn = self.create_dsn(self._params)\n else:\n self._dsn = dsn\n self._result = None\n self._connection = None\n self._engine = None\n self._loop = None\n # create a new loop before thread\n self._loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self._loop)\n # calling parent Thread\n Thread.__init__(self)\n self._engine = self.connect()\n\n def __del__(self):\n self._loop.run_until_complete(self.terminate())\n\n \"\"\"\n Context magic Methods\n \"\"\"\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self._loop.run_until_complete(self.release())\n\n \"\"\"\n Thread Methodss\n \"\"\"\n\n def start(self):\n self._logger.debug(\"Running Start\")\n Thread.start(self)\n\n def join(self):\n self._logger.debug(\"Running Join\")\n Thread.join(self)\n\n def run(self):\n self._logger.debug(\"Running RUN\")\n\n def close(self):\n self._logger.debug(\"Running Close\")\n if self._loop:\n try:\n self._loop.run_until_complete(\n asyncio.wait_for(self.terminate(), timeout=5)\n )\n finally:\n # close 
loop\n self._loop.close()\n\n async def terminate(self):\n \"\"\"\n Closing a Connection\n \"\"\"\n if self._connection:\n try:\n await self._engine.release(self._connection)\n except Exception as err:\n await self._connection.close()\n if self._engine:\n self._engine.close()\n try:\n await self._engine.wait_closed()\n finally:\n self._engine.terminate()\n\n def connect(self):\n self._logger.debug(\"Running connect\")\n try:\n return self._loop.run_until_complete(\n create_engine(\n dsn=self._dsn,\n maxsize=self._max_connections,\n timeout=self._timeout,\n loop=self._loop,\n )\n )\n # return self._loop.run_until_complete(aiopg.create_pool(dsn=self._dsn, maxsize=self._max_connections,timeout=self._timeout,loop=self._loop))\n except (SQLAlchemyError, DatabaseError, OperationalError) as err:\n self._engine = None\n raise ProviderError(\"Connection Error: {}\".format(str(err)))\n except Exception as err:\n self._engine = None\n raise ProviderError(\"Engine Error, Terminated: {}\".format(str(err)))\n\n def connection(self):\n \"\"\"\n Get a connection\n \"\"\"\n self._logger.debug(\"PostgreSQL: Connecting to {}\".format(self._dsn))\n self._connection = None\n self._connected = False\n self.start()\n try:\n if self._engine:\n self._connection = self._loop.run_until_complete(self._engine.acquire())\n except (SQLAlchemyError, DatabaseError, OperationalError) as err:\n self._connection = None\n raise ProviderError(\"Connection Error: {}\".format(str(err)))\n except Exception as err:\n self._connection = None\n raise ProviderError(\"Engine Error, Terminated: {}\".format(str(err)))\n finally:\n return self\n\n async def release(self):\n \"\"\"\n Release a Connection object\n \"\"\"\n try:\n if self._connection:\n if self._engine:\n await self._engine.release(self._connection)\n else:\n self._connection.close()\n except Exception as err:\n raise ProviderError(\"Release Error, Terminated: {}\".format(str(err)))\n finally:\n self._connection = None\n\n async def prepare(self, 
sentence=\"\"):\n \"\"\"\n Preparing a sentence\n \"\"\"\n return [sentence, error]\n\n def test_connection(self):\n \"\"\"\n Test Connnection\n \"\"\"\n error = None\n row = {}\n if self._test_query is None:\n raise NotImplementedError()\n self._logger.debug(\"{}: Running Test\".format(self._provider))\n try:\n # cursor = self._loop.run_until_complete(self._connection.cursor(cursor_factory=NamedTupleCursor))\n # self._loop.run_until_complete(cursor.execute(self._test_query))\n result = self._loop.run_until_complete(\n self._connection.execute(self._test_query)\n )\n row = self._loop.run_until_complete(result.fetchone())\n if row:\n row = dict(row)\n # print(cursor.description)\n # row = dict()\n # print(row)\n # cursor.close()\n if error:\n self._logger.debug(\"Test Error: {}\".format(error))\n except Exception as err:\n error = str(err)\n raise ProviderError(message=str(err), code=0)\n finally:\n return [row, error]\n\n async def query(self, sentence=\"\"):\n \"\"\"\n Running a Query\n \"\"\"\n error = None\n if not sentence:\n raise EmptyStatement(\"Sentence is an empty string\")\n try:\n self._logger.debug(\"Running Query {}\".format(sentence))\n result = await self._connection.execute(sentence)\n if result:\n rows = await result.fetchall()\n self._result = [dict(row.items()) for row in rows]\n except (DatabaseError, OperationalError) as err:\n error = \"Query Error: {}\".format(str(err))\n raise ProviderError(error)\n except Exception as err:\n error = \"Query Error, Terminated: {}\".format(str(err))\n raise ProviderError(error)\n finally:\n return [self._result, error]\n\n async def queryrow(self, sentence=\"\"):\n \"\"\"\n Running Query and return only one row\n \"\"\"\n error = None\n if not sentence:\n raise EmptyStatement(\"Sentence is an empty string\")\n try:\n self._logger.debug(\"Running Query {}\".format(sentence))\n result = await self._connection.execute(sentence)\n if result:\n row = await result.fetchone()\n self._result = dict(row)\n except 
(DatabaseError, OperationalError) as err:\n error = \"Query Row Error: {}\".format(str(err))\n raise ProviderError(error)\n except Exception as err:\n error = \"Query Row Error, Terminated: {}\".format(str(err))\n raise ProviderError(error)\n finally:\n return [self._result, error]\n\n async def execute(self, sentence=\"\"):\n \"\"\"Execute a transaction\n get a SQL sentence and execute\n returns: results of the execution\n \"\"\"\n error = None\n if not sentence:\n raise EmptyStatement(\"Sentence is an empty string\")\n if not self._connection:\n self.connection()\n try:\n self._logger.debug(\"Execute Sentence {}\".format(sentence))\n result = await self._engine.execute(sentence)\n self._result = result\n except (DatabaseError, OperationalError) as err:\n error = \"Execute Error: {}\".format(str(err))\n raise ProviderError(error)\n except Exception as err:\n error = \"Execute Error, Terminated: {}\".format(str(err))\n raise ProviderError(error)\n finally:\n return [self._result, error]\n\n \"\"\"\n Cursor Context\n \"\"\"\n\n async def cursor(self, sentence):\n if not sentence:\n raise EmptyStatement(\"Sentence is an empty string\")\n self._logger.debug(\"Creating Cursor {}\".format(sentence))\n self._cursor = await self._connection.execute(sentence)\n # self._cursor = await self._connection.cursor(cursor_factory=NamedTupleCursor)\n # await self._cursor.execute(sentence)\n return self\n\n \"\"\"\n Cursor Iterator Context\n \"\"\"\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n try:\n data = dict(await self._cursor.fetchone())\n if data is not None:\n return data\n else:\n raise StopAsyncIteration\n except TypeError:\n raise StopAsyncIteration\n\n \"\"\"\n Fetching a Cursor\n \"\"\"\n\n async def fetchrow(self):\n pass\n\n async def fetch(self, number=1):\n pass\n\n\n\"\"\"\nRegistering this 
Provider\n\"\"\"\nregisterProvider(postgresql)\n","sub_path":"asyncdb/providers/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":10038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"557746453","text":"import os\nimport environ\n\n\nenv = environ.Env()\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = env(\n 'DJANGO_SECRET_KEY',\n default='local-secret-key'\n )\n\nDEBUG = env.bool('DJANGO_DEBUG', False)\n\nALLOWED_HOSTS = ['legacydapp.com', 'www.legacydapp.com', ]\nif DEBUG:\n ALLOWED_HOSTS += ['localhost', 'local', '127.0.0.1', ]\n\n\nINSTALLED_APPS = [\n 'djangocms_admin_style',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\nTHIRD_PARTY_APPS = [\n 'phonenumber_field',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.google',\n 'django_extensions',\n # 'django_eth_events',\n 'django_eth',\n]\n\nINTERNAL_APPS = [\n 'capsules',\n 'support',\n 'users',\n 'plugins',\n]\n\nINSTALLED_APPS += THIRD_PARTY_APPS + INTERNAL_APPS\n\nif DEBUG:\n INSTALLED_APPS += [\n 'debug_toolbar',\n ]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'legacy.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 
'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'legacy.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': env.db('DATABASE', default='postgres:///legacy'),\n}\n\nif DEBUG:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'mydatabase',\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Paris'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nPHONENUMBER_DB_FORMAT = 'E164'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static_root')\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'static'),\n]\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'upload')\n\n# User model and (all)Auth Stuff\n\nAUTH_USER_MODEL = 'users.User'\nLOGIN_REDIRECT_URL = 'home'\nLOGOUT_REDIRECT_URL = 'home'\n\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\nACCOUNT_AUTHENTICATION_METHOD = (\"email\")\nACCOUNT_EMAIL_REQUIRED = 
True\nACCOUNT_USERNAME_REQUIRED = False\n\nSITE_ID = 1\n\nAPPEND_SLASH = False\n\nADMINS = [('errors', 'errors@legacydapp.com'), ('legacy admins', 'admins@chainimpact.io')]\nMANAGERS = ADMINS\n\nDEFAULT_FROM_EMAIL = 'your.email@example.com'\n\n# email + sendgrid\nEMAIL_BACKEND = \"sendgrid_backend.SendgridBackend\"\n\n# EMAIL_HOST = env('EMAIL_HOST', default='smtp.sendgrid.net')\n# EMAIL_PORT = env('EMAIL_PORT', default='587')\n# EMIAL_HOST_USER = env('EMIAL_HOST_USER', default='sendgrid_username')\n# EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', default='sendgrid_password')\n# EMAIL_USE_TLS = True\nSENDGRID_API_KEY = env('SENDGRID_API_KEY', default='sendgrid_api_key')\nSENDGRID_SANDBOX_MODE_IN_DEBUG = False\n\n# BEHOLD THE HOLY LOGS\nLOGGING_FILE = '{}'.format(\n '/tmp/legacy.log' if DEBUG\n else '/var/log/legacy/legacy.log')\n\nLOGGING = {\n # OTHER OPTIONS\n \"version\": 1,\n \"disable_existing_loggers\": False,\n 'filters': {\n # OTHER FILTERS\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n # OTHER HANDLERS\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': LOGGING_FILE,\n },\n 'mail_admins': {\n 'level': 'ERROR',\n # 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n 'include_html': True\n },\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n # OTHER LOGGERS\n 'management_commands': {\n 'handlers': ['console', 'mail_admins', ],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['file', 'console', ],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\n\n# ETHEREUM_NODE_URL = os.environ['HTTP://127.0.0.1:7545']\nETHEREUM_NODE_URL = 'HTTP://127.0.0.1:7545'\n# ETHEREUM_MAX_WORKERS = os.environ['ETHEREUM_MAX_WORKERS']\nETHEREUM_MAX_WORKERS = 
3\n","sub_path":"legacy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"446939167","text":"import time\nfrom typing import List, Tuple, Union\n\nfrom selenium.common.exceptions import (\n NoSuchElementException,\n TimeoutException,\n)\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom utilites.decorators import add_logger\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n''' \nThe BasePage class is a base class that all the Pages that will inherit from this\nBasePage class. Some most common method is written here that we're gonna need \nall over the project/Pages to work with.\n\nwritten_by: jiaul_islam\n'''\n\n\nclass BasePage(object):\n \"\"\"\n All the Page will inherit this class BasePage Class to use the common\n functionality.\n \"\"\"\n\n def __init__(self, driver: WebDriver, timeout: int = 30) -> None:\n self._driver: WebDriver = driver\n self.timeout = timeout\n\n def find_element(self, *locator) -> WebElement:\n \"\"\" Find the element by the help of the locator that user shared \"\"\"\n try:\n return self._driver.find_element(*locator)\n except TypeError as error:\n print(f\"Unexpected Type Error [base.py || Line - 37]\"\n f\"\\n{repr(error)}\")\n except AttributeError as error:\n print(f\"Unexpected Attribute Error in find_element() ||\\n{repr(error)}\")\n except NoSuchElementException:\n pass\n\n def find_elements(self, *locator) -> Union[List[WebElement], None]:\n \"\"\" Find the elements by the help of the locator that user shared \"\"\"\n try:\n return 
self._driver.find_elements(*locator)\n except TypeError as error:\n print(f\"Unexpected Value Error [base.py || Line - 47]\"\n f\"\\n{repr(error)}\")\n except AttributeError as error:\n print(f\"Unexpected Attribute Error in find_elements() ||\\n{repr(error)}\")\n except NoSuchElementException:\n pass\n\n def is_visible(self, xpath_locator) -> bool:\n \"\"\" If the element is found in the Page then return True else False \"\"\"\n try:\n _element = WebDriverWait(self._driver, self.timeout).until(\n ec.visibility_of_element_located(xpath_locator))\n except TimeoutException:\n pass\n except AttributeError as error:\n print(f\"Unexpected Attribute Error [base.py || Line - 60]\"\n f\"\\n{repr(error)}\")\n else:\n return bool(_element)\n\n @add_logger\n def click(self, element_locator_xpath) -> None:\n \"\"\" Click a web element by a locator shared by the user \"\"\"\n WebDriverWait(driver=self._driver,\n timeout=self.timeout,\n ignored_exceptions=None\n ).until(ec.visibility_of_element_located(element_locator_xpath)).click()\n\n @add_logger\n def write(self, xpath_locator: Tuple[By, str], text: str) -> None:\n \"\"\" Write the text in web element by a locator shared by the user \"\"\"\n WebDriverWait(self._driver, self.timeout).until(\n ec.visibility_of_element_located(xpath_locator)).send_keys(text)\n\n @add_logger\n def hover_over(self, xpath_locator: str) -> None:\n \"\"\" Hover over the element shared by the user locator \"\"\"\n _element: Union[WebElement, None] = WebDriverWait(self._driver, self.timeout).until(\n ec.visibility_of_element_located(xpath_locator))\n if _element is not None:\n ActionChains(self._driver).move_to_element(_element).perform()\n else:\n raise AttributeError\n\n @add_logger\n def switch_to_frame(self, xpath_locator) -> None:\n \"\"\" Switch to a frame by a frame locator \"\"\"\n _frame: Union[WebElement, None] = self._driver.find_element(*xpath_locator)\n self._driver.switch_to.frame(_frame)\n\n @add_logger\n def double_click(self, 
xpath_locator: Tuple[By, str]) -> None:\n \"\"\" Double click on a element by a locator \"\"\"\n _element: Union[WebElement, None] = WebDriverWait(self._driver, self.timeout, 2).until(\n ec.visibility_of_element_located(xpath_locator))\n ActionChains(self._driver).double_click(_element).perform()\n\n @add_logger\n def select_all(self, xpath_locator: Tuple[By, str]) -> None:\n \"\"\" Sends CTRL + A action to a page \"\"\"\n WebDriverWait(self._driver, self.timeout).until(\n ec.visibility_of_element_located(xpath_locator)).click()\n\n ActionChains(self._driver).key_down(\n Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL).perform()\n\n def get_text(self, xpath_locator: Tuple[By, str]) -> str:\n \"\"\" Get the text value of a web element shared by a user \"\"\"\n try:\n _val_of_elem: str = WebDriverWait(self._driver, self.timeout).until(\n ec.visibility_of_element_located(xpath_locator)).get_attribute(\"value\")\n except TimeoutException as error:\n print(f\"Unexpected Timeout Error [base.py || Line - 145]\"\n f\"\\n{repr(error)}\")\n else:\n return _val_of_elem\n\n @add_logger\n def handle_frame_alert(self, frame_locator: str, ok_btn_locator: str) -> None:\n \"\"\" Checks for expected frames and press OK button in the frame \"\"\"\n self.switch_to_frame(frame_locator)\n self.click(ok_btn_locator)\n self._driver.switch_to.default_content()\n\n @add_logger\n def back_to_home_page(self, xpath_locator: Tuple[By, str]) -> None:\n \"\"\" Return to the homepage \"\"\"\n self.click(xpath_locator)\n\n def wait_for_loading_icon_disappear(self, *locator: Tuple[By, str], _time: float = 1, _range: int = 600) -> None:\n \"\"\" Wait for loading_icon to vanish \"\"\"\n _counter = 1\n while _counter <= _range:\n _loading_icons: list = self._driver.find_elements(*locator)\n if not len(_loading_icons):\n break\n time.sleep(_time)\n _counter += 
1\n","sub_path":"pages/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"78902074","text":"import sys\nimport time\nimport numpy as np\nimport os\nimport psutil\n\ndef run_job():\n '''\n Computes factors\n '''\n\n print('Started factors')\n k = 30000\n m,n = np.random.rand(k,k), np.random.rand(k,k)\n result = m * n\n print(result.shape)\n time.sleep(60)\n\n process = psutil.Process(os.getpid())\n print(process.memory_info().rss) # in bytes \n print('Finished factors')\n\n\nif __name__ == '__main__':\n run_job()","sub_path":"compute_factors.py","file_name":"compute_factors.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530734482","text":"import numpy as np\nfrom quant.stock.date import Date\nfrom quant.stock.stock import Stock\nfrom quant.project.multi_factor.alpha_model.exposure.alpha_factor import AlphaFactor\n\n\nclass AlphaHKInflow(AlphaFactor):\n\n \"\"\"\n 因子说明: 北向资金净流入(6月), 持股数变动*日均成交价\n \"\"\"\n\n def __init__(self):\n\n AlphaFactor.__init__(self)\n self.exposure_path = self.data_path\n self.raw_factor_name = 'alpha_raw_hk_inflow'\n\n def cal_factor_exposure(self, beg_date, end_date):\n\n \"\"\" 计算因子暴露 \"\"\"\n\n term = 120\n\n # read data\n beg_date = Date().change_to_str(beg_date)\n end_date = Date().change_to_str(end_date)\n share = Stock().read_factor_h5(\"HK2CHoldShare\")\n price = Stock().read_factor_h5(\"Price_Unadjust\")\n\n vol = price.mul(share) / 1000000\n vol = vol.T.dropna(how='all')\n vol = vol.loc[beg_date:end_date, :]\n vol = vol.fillna(0.0)\n\n vol_bias = vol.diff(periods=term)\n vol_bias = vol_bias.T\n vol_bias = vol_bias.replace(0, np.nan)\n\n # save data\n vol_bias = vol_bias.T.dropna(how='all').T\n self.save_alpha_factor_exposure(vol_bias, self.raw_factor_name)\n\nif __name__ == \"__main__\":\n\n from datetime 
import datetime\n beg_date = '20040101'\n end_date = datetime.today()\n\n self = AlphaHKInflow()\n self.cal_factor_exposure(beg_date, end_date)\n","sub_path":"project/multi_factor/alpha_model/exposure/alpha_factor_hkinflow.py","file_name":"alpha_factor_hkinflow.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"23643883","text":"\"\"\"\r\n357. Count Numbers with Unique Digits\r\n\"\"\"\r\n\r\nclass Solution:\r\n def countNumbersWithUniqueDigits(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: int\r\n \"\"\"\r\n if n == 0: return 1\r\n if n > 9: n = 9\r\n count, accum, tot = 9, 9, 10\r\n while n>1:\r\n count, accum, tot, n = count - 1, accum*count, tot + accum*count, n-1\r\n return tot ","sub_path":"Binary Search/357. Count Numbers with Unique Digits.py","file_name":"357. Count Numbers with Unique Digits.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"367987408","text":"__author__ = 'Tyraan'\nimport socketserver,time\nmyHost=''\nmyPort=50007\n\ndef now():\n return time.ctime(time.time())\n\nclass MyClientHandler(socketserver.BaseRequestHandler):\n def handle(self):\n print(self.client_address,now())\n time.sleep(5)\n while True:\n data = self.request.recv(1024)\n if not data:break\n reply = 'this is reply %s at %s'%(data,now())\n self.request.send(reply.encode())\n self.request.close()\n\nsocketserver.ThreadingTCPServer((myHost,myPort),MyClientHandler).serve_forever()\n","sub_path":"python/class-server.py","file_name":"class-server.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"145802375","text":"'''\nFaça um programa para a leitura de duas notas parciais de um aluno.\nO programa deve calcular a média alcançada por aluno e apresentar:\n- A mensagem \"Aprovado\", se a 
média alcançada for maior ou igual a sete;\n- A mensagem \"Reprovado\", se a média for menor do que sete;\n- A mensagem \"Aprovado com Distinção\", se a média for igual a dez.\n'''\nimport sys\n\ndef myinput(texto):\n retorno = ''\n if sys.version_info.major == 2:\n retorno = raw_input(texto)\n elif sys.version_info.major == 3:\n retorno = input(texto)\n return retorno\n\nlista = []\nqtde = 2\ni = 1\nwhile i <= qtde:\n n = myinput('Digite a {}a. nota: ' . format(i))\n\n if n.isalpha():\n print('O texto \"{}\" da {}a. nota não é um número' . format(n, i))\n else:\n n = float(n)\n lista.append(n)\n i += 1\n\nmedia = sum(lista) / qtde\nprint('A média das {} notas é {}' . format(qtde, media))\n\nif media == 10:\n print('Aprovado com Distinção')\nelif media >= 7:\n print('Aprovado')\nelse:\n print('Reprovado')\n\n","sub_path":"Maratona Data Science Brazil/Semana#01 - Python/02-estruturas-de-decisao/exercicio-05.py","file_name":"exercicio-05.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"95196972","text":"from agent_dir.agent import Agent\nimport scipy\nimport numpy as np\nfrom agent_dir.util import conv2d, DNN, get_real_reward\nimport tensorflow as tf\nimport time\nimport argparse\ndef prepro(o,image_size=[80,80]):\n \"\"\"\n Call this function to preprocess RGB image to grayscale image if necessary\n This preprocessing code is from\n https://github.com/hiwonjoon/tf-a3c-gpu/blob/master/async_agent.py\n \n Input: \n RGB image: np.array\n RGB screen of game, shape: (210, 160, 3)\n Default return: np.array \n Grayscale image, shape: (80, 80, 1)\n \n \"\"\"\n y = 0.2126 * o[:, :, 0] + 0.7152 * o[:, :, 1] + 0.0722 * o[:, :, 2]\n y = y.astype(np.uint8)\n resized = scipy.misc.imresize(y, [80,80])\n resized = np.expand_dims(resized.astype(np.float32),axis=2)\n return np.expand_dims(resized.astype(np.float32),axis=0)\n\ndef prepro_v2(I):\n I = I[35:195] # crop\n I = 
I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n I = np.expand_dims(I.astype(np.float32),axis=2)\n return np.expand_dims(I.astype(np.float32),axis=0)\n\nclass Agent_PG(Agent):\n def __init__(self, env, args):\n \"\"\"\n Initialize every things you need here.\n For example: building your model\n \"\"\"\n self.epoch = args.epoch\n self.lr = args.lr\n self.N = args.N\n self.load = args.load\n self.name = args.name\n self.gl_a = args.gl_a\n self.argmax = args.argmax\n self.save_path = args.save_path\n super(Agent_PG,self).__init__(env)\n self.build_model()\n \n if args.test_pg or args.load:\n #you can load your model here\n print('loading trained model')\n #self.env.reset()\n #for _ in range(2100):\n #_, r, done, info = self.env.step(0)\n #print(r,done,info)\n #if(done):\n #self.env.reset()\n \n ##################\n # YOUR CODE HERE #\n ##################\n\n\n def init_game_setting(self):\n \"\"\"\n\n Testing function will call this function at the begining of new game\n Put anything you want to initialize if necessary\n\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n pass\n\n def build_model(self):\n ### Action Making\n self.diff = tf.placeholder(tf.float32,[None,6400])\n \n def build_CNN(frame,reuse=False):\n output_dim = 2 \n with tf.variable_scope(\"CNN\") as scope:\n if (reuse):\n scope.reuse_variables()\n C1 = conv2d(frame,output_dim,3,3,2,2,name=\"conv1\")\n print(C1.shape)\n flatten = tf.reshape(C1,[-1,3200])\n print(flatten.shape)\n return flatten \n ###diff_feature = build_CNN(diff)\n\n #frames = tf.concat([feature1,feature2],1)\n D1 = DNN(self.diff,[6400,100],0)\n D2 = DNN(D1,[100,2],1)\n self.action_prob = D2\n self.action = tf.nn.softmax(self.action_prob)\n \n ### Calculating loss \n self.actions = tf.placeholder(tf.int32,[None])\n self.w_loss = 
tf.losses.log_loss(labels=tf.one_hot(self.actions,2),\n predictions=self.action)\n #loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.action_prob,labels=tf.one_hot(self.actions,2))\n #w_loss = tf.multiply(loss,self.weights)\n self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.w_loss)\n \n \n def pretrain(self):\n \"\"\"\n Implement your training algorithm here\n \"\"\"\n with tf.Session() as sess:\n saver = tf.train.Saver(max_to_keep=1)\n if not self.load:\n sess.run(tf.global_variables_initializer())\n print(\"New Model: \", self.name)\n else:\n latest_checkpoint = tf.train.latest_checkpoint(self.save_path)\n saver.restore(sess, latest_checkpoint)\n print(\"Saver Loaded: \" + latest_checkpoint)\n PATH = self.save_path + self.name + \"max.ckpt\"\n labels = np.load(\"./labels.npy\")\n diff = []\n action = []\n for tag in labels:\n diff.append(prepro_v2(tag[1]-tag[0]).ravel())\n action.append(tag[2])\n print(action)\n for epo in range(self.epoch):\n if (epo % 1 == 0):\n ckpt_path = saver.save(sess, PATH, global_step=epo)\n print(\"Model saved: \",ckpt_path)\n \n _, loss = sess.run([self.opt,self.w_loss], \n feed_dict = {\n self.diff : diff,\n self.actions : action\n })\n \n print(\"epo loss\",epo,loss)\n \n ##################\n # YOUR CODE HERE #\n ##################\n pass\n\n def make_action(self, observation, test=True):\n \"\"\"\n Return predicted action of your agent\n\n Input:\n observation: np.array\n current RGB screen of game, shape: (210, 160, 3)\n\n Return:\n action: int\n the predicted action from trained model\n \"\"\"\n ##################\n # YOUR CODE HERE #\n ##################\n return self.env.get_random_action()\n \ndef parse():\n parser = argparse.ArgumentParser(description=\"MLDS 2018 HW4\")\n parser.add_argument('--env_name', default=None, help='environment name')\n parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')\n parser.add_argument('--train_dqn', action='store_true', help='whether 
train DQN')\n parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')\n parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')\n try:\n from argument import add_arguments\n parser = add_arguments(parser)\n except:\n pass\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse()\n env = None\n agent = Agent_PG(env, args)\n agent.pretrain()\n","sub_path":"HW4/4-1/pretrain_agent.py","file_name":"pretrain_agent.py","file_ext":"py","file_size_in_byte":6425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"592064861","text":"### THE FUNCTION SEARCH_NODE WILL SEARCH THROUGH THE LIST OF NODES FROM THE USER'S\n### QUERY (WHICH CAN SUPPORT REGULAR EXPRESSIONS) AND MATCH A SINGLE OR MULTIPLE NODE\n\n### EXTRACT_NODES FUNCTION WILL EXTRACT ALL THE NODES FROM THE DATABASE OF NODE_OBJECTS SO \n### THAT IT CAN RUN THE SEARCH AGAINST THE LIST OF NODES\n\n### SEARCH_TEMPLATE FUNCTION WILL TAKE THE SEARCH RESULTS FROM THE LIST OF NODES AND RUN IT \n### AGAINST NODE_OBJECT TO DETERMINE THE PLATFORM AND TYPE AND COMPARE WITH THE NODE_TEMPLATE\n### DATABASE TO MATCH. ONCE MATCHED, IT WILL CHECK TO VERIFY AN EXISTING TEMPLATE IS AVAILABLE\n\n### SEARCH_POLICY FUNCTION WILL TAKE THE SEARCH RESULTS FROM THE LIST OF NODES AND RUN IT \n### AGAINST NODE_OBJECT TO DETERMINE THE PLATFORM, OS AND TYPE AND COMPARE WITH THE NODE_POLICY\n### DATABASE TO MATCH. IF A NODE IS NOT DEEMED AS A FIREWALL, IT WILL \n\n### NODE_ELEMENT FUNCTION APPENDS THE POSITION INDEX OF THE MATCH RESULTS (MATCH_NODE) AGAINST \n### THE OVERALL NODE_OBJECTS. 
THIS FUNCTION IS NEEDED TO BE CALLED ONLY WHEN SEARCH_TEMPLATE\n### FUNCTION IS NOT USED\n\nimport re\nimport initialize\nfrom processdb import process_json\nfrom get_property import get_template_directory\nfrom get_property import get_policy_directory\n\ndef search_node(argument_node,node_object):\n\n\tnode_list = extract_nodes(node_object)\n\tquery = re.compile(argument_node)\n\tsearch_result = list(filter(query.match,node_list))\n\n\treturn search_result\n\ndef extract_nodes(node_object):\n\n\tnode_list = []\n\tindex = 0\n\n\tfor node in node_object:\n\t\thostname = node_object[index]['hostname']\n\t\tnode_list.append(hostname)\n\t\tindex = index + 1\n\n\treturn node_list\t\n\ndef search_template(template_list,match_node,node_template,node_object,auditcreeper):\n\n\tsearch_result = []\n\tindex = 0\n\telement = 0\n\tfor node in match_node:\n\t\tfor node_obj in node_object:\n\t\t\tif(node == node_obj['hostname']):\n\n\t\t\t\t### INDEX GETS THE POSITION IN THE LIST AND APPENDS IT TO THE GLOBAL VARIABLE ELEMENT\n\t\t\t\tindex = node_object.index(node_obj)\n\t\t\t\tinitialize.element.append(index)\n\n\t\t\t\t### THIS SECTION WILL PULL OUT ALL THE TEMPLATES BELONGING TO THE SPECIFIC PLATFORM, OS AND TYPE OF DEVICE FROM THE TEMPLATE DATABASE\n\t\t\t\tfor node_temp in node_template:\n\t\t\t\t\tif(node_obj['platform'] == node_temp['platform'] and node_obj['opersys'] == node_temp['opersys'] and node_obj['type'] == node_temp['type']):\n\n#\t\t\t\t\t\tprint(\"NODE_TEMP: {}\".format(node_temp['templates']))\n\t\t\t\t\t\tif(auditcreeper):\n\t\t\t\t\t\t\ttemplate_node_list = []\n#\t\t\t\t\t\t\tprint(\"THIS IS NODE_TEMP['TEMPLATE'] for NODE {}\".format(node) + \": {}\".format(node_temp['templates'],))\n\t\t\t\t\t\t\tfor template_dir_name in node_temp['templates']:\n\t\t\t\t\t\t\t\ttemplate_name = template_dir_name.split('/')[-1]\n\t\t\t\t\t\t\t\ttemplate_node_list.append(template_name)\n\t\t\t\t\t\t\ttemplate_list.append(template_node_list)\n#\t\t\t\t\t\t\tprint(\"THIS IS THE 
TEMPLATE_NODE_LIST FOR HOST {} : {}\".format(node,template_node_list))\n#\t\t\t\t\t\t\tprint(\"THIS IS THE TEMPLATE_LIST IN SEARCH.PY : {}\".format(template_list))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t### THIS CALLS THE DIRECTORY MODULE WHICH WILL RETURN THE CORRECT DIRECTORY PATH BASED ON DEVICE PLATFORM, OS AND TYPE\n\t\t\t\t\t\t\tdirectory = get_template_directory(node_obj['platform'],node_obj['opersys'],node_obj['type'])\n\t\t\t\t\t\t\tfile = directory + template_list[element]\n\t\t\t\t\t\t\tif(file in node_temp['templates']):\n\t\t\t\t\t\t\t\tsearch_result.append(\"MATCH\")\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(\"[!] [NO ASSOCIATING TEMPLATE {}\".format(template_list[element]) + \" FOR NODE {}]\".format(node))\n\t\t\t\t\t\t\t\tsearch_result.append(\"NO MATCH\")\n\t\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\t\n\t\t\telse:\n\t\t\t\tcontinue\t\n\n#\tprint(\"TEMPLATE_LIST IN SEARCH.PY: {}\".format(template_list))\n\treturn search_result \n\ndef search_policy(policy_list,match_node,node_policy,node_object,auditcreeper):\n\n\tsearch_result = []\n\tindex = 0\n\tpolicy_index = 0\n\telement = 0\n\tfor node in match_node:\n\t\tfor node_obj in node_object:\n\t\t\tif(node == node_obj['hostname']):\n\n\t\t\t\t### THIS SECTION WILL PULL OUT ALL THE TEMPLATES BELONGING TO THE SPECIFIC PLATFORM, OS AND TYPE OF DEVICE FROM THE TEMPLATE DATABASE\n\t\t\t\tfor node_pol in node_policy:\n\t\t\t\t\tif(node == node_pol['hostname']):\n\n\t\t\t\t\t\t### INDEX GETS THE POSITION IN THE LIST AND APPENDS IT TO THE GLOBAL VARIABLE ELEMENT\n\t\t\t\t\t\tindex = node_object.index(node_obj)\n\t\t\t\t\t\tinitialize.element.append(index)\n\t\t\t\t\t\tpolicy_index = node_policy.index(node_pol)\n\t\t\t\t\t\tinitialize.element_policy.append(policy_index)\n\t\t\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\t\t\tprint(\"INDEX: {}\".format(initialize.element))\n#\t\t\t\t\t\tprint(\"POLICY_INDEX: 
{}\".format(initialize.element_policy))\n\n\t\t\t\t\t\tif(auditcreeper):\n\t\t\t\t\t\t\tpolicy_node_list = []\n\t\t\t\t\t\t\tfor policy_dir_name in node_pol['policy']:\n\t\t\t\t\t\t\t\tpolicy_name = policy_dir_name.split('/')[-1]\n\t\t\t\t\t\t\t\tpolicy_node_list.append(policy_name)\n\t\t\t\t\t\t\tpolicy_list.append(policy_node_list)\n\t\t\t\t\t\t\tsearch_result.append(\"MATCH\")\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t### THIS CALLS THE DIRECTORY MODULE WHICH WILL RETURN THE CORRECT DIRECTORY PATH BASED ON DEVICE PLATFORM, OS AND TYPE\n\t\t\t\t\t\t\tdirectory = get_policy_directory(node_pol['platform'],node_obj['opersys'],node_obj['type'])\n\t\t\t\t\t\t\tfile = directory + policy_list[element]\n\t\t\t\t\t\t\tif(file in node_pol['policy']):\n\t\t\t\t\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\t\t\t\t\tprint(\"NODE: {} NODE_POL['HOSTNAME']: {}\".format(node,node_pol['hostname']))\n\t\t\t\t\t\t\t\tsearch_result.append(\"MATCH\")\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(\"[!] 
[NO ASSOCIATING POLICY {}\".format(policy_list[element]) + \" FOR NODE {}]\".format(node))\n\t\t\t\t\t\t\t\tsearch_result.append(\"NO MATCH\")\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\t\n\t\t\telse:\n\t\t\t\tcontinue\t\n\n#\tprint(\"POLICY_LIST IN SEARCH.PY: {}\".format(policy_list))\n\treturn search_result \n\ndef node_element(match_node,node_object):\n\n\tindex = 0\n\n\tfor node in match_node:\n\t\tfor node_obj in node_object:\n\t\t\tif(node in node_obj['hostname']):\n\t\t\t\tindex = node_object.index(node_obj)\n\t\t\t\tinitialize.element.append(index)\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"299393653","text":"# -*- coding:utf-8 -*-\r\n# @Time : 2019-09-18 20:11\r\n# @Author : Richardo Mu\r\n# @FILE : q_13.PY\r\n# @Software : PyCharm\r\n\"\"\"\r\n题目:打印出所有的“水仙花数”,所谓“水仙花数”是指一个三位数,其各位数字立方和等于该数\r\n   本身。例如:153是一个“水仙花数”,因为153=1的三次方+5的三次方+3的三次方。\r\n\"\"\"\r\nclass Solution(object):\r\n def shui(self):\r\n for i in range(100,999):\r\n h = int(i/100)\r\n m = int(i/10%10)\r\n l = i%10\r\n if i == h**3 + m**3 + l**3:\r\n print(i)\r\na = Solution()\r\na.shui()","sub_path":"q_13.py","file_name":"q_13.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"59924688","text":"#!/usr/bin/python3\n\nimport rospy\n\n\ndef canvas_counter(path: str) -> bool:\n \"\"\"\n\n :param path: Path to file with numbers of canvases\n :return: True, if there are no more canvases. 
False, if there are a few canvases.\n \"\"\"\n try:\n f = open(path, \"r+\")\n number = int(f.read())\n f.seek(0)\n f.truncate()\n rospy.loginfo(f\"Previous number of canvases: {number}\")\n except Exception as e:\n rospy.loginfo(\"can't open the file!\")\n rospy.loginfo(e)\n exit()\n number -= 1\n rospy.loginfo(f\"Current number of canvases: {number}\")\n if number == 0:\n f.write(\"3\")\n f.close()\n rospy.loginfo(f\"Need to order canvases.\")\n return True\n else:\n f.write(str(number))\n f.close()\n rospy.loginfo(f\"Can continue drawing.\")\n return False\n","sub_path":"src/canvas_counter.py","file_name":"canvas_counter.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"516428830","text":"\"\"\" THE CPU Context contain all of the flags registers and pointers \"\"\"\n\nclass VMContext:\n def __init__(self, memory_size=4000000):\n self.IP = 0x0000 # Instruction pointer\n self.SP = 0x00 # Stack Pointer\n self.BP = 0x0000 # Base pointer\n\n self.stack = []\n self.memory = [None] * memory_size\n self.globals = [None] * 255\n self.code =[]\n\n\n def push(self,value):\n self.stack.append(value)\n self.SP+=1\n\n def pop(self):\n val = self.stack.pop()\n self.SP-=1\n return val\n\n\n\n","sub_path":"App/VM/VMContext.py","file_name":"VMContext.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"234270213","text":"from datetime import datetime, timedelta\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pydotplus as pydot\r\nimport seaborn as sns\r\nimport statsmodels.api as sm\r\nfrom sklearn import mixture\r\n\r\nfrom A2_add_variable import Convert_Angle\r\nfrom A3_select_data import DataRange2\r\nfrom reliability.Distributions import (Gamma_Distribution,\r\n Lognormal_Distribution,\r\n Weibull_Distribution)\r\nfrom reliability.Fitters 
import (Fit_Gamma_2P, Fit_Gamma_3P, Fit_Lognormal_2P,\r\n Fit_Weibull_2P, Fit_Weibull_3P,\r\n Fit_Weibull_Mixture)\r\nfrom X9_read_listed import m_day, mon, mon_name\r\n\r\nd_name = ['N','N-E-N','N-E','N-E-E',\r\n 'E','S-E-E','S-E','S-E-S',\r\n 'S','S-W-S','S-W','S-W-W',\r\n 'W','N-W-W','N-W','N-W-N']\r\n#----------------------------------------------------------------------------------------------------------------#\r\n#Wind Speed\r\ndef STAT_WS(data):\r\n x = data['WS95'].copy()\r\n table = []\r\n table.append([x.count(),x.mean(),x.std(),\r\n x.min(),x.quantile(0.25),\r\n x.quantile(0.5),x.quantile(0.75),\r\n x.max(),x.skew(),x.kurt()])\r\n df_table = pd.DataFrame(table)\r\n df_table.columns = ['count','mean','std','min','Q1','Q2','Q3','max','skew','kurt']\r\n return df_table\r\n#----------------------------------------------------------------------------------------------------------------#\r\n#Wind Direction\r\ndef STAT_WD(data):\r\n x = data['WD95'].copy()\r\n theta_p = x*np.pi/180\r\n S = (np.sin(theta_p)).sum()\r\n C = (np.cos(theta_p)).sum()\r\n S2 = (np.sin(2*theta_p)).sum()\r\n C2 = (np.cos(2*theta_p)).sum()\r\n\r\n R = np.sqrt((S**2)+(C**2))\r\n count = len(theta_p)\r\n R_bar = R/count\r\n Var = 1-R_bar\r\n std = (-2*np.log(Var))**0.5\r\n \r\n nu1 = np.arctan2(S,C)\r\n nu2 = np.arctan2(S2,C2)\r\n rho2 = (np.cos(2*(nu1-theta_p))).mean()\r\n \r\n dispersion = (1-rho2)/(2*R_bar**2)\r\n skewness = rho2*np.sin(nu2-(2*nu1))/((1-R_bar)**1.5)\r\n kurtosis = (rho2*np.cos(nu2-(2*nu1))-R_bar**4)/((1-R_bar)**2)\r\n mean = nu1*180/np.pi\r\n table = []\r\n table.append([count,mean,R_bar,std,dispersion,skewness,kurtosis])\r\n df_table = pd.DataFrame(table)\r\n df_table.columns = ['count','mean','R_bar','std','dispersion','skewness','kurtosis']\r\n return df_table\r\n#----------------------------------------------------------------------------------------------------------------#\r\n#Year\r\ndef Stats_Year(data,year,variable ='WS95'): \r\n data01 = 
data[['DateTime','{}'.format(variable)]].copy()\r\n df = data01.dropna(how='any')\r\n logicY = (df['DateTime'].apply(lambda x:x.year) ==year)\r\n dataY = df[logicY].copy()\r\n dataY.reset_index(inplace=True,drop=True)\r\n print(\"Select year={}, {}\".format(year,variable))\r\n if variable == 'WS95':\r\n table = STAT_WS(dataY)\r\n elif variable== 'WD95':\r\n table = STAT_WD(dataY)\r\n table['Year'] = year\r\n return table\r\n\r\n#Month\r\ndef Stats_Month(data,year,month,variable='WS95'):\r\n data01 = data[['DateTime','{}'.format(variable)]].copy()\r\n df = data01.dropna(how='any')\r\n logicY = (df['DateTime'].apply(lambda x:x.year) ==year)\r\n dataY = df[logicY].copy()\r\n dataY.reset_index(inplace=True,drop=True)\r\n logicM = (dataY['DateTime'].apply(lambda x:x.month) ==month)\r\n dataM = dataY[logicM].copy()\r\n dataM.reset_index(inplace=True,drop=True)\r\n print(\"Select {}-{}\".format(year,month))\r\n if variable == 'WS95':\r\n table = STAT_WS(dataM)\r\n elif variable== 'WD95':\r\n table = STAT_WD(dataM)\r\n table['Year'] = year\r\n table['Month']=month\r\n return table\r\n# Monthly\r\ndef Stats_12month(data,year,variable='WS95'):\r\n df_mon = pd.DataFrame()\r\n for i in range(12):\r\n table = Stats_Month(data,year,i+1,variable)\r\n df_mon= pd.concat([df_mon,table],axis=0)\r\n return df_mon\r\n\r\n# Season\r\ndef Stats_Season(data,year,season,variable='WS95'):\r\n data01 = data[['DateTime','{}'.format(variable)]].copy()\r\n #data01['Year'] = data['DateTime'].apply(lambda x: x.year)\r\n data01['Month'] = data['DateTime'].apply(lambda x: x.month)\r\n seasons = [(month%12 + 3)//3 for month in range(1, 13)]\r\n month_to_season = dict(zip(range(1,13), seasons))\r\n data01['Season']= data01['Month'].apply(lambda x:month_to_season[x])\r\n df = data01.dropna(how='any')\r\n logicY = (df['DateTime'].apply(lambda x:x.year) ==year)\r\n dataY = df[logicY].copy()\r\n dataY.reset_index(inplace=True,drop=True)\r\n # Season\r\n logicS = (dataY['Season'] == season)\r\n dataS = 
dataY[logicS].copy()\r\n dataS.reset_index(inplace=True,drop=True)\r\n print(\"Select {}-{}\".format(year,season))\r\n if variable == 'WS95':\r\n table = STAT_WS(dataS)\r\n elif variable== 'WD95':\r\n table = STAT_WD(dataS)\r\n table['Year'] = year\r\n table['Season']=season\r\n Season_dict = {1:'Winter',2:'Spring',3:'Summer',4:'Autumn'}\r\n table['Seasons']=table['Season'].apply(lambda x:Season_dict[x])\r\n return table\r\n\r\n\r\n# Seasonly\r\ndef Stats_4seasons(data,year,variable='WS95'):\r\n df_sea = pd.DataFrame()\r\n for i in range(4):\r\n table = Stats_Season(data,year,i+1,variable)\r\n df_sea= pd.concat([df_sea,table],axis=0)\r\n return df_sea\r\n#----------------------------------------------------------------------------------------------------------------#\r\n# bubble_plot\r\ndef Stat_Direction_Probability(data,year):\r\n data = Convert_Angle(data,95,-11.25,348.75,'h')\r\n d_name = ['N','N-E-N','N-E','N-E-E',\r\n 'E','S-E-E','S-E','S-E-S',\r\n 'S','S-W-S','S-W','S-W-W',\r\n 'W','N-W-W','N-W','N-W-N']\r\n #Year\r\n logicY = (data[\"DateTime\"].apply(lambda x: x.year))==year\r\n df_Y = data[logicY].copy()\r\n \r\n wd_d = pd.DataFrame()\r\n wd_p = pd.DataFrame()\r\n wd_d['Direction'] = d_name\r\n for i in range(12):\r\n #month\r\n \r\n logicM = (df_Y[\"DateTime\"].apply(lambda x: x.month))==i+1\r\n df_M = df_Y[logicM].copy()\r\n wd = df_M['WD95h']\r\n a,b = np.histogram(wd,bins = np.arange(-11.25,348.76,22.5))\r\n wd_d['{}'.format(mon_name[i])] = a\r\n\r\n N = wd.count()\r\n print(N)\r\n wd_p['{}'.format(mon_name[i])] = wd_d['{}'.format(mon_name[i])] /N*100\r\n\r\n mp_tab = []\r\n for i in range(12):\r\n a_new = wd_d.sort_values(by=['{}'.format(mon_name[i])],ascending=False)\r\n a_new.reset_index(inplace=True,drop=True)\r\n Mp1 = a_new['Direction'].iloc[0]\r\n Mp2 = a_new['Direction'].iloc[1]\r\n Mp3 = a_new['Direction'].iloc[2]\r\n mp_tab.append([Mp1,Mp2,Mp3])\r\n mp_tab = np.array(mp_tab)\r\n return wd_d,wd_p,mp_tab\r\n\r\ndef GetVariable(data):\r\n 
V_mean = data['WS95'].mean()\r\n S = data['sin95'].sum()\r\n C = data['cos95'].sum()\r\n R = np.sqrt(S**2+C**2)\r\n R_bar = R/len(data)\r\n D_mean = np.arctan2(S,C)\r\n wd = data['WD95h'].copy()\r\n a,b = np.histogram(wd,bins = np.arange(-11.25,348.76,22.5))\r\n a_norm = np.round(a/a.sum()*100,2)\r\n P_North = a_norm[0]+a_norm[1]+a_norm[-1]\r\n P_South = a_norm[8]+a_norm[9]+a_norm[10]\r\n dataS = ([V_mean,D_mean,R_bar,P_North,P_South])\r\n return dataS\r\n\r\ndef Stat_Variable(data,moving,duration):\r\n date0 = \"2017-01-01 00:00\"\r\n start_date = datetime.strptime(date0, '%Y-%m-%d %H:%M')\r\n S_var =[]\r\n S_date=[]\r\n N =int(np.floor(len(data)/1440))\r\n steps = int(np.floor((N-duration)/moving) +1)\r\n for i in range(steps):\r\n #print(start_date)\r\n dataA = DataRange2(data,start_date,duration)\r\n dataAS = dataA[['DateTime','WS95','WD95h','sin95','cos95']].copy()\r\n Stat_var = GetVariable(dataAS)\r\n S_var.append(Stat_var)\r\n S_date.append(start_date)\r\n start_date = start_date+timedelta(days=moving)\r\n Stat = np.array(S_var)\r\n Stat =pd.DataFrame(S_var)\r\n Stat.columns = ['V_mean','D_mean','R_bar','P_North','P_South']\r\n Stat['Date'] = S_date\r\n return Stat\r\n\r\n# Histogram BIW ------------------------------------------------------------------------------------------------------------------\r\n\r\ndef HistogramPLOT(data,month,year):\r\n #Initiate\r\n Situation = []\r\n mon = ['January','Febuary','March','April','May','June','July','August','September','October','November','December']\r\n data01 = data[['DateTime','WS95']].copy()\r\n data01.dropna(how='any',inplace=True)\r\n logicY = (data01[\"DateTime\"].apply(lambda x: x.year)==(year))\r\n data01 = data01[logicY].copy()\r\n fig = plt.figure(figsize=(20,32), facecolor='w', edgecolor='r')\r\n #Plotting 12 graph\r\n xvals = np.linspace(0,30,1000)\r\n for i in range(month):\r\n ax = plt.subplot2grid((4,3),(int(np.floor(i/3)),int(i%3)))\r\n logic = (data01[\"DateTime\"].apply(lambda x: 
x.month))==(i+1)\r\n ws = data01['WS95'][logic]\r\n ws = ws+0.0001\r\n failures = []\r\n censored = []\r\n threshold = 30\r\n for item in ws:\r\n if item>threshold:\r\n censored.append(threshold)\r\n else:\r\n failures.append(item)\r\n xvals = np.linspace(0,30,1000)\r\n if (np.sum(logic)!=0):\r\n ax.hist(ws,bins=30,density=True)\r\n hist,edge = np.histogram(np.array(ws),bins=1000,range=(0,30) ,density=True)\r\n ax.set_ylim(0,0.18)\r\n ax.set_xlim(0,30)\r\n ax.set_xticks([0,5,10,15,20,25,30])\r\n ax.tick_params(axis=\"x\", labelsize=30)\r\n ax.tick_params(axis=\"y\", labelsize=26)\r\n ax.set_title('{}'.format(mon[i]),fontweight='bold',size=30)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\ndef Histogram_Season(data,year,season):\r\n #Initiate\r\n Seasons= {1:'Winter',2:'Spring',3:'Summer',4:'Autumn',}\r\n logicY = (data[\"DateTime\"].apply(lambda x: x.year)==(year))\r\n data01 = data[logicY].copy() \r\n fig, (ax) = plt.subplots(1, 1,figsize=(10, 10), gridspec_kw = {'wspace':0, 'hspace':0})\r\n #Plotting 12 graph\r\n logic = (data01[\"Season\"]==(season))\r\n ws = data01['WS95'][logic]\r\n if (np.sum(logic)!=0):\r\n ax.hist(ws,bins=30,density=True)\r\n ax.set_ylim(0,0.14)\r\n ax.set_xlim(0,30)\r\n ax.set_xticks([0,5,10,15,20,25,30])\r\n ax.tick_params(axis=\"x\", labelsize=30)\r\n ax.tick_params(axis=\"y\", labelsize=26)\r\n ax.set_title('{}'.format(Seasons[season]),fontweight='bold',size=30)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\ndef HistogramPLOT_wbm(data,month,year):\r\n #Initiate\r\n Situation = []\r\n mon = ['January','Febuary','March','April','May','June','July','August','September','October','November','December']\r\n data01 = data[['DateTime','WS95']].copy()\r\n data01.dropna(how='any',inplace=True)\r\n logicY = (data01[\"DateTime\"].apply(lambda x: x.year)==(year))\r\n data01 = data01[logicY].copy()\r\n fig = plt.figure(figsize=(20,32), facecolor='w', edgecolor='r')\r\n #Plotting 12 graph\r\n xvals = np.linspace(0,30,1000)\r\n for i in 
range(month):\r\n ax = plt.subplot2grid((4,3),(int(np.floor(i/3)),int(i%3)))\r\n logic = (data01[\"DateTime\"].apply(lambda x: x.month))==(i+1)\r\n ws = data01['WS95'][logic]\r\n ws = ws+0.0001\r\n failures = []\r\n censored = []\r\n threshold = 30\r\n for item in ws:\r\n if item>threshold:\r\n censored.append(threshold)\r\n else:\r\n failures.append(item)\r\n xvals = np.linspace(0,30,1000)\r\n if (np.sum(logic)!=0):\r\n ax.hist(ws,bins=30,density=True)\r\n hist,edge = np.histogram(np.array(ws),bins=1000,range=(0,30) ,density=True)\r\n wbm = Fit_Weibull_Mixture(failures=failures,right_censored=censored,show_plot=False,print_results=False)\r\n part1_pdf = Weibull_Distribution(alpha=wbm.alpha_1,beta=wbm.beta_1).PDF(xvals=xvals,show_plot=False)\r\n part2_pdf = Weibull_Distribution(alpha=wbm.alpha_2,beta=wbm.beta_2).PDF(xvals=xvals,show_plot=False)\r\n Mixture_PDF = part1_pdf*wbm.proportion_1+part2_pdf*wbm.proportion_2\r\n ax.plot(xvals,Mixture_PDF,label='Weibull_Mixture')\r\n ax.legend()\r\n ax.set_ylim(0,0.18)\r\n ax.set_xlim(0,30)\r\n ax.set_xticks([0,5,10,15,20,25,30])\r\n ax.tick_params(axis=\"x\", labelsize=30)\r\n ax.tick_params(axis=\"y\", labelsize=26)\r\n ax.set_title('{}'.format(mon[i]),fontweight='bold',size=30)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\ndef HistogramPLOT_all(data,month,year):\r\n #Initiate\r\n Situation = []\r\n mon = ['January','Febuary','March','April','May','June','July','August','September','October','November','December']\r\n #Get just Full day data\r\n logicF = (data[\"isFULL\"].apply(lambda x: x)==(1))\r\n data01 = data[logicF].copy()\r\n data01.fillna(method='ffill',inplace=True)\r\n \r\n logicY = (data01[\"DateTime\"].apply(lambda x: x.year)==(year))\r\n data01 = data01[logicY].copy()\r\n\r\n fig = plt.figure(figsize=(24,18), dpi= 80, facecolor='w', edgecolor='r')\r\n #Plotting 12 graph\r\n xvals = np.linspace(0,30,1000)\r\n for i in range(month):\r\n ax = plt.subplot2grid((4,3),(int(np.floor(i/3)),int(i%3)))\r\n logic = 
(data01[\"DateTime\"].apply(lambda x: x.month))==(i+1)\r\n ws = data01['WS95'][logic]\r\n ws = ws+0.0001\r\n failures = []\r\n censored = []\r\n threshold = 30\r\n for item in ws:\r\n if item>threshold:\r\n censored.append(threshold)\r\n else:\r\n failures.append(item)\r\n xvals = np.linspace(0,30,1000)\r\n print(ws.shape)\r\n if (np.sum(logic)!=0):\r\n ax.hist(ws,bins=30,normed=True)\r\n hist,edge = np.histogram(np.array(ws),bins=1000,range=(0,30) ,normed=True)\r\n wb2 = Fit_Weibull_2P(failures=failures,show_probability_plot=False,print_results=False)\r\n wb3 = Fit_Weibull_3P(failures=failures,show_probability_plot=False,print_results=False)\r\n gm2 = Fit_Gamma_2P(failures=failures,show_probability_plot=False,print_results=False)\r\n gm3 = Fit_Gamma_3P(failures=failures,show_probability_plot=False,print_results=False)\r\n ln2 = Fit_Lognormal_2P(failures=failures,show_probability_plot=False,print_results=False)\r\n wbm = Fit_Weibull_Mixture(failures=failures,right_censored=censored,show_plot=False,print_results=False)\r\n \r\n wb2_pdf = Weibull_Distribution(alpha=wb2.alpha, beta=wb2.beta).PDF(xvals=xvals, show_plot=True,label='Weibull_2P' )\r\n wb3_pdf = Weibull_Distribution(alpha=wb3.alpha, beta=wb3.beta,gamma=wb3.gamma).PDF(xvals=xvals, show_plot=True,label='Weibull_3P')\r\n gm2_pdf = Gamma_Distribution(alpha=gm2.alpha, beta=gm2.beta).PDF(xvals=xvals, show_plot=True,label='Gamma_2P' )\r\n gm3_pdf = Gamma_Distribution(alpha=gm3.alpha, beta=gm3.beta, gamma=gm3.gamma).PDF(xvals=xvals, show_plot=True,label='Gamma_3P')\r\n ln2_pdf = Lognormal_Distribution(mu=ln2.mu, sigma=ln2.sigma).PDF(xvals=xvals, show_plot=True,label='Lognormal_2P' )\r\n \r\n part1_pdf = Weibull_Distribution(alpha=wbm.alpha_1,beta=wbm.beta_1).PDF(xvals=xvals,show_plot=False)\r\n part2_pdf = Weibull_Distribution(alpha=wbm.alpha_2,beta=wbm.beta_2).PDF(xvals=xvals,show_plot=False)\r\n Mixture_PDF = part1_pdf*wbm.proportion_1+part2_pdf*wbm.proportion_2\r\n 
ax.plot(xvals,Mixture_PDF,label='Weibull_Mixture')\r\n ax.legend()\r\n ax.set_ylim(0,0.16)\r\n ax.set_xlim(0,30)\r\n ax.set_xticks([0,5,10,15,20,25,30])\r\n ax.tick_params(axis=\"x\", labelsize=20)\r\n ax.tick_params(axis=\"y\", labelsize=20)\r\n ax.set_title('{}'.format(mon[i]),fontweight='bold',size=20)\r\n plt.tight_layout()\r\n plt.show()\r\n","sub_path":"00_Function/D1_stats_value.py","file_name":"D1_stats_value.py","file_ext":"py","file_size_in_byte":15811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"417922049","text":"import tasker\nimport time\nimport socket\n\n\nclass Worker(\n tasker.worker.Worker,\n):\n name = 'test_worker'\n config = {\n 'encoder': {\n 'compressor': 'dummy',\n 'serializer': 'pickle',\n },\n 'monitoring': {\n 'host_name': socket.gethostname(),\n 'stats_server': {\n 'host': 'localhost',\n 'port': 9999,\n }\n },\n 'connector': {\n 'type': 'redis',\n 'params': {\n 'host': 'localhost',\n 'port': 6379,\n 'password': 'e082ebf6c7fff3997c4bb1cb64d6bdecd0351fa270402d98d35acceef07c6b97',\n 'database': 0,\n },\n },\n 'timeouts': {\n 'soft_timeout': 3.0,\n 'hard_timeout': 35.0,\n 'critical_timeout': 0.0,\n },\n 'executor': {\n 'type': 'serial',\n },\n 'profiler': {\n 'enabled': True,\n 'num_of_slowest_methods_to_log': 50,\n },\n 'max_tasks_per_run': 1000000,\n 'tasks_per_transaction': 25000,\n 'max_retries': 3,\n 'report_completion': False,\n }\n\n def init(\n self,\n ):\n pass\n\n def work(\n self,\n run_type,\n ):\n if run_type == 'start':\n self.logger.error('start : {}'.format(time.time()))\n elif run_type == 'end':\n self.logger.error('end : {}'.format(time.time()))\n\n\ndef main():\n worker = Worker()\n worker.init_worker()\n\n worker.apply_async_one(\n run_type='start',\n )\n\n before_push = time.time()\n for i in range(1):\n for i in range(5):\n tasks = []\n for j in range(20000):\n task_obj = worker.craft_task(\n run_type='',\n )\n tasks.append(task_obj)\n 
worker.apply_async_many(\n tasks=tasks,\n )\n after_push = time.time()\n print('pushing time: {}'.format(after_push - before_push))\n\n worker.apply_async_one(\n run_type='end',\n )\n\n supervisor = tasker.supervisor.Supervisor(\n worker_class=Worker,\n concurrent_workers=4,\n )\n supervisor.start()\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n print(e)\n print('killed')\n","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"417642343","text":"import data\n\ndef HoursCalc(value):\n return int((value[0] * pow(2, 16) + value[1]) / 3600)\n \ndef CounterCalc(value):\n return int((value[0] * pow(2, 64)) + (value[1] * pow(2, 32)) + (value[2] * pow(2 ,16)) + value[3])\n \ndef StatesCalc(value, language):\n states_list = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n index = 0\n \n for i in range(len(data.states)):\n state = data.states[i]\n for a in range(len(state)):\n if value[i] & pow(2, a):\n states_list[index] = state[a][language]\n \n index += 1\n \n return states_list, index\n \ndef AlarmsCalc(value, language):\n alarms_list = [ \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\" ]\n index = 0\n \n for i in range(len(data.alarms)):\n alarm = data.alarms[i]\n for a in range(len(alarm)):\n if value[i] & pow(2, a):\n alarms_list[index] = alarm[a][language]\n \n index += 1\n \n return alarms_list, 
index\n","sub_path":"lambda/fn.py","file_name":"fn.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"246457708","text":"\"\"\"\nProgram Name: Assignment #38 Filtering Value\nProgrammer: Hyun Wook Kim\nDate: 2018.07.04\nDescription:\n숫자 리스트를 입력 받고 공백글자로 나누어 저장한 다음\n이중에서 짝수만 들어있는 새로운 배열을 만들어 출력하는\n프로그램을 작성하라.\n\"\"\"\n\nif __name__ == '__main__':\n _input = input(\"Enter a list of numbers, separated by spaces: \").split(\" \")\n out_list = list()\n\n for i in range(len(_input)):\n if int(_input[i]) % 2 == 0:\n out_list.append(int(_input[i]))\n\n print(out_list)\n","sub_path":"Taco101_Python/Assignment/assignment38.py","file_name":"assignment38.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"600120684","text":"import datetime\n\n__author__ = 'Judge'\n\nfrom contextlib import contextmanager\n\nclass cursor:\n def execute(self, x):\n print(x)\n\n@contextmanager\ndef transaction_cursor():\n cur = cursor()\n try:\n cur.execute('begin')\n yield cur\n cur.execute('commit')\n except:\n cur.execute('rollback')\n raise\n\nwith transaction_cursor() as c:\n # всё под with будет завёрнуто в транзакцию\n c.execute('...')\n c.execute('...')\n c.execute('...')\n\n\nx = datetime.datetime.today().strftime(\"%Y-%m-%d %H-%M-%S\")\nprint(x)","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"123421386","text":"import numpy as np\nimport pylab as pl\ndef make_c_coor(boxsize,nnn):\n dsx = boxsize/nnn\n\n xx01 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn)+0.5*dsx\n xx02 = np.linspace(-boxsize/2.0,boxsize/2.0,nnn)+0.5*dsx\n xi2,xi1 = np.meshgrid(xx01,xx02)\n return xi1,xi2\n#--------------------------------------------------------------------\ndef 
lens_equation_sie(x1,x2,lpar):\n xc1 = lpar[0] #x coordinate of the center of lens (in units of Einstein radius).\n xc2 = lpar[1] #y coordinate of the center of lens (in units of Einstein radius).\n q = lpar[2] #Ellipticity of lens.\n rc = lpar[3] #Core size of lens (in units of Einstein radius).\n re = lpar[4] #Einstein radius of lens.\n pha = lpar[5] #Orintation of lens.\n\n phirad = np.deg2rad(pha)\n cosa = np.cos(phirad)\n sina = np.sin(phirad)\n\n xt1 = (x1-xc1)*cosa+(x2-xc2)*sina\n xt2 = (x2-xc2)*cosa-(x1-xc1)*sina\n\n phi = np.sqrt(xt2*xt2+xt1*q*xt1*q+rc*rc)\n sq = np.sqrt(1.0-q*q)\n pd1 = phi+rc/q\n pd2 = phi+rc*q\n fx1 = sq*xt1/pd1\n fx2 = sq*xt2/pd2\n qs = np.sqrt(q)\n\n a1 = qs/sq*np.arctan(fx1)\n a2 = qs/sq*np.arctanh(fx2)\n\n xt11 = cosa\n xt22 = cosa\n xt12 = sina\n xt21 =-sina\n\n fx11 = xt11/pd1-xt1*(xt1*q*q*xt11+xt2*xt21)/(phi*pd1*pd1)\n fx22 = xt22/pd2-xt2*(xt1*q*q*xt12+xt2*xt22)/(phi*pd2*pd2)\n fx12 = xt12/pd1-xt1*(xt1*q*q*xt12+xt2*xt22)/(phi*pd1*pd1)\n fx21 = xt21/pd2-xt2*(xt1*q*q*xt11+xt2*xt21)/(phi*pd2*pd2)\n\n a11 = qs/(1.0+fx1*fx1)*fx11\n a22 = qs/(1.0-fx2*fx2)*fx22\n a12 = qs/(1.0+fx1*fx1)*fx12\n a21 = qs/(1.0-fx2*fx2)*fx21\n\n rea11 = (a11*cosa-a21*sina)*re\n rea22 = (a22*cosa+a12*sina)*re\n rea12 = (a12*cosa-a22*sina)*re\n rea21 = (a21*cosa+a11*sina)*re\n\n y11 = 1.0-rea11\n y22 = 1.0-rea22\n y12 = 0.0-rea12\n y21 = 0.0-rea21\n\n jacobian = y11*y22-y12*y21\n mu = 1.0/jacobian\n\n res1 = (a1*cosa-a2*sina)*re\n res2 = (a2*cosa+a1*sina)*re\n return res1,res2,mu\n#--------------------------------------------------------------------\n\ndef main(npl):\n\n boxsize = 6.0 # in the units of Einstein Radius\n nnn = 64\n #nnn = 4096\n\n xi1,xi2 = make_c_coor(boxsize,nnn)\n #----------------------------------------------------------------------\n infile = './cat_lenses.dat'\n lpar = np.loadtxt(infile,unpack=True)\n lpar = lpar.T\n\n for i in xrange(npl):\n ag1,ag2,mua = lens_equation_sie(xi1,xi2,lpar[i])\n 
ag1.astype(\"float32\").tofile(\"ag1_\"+str(i)+\".bin\")\n ag2.astype(\"float32\").tofile(\"ag2_\"+str(i)+\".bin\")\n pl.figure()\n pl.contourf(np.sqrt(ag1*ag1+ag2*ag2))\n pl.colorbar()\n\n return 0\n#------------------------------------------------------------------------------\nif __name__ == '__main__':\n main(5)\n pl.show()\n","sub_path":"inputs/ipt_rec_mlpl.py","file_name":"ipt_rec_mlpl.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"565668080","text":"#!env python3\n# -*- coding: utf-8 -*-\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\nwebpage_response = requests.get('https://google.com/')\nwebpage = webpage_response.content\nsoup = BeautifulSoup(webpage, \"html.parser\")\n\n# Object Types\ncontent = '

Click to learn more about each turtle

'\nsoup = BeautifulSoup(content, \"html.parser\")\nprint(soup.p)\nprint(soup.p.string)\n\ncontent = '''\n
    \n
  • 1 cup flour
  • \n
  • 1/2 cup sugar
  • \n
  • 2 tbsp oil
  • \n
  • 1/2 tsp baking soda
  • \n
  • ? cup chocolate chips
  • \n
  • 1/2 tsp vanilla
  • \n
  • 2 tbsp milk
  • \n
\n'''\ncontent = content.rstrip()\nsoup = BeautifulSoup(content, \"html.parser\")\nfor child in soup.ul.children:\n print(child)\n\nfor parent in soup.ul.parents:\n print(parent)\n\n# Find All\nprint(soup.find_all(\"li\"))\nprint(soup.find_all(re.compile('u?li?')))\nprint(soup.find_all(['ul' , 'li']))\nprint(soup.find_all(attrs={'class':'banner'}))\nprint(soup.find_all(attrs={'class':'banner', 'id':'abc'}))\nprint(soup.find_all(lambda tag: tag.has_attr('class') and tag.attrs['class'][0] == 'banner'))\n\n# CSS selecter\nprint(soup.select('.banner'))\n\n# reading text\ncontent = '''\n

Search Results for: Funfetti

\n'''\nsoup = BeautifulSoup(content, \"html.parser\")\nprint(soup.get_text())\nprint(soup.get_text('|'))\n","sub_path":"python/web/beautiful_soup/beautiful_soup.py","file_name":"beautiful_soup.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"562020179","text":"# coding=utf-8\n\nimport ctypes\nclass dy_array:\n def __init__(self, list=[]):\n self.capacity = 100\n self.num = 0\n self.a = self.new_array(self.capacity)\n if len(list) >= self.capacity:\n self._resize(len(list) * 2)\n for i in range(len(list)):\n self.a[i] = list[i]\n self.num = len(list)\n\n def size(self) -> int:\n return self.num\n\n def to_list(self) -> list:\n l = []\n for i in range(self.num):\n l.append(self.a[i])\n return l\n\n def from_list(self, l: list):\n if len(l) >= self.capacity:\n self._resize(len(l))\n if len(l) > 0:\n for i in range(len(l)):\n self.a[i] = l[i]\n self.num = len(l)\n return self\n\n def append_to_tail(self, obj):\n if self.num == self.capacity:\n self._resize(self.capacity * 2)\n self.a[self.num] = obj\n self.num += 1\n\n def append_to_head(self, value):\n if self.num == self.capacity:\n self._resize(self.capacity * 2)\n for i in range(self.num, 0, -1):\n self.a[i] = self.a[i - 1]\n self.a[0] = value\n self.num += 1\n\n def map(self, f):\n for i in range(self.num):\n self.a[i] = f(self.a[i])\n\n def reduce(self, f, initial_state):\n element = 0\n state = initial_state\n for i in range(self.num):\n state = f(state, self.a[element])\n element += 1\n return state\n\n def find(self, value):\n for i in self.to_list():\n if i is value:\n return True\n return False\n\n def filter(self, value):\n lst_filter = []\n for i in self.a[:self.num]:\n if i is not value:\n lst_filter.append(i)\n return lst_filter\n\n def empty(self):\n return None\n\n def remove(self, value):\n for i in range(self.num):\n if self.a[i] == value:\n for j in range(i, self.num - 1):\n self.a[j] = self.a[j + 
1]\n self.a[self.num - 1] = None\n self.num -= 1\n return\n raise ValueError('not found')\n\n def combine(self, dy1, dy2):\n if dy1 is None:\n if dy2.num >= self.capacity:\n self._resize(dy2.num * 2)\n for i in range(dy2.num):\n self.a[i] = dy2.a[i]\n self.num += 1\n elif dy2 is None:\n if dy1.num >= self.capacity:\n self._resize(dy1.num * 2)\n for i in range(dy1.num):\n self.a[i] = dy1.a[i]\n self.num += 1\n else:\n if (dy1.num + dy2.num) >= self.capacity:\n self._resize((dy1.num + dy2.num) * 2)\n for i in range(dy1.num):\n self.a[i] = dy1.a[i]\n self.num += 1\n for j in range(dy1.num, dy1.num + dy2.num):\n self.a[j] = dy2.a[j - dy1.num]\n self.num += 1\n\n def new_array(self, n):\n return (n * ctypes.py_object)()\n\n def _resize(self, n: int):\n N = self.new_array(n)\n for k in range(self.num):\n N[k] = self.a[k]\n self.a = N\n self.capacity = n\n\n def is_empty(self):\n return self.num == 0\n\n def __iter__(self):\n self.k = 0\n return self\n\n def __next__(self):\n if self.k < self.num:\n m = self.a[self.k]\n self.k += 1\n return m\n else:\n raise StopIteration","sub_path":"Lab1/src/mutable.py","file_name":"mutable.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"55470400","text":"#!/usr/bin/python\n#-------------------------------------------------------------------------------\n# Name: mail_gpsdo_log.py\n# Purpose:\n#\n# Author: paulv\n#\n# Created: 13-03-2020\n# Copyright: (c) paulv 2020\n# Licence: \n#-------------------------------------------------------------------------------\n\nimport os\nimport re\nimport subprocess\nimport sys, traceback\nimport email\nfrom time import time, sleep, gmtime, strftime, localtime\nimport zipfile\n\nVERSION=\"1.1\" # added support for a RAM disk\nDEBUG = False\n\n# here is where we store the files\nlog_file = \"/mnt/ramdisk/gpsdo.log\"\nzip_file = \"/mnt/ramdisk/gpsdo.zip\"\n# target email account\nmail_address = 
\"pw.versteeg@gmail.com\"\n\n\ndef mail_err_log():\n '''\n Just before mid-night has been found by cron, this function emails the daily\n logs.\n '''\n try:\n print(\"zip the file\")\n os.chdir('/mnt/ramdisk')\n zipfile.ZipFile('gpsdo.zip', mode='w').write('gpsdo.log', compress_type=zipfile.ZIP_DEFLATED)\n except Exception as e:\n print(\"*** Exception {}\".format(e))\n\n try:\n if os.path.isfile(log_file):\n # if the file is there...\n # send it out as an attachement\n cmd = 'mpack -s \"Bliley GPSDO log file\" {} {}'.format(zip_file, mail_address)\n print(\"mail_gpsdo_log cmd : {}\".format(cmd))\n subprocess.call([cmd], shell=True)\n\n except Exception as e:\n print(\"error\", \"Unexpected Exception in mail_gpsdo_log() {0}\".format(e))\n return\n\n\n\ndef main():\n print(\"Mail Bliley GPSDO logs: log Version {}\".format(VERSION))\n mail_err_log()\n\nif __name__ == '__main__':\n main()\n","sub_path":"mail_gpsdo_log.py","file_name":"mail_gpsdo_log.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326563727","text":"from random import randint\n\ndef search_elements(collection):\n p = 1\n result = []\n for value in collection:\n if value % 6 == 0 and value % 10 == 8:\n result.append(value)\n p *= value\n\n result.insert(0, p)\n\n return result\n\n\nlst = [randint(0, 100) for i in range(15)]\nprint(lst)\n\nres = search_elements(lst)\nif len(res) == 1:\n print('Found nothing')\nelse:\n print(' * '.join(str(x) for x in res[1:]) + ' = ' + str(res[0]))","sub_path":"lesson_09/examples/line_search.py","file_name":"line_search.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"401701892","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n name = \"Yerassyl\"\n out = \"

Hello Yerassyl

\"\n return render_template(\"home.html\")\n\n\n@app.route(\"/category\")\ndef category():\n out = \"

Categories

\"\n return out\n\n\n@app.route(\"/contacts\")\ndef contacts_page():\n return \"Contacts\"\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"lesson23Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"102544455","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpRequest, HttpResponseRedirect\nfrom groupsapp.models import Group, Available, Task\nfrom django.urls import reverse\nfrom groupsapp.forms import GroupAddForm, TaskEditForm, GroupEditForm\n\n# Create your views here.\n\n\ndef add_user(request: HttpRequest, id: int, key: str):\n if request.user.is_authenticated:\n group = get_object_or_404(Group, pk=id)\n\n certain_group = Available.objects.filter(groups__id=id, user=request.user)\n\n if certain_group:\n pass\n else:\n if key == str(group.uuid):\n new_available = Available(user=request.user, groups=group)\n new_available.quantity += 1\n new_available.save()\n\n return HttpResponseRedirect(reverse('groups:index'))\n\n\ndef remove_user(request: HttpRequest, id: int, key: str):\n if request.user.is_authenticated:\n able = get_object_or_404(Available, groups=id, user=request.user)\n if str(able.groups.uuid) == key:\n able.delete()\n\n return HttpResponseRedirect(reverse('groups:index'))\n\n\ndef index(request: HttpRequest):\n if request.user.is_authenticated:\n group = Available.objects.filter(user=request.user)\n if not group:\n group = False\n else:\n group = False\n\n context = {\n 'group': group\n }\n\n return render(request, 'groupsapp/index.html', context)\n\n\ndef group(request: HttpRequest, id: int, key: str):\n # available_groups = get_object_or_404(Available, groups=id, user=request.user.id)\n uuid = True\n all_users = False\n if request.user.is_authenticated:\n available_groups = Available.objects.filter(groups=id, user=request.user.id)\n all_users = 
Available.objects.filter(groups=id)\n if not available_groups:\n group = False\n else:\n group = get_object_or_404(Group, pk=id)\n if str(group.uuid) != key:\n group = False\n uuid = False\n else:\n uuid = group.uuid\n else:\n group = False\n available_groups = False\n\n context = {\n 'group': group,\n 'available': available_groups,\n 'uuid': uuid,\n 'all_users': all_users,\n }\n\n return render(request, 'groupsapp/group.html', context)\n\n\ndef new_group(request: HttpRequest):\n if request.method == 'POST':\n group_form = GroupAddForm(request.POST, request.FILES)\n if group_form.is_valid():\n data = group_form.cleaned_data.get(\"uuid\")\n group_form.save()\n able = get_object_or_404(Group, uuid=data)\n new_available = Available(user=request.user, groups=able)\n new_available.quantity += 1\n new_available.save()\n return HttpResponseRedirect(reverse('groups:index'))\n else:\n group_form = GroupAddForm()\n\n context = {\n 'group_form': group_form,\n }\n\n return render(request, 'groupsapp/new_group.html', context)\n\n\ndef task(request: HttpRequest, id: int, key: str):\n authenticated = False\n user_is_valid = False\n group_is_valid = False\n task_is_valid = False\n task_is = None\n if request.user.is_authenticated:\n authenticated = True\n available_group = Group.objects.filter(uuid=key)\n if available_group:\n group_is_valid = True\n available_group = get_object_or_404(Group, uuid=key)\n available_user = Available.objects.filter(user=request.user.id, groups=available_group.id)\n if available_user:\n user_is_valid = True\n task_is = Task.objects.filter(id=id, group=available_group.id)\n if task_is:\n task_is_valid = True\n task_is = get_object_or_404(Task, id=id, group=available_group.id)\n\n context = {\n 'task': task_is,\n 'task_is_valid': task_is_valid,\n 'authenticated': authenticated,\n 'group_is_valid': group_is_valid,\n 'user_is_valid': user_is_valid,\n }\n\n return render(request, 'groupsapp/task.html', context)\n\n\ndef all_tasks(request: HttpRequest, key: 
str):\n authenticated = False\n key_is_valid = False\n user_is_valid = False\n tasks = None\n group_is = None\n if request.user.is_authenticated:\n authenticated = True\n group_is = Group.objects.filter(uuid=key)\n if group_is:\n key_is_valid = True\n group_is = get_object_or_404(Group, uuid=key)\n available = Available.objects.filter(user=request.user.id, groups=group_is.id)\n if available:\n user_is_valid = True\n tasks = Task.objects.filter(group=group_is.id)\n\n context = {\n 'authenticated': authenticated,\n 'key_is_valid': key_is_valid,\n 'user_is_valid': user_is_valid,\n 'tasks': tasks,\n 'group_is': group_is,\n }\n\n return render(request, 'groupsapp/all_tasks.html', context)\n\n\ndef new_task(request: HttpRequest, id: int):\n task_form = None\n authenticated = False\n user_is_valid = False\n if request.user.is_authenticated:\n authenticated = True\n available = Available.objects.filter(user=request.user.id, groups=id)\n if available:\n user_is_valid = True\n if request.method == 'POST':\n task_form = TaskEditForm(request.POST, request.FILES)\n if task_form.is_valid():\n task_form.group = id\n task_form.save()\n return HttpResponseRedirect(reverse('groups:index'))\n else:\n task_form = TaskEditForm(initial={'is_done': False, 'group': id})\n\n context = {\n 'task_form': task_form,\n 'authenticated': authenticated,\n 'user_is_valid': user_is_valid,\n }\n\n return render(request, 'groupsapp/new_task.html', context)\n\n\ndef edit_group(request: HttpRequest, id: int, key: str):\n authenticated = False\n group_form = None\n key_is_valid = False\n user_is_valid = False\n uuid = None\n group_id = None\n if request.user.is_authenticated:\n authenticated = True\n available_user = Available.objects.filter(user=request.user.id, groups=id)\n if available_user:\n user_is_valid = True\n available_key = Group.objects.filter(uuid=key)\n if available_key:\n uuid = available_key[0].uuid\n group_id = available_key[0].id\n key_is_valid = True\n if request.method == 'POST':\n 
group_form = GroupEditForm(request.POST, instance=available_key[0])\n\n if group_form.is_valid():\n group_form.save()\n return HttpResponseRedirect(reverse('groups:index'))\n else:\n group_form = GroupEditForm(instance=available_key[0])\n\n context = {\n 'group_form': group_form,\n 'authenticated': authenticated,\n 'key_is_valid': key_is_valid,\n 'user_is_valid': user_is_valid,\n 'uuid': uuid,\n 'group_id': group_id,\n }\n\n return render(request, 'groupsapp/edit_group.html', context)\n","sub_path":"groupsapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"422134602","text":"\"\"\"\nThe :py:mod:`spynnaker.pynn` package contains the frontend specifications\nand implementation for the PyNN High-level API\n(http://neuralensemble.org/trac/PyNN)\n\"\"\"\n\nimport inspect\nfrom ._version import __version__, __version_month__, __version_year__\n\n# utility functions\nfrom spynnaker.pyNN.utilities import conf\nfrom spynnaker.pyNN.utilities import utility_calls\nfrom spynnaker.pyNN.utilities.parameters_surrogate\\\n import PyNNParametersSurrogate\n\n# pynn centric classes\nfrom spynnaker.pyNN.spinnaker import Spinnaker\nfrom spynnaker.pyNN.spinnaker import executable_finder\nfrom spynnaker.pyNN import exceptions\nfrom spynnaker.pyNN.utilities.conf import config\nfrom spynnaker.pyNN.utilities.database.socket_address import SocketAddress\n\n# neural models\nfrom spynnaker.pyNN.models.neural_models.if_cond_exp \\\n import IFConductanceExponentialPopulation as IF_cond_exp\nfrom spynnaker.pyNN.models.neural_models.if_curr_dual_exp \\\n import IFCurrentDualExponentialPopulation as IF_curr_dual_exp\nfrom spynnaker.pyNN.models.neural_models.if_curr_exp \\\n import IFCurrentExponentialPopulation as IF_curr_exp\nfrom spynnaker.pyNN.models.neural_models.izk_curr_exp \\\n import IzhikevichCurrentExponentialPopulation as IZK_curr_exp\n\n# neural 
projections\nfrom spynnaker.pyNN.models.neural_projections\\\n .delay_afferent_partitionable_edge import DelayAfferentPartitionableEdge\nfrom spynnaker.pyNN.models.utility_models.delay_extension_vertex \\\n import DelayExtensionVertex\nfrom spynnaker.pyNN.models.neural_projections.delay_partitionable_edge \\\n import DelayPartitionableEdge\nfrom spynnaker.pyNN.models.neural_projections.delay_partitioned_edge \\\n import DelayPartitionedEdge\nfrom spynnaker.pyNN.models.neural_projections.projection_partitionable_edge \\\n import ProjectionPartitionableEdge\nfrom spynnaker.pyNN.models.neural_projections.projection_partitioned_edge \\\n import ProjectionPartitionedEdge\n\n# spike sources\nfrom spynnaker.pyNN.models.spike_source.spike_source_poisson\\\n import SpikeSourcePoisson\nfrom spynnaker.pyNN.models.spike_source.spike_source_array \\\n import SpikeSourceArray\nfrom spynnaker.pyNN.models.spike_source.spike_source_from_file \\\n import SpikeSourceFromFile\n\n# connections\nfrom spynnaker.pyNN.models.neural_projections.connectors.all_to_all_connector\\\n import AllToAllConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.\\\n fixed_number_pre_connector import FixedNumberPreConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.\\\n fixed_probability_connector import FixedProbabilityConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.from_list_connector \\\n import FromListConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.from_file_connector \\\n import FromFileConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.multapse_connector \\\n import MultapseConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.one_to_one_connector \\\n import OneToOneConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.\\\n distance_dependent_probability_connector import \\\n DistanceDependentProbabilityConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.\\\n 
fixed_number_post_connector import FixedNumberPostConnector\nfrom spynnaker.pyNN.models.neural_projections.connectors.small_world_connector\\\n import SmallWorldConnector\n\n# Mechanisms for synapse dynamics\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.\\\n synapse_dynamics import SynapseDynamics\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.stdp_mechanism \\\n import STDPMechanism\n\n# STDP weight dependences\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.dependences.\\\n additive_weight_dependence import AdditiveWeightDependence\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.dependences.\\\n multiplicative_weight_dependence import MultiplicativeWeightDependence\n\n# STDP timing dependences\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.dependences.\\\n pfister_spike_triplet_time_dependence import \\\n PfisterSpikeTripletTimeDependence as PfisterSpikeTripletRule\nfrom spynnaker.pyNN.models.neural_properties.synapse_dynamics.dependences.\\\n spike_pair_time_dependency import SpikePairTimeDependency as SpikePairRule\n\n# constraints\nfrom pacman.model.constraints.placer_constraints.\\\n placer_chip_and_core_constraint import PlacerChipAndCoreConstraint\nfrom pacman.model.constraints.partitioner_constraints.\\\n partitioner_maximum_size_constraint import PartitionerMaximumSizeConstraint\nfrom pacman.model.constraints.placer_constraints.\\\n placer_radial_placement_from_chip_constraint \\\n import PlacerRadialPlacementFromChipConstraint\n\n# note importing star is a bad thing to do.\nfrom pyNN.random import *\nfrom pyNN.space import *\n\n# traditional logger\nlogger = logging.getLogger(__name__)\n\n# global controller / spinnaker object that does everything\n_spinnaker = None\n\n# List of binary search paths\n_binary_search_paths = []\n\n\ndef register_binary_search_path(search_path):\n \"\"\"\n :param search_path:\n Registers an additional binary search path for\n for executables\n\n 
absolute search path for binaries\n \"\"\"\n executable_finder.add_path(search_path)\n\n\ndef end(stop_on_board=True):\n \"\"\"\n :param stop_on_board:\n Do any necessary cleaning up before exiting.\n\n Unregisters the controller\n \"\"\"\n global _spinnaker\n _spinnaker.stop(stop_on_board)\n _spinnaker = None\n\n\ndef get_spynnaker():\n \"\"\"helper method for other plugins to add stuff to the graph\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker\n\n\ndef num_processes():\n \"\"\"Return the number of MPI processes\n (not used for SpiNNaker, always returns 1)\n \"\"\"\n return 1\n\n\ndef rank():\n \"\"\"Return the MPI rank of the current node. (not used for SpiNNaker,\n always returns 0 - as this is the minimum rank suggesting the front node)\n \"\"\"\n return 0\n\n\ndef reset():\n \"\"\"Reset the time to zero, and start the clock.\n TO BE IMPLEMENTED\n \"\"\"\n pass\n\n\ndef run(run_time=None):\n \"\"\"Run the simulation for run_time ms.\n\n :param int run_time:\n simulation length (in ms)\n\n On run the following :py:class:`pacman103.core.control.Controller`\n functions are called:\n - :py:mod:`pacman103.core.control.Controller.map_model`\n - :py:mod:`pacman103.core.control.Controller.specify_output`\n - :py:mod:`pacman103.core.control.Controller.generate_output`\n - :py:mod:`pacman103.core.control.Controller.load_executables`\n - :py:mod:`pacman103.core.control.Controller.run`\n \"\"\"\n global _spinnaker\n _spinnaker.run(run_time)\n return None\n\n\ndef setup(timestep=0.1, min_delay=None, max_delay=None, machine=None,\n database_socket_addresses=None, **extra_params):\n \"\"\"\n Should be called at the very beginning of a script.\n extra_params contains any keyword arguments that are required by a given\n simulator but not by others.\n For simulation on SpiNNaker the following parameters are mandatory:\n\n :param `pacman103.lib.lib_machine` machine:\n A SpiNNaker machine used to run the simulation.\n\n\n The setup() call instantiates a\n 
:py:class:`pacman103.core.control.Controller`\n object which is used as a global variable throughout the whole process.\n\n It also creates an AppMonitor Object (a vertex with model-type AppMon),\n placing a mapping constraint on it so that it is on chip (0,0).\n This functionality may move elsewhere later.\n\n NB: timestep, min_delay and max_delay are required by the PyNN API but we\n ignore them because they have no bearing on the on-chip simulation code.\n :param timestep:\n :param min_delay:\n :param max_delay:\n :param machine:\n :param database_socket_addresses:\n :param extra_params:\n :return:\n \"\"\"\n global _spinnaker\n global _binary_search_paths\n\n logger.info(\n \"sPyNNaker (c) {} APT Group, University of Manchester\".format(\n __version_year__))\n logger.info(\n \"Release version {} - {} {}\".format(\n __version__, __version_month__, __version_year__))\n\n if len(extra_params) > 1:\n logger.warn(\"Extra params has been applied which we do not consider\")\n _spinnaker = Spinnaker(\n host_name=machine, timestep=timestep, min_delay=min_delay,\n max_delay=max_delay,\n database_socket_addresses=database_socket_addresses)\n # Return None, simply because the PyNN API says something must be returned\n return None\n\n\ndef set_number_of_neurons_per_core(neuron_type, max_permitted):\n \"\"\"\n Sets a ceiling on the number of neurons of a given type that can be placed\n on a single core.\n This information is stored in the model itself and is referenced\n during the partition stage of the mapper.\n Note that each neuron type has a default value for this parameter that will\n be used if no override is given.\n :param neuron_type:\n :param max_permitted:\n \"\"\"\n if not inspect.isclass(neuron_type):\n if neuron_type in globals():\n neuron_type = globals()[neuron_type]\n else:\n neuron_type = None\n if neuron_type is None:\n raise Exception(\"Unknown Vertex Type {}\"\n .format(neuron_type))\n\n if hasattr(neuron_type, \"set_model_max_atoms_per_core\"):\n 
neuron_type.set_model_max_atoms_per_core(max_permitted)\n else:\n raise Exception(\"{} is not a Vertex type\"\n .format(neuron_type))\n\n\n# noinspection PyPep8Naming\ndef Population(size, cellclass, cellparams, structure=None, label=None):\n \"\"\"\n\n :param size:\n :param cellclass:\n :param cellparams:\n :param structure:\n :param label:\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.create_population(size, cellclass, cellparams,\n structure, label)\n\n\n# noinspection PyPep8Naming\ndef Projection(presynaptic_population, postsynaptic_population,\n connector, source=None, target='excitatory',\n synapse_dynamics=None, label=None, rng=None):\n \"\"\"\n\n :param presynaptic_population:\n :param postsynaptic_population:\n :param connector:\n :param source:\n :param target:\n :param synapse_dynamics:\n :param label:\n :param rng:\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.create_projection(\n presynaptic_population, postsynaptic_population, connector, source,\n target, synapse_dynamics, label, rng)\n\n\ndef NativeRNG(seed_value):\n \"\"\"\n fixes the rnadom number generators seed\n :param seed_value:\n :return:\n \"\"\"\n numpy.random.seed(seed_value)\n\n\ndef get_current_time():\n \"\"\"\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.get_current_time()\n","sub_path":"spynnaker/pyNN/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"582005530","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/will/.virtualenvs/datasight-backend/lib/python2.7/site-packages/dejavu/fingerprint.py\n# Compiled at: 2015-04-19 17:14:05\nimport numpy as np, matplotlib.mlab as mlab, matplotlib.pyplot as plt\nfrom scipy.ndimage.filters import maximum_filter\nfrom scipy.ndimage.morphology 
import generate_binary_structure, iterate_structure, binary_erosion\nimport hashlib\nfrom operator import itemgetter\nIDX_FREQ_I = 0\nIDX_TIME_J = 1\nDEFAULT_FS = 44100\nDEFAULT_WINDOW_SIZE = 4096\nDEFAULT_OVERLAP_RATIO = 0.5\nDEFAULT_FAN_VALUE = 15\nDEFAULT_AMP_MIN = 10\nPEAK_NEIGHBORHOOD_SIZE = 20\nMIN_HASH_TIME_DELTA = 0\nMAX_HASH_TIME_DELTA = 200\nPEAK_SORT = True\nFINGERPRINT_REDUCTION = 20\n\ndef fingerprint(channel_samples, Fs=DEFAULT_FS, wsize=DEFAULT_WINDOW_SIZE, wratio=DEFAULT_OVERLAP_RATIO, fan_value=DEFAULT_FAN_VALUE, amp_min=DEFAULT_AMP_MIN):\n \"\"\"\n FFT the channel, log transform output, find local maxima, then return\n locally sensitive hashes.\n \"\"\"\n arr2D = mlab.specgram(channel_samples, NFFT=wsize, Fs=Fs, window=mlab.window_hanning, noverlap=int(wsize * wratio))[0]\n arr2D = 10 * np.log10(arr2D)\n arr2D[arr2D == -np.inf] = 0\n local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)\n return generate_hashes(local_maxima, fan_value=fan_value)\n\n\ndef get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):\n struct = generate_binary_structure(2, 1)\n neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)\n local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D\n background = arr2D == 0\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n detected_peaks = local_max - eroded_background\n amps = arr2D[detected_peaks]\n j, i = np.where(detected_peaks)\n amps = amps.flatten()\n peaks = zip(i, j, amps)\n peaks_filtered = [ x for x in peaks if x[2] > amp_min ]\n frequency_idx = [ x[1] for x in peaks_filtered ]\n time_idx = [ x[0] for x in peaks_filtered ]\n if plot:\n fig, ax = plt.subplots()\n ax.imshow(arr2D)\n ax.scatter(time_idx, frequency_idx)\n ax.set_xlabel('Time')\n ax.set_ylabel('Frequency')\n ax.set_title('Spectrogram')\n plt.gca().invert_yaxis()\n plt.show()\n return zip(frequency_idx, time_idx)\n\n\ndef generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):\n \"\"\"\n 
Hash list structure:\n sha1_hash[0:20] time_offset\n [(e05b341a9b77a51fd26, 32), ... ]\n \"\"\"\n if PEAK_SORT:\n peaks.sort(key=itemgetter(1))\n for i in range(len(peaks)):\n for j in range(1, fan_value):\n if i + j < len(peaks):\n freq1 = peaks[i][IDX_FREQ_I]\n freq2 = peaks[(i + j)][IDX_FREQ_I]\n t1 = peaks[i][IDX_TIME_J]\n t2 = peaks[(i + j)][IDX_TIME_J]\n t_delta = t2 - t1\n if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:\n h = hashlib.sha1('%s|%s|%s' % (str(freq1), str(freq2), str(t_delta)))\n yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)","sub_path":"pycfiles/PyDejavu-0.1.3.macosx-10.9-intel.tar/fingerprint.py","file_name":"fingerprint.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"115413525","text":"import pandas as pd\n\ndef get_data_frames(data_name, root_directory, years):\n \"\"\"Returns a list of dataframe of the requested ``data_name``.\n The argument ``data_name`` takes the type of data. 
eg: ``data_name=\"enrolments\"`` or ``data_name=\"results\"``.\n The function navigates to the ``root_directory`` and navigates every sub directory named after year of the dataset.\n The subdirectories consist of year-wise directories which consists of the data as per the year.\n The ``years`` argument is a list of all the years whose subdirectories are named after in the root directory of the dataset.\n\n :param data_name: The name of the data to be imported (example: data_name=enrolments).\n :param root_directory: The path to data directory where all the csv files are present (example: root_directory=students_data).\n :param years: A list of years which is the subfolder inside the root_directory.\n\n :return: list of all the pandas dataframe of the data_name.\n\n :Example:\n\n >>> import studentpathway as sp\n >>> years = [2015, 2016, 2017, 2018]\n >>> result_data = sp.get_data_frames(\"results\", \"students_data\", years)\n \"\"\"\n\n data = []\n try:\n for i in range(len(years)):\n file_name = str(data_name) + str(years[i]) + \".csv\"\n path = root_directory + \"/\" + str(years[i]) + \"/\" + file_name\n data.append(pd.read_csv(path))\n return data\n except TypeError as e:\n print(e)\n except ValueError as e:\n print(e)\n except:\n print(\"Unexpected error!\")\n raise\n","sub_path":"studentpathway/dataprocessing/get_data_frames.py","file_name":"get_data_frames.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"500981137","text":"from airflow import DAG, settings\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom airflow.utils.trigger_rule import TriggerRule\nfrom datetime import timedelta\n\n\ndef py_op_callback():\n print('Airflow::test_dag::py_op')\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': days_ago(2),\n 'email': ['airflow@example.com'],\n 'email_on_failure': 
False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\n\ndag = DAG(\n 'test_dag',\n default_args=default_args,\n description='dvc_mlflow_airflow',\n schedule_interval=timedelta(days=1),\n)\n\npy_op = PythonOperator(\n task_id='py_op',\n dag=dag,\n python_callable=py_op_callback,\n provide_context=False,\n retries=0\n)\n\npy_op\n","sub_path":"airflow/airflow/dags/test_dag.py","file_name":"test_dag.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"311573972","text":"import lul.models\nimport lul.page.handlers\n\n\nclass Main(lul.page.handlers.Base):\n def get(self):\n\n template_values = {\n \"GOOGLE_API_KEY\": self.google_api_key\n }\n \n self.render_template(\"web\", template_values)\n\n\nclass Full(lul.page.handlers.Base):\n def get(self):\n template_values = {\n \"GOOGLE_API_KEY\": self.google_api_key\n }\n \n self.render_template(\"web-full\", template_values)\n\n\nclass Locations(lul.page.handlers.Base):\n def get(self):\n \n locations = lul.models.PointOfInterest.query().order(\n -lul.models.PointOfInterest.updated_date\n )\n\n template_values = {\n \"locations_count\": locations.count(),\n \"locations\": locations\n }\n\n self.render_template(\"web-locations\", template_values)\n\n\nLEETON_MIN_LAT = -34.57432351042673\nLEETON_MAX_LAT = -34.53155583908121\nLEETON_MIN_LNG = 146.38411045074463\nLEETON_MAX_LNG = 146.42093181610107\n\n\nclass Guide(lul.page.handlers.Base):\n\n def generate_marker_string(self, pois, colour=\"red\", size=\"tiny\", precision=4):\n\n # fallback for no markers\n if pois.count() == 0:\n return \"\"\n\n marker_string = \"&markers=color:%s%%7Csize:%s\" % (colour, size)\n for poi in pois:\n\n # exclude if outside acceptable ranges\n if poi.location.latitude < LEETON_MIN_LAT or \\\n poi.location.latitude > LEETON_MAX_LAT or \\\n poi.location.longitude < LEETON_MIN_LNG or \\\n poi.location.longitude > 
LEETON_MAX_LNG:\n continue\n\n marker_string += \"%%7C%f,%f\" % (poi.location.latitude, poi.location.longitude)\n\n return marker_string\n\n def get(self):\n\n leeton_pois = lul.models.PointOfInterest.query()\n\n template_values = {\n \"GOOGLE_API_KEY\": self.google_api_key,\n \"CENTRE\": \"-34.553,146.402\",\n \"LEETON_MARKERS\": self.generate_marker_string(leeton_pois),\n \"ZOOM\": 14\n }\n\n self.render_template(\"guide\", template_values)\n\n\nclass LetsEncryptHandler(lul.page.handlers.Base):\n\n def get(self, challenge):\n self.response.headers['Content-Type'] = 'text/plain'\n responses = {\n '4x1UbfmifwnDcJh-kHBaJpSkkBvo5DHIDCeMDvKNy_Y': '4x1UbfmifwnDcJh-kHBaJpSkkBvo5DHIDCeMDvKNy_Y.rz4wOLBFKvEfTZUTA7lH2QjijEV26Sx6yGR7IgxAOwY'\n }\n self.response.write(responses.get(challenge, ''))\n\n","sub_path":"lightupleeton/lul/page/handlers/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"396806184","text":"\nfrom report import report_sxw\nfrom osv import orm\nfrom tools.translate import _\n\nclass subsistence_notify(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context):\n super(subsistence_notify, self).__init__(cr, uid, name, context)\n self.localcontext.update({\n 'total':self.total_mission,\n })\n self.context = context\n\n def set_context(self, objects, data, ids, report_type=None):\n for obj in objects:\n if obj.state != \"approved\":\n raise orm.except_orm(_('Warning!'), _('You cannot print this report for not approved mission!'))\n if obj.mission_id.company_id or not obj.mission_id.currency:\n raise orm.except_orm(_('Warning!'), _('You can not print. 
This report available only for external missions !')) \n return super(subsistence_notify, self).set_context(objects, data, ids, report_type=report_type)\n\n def total_mission(self,miss_id):\n self.cr.execute('''SELECT sum(l.mission_amounts) as total\n FROM hr_employee_mission m, hr_employee_mission_line l\n WHERE m.id = %s AND l.emp_mission_id = m.id '''%(miss_id))\n return self.cr.dictfetchall()\n\nreport_sxw.report_sxw('report.Subsistence_notifi', 'hr.employee.mission', 'hr_mission/report/sub_and_enrch_notification.rml' ,parser=subsistence_notify ,header=True)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"v_7/GDS/common_shamil_v3/hr_mission/report/sub_and_enrch_notification.py","file_name":"sub_and_enrch_notification.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493936496","text":"import torch as t\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\n\ntorch.manual_seed(40)\n\nclass Network(nn.Module):\n\n def __init__(self):\n super(Network, self).__init__()\n\n self.mlp = nn.Sequential(\n nn.Linear(28, 28),\n nn.BatchNorm1d(28),\n nn.ReLU(),\n )\n self.dropout = nn.Dropout(p=0.4)\n self.linear = nn.Linear(28, 2)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n \n out = self.mlp(x)\n #out = self.dropout(out)\n out = self.linear(out)\n out = self.sigmoid(out)\n return out\n","sub_path":"05_samples/Cardiovascular_disease/utils/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"161980399","text":"# -*- coding: utf-8 -*-\n\nfrom openfisca_core.entities import build_entity\n\n# You can define here the entities you need in your legislation.\n\nHousehold = build_entity(\n key = \"household\",\n plural = \"households\",\n label = u'Household',\n roles = [\n {\n 'key': 
'parent',\n 'plural': 'parents',\n 'label': u'Parents',\n 'max': 2,\n },\n {\n 'key': 'child',\n 'plural': 'child',\n 'label': u'Child',\n }\n ]\n )\n\n\nPerson = build_entity(\n key = \"person\",\n plural = \"persons\",\n label = u'Person',\n is_person = True,\n )\n\nentities = [Household, Person]\n","sub_path":"openfisca_country_template/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"227029622","text":"__author__ = 'debjit'\n'''modified esther's code'''\nimport argparse as ap\nfrom EM import EMModule\nimport numpy as np\nimport con_vec_utils as ctv\nimport utils as ut\nimport os\nif __name__ == \"__main__\":\n parser = ap.ArgumentParser(description='Singe EM iteration', usage='SingleEM_no_noise [-initialize] [-true_theta][-file] ')\n parser.add_argument(\"-file\", help=\".txt file containing the input text\", nargs='?')\n parser.add_argument('-type', help=\"Give percentage of noise\")\n parser.add_argument('-initialize', help=\"to initialize with pre-saved estimated labels from pre-trained model\", action=\"store_true\")\n parser.add_argument('-true_theta', help=\"to load pre-saved theta into EMModule\", action=\"store_true\")\n parser.add_argument('-tracking', help=\"verbose mode\", action=\"store_true\")\n args = parser.parse_args()\n#retrieve predictions & convert to vector\n estlab_path = args.file #\"/data/users/dpaul/Thesis/Practical/Data/Data_North_Clean/Output_trail_train/original_20000_out\"\n est_x_uni, est_y_uni, est_vocabulary, est_labels = ctv.load_conll(estlab_path, delim='\\t', est=True, y_column='c')\n est_labels = ['b-np', 'b-pp', 'i-np', 'b-vp', 'i-vp', 'b-sbar', 'o', 'b-adjp', 'b-advp', 'i-advp', 'i-adjp', 'i-sbar', 'i-pp', 'b-prt', 'b-lst', 'b-intj', 'i-intj', 'b-conjp', 'i-conjp', 'i-prt', 'b-ucp', 'i-ucp','i-lst','\\n']#est_labels #ut.get_labels() \n print(est_labels)\n est_y_num = 
ctv.uni_to_uni_num(est_y_uni, est_labels,'c')\n est_y_vector = ut.get_reverse_zt(est_y_num)\n# retrieve original old labels & convert to vector\n noisy_x_uni, noisy_y_uni, noisy_vocabulary, noisy_labels = ctv.load_conll(estlab_path, delim='\\t', est=True, y_column='z') \n noisy_labels=['b-np', 'b-pp', 'i-np', 'b-vp', 'i-vp', 'b-sbar', 'o', 'b-adjp', 'b-advp', 'i-advp', 'i-adjp', 'i-sbar', 'i-pp', 'b-prt', 'b-lst', 'b-intj', 'i-intj', 'b-conjp', 'i-conjp', 'i-prt', 'b-ucp', 'i-ucp','i-lst','\\n']\n print(noisy_labels)\n noisy_y_num = ctv.uni_to_uni_num(noisy_y_uni, noisy_labels,'z') \n noisy_y_vector = ut.get_reverse_zt(noisy_y_num) \n \n if args.tracking:\n print(\"tracking progress to vector\")\n print('nr instances should be', len(est_x_uni))\n print('nr labels should be', len(est_labels), est_labels)\n print('original estimated labels labelset was:', pre)\n if len(est_x_uni) != len(noisy_x_uni):\n print('Nr instances not the same')\n elif max(est_y_num) != max(noisy_y_num):\n print('Nr labels not the same')\n print('Shapes of vectors are: ', np.shape(est_y_vector), np.shape(noisy_y_vector))\n z = noisy_y_vector \n prob_y = est_y_vector \n print('Getting Estlabs from: ',estlab_path, 'Difference Estlab (prob_y) and orig (z):', ut.dist(z, prob_y))#+ '/' + estlab_path\n# retrieve or initialize theta\n if args.initialize:\n EM = EMModule(initialize=True, initializer=prob_y, labels=z)\n \n np.save('/data/users/dpaul/Thesis/Practical/EM_chunk/theta/theta'+args.type, EM.theta) \n print('Saved initialised theta to','/data/users/dpaul/Thesis/Practical/EM_chunk/theta/theta'+ args.type+'.npy')\n elif args.true_theta:\n EM = EMModule(labels=z)\n EM.theta = np.load('./theta' + '.npy')\n print('Loaded true theta from', os.getcwd() + '/truethetas'+'/theta'+ args.type + '.npy')\n print(EM.theta)\n else:\n EM = EMModule(labels=z)\n EM.theta = np.load('/data/users/dpaul/Thesis/Practical/EM_chunk/theta/theta'+ args.type +'.npy') #/nethome/evdberg/NER_NoisyLabelNeuralNetwork/ ### 
temporary my change\n \n print('Loaded theta from','/data/users/dpaul/Thesis/Practical/EM_chunk/theta/theta'+ args.type +'.npy')\n print(EM.theta)\n prev_theta = EM.theta\n# iterates once, gets improved theta, updates and checks for convergence\n print(\"Updating theta and c\")\n \n c_vector, new_theta = EM.iteration(new_prob_y=prob_y)\n print('theta after iteration:', new_theta)\n \n # save c and theta\n if not args.true_theta:\n np.save('/data/users/dpaul/Thesis/Practical/EM_chunk/theta/theta'+ args.type ,new_theta) #/nethome/evdberg/NER_NoisyLabelNeuralNetwork/ \n c_num = ut.get_zt(c_vector) \n \n c_uni = ctv.uni_num_to_uni(c_num, est_labels) \n #print(c_uni)\n if args.true_theta:\n output_path = estlab_path+'_true'\n c_conll = ctv.uni_to_conll(est_x_uni, c_uni) #without pred xc, with pred xyc\n with open(output_path, 'w') as f:\n for out_line in c_conll:\n f.write(out_line)\n print('Wrote to', output_path)\n elif args.initialize:\n c_conll_xyc = ctv.uni_to_conll(est_x_uni, c_uni, orig=noisy_y_uni) #without pred xc, with pred xyc\n output_xyc = estlab_path\n \n with open(output_xyc, 'w') as f:\n for out_line in c_conll_xyc: \n f.write(out_line)\n print('Wrote updates estlab (c) to: ', output_xyc)\n c_conll_xc = ctv.uni_to_conll(est_x_uni, c_uni) #without pred xc, with pred xyc\n output_xc = estlab_path+'_update'\n with open(output_xc, 'w') as f:\n for out_line in c_conll_xc:\n f.write(out_line)\n\n else: \n c_conll_xyc = ctv.uni_to_conll(est_x_uni, c_uni, orig=noisy_y_uni) #without pred xc, with pred xyc\n output_xyc = estlab_path\n \n with open(output_xyc, 'w') as f:\n for out_line in c_conll_xyc: \n f.write(out_line)\n print('Wrote updates estlab (c) to: ', output_xyc)\n c_conll_xc = ctv.uni_to_conll(est_x_uni, c_uni) #without pred xc, with pred xyc\n output_xc = estlab_path+'_update'\n with open(output_xc, 'w') as f:\n for out_line in c_conll_xc:\n 
f.write(out_line)\n","sub_path":"EM_chunk/SingleEM.py","file_name":"SingleEM.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"644103924","text":"nomes=[]\nidades=[]\ncont=3\nmaior15=menor_med=0\nfor i in range(cont):\n nomes.append(str(input('Informe o nome do aluno: ')))\n idades.append(int(input('Informe a idade do aluno: ')))\nmedia= sum(idades)/len(idades)\nfor i in range(cont):\n if idades [i]>15:\n maior15+=1\n if idades[i] < media:\n menor_med+=1\nidx=idades.index(max(idades))\nidx2=idades.index(min(idades))\nprint(f'A quantidade de alunos com idade superior a 15 anos: {maior15}')\nprint(f'A média das idades dos alunos: {media}')\nprint(f'A quantidade de alunois com idade abaixo da média: {media}')\nprint(f'A maior idade é {max(idades)} o nome do aluno {nomes[idx]}')\nprint(f'A menor idade é {min(idades)} o nome do aluno {nomes[idx2]}')\n \n","sub_path":"Pacote para dowloand/Python/ex021 (for e if)recebendo e calculando idades.py","file_name":"ex021 (for e if)recebendo e calculando idades.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"135908359","text":"from client import HttpSession\nimport runner\n\n\nclass Ta(object):\n \"\"\"API for resources and directory operation\"\"\"\n def __init__(self, session):\n # self.base_url = 'http://api-ta-testing.starcedu.com.cn'\n self.base_url = runner.HOST_TA\n self.session = HttpSession(session, self.base_url)\n\n def api_ta_node_tags(self):\n \"\"\"获取标签列表(包括系统标签和自定义标签)\"\"\"\n res = self.session.send_request('GET', r'/api/ta_node/tags')\n return res\n\n def api_ta_node_tag_put(self, tag_id, name):\n \"\"\"标签重命名\"\"\"\n data = {\n 'tag_id': tag_id,\n 'name': name\n }\n res = self.session.send_request('PUT', r'/api/ta_node/tag', data=data)\n return res\n\n def api_ta_node_tag_post(self, name, node_id=None, 
target_id=None, type=None):\n \"\"\"添加标签或添加节点-标签绑定关系\"\"\"\n data = {\n 'name': name,\n 'node_id': node_id,\n 'target_id': target_id,\n 'type': type\n }\n res = self.session.send_request('POST', r'/api/ta_node/tag', data=data)\n return res\n\n def api_ta_node_tag_delete(self, tag_id, node_id=None, target_id=None):\n \"\"\"删除标签或解除节点-标签绑定关系\"\"\"\n data = {\n 'tag_id': tag_id,\n 'node_id': node_id,\n 'target_id': target_id\n }\n res = self.session.send_request('DELETE', r'/api/ta_node/tag', data=data)\n return res\n","sub_path":"api/ta.py","file_name":"ta.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"511294897","text":"import jieba\nimport jieba.posseg\nfrom operator import itemgetter\n\nSTOP_WORDS = {\"the\", \"of\", \"is\", \"and\", \"to\", \"in\", \"that\", \"we\", \"for\", \"an\", \"are\", \"by\", \"be\", \"as\", \"on\",\n \"with\", \"can\", \"if\", \"from\", \"which\", \"you\", \"it\", \"this\", \"then\", \"at\", \"have\", \"all\", \"not\", \"one\",\n \"has\", \"or\", \"that\", \"..\", \"...\", \"---\"}\n\n\nclass IDFLoader(object):\n\n def __init__(self):\n self.path = \"./spider/preprocessing/idf.txt\"\n self.freq = {} # 词频\n self.median = 0.0 # 中值\n self.set_path()\n\n def set_path(self):\n content = open(self.path, 'rb').read().decode('utf-8')\n self.freq = {}\n for line in content.splitlines():\n word, freq = line.strip().split(' ')\n self.freq[word] = float(freq)\n self.median = sorted(\n self.freq.values())[len(self.freq) // 2]\n\n def get_idf(self):\n return self.freq, self.median\n\n\ndef get_stop_words():\n # print('\\n\\n\\n111\\n\\n\\n')\n with open('./spider/preprocessing/cn_stopwords.txt', 'r', encoding='utf-8') as f:\n # print('\\n\\n\\n111\\n\\n\\n')\n while True:\n line = f.readline()\n if not line:\n break\n s = line.strip()\n # print(s)\n STOP_WORDS.add(s)\n # print(STOP_WORDS)\n\n\nclass TFIDF:\n\n def __init__(self):\n get_stop_words()\n 
self.stop_words = STOP_WORDS.copy()\n self.idf_loader = IDFLoader()\n self.freq, self.median = self.idf_loader.get_idf()\n\n def extract_tags(self, sentence, topK=10, withWeight=False):\n words = jieba.cut(sentence)\n freq = {}\n for w in words:\n wc = w\n if len(wc.strip()) < 2 or wc.lower() in self.stop_words:\n continue\n freq[w] = freq.get(w, 0.0) + 1.0\n # 统计词频\n total = sum(freq.values())\n for k in freq:\n kw = k\n freq[k] *= self.freq.get(kw, self.median) / total\n\n # 是否输出weight\n if withWeight:\n tags = sorted(freq.items(), key=itemgetter(1), reverse=True)\n else:\n tags = sorted(freq, key=freq.__getitem__, reverse=True)\n\n # 输出几个\n if topK:\n return tags[:topK]\n else:\n return tags\n","sub_path":"spider/spider/preprocessing/TFIDF.py","file_name":"TFIDF.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"239313121","text":"import functools\nimport asyncio\nimport random\nfrom multi_await import MultiAwait\nfrom multi_await import multi_await\n\nasync def raise_exception():\n int('z')\n\nasync def return_value():\n return 1\n\nasync def sleep_random():\n await asyncio.sleep(0.1 + random.random())\n return 'sleep'\n \nasync def run_forever():\n while True:\n await asyncio.sleep(9999.0)\n\nasync def return_value_as_task():\n return 2\n \nasync def push_even_numbers(q):\n for i in range(100):\n if i % 2 == 0:\n await q.put(i)\n \nasync def push_odd_numbers(q):\n for i in range(100):\n if i % 2 == 1:\n await q.put(i)\n\n# --- Tests \n\nasync def test_can_get_a_value():\n async with multi_await() as m: \n m.add(return_value)\n \n results, failures = await m.get()\n\n assert results == [1]\n assert failures == [None]\n \nasync def test_can_get_a_value_from_a_task():\n async with multi_await() as m: \n m.add(return_value_as_task)\n \n results, failures = await m.get()\n\n assert results == [2] \n assert failures == [None]\n \nasync def test_can_get_an_exception(): 
\n async with multi_await() as m: \n m.add(raise_exception)\n \n results, failures = await m.get()\n\n assert results == [None] \n assert isinstance(failures[0], ValueError)\n \nasync def test_can_get_an_exception_first(): \n async with multi_await() as m: \n m.add(raise_exception)\n m.add(run_forever)\n \n results, failures = await m.get()\n\n assert results == [None, None] \n assert isinstance(failures[0], ValueError)\n assert failures[1] == None\n await m.cancel() \n \nasync def test_can_get_a_value_with_another_coro_that_never_returns():\n async with multi_await() as m: \n m.add(return_value)\n m.add(run_forever)\n \n results, failures = await m.get()\n\n assert results == [1, None] \n assert failures == [None, None]\n \nasync def test_can_get_exception_and_value():\n async with multi_await() as m:\n m.add(return_value)\n m.add(raise_exception)\n \n results, failures = await m.get()\n assert results == [1, None]\n assert failures[0] == None\n assert isinstance(failures[1], ValueError)\n \nasync def test_can_run_the_same_tasks_multiple_times():\n # Start some queues with differing capacities\n odd_queue = asyncio.Queue(1)\n even_queue = asyncio.Queue(2)\n numbers = []\n\n # Start some tasks to feed stuff into the queue\n tasks = [ asyncio.create_task(push_odd_numbers(odd_queue)), asyncio.create_task(push_even_numbers(even_queue))] \n \n async with multi_await() as m:\n m.add(odd_queue.get)\n m.add(even_queue.get)\n m.add(run_forever) # Does nothing in the test, just like the others\n \n done = False\n while not done:\n results, failures = await m.get()\n assert failures == [None, None, None]\n for r in results:\n if r is not None:\n numbers.append(r)\n \n done = len(numbers) >= 100\n \n assert list(range(100)) == sorted(numbers)\n \n [await t for t in tasks] # Cleanup after the test\n \nasync def test_fails_if_nothing_to_do():\n async with multi_await() as m:\n try:\n await m.get()\n except RuntimeError as e:\n assert str(e) == 'Attempted to await 0 
coroutines'\n \nfor entry in dir():\n if not entry.startswith('test_'):\n continue\n\n el = asyncio.new_event_loop()\n asyncio.set_event_loop(el)\n\n print(\"----- Running test '%s'\" % (entry,))\n el.run_until_complete(locals()[entry]())\n print('----- Done')\n\n el.stop()\n el.close()\n \n","sub_path":"test_multi_await.py","file_name":"test_multi_await.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"275938661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEdited on Feb 18 2014\n\n@author: Claire E Beery\n\"\"\"\n\nfrom random import randint\nfrom math import sin, cos, pi\nimport Image\n\n\n## SS: Rather than having the recurse() method and the build_random_function() method, you could\n## have one build_random_function() method that calls itself in order to build up a function\n## if you're interested, here's an example:\n## \n# xy_options = [\"x\", \"y\"]\n# function_options = [[\"prod\", 2],[\"cos_pi\", 1],[\"sin_pi\", 1],[\"cube\", 1],[\"reverse\", 1]]\n# def build_random_function(min_depth, max_depth):\n# if max_depth == 0 or (min_depth == 0 and randint(0,1) == 1):\n# return [xy_options[randint(0,1)]]\n# else: \n# selected_function = function_options[randint(0,4)]\n# if selected_function[1] == 1:\n# return [selected_function[0], build_random_function(min_depth - 1, max_depth - 1)]\n# elif selected_function[1] == 2:\n# return [selected_function[0], build_random_function(min_depth - 1, max_depth - 1), build_random_function(min_depth - 1, max_depth - 1)]\n\ndef build_random_function(min_depth, max_depth):\n \"\"\" generates a nested list that describes a composite function in the \n form of ['function', argument 1, argument 2] where each argument can be \n a function\n \n possible component functions: sin(pi*a), cos(pi*a), a*b, (a-b)/2, a**2\n \n inputs: min_depth, describes the smallest amount of nesting any branch \n of the composite function should have\n 
max_depth, describes the largest amount of nestinng any branch \n of the composite function can have \n \n \"\"\"\n\n if min_depth > 1: #add layers until minimum depth is achieved\n return recurse(min_depth,max_depth)\n else:\n depth = randint(1,max_depth) # assign random depth to check against\n \n if depth == 1: #is assigned depth is reached, end branch\n return base()\n else: # else add more branches/depth\n return recurse(min_depth,max_depth) \n\n\ndef base():\n \"\"\"creates the base case for build_random_function\"\"\"\n \n operations = [\"x\",\"y\"]\n end_op = operations[randint(0,1)] #picks x or y for the base input \n \n return end_op\n \ndef recurse(min_depth, max_depth):\n \"\"\"impliments the recursion of build_random_function\"\"\"\n \n # creates a list of possible functions \n ops = [\"prod\",\"diff_halves\",\"sin_pi\",\"cos_pi\",\"squared\"] \n \n #picks a function to use \n nxt_op_index = randint(0,4)\n nxt_op = ops[nxt_op_index]\n \n # creates first input for function (recursive of build_random_function)\n firstInput = build_random_function(min_depth -1, max_depth -1)\n\n # if function needs more than 1 input is needed, create it\n # return the chosen function and its inputs\n if nxt_op_index == 0 or nxt_op_index == 1: \n secondInput = build_random_function(min_depth -1, max_depth -1)\n return [nxt_op,firstInput,secondInput]\n else:\n return [nxt_op,firstInput]\n \n## SS: Passed my tests :)\ndef evaluate_random_function(f, x, y):\n \"\"\" evaluates the value of a multiple layer composite function \n \n possible component functions: sin(pi*a), cos(pi*a), a*b, (a-b)/2, a**2, x, y\n \n inputs: f, the composite function in the form of ['function', argument 1, argument 2]\n x, value of the base function x\n y, value of the base function y\n \"\"\"\n \n # unpack f\n function = f[0]\n\n #base\n if function == \"x\":\n return x\n elif function == \"y\":\n return y\n else: \n\n # find input values\n input1 = evaluate_random_function(f[1],x,y) \n if len(f) 
== 3: # if input2 exists\n input2 = evaluate_random_function(f[2],x,y) \n \n #function definitions\n if function == \"prod\":\n return input1*input2\n elif function == \"diff_halves\":\n return (input1 - input2) / 2.0\n elif function == \"sin_pi\":\n return sin(input1*pi)\n elif function == \"cos_pi\":\n return cos(input1*pi)\n elif function == \"squared\":\n return input1**2\n\n## SS: Failed 2 of my tests:\n# FAILED - remap_interval_unit_tests()\n# Test 1 FAILED: \n# Input value: 175\n# Input range: [0, 350]\n# Output range: [-1, 1]\n# Expected Output: 0.0\n# Actual Output: -1\n# Test 4 FAILED: \n# Input value: 0\n# Input range: [-1, 1]\n# Output range: [0, 255]\n# Expected Output: 127.5\n# Actual Output: 0\ndef remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n \"\"\" Maps the input value that is in the interval [input_interval_start, input_interval_end]\n to the output interval [output_interval_start, output_interval_end]. The mapping\n is an affine one (i.e. output = input*c + b).\n \n inputs: val, value to be remapped\n input_interval_start, min of starting range\n input_interval_end, max of starting range\n output_interval_start, min of desired range\n output_interval_end, max of desired range\n \"\"\"\n \n # val = val - input_interval_start\n # val = val / (input_interval_end - input_interval_start)\n # val = val * (output_interval_end - output_interval_start)\n # val = val + output_interval_start\n \n # return val\n\n slope = (float(output_interval_end) - float(output_interval_start))/(float(input_interval_end) - float(input_interval_start))\n return slope * (val - float(input_interval_start)) + float(output_interval_start)\n \n \n # name compliments of \"A Funny Thing Happened on the Way to the Forum\" \n # come see the FWOP performance the 20th, 21st, 27th, or 28th at Sorenson \n # theater\n \n## SS: You know what this function reminds me of? 
Pretty Little Liars :) and yes, I watch it :) \ndef pretty_little_picture(): \n # build functions for the primary colors\n blue = build_random_function(3,7)\n red = build_random_function(3,7)\n green = build_random_function(3,7)\n \n # create an empty color image using PIL \n img_size = 350\n im = Image.new(\"RGB\",(img_size,img_size))\n\n for i in range(0,img_size -1):\n x = remap_interval(i,0.0,img_size -1.0,-1.0,1.0)\n for k in range(0,img_size -1):\n y = remap_interval(i,0.0,img_size-1.0,-1.0,1.0)\n \n # find individual color values\n b = evaluate_random_function(blue, x, y)\n r = evaluate_random_function(red, x, y)\n g = evaluate_random_function(green, x, y)\n \n # remap to color scale\n b = int(remap_interval(b,0.0,img_size-1,0.0,255.0))\n r = int(remap_interval(r,0.0,img_size-1,0.0,255.0))\n g = int(remap_interval(g,0.0,img_size-1,0.0,255.0))\n \n # set pixels\n\n ## SS: the line below gave me errors, I see you've changed it since\n ## im.putpixel([i,k], (int(r),int(g),int(b))) \n\n ## im.putpixel((i,k), [r,g,b])\n im.putpixel((i,k), (r,g,b))\n \n im.save(\"image.bmp\") \n \n## SS: All of the images that I saw were completely black. 
This does not appear to be working correctly.\nif __name__ == '__main__':\n pretty_little_picture()\n ","sub_path":"hw4/random_art.py","file_name":"random_art.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"223332175","text":"from sys import stdin\n\nS = stdin.readline().rstrip()\n\nlst = ['dream', 'dreamer', 'erase', 'eraser']\n\nans = True\nwhile len(S) > 0:\n\n # print(S)\n\n if S[len(S) - 7:] == lst[1]:\n S = S[:len(S) - 7]\n elif S[len(S) - 6:] == lst[3]:\n S = S[:len(S) - 6]\n elif S[len(S) - 5:] == lst[0] or S[len(S) - 5:] == lst[2]:\n S = S[:len(S) - 5]\n else:\n ans = False\n break\n\nif (ans):\n print('YES')\nelse:\n print('NO')\n","sub_path":"practice/abc049c.py","file_name":"abc049c.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"436980114","text":"from image import ImageCaptcha\nimport numpy as np\nimport random\n\n\nimport string\ncharacters = string.digits + string.ascii_uppercase\n\nwidth, height, n_len, n_class = 170, 80, 4, len(characters)\n\n'''Test'''\n# generator = ImageCaptcha(width=width, height=height)\n# random_str = ''.join([random.choice(characters) for j in range(n_len)])\n# print(random_str)\n# img = generator.generate_image(random_str)\n# img.show()\n# plt.imshow(img)\n# plt.title(random_str)\n\ndef gen(batch_size=32):\n X = np.zeros((batch_size, height, width, 3), dtype=np.uint8)\n y = [np.zeros((batch_size, n_class), dtype=np.uint8) for i in range(n_len)]\n generator = ImageCaptcha(width=width, height=height)\n while True:\n for i in range(batch_size):\n random_str = ''.join([random.choice(characters) for j in range(n_len)])\n img = generator.generate_image(random_str)\n img.show()\n X[i] = img\n for j, ch in enumerate(random_str):\n y[j][i, :] = 0\n y[j][i, characters.find(ch)] = 1\n yield X, y\n\ndef decode(y):\n y = 
np.argmax(np.array(y), axis=2)[:,0]\n return ''.join([characters[x] for x in y])\n\nX, y = next(gen(1))\nprint(decode(y))\n\n'''DL'''\nfrom keras.models import *\nfrom keras.layers import *\n\ninput_tensor = Input((height, width, 3))\nx = input_tensor\nfor i in range(4):\n x = Convolution2D(32*2**i, 3, 3, activation='relu')(x)\n x = Convolution2D(32*2**i, 3, 3, activation='relu')(x)\n x = MaxPooling2D((2, 2))(x)\n\nx = Flatten()(x)\nx = Dropout(0.25)(x)\nx = [Dense(n_class, activation='softmax', name='c%d'%(i+1))(x) for i in range(4)]\nmodel = Model(input=input_tensor, output=x)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])","sub_path":"captcha_create/captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"505713830","text":"import os\n\nimport cv2\nfrom flask import *\n\nfrom base import *\nfrom base.com.dao.registration_dao import UserDAO\n\n\n@app.route('/')\ndef index():\n return render_template('login.html')\n\n\n@app.route('/user_login', methods=[\"POST\"])\ndef user_login():\n user_email_id = request.form.get('email')\n session['user_email_id'] = user_email_id\n user_dao = UserDAO()\n user_vo_list = user_dao.view_email_id()\n for user_details in user_vo_list:\n if (user_details.user_email_id) == user_email_id:\n session['user_name'] = user_details.user_name\n return redirect(url_for('login_with_face'))\n else:\n continue\n else:\n flash('email not registred......')\n return redirect(url_for('index'))\n\n\n@app.route('/login_with_face')\ndef login_with_face():\n name_of_user = None\n recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()\n recognizer.read(\"TrainingImageLabel/\" + session['user_email_id'] + \".yml\")\n print(\"TrainingImageLabel/\" + session['user_email_id'] + \".yml\")\n print(session['user_email_id'])\n harcascadePath = 
\"C:/Users/bansi/PycharmProjects/login_with_face/base/static/haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(harcascadePath);\n cam = cv2.VideoCapture(0)\n font = cv2.FONT_HERSHEY_SIMPLEX\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)\n Id, conf = recognizer.predict(gray[y:y + h, x:x + w])\n print(conf)\n if (conf < 20):\n tt = str(Id) + \"-\" + session['user_name']\n return render_template('welcome.html', username=session['user_name'])\n else:\n Id = 'Unknown'\n tt = str(Id)\n if (conf > 55):\n noOfFile = len(os.listdir(\"ImagesUnknown\")) + 1\n cv2.imwrite(\"ImagesUnknown\\Image\" + str(noOfFile) + \".jpg\", im[y:y + h, x:x + w])\n cv2.putText(im, str(tt), (x, y + h), font, 1, (255, 255, 255), 2)\n\n cv2.imshow('im', im)\n if (cv2.waitKey(1) == ord('q')):\n break\n cam.release()\n cv2.destroyAllWindows()\n","sub_path":"base/com/controller/login_controller.py","file_name":"login_controller.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"610345510","text":"import json\n\nfrom django.views import View\nfrom django.http import HttpResponse, JsonResponse\n\nfrom products.models import Product, Color, ProductColor, Image, Category, ProductApplyOn, ApplyOn\n\nclass ProductListView(View):\n def get(self, request):\n products = Product.objects.select_related('category').prefetch_related('productapplyon_set__apply_on','productcolor_set__image_set','productcolor_set__color')\n\n product_list = [{\n 'name' : product.name,\n 'price' : product.price,\n 'product_id' : product.id,\n 'category' : product.category.name,\n 'apply_on' : [applyon.apply_on.name for applyon in product.productapplyon_set.all()],\n 'product_image' : 
product.productcolor_set.first().image_set.get().product_image,\n 'model_image' : product.productcolor_set.first().image_set.get().model_image, \n 'colors' : [{\n 'color_id' : color.color_id,\n 'color_name' : color.color.name if color.color_id else ''\n } for color in product.productcolor_set.all()]\n } for product in products]\n \n return JsonResponse({\"product_list\": product_list}, status=200)\n\nclass ProductDetailView(View):\n def get(self, request, product_id):\n try:\n product = Product.objects.prefetch_related('productcolor_set__image_set', 'productcolor_set__color').get(id=product_id)\n\n product_info = [{ \n 'name' : product.name,\n 'description' : product.description,\n 'super_tip' : product.super_tip,\n 'size' : product.size,\n 'good_to_know' : product.good_to_know,\n 'contains' : product.contains,\n 'price' : product.price\n }]\n \n \n color_list = [{\n 'color_id' : product.color_id,\n 'color_name' : product.color.name if product.color_id else \"\",\n 'product_image' : product.image_set.get().product_image,\n 'model_image' : product.image_set.get().model_image,\n 'detail1_image' : product.image_set.get().detail1_image,\n 'detail2_image' : product.image_set.get().detail2_image\n } for product in product.productcolor_set.all()]\n\n product_pairs = Product.objects.filter(category_id=product.category_id).exclude(id=product_id)[:2]\n \n pair_with = [{\n \"name\" : product.name,\n \"price\" : product.price,\n \"product_image\" : product.productcolor_set.first().image_set.get().product_image,\n \"product_id\" : product.id\n } for product in product_pairs]\n\n recommendations = Product.objects.all().exclude(id=product_id)\n\n you_may_also_like = [{\n 'name' : recommendation.name,\n 'price' : recommendation.price,\n 'product_id' : recommendation.id,\n 'product_image' : recommendation.productcolor_set.first().image_set.get().product_image,\n 'model_image' : recommendation.productcolor_set.first().image_set.get().model_image,\n } for recommendation in 
recommendations]\n\n return JsonResponse({\n \"product_info\" : product_info, \n \"color_list\" : color_list,\n \"pair_with\" : pair_with,\n \"you_may_also_like\" : you_may_also_like,\n }, status=200)\n\n except Product.DoesNotExist:\n return JsonResponse({\"message\" : \"Product does not exist\"}, status=400)\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"431072793","text":"import sys\nimport random\n\nrandom.seed(sys.argv[1])\nsampleSize = int(sys.argv[2])\ninputFilename = sys.argv[3]\noutputFilename = sys.argv[4]\n\nwith open(inputFilename) as inputFile:\n lines = inputFile.readlines()\n\nwith open(outputFilename, 'w') as outputFile:\n for i in xrange(0, sampleSize):\n randomPosition = random.randint(0, len(lines))\n outputFile.write(lines[randomPosition])\n\n","sub_path":"bv/randomLines.py","file_name":"randomLines.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353192927","text":"#!/usr/bin/env python3\n# TFG: David Ubide Alaiz\nimport numpy as np\nfrom sympy import *\nfrom copy import deepcopy\nimport random\n\nclass CPN():\n\n def __init__(self,name=''):\n self.cpnName=name\n self.listPlaces={}\n self.list_places=[]\n self.listTrans=[]\n self.listArcs=[]\n self.listColors=[]\n self.composed_color = []\n\n def addPlace(self,place):\n self.listPlaces[place.placeName]=place\n\n def addTransition(self,trans):\n self.listTrans.append(trans)\n\n def addArc(self,arc):\n self.listArcs.append(arc)\n\n def addColor(self,color):\n self.listColors.append(color)\n\n def __str__(self):\n return(\"\"\"CPN: {}\\nPlaces: {}\\nTransitions: {}\\nColors: {}\"\"\".format(self.cpnName,self.listPlaces.keys(),self.listTrans,self.listColors))\n\n def generateXML(self,Id):\n \"\"\"generate a xml file for cpn tool\"\"\"\n #Init of 
the string containing xml code\n xml=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n\n ###Declarations\n xml+=\"\"\"\\n\"\"\"\n\n #Priorities\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"Standard priorities\\n\"\"\"\n xml+=\"\"\"val P_HIGH = 100;\\nval P_HIGH = 100;\\n\\n\"\"\"\n xml+=\"\"\"val P_NORMAL = 1000;\\nval P_NORMAL = 1000;\\n\\n\"\"\"\n xml+=\"\"\"val P_LOW = 10000;\\nval P_LOW = 10000;\\n\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n\n #Colors\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"Declarations\\n\"\"\"\n vars=\"\"\n for color in self.listColors:\n xml += \"\"\"\\n\"\"\"\n xml += \"\"\"{}\\n\"\"\".format(color.colName)\n xml += \"\"\"\\n\"\"\".format(color.colName)\n xml += \"\"\"colset {} = string;\"\"\".format(color.colName)\n xml += \"\"\"\\n\"\"\"\n\n #Var associated with colors\n vars+=\"\"\"\\n\\n{}\\n\\n\"\"\".format(color.colName)\n vars+=\"\"\"{}\\n\"\"\".format(color.colVar)\n vars+=\"\"\"var {} : {};\\n\"\"\".format(color.colVar,color.colName)\n vars+=\"\"\"\\n\"\"\"\n for composed_c in self.composed_color:\n xml += \"\"\"\\n\"\"\"\n xml += \"\"\"{}\\n\"\"\".format(composed_c.colName)\n xml += \"\"\"\\n\"\"\"\n for new_c in composed_c.listaColores:\n xml += \"\"\"{}\\n\"\"\".format(new_c.colName)\n xml += \"\"\"\\n\"\"\"\n xml += \"\"\"colset {} = product \"\"\".format(composed_c.colName)\n primero = True\n for new_c2 in composed_c.listaColores:\n if primero:\n xml+=\"\"\" {}\"\"\".format(new_c2.colName)\n primero = False\n else:\n xml += \"\"\" * {}\"\"\".format(new_c2.colName)\n\n xml += \"\"\" \\n\\n\"\"\"\n\n xml+=vars\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\"\n ###\n\n ###Main Page\n idPage=Id\n Id+=1\n xml+=\"\"\"\\n\\n\"\"\".format(idPage)\n\n #Places\n places=\"\"\n for pl in self.listPlaces:\n currPl=self.listPlaces[pl]\n places+=\"\"\"\\n\"\"\".format(currPl.id)\n places+=\"\"\"{}\\n\"\"\".format(currPl.placeName)\n places+=\"\"\"\\n\"\"\" #Just for visual conveniance\n 
places+=\"\"\"\\n\"\"\"\n places+=\"\"\"\\n\"\"\"\n if currPl.compose_color:\n places += \"\"\"{}\\n\"\"\".format(currPl.compose_color.colName)\n else:\n places+=\"\"\"{}\\n\"\"\".format(currPl.placeColor.colName)\n places+=\"\"\"\\n\"\"\"\n places+=\"\"\"\\n\"\"\"\n places+=\"\"\"\\n\"\"\"\n if currPl.marks == 0:\n places+=\"\"\"\\n\"\"\"\n else:\n places+=\"\"\"{}`"{}"\\n\"\"\".format(currPl.marks,currPl.initMarking)\n places+=\"\"\"\\n\\n\"\"\"\n\n xml+=places\n\n #Transitions\n\n transi=\"\"\n for tr in self.listTrans:\n transi+=\"\"\"\\n\"\"\".format(tr.id)\n transi+=\"\"\"{}\\n\"\"\".format(tr.transName)\n transi+=\"\"\"\\n\"\"\" #For visual conveniance\n transi+=\"\"\"\\n\"\"\"\n\n xml+=transi\n\n #Arcs\n\n arcs=\"\"\n for a in self.listArcs:\n arcs+=\"\"\"\\n\"\"\".format(a.type)\n arcs+=\"\"\"\\n\"\"\".format(a.trans.id)\n arcs+=\"\"\"\\n\"\"\".format(a.place.id)\n arcs+=\"\"\"\\n\"\"\"\n place_arc = a.place\n if place_arc.compose_color:\n arcs += \"\"\"(\"\"\"\n primero = True\n for color_a in place_arc.compose_color.listaColores:\n if primero:\n arcs+=\"\"\"{}\"\"\".format(color_a.colName)\n primero = False\n else:\n arcs += \"\"\",{}\"\"\".format(color_a.colName)\n arcs+= \"\"\")\\n\"\"\"\n else:\n arcs += \"\"\"{}\\n\"\"\".format(a.expr)\n arcs+=\"\"\"\\n\\n\"\"\"\n\n xml+=arcs\n\n\n xml+=\"\"\"\\n\"\"\"\n\n #instance\n idInstance=Id\n Id+=1\n xml+=\"\"\"\\n\\n\\n\"\"\".format(idInstance,idPage)\n\n #binder\n idBinder=Id\n Id+=1\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\".format(idBinder)\n idSheet=Id\n Id+=1\n xml+=\"\"\"\\n\"\"\"\n xml+=\"\"\"\\n\"\"\".format(idSheet,idInstance)\n xml+=\"\"\"\\n\\n\\n\"\"\"\n xml+=\"\"\"\\n\\n\"\"\"\n xml+=\"\"\"\\n\\n\\n\"\"\"\n xml+=\"\"\"\\n\\n\"\"\"\n\n #Monitors\n xml+=\"\"\"\\n\"\"\"\n\n\n #End of xml code\n xml+=\"\"\"\\n\\n\"\"\"\n\n #create output file\n output=open(\"./processmining/static/{}.cpn\".format(self.cpnName),'w+')\n output.write(xml)\n\n\n\n\n\nclass Place():\n\n def 
__init__(self,name,col,mark,number_of_marks,id,resource_list_,composeColor):\n self.placeName=name\n self.placeColor=col\n self.initMarking=mark\n self.id=id\n self.marks = number_of_marks\n self.resource_list = resource_list_\n self.compose_color = composeColor\n\n def __repr__(self):\n return(\"{}\".format(self.placeName))\n\n def __str__(self):\n return(\"Place: {}, {}, initMarking {}\".format(self.placeName,self.placeColor,self.initMarking))\n\n def __eq__(self,other):\n return(self.id==other.id)\n\n\nclass Transition():\n\n def __init__(self,name,id,origen,destino):\n self.transName=name\n self.id=id\n self.place_orig= origen\n self.place_final = destino\n self.listArcs=[]\n\n def __repr__(self):\n return(self.transName)\n\n def __eq__(self,other):\n return(self.id==other.id)\n\n def addArc(self,arc):\n self.listArcs.append(arc)\n\n\n\nclass Arc():\n\n def __init__(self,typ,P,T,exp=''):\n #typ must be PtoT or TtoP\n try:\n if typ==\"PtoT\" or typ==\"TtoP\":\n self.type=typ\n else:\n raise ValueError(\"Wrong Arc type: {}\".format(typ))\n except ValueError as ve:\n print(ve)\n\n self.expr=exp\n self.place=P\n self.trans=T\n\n def __repr__(self):\n if self.type=='PtoT':\n return(\"{} from {} to {}, expr={}\".format(self.type,self.place.placeName,self.trans.transName,self.expr))\n else:\n return(\"{} from {} to {}, expr={}\".format(self.type,self.trans.transName,self.place.placeName,self.expr))\n\n\nclass Color():\n\n def __init__(self,name,items,var):\n self.colName=name\n self.colset=items\n self.colVar=var\n\n def __repr__(self):\n return(\"Colset {}\".format(self.colName))\n\n def __eq__(self,other):\n return self.colName==other.colName\nclass ComposedColor ():\n\n def __init__(self,name,list_of_colors):\n self.colName=name\n self.listaColores = list_of_colors\n\n def __repr__(self):\n return(\"Colset {}\".format(self.colName))\n\n def __eq__(self,other):\n return self.colName==other.colName\n\ndef 
create_transitions(list_places,CPN,lastID,list_transitions):\n\n for key_places in list_transitions:\n origen_destino = key_places.split(\";\")\n origen = origen_destino[0]\n destino = origen_destino[1]\n orig = CPN.listPlaces[origen]\n dest = CPN.listPlaces[destino]\n new_tr = Transition(orig.placeName+dest.placeName ,lastID,orig,dest)\n CPN.addTransition(new_tr)\n lastID += 1\n\n orig = list_places[len(list_places)-1]\n dest = list_places[0]\n new_tr = Transition(orig.placeName + dest.placeName, lastID, orig, dest)\n CPN.addTransition(new_tr)\n lastID += 1\n return lastID\n\ndef create_arcs(list_transition,CPN):\n lofarcs = []\n for transition in list_transition:\n place_orig_t =transition.place_orig\n place_final_t= transition.place_final\n new_arc_orig = Arc(\"PtoT\",place_orig_t, transition,'paciente')\n new_arc_dest = Arc(\"TtoP\", place_final_t, transition, 'paciente')\n if place_orig_t.resource_list:\n for resource_orig in place_orig_t.resource_list:\n resource_place = CPN.listPlaces[resource_orig.colName]\n new_arc_dest_r = Arc(\"TtoP\", resource_place, transition, resource_place.placeName)\n lofarcs.append(new_arc_dest_r)\n CPN.addArc(new_arc_dest_r)\n\n if place_final_t.resource_list:\n for resource_dest in place_final_t.resource_list:\n resource_place = CPN.listPlaces[resource_dest.colName]\n new_arc_dest_r = Arc(\"PtoT\", resource_place, transition, resource_place.placeName)\n lofarcs.append(new_arc_dest_r)\n CPN.addArc(new_arc_dest_r)\n\n lofarcs.append(new_arc_orig)\n lofarcs.append(new_arc_dest)\n CPN.addArc(new_arc_orig)\n CPN.addArc(new_arc_dest)\n return lofarcs\n\ndef create_list_recource_colors(list_resources,dict_resources,CPN):\n\n dict_resources_1 = dict()\n for resource_1 in list_resources:\n new_color = Color(resource_1, [], resource_1)\n dict_resources_1[resource_1] = new_color\n CPN.addColor(new_color)\n\n\n\n new_dict_resource = dict()\n for resource_key in dict_resources.keys():\n new_dict_resource[resource_key]=[]\n for resource_2 in 
dict_resources[resource_key]:\n new_dict_resource[resource_key].append(dict_resources_1[resource_2])\n\n\n return new_dict_resource\n\ndef create_composed_color(list_resources,CPN,color_inicial):\n new_name =color_inicial.colName\n for resource in list_resources:\n new_name+=resource.colName\n new_list = list_resources + [color_inicial]\n new_color = ComposedColor(new_name,new_list)\n if not new_color in CPN.composed_color:\n CPN.composed_color.append(new_color)\n return new_color\n else:\n for color in CPN.composed_color:\n if color.colName == new_color.colName:\n return color\n\n\n\ndef create_CPN(CPN_name, CPN_list_places,CPN_list_resources,CPN_list_transitions,CPN_dict_resources_places):\n\n\n test=CPN(CPN_name)\n Id=1\n ################\n # Data #\n ################\n #P=['Iniciorp','p2','p3'] #Set of places\n # S = ['Iniciorp;p2', 'p2;p3']\n #Recursos = ['medico', 'enfermera']\n # dict_resources = dict()\n # dict_resources['p2'] = ['medico', 'enfermera']\n\n P = CPN_list_places\n R=['paciente1','paciente2'] #Set of robots\n S = CPN_list_transitions\n Recursos = CPN_list_resources\n dict_resources = CPN_dict_resources_places\n\n\n\n PACIENTE=Color('paciente',R,'paciente')\n\n test.addColor(PACIENTE)\n\n # Create places and colors for resources\n\n\n\n places_resources = create_list_recource_colors(Recursos,dict_resources,test)\n\n Y=[''] #Set of tasks\n for resource in Recursos:\n newplace = Place(resource, PACIENTE, resource, 0, Id,[],[])\n newplace.marking = newplace.initMarking.replace('`', '*')\n test.addPlace(newplace)\n Id += 1\n\n #\n ################\n\n\n\n\n\n\n # Add Create new\n\n List_places= []\n for newP in P:\n if newP in places_resources.keys():\n nuevos_recursos = places_resources[newP]\n new_col = create_composed_color(nuevos_recursos,test,PACIENTE)\n newplace = Place(newP, PACIENTE, 'paciente', 0, Id, places_resources[newP],new_col)\n else:\n newplace = Place(newP, PACIENTE, 'paciente', 0, Id, [],[])\n\n newplace.marking = 
newplace.initMarking.replace('`', '*')\n test.addPlace(newplace)\n List_places.append(newplace)\n Id+=1\n\n Id = create_transitions(List_places,test,Id,S)\n list_arcs= create_arcs(test.listTrans,test)\n test.generateXML(Id)\n","sub_path":"processmining/for_medical.py","file_name":"for_medical.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"526306739","text":"import pysnow\nimport nexpose\nimport json\nfrom lab_owners_snow import get_lab_owner_by_ip\n\nconfig_file_path = 'C:\\\\nexposeCreds.json'\nwith open(config_file_path, 'r') as f:\n config = json.load(f)\n report = nexpose.generate_report(config)\n\n found_owners = {}\n\n # ip_address\n for vuln in report:\n # each 'vuln' is a python dict\n ip = vuln['ip_address']\n\n # Get all lab owners this IP might belong to\n if 'ip' not in found_owners:\n lab_owner_row = get_lab_owner_by_ip(ip)\n # Add found lab owner to dict for quick lookups\n found_owners[ip] = lab_owner_row\n else:\n lab_owner_row = found_owners[ip]\n\n print(lab_owner_row)\n exit(0)\n","sub_path":"MatchOwner.py","file_name":"MatchOwner.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"135540818","text":"from django.core.management.base import BaseCommand\nfrom django_seed import Seed\nfrom django.contrib.auth import get_user_model\nimport random\nfrom itertools import combinations, permutations\n\nfrom movies.models import Movie, Article, Comment, SimpleRating, DetailedRating\n\nUser = get_user_model()\n\n\nclass UserSeeder():\n def __init__(self, number=100):\n self.seeder = Seed.seeder()\n self.number = number\n\n def execute(self):\n self.seeder.add_entity(User, self.number, {\n 'is_staff': 0,\n 'is_superuser': 0,\n 'password': lambda x: User.objects.make_random_password(length=100),\n })\n return self.seeder.execute()\n\n\nclass 
SimpleRatingGenerator():\n def __init__(self, users, movie_user_idx_combinations, max_number=1000, movies=None):\n self.users = users or User.objects.all()\n\n self.movie_user_idx_combinations = movie_user_idx_combinations\n self.combination_length = len(self.movie_user_idx_combinations)\n\n self.number_of_insertions = max_number\n if self.number_of_insertions > self.combination_length:\n self.number_of_insertions = self.combination_length\n\n self.movie_user_idx_combinations = random.sample(\n self.movie_user_idx_combinations, self.number_of_insertions)\n\n self.movies = movies or Movie.objects.all()\n\n self.ratings = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0)\n self.ratings_upper_idx = len(self.ratings) - 1\n\n def execute(self):\n inserted_pks = [0] * self.number_of_insertions\n for i in range(self.number_of_insertions):\n movie_idx = self.movie_user_idx_combinations[i][0]\n user_idx = self.movie_user_idx_combinations[i][1]\n kwargs = {\n 'movie': self.movies[movie_idx],\n 'user': self.users[user_idx],\n 'rating': self.ratings[random.randint(0, self.ratings_upper_idx)],\n }\n new_rating = SimpleRating.objects.create(**kwargs)\n inserted_pks[i] = new_rating.pk\n return inserted_pks\n\n\nclass DetailedRatingGenerator():\n def __init__(self, users, movie_user_idx_combinations, max_number=1000, movies=None):\n self.users = users or User.objects.all()\n\n self.movie_user_idx_combinations = movie_user_idx_combinations\n self.combination_length = len(self.movie_user_idx_combinations)\n\n self.number_of_insertions = max_number\n if self.number_of_insertions > self.combination_length:\n self.number_of_insertions = self.combination_length\n\n self.movie_user_idx_combinations = random.sample(\n self.movie_user_idx_combinations, self.number_of_insertions)\n\n self.movies = movies or Movie.objects.all()\n\n 
self.rating_candidate1 = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0)\n self.rating_candidate2 = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0)\n self.rating_candidate3 = (\n 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5)\n self.rating_candidate4 = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0)\n self.rating_candidate5 = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0)\n self.rating_candidate6 = (0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5,\n 4.0, 4.5, 5.0, 3.0, 3.5, 4.0, 4.5, 4.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0, 4.5, 5.0)\n\n self.rating_sets = ((self.rating_candidate1, len(\n self.rating_candidate1) - 1), (self.rating_candidate2, len(self.rating_candidate2) - 1), (self.rating_candidate3, len(self.rating_candidate3) - 1), (self.rating_candidate4, len(self.rating_candidate4) - 1), (self.rating_candidate5, len(self.rating_candidate5) - 1), (self.rating_candidate6, len(self.rating_candidate6) - 1))\n self.rating_sets_upper_idx = len(self.rating_sets) - 1\n\n def execute(self):\n inserted_pks = [0] * self.number_of_insertions\n for i in range(self.number_of_insertions):\n movie_idx = self.movie_user_idx_combinations[i][0]\n user_idx = self.movie_user_idx_combinations[i][1]\n kwargs = {\n 'movie': self.movies[movie_idx],\n 'user': self.users[user_idx],\n 'originality': None,\n 'plot': None,\n 'cinematography': None,\n 'music_score': None,\n 'characters': None,\n 'entertainment_value': None,\n }\n criteria = ['originality', 'plot', 'cinematography',\n 'music_score', 'characters', 'entertainment_value']\n for i in 
range(len(criteria)):\n criterion = criteria[i]\n rating_candidate, candidate_upper_idx = self.rating_sets[i]\n kwargs[criterion] = rating_candidate[random.randint(\n 0, candidate_upper_idx)]\n\n new_rating = DetailedRating.objects.create(**kwargs)\n inserted_pks[i] = new_rating.pk\n return inserted_pks\n\n\nclass ArticleGenerator():\n def __init__(self, users=None, movies=None):\n self.users = users or User.objects.all()\n self.number_of_users = self.users.count()\n self.movies = movies or Movie.objects.all()\n self.number_of_movies = self.movies.count()\n self.number_of_insertions = self.number_of_movies // 5 * self.number_of_users\n self.contents = ['간만에 좋은 영화였습니다. 추천드려요~', '가족과 함께 보면 좋을 영화!',\n '예전에 보고 너무 인상 깊어서 다시 봤는데, 그 때는 느끼지 못했던 감성과 메세지를 얻어갑니다. 볼 때마다 감회가 새로운 영화..!',\n '이런 영화를 여태 안 보고 뭐했는지..!',\n '역시 믿고 보는 배우...! 믿고 보는 감독...! 최고입니다.',\n '인생 작품',\n '개인적으로 너무 좋아하는 영화라서 5번째 보는 중']\n self.number_of_contents = len(self.contents)\n\n def execute(self):\n nums = [i for i in range(self.number_of_movies)]\n for user in self.users:\n for movie in random.sample(list(self.movies), self.number_of_movies // 5):\n article = Article.objects.create(\n user=user, movie=movie, content=self.contents[random.randint(0, self.number_of_contents - 1)])\n\n\nclass CommentGenerator():\n def __init__(self, users=None, articles=None):\n self.users = users or User.objects.all()\n self.number_of_users = self.users.count()\n self.articles = articles or Article.objects.all()\n self.number_of_articles = self.articles.count()\n self.number_of_insertions = self.number_of_articles // 10 * self.number_of_users\n self.contents = [\n '동감..!', '저랑 취향이 비슷하신 듯!!', '오.. 
저도 같은 생각이에요', '22222222', 'ㅇㅇ 간만에 괜찮은 영화 발견', '한 번 봐야겠네요!']\n self.number_of_contents = len(self.contents)\n\n def execute(self):\n nums = [i for i in range(self.number_of_articles)]\n for user in self.users:\n for article in random.sample(list(self.articles), self.number_of_articles // 100):\n article = Comment.objects.create(\n user=user, article=article, content=self.contents[random.randint(0, self.number_of_contents - 1)])\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n user_seeder = UserSeeder(number=100)\n INSERTED_USER_PKS = list(user_seeder.execute().values())[0]\n INSERTED_USERS = [User.objects.get(\n pk=INSERTED_USER_PKS[i]) for i in range(len(INSERTED_USER_PKS))]\n\n MOVIES = Movie.objects.all()\n MOVIE_USER_IDX_COMBINATIONS = [(i, j) for i in range(\n len(MOVIES)) for j in range(len(INSERTED_USERS))]\n simple_rating_generator = SimpleRatingGenerator(users=INSERTED_USERS, movie_user_idx_combinations=MOVIE_USER_IDX_COMBINATIONS,\n max_number=len(INSERTED_USERS)*len(MOVIES), movies=MOVIES)\n simple_rating_generator.execute()\n\n detailed_rating_generator = DetailedRatingGenerator(users=INSERTED_USERS, movie_user_idx_combinations=MOVIE_USER_IDX_COMBINATIONS,\n max_number=(len(INSERTED_USERS)*len(MOVIES))//10, movies=MOVIES)\n detailed_rating_generator.execute()\n article_generator = ArticleGenerator()\n article_generator.execute()\n comment_generator = CommentGenerator()\n comment_generator.execute()\n","sub_path":"movies/management/commands/custom_seed.py","file_name":"custom_seed.py","file_ext":"py","file_size_in_byte":9457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"79861501","text":"# some header here\nimport rdflib as rdf\nfrom onto_graph import OntoGraph\nimport injector as inj\n\n\ndef detect_false_sameas(same_as, g1, g2):\n \"\"\"\n Given some sameAs links, checks if U1 sameAs U2 and p is a common property of U1 and U2\n if p(U1, x) AND p(U2, y) and if x == y then 
the sameAs property is valid\n valid sameAs properties are added to a new dictionnary true_sameAs\n \"\"\"\n true_sameAs = {}\n wrong_sameas_count = 0\n\n # for each sameAs statements\n for U1, U2 in same_as.items():\n common_props = get_common_prop(U1, U2, g1.graph, g2.graph) # All the common properties between U1 and U2\n\n FPcount = 0 # count the number of functional properties\n SameFPcount = 0 # count the number of functional properties with x == y\n for p in common_props:\n if p in g1.functional_properties and p in g2.functional_properties: # functional properties filtering\n FPcount = FPcount + 1 # we found a functional property\n objs1 = list(g1.graph.objects(rdf.URIRef(U1), p))\n objs2 = list(g2.graph.objects(rdf.URIRef(U2), p))\n if len(objs1) == 1 and len(objs2) == 1 and objs1[0] == objs2[0]:\n SameFPcount = SameFPcount + 1 # we found a functional property validating x == y\n\n # retrieve the true sameAs\n if FPcount > 0:\n if SameFPcount / FPcount >= 0.5:\n if U1 not in true_sameAs:\n true_sameAs[U1] = U2\n else:\n wrong_sameas_count += 1 # also want to have the number of wrong sameAs detected\n\n # print(\"True same as ratio : \", len(true_sameAs) / len(same_as))\n\n return (len(true_sameAs), wrong_sameas_count)\n\n\ndef get_common_prop(U1, U2, G1, G2):\n \"\"\"\n This function gets the common properties between 2 URIs subjects in 2 different ontologies\n return the rdf property\n \"\"\"\n pred_obj1 = set([p for p, o in list(G1.predicate_objects(rdf.URIRef(U1)))])\n pred_obj2 = set([p for p, o in list(G2.predicate_objects(rdf.URIRef(U2)))])\n return pred_obj1.intersection(pred_obj2)\n\n\n############################\n# For testing purpose only #\n############################\nif __name__ == '__main__':\n print('Testing the validator...')\n\n # what we want: inject a number of erroneous sameAs links to a refalign file\n # by using URI's from that refalign's folder AND the source ontology\n\n # create the graphs, then extract its functional properties\n 
g_0 = OntoGraph('../data/000/onto.owl')\n g_0.extract_func_properties()\n\n g_1 = OntoGraph('../data/001/onto.owl')\n g_1.extract_func_properties()\n\n to_validate = inj.extract_sameas('../data/001/err_refalign.rdf')\n\n detect_false_sameas(same_as=to_validate, g1=g_0, g2=g_1)\n","sub_path":"code/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"434909725","text":"from django.http import HttpResponseRedirect, HttpResponse\r\nfrom django.shortcuts import render, render_to_response, RequestContext\r\nfrom helpers.helpers import *\r\nfrom pis_system.models import *\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.contrib.auth.decorators import login_required, user_passes_test\r\nfrom django.contrib import messages\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.views.decorators.http import require_http_methods\r\nfrom django.views.generic import View, ListView\r\nimport json\r\n\r\nSYSTEM_NAME = \"System Settings\"\r\n\r\nclass Settings(View):\r\n\r\n template_name = './settings/settings.html'\r\n def get(self, request, *args, **kwargs):\r\n return render(request, self.template_name, {'system_name': SYSTEM_NAME, 'cur_menu': 'system_setting'})\r\n\r\nclass Signatories(View):\r\n \r\n def post_create_update_signatories(self):\r\n request_params = json.loads(self.request.body)\r\n signatory = Signatories.get()\r\n signatory.elem_coord = request_params['elem_coord']\r\n signatory.hs_coord = request_params['elem_coord']\r\n signatory.registrar = request_params['elem_coord']\r\n signatory.sys_pres = request_params['elem_coord']\r\n signatory.save()\r\n \r\n data = {'id' : signatory.id,\r\n 'elem_coord' : signatory.elem_coord,\r\n 'hs_coord' : signatory.hs_coord,\r\n 'registrar' : signatory.registrar,\r\n 'sys_pres' : signatory.sys_pres\r\n }\r\n\r\n json_response = json.dump(data)\r\n return 
HttpResponse(json_response, content_type=\"application/json\")\r\n\r\n def get_signatories(self):\r\n signatory = Signatories.get()\r\n data = {'id' : signatory.id,\r\n 'elem_coord' : signatory.elem_coord,\r\n 'hs_coord' : signatory.hs_coord,\r\n 'registrar' : signatory.registrar,\r\n 'sys_pres' : signatory.sys_pres\r\n }\r\n json_response = json.dump(data)\r\n return HttpResponse(json_response, content_type=\"application/json\")\r\n\r\n\r\n","sub_path":"settings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"363023654","text":"from typing import List, Iterable, Tuple, Dict\nfrom enum import Enum\nimport urllib.request\nfrom requests.exceptions import ConnectionError\nfrom requests import get, post, Response\nfrom json.decoder import JSONDecodeError\n\nclass ModelLevel(Enum):\n DIV = 'div'\n TOKEN = 'token'\n DOC = 'doc'\n SPAN = 'span'\n\n @staticmethod\n def map(level_str):\n level_map = {}\n for item in ModelLevel:\n level_map[item.value] = item\n return level_map[level_str]\n\nclass MetisAPIClient():\n\n def __init__(self, host: str, base_url: str = '/api/v1'):\n self.api_url = f'{host}{base_url}'\n\n def _invoke_model(self, level: ModelLevel, data, clf_key: str = '') -> Response:\n try:\n \n if level == ModelLevel.DOC:\n url = f'{self.api_url}/clf/{level.value}'\n else:\n url = f'{self.api_url}/clf/{level.value}/{clf_key}'\n \n r = post(url, json=data)\n resp = r.json()\n success = resp['success']\n\n #if not success :\n # print(f'{resp[\"message\"]}') \n\n return (success, resp)\n\n except ConnectionError as err : \n print(\"Connection error: {0}\".format(err)) \n return (False, None)\n \n except JSONDecodeError as err :\n print(\"JSON can't be decoded: {0}\".format(err)) \n return (False, None)\n\n\n @staticmethod\n def _result_list(results: List[Tuple[str, str]]) -> List[str]:\n in_seq = False\n result_list = []\n current_pos = 
[]\n \n for (val, pred) in results :\n \n # case entering\n if (not in_seq) and pred :\n in_seq = True\n current_pos.append(val)\n \n # case still in pos\n elif in_seq and pred :\n current_pos.append(val)\n \n # case leaving\n elif in_seq and (not pred) :\n in_seq = False\n result_list.append(\" \".join(current_pos))\n current_pos = []\n \n # case nothing\n #elif (not in_seq) and (not pred) :\n \n return result_list\n\n def div_extraction(self, divs: List[str], clause_type: str) -> List[str]:\n \"\"\" Take a list of divs and return the ones that are interpreted as part \n of a given clause_type.\n \"\"\"\n (success, resp) = self._invoke_model(ModelLevel.DIV, divs, clause_type)\n if success :\n results = resp['data']\n zipped = zip(divs, results)\n return(self._result_list(zipped))\n else :\n return None\n\n def span_extraction(self, spans: List[str], clause_type: str) -> List[str]:\n \"\"\" Take a list of spans and return the ones that are interpreted as part \n of a given clause_type.\n \"\"\"\n (success, resp) = self._invoke_model(ModelLevel.SPAN, spans, clause_type)\n if success :\n results = resp['data']\n zipped = zip(spans, results)\n return(self._result_list(zipped))\n else :\n return None\n\n def token_extraction(self, paragraph: str, token_type: str) -> List[str]:\n \"\"\" Take a paragraph and return the tokens that belong to the given token_type.\n \"\"\"\n (success, resp) = self._invoke_model(ModelLevel.TOKEN, [paragraph], token_type)\n if success :\n results = resp['data']\n return(self._result_list(results))\n else :\n return None\n\n def document_classification(self, content) -> str:\n \"\"\" Take a list of all spans or the raw text of a document and return its classification label.\n \"\"\"\n if isinstance(content, str):\n doc_content = content\n elif isinstance(content, Iterable):\n doc_content = \" \".join(content)\n else:\n raise TypeError(\"Unsupported type : \" + str(type(content)))\n\n (success, resp) = self._invoke_model(ModelLevel.DOC, 
doc_content)\n if success :\n result = resp['data']\n return result\n else :\n return None\n\n def list_models(self, level: ModelLevel = None) -> Dict[str, List[str]]:\n \"\"\" List all the models available in Metis. Optional level parameters can be used to filter on \n model level (doc, div, span, token).\n \"\"\"\n if level:\n url = f'{self.api_url}/clf/info/{level.value}'\n else:\n url = f'{self.api_url}/clf/info'\n\n r = get(url)\n resp = r.json()\n return resp\n\n def model_info(self, level: ModelLevel, model_identifier: str):\n \"\"\" Retrieve the detailed information for a specific model.\n \"\"\"\n url = f'{self.api_url}/clf/{level.value}/info/{model_identifier}'\n r = get(url)\n resp = r.json()\n\n if resp['success'] :\n result = resp['data']\n return result\n else :\n print(resp['message'])\n return None\n\n def list_token_keys(self):\n \"\"\" List all the token keys to use for the executing token extraction.\n \"\"\"\n token_models = self.list_models(ModelLevel.TOKEN)['token']\n\n keys_info = []\n\n for token_model in token_models:\n token_model_infos = self.model_info(ModelLevel.TOKEN, token_model)\n for klass in token_model_infos['classes']:\n keys_info.append((klass['code'], klass['name']))\n\n return keys_info\n","sub_path":"metis_api_client.py","file_name":"metis_api_client.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"412867156","text":"import os\nimport os.path\nimport tempfile\nimport shutil\nimport json\nfrom nose.tools import eq_\nfrom nose.tools import with_setup\nfrom build_pack_utils import utils\nfrom common.integration import ErrorHelper\nfrom common.components import BuildPackAssertHelper\nfrom common.components import HttpdAssertHelper\nfrom common.components import PhpAssertHelper\nfrom common.components import NoWebServerAssertHelper\nfrom common.components import BlackfireAssertHelper\nfrom common.components import 
HhvmAssertHelper\nfrom common.components import DownloadAssertHelper\nfrom common.base import BaseCompileApp\n\n\nblackfire = utils.load_extension('extensions/blackfire')\n\n\nclass TestBlackfire(object):\n def setUp(self):\n self.build_dir = tempfile.mkdtemp(prefix='build-')\n self.php_dir = os.path.join(self.build_dir, 'php', 'etc')\n os.makedirs(self.php_dir)\n shutil.copy('defaults/config/php/5.4.x/php.ini', self.php_dir)\n shutil.copy('defaults/config/php/5.4.x/php-fpm.conf', self.php_dir)\n\n def tearDown(self):\n if os.path.exists(self.build_dir):\n shutil.rmtree(self.build_dir)\n\n def testShouldCompileNotSet(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35'\n })\n ext = blackfire.BlackfireExtension(ctx)\n eq_(False, ext._should_compile())\n\n def testShouldCompileManualSet(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35',\n 'BLACKFIRE_SERVER_ID': 'TEST_SERVER_ID',\n 'BLACKFIRE_SERVER_TOKEN': 'TEST_SERVER_TOKEN'\n })\n ext = blackfire.BlackfireExtension(ctx)\n eq_(True, ext._should_compile())\n eq_('TEST_SERVER_ID', ext.server_id)\n eq_('TEST_SERVER_TOKEN', ext.server_token)\n\n def testShouldCompileServiceSet(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35',\n 'VCAP_SERVICES': {\n 'blackfire': [{\n 'credentials': {\n 'serverId': 'TEST_SERVER_ID',\n 'serverToken': 'TEST_SERVER_TOKEN'\n },\n 'label': 'blackfire'\n }]\n }\n })\n ext = blackfire.BlackfireExtension(ctx)\n eq_(True, ext._should_compile())\n eq_('TEST_SERVER_ID', ext.server_id)\n eq_('TEST_SERVER_TOKEN', ext.server_token)\n\n def testShouldCompileServiveAndManuelSet(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35',\n 'VCAP_SERVICES': {\n 'blackfire': [{\n 'credentials': {\n 'serverId': 'TEST_SERVICE_SERVER_ID',\n 'serverToken': 'TEST_SERVICE_SERVER_TOKEN'\n },\n 'label': 'blackfire'\n }] \n },\n 'BLACKFIRE_SERVER_ID': 
'TEST_MANUAL_SERVER_ID',\n 'BLACKFIRE_SERVER_TOKEN': 'TEST_MANUAL_SERVER_TOKEN',\n })\n ext = blackfire.BlackfireExtension(ctx)\n eq_(True, ext._should_compile())\n eq_('TEST_MANUAL_SERVER_ID', ext.server_id)\n eq_('TEST_MANUAL_SERVER_TOKEN', ext.server_token)\n\n def testUpdatePhpIni(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35',\n 'BLACKFIRE_SERVER_ID': 'TEST_SERVER_ID',\n 'BLACKFIRE_SERVER_TOKEN': 'TEST_SERVER_TOKEN'\n })\n ext = blackfire.BlackfireExtension(ctx)\n ext._should_compile()\n ext._update_php_ini()\n with open(os.path.join(self.php_dir, 'php.ini'), 'rt') as php_ini:\n lines = php_ini.readlines()\n eq_(True, lines.index('[blackfire]\\n') >= 0)\n eq_(True, lines.index('blackfire.server_id=TEST_SERVER_ID\\n') > 0)\n eq_(True, lines.index('blackfire.server_token=TEST_SERVER_TOKEN\\n') > 0)\n \n def testWriteAgentConfig(self):\n ctx = utils.FormattedDict({\n 'BUILD_DIR': self.build_dir,\n 'PHP_VERSION': '5.4.35',\n 'BLACKFIRE_SERVER_ID': 'TEST_SERVER_ID',\n 'BLACKFIRE_SERVER_TOKEN': 'TEST_SERVER_TOKEN'\n })\n ext = blackfire.BlackfireExtension(ctx)\n agent_config_path = os.path.join(self.build_dir, 'blackfire', 'agent', 'conf.ini')\n ext._should_compile()\n ext._write_agent_configuration(agent_config_path)\n with open(agent_config_path, 'rt') as agent_config:\n lines = agent_config.readlines()\n eq_(True, lines.index('[blackfire]\\n') >= 0)\n eq_(True, lines.index('server-id=e92fc80d-dc52-4cfb-8f4c-a8db940706f8\\n') >= 0)\n eq_(True, lines.index('server-token=101af42ab9afcd468a3d3e9f87565008b21262b6a3d7f50812d52c911ba3d698\\n') >= 0)\n\nclass TestCompileBlackfireWithPHP(BaseCompileApp):\n def __init__(self):\n self.app_name = 'app-1'\n\n def setUp(self):\n BaseCompileApp.setUp(self)\n os.environ['BLACKFIRE_SERVER_ID'] = 'TEST_SERVER_ID'\n os.environ['BLACKFIRE_SERVER_TOKEN'] = 'TEST_SERVER_TOKEN'\n os.environ['VCAP_APPLICATION'] = json.dumps({\n 'name': 'app-name-1'\n })\n\n def 
test_compile_php_with_blackfire(self):\n bp = BuildPackAssertHelper()\n blackfire = BlackfireAssertHelper()\n self.opts.set_web_server('httpd')\n output = ErrorHelper().compile(self.bp)\n blackfire.assert_contents_of_procs_file(self.build_dir)\n blackfire.assert_files_installed(self.build_dir)\n","sub_path":"tests/test_blackfire.py","file_name":"test_blackfire.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"457173245","text":"\"\"\"\nExercise 3\n\nCreate a program called fibonacci_linear.py\n\nRequirements\n\nGiven a term (n), determine the value of x(n).\nIn the fibonacci_linear.py program, create a function called fibonnaci. The function should take in an integer and return the value of x(n).\nThis problem must be solved WITHOUT the use of recursion.\nConstraints\nn >= 0\n\nAnswer below:\n\"\"\"\n\n\n\nn = int(input(\"Provide a number greater than or equal to 0: \"))\n\ndef fibonacci(n):\n first_num=0\n second_num=1\n\n if n>=0:\n for i in range(1,n,1):\n third_num = first_num + second_num\n first_num = second_num\n second_num = third_num\n print(third_num)\n else:\n print(\"You need to provide a valid input to run the function\")\n\n\nif n>=0 and n<=30:\n fibonacci(n)\nelse:\n print(\"You need to provide a valid input to run the function\")","sub_path":"fibonacci_linear.py","file_name":"fibonacci_linear.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"456961122","text":"#### SIMPLE BIRTH CALCULATOR ####\n#### PERMISSION IS HEREBY GRANTED TO ANY COPYRIGHT HOLDER ####\n\nimport datetime\nyear = datetime.datetime.now().year\n\nprint(\" B I R T H C A L C U L A T O R !\")\n\nwhile True:\n def birth_calculator():\n birth_year = int(input(\"Enter Birth Year: \"))\n age = (int(year) - birth_year)\n if birth_year >= year + 1:\n print(\"The birth year cannot be greater than 
present year!\")\n elif birth_year <= 0:\n print(\"The birth year cannot be less than 1\")\n else:\n print(f\"You Are {age} years old!\")\n exit()\n\n try:\n birth_calculator()\n\n except ValueError:\n print('Please Enter A Valid Number!')\n\n except KeyboardInterrupt:\n exit()\n","sub_path":"MONTH 5/birth_calculator.py","file_name":"birth_calculator.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"626831384","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport collections\nfrom std_msgs.msg import Header\nfrom sensor_msgs.msg import Image\nfrom numpy.random import uniform\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\nclass ImageModulation():\n\n def __init__(\n self, input_topic_name,\n output_topic_name, avg_time_stamp_delay,\n time_stamp_jitter, blur_factor,\n data_loss_percent, rate,\n start_with_full_queue):\n self.input_topic_name = input_topic_name\n self.output_topic_name = output_topic_name\n self.avg_time_stamp_delay = avg_time_stamp_delay\n self.time_stamp_jitter = time_stamp_jitter\n self.blur_factor = blur_factor\n self.data_loss_percent = data_loss_percent\n self.rate = rospy.Rate(rate)\n self.start_with_full_queue = start_with_full_queue\n\n self.im_datas = collections.deque(maxlen=blur_factor)\n self.headers = collections.deque(maxlen=blur_factor)\n self.curr_image = None\n self.curr_header = None\n self.last_image = None\n self.stamp = None\n self.float_time = None\n self.new_data = False\n self.bridge = CvBridge()\n\n self.image_sub = rospy.Subscriber(\n self.input_topic_name, Image, self.im_callback)\n self.image_pub = rospy.Publisher(\n self.output_topic_name, Image, queue_size=1)\n self.timer = rospy.Timer(\n rospy.Duration(1.0/rate), self.timer_cb)\n\n\n def timer_cb(self, data):\n if (len(self.im_datas) >= self.blur_factor):\n\n if self.new_data:\n 
self.curr_header = self.headers[0]\n #self.curr_image = self.im_datas[0]\n self.modulate_headers()\n self.modulate_image()\n self.new_data = False\n self.publish()\n\n def modulate_headers(self):\n self.stamp_to_float(self.curr_header.stamp)\n jitter = uniform(\n self.avg_time_stamp_delay - self.time_stamp_jitter,\n self.avg_time_stamp_delay + self.time_stamp_jitter)\n self.float_time += jitter\n self.float_to_stamp()\n head = Header()\n head.seq = self.curr_header.seq\n head.frame_id = self.curr_header.frame_id\n head.stamp = self.stamp\n self.curr_header = head\n\n def modulate_image(self):\n # Blending the images with 0.5 and 0.5\n im = self.im_datas[0]\n for i in range(self.blur_factor - 1):\n im = cv2.addWeighted(\n im, 0.5,\n self.im_datas[i], 0.5, 0)\n self.curr_image = im\n\n def float_to_stamp(self):\n self.stamp = rospy.Time().from_sec(self.float_time)\n\n def stamp_to_float(self, stamp):\n self.float_time = stamp.secs + stamp.nsecs / 1000000000.0\n\n def publish(self):\n\n rand = uniform(0.0,1.0)\n if rand > self.data_loss_percent:\n im = None\n try:\n im = self.bridge.cv2_to_imgmsg(self.curr_image, \"bgr8\")\n except CvBridgeError as e:\n print (e)\n if im:\n im.header = self.curr_header\n self.image_pub.publish(im)\n self.last_image = self.curr_image\n\n def im_callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n if self.start_with_full_queue:\n for i in range(self.blur_factor):\n self.im_datas.appendleft(cv_image)\n self.headers.appendleft(data.header)\n self.start_with_full_queue = False\n self.im_datas.appendleft(cv_image)\n self.headers.appendleft(data.header)\n self.new_data = True\n\n def run(self):\n while not rospy.is_shutdown():\n self.rate.sleep()\n\ndef main():\n rospy.init_node(\"ImageModulation\")\n\n default_rate = 20\n default_blur_factor = 2\n default_avg_time_stamp_delay = 0.02\n default_time_stamp_jitter = 0.01\n default_data_loss_percent = 0.1\n 
default_input_topic_name = \"/camera/rgb/image_raw\";\n default_output_topic_name = \"/camera/rgb/modified\";\n default_start_with_filled_queue = True\n\n rate = rospy.get_param(\n \"rate\", default_rate)\n blur_factor = rospy.get_param(\n 'blur_factor', default_blur_factor)\n avg_time_stamp_delay = rospy.get_param(\n 'avg_time_stamp_delay',\n default_avg_time_stamp_delay)\n time_stamp_jitter = rospy.get_param(\n 'time_stamp_jitter',\n default_time_stamp_jitter)\n data_loss_percent = rospy.get_param(\n 'data_loss_percent',\n default_data_loss_percent)\n input_topic_name = rospy.get_param(\n 'input_topic_name',\n default_input_topic_name)\n output_topic_name = rospy.get_param(\n 'output_topic_name',\n default_output_topic_name)\n start_with_full_queue = rospy.get_param(\n 'start_with_full_queue',\n default_start_with_filled_queue)\n\n\n image_mod = ImageModulation(\n input_topic_name,\n output_topic_name,\n avg_time_stamp_delay,\n time_stamp_jitter,\n blur_factor,\n data_loss_percent,\n rate,\n start_with_full_queue)\n\n image_mod.run()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/Detect_smoke_with_ROS/youbot_simulations/simulation_message_modulation/src/camera_image_modulation.py","file_name":"camera_image_modulation.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"17096172","text":"import cv2\nimport pytesseract\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nif __name__ == '__main__':\n image = cv2.imread('test-bg/shadow.jpg')\n result = image.copy()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n # # Remove horizontal lines\n # horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1))\n # remove_horizontal = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)\n # cnts = cv2.findContours(remove_horizontal, 
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n # for c in cnts:\n # cv2.drawContours(result, [c], -1, (255, 255, 255), 5)\n #\n # # Remove vertical lines\n # vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 40))\n # remove_vertical = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)\n # cnts = cv2.findContours(remove_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n # for c in cnts:\n # cv2.drawContours(result, [c], -1, (255, 255, 255), 5)\n thresh=255-thresh\n data = pytesseract.image_to_string(thresh)\n print(data)\n cv2.imshow('thresh', thresh)\n cv2.imshow('result', result)\n cv2.imwrite('result.png', thresh)\n cv2.waitKey()","sub_path":"threshextra.py","file_name":"threshextra.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"156402284","text":"\"\"\"User View tests.\"\"\"\n\n\nimport os\nfrom unittest import TestCase\n\nfrom models import db, User, Message, Likes, Follows\n\nos.environ['DATABASE_URL'] = os.environ.get('DATABASE_URL', \"postgresql:///warbler-test\")\n\nfrom app import app, CURR_USER_KEY\n\n# Create our tables (we do this here, so we only create the tables\n# once for all tests --- in each test, we'll delete the data\n# and create fresh new clean test data\n\ndb.create_all()\n\n\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\n\nclass UserViewTestCase(TestCase):\n \"\"\"Test views for users.\"\"\"\n\n def setUp(self):\n \"\"\"Create test client, add sample data.\"\"\"\n\n User.query.delete()\n Message.query.delete()\n\n self.client = app.test_client()\n\n self.testuser = User.signup(\n username = \"alice\",\n email = \"test@test.com\",\n password = \"testuser\",\n image_url = None\n )\n\n self.testuser2 = User.signup(\n username = \"bob\",\n email = \"other@test.com\",\n password = 
\"abcd1234efgh5678\",\n image_url = None\n )\n\n self.testuser3 = User.signup(\n username = \"carl\",\n email = \"number3@test.com\",\n password = \"djafaklmra\",\n image_url = None\n )\n\n self.testuser4 = User.signup(\n username = \"alvin\",\n email = \"alvin@test.com\",\n password = \"8675309\",\n image_url = None\n )\n\n db.session.commit()\n\n def tearDown(self):\n retval = super().tearDown()\n db.session.rollback()\n return retval\n\n def test_index(self):\n \"\"\"Checks that the index lists all users.\"\"\"\n\n with self.client as c:\n response = c.get(\"/users\")\n data = str(response.data)\n\n self.assertIn(\"@alice\", data)\n self.assertIn(\"@bob\", data)\n self.assertIn(\"@carl\", data)\n self.assertIn(\"@alvin\", data)\n\n def test_search(self):\n \"\"\"Test that searching for a user works as expected.\"\"\"\n\n with self.client as c:\n response = c.get(\"/users?q=al\")\n data = str(response.data)\n\n self.assertIn(\"@alice\", data)\n self.assertIn(\"@alvin\", data)\n\n self.assertNotIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", data)\n\n def test_show(self):\n \"\"\"Test that a valid user page loads.\"\"\"\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))\n\n def test_like(self):\n \"\"\"Tests that liking a message works correctly.\"\"\"\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].user_id, self.testuser.id)\n\n def test_unlike(self):\n \"\"\"Tests that 
unliking a message works correctly.\"\"\"\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), 0)\n\n def test_unauthorized_like(self):\n \"\"\"Tests that attempting to like/unlike a message whilst unauthorized is rejected.\"\"\"\n\n message = Message(text=\"hello world\", user_id=self.testuser2.id)\n \n db.session.add(message)\n db.session.commit()\n\n like = Likes(message_id=message.id, user_id=self.testuser.id)\n \n db.session.add(like)\n db.session.commit()\n \n message_id = message.id\n\n with self.client as c:\n response = c.post(f\"/messages/{message_id}/like\", follow_redirects=True)\n \n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(response.data))\n\n likes = Likes.query.filter(Likes.message_id==message_id).all()\n self.assertEqual(len(likes), Likes.query.count())\n\n def test_following(self):\n \"\"\"Tests that a user's following page works correctly.\"\"\"\n\n follow1 = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/following\")\n data = str(response.data)\n\n 
self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertIn(\"@carl\", data)\n self.assertNotIn(\"@alvin\", data)\n\n def test_followers(self):\n \"\"\"Tests that a user's followers page works correctly.\"\"\"\n\n follow1 = Follows(\n user_being_followed_id = self.testuser.id,\n user_following_id = self.testuser2.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/followers\")\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", data)\n self.assertNotIn(\"@alvin\", data)\n\n def test_unauthorized_following(self):\n \"\"\"Tests that accessing a following page without credentials is rejected.\"\"\"\n\n follow = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add(follow)\n db.session.commit()\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}/following\", follow_redirects=True)\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(\"@bob\", data)\n self.assertIn(\"Access unauthorized\", data)\n\n def test_unauthorized_followers(self):\n \"\"\"Tests that accessing a followers page without credentials is rejected.\"\"\"\n\n follow = Follows(\n user_being_followed_id = self.testuser.id,\n user_following_id = self.testuser2.id\n )\n\n db.session.add(follow)\n db.session.commit()\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}/followers\", follow_redirects=True)\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertNotIn(\"@bob\", data)\n self.assertIn(\"Access unauthorized\", 
data)\n","sub_path":"26/test_user_views.py","file_name":"test_user_views.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"363582333","text":"import pygame\nfrom random import randint\n\nbg_color = (255, 255, 255)\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\n\nSCREEN_WIDTH = 500\nSCREEN_HEIGHT = 800\n\nSCREEN_SCALE = (SCREEN_WIDTH, SCREEN_HEIGHT)\n\nclass World():\n def __init__(self):\n pass\n pass\n\nclass Wall():\n def __init__(self):\n # design\n self.width = SCREEN_WIDTH\n self.thick = 30\n self.x = 0\n self.y = 0\n self.loc = (self.x, self. y)\n self.color = BLACK\n\n # falling speed\n self.fall_vel = 16\n\n # randomly located hole\n self.hole_size = 100\n self.hole_loc = randint(70, 430)\n\n # scoring\n self.is_scored = False\n\n def fall(self):\n self.y += self.fall_vel\n \n def collision_area(self):\n x, y = self.hole_loc - self.hole_size // 2, self.y\n w, h = self.hole_size, self.thick\n return x, y, w, h\n \n\n def _update_loc(self):\n self.loc = (self.x, self.y)\n\nclass Walls():\n def __init__(self):\n self.walls = []\n\n def create_wall(self):\n new_wall = Wall()\n self.walls.append(new_wall)\n \n def update(self):\n for wall in self.walls:\n wall.fall()\n self._sanitize()\n \n def draw(self):\n for wall in self.walls:\n pygame.draw.rect(win, wall.color, (wall.x, wall.y, wall.hole_loc - wall.hole_size // 2, wall.thick))\n pygame.draw.rect(win, wall.color, (wall.x + wall.hole_loc + wall.hole_size // 2, wall.y, wall.width, wall.thick))\n \n def does_wall_exists(self):\n if len(self.walls) == 0:\n return False\n return True\n\n def _sanitize(self):\n for wall in self.walls:\n if wall.y > SCREEN_HEIGHT - wall.thick:\n self.walls.pop(0)\n\n\nclass Obstacle():\n def __init__(self):\n # design\n pass\n\nclass Player():\n def __init__(self):\n # design\n self.radius = 10\n self.x = SCREEN_WIDTH // 2\n self.y = SCREEN_HEIGHT - 300\n self.loc = (self.x, self.y)\n 
self.color = RED\n\n # jumping parameters\n self.jump_vel = 30 \n self.is_jumping = False\n self.jump_decay = 0.3\n self.jump_tick= 0\n\n # moving parameters\n self.move_vel = 10\n self.fall_vel = 0.5\n self.fall_tick = 1\n self.go = None\n\n # game reward\n self.score = 0\n\n # New jump\n def jump(self, direction):\n self.go = direction\n self.fall_tick = 1\n self.is_jumping = True\n\n # Initial Jumping X Movement\n if self.go == 'left':\n self.x -= self.move_vel\n if self.go == 'right':\n self.x += self.move_vel\n\n self.jump_tick = 0\n self.y -= self.jump_vel * (self.jump_decay ** self.jump_tick)\n self.jump_tick += 1\n\n self._update_loc()\n \n # Keep jumping\n def keep_jumping(self):\n self.y -= self.jump_vel * (self.jump_decay ** self.jump_tick)\n if self.jump_tick > 5:\n self.is_jumping = False\n self.jump_tick = 0\n self.jump_tick += 1\n self._update_loc()\n\n def move(self):\n if self.go == 'left':\n self.x -= self.move_vel\n if self.go == 'right':\n self.x += self.move_vel\n \n self._update_loc()\n\n def fall(self):\n self.y += int(self.fall_vel * (self.fall_tick ** 2))\n self.fall_tick += 1\n self._update_loc()\n \n def draw(self):\n pygame.draw.circle(win, self.color, self.loc, self.radius)\n\n def collision_area(self):\n return self.x, self.y, self.radius\n\n def _update_loc(self):\n self.loc = (int(self.x), int(self.y))\n\nwin = pygame.display.set_mode(SCREEN_SCALE)\n\ndef main():\n pygame.init()\n pygame.display.init()\n pygame.font.init()\n \n pygame.display.set_caption(\"Amazing Bricks\")\n win.fill(bg_color)\n\n clock = pygame.time.Clock()\n myfont = pygame.font.SysFont('monospace', 50)\n \n world = World()\n player = Player()\n walls = Walls()\n \n # looping parameters\n running = True\n tick = 0\n # main loop\n while running:\n # quit check\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n \n score_text = myfont.render(str(player.score), True, (0, 0, 0))\n win.blit(score_text, (30, 20))\n\n # player update\n 
keys = pygame.key.get_pressed()\n\n # if the player is jumping, make it keep jumping\n if player.is_jumping:\n player.keep_jumping()\n\n if keys[pygame.K_LEFT]:\n player.jump('left')\n if keys[pygame.K_RIGHT]:\n player.jump('right')\n \n player.move()\n player.fall()\n\n # wall update\n if tick == 25:\n walls.create_wall()\n tick = 0\n \n walls.update()\n\n # scoring\n for wall in walls.walls:\n if wall.y > player.y:\n if not wall.is_scored:\n wall.is_scored = True\n player.score += 1\n \n # collision detection\n\n\n # update screen\n walls.draw()\n player.draw()\n pygame.display.update()\n win.fill(bg_color)\n\n tick += 1\n \n pygame.quit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/game/bricks.py","file_name":"bricks.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"298799925","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('' , views.tweets , name='tweets' ),\r\n path('tweets' , views.tweets , name='tweets' ),\r\n path('compare' , views.compare , name='compare' ),\r\n path('add_classifier' , views.add_classifier , name='add_classifier' ),\r\n path('delete_classifier' , views.delete_classifier , name='delete_classifier' ),\r\n path('classifiers' , views.classifiers , name='classifiers' ),\r\n ]\r\n","sub_path":"tweetsent/sentiment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"127535515","text":"#!/usr/bin/env python\n\nimport redis\n\n#r = redis.Redis(host='localhost',)\n\n# You can configure the Redis client to automatically convert responses from bytes obj to strings\n# NOTE: it just does this for print output it doesn't actually change the byte obj into a string!!!\nr = redis.Redis(host='localhost', charset=\"utf-8\", decode_responses=True)\n\ndef upVote(id):\n key = 
\"article:\" + id + \":votes\"\n r.incr(key)\n\ndef downVote(id):\n key = \"article:\" + id + \":votes\"\n r.decr(key)\n\ndef showResults(id):\n headlineKey = \"article:\" + id + \":headline\"\n voteKey = \"article:\" + id + \":votes\"\n #for headline, vote in r.mget([headlineKey, voteKey]):\n for headline, vote in r.mget(headlineKey, voteKey):\n print('The article ' + str(headline,'utf-8') + ' has ' + str(vote,'utf-8') + 'votes')\n\nif __name__ == \"__main__\":\n\n showResults('12345')\n","sub_path":"articles-popularity.py","file_name":"articles-popularity.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"294445324","text":"from django.db import models\n\n# Create your models here.\nclass Operacion(models.Model):\n fecha = models.DateTimeField('Fecha y Hora', blank = False, null = False, unique = False, auto_now_add = True)\n descripcion = models.TextField('Descripción', blank = False, null = False)\n enviada = models.BooleanField('Enviada', blank = True, default = False)\n ganancia = models.IntegerField('Ganancia (Pips)', blank = False, null = False, unique = False, default = 0)\n\n @classmethod\n def ganancia_maxima_acumulada(cls):\n ganancia_maxima_acumulada = 0\n for operacion in cls.objects.all():\n ganancia_maxima_acumulada += operacion.ganancia\n return ganancia_maxima_acumulada\n\n @classmethod\n def nueva_operacion(cls, descripcion):\n n_operacion = cls.objects.create(\n descripcion = descripcion,\n )\n print('Se ha registrado corrextamente la Operación')\n return n_operacion\n\n def modificar_operacion(self, ganancia):\n self.ganancia = ganancia\n self.save()\n\n class Meta:\n verbose_name_plural = 'Operaciones'\n\n def __str__(self):\n return '%s: %s' %(self.fecha, 
self.descripcion)","sub_path":"forexsignals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"500234983","text":"\"\"\"\nState Variables | Cannlytics Console\n\nAuthor: Keegan Skeate\nCompany: Cannlytics\nCreated: 10/15/2020\nUpdated: 5/5/2021\n\nRelatively static state variables for extra context on each page/screen.\nThe idea is to separate the material from the templates,\nwith the hope of better-maintained code.\n\nOptional: Turn into models and save in database.\n\"\"\"\n\ndata = {}\n\ndocs = {}\n\nmaterial = {\n \"dashboard\": {\n \"cards\": [\n {\n \"path\": \"analysis\",\n \"title\": \"Analysis\",\n \"description\": \"Manage analyses.\",\n \"image_path\": \"console/images/icons/multi-tone/certificate-flask.png\",\n },\n {\n \"path\": \"areas\",\n \"title\": \"Areas\",\n \"description\": \"Manage facilities and locations.\",\n \"image_path\": \"console/images/icons/multi-tone/lab.png\",\n },\n {\n \"path\": \"clients\",\n \"title\": \"Clients\",\n \"description\": \"Manage laboratory clients.\",\n \"image_path\": \"console/images/icons/multi-tone/clients.png\",\n },\n {\n \"path\": \"instruments\",\n \"title\": \"Instruments\",\n \"description\": \"Manage laboratory instruments.\",\n \"image_path\": \"console/images/icons/multi-tone/instrument.png\",\n },\n {\n \"path\": \"inventory\",\n \"title\": \"Inventory\",\n \"description\": \"Manage inventory, items, packages, and more.\",\n \"image_path\": \"console/images/icons/multi-tone/records.png\",\n },\n {\n \"path\": \"invoices\",\n \"title\": \"Invoices\",\n \"description\": \"Manage laboratory invoices.\",\n \"image_path\": \"console/images/icons/multi-tone/documents.png\",\n },\n {\n \"path\": \"samples\",\n \"title\": \"Samples\",\n \"description\": \"Manage laboratory samples.\",\n \"image_path\": \"console/images/icons/multi-tone/vials.png\",\n },\n {\n \"path\": 
\"results\",\n \"title\": \"Results\",\n \"description\": \"Manage laboratory results.\",\n \"image_path\": \"console/images/icons/multi-tone/certificate.png\",\n },\n {\n \"path\": \"staff\",\n \"title\": \"Staff\",\n \"description\": \"Manage laboratory staff.\",\n \"image_path\": \"console/images/icons/two-tone/two_tone_client_folder.png\",\n },\n {\n \"path\": \"transfers\",\n \"title\": \"Transfers\",\n \"description\": \"Manage sample transfers.\",\n \"image_path\": \"console/images/icons/two-tone/two_tone_clock.png\",\n },\n {\n \"path\": \"stats\",\n \"title\": \"Statistics\",\n \"description\": \"Manage laboratory statistics.\",\n \"image_path\": \"console/images/icons/two-tone/two_tone_graph.png\",\n },\n {\n \"path\": \"traceability\",\n \"title\": \"Traceability\",\n \"description\": \"Manage traceability integration and view audit logs.\",\n \"image_path\": \"console/images/icons/multi-tone/certificate-access.png\",\n },\n {\n \"path\": \"settings\",\n \"title\": \"Settings\",\n \"description\": \"Manage your user and organization settings.\",\n \"image_path\": \"console/images/icons/two-tone/two_tone_gears.png\",\n },\n # Plants, Harvests *Cultivator*\n # Sales (Transactions | Receipts) *Cultivator* *Processor* *Retailer*\n ],\n \"welcome_message\": {\n \"title\": \"Welcome to your new laboratory platform!\", # 🚀\n \"message\": \"Get started with simple and easy cannabis analytics.\",\n },\n \"organization_choices\": [\n {\n \"action\": \"Get started\",\n \"title\": \"🥼 For Labs\",\n \"description\": \"Start your lab workflow, manage your lab data, and issue your certificates. Start or join a lab.\",\n \"image\": \"console/images/illustrations/outline/lab_tablet.svg\",\n \"type\": \"lab\",\n },\n # {\n # \"action\": \"Begin now\",\n # \"title\": \"🌳 For Cultivators / Processors\",\n # \"description\": \"Start managing your lab results now. 
Start or join as a producer/processor to begin.\",\n # \"image\": \"console/images/illustrations/outline/lab_tablet.svg\",\n # \"type\": \"producer\",\n # },\n # {\n # \"action\": \"Explore for free\",\n # \"title\": \"📦 For Retailers\",\n # \"description\": \"Access lab data for your products quickly and easily. Begin today.\",\n # \"image\": \"console/images/illustrations/outline/lab_tablet.svg\",\n # \"type\": \"retailer\",\n # },\n # {\n # \"action\": \"Learn more\",\n # \"title\": \"🛍️ For Consumers\",\n # \"description\": \"Track your consumption. Log purchases, see your usage stats, and get lab data.\",\n # \"image\": \"console/images/illustrations/outline/lab_tablet.svg\",\n # \"type\": \"consumer\",\n # },\n # {\n # \"action\": \"Dive in\",\n # \"title\": \"🤝 For Everyone Else\",\n # \"description\": \"For all software integrators, researchers, and data seekers. Cannlytics has something for you.\",\n # \"image\": \"console/images/illustrations/outline/lab_desktop.svg\",\n # \"type\": \"integrator\",\n # },\n ],\n },\n \"analyses\": {\n \"breadcrumbs\": [\n {\"title\": \"Analysis\", \"url\": \"/analysis\"},\n {\"title\": \"Analyses\", \"active\": True},\n ],\n \"fields\": [\n {\"type\": \"text\", \"key\": \"name\", \"title\": \"Name\"},\n {\"type\": \"text\", \"key\": \"instrument\", \"title\": \"Instrument\"},\n {\"type\": \"text\", \"key\": \"analytes\", \"title\": \"Analytes\"},\n ],\n \"options\": [],\n },\n \"instruments\": {\n \"breadcrumbs\": [\n {\"title\": \"Analysis\", \"url\": \"/analysis\"},\n {\"title\": \"Instruments\", \"active\": True},\n ],\n \"fields\": [\n {\"type\": \"text\", \"key\": \"name\", \"title\": \"Name\"},\n {\"type\": \"text\", \"key\": \"analyes\", \"title\": \"Analyses\"},\n {\"type\": \"text\", \"key\": \"data_path\", \"title\": \"Data path\"},\n ],\n \"options\": [],\n },\n \"account\": {\n \"breadcrumbs\": [\n {\"title\": \"Settings\", \"url\": \"settings\"},\n {\"title\": \"Account\", \"active\": True},\n ],\n 
\"fields\": [\n {\"type\": \"email\", \"key\": \"email\", \"title\": \"Email\"},\n {\"type\": \"text\", \"key\": \"name\", \"title\": \"Name\"},\n {\"type\": \"text\", \"key\": \"position\", \"title\": \"Position\"},\n {\"type\": \"text\", \"key\": \"location\", \"title\": \"Location\"},\n # {\"type\": \"text\", \"key\": \"linkedin\", \"title\": \"LinkedIn\"},\n {\"type\": \"text\", \"key\": \"license\", \"title\": \"License\"},\n ],\n \"options\": [\n {\"title\": \"Change your password\", \"url\": \"/account/password-reset\"},\n {\"title\": \"Set your pin\", \"url\": \"/settings/account/pin\"},\n {\"title\": \"Set your signature\", \"url\": \"/settings/account/signature\"},\n ],\n },\n \"organizations\": {\n \"breadcrumbs\": [\n {\"title\": \"Settings\", \"url\": \"settings\"},\n {\"title\": \"Organizations\", \"active\": True},\n ],\n \"placeholder\": {\n \"action\": \"Start an organization\",\n \"height\": \"200px\",\n \"image\": \"console/images/illustrations/chemistry_scientist.svg\",\n \"title\": \"Create or join an organization\",\n \"message\": \"Add team members to your organization or join an organization to begin collaborating.\",\n \"url\": \"./organizations/new\",\n },\n \"fields\": [\n {\"type\": \"text\", \"key\": \"organization\", \"title\": \"Organization\"},\n {\"type\": \"text\", \"key\": \"trade_name\", \"title\": \"Trade Name\"},\n {\"type\": \"text\", \"key\": \"website\", \"title\": \"Website\"},\n {\"type\": \"text\", \"key\": \"phone\", \"title\": \"Phone\"},\n {\"type\": \"email\", \"key\": \"email\", \"title\": \"Email\"},\n {\"type\": \"text\", \"key\": \"linkedin\", \"title\": \"LinkedIn\"},\n {\"type\": \"text\", \"key\": \"address\", \"title\": \"Address\", \"secondary\": True},\n {\"type\": \"text\", \"key\": \"city\", \"title\": \"City\", \"secondary\": True},\n {\"type\": \"text\", \"key\": \"state\", \"title\": \"State\", \"secondary\": True},\n {\"type\": \"text\", \"key\": \"country\", \"title\": \"Country\", \"secondary\": 
True},\n {\"type\": \"text\", \"key\": \"zip_code\", \"title\": \"Zip Code\", \"secondary\": True},\n {\"type\": \"text\", \"key\": \"external_id\", \"title\": \"External ID\", \"secondary\": True},\n ],\n },\n \"pin\": {\n \"breadcrumbs\": [\n {\"title\": \"Settings\", \"url\": \"/settings\"},\n {\"title\": \"Account\", \"url\": \"/settings/account\"},\n {\"title\": \"Pin\", \"active\": True},\n ],\n },\n \"signature\": {\n \"breadcrumbs\": [\n {\"title\": \"Settings\", \"url\": \"/settings\"},\n {\"title\": \"Account\", \"url\": \"/settings/account\"},\n {\"title\": \"Signature\", \"active\": True},\n ],\n },\n \"templates\": {\n \"breadcrumbs\": [\n {\"title\": \"Intake\", \"url\": \"intake\"},\n {\"title\": \"Templates\", \"active\": True},\n ],\n },\n \"calendar\": {\n \"placeholder\": {\n \"action\": \"Schedule your first transfer\",\n \"height\": \"200px\",\n \"image\": \"console/images/illustrations/chemistry_scientist.svg\",\n \"title\": \"Awaiting your first transfer\",\n \"message\": \"Once you begin receiving transfers, your pickups and sample dropoffs will appear here.\",\n \"url\": \"settings/organizations/new\",\n },\n },\n \"logistics\": {\n \"tabs\": [\n {\"name\": \"Calendar\", \"section\": \"calendar\", \"url\": \"/logistics/calendar\"},\n {\"name\": \"Logs\", \"section\": \"logs\", \"url\": \"/logistics/logs\"},\n {\n \"name\": \"Analytics\",\n \"section\": \"analytics\",\n \"url\": \"/logistics/analytics\",\n },\n {\"name\": \"Map\", \"section\": \"map\", \"url\": \"/logistics/map\"},\n ],\n \"placeholder\": {\n \"action\": \"Begin analysis for analytics\",\n \"height\": \"200px\",\n \"image\": \"console/images/illustrations/chemistry_scientist.svg\",\n \"title\": \"Start your first analysis\",\n \"message\": \"Begin conducting analyses to unlock your analytics.\",\n \"url\": \"settings/organizations/new\",\n },\n },\n \"records\": {\n \"placeholder\": {\n \"action\": \"Add a client\",\n \"height\": \"200px\",\n \"image\": 
\"console/images/illustrations/chemistry_scientist.svg\",\n \"title\": \"Add your first client\",\n \"message\": \"Add a client to begin providing analyses.\",\n \"url\": \"records/new\",\n },\n \"client\": {\n \"breadcrumbs\": [\n {\"title\": \"Clients\", \"url\": \"/records\"},\n {\"title\": \"Client\", \"active\": True},\n ],\n \"fields\": [\n {\"type\": \"email\", \"key\": \"email\", \"title\": \"Email\"},\n {\"type\": \"text\", \"key\": \"name\", \"title\": \"Name\"},\n {\"type\": \"text\", \"key\": \"linkedin\", \"title\": \"LinkedIn\"},\n {\"type\": \"text\", \"key\": \"position\", \"title\": \"Position\"},\n {\"type\": \"text\", \"key\": \"location\", \"title\": \"Location\"},\n ],\n \"options\": [],\n },\n },\n}\n\n\nlayout = {\n \"sidebar\": {\n \"lab_index\": [\n {\n \"title\": \"Dashboard\",\n \"url\": \"/\",\n \"icon\": \"grid\",\n \"slug\": \"\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Analysis\",\n \"url\": \"/analysis\",\n \"icon\": \"edit\",\n \"slug\": \"analysis\",\n \"user_type\": \"lab\",\n },\n # {\n # \"title\": \"Areas\",\n # \"url\": \"/areas\",\n # \"icon\": \"grid\",\n # \"slug\": \"areas\",\n # },\n {\n \"title\": \"Clients\",\n \"url\": \"/clients/records\",\n \"icon\": \"users\",\n \"slug\": \"clients\",\n \"user_type\": \"lab\",\n },\n {\n \"title\": \"Contacts\",\n \"url\": \"/clients/records\",\n \"icon\": \"users\",\n \"slug\": \"clients\",\n \"user_type\": [None, \"producer\", \"processor\", \"retailer\",\n \"consumer\", \"integrator\"],\n },\n {\n \"title\": \"Instruments\",\n \"url\": \"/instruments\",\n \"icon\": \"server\",\n \"slug\": \"instruments\",\n \"user_type\": \"lab\",\n },\n {\n \"title\": \"Intake\",\n \"url\": \"/intake\",\n \"icon\": \"log-in\",\n \"slug\": \"intake\",\n \"user_type\": \"lab\",\n },\n {\n \"title\": \"Inventory\",\n \"url\": \"/inventory\",\n \"icon\": \"archive\",\n \"slug\": \"inventory\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Invoices\",\n \"url\": \"/invoices\",\n \"icon\": 
\"credit-card\",\n \"slug\": \"invoices\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Purchases\",\n \"url\": \"/purchases\",\n \"icon\": \"shoping-bag\",\n \"slug\": \"purchases\",\n \"user_type\": [\"consumer\"],\n },\n {\n \"title\": \"Samples\",\n \"url\": \"/samples\",\n \"icon\": \"edit-2\",\n \"slug\": \"samples\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Results\",\n \"url\": \"/results\",\n \"icon\": \"award\",\n \"slug\": \"results\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Stats\",\n \"url\": \"/stats\",\n \"icon\": \"activity\",\n \"slug\": \"stats\",\n \"user_type\": [\"producer\", \"processor\", \"retailer\",\n \"consumer\", \"integrator\"],\n },\n {\n \"title\": \"Transfers\",\n \"url\": \"/transfers\",\n \"icon\": \"navigation\",\n \"slug\": \"transfers\",\n \"user_type\": [\"producer\", \"processor\", \"retailer\", \n \"integrator\"],\n },\n {\n \"title\": \"Traceability\",\n \"url\": \"/traceability\",\n \"icon\": \"share-2\",\n \"slug\": \"traceability\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Settings\",\n \"url\": \"/settings\",\n \"icon\": \"settings\",\n \"slug\": \"settings\",\n \"user_type\": \"*\",\n },\n {\n \"title\": \"Help\",\n \"url\": \"/help\",\n \"icon\": \"help-circle\",\n \"slug\": \"help\",\n \"user_type\": \"*\",\n },\n ],\n },\n}\n\nmaterial[\"get-started\"] = {\n \"account\": {\"fields\": material[\"account\"][\"fields\"]},\n \"organization\": {\"fields\": material[\"organizations\"][\"fields\"]},\n \"pricing_tiers\": [\n {\n \"name\": \"Free\",\n \"price\": \"👐\",\n \"color\": \"green\",\n \"action\": \"Sign up for free\",\n \"url\": \"https://console.cannlytics.com\",\n \"attributes\": [\n \"All software\",\n \"All community material\",\n \"Email support\",\n \"Voting rights\",\n ],\n },\n {\n \"name\": \"Pro\",\n \"price\": \"$250 / mo.\",\n \"color\": \"orange\",\n \"action\": \"Get started\",\n \"url\": \"/contact\",\n \"attributes\": [\n \"A company website\",\n \"A full-suite LIMS\",\n \"A 
client portal\",\n \"Phone and digital support\",\n ],\n },\n {\n \"name\": \"Enterprise\",\n \"price\": \"$500 / mo.\",\n \"color\": \"purple\",\n \"action\": \"Contact us\",\n \"url\": \"/contact\",\n \"attributes\": [\n \"Traceability integration\",\n \"Early access to new features\",\n \"Around the clock support\",\n \"On-site support\",\n ],\n },\n ],\n}\n","sub_path":"console/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":17128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"294702628","text":"A = 'ABC'\r\nB = 'BABC'\r\nG = 'CCAABB'\r\nn = int(input())\r\nl = input()\r\nd = {'Adrian':0, 'Bruno':0, 'Goran':0}\r\nfor i in range(n):\r\n j = l[i]\r\n if A[i%3] == j:\r\n d['Adrian'] += 1\r\n if B[i%4] == j:\r\n d['Bruno'] += 1\r\n if G[i%6] == j:\r\n d['Goran'] += 1\r\nh = max(d.values())\r\nprint(h)\r\nfor i in d:\r\n if d[i] == h:\r\n print(i)\r\n","sub_path":"ptice.py","file_name":"ptice.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"106841041","text":"import urllib.request as url_req\nimport urllib.parse as ull_p\nimport re\nfrom html.parser import HTMLParser\nimport time\n\n\nclass G_Scraping(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.__v_list = dict()\n\n self.before_date = None\n self.before_tag = \"\"\n\n self.date_flg = 0\n self.next_exec_flg = 0\n self.time_flg = 0\n\n self.count = 0\n\n self.now_id = \"\"\n\n def handle_starttag(self, name, attrs):\n attrs = dict(attrs)\n if name == \"div\":\n if 'jscontroller' in attrs:\n print(attrs['jscontroller'])\n\n\nparser = G_Scraping()\nrequest = url_req.Request(\n url='https://play.google.com/store/apps/details?id=jp.co.cygames.Shadowverse&hl=ja&showAllReviews=true')\n\ntmp = url_req.urlopen(request, timeout=15)\n\ncharset = 
tmp.info().get_content_charset()\n\nparser.feed(str(tmp.read().decode(charset)))\n","sub_path":"Py_Batch/G_Scraiping.py","file_name":"G_Scraiping.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"49983920","text":"from copy import deepcopy\nR, C = map( int, input().split())\nsy, sx = map( int, input().split())\nsy -= 1\nsx -= 1\ngy, gx = map( int, input().split())\ngy -= 1\ngx -= 1\nB = [ list(input()) for _ in range(R)]\nans = 0\nS = [[sy, sx]]\nB[sy][sx] = '#'\nQ = []\nwhile len(S) != 0:\n T = deepcopy(S)\n ans += 1\n while len(T) != 0:\n P = T.pop()\n y, x = P[0], P[1]\n if B[max(0,y-1)][x] == '.':\n B[y-1][x] = '#'\n Q.append([y-1,x])\n if B[min(R-1,y+1)][x] == '.':\n B[y+1][x] = '#'\n Q.append([y+1,x])\n if B[y][max(x-1,0)] == '.':\n B[y][x-1] = '#'\n Q.append([y,x-1])\n if B[y][min(x+1,C-1)] == '.':\n B[y][x+1] = '#'\n Q.append([y,x+1])\n if B[gy][gx] == '#':\n break\n S = deepcopy(Q)\n Q = []\nprint(ans)\n \n \n \n \n\n","sub_path":"beginner/007/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"175651465","text":"from tkinter import *\nfrom tkinter import filedialog\n\n# Adding comment to initiate github Action\n# test\n\n\ndef main():\n\n file = filedialog.askopenfilename(filetypes = ((\"Text files\",\"*.txt\"),(\"all files\",\"*.*\")))\n\n window = Tk()\n\n window.title(\"Welcome to LikeGeeks app\")\n window.geometry(\"400x300\")\n\n lbl = Label(window, text=file, font=(\"arial bold\", 50))\n\n lbl.grid(column=200, row=200)\n\n\n window.mainloop()\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tk.py","file_name":"tk.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"55597708","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 
(62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/snurtle/config.py\n# Compiled at: 2012-08-03 08:35:35\nfrom xdg import BaseDirectory\nimport os, ConfigParser\n\nclass Configuration(object):\n\n @staticmethod\n def Path():\n path = os.path.join(BaseDirectory.xdg_config_home, 'snurtle')\n if not os.path.isdir(path):\n os.mkdir(path)\n path = os.path.join(path, 'config.ini')\n return path\n\n @staticmethod\n def Load():\n path = Configuration.Path()\n config = ConfigParser.RawConfigParser()\n if not os.path.isfile(path):\n config.add_section('Global')\n Configuration.SetHost(config, '')\n Configuration.SetLogin(config, '')\n Configuration.SetSecure(config, 0)\n else:\n config.read(path)\n return config\n\n @staticmethod\n def GetHost(config):\n if 'host' in config.options('Global'):\n hostname = config.get('Global', 'host')\n else:\n hostname = ''\n return hostname\n\n @staticmethod\n def SetHost(config, value):\n config.set('Global', 'host', value)\n\n @staticmethod\n def GetLogin(config):\n return config.get('Global', 'login')\n\n @staticmethod\n def SetLogin(config, value):\n config.set('Global', 'login', value)\n\n @staticmethod\n def GetSecure(config):\n if 'secure' in config.items('Global'):\n return config.get('Global', 'secure')\n else:\n return 0\n\n @staticmethod\n def SetSecure(config, value):\n config.set('Global', 'secure', value)\n\n @staticmethod\n def Save(config):\n path = Configuration.Path()\n wfile = open(path, 'wb')\n if wfile:\n config.write(wfile)\n wfile.close","sub_path":"pycfiles/snurtle-0.99.4-py2.7/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"442303502","text":"from django_tables.base import BaseTable, Rows, TableOptions\n\n\n__all__ = ('SQLTable',)\n\n\nclass SQLTableOptions(TableOptions):\n def __init__(self, 
options=None):\n super(SQLTableOptions, self).__init__(options)\n self.columns = getattr(options, 'columns', None)\n # Exclude is not currently supported:\n self.exclude = getattr(options, 'exclude', None)\n\n\nclass SQLRows(Rows):\n\n def __init__(self, *args, **kwargs):\n super(SQLRows, self).__init__(*args, **kwargs)\n\n def _reset(self):\n self._length = None\n\n def __len__(self):\n \"\"\"Use the queryset count() method to get the length, instead of\n loading all results into memory. This allows, for example,\n smart paginators that use len() to perform better.\n \"\"\"\n if getattr(self, '_length', None) is None:\n self._length = self.table.data.count()\n return self._length\n\n # for compatibility with QuerySetPaginator\n count = __len__\n\n\nclass SQLTable(BaseTable):\n rows_class = SQLRows\n\n def __init__(self, data=None, *args, **kwargs):\n if data is None:\n raise ValueError(\"Table must be instantiated with data=queryset\")\n else:\n self.queryset = data\n super(SQLTable, self).__init__(self.queryset, *args, **kwargs)\n\n def _validate_column_name(self, name, purpose):\n # Kind of overkill to ensure that the column is in the\n # query cursor. You'll get an error, don't worry.\n return True\n\n def _build_snapshot(self):\n \"\"\"Overridden. The snapshot in this case is simply a queryset\n with the necessary filters etc. 
attached.\n \"\"\"\n # reset caches\n self._columns._reset()\n self._rows._reset()\n qs = self.queryset\n if self.order_by:\n actual_order_by = self._resolve_sort_directions(self.order_by)\n qs = qs.order_by(*self._cols_to_fields(actual_order_by))\n return qs\n","sub_path":"src/olympia/editors/sql_table.py","file_name":"sql_table.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"598053945","text":"arc = []\nword = input('Latin word: ')\nif word == '':\n print('empty')\nelse:\n while word != '':\n if word.endswith('tur') and not word.endswith('batur'): #больше постоянных различий прошедшего ��ассива отнастоязего я не нашла\n arc.append(word + '\\n')\n word = input()\nwith open('text.txt', 'w', encoding='utf-8') as f:\n f.writelines(arc)\n","sub_path":"hw5/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462439739","text":"\"\"\"\r\nTests that direct routes are the default fallback.\r\n\r\nCreates a topology like the following:\r\n\r\nh1 == s1 -- h2\r\n \\ \\ \r\n c1 -- s3\r\n\r\nSends packet from h2 to h1, c1 should be triggered. disconnect s1 and s3. s1 should remember that it can reach h1 directly\r\nso h1 will still be reached without c1 being touched. 
\r\n\"\"\"\r\n\r\nimport sim\r\nimport sim.api as api\r\nimport sim.basics as basics\r\n\r\nfrom tests.test_simple import GetPacketHost, NoPacketHost\r\nfrom tests.test_link_weights import CountingHub\r\n\r\ndef launch():\r\n h2 = NoPacketHost.create('h2')\r\n h1 = GetPacketHost.create('h1')\r\n\r\n s1 = sim.config.default_switch_type.create('s1')\r\n s3 = sim.config.default_switch_type.create('s3')\r\n c1 = CountingHub.create('c1')\r\n h1.linkTo(s1, latency=10)\r\n s1.linkTo(s3, latency=1)\r\n s1.linkTo(h2, latency=1)\r\n h1.linkTo(c1, latency=1)\r\n c1.linkTo(s3, latency=1)\r\n\r\n def test_tasklet():\r\n yield 15\r\n\r\n api.userlog.debug('Sending ping from h2 to h1')\r\n h2.ping(h1)\r\n\r\n yield 5\r\n\r\n if c1.pings == 1:\r\n api.userlog.debug('The ping took the right path')\r\n good = True\r\n else:\r\n api.userlog.error('Wrong initial path!')\r\n good = False\r\n s1.unlinkTo(s3)\r\n\r\n yield 0.1\r\n api.userlog.debug('Sending ping from h2 to h1')\r\n\r\n h2.ping(h1)\r\n yield 15\r\n if c1.pings == 1 and h1.pings == 2:\r\n \tapi.userlog.debug('Good path!')\r\n \tgood = True and good \r\n else:\r\n \tapi.userlog.error('Wrong, fallback direct path not used!')\r\n \tgood = False\r\n \r\n import sys\r\n sys.exit(0 if good else 1)\r\n\r\n api.run_tasklet(test_tasklet)","sub_path":"proj2_routing/tests/test_fallback_direct.py","file_name":"test_fallback_direct.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"69209279","text":"import itertools\nimport json\nfrom typing import Dict, List\nfrom overrides import overrides\n\nimport numpy as np\n\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.data import Tokenizer\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.fields.field import Field\nfrom allennlp.data.fields import TextField, LabelField, ListField, 
ArrayField, MultiLabelField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\nfrom allennlp.data.tokenizers import WordTokenizer\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.tokenizers.word_splitter import SimpleWordSplitter, WordSplitter, SpacyWordSplitter\n\n\n@DatasetReader.register(\"SeqClassificationReader\")\nclass SeqClassificationReader(DatasetReader):\n \"\"\"\n Reads a file from Pubmed-RCT dataset. Each instance contains an abstract_id, \n a list of sentences and a list of labels (one per sentence).\n Input File Format: Example abstract below:\n {\n \"abstract_id\": 5337700, \n \"sentences\": [\"this is motivation\", \"this is method\", \"this is conclusion\"], \n \"labels\": [\"BACKGROUND\", \"RESULTS\", \"CONCLUSIONS\"]\n }\n \"\"\"\n\n def __init__(self,\n lazy: bool = False,\n token_indexers: Dict[str, TokenIndexer] = None,\n word_splitter: WordSplitter = None,\n tokenizer: Tokenizer = None,\n sent_max_len: int = 100,\n max_sent_per_example: int = 20,\n use_sep: bool = True,\n sci_sum: bool = False,\n use_abstract_scores: bool = True,\n sci_sum_fake_scores: bool = True,\n predict: bool = False,\n ) -> None:\n super().__init__(lazy)\n self._tokenizer = WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=False))\n self._token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n self.sent_max_len = sent_max_len\n self.use_sep = use_sep\n self.predict = predict\n self.sci_sum = sci_sum\n self.max_sent_per_example = max_sent_per_example\n self.use_abstract_scores = use_abstract_scores\n self.sci_sum_fake_scores = sci_sum_fake_scores\n\n @overrides\n def _read(self, file_path: str):\n file_path = cached_path(file_path)\n\n with open(file_path) as f:\n for line in f:\n json_dict = json.loads(line)\n instances = self.read_one_example(json_dict)\n for instance in instances:\n yield instance\n\n def read_one_example(self, json_dict):\n instances = []\n sentences = 
json_dict[\"sentences\"]\n\n if not self.predict:\n labels = json_dict[\"labels\"]\n else:\n labels = None\n\n confidences = json_dict.get(\"confs\", None)\n\n additional_features = None\n if self.sci_sum:\n if self.sci_sum_fake_scores:\n labels = [np.random.rand() for _ in sentences]\n else: \n labels = [s if s > 0 else 0.000001 for s in json_dict[\"highlight_scores\"]]\n\n if self.use_abstract_scores:\n features = []\n if self.use_abstract_scores:\n if self.sci_sum_fake_scores:\n abstract_scores = [np.random.rand() for _ in sentences]\n else:\n abstract_scores = json_dict[\"abstract_scores\"]\n features.append(abstract_scores)\n \n additional_features = list(map(list, zip(*features))) # some magic transpose function\n\n sentences, labels = self.filter_bad_sci_sum_sentences(sentences, labels)\n\n if len(sentences) == 0:\n return []\n\n for sentences_loop, labels_loop, confidences_loop, additional_features_loop in \\\n self.enforce_max_sent_per_example(sentences, labels, confidences, additional_features):\n\n instance = self.text_to_instance(\n sentences=sentences_loop,\n labels=labels_loop,\n confidences=confidences_loop,\n additional_features=additional_features_loop,\n )\n instances.append(instance)\n return instances\n\n def enforce_max_sent_per_example(self, sentences, labels=None, confidences=None, additional_features=None):\n \"\"\"\n Splits examples with len(sentences) > self.max_sent_per_example into multiple smaller examples\n with len(sentences) <= self.max_sent_per_example.\n Recursively split the list of sentences into two halves until each half\n has len(sentences) < <= self.max_sent_per_example. 
The goal is to produce splits that are of almost\n equal size to avoid the scenario where all splits are of size\n self.max_sent_per_example then the last split is 1 or 2 sentences\n This will result into losing context around the edges of each examples.\n \"\"\"\n if labels is not None:\n assert len(sentences) == len(labels)\n if confidences is not None:\n assert len(sentences) == len(confidences)\n if additional_features is not None:\n assert len(sentences) == len(additional_features)\n\n if len(sentences) > self.max_sent_per_example and self.max_sent_per_example > 0:\n i = len(sentences) // 2\n l1 = self.enforce_max_sent_per_example(\n sentences[:i], None if labels is None else labels[:i],\n None if confidences is None else confidences[:i],\n None if additional_features is None else additional_features[:i])\n l2 = self.enforce_max_sent_per_example(\n sentences[i:], None if labels is None else labels[i:],\n None if confidences is None else confidences[i:],\n None if additional_features is None else additional_features[i:])\n return l1 + l2\n else:\n return [(sentences, labels, confidences, additional_features)]\n\n def is_bad_sentence(self, sentence: str):\n if len(sentence) > 10 and len(sentence) < 600:\n return False\n else:\n return True\n\n def filter_bad_sci_sum_sentences(self, sentences, labels):\n filtered_sentences = []\n filtered_labels = []\n if not self.predict:\n for sentence, label in zip(sentences, labels):\n # most sentences outside of this range are bad sentences\n if not self.is_bad_sentence(sentence):\n filtered_sentences.append(sentence)\n filtered_labels.append(label)\n else:\n filtered_sentences.append(\"BADSENTENCE\")\n filtered_labels.append(0.000001)\n sentences = filtered_sentences\n labels = filtered_labels\n else:\n for sentence in sentences:\n # most sentences outside of this range are bad sentences\n if not self.is_bad_sentence(sentence):\n filtered_sentences.append(sentence)\n else:\n filtered_sentences.append(\"BADSENTENCE\")\n 
sentences = filtered_sentences\n\n return sentences, labels\n\n @overrides\n def text_to_instance(self,\n sentences: List[str],\n labels: List[str] = None,\n confidences: List[float] = None,\n additional_features: List[float] = None,\n ) -> Instance:\n if not self.predict:\n assert len(sentences) == len(labels)\n if confidences is not None:\n assert len(sentences) == len(confidences)\n if additional_features is not None:\n assert len(sentences) == len(additional_features)\n\n if self.use_sep:\n tokenized_sentences = [self._tokenizer.tokenize(s)[:self.sent_max_len] + [Token(\"[SEP]\")] for s in sentences]\n sentences = [list(itertools.chain.from_iterable(tokenized_sentences))[:-1]]\n else:\n # Tokenize the sentences\n sentences = [\n self._tokenizer.tokenize(sentence_text)[:self.sent_max_len]\n for sentence_text in sentences\n ]\n\n fields: Dict[str, Field] = {}\n fields[\"sentences\"] = ListField([\n TextField(sentence, self._token_indexers)\n for sentence in sentences\n ])\n\n if labels is not None:\n if isinstance(labels[0], list):\n fields[\"labels\"] = ListField([\n MultiLabelField(label) for label in labels\n ])\n else:\n # make the labels strings for easier identification of the neutral label\n # probably not strictly necessary\n if self.sci_sum:\n fields[\"labels\"] = ArrayField(np.array(labels))\n else:\n fields[\"labels\"] = ListField([\n LabelField(str(label)+\"_label\") for label in labels\n ])\n\n if confidences is not None:\n fields['confidences'] = ArrayField(np.array(confidences))\n if additional_features is not None:\n fields[\"additional_features\"] = ArrayField(np.array(additional_features))\n\n return Instance(fields)","sub_path":"sequential_sentence_classification/dataset_reader.py","file_name":"dataset_reader.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"432118056","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSPE GCS Competition\n\"\"\"\n\nimport 
config as cnfg\nimport utils as ut\nimport lasio\nimport os\nimport joblib\n# import collections\n# import pickle\nimport pandas as pd\nimport numpy as np\n# import matplotlib.pyplot as plt\n# import seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error as MSE\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom scipy.stats import randint as sp_randint\nfrom scipy.stats import uniform as sp_uniform\nimport lightgbm as lgb\nfrom sklearn.svm import OneClassSVM\n# import xgboost as xg\n\npd.set_option(\"max_columns\", None)\n\nfile_list = []\nfile_list += [file for file in os.listdir(os.curdir+'/train_data') \n if file.endswith(\".las\")]\ndf_pickle_fn_norm = cnfg.df_pickle_fn_norm\ndf_pickle_fn = cnfg.df_pickle_fn\ndf_pickle_fn_pt = cnfg.df_pickle_fn_pt\nscalar_fn = cnfg.scalar_fn\nmodel_fn = cnfg.model_fn\n\nmissing_value = cnfg.missing_value\nmissingness_thresh = cnfg.missingness_thresh\nvars_to_use = cnfg.vars_to_use\nresponse_var = cnfg.response_var\npred_vars = cnfg.pred_vars\nnonneg_vars = cnfg.nonneg_vars\nthresh = cnfg.thresh\nres_lags = cnfg.res_lags\ngr_lags = cnfg.gr_lags\nnphi_lags = cnfg.nphi_lags\nrhob_lags = cnfg.rhob_lags\ndtco_lags = cnfg.dtco_lags\nres_win = cnfg.res_win\ngr_win = cnfg.gr_win\nnphi_win = cnfg.nphi_win\ndtco_win = cnfg.dtco_win\nrhob_win = cnfg.rhob_win\n\nlog_mapping = pd.read_excel(\"Logs_Mapping.xlsx\", sheet_name=\"Distinct mnemonics\")\n\n\n# Check well relative locations:\n# lat = []\n# lon = []\n# inputlas = {}\n# for file in file_list:\n# print(file)\n# inputlas[file] = lasio.read(file) #Read file\n# lat.append(inputlas[file].well['SLAT'].value)\n# lon.append(inputlas[file].well['SLON'].value)\n\n# plt.scatter(x=lon, y=lat)\n# plt.show()\n\n\n# mnemonics_df = pd.DataFrame(\n# columns=[\n# \"FILE\",\n# \"LOG\",\n# \"UNIT\",\n# \"DESC\",\n# \"COUNT\",\n# \"MEAN\",\n# \"STD\",\n# \"MIN\",\n# \"25%\",\n# \"50%\",\n# \"75%\",\n# \"MAX\",\n# 
\"MISSINGNESS\",\n# ]\n# )\n\ntrain_df = pd.DataFrame()\ntest_df = pd.DataFrame()\nval_df = pd.DataFrame()\ninputlas = {}\n\n# Read files and get log stats in dataframe\nfor file in file_list:\n # file = '0e121cce5c23_TGS.las'\n # file = '1cf78b7ca1cc_TGS.las'\n # file = '4bc281e7f645_TGS.las'\n # file = '70a049901d0c_TGS.las'\n inputlas[file] = lasio.read('./train_data/'+file) # Read file\n print(file)\n\n df = inputlas[file].df() # Convert data to dataframe\n df = df.rename_axis(\"DEPT\").reset_index() # Create depth axis and reset index\n df = df.replace(missing_value, \"\") # Convert missing value validationt o null\n df = df.dropna(subset=[\"DTSM\"])\n des = pd.DataFrame(df.describe()) # Get data stats\n\n # for curves in inputlas.curves:\n # # if 'SFL' in curves.mnemonic:\n # # print(file)\n # # if curves.mnemonic not in mnemonics:\n # # print(curves.mnemonic)\n # curv_desc = [file, curves.mnemonic, curves.unit, curves.descr]\n # curv_stats = list(des.loc[:, curves.mnemonic].values)\n # missingness = 100*df[curves.mnemonic].isnull().mean()\n # curv_desc.extend(curv_stats)\n # curv_desc.extend([missingness])\n # temp_df = pd.DataFrame([curv_desc],\n # columns=['FILE', 'LOG', 'UNIT', 'DESC', 'COUNT',\n # 'MEAN', 'STD', 'MIN', '25%', '50%',\n # '75%', 'MAX', 'MISSINGNESS'])\n # temp_df = temp_df[temp_df['COUNT'] > 0]\n # mnemonics_df = mnemonics_df.append(temp_df)\n\n df = df.dropna(axis=1, how=\"all\")\n df = ut.log_renaming_shortlisting(df, log_mapping, response_var)\n if all(x in df.columns for x in vars_to_use):\n df = df[vars_to_use]\n df, high_missing_cols = ut.impute_missing_data(df, missingness_thresh)\n if len(high_missing_cols) > 0:\n df = df.dropna(axis=1, how=\"any\")\n if len(df.columns) == len(vars_to_use):\n # df = df[df[\"RESD\"] > 0] #remove negative resistivities\n # df = df[df[\"RESM\"] > 0]\n df[df['RESD'] <= 0] = 0.01\n df[df['RESM'] <= 0] = 0.01\n df[df[nonneg_vars] < 0] = 0 # remove negative values \n # df = ut.outlier_detection(df)\n df = 
ut.convert_res_to_log(df)\n # df = ut.create_lag_features(df, \"RESD\", lags=res_lags, wins=res_win)\n # df = ut.create_lag_features(df, \"RESM\", lags=res_lags, wins=res_win)\n # df = ut.create_lag_features(df, \"GR\", lags=gr_lags, wins=gr_win)\n # df = ut.create_lag_features(df, \"NPHI\", lags=nphi_lags, wins=nphi_win)\n # df = ut.create_lag_features(df, \"RHOB\", lags=rhob_lags, wins=rhob_win)\n # df = ut.create_lag_features(df, \"DTCO\", lags=dtco_lags, wins=dtco_win)\n \n # df = ut.normalize_cols(df)\n \n df_train, df_test = train_test_split(df, test_size = 0.2, random_state=11)\n df_test = ut.create_lag_features(df_test, \"RESD\", lags=res_lags, wins=res_win)\n df_test = ut.create_lag_features(df_test, \"RESM\", lags=res_lags, wins=res_win)\n df_test = ut.create_lag_features(df_test, \"GR\", lags=gr_lags, wins=gr_win)\n df_test = ut.create_lag_features(df_test, \"NPHI\", lags=nphi_lags, wins=nphi_win)\n df_test = ut.create_lag_features(df_test, \"RHOB\", lags=rhob_lags, wins=rhob_win)\n df_test = ut.create_lag_features(df_test, \"DTCO\", lags=dtco_lags, wins=dtco_win)\n \n df_train, df_val = train_test_split(df_train, test_size = 0.1, random_state=11)\n \n df_val = ut.create_lag_features(df_val, \"RESD\", lags=res_lags, wins=res_win)\n df_val = ut.create_lag_features(df_val, \"RESM\", lags=res_lags, wins=res_win)\n df_val = ut.create_lag_features(df_val, \"GR\", lags=gr_lags, wins=gr_win)\n df_val = ut.create_lag_features(df_val, \"NPHI\", lags=nphi_lags, wins=nphi_win)\n df_val = ut.create_lag_features(df_val, \"RHOB\", lags=rhob_lags, wins=rhob_win)\n df_val = ut.create_lag_features(df_val, \"DTCO\", lags=dtco_lags, wins=dtco_win)\n \n df_train = ut.outlier_detection(df_train)\n df_train = ut.create_lag_features(df_train, \"RESD\", lags=res_lags, wins=res_win)\n df_train = ut.create_lag_features(df_train, \"RESM\", lags=res_lags, wins=res_win)\n df_train = ut.create_lag_features(df_train, \"GR\", lags=gr_lags, wins=gr_win)\n df_train = 
ut.create_lag_features(df_train, \"NPHI\", lags=nphi_lags, wins=nphi_win)\n df_train = ut.create_lag_features(df_train, \"RHOB\", lags=rhob_lags, wins=rhob_win)\n df_train = ut.create_lag_features(df_train, \"DTCO\", lags=dtco_lags, wins=dtco_win)\n # df = df.dropna(axis=0, how=\"any\")\n\n print(f\"Appending {file} to main df\")\n # train_df = train_df.append(df)\n train_df = train_df.append(df_train)\n test_df = test_df.append(df_test)\n val_df = val_df.append(df_val)\n print(train_df.shape)\n print(test_df.shape)\n print(val_df.shape)\n \n# sns.pairplot(train_df, vars=vars_to_use, diag_kind='kde',\n# plot_kws = {'alpha': 0.6, 's': 30, 'edgecolor': 'k'})\n\ntrain_df.to_pickle('train_df.pkl')\ntest_df.to_pickle('test_df.pkl')\nval_df.to_pickle('val_df.pkl')\n\nval_df = pd.read_pickle(r'val_df.pkl')\ntest_df = pd.read_pickle(r'test_df.pkl')\n\ntrain_df_norm_x, scalar_x = ut.apply_minmaxscaler(train_df.drop([response_var], \n axis=1), \n train_df.drop([response_var], \n axis=1).columns)\ntrain_df_norm_y, scalar_y = ut.apply_minmaxscaler(train_df[[response_var]], \n [response_var])\njoblib.dump(scalar_x, 'scaler_x.pkl') \njoblib.dump(scalar_y, 'scaler_y.pkl') \n\ntrain_df_norm = pd.concat([train_df_norm_x, train_df_norm_y], axis=1)\n# train_df_norm, scaler = ut.powertransform_cols(train_df)\n# train_df_norm.to_pickle(df_pickle_fn_pt)\n# train_df_norm = train_df\ntrain_df_norm.to_pickle(df_pickle_fn_norm)\n\ntrain_x = train_df_norm_x\ntrain_y = train_df_norm_y\n\n# test_df_norm = pd.concat([test_df_norm_x, test_df_norm_y], axis=1)\n\nsvm = OneClassSVM(nu=0.1)\nyhat = svm.fit_predict(pd.DataFrame(val_df[response_var]))\nmask = yhat != -1\nval_df = val_df[mask]\n\nsvm = OneClassSVM(nu=0.1)\nyhat = svm.fit_predict(pd.DataFrame(test_df[response_var]))\nmask = yhat != -1\nval_df = test_df[mask]\n\ntest_x = ut.normalize_test(test_df.drop([response_var], axis=1), scalar_x)\ntest_y = ut.normalize_test(test_df[[response_var]], scalar_y)\n\nval_x = 
ut.normalize_test(val_df.drop([response_var], axis=1), scalar_x)\nval_y = ut.normalize_test(val_df[[response_var]], scalar_y)\n# val_df_norm = pd.concat([val_df_norm_x, val_df_norm_y], axis=1)\n\n# Basic Light GBM\nlgb_params = {\n # 'nfold': 5,\n \"boosting_type\": \"gbdt\",\n \"metric\": \"rmse\",\n \"objective\": \"regression\",\n # 'objective': 'tweedie',\n # 'tweedie_variance_power': 1.1,\n \"force_row_wise\": True,\n \"n_jobs\": -1,\n \"seed\": 11,\n \"learning_rate\": 0.075,\n \"feature_fraction\": 0.5,\n # \"sub_feature\" : 0.8,\n # \"sub_row\" : 0.75,\n \"lambda_l2\": 0.1,\n \"bagging_fraction\": 0.75,\n \"bagging_freq\": 1,\n # \"colsample_bytree\": 0.75,\n \"num_leaves\": sp_randint(6, 50),#2 ** 7 - 1,\n \"min_child_samples\": sp_randint(100, 500),\n \"min_data_in_leaf\": 30,#2 ** 8 - 1,\n \"verbosity\": 1,\n \"num_boost_round\": 600,\n # 'num_iterations' : 1200,\n \"n_estimators\": 4000,\n # 'device': 'gpu',\n # 'gpu_platform_id': 2,\n # 'gpu_device_id': 1\n}\n\n#Train and test data division\n# x = np.asarray(train_df_norm.loc[:, train_df_norm.columns != response_var])\n# y = np.asarray(train_df_norm[response_var])\n\n# train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, \n# random_state=11)\n\n# train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, \n# test_size=0.2, \n# random_state=11)\n\ntrain_data = lgb.Dataset(train_x, label=train_y)\nval_data = lgb.Dataset(val_x, label=val_y)\ntest_data = lgb.Dataset(test_x, label=test_y)\n\n# m_lgb = lgb.train(lgb_params, train_data)\nm_lgb = lgb.train(lgb_params, train_data, early_stopping_rounds=50,\n valid_sets=[train_data, val_data], verbose_eval=100)\n\n# m_lgb_cv = lgb.cv(lgb_params, train_data, nfold=3, stratified=False, \n# early_stopping_rounds=10)\njoblib.dump(m_lgb, model_fn)\n\n# pred = m_lgb_cv\npred = m_lgb.predict(test_x, n_jobs=-1)\n\ntest_y = ut.invTransform(scalar_y, test_y, \"DTSM\", train_df.columns)\npred = ut.invTransform(scalar_y, pred, \"DTSM\", 
train_df.columns)\n\n\nrmse = np.sqrt(MSE(test_y, pred))\nprint(rmse)\n\n\n#Hyper param optimization Lightgbm\ndef learning_rate_010_decay_power_099(current_iter):\n base_learning_rate = 0.1\n lr = base_learning_rate * np.power(.99, current_iter)\n return lr if lr > 1e-3 else 1e-3\n\ndef learning_rate_010_decay_power_0995(current_iter):\n base_learning_rate = 0.1\n lr = base_learning_rate * np.power(.995, current_iter)\n return lr if lr > 1e-3 else 1e-3\n\ndef learning_rate_005_decay_power_099(current_iter):\n base_learning_rate = 0.05\n lr = base_learning_rate * np.power(.99, current_iter)\n return lr if lr > 1e-3 else 1e-3\n\n\nfit_params={\"early_stopping_rounds\":30, \n \"eval_metric\" : 'rmse', \n \"eval_set\" : [(val_x, val_y)],\n # 'eval_names': ['valid'],\n #'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_010_decay_power_099)],\n 'verbose': 100}\n\nparam_test ={'num_leaves': sp_randint(6, 50), \n 'min_child_samples': sp_randint(100, 500), \n 'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],\n 'subsample': sp_uniform(loc=0.2, scale=0.8), \n 'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),\n 'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],\n 'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}\n\n# n_HP_points_to_test = 100\nclf = lgb.LGBMRegressor(max_depth=-1, random_state=11, silent=True, \n metric='rmse', n_jobs=-1, n_estimators=5000)\n\ngs = RandomizedSearchCV(\n estimator=clf, param_distributions=param_test, \n # n_iter=n_HP_points_to_test,\n scoring='neg_root_mean_squared_error',\n cv=5,\n refit=True,\n random_state=11,\n verbose=True)\n\ngs.fit(train_x.reset_index(drop=True), train_y.reset_index(drop=True), **fit_params)\nprint('Best score reached: {} with params: {} '.format(gs.best_score_, gs.best_params_))\n\n# opt_parameters = {'colsample_bytree': 0.9234, 'min_child_samples': 399, \n# 'min_child_weight': 0.1, 'num_leaves': 13, 'reg_alpha': 2, \n# 'reg_lambda': 5, 'subsample': 0.855}\n\nopt_parameters = 
gs.best_params_\n\nclf_sw = lgb.LGBMRegressor(**clf.get_params())\n#set optimal parameters\nclf_sw.set_params(**opt_parameters)\n\ngs_sample_weight = GridSearchCV(estimator=clf_sw, \n param_grid={'scale_pos_weight':[1,2,6,12]},\n scoring='neg_root_mean_squared_error',\n cv=5,\n refit=True,\n verbose=True)\n\ngs_sample_weight.fit(train_x.reset_index(drop=True), train_y.reset_index(drop=True), **fit_params)\n\nprint('Best score reached: {} with params: {} '.format(gs_sample_weight.best_score_, \n gs_sample_weight.best_params_))\n\n# print(\"Valid+-Std Train : Parameters\")\n# for i in np.argsort(gs_sample_weight.cv_results_['mean_test_score'])[-5:]:\n# print('{1:.3f}+-{3:.3f} {2:.3f} : {0}'.format(gs_sample_weight.cv_results_['params'][i], \n# gs_sample_weight.cv_results_['mean_test_score'][i], \n# gs_sample_weight.cv_results_['mean_train_score'][i],\n# gs_sample_weight.cv_results_['std_test_score'][i]))\n\nclf_final = lgb.LGBMRegressor(**clf.get_params())\n#set optimal parameters\nclf_final.set_params(**opt_parameters)\n\n#Train the final model with learning rate decay\nclf_final.fit(train_x, train_y, **fit_params, \n callbacks=[lgb.reset_parameter(learning_rate=\n learning_rate_010_decay_power_0995)])\n\n\nfeat_imp = pd.Series(clf_final.feature_importances_, index=train_x.columns)\nfeat_imp.nlargest(20).plot(kind='barh', figsize=(8,10))\n\n\npred = clf_final.predict(test_x, n_jobs=-1)\n\ntest_y_ = ut.invTransform(scalar_y, test_y, \"DTSM\", train_df.columns)\npred = ut.invTransform(scalar_y, pred, \"DTSM\", train_df.columns)\n\n\nrmse = np.sqrt(MSE(test_y_, pred))\nprint(rmse)\n\n\n\n\n#RNN\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.models import Model\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.optimizers import Adam\n\n\n# ARCHITECTURE_PARAMS = {\n# \"n1\": [64],\n# \"n2\": [32],\n# \"n3\": [1],\n# \"dropout\": [0.15, 0.25],\n# \"rec_dropout\": [0.5]\n# }\nLR = 
0.00005\nACTIVATION_FUNC = \"relu\"\nOUTPUT_LAYER_ACT_FUNCT = \"linear\"\nERROR_METRIC = \"mse\"\nEPOCHS = 25\nBATCH_SIZE = 64\nTRAIN_RATIOS = 0.8\nMETRIC_CHOICE = [tf.keras.metrics.RootMeanSquaredError()]\nLOSS =\"mse\",\nOPTIMIZER = Adam(lr=LR)\n\n\ntrain_x_rnn = np.asarray(train_x.replace(np.nan, missing_value)).reshape(-1, 1, train_x.shape[1])\ntrain_y_rnn = np.asarray(train_x.replace(np.nan, missing_value))#.reshape(-1, 1, train_y.shape[1])\nval_x_rnn = np.asarray(val_x.replace(np.nan, missing_value)).reshape(-1, 1, val_x.shape[1])\nval_y_rnn = np.asarray(val_y.replace(np.nan, missing_value))#.reshape(-1, 1, val_y.shape[1])\ntest_x_rnn = np.asarray(test_x.replace(np.nan, missing_value)).reshape(-1, 1, val_x.shape[1])\ntest_y_rnn = test_y.replace(np.nan, missing_value)\n\ncallbacks = [EarlyStopping(monitor='val_loss', mode=min, patience=3), \n ModelCheckpoint('../rnn_model.h5', save_best_only=True, \n save_weights_only=False, monitor='val_loss', verbose=1)]\n\nmodel_pipeline_input = layers.Input(shape=(1, train_x.shape[1]), dtype='float32')\nmodel_pipeline_masking = layers.Masking(mask_value=missing_value)(model_pipeline_input)\n\n# model_pipeline = layers.Bidirectional(\n# layers.GRU(16, \n# dropout=0.15, \n# return_sequences=True, \n# recurrent_dropout=0.5))(model_pipeline_masking)\n\nmodel_pipeline = layers.LSTM(64,\n dropout=0.25, \n return_sequences=True, \n # recurrent_dropout=0.5, \n activation=ACTIVATION_FUNC)(model_pipeline_masking)\n\n# Compile model with loss and optimizer\nmodel_pipeline = layers.Dense(1, activation=OUTPUT_LAYER_ACT_FUNCT)(model_pipeline)\nmodel = Model(inputs=model_pipeline_input, outputs=model_pipeline)\n\nmodel.compile(loss=LOSS, \n optimizer=OPTIMIZER, \n metrics=METRIC_CHOICE)\nmodel.fit(train_x_rnn, train_y_rnn, \n validation_data=(val_x_rnn, val_y_rnn), \n batch_size=BATCH_SIZE, \n epochs=EPOCHS,\n callbacks=callbacks\n )\n\nmodel.evaluate(val_x_rnn, val_y_rnn)\n\npred_rnn = model.predict(test_x_rnn)\npred_rnn = 
ut.invTransform(scalar_y, pred_rnn.ravel().reshape(-1,1), \"DTSM\", train_df.columns)\n\ntest_y_ = ut.invTransform(scalar_y, test_y, \"DTSM\", train_df.columns)\n\nrmse = np.sqrt(MSE(test_y_, pred_rnn))\nprint(rmse)\n\npred_ens = []\nfor l, r in zip(pred, pred_rnn):\n if r <=0:\n pred_ens.append(l)\n else:\n pred_ens.append(r)\n \nrmse = np.sqrt(MSE(test_y_, pred_ens))\nprint(rmse)\n\nplt.scatter(x=test_y_, y=pred)\n\n#------------------------------------------------------------------------------\n#Bayesian optimization\nfrom bayes_opt import BayesianOptimization\n\ndef bayesion_opt_lgbm(dtrain, init_iter=3, n_iters=7, random_state=11, \n seed = 101):\n # dtrain = lgb.Dataset(data=X, label=y)\n\n # Objective Function\n # def hyp_lgbm(num_leaves, feature_fraction, bagging_fraction, max_depth, \n # min_split_gain, min_child_weight):\n def hyp_lgbm(params):\n params['application'] = 'regression'\n # params ={'application':'regression','num_iterations': num_iterations,\n # 'learning_rate':0.05, 'early_stopping_round':50,\n # 'metric':'rmse','num_leaves': sp_randint(6, 50), \n # 'min_child_samples': sp_randint(100, 500), \n # 'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],\n # 'subsample': sp_uniform(loc=0.2, scale=0.8), \n # 'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),\n # 'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],\n # 'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}\n # # params = {'application':'regression','num_iterations': num_iterations,\n # # 'learning_rate':0.05, 'early_stopping_round':50,\n # # 'metric':'rmse'} # Default parameters\n # params[\"num_leaves\"] = int(round(num_leaves))\n # params['feature_fraction'] = max(min(feature_fraction, 1), 0)\n # params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)\n # params['max_depth'] = int(round(max_depth))\n # params['min_split_gain'] = min_split_gain\n # params['min_child_weight'] = min_child_weight\n cv_results = lgb.cv(params, dtrain, nfold=5, seed=seed, 
stratified=False,\n verbose_eval =None)\n # print(cv_results)\n return np.min(cv_results['rmse-mean'])\n # Domain space-- Range of hyperparameters \n # pds = {'num_leaves': sp_randint(6, 50),\n # 'feature_fraction': (0.1, 0.9),\n # 'bagging_fraction': (0.8, 1),\n # 'max_depth': (17, 25),\n # 'min_split_gain': (0.001, 0.1),\n # 'min_child_weight': (10, 25)\n # }\n pds = {'num_iterations': 100,\n 'learning_rate': tuple(np.linspace(0.001, 0.05, 10)), \n 'early_stopping_round':10,\n 'metric':'rmse',\n 'num_leaves': (6, 50, 100), \n 'min_child_samples': (100, 300, 400, 500), \n 'min_child_weight': (1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4),\n # 'subsample': sp_uniform(loc=0.2, scale=0.8), \n # 'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),\n 'reg_alpha': (0, 1e-1, 1, 2, 5, 7, 10, 50, 100),\n 'reg_lambda': (0, 1e-1, 1, 5, 10, 20, 50, 100),\n 'feature_fraction': tuple(np.linspace(0.1, 0.9, num=6)),\n 'bagging_fraction': tuple(np.linspace(0.8, 1, num=5)),\n 'max_depth': tuple(np.linspace(10, 15, num=5)),\n 'n_estimators': tuple(np.linspace(2000, 5000, num=4))\n }\n\n # Surrogate model\n optimizer = BayesianOptimization(hyp_lgbm, pds, random_state=random_state)\n \n # Optimize\n optimizer.maximize(init_points=init_iter, n_iter=n_iters)\n \n return optimizer\n\noptimizer = bayesion_opt_lgbm(train_data, init_iter=5, n_iters=10, \n random_state=77, seed = 101)\n\n\n\n# mnemonics_df.to_excel(\"Mnemonics_wth_file_with_stats_DTSM_length.xlsx\")\n\n\n# def log_plot(logs):\n# logs = logs.sort_values(by=\"DEPT\")\n# top = logs[\"DEPT\"].min()\n# bot = logs[\"DEPT\"].max()\n\n# f, ax = plt.subplots(nrows=1, ncols=5, figsize=(12, 8))\n# ax[0].plot(logs.GRR, logs[\"DEPT\"], color=\"green\")\n# ax[1].plot(logs.TNPH, logs[\"DEPT\"], color=\"red\")\n# ax[2].plot(logs.HLLD, logs[\"DEPT\"], color=\"black\")\n# ax[3].plot(logs.RHOZ, logs[\"DEPT\"], color=\"c\")\n# ax[4].plot(logs.VPVS, logs[\"DEPT\"], color=\"m\")\n\n# for i in range(len(ax)):\n# ax[i].set_ylim(top, bot)\n# 
ax[i].invert_yaxis()\n# ax[i].grid()\n\n# ax[0].set_xlabel(\"GR\")\n# ax[0].set_xlim(logs.GRR.min(), logs.GRR.max())\n# ax[0].set_ylabel(\"Depth(ft)\")\n# ax[1].set_xlabel(\"POR\")\n# ax[1].set_xlim(logs.TNPH.min(), logs.TNPH.max())\n# ax[2].set_xlabel(\"HLLD\")\n# ax[2].set_xlim(logs.HLLD.min(), logs.HLLD.max())\n# ax[3].set_xlabel(\"RHOB\")\n# ax[3].set_xlim(logs.RHOZ.min(), logs.RHOZ.max())\n# ax[4].set_xlabel(\"DTSM\")\n# ax[4].set_xlim(logs.VPVS.min(), logs.VPVS.max())\n\n# ax[1].set_yticklabels([])\n# ax[2].set_yticklabels([])\n# ax[3].set_yticklabels([])\n# ax[4].set_yticklabels([])\n# # ax[5].set_yticklabels([]);\n# # ax[6].set_yticklabels([])\n\n# f.suptitle(\"Well: #\" + las_filename, fontsize=14, y=0.94)\n\n\n# log_plot(df)\n","sub_path":"train_data1.py","file_name":"train_data1.py","file_ext":"py","file_size_in_byte":23042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"286042284","text":"# encoding=utf-8\nimport os,re\nimport argparse as ap\nimport numpy as np\nimport traceback, pdb\nfrom tqdm import tqdm\n \n\nif __name__ == \"__main__\":\n # Argument Parser\n parser = ap.ArgumentParser()\n parser.add_argument(\"--input_dir\", help=\"Path to input directory\", required=True)\n args = parser.parse_args()\n Chinese = u'[\\u4e00-\\u9fa5]'\n num = 0\n for root, root_dir_names, root_file_names in os.walk(args.input_dir):\n print(root, root_dir_names, len(root_file_names))\n if len(root_file_names) > 0:\n for file_name in tqdm(root_file_names):\n if file_name.endswith('txt') or file_name.endswith('TXT'):\n content = open(os.path.join(root, file_name), 'r', encoding='utf-8').read()\n all_words = re.findall(Chinese, content)\n num += len(all_words)\n print(num)\n","sub_path":"calDirChineseWord.py","file_name":"calDirChineseWord.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"143967888","text":"#!/usr/bin/env python3\n\nimport sys, os\nfrom dotenv import find_dotenv, load_dotenv\nfrom lib.inverted_index import InvertedIndex\nfrom lib.encrypted_index import EncryptedIndex\nfrom timeit import default_timer as timer\nimport numpy as np \nload_dotenv(find_dotenv())\n\ndef main(index):\n print(\"Query Format: [Desired Keyword] [Number of Documents] [Document Flag (y/n)]\")\n print(\"Keyword Required. Default Document=10. Default Flag=n\")\n print(\"Query: \", end=\"\", flush=True)\n\n for line in sys.stdin: \n\n res = []\n line = line.rstrip()\n line_split = line.split()\n\n num_docs = 10\n doc_flag = 'n'\n\n\n if line == \"quit()\": # exit \n break\n\n if len(line_split) == 1: # arg parser\n pass\n elif len(line_split) == 3:\n if line_split[1].isdigit() and (line_split[2]=='y' or line_split[2]=='n'):\n num_docs = int(line_split[1])\n doc_flag = line_split[2]\n else:\n print (\"Invalid Command\")\n print(\"Query: \", end=\"\", flush=True)\n continue\n else:\n print (\"Invalid Command\")\n print(\"Query: \", end=\"\", flush=True)\n continue\n\n start = timer()\n\n # for document in index.query(line[:-1]).most_common(num_docs):\n # doc_word_count = (len(index.document(document[0]).split()))\n # word_freq_count = document[1]\n # print(document)\n\n # buf = ((1000*(1+np.log(word_freq_count))/doc_word_count), document[0])\n # res.append(buf)\n\n for document in index.my_query(line_split[0], num_docs):\n doc_word_count = (len(index.document(document[0]).split()))\n word_freq_count = (index.document(document[0]).count(line_split[0]))\n\n buf = ((1000*(1+np.log(word_freq_count))/doc_word_count), document[0])\n res.append(buf)\n\n res.sort()\n end = timer()\n\n print(\"Query Time: \" + str(end - start) + \"ms\")\n\n # for rank, curDoc in enumerate(res[::-1]):\n # print (str(rank+1) + \". 
Doc ID: \" + str(curDoc[1]) + \" Score: \" + str(curDoc[0]))\n # if doc_flag=='y':\n # print (index.document(document[0]))\n\n for rank, curDoc in enumerate(res[::-1]):\n print (str(rank+1) + \". Doc ID: \" + str(index.decrypt(curDoc[1])) + \" Score: \" + str(curDoc[0]))\n if doc_flag=='y':\n print (index.document(document[0]))\n\n print(\"Query: \", end=\"\", flush=True)\n\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 2:\n print(\"USAGE: ./main.py path_to_trec_dataset\")\n exit(0)\n\n trec_file_path = sys.argv[1]\n secret_key = os.environ.get(\"SECRET_KEY\").encode('utf-8')\n\n # index = EncryptedIndex(secret_key)\n index = EncryptedIndex(secret_key, 'encrypted_index')\n # index = InvertedIndex()\n # index = InvertedIndex('inverted_index')\n\n # index.index_TREC(trec_file_path)\n # index.save_index()\n main(index)\n\n sys.exit(0)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"151751616","text":"from ctypes import *\n\nlib = CDLL(\"python/mandelbrot.so\", RTLD_GLOBAL)\n\nmandelbrot = lib.mandelbrot\nmandelbrot.argtypes = [c_int, c_int, c_double, c_double, c_double, c_double, c_char_p]\n\nif __name__ == \"__main__\":\n mandelbrot(400,400,0,0,4,4, c_char_p(b\"out.ppm\"))\n\n\n\n\n\n# https://docs.python.org/3/library/ctypes.html\n# https://github.com/pjreddie/darknet/blob/master/python/darknet.py\n\n# def c_array(ctype, values):\n# \"\"\"d = c_array(c_float, [0.0]*256)\"\"\"\n \n# arr = (ctype*len(values))()\n# arr[:] = values\n# return arr\n\n","sub_path":"python/mandelbrot.py","file_name":"mandelbrot.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"183909265","text":"import subprocess\nimport re\nimport datetime\n\ndef gitinfo():\n git_ret=subprocess.Popen(['git','log','--pretty=%H','HEAD^..HEAD'],\n 
stdout=subprocess.PIPE)\n git_hash = git_ret.communicate()[0]\n if git_hash:\n git_hash=git_hash.strip().decode()\n url_ret=subprocess.Popen(['git','remote','show','origin'],\n stdout=subprocess.PIPE)\n remote=url_ret.communicate()[0].decode()\n match=re.search('URL:\\s*(\\S+)\\n',remote)\n if match:\n git_url=match.group(1)\n scmversion='{0}:{1}'.format(git_url, git_hash)\n else:\n scmversion=git_hash\n return scmversion\n else:\n return None\n\ndef makefile():\n return open(\"Makefile\").read()\n\n\ndef when():\n return datetime.datetime.now().isoformat()\n\nif __name__=='__main__':\n attrib=dict()\n attrib[\"VERSION\"]=gitinfo()\n attrib[\"CFG\"]=makefile()\n attrib[\"COMPILETIME\"]=when()\n\n f=open(\"hsb_version.hpp\", \"w\")\n for k, v in attrib.items():\n f.write(\"constexpr char {0}[]=R\\\"({1})\\\";\\n\\n\".format(k, v))\n f.close()\n\n","sub_path":"src/getgit.py","file_name":"getgit.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"193018092","text":"def key(x):\n alpha = 'abcdefghijklmnopqrstuvwxyz'\n num = '0123456789'\n letters = list(alpha)\n indices = list(range (-27, -1 ) )\n d = dict(zip(letters, indices)) \n\n repres = [] #тут репрезентация строки в виде цифр\n found_num = '' #чтоб запоминать число\n for i in x:\n if i in alpha:\n if found_num != '':\n repres.append(int(found_num))\n found_num = '' #если число в начале или в середине, то мы идем пока не встретим букву и тогда аппендим число и удаляем его из found_num\n repres.append(d[i])\n elif i in num:\n found_num+=i\n if found_num != '': #если число в конце (потому что аппенд был только если мы встретили букву дальше)\n repres.append(int(found_num))\n return repres\n\n\n","sub_path":"students/Zelenkova_Lera/03/09_natsort.py","file_name":"09_natsort.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"242826704","text":"from __future__ import division, print_function, absolute_import\r\nimport os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# read the input data\r\n\r\n\r\ndef load_data(data_directory):\r\n directories = [d for d in os.listdir(data_directory)\r\n if os.path.isdir(os.path.join(data_directory, d))]\r\n labels = []\r\n image_features = []\r\n label = 0\r\n for d in directories:\r\n label_directory = os.path.join(data_directory, d)\r\n file_names = [os.path.join(label_directory, f)\r\n for f in os.listdir(label_directory) if f.endswith(\".txt\")]\r\n\r\n for f in file_names:\r\n feature = np.loadtxt(f, delimiter=' ')\r\n image_features.append(feature.flatten())\r\n labels.append(label)\r\n label += 1\r\n return image_features, labels\r\n\r\n\r\nROOT_PATH = './SingleLabelImageFeatures/Features/'\r\n\r\nimage_features, labels = load_data(ROOT_PATH)\r\nimage_features = np.array(image_features)\r\nlabels = np.array(labels)\r\n# plt.hist(labels, 10)\r\n# plt.show()\r\nprint(image_features.shape)\r\nprint(labels.shape)\r\nn = image_features.shape[0]\r\n# Splitting the data and Train, Val and Test\r\ninitial_split = .3\r\nX_train, X_remain, y_train, y_remain = train_test_split(image_features, labels, test_size=initial_split, random_state=2)\r\nnew_split = 10.0/(initial_split*100.)\r\nX_val, X_test, y_val, y_test = train_test_split(X_remain, y_remain, test_size=(1-new_split), random_state=2)\r\n\r\n# Making mini batches for training\r\nbatch_size = 32\r\nn_train = X_train.shape[0]\r\n\r\n\r\ndef ceil(a, b):\r\n return -(-a//b)\r\n\r\n\r\ndef next_batch(batch_num):\r\n batch = X_train[batch_num*batch_size: (batch_num+1)*batch_size, :]\r\n return batch\r\n\r\n\r\n# Training Parameters\r\neta = 0.01\r\nnum_epochs = 400\r\ndisplay_step = 100\r\n\r\n# Network Parameters\r\nnum_hidden_1 = 512 # nodes in 1st Hidden Layer\r\nnum_hidden_2 = 256 # nodes 
in 2nd Hidden Layer\r\nnum_hidden_3 = 200 # nodes in 2nd Hidden Layer\r\nnum_linear = 120 # nodes in linear layer (reduced Representation)\r\nnum_input = X_train.shape[1]\r\n\r\n# placeholder for input data\r\nX = tf.placeholder(\"float\", [None, num_input])\r\n\r\nweights = {\r\n 'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1], seed=1)*.01),\r\n 'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2], seed=2)*.01),\r\n 'encoder_h3': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3], seed=3)*.01),\r\n 'encoder_lin_w': tf.Variable(tf.random_normal([num_hidden_3, num_linear], seed=4)*.01),\r\n 'decoder_h1': tf.Variable(tf.random_normal([num_linear, num_hidden_3], seed=5)*.01),\r\n 'decoder_h2': tf.Variable(tf.random_normal([num_hidden_3, num_hidden_2], seed=6)*.01),\r\n 'decoder_h3': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1], seed=7)*.01),\r\n 'decoder_o': tf.Variable(tf.random_normal([num_hidden_1, num_input], seed=8)*.01),\r\n\r\n}\r\nbiases = {\r\n 'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1], seed=9)*.01),\r\n 'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2], seed=10)*.01),\r\n 'encoder_b3': tf.Variable(tf.random_normal([num_hidden_3], seed=11)*.01),\r\n 'linear_b': tf.Variable(tf.random_normal([num_linear], seed=16)*.01),\r\n 'decoder_b1': tf.Variable(tf.random_normal([num_hidden_3], seed=12)*.01),\r\n 'decoder_b2': tf.Variable(tf.random_normal([num_hidden_2], seed=13)*.01),\r\n 'decoder_b3': tf.Variable(tf.random_normal([num_hidden_1], seed=14)*.01),\r\n 'decoder_o': tf.Variable(tf.random_normal([num_input], seed=15)*.01)\r\n\r\n}\r\n\r\n\r\n# Encoder\r\ndef encoder(x):\r\n # use Sigmoid Activation for Hidden layer 1 and Hidden Layer 2 of the Encoder\r\n layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))\r\n layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))\r\n layer_3 = 
tf.nn.softplus(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))\r\n linear_layer = tf.add(tf.matmul(layer_3, weights['encoder_lin_w']), biases['linear_b'])\r\n\r\n return linear_layer\r\n\r\n\r\n# Decoder\r\ndef decoder(x):\r\n # use Sigmoid Activation for Hidden layer 1 and Hidden Layer 2 of the Decoder\r\n layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))\r\n layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))\r\n layer_3 = tf.nn.softplus(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))\r\n output = tf.add(tf.matmul(layer_3, weights['decoder_o']), biases['decoder_o'])\r\n\r\n return output\r\n\r\n\r\n# construct model\r\nencoder_op = encoder(X)\r\ndecoder_op = decoder(encoder_op)\r\n\r\n# Predicted output\r\ny_pred = decoder_op\r\n# Target output\r\ny_true = X\r\n\r\n# set loss and minimizer\r\nloss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\r\noptimizer = tf.train.RMSPropOptimizer(eta).minimize(loss)\r\n\r\n# Initialize the variables\r\ninit = tf.global_variables_initializer()\r\n\r\n# Start Training\r\nsess = tf.Session()\r\nsess.run(init)\r\n\r\nnum_batches_train = ceil(n_train, batch_size)\r\nfor i in range(1, num_epochs+1):\r\n print(\"EPOCH: \", i)\r\n for j in range(0, num_batches_train):\r\n batch_x = next_batch(j % num_batches_train)\r\n _, l = sess.run([optimizer, loss], feed_dict={X: batch_x})\r\n\r\n if j%display_step == 0 or j == 1:\r\n print('EPOCH%i Step%i: Minibatch Loss: %f' % (i, j, l))\r\n\r\n\r\n# Evaluate the error on Train data\r\nval_recons = sess.run(decoder_op, feed_dict={X: X_train})\r\ny_true = X_train\r\ny_pred = val_recons\r\nloss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\r\nprint(sess.run(loss))\r\n\r\n\r\n# Evaluate the error on validation data\r\nval_recons = sess.run(decoder_op, feed_dict={X: X_val})\r\ny_true = X_val\r\ny_pred = val_recons\r\nloss = tf.reduce_mean(tf.pow(y_true - y_pred, 
2))\r\nprint(sess.run(loss))\r\n\r\n# Evaluate the error on Test data\r\nval_recons = sess.run(decoder_op, feed_dict={X: X_test})\r\ny_true = X_test\r\ny_pred = val_recons\r\nloss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\r\nprint(sess.run(loss))\r\n\r\n# write reduced dimension data in a file X_redhl.txt\r\nX_red = sess.run(decoder_op, feed_dict={X: image_features})\r\n\r\nwith open('X_redhl.txt', 'w') as f:\r\n for i in range(n):\r\n for j in range(num_linear):\r\n f.write(\"%s \" % X_red[i][j])\r\n f.write(\"%s\\n\" % labels[i])","sub_path":"Assign-2_AutoEncoders_and_RBM/PCA_AE/autoencoder_3hidden.py","file_name":"autoencoder_3hidden.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"298430470","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/dario/Projects/pysplice/pysplice/__init__.py\n# Compiled at: 2015-09-18 18:10:57\n# Size of source mod 2**32: 747 bytes\nfrom pysplice._splice import ffi, lib\nimport os, errno\nfrom collections import namedtuple\nPipe = namedtuple('Pipe', 'fileno')\n\ndef mkpipe():\n readfd, writefd = os.pipe()\n return (Pipe(lambda : readfd), Pipe(lambda : writefd))\n\n\ndef splice(infile, off_in, outfile, off_out, size, flags=0):\n off_in = ffi.NULL if off_in is None else ffi.new('loff_t *', off_in)\n off_out = ffi.NULL if off_out is None else ffi.new('loff_t *', off_out)\n while 1:\n res = lib.splice(infile.fileno(), off_in, outfile.fileno(), off_out, size, flags)\n if res != -1:\n return res\n if ffi.errno != errno.EINTR:\n raise OSError(ffi.errno, os.strerror(ffi.errno))","sub_path":"pycfiles/pysplice-0.1.0.tar/__init__.cpython-35.py","file_name":"__init__.cpython-35.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"344937978","text":"from django.shortcuts import render, redirect\nfrom django.db.models import Count\nfrom .models import *\nfrom django.contrib import messages\n# Create your views here.\n# CURRENT USER\ndef current_user(request):\n if 'user_id' in request.session:\n return User.objects.get(id=request.session['user_id'])\n# INDEX ROUTE\ndef index(request):\n if current_user(request):\n return redirect('/home')\n return render(request, 'light/index.html')\n# END INDEX\n# PROCESS\ndef process(request):\n is_valid = User.objects.register_validate(request.POST)\n if is_valid['errors']:\n for error in is_valid['errors']:\n messages.error(request, error)\n return redirect('/#light')\n else:\n user = User.objects.create_user(request.POST)\n request.session['user_id'] = user.id\n return redirect('/home')\n# LOGIN\ndef login(request):\n is_valid = User.objects.login_validate(request.POST)\n if is_valid['status'] == True:\n request.session['user_id'] = is_valid['user'].id\n return redirect('/home')\n else:\n if is_valid['status'] == False:\n messages.error(request, is_valid['message'])\n return redirect('/home')\n# LOGOUT\ndef logout(request ):\n request.session.clear()\n return redirect('/')\n# HOME\ndef home(request):\n if 'user_id' in request.session:\n user = current_user(request)\n popular_reviews = Review.objects.annotate(num_likes=Count('liked_by')).order_by('-num_likes')[:3]\n context = {\n 'current_user': user,\n 'reviews': Review.objects.all().order_by('-created_at'),\n 'popular_reviews': popular_reviews,\n 'friends': user.friends.all(),\n 'friends_ids': user.friends.all().values_list('id', flat=True)\n }\n\n return render(request, 'light/home.html', context)\n return redirect('/')\n# END HOME\n# CREATE REVIEWS\ndef create_review(request):\n user = current_user(request)\n review = Review.objects.create_review(request.POST, user)\n return redirect('/home')\n# LIKE REVIEWS\ndef like(request, id):\n user = current_user(request)\n review = 
Review.objects.get(id=id)\n review.liked_by.add(user.id)\n return redirect('/home')\ndef unlike(request, id):\n user = current_user(request)\n review = Review.objects.get(id=id)\n review.liked_by.remove(user.id)\n return redirect('/home')\n# ACCOUNT\ndef account(request, id):\n user = current_user(request)\n reviews = Review.objects.filter(id=user.id)\n context = {\n 'current_user': user,\n 'reviews': reviews,\n }\n return render(request, 'light/account.html', context)\n# DELETE ACCOUNT\ndef delete(request, id):\n user = current_user(request)\n user.delete()\n return redirect('/')\n# ADD FRIEND\ndef add(request, id):\n user = current_user(request)\n user.friends.add(id)\n return redirect('/home')\ndef unfriend(request, id):\n user = current_user(request)\n user.friends.remove(id)\n return redirect('/home')\n","sub_path":"light/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"268577516","text":"import pickle\nimport time\nimport tensorflow as tf\nfrom alexnet import AlexNet\nimport csv\n\nnb_classes = 43\nepochs = 1\nbatch_size = 128\nlabels = []\n\n\ndef createDictCSV(fileName=\"\", dataDict={}):\n with open(fileName, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile)\n for i,j in dataDict.items():\n if i == 'labels':\n labels = j\n for index, value in enumerate(labels):\n if value:\n labels[index] = 1\n j = labels\n csvWriter.writerow([i,j])\n csvFile.close()\n\n\n\nwith open('./train.p', 'rb') as f:\n data = pickle.load(f)\n createDictCSV(\"data_csv.csv\", data)\n print(data['labels'])","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"554590287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\n:mod:`filmstriben_recommender` -- recommender 
service\n\n===================\nRecommender Service\n===================\n\nRecommender service.\n\"\"\"\nfrom datetime import datetime\nimport json\n# from jsonschema.exceptions import ValidationError\nimport logging\nimport os\nimport socket\nimport tornado.web as tw\nimport tornado.ioloop as ti\nfrom pyutils import JSONFormatter\nfrom pyutils import build_info\nfrom pyutils import Statistics\nfrom pyutils import StatusHandler\nfrom pyutils import BaseHandler\nfrom pyutils import StaticHandler\nfrom pyutils import create_post_examples_from_dir\nfrom recommender_common import RecommendHandler\nfrom pkg_resources import resource_filename\n\nfrom mobus import PostgresReader\nfrom filmstriben_recommender.recommender import Recommender\n\nlogger = logging.getLogger(__name__)\n\nSTATS = {'filmstriben': Statistics(name='filmstriben-recommender')}\n\nSTATIC = '''\n{\n \"responseHeader\": {\n \"build\": \"not available\",\n \"git\": \"not available\",\n \"version\": \"devel\",\n \"ab-id\": 1,\n \"recommender\": \"filmstriben\",\n \"numReturned\": 2,\n \"timings\": {\n \"timings\": {\n \"pid2work\": 23.804,\n \"fetch\": 0.006,\n \"from-analysis\": 29.887,\n \"work2meta\": 17.163,\n \"ignore\": 0.020999999999999998,\n \"booster\": 0.005,\n \"filter\": 22.878,\n \"post-filter\": 0.005,\n \"augment\": 0.022\n },\n \"total\": 1\n },\n \"time\": 1\n },\n \"response\": [\n {\n \"debug-work\": \"work:843550\",\n \"val\": 0.12085382706823944,\n \"type\": \"filmstriben\",\n \"from\": [\n \"870970-basis:52830575\"\n ],\n \"pid\": \"870970-basis:25890663\",\n \"loancount\": 65,\n \"debug-creator\": \"Lena Hanno Clyne\",\n \"debug-title\": \"Falla vackert\"\n },\n {\n \"debug-work\": \"work:4446785\",\n \"val\": 0.11221693800542396,\n \"type\": \"filmstriben\",\n \"from\": [\n \"870970-basis:52830575\"\n ],\n \"pid\": \"870970-basis:26025737\",\n \"loancount\": 193,\n \"debug-creator\": \"Dan Harris\",\n \"debug-title\": \"Imaginary heroes\"\n }\n ]\n}\n'''.strip()\n\n\nclass 
HelpHandler(BaseHandler):\n \"\"\" Help Handler \"\"\"\n def initialize(self, root_name):\n self.path = resource_filename('filmstriben_recommender', 'data/html/help.html')\n example_path = resource_filename('filmstriben_recommender', 'data/examples/')\n self.examples = create_post_examples_from_dir('/%s' % (root_name), example_path, suffix='.json', title=\"Examples\")\n\n def get(self):\n with open(self.path) as fh:\n content = fh.read()\n content = content.replace('@EXAMPLE@', self.examples)\n self.write(content)\n\n\ndef make_app(root, recommenders, ab_id):\n info = build_info.get_info('filmstriben_recommender')\n handlers = [(r\"/%s\" % (root), RecommendHandler, dict(recommender=r,\n specification=r.specification,\n ab_id=ab_id,\n info=info,\n stat_collector=STATS[r.name])) for r in recommenders]\n handlers += [(r\"/%s/help\" % root, HelpHandler, dict(root_name='filmstriben'))]\n handlers.append((r\"/%s/status\" % root, StatusHandler, dict(ab_id=1, info=info, statistics=STATS.values())))\n handlers.append((r\"/%s/static\" % root, StaticHandler, dict(content=STATIC)))\n return tw.Application(handlers)\n\n\ndef main(port, ab_id):\n start = datetime.now()\n root = 'filmstriben'\n db_urls = get_db_urls({'lowell': 'LOWELL_URL', 'recmod': 'RECMOD_URL'})\n recommenders = [Recommender(db_urls['lowell'], PostgresReader(db_urls['recmod'], 'filmstriben_cosim'))]\n app = make_app(root, recommenders, ab_id)\n logger.info(\"Startup took [%s]\", datetime.now() - start)\n logger.info(\"service up at 'http://%s:%s/%s'\" % (socket.gethostname(), port, root))\n app.listen(port)\n ti.IOLoop.current().start()\n\n\ndef get_db_urls(key_value_dict):\n vals = {}\n v = None\n try:\n for key, v in key_value_dict.items():\n vals[key] = os.environ[v]\n except KeyError:\n raise RuntimeError('Environment variable %s must be set' % v)\n return vals\n\n\ndef setup_logger(json_formatter=None, level=logging.DEBUG, logfile_name=None):\n global logger\n if not json_formatter:\n json_formatter = 
JSONFormatter()\n\n logger = logging.getLogger('')\n ch = logging.StreamHandler()\n\n if logfile_name:\n ch.setFormatter(logging.Formatter('%(message)s'))\n fh = logging.FileHandler(logfile_name)\n fh.setFormatter(json_formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n else:\n ch.setFormatter(json_formatter)\n\n ch.setLevel(level)\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n\n\ndef cli():\n \"\"\" Commandline interface \"\"\"\n import argparse\n\n port = 5000\n\n parser = argparse.ArgumentParser(description='recommender service')\n parser.add_argument('-a', '--ab-id', dest='ab_id',\n help=\"ab id of service. default is 1\", default=1)\n parser.add_argument('-l', '--logfile', dest='logfile',\n help='Name of logfile (otherwise logs json to stdout)', default=None)\n parser.add_argument('-p', '--port', dest='port',\n help='port to expose service on. Default is %d' % port, default=port)\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',\n help='verbose output')\n args = parser.parse_args()\n\n structured_formatter = JSONFormatter(tags={'type': 'service', 'port': args.port})\n level = logging.INFO\n if args.verbose:\n level = logging.DEBUG\n setup_logger(structured_formatter, level, logfile_name=args.logfile)\n\n main(args.port, args.ab_id)\n","sub_path":"src/filmstriben_recommender/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"394993592","text":"# a\na,b,c = map(int, input().split())\nans = c - (a - b)\n\nif ans < 0:\n print (0)\nelse:\n print (ans)\n# b\nn = int(input())\nans = 0\n\nfor i in range(1,n+1):\n if len(str(i))%2 != 0:\n ans += 1\nprint(ans)\n# c\ndef sub(q,data):\n if q <= 0 or data[q]-data[q-1] >=1:\n return (1)\n if data[q]-data[q-1] == 0:\n sun(q-1,data)\n if data[q]-data[q-1] <= -1:\n return (0)\n\ndef check_inc(list):\n for i,v in enumerate(list[:-1]):\n if 
list[i] - list[i+1] >= 1:\n if list[i] - list[i+1] >= 2:\n return (\"No\")\n if i >= 1:\n if list[i-1] - list[i] >= 1:\n return (\"No\")\n if sub(i,list) == 0:\n return (\"No\")\n return (\"Yes\")\n\nn = int(input())\na = list (map(int, input().split()))\nr = [i*2 for i in a] \nprint(check_inc(a))\n","sub_path":"j-ABC136.py","file_name":"j-ABC136.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"233637847","text":"\"\"\"\r\nThis code includes an interesting bug, can you find it? ;)\r\nAuthor: Omer Rosenbaum\r\n\"\"\"\r\n\r\n# Looney Tune structure:\r\n# [name, friends_list, int]\r\n# name - str, friend s_list - list, age - int\r\nNAME = 0\r\nFRIENDS_LIST = 1\r\nAGE = 2\r\n\r\n\r\ndef show_looney_friends(looney_tune):\r\n \"\"\"this function shows the friends of a given looney tune\"\"\"\r\n if len(looney_tune[FRIENDS_LIST]) == 0:\r\n print(\"%s has no friends :(\" % (looney_tune[NAME],))\r\n\r\n else:\r\n print(\"This are %s's friends:\" % (looney_tune[NAME],))\r\n for friend in looney_tune[FRIENDS_LIST]:\r\n print(\"%s is %d years old\" % (friend[NAME], friend[AGE]))\r\n\r\n # Added for output readability\r\n print('==========')\r\n\r\n\r\ndef add_new_friend(looney_tune, new_friend):\r\n \"\"\"this functon adds new_friend to the looney_tune's list of friends\"\"\"\r\n looney_tune[FRIENDS_LIST].append(new_friend)\r\n\r\n\r\ndef create_looney_tune(name=\"cool guy!\", friends_list=[], age=0):\r\n \"\"\"Creates a new looney tune and returns it.\"\"\"\r\n return [name, friends_list, age]\r\n\r\n\r\ndef show_friends(looneys_list):\r\n \"\"\"shows friends for all looney tunes in lonneys_list\"\"\"\r\n for looney_tune in looneys_list:\r\n show_looney_friends(looney_tune)\r\n\r\n\r\ndef main():\r\n \"\"\"Main function used to test the code\"\"\"\r\n bugs = create_looney_tune(\"Bugs Bunny\", age=2)\r\n daffy = create_looney_tune(\"Daffy Duck\")\r\n melissa = 
create_looney_tune(\"Melissa Duck\", [daffy], 4)\r\n yosemite = create_looney_tune(\"Yosemite Sam\", [bugs, melissa], 302)\r\n\r\n all_looney_tunes = (bugs, daffy, melissa, yosemite)\r\n show_friends(all_looney_tunes)\r\n\r\n print('+++ adding friends +++')\r\n\r\n add_new_friend(daffy, bugs)\r\n add_new_friend(yosemite, daffy)\r\n\r\n show_friends(all_looney_tunes)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"dictionaries_and_files/ex4-buns_buggy_xA4QNaW/buns buggy.py","file_name":"buns buggy.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"498273519","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport tqdm\nimport torch\nimport shutil\nimport logging\nimport argparse\nimport numpy as np\nimport pickle as pkl\nimport seaborn as sns\nimport os.path as osp\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport matplotlib.pyplot as plt\nimport torch.backends.cudnn as cudnn\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom torchvision import models\n\nfrom dataset_utils import *\nfrom lottery_masks import LotteryMask\n\n\ndef parse_args():\n\n file_purpose = '''\n train a network for lottery tickets\n '''\n\n parser = argparse.ArgumentParser(description=file_purpose,\n epilog=file_purpose, \n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n model_choices = ['FC', 'Conv']\n act_choices = ['ReLU', 'Identity', 'Tanh', 'Sigmoid']\n\n default_lr = 1e-3\n default_l2 = 0.0\n default_num_epochs = 100\n default_batch_size = 64\n default_n_hidden = 5\n default_hidden_dim = 100\n default_width = 5\n default_act = 'ReLU'\n default_workers = 2\n default_dataset_root = osp.join(osp.dirname(os.getcwd()) ,'datasets')\n default_seed = 0\n default_momentum = 0.9\n default_start = 100.0\n default_end = 1.0\n 
default_steps = 10\n default_step_gamma = 0.1\n default_milestones = [50, 75]\n\n\n parser.add_argument('-lr', type=float, default=default_lr, help='learning rate')\n parser.add_argument('-l2', type=float, default=default_l2, help='l2 penalty')\n parser.add_argument('-n', '--num_epochs', type=int, default=default_num_epochs, help='number of training epochs')\n parser.add_argument('-d', '--dataset', type=str, choices=dataset_choices, required=True, help='dataset')\n parser.add_argument('-b', '--batch_size', type=int, default=default_batch_size, help='batch size for training')\n parser.add_argument('-j', '--workers', type=int, default=default_workers, help='number of wrokers for dataloader')\n parser.add_argument('-m', '--model', type=str, choices=model_choices, required=True, help='model')\n parser.add_argument('-n_hidden', type=int, default=default_n_hidden, help='number of hidden layers (different semantics for FC and Conv')\n parser.add_argument('-hidden_dim', type=int, default=default_hidden_dim, help='dimension of linear layers')\n parser.add_argument('-bn', action='store_true', help='use batch-norm before activations')\n parser.add_argument('-width', type=int, default=default_width, help='width multiplier for Conv model')\n parser.add_argument('-act', type=str, default=default_act, choices=act_choices, help='activation function')\n parser.add_argument('-r', '--run', type=str, required=True, help='run directory prefix')\n parser.add_argument('-f', action='store_true', help='force rewrite')\n parser.add_argument('-dp', action='store_true', help='data parallel model')\n parser.add_argument('-mom', type=float, default=default_momentum, help='momentum for SGD')\n parser.add_argument('-pre', action='store_true', help='pretrained imagenet weights')\n parser.add_argument('-end', type=float, default=default_end, help='end')\n parser.add_argument('-start', type=float, default=default_start, help='start')\n parser.add_argument('-steps', type=int, default=default_steps, 
help='number of pruning steps')\n parser.add_argument('-cuda', type=int, help='use cuda, if use, then give gpu number')\n parser.add_argument('--seed', type=int, default=default_seed, help='seed for randomness')\n parser.add_argument('--augment', action='store_true', help='augment data with random-flip and random crop')\n parser.add_argument('--milestones', type=int, nargs='+', default=default_milestones, help='milestones for multistep-lr')\n parser.add_argument('--step_gamma', type=float, default=default_step_gamma, help='step gamma for multistep lr')\n parser.add_argument('--dataset_root', type=str, default=default_dataset_root, help='directory for dataset')\n parser.add_argument('-pdb', '--with_pdb', action='store_true', help='run with python debugger')\n\n return parser.parse_args()\n\n\n\n##################\n# networks\n##################\n\nclass FC(nn.Module):\n\n def __init__(self, n_hidden=10, hidden_dim=10, input_shape=(1, 32, 32), n_classes=10, bn=False, activation='relu'):\n\n super(FC, self).__init__()\n assert n_hidden >= 2, 'n_hidden has to be >= 2'\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.input_size = 1\n for n in input_shape:\n self.input_size = self.input_size * n\n self.input_shape = input_shape\n self.n_classes = n_classes\n self.bn = bn\n self.activation = activation\n\n if self.activation in ['ReLU', 'Tanh', 'Sigmoid', 'Identity']:\n self.activation_fn = getattr(nn, self.activation)\n else:\n raise Exception('activation fn not allowed: {}'.format(self.activation))\n\n layers = [nn.Linear(self.input_size, self.hidden_dim)]\n layers.append(self.activation_fn())\n if self.bn:\n layers.append(nn.BatchNorm1d(self.hidden_dim))\n\n for layer_index in range(n_hidden - 2):\n layers.append(self.activation_fn())\n layers.append(nn.Linear(self.hidden_dim, self.hidden_dim))\n if self.bn:\n layers.append(nn.BatchNorm1d(self.hidden_dim))\n\n layers.append(self.activation_fn())\n\n layers.append(nn.Linear(self.hidden_dim, 
self.n_classes))\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n\n out = x.view(x.size(0), self.input_size)\n out = self.net(out)\n\n return out\n\n\nclass Conv(nn.Module):\n\n def __init__(self, n_hidden=3, hidden_dim=100, width=5, ch=1, input_shape=(1, 32, 32), n_classes=10, bn=False, activation='relu'):\n\n super(Conv, self).__init__()\n assert n_hidden >= 2, 'n_hidden has to be >= 2'\n\n self.n_hidden = n_hidden\n self.hidden_dim = hidden_dim\n self.width = width\n self.ch = ch\n self.input_shape = input_shape\n self.n_classes = n_classes\n self.bn = bn\n self.activation = activation\n if self.activation in ['ReLU', 'Tanh', 'Sigmoid', 'Identity']:\n self.activation_fn = getattr(nn, self.activation)\n else:\n raise Exception('activation fn not allowed: {}'.format(self.activation))\n\n width = self.width\n\n features = list()\n features.append(nn.Conv2d(self.ch, width, kernel_size=3, padding=1))\n features.append(self.activation_fn())\n if self.bn:\n features.append(nn.BatchNorm2d(width))\n\n for layer_index in range(self.n_hidden - 2):\n features.append(self.activation_fn())\n features.append(nn.Conv2d(width, 2*width, kernel_size=3, padding=1))\n if self.bn:\n features.append(nn.BatchNorm2d(d))\n features.append(nn.MaxPool2d(2, 2))\n\n width = width * 2\n\n self.features = nn.Sequential(*features)\n\n self.features_dim = self.get_features().view(2, -1).size(1)\n\n classifier = list()\n classifier.append(nn.Linear(self.features_dim, hidden_dim))\n classifier.append(nn.BatchNorm1d(hidden_dim))\n classifier.append(self.activation_fn())\n classifier.append(nn.Linear(hidden_dim, self.n_classes))\n\n self.classifier = nn.Sequential(*classifier)\n\n def get_features(self, x=None):\n\n if x is None:\n input_shape = (2,) + self.input_shape\n x = torch.rand(input_shape)\n \n with torch.no_grad():\n out = self.features(x)\n\n return out\n\n def forward(self, x):\n\n out = self.features(x)\n out = out.view(out.size(0), self.features_dim)\n out = 
self.classifier(out)\n\n return out\n\n\n##################\n# train\n##################\n\ndef evaluate_model(model, \n criterion, \n dataloader, \n device, \n dataset_size\n ):\n\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n\n # iterate over data\n with torch.no_grad():\n for batch, truth in dataloader:\n batch = batch.to(device)\n truth = truth.to(device)\n\n output = model(batch)\n _, preds = torch.max(output, 1)\n running_corrects += torch.sum(preds == truth)\n \n loss = criterion(output, truth)\n\n # accummulate loss\n running_loss += loss.item() * batch.size(0)\n \n final_loss = running_loss / dataset_size\n final_acc = running_corrects.double() / dataset_size\n # assert type(final_loss) == np.float, 'final_loss type: {}'.format(type(final_loss))\n # assert type(final_acc) == np.float, 'final_acc type: {}'.format(type(final_acc))\n return {'loss': final_loss, 'acc': final_acc.item()}\n\n\ndef train(model,\n mask,\n optimizer,\n scheduler,\n dataloaders,\n criterion,\n device,\n config,\n num_epochs,\n # writer,\n logger,\n pruning_index\n ):\n\n dataset_sizes = config['dataset_size']\n\n # store train stats\n loss_list = {'train': list(), 'test': list()}\n acc_list = {'train': list(), 'test': list()}\n # iterate over epochs\n for epoch in range(num_epochs):\n # learn\n with torch.enable_grad():\n model.train()\n for batch, truth in dataloaders['train']:\n batch = batch.to(device)\n truth = truth.to(device)\n optimizer.zero_grad()\n\n output = model(batch)\n loss = criterion(output, truth)\n\n loss.backward()\n\n mask.apply_mask_to_grads(model)\n optimizer.step()\n\n scheduler.step()\n # evaluate \n logger.info('epoch: {}'.format(epoch))\n for phase in ['train', 'test']:\n logger.info('{}:'.format(phase))\n stats = evaluate_model(model, \n criterion, \n dataloaders[phase], \n device, \n dataset_sizes[phase]\n )\n\n loss_list[phase].append(stats['loss'])\n acc_list[phase].append(stats['acc'])\n logger.info('\\tloss: {}'.format(stats['loss']))\n 
logger.info('\\tacc: {}'.format(stats['acc']))\n # writer.add_scalar('loss-{}/{}'.format(pruning_index, phase), stats['loss'], epoch)\n # writer.add_scalar('accuracy-{}/{}'.format(pruning_index, phase), stats['acc'], epoch)\n\n return {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'loss': loss_list, 'acc': acc_list}\n\n\nif __name__ == '__main__':\n\n # debugging utility\n args = parse_args()\n if args.with_pdb:\n import pdb\n pdb.set_trace()\n\n # fix randomness\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.cuda is not None:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # cool plotting style\n sns.set_style('whitegrid')\n sns.set_palette('Set2')\n\n # directory structure\n log_dir = osp.join(args.run, 'logs')\n ckpt_dir = osp.join(args.run, 'ckpt')\n images_dir = osp.join(args.run, 'images')\n\n if osp.exists(args.run):\n if args.f:\n shutil.rmtree(args.run)\n else:\n raise Exception('{} already exists'.format(args.run))\n\n for dirname in [args.run, log_dir, ckpt_dir, images_dir]:\n os.makedirs(dirname)\n\n # save args\n args_path = osp.join(args.run, 'train_lottery.pkl')\n with open(args_path, 'w+b') as f:\n pkl.dump(args, f)\n\n # logging\n # writer = SummaryWriter(log_dir=log_dir)\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n logging_file = osp.join(log_dir, 'train_lottery.log')\n logger = logging.getLogger('train_lottery')\n with open(logging_file, 'w+') as f:\n pass\n logger_file_handler = logging.FileHandler(logging_file)\n logger.addHandler(logger_file_handler)\n logger.info('arguments: {}'.format(args))\n\n # get dataset mean, std\n mean, std = get_mean_std(args.dataset)\n config = get_dataset_config(args.dataset)\n train_transform, test_transform = get_dataset_transforms(mean, std, config['size'], augment=args.augment)\n train_data, test_data = get_dataset(args.dataset, args.dataset_root, train_transform, test_transform)\n \n dataloaders = dict()\n 
dataloaders['train'] = data.DataLoader(train_data,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers\n )\n\n dataloaders['test'] = data.DataLoader(test_data,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.workers\n )\n\n # torch device \n device = torch.device('cuda:%d' % args.cuda)\n\n # model\n input_shape = (config['ch'], config['size'], config['size'])\n if args.model == 'FC':\n model = FC(n_hidden=args.n_hidden, \n hidden_dim=args.hidden_dim, \n input_shape=input_shape,\n n_classes=config['num_classes'],\n bn=args.bn,\n activation=args.act\n )\n elif args.model == 'Conv':\n model = Conv(n_hidden=args.n_hidden,\n hidden_dim=args.hidden_dim,\n width=args.width,\n ch=config['ch'],\n input_shape=input_shape,\n n_classes=config['num_classes'],\n bn=args.bn,\n activation=args.act\n )\n else:\n raise Exception('unknown model: {}'.format(args.model))\n\n # model_weights_path = osp.join(ckpt_dir, 'model_weights.pth')\n # assert osp.exists(model_weights_path), '{} was not found'.format(model_weights_path)\n # model.load_state_dict(torch.load(model_weights_path))\n \n model = model.to(device)\n\n # log computational graph and params\n # log_graph(writer, model, torch.zeros(args.batch_size, config['ch'], config['size'], config['size'], device=device))\n \n # data parallel\n if args.dp:\n model = nn.DataParallel(model)\n\n # loss function\n criterion = nn.CrossEntropyLoss()\n\n # save init\n init_model_weights_path = osp.join(ckpt_dir, 'init_weights.pth')\n torch.save(model.state_dict(), init_model_weights_path)\n\n # mask of ones\n mask = LotteryMask(model, device, start=args.start, end=args.end, steps=args.steps)\n\n # start pruning\n for pruning_index in range(args.steps):\n\n logger.info('pruning_index: {}'.format(pruning_index))\n\n # optimizer\n optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.l2, momentum=args.mom)\n\n # scheduler\n scheduler = lr_scheduler.MultiStepLR(optimizer, args.milestones, 
gamma=args.step_gamma)\n \n model = mask.apply_mask_to_weights(model)\n # ready to train\n system = train(model,\n mask,\n optimizer,\n scheduler,\n dataloaders,\n criterion,\n device,\n config,\n args.num_epochs,\n # writer,\n logger,\n pruning_index\n )\n\n unpruned_count, overall_count = mask.get_pruned_stats()\n Pm = unpruned_count / overall_count * 100.0\n print('Pm:', Pm.item(), '%')\n\n mask.update_mask(model)\n # mask 0 action\n mask.prune_to_zero(model)\n # mask 1 action\n init_state_dict = torch.load(init_model_weights_path)\n mask.reset_to_init(model, init_state_dict)\n\n # save\n torch.save({\n 'model': system['model'],\n 'mask': mask.get_mask()\n }, osp.join(ckpt_dir, 'model_weights_{:.3f}.pth'.format(Pm))\n )\n\n # train stats as npz\n acc_stats_path = osp.join(ckpt_dir, 'train_lottery_acc_stats_{:.3e}.npz'.format(Pm))\n loss_stats_path = osp.join(ckpt_dir, 'train_lottery_loss_stats_{:.3e}.npz'.format(Pm))\n np.savez(acc_stats_path, **system['acc'])\n np.savez(loss_stats_path, **system['loss'])\n\n # final_test_loss = system['loss']['test'][-1]\n # final_test_acc = system['acc']['test'][-1]\n\n # args into tensorboard\n # log_hparams(writer, args, final_test_loss, final_test_acc)\n\n print('test acc:', system['acc']['test'][-1])\n print('train acc:', system['acc']['train'][-1])\n # writer.close()\n","sub_path":"experiments/overparametrization_imp.py","file_name":"overparametrization_imp.py","file_ext":"py","file_size_in_byte":16632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"89669349","text":"from random import randint\r\n\r\ndef cargar_random(n,minimo,maximo):\r\n lista = [randint(minimo,maximo)for i in range(n)]\r\n return lista\r\n\r\ndef normalizar(lista):\r\n suma =sum(lista)\r\n num= 1/suma\r\n for i in range(len(lista)):\r\n lista[i] = num *lista[i]\r\n \r\n\r\ndef main():\r\n lista = cargar_random(5,1,10)\r\n print(f'la lista es: {lista}')\r\n normalizar(lista)\r\n print(f'la lista 
normalizada {lista}')\r\n print(sum(lista))\r\n \r\nif __name__==\"__main__\":\r\n main()","sub_path":"z ejercicios varios/tp2_6.py","file_name":"tp2_6.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"459514078","text":"from bs4 import BeautifulSoup\nimport urllib3\nfrom urllib.parse import urljoin, quote\nimport urllib.parse\nimport csv\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nhttp = urllib3.PoolManager()\n \narquivo = 'ceepi-sessoes_atas.csv'\nconselho = 'ceepi'\n\n# Cabeçalho do csv\nwith open(arquivo, 'w', encoding='utf-8', newline = '') as csvfile:\n c = csv.writer(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n c.writerow(['Id','Url','Tipo','Numero','Data','Processo','Relator','Interessado','Ementa','Assunto','Documento','Titulo'])\n\n# Resoluções\nurl_raw = 'http://www.ceepi.pro.br/Sessões/sessões.htm'\nurl = 'http://www.ceepi.pro.br/Sess%C3%B5es/sess%C3%B5es.htm' \npage = http.request('GET', url)\nsoup = BeautifulSoup(page.data, 'lxml')\ntipo = ''\nnumero = ''\ndata = ''\nprocesso = ''\nrelator = ''\ninteressado = ''\nassunto = ''\nementa = ''\ndocumento = ''\ntitulo = ''\n\ncenter = soup.find('center')\ntable = center.table\n\ntrs = table.find_all('tr')\ntrs = trs[2:]\n\ni = 1\ncount = 0\nfor row in trs:\n columns = row.find_all('td')\n col = 0\n tipos = ['ano', 'Pauta','Ata']\n for td in columns:\n if(col == 0):\n ano = int(td.text)\n else:\n tipo = tipos[col]\n \n links = td.find_all('a')\n for a in links:\n url0 = a.get('href')\n url0 = urljoin(url, str(url0))\n\n url_raw = urljoin(url_raw, str(url0))\n\n if 'Sessões' in url0:\n url0 = url0.replace('Sessões','Sess%C3%B5es')\n try:\n page = http.request('GET', url0)\n soup = BeautifulSoup(page.data, 'lxml')\n except Exception as e:\n print(str(e))\n print('url = {}'.format(url0))\n continue\n\n table = soup.find('table')\n if table:\n trs = 
table.find_all('tr')\n trs = trs[3:]\n\n for _row in trs:\n cols = _row.find_all('td')\n _col = 0\n _mes = ''\n _url = ''\n _data = ''\n _ementa = ''\n\n for _td in cols:\n if(_col == 0):\n _mes = _td.text.strip()\n else:\n links = _td.find_all('a')\n if(not links):\n continue\n for a in links:\n _titulo = tipo + ' - ' + a.text + '/'+ _mes +'/'+ str(ano)\n _numero = a.text + '/'+ _mes\n _data = a.text + '/'+ _mes + '/' + str(ano)\n _documento = urljoin(url0, a.get('href'))\n _ementa = _titulo\n \n count = count + 1\n id = conselho + '-'+tipo.lower() +'-'+str(count)\n \n with open(arquivo, 'a', encoding='utf-8', newline = '') as csvfile:\n c = csv.writer(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_ALL)\n c.writerow([id,url0,tipo,_numero,_data,processo,relator,interessado,_ementa,assunto,_documento, _titulo])\n \n _col = _col + 1\n col = col + 1\n\nprint(count)","sub_path":"crawlers/ceepi/atas_sessoes.py","file_name":"atas_sessoes.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"47098923","text":"#!/usr/bin/python\n\nimport random\nimport math\nfrom time import localtime\n\nclass MonteCarlo(object):\n\tdef __init__(self):\n\t\tself.circleArea = 0.0\n\t\tself.squareArea = 0.0\n\n\tdef inCircle(self, x, y):\n\t\treturn (x*x + y*y <= 1)\n\n\tdef randomLinear(self, n):\n\t\t#start y wert\n\t\tlt = localtime()\n\t\ty = 0.0\n\t\ty = lt[5]\n\n\t\t# commment the following out for randome y\n\t\ty = 6\n\n\t \t#start paramater\n\t\ta = 1664525.0\n\t\tb = 1013904223.0\n\t\tm = pow(2.0,32.0)\n\n\t\t#berechnung\n\t\tfor i in range(n):\n\t\t\ty = ((a * y + b) % m)\n\t\t\trandom = 0.0\n\t\t\trandom = y / m # /m fuer werte zwischen 0 und 1\n\t\t\tyield random\n\n\tdef calcMonteCarloLinGen(self, n):\n\t\tla = list(self.randomLinear(n))\n\t\tlb = list(self.randomLinear(n))\n\t\tfor k in range(n):\n\t\t\ta = la[k]\n\t\t\tb = lb[k]\n\t\t\tif self.inCircle(a, 
b):\n\t\t\t\tself.circleArea = self.circleArea + 1.0\n\t\t\telse:\n\t\t\t\tself.squareArea = self.squareArea + 1.0\n\n\t\tself.pi = self.circleArea / self.squareArea\n\n\tdef calcMonteCarlo(self, n):\n\t\tfor i in range(n):\n\t\t\ta = random.random()\n\t\t\tb = random.random()\n\t\t\tif self.inCircle(a, b):\n\t\t\t\tself.circleArea = self.circleArea + 1.0\n\t\t\telse:\n\t\t\t\tself.squareArea = self.squareArea + 1.0\n\t\tself.pi = self.circleArea / self.squareArea\n\n\tdef doIt(self):\n\t\t\"\"\"\n\t\t>>> m = MonteCarlo()\n\t\t>>> m.doIt()\n\t\tPi berechnet mit Linearen Kongruezgenerator 2.4257720833832925\n\t\t\"\"\"\n\t\tself.circleArea = 0.0\n\t\tself.squareArea = 0.0\n\t\tself.calcMonteCarloLinGen(1000000)\n\t\tprint(\"Pi berechnet mit Linearen Kongruezgenerator \" + str(self.pi))\n\nif __name__ == \"__main__\":\n\tmonte = MonteCarlo()\n\n\tmonte.calcMonteCarlo(1000000)\n\tprint(\"Pi berechnet mit Python Randomgenerator \" + str(monte.pi))\n\tmonte.doIt()\n\n\timport doctest\n\tdoctest.testmod()\n","sub_path":"monte_carlo/monteCarloTest/MonteCarlo.py","file_name":"MonteCarlo.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"431259129","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io.wavfile\nimport scipy.fftpack\nimport librosa.core as librosa\n\naudio1, fs1 = librosa.load('audio1.wav', sr=None)\naudio2, fs2 = librosa.load('audio2.wav', sr=None)\ntime = 3\nfs = 8000\nz = np.arange(0, 3, 1/fs)\ny = 10*np.sin(2*np.pi*100*z+ 1)\n\n#Task 2a)\ndft2a = np.transpose(librosa.stft(audio1, n_fft=4410*2-2))\ndft2a = np.square(np.abs(dft2a))\n\n#Power spectrum matrix\nplt.figure()\nplt.imshow(dft2a)\nplt.title(\"Power spectrum\")\n\n#Logarithm power spectrum matrix\nplt.figure()\nplt.imshow(np.log(dft2a))\nplt.title(\"Log power spectrum\")\n\n#The library's implementation matrix has 67 rows compared to 65 of 
the\n#self-made function. The values seem to be slightly different as well,\n#possibly due to the different number of frames, or the different way\n#Librosa uses to read the signal, since it looks like the data has been\n#normalized to between [-1,1] already\n\ndef dft(signal, frame, sample_rate):\n frame_s = frame/1000 \n samples = int(frame_s * sample_rate)\n idx = 0\n \n signals = []\n dfts = []\n dft_matrix = np.empty((int(len(signal)/int(samples/2)+1), samples))\n for i in np.arange(0, len(signal)-int(samples/2), int(samples/2)):\n sub_signal = signal[int(i):int(i+samples)]\n sub_signal_dft = scipy.fftpack.fft(sub_signal)\n \n signals.append(sub_signal)\n dfts.append(sub_signal_dft)\n if (len(sub_signal) == samples):\n dft_matrix[idx] = np.square(np.abs(sub_signal_dft))\n idx += 1\n\n return np.transpose(dft_matrix)\n\nmatrix1 = dft(audio1, 93, 22050)\nmatrix2 = dft(audio2, 32, 22050)\nmatrix3 = dft(y, 32, fs)\n\n\n#Logarithm power spectrum matrix\nplt.figure()\nplt.imshow(np.log(np.transpose(matrix1)))\nplt.title(\"Log power spectrum\")\n\n#Logarithm power spectrum matrix\nplt.figure()\nplt.imshow(np.log(np.transpose(matrix2)))\nplt.title(\"Log power spectrum\")\n\n#Logarithm power spectrum matrix\nplt.figure()\nplt.imshow(np.log(np.transpose(matrix3)))\nplt.title(\"Log power spectrum\")\n\n#Task 2c)\n#As the window size increases, the spectrum becomes smaller in width.\n#For the window length, most of the value below 100 works fine. 
However,\n#if the window length is too small, it will create an almost yellow spectrum,\n#which makes things indistinguishable.","sub_path":"SGN-14007 Introduction to Audio Processing/Exercise 2/Problem 2.py","file_name":"Problem 2.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"40484867","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf_gt = pd.read_csv(\"ExportCSV.csv\", sep=\";\")#2016 210\ndf_gt = df_gt[np.logical_not(pd.to_numeric(df_gt.LocationAbbr, errors='coerce').notnull())]\ndf_gt = df_gt.dropna(subset=['LocationAbbr']) #Drop only if NaN in specific column (as asked in the question)\n\ndf2_gt = pd.read_csv(\"ExportCSV-2.csv\", sep=\";\")\ndf2_gt = df2_gt.dropna(subset=['LocationAbbr'])\nprint(\"da rimuovere\", set(df_gt.LocationAbbr).difference(set(df2_gt.LocationAbbr)))\n\ngk2015 = pd.read_csv(\"GoogleKidney2015.csv\",sep=\",\")#2015\ng = (gk2015[1:])\ng = g.convert_objects(convert_numeric=True)\ng = g.sort_index()\ngoogle_values = [x[0] for x in g.values]\n\nstates = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 
'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n}\n\ngoogle_states = []\ni = 0\nfor state in g.index:\n if list(states.values())[i] == state:\n google_states.append(list(states.keys())[i])\n i += 1\n\nst = dict((y,x) for x, y in states.items())\nindex = list(g.index)\ngoogle_states = []\nfor state in st:\n if state in index:\n google_states.append(st[state])\nprint(len(google_states), len(google_values))\nlen(set(google_values).difference(set(google_states)))\n\n# select the same states on the GT and the Google data (standardize)\ndf2_gt = df2_gt[df2_gt['LocationAbbr'].isin(google_states)]\n\n# create a compact ground truth dataframe\ndf2_gt = pd.DataFrame(data=list(df2_gt.Data_Value), index=list(df2_gt.LocationAbbr), columns=['value'],\n dtype=np.float)\n\ngoogle_df = pd.DataFrame(data=google_values, index=google_states, columns=['value'], dtype=np.float)\n\n# normalizing\ndf2_gt = (df2_gt - df2_gt.mean())/df2_gt.std()\ngoogle_df = (google_df - google_df.mean())/google_df.std()\n\n# order\ndf2_gt = df2_gt.sort_index()\ngoogle_df = google_df.sort_index()\n\nax = df2_gt.plot()\ngoogle_df.plot(ax=ax)\n\n# google_df.plot()\n# df2_gt.plot(color=\"red\")\n","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"470021821","text":"\"\"\"\n./LOAD.py \nModule for Load model\nAuthor: Ryan Tulabing\nProject: Local Demand Control\nInstitution: University of Auckland\nYear: 2017 - 2020\n\"\"\"\n\n#---import python packages---\nfrom optparse import OptionParser\n# from cvxopt.base import matrix, mul, sin, cos\nfrom numpy import linspace\nimport 
sys\n\n\nimport numpy as np\nimport pandas as pd\nimport datetime, time\nimport threading, queue\nimport multiprocessing\n\n# from optparse import OptionParser\n# from cvxopt.base import matrix, mul, sin, cos\n# from numpy import linspace\n# import sys\n# import matplotlib.pyplot as pyplot\n\n# try:\n# # for interacting with raspi\n# import RPi.GPIO as GPIO\n# GPIO.setmode(GPIO.BOARD) # set up BOARD GPIO numbering \n# # for driving chroma through rs232\n# import serial\n# # for reading spi\n# import spidev\n# except:\n# pass\n\n# try:\n# # for controlling pifacedigital\n# import pifacedigitalio\n# except:\n# pass\n\n# for multicast\nimport socket\nimport struct\nimport sys\nimport json\nimport ast\n\n\n#---import local modules---\nimport FUNCTIONS\nimport CLOCK\nimport WEATHER\nimport solar\nimport CREATOR\nimport COMMON\n\nfrom CREATOR import df_houseSpecs, df_heatpumpSpecs, df_heaterSpecs, df_fridgeSpecs, df_freezerSpecs \nfrom CREATOR import df_waterheaterSpecs, df_clotheswasherSpecs, df_clothesdryerSpecs\nfrom CREATOR import df_dishwasherSpecs, df_evSpecs, df_storageSpecs\nfrom CREATOR import df_clotheswasher, df_clothesdryer, df_dishwasher\nfrom CREATOR import df_solarSpecs, df_windSpecs\n\n\nglobal history, df_history, df_demand, df_demand_copy, cols, target_loading, grid_capacity, aggregation\nglobal mean_latitude, mean_longitude, start_time, time_step, df_baseload\n\nfrom numpy import nanmean, nanmin, nanmax\nhistory = {}\ncols = ['name', 'house', 'load_type', 'load_class', 'unixtime', 'hour_start', 'hour_end', 'temp_in', 'temp_out', 'connected', 'mode', 'p_status', 'a_status', 'p_demand', 'a_demand', 'soc','flexibility', 'priority', 'limit', 'ldc_signal']\ndf_demand = pd.DataFrame([], columns=cols)\ndf_demand_copy = df_demand.copy()\ndf_history = pd.DataFrame([])\n\n\naggregation = {\n 'temp_in': ['mean', 'min', 'max'],\n 'temp_in_active': [nanmean, nanmin, nanmax],\n 'temp_out': ['mean', 'min', 'max'],\n 'soc': ['mean', 'min', 'max'],\n 'flexibility': 
['mean', 'min', 'max'],\n 'priority':['mean', 'min', 'max'],\n 'a_demand': ['sum'],\n 'p_demand': ['sum']\n}\n\n\n\nclass base():\n \"\"\"Common base class for all devices\"\"\"\n def __init__(self):\n # multiprocessing.Process.__init__(self)\n # self.daemon = True\n\n self.params = {}\n # self.q_states_self = queue.Queue(maxsize=3) # queue for data of the device\n # self.q_states_all = queue.Queue(maxsize=3) # queue for data of peers\n # self.q_user_cmd = queue.Queue(maxsize=3) # queue for holding the user-command on the state of the date (overiding the auto mode)\n # self.q_grid_cmd = queue.Queue(maxsize=3)\n \n self.dict_states_self = {}\n self.dict_states_all = {}\n self.dict_user_cmd = {}\n self.dict_grid_cmd = {}\n\n # self.q_states_self.put(self.dict_states_self)\n # self.q_states_all.put(self.dict_states_all)\n # self.q_user_cmd.put(self.dict_user_cmd)\n # self.q_grid_cmd.put(self.dict_grid_cmd)\n\n self.unixtime = 0\n self.isotime = ''\n \n self.n = 0\n\n def setup(self):\n for key in self.params.keys():\n self.__dict__[key] = np.array([]) #self.params[key]\n\n # self.dict_user_cmd.update({'status':1, 'priority':0, 'schedule':{}, 'can_shed':0, 'can_ramp':0, 'hour_start':0, 'hour_end':0})\n # # self.q_user_cmd.put(self.dict_user_cmd)\n \n # self.dict_grid_cmd.update({'algorithm':self.algorithm, 'frequency':self.ldc_signal, 'loading':self.limit, 'timescale':self.timescale})\n # self.q_grid_cmd.put(self.dict_grid_cmd)\n \n\n def list2matrix(self):\n for key in self.params.keys():\n self.__dict__[key] = matrix(self.__dict__[key], (self.n, 1), 'd')\n\n def add(self, **kwargs):\n self.n += 1\n keys = self.params.keys()\n\n # create additional slot in the numpy array\n for key in keys:\n try:\n self.__dict__[key] = np.append(self.__dict__[key], np.array([self.params[key]]))\n\n except Exception as e:\n print(\"Error:\", key, e)\n\n # add value to the last slot in the numpy array, except for list_starts, list_ends, and q_user_cmd\n for key, val in 
kwargs.items():\n if not key in keys: continue\n\n try:\n if key in ['list_starts', 'list_ends', 'q_user_cmd']:\n self.params[key].append(val)\n else:\n self.__dict__[key][-1] = val\n\n except Exception as e:\n print(e, self.__dict__[key], key, val)\n\n\n def prerun(self):\n self.n_usage = self.n_usage.astype(int)\n self.counter = self.counter.astype(int)\n self.old_status = self.a_status\n self.params['list_starts'] = np.array(self.params['list_starts'])\n self.params['list_ends'] = np.array(self.params['list_ends'])\n # padd arrays\n # if 'baseload' in self.params['load_type']:\n # self.__dict__['profile'] = np.array(self.params['profile'])\n # else:\n # for i in range(len(self.params['profile'])):\n # p = int(np.max(self.len_profile)-self.len_profile[i])\n # self.params['profile'][i] = np.pad(self.params['profile'][i], (0, p), mode='constant')\n \n # self.params['profile'] = np.array(self.params['profile']).reshape(len(self.params['profile']), int(np.max(self.len_profile)))\n \n\n \n\n\n\n\n def run_device(self):\n # self.drive_chroma()\n # self.drive_piface()\n # self.drive_relay()\n self.simulate_model()\n # do other stuff\n return 0\n\n\n def step(self):\n # simulation step for the house and all loads therein\n try:\n self.propose_demand()\n self.decide()\n self.update_demand()\n self.run_device()\n return self.dict_states_self # dict_states_self is updated at COMMON.py\n except Exception as e:\n print(\"Error in \", self.name, \" step:\", e)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass Load(base):\n def __init__(self):\n base.__init__(self)\n self.params = {}\n \n self.params.update({\n 'unixtime':time.time(), \n 'isotime':datetime.datetime.now().isoformat(),\n 'timescale':1, \n 'timestep':1,\n 'schedule_skew':0,\n 'skew':0,\n 'ldc':0,\n 'baseload':0,\n 'humidity':0,\n 'windspeed':0,\n 'irradiance': 0,\n 'irradiance_roof': 0,\n 'irradiance_wall1': 0,\n 'irradiance_wall2': 0,\n 'irradiance_wall3': 0,\n 'irradiance_wall4': 0,\n 'solar_capacity': 0,\n 
'solar_efficiency': 0,\n\n 'heat_in':0.0, \n 'heat_ex':0.0, \n 'heat_device':0.0, \n 'heat_all':0.0,\n 'demand_heating':0.0, \n 'demand_cooling':0.0,\n 'Cp': 1006.0,\n 'Ca': 1006.0,\n 'Cm': 1006.0, \n 'Um': 0,\n 'Ua': 0,\n 'water_density':1000.0, \n 'mass_flow':0.0,\n 'cop': 3.5, \n 'temp_in': 0,\n 'temp_mat': 0,\n 'temp_out': 0,\n 'cooling_setpoint': 0,\n 'heating_setpoint': 0,\n 'tolerance': 0,\n 'temp_min': 0,\n 'temp_max': 0,\n 'heat_in':0.0, \n 'heat_ex':0.0, \n 'heat_device':0.0, \n 'heat_all':0.0,\n 'cooling_counter': 0,\n 'heating_counter': 0,\n 'min_coolingtime': 0,\n 'min_heatingtime': 0,\n\n 'charging_counter':0,\n 'discharging_counter': 0,\n 'charging_counter':0,\n 'discharging_counter': 0,\n 'min_chargingtime': 5,\n 'min_dischargingtime': 5,\n 'counter':0,\n 'power_battery':0, \n 'trip_time':0, \n \n 'priority':0.0, \n 'd_priority':0.0, \n 'job_status':0,\n 'flexibility':0.0, \n 'soc':0.0,\n 'mode':0,\n 'ramp_power':0.0, \n 'shed_power':0.0,\n 'can_shed':0,\n 'can_ramp':0,\n 'p_status':0,\n 'a_status':0,\n 'p_demand':0.0,\n 'a_demand':0.0,\n\n 'house_capacity':10000.0, \n 'house_limit':10000.0,\n 'ldc_signal':100, \n 'delta_signal':0, \n 'ldc_command':1.0, \n 'algorithm':0, \n 'limit':10000.0,\n 'load_class':'ntcl',\n 'load_type':'ntcl',\n 'hour_start':0,\n 'hour_end':24,\n 'unix_start':0,\n 'unix_end':0,\n 'connected':0,\n 'finish':0,\n 'n_usage':0,\n 'list_starts':[],\n 'list_ends':[],\n 'profile':[],\n 'len_profile':0,\n 'q_user_cmd':[],\n })\n self.setup()\n \n\n\n\n\n\n def propose_demand(self):\n # This function proposes the demand and status of the device for the next time step\n try:\n # determine if the device is connected\n # update n_usage... 
used as index for list_starts, and list_ends (the schedule)\n self.__dict__.update(COMMON.get_n_usage(**self.__dict__))\n # get the data about when the device should start and end\n self.__dict__.update(COMMON.get_hour(**self.__dict__))\n # convert schedules from hours of the day to unix\n self.__dict__.update(COMMON.get_unix(**self.__dict__))\n # based on unix_start and unix_end determine if the device is connected at current unixtime\n self.__dict__.update(COMMON.is_connected(**self.__dict__))\n\n # determine device status\n # get flexibility\n self.__dict__.update(COMMON.get_flexibility(**self.__dict__))\n # get state of charge\n self.__dict__.update(COMMON.get_soc(**self.__dict__))\n # get job status\n self.__dict__.update(COMMON.get_job_status(**self.__dict__))\n # determine mode of the device\n self.__dict__.update(COMMON.get_mode(**self.__dict__))\n # propose a status for the device (i.e., on or off)\n self.__dict__.update(COMMON.get_p_status(**self.__dict__))\n\n # determine device demand for this timestep\n # propose a power demand of the device\n self.__dict__.update(COMMON.get_p_demand(**self.__dict__))\n\n # determine ramping and shedding potential\n # determine if can ramp or can shed\n self.__dict__.update(COMMON.check_ramp_shed(**self.__dict__))\n # calculate ramping power\n self.__dict__.update(COMMON.get_ramp_power(**self.__dict__))\n # calculate shedding power\n self.__dict__.update(COMMON.get_shed_power(**self.__dict__))\n\n except Exception as e:\n print(\"Error propose_demand:\", e)\n\n return 0\n\n\n def decide(self):\n # decide to approve or deny the proposed status\n # get device priority\n self.__dict__.update(COMMON.adjust_priority(**self.__dict__))\n # interpret ldc_signal\n self.__dict__.update(COMMON.interpret_signal(**self.__dict__))\n # adjust local limit\n self.__dict__.update(COMMON.adjust_limit(**self.__dict__))\n # save old status\n self.old_status = self.a_status # save old status\n # decide on the next status\n 
self.__dict__.update(COMMON.get_a_status(**self.__dict__))\n \n return self.a_status\n\n def update_demand(self):\n # Update the heat contribution of the TCL considering the aggregators command\n # recalculate demand for next timestep\n self.__dict__.update(COMMON.get_a_demand(**self.__dict__))\n\n return self.a_demand\n\n\n def simulate_model(self):\n # simulate the model\n # run simulation and determine the temp_in and temp_mat \n self.__dict__.update(COMMON.simulate_model(**self.__dict__))\n\n # update data for peers and history records\n self.__dict__.update(COMMON.broadcast(**self.__dict__))\n\n # update counter, which counts how long the device is at a certain status\n self.__dict__.update(COMMON.adjust_counter(**self.__dict__))\n\n return self.temp_in, self.temp_mat\n\n\n\n\nclass App(Load):\n def __init__(self):\n Load.__init__(self)\n\n for key in list(df_houseSpecs):\n self.params[key] = df_houseSpecs.loc[self.n, key] \n\n for key in list(df_heatpumpSpecs):\n self.params[key] = df_heatpumpSpecs.loc[self.n, key] \n\n for key in list(df_waterheaterSpecs):\n self.params[key] = df_waterheaterSpecs.loc[self.n, key] \n\n for key in list(df_freezerSpecs):\n self.params[key] = df_freezerSpecs.loc[self.n, key] \n\n for key in list(df_fridgeSpecs):\n self.params[key] = df_fridgeSpecs.loc[self.n, key] \n\n for key in list(df_clotheswasherSpecs):\n self.params[key] = df_clotheswasherSpecs.loc[self.n, key] \n\n for key in list(df_clothesdryerSpecs):\n self.params[key] = df_clothesdryerSpecs.loc[self.n, key] \n\n for key in list(df_dishwasherSpecs):\n self.params[key] = df_dishwasherSpecs.loc[self.n, key] \n\n for key in list(df_evSpecs):\n self.params[key] = df_evSpecs.loc[self.n, key] \n\n for key in list(df_solarSpecs):\n self.params[key] = df_solarSpecs.loc[self.n, key]\n\n\n self.setup()\n\n\n\n\n\n\n\nclass Device():\n \"\"\"interface for all specific device classes\"\"\"\n\n def __init__(self, flist, realtime=False, timescale=1):\n \n global history, df_history, 
df_demand, df_demand_copy, cols, target_loading, grid_capacity, aggregation\n global mean_latitude, mean_longitude, start_time, time_step, df_baseload\n\n self.flist = flist\n self.ldc_signal = 100\n self.limit = grid_capacity\n self.loading = target_loading\n self.target = self.loading * self.limit\n self.dict_agg = {}\n self.df_agg = pd.DataFrame([])\n\n # run global clock\n self.clock = CLOCK.Clock(name='clock', start=start_time, end=None, step_size=time_step, realtime=realtime, timescale=timescale) \n # run weather sensor object\n self.weather = WEATHER.Weather(name='weather', latitude=mean_latitude, longitude=mean_longitude, timestamp=start_time, mcast_ip='238.173.254.147', mcast_port=12604)\n # save df_baseloads\n \n # elapsed time counter\n self.c = time.perf_counter()\n # checkpoint to display simulation status\n self.checkpoint = 60 # [s] display every 60 seconds\n\n for item in self.flist:\n try:\n self.__dict__[item] = eval(item + '()')\n history[item] = []\n except Exception as e:\n print(\"Error Device init:\", e)\n\n def setup(self):\n for item in self.flist:\n if self.__dict__[item].n:\n self.__dict__[item].list2matrix()\n\n def fcall(self, x):\n f = 0\n for item in self.flist:\n if self.__dict__[item].n:\n f += self.__dict__[item].fcall(x)\n\n return f\n\n def dfcall(self, x):\n df = 0\n for item in self.flist:\n if self.__dict__[item].n:\n df += self.__dict__[item].dfcall(x)\n\n return df\n\n def step(self, ldc_signal=None, loading=None, report=False, save=False):\n global history, df_demand, cols, df_history, aggregation\n\n # update ldc_signal\n if ldc_signal: self.ldc_signal = ldc_signal\n if loading: \n self.loading = loading\n self.target = self.limit * self.loading\n # advance clock\n self.clock.step()\n self.__dict__['App'].unixtime = self.clock.timestamp\n self.__dict__['App'].isotime = self.clock.isotime\n self.__dict__['App'].dayhour = self.clock.dayhour\n self.__dict__['App'].daysecond = self.clock.daysecond\n self.__dict__['App'].tm_min 
= self.clock.tm_min\n self.__dict__['App'].step_size = self.clock.step_size\n self.__dict__['App'].m = 1\n\n # get weather data\n dict_weather = self.weather.weather_now(self.clock.timestamp)['weather']\n\n # get indices for each type of device\n idx_house = np.flatnonzero(self.__dict__['App'].load_type=='baseload')\n idx_hvac = np.flatnonzero((self.__dict__['App'].load_type=='hvac') | (self.__dict__['App'].load_type=='heater'))\n idx_tcl = np.flatnonzero(self.__dict__['App'].load_class=='tcl')\n idx_der = np.flatnonzero(self.__dict__['App'].load_class=='der')\n \n if np.size(idx_house): \n self.__dict__['App'].humidity[idx_house] = np.add(np.random.normal(0, 0.001, np.size(idx_house)), dict_weather['humidity'])\n self.__dict__['App'].windspeed[idx_house] = np.add(np.random.normal(0, 0.1, np.size(idx_house)), dict_weather['windspeed'])\n self.__dict__['App'].temp_out[idx_house] = np.add(np.random.normal(0, 0.01, np.size(idx_house)), dict_weather['temp_out'])\n\n for item in self.flist:\n try:\n if self.__dict__[item].n:\n # signal is added with randomness to account for variations in the accuracy of the sensors\n self.__dict__[item].ldc_signal = np.clip(np.add(np.random.normal(0, 3, self.__dict__[item].n), self.ldc_signal), a_min=0.1, a_max=100)\n df_data = pd.DataFrame.from_dict(self.__dict__[item].step(), orient='columns')\n df_data.index = df_data['name'].values\n df_data[['unixtime', 'unix_start', 'unix_end']] = df_data[['unixtime', 'unix_start', 'unix_end']].astype(int)\n # print(df_data[['isotime','unixtime', 'unix_start', 'unix_end', 'connected', 'soc', 'mode','p_status', 'a_status','a_demand', 'temp_out','temp_in', 'temp_max', 'temp_min', 'can_ramp', 'can_shed']])\n if df_data.index[0] in df_demand.index:\n df_demand.update(df_data)\n else:\n df_demand = pd.concat([df_demand, df_data], sort=False)\n\n df_demand_copy = df_demand.set_index('house', inplace=False)\n # print(df_demand_copy[['hour_start', 'hour_end', 'unixtime', 'unix_start', 'unix_end', 
'connected', 'soc', 'mode','p_status', 'a_status','a_demand', 'n_usage', 'irradiance']])\n except Exception as e:\n print(\"Error Device step:\", e, item)\n \n # extend baseload data to all devices in each house\n try:\n self.__dict__['App'].baseload = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand['house'].values, 'a_demand'].values\n except:\n pass\n\n # update irradiance for all houses\n if np.size(idx_der):\n self.__dict__['App'].irradiance[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance'].values\n self.__dict__['App'].irradiance_roof[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance_roof'].values\n self.__dict__['App'].irradiance_wall1[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance_wall1'].values\n self.__dict__['App'].irradiance_wall2[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance_wall2'].values\n self.__dict__['App'].irradiance_wall3[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance_wall3'].values\n self.__dict__['App'].irradiance_wall4[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'irradiance_wall4'].values\n self.__dict__['App'].humidity[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'humidity'].values\n self.__dict__['App'].windspeed[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'windspeed'].values\n 
self.__dict__['App'].temp_out[idx_der] = df_demand_copy[df_demand_copy['load_type']=='baseload'].loc[df_demand[df_demand['load_class']=='der']['house'].values, 'temp_out'].values\n\n\n # The following lines are valid since, the ratio of hvac to house is 1:1\n if np.size(idx_hvac): \n # self.__dict__['App'].temp_in[idx_house] = df_demand_copy[df_demand_copy['load_type']=='hvac'].loc[df_demand[df_demand['load_class']=='tcl']['house'].values,'temp_in'].values\n # update temp_out of all tcl devices, note: hvac temp_out will be update below\n # self.__dict__['App'].temp_out[idx_tcl] = df_demand_copy[df_demand_copy['load_type']=='hvac'].loc[df_demand[df_demand['load_class']=='tcl']['house'].values,'temp_in'].values\n # update temp out of hvacs\n self.__dict__['App'].temp_out[idx_hvac] = np.add(np.random.normal(0, 0.01, np.size(idx_hvac)), dict_weather['temp_out'])\n self.__dict__['App'].humidity[idx_hvac] = np.add(np.random.normal(0, 0.001, np.size(idx_hvac)), dict_weather['humidity'])\n self.__dict__['App'].windspeed[idx_hvac] = np.add(np.random.normal(0, 0.1, np.size(idx_hvac)), dict_weather['windspeed'])\n \n \n \n # adjust ldc_signal\n if ldc_signal==None:\n offset = (((self.target) - df_demand['a_demand'].sum()) / self.limit)\n self.ldc_signal += offset * (self.clock.step_size * 1e-1)\n self.ldc_signal = np.clip(self.ldc_signal, a_min=0.01, a_max=100.0)\n\n # prepare data to return\n df_data = df_demand[['house','a_demand']].groupby('house', sort=True).sum().reset_index(drop=True)\n df_data['p_mw'] = df_data['a_demand'] * 1e-6\n df_data['pf'] = np.random.normal(0.94, 0.01, len(df_data.index)) # assumed average power factor is 0.94\n df_data['q_mvar'] = (df_data['p_mw'] / df_data['pf']) * np.sin(np.arccos(df_data['pf']))\n \n idx_a = np.flatnonzero(self.__dict__['App'].phase=='AN')\n idx_b = np.flatnonzero(self.__dict__['App'].phase=='BN')\n idx_c = np.flatnonzero(self.__dict__['App'].phase=='CN')\n\n dict_phases = {'AN':idx_a, 'BN':idx_b, 'CN':idx_c}\n\n for i in 
['A', 'B', 'C']:\n df_data['p_{}_mw'.format(i)] = 0\n df_data['p_{}_mw'.format(i)][dict_phases['{}N'.format(i)]] = df_data['p_mw'][dict_phases['{}N'.format(i)]]\n df_data['q_{}_mvar'.format(i)] = 0\n df_data['q_{}_mvar'.format(i)][dict_phases['{}N'.format(i)]] = df_data['q_mvar'][dict_phases['{}N'.format(i)]]\n\n \n # prepare aggregated data of all devices\n summary = df_demand.groupby('load_type').agg(aggregation)\n summary.columns = [\"_\".join(x) for x in summary.columns.ravel()]\n self.df_agg = summary.T.unstack().to_frame().sort_index(level=1).T\n self.df_agg.columns = self.df_agg.columns.map('_'.join) \n self.df_agg['limit'] = self.limit\n self.df_agg['unixtime'] = df_demand['unixtime'].mean()\n self.df_agg['ldc_signal'] = df_demand['ldc_signal'].mean()\n self.df_agg['loading'] = self.loading \n self.df_agg['sum_a_mw'] = df_data['p_mw'].sum() # from df_data\n self.df_agg['sum_a_mvar'] = df_data['q_mvar'].sum() # from df_data\n self.df_agg['sum_p_mw'] = df_demand['p_demand'].sum() * 1e-6 \n self.df_agg['sum_p_mvar'] = (self.df_agg['sum_p_mw'] / 0.94) * np.sin(np.arccos(0.94)) \n self.df_agg['sum_solar_mw'] = df_demand['solar_capacity'].sum() * 1e-6\n self.df_agg['mean_flexibility'] = df_demand['flexibility'].mean()\n\n # append to history \n if save:\n df_history = pd.concat([df_history, self.df_agg], sort=False).reset_index(drop=True)\n \n # save history to csv file\n if int(self.clock.timestamp)<=start_time+1:\n df_history.to_csv('./results/history_basic_ldc.csv', index=False, header=True) # write first row of data with headers\n df_history = df_history.tail(0) # empty the data frame\n elif int(self.clock.timestamp)%self.checkpoint==0: \n if report:\n t = time.perf_counter()\n print(self.clock.isotime, np.round((t - self.c)/self.checkpoint, 3), 's/step', \n 'demand:', np.round(df_demand['a_demand'].sum(),3), 'signal:', np.round(df_demand['ldc_signal'].mean(), 3), \n 'limit:', np.round(df_demand['limit'].mean(), 3), 'flex:', 
np.round(df_demand['flexibility'].mean(), 3), \n 'priority:', np.round(df_demand['priority'].mean(), 3))\n\n self.c = t\n\n with open('./results/history_basic_ldc.csv', 'a') as f:\n df_history.to_csv(f, index=False, header=False) # do not write the header\n df_history = df_history.tail(0) # empty the data frame (to reduce memory load) since data has already been written to csv file.\n \n\n # df_data['unixtime'] = np.mean(df_demand['unixtime'])\n # df_demand['localtime'] = [pd.to_datetime(a, unit='s').tz_localize('UTC').tz_convert('Pacific/Auckland').isoformat() for a in df_demand['unixtime']]\n\n # df=df.groupby('Name').agg({'Missed':'sum', 'Credit':'sum','Grade':'mean'}).rename(columns=d), \n # df_demand['iso_end'] = [pd.to_datetime(a, unit='s').tz_localize('UTC').tz_convert('Pacific/Auckland').isoformat() for a in df_demand['unix_end']]\n\n return df_data[['p_mw', 'q_mvar', 'p_A_mw', 'p_B_mw', 'p_C_mw', 'q_A_mvar', 'q_B_mvar', 'q_C_mvar']]\n\n def save(self):\n for item in self.flist:\n if self.__dict__[item].n:\n self.__dict__[item].save() \n return 0\n\n\n\ndef read():\n \"\"\"parse input data in lain text format\"\"\"\n # fid = open(datafile, 'rt')\n\n for line in fid:\n data = line.split()\n if not len(data): continue\n if data[0] == 'fridge':\n Function.poly.add( a = float(data[1]),\n b = float(data[2]),\n c = float(data[3]))\n elif data[0] == 'freezer':\n Function.sine.add( A = float(data[1]),\n omega = float(data[2]),\n phi = float(data[3]))\n\n fid.close()\n\n\n\ndef change_ldc(device, load_type, n_ldc, report=False):\n idx = np.flatnonzero(device.App.__dict__['load_type']==load_type)\n device.App.__dict__['ldc'][idx] = 0\n s = idx[:int(n_ldc)]\n\n if np.size(s): device.App.__dict__['ldc'][s] = 1\n if report: print('{} units of {} are capable, out of {}'.format(np.size(s), load_type, np.size(idx)))\n return device\n\n\n\ndef add_unit(device, load_type, n_unit=0, n_ldc=0, idx=10, report=False):\n # solar PV\n dict_load_specs = {\n 'baseload': 
[df_houseSpecs, device.App.__dict__['df_baseload']],\n 'hvac': [df_heatpumpSpecs, device.App.__dict__['df_baseload']],\n 'heater': [df_heaterSpecs, device.App.__dict__['df_baseload']],\n 'waterheater': [df_waterheaterSpecs, device.App.__dict__['df_baseload']],\n 'fridge': [df_fridgeSpecs, device.App.__dict__['df_baseload']],\n 'freezer': [df_freezerSpecs, device.App.__dict__['df_baseload']],\n 'clotheswasher': [df_clotheswasherSpecs, df_clotheswasher],\n 'clothesdryer': [df_clothesdryerSpecs, df_clothesdryer],\n 'dishwasher': [df_dishwasherSpecs, df_dishwasher],\n 'ev': [df_evSpecs, 0],\n 'storage': [df_storageSpecs, 0],\n 'solar': [df_solarSpecs, 0],\n 'wind': [df_windSpecs, 0]\n }\n\n try:\n device.App.params['list_starts'] = list(device.App.params['list_starts'])\n device.App.params['list_ends'] = list(device.App.params['list_ends'])\n \n if n_unit:\n for i in range(idx, idx + n_unit):\n dict_params = {}\n for key in list(dict_load_specs[load_type][0]):\n dict_params[key] = dict_load_specs[load_type][0].loc[i, key] \n dict_params['list_starts'] = np.array([dict_params['s{}'.format(i)] for i in range(10)])\n dict_params['list_ends'] = np.array([dict_params['e{}'.format(i)] for i in range(10)])\n\n if load_type in ['ev', 'storage', 'solar', 'wind']:\n dict_params['len_profile'] = 0 \n else:\n dict_params['len_profile'] = len(dict_load_specs[load_type][1][dict_params['profile']].values)\n device.App.add(**dict_params)\n\n device.App.prerun()\n\n if report: print('System has {} units of {}'.format(n_unit, load_type))\n\n except Exception as e:\n print(\"Error LOAD.py add_unit:\", e, load_type)\n return device\n\ndef add_device(device, dict_devices, idx=10):\n '''\n Add units of devices based on dict_devices\n '''\n dict_load_type = {\n 'House': 'baseload',\n 'Hvac': 'hvac',\n 'Heater': 'heater',\n 'Waterheater': 'waterheater',\n 'Fridge': 'fridge',\n 'Freezer': 'freezer',\n 'Clotheswasher': 'clotheswasher',\n 'Clothesdryer': 'clothesdryer',\n 'Dishwasher': 
'dishwasher',\n 'Ev': 'ev',\n 'Storage': 'storage',\n 'Solar': 'solar',\n 'Wind': 'wind'\n }\n\n for k in dict_load_type.keys():\n if dict_devices[k]['n']:\n if len(device.App.__dict__['load_type']):\n x_unit = len(np.flatnonzero(device.App.__dict__['load_type']==dict_load_type[k])) # existing number of units\n x_ldc = len(np.flatnonzero((device.App.__dict__['load_type']==dict_load_type[k])&(device.App.__dict__['ldc']==1))) # existing number of units with ldc\n \n else:\n x_unit = 0\n x_ldc = 0\n\n n_idx = idx + x_unit # offset starting index for profile selection in df_houseSPecs, etc..\n n_unit = dict_devices[k]['n'] - x_unit\n\n if n_unit:\n n_ldc = int(dict_devices[k]['n'] * dict_devices[k]['ldc']) - x_ldc\n device = add_unit(device=device, load_type=dict_load_type[k], n_unit=n_unit, n_ldc=n_ldc, idx=n_idx)\n device = change_ldc(device=device, load_type=dict_load_type[k], n_ldc=int(dict_devices[k]['n'] * dict_devices[k]['ldc']))\n\n return device\n\n\n\n\n\ndef to_yearsecond(start, duration):\n dt_range = pd.date_range(start=pd.to_datetime(start, unit='s'),\n freq='S', periods=duration, tz='UTC')\n \n dt_range = dt_range.tz_convert('Pacific/Auckland')\n df_datetime = pd.DataFrame(dt_range, columns=['date_time'])\n df_datetime.index = pd.DatetimeIndex(df_datetime['date_time'])\n df_datetime['yearsecond'] = ((df_datetime.index.week - 1) * (3600*24*7)) \\\n + (df_datetime.index.dayofweek * (3600*24)) \\\n + (df_datetime.index.hour * 3600) \\\n + (df_datetime.index.minute * 60) \\\n + (df_datetime.index.second)\n\n # print(dt_range)\n \n return df_datetime['yearsecond'].values[0], df_datetime['yearsecond'].values[-1]\n\ndef get_baseloads(start, duration, padding=1800):\n '''\n Get the baseloads from './profiles/baseload.h5' and return a dataframe\n '''\n x, y = to_yearsecond(start, duration)\n with pd.HDFStore('./profiles/baseload.h5', 'r') as store:\n df = store.select('records', where='index>={} and index<={}'.format(x - padding, y + padding))\n return df\n 
\n\n####---------------------------------------------------------------------------------\n\ndef make_devices(dict_devices, \n idx=11,\n capacity = 300e3,\n loading = 0.5,\n start = time.time(),\n duration = 3600, #[s]72 hours\n step_size = 1,\n realtime = True,\n timescale = 1,\n three_phase = True,\n simulate=1,\n renew=1,\n latitude = -36.866590076725494,\n longitude = 174.77534779638677,\n mcast_ip_local = '238.173.254.147',\n mcast_port_local = 12604,\n ):\n \n global history, df_history, df_demand, df_demand_copy, cols, target_loading, grid_capacity\n global mean_latitude, mean_longitude, start_time, time_step, df_baseload\n grid_capacity = capacity\n target_loading = loading\n mean_latitude = latitude\n mean_longitude = longitude\n start_time = start\n time_step = step_size\n\n\n # CREATOR.create_specs(\n # n_houses = dict_devices['House']['n'] + idx, \n # n_hvacs = dict_devices['Hvac']['n'] + idx, \n # n_heaters = dict_devices['Heater']['n'] + idx, \n # n_waterheaters = dict_devices['Waterheater']['n'] + idx, \n # n_fridges = dict_devices['Fridge']['n'] + idx, \n # n_freezers = dict_devices['Freezer']['n'] + idx,\n # n_evs = dict_devices['Ev']['n'] + idx, \n # n_storages = dict_devices['Storage']['n'] + idx, \n # n_clotheswashers = dict_devices['Clotheswasher']['n'] + idx, \n # n_clothesdryers = dict_devices['Clothesdryer']['n'] + idx, \n # n_dishwashers = dict_devices['Dishwasher']['n'] + idx,\n # n_pvs = dict_devices['Solar']['n'] + idx, \n # n_winds = dict_devices['Wind']['n'] + idx, \n # ldc_adoption = dict_devices['House']['ldc'], \n # v2g_adoption = dict_devices['Ev']['v2g'], \n # latitude = latitude,\n # longitude = longitude, \n # renew=renew\n # )\n\n df_houseSpecs = pd.read_csv('./specs/houseSpecs.csv')\n df_heatpumpSpecs = pd.read_csv('./specs/hvacSpecs.csv')\n df_heaterSpecs = pd.read_csv('./specs/heaterSpecs.csv')\n df_fridgeSpecs = pd.read_csv('./specs/fridgeSpecs.csv')\n df_freezerSpecs = pd.read_csv('./specs/freezerSpecs.csv')\n 
df_waterheaterSpecs = pd.read_csv('./specs/waterheaterSpecs.csv')\n df_clotheswasherSpecs = pd.read_csv('./specs/clotheswasherSpecs.csv')\n df_clothesdryerSpecs = pd.read_csv('./specs/clothesdryerSpecs.csv')\n df_dishwasherSpecs = pd.read_csv('./specs/dishwasherSpecs.csv')\n df_evSpecs = pd.read_csv('./specs/evSpecs.csv')\n df_storageSpecs = pd.read_csv('./specs/storageSpecs.csv')\n \n\n df_baseload = get_baseloads(start, duration)\n\n\n device = Device(['App'], realtime=realtime, timescale=timescale) #list(dict_devices))\n # fetch baseload profiles\n device.App.__dict__['df_baseload'] = df_baseload\n\n device = add_device(device, dict_devices, idx=idx)\n\n return device\n\n\n \nif __name__ == '__main__':\n\n parser = OptionParser(version=' ')\n parser.add_option('-i', '--i', dest='idx',\n default=11, help='starting index')\n parser.add_option('-n', '--n', dest='n',\n default=70, help='number of units')\n \n options, args = parser.parse_args(sys.argv[1:])\n\n start_idx = 0\n ldc_adoption = 0.0\n n = int(options.n)\n grid_capacity = n * 5000\n target_loading = 0.5\n\n dt_start = datetime.datetime(2018, 7, 15, 17, 0, 0)\n start = time.mktime(dt_start.timetuple())\n\n \n \n # define the number of devices to run\n dict_devices = {\n 'House':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Hvac':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Heater':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Fridge':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Freezer':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Waterheater':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Clotheswasher':{'n':int(n*1), 'ldc':ldc_adoption},\n 'Clothesdryer':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Dishwasher':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Ev':{'n':int(n*0), 'ldc':ldc_adoption, 'v2g':int(n*0.0)},\n 'Storage':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Solar':{'n':int(n*0), 'ldc':ldc_adoption},\n 'Wind':{'n':int(n*0), 'ldc':ldc_adoption}, \n }\n\n device = make_devices(dict_devices=dict_devices,\n idx=start_idx,\n capacity = grid_capacity,\n 
loading = target_loading,\n start = start,\n duration = 1800,\n step_size = 1,\n realtime = False,\n timescale = 1,\n three_phase = True,\n simulate=1,\n renew=0,\n latitude = -36.866590076725494,\n longitude = 174.77534779638677,\n mcast_ip_local = '238.173.254.147',\n mcast_port_local = 12604,\n )\n\n checkpoint = 60 # [s] # to display simulation status and write accumulated data to the disk\n print(dict_devices)\n for i in range(60*60*36):\n # simulate devices through time\n t0 = time.perf_counter()\n x = device.step()\n print(device.App.isotime, time.perf_counter()-t0) #, device.App.a_status)\n # print(x)\n # print(device.df_agg[['waterheater_temp_in_mean', 'waterheater_temp_out_mean', 'waterheater_a_demand_sum', 'waterheater_flexibility_mean']])\n # print(x.sum(axis=0))\n \n ","sub_path":"ldc_simulator/LOAD.py","file_name":"LOAD.py","file_ext":"py","file_size_in_byte":37950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"492091147","text":"import socket\nfrom flask import Flask, render_template, request\nfrom redis import Redis\n\napp = Flask(__name__)\nredis = Redis(host='redis', port=6379)\n\n@app.route('/')\ndef home():\n count = redis.incr('hits')\n hostname = hostname = socket.gethostname()\n retStr = render_template('index.html',deployment_color = 'RED', line_back = '#FF0000',count = count, hostname = hostname)\n return(retStr)\n\n@app.route('/health')\ndef health():\n retStr = render_template('health.html', deployment_color = \"RED\")\n return(retStr)\n\n#@app.route('/redis')\n#def redis():\n# return(\"Redis query:\")\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)\n\n","sub_path":"pyredis-red/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"330845945","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START vision_batch_annotate_files]\n\n\nfrom google.cloud import vision_v1\n\n\ndef sample_batch_annotate_files(file_path=\"path/to/your/document.pdf\"):\n \"\"\"Perform batch file annotation.\"\"\"\n client = vision_v1.ImageAnnotatorClient()\n\n # Supported mime_type: application/pdf, image/tiff, image/gif\n mime_type = \"application/pdf\"\n with open(file_path, \"rb\") as f:\n content = f.read()\n input_config = {\"mime_type\": mime_type, \"content\": content}\n features = [{\"type_\": vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION}]\n\n # The service can process up to 5 pages per document file. 
Here we specify\n # the first, second, and last page of the document to be processed.\n pages = [1, 2, -1]\n requests = [{\"input_config\": input_config, \"features\": features, \"pages\": pages}]\n\n response = client.batch_annotate_files(requests=requests)\n for image_response in response.responses[0].responses:\n print(f\"Full text: {image_response.full_text_annotation.text}\")\n for page in image_response.full_text_annotation.pages:\n for block in page.blocks:\n print(f\"\\nBlock confidence: {block.confidence}\")\n for par in block.paragraphs:\n print(f\"\\tParagraph confidence: {par.confidence}\")\n for word in par.words:\n print(f\"\\t\\tWord confidence: {word.confidence}\")\n for symbol in word.symbols:\n print(\n \"\\t\\t\\tSymbol: {}, (confidence: {})\".format(\n symbol.text, symbol.confidence\n )\n )\n\n\n# [END vision_batch_annotate_files]\n","sub_path":"vision/snippets/detect/vision_batch_annotate_files.py","file_name":"vision_batch_annotate_files.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"379340398","text":"# -*- coding: cp1252 -*-\n#extract POS tags from parsed Spanish\n#the POS is the first token after the word that isnt in square or <> brackets\n#the next few (if noun or verb give tense, gender, number etc)\nPAR = open(\"Spanish_parsed.txt\", \"r\")\npar = PAR.read()\nPAR.close()\n\nOUT = open(\"spanish-processed.txt\", \"w\")\nOUT.close()\n\nprevpos = \"\"\nprevgen = \"\"\nprevnum = \"\"\nprevword = \"\"\nflipped = 0\nfor line in par.split(\"\\n\")[3:-1]:\n #print(line)\n tok = line.split()\n #print(tok)\n currword = tok[0]\n idx = 1\n if line != \".\" and line != \",\" and line != \"?\" and line != \"¿\":\n for t in tok[1:]:\n idx = idx+1\n if t[0] != \"<\" and t[0] != \"[\":\n #print(t)\n POS = t\n break\n if POS == \"ADJ\" or POS == \"N\":\n gen = tok[idx]\n num = tok[idx+1]\n\n #print(gen, num)\n if POS == \"N\":\n print(\"Currently noun, 
skip \", currword)\n else:\n if prevpos == \"N\":\n if POS == \"ADJ\":\n #noun followed by an adjective\n if (gen == prevgen or gen == \"MF\" or prevgen == \"MF\")and prevnum == num: #agrees in gender and number\n #print(line)\n #print(\"Order needs to be flipped\")\n OUT = open(\"spanish-processed.txt\", \"a\")\n OUT.write(currword+\" \"+prevword+\" \")\n flipped = 1\n #print(\"writing\", currword, prevword)\n OUT.close()\n else:\n #non agreeing adj and noun, possible?\n #print(\"no flipping because they dont agree\")\n OUT = open(\"spanish-processed.txt\", \"a\")\n #print(\"writing\", prevword, currword)\n OUT.write(prevword+\" \"+currword+\" \")\n OUT.close()\n flipped = 0\n else:\n #noun not followed by an adjective, just print the noun and current word\n #print(\"curr word follows a noun but is not an adjective\")\n OUT = open(\"spanish-processed.txt\", \"a\")\n #print(\"writing\", prevword, currword)\n OUT.write(prevword+\" \"+currword+\" \")\n OUT.close()\n flipped = 0\n else:\n #current word does not follow a noun and is not a noun, just print it\n OUT = open(\"spanish-processed.txt\", \"a\")\n OUT.write(currword+\" \")\n OUT.close()\n flipped = 0\n if POS == \"ADJ\" or POS == \"N\":\n prevgen = gen\n prevnum = num\n prevword = currword\n prevpos = POS\n \n else:\n OUT = open(\"spanish-processed.txt\", \"a\")\n if flipped == 0:\n OUT.write(prevword+\" .\\n\")\n else:\n OUT.write(\" .\\n\")\n OUT.close()\n prevword = \"\"\n prevpos = \"\"\n prevgen = \"\"\n prevnum = \"\"\n","sub_path":"hw3/getPOS.py","file_name":"getPOS.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"584807780","text":"from rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework_tracking.mixins import LoggingMixin\n\nfrom .models import Data\nfrom .serializers import DataListSerializer\n\n# from rest_framework.permissions import IsAuthenticated\nfrom 
rest_framework_jwt.authentication import JSONWebTokenAuthentication\n\n# NOTE: Took out the full list of data -> response is too long and we will never allow this request.\n# class DataListAPIView(ListAPIView):\n# \"\"\"\n# Retrieve a list of all indicators.\n# \"\"\"\n# queryset = Data.objects.all()\n# serializer_class = DataListSerializer\n\nclass CountryDataAPIView(LoggingMixin, ListAPIView):\n \"\"\"\n Retrive a country's data for a specific indicator - default for all years. \\n\n Delimiter is | between all the values in your parameters for each variable. \\n\n /countries/data/?country=202&indicator=22 (OR) \\n\n /countries/data/?country=202&indicator=22&year=2015 \\n\n \"\"\"\n # check if logged in\n # permission_classes = (IsAuthenticated,)\n authentication_classes = (JSONWebTokenAuthentication,)\n throttle_scope = 'generic'\n\n serializer_class = DataListSerializer\n\n def get_queryset(self, *args, **kwargs):\n query_params = self.request.query_params\n countries = query_params.get('country', None)\n indicators = query_params.get('indicator', None)\n years = query_params.get('year', None)\n\n # create an empty list for parameters to be filters by \n countryParams = []\n indicatorParams = []\n yearParams = []\n\n # create the list based on the query parameters\n if countries is not None:\n for country in countries.split('|'):\n country = country.replace(\"%20\", \" \")\n countryParams.append(int(country))\n if indicators is not None:\n for indicator in indicators.split('|'):\n indicator = indicator.replace(\"%20\", \" \")\n indicatorParams.append(int(indicator))\n if years is not None:\n for year in years.split('|'):\n year = year.replace(\"%20\", \" \")\n yearParams.append(int(year))\n\n # print('countries: ', countryParams)\n # print('indicators: ', indicatorParams)\n # print('year: ', yearParams)\n\n # filter by the parameters\n if countries and indicators and years is not None:\n queryset_list = Data.objects.all()\n queryset_list = 
queryset_list.filter(country_id__in=countryParams)\n queryset_list = queryset_list.filter(indicator_id__in=indicatorParams)\n queryset_list = queryset_list.filter(date__in=yearParams)\n return queryset_list\n if countries and indicators is not None and years is None:\n queryset_list = Data.objects.all()\n queryset_list = queryset_list.filter(country_id__in=countryParams)\n queryset_list = queryset_list.filter(indicator_id__in=indicatorParams)\n return queryset_list\n # return 404","sub_path":"service/data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"369935140","text":"\"\"\"A program to calculate payment to be made by a person who borrows money from a finacial institution \"\"\"\r\nfrom tkinter import *\r\n\r\nclass Loancalculator:\r\n def __init__(self):\r\n root = Tk()\r\n root.title(\"Loan Calculator\")\r\n Label(root,text=\"Annual Interest RAte\").grid(row=1,column=1,sticky=W)\r\n Label(root, text=\"Number Of Years\").grid(row=2, column=1, sticky=W)\r\n Label(root, text=\"Loan Amount\").grid(row=3, column=1, sticky=W)\r\n Label(root, text=\"Monthly Payment\").grid(row=4, column=1, sticky=W)\r\n Label(root, text=\"Total Payment\").grid(row=5, column=1, sticky=W)\r\n\r\n self.annualInterestRateVar=IntVar()\r\n Entry(root,text=self.annualInterestRateVar,justify=RIGHT).grid(row=1,column=2)\r\n self.noOfYears=IntVar()\r\n Entry(root, text=self.noOfYears, justify=RIGHT).grid(row=2, column=2)\r\n self.loanAmountVar=IntVar()\r\n Entry(root, text=self.loanAmountVar, justify=RIGHT).grid(row=3, column=2)\r\n self.monthlyPaymentVar=IntVar()\r\n lblMonthlyPayment=Label(root, textvariable=self.monthlyPaymentVar, justify=RIGHT).grid(row=4, column=2,sticky=E)\r\n self.totalPaymentVar=IntVar()\r\n lblTotalPayment=Label(root,textvariable=self.totalPaymentVar,justify=RIGHT).grid(row=5,column=2,sticky=E)\r\n 
btComputePayment=Button(root,text=\"Compute Payment\",command=self.computePayment).grid(row=6,column=2,sticky=E)\r\n root.mainloop()\r\n\r\n def monthPayment(self):\r\n annualInterestRate = self.annualInterestRateVar.get()\r\n noOfYears = self.noOfYears.get()\r\n monthlyInterestRate = (annualInterestRate / 12) / 100\r\n noOfMontlyPayments = noOfYears * 12\r\n monthlyPayment = self.loanAmountVar.get() * monthlyInterestRate / (\r\n 1 - (1 + monthlyInterestRate) ** -noOfMontlyPayments)\r\n self.monthlyPaymentVar.set(format(monthlyPayment, \"10.3f\"))\r\n return self.monthlyPaymentVar\r\n\r\n def computePayment(self):\r\n self.monthPayment()\r\n totalpayment = self.monthlyPaymentVar.get() * 12 * self.noOfYears.get()\r\n self.totalPaymentVar.set(format(totalpayment, \"10.3f\"))\r\n return self.totalPaymentVar\r\n\r\nLoancalculator()\r\n\r\n\r\n","sub_path":"Loan Calculator.py","file_name":"Loan Calculator.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"197012251","text":"\"\"\"\nNOTE: this is dead code! 
do not use!\nThis file is only present to ensure backwards compatibility\nin case someone is importing from here\nThis is only meant for 3rd party code expecting ovos-core\nto be a drop in replacement for mycroft-core\n\"\"\"\n\nimport collections\nimport csv\nimport re\nfrom os import walk\nfrom os.path import splitext, join\nfrom ovos_backend_client.pairing import is_paired\nfrom mycroft.enclosure.api import EnclosureAPI\nfrom mycroft.util.format import expand_options\nfrom ovos_utils.log import LOG\nfrom ovos_utils.intents.intent_service_interface import munge_regex, to_alnum\n\nRASPBERRY_PI_PLATFORMS = ('mycroft_mark_1', 'picroft', 'mycroft_mark_2pi')\n\nONE_MINUTE = 60\n\n# these 2 methods are maintained as part of ovos_utils but need to be available from this location for compatibility\nfrom ovos_utils.skills.settings import get_local_settings, save_settings\n\n\ndef skill_is_blacklisted(skill):\n \"\"\"DEPRECATED: do not use, method only for api backwards compatibility\n Logs a warning and returns False\n \"\"\"\n # this is a internal msm helper\n # it should have been private\n # cant remove to keep api compatibility\n # nothing in the wild should be using this\n LOG.warning(\"skill_is_blacklisted is an internal method and has been deprecated. 
Stop using it!\")\n return False\n\n\nclass DevicePrimer:\n \"\"\"DEPRECATED: this class has been fully deprecated, stop using it!\n Only here to provide public api compatibility but it does absolutely nothing!\n \"\"\"\n\n def __init__(self, message_bus_client, config=None):\n self.bus = message_bus_client\n self.platform = \"unknown\"\n self.enclosure = EnclosureAPI(self.bus)\n self.backend_down = False\n\n @property\n def is_paired(self):\n return is_paired()\n\n def prepare_device(self):\n \"\"\"Internet dependent updates of various aspects of the device.\"\"\"\n LOG.warning(\"DevicePrimer has been deprecated!\")\n\n\ndef read_vocab_file(path):\n \"\"\" Read voc file.\n\n This reads a .voc file, stripping out empty lines comments and expand\n parentheses. It returns each line as a list of all expanded\n alternatives.\n\n Args:\n path (str): path to vocab file.\n\n Returns:\n List of Lists of strings.\n \"\"\"\n LOG.warning(\"read_vocab_file is deprecated! \"\n \"use SkillResources class instead\")\n vocab = []\n with open(path, 'r', encoding='utf8') as voc_file:\n for line in voc_file.readlines():\n if line.startswith('#') or line.strip() == '':\n continue\n vocab.append(expand_options(line.lower()))\n return vocab\n\n\ndef load_regex_from_file(path, skill_id):\n \"\"\"Load regex from file\n The regex is sent to the intent handler using the message bus\n\n Args:\n path: path to vocabulary file (*.voc)\n skill_id: skill_id to the regex is tied to\n \"\"\"\n LOG.warning(\"read_regex_from_file is deprecated! 
\"\n \"use SkillResources class instead\")\n regexes = []\n if path.endswith('.rx'):\n with open(path, 'r', encoding='utf8') as reg_file:\n for line in reg_file.readlines():\n if line.startswith(\"#\"):\n continue\n LOG.debug('regex pre-munge: ' + line.strip())\n regex = munge_regex(line.strip(), skill_id)\n LOG.debug('regex post-munge: ' + regex)\n # Raise error if regex can't be compiled\n try:\n re.compile(regex)\n regexes.append(regex)\n except Exception as e:\n LOG.warning(f'Failed to compile regex {regex}: {e}')\n\n return regexes\n\n\ndef load_vocabulary(basedir, skill_id):\n \"\"\"Load vocabulary from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from (will recurse)\n skill_id: skill the data belongs to\n Returns:\n dict with intent_type as keys and list of list of lists as value.\n \"\"\"\n LOG.warning(\"load_vocabulary is deprecated! \"\n \"use SkillResources class instead\")\n vocabs = {}\n for path, _, files in walk(basedir):\n for f in files:\n if f.endswith(\".voc\"):\n vocab_type = to_alnum(skill_id) + splitext(f)[0]\n vocs = read_vocab_file(join(path, f))\n if vocs:\n vocabs[vocab_type] = vocs\n return vocabs\n\n\ndef load_regex(basedir, skill_id):\n \"\"\"Load regex from all files in the specified directory.\n\n Args:\n basedir (str): path of directory to load from\n bus (messagebus emitter): messagebus instance used to send the vocab to\n the intent service\n skill_id (str): skill identifier\n \"\"\"\n LOG.warning(\"load_regex is deprecated! 
\"\n \"use SkillResources class instead\")\n regexes = []\n for path, _, files in walk(basedir):\n for f in files:\n if f.endswith(\".rx\"):\n regexes += load_regex_from_file(join(path, f), skill_id)\n return regexes\n\n\ndef read_value_file(filename, delim):\n \"\"\"Read value file.\n\n The value file is a simple csv structure with a key and value.\n\n Args:\n filename (str): file to read\n delim (str): csv delimiter\n\n Returns:\n OrderedDict with results.\n \"\"\"\n LOG.warning(\"read_value_file is deprecated! \"\n \"use SkillResources class instead\")\n result = collections.OrderedDict()\n\n if filename:\n with open(filename) as f:\n reader = csv.reader(f, delimiter=delim)\n for row in reader:\n # skip blank or comment lines\n if not row or row[0].startswith(\"#\"):\n continue\n if len(row) != 2:\n continue\n\n result[row[0]] = row[1]\n return result\n\n\ndef read_translated_file(filename, data):\n \"\"\"Read a file inserting data.\n\n Args:\n filename (str): file to read\n data (dict): dictionary with data to insert into file\n\n Returns:\n list of lines.\n \"\"\"\n LOG.warning(\"read_translated_file is deprecated! 
\"\n \"use SkillResources class instead\")\n if filename:\n with open(filename) as f:\n text = f.read().replace('{{', '{').replace('}}', '}')\n return text.format(**data or {}).rstrip('\\n').split('\\n')\n else:\n return None\n","sub_path":"mycroft/deprecated/skills/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"578663680","text":"from keras.models import load_model\nfrom extract_frames_from_video import load_data_valid, extract_frames_from_video_valid, extract_frames_from_video_valid_scraping\nfrom os import listdir\nfrom keras.utils import to_categorical\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nimport json\nimport pytube\n\n#define variables and chrome driver\nhome_dir = \"C:\\\\Users\\\\ASUS\\\\PycharmProjects\\\\vehicle_accident_investigation_flask\"\nnow = datetime.datetime. 
now()\nlistInt = []\ndriver = webdriver.Chrome(home_dir + \"\\\\chrome_driver\\\\chromedriver.exe\")\nurl = \"https://www.youtube.com/results?search_query=vehicle+crash+in+a+traffic+road\"\n\ndef getIndex(YList):\n index = 0\n #print(\"YList : \", YList)\n for data in YList:\n #data = int(data)\n #print(\"data : \", data)\n if data == 1:\n retour = index\n else:\n index += 1\n listInt.append(retour)\n return retour\n\ndef get_YouTube_Data_Using_Web_Scraping():\n driver.get(url)\n user_data = driver.find_elements_by_xpath('//*[@id=\"thumbnail\"]')\n links = []\n for i in user_data:\n links.append(i.get_attribute('href'))\n print(\"we have found \", len(links), \"video data concerning traffic road accident on youtube\")\n\n # create a dataframe with 4 columns (link, title, description, category) to store video details\n df_accident = pd.DataFrame(columns=['link', 'title', 'description', 'category'])\n\n # scrape video details in youtube\n wait = WebDriverWait(driver, 10)\n v_category = \"CATEGORY_NAME\"\n i = 0\n listUrl = []\n for x in links:\n try:\n print(\"x : \" + str(i), x) # url\n # x = str(x)\n driver.get(x)\n v_id = x.strip('https://www.youtube.com/watch?v=')\n v_title = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"h1.title yt-formatted-string\"))).text\n v_description = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div#description yt-formatted-string\"))).text\n df_accident.loc[len(df_accident)] = [v_id, v_title, v_description, v_category]\n listUrl.append(x)\n except:\n print(\"Invalid url format\")\n i += 1\n\n # merge data into single dataframe\n frames = [df_accident]\n df_copy = pd.concat(frames, axis=0, join='outer', join_axes=None, ignore_index=True, keys=None, levels=None,names=None, verify_integrity=False, copy=True)\n\n df_link = pd.DataFrame(columns=[\"link\"])\n df_title = pd.DataFrame(columns=[\"title\"]) # title\n df_description = pd.DataFrame(columns=[\"description\"]) # description\n df_category = 
pd.DataFrame(columns=[\"category\"])\n df_link[\"link\"] = df_copy['link']\n df_title[\"title\"] = df_copy['title']\n df_description[\"description\"] = df_copy['description']\n df_category[\"category\"] = df_copy['category']\n #print(\"df_link : \", df_link)\n #print(\"df_title : \", df_title) # title\n #print(\"df_description : \", df_description) # description\n #print(\"df_category : \", df_category)\n return listUrl, df_title, df_description\n\ndef investigate_crash2():\n # define categorical label\n label_dict = {\n 0: 'Without Crash',\n 1: 'Crash',\n 2: 'Ambiguity Event',\n 3: 'Unknwon Event',\n 4: 'Unknown Event',\n }\n #load data\n listUrl, df_title, df_description = get_YouTube_Data_Using_Web_Scraping()\n youtube = pytube.YouTube(listUrl[0])\n video = youtube.streams.first()\n video.download('C:/xampp/htdocs/emergencyAlertForACrash/data/')\n extract_frames_from_video_valid('C:/xampp/htdocs/emergencyAlertForACrash/data/')\n X_test1 = load_data_valid()\n print('input image shape : {}'.format(X_test1.shape))\n\n #call model\n vehicle_crash_investigator_model = load_model(home_dir + '\\\\model\\\\VAICS_epoch1010int.h5py')\n\n #predict label category rely for the input image\n predict = vehicle_crash_investigator_model.predict(X_test1)\n print(\"predicted : \", predict)\n predict_cat = to_categorical(predict, num_classes=5)\n predict_cat = predict_cat.astype('int')\n print(\"predicted : \", predict_cat)\n\n # show result\n i = 0\n listFrameNames = []\n listResults = []\n\n for frame in listdir(home_dir+\"\\\\static\\\\generated_frames_valid\\\\\"):\n res = label_dict[getIndex(predict_cat[i])]\n listFrameNames.append(frame)\n listResults.append(label_dict[getIndex(predict_cat[i])])\n try:\n if res.__eq__(label_dict[getIndex(predict_cat[1])]):\n frame = frame.split(\".\")[0]\n frameId = frame.split(\"d\")[1]\n frameId = int(frameId)\n print(\"crash detected in frame n° :\", frameId, \"(\" + frame + \".jpg)\")\n except:\n print(\"Error05 :cet erreur n'est pas 
prise en charge par le systeme\")\n finally:\n i+=1\n\nif __name__==\"__main__\":\n investigate_crash2()","sub_path":"draft/investigate_webscraping.py","file_name":"investigate_webscraping.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"98777505","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, Command, find_packages\n\n\nwith open('requirements.txt') as f:\n required = f.read().splitlines()\n\nsetup(name='P3 Collaboration Competition',\n version='0.0.0',\n description='Tennis learning agent',\n packages=find_packages(),\n install_requires = required,\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"469374988","text":"\"\"\"\n# Depth First Traversal\n]\n1 = Inorder = (left root right)\n2 = preorder = ( root left right)\n3 = postorder = (left right root)\n\n# Breath First Traversal\n1. 
Level ORder Traversal\n\n\n 1 first.left.left.data\n / \\ queue = []\n 2 3 print(1,2,3,4,5,6,7)\n / \\ / \\\n4 5 6 7\n\ninorder = 4,2,5,1,6,3,7\npreorder = 1,2,4,5,3,6,7\npostorder = 4,5,2,6,7,3,1\nlevelorder = 1,2,3,4,5,6,7\n\n\"\"\"\n\nclass node:\n def __init__(self,data=None):\n self.data = data\n self.left_node = None\n self.right_node = None\nclass BinaryTree:\n def __init__(self):\n self.root = None\n def inorder(self):\n if(self.root == None):\n print(\"Tree Is Empty....\")\n else:\n self._inorder(self.root)\n def _inorder(self,current):\n if current:\n self._inorder(current.left_node)\n print(current.data,end=\" \")\n self._inorder(current.right_node)\n def preorder(self):\n if(self.root == None):\n print(\"Tree Is Empty....\")\n else:\n self._preorder(self.root)\n def _preorder(self,current):\n if current:\n print(current.data,end=\" \")\n self._preorder(current.left_node)\n self._preorder(current.right_node)\n def postorder(self):\n if(self.root == None):\n print(\"Tree Is Empty....\")\n else:\n self._postorder(self.root)\n def _postorder(self,current):\n if current:\n self._postorder(current.left_node)\n self._postorder(current.right_node)\n print(current.data,end=\" \")\n def levelorder(self):\n if(self.root == None):\n print(\"Tree Is Empty....\")\n else:\n self._levelorder(self.root)\n def _levelorder(self,current):\n queue = []\n queue.append(current)\n while len(queue)>0:\n node1 = queue.pop(0)\n print(node1.data,end=\" \")\n if(node1.left_node is not None):\n queue.append(node1.left_node)\n if(node1.right_node is not None):\n queue.append(node1.right_node)\n\nob1 = BinaryTree()\nfirst = node(1)\nsecond = node(2)\nthird = node(3)\nfourth = node(4)\nfifth = node(5)\nsixth = node(6)\nseventh = node(7)\nob1.root = first\nfirst.left_node = second\nfirst.right_node = third\nsecond.left_node = fourth\nsecond.right_node = fifth\nthird.left_node = sixth\nthird.right_node = seventh\nprint('Inorder : ',end=\" \")\nob1.inorder()\nprint()\nprint(\"Preorder : 
\",end=' ')\nob1.preorder()\nprint()\nprint(\"Postorder : \",end=' ')\nob1.postorder()\nprint()\nprint(\"Levelorder : \",end=' ')\nob1.levelorder()\n","sub_path":"16.BinaryTreeLevelOrderTraversal.py","file_name":"16.BinaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"468608123","text":"from threading import Lock\n# noinspection PyUnresolvedReferences\nfrom update import output\nfrom update import output_irregardelessly\n\nimport omega, re, os\nimport services\nfrom pending_client_commands import pending_commands_lock, pendable_functions, pending_commands\n\nfrom server_commands.interaction_commands import has_choices, generate_choices, generate_phone_choices, select_choice, cancel_mixer_interaction, cancel_super_interaction, push_interaction\nfrom server_commands.clock_commands import set_speed\nfrom server_commands.sim_commands import set_active_sim\nfrom server_commands.ui_commands import ui_dialog_respond, ui_dialog_pick_result, ui_dialog_text_input\nfrom csn import mp_chat\nfrom config import user_directory\nincoming_commands = []\noutgoing_commands = []\n\noutgoing_lock = Lock()\nincoming_lock = Lock()\n\nclass Message:\n def __init__(self, msg_id, msg):\n self.msg_id = msg_id\n self.msg = msg\n \nclass File: \n def __init__(self, file_name, file_contents):\n self.file_name = file_name\n self.file_contents = file_contents\n \ncommand_functions ={\n \"has_choices\": has_choices,\n \"generate_choices\": generate_choices,\n \"generate_phone_choices\": generate_phone_choices,\n \"select_choice\": select_choice,\n \"cancel_mixer_interaction\": cancel_mixer_interaction,\n \"cancel_super_interaction\": cancel_super_interaction,\n \"push_interaction\": push_interaction,\n \"set_speed\": set_speed, \n \"set_active_sim\": set_active_sim,\n \"mp_chat\": mp_chat,\n \"ui_dialog_respond\": ui_dialog_respond,\n \"ui_dialog_pick_result\": 
ui_dialog_pick_result,\n \"ui_dialog_text_input\": ui_dialog_text_input}\n\n \n \ndef parse_arg(arg):\n #Horrible, hacky way of parsing arguments from the client commands.\n new_arg = arg\n orig_arg = new_arg.replace('\"', \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"'\", \"\").strip()\n new_arg = orig_arg\n try:\n new_arg = float(orig_arg)\n\n try: \n new_arg = int(orig_arg)\n except BaseException:\n pass \n except BaseException:\n pass \n output(\"arg_handler\", str(new_arg) + \"\\n\")\n\n return new_arg\n \ndef get_file_matching_name(name):\n for root, dirs, files in os.walk(\"{}/saves/scratch\".format(user_directory.replace(\"Mods/Heuristics/Scripts/\", \"\"))):\n for file_name in files:\n replaced = file_name.replace(\"zoneObjects-\", \"\").replace(\"-6.sav\", \"\").strip()\n replaced = replaced[1:]\n output_irregardelessly(\"zone_id\", \"{} , {}\".format(replaced, name))\n if name == replaced:\n file_path = str(os.path.join(root, file_name))\n break\n return file_path, file_name\n \ndef client_sync():\n output(\"locks\", \"acquiring incoming lock 1\")\n\n with incoming_lock:\n global incoming_commands\n output(\"receive\", \"{} \\n\".format(len(incoming_commands)))\n for unpacked_msg_data in incoming_commands:\n if type(unpacked_msg_data) is Message:\n try:\n client = services.client_manager().get_first_client()\n client_instance = services.client_manager().get_first_client()\n \n if client == None:\n return\n except Exception:\n continue\n \n omega.send(client_instance.id, unpacked_msg_data.msg_id, unpacked_msg_data.msg)\n incoming_commands.remove(unpacked_msg_data)\n\n elif type(unpacked_msg_data) is File:\n client_file = open(get_file_matching_name(unpacked_msg_data.file_name)[0], \"wb\")\n new_architecture_data = unpacked_msg_data.file_contents\n client_file.write(new_architecture_data)\n client_file.close()\n incoming_commands.remove(unpacked_msg_data)\n output(\"locks\", \"releasing incoming lock\")\n\n \n\nregex = 
re.compile('[a-zA-Z]')\n\ndef do_command(command_name, *args):\n command_exists = command_name in command_functions\n output_irregardelessly(\"commands\", command_exists)\n if command_exists:\n command_functions[command_name](*args)\n output_irregardelessly(\"commands\", \"There is a command named: {}. Executing it.\".format(command_name))\n\n else:\n output_irregardelessly(\"commands\", \"There is no such command named: {}!\".format(command_name))\n return \ndef server_sync():\n output(\"locks\", \"acquiring incoming lock 1\")\n with incoming_lock:\n global incoming_commands\n client_instance = services.client_manager().get_first_client()\n\n for command in incoming_commands:\n \n current_line = command.split(',')\n function_name = current_line[0]\n if function_name == '':\n continue\n parsed_args = []\n\n for arg_index in range(1, len(current_line)):\n arg = current_line[arg_index].replace(')', '').replace('{}', '').replace('(', '')\n if \"'\" not in arg:\n arg = regex.sub('', arg)\n arg = arg.replace('<._ = ', '').replace('>', '')\n parsed_arg = parse_arg(arg)\n parsed_args.append(parsed_arg)\n #set connection to other client\n client_id = 1000\n parsed_args[-1] = client_id\n function_to_execute = \"{}({})\".format(function_name, str(parsed_args).replace('[', '').replace(']',''))\n function_name = function_name.strip()\n output_irregardelessly(\"client_specific\", \"New function called {} recieved\".format(function_name))\n if function_name in pendable_functions:\n with pending_commands_lock:\n if function_name not in pending_commands:\n pending_commands[function_name] = []\n if client_id not in pending_commands[function_name]:\n pending_commands[function_name].append(client_id)\n output_irregardelessly('arg_handler', str(function_to_execute) )\n try:\n do_command(function_name, *parsed_args)\n except Exception as e:\n output_irregardelessly(\"Execution Errors\", \"Something happened: {}\".format(e))\n incoming_commands.remove(command)\n output(\"locks\", 
\"releasing incoming lock\")\n\n","sub_path":"Scripts/mp_essential.py","file_name":"mp_essential.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"25462625","text":"from flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\napp = Flask(__name__)\nBootstrap(app)\nimport pandas as pd\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/my-movies')\ndef list_user_movies():\n my_movies_rated = pd.read_csv('./data/my-ratings.csv', sep=',', header=0)\n all_movies = pd.read_csv('./data/movies.csv', sep=',', header=0)\n my_movies_rated = pd.merge(my_movies_rated, all_movies, on='movieId', how='inner')\n return render_template('my-movies.html', page_title='My Movies', my_movies_rated=my_movies_rated)\n\ndef getnorm():\n \"\"\"retourne la la racine carrée de la somme des notes au carré \"\"\"\n all_movies = pd.read_csv('./data/movies.csv', sep=',', header=0)\n all_rates = pd.read_csv('./data/ratings.csv', sep=',', header=0)\n all_rates = pd.merge(all_rates, all_movies, on='movieId', how='inner')\n\n\n # http://localhost:5000/my-movies","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"211621465","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import render_to_response\nimport datetime\n\n\n# 表单\ndef search_form(request):\n return render_to_response('validate/search_form.html')\n\n\n# 接收请求数据\ndef search(request):\n request.encoding='utf-8'\n if 'q' in request.GET:\n message = '你搜索的内容为:' + request.GET['q'].encode('utf-8')\n else:\n message = '你提交了空表单'\n return HttpResponse(message)\n\ndef current_datetime(request):\n now = datetime.datetime.now()\n html = \"It is now %s.\" % now\n return HttpResponse(html)\n\ndef hours_ahead(request, 
offset):\n try:\n offset = int(offset)\n except ValueError:\n raise Http404()\n dt = datetime.datetime.now() + datetime.timedelta(hours=offset)\n html = \"In %s hour(s), it will be %s.\" % (offset, dt)\n return HttpResponse(html)","sub_path":"valid_test/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"192345580","text":"from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom datetime import datetime\nfrom flaskblog import db, login_manager,app\nfrom flask_login import UserMixin\n\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(db.Model,UserMixin):\n\tid = db.Column(db.Integer, primary_key = True)\n\tusername = db.Column(db.String(20),unique= True,nullable = False)\n\temail = db.Column(db.String(100),unique= True,nullable = False)\n\timage_file = db.Column(db.String(100),nullable = False, default = 'default.jpg')\n\t\t\t\t\t\t\t# default.jpg is a default profile photo which i'll add later.\n\tpassword = \tdb.Column(db.String(60), nullable = False)\n\tposts = db.relationship('Post', backref = 'author', lazy = True)\n\t\t\t\t\t\t\t#backref is similar to adding another Column to the post module \n\t\t\t\t\t\t\t# lazy-> just to finds when the sqlalchemy loads the database\n\n\tdef get_reset_token(self, expires_sec=1800):\n\t\ts = Serializer(app.config['SECRET_KEY'], expires_sec)\n\t\treturn s.dumps({'user_id': self.id}).decode('utf-8')\n #returning the token which is created by 'dumps' method\n\n\n# methos that verify a token.\n\t@staticmethod\n\tdef verify_reset_token(token):\n\t\ts = Serializer(app.config['SECRET_KEY'])\n\t\ttry:\n\t\t\tuser_id = s.loads(token)['user_id']\n\t\texcept:\n\t\t\treturn None\n\t\treturn User.query.get(user_id)\n\ndef __repr__(self):\n\treturn f\"User('{self.username}','{self.email}','{self.image_file}')\"\n\nclass 
Post(db.Model):\n\tid = db.Column(db.Integer, primary_key =True)\n\ttitle = db.Column(db.String(100),nullable = False)\n\tdate_posted = db.Column(db.DateTime,nullable = False, default = datetime.utcnow)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#utc are the current time.\n\tcontent = db.Column(db.Text, nullable=False)\n\tuser_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\t\t\t\t# lowercase 'user.id' referencing the table name and column name\ndef __repr__(self):\n return f\"Post('{self.title}','{self.date_posted}')\"\t","sub_path":"flaskblog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"389964212","text":"import cv2\nimport numpy as np\nimport os\n \nfrom os.path import isfile, join\n \ndef convert_frames_to_video(pathIn,pathOut,fps, folder):\n\tframe_array = []\n\tfiles = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]\n \n\t#for sorting the file names properly\n\t\n\tfiles.sort(key = lambda x: int(x[5:-4]))\n \n\tfor i in range(len(files)):\n\t\tfilename=pathIn + files[i]\n\t\t#reading each files\n\t\timg = cv2.imread(filename)\n\t\theight, width, layers = img.shape\n\t\tsize = (width,height)\n\t\tprint(filename)\n\t\t#inserting the frames into an image array\n\t\tframe_array.append(img)\n \n\tout = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'MPEG'), fps, size)\n \n\tfor i in range(len(frame_array)):\n\t\t# writing to a image array\n\t\tout.write(frame_array[i])\n\tout.release()\n \ndef main():\n\tcnt = 1\n\tfiles = os.listdir(\"./grayframes/\")\n\tfiles.sort(key = lambda x: int(x[:-7]))\n\n\tfor folder in files:\n\t\t# if folder != \"10_frames\":\n\t\t# \tcontinue\n\t\tprint(folder)\n\t\tpathIn= \"./grayframes/\" + folder + \"/\"\n\t\tpathOut = \"./grayvideos/\" + str(cnt) + \".mp4\"\n\t\tfps = 3\n\t\tconvert_frames_to_video(pathIn, pathOut, fps, folder)\n\t\tcnt += 1\n \nif 
__name__==\"__main__\":\n\tmain()\n","sub_path":"framestovideo.py","file_name":"framestovideo.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"596136186","text":"from lib import entity\nfrom game_code import menus\n\n\nclass Player(entity.Entity):\n\n def __init__(self):\n self.inventory = menus.Inventory()\n self.journal = menus.Journal()\n super(Player, self).__init__()\n\n def attach_game(self, game):\n super(Player, self).attach_game(game)\n self.inventory.attach_game(game)\n self.journal.attach_game(game)\n","sub_path":"game/game_code/objects/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"352048164","text":"\"\"\"\nMap the image file name and image index to the captions (sorted glob).\nThe 'image_index' key is super important because the feature maps' \n index is corresponding to the file name\n\nFor example:\n feature.npy shape = (82783, 14, 14, 512)\n the first row of feature.npy is the first image's (sorted glob) feature map\n\"\"\"\nimport pdb\nimport json\nimport glob\nimport cPickle\n\nfrom tqdm import tqdm\n\nCOCO_TRAIN_ANNOTATIONS_JSON_PATH = '/home/markd/data/mscoco/annotations/captions_train2014.json'\nCOCO_TEST_FOLDER_PATH = '/home/markd/data/mscoco/testLSML_20548/'\n\n\"\"\"Deal with training set now\"\"\"\nwith open(COCO_TRAIN_ANNOTATIONS_JSON_PATH, 'r') as f:\n info = json.load(f)\n\nimgs = info[u'images']\nannos = info[u'annotations']\nimgs = sorted(imgs, key=lambda k: k[u'id'])\nannos = sorted(annos, key=lambda k: k[u'image_id'])\n\n# add the index key and remove all other redundant keys\nfor i, im in enumerate(imgs):\n del im[u'license']\n del im[u'coco_url']\n del im[u'height']\n del im[u'width']\n del im[u'date_captured']\n del im[u'flickr_url']\n im['index'] = i\n# remove redundant key\nfor a in annos:\n del 
a[u'id']\n\nannos_ult = []\n# my super efficient loop, because both are sorted so we can pop the first item!\nfor im in tqdm(imgs):\n while(len(annos) != 0):\n if im[u'id'] == annos[0][u'image_id']:\n annos_ult.append({'caption': annos[0][u'caption'].encode('utf8'),\\\n 'image_name': im[u'file_name'].encode('utf8'),\\\n 'image_id': im[u'id'],\\\n 'image_index': im['index']})\n annos.pop(0)\n else:\n break\ncPickle.dump(annos_ult, open('train_82783_order.pkl', 'wb')) \n\n\"\"\"By far we are done with training set\nWe still need this to map the corresponding features even though\n we don't have to deal with captions.\nRemember to deal with '/path/to/data/12345.jpg' to split it to \n'12345.jpg'\n\"\"\"\ntest_files = sorted(glob.glob(COCO_TEST_FOLDER_PATH+'*'))\npure_files = []\nfor t in test_files:\n pure_files.append(t.split('/')[-1])\ncPickle.dump(pure_files, open('test_20548_order.pkl', 'wb'))\n","sub_path":"data/map_features.py","file_name":"map_features.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"337534861","text":"from time import sleep\nfrom lib.helpers.database_helper import pull_and_shape_batch\nfrom lib.helpers.redis_helper import get_best_loss, get_weights_matrix\nfrom lib.helpers.redis_helper import get_training_count\nfrom lib.helpers.redis_helper import set_best_loss, set_weights_matrix\nfrom lib.helpers.redis_helper import incr_training_count, init_training_count\nfrom lib.nn.functions import gradient_loss_function, loss_function\nfrom lib.nn.functions import random_matrix, scores\n\ndef initialize_training_session():\n\n # initialize training count\n init_training_count()\n\n # initialize the loss to be arbitrarily high\n bestloss = int(1e10) \n set_best_loss(bestloss)\n\n # initialize a random_weights matrix\n random_weights = random_matrix(2, 7326)\n set_weights_matrix(random_weights)\n\ndef prepare_plot_of_loss_function(length=750):\n import 
matplotlib.pyplot as plt\n import seaborn\n training_counts = []\n loss_values = []\n training_count = get_training_count()\n while training_count < length:\n training_counts.append(training_count)\n loss_values.append(get_best_loss())\n sleep(1)\n training_count = get_training_count()\n \n plt.plot(training_counts[1:], loss_values[1:]) \n\n return training_counts, loss_values\n \ndef train_via_random_search(n=100,offset=0, action_ids=None, gamma=0.001): \n incr_training_count()\n batch_features, \\\n batch_outcomes = pull_and_shape_batch(n=n,\n offset=offset*n,\n action_ids=action_ids)\n random_weights = random_matrix(2, 7326)\n loss = loss_function(random_weights, \n batch_features, \n batch_outcomes,\n gamma=gamma)\n \n if loss < get_best_loss(): \n set_best_loss(loss)\n set_weights_matrix(random_weights) \n\ndef train_via_random_local_search(n=100, offset=0, action_ids=None, gamma=0.001):\n incr_training_count()\n step_size = 0.0001\n weight_matrix = get_weights_matrix()\n batch_features, \\\n batch_outcomes = pull_and_shape_batch(n=n,\n offset=offset*n,\n action_ids=action_ids)\n weight_matrix_try = weight_matrix + random_matrix(2, 7326) * step_size\n loss = loss_function(weight_matrix_try,\n batch_features, \n batch_outcomes,\n gamma=gamma)\n \n if loss < get_best_loss(): \n set_best_loss(loss)\n set_weights_matrix(weight_matrix_try) \n\ndef train_via_gradient_descent(n=100, offset=0, action_ids=None, delta=1.0, gamma=0.001):\n training_count = incr_training_count()\n step_size = 0.0001\n weight_matrix = get_weights_matrix()\n batch_features, \\\n batch_outcomes = pull_and_shape_batch(n=n,\n offset=offset*n,\n action_ids=action_ids)\n\n grad = gradient_loss_function(weight_matrix, \n batch_features,\n batch_outcomes,\n delta=delta) \n weight_matrix = weight_matrix + grad * step_size\n loss = loss_function(weight_matrix,\n batch_features, \n batch_outcomes,\n gamma=gamma)\n \n set_best_loss(loss)\n set_weights_matrix(weight_matrix) 
\n","sub_path":"lib/helpers/pipeline_helper.py","file_name":"pipeline_helper.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"631488869","text":"import matplotlib.pyplot as plt\nimport matplotlib.ticker as plticker\nclass testplot(object): \n ### Plot weight distribution as pie chart\n #matplotlib inline\n def plot_pie(weight, ticker,df):\n labels = df.index.values[ticker]\n sizes = weight\n # Creating plot \n # fig, (ax1, ax2) = plt.subplots(1, 2)\n # fig.suptitle('portfolio weight')\n # ax1.pie(sizes_mvo, labels = labels_mvo) \n # ax2.pie(sizes_cvar, labels = labels_cvar)\n # ax1.title.set_text('MVO')\n # ax2.title.set_text('CVaR')\n #explode = (0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\n plt.show()\n\n ### Plot acutal and expected return\n #matplotlib inline\n\n def backtest_plot (date_list, all_port_act_ret, all_port_exp_ret):\n fig, ax = plt.subplots()\n loc = plticker.MultipleLocator(base=90) # this locator puts ticks at regular intervals\n ax.xaxis.set_major_locator(loc)\n ax.plot(date_list[:-3],all_port_act_ret[:-1],date_list[:-3],all_port_exp_ret[:-1],'b')\n # ax.set_xticklabels(['2010','2010','2012','2014','2016','2018','2020'])\n ax.set_xlabel('Date')\n ax.set_ylabel('Return(%)')\n ax.set_title('Backtesting of Portfolio Return')\n ax.legend(['Actual Return','Expected Return'])\n\n ### Plot cumulative portfolio return\n #matplotlib inline\n def cum_plot (date_list, cum_ret_exp, cum_ret_act):\n fig, ax = plt.subplots()\n loc = plticker.MultipleLocator(base=90) # this locator puts ticks at regular intervals\n ax.xaxis.set_major_locator(loc)\n # y = np.sin(x)\n 
ax.plot(date_list[:-2],np.asarray(cum_ret_exp)*100-100,date_list[:-2],np.asarray(cum_ret_act)*100-100)\n ax.set_xlabel('Date')\n ax.set_ylabel('Return (%)')\n ax.set_title('Cumulative Portfolio Return')\n ax.legend(['Cumulated Expected Return','Cumulated Actual Return'])\n ax.set_xticklabels(['2010','2010','2012','2014', '2016','2018','2020'])\n","sub_path":"All/testplot.py","file_name":"testplot.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"573222341","text":"# Michael Pszonka\n# CS 675 Assignment 2\n# Note. unfortunately having issues achieving convergence\nimport random as rd\nimport math\n\ndef format_data(dataset, labels):\n input_data = [line.split() for line in dataset]\n output_data = [line.split()[0] for line in labels]\n \n input_data = [[float(j) for j in vector] for vector in input_data]\n output_data = [int(vector) for vector in output_data]\n\n #post processing of data\n for row in range(len(input_data)):\n input_data[row].append(1.0)\n\n for row in range(len(output_data)):\n if(output_data[row] == 0):\n output_data[row] = -1\n\n return input_data, output_data\n\ndef dot_product(x, theta):\n dot_product = 0\n\n for i, j in zip(x, theta):\n dot_product += i * j\n return dot_product\n\ndef calculate_difference(prediction, actual):\n guess = []\n for i in range(len(prediction)):\n guess.append(prediction[i] - actual[i])\n\n return guess\n\ndef gradientDescent(X, y, theta, learning_rate, stopping_condition):\n prev_objective = 0.0\n objective = 0.0\n convergence_dist = 2.0\n rows = len(X)\n col = len(X[0])\n\n while(convergence_dist > stopping_condition):\n delta_f = [] #initalization\n for i in range(col):\n delta_f.append(0)\n\n for j in range(rows):\n prediction = dot_product(X[j], theta)\n for k in range(col): \n hypothesis = y[j] - prediction #predicted - outcome\n delta_f[k] += hypothesis * X[j][k]\n\n #update weights\n for i in range(col):\n 
theta[i] = theta[i] + (learning_rate * delta_f[i])\n\n \n # calculate residual sum of square\n prev_objective = objective\n objective = 0.0\n \n for i in range(rows):\n objective += (y[i] - dot_product(X[i], theta)) ** 2\n \n if (prev_objective > objective):\n convergence_dist = prev_objective - objective\n else:\n convergence_dist = objective - prev_objective\n\n print(f'Distance to Convergence: {convergence_dist}')\n \n print(theta)\n \n\nif __name__ == \"__main__\":\n data = open('test_data.data')\n labels = open('test_labels.labels')\n \n training_set, training_labels = format_data(data, labels)\n\n theta = []\n\n for i in range(len(training_set[0])):\n theta.append(0.02 * rd.random() - 0.01)\n gradientDescent(training_set, training_labels, theta, .001, 0.001)\n\n #dist to origin = theta0 / ||theta||\n\n #||theta||:\n magnitude = 0\n for i in range(len(theta) - 1):\n magnitude += theta[i] ** 2\n magnitude =math.sqrt(magnitude)\n \n theta_0 = theta[len(theta) - 1]\n dist = abs(theta_0 / magnitude)\n\n print('Distance to origin: ' + str(dist))\n\n","sub_path":"gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"64858802","text":"import datetime\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.shortcuts import redirect\nfrom web import models\n\nfrom django.conf import settings\n\n\nclass Trancer(object):\n def __init__(self):\n self.user = None\n self.price_policy = None\n self.project = None\n\n\nclass AuthMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n \"\"\"\n 如果用户已经登录,则request中赋值\n :param request:\n :return:\n \"\"\"\n request.tracer = Trancer()\n\n user_id = request.session.get('user_id', 0)\n user_object = models.UserInfo.objects.filter(id=user_id).first()\n request.tracer.user = user_object\n\n # 白名单:没有登陆都可以访问的URL\n \"\"\"\n 1.获取当前用户访问的URL\n 
2.检查URL是否在白名单中,如果再则可以继续向后访问,如果不在则进行判断是否已登录\n \"\"\"\n if request.path_info in settings.WHITE_REGEX_URL_LIST:\n return\n\n # 检查用户是否已登录,已登录继续往后走;无登录则返回登录页面\n if not request.tracer.user:\n return redirect('login')\n\n # 登陆成功之后,访问后台管理时:获取当前用户所拥有的额度\n # 方式一:免费额度再交易记录中存储\n\n # 获取当前用户ID值最大(最近交易记录)\n _object = models.Transaction.objects.filter(user=user_object, status=2).order_by('-id').first()\n\n # 判断是否过期\n current_datetime = datetime.datetime.now()\n if _object.end_datetime and _object.end_datetime < current_datetime:\n # _object = models.Transaction.objects.filter(user=user_object,status=2).order_by('id').first()\n _object = models.Transaction.objects.filter(user=user_object, status=2, price_policy__category=1).first()\n\n # request.transaction = _object\n request.tracer.price_policy = _object.price_policy\n\n # 方式二:免费的额度存储配置文件\n \"\"\"\n # 获取当前用户ID值最大(最近交易记录)\n _object = models.Transaction.objects.filter(user=user_object, status=2).order_by('-id').first()\n\n if not _object:\n # 没有购买\n request.price_policy = models.PricePolicy.objects.filter(category=1,title='个人免费版').first()\n else:\n # 付费版\n current_datetime = datetime.datetime.now()\n if _object.end_datetime and _object.end_datetime < current_datetime:\n request.price_policy = models.PricePolicy.objects.filter(category=1, title='个人免费版').first()\n else:\n request.price_policy = _object.price_policy\n \"\"\"\n\n\n def process_view(self,request,view,args,kwargs):\n #判断URL是否是以manage开头,如果是则判断项目ID 是否是我创建 or 参与\n if not request.path_info.startswith('/manage/'):\n return\n\n project_id = kwargs.get('project_id')\n\n # 是否是我创建的\n project_object = models.Project.objects.filter(creator=request.tracer.user,id=project_id).first()\n if project_object:\n # 是我创建的项目的话,就允许通过\n request.tracer.project = project_object\n return\n\n # 是否是我参与的项目\n project_user_object = models.ProjectUser.objects.filter(user=request.tracer.user,project_id=project_id).first()\n if project_user_object:\n # 是我参与的项目\n request.tracer.project = 
project_user_object.project\n return\n\n return redirect('project_list')","sub_path":"web/middleware/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"111042986","text":"from django import forms\nfrom django.forms import SelectDateWidget\n\nfrom .models import Patients\n\n\nclass PatientsForm(forms.ModelForm):\n class Meta:\n model = Patients\n fields = '__all__'\n widgets = {\n 'suspect_date': SelectDateWidget(attrs={'class': 'form-control'})\n }\n labels = {'full_name': 'Full Name',\n 'mobile': 'Mobile No',\n 'patient_img': 'Patient Image',\n 'suspect_date': 'Suspect Date'}\n\n def __init__(self, *args, **kwargs):\n super(PatientsForm, self).__init__(*args, **kwargs)\n self.fields['virusName'].empty_label = \"Select\"\n self.fields['district'].empty_label = \"Select\"\n self.fields['suspect_date'].required = False\n","sub_path":"corona_crud/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"477126584","text":"from topostfix import shunting_yard\n\ndef parse(polynomial):\n result = []\n assistentstring = \"\"\n\n for element in polynomial:\n if element == \"+\" or element == \"-\" or element == \"*\" or element == \"/\" or element == \"(\" or element == \")\" or element == \"^\":\n if len(assistentstring):\n result.append(assistentstring)\n assistentstring = \"\"\n result.append(element)\n\n elif element.isdigit():\n assistentstring += element\n\n else:\n if len(assistentstring):\n result.append(assistentstring)\n assistentstring = \"\"\n\n result.append(element)\n\n if len(assistentstring):\n result.append(assistentstring)\n\n return result\n\ndef get_processed_polynomials(polynomial1, polynomial2):\n list1 = parse(polynomial1)\n list2 = parse(polynomial2)\n postfix_polynomials = shunting_yard([list1, list2])\n 
return postfix_polynomials\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"314431832","text":"# -*- coding: utf-8 -*-\n\n#import coopr pyomo\nfrom coopr.pyomo import *\n\n#Our model will be an abstract model - Separates model from data\nmodel = AbstractModel()\n\n\nmodel.Shifts = Set()\n\n\nmodel.Overtime = Set()\nmodel.Scenarios = Set()\n\n\nmodel.DailyCost = Param(model.Shifts, within = NonNegativeReals, default = 0.0) \n\n\n \nmodel.OTCost = Param(model.Shifts, within = NonNegativeReals, default = 0.0)\nmodel.CoverPatient = Param(model.Shifts, within = NonNegativeReals, default = 0.0)\nmodel.Demand = Param(model.Shifts, within = NonNegativeReals, default = 0.0)\n\n \n#Decision Variables\n\nmodel.ShiftDay = Var(model.Shifts, within = NonNegativeIntegers) \n\nmodel.OTScenario = Var(model.Overtime, within = NonNegativeIntegers) \nmodel.FirstStageCost = Var(within = Reals) #Auxiliary variable: Need to model first-stage cost for PySP specifiation\nmodel.SecondStageCost = Var(within = Reals) #Auxiliary variable: Need to model second-stage cost for PySP specification\n\n\n#Constrains:\n\ndef demand_constraint_rule1(m):\n constraint_expr = 0\n constraint_expr += (m.CoverPatient['Sat']*m.ShiftDay['Sat'] + m.CoverPatient['SatSun']*m.ShiftDay['SatSun'] + m.CoverPatient['SatMon']*m.ShiftDay['SatMon'] + m.OTScenario['Sat'])\n return constraint_expr >= m.Demand['Sat']\n \nmodel.DemandConstraint1 = Constraint(rule = demand_constraint_rule1)\n\ndef demand_constraint_rule2(m):\n constraint_expr = 0\n constraint_expr += (m.CoverPatient['Sun']*m.ShiftDay['Sun'] + m.CoverPatient['SatSun']*m.ShiftDay['SatSun'] + m.CoverPatient['SunMon']*m.ShiftDay['SunMon'] + m.OTScenario['Sun'])\n return constraint_expr >= m.Demand['Sun']\n \nmodel.DemandConstraint2 = Constraint(rule = demand_constraint_rule2)\n \ndef demand_constraint_rule3(m):\n 
constraint_expr = 0 \n constraint_expr += (m.CoverPatient['Mon']*m.ShiftDay['Mon'] + m.CoverPatient['SatMon']*m.ShiftDay['SatMon'] + m.CoverPatient['SunMon']*m.ShiftDay['SunMon'] + m.OTScenario['Mon'])\n return constraint_expr >= m.Demand['Mon']\n \nmodel.DemandConstraint3 = Constraint(rule = demand_constraint_rule3)\n\n\ndef total_nurse(m):\n constraint_expr = 0\n for i in m.Shifts:\n constraint_expr += m.ShiftDay[i]\n \n return constraint_expr <= 15.0 \nmodel.NurseConstraint = Constraint(rule = total_nurse)\n\n\ndef first_stage_cost_rule(m):\n obj_expr = 0\n for j in m.Shifts:\n obj_expr += m.DailyCost[j]*m.ShiftDay[j]\n \n return m.FirstStageCost == obj_expr\nmodel.ComputeFirstStageCost = Constraint(rule = first_stage_cost_rule)\n\ndef second_stage_cost_rule(m):\n obj_expr = 0\n for j in m.Overtime:\n obj_expr += m.OTCost[j]*m.OTScenario[j]\n \n return m.SecondStageCost == obj_expr\n\nmodel.ComputeSecondStageCost = Constraint(rule = second_stage_cost_rule)\n \n\n\n#objective\ndef obj_rule(m):\n return m.FirstStageCost + m.SecondStageCost\n \nmodel.MinCost = Objective(rule = obj_rule, sense = minimize)\n\n\n \n","sub_path":"models/ev.py","file_name":"ev.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"63215287","text":"#2. 
Un indirizzo MAC (Media Access Control address) è un indirizzo univoco \r\n# #associato dal produttore, a una NIC, composto da 6 coppie di cifre \r\n# esadecimali separate da due punti.\r\n#Un esempio di MAC è 02:FF:A5:F2:55:12.\r\n#Scrivi una funzione genera_mac che generi degli indirizzi MAC pseudo casuali.\r\n\r\nimport random\r\n\r\n#la lista di tutti i caratteri che sono validi per la creazione di un indirizzo MAC\r\ncaratteri = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']\r\n\r\ndef generaMAC():\r\n #creo la stringa per il mac\r\n mac = ''\r\n\r\n #per tutta la lunghezza del MAC (hanno sempre 17 caratteri)\r\n for i in range(17):\r\n # se mi trovo in posizione 2,5,7,9 metto i :\r\n if (i + 1) % 3 == 0:\r\n mac += ':'\r\n else:\r\n #altrimenti tiro a caso un carattere dalla stringa sopra creata, meno 1 perchè lìindice della lista parte da 0\r\n mac += caratteri[random.randint(0, len(caratteri)-1)]\r\n #infine ritorno l'indirizzo\r\n return mac\r\n\r\ndef main():\r\n print (generaMAC())\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","sub_path":"es_python/es_vacanze_python/es_2.py","file_name":"es_2.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"53637015","text":"def Writelist(list, output):\n for line in list:\n output.write(line.__str__() + ' ')\n\ndef Sort(left,right,list):\n if len(list)>0:\n Sort(left,len(list)/2,list[left,right/2])\n else:\n Sort()\n\n\n\ninput = open('input.txt');\noutput = open('output.txt' , 'w');\narrLength = int(input.readline())\nlist = []\n\nfor line in input.readline().split(' '):\n list.append(int(line))\nWritelist(list, output)\n\n\n","sub_path":"Курсы итмо/Неделя 2/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"285163297","text":"# -*- coding: utf-8 -*-\nfrom __future__ 
import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Filter',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ip_list', models.TextField(max_length=65535, null=True, blank=True)),\n ('api_list', models.TextField(max_length=65535, null=True, blank=True)),\n ],\n options={\n 'verbose_name': '\\u8fc7\\u6ee4\\u540d\\u5355',\n 'verbose_name_plural': '\\u8fc7\\u6ee4\\u540d\\u5355\\u5217\\u8868',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, verbose_name=b'\\xe5\\x90\\x8d\\xe7\\xa7\\xb0')),\n ('is_has_new_report', models.BooleanField(default=False)),\n ('b_done_over', models.BooleanField(default=False)),\n ('b_first_modify_passwd', models.BooleanField(default=False)),\n ('mem_type', models.IntegerField(default=1, max_length=4, choices=[(1, b'Custom'), (2, b'Analysts'), (3, b'DataTrans'), (0, b'Checker'), (4, b'Single'), (5, b'Phone'), (6, b'Meal'), (7, b'Coder')])),\n ('thread_num', models.IntegerField(default=5, max_length=4)),\n ('passwd_wrong_count', models.IntegerField(default=0, max_length=4)),\n ('allow_ips', models.TextField(max_length=800, null=True, blank=True)),\n ('last_login_ip', models.CharField(max_length=20, null=True, blank=True)),\n ('block_reason', models.CharField(max_length=100, null=True, blank=True)),\n ('last_login_time', models.DateTimeField(null=True, blank=True)),\n ('email', models.CharField(max_length=100, null=True, blank=True)),\n ('out_in_member', models.IntegerField(default=1, 
help_text=b'\\xe5\\x86\\x85\\xe9\\x83\\xa8\\xe4\\xba\\xba\\xe5\\x91\\x98\\xe4\\xb8\\xba1,\\xe5\\xa4\\x96\\xe9\\x83\\xa8\\xe4\\xba\\xba\\xe5\\x91\\x98\\xe4\\xb8\\xba0', max_length=4)),\n ('permission', models.TextField(default=b'null', help_text=b'\\xe6\\x8e\\xa5\\xe5\\x8f\\xa3\\xe8\\xaf\\xb7\\xe6\\xb1\\x82\\xe5\\x90\\x8d\\xe7\\xa7\\xb0\\xef\\xbc\\x9a1\\xe4\\xb8\\xba\\xe5\\xbc\\x80\\xe9\\x80\\x9a', max_length=800, verbose_name=b'\\xe6\\x8e\\xa5\\xe5\\x8f\\xa3\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('permission2', models.CharField(default=b'0000000000', help_text=b'\\xe4\\xbd\\x9c\\xe5\\xba\\x9f', max_length=10, verbose_name=b'\\xe6\\x96\\xb0\\xe6\\x8e\\xa5\\xe5\\x8f\\xa3\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('portrait_permission', models.CharField(default=b'1000', help_text=b'\\xe4\\xbd\\x9c\\xe5\\xba\\x9f', max_length=10, verbose_name=b'\\xe7\\x94\\xbb\\xe5\\x83\\x8f\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('credit_permission', models.CharField(default=b'1000', help_text=b'\\xe4\\xbd\\x9c\\xe5\\xba\\x9f', max_length=10, verbose_name=b'\\xe4\\xbf\\xa1\\xe8\\xb4\\xb7\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('portrait3_permission', models.TextField(default=b'null', max_length=800, verbose_name=b'\\xe9\\x80\\x9a\\xe7\\x94\\xa8\\xe7\\x89\\x883.0\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('credit3_permission', models.TextField(default=b'null', max_length=800, verbose_name=b'\\xe4\\xbf\\xa1\\xe8\\xb4\\xb73.0\\xe6\\x9d\\x83\\xe9\\x99\\x90')),\n ('credit3_permission2', models.CharField(default=b'000000000', help_text=b'\\xe6\\x9c\\x88\\xe5\\xba\\xa6\\xe6\\x94\\xb6\\xe6\\x94\\xaf\\xe7\\xad\\x89\\xe7\\xba\\xa7/\\xe6\\x89\\x8b\\xe6\\x9c\\xba\\xe6\\x9c\\x89\\xe6\\x95\\x88\\xe6\\x80\\xa7/\\xe9\\x93\\xb6\\xe8\\xa1\\x8c\\xe5\\xae\\xa2\\xe7\\xbe\\xa4\\xe8\\xaf\\x84\\xe5\\x88\\x86/p2p\\xe5\\xae\\xa2\\xe7\\xbe\\xa4\\xe8\\xaf\\x84\\xe5\\x88\\x86/\\xe6\\xb6\\x88\\xe8\\xb4\\xb9\\xe9\\x87\\x91\\xe8\\x9e\\x8d\\xe5\\xae\\xa2\\xe7\\xbe\\xa4\\xe8\\xaf\\x84\\xe5\\x88\\x86', max_length=12, 
verbose_name=b'\\xe4\\xbf\\xa1\\xe8\\xb4\\xb73.0\\xe6\\x9d\\x83\\xe9\\x99\\x902')),\n ('custom_num', models.CharField(max_length=15, null=True, blank=True)),\n ('sampling_sort', models.IntegerField(default=1, max_length=5, verbose_name=b'\\xe6\\x95\\xb0\\xe6\\x8d\\xae\\xe9\\x9b\\x86\\xe5\\xb8\\x82\\xe4\\xb8\\xad\\xe6\\x96\\x87\\xe4\\xbb\\xb6\\xe4\\xb8\\xaa\\xe6\\x95\\xb0')),\n ('custom_city', models.CharField(max_length=50, null=True, verbose_name=b'\\xe5\\xae\\xa2\\xe6\\x88\\xb7\\xe6\\x80\\xbb\\xe9\\x83\\xa8\\xe6\\x89\\x80\\xe5\\x9c\\xa8\\xe5\\x9f\\x8e\\xe5\\xb8\\x82', blank=True)),\n ('taskid', models.CharField(max_length=200, null=True, verbose_name=b'taskid', blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('analyst_custom', models.ForeignKey(related_name=b'analycus', blank=True, to='account.Member', null=True)),\n ('checker_custom', models.OneToOneField(related_name=b'checker', null=True, blank=True, to='account.Member')),\n ('datatran_custom', models.ForeignKey(related_name=b'datatrancus', blank=True, to='account.Member', null=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': '\\u7cfb\\u7edf\\u6210\\u5458',\n 'verbose_name_plural': '\\u7cfb\\u7edf\\u6210\\u5458\\u5217\\u8868',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Queryer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('passwd', models.CharField(max_length=20)),\n ('apicode', models.CharField(max_length=20)),\n ('extra_info', models.CharField(max_length=200, null=True, blank=True)),\n ('is_busy', models.BooleanField(default=False)),\n ('do_on_file', models.CharField(max_length=100, null=True, blank=True)),\n ('start_match', models.DateTimeField(null=True, blank=True)),\n ('end_match', models.DateTimeField(null=True, blank=True)),\n ('mapping_files', 
models.CharField(max_length=1700, null=True, verbose_name=b'\\xe8\\xbf\\x91\\xe6\\x9c\\x9f\\xe5\\x8c\\xb9\\xe9\\x85\\x8d\\xe7\\x9a\\x84\\xe6\\x96\\x87\\xe4\\xbb\\xb6\\xe8\\xae\\xb0\\xe5\\xbd\\x95', blank=True)),\n ('real_name', models.CharField(max_length=50, null=True, blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True)),\n ('constom', models.OneToOneField(null=True, blank=True, to='account.Member')),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': '\\u753b\\u50cf\\u67e5\\u8be2\\u5e10\\u53f7',\n 'verbose_name_plural': '\\u753b\\u50cf\\u67e5\\u8be2\\u5e10\\u53f7\\u5217\\u8868',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='member',\n unique_together=set([('email', 'mem_type')]),\n ),\n ]\n","sub_path":"Mapping/account/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"196755471","text":"# -*- coding: utf-8 -*-\n\nimport disco\nfrom disco.core import Job, result_iterator\nimport Tokenizer\n\ndef map(line, params):\n columns = line.split('\\t')\n party = columns[1]\n tweet = columns[2]\n tokens = Tokenizer.tokenize(tweet)\n used_tokens = {}\n for token in tokens:\n if not used_tokens.has_key(token):\n used_tokens[token] = 0\n yield party + '_' + token, 1\n\ndef reduce(iter, params):\n from disco.util import kvgroup\n for party_token, counts in kvgroup(sorted(iter)):\n yield party_token, sum(counts)\n\nif __name__ == '__main__':\n job = Job().run(input=[\"data:tweets\"], map_reader = disco.worker.task_io.chain_reader, map=map, reduce=reduce)\n with open('WordStats.txt', 'w') as f:\n for party_word, count in result_iterator(job.wait(show=True)):\n f.write(party_word + '\\t' + str(count) + '\\n')\n 
f.close()\n","sub_path":"CalculateWordStats.py","file_name":"CalculateWordStats.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"501914680","text":"\"\"\"\nTesting the trained model\n\"\"\"\n\nimport spacy\n\ntext = \"Hillary Clinton said that India will not just become a regional power, but it will become a global power by 2025.\"\n\nnlp = spacy.load(\"Output\")\ndoc = nlp(text)\n\nprint(\"\\n\\n ---------------------- \\n\\n\", doc)\n\nfor ent in doc.ents:\n print(\"\\n\\n --->>>>\", ent, ent.label_)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"256711689","text":"# Copyright (c) billpwchan - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n# Written by Bill Chan , 2021\n\nimport configparser\nimport datetime\nimport glob\nfrom datetime import date\nfrom pathlib import Path\n\nfrom futu import *\n\nimport data_engine\nimport logger\nfrom handler.cur_kline_handler import CurKlineHandler\nfrom handler.rt_data_handler import RTDataHandler\nfrom handler.stock_quote_handler import StockQuoteHandler\nfrom strategies.Strategies import Strategies\n\n\nclass FutuTrade:\n def __init__(self):\n \"\"\"\n Futu Trading Engine Constructor\n \"\"\"\n self.config = configparser.ConfigParser()\n self.config.read(\"config.ini\")\n self.quote_ctx = OpenQuoteContext(host=self.config['FutuOpenD.Config'].get('Host'),\n port=self.config['FutuOpenD.Config'].getint('Port'))\n self.trade_ctx = OpenHKTradeContext(host=self.config['FutuOpenD.Config'].get('Host'),\n port=self.config['FutuOpenD.Config'].getint('Port'))\n self.username = self.config['FutuOpenD.Credential'].get('Username')\n # self.password = self.config['FutuOpenD.Credential'].get('Password')\n 
self.password_md5 = self.config['FutuOpenD.Credential'].get('Password_md5')\n self.futu_data = data_engine.DatabaseInterface(database_path=self.config['Database'].get('Database_path'))\n self.default_logger = logger.get_logger(\"futu_trade\")\n self.trd_env = TrdEnv.REAL if self.config.get('FutuOpenD.Config', 'TrdEnv') == 'REAL' else TrdEnv.SIMULATE\n\n def __del__(self):\n \"\"\"\n Default Cleanup Operations for Futu Trade Engine. Disconnect all Quote & Trade Connections\n \"\"\"\n self.default_logger.info(\"Deleting Quote_CTX Connection\")\n self.quote_ctx.close() # 关闭当条连接,FutuOpenD会在1分钟后自动取消相应股票相应类型的订阅\n self.default_logger.info(\"Deleting Trade_CTX Connection\")\n self.trade_ctx.close() # 关闭当条连接,FutuOpenD会在1分钟后自动取消相应股票相应类型的订阅\n\n def __unlock_trade(self):\n \"\"\"\n Unlock Trading Account if TrdEnv.REAL\n \"\"\"\n if self.trd_env == TrdEnv.REAL:\n ret, data = self.trade_ctx.unlock_trade(password_md5=self.password_md5)\n if ret == RET_OK:\n self.default_logger.info(\"Account Unlock Success.\")\n else:\n raise Exception(\"Account Unlock Unsuccessful: {}\".format(data))\n\n def __save_historical_data(self, stock_code: str, start_date: date, end_date: date = None,\n k_type: object = KLType, force_update: bool = False) -> bool:\n \"\"\"\n Save Historical Data (e.g., 1M, 15M, 1D, etc.) from FUTU OpenAPI to ./data folder. Saved in CSV Format\n :param stock_code: Stock Code with Format (e.g., HK.00001)\n :param start_date: Datetime Object that specifies the start date\n :param end_date: Datetime Object that specifies the end date. 
If left as None, it will be automatically calculated as 365 days after start_date\n :param k_type: FuTu KLType Object\n :return: bool\n \"\"\"\n out_dir = f'./data/{stock_code}'\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n if k_type == KLType.K_1M:\n output_path = f'./data/{stock_code}/{stock_code}_{start_date.strftime(\"%Y-%m-%d\")}_1M.csv'\n elif k_type == KLType.K_DAY:\n output_path = f'./data/{stock_code}/{stock_code}_{start_date.year}_1D.csv'\n else:\n self.default_logger.error(f'Unsupported KLType. Please try it later.')\n return False\n\n # Ensure update current day's 1M data & current year's 1D data\n if os.path.exists(output_path) and not force_update and (\n (start_date != datetime.today().date() and k_type == KLType.K_1M) or (\n start_date.year != datetime.today().date().year and k_type == KLType.K_DAY)\n ):\n return False\n\n # Request Historical K-line Data (Daily)\n start_date = start_date.strftime(\"%Y-%m-%d\")\n end_date = end_date.strftime(\"%Y-%m-%d\") if end_date is not None else None\n while True:\n ret, data, page_req_key = self.quote_ctx.request_history_kline(stock_code, start=start_date,\n end=end_date,\n ktype=k_type, autype=AuType.QFQ,\n fields=[KL_FIELD.ALL],\n max_count=1000, page_req_key=None,\n extended_time=False)\n if ret == RET_OK:\n data.to_csv(output_path, index=False)\n self.default_logger.info(f'Saved: {output_path}')\n self.__store_data_database(data, k_type=k_type)\n return True\n else:\n # Retry Storing Data due to too frequent requests (max. 
60 requests per 30 seconds)\n time.sleep(1)\n self.default_logger.error(f'Historical Data Store Error: {data}')\n\n def __store_data_database(self, data, k_type):\n for index, row in data.iterrows():\n self.futu_data.add_stock_data(row['code'], row['time_key'], row['open'], row['close'], row['high'],\n row['low'], row['pe_ratio'], row['turnover_rate'], row['volume'],\n row['turnover'], row['change_rate'], row['last_close'], k_type)\n\n def get_market_state(self):\n return self.quote_ctx.get_global_state()\n\n def get_1M_data(self, stock_list: list):\n \"\"\"\n Get 1M Data from CSV based on Stock List. Returned in Dict format\n :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])\n :return: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}\n \"\"\"\n # Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}\n input_data = {}\n for stock_code in stock_list:\n delta = 0\n # Check if the file already exists or the dataframe has no data (Non-Trading Day)\n while \\\n not Path(\n f'./data/{stock_code}/{stock_code}_{str((datetime.today() - timedelta(days=delta)).date())}_1M.csv').exists() or pd.read_csv(\n f'./data/{stock_code}/{stock_code}_{str((datetime.today() - timedelta(days=delta)).date())}_1M.csv').empty:\n delta += 1\n\n output_path = f'./data/{stock_code}/{stock_code}_{str((datetime.today() - timedelta(days=delta)).date())}_1M.csv'\n input_csv = pd.read_csv(output_path, index_col=None)\n self.default_logger.info(f'Get {output_path} Success from Stock List Success.')\n input_data[stock_code] = input_data.get(stock_code, input_csv)\n return input_data\n\n def update_1M_data(self, stock_code: str, years=2, force_update: bool = False) -> None:\n \"\"\"\n Update 1M Data to ./data/{stock_code} folders for max. 
2-years duration\n :param force_update:\n :param stock_code: Stock Code with Format (e.g., HK.00001)\n :param years: 2 years\n \"\"\"\n for i in range(round(365 * years)):\n day = datetime.today() - timedelta(days=i)\n if not self.__save_historical_data(stock_code=stock_code, start_date=day.date(), end_date=day.date(),\n k_type=KLType.K_1M, force_update=force_update):\n continue\n time.sleep(0.7)\n\n def update_1D_data(self, stock_code: str, years=10, force_update: bool = False) -> None:\n \"\"\"\n Update 1D Data (365 days per file) to ./data/{stock_code} folders for max. 2-years duration\n :param force_update:\n :param stock_code: Stock Code with Format (e.g., HK.00001)\n :param years: 10 years\n \"\"\"\n for i in range(0, round(years + 1)):\n day = date((datetime.today() - timedelta(days=i * 365)).year, 1, 1)\n if not self.__save_historical_data(stock_code=stock_code, start_date=day,\n k_type=KLType.K_DAY, force_update=force_update):\n continue\n time.sleep(0.7)\n\n def store_all_data_database(self):\n file_list = glob.glob(f\"./data/*/*_1M.csv\", recursive=True)\n for input_file in file_list:\n input_csv = pd.read_csv(input_file, index_col=None)\n self.default_logger.info(f'Processing: {input_file}')\n self.__store_data_database(input_csv, k_type=KLType.K_1M)\n self.futu_data.commit()\n\n file_list = glob.glob(f\"./data/*/*_1D.csv\", recursive=True)\n for input_file in file_list:\n input_csv = pd.read_csv(input_file, index_col=None)\n self.default_logger.info(f'Processing: {input_file}')\n self.__store_data_database(input_csv, k_type=KLType.K_DAY)\n self.futu_data.commit()\n\n def stock_quote_subscription(self, input_data: dict, stock_list: list, strategy: Strategies, timeout: int = 60):\n \"\"\"\n 实时报价回调,异步处理已订阅股票的实时报价推送。\n :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}\n :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])\n :param strategy: Strategies defined in ./strategies class. 
Should be inherited from based class Strategies\n :param timeout: Subscription Timeout in secs.\n \"\"\"\n self.__unlock_trade()\n\n # Stock Quote Handler\n handler = StockQuoteHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,\n strategy=strategy, trd_env=self.trd_env)\n self.quote_ctx.set_handler(handler) # 设置实时报价回调\n self.quote_ctx.subscribe(stock_list, [SubType.QUOTE], is_first_push=True,\n subscribe_push=True) # 订阅实时报价类型,FutuOpenD开始持续收到服务器的推送\n time.sleep(timeout)\n\n def rt_data_subscription(self, input_data: dict, stock_list: list, strategy: Strategies, timeout: int = 60):\n \"\"\"\n 实时分时回调,异步处理已订阅股票的实时分时推送。\n :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}\n :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])\n :param strategy: Strategies defined in ./strategies class. Should be inherited from based class Strategies\n :param timeout: Subscription Timeout in secs.\n \"\"\"\n self.__unlock_trade()\n\n # RT Data Handler\n handler = RTDataHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,\n strategy=strategy, trd_env=self.trd_env)\n self.quote_ctx.set_handler(handler) # 设置实时分时推送回调\n self.quote_ctx.subscribe(stock_list, [SubType.RT_DATA], is_first_push=True,\n subscribe_push=True) # 订阅分时类型,FutuOpenD开始持续收到服务器的推送\n time.sleep(timeout)\n\n def cur_kline_subscription(self, input_data: dict, stock_list: list, strategy: Strategies, timeout: int = 60):\n \"\"\"\n 实时 K 线回调,异步处理已订阅股票的实时 K 线推送。\n :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}\n :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])\n :param strategy: Strategies defined in ./strategies class. 
Should be inherited from based class Strategies\n :param timeout: Subscription Timeout in secs.\n \"\"\"\n self.__unlock_trade()\n\n # cur Kline Handler\n handler = CurKlineHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,\n strategy=strategy, trd_env=self.trd_env)\n self.quote_ctx.set_handler(handler) # 设置实时分时推送回调\n self.quote_ctx.subscribe(stock_list, [SubType.K_1M], is_first_push=True,\n subscribe_push=True) # 订阅K线数据类型,FutuOpenD开始持续收到服务器的推送\n time.sleep(timeout)\n\n def display_quota(self):\n ret, data = self.quote_ctx.query_subscription()\n if ret == RET_OK:\n self.default_logger.info(f'Query Subscription Quota: {data}')\n ret, data = self.quote_ctx.get_history_kl_quota(get_detail=True)\n if ret == RET_OK:\n self.default_logger.info(f'Historical K-line Quota: {data}')\n\n\ndef update_hsi_constituents(input_path='./data/HSI.Constituents'):\n file_list = glob.glob(f\"{input_path}/*.xlsx\")\n hsi_constituents = []\n for input_file in file_list:\n hsi_constituents = pd.read_excel(input_file, index_col=0, engine='openpyxl')\n hsi_constituents = hsi_constituents.iloc[1::2].index.tolist()\n hsi_constituents = ['.'.join(item.split('.')[::-1]) for item in hsi_constituents]\n with open(f'./data/HSI.Constituents/HSI_constituents_{datetime.today().date()}.json', 'w+') as file_obj:\n json.dump(list(set(hsi_constituents)), file_obj)\n\n\ndef update_customized_stocks(input_path='./data/Customized', input_list=None):\n # Need to get existing stocks in the JSON and append it\n file_list = glob.glob(f\"{input_path}/*.xlsx\")\n stock_list = [] if input_list is None else input_list\n for input_file in file_list:\n customized_stocks = pd.read_excel(input_file, index_col=0, engine='openpyxl')\n customized_stocks = customized_stocks.iloc[1::2].index.tolist()\n stock_list.extend(['.'.join(item.split('.')[::-1]) for item in customized_stocks])\n with open(f'./data/Customized/Customized_Stocks_{datetime.today().date()}.json', 'w+') as file_obj:\n 
json.dump(list(set(stock_list)), file_obj)\n\n\ndef get_hsi_constituents(input_file):\n with open(input_file, 'r') as file_obj:\n return json.load(file_obj)\n\n\ndef get_customized_stocks(input_file):\n with open(input_file, 'r') as file_obj:\n return json.load(file_obj)\n","sub_path":"trading_engine.py","file_name":"trading_engine.py","file_ext":"py","file_size_in_byte":14678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"185382350","text":"#!/usr/bin/env python3\n\"\"\"\nTasks\nGet example output for tests - Ted\nParser for ODGI Bin file format - Ted\nComponent Segmentation Detection - Josiah and Joerg\n Python memory object model - Josiah\nOutput format\n\"\"\"\nfrom typing import List, Tuple, Set\nfrom pathlib import Path as osPath\nfrom datetime import datetime\nfrom sortedcontainers import SortedDict\nfrom DNASkittleUtils.Contigs import read_contigs\n\nfrom matrixcomponent.matrix import Path, Component, LinkColumn, Bin\nfrom matrixcomponent.PangenomeSchematic import PangenomeSchematic\nimport matrixcomponent.utils as utils\n\nimport os\nimport logging\nimport argparse\nimport matrixcomponent\n\nimport matrixcomponent.JSONparser as JSONparser\n\nimport numpy as np\nimport pandas as pd\n\nMAX_COMPONENT_SIZE = 100 # automatic calculation from cells_per_file did not go well\nLOGGER = logging.getLogger(__name__)\n\"\"\"logging.Logger: The logger for this module\"\"\"\n\n\ndef populate_component_occupancy(schematic: PangenomeSchematic):\n for component in schematic.components:\n # are matrix paths in the same order as schematic.path_names?\n # side effect instead of return\n component.occupants = [any([bin.coverage > 0.1 for bin in bins if bin])\n for bins in component.matrix]\n print(\"Populated Occupancy per component per path.\")\n\ndef populate_component_matrix(paths: List[Path], schematic: PangenomeSchematic):\n # the loops are 1) paths, and then 2) schematic.components\n # paths are in the same order 
as schematic.path_names\n for i, path in enumerate(paths):\n sorted_bins = SortedDict((bin.bin_id, bin) for bin in path.bins)\n values = list(sorted_bins.values())\n for component in schematic.components:\n from_id = sorted_bins.bisect_left (component.first_bin)\n to_id = sorted_bins.bisect_right(component.last_bin)\n relevant = values[from_id:to_id]\n padded = []\n if relevant:\n padded = [[]] * (component.last_bin - component.first_bin + 1)\n for bin in relevant:\n padded[bin.bin_id - component.first_bin] = \\\n Bin(bin.coverage, bin.inversion_rate, bin.first_nucleotide, bin.last_nucleotide)\n component.matrix.append(padded) # ensure there's always 1 entry for each path\n print(\"Populated Matrix per component per path.\")\n populate_component_occupancy(schematic)\n\n\ndef segment_matrix(matrix: List[Path], bin_width, cells_per_file, pangenome_length) -> PangenomeSchematic:\n from matrixcomponent import JSON_VERSION\n print(f\"Starting Segmentation process on {len(matrix)} Paths.\")\n schematic = PangenomeSchematic(JSON_VERSION,\n bin_width,\n 1,\n 1,\n [], [p.name for p in matrix], 1, pangenome_length)\n connections, dividers = dividers_with_max_size(matrix, cells_per_file)\n\n component_by_first_bin = {}\n component_by_last_bin = {}\n start_pos = 0\n for valid_start in dividers:\n if valid_start != 0:\n current = Component(start_pos, valid_start - 1)\n # current.active_members = 1\n schematic.components.append(current)\n component_by_first_bin[start_pos] = current\n component_by_last_bin[valid_start - 1] = current\n start_pos = valid_start\n print(f\"Created {len(schematic.components)} components\")\n\n # populate Component occupancy per Path\n populate_component_matrix(matrix, schematic)\n\n connections_array = connections.to_numpy()\n groups = utils.find_groups(connections_array[:, :2])\n path_indices = connections.path_index.to_numpy()\n\n participants_mask = np.zeros(len(schematic.path_names), dtype=bool)\n\n nLinkColumns = 0\n for (start, end) in groups:\n 
row = connections_array[start]\n src, dst = int(row[0]), int(row[1])\n\n participants_mask[:] = False\n participants_mask[path_indices[start:end]] = True\n phase_dots = participants_mask.tolist()\n link_column = LinkColumn(src, dst, participants=phase_dots)\n\n src_component = component_by_last_bin.get(src)\n dst_component = component_by_first_bin.get(dst)\n\n if src_component:\n src_component.departures.append(link_column)\n nLinkColumns += 1\n\n if dst_component:\n dst_component.arrivals.append(link_column)\n nLinkColumns += 1\n\n for i in range(len(schematic.components)-1):\n component, next_component = schematic.components[i],schematic.components[i+1]\n add_adjacent_connector_column(component, next_component, schematic)\n\n print(f\"Created {nLinkColumns} LinkColumns\")\n\n return schematic\n\n\ndef dividers_with_max_size(matrix: List[Path], cells_per_file: int):\n \"\"\"Adds in additional dividers to ensure very large components are split into\n multiple components with no Links.\"\"\"\n connections, dividers = find_dividers(matrix)\n # estimate number of paths, x10 because most paths are empty\n dividers_extended = []\n prev = 0\n for div in dividers:\n gap_size = div - prev\n if gap_size > MAX_COMPONENT_SIZE:\n for i in range(prev + MAX_COMPONENT_SIZE, div, MAX_COMPONENT_SIZE):\n dividers_extended.append(i) # add a series of dividers spaced ^ apart\n prev = div\n dividers_extended.append(div)\n\n return connections, dividers_extended\n\n\ndef add_adjacent_connector_column(component, next_component, schematic):\n \"\"\"The last Departure LinkColumn is to the adjacent component\n Use logic to decide on which rows need adjacent connectors\n Start with the easy subtractive case of occupancy - departures and move to more complex,\n multiple copy cases.\"\"\"\n adjacents = []\n for row in range(len(schematic.path_names)):\n connection_exists = False\n if component.occupants[row] and next_component.occupants[row]: # occupant present\n # n_arrivals = 
sum([column.participants[row] for column in component.arrivals])\n departed = any([column.participants[row] for column in component.departures]) # no need to compute sum\n # connection_exists = n_arrivals + 1 > departed\n connection_exists = not departed # didn't depart\n adjacents.append(connection_exists)\n component.departures.append(LinkColumn( # LinkColumn for adjacents\n component.last_bin,\n component.last_bin + 1,\n participants=adjacents))\n\n\ndef find_dividers(matrix: List[Path]) -> Tuple[pd.DataFrame, Set[int]]:\n max_bin = 1\n\n self_loops = [] # track self loops just in case component gets cut in half\n connection_dfs = [] # pandas dataframe with columns (from, to, path [name])\n\n n_remaining_links = 0\n for i, path in enumerate(matrix):\n bin_ids = np.array([b.bin_id for b in path.bins])\n bin_ids.sort()\n\n if bin_ids.size > 0:\n max_bin = max(max_bin, int(bin_ids[-1]))\n\n links = path.links\n if links.size == 0:\n continue\n\n # we don't want these to become dividers\n boundary_mask = utils.path_boundaries(links)\n self_loops_mask = utils.self_loops(links)\n\n if np.any(self_loops_mask):\n self_loops.append(links[self_loops_mask])\n\n links = links[~(boundary_mask | self_loops_mask)]\n\n path_dividers_mask = utils.path_dividers(links, bin_ids)\n path_dividers = links[path_dividers_mask]\n if path_dividers.size == 0:\n continue\n\n df = pd.DataFrame.from_dict({\n 'from': path_dividers[:, 0], # aka upstream\n 'to': path_dividers[:, 1], # aka downstream\n 'path_index': i\n })\n\n n_remaining_links = n_remaining_links + len(df)\n df = utils.sort_and_drop_duplicates(df) # early deduplication saves lots of runtime memory\n\n connection_dfs.append(df)\n\n # \n #\n # if (upstream + 1) in leaving.keys() :\n # print(f\"Found inherited rearrangement {upstream+1}\")\n #\n # TODO: insert prevarications about exact position\n # Divider should be somewhere in here\n # Tolerable range?\n # Stack up others using the same LinkColumn\n\n df = 
pd.concat(connection_dfs)\n df = utils.sort_and_drop_duplicates(df)\n n_uniq_links = len(df)\n\n # all start positions of components\n # (max_bin + 1) is end of pangenome\n dividers = np.concatenate([[1, max_bin + 1], df[\"from\"] + 1, df[\"to\"]])\n dividers = np.unique(dividers).tolist()\n\n print(f\"Largest bin_id was {max_bin}\\n\"\n f\"Found {len(dividers)} dividers.\")\n\n if self_loops:\n n_self_loops = np.unique(np.concatenate(self_loops), axis=0).shape[0]\n print(f\"Eliminated {n_self_loops} self-loops\")\n\n n_links = sum([len(p.links) for p in matrix])\n print(f\"Input has {n_links} listed Links. \"\n f\"Segmentation eliminated {(1-n_remaining_links/n_links)*100}% of them.\")\n print(f\"Found {n_uniq_links} unique links\")\n\n return df, dividers\n\n\ndef setup_logging():\n \"\"\"Setup the logging, add a log file\"\"\"\n log_name = osPath(args.json_file).with_suffix('.log')\n if args.output_folder:\n log_name = osPath(args.output_folder).joinpath('log')\n os.makedirs(args.output_folder, exist_ok=True)\n t = datetime.now()\n timestr = f\"{t.year}{t.month:02}{t.day:02}-{t.hour:02}-{t.minute:02}-{t.second:02}\"\n log_name = str(log_name) + '.' 
+ timestr + '.log'\n\n handler = logging.FileHandler(os.path.join(log_name))\n handler.setLevel(args.log_level)\n handler.setFormatter(logging.Formatter(matrixcomponent.LOGGING_FORMAT_STR,\n datefmt=matrixcomponent.LOGGING_DATE_FORMAT))\n logging.getLogger().addHandler(handler)\n\n\n# Helper class to allow multi-line help messages for argparse user parameters:\nclass SmartFormatter(argparse.HelpFormatter):\n def _split_lines(self, text, width):\n if text.startswith('R|'):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.HelpFormatter._split_lines(self, text, width)\n\n\ndef write_files(folder, odgi_fasta: Path, schematic: PangenomeSchematic):\n os.makedirs(folder, exist_ok=True) # make directory for all files\n\n fasta = None\n if odgi_fasta:\n fasta = read_contigs(odgi_fasta)[0]\n\n bin2file_mapping = schematic.split_and_write(args.cells_per_file, folder, fasta)\n\n schematic.write_index_file(folder, bin2file_mapping)\n\n\ndef get_arguments():\n \"\"\"Create the command line interface and return the command line arguments\n\n Returns\n -------\n Namespace\n The command line arguments\n\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=\"Example Command:\\n\"\n \"--json-file=data/run1.B1phi1.i1.seqwish.w100.json --cells-per-file=5000 --fasta=data/run1.B1phi1.i1.seqwish.fasta\")\n\n parser.add_argument('-j', '--json-file',\n dest='json_file',\n required=True,\n help='input JSON file')\n\n parser.add_argument('-f', '--fasta',\n dest='fasta',\n help='Optional: Fasta file containing the pangenome sequence generated by '\n 'odgi for this Graph.')\n\n parser.add_argument('-o', '--out-folder',\n dest='output_folder',\n help='output folder')\n\n parser.add_argument('-c', '--cells-per-file',\n dest='cells_per_file',\n default=5000,\n type=int,\n help='Tip: Adjust this number to get chunk files output close to 2MB. 
'\n 'Number of cells per file (#bins per file = #cells / #paths)')\n\n parser.add_argument('-l', '--log-level',\n default='DEBUG',\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),\n help='level of logging verbosity. DEBUG is most verbose')\n\n parser.add_argument('-p', '--parallel-cores',\n dest='parallel_cores',\n default=os.cpu_count(),\n type=int,\n help='Tip: do not set this one to more than available CPU cores)')\n\n args = parser.parse_args()\n if not args.output_folder:\n # directory with the same name as the json\n args.output_folder = osPath(args.json_file).parent.joinpath(osPath(args.json_file).stem)\n else:\n args.output_folder = osPath(args.output_folder)\n os.makedirs(args.output_folder, exist_ok=True)\n\n if (args.parallel_cores <= 0):\n args.parallel_cores = os.cpu_count()\n\n return args\n\n\ndef main():\n global args\n args = get_arguments()\n setup_logging()\n LOGGER.info(f'reading {osPath(args.json_file)}...\\n')\n paths, pangenome_length, bin_width = JSONparser.parse(args.json_file, args.parallel_cores)\n schematic = segment_matrix(paths, bin_width, args.cells_per_file, pangenome_length)\n del paths\n\n # this one spits out json and optionally other output files (fasta, ttl)\n write_files(args.output_folder, args.fasta, schematic)\n\n\nif __name__ == '__main__':\n main()\n#--json-file=data/run1.B1phi1.i1.seqwish.w100.json --cells-per-file=5000\n# --fasta=data/run1.B1phi1.i1.seqwish.fasta\n","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":13574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"375761318","text":"closings = {\n '(': ')',\n '[': ']',\n '{': '}',\n '<': '>',\n}\n\ndef is_opening(c):\n return c in closings.keys()\n\ndef validate(line):\n stack = []\n for c in line:\n if is_opening(c):\n stack.append(c)\n else:\n if c != closings[stack.pop()]:\n return False\n return stack\n\nlines = []\nwith open('input') as f:\n lines = [l.strip() 
for l in f.read().split('\\n')]\n\nprint(lines)\n\nscores = {\n ')': 1,\n ']': 2,\n '}': 3,\n '>': 4\n}\n\nfinal_scores = []\nfor l in lines:\n stack = validate(l)\n if stack:\n completion = [closings[c] for c in stack[::-1]]\n print('%s => %s' % (l, ''.join(completion)))\n total = 0\n for c in completion:\n total = total * 5 + scores[c]\n final_scores.append(total)\n\nprint(final_scores)\nprint(sorted(final_scores)[len(final_scores)//2])\n","sub_path":"2021/10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"572837756","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 9 19:42:26 2020\r\n\r\n@author: cb425\r\n\"\"\"\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nfrom torchvision.utils import save_image\r\n\r\nclass Discriminator(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.model = nn.Sequential(\r\n nn.Linear(784, 1024),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(1024, 512),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(512, 256),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(256, 1),\r\n nn.Sigmoid(),\r\n )\r\n\r\n def forward(self, x):\r\n x = x.view(x.size(0), 784) # reshape, avoids explicit data copy\r\n output = self.model(x)\r\n return output\r\n\r\nclass Generator(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.model = nn.Sequential(\r\n nn.Linear(100, 256),\r\n nn.ReLU(),\r\n nn.Linear(256, 512),\r\n nn.ReLU(),\r\n nn.Linear(512, 1024),\r\n nn.ReLU(),\r\n nn.Linear(1024, 784),\r\n nn.Tanh(),\r\n )\r\n\r\n def forward(self, x):\r\n output = self.model(x)\r\n output = output.view(x.size(0), 1, 28, 28)\r\n return output\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n torch.manual_seed(111)\r\n \r\n device = \"\"\r\n if torch.cuda.is_available(): 
# check if a GPU is available\r\n device = torch.device(\"cuda\")\r\n else:\r\n device = torch.device(\"cpu\")\r\n \r\n transform = transforms.Compose([ # Converts the data to a PyTorch tensor range from 0 to 1. \r\n transforms.ToTensor(), # Since the image backgrounds are black, most of the coefficients are equal to 0 when they’re represented using this range.\r\n transforms.Normalize((0.5,), (0.5,)) # changes the range of the coefficients to -1 to 1\r\n ])\r\n \r\n train_set = torchvision.datasets.MNIST(\r\n root=\".\", train=True, download=True, transform=transform\r\n )\r\n \r\n # shuffle the data from train_set and return batches of 32 samples that would be used to train the neural networks.\r\n batch_size = 100\r\n train_loader = torch.utils.data.DataLoader(\r\n train_set, batch_size=batch_size, shuffle=True\r\n )\r\n \r\n discriminator = Discriminator().to(device=device)\r\n generator = Generator().to(device=device)\r\n \r\n lr = 0.0001\r\n num_epochs = 50\r\n k = 2\r\n loss_function = nn.BCELoss()\r\n \r\n optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr=lr)\r\n optimizer_generator = torch.optim.Adam(generator.parameters(), lr=lr)\r\n \r\n for epoch in range(num_epochs):\r\n for n, (real_samples, mnist_labels) in enumerate(train_loader):\r\n # Data for training the discriminator\r\n real_samples = real_samples.to(device=device)\r\n real_samples_labels = torch.ones((batch_size, 1)).to(\r\n device=device\r\n )\r\n latent_space_samples = torch.randn((batch_size, 100)).to(\r\n device=device\r\n )\r\n generated_samples = generator(latent_space_samples)\r\n generated_samples_labels = torch.zeros((batch_size, 1)).to(\r\n device=device\r\n )\r\n all_samples = torch.cat((real_samples, generated_samples))\r\n all_samples_labels = torch.cat(\r\n (real_samples_labels, generated_samples_labels)\r\n )\r\n \r\n # Training the discriminator\r\n for ii in range(k):\r\n discriminator.zero_grad()\r\n output_discriminator = 
discriminator(all_samples)\r\n loss_discriminator = loss_function(\r\n output_discriminator, all_samples_labels\r\n )\r\n loss_discriminator.backward(retain_graph=True)\r\n optimizer_discriminator.step()\r\n \r\n # Data for training the generator\r\n latent_space_samples = torch.randn((batch_size, 100)).to(\r\n device=device\r\n )\r\n \r\n # Training the generator\r\n generator.zero_grad()\r\n generated_samples = generator(latent_space_samples)\r\n output_discriminator_generated = discriminator(generated_samples)\r\n loss_generator = loss_function(\r\n output_discriminator_generated, real_samples_labels\r\n )\r\n loss_generator.backward()\r\n optimizer_generator.step()\r\n\r\n # Show loss\r\n if n == batch_size - 1:\r\n print(f\"Epoch: {epoch} Loss D.: {loss_discriminator}\")\r\n print(f\"Epoch: {epoch} Loss G.: {loss_generator}\")\r\n \r\n with torch.no_grad():\r\n z = torch.randn(64, 100).cuda()\r\n sample = generator(z).cuda()\r\n \r\n save_image(sample.view(64, 1, 28, 28), './samples/GAN_generative_sample_' + '.png')\r\n train_set.train_data = train_set.train_data.type(torch.DoubleTensor)\r\n save_image(1/255*train_set.train_data[:64].view(64, 1, 28, 28), './samples/GAN_original_sample_' + '.png')\r\n \r\n torch.save(generator.state_dict(), 'generator.pth')\r\n torch.save(discriminator.state_dict(), 'discriminator.pth')","sub_path":"Gan.py","file_name":"Gan.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"343532750","text":"def solve_model(og,\n tol=1e-4,\n max_iter=1000,\n verbose=True,\n print_skip=25):\n \"\"\"\n Solve model by iterating with the Bellman operator.\n\n \"\"\"\n\n # Set up loop\n v = og.u(og.grid) # Initial condition\n i = 0\n error = tol + 1\n\n while i < max_iter and error > tol:\n v_greedy, v_new = T(v, og)\n error = np.max(np.abs(v - v_new))\n i += 1\n if verbose and i % print_skip == 0:\n print(f\"Error at iteration {i} is {error}.\")\n v 
= v_new\n\n if i == max_iter:\n print(\"Failed to converge!\")\n\n if verbose and i < max_iter:\n print(f\"\\nConverged in {i} iterations.\")\n\n return v_greedy, v_new\n","sub_path":"source/_static/lecture_specific/optgrowth/solve_model.py","file_name":"solve_model.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"219332074","text":"\"\"\"Kyle Friend, Washington and Lee University, April 2020\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\n#The first section loads a text file with gene names associated with ribosome densities.\r\n#Data are then stored in a dictionary.\r\nRDPs = {}\r\nn = 0\r\nrpfs = []\r\nwith open('Ribosome_densities.txt', 'r') as infile:\r\n for line in infile:\r\n line = line.rstrip()\r\n if n % 5 == 0:\r\n gene = line\r\n rpfs = []\r\n else:\r\n line = line.split()\r\n line = [float(x) for x in line]\r\n count = 0\r\n for val in line:\r\n if val == 0:\r\n count += 1\r\n if count > 0.25*len(line):\r\n pass\r\n else:\r\n rpfs.append(line)\r\n if len(rpfs) == 4:\r\n RDPs[gene] = rpfs\r\n rpfs = []\r\n n += 1\r\n\r\n#Next biological replicates are normalized to the maximum value.\r\nall_genes = list(RDPs.keys())\r\nfor gene in all_genes:\r\n rpfs = RDPs[gene]\r\n norms = []\r\n for rpf in rpfs:\r\n top = max(rpf)\r\n rpf = [x/top for x in rpf]\r\n norms.append(rpf)\r\n RDPs[gene] = norms\r\n\r\n#Then arrays are created with data for the two treatments.\r\n#The array split operation creates 100 bins with ribosomal densities.\r\n\r\nCon_Codons = []\r\nTor_Codons = []\r\nfor gene in all_genes:\r\n rpfs = RDPs[gene]\r\n Con_avg = []\r\n Tor_avg = []\r\n if len(rpfs[0]) < 100: continue\r\n for i in range(len(rpfs[0])):\r\n Con_avg.append((rpfs[0][i] + rpfs[1][i])/2)\r\n Tor_avg.append((rpfs[2][i] + rpfs[3][i])/2)\r\n Con_avg = np.array_split(Con_avg, 100)\r\n Tor_avg = np.array_split(Tor_avg, 100)\r\n bin_con = []\r\n bin_tor = []\r\n 
for val in Con_avg:\r\n bin_con.append(np.mean(val))\r\n for val in Tor_avg:\r\n bin_tor.append(np.mean(val))\r\n Con_Codons.append(bin_con)\r\n Tor_Codons.append(bin_tor)\r\n\r\nCon_Codons = np.array(Con_Codons)\r\nTor_Codons = np.array(Tor_Codons)\r\n\r\n#Maximum values are created to set the final output to 1.\r\nCon_dense = np.mean(Con_Codons, axis = 0)\r\nTor_dense = np.mean(Tor_Codons, axis = 0)\r\nCon_dense = list(Con_dense)\r\ntop_cons = max(Con_dense)\r\nTor_dense = list(Tor_dense)\r\ntop_tors = max(Tor_dense)\r\n\r\n#Average ribosome density is calculated on a per codon basis, and confidence intervals are also generated.\r\n#Statistical analysis is also performed using a Mann-Whitney test.\r\n(dim_x, dim_y) = Con_Codons.shape\r\n\r\noutfile = open('Normalized_RPFs_Percent_ORF.txt', 'w')\r\n\r\nfor i in range(dim_y):\r\n cons = Con_Codons[:, i]\r\n cons = [x/top_cons for x in cons]\r\n tors = Tor_Codons[:, i]\r\n tors = [x/top_tors for x in tors]\r\n n = len(cons)\r\n con_avg, se_cons = np.mean(cons), stats.sem(cons)\r\n tors_avg, se_tors = np.mean(tors), stats.sem(tors)\r\n h_cons = se_cons * stats.t.ppf((1 + 0.99)/2.0, n - 1)\r\n h_tors = se_tors * stats.t.ppf((1 + 0.99)/2.0, n - 1)\r\n (U_stat, p_value) = stats.mannwhitneyu(cons, tors)\r\n summary = [con_avg, tors_avg, h_cons, h_tors, p_value]\r\n summary = [str(x) for x in summary]\r\n\r\n outfile.write('\\t'.join(summary) + '\\n')\r\n\r\noutfile.close()\r\n","sub_path":"Fraction_ORF_Compile.py","file_name":"Fraction_ORF_Compile.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"195645959","text":"# Databricks notebook source\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\n\n# COMMAND ----------\n\ndf = pd.read_csv('/dbfs/FileStore/lorenzo.baldacci@databricks.com/example_wp_peyton_manning.csv')\ndf['y'] = np.log(df['y'])\ndf.head()\n\n# COMMAND ----------\n\nm = 
Prophet()\nm.fit(df);\n\n# COMMAND ----------\n\nfuture = m.make_future_dataframe(periods=365)\nfuture.tail()\n\n# COMMAND ----------\n\nforecast = m.predict(future)\nforecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()\n\n# COMMAND ----------\n\nm.plot(forecast);\n\n# COMMAND ----------\n\nm.plot_components(forecast)\n","sub_path":"prophet-forecasting.py","file_name":"prophet-forecasting.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"208522709","text":"import socket\nimport struct\nimport os\nBUF_SIZE = 1024#一次接受1024bytes\nDOWN_FLAG=0\nUP_FLAG=1\nGET_FLAG=2\n\ndef uploadreq(server,name,size,client_addr):\n count=0\n os.chdir(r'C:\\Users\\wtf\\Desktop\\python网络编程\\csdn_udp\\SeverList')\n name=Fname.split(b\"\\x00\")[0]\n if size>0:\n print ('client ip:',client_addr,' File name is:',name,\"字节大小是\",size)\n f=open(name,'wb')#写模式打开\n server.sendto(b'ok',client_addr) #发送就绪,接收结构体\n while True:\n data,client_addr=server.recvfrom(BUF_SIZE)\n if data!=b'end': #0-9,接受bytes\n f.write(data)\n print ('received'+str(count)+'次接受自 ',client_addr,'\\\\n')\n count+=1\n else:\n break\n #正常情况下回复消息\n server.sendto('ok'.encode('utf-8'),client_addr)\n count+=1\n f.close()\n print('循环了'+str(count))\n else:\n print(\"该文件为空\")\n\nserver_addr = ('127.0.0.1',8888)\nserver = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nserver.bind(server_addr)\n#循环接受客户端发送数据,并将数据发回\ncount=0\n\nwhile True:\n if count==0:\n print (\"Are You Ready?\")\n data,client_addr = server.recvfrom(BUF_SIZE)\n Fname,size,flag=struct.unpack('20sII',data)\n print('来自',client_addr,'的连接')\n os.chdir(r'C:\\Users\\wtf\\Desktop\\python网络编程\\csdn_udp\\SeverList') #服务器端路径 \\是转义符\n if flag==UP_FLAG:\n # uploadreq(server,Fname,size,client_addr)\n # count=0\n name=Fname.split(b\"\\x00\")[0]#因为会用\\x00填充\n if size>0:\n print ('client ip:',client_addr,' File name is:',name,\"字节大小是\",size)\n f=open(name,'wb')#写模式打开\n 
server.sendto(b'ok',client_addr) #发送就绪,接收结构体\n else:\n print(\"该文件为空\")\n while True:\n data,client_addr=server.recvfrom(BUF_SIZE)\n if data!=b'end': #0-9,接受bytes\n f.write(data)\n print ('received'+str(count)+'次接受自 ',client_addr,'\\n')\n count+=1\n else:\n break\n #回复消息\n server.sendto('ok'.encode('utf-8'),client_addr)\n count+=1\n print('循环了'+str(count))\n f.close()\n count=0 #就再次循环\n elif flag==GET_FLAG:\n a=os.listdir()\n flist=''\n print(a)\n for line in a:\n flist+=str(line)+'%'\n print('\\n')\n print(line)\n data=flist.encode('utf-8')\n server.sendto(data,client_addr)\n print('已发送我方列表')\n count=0\n elif flag==DOWN_FLAG:\n #根据名字查找打开文件\n name=Fname.split(b\"\\x00\")[0]\n down_count=1\n f=open(name,'rb')\n while True:\n data=f.read(BUF_SIZE)\n if data!=b'':\n server.sendto(data,client_addr)\n print('server already send',down_count)\n down_count+=1\n else:\n break\n echo,client_addr=server.recvfrom(BUF_SIZE)\n server.sendto(b'end',client_addr)\n f.close()\n\n#打包文件元祖消息并发送\nserver.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"14538884","text":"#S was going to make targets their own classes, but might as well preserve \n#S dicts. we can just have the scheduler perform functions on them and update\n#S values. \n\"\"\"\nThe scheduler class, which performs all checks on observability of targets,\ncalculates weights, and some ephem on sun, moon. 
Will contain some other \nutility functions.\n\"\"\"\n\nimport numpy as np\nimport math\nimport os\nimport ephem\nimport sys\nimport simbad_reader\n#import targetlist\nimport ephem\n#import ipdb\n#import env\nimport datetime\nimport time\nimport subprocess\nfrom configobj import ConfigObj\nimport utils\n\n###\n# SCHEDULER\n###\n\nclass scheduler:\n def __init__(self,config_file,base_directory='.'):\n #S want to get the site from the control class, makes easy computing\n #S LST and such\n self.base_directory = base_directory\n self.config_file = config_file\n self.dt_fmt = '%Y-%m-%dT%H:%M:%S'\n # load the config file\n self.load_config()\n # make the observer which will be used in calculations and what not\n self.obs = ephem.Observer()\n self.obs.lat = ephem.degrees(str(self.latitude)) # N\n self.obs.lon = ephem.degrees(str(self.longitude)) # E\n self.obs.horizon = ephem.degrees(str(self.sun_horizon))\n self.obs.elevation = self.elevation # meters \n self.time = datetime.datetime(2000,1,1,12,00,00)\n self.obs.date = self.time\n # an ephem sun and moon object for tracking the sun\n self.sun = ephem.Sun()\n self.sun.compute(self.obs)\n self.moon = ephem.Moon()\n self.moon.compute(self.obs)\n \n # seconds between three obs\n self.sep_limit = 120.*60.\n # if you are using the simbad reader target list\n self.target_list = simbad_reader.read_simbad(self.targets_file)\n self.make_fixedBodies()\n\n def load_config(self):\n try:\n config = ConfigObj(self.base_directory+'/config/'+self.config_file)\n self.latitude = config['Setup']['LATITUDE']\n self.longitude = config['Setup']['LONGITUDE']\n self.elevation = float(config['Setup']['ELEVATION'])\n self.sitename = config['Setup']['SITENAME']\n self.sun_horizon = float(config['Setup']['HORIZON'])\n self.target_horizon = float(config['Setup']['MINALT'])\n self.targets_file = config['Setup']['TARGETSFILE']\n self.min_moon_sep = float(config['Setup']['MINMOONSEP'])\n #self.tac_type = config['Setup']['TAC_TYPE']\n 
#self.tac_fraction = float(config['Setup']['TAC_FRACTION'])\n # used for minerva logging\n# self.logger_name = config['Setup']['LOGNAME']\n\n except:\n print('ERROR accessing configuration file: ' + self.config_file)\n sys.exit()\n\n\n def calc_ha(self,target):\n self.site.obs.date = datetime.datetime.utcnow()\n lst = (self.site.obs.sidereal_time()*180./np.pi)/15.\n ha = lst - target['ra']\n if ha<0.:\n ha+=24.\n if ha>24.:\n ha-=24.\n if ha>12.:\n ha = ha-24\n\n return ha\n\n def sort_target_list(self,key='weight'):\n #S sort the target_list list of target dictionaries by the given key\n try:\n self.target_list = sorted(self.target_list, key=lambda x:x[key])\n return True\n except:\n print('Something went wrong when sorting with ' + key)\n return False\n\n def choose_target(self):\n #S need to make a target class for being observered?\n #S will return the selected target dictionary\n #S need way to choose next best target\n\n #S update the time, probably don't need this here\n self.site.obs.date = datetime.datetime.utcnow()\n #S update the weights for all the targets in our list\n self.calculate_weights()\n #S sort the target list by weight, so that the list of dictionaries\n #S is now in descending order based on weight.\n self.target_list = sorted(targetlist, key=lambda x:x['weight'])\n for target in self.target_list:\n if self.can_observe(target):\n return target\n #S I thnik we should cover all this in can_observe()\n \"\"\"\n #S Check to see if we already observed this target. 
Could be \n #S switched to check if observed less than a certain number\n #S this condition may need to be removed for multiple observations\n #S per night.\n if target['observed'] == 1:\n continue\n #S Check to see if we will try and observe past sunset\n if (datetime.datetime.utcnow()+\\\n datetime.timedelta(seconds=target['exptime']))\\\n >self.obs.NautTwilBegin():\n continue\n #S check to see if the target will go below horizon before \n #S finishing the observation.\n \n #S if all checks pass, we want to return the chosen target dict\n\n \"\"\"\n\n\n def update_list(self,bstar=False,includeInactive=False):\n #S need to update list potentially\n try:\n self.target_list=targetlist.mkdict(\\\n bstar=bstar,\\\n includeInactive=includeInactive)\n except:\n #S Placeholder for logger\n pass\n \n\n def calculate_weights(self):\n #S need to update weights for all the targets in the list.\n #S going to use simple HA weighting for now.\n for target in self.target_list:\n if self.is_observable(target):\n #target['weight'] = self.weight_obstime(target,timeof=self.time)*self.weight_uptime(target,timeof=self.time,latitude=self.latitude,min_alt=self.target_horizon)*self.weight_HA(target,timeof=self.time)\n #target['weight'] = self.calc_weight(target,timeof=self.time) #Old HA\n timeobs = self.weight_obstime(target,timeof=self.time) #time sometimes generates exception, why?\n hourangle = self.weight_HA(target,timeof=self.time)\n #print(target['name'], timeobs, hourangle)\n target['weight'] = timeobs*hourangle\n #target['weight'] = self.calc_weight1(target,timeof=self.time) #MINERVA\n print(target['name'], \"is observable with time&HA\", timeobs, hourangle)\n else:\n target['weight'] = -999\n print(target['name'], \"is NOT observable\")\n self.target_list = sorted(self.target_list, key=lambda x:-x['weight'])\n #pass\n\n def weight_weather(self,target,timeof=None):\n \"\"\"\n Generates a weighting based on very rough weather estimate.\n Returned value is 1/probability of clear 
skys when target transits \n meridian at midnight LST.\n \"\"\"\n weather_weight = 1\n return weather_weight\n\n def weight_HA(self,target,timeof=None):\n \"\"\"\n Uses alternate HA weight formulation\n weight = 1 - abs(norm(HA/RA)), 0 to 1 if\n above horizon, 0 to -1 if below\n \"\"\"\n #old algorithm, need to check units\n #if target['observed']>1:\n # return -1\n # temp set the horizon for targets\n #self.obs.date = self.time\n #lst = math.degrees(self.obs.sidereal_time())/15. #\"hours\"\n #target['fixedbody'].compute(self.obs)\n #return 1.-np.abs((lst-target['ra'])/12.)\n\n #debugged HA weighting added by pdn, needs reviewing\n target_ha=(math.degrees(self.obs.sidereal_time())/15-target['ra'])\n obs_weight= 1.-np.abs(target_ha/6.0) #allows obs to horizon, but okay if min-alt works\n #print(\"Positioning diffs:\", math.degrees(self.obs.sidereal_time())/15, target['ra'])\n #print(\"HA, weight:\", target_ha, obs_weight)\n return obs_weight\n\n\n def weight_uptime(self,target,timeof=None,latitude=None,min_alt=None):\n \"\"\"\n Weighting based on amount of time object is above a given altitude \n (0 == horizon). Weighting does not really consider if object is observable. \n can always point to the objects. 
Goes from 1 to 0 ish.\n \"\"\"\n # if no timeof provided, use current utc\n if timeof == None:\n timeof = datetime.datetime.utcnow()\n\n #generic weighting because some objects will get observed less often due to poor decs\n if (latitude == None):\n latitude = self.latitude\n\n # if no minimum altitude is supplied, go with config file.\n if (min_alt == None):\n min_alt = self.target_horizon\n\n # We don't care about circumpolar objects that much\n if(math.radians(float(latitude)) >= np.pi/2-math.radians(target['dec'])):\n time_weight=0.1\n print(\"Uptime weighting (always up): \", time_weight)\n else:\n try: \n time_weight = 1-np.arccos((np.sin(min_alt)-np.sin(math.radians(target['dec']))*np.sin(math.radians(float(latitude))))/(np.cos(math.radians(target['dec']))*np.cos(math.radians(float(latitude)))))/np.pi\n print(\"Uptime weighting:\", time_weight)\n except: #below horizon, circumpolar, or broken.\n time_weight = 0.0\n print(\"Uptime weighting: unobservable\")\n\n return time_weight\n\n\n def weight_obstime(self,target,timeof=None,latitude=None):\n \"\"\"\n Weighting target observation by when it was last observed.\n Default is sep_limit (default: 7200 seconds), which was initially\n setup for 3 obs/night.\n \"\"\"\n # if now timeof provided, use current utc\n if timeof == None:\n timeof = datetime.datetime.utcnow()\n\n #S if the target was observed less than the separation time limit\n #S between observations, then we give it an 'unobservable' weight.\n # just comment out if you want a random start time\n# self.start_ha = -self.sep_limit/3600.\n try:\n print(\"timeof, last_obs, diff, sep_limit\", timeof, target['last_obs'][1], (timeof-target['last_obs'][1]).total_seconds(), self.sep_limit)\n if (timeof-target['last_obs'][1]).total_seconds() < self.sep_limit:\n return 0.\n except:\n print(\"timeof: \", timeof)\n print(\"exception: target['last_obs'] == \", target['last_obs'], \"\\n\")\n\n cad_weight = 0.\n\n try:\n # add weight for longest days since last 
observed\n lastobs = (timeof-target['last_obs'][1]).total_seconds() / (86400.)\n cad_weight = lastobs\n #print(\"Lastobs time weighting:\", cad_weight)\n except:\n cad_weight = 0.#boop weight to 1 instead?\n print('Error: lastobs timing. Zeroing weight.')\n return cad_weight\n \n\n\n def calc_weight(self,target,timeof=None):\n \"\"\"\n simple, just going to weight for current ha sort of\n weight = 1 - abs(HA/RA)\n \"\"\"\n if target['observed']>1:\n return -1\n # temp set the horizon for targets\n self.obs.date = self.time\n lst = math.degrees(self.obs.sidereal_time())/15.\n target['fixedbody'].compute(self.obs)\n return 1.-np.abs((lst-target['ra'])/12.)\n\n def make_fixedBodies(self):\n for target in self.target_list:\n target['fixedbody'] = ephem.FixedBody()\n# ipdb.set_trace()\n target['fixedbody']._ra = str(target['ra'])\n target['fixedbody']._dec = str(target['dec'])\n# target['fixedbody']._epoch = 2000.0\n target['fixedbody'].compute(self.obs)\n\n \n def calc_weight1(self,target,timeof=None,obspath=None):\n \"\"\"\n This is the full minerva weighting with trying to get 3 observations every night\n \"\"\"\n # need some sort of default for the obs path\n if obspath == None:\n obspath = self.sim_path\n\n # if now timeof provided, use current utc\n if timeof == None:\n timeof = datetime.datetime.utcnow()\n\n #S if the target was observed less than the separation time limit\n #S between observations, then we give it an 'unobservable' weight.\n # just comment out if you want a random start time\n# self.start_ha = -self.sep_limit/3600.\n try:\n if (timeof-target['last_obs'][1]).total_seconds()<\\\n self.sep_limit:\n return -1.\n except:\n #ipdb.set_trace()\n print(\"exception\")\n \n\n if target['observed']>3:\n return -1.\n\n cad_weight = 0.\n try:\n \n # if os.stat(obspath+target['name']+'.txt'):\n# obs_hist = self.get_obs_history(target,simpath=obspath)\n \n cad_weight = 0.\n # if the last obs time was great than four hours ago, add a bit\n# ipdb.set_trace()\n# 
print((timeof-obs_hist[-1][1]).total_seconds()>4.*3600.)\n if (timeof-target['last_obs'][-1][0]).total_seconds()>24.*3600.:\n# print('cad boost to ' +target['name'])\n cad_weight = 1.\n except:\n print('boop\\n')\n cad_weight = 1.\n \n #S weight for the first observation of a three obs run.\n if target['observed']%3==0:\n #S the standard deviation of this is actually important as we \n #S start to think about cadence. if we want to make cadence\n #S and the three obs weight complimetnary or something, a steeper\n #S drop off of the gaussian WILL matter when mixed with a cad term.\n target_ha=(math.degrees(self.obs.sidereal_time())-target['ra'])\n threeobs_weight= \\\n np.exp(-((target_ha-self.start_ha)**2./(2.*.5**2.)))\n\n #S weight for the second observation of a three obs run.\n elif target['observed']%3 == 1:\n #S there is a cap of 2. on this weight, which means a third \n #S observation will always be prioritized.\n threeobs_weight=np.min(\\\n [2.,1.+((timeof-target['last_obs'][-1][0]).total_seconds()-\\\n -self.sep_limit)/self.sep_limit])\n\n #S weight for the third observation of a three obs run, but note that\n #S there is no cap on this one.\n elif target['observed']%3 == 2:\n threeobs_weight=2.+\\\n ((timeof-target['last_obs'][-1][0]).total_seconds()-\\\n self.sep_limit)/self.sep_limit\n\n return threeobs_weight+cad_weight\n \n \n# no multiple observations in a night, weighting by sin(altitude) with a linear-weight along with time since last observation\n def calc_weight2(self,target,timeof=None,obspath=None):\n\n # need some sort of default for the obs path\n if obspath == None:\n obspath = self.sim_path\n\n # if now timeof provided, use current utc\n if timeof == None:\n timeof = datetime.datetime.utcnow()\n\n #S if the target was observed less than the separation time limit\n #S between observations, then we give it an 'unobservable' weight.\n # just comment out if you want a random start time\n# self.start_ha = -self.sep_limit/3600.\n try:\n if 
(timeof-target['last_obs'][-1][0]).total_seconds()<\\\n self.sep_limit:\n return -1.\n except:\n ipdb.set_trace()\n \n\n# if target['observed']>3:\n# return -1.\n\n cad_weight = 0.\n try:\n # add weight for longest since last observed\n lastobs = (timeof-target['last_obs'][-1][0]).total_seconds() / (24.*3600.)\n cad_weight = lastobs\n except:\n cad_weight = 0.\n \n # note; this weighting downweights stars at poor declinations that never get to high altitudes. \n self.obs.date = timeof\n self.obs.horizon = str(self.target_horizon)\n target['fixedbody'].compute(self.obs)\n star=math.sin(target['fixedbody'].alt)\n horiz=math.sin(math.radians(float(self.target_horizon))) \n if star3:\n# return -1.\n\n cad_weight = 0.\n try:\n # add weight for longest days since last observed\n lastobs = (timeof-target['last_obs'][-1][0]).total_seconds() / (24.*3600.)\n cad_weight = lastobs\n except:\n cad_weight = 0.#boop weight to 1 instead?\n print('Error: lastobs timing. Zeroing weight.\\n')\n \n # note; this weighting downweights stars at poor declinations that never get to high altitudes. 
\n target_ha=(math.degrees(self.obs.sidereal_time())-target['ra'])\n obs_weight= 1.-np.abs(target_ha/6.0)\n\n return obs_weight*cad_weight\n\n\n def calc_weight4(self,target,timeof=None,obspath=None,latitude=None):\n print(target['name'])\n\n # need some sort of default for the obs path\n if obspath == None:\n obspath = self.sim_path\n\n # if now timeof provided, use current utc\n if timeof == None:\n timeof = datetime.datetime.utcnow()\n\n #S if the target was observed less than the separation time limit\n #S between observations, then we give it an 'unobservable' weight.\n # just comment out if you want a random start time\n# self.start_ha = -self.sep_limit/3600.\n try:\n if (timeof-target['last_obs'][1]).total_seconds()<\\\n self.sep_limit:\n return -1.\n except:\n #ipdb.set_trace()\n print(\"timeof: \", timeof)\n print(\"exception: target['last_obs'] == \", target['last_obs'], \"\\n\")\n\n cad_weight = 0.\n\n try:\n # add weight for longest days since last observed\n lastobs = (timeof-target['last_obs'][1]).total_seconds() / (24.*3600.)\n cad_weight = lastobs\n print(\"Lastobs time weighting:\", cad_weight)\n except:\n cad_weight = 0.#boop weight to 1 instead?\n print('Error: lastobs timing. 
Zeroing weight.')\n \n target_ha=(math.degrees(self.obs.sidereal_time())/15-target['ra'])\n obs_weight= 1.-np.abs(target_ha/6.0) #allows obs to horizon, but okay if min-alt works\n print(\"HA weight:\", target_ha)\n\n #generic weighting because some objects will get observed less often due to poor decs\n if (latitude == None):\n latitude = self.latitude\n\n try: \n time_weight=math.pi/math.acos(-math.tan(math.radians(target['dec']))*math.tan(math.radians(float(latitude))))\n print(\"Time weighting:\", time_weight)\n except:\n time_weight=1.0\n\n if(math.radians(float(latitude)) >= np.pi/2-math.radians(target['dec'])):\n time_weight=0.1\n print(\"Time weighting:\", time_weight)\n time_weight=1.0\n print(\"Net Weighting: \", obs_weight*cad_weight*time_weight, '\\n')\n return obs_weight*cad_weight*time_weight\n\n\n def prep_night(self,timeof=None,init_run=False):\n \"\"\"\n A function to go through some processes that only need to be done at \n the beginning of the night.\n \"\"\"\n if timeof == None:\n timeof = self.time\n # temp set the horizon for targets\n self.obs.date = self.time\n self.obs.horizon = str(self.target_horizon)\n # get a random starting hour angle normally distrubted around an hour\n # angle of -2. 
this is for the three observations per night of MINERVA,\n # and might be useless to you.\n self.start_ha = np.random.normal(loc=-2.,scale=.5)\n\n for target in self.target_list:\n # reset targets observation counter for the night to zero\n target['observed']=0\n # compute the target for the obs at time and horizon\n target['fixedbody'].compute(self.obs)\n # if it's neverup, flag it\n if target['fixedbody'].neverup:\n target['neverup']=True\n else:\n target['neverup']=False\n try:\n target['last_obs']=self.get_obs_history(target,prev_obs=1)\n except:\n target['last_obs']=[]\n if init_run == True:\n try:\n target['last_obs']=self.get_obs_history(target,prev_obs=1)\n except:\n target['last_obs']=[]\n # reset to sun horizon\n self.obs.horizon = str(self.sun_horizon)\n # debugging\n print(\"Start of night\", timeof)\n \n \n def get_obs_history(self,target,prev_obs=1,simpath=None):\n if simpath == None:\n simpath = self.sim_path\n # a function that 'tail's a target file to get the last prev_obs and\n # places the details in a list?\n # add a line for the empty one at the end of a file?\n # Could probably be done better with np.genfromtxt and array slicing.\n target_file = simpath+target['name']+'.txt'\n raw_obs=\\\n subprocess.check_output(['tail','-n',str(prev_obs),target_file])\n obs_lines = str(raw_obs).split(',')[:-1]\n obs_list = []\n try:\n #line = line.split('\\t')\n obs_lines[0] = datetime.datetime.strptime(utils.bjd2utc(obs_lines[0][2:]),self.dt_fmt)\n obs_lines[1] = datetime.datetime.strptime(utils.bjd2utc(obs_lines[1]),self.dt_fmt)\n #obs_lines[0] = utils.bjd2utc(obs_lines[0][2:])\n #obs_lines[1] = utils.bjd2utc(obs_lines[1])\n obs_lines[2] = float(obs_lines[2])\n obs_lines[3] = float(obs_lines[3])\n obs_lines[4] = float(obs_lines[4])\n #obs_list.append(line)\n except:\n # so it doesn't try and parse the header\n pass\n if obs_list is []:\n # Need better erroring out on a blank observation list.\n ipdb.set_trace()\n return obs_lines\n #return obs_list\n\n\n 
def is_observable(self,target,timeof=None):\n # if the timeof obs is not provided, use the schedulers clock for the \n # time. this could cause issues, need to keep an eye on it\n if timeof == None:\n timeof=self.time\n #S want to make sure taget is a legal candidate. this includes avoiding\n #S targets who:\n #S - have not risen\n #S - will set before exposure will be finished\n #S - have a suitable moon separation\n #S and other criteia decided later\n \n #S Check to see if we already observed this target. Could be \n #S switched to check if observed less than a certain number\n #S this condition may need to be removed for multiple observations\n #S per night.\n# if target['observed'] == 1:\n# continue\n #S Check to see if we will try and observe past sunset\n \n\n \n # check if the star will be rising sometime tonight\n #TODO:\n # i think this checks for just a 24 hour period, but needs more \n # investigation\n if target['neverup']:\n #print(target['name']+\" is never up\")\n return False\n\n # check if the target is separated enough from the moon\n #TODO test\n moon = ephem.Moon()\n moon.compute(self.obs)\n if ephem.separation(moon,target['fixedbody']) math.radians(float(self.target_horizon)):\n # see if we have enough time to observe\n if timeof+datetime.timedelta(minutes=target['exptime'])<\\\n self.nextsunrise(timeof,horizon=self.sun_horizon):\n # check if it will be below horizon at the end of the obs\n finish_time = timeof+\\\n datetime.timedelta(minutes=target['exptime'])\n self.obs.date=finish_time\n target['fixedbody'].compute(self.obs)\n if target['fixedbody'].alt>math.radians(self.target_horizon):\n # there is time to observe\n return True\n else:\n # the target will set before fully observable\n return False\n else:\n # there is not enought time to observe this target before the \n # sun rises\n #print(\"can't observe\"+target['name'])\n return False\n else:\n return False\n\n \n \n # reset the horizon for the sun\n self.obs.horizon = 
str(self.sun_horizon)\n\n\n\n def nextsunrise(self, currenttime, horizon=-12):\n self.obs.horizon=str(horizon)\n sunrise = self.obs.next_rising(ephem.Sun(),start=currenttime,\\\n use_center=True).datetime()\n return sunrise\n def nextsunset(self, currenttime, horizon=-12):\n self.obs.horizon=str(horizon)\n sunset = self.obs.next_setting(ephem.Sun(), start=currenttime,\\\n use_center=True).datetime()\n return sunset\n\n def prevsunrise(self, currenttime, horizon=-12):\n self.obs.horizon=str(horizon)\n sunrise = self.obs.previous_rising(ephem.Sun(), start=currenttime,\\\n use_center=True).datetime()\n return sunrise\n def prevsunset(self, currenttime, horizon=-12):\n self.obs.horizon=str(horizon)\n sunset = self.obs.previous_setting(ephem.Sun(), start=currenttime,\\\n use_center=True).datetime()\n return sunset\n def sunalt(self,timeof=None):\n if timeof == None:\n self.obs.date=datetime.datetime.utcnow()\n else:\n self.obs.date=timeof\n sun = ephem.Sun()\n sun.compute(self.obs)\n return float(sun.alt)*180.0/math.pi\n def sunaz(self):\n sun = ephem.Sun()\n sun.compute(self.obs)\n return float(sun.az)*180.0/math.pi\n\n def dict_to_class(self):\n #S a potential route we can take.\n pass\n\n def get_photom_scheds(self,night,telescopes):\n #S Holding off till later on this.\n pass\n def read_photom_sched(self,photom_file):\n #S See get_photom_scheds()\n pass\n\n#S Things we need\n#S -good way to break one telescope away.\n#S -i think we really need to break away from observing scripts for each \n#S telescope. 
or at least need to find a new way potentially.\n#S -\n\nif __name__ == '__main__':\n# ipdb.set_trace()\n e = scheduler('scheduler.ini')\n# ipdb.set_trace()\n","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":28584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"364652443","text":"import tkinter\r\nimport json\r\nimport tkinter.scrolledtext as tkst\r\nwindow = tkinter.Tk()\r\n\r\nframe = tkinter.Frame(window)\r\nframe.pack()\r\n\r\nlabel = tkinter.Label(frame, text='Задача:')\r\nlabel.grid(row=1, column=1)\r\n\r\nentry1 = tkinter.Entry(frame)\r\nentry1.grid(row=1,column=2)\r\n\r\nlabel = tkinter.Label(frame, text='Категория:')\r\nlabel.grid(row=2, column=1)\r\n\r\nentry2 = tkinter.Entry(frame)\r\nentry2.grid(row=2,column=2)\r\n\r\nlabel = tkinter.Label(frame, text='Время:')\r\nlabel.grid(row=3, column=1)\r\n\r\nentry3 = tkinter.Entry(frame)\r\nentry3.grid(row=3,column=2)\r\n\r\ndef zadat():\r\n with open('js.json', 'r') as file:\r\n data = json.load(file)\r\n data.update( {counter.get(): {'Задача ': entry1.get(), 'Категория': entry2.get(), 'Время': entry3.get() } } )\r\n\r\n with open('js.json', 'w') as f:\r\n json.dump(data, f)\r\n\r\n counter.set(counter.get() + 1)\r\n\r\ncounter = tkinter.IntVar()\r\ncounter.set(1)\r\n\r\ndef spis():\r\n with open('js.json', 'r', encoding='utf-8') as file:\r\n data = json.load(file)\r\n aw = list(data.values())\r\n ed = ''\r\n for i in range(len(aw)):\r\n de = aw[i]\r\n ka = de[\"Задача \"]\r\n fd = de['Категория']\r\n rg = de['Время']\r\n ed = ed + ' Задача ' + ka + ' Категория ' + fd + ' Время ' + rg\r\n\r\n editArea = tkst.ScrolledText(\r\n width=40,\r\n height=13\r\n )\r\n editArea.pack(padx=110, pady=10)\r\n editArea.insert(tkinter.INSERT, ed)\r\n\r\nbutton = tkinter.Button(frame, text='Задать', command = zadat)\r\nbutton.grid(row=4,column=2)\r\n\r\nbutton = tkinter.Button(frame, text='Список задач', command = 
spis)\r\nbutton.grid(row=5,column=2)\r\n\r\nbutton = tkinter.Button(frame, text='Выход', command = exit)\r\nbutton.grid(row=6,column=2)\r\n\r\n\r\nwindow.mainloop()","sub_path":"kt1/kt1/my_task_w2.py","file_name":"my_task_w2.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"7242626","text":"from sklearn import svm\nfrom sklearn import datasets\nprint(\"import of svm and datasets done.\")\n\nclf = svm.SVC(gamma='scale')\niris = datasets.load_iris()\n\nX,y = iris.data, iris.target # inputs and wanted outputs\nclf.fit(X,y)\nprint(\"\\nThe trained classifier is:\\n\",clf)\n\n#to save a model thanks to python's built-in persistence model\nimport pickle\ns = pickle.dumps(clf)\nclf2 = pickle.loads(s)\n\nN = 2\nprint(\"\\nDigits predicted:\\n\",clf2.predict(X[0:N])) # X[0:N] to print predictions of N images\nprint(\"\\nWanted predictions:\\n\", y[0:N])\n\nprint(\"\\nList of 3 predicted images:\\n\",list(clf.predict(X[:3])))\nclf.fit(X,iris.target_names[y])\nprint(\"List of 3 predicted images with associated names:\\n\",list(clf.predict(X[:3])))\n\n\"\"\" In the case of scikit-learn it is more interesting to use joblib\nIt is more efficient on big data \"\"\"\nprint(\"\\nSave learning in 'filename.joblib'\\n\")\nfrom joblib import dump, load\ndump(clf, 'filename.joblib') # to save the trained model\n\nclf = load('filename.joblib') # to load the saved model\n\n\n","sub_path":"tuto_scikit-learn/save_training.py","file_name":"save_training.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"312722959","text":"#\n# @lc app=leetcode.cn id=13 lang=python3\n#\n# [13] 罗马数字转整数\n#\n# https://leetcode-cn.com/problems/roman-to-integer/description/\n#\n# algorithms\n# Easy (60.57%)\n# Likes: 790\n# Dislikes: 0\n# Total Accepted: 148.4K\n# Total Submissions: 244.8K\n# Testcase Example: 
'\"III\"'\n#\n# 罗马数字包含以下七种字符: I, V, X, L,C,D 和 M。\n#\n# 字符 数值\n# I 1\n# V 5\n# X 10\n# L 50\n# C 100\n# D 500\n# M 1000\n#\n# 例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为 X + II 。 27 写做  XXVII, 即为 XX + V +\n# II 。\n#\n# 通常情况下,罗马数字中小的数字在大的数字的右边。但也存在特例,例如 4 不写做 IIII,而是 IV。数字 1 在数字 5 的左边,所表示的数等于大数 5\n# 减小数 1 得到的数值 4 。同样地,数字 9 表示为 IX。这个特殊的规则只适用于以下六种情况:\n#\n#\n# I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。\n# X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。 \n# C 可以放在 D (500) 和 M (1000) 的左边,来表示 400 和 900。\n#\n#\n# 给定一个罗马数字,将其转换成整数。输入确保在 1 到 3999 的范围内。\n#\n# 示例 1:\n#\n# 输入: \"III\"\n# 输出: 3\n#\n# 示例 2:\n#\n# 输入: \"IV\"\n# 输出: 4\n#\n# 示例 3:\n#\n# 输入: \"IX\"\n# 输出: 9\n#\n# 示例 4:\n#\n# 输入: \"LVIII\"\n# 输出: 58\n# 解释: L = 50, V= 5, III = 3.\n#\n#\n# 示例 5:\n#\n# 输入: \"MCMXCIV\"\n# 输出: 1994\n# 解释: M = 1000, CM = 900, XC = 90, IV = 4.\n#\n#\n\n# @lc code=start\n\n\nclass Solution:\n def romanToInt(self, s: str) -> int:\n # number = 0\n # for i in range(len(s)):\n # if s[i] == 'M':\n # number += 1000\n # elif s[i] == \"D\":\n # number += 500\n # elif s[i] == \"C\":\n # try:\n # if s[i + 1] == \"D\" or s[i+1] == \"M\":\n # number -= 100\n # else:\n # number += 100\n # except:\n # number += 100\n # elif s[i] == \"L\":\n # number += 50\n # elif s[i] == \"X\":\n # try:\n # if s[i + 1] == \"L\" or s[i+1] == \"C\":\n # number -= 10\n # else:\n # number += 10\n # except:\n # number += 10\n # elif s[i] == \"V\":\n # number += 5\n # elif s[i] == \"I\":\n # try:\n # if s[i + 1] == \"V\" or s[i+1] == \"X\":\n # number -= 1\n # else:\n # number += 1\n # except:\n # number += 1\n # return number\n # 這裡使用字典來完成\n number = 0\n roma_dict = {\"I\": 1, \"V\": 5, \"X\": 10,\n \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n prenumber = roma_dict[s[0]]\n for i in s[1::]:\n if prenumber < roma_dict[i]:\n number -= prenumber\n else:\n number += prenumber\n prenumber = roma_dict[i]\n number += prenumber\n return number\n # I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。\n # X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。 \n # C 可以放在 D 
(500) 和 M (1000) 的左边,来表示 400 和 900。\n # @lc code=end\n","sub_path":"easy/13.罗马数字转整数.py","file_name":"13.罗马数字转整数.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"563233315","text":"from cv2 import imread\nfrom os import popen\nfrom flask import Flask, request\nfrom chainer import serializers, using_config\nfrom network import ResNet18\nimport math\n\nmodel = ResNet18()\napp = Flask(__name__)\n\ndef get_probs(y):\n s = sum([math.exp(x) for x in y])\n probs = [math.exp(x)/s for x in y]\n return probs\n\ndef load_model():\n serializers.load_npz('/mnt/NFS_Folder/Model/model', model)\n\n@app.route('/')\ndef home_endpoint():\n return 'Home'\n\n@app.route('/predict', methods=['POST'])\ndef get_prediction():\n if request.method == 'POST':\n imagefile = request.files.get('imagefile')\n filename = imagefile.filename\n imagefile.save(filename)\n imagefile.save('/mnt/NFS_Folder/Model/' + filename)\n\n img = imread(imagefile.filename).astype('float32')\n with using_config('train', False):\n y = model(img[None, ...]).array[0]\n\n prediction = y.argmax()\n probability = get_probs(y)[prediction]\n return str(prediction) +\",\"+str(probability)\n\n\nif __name__ == '__main__':\n load_model()\n ip = popen('ifconfig').read().split()[5]\n fpath = '/mnt/NFS_Folder/Model/ip_file.txt'\n with open(fpath, 'w') as ip_file:\n ip_file.write(ip)\n app.run(host='0.0.0.0',port=81)\n\n# curl -X POST 0.0.0.1:81/predict -F \"imagefile=@test_image-label14.jpg\"\n","sub_path":"src/inference_rest.py","file_name":"inference_rest.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"27598164","text":"#! 
/usr/bin/python3\nimport argparse\nimport binascii\nimport hashlib\nimport re\nimport sys\n\n# All the hard parts stolen from https://github.com/sipa/bech32/blob/master/ref/python/segwit_addr.py:\n\n# Copyright (c) 2017 Pieter Wuille\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Reference implementation for Bech32 and segwit addresses.\"\"\"\n\n\nCHARSET = \"qpzry9x8gf2tvdw0s3jn54khce6mua7l\"\n\n\ndef bech32_polymod(values):\n \"\"\"Internal function that computes the Bech32 checksum.\"\"\"\n generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]\n chk = 1\n for value in values:\n top = chk >> 25\n chk = (chk & 0x1ffffff) << 5 ^ value\n for i in range(5):\n chk ^= generator[i] if ((top >> i) & 1) else 0\n return chk\n\n\ndef bech32_hrp_expand(hrp):\n \"\"\"Expand the HRP into values for checksum computation.\"\"\"\n return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]\n\n\ndef bech32_verify_checksum(hrp, data):\n \"\"\"Verify a checksum given HRP and converted data characters.\"\"\"\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1\n\n\ndef bech32_create_checksum(hrp, data):\n \"\"\"Compute the checksum values given HRP and data.\"\"\"\n values = bech32_hrp_expand(hrp) + data\n polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1\n return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]\n\n\ndef bech32_encode(hrp, data):\n \"\"\"Compute a Bech32 string given HRP and data values.\"\"\"\n combined = data + bech32_create_checksum(hrp, data)\n return hrp + '1' + ''.join([CHARSET[d] for d in combined])\n\n\ndef bech32_decode(bech):\n \"\"\"Validate a Bech32 string, and determine HRP and data.\"\"\"\n if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or\n (bech.lower() != bech and bech.upper() != bech)):\n return (None, None)\n bech = bech.lower()\n pos = bech.rfind('1')\n if pos < 1 or pos + 7 > len(bech): #or len(bech) > 90:\n return (None, None)\n if not all(x in CHARSET for x in bech[pos+1:]):\n return (None, 
None)\n hrp = bech[:pos]\n data = [CHARSET.find(x) for x in bech[pos+1:]]\n if not bech32_verify_checksum(hrp, data):\n return (None, None)\n return (hrp, data[:-6])\n\n\ndef convertbits(data, frombits, tobits, pad=True):\n \"\"\"General power-of-2 base conversion.\"\"\"\n acc = 0\n bits = 0\n ret = []\n maxv = (1 << tobits) - 1\n max_acc = (1 << (frombits + tobits - 1)) - 1\n for value in data:\n if value < 0 or (value >> frombits):\n return None\n acc = ((acc << frombits) | value) & max_acc\n bits += frombits\n while bits >= tobits:\n bits -= tobits\n ret.append((acc >> bits) & maxv)\n if pad:\n if bits:\n ret.append((acc << (tobits - bits)) & maxv)\n elif bits >= frombits or ((acc << (tobits - bits)) & maxv):\n return None\n return ret\n\n\ndef decode(hrp, addr):\n \"\"\"Decode a segwit address.\"\"\"\n hrpgot, data = bech32_decode(addr)\n if hrpgot != hrp:\n return (None, None)\n decoded = convertbits(data[1:], 5, 8, False)\n if decoded is None or len(decoded) < 2 or len(decoded) > 40:\n return (None, None)\n if data[0] > 16:\n return (None, None)\n if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:\n return (None, None)\n return (data[0], decoded)\n\n\ndef encode(hrp, witver, witprog):\n \"\"\"Encode a segwit address.\"\"\"\n ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))\n assert decode(hrp, ret) is not (None, None)\n return ret\n\n######\n\n# Represent as a big-endian 32-bit number.\ndef u32list(val):\n assert val < (1 << 32)\n return [(val >> 24) & 0xff, (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff]\n\ndef from_u32list(l):\n return (l[0] << 24) + (l[1] << 16) + (l[2] << 8) + l[3]\n\ndef tagged(char, l):\n bits=convertbits(l, 8, 5)\n assert len(bits) < (1 << 10)\n return [CHARSET.find(char), len(bits) >> 5, len(bits) & 31] + bits\n\n# Try to pull out tagged data: returns tag, tagged data and remainder.\ndef pull_tagged(data):\n if len(data) < 3:\n sys.exit(\"Truncated field\")\n length = data[1] * 32 + data[2]\n if length > 
len(data) - 3:\n sys.exit(\"Truncated {} field: expected {} values\"\n .format(CHARSET[data[0]], length))\n return (CHARSET[data[0]], convertbits(data[3:3+length], 5, 8, False), data[3+length:])\n\ndef lnencode(options):\n # Minimize amounts using postfix:\n if options.amount % 1000000000 == 0:\n amount = str(options.amount // 1000000000) + 'g'\n elif options.amount % 1000000 == 0:\n amount = str(options.amount // 1000000) + 'm'\n elif options.amount % 1000 == 0:\n amount = str(options.amount // 1000) + 'k'\n else:\n amount = str(options.amount)\n \n hrp = 'ln' + options.currency + amount\n \n # version + paymenthash + channelid\n data = [0] + convertbits(list(binascii.unhexlify(options.paymenthash)) + list(binascii.unhexlify(options.channelid)), 8, 5)\n \n for r in options.route:\n pubkey,channel,fee,cltv = r.split('/')\n route = list(binascii.unhexlify(pubkey)) + list(binascii.unhexlify(channel)) + u32list(int(fee)) + u32list(int(cltv))\n data = data + tagged('r', route)\n \n if options.fallback:\n # FIXME: Take a real address here, and check and strip the checksum & currentcy.\n data = data + tagged('f', [ord(c) for c in options.fallback])\n \n if options.description:\n data = data + tagged('d', [ord(c) for c in options.description])\n \n if options.description_hashed:\n data = data + tagged('h', hashlib.sha256(options.description_hashed.encode('utf-8')).digest())\n\n # FIXME: We need privkey to generate signature.\n data = data + convertbits([0] * 64, 8, 5)\n\n print(bech32_encode(hrp, data))\n\ndef lndecode(options):\n hrp, data = bech32_decode(options.lnaddress)\n if not hrp:\n sys.exit(\"Bad bech32 checksum\")\n\n if not hrp.startswith('ln'):\n sys.exit(\"Does not start with ln\")\n\n if data[0] != 0:\n sys.exit(\"Unknown version {}\".format(data[0]))\n data = data[1:]\n\n m = re.search(\"\\d+\", hrp)\n if not m:\n sys.exit(\"Does not contain amount\")\n\n print(\"Currency: {}\".format(hrp[2:m.start()]))\n amount=int(m.group(0))\n # Postfix?\n if 
hrp[m.end():] == 'k':\n amount = amount * 1000\n elif hrp[m.end():] == 'm':\n amount = amount * 1000000\n elif hrp[m.end():] == 'g':\n amount = amount * 1000000000\n elif hrp[m.end():] != '':\n sys.exit(\"Unknown amount postfix \" + hrp[m.end():])\n\n print(\"Amount: {}\".format(amount))\n if options.rate:\n print(\"(Conversion: {})\".format(amount / 10**11 * float(options.rate)))\n\n # (32 + 8) bytes turns into 64 bytes when base32 encoded.\n if len(data) < 64:\n sys.exit(\"Not long enough ton contain payment hash and channel id\")\n\n decoded = convertbits(data[:64], 5, 8, False)\n data = data[64:]\n assert len(decoded) == 32 + 8\n print(\"Payment hash: {}\".format(binascii.hexlify(bytearray(decoded[0:32]))))\n print(\"Channel id: {}\".format(binascii.hexlify(bytearray(decoded[32:40]))))\n\n # Final signature takes 103 bytes (64 bytes base32 encoded)\n while len(data) > 103:\n tag,tagdata,data = pull_tagged(data)\n if tag == 'r':\n if len(tagdata) != 33 + 8 + 4 + 4:\n sys.exit('Unexpected r tag length {}'.format(len(tagdata)))\n print(\"Route: {}/{}/{}/{}\"\n .format(binascii.hexlify(bytearray(tagdata[0:33])),\n binascii.hexlify(bytearray(tagdata[33:41])),\n from_u32list(tagdata[41:45]),\n from_u32list(tagdata[45:49])))\n elif tag == 'f':\n # FIXME: Format address!\n print(\"Fallback: {}\"\n .format(binascii.hexlify(bytearray(tagdata))))\n elif tag == 'd':\n print(\"Description: {}\".format(''.join(chr(c) for c in tagdata)))\n elif tag == 'h':\n print(\"Description hash: {}\"\n .format(binascii.hexlify(bytearray(tagdata))))\n else:\n print(\"UNKNOWN TAG {}: {}\"\n .format(tag, binascii.hexlify(bytearray(tagdata))))\n\n # FIXME: check signature!\n sigdecoded = convertbits(data, 5, 8, False)\n if sigdecoded != [0] * 64:\n sys.exit(\"Bad signature\");\n\nparser = argparse.ArgumentParser(description='Encode lightning address')\nsubparsers = parser.add_subparsers(dest='subparser_name',\n help='sub-command help')\n\nparser_enc = subparsers.add_parser('encode', 
help='encode help')\nparser_dec = subparsers.add_parser('decode', help='decode help')\n\nparser_enc.add_argument('--currency', default='bc',\n help=\"What currency\")\nparser_enc.add_argument('--route', action='append', default=[],\n help=\"Extra route steps of form pubkey/channel/fee/cltv\")\nparser_enc.add_argument('--fallback',\n help='Fallback address for onchain payment')\nparser_enc.add_argument('--description',\n help='What is being purchased')\nparser_enc.add_argument('--description-hashed',\n help='What is being purchased (for hashing)')\nparser_enc.add_argument('amount', type=int, help='Amount in millisatoshi')\nparser_enc.add_argument('paymenthash', help='Payment hash (in hex)')\nparser_enc.add_argument('channelid', help='Channel id (in hex)')\nparser_enc.set_defaults(func=lnencode)\n\nparser_dec.add_argument('lnaddress', help='Address to decode')\nparser_dec.add_argument('--rate', type=float, help='Conversion amount for 1 currency unit')\nparser_dec.set_defaults(func=lndecode)\n\noptions = parser.parse_args()\nif not options.subparser_name:\n parser.print_help()\nelse:\n options.func(options)\n","sub_path":"lightning-address.py","file_name":"lightning-address.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"583450223","text":"import math\r\n\r\njarakkotaAB= 125\r\nkecepatankotaAB= 62\r\njarakkotaBC= 256\r\nkecepatankotaBC= 70\r\n\r\nwaktukotaAB= (math.ceil (jarakkotaAB/kecepatankotaAB*60))\r\nwaktukotaBC= (math.ceil (jarakkotaBC/kecepatankotaBC*60))\r\nistirahat= 45\r\n\r\njumlahwaktu= (waktukotaAB+waktukotaBC+istirahat)\r\n\r\nwaktuawal= 6\r\njumlahwaktudalamjam= (jumlahwaktu//60)+waktuawal\r\njumlahwaktudalammenit= (jumlahwaktu%60)\r\n\r\nprint('Pak Amir sampai dikota C pukul', (jumlahwaktudalamjam),'.',(jumlahwaktudalammenit) 
)\r\n","sub_path":"Tugas3/latihan4.py","file_name":"latihan4.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"64616387","text":"import asyncio\nimport datetime\nimport random\n\nimport websockets\n\n\nasync def echo(websocket, path):\n async for message in websocket:\n await websocket.send(message)\n\n\nasync def time(websocket, path):\n cid = random.randint(1, 100)\n while True:\n now = datetime.datetime.utcnow().isoformat() + 'Z'\n msg = f'{cid} {now}'\n print(msg)\n await websocket.send(msg)\n await asyncio.sleep(random.random() * 3)\n\n\nhost = '0.0.0.0'\nport = 8000\nfunc = time\n\nprint(func, host, port)\n\nasyncio.get_event_loop().run_until_complete(\n websockets.serve(func, host, port)\n)\nasyncio.get_event_loop().run_forever()\n","sub_path":"python/websockets_server.py","file_name":"websockets_server.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"583996682","text":"from back_test.model.base_option_set import BaseOptionSet\nfrom back_test.model.base_account import BaseAccount\nfrom back_test.model.base_instrument import BaseInstrument\nfrom back_test.model.base_option import BaseOption\nfrom data_access import get_data\nimport back_test.model.constant as c\nimport datetime\nimport numpy as np\nfrom Utilities.PlotUtil import PlotUtil\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\n\n\ndef get_option_unit(option_put: BaseOption, underlying_value: float):\n # unit = np.floor(underlying_value / option_put.strike() / option_put.multiplier()) # 期权名义本金等于标的市值\n unit = np.floor(underlying_value / option_put.underlying_close() / option_put.multiplier()) # 期权名义本金等于标的市值\n return unit\n\ndef buy_put(moneyness, maturity1):\n list_atm_call, list_atm_put = optionset.get_options_list_by_moneyness_mthd1(moneyness, maturity1)\n if list_atm_put is None:\n 
print('Given moneyness not available, choose min strike')\n list_atm_put = optionset.get_deepest_otm_put_list(maturity1)\n atm_put = optionset.select_higher_volume(list_atm_put)\n # unit = unit_underlying * underlying.multiplier()/atm_put.multiplier() # 50ETF\n unit = equal_50etf_unit * underlying.multiplier()/atm_put.multiplier() # 沪深300指数\n order = account.create_trade_order(atm_put, c.LongShort.LONG, unit)\n record = atm_put.execute_order(order, slippage=slippage)\n account.add_record(record, atm_put)\n premium = record[c.Util.TRADE_BOOK_VALUE]\n return atm_put,premium\n\n\n\nstart_date = datetime.date(2015, 2, 1)\n# start_date = datetime.date(2017, 2, 1)\nend_date = datetime.date(2018, 8, 31)\nd1 = start_date\nmin_holding = 15\nnbr_maturity = 1\nslippage = 0\npct_underlying_invest = 1.0\n\n##############\nalpha = 0.0\nmoneyness = -5\n#################\n\ndf_metrics = get_data.get_50option_mktdata(start_date, end_date)\ndf_option_underlying = get_data.get_index_mktdata(start_date, end_date, c.Util.STR_INDEX_50ETF)\ndf_underlying = get_data.get_index_mktdata(start_date, end_date, c.Util.STR_INDEX_300SH_TOTAL_RETURN)\n\n\ncalendar = c.Calendar(sorted(df_underlying[c.Util.DT_DATE].unique()))\npu = PlotUtil()\n\n\n\ndf = pd.DataFrame()\n#\nd1 = calendar.firstBusinessDayNextMonth(d1)\nd2 = d1 + datetime.timedelta(days=365)\nwhile d2 <= end_date:\n print(d1)\n df_metrics_1 = df_metrics[(df_metrics[c.Util.DT_DATE] >= d1)&(df_metrics[c.Util.DT_DATE] <= d2)].reset_index(drop=True)\n df_underlying_1 = df_underlying[(df_underlying[c.Util.DT_DATE] >= d1)&(df_underlying[c.Util.DT_DATE] <= d2)].reset_index(drop=True)\n # df_option_underlying_1 = df_option_underlying[(df_option_underlying[c.Util.DT_DATE] >= d1)&(df_option_underlying[c.Util.DT_DATE] <= d2)].reset_index(drop=True)\n df_underlying_with_alpha = df_underlying_1[[c.Util.DT_DATE, c.Util.ID_INSTRUMENT, c.Util.AMT_CLOSE]]\n df_underlying_with_alpha.loc[:, 'r'] = 
np.log(df_underlying_with_alpha[c.Util.AMT_CLOSE]).diff()\n df_underlying_with_alpha.loc[:, 'r1'] = np.log(df_underlying_with_alpha[c.Util.AMT_CLOSE]).diff() + alpha / 252\n df_underlying_with_alpha.loc[:, 'close_alpha'] = None\n p0 = df_underlying_with_alpha.loc[0, c.Util.AMT_CLOSE]\n for (idx, r) in df_underlying_with_alpha.iterrows():\n if idx == 0:\n df_underlying_with_alpha.loc[idx, 'close_alpha'] = df_underlying_with_alpha.loc[0, c.Util.AMT_CLOSE]\n else:\n df_underlying_with_alpha.loc[idx, 'close_alpha'] = df_underlying_with_alpha.loc[\n idx - 1, 'close_alpha'] * np.exp(\n df_underlying_with_alpha.loc[idx, 'r1'])\n\n df_underlying_with_alpha = df_underlying_with_alpha[\n [c.Util.DT_DATE, c.Util.ID_INSTRUMENT, c.Util.AMT_CLOSE, 'close_alpha']].rename(\n columns={c.Util.AMT_CLOSE: 'etf_close'})\n df_underlying_with_alpha = df_underlying_with_alpha.rename(columns={'close_alpha': c.Util.AMT_CLOSE})\n # df_underlying_with_alpha.to_csv('../accounts_data/df_underlying_with_alpha='+str(alpha)+'.csv')\n \"\"\" Init Portfolio and Account \"\"\"\n init_fund=10000000\n optionset = BaseOptionSet(df_metrics_1)\n optionset.init()\n underlying = BaseInstrument(df_underlying_with_alpha)\n underlying.init()\n account = BaseAccount(init_fund, leverage=1.0, rf=0.03)\n\n \"\"\" 初始开仓 \"\"\"\n unit_underlying = np.floor(pct_underlying_invest * account.cash / underlying.mktprice_close() / underlying.multiplier())\n order_underlying = account.create_trade_order(underlying, c.LongShort.LONG, unit_underlying)\n record_underlying = underlying.execute_order(order_underlying, slippage=slippage)\n account.add_record(record_underlying, underlying)\n maturity1 = optionset.select_maturity_date(nbr_maturity=nbr_maturity, min_holding=min_holding)\n equal_50etf_unit = unit_underlying*underlying.mktprice_close()/optionset.eligible_options[0].underlying_close()\n atm_put,premium = buy_put(moneyness,maturity1)\n\n # SH300指数\n\n total_premium = premium\n while optionset.has_next():\n \"\"\" 最终平仓 
\"\"\"\n if optionset.eval_date >= d2:\n print('Close out.')\n close_out_orders = account.creat_close_out_order()\n for order in close_out_orders:\n execution_record = account.dict_holding[order.id_instrument].execute_order(order, slippage=0,\n execute_type=c.ExecuteType.EXECUTE_ALL_UNITS)\n account.add_record(execution_record, account.dict_holding[order.id_instrument])\n account.daily_accounting(optionset.eval_date)\n break\n \" Roll to next maturity \"\n if optionset.eval_date > maturity1 - datetime.timedelta(days=30):\n order = account.create_close_order(atm_put)\n execution_record = account.dict_holding[order.id_instrument].execute_order(order, slippage=slippage,\n execute_type=c.ExecuteType.EXECUTE_ALL_UNITS)\n account.add_record(execution_record, account.dict_holding[order.id_instrument])\n maturity1 = optionset.select_maturity_date(nbr_maturity=nbr_maturity, min_holding=min_holding)\n atm_put,premium = buy_put(moneyness,maturity1)\n total_premium += premium + execution_record[c.Util.TRADE_BOOK_VALUE]\n\n account.daily_accounting(optionset.eval_date)\n optionset.next()\n underlying.next()\n\n # series_npv = account.account[c.Util.PORTFOLIO_NPV]\n # series_npv.iloc[-1] = series_npv.iloc[-1] * (1+alpha) # plus alpha\n analysis = account.get_netvalue_analysis(account.account[c.Util.PORTFOLIO_NPV])\n analysis['权利金占比'] = total_premium/init_fund\n df_underlying_with_alpha.loc[:,'npv_50etf'] = df_underlying_with_alpha.loc[:,'etf_close']/df_underlying_with_alpha.loc[0,'etf_close']\n analysis_50ETF = account.get_netvalue_analysis(df_underlying_with_alpha['npv_50etf'])\n df_underlying_with_alpha.loc[:,'npv_50etf_alpha'] = df_underlying_with_alpha.loc[:,c.Util.AMT_CLOSE]/df_underlying_with_alpha.loc[0,c.Util.AMT_CLOSE]\n analysis_50ETF_alpha = account.get_netvalue_analysis(df_underlying_with_alpha['npv_50etf_alpha'])\n\n df[str(d1)+':hedged'] = analysis\n df[str(d1)+':etf'] = analysis_50ETF\n df[str(d1)+':etf_alpha'] = analysis_50ETF_alpha\n d1 = 
calendar.firstBusinessDayNextMonth(d1)\n d2 = d1 + datetime.timedelta(days=365)\nprint(df)\ndf.to_csv('../../accounts_data/buy_put_rolling-sh300-_alpha='+str(alpha)+'_m='+str(moneyness)+'-unitmatch.csv')\n\n","sub_path":"OptionStrategyLib/OptionStrategy/protective_put/strategy_buy_put_rolling.py","file_name":"strategy_buy_put_rolling.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"106060599","text":"import sys\n\n\nclass ComplexNumber:\n def __init__(self, real=0.0, imaginary=0.0):\n self.realPart = real\n self.imagPart = imaginary\n return\n\n def __add__(self, other):\n return ComplexNumber(self.realPart + other.realPart,\n self.imagPart + other.imagPart)\n\n def __sub__(self, other):\n return ComplexNumber(self.realPart - other.realPart,\n self.imagPart - other.imagPart)\n\n def __mul__(self, other):\n return ComplexNumber(self.realPart * other.realPart -\n self.imagPart * other.imagPart,\n self.realPart * other.imagPart +\n other.realPart * self.imagPart)\n\n def __truediv__(self, other):\n down = other.realPart ** 2 + other.imagPart ** 2\n one = (self.realPart * other.realPart +\n self.imagPart * other.imagPart) / down\n two = (self.imagPart * other.realPart -\n self.realPart * other.imagPart) / down\n return ComplexNumber(one, two)\n\n def __str__(self):\n returningstr = ''\n if self.realPart != 0.0 and self.imagPart != 0.0:\n returningstr += \"{0:.2f}\".format(self.realPart)\n if self.imagPart > 0.0:\n returningstr += ' + ' + \\\n \"{0:.2f}\".format(self.imagPart) + 'i'\n else:\n returningstr += ' - ' + \\\n \"{0:.2f}\".format(abs(self.imagPart)) + 'i'\n elif self.imagPart == 0.0 and self.realPart != 0.0:\n returningstr += \"{0:.2f}\".format(self.realPart)\n elif self.realPart == 0.0 and self.imagPart != 0.0:\n returningstr += \"{0:.2f}\".format(self.imagPart) + 'i'\n else:\n returningstr += '0.00'\n return returningstr\n\nfor k in sys.stdin:\n 
print(eval(k))\n","sub_path":"ComplexNumber/ComplexNumber.py","file_name":"ComplexNumber.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"533641182","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport subprocess\nimport os\nimport re\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n '-p',\n '--project',\n default='t',\n choices=['t', 'm', 's', 'l'],\n help='The resolution config, can be \"t (test)\", \"m (MediaStreaming)\", \"s (StarMaker)\", \"l (StarMakerLite)\". '\n 'Defaults to \"t (test)\".')\n parser.add_argument(\n '-s',\n '--setting',\n action='store_true',\n default=False,\n help='adb pull Zorro SDK config file.')\n parser.add_argument(\n '-l',\n '--log',\n action='store_true',\n default=False,\n help='adb pull Zorro SDK log.')\n parser.add_argument(\n '-e',\n '--elog',\n action='store_true',\n default=False,\n help='adb pull Zorro SDK encrpyted log.')\n parser.add_argument(\n '-a',\n '--aaa',\n action='store_true',\n default=False,\n help='adb pull Zorro 3A PCM files.')\n parser.add_argument(\n '-c',\n '--cap',\n action='store_true',\n default=False,\n help='adb pull pcap.')\n parser.add_argument(\n '-d',\n '--decrypt',\n type=str,\n default='',\n help='Decrypt Zorro file.')\n parser.add_argument(\n '-r',\n '--rtcevent',\n type=str,\n default='',\n help='Pull and decrypt the RTC event file.')\n\n return parser.parse_args()\n\n\ndef get_sdk_file(app_name, log_name):\n log = \"/sdcard/Android/data/\";\n if app_name == \"zorro\":\n log += \"media.ushow.zorrodemo\"\n elif app_name == \"ams\":\n log += \"com.android.media.amsdemo\"\n elif app_name == \"starmaker\":\n log += \"com.starmakerinteractive.starmaker\"\n elif app_name == \"starmakerlite\":\n log += \"com.starmakerinteractive.thevoice\"\n\n log += \"/files/zorro/normal/\"\n log += log_name\n return log\n\n\ndef pull_app_file(args, 
file_name, dest_dir):\n if args.project == \"t\":\n file = get_sdk_file(\"zorro\", file_name)\n elif args.project == \"m\":\n file = get_sdk_file(\"ams\", file_name)\n elif args.project == \"s\":\n file = get_sdk_file(\"starmaker\", file_name)\n elif args.project == \"l\":\n file = get_sdk_file(\"starmakerlite\", file_name)\n\n cmd = \"adb pull \" + file + \" \" + dest_dir\n # print(cmd)\n dev_null = open(os.devnull, 'w')\n \n out = subprocess.call(cmd, shell=True, stdout=dev_null, stderr=dev_null)\n if out == 0:\n print(\"\\033[32madb pull \" + file + \" ---> \" + dest_dir + \"\\n\\033[0m\")\n else:\n print(\"\\033[31mError: adb pull \" + file + \" ---> \" + dest_dir + \"\\n\\033[0m\")\n return out\n\n\ndef main():\n args = parse_args()\n # print(args)\n\n pcap_name = \"1.pcap\"\n pcap_dir = \"/sdcard\"\n dest_dir = \"/Users/mike/Downloads\"\n log_name = \"zorro.txt\"\n zego_apm_processed_pcm = \"zorro_audio_48000_1_apmed_zg.pcm\"\n zorro_apm_processed_pcm = \"zorro_audio_48000_1_apmed_zr.pcm\"\n pulled_pcm = \"zorro_audio_48000_1_pulled.pcm\"\n pushed_pcm = \"zorro_audio_48000_1_pushed.pcm\"\n elog_name = \"ezorro.txt\"\n delog_name = \"ezorro.dec\"\n config_name = \"zorro\"\n rtc_event_log_parser_dir = os.environ.get(\"RTC_EVENT_LOG_PARSER_HOME\")\n\n webrtc_dir = os.environ.get(\"ANDROID_WEBRTC_HOME\")\n decrpyt_file = os.path.join(webrtc_dir, \"zorro/tools/spdlog_decrypt\", \"spdlog-decrypt-1.0.jar\")\n\n pcap = os.path.join(pcap_dir, pcap_name)\n\n if args.cap:\n cmd = \"adb pull \" + pcap + \" \" + dest_dir\n # print(cmd)\n print(\"\\033[32madb pull \" + pcap + \" ---> \" + dest_dir + \"\\n\\033[0m\")\n subprocess.call(cmd, shell=True)\n local_pcap = os.path.join(dest_dir, pcap_name)\n cmd = \"wireshark \" + local_pcap\n subprocess.call(cmd, shell=True)\n return 0\n\n if args.setting:\n if args.project == \"t\":\n config = get_sdk_file(\"zorro\", config_name)\n elif args.project == \"m\":\n config = get_sdk_file(\"ams\", config_name)\n elif args.project == 
\"s\":\n config = get_sdk_file(\"starmaker\", config_name)\n elif args.project == \"l\":\n config = get_sdk_file(\"starmakerlite\", config_name)\n\n cmd = \"adb pull \" + config + \" \" + dest_dir\n # print(cmd)\n print(\"\\033[32madb pull \" + config + \" ---> \" + dest_dir + \"\\n\\033[0m\")\n subprocess.call(cmd, shell=True)\n local_config = os.path.join(dest_dir, config_name)\n cmd = \"subl -- \" + local_config\n subprocess.call(cmd, shell=True)\n return 0\n\n if args.log:\n out = pull_app_file(args, log_name, dest_dir)\n if out == 0:\n dest_file = os.path.join(dest_dir, log_name)\n cmd = \"subl -- \" + dest_file\n # print(cmd)\n subprocess.call(cmd, shell=True)\n return 0\n\n if args.elog:\n out = pull_app_file(args, elog_name, dest_dir)\n if out == 0:\n dest_file = os.path.join(dest_dir, elog_name)\n # print(dest_file )\n output = os.path.join(dest_dir, delog_name)\n cmd = \"java -jar \" + decrpyt_file + \" \" + dest_file + \" \" + output\n # print(cmd)\n subprocess.call(cmd, shell=True)\n cmd = \"subl -- \" + output\n subprocess.call(cmd, shell=True)\n return 0\n\n if args.aaa:\n pull_app_file(args, zego_apm_processed_pcm, dest_dir)\n pull_app_file(args, zorro_apm_processed_pcm, dest_dir)\n pull_app_file(args, pulled_pcm, dest_dir)\n pull_app_file(args, pushed_pcm, dest_dir)\n return 0\n\n if args.rtcevent:\n out = pull_app_file(args, args.rtcevent, dest_dir)\n # if out == 0:\n fname, fename = os.path.splitext(args.rtcevent)\n if fename == \".log\":\n dest_file = os.path.join(dest_dir, args.rtcevent)\n out_file = os.path.join(dest_dir, fname + \".pcapng\")\n print(\"out_file: \" + out_file + \" dest:\" + dest_file);\n os.chdir(rtc_event_log_parser_dir)\n cmd = \"node rtp.js \" + dest_file + ' | text2pcap -D -4 2.2.2.2,1.1.1.1 -u 20000,10000 -t \"%T.\" -n - ' + out_file\n subprocess.call(cmd, shell=True)\n\n print(\"\\033[32mOutput: \" + out_file + \"\\n\\033[0m\")\n\n cmd = \"wireshark \" + out_file\n subprocess.call(cmd, shell=True)\n return 0\n\n if 
args.decrypt:\n file = args.decrypt\n # print(file)\n output = file + \".dec\"\n subprocess.call(['java', '-jar', decrpyt_file, file, output], shell=False)\n subprocess.call(['subl', '--', output], shell=False)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n\n","sub_path":"epull.py","file_name":"epull.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"364942709","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Chronic Disease Indicators Module\n\n.. moduleauthor:: Timothy Helton \n\"\"\"\n\nfrom collections import OrderedDict\nimport logging\nimport os.path as osp\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\nlog_format = ('%(asctime)s %(levelname)8s -> %(name)s <- '\n '(line: %(lineno)d) %(message)s\\n')\ndate_format = '%m/%d/%Y %I:%M:%S'\nlogging.basicConfig(format=log_format, datefmt=date_format,\n level=logging.INFO)\n\n\ndata_file = osp.join('..', 'data', 'chronic_disease_indicators',\n 'US_Chronic_Disease_Indicators__CDI.csv')\ndata_dtype = OrderedDict({\n 'YearStart': int,\n 'YearEnd': int,\n 'LocationAbbr': str,\n 'LocationDesc': str,\n 'DataSource': str,\n 'Topic': str,\n 'Question': str,\n 'Response': str,\n 'DataValueUnit': str,\n 'DataValueTypeID': str,\n 'DataValueType': str,\n 'DataValue': str,\n 'DataValueAlt': float,\n 'DataValueFootnoteSymbol': str,\n 'DatavalueFootnote': str,\n 'LowConfidenceLimit': float,\n 'HighConfidenceLimit': float,\n 'StratificationCategory1': str,\n 'Stratification1': str,\n 'StratificationCategory2': str,\n 'Stratification2': str,\n 'StratificationCategory3': str,\n 'Stratification3': str,\n 'GeoLocation': str,\n 'TopicID': str,\n 'QuestionID': str,\n 'ResponseID': str,\n 'LocationID': str,\n 'StratificationCategoryID1': str,\n 'StratificationID1': str,\n 'StratificationCategoryID2': str,\n 'StratificationID2': str,\n 'StratificationCategoryID3': str,\n 
'StratificationID3': str,\n})\n\ncol_names = (\n 'yr_start',\n 'yr_end',\n 'loc_abbr',\n 'loc_desc',\n 'data_src',\n 'topic',\n 'question',\n 'response',\n 'data_unit',\n 'data_type_id',\n 'data_type',\n 'data_value',\n 'data_value_alt',\n 'footnote_symbol',\n 'footnote',\n 'low_conf',\n 'high_conf',\n 'strat_cat_1',\n 'strat_1',\n 'strat_cat_2',\n 'strat_2',\n 'strat_cat_3',\n 'strat_3',\n 'geo_loc',\n 'topic_id',\n 'question_id',\n 'response_id',\n 'loc_id',\n 'strat_cat_1_id',\n 'strat_1_id',\n 'strat_cat_2_id',\n 'strat_2_id',\n 'strat_cat_3_id',\n 'strat_3_id',\n)\n\n\nclass CDI:\n \"\"\"Actions related to the US Chronic Disease Indicators dataset.\n \n :param str data_path: path to data file\n \n :Attributes:\n \n **data**: *pandas.DataFrame* us chronic disease indicators data\n **data_path**: *str* path to data file \n \n \"\"\"\n def __init__(self, data_path=data_file, data_cols=col_names,\n dtype=data_dtype):\n self.data = None\n self.data_cols = data_cols\n self.data_path = osp.realpath(data_path)\n self.diseases = None\n self.dtype = dtype\n\n def __repr__(self):\n return f\"CDI()\"\n\n def load_data(self):\n \"\"\"Load data into a Pandas DataFrame.\"\"\"\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')\n\n def get_diseases(self):\n \"\"\"Get diseases from data.\"\"\"\n self.diseases = self.data.groupby('topic')['topic'].count()\n\n def plot_diseases(self, save=False):\n \"\"\"Create plot with bar plot above two pie plots that share categories. 
\n\n :param bool save: if True the plot will be saved as .png\n \"\"\"\n label_size = 14\n title_size = 24\n fig = plt.figure('Diseases Figure', figsize=(10, 16),\n facecolor='white', edgecolor='black')\n rows, cols = (3, 1)\n ax1 = plt.subplot2grid((rows, cols), (0, 0))\n ax2 = plt.subplot2grid((rows, cols), (1, 0), rowspan=2)\n\n self.diseases.plot(kind='bar', alpha=0.5, ax=ax1)\n ax1.set_xlabel('Diseases', fontsize=label_size)\n ax1.set_ylabel('Records', fontsize=label_size)\n\n self.diseases.plot(kind='pie', autopct='%i%%',\n labeldistance=1.05, pctdistance=0.9, shadow=True,\n startangle=90, ax=ax2)\n ax2.set_title('Percentage of Data by Disease', fontsize=title_size,\n y=0.94)\n ax2.set_ylabel('')\n\n plt.suptitle('Diseases in Data', fontsize=title_size, y=1.03)\n plt.tight_layout()\n\n if save:\n plt.savefig('diseases_data.png')\n else:\n plt.show()\n","sub_path":"k2datascience/chronic_disease.py","file_name":"chronic_disease.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"507899838","text":"# -*- coding: utf-8 -*-\nimport pandas, utils, os, excel_template, MasterData\nimport spold2_reader as spold2\nimport pyprind\nimport copy\n\n# Define paths and file names\n#version = '3.4'\n#system_model = 'Undefined'\n#dataset_list_folder = r'C:\\Dropbox (ecoinvent)\\ei-david\\Technical tasks\\Metals\\Aluminium'\n#dataset_list_filename = 'Aluminium datasets correspondence list.xlsx'\n#dataset_list_tab = 'Electricity datasets'\n#dataset_list = pandas.read_excel(os.path.join(dataset_list_folder, dataset_list_filename), dataset_list_tab)\noutput_folder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\external\\SRI\\data collection\\SRI_water supply\\technical\\datasets\\pump operation and infrastructure datasets\\local datasets'\noutput_filename = 'water pump operation - local datasets.xlsx'\n\n# Get activity overview and spold file names\ndataset_folder = r'C:\\Dropbox 
(ecoinvent)\\ei-int\\technical\\external\\SRI\\data collection\\SRI_water supply\\technical\\datasets\\pump operation and infrastructure datasets\\local datasets'\n#dataset_folder = os.path.join(utils.version_system_model_path(version, system_model), 'datasets')\n#ao_pkl_folder = os.path.join(utils.version_system_model_path(version, system_model), 'pkl')\n#ao = utils.pkl_load(ao_pkl_folder, 'ao')\n#ao_for_AL = utils.ao_for_AL(ao, direction = 'id_to_activity_geo', accelerate = False)\n#ao = ao[ao['activityName'].isin(dataset_list['activityName'])]\n#ao = ao[ao['activityName'] == 'zinc-lead mine operation']\n#filelist = set(ao['filename'])\ngeographies = ['BR',\n'CA-QC',\n'CH',\n'CN',\n'CO',\n'DE',\n'ES',\n'FR',\n'IN',\n'MA',\n'MY',\n'PE',\n'PH',\n'TN',\n'US',\n'ZA']\n\n#dataset_folder = r'C:\\Users\\David\\Desktop\\Files to convert'\nfilelist = utils.build_file_list(dataset_folder, extension = 'spold')\n\ndatasets = []\ncounter = 0\nfor filename in filelist:\n f = spold2.Dataset(dataset_folder, filename) #, ao_for_AL = ao_for_AL)\n for geo in geographies:\n f_local = copy.copy(f)\n f_local.geography = geo\n datasets.append(f_local)\n counter += 1\n print(counter)\n# datasets = [f]\n# output_filename = filename[0:len(filelist[0])-6]+'.xlsx'\n# excel_template.assemble_for_templates(datasets, output_folder, output_filename, \n# ao_for_AL = ao_for_AL)\n \n\n\n#MD = MasterData.get_current_MD()\n#MD['IntermediateExchanges prop.'] = MD['IntermediateExchanges prop.'].set_index('name')\n#MD['ElementaryExchanges prop.'] = MD['ElementaryExchanges prop.'].set_index(\n# ['name', 'compartment', 'subcompartment']).sortlevel(level=0)\n#\n#properties = []\n#for f in pyprind.prog_bar(datasets, title = 'constructing dataframes'):\n# f.merge_parent_child()\n# f.add_activityLinks(ao_for_AL)\n# f.dfs_for_excel_template(MD, only_meta = False)\n# properties.extend(f.properties_for_template)\n \n \nexcel_template.assemble_for_templates(datasets, output_folder, output_filename)\n# ao_for_AL 
= ao_for_AL)\n","sub_path":"projects/David/datasets_to_template.py","file_name":"datasets_to_template.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"19744181","text":"import tensorflow as tf\r\nfrom preprocess import *\r\nimport pickle\r\ntf.flags.DEFINE_integer(\"batch_size\", 128, \"The size of each batch.\")\r\ntf.flags.DEFINE_integer(\"num_epoch\", 100, \"Number epoches.\")\r\ntf.flags.DEFINE_boolean(\"ja2ch\", False, \"Whether to translate Japanese into Chinese.\")\r\ntf.flags.DEFINE_integer('eval_freq', 10, 'Evaluate after certain steps.')\r\ntf.flags.DEFINE_integer('save_freq', 10, 'Save after certain steps.')\r\nFLAGS = tf.flags.FLAGS\r\nwith open(\"./data/ch_vocab.txt\", \"rb\") as ch_vocab_f,\\\r\n\topen(\"./data/ja_vocab.txt\", \"rb\") as ja_vocab_f:\r\n\tch_vocab = pickle.loads(ch_vocab_f.read())\r\n\tja_vocab = pickle.loads(ja_vocab_f.read())\r\nwith open(\"./data/ja_train.txt\", \"rb\") as ja_train_f,\\\r\n\topen(\"./data/ch_train.txt\", \"rb\") as ch_train_f,\\\r\n\topen(\"./data/ja_eval.txt\", \"rb\") as ja_eval_f,\\\r\n\topen(\"./data/ch_eval.txt\", \"rb\") as ch_eval_f:\r\n\tja_train = pickle.loads(ja_train_f.read())\r\n\tch_train = pickle.loads(ch_train_f.read())\r\n\tja_eval = pickle.loads(ja_eval_f.read())\r\n\tch_eval = pickle.loads(ch_eval_f.read())\r\nja_iter = batch_iter(ja_train, FLAGS.batch_size, FLAGS.num_epoch)\r\nch_iter = batch_iter(ch_train, FLAGS.batch_size, FLAGS.num_epoch)\r\nif FLAGS.ja2ch == True:\r\n\ten_iter = ja_iter\r\n\ten_eval = ja_eval\r\n\ten_vocab = ja_vocab\r\n\tde_iter = ch_iter\r\n\tde_eval = ch_eval\r\n\tde_vocab = ch_vocab\r\nelse:\r\n\ten_iter = ch_iter\r\n\ten_eval = ch_eval\r\n\ten_vocab = ch_vocab\r\n\tde_iter = ja_iter\r\n\tde_eval = ja_eval\r\n\tde_vocab = ja_vocab\r\nen_indexer = {v:k for k,v in en_vocab.items()}\r\nde_indexer = {v:k for k,v in de_vocab.items()}\r\nfor epoch in 
range(FLAGS.num_epoch):\r\n\tfor train_batch in zip(en_iter, de_iter):\r\n\t\twith tf.Session() as sess:\r\n\t\t\tckpt = tf.train.get_checkpoint_state('./ckpt') \r\n\t\t\tsaver = tf.train.import_meta_graph(ckpt.model_checkpoint_path +'.meta')\r\n\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\r\n\t\t\tgraph = tf.get_default_graph()\r\n\t\t\tglobal_step = graph.get_tensor_by_name(\"transformer/final_part/global_step:0\")\r\n\t\t\ttrain_step = graph.get_tensor_by_name(\"transformer/final_part/train_step:0\")\r\n\t\t\tloss = graph.get_tensor_by_name(\"transformer/final_part/loss:0\")\r\n\t\t\tinput_en = graph.get_tensor_by_name(\"transformer/embedding_part/input_en:0\")\r\n\t\t\tinput_de = graph.get_tensor_by_name(\"transformer/embedding_part/input_de:0\")\r\n\t\t\tglobal_step_value, _, loss_result = sess.run((global_step, train_step, loss),\\\r\n\t\t\t feed_dict={input_en: train_batch[0], input_de: train_batch[1]})\r\n\t\t\tprint(\"After %d steps, the loss is %d.\" % (global_step_value, loss_result))\r\n\t\t\t# summary_writer.close()\r\n\t\t\t# exit()\r\n\t\t\tif global_step_value%FLAGS.save_freq==0:\r\n\t\t\t\tsaver.save(sess, \"./ckpt/my_model\")\r\n\r\n\t\t\tif global_step_value%FLAGS.eval_freq==0:\r\n\t\t\t\tprediction_result = sess.run(prediction, feed_dict={input_en: en_eval, input_de: de_eval})\r\n\t\t\t\t# print(\"prediction result:\"+str(prediction[0]))\r\n\t\t\t\t# bleu_score = bleu(prediction, de_eval)\r\n\t\t\t\ttranslate_ori = [[en_indexer[char] for char in batch] for batch in en_eval]\r\n\t\t\t\ttranslate_obj = [[de_indexer[char] for char in batch] for batch in de_eval]\r\n\t\t\t\ttranslate_resl = [[de_indexer[char] for char in batch] for batch in prediction_result]\r\n\t\t\t\tprint(\"The original sentence:\" + str(translate_ori[0]))\r\n\t\t\t\tprint(\"The objective sentence:\" + str(translate_obj[0]))\r\n\t\t\t\tprint(\"The result sentence:\" + 
str(translate_resl[0]))","sub_path":"machine_trainslation_2nd/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"253578438","text":"#!/usr/bin/python3.6 \nimport subprocess\nimport re\nimport requests\n\n# Store Mac address of all nodes here\nsaved = {\n 'xx:xx:xx:xx:xx:xx': 'My laptop',\n}\n\n# Set wireless interface using ifconfig\ninterface = \"wlan0\"\n\nmac_regex = re.compile(r'([a-zA-Z0-9]{2}:){5}[a-zA-Z0-9]{2}')\n\n\ndef parse_arp():\n arp_out = subprocess.check_output(f'arp -e -i {interface}', shell=True).decode('utf-8')\n if 'no match found' in arp_out:\n return None\n\n result = []\n for lines in arp_out.strip().split('\\n'):\n line = lines.split()\n if interface in line and '(incomplete)' not in line:\n for element in line:\n # If its a mac addr\n if mac_regex.match(element):\n result.append((line[0], element))\n return result\n\n\ndef get_mac_vendor(devices):\n num = 0\n for device in devices:\n try:\n url = f\"http://api.macvendors.com/{device[1]}\"\n try:\n vendor = requests.get(url).text\n except Exception as e:\n print(e)\n vendor = None\n\n except Exception as e:\n print(\"Error occured while getting mac vendor\", e)\n\n num += 1\n print_device(device, num, vendor)\n\ndef print_device(device, num=0, vendor=None):\n device_name = saved[device[1]] if device[1] in saved else 'unrecognised !!'\n\n print(f'\\n{num})', device_name, '\\nVendor:', vendor, '\\nMac:', device[1], '\\nIP: ',device[0])\n\nif __name__ == '__main__':\n print('Retrieving connected devices ..')\n\n devices = parse_arp()\n\n if not devices:\n print('No devices found!')\n\n else:\n print('Retrieving mac vendors ..')\n try:\n get_mac_vendor(devices)\n\n except KeyboardInterrupt as e:\n num = 0\n for device in devices:\n num += 1\n print_device(device, 
num)","sub_path":"connectivity/scripts/arp.py","file_name":"arp.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"360644071","text":"from functools import wraps\n\n\ndef async_cache(fn):\n cache = {}\n\n @wraps(fn)\n async def wrapper(*args, **kwargs):\n key = (args, tuple(kwargs.items()))\n if key not in cache:\n cache[key] = await fn(*args, **kwargs)\n return cache[key]\n wrapper.cache = cache\n\n return wrapper\n","sub_path":"src/thelocals/utils/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"113302386","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nfrom app_settings import AppSettings\nfrom numpy import random as nrand\nfrom scipy.stats import nbinom\nfrom scipy.stats import poisson\n\n\nclass Chromosome:\n \"\"\"The genetics of the system.\"\"\"\n \n def __init__(self, sequence = None):\n if sequence is not None: \n self.sequence = sequence\n return\n \n cfg = AppSettings()\n self.sequence = \"\"\n p = cfg.genetics.chromosome_length/(1 + cfg.genetics.chromosome_length)\n \n while(random.random() <= p):\n self.sequence += random.choice(self.nucleotides())\n \n def substitutions(self):\n \"\"\"Randomly changes charcters in the dna according to the poisson \n distribution. expected changes length * mutation rate * nonsynomous\n substitutions.\"\"\"\n \n cfg = AppSettings()\n num = poisson.rvs(cfg.genetics.mutation_rate * len(self.sequence))\n positions = nrand.randint(0, len(self.sequence), size=num)\n for pos in positions:\n self.sequence = self.sequence[:pos] + \\\n random.choice(self.nucleotides()) + \\\n self.sequence[(pos + 1):]\n \n def deletion(self):\n \"\"\"Deletes a random sequence of characters from a random position on the string. 
\n The length is taken from the negative binomial distribution.\"\"\"\n \n # Prevents an out of bounds error for random.randint()\n if(len(self.sequence) == 0):\n return\n \n pos = random.randint(0, len(self.sequence) - 1)\n \n cfg = AppSettings()\n p = cfg.genetics.mutation_length/(1 + cfg.genetics.mutation_length)\n \n while(random.random() <= p):\n self.sequence = self.sequence[:pos] + self.sequence[(pos + 1):]\n \n \n def insertion(self):\n \"\"\"Inserts a random length of random nucleotide characters into a sequence\n at a random location. \"\"\"\n \n if(len(self.sequence) <= 1):\n pos = 0\n else:\n pos = random.randint(0, len(self.sequence) - 1)\n \n cfg = AppSettings()\n p = cfg.genetics.mutation_length/(1 + cfg.genetics.mutation_length)\n \n while(random.random() <= p):\n self.sequence = self.sequence[:pos] + \\\n random.choice(self.nucleotides()) + \\\n self.sequence[pos:]\n \n \n def inversion(self):\n \n if(len(self.sequence) <= 1):\n pos = 0\n else:\n pos = random.randint(0, len(self.sequence) - 1)\n \n cfg = AppSettings()\n p = cfg.genetics.mutation_length/(1 + cfg.genetics.mutation_length)\n \n length = nbinom.rvs(\n 1, \n cfg.genetics.mutation_length/(1 + cfg.genetics.mutation_length)\n )\n \n self.sequence = self.sequence[:pos] + \\\n self.sequence[pos:(pos + length)][::-1] + \\\n self.sequence[(pos + length):]\n \n\n \n \n\n @staticmethod\n def nucleotides():\n \"\"\"Returns all the nucleotides that can be used in a dna string\"\"\"\n cfg = AppSettings()\n return(\n cfg.genetics.behaviors + \n cfg.genetics.gene_delimiter + \n cfg.genetics.receptor_delimiter + \n cfg.genetics.wildcards\n )\n ","sub_path":"coop_evolve/genetics/chromosome.py","file_name":"chromosome.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"28671230","text":"from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect\nfrom django.http import 
HttpResponseForbidden, HttpResponseBadRequest\nimport json\nfrom rest_framework import viewsets\nfrom .models import Simresults, SimresultsSingle, Graph\nfrom .serializers import *\n\n\nclass SimresultViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = SimresultsSingle.objects.all()\n serializer_class = SimresultSerializer\n paginate_by = None\n\n\ndef noise_vs_x(request):\n plot_factor = request.GET.get('plot_factor', 'competence')\n plot_y_axis = request.GET.get('plot_y_axis', 'sa')\n trust_used = request.GET.get('trust_used', 'FALSE')\n\n agf_str = request.GET.get('agf', '1')\n agf = agf_str.split(',')\n agf = set([int(x) for x in agf if x.isdigit()])\n\n if not agf.intersection(set([1, 3])):\n return HttpResponse(json.dumps({'status': 'failed',\n 'reason': 'invalid agents per fact'}),\n content_type='application/json')\n\n results = Simresults.objects.noise_sa(plot_factor=plot_factor,\n plot_y_axis=plot_y_axis,\n agf=agf_str,\n trust_used=trust_used.upper())\n\n results_hash = {}\n results_arr = []\n\n for r in results:\n result_dict = r.__dict__\n results_hash[result_dict[plot_factor]] = {}\n results_hash[result_dict[plot_factor]]['agf'] = {}\n for agf in agf_str.split(','):\n results_hash[result_dict[plot_factor]]['agf'][agf] = {}\n\n for r in results:\n result_dict = r.__dict__\n results_hash[result_dict[plot_factor]]['agf'][str(result_dict['agent_per_fact'])][result_dict['noise']] = result_dict[plot_y_axis]\n\n results_arr.append(results_hash)\n return HttpResponse(json.dumps({'results': results_arr}),\n content_type='application/json')\n\ndef comm_per_sa(request):\n results_arr = []\n results_hash = Graph.objects.filter(name='comm_per_sa')[0]\n results_arr.append(results_hash.__dict__['data'][0])\n return HttpResponse(json.dumps({'results': results_arr}),\n 
content_type='application/json')\n","sub_path":"simulations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"392707896","text":"import pyMT.data_structures as WSDS\nimport pyMT.utils as utils\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# list_file = r'C:/Users/eric/phd/ownCloud/Metal Earth/Data/WinGLinkEDIs_final/plot.lst'\n# list_file = 'C:/Users/eric/ownCloud/data/Regions/swayze/j2/all.lst'\nlist_file = 'C:/Users/eroots/phd/ownCloud/data/Regions/MetalEarth/j2/allbb.lst'\ndata = WSDS.RawData(list_file)\n# sites = ['ATT019M', 'SWZ032M']\nsites = ['p92001', 'p92002']\naxes = []\nfig = plt.figure(figsize=(12, 6))\naxes.append(plt.subplot2grid((3, 2), (0, 0), rowspan=2))\naxes.append(plt.subplot2grid((3, 2), (2, 0), rowspan=1))\naxes.append(plt.subplot2grid((3, 2), (0, 1), rowspan=2))\naxes.append(plt.subplot2grid((3, 2), (2, 1), rowspan=1))\n# site = data.sites[sites[0]]\nfor ii, name in enumerate(sites):\n site = data.sites[name]\n rhoxy, rhoxy_err, rhoxy_log10err = utils.compute_rho(site, calc_comp='rhoxy', errtype='errors')\n rhoyx, rhoyx_err, rhoyx_log10err = utils.compute_rho(site, calc_comp='rhoyx', errtype='errors')\n phaxy, phaxy_err = utils.compute_phase(site, calc_comp='phaxy', wrap=True, errtype='errors')[:2]\n phayx, phayx_err = utils.compute_phase(site, calc_comp='phayx', wrap=True, errtype='errors')[:2]\n axes[(ii) * 2].errorbar(np.log10(site.periods), np.log10(rhoxy), xerr=None,\n yerr=rhoxy_log10err, marker='o',\n linestyle='', color='b',\n markersize=5, label='XY')\n axes[(ii) * 2].errorbar(np.log10(site.periods), np.log10(rhoyx), xerr=None,\n yerr=rhoyx_log10err, marker='o',\n linestyle='', color='r',\n markersize=5, label='YX')\n axes[(ii) * 2 + 1].errorbar(np.log10(site.periods), phaxy, xerr=None,\n yerr=phaxy_err, marker='o',\n linestyle='', color='b',\n markersize=5, label='XY')\n axes[(ii) * 2 + 
1].errorbar(np.log10(site.periods), phayx, xerr=None,\n yerr=phayx_err, marker='o',\n linestyle='', color='r',\n markersize=5, label='YX')\n # axes[(ii) * 2].loglog((site.periods), (rhoyx), 'ro', markersize=5, label='YX')\n # axes[(ii) * 2].set_xlabel('Period (s)', fontsize=14)\n if ii == 0:\n axes[(ii) * 2].set_ylabel(r'Apparent Resistivity (${\\Omega}$-m)', fontsize=14)\n axes[(ii) * 2 + 1].set_ylabel(r'Degrees (${\\degree}$)', fontsize=14)\n axes[(ii) * 2].legend(loc=1)\n axes[(ii) * 2].tick_params(axis='both', labelsize=12)\n axes[(ii) * 2].set_title(name, fontsize=16)\n axes[(ii) * 2].set_ylim([0, 6])\n # axes[(ii) * 2].set_ylim([1e0, 1e5])\n # axes[(ii) * 2 + 1].semilogx((site.periods), (phaxy), 'bo', markersize=5, label='XY')\n # axes[(ii) * 2 + 1].semilogx((site.periods), (phayx), 'ro', markersize=5, label='YX')\n axes[(ii) * 2 + 1].set_xlabel('Period (s)', fontsize=14)\n \n axes[(ii) * 2 + 1].legend(loc=1)\n axes[(ii) * 2 + 1].set_ylim([0, 180])\n axes[(ii) * 2 + 1].tick_params(axis='both', labelsize=12)\n\nfig.tight_layout()\n# plt.savefig('C:/Users/eric/phd/ownCloud/Documents/Seminars/Seminar 3/Figures/data_example.png', dpi=300)\nplt.show()\n","sub_path":"pyMT/scripts/plot_rhopha_nice.py","file_name":"plot_rhopha_nice.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"190515741","text":"import unittest\nfrom database.models.Serie import Serie\nfrom flask_app.flask_app import db\n\n\nclass TestDatabase(unittest.TestCase):\n def setUp(self):\n db.reflect()\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n db.session.rollback()\n db.reflect()\n db.drop_all()\n\n def test_serie(self):\n voyager = Serie(\"Voyager\", 1994, 2001)\n tos = Serie(\"TOS\", 1966, 1967)\n\n db.session.add(voyager)\n db.session.add(tos)\n db.session.commit()\n\n alleged_voyager = Serie.query.filter_by(name=\"Voyager\").first()\n alleged_tos = 
Serie.query.filter_by(name=\"TOS\").first()\n\n self.assertEqual(alleged_voyager.name, \"Voyager\")\n self.assertEqual(alleged_tos.name, \"TOS\")\n","sub_path":"tests/unittests/test_database_serie.py","file_name":"test_database_serie.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"899485","text":"#! /usr/bin/python\n\n# Import the core Python modules for ROS and to implement ROS Actions:\nimport rospy\nimport actionlib\n\n# Import all the necessary ROS message types:\nfrom com2009_actions.msg import SearchFeedback, SearchResult, SearchAction\nfrom sensor_msgs.msg import LaserScan\n\n# Import some other modules from within this package\nfrom move_tb3 import MoveTB3\nfrom tb3_odometry import TB3Odometry\n\n# Import some other useful Python Modules\nfrom math import sqrt, pow\nimport numpy as np\n\nclass SearchActionServer(object):\n feedback = SearchFeedback() \n result = SearchResult()\n\n def __init__(self):\n self.actionserver = actionlib.SimpleActionServer(\"/search_action_server\", \n SearchAction, self.action_server_launcher, auto_start=False)\n self.actionserver.start()\n\n #subscribe to lidar scan data node\n self.scan_subscriber = rospy.Subscriber(\"/scan\",\n LaserScan, self.scan_callback)\n\n self.min_distance = 0.5\n self.object_angle = 0\n self.robot_controller = MoveTB3()\n self.robot_odom = TB3Odometry()\n self.arc_angles = np.arange(-40, 41)\n \n #lidar scan callback function - update new scan points and vision in front of robot\n def scan_callback(self, scan_data):\n \n left_arc = scan_data.ranges[0:37]\n right_arc = scan_data.ranges[-36:]\n #front arc is a combination of left and right arcs, normalised so angles run left to right continuously\n front_arc = np.array(left_arc[::-1] + right_arc[::-1])\n #closest distance measurement\n self.min_distance = front_arc.min()\n #direction of closest measurement\n self.object_angle = 
self.arc_angles[np.argmin(front_arc)]\n \n def action_server_launcher(self, goal):\n r = rospy.Rate(10)\n #goal is when the robot is about to hit an obstacle (movement controllers prevent this from happening)\n success = True\n #check for forward speed goal (can only move within robot's limits)\n if goal.fwd_velocity <= 0 or goal.fwd_velocity > 0.26:\n print(\"Invalid velocity. Select a value between 0 and 0.26 m/s.\")\n success = False\n #lower limit for stop distance - if lower, the robot will collide before it stops\n if goal.approach_distance <= 0.18:\n print(\"Invalid stop distance: Robot will crash\")\n success = False\n\n #aborts action if any checks are violated\n if not success:\n self.actionserver.set_aborted()\n return\n\n print(\"Request to move at {:.3f}m/s and stop if less than{:.2f}m infront of an obstacle\".format(goal.fwd_velocity, goal.approach_distance))\n\n # Get the current robot odometry:\n self.posx0 = self.robot_odom.posx\n self.posy0 = self.robot_odom.posy\n\n #has_returned is used to make the robot move in new directions when it returns to near the starting location\n self.has_returned = False\n #turning_direction is used to let the robot continue turning in the same direction when not turning fast enough close to an obstacle\n self.turning_direction = -1\n\n #sets an initial value for distance from origin/start point\n self.distance=0\n\n print(\"The robot will start to move now...\")\n # set the robot velocity:\n self.robot_controller.set_move_cmd(goal.fwd_velocity, 0.0)\n \n #while the robot hasn't collided\n while self.min_distance > goal.approach_distance:\n \n #---object avoidance---\n\n #if not turning fast enough and about to hit a wall\n if self.min_distance <= 0.4:\n #stop moving and keep turning until the coast is clear\n self.robot_controller.set_move_cmd(0, self.turning_direction * 1.2)\n\n #if approaching an obstacle\n elif self.min_distance <= 0.55:\n\n #if the object is on the left or is directly in front\n if 
self.object_angle<=0:\n #update current turning direction\n self.turning_direction = -1\n #turn left at speed inversely proportional to proximity to obstacle\n self.robot_controller.set_move_cmd(goal.fwd_velocity, -0.3*(1/self.min_distance))\n \n #if the object is on the right\n else:\n #update turning direction\n self.turning_direction = 1\n #turn right at speed inversely proportional to proximity to obstacle\n self.robot_controller.set_move_cmd(goal.fwd_velocity, 0.3*(1/self.min_distance))\n\n else:\n #otherwise, no objects in the way, so set speed to normal forward velocity\n self.robot_controller.set_move_cmd(goal.fwd_velocity, 0.0)\n\n\n #--exploration--\n #if robot has returned to the middle of the arena\n if (self.distance <=0.75) & (self.has_returned==False):\n #robot is at centre\n self.has_returned=True\n\n #make robot turn left, right or straight ahead (angular velocity of -1, 0 and 1)\n turn = np.random.randint(-1,2)\n self.robot_controller.set_move_cmd(0, 3*turn)\n self.robot_controller.publish()\n\n #wait until turn is complete\n rospy.sleep(0.5)\n\n #robot has left starting area\n elif self.distance >=1.0:\n #when the robot returns to the starting area, it will turn a random direction again\n self.has_returned=False\n\n self.robot_controller.publish()\n\n # check if there has been a request to cancel the action mid-way through:\n if self.actionserver.is_preempt_requested():\n rospy.loginfo(\"Cancelling the search.\")\n \n self.actionserver.set_preempted()\n # stop the robot:\n self.robot_controller.stop()\n success = False\n # exit the loop:\n break\n else:\n #calculate distance from origin/start position\n self.distance = sqrt(pow(self.posx0 - self.robot_odom.posx, 2) + pow(self.posy0 - self.robot_odom.posy, 2))\n # populate the feedback message and publish it:\n self.feedback.current_distance_travelled = self.distance\n self.actionserver.publish_feedback(self.feedback)\n \n\n #if robot has gone too close to a wall, action is complete\n if 
success:\n rospy.loginfo(\"approach completed sucessfully, too close to a wall.\")\n self.result.total_distance_travelled = self.distance\n self.result.closest_object_distance = self.min_distance\n self.result.closest_object_angle = self.object_angle\n\n self.actionserver.set_succeeded(self.result)\n self.robot_controller.stop()\n \nif __name__ == '__main__':\n rospy.init_node(\"search_action_server\")\n SearchActionServer()\n rospy.spin()\n","sub_path":"src/search_server_old.py","file_name":"search_server_old.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"332781749","text":"import calendar\nimport select\nimport socket\nfrom datetime import datetime\nimport sys\n\nimport pytest\nfrom freezegun import freeze_time\nfrom lomond import errors, events\nfrom lomond import constants\nfrom lomond.session import WebsocketSession\nfrom lomond.websocket import WebSocket\nfrom mocket import Mocket, MocketEntry, mocketize\n\n\n@pytest.fixture()\ndef session(monkeypatch):\n monkeypatch.setattr(\n 'os.urandom', b'\\xaa'.__mul__\n )\n # ^^ the above line will be significant in the test where we want\n # to validate the headers being sent to the socket. Namely, the\n # websocket key which is based on os.urandom. 
Obviously, we can't\n # have an actual random call here because the test wouldn't be\n # deterministic, hence this sequence of bytes.\n\n return WebsocketSession(WebSocket('wss://example.com/'))\n\n\n@pytest.fixture()\n# @mocketize\ndef session_with_socket(monkeypatch):\n Mocket.register(\n MocketEntry(\n ('example.com', 80),\n [b'some binary data']\n )\n )\n\n session_obj = session(monkeypatch)\n return session_obj\n\n\nclass FakeSocket(object):\n def __init__(self, *args, **kwargs):\n self.buffer = b''\n self._sendall = kwargs.get('sendall', None)\n\n def fileno(self):\n return 999\n\n def recv(self, *args, **kwargs):\n raise ValueError('this is a test')\n\n def shutdown(self, *args, **kwargs):\n pass\n\n def close(self):\n raise socket.error('already closed')\n\n def sendall(self, data):\n self.buffer += data\n if callable(self._sendall):\n self._sendall(data)\n\n def pending(self):\n return 0\n\n\ndef test_write_without_sock_fails(session):\n with pytest.raises(errors.WebSocketUnavailable) as e:\n session.write(b'\\x01')\n\n assert str(e.value) == 'not connected'\n\n\ndef test_write_with_closed_websocket_fails(session):\n session.websocket.state.closed = True\n session._sock = FakeSocket()\n with pytest.raises(errors.WebSocketClosed) as e:\n session.write(b'\\x01')\n assert str(e.value) == 'data not sent'\n\n\ndef test_write_with_closing_websocket_fails(session):\n session.websocket.state.closing = True\n session._sock = FakeSocket()\n with pytest.raises(errors.WebSocketClosing) as e:\n session.write(b'\\x01')\n assert str(e.value) == 'data not sent'\n\n\ndef test_socket_error_propagates(session):\n def sendall(data):\n raise socket.error('just testing errors')\n\n session._sock = FakeSocket()\n session._sock.sendall = sendall\n with pytest.raises(errors.TransportFail) as e:\n session.write(b'\\x01')\n\n assert str(e.value) == 'socket fail; just testing errors'\n\n\ndef test_non_network_error_propagates(session):\n def sendall(data):\n raise ValueError('some 
random exception')\n\n session._sock = FakeSocket()\n session._sock.sendall = sendall\n\n with pytest.raises(errors.TransportFail) as e:\n session.write(b'\\x01')\n\n assert str(e.value) == 'socket error; some random exception'\n\n\ndef test_repr(session):\n assert repr(session) == \"\"\n\n\ndef test_close_socket(session, mocker):\n session._sock = FakeSocket()\n\n mocker.spy(FakeSocket, 'shutdown')\n mocker.spy(FakeSocket, 'close')\n\n session._close_socket()\n\n assert FakeSocket.shutdown.call_count == 1\n assert FakeSocket.close.call_count == 1\n\n\n@mocketize\ndef test_connect(session, mocker):\n Mocket.register(\n MocketEntry(\n ('example.com', 80),\n [b'some binary data']\n )\n )\n _socket = session._connect()\n assert isinstance(_socket, socket.socket)\n\n\n@mocketize\ndef test_socket_fail(session, mocker):\n def select_that_throws_exception(*args, **kwargs):\n raise select.error('this is just a test')\n\n Mocket.register(\n MocketEntry(\n ('example.com', 80),\n [b'some binary data']\n )\n )\n\n mocker.patch('lomond.session.select.select', select_that_throws_exception)\n with pytest.raises(WebsocketSession._SocketFail):\n session._select(session._sock, poll=5)\n\n\ndef test_send_request(session):\n session._sock = FakeSocket()\n session._send_request()\n assert session._sock.buffer == (\n b'GET / HTTP/1.1\\r\\n'\n b'Host: example.com:443\\r\\n'\n b'Upgrade: websocket\\r\\n'\n b'Connection: Upgrade\\r\\n'\n b'Sec-WebSocket-Key: qqqqqqqqqqqqqqqqqqqqqg==\\r\\n'\n b'Sec-WebSocket-Version: 13\\r\\n'\n b'User-Agent: ' + constants.USER_AGENT.encode('utf-8') + b'\\r\\n'\n b'\\r\\n'\n )\n\n\ndef test_run_with_socket_open_error(session):\n def connect_which_raises_error():\n raise socket.error('socket.error during connect')\n\n session._connect = connect_which_raises_error\n\n _events = list(session.run())\n\n assert len(_events) == 2\n\n assert isinstance(_events[0], events.Connecting)\n assert _events[0].url == 'wss://example.com/'\n\n assert isinstance(_events[1], 
events.ConnectFail)\n assert str(_events[1]) == \"ConnectFail('socket.error during connect')\"\n\n\ndef test_run_with_regular_exception_on_connect(session):\n def connect_which_raises_value_error():\n raise ValueError('socket.error during connect')\n\n session._connect = connect_which_raises_value_error\n\n _events = list(session.run())\n\n assert len(_events) == 2\n\n assert isinstance(_events[0], events.Connecting)\n assert _events[0].url == 'wss://example.com/'\n\n assert isinstance(_events[1], events.ConnectFail)\n assert str(_events[1]) == (\n \"ConnectFail('error; socket.error during connect')\"\n )\n\n\ndef test_run_with_send_request_raising_transport_error(session):\n # _send_request can raise TransportFail inside write() call\n # in order to do that, the socket has to be opened and raise\n # either socket.error or Exception during sendall() call.\n # let's do just that. First of all, the method in question:\n def sendall_which_raises_error(data):\n raise socket.error('error during sendall')\n\n # here's where the plot thickens. 
socket connection is established\n # during self._connect, so we have to substitude this method so that\n # it returns our FakeSocket object.\n\n def return_fake_socket():\n return FakeSocket(sendall=sendall_which_raises_error)\n\n session._connect = return_fake_socket\n\n _events = list(session.run())\n\n assert isinstance(_events[-1], events.ConnectFail)\n assert str(_events[-1]) == (\n \"ConnectFail('request failed; socket fail; error during sendall')\"\n )\n\n\ndef test_run_with_send_request_raising_exception(session, mocker):\n # exactly like the one above, but a different type of error is raised.\n # this time, we have to set the state of socket to closed, thus forcing\n # lomond to throw a non-socket error;\n def return_fake_socket(self):\n self.websocket.state.closed = True\n return FakeSocket()\n\n mocker.patch(\n 'lomond.session.WebsocketSession._connect', return_fake_socket)\n\n _events = list(session.run())\n\n assert isinstance(_events[-1], events.ConnectFail)\n assert str(_events[-1]) == (\n \"ConnectFail('request error; data not sent')\"\n )\n\n\ndef test_that_on_ping_responds_with_pong(session, mocker):\n # we don't actually care that much for the whole stack underneath,\n # we only want to check whether a certain method was called..\n send_pong = mocker.patch(\n 'lomond.websocket.WebSocket.send_pong'\n )\n\n session._send_pong(events.Ping(b'\\x00'))\n\n assert send_pong.called_with(b'\\x00')\n\n\ndef test_error_on_close_socket(caplog, session):\n def close_which_raises_error():\n raise ValueError('a problem occurred')\n\n session._sock = FakeSocket()\n session._sock.close = close_which_raises_error\n\n session._close_socket()\n\n import logging\n\n assert caplog.record_tuples[-1] == (\n 'lomond',\n logging.WARNING,\n 'error closing socket (a problem occurred)'\n )\n\n\n@freeze_time(\"1994-05-01 18:40:00\")\ndef test_check_poll(session):\n session._on_ready()\n assert session._check_poll(5 * 60)\n assert not session._check_poll(60 * 
60)\n\n\n@freeze_time(\"1994-05-01 18:40:00\")\ndef test_check_auto_ping(session, mocker):\n session._on_ready()\n\n mocker.patch.object(session.websocket, 'send_ping')\n\n assert session.websocket.send_ping.call_count == 0\n\n with freeze_time('1994-05-01 18:41:00'):\n session._check_auto_ping(15 * 60)\n\n assert session.websocket.send_ping.call_count == 1\n session._check_auto_ping(36 * 60)\n assert session.websocket.send_ping.call_count == 1\n\n\n@freeze_time(\"1994-05-01 18:40:00\")\ndef test_check_ping_timeout(session, mocker):\n session._on_ready()\n\n assert not session._check_ping_timeout(10)\n with freeze_time('1994-05-01 18:41:00'):\n assert session._check_ping_timeout(10)\n\n\n@mocketize\ndef test_simple_run(monkeypatch, mocker):\n monkeypatch.setattr(\n 'os.urandom', b'\\x00'.__mul__\n )\n Mocket.register(\n MocketEntry(\n ('example.com', 80),\n [(\n b'HTTP/1.1 101 Switching Protocols\\r\\n'\n b'Upgrade: websocket\\r\\n'\n b'Connection: Upgrade\\r\\n'\n b'Sec-WebSocket-Accept: icx+yqv66kxgm0fcwalwlflwtai=\\r\\n'\n b'\\r\\n'\n b'\\x81\\x81\\x00\\x00\\x00\\x00A'\n )]\n )\n )\n\n # mocket doesn't support .pending() call which is used when ssl is used\n session = WebsocketSession(WebSocket('ws://example.com/'))\n session._on_ready()\n # well, we have to cheat a little. The thing is, inner loop of\n # run() sets last poll time to time.time and so we would have to\n # wait for some time to actually hit poll / ping. This is not desirable\n # so we can do the following:\n # save original _regular call into _regular_orig\n # (_regular is a first - well, technically, a second) call inside run\n # after _poll_start is set which makes it a nice candidate for monkey-patch\n # location. 
Here's how we do it:\n session._regular_orig = session._regular\n\n mocker.patch(\n 'lomond.websocket.WebSocket._send_close')\n mocker.patch.object(session.websocket, 'send_ping')\n mocker.patch(\n 'lomond.session.WebsocketSession._select',\n lambda self, sock, poll:[True, False]\n )\n\n _events = list(session.run())\n\n assert len(_events) == 6\n assert isinstance(_events[0], events.Connecting)\n assert isinstance(_events[1], events.Connected)\n assert isinstance(_events[2], events.Ready)\n assert isinstance(_events[3], events.Poll)\n assert isinstance(_events[4], events.Text)\n assert isinstance(_events[5], events.Disconnected)\n\n\n@freeze_time(\"1994-05-01 18:40:00\")\n@mocketize\ndef test_unresponsive(monkeypatch, mocker):\n \"\"\"Check ping timeout.\"\"\"\n monkeypatch.setattr(\n 'os.urandom', b'\\x00'.__mul__\n )\n Mocket.register(\n MocketEntry(\n ('example.com', 80),\n [(\n b'HTTP/1.1 101 Switching Protocols\\r\\n'\n b'Upgrade: websocket\\r\\n'\n b'Connection: Upgrade\\r\\n'\n b'Sec-WebSocket-Accept: icx+yqv66kxgm0fcwalwlflwtai=\\r\\n'\n b'\\r\\n'\n b'\\x81\\x81\\x00\\x00\\x00\\x00A'\n )]\n )\n )\n\n # mocket doesn't support .pending() call which is used when ssl is used\n session = WebsocketSession(WebSocket('ws://example.com/'))\n session._on_ready()\n # well, we have to cheat a little. The thing is, inner loop of\n # run() sets last poll time to time.time and so we would have to\n # wait for some time to actually hit poll / ping. This is not desirable\n # so we can do the following:\n # save original _regular call into _regular_orig\n # (_regular is a first - well, technically, a second) call inside run\n # after _poll_start is set which makes it a nice candidate for monkey-patch\n # location. 
Here's how we do it:\n session._regular_orig = session._regular\n\n mocker.patch(\n 'lomond.websocket.WebSocket._send_close')\n mocker.patch.object(session.websocket, 'send_ping')\n mocker.patch(\n 'lomond.session.WebsocketSession._select',\n lambda self, sock, poll:[True, False]\n )\n\n _events = []\n iter_events = iter(session.run(ping_timeout=5))\n\n for event in iter_events:\n _events.append(event)\n if event.name == 'text':\n break\n\n with freeze_time(\"1994-05-01 18:41:00\"):\n for event in iter_events:\n _events.append(event)\n\n assert len(_events) == 8\n assert isinstance(_events[0], events.Connecting)\n assert isinstance(_events[1], events.Connected)\n assert isinstance(_events[2], events.Ready)\n assert isinstance(_events[3], events.Poll)\n assert isinstance(_events[4], events.Text)\n assert isinstance(_events[5], events.Poll)\n assert isinstance(_events[6], events.Unresponsive)\n assert isinstance(_events[7], events.Disconnected)\n\n\ndef test_recv_with_secure_websocket(session):\n def fake_recv(self):\n return b'\\x01'\n\n session._sock = FakeSocket()\n session._sock.recv = fake_recv\n\n assert session._recv(1) == b'\\x01'\n","sub_path":"tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":13167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"96399366","text":"# -*- coding: utf-8 -*-\n\nfrom NB_base import two_fold\nfrom NB_base import ClassifierChain_NB\nimport pandas as pd\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n dataset = sys.argv[1]\n\n # setup\n savePath = \"../result/NB/CC_NB_bayesNet/\"\n dataPath = os.path.abspath(\"../data/\" + dataset + \"/\")\n X_file = \"X_scale.csv\"\n y_file = \"y.csv\"\n\n data = pd.read_csv(os.path.join(dataPath, X_file))\n label = pd.read_csv(os.path.join(dataPath, y_file))\n\n df_CC = two_fold(ClassifierChain_NB, data, label, dataset,\n ensemble=1, ordering=\"random\", structure=\"bayes_net\", lead=False)\n\n 
# save the results\n if not os.path.exists(savePath):\n os.makedirs(savePath)\n df_CC.to_csv(savePath+dataset+\".csv\")\n","sub_path":"code/NB/ClassifierChain_NB_bayesNet.py","file_name":"ClassifierChain_NB_bayesNet.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"294310832","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('messi5.jpg')\nimg2 = cv2.imread('opencv-logo.png')\n\nprint(img.shape)#returns a tuple of number of rows, columns, and channels\nprint(img.size) # returns total number of pixels is accessed\nprint(img.dtype) # returns image datatype is obtained\nb,g,r = cv2.split(img)\nimg = cv2.merge((b,g,r))\n\nball = img[280:340, 330:390]#this takes out the ball by making a rectagle region\n\nimg[273:333, 100:160] = ball\n\nimg = cv2.resize(img, (512, 512))\nimg2 = cv2.resize(img2, (512,512))\n\n#dst = cv2.add(img,img2)\ndst = cv2.addWeighted(img,.9, img2,.1, 0)#basically makes one image more transulcent than the other\n\ncv2.imshow('image',dst)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"split,merge,resize,add.py","file_name":"split,merge,resize,add.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"75156739","text":"import discord\r\nimport asyncio\r\nimport random\r\nimport os\r\nimport time\r\nimport datetime\r\nfrom urllib.request import urlopen, Request\r\nimport urllib\r\nimport urllib.request\r\nimport bs4\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\n\r\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name('gjhelper-cc7069273059.json', scope)\r\nclient = gspread.authorize(creds)\r\ndoc = 
client.open_by_url('https://docs.google.com/spreadsheets/d/1PA2WP-aQ-d8TlGubOSpUJwHoH8VZfiTwIFPO3eYGnIs')\r\n\r\n\r\nclient = discord.Client()\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n\tprint(\"login\")\r\n\tprint(client.user.name)\r\n\tprint(client.user.id)\r\n\tprint(\"----------------\")\r\n\tawait client.change_presence(game=discord.Game(name='업무외지원', type=1))\r\n\r\n\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\tglobal gc #정산\r\n\tglobal creds\t#정산\r\n\tglobal ladder\r\n \r\n\tif message.content.startswith('!나이'):\r\n\t\tSearchID = message.content[len('!나이')+1:]\r\n\t\tgc = gspread.authorize(creds)\r\n\t\twks = gc.open('GJ재고관리').worksheet('만나이계산기')\r\n\t\t\r\n\t\twks.update_acell('C8', SearchID)\r\n\t\tresult1 = wks.acell('H8').value\r\n\t\tresult2 = wks.acell('J8').value\r\n \r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = ' 오늘기준 ' + SearchID + ' 나이! ',\r\n\t\t\tdescription= '```md\\n' + SearchID + result1 + result2 + '```',\r\n\t\t\tcolor=0x5ABEFF\r\n\t\t\t)\r\n\t\tawait client.send_message(message.channel, embed=embed)\r\n\t\t\r\n\t\t\r\n\tif message.content.startswith('!유지기간'):\r\n\t\tSearchID = message.content[len('!유지기간')+1:]\r\n\t\tgc = gspread.authorize(creds)\r\n\t\twks = gc.open('GJ재고관리').worksheet('유지기간')\r\n\t\twks.update_acell('a1', SearchID)\r\n\t\tresult = wks.acell('b1').value\r\n\t\t\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = ' 오늘기준 ' + SearchID + ' 개통자 남은 유지일수는 ',\r\n\t\t\tdescription= '```md\\n' + SearchID + result + '```',\r\n\t\t\tcolor=0x5ABEFF\r\n\t\t\t)\r\n\t\tawait client.send_message(message.channel, embed=embed)\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n # \r\n#\tif message.content.startswith('!모델명'):\r\n#\t\tSearchID = message.content[len('!모델명')+1:]\r\n#\t\tgc = gspread.authorize(creds)\r\n#\t\twks = gc.open('GJ재고관리').worksheet('시트2')\r\n#\t\twks.update_acell('A1', SearchID)\r\n#\t\tresult = wks.acell('B1').value\r\n#\t\t\r\n#\t\tembed = discord.Embed(\r\n#\t\t\ttitle = 
' :printer: 모델명 코드 리스트 ',\r\n#\t\t\tdescription= '```' + SearchID + ' 모델명 코드는 ' + result + ' ```',\r\n#\t\t\tcolor=0x0000ff\r\n#\t\t\t)\r\n#\t\tawait client.send_message(message.channel, embed=embed)\r\n\r\n\r\n\tif message.content.startswith('!영화순위'):\r\n # http://ticket2.movie.daum.net/movie/movieranklist.aspx\r\n\t\ti1 = 0 # 랭킹 string값\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = \"영화순위\",\r\n\t\t\tdescription = \"영화순위입니다.\",\r\n\t\t\tcolour= discord.Color.red()\r\n\t\t\t)\r\n\t\thdr = {'User-Agent': 'Mozilla/5.0'}\r\n\t\turl = 'http://ticket2.movie.daum.net/movie/movieranklist.aspx'\r\n\t\tprint(url)\r\n\t\treq = Request(url, headers=hdr)\r\n\t\thtml = urllib.request.urlopen(req)\r\n\t\tbsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\t\tmoviechartBase = bsObj.find('div', {'class': 'main_detail'})\r\n\t\tmoviechart1 = moviechartBase.find('ul', {'class': 'list_boxthumb'})\r\n\t\tmoviechart2 = moviechart1.find_all('li')\r\n\r\n\t\tfor i in range(0, 20):\r\n\t\t\ti1 = i1+1\r\n\t\t\tstri1 = str(i1) # i1은 영화랭킹을 나타내는데 사용됩니다\r\n\t\t\tprint()\r\n\t\t\tprint(i)\r\n\t\t\tprint()\r\n\t\t\tmoviechartLi1 = moviechart2[i] # ------------------------- 1등랭킹 영화---------------------------\r\n\t\t\tmoviechartLi1Div = moviechartLi1.find('div', {'class': 'desc_boxthumb'}) # 영화박스 나타내는 Div\r\n\t\t\tmoviechartLi1MovieName1 = moviechartLi1Div.find('strong', {'class': 'tit_join'})\r\n\t\t\tmoviechartLi1MovieName = moviechartLi1MovieName1.text.strip() # 영화 제목\r\n\t\t\tprint(moviechartLi1MovieName)\r\n\r\n\t\t\tmoviechartLi1Ratting1 = moviechartLi1Div.find('div', {'class': 'raking_grade'})\r\n\t\t\tmoviechartLi1Ratting2 = moviechartLi1Ratting1.find('em', {'class': 'emph_grade'})\r\n\t\t\tmoviechartLi1Ratting = moviechartLi1Ratting2.text.strip() # 영화 평점\r\n\t\t\tprint(moviechartLi1Ratting)\r\n\r\n\t\t\tmoviechartLi1openDay1 = moviechartLi1Div.find('dl', {'class': 'list_state'})\r\n\t\t\tmoviechartLi1openDay2 = moviechartLi1openDay1.find_all('dd') # 개봉날짜, 예매율 두개포함한 
dd임\r\n\t\t\tmoviechartLi1openDay3 = moviechartLi1openDay2[0]\r\n\t\t\tmoviechartLi1Yerating1 = moviechartLi1openDay2[1]\r\n\t\t\tmoviechartLi1openDay = moviechartLi1openDay3.text.strip() # 개봉날짜\r\n\t\t\tprint(moviechartLi1openDay)\r\n\t\t\tmoviechartLi1Yerating = moviechartLi1Yerating1.text.strip() # 예매율 ,랭킹변동\r\n\t\t\tprint(moviechartLi1Yerating) # ------------------------- 1등랭킹 영화---------------------------\r\n\t\t\tprint()\r\n\t\t\tembed.add_field(name='---------------랭킹'+stri1+'위---------------', value='\\n영화제목 : '+moviechartLi1MovieName+'\\n영화평점 : '+moviechartLi1Ratting+'점'+'\\n개봉날짜 : '+moviechartLi1openDay+'\\n예매율,랭킹변동 : '+moviechartLi1Yerating, inline=False) # 영화랭킹\r\n\r\n\r\n\t\tawait client.send_message(message.channel, embed=embed)\r\n\r\n\r\n\tif message.content.startswith('!주사위'):\r\n\t\trandomNum = random.randrange(1, 7) # 1~6까지 랜덤수\r\n\t\tprint(randomNum)\r\n\t\tif randomNum == 1:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: '+ ':one:'))\r\n\t\tif randomNum == 2:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: ' + ':two:'))\r\n\t\tif randomNum ==3:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: ' + ':three:'))\r\n\t\tif randomNum ==4:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: ' + ':four:'))\r\n\t\tif randomNum ==5:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: ' + ':five:'))\r\n\t\tif randomNum ==6:\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description=':game_die: ' + ':six: '))\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\tif message.content.startswith(\"!복권\"):\r\n\t\tText = \"\"\r\n\t\tnumber = [1, 2, 3, 4, 5, 6, 7]\r\n\t\tcount = 0\r\n\t\tfor i in range(0, 7):\r\n\t\t\tnum = random.randrange(1, 46)\r\n\t\t\tnumber[i] = num\r\n\t\t\tif count >= 1:\r\n\t\t\t\tfor i2 in 
range(0, i):\r\n\t\t\t\t\tif number[i] == number[i2]: # 만약 현재랜덤값이 이전숫자들과 값이 같다면\r\n\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\tprint(\"작동 이전값 : \" + str(numberText))\r\n\t\t\t\t\t\tnumber[i] = random.randrange(1, 46)\r\n\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\tprint(\"작동 현재값 : \" + str(numberText))\r\n\t\t\t\t\t\tif number[i] == number[i2]: # 만약 다시 생성한 랜덤값이 이전숫자들과 또 같다면\r\n\t\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\t\tprint(\"작동 이전값 : \" + str(numberText))\r\n\t\t\t\t\t\t\tnumber[i] = random.randrange(1, 46)\r\n\t\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\t\tprint(\"작동 현재값 : \" + str(numberText))\r\n\t\t\t\t\t\t\tif number[i] == number[i2]: # 만약 다시 생성한 랜덤값이 이전숫자들과 또 같다면\r\n\t\t\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\t\t\tprint(\"작동 이전값 : \" + str(numberText))\r\n\t\t\t\t\t\t\t\tnumber[i] = random.randrange(1, 46)\r\n\t\t\t\t\t\t\t\tnumberText = number[i]\r\n\t\t\t\t\t\t\t\tprint(\"작동 현재값 : \" + str(numberText))\r\n\r\n\t\t\tcount = count + 1\r\n\t\t\tText = Text + \" \" + str(number[i])\r\n\t\t\t\r\n\t\tprint(Text.strip())\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle=\"복권 숫자!\",\r\n\t\t\tdescription=Text.strip(),\r\n\t\t\tcolour=discord.Color.red()\r\n\t\t)\r\n\t\tawait client.send_message(message.channel, embed=embed)\r\n\t\t\r\n\t\t\r\n\tif message.content.startswith('!사다리'):\r\n\t\tladder = []\r\n\t\tladder = message.content[len('!사다리') + 1:].split(\" \")\r\n\t\tnum_cong = int(ladder[0])\r\n\t\tdel (ladder[0])\r\n\t\tif num_cong < len(ladder):\r\n\t\t\tresult_ladder = random.sample(ladder, num_cong)\r\n\t\t\tresult_ladderSTR = ','.join(map(str, result_ladder))\r\n\t\t\tembed = discord.Embed(\r\n\t\t\t\ttitle=\"----- 당첨! -----\",\r\n\t\t\t\tdescription='```' + result_ladderSTR + '```',\r\n\t\t\t\tcolor=0xff00ff\r\n\t\t\t\t)\r\n\t\t\tawait client.send_message(message.channel, embed=embed, tts=False)\r\n\t\telse:\r\n\t\t\tawait client.send_message(message.channel, '```추첨인원이 총 인원과 같거나 많습니다. 
재입력 해주세요```', tts=False)\r\n\r\n\tif message.content.startswith('!타이머'):\r\n\r\n\t\tText = \"\"\r\n\t\tlearn = message.content.split(\" \")\r\n\t\tvrsize = len(learn) # 배열크기\r\n\t\tvrsize = int(vrsize)\r\n\t\tfor i in range(1, vrsize): # 띄어쓰기 한 텍스트들 인식함\r\n\t\t\tText = Text + \" \" + learn[i]\r\n\t\t\t\r\n\t\tsecint = int(Text)\r\n\t\tsec = secint\r\n\t\t\r\n\t\tfor i in range(sec, 0, -1):\r\n\t\t\tprint(i)\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description='타이머 작동중 : '+str(i)+'초'))\r\n\t\t\ttime.sleep(1)\r\n\r\n\t\telse:\r\n\t\t\tprint(\"땡\")\r\n\t\t\tawait client.send_message(message.channel, embed=discord.Embed(description='타이머 종료'))\r\n\r\n\t\t\r\n\r\n\t\t\r\n\t\t\r\n\r\n\t\r\n\r\n\t\t\r\n\t\t\t\r\n\t\t\t\r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"248287323","text":"#!/usr/bin/env python3\r\nimport csv\r\nimport operator\r\nimport re\r\n\r\nfrom collections import defaultdict\r\n\r\nerror_counts = defaultdict(int) # {error_message: count}\r\nper_user = {} # {import reuser_name: {error_1: count, error_2: count}\r\n\r\n# 1: status, 2: message, 3: ticket number 4: user\r\nregex_draft = r\"ticky: ([A-Z]*) (.*) (\\[#\\d{4}] )?\\(([\\w.]*)\\)\" # Try to grab ticket number so not in error message.\r\n\r\n# 1: status, 2: message, 3: user\r\nregex = r\"ticky: ([A-Z]*) (.*) \\(([\\w.]*)\\)\"\r\nticket_num_regex = r'(\\[#\\d{4}])'\r\n\r\nwith open('sample_syslog.log', 'r') as logfile:\r\n for log_entry in logfile:\r\n # Get desired data:\r\n re_search = re.search(regex, log_entry.strip())\r\n\r\n status, message, user_name = re_search.groups()\r\n # Strip ticket number:\r\n if re.search(ticket_num_regex, message):\r\n message = message[:-8]\r\n\r\n if user_name not in per_user:\r\n per_user[user_name] = {'INFO': 0, 
'ERROR': 0}\r\n else:\r\n per_user[user_name][status] += 1\r\n\r\n error_counts[message] += 1\r\n\r\n# Sort the dicts\r\nsorted_error_counts = sorted(error_counts.items(), key=operator.itemgetter(1), reverse=True)\r\nsorted_per_user_dicts = sorted(per_user.items())\r\n\r\nsorted_per_user = [(user[0], user[1]['INFO'], user[1][\"ERROR\"]) for user in sorted_per_user_dicts]\r\n\r\n# Add column headers:\r\nsorted_error_counts_with_headers = [(\"Error\", \"Count\")] + sorted_error_counts\r\nsorted_per_user_with_headers = [(\"Username\", \"INFO\", \"ERROR\")] + sorted_per_user\r\n\r\n# print(f'{error_counts=},\\n {per_user=}')\r\n# print(f'{sorted_error_counts=},\\n {sorted_per_user=}')\r\n#\r\n# print(f'{sorted_error_counts_with_headers=}')\r\n# print(f'{sorted_per_user_with_headers=}')\r\n\r\n# Write csv files:\r\nwith open('error_message.csv', mode='w') as error_message_file:\r\n error_message_writer = csv.writer(error_message_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n for error_message in sorted_error_counts_with_headers:\r\n error_message_writer.writerow(error_message)\r\n\r\nwith open('user_statistics.csv', mode='w') as user_statistics_file:\r\n user_statistics_writer = csv.writer(user_statistics_file,\r\n delimiter=',')\r\n for user in sorted_per_user_with_headers:\r\n user_statistics_writer.writerow(user)","sub_path":"Using Python to Interact with the Operating System/ticky_check.py","file_name":"ticky_check.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"154239113","text":"#!/usr/bin/env python\n#from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom nn_tools import *\n\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef nn_data_djasx(code,date,daysback,params):\n\n # simple asx/dj prediction with nback and emas\n nback=params+[0,0,0,0,0,0,0,0,0,0] # add more zeros to fill out\n\n vali = 
[]\n valo = []\n vdate = []\n\n dj = SPriceEod('dj', date, daysback=daysback+10)\n asx = SPriceEod('asx200', date, daysback=daysback)\n djm1 = fts_ema(dj.close,12,fillna=True)\n djm2 = fts_ema(dj.close,30,fillna=True)\n asxm1 = fts_ema(asx.close,12,fillna=True)\n asxm2 = fts_ema(asx.close,30,fillna=True)\n\n buff1 = max(nback)+1\n buff2 = 2\n\n for n in range(buff1,len(asx.close)-buff2):\n vi=[]\n vo=[]\n\n vdate.append(asx.date[n])\n\n # work out the index for the DJ prior to asx date\n for i in range(0,len(dj.close)):\n if asx.date[n] > dj.date[i] and asx.date[n] <= dj.date[i+1]:\n index = i\n\n for k in range(0,nback[0]): vi.append(dj.close[index-k] - dj.close[index - 1-k])\n for k in range(0,nback[1]): vi.append(asx.close[n - 1 - k] - asx.close[n - 2 - k])\n for k in range(0,nback[2]): vi.append(dj.close[index - k])\n for k in range(0,nback[3]): vi.append(asx.close[n-1-k])\n for k in range(0,nback[4]): vi.append(djm1[index-k]-djm2[index-k]) # difference in the two moving averages\n for k in range(0,nback[5]): vi.append(asxm1[n-1 - k] - asxm2[n-1 - k]) # difference in the two moving averages\n\n vali.append(vi)\n\n vo.append((asx.close[n]-asx.close[n-1])/asx.close[n-1])\n valo.append(vo)\n\n vali=np.array(vali)\n valo=np.array(valo)\n\n return vali, valo, vdate\n\ndef split_normalise(vali,valo,vdate,sdate,buffer):\n\n for i in range(0,len(vdate)):\n if int(vdate[i]) >= sdate and int(vdate[i-1]) < sdate:\n index=i\n\n print ('Training date range ', vdate[0],vdate[index-1-buffer])\n print ('Validation data range ', vdate[index],vdate[-1])\n\n vali_t = vali[:index-buffer,:]\n vali_v = vali[index:,:]\n valo_t = valo[:index-buffer,:]\n valo_v = valo[index:,:]\n\n si1 = np.shape(vali_t)\n si2 = np.shape(valo_t)\n si3 = np.shape(vali_v)\n nvals_t = si1[0]\n nvals_v = si3[0]\n nvars_i = si1[1]\n nvars_o = si2[1]\n\n print ('No training values ', nvals_t)\n print ('No validation values ', nvals_v)\n print ('No input variables ', nvars_i)\n print ('No output variables ', 
nvars_o)\n\n # normalise input values for training and validation\n for i in range(0,nvars_i):\n vv = vali_t[:, i]\n std = np.std(vv)\n mean = np.mean(vv)\n for n in range(0,nvals_t):\n vali_t[n, i] = (vali_t[n, i] - mean)/std\n for n in range(0,nvals_v):\n vali_v[n, i] = (vali_v[n, i] - mean)/std\n\n # normalise output values for training and validation\n for i in range(0,nvars_o):\n vv = valo_t[:, i]\n std = np.std(vv)\n mean = np.mean(vv)\n for n in range(0,nvals_t):\n valo_t[n, i] = (valo_t[n, i] - mean)/std\n for n in range(0,nvals_v):\n valo_v[n, i] = (valo_v[n, i] - mean)/std\n\n return vali_t, vali_v, valo_t, valo_v\n\n\ndef nn_create_data(name, datatype, batches, params, date_end, daysback, date_split, buffer):\n\n nn = 0\n for batch in batches:\n print ('Reading batch ', batch)\n nn += 1\n func = globals()[\"nn_data_\" +datatype]\n vali, valo, vdate = func(batch, date_end, daysback, params) #5000\n vali_t2, vali_v2, valo_t2, valo_v2 = split_normalise(vali,valo,vdate,date_split,buffer)\n if nn == 1:\n vali_t = np.copy(vali_t2)\n vali_v = np.copy(vali_v2)\n valo_t = np.copy(valo_t2)\n valo_v = np.copy(valo_v2)\n else:\n vali_t = np.append(vali_t,vali_t2, axis = 0)\n vali_v = np.append(vali_v,vali_v2, axis = 0)\n valo_t = np.append(valo_t,valo_t2, axis = 0)\n valo_v = np.append(valo_v,valo_v2, axis = 0)\n\n print ('vali_t',np.shape(vali_t))\n print ('valo_t',np.shape(valo_t))\n print ('vali_v',np.shape(vali_v))\n print ('valo_v',np.shape(valo_v))\n\n\n dir='/Users/oalves/python/nn/exps/'+name\n if not os.path.exists(dir):\n os.makedirs(dir)\n np.save(os.path.join(dir,'vali_t.npy'),vali_t)\n np.save(os.path.join(dir,'valo_t.npy'),valo_t)\n np.save(os.path.join(dir,'vali_v.npy'),vali_v)\n np.save(os.path.join(dir,'valo_v.npy'),valo_v)\n\n\n","sub_path":"nn_data.py","file_name":"nn_data.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"125333080","text":"import 
postgresql #импорт библиотекки постгрис\nfrom datetime import datetime\n\ndb = postgresql.open('pq://west223:westwest223@dbpostgres:5432/telegram_db')\n\n#Вставить запись в таблицу user_messages\ndef insertUserMessage(message, answer):\n insertDBUserMessage = db.prepare(\n \"INSERT INTO user_messages (request_message, response_message, user_id, user_name, created_at)\"\n \" VALUES ($1, $2, $3, $4, $5)\")\n\n\n userName = str(message.chat.first_name) + \" \" + str(message.chat.last_name)\n\n insertDBUserMessage(message.text, str(answer), message.from_user.id, userName, datetime.now()) # вставляем в бд\n\n\n\n","sub_path":"docker-work/telegrambot/dbPostgres.py","file_name":"dbPostgres.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"90814003","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom random import *\n\ncounts = 100000 # number of Monte Carlo simulations\nalc = [] # empty list to be filled for quality adjusted life years gained\n\n# =================================================================================\n# CALCULATIONS FOR QALYS GAINED DUE TO INTERVENTION IN ALCOHOL CONSUMPTION\n# =================================================================================\nfor i in range(0, counts):\n # calculating QALYs lost to chronic and acute conditions due to alcohol in birth cohort of 4,000,000\n alc_chr = np.random.randint(650000, 700000) # alc_chr: Alcohol-attributable life years lost to chronic conditions\n alc_chr = uniform(0.80 * alc_chr, 1.20 * alc_chr) # Range: =/- 20%\n alc_acu = np.random.randint(1300000, 1800000) # alc_acu: Alcohol-attributable life years lost to acute conditions\n alc_acu = uniform(0.80 * alc_acu, 1.20 * alc_acu) # Range: =/- 20%\n alc_m_chr = np.random.randint(350000,\n 400000) # alc_m_chr: Alcohol-attributable morbidity-related QALYs lost from chronic conditions\n alc_m_chr = uniform(0.60 * 
alc_m_chr, 1.40 * alc_m_chr) # Range: =/- 40%\n alc_m_acu = np.random.randint(100000,\n 120000) # alc_m_acu: Alcohol-attributable morbidity-related QALYs lost from acute conditions\n alc_m_acu = uniform(0.60 * alc_m_acu, 1.40 * alc_m_acu) # Range: =/- 40%\n alc_t_lost = alc_chr + alc_acu + alc_m_chr + alc_m_acu # Total alcohol-attributable QALYs lost\n\n # calculating effects of screening\n alc_scr = uniform(0.02, 0.7) # alc_scr: Delivery of screening and counseling\n alc_adh = uniform(0.2, 0.95) # alc_adh: Adherence with screening\n alc_scr_eff = uniform(0.20, 0.85) # alc_scr_eff: Effectiveness of counseling at changing behaviour\n alc_eff_chr = uniform(0.65, 1) # alc_eff_chr: Efficacy of behaviour change at reducing chronic conditions\n alc_eff_acu = uniform(0.20, 0.80) # alc_eff_acu: Efficacy of behaviour change at reducing acute conditions\n alc_t_eff = 1 / alc_t_lost * (alc_eff_acu * (alc_acu + alc_m_acu) + alc_eff_chr * (alc_chr + alc_m_chr))\n # alc_t_eff: Weighted efficacy of behaviour change at reducing total alcohol-attributable QALYs lost\n # calculating quality adjusted life years lost and gained\n alc_y_lost = alc_t_lost / (1 - alc_scr * alc_scr_eff * alc_t_eff) # Predicted alcohol-attributable QALYs lost\n alc_y_gain = alc_y_lost * alc_adh * alc_scr_eff * alc_t_eff # QALYS gained, CPB\n alc.append(alc_y_gain)\n\nalc_sorted = sorted(alc)\n\n\ndef cohort_alc(plt):\n \"\"\"\n This function calculates the mean and standard deviation of quality adjusted life years to the cohort of 4,000,000\n population. 
It also plots histogram showing QALYs within the cohort over entire Monte Carlo Simulation bracket.\n lowerlimit:lower fixed quantile limit\n upperlimit: upper fixed quantile limit\n bins: fixed number of bins\n return: plt\n\n \"\"\"\n\n lowerlimit = alc_sorted[int(0.025 * counts)]\n upperlimit = alc_sorted[int(0.975 * counts)]\n print(\"\\nQALYS GAINED IN COHORT:\\n\")\n print(\"Mean for QALYs gained (CPB): \" + str(int(np.mean(alc_sorted))))\n print(\"Standard deviation for QALYs gained (CPB): \" + str(int(np.std(alc_sorted))))\n print(\"Confidence interval: [\" + str(int(lowerlimit)) + \", \" + str(int(upperlimit)) + \"]\")\n print(\"Number of simulations: \" + str(len(alc_sorted)))\n\n # Build Histogram and Graph\n bins = np.arange(0, 1000000, 5000) # fixed bin size\n plt.xlim([0, 600000])\n plt.xticks(range(0, 600000, 100000))\n plt.hist(alc_sorted, bins=bins, alpha=0.5)\n\n # Cohort calculations Graph\n plt.title('Histogram results')\n plt.xlabel('Cohort: Calculated QALYs for alcohol screening and intervention')\n plt.ylabel('Frequency (arb. 
units)')\n plt == plt.show()\n return plt\n\n\nNI_alc = 206000000 # number of person years above 15 years old for a cohort of 4,000,000\nalc_final = [i / NI_alc for i in alc_sorted]\n\n\ndef inter_alc(plt):\n \"\"\"\n This function calculates the quality of life years gained per individual intervention taking into account\n all interventions and constructing confidence intervals, calculating the mean and standard deviation of years gained\n and plots histogram showing QALYs/intervention over entire Monte Carlo Simulation bracket.\n lowerlimit:lower fixed quantile limit\n upperlimit: upper fixed quantile limit\n bins: fixed number of bins\n return: plt\n\n \"\"\"\n print(\"\\n\\nQALYS GAINED PER INDIVIDUAL INTERVENTION:\\n\")\n print(\"Number of interventions in the cohort: \", NI_alc)\n\n lowerlimit = alc_final[int(0.025 * counts)]\n upperlimit = alc_final[int(0.975 * counts)]\n\n print(\"Mean for QALYs gained (CPB): \" + str((np.mean(alc_final))))\n print(\"Standard deviation for QALYs gained (CPB): \" + str((np.std(alc_final))))\n print(\"Confidence interval: [\" + str((lowerlimit)) + \", \" + str((upperlimit)) + \"]\")\n print(\"Number of simulations: \" + str(len(alc_final)))\n\n bins = np.arange(0, 0.0035, 0.000005) # fixed bin size\n plt.xlim([0, 0.0035])\n plt.hist(alc_final, bins=bins, alpha=0.5)\n plt.title('Histogram results')\n plt.xlabel('Calculated QALYs/intervention')\n plt.ylabel('Frequency (arb. 
units)')\n plt == plt.show()\n return plt\n\n\nalc_intervention_mean = np.mean(alc_final)\nalc_intervention_stdev = np.std(alc_final)\n\n\n# ===========================================\n# IMPACT OF NGO INTERVENTION ON ALCOHOL\n# ===========================================\ndef impact_alc():\n \"\"\"\n This function calculates the mean quality adjusted life years for a period of three years after taking fluctuations\n of patient traffic into account and calculates the monetory value saved.\n Term1: previous year with patient fluctuation of 0.2\n Term2: current year patient volume\n Term3: next year patient volume with fluctuation of 0.2\n return: none\n >>>patient_volume=300\n >>>Fluctuation = 0.2\n >>>QALY_dollars = 132200\n Alcohol Mean QALYS/year: 0.41458903907\n Alcohol_Stdev_QALYS/year: 0.156845352146\n Monetary value (Mean): 41458.903907\n Monetary value (Stdev): 15684.5352146\n\n >>>patient_volume= 300\n >>>Fluctuation = 0\n >>>QALY_dollars = 100\n Alcohol Mean QALYS/year: 0.413815868588\n Alcohol_Stdev_QALYS/year: 0.156432947411\n Monetary value (Mean): 41.3815868588\n Monetary value (Stdev): 15.6432947411\n\n\n \"\"\"\n alc_patient_volume = np.random.randint(300,500)\n\n Fluctuation = np.random.random() # year on year patient traffic fluctuation\n QALY_dollars = np.random.randint(125000, 150000) # QALYs converted to dollars\n\n QALYs_year_mean = alc_intervention_mean * alc_patient_volume\n # Var(AB)=Var(A)Var(B)+Var(A)Mean2(B)+Mean2(A)Var(B)\n Term1 = (alc_intervention_stdev * alc_patient_volume * Fluctuation) ** 2\n Term2 = (alc_intervention_stdev * alc_patient_volume) ** 2\n Term3 = (alc_intervention_mean * alc_patient_volume * Fluctuation) ** 2\n\n QALYs_year_stdev = (Term1 + Term2 + Term3) ** 0.5\n print(\"Impact of NGO:\\n\")\n print(\"Patients:\", alc_patient_volume)\n print(\"Year on fluctuation:\", Fluctuation)\n print(\"Monetary value of one QALY: \", QALY_dollars)\n print(\"Alcohol Mean QALYS/year: \", QALYs_year_mean)\n 
print(\"Alcohol_Stdev_QALYS/year: \", QALYs_year_stdev)\n print(\"Monetary value (Mean): \", QALYs_year_mean * QALY_dollars)\n print(\"Monetary value (Stdev): \", QALYs_year_stdev * QALY_dollars)\n print(\"At Adherence with screening of {0:.2f}\".format(alc_adh * 100),\n \"% and a behavior changefactor of {0:.2f}\".format(alc_t_eff * 100), \"%\")\n print(\"average quality savings for individual per year {0:.2f}\".format(QALYs_year_mean * QALY_dollars / alc_patient_volume),\n \"Dollars for\", counts, \"simulations\")\n\n\nif __name__ == '__main__':\n \"\"\"\n Call all functions.\n \"\"\"\n\n print(\"\\nCALCULATIONS FOR ALCOHOL INTERVENTION:\\n\")\n cohort_alc(plt)\n inter_alc(plt)\n impact_alc()\n\n\n\n","sub_path":"Alcohol.py","file_name":"Alcohol.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"84943838","text":"# coding=utf-8\nimport json\n\ndata1 = {\"name\":\"张三\", \"age\":\"17\"}\ndata2 = {\"name\":\"李四\", \"age\":\"18\"}\nprint(json.dumps(data2, indent=2, sort_keys=True, ensure_ascii=False))\n# with open(\"1.txt\",'w+',encoding='utf-8') as f:\n# json.dump(data1, f) # 写为一行\n # json.dump(',', f)\n #json.dump(data1, f,indent=2,sort_keys=True, ensure_ascii=False) # 写为多行\n# with open(\"1.txt\",'r', encoding='utf-8') as f:\n# data = json.load(f)\n# print(data)\n\"\"\"\nwith open(\"1.txt\",'w', encoding='utf-8') as f:\n json.dump(data1, f,ensure_ascii=False)\n json.dump('|', f,ensure_ascii=False)\n json.dump(data2, f,ensure_ascii=False)\n\nwith open(\"1.txt\",'r', encoding='utf-8') as f:\n datas = f.readlines()\n datas_strings = str(datas).replace(\"['\",\"\").replace(\"']\",\"\").replace('}\"|\"{',\"}|{\")\n for data in datas_strings.split('|'):\n print(data)\n json_data = json.loads(data)\n print(type(json_data), json_data)\n\n\"\"\"\nwith open(\"1.txt\",'a', encoding='utf-8') as f:\n f.write(str(data1)+\"\\n\"+str(data2)+\"\\n\")\n\nwith open(\"1.txt\",'r', 
encoding='utf-8') as f:\n datas = f.readlines()\n print(datas)\n for data in datas:\n data = data.replace(\"\\n\",\"\")\n print(type(data),data)\n json_data = eval(data)\n print(type(json_data), json_data)\n # data = data.replace(\"\\n\",\"\").replace(\"'\",\"\\\"\")\n # print(type(data),data)\n # json_data = json.loads(data)\n # print(type(json_data), json_data)\n\n","sub_path":"json_demo.py","file_name":"json_demo.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"355326355","text":"#!/usr/bin/env python\n\nimport sys, os.path\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'modules'))\nimport dock_cat\nimport cmd_stat\nimport main_fcn\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\ncmd_app = '/Applications/Ettercap-graphical'\ncmd_src = 'sudo -E ettercap -G'\nicns_path = '%s/icons/ettercap/ettercap.icns' % script_path\n\nmsg = \"Setting up Ettercap:\"\n\n\ndef setup():\n exist_bin = cmd_stat.exitcode('which -s ettercap')\n if exist_bin != 0:\n e = 'ettercap --with-ghostscript --with-gtk+ --with-ipv6 --with-luajit'\n child = cmd_stat.exitcode('brew install %s' % e)\n return child\n else:\n return exist_bin\n\n\ndef app_dock():\n dock_cat.cmd_cat(cmd_app, '', '', cmd_src)\n child = dock_cat.dock('others', icns_path, cmd_app)\n return child\n\n#### MAIN ####\ndef main():\n main_fcn.main_(msg, setup, app_dock)\n","sub_path":"kallisti/pentools/ettercap.py","file_name":"ettercap.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"571974814","text":"import unittest\nfrom ddt import ddt, data, unpack\nfrom selenium import webdriver\n\n@ddt\nclass SarchDDT(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n\n #navigate to app home page\n 
self.driver.get(\"http://demo.icebergcommerce.com/\")\n\n #test data\n @data((\"phones\", 3),(\"music\", 7))\n @unpack\n\n def test_search(self, search_value, expected_count):\n self.search_field = self.driver.find_element_by_name('q')\n self.search_field.clear()\n self.search_field.send_keys(search_value)\n self.search_field.submit()\n products = self.driver.find_elements_by_xpath(\"//h2[@class='product-name']/a\")\n self.assertEqual(expected_count, len(products))\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"magento-testing/searchDDT.py","file_name":"searchDDT.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"409526011","text":"#!/usr/bin/env python3\n\nimport pygame.font\nfrom pygame.sprite import Group\nfrom ship import Ship\n\nclass ScoreBoard() : \n ''' a class display number of scores in the game '''\n def __init__(self, settings, screen, stats) : \n self.screen = screen\n self.settings = settings\n self.stats = stats\n self.text_color = (30,30,30)\n # create a font object which can be rendered to image later\n self.font = pygame.font.SysFont(None, 48)\n self.prepare_images()\n\n\n def prepare_images(self) : \n # convert score to image\n self.convert_score()\n # convert high score\n self.convert_high_score()\n # convert level to image\n self.convert_level()\n # convert ship to image\n self.convert_ships()\n\n def convert_ships(self) : \n self.ships = Group()\n for ship_index in range(self.stats.ships_left) : \n ship = Ship(self.screen, self.settings)\n ship.rect.x = 10 + ship_index * ship.rect.width\n ship.rect.y = 10\n self.ships.add(ship)\n\n def convert_score(self) : \n '''convert score to image, this function is called when score is changed '''\n # when you assign -1, you are rounded to neareset 10\n rounded_score = round(self.stats.score, -1)\n score_str = 
'{:,}'.format(rounded_score)\n self.score_image = self.font.render(score_str, True, self.text_color, self.settings.bg_color)\n self.score_image_rect = self.score_image.get_rect()\n # leave 20 px margin on the right side and top\n self.score_image_rect.right = self.screen.get_rect().right - 20\n self.score_image_rect.top = 20\n\n def convert_high_score(self) : \n '''convert high_score to high_image, this function is called when high_score is changed '''\n # when you assign -1, you are rounded to neareset 10\n rounded_high_score = round(self.stats.high_score, -1)\n high_score_str = '{:,}'.format(rounded_high_score)\n self.high_image = self.font.render(high_score_str, True, self.text_color, self.settings.bg_color)\n self.high_image_rect = self.high_image.get_rect()\n # leave 20 px margin on the right side and top\n self.high_image_rect.centerx = self.screen.get_rect().centerx\n self.high_image_rect.top = 20\n\n def show_score(self) : \n '''draw the score on the screen'''\n self.screen.blit(self.score_image, self.score_image_rect)\n self.screen.blit(self.high_image, self.high_image_rect)\n self.screen.blit(self.level_image, self.level_image_rect)\n # group object has a draw method, which draw all elments on the surface\n self.ships.draw(self.screen)\n\n def convert_level(self) : \n '''convert score to image, this function is called when score is changed '''\n # when you assign -1, you are rounded to neareset 10\n\n level_str = str(self.stats.level)\n self.level_image = self.font.render(level_str, True, self.text_color, self.settings.bg_color)\n self.level_image_rect = self.level_image.get_rect()\n # put the image below the score with 10 px gap\n self.level_image_rect.right = self.screen.get_rect().right - 20\n self.level_image_rect.top = self.score_image_rect.bottom + 
10\n\n\n","sub_path":"practice/14.06_sound/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"141871098","text":"# -*- condig: utf-8 -*-\nimport xmltodict, json, requests, nltk\nfrom bs4 import BeautifulSoup\nimport xml.etree.ElementTree as xee\n\nurls = []\ndata = requests.get('https://www.canon.tj/sitemap.xml').content\ndata_to_json = json.dumps(xmltodict.parse(data))\njson_to_data = json.loads(data_to_json)\n\n\nfor a in json_to_data:\n if '@xmlns' in json_to_data[a]: del json_to_data[a]['@xmlns']\n if 'loc' in json_to_data[a]: print(a['loc'])\n for b in json_to_data[a]:\n data = json_to_data[a][b]\n if 'loc' in json_to_data[a][b]: print(b[a][b]['loc'])\n for c in data:\n if 'loc' in c: print(c['loc'])\n \n \n\n\n# print(data_to_json)\n# get_data = json_to_data['https://somon.tj/sitemap.xml']['url']\n\n# for i in get_data:\n# urls.append(i['loc'])\n\n# for t in urls:\n# res = requests.get(t)\n# html = res.content\n# content = ''.join(BeautifulSoup(html, \"html.parser\").findAll(text=True))\n# title = str(BeautifulSoup(html).title.string)\n# print('=========', title, '==========')\n# print(content)\n\n# print(type(title))\n# print(type(t))\n# print(type(content))\n ","sub_path":"xml_http_reader.py","file_name":"xml_http_reader.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"544299160","text":"#!/usr/bin/env python\n\nimport json\nimport rtyaml\nimport glob\n\ndef jsonKeys2str(x):\n \"\"\"Some of the yamls have integer keys, which json converts to string.\n in the future if there are keys that are strings that are intended to be left\n as strings this may break\"\"\"\n if isinstance(x, dict):\n return {(int(k) if k.isdigit() else k):v for k, v in x.items()}\n return x\n\nyamls = glob.glob(\"*.yaml\")\n\nret = 0\nfor path in 
yamls:\n yaml_data = rtyaml.load(open(path))\n json_data = json.load(\n\t open(\"alternate_formats/{}\".format(\n\t\t path.replace(\".yaml\", \".json\")), 'r'),\n\t object_hook=jsonKeys2str)\n if yaml_data != json_data:\n ret = 1\n print(\"Error: {} does not match the generated json.\".format(path))\n\nexit(ret)\n","sub_path":"test/test_json_matches.py","file_name":"test_json_matches.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"441809613","text":"@instrumented_task('sentry.tasks.process_resource_change', default_retry_delay=(60 * 5), max_retries=5)\n@retry()\ndef process_resource_change(sender, instance_id, created):\n model = sender.__name__\n model = RESOURCE_RENAMES.get(model, model.lower())\n instance = sender.objects.get(id=instance_id)\n event = ('created' if created else 'updated')\n action = '{}.{}'.format(model, event)\n if (action not in ALLOWED_ACTIONS):\n return\n project = None\n if isinstance(instance, Group):\n project = instance.project\n if (not project):\n return\n servicehooks = ServiceHook.objects.filter(project_id=project.id)\n for servicehook in filter((lambda s: (action in s.events)), servicehooks):\n if (not servicehook.created_by_sentry_app):\n continue\n payload = app_platform_event(action, SentryAppInstallation.objects.get(id=servicehook.actor_id), serialize(instance))\n send_request(servicehook, payload, verify_ssl=True)","sub_path":"Data Set/bug-fixing-3/071c48c0703d3b4eb5218849da94660e722a43dc--fix.py","file_name":"071c48c0703d3b4eb5218849da94660e722a43dc--fix.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"551946386","text":"# -*- coding: utf-8 -*-\n\nimport tflearn\nimport tensorflow as tf\nimport pickle\nimport json\nfrom Helper import bow\n\n# # ------------------ CONVERSATION ---------------------\ndata_conversation = 
pickle.load(\n open(\"./training_conversation/training_data\", \"rb\"))\nwords_conversation = data_conversation['words']\nclasses_conversation = data_conversation['classes']\ntrain_x_conversation = data_conversation['train_x']\ntrain_y_conversation = data_conversation['train_y']\n\ntf.reset_default_graph()\nnet_conversation = tflearn.input_data(shape=[None, len(train_x_conversation[0])])\nnet_conversation = tflearn.fully_connected(net_conversation, 8)\nnet_conversation = tflearn.fully_connected(net_conversation, 8)\nnet_conversation = tflearn.fully_connected(\n net_conversation, len(train_y_conversation[0]), activation='softmax')\nnet_conversation = tflearn.regression(net_conversation, optimizer='adam',\n loss='categorical_crossentropy')\n\nmodel_conversation = tflearn.DNN(\n net_conversation, tensorboard_dir='./training_conversation/tflearn_logs')\n# ------------------ END CONVERSATION ---------------------\n\nmodel_conversation.load('./training_conversation/model.tflearn')\n\nERROR_THRESHOLD = 0.75\n\ndef classify(sentence):\n results = model_conversation.predict(\n [bow(sentence, words_conversation)])[0]\n results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]\n results.sort(key=lambda x: x[1], reverse=True)\n return_list = []\n for r in results:\n return_list.append((classes_conversation[r[0]], r[1]))\n return return_list\n","sub_path":"training_conversation/Conversation.py","file_name":"Conversation.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"172220203","text":"# 3\n# a = [[1, 2, 3, 4], ['a', 'b', 'c', 'd', 'e'], 'abcdefgh']\n# print(a[2][::-2])\n\n# 4\n# B\n# dict1 = {[1, 2, 3]: 'a', [2, 3, 4]: 'b'}\n# C\n# dict1 = {1: [1, 2, 3], 2: [1, 2, 3]}\n# print(dict1)\n\n# 5 以下代码有什么问题吗?\n# a = input('请输入数字')\n# print('%s是%s位数' % a, int(a))\n\n# 6\n# A\n# L = [for n in [1, 2, 3, 4, 5, 6, 7, 8, 9]]\n# L = [n for n in [1, 2, 3, 4, 5, 6, 7, 8, 9]]\n# 
print(L)\n# B\n# if (x > y): print(x)\n# C、\n# x = lambda a: a + 1\n# print(x)\n\n# 11.\n# C\n# a = lambda x: {\n# x + 1, x + 2\n# }\n# print(a(3))\n# B\n# li = [1, 3, 5]\n# print(li.pop())\n# print(li)\n\n# 12\n# dict1 = {1: [1, 2, 5], 2: [1, 2, 3]}\n# print(dict1.items())\n# print(list(dict1.keys()))\n# print(dict1.setdefault(2))\n\n# 13\n# print(range(10)[-1])\n\n# 17、6分\n# 打印一个空心菱形\n# 要求:行列可以手动input控制\n# ps:注意行和列的限制\n# 最小菱形\n# *\n# * *\n# *\n# 打印菱形\ndef printRhombus():\n try:\n # 输入行列\n row = int(input(\"请输入行:\"))\n col = int(input(\"请输入列:\"))\n # 空心菱形必须要满足:行列相等,为基位,大于3\n if (row == col and row % 2 != 0) and (row >= 3 and col >= 3):\n # 循环行列\n for i in range(row):\n # 输出第一个与最后一个\n if i == 0 or i == row - 1:\n print(\" \" * int((row / 2)), end=\"\")\n print(\"*\")\n\n # 输出菱形区域\n else:\n # 计算菱形外左边空格\n outSpace = abs(int((row / 2)) - i)\n print(\" \" * outSpace, end=\"\")\n print(\"*\", end=\"\")\n # 计算菱形内空格:总行数 - (左边空格 * 2) - 2\n innerSpace = row - (2 * outSpace) - 2\n print(\" \" * innerSpace, end=\"\")\n print(\"*\")\n pass\n\n else:\n print(\"空心菱形必须要满足:行列相等,为奇数,大于3\")\n\n except TypeError:\n print(\"请输入数字字符\")\n\n\n# printRhombus()\n\n# 18,随机验证码\n# import random\n#\n# codeSource = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n# while True:\n# # 使用random,选出6位验证码\n# # 使用join,将字符串列表转为字符串,输出\n# genCode = \"\".join(random.sample(codeSource, 6))\n# print(\"验证码:\" + genCode)\n# inputCode = input(\"请输入验证码:\")\n# if genCode.lower() == inputCode.lower():\n# print(\"验证通过\")\n# break\n# else:\n# print(\"验证码不正确,请重新输入\")\n# print()\n\n\n# 19,函数功能是:计算k以内(包括k)最大的10个(不足10个则是所有的)能被13或17整除的自然数之和\n# k = int(input(\"输入k:\"))\n# count = 0\n# result = 0\n# # 逆序去k以内的数\n# for num in range(k, 0, -1):\n# # 满足13或者17的数\n# if num % 13 == 0 or num % 17 == 0:\n# print(num)\n# # 求和\n# result += num\n# # 计算次数,满足十个退出循环\n# count += 1\n# if count >= 10:\n# break\n#\n# print(\"自然数之和:%s\" % result)\n\n# 20、6分\n# 20人围成一圈,第一个人开始从 2 报数,每次报到 5 的人淘汰,然后下一个人接着从 1 开始报,请问最后剩下的两人分别是?\n# 
PS:注意审题!\npeoples = [n for n in range(1, 21)]\nnum = 2\nindex = 0\nwhile len(peoples) > 2:\n if num == 5:\n print(peoples)\n peoples.pop(index)\n # 当前index被删除后,下一个人会自动填充到当前的index上面,\n # 所以当前被删除的index,就是下一个人,num=1\n num = 1\n\n # 当index增加一,,num也需要加一\n num += 1\n index += 1\n if index >= len(peoples):\n index = 0\n\nelse:\n print(peoples)\n\n# 21、6分\n# 创建一个列表,再给定一个值\n# 在该列表中移除所有和这个值相同的元素\n# 例如:list1 = [1,1,1,2,3,4,5,6,1,7,1]\n# val = 1\n# 结果:list1 被修改为 [2,3,4,5,6,7]\n# ps:在原列表中操作!不使用新的列表空间! 请选择合适的方法实现,注意细节!\n# vList = [\"x\", 10, \"c\", 9, 10, \"c\", \"aa\", \"b\", \"c\", 8, 10, \"10\"]\n# val = 10\n# # 倒序取列表值,避免remove移位\n# for i in range(len(vList) - 1, -1, -1):\n# tempCon = vList[i]\n# if tempCon == val:\n# vList.remove(tempCon)\n# print(vList)\n\n# 22、6分\n# 杨辉三角\n# 给一个非负整数 n,生成杨辉三角的前 n 行\n# ps:在杨辉三角中,每个数是它左上方和右上方的数的和。\nn = 10\nli = [] # 使用列表记录每一行数据,用于下一个索引获取\nfor num in range(0, n):\n # 第一行返回:1\n if num == 0:\n li.append([1])\n\n # 第二行返回:1 1\n elif num == 1:\n li.append([1, 1])\n\n # 其他:获取li中上一个索引内容用于求和计算\n else:\n # 上一个索引值\n lastIndex = num - 1\n # 临时列表用于存储\n tempList = [1]\n # 获取上一行内容,并且从第一个索引取值,相加\n for i in range(1, len(li[lastIndex])):\n # 下个求和内容\n nextValue = li[lastIndex][i - 1] + li[lastIndex][i]\n # 添加到列表中\n tempList.append(nextValue)\n\n # 添加最后一个内容\n tempList.append(1)\n # 添加到li中\n li.append(tempList)\n\n# 输出内容\n# for row in li:\n# for col in row:\n# print(\"%s \" % col, end=\"\")\n# print()\n\n# 23\n# try:\n# m = input(\"输入单词模式:\")\n# str1 = input(\"输入过滤内容,单词最好以空格分割:\")\n# # 分割输入内容\n# ls = str1.split(\" \")\n# # 分析模式,假设都是正确的\n# result = True\n# for i in range(1, len(m)):\n# # 判断内容是否跟单词模式是一样的\n# if (m[i] == m[i - 1]) == (ls[i] == ls[i - 1]):\n# pass\n# else:\n# # 不一样,说明不匹配,结束循环\n# result = False\n# break\n#\n# print(\"结果为:%s\" % result)\n# except IndexError:\n# print(\"输入内容有误\")\n\n# 24\n# try:\n# print(\"请输入考试感受,输入exit结束\")\n# cons = []\n# leastCount = 0\n# # 打开文件,判断是否输入了三点\n# with open(\"pro.txt\", \"r+\", encoding=\"utf-8\") as f:\n# cons = 
f.readlines()\n# print(cons)\n# leastCount = len(cons)\n#\n# # 循环输入内容,除非退出,否则不可以退出\n# while True:\n# inputCon = input()\n# if inputCon == \"exit\":\n# if leastCount < 3:\n# print(\"退出失败,不满足三点,请继续输入\")\n# else:\n# print(\"退出成功!\")\n# break\n#\n# else:\n# # 记录输入内容与记录输入点数\n# cons.append(inputCon + \"\\n\")\n# leastCount += 1\n#\n# # 写入文件内容\n# with open(\"pro.txt\", \"a+\", encoding=\"utf-8\") as f:\n# f.writelines(cons)\n#\n# except Exception:\n# print(\"文件操作异常\")\n\n# 25\n\n# # 人类\n# class People:\n# # 姓名\n# name = \"\"\n# # 性别\n# gender = \"\"\n# # 年龄\n# age = 0\n# # 国籍\n# region = \"\"\n#\n# # 构造方法\n# def __init__(self, name, gender, age, region):\n# self.name = name\n# self.gender = gender\n# self.age = age\n# self.region = region\n#\n# # 吃饭\n# def eat(self):\n# print(\"%s 在吃饭\" % self.name)\n#\n# # 睡觉\n# def sleep(self):\n# print(\"%s 在睡觉\" % self.name)\n#\n# # 工作\n# def work(self):\n# print(\"%s 在工作\" % self.name)\n#\n#\n# # 学生类\n# class Student(People):\n# # 学号\n# no = \"\"\n# # 学校\n# school = \"\"\n#\n# # 构造方法\n# def __init__(self, name, gender, age, region, no, school):\n# super().__init__(name, gender, age, region)\n# self.no = no\n# self.school = school\n#\n# # 重写工作\n# def work(self):\n# print(\"学生的工作是学习\")\n#\n#\n# # 干部类\n# class StuLeader(Student):\n# # 职务\n# duty = \"\"\n#\n# # 构造方法\n# def __init__(self, name, gender, age, region, no, school, duty):\n# super().__init__(name, gender, age, region, no, school)\n# self.duty = duty\n#\n# # 开会\n# def meeting(self):\n# print(\"学生干部的职责是%s,他们正在开会!\" % self.duty)\n#\n#\n# stuLeader = StuLeader(\"张三\", \"男\", 18, \"中国\", \"1001\", \"师范\", \"社团招聘\")\n# stuLeader.work()\n# stuLeader.meeting()\n","sub_path":"2019/AI/Python/PythonExercise/exam_14/TeamExamA_14.py","file_name":"TeamExamA_14.py","file_ext":"py","file_size_in_byte":8693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"440911920","text":"import sympy\nfrom sympy.geometry import Point,Segment\nimport 
copy\nimport random\nimport cairo\nimport operator\nfrom gi.repository import Gtk\nimport math\nfrom pygame.time import Clock\n\n\nclass Drone:\n\tdef __init__(self,x, y):\n\t\tself.pos = Point([x,y],evaluate=False)\n\t\tself.velocity = 5\n\t\tself.target = None\n\t\tself.wait = False\n\t\tself.reached = False\n\t\tself.clock = None\n\t\tself.v_x = 0\n\t\tself.v_y = 0\n\n\tdef move(self):\n\t\tself.v_x = int(self.velocity*math.sin(math.atan(Segment(self.pos,self.target).slope)))\n\t\tself.v_y = int(self.velocity*math.cos(math.atan(Segment(self.pos,self.target).slope)))\n\t\tself.clock = Clock()\n\t\t\n\tdef change_velocity(self,v):\n\t\tself.velocity = v\n\n\tdef update_pos(self,clock):\n\t\tif not self.wait:\n\t\t\tif abs(self.pos[0] - self.target[0]) > self.velocity:\n\t\t\t\tself.pos = self.pos.translate(int(self.v_x*(clock.get_time()-self.clock.get_time())/1000), 0)\n\n\t\t\tif abs(self.pos[1] - self.target[1]) > self.velocity:\n\t\t\t\tself.pos = self.pos.translate(0,int(self.v_y*(clock.get_time()-self.clock.get_time())/1000))\n\n\nclass Destination:\n\tdef __init__(self,x,y):\n\t\tself.pos = Point(x, y,evaluate=False)\n\t\tself.assigned = False\n\n\n\ndef check_collision(drone1, drone2):\n\tthreshold = 5\n\tp = Segment(drone1.pos,drone1.target).intersection(Segment(drone2.pos,drone2.target))\n\tif p:\n\t\tif p[0].distance(drone1.pos)<=threshold or p[0].distance(drone2.pos)<=threshold:\n\t\t\treturn True\n\treturn False\n\ndef main():\n\tcount = 10\n\tmax_x = 100\n\tmax_y = 100\n\tdrones = [Drone(random.randint(0, max_x), random.randint(0, max_y)) for i in range(0, count)]\n\tdestinations = [Destination(random.randint(0, max_x), random.randint(0, max_y)) for i in range(0, count)]\n\n#Assign the closest target to each drone\n\tfor drone in drones:\n\t\tmax = 2000\n\t\tmax_p = None\n\t\tfor dest in destinations:\n\t\t\tdistance = drone.pos.distance(dest.pos)\n\t\t\tif distance < max:\n\t\t\t\tmax = distance\n\t\t\t\tmax_p = copy.deepcopy(dest)\n\t\tdrone.target 
= max_p.pos\n\t\tmax_p.assigned = True\n\n\tfor drone in drones:\n\t\tprint(\"Drone with coordinate {} is assigned to {}\".format(drone.pos,drone.target))\n\n\tc = Clock()\n\tfor drone in drones:\n\t\tdrone.move()\n\twhile True:\n\t\tfor drone in drones:\n\t\t\tdrone.wait = False\n\t\t\tprint(drone.pos)\n\n\t\tcount = 0\n\t\tfor drone in drones:\n\t\t\tif drone.reached:\n\t\t\t\tcount+=1\n\t\tprint(\"{} drones have reached the target\".format(count))\n\n\t\tfor i,d in enumerate(drones):\n\t\t\tfor drone2 in drones[i+1:]:\n\t\t\t\tif check_collision(d, drone2):\n\t\t\t\t\td.wait = True\n\t\tc.tick()\n\t\tfor drone in drones:\n\t\t\tdrone.update_pos(c)\n\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"199156095","text":"from enum import Enum\nimport entorno as env\nimport storageManager.jsonMode as DBMS\nimport typeChecker.typeReference as TRef\nimport typeChecker.typeEnum as TEnum\nimport sqlErrors\nfrom reporteErrores.errorReport import ErrorReport\nfrom useDB.instanciaDB import DB_ACTUAL\n\nclass IS(Enum):\n TRUE = 1\n FALSE = 2\n NULL = 3\n DISTINCT = 4\n UNKNOWN = 5\n \nclass ALTER_TABLE_DROP(Enum):\n COLUMN = 1\n CONSTRAINT = 2\n\nclass ALTER_TABLE_ADD(Enum):\n COLUMN = 1\n UNIQUE = 2\n FOREIGN_KEY = 3\n MULTI_FOREIGN_KEY = 3\n CHECKS = 4\n\nclass CONSTRAINT_FIELD(Enum):\n UNIQUE = 1\n PRIMARY_KEY = 2\n NULL = 3\n\nclass TYPE_COLUMN(Enum):\n SMALLINT = 'SMALLINT'\n BIGINT = 'BIGINT'\n INTEGER = 'INTEGER'\n DECIMAL = 'DECIMAL'\n NUMERIC = 'NUMERIC'\n REAL = 'REAL'\n DOUBLE_PRECISION = 'DOUBLE_PRECISION'\n MONEY = 'MONEY'\n CHAR = 'CHAR'\n VARCHAR = 'VARCHAR'\n TEXT = 'TEXT'\n BOOLEAN = 'BOOLEAN'\n # No implementadas\n TIME = 'TIME'\n TIMESTAMP = 'TIMESTAMP'\n DATE = 'DATE'\n INTERVAL = 'INTERVAL'\n\n# ------------------------ DDL ----------------------------\n# 
Instruccion (Abstracta)\nclass Instruccion:\n def ejecutar(self,ts):\n pass\n\n def dibujar(self):\n return 'Sin implementar'\n\nclass CreateType(Instruccion):\n def __init__(self, nombre, lista):\n self.nombre = nombre\n self.lista = lista\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"CREATE \" + self.nombre + \" TYPE as ENUM\\\" ];\"\n for item in self.lista:\n nodo += \"\\n\" + identificador + \"-> \" + str(hash(item))\n nodo += '\\n' + str(hash(item)) + \"[ label = \\\" \" + item + \" \\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n lista = list()\n for item in self.lista:\n if not item in lista:\n lista.append(item)\n if not TEnum.insertEnum(self.nombre, lista):\n return ErrorReport('Semantico', 'Invalid Enum Declaration', 0)\n return 'Enum \\'' + self.nombre + '\\' succesful created'\n\n# Create Database\nclass CreateDatabase(Instruccion):\n def __init__(self, nombre, reemplazo = False, existencia = False, duenio = None, modo = 0):\n self.nombre = nombre\n self.reemplazo = reemplazo\n self.existencia = existencia\n self.duenio = duenio\n self.modo = modo\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador\n\n if self.reemplazo:\n nodo += \"[ label = \\\" CREATE OR REPLACE DATABASE \" + self.nombre + \" \\\" ];\"\n else:\n nodo += \"[ label = \\\"CREATE DATABASE \" + self.nombre + \"\\\" ];\"\n\n if self.existencia:\n nodo += \"\\nEXISTS\" + identificador + \"[ label = \\\"IF EXISTS\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> EXISTS\" + identificador + \";\"\n\n if self.duenio:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.duenio)) + \";\"\n nodo += \"\\n\" + str(hash(self.duenio)) + \"[ label = \\\"OWNER: \" + self.duenio + \"\\\" ];\"\n\n if self.modo > 0:\n nodo += \"\\n\" + str(hash(self.modo)) + \"[ label = \\\"MODE: \" + str(self.modo) + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.modo)) + 
\";\"\n\n return nodo\n\n def ejecutar(self,ts):\n if TRef.databaseExist(self.nombre):\n if not self.existencia:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_database), 0)\n\n exito = 0\n databases = DBMS.showDatabases()\n\n if self.reemplazo:\n if self.nombre in databases: #Eliminamos si existe \n DBMS.dropDatabase(self.nombre)\n TRef.dropDatabase(self.nombre)\n exito = DBMS.createDatabase(self.nombre)\n elif self.existencia:\n if not self.nombre in databases:\n exito = DBMS.createDatabase(self.nombre)\n else:\n exito = DBMS.createDatabase(self.nombre)\n\n #Si tenemos exito se crea en el type reference\n if exito == 1:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_schema_definition), 0)\n elif exito == 2:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_database), 0)\n \n TRef.createDatabase(self.nombre, self.modo)\n return \"Database '\" + self.nombre + \"' succesful created\"\n\n# Create Table\nclass CreateTable(Instruccion):\n def __init__(self, nombre, columnas, herencia = None):\n self.nombre = nombre\n self.columnas = columnas\n self.herencia = herencia\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"CREATE TABLE \" + self.nombre + \"\\\" ];\"\n nodo += \"\\n//COLUMNAS DE LA TABLA\" + identificador + \"\\n\"\n\n for col in self.columnas:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(col)) + \";\"\n nodo += col.dibujar()\n\n if self.herencia:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.herencia)) + \";\"\n nodo += \"\\n\" + str(hash(self.herencia)) + \"[ label = \\\"\" + self.herencia + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n if DB_ACTUAL.getName() == None:\n return ErrorReport('Semantico', 'Not defined database 
to used', 0)\n elif not TRef.databaseExist(DB_ACTUAL.getName()):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_invalid_schema_name.invalid_schema_name), 0)\n elif TRef.tableExist(DB_ACTUAL.getName(), self.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_table), 0)\n\n # Aux de comprobacion y almacenamiento\n columns = dict()\n auxFK = list()\n auxPK = list()\n auxUnique = list()\n auxCheck = list()\n\n #Aux check field\n auxCheckF = list()\n\n # Proceso de las distintas columnas recibidas en la consulta\n for col in self.columnas:\n if isinstance(col, CreateField): #Columna nueva\n #Obtenemos cada columna y corroboramos que tengan nombres distintos\n if col.nombre in columns:\n return 1\n else: \n colSint = col.ejecutar(ts)\n if isinstance(colSint, ErrorReport):\n return colSint\n\n if isinstance(colSint, tuple):\n columns[col.nombre] = colSint[0]\n auxCheckF.append(colSint[1])\n else:\n columns[col.nombre] = colSint\n elif isinstance(col, ConstraintMultipleFields): #Multiples Constraints\n if col.tipo == CONSTRAINT_FIELD.UNIQUE:\n auxUnique.extend(col.ejecutar(ts))\n else:\n auxPK.extend(col.ejecutar(ts))\n elif isinstance(col, ForeignKeyMultipleFields): #Multiples Llaves Foraneas\n colSint = col.ejecutar(ts)\n if isinstance(colSint, ErrorReport):\n return colSint\n auxFK.extend(colSint)\n elif isinstance(col, CheckMultipleFields): #Multiple chequeos\n auxCheck.extend(col.ejecutar(ts))\n else:\n return col\n\n if len(auxCheckF) != 0:\n tsLocal = env.Entorno()\n tsLocal = env.toEnviroment(columns, tsLocal)\n \n for chequeoS in auxCheckF:\n if chequeoS[0].evaluacionCheck(tsLocal) != 0:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n columns[chequeoS[2]]['Check'] = chequeoS[0].getExpresionToString()\n columns[chequeoS[2]]['CheckConst'] = 
chequeoS[1]\n \n #Modificamos los valores dependiendo de las columnas multiples\n # Primary Key\n for pk in auxPK:\n # Se verifica que cada constraint haga referencia a un campo, de lo contrario será invalido\n if not pk in columns:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n columns[pk]['PK'] = True\n\n # Foreign Key\n for fk in auxFK:\n # Se verifica que cada constraint haga referencia a un campo, de lo contrario será invalido\n if not fk[0] in columns:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n columns[fk[0]]['FK'] = True\n columns[fk[0]]['References'] = {'Table':fk[1],'Field':fk[0]}\n\n #Check\n if len(auxCheck) != 0:\n tsLocal = env.Entorno()\n tsLocal = env.toEnviroment(columns, tsLocal)\n\n for chequeo in auxCheck:\n # Se verifica que cada constraint haga referencia a un campo, de lo contrario será invalido\n if not chequeo[0] in columns:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n if chequeo[1].evaluacionCheck(tsLocal) != 0:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n columns[chequeo[0]]['Check'] = chequeo[1].getExpresionToString()\n # Unique\n for unico in auxUnique:\n # Se verifica que cada constraint haga referencia a un campo, de lo contrario será invalido\n if not unico in columns:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n columns[unico]['Unique'] = True\n \n #--------- Herencia\n if self.herencia:\n if not TRef.tableExist(DB_ACTUAL.getName(), self.herencia):\n return ErrorReport('Semantico', 
sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_table_not_found), 0)\n else:\n colsPadre = TRef.getColumns(DB_ACTUAL.getName(), self.herencia)\n for col in colsPadre:\n # Verificamos que no existan columnas repetidas con el padre, ya que no existe el polimorfismo de campos\n if col in columns:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_column), 0)\n # De no existir columnas duplicadas, se agregan las columnas a la tabla\n columns.update(colsPadre)\n\n auxIndexPK = list()\n contador = 0\n for col in columns:\n if columns[col]['PK']:\n auxIndexPK.append(contador)\n contador += 1\n\n # Ahora procedemos a crear\n result = DBMS.createTable(DB_ACTUAL.getName(), self.nombre, len(columns))\n\n if result == 0:\n if len(auxIndexPK) > 0:\n DBMS.alterAddPK(DB_ACTUAL.getName(),self.nombre, auxIndexPK)\n TRef.createTable(DB_ACTUAL.getName(), self.nombre, columns, self.herencia)\n return result\n\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_table), 0)\n \n# Create Field\nclass CreateField(Instruccion):\n def __init__(self, nombre, tipo, atributos = None):\n self.nombre = nombre\n self.tipo = tipo\n self.atributos = atributos\n\n def dibujar(self):\n identificador = str(hash(self))\n\n aux = self.tipo\n if isinstance(self.tipo, tuple):\n aux = self.tipo[0].value\n if isinstance(self.tipo[1], tuple):\n aux += \"(\" + str(self.tipo[1][0]) + \",\" + str(self.tipo[1][1]) + \")\"\n else:\n aux += \"(\" + str(self.tipo[1]) + \")\"\n elif isinstance(self.tipo, str):\n pass\n else:\n aux = self.tipo.value\n nodo = \"\\n\" + identificador + \"[ label = \\\"NEW FIELD \" + self.nombre + \" \" + aux + \"\\\" ];\"\n nodo += \"\\n//ATRIBUTOS DE CREAR UN CAMPO \" + identificador + \"\\n\"\n\n if self.atributos:\n for atributo in self.atributos:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(atributo))\n 
nodo += atributo.dibujar()\n\n nodo += \"\\n//FIN DE ATRIBUTOS DE CREAR CAMPO \" + identificador + \"\\n\"\n\n return nodo\n\n def ejecutar(self, ts):\n #Guardamos el tipo y largo si es necesario\n tipo = None\n largo = None\n\n if isinstance(self.tipo, tuple):\n tipo = self.tipo[0].value\n if isinstance(self.tipo[1],tuple):\n largo = {'Precision': self.tipo[1][0],'Scale': self.tipo[1][1]}\n else:\n if self.tipo[1] < 1:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.numeric_value_out_of_range), 0)\n largo = self.tipo[1]\n elif isinstance(self.tipo, str):\n #Comprobamos que el type a elegir exista\n if not TEnum.enumExist(self.tipo):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.indeterminate_datatype), 0)\n tipo = self.tipo\n else:\n tipo = self.tipo.value\n \n #Bajo la logica de que puede venir parametros repetidos, tomaremos el ultimo en venir como valido\n atributos = dict(\n {\n \"Type\": tipo,\n \"Lenght\": largo,\n \"Default\": None,\n \"Null\": True,\n \"PK\": False,\n \"PKConst\": None,\n \"FK\": False,\n \"References\": None,\n \"FKConst\": None,\n \"Unique\": False,\n \"UniqueConst\": None,\n \"Check\": None,\n \"CheckConst\": None\n }\n )\n\n checks = None\n if self.atributos:\n for atr in self.atributos:\n if isinstance(atr, ConstraintField):\n if atr.tipo == CONSTRAINT_FIELD.PRIMARY_KEY:\n atributos['PK'] = True\n\n elif atr.tipo == CONSTRAINT_FIELD.UNIQUE:\n atributos['Unique'] = True\n atributos['UniqueConst'] = atr.ejecutar(ts)\n\n elif atr.tipo == CONSTRAINT_FIELD.NULL:\n atributos['Null'] = atr.ejecutar(ts)\n elif isinstance(atr, ForeignKeyField):\n fk = atr.ejecutar(ts)\n if isinstance(fk, ErrorReport):\n return fk\n else:\n colFK = TRef.getColumns(DB_ACTUAL.getName(), fk['Table'])[fk['Field']]\n if colFK['Type'] != tipo:\n return ErrorReport('Semantico', 
sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_data_type), 0)\n atributos['References'] = fk\n atributos['FK'] = True\n elif isinstance(atr, DefaultField):\n try:\n dflt = atr.ejecutar(ts).val\n #Chequeamos el default\n if (tipo == 'SMALLINT'\\\n or tipo == 'BIGINT' \\\n or tipo == 'INTEGER') and not isinstance(dflt, int):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n elif (tipo == 'DECIMAL' \\\n or tipo == 'NUMERIC' \\\n or tipo == 'REAL' \\\n or tipo == 'DOUBLE_PRECISION' \\\n or tipo == 'MONEY') and not (isinstance(dflt, float) or isinstance(dflt, int)):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n elif (tipo == 'CHAR' \\\n or tipo == 'VARCHAR' \\\n or tipo == 'TEXT' \\\n or tipo == 'ENUM') and not isinstance(dflt, str):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n elif tipo == 'BOOLEAN' and not isinstance(dflt, bool):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n elif tipo == 'DATE' and not isinstance(dflt, str):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n #Guardamos el default\n atributos['Default'] = dflt\n except:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.datatype_mismatch),0)\n elif isinstance(atr,CheckField):\n cheq = atr.ejecutar(ts)\n checks = (cheq[1],cheq[0],self.nombre)\n else:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_table_definition), 0)\n\n\n if isinstance(checks, tuple):\n return 
(atributos, checks)\n\n return atributos\n\n# Default Field\nclass DefaultField(Instruccion):\n def __init__(self, valor):\n self.valor = valor\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"DEFAULT: \" + str(self.valor.getExpresionToString()) + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n return self.valor.ejecutar(None)\n\n# Check Field\nclass CheckField(Instruccion):\n def __init__(self, condiciones, nombre = None):\n self.condiciones = condiciones\n self.nombre = nombre\n \n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"CHECK\\\" ];\"\n\n if self.nombre:\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.nombre + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.condiciones)) + \";\"\n nodo += \"\\n\" + str(hash(self.condiciones)) + \"[ label =\\\"\" + self.condiciones.getExpresionToString() + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n return (self.nombre,self.condiciones)\n\n# Constraint Field\nclass ConstraintField(Instruccion):\n def __init__(self, tipo, valor = None):\n self.tipo = tipo\n self.valor = valor\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador\n\n if self.tipo == CONSTRAINT_FIELD.UNIQUE:\n nodo += \"[ label = \\\"UNIQUE\\\" ];\"\n elif self.tipo == CONSTRAINT_FIELD.NULL:\n nodo += \"[ label = \\\"NULLS\\\" ];\"\n else:\n nodo += \"[ label = \\\"PRIMARY KEY\\\" ];\"\n\n if self.valor:\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + str(self.valor) + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n\n return nodo\n \n def ejecutar(self, ts):\n return self.valor\n\n#ForeignKey Field\nclass ForeignKeyField(Instruccion):\n def __init__(self, tabla, campo):\n self.tabla = tabla\n self.campo = 
campo\n\n def dibujar(self):\n identificador = str(hash(self))\n nodo = \"\\n\" + identificador + \"[ label = \\\"FOREIGN KEY\\\" ];\"\n\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.tabla)) + \";\"\n nodo += \"\\n\" + str(hash(self.tabla)) + \"[ label = \\\"TABLE: \" + self.tabla + \"\\\" ];\"\n\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.campo)) + \";\"\n nodo += \"\\n\" + str(hash(self.campo)) + \"[ label = \\\"FIELD: \" + self.campo + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n if TRef.columnExist(DB_ACTUAL.getName(), self.tabla, self.campo):\n return {'Table': self.tabla, 'Field': self.campo}\n else:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_column_name_not_found), 0)\n\n# Constraint Multiple Fields: Comprende tanto Unique como Primary Key\nclass ConstraintMultipleFields(Instruccion):\n def __init__(self, tipo, lista):\n self.tipo = tipo\n self.lista = lista\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador\n\n if self.tipo == CONSTRAINT_FIELD.UNIQUE:\n nodo += \"[ label = \\\"UNIQUE MULTIPLE\\\" ];\"\n else:\n nodo += \"[ label = \\\"PRIMARY KEY MULTIPLE\\\" ];\"\n\n for item in self.lista:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(item)) + \";\"\n nodo += \"\\n\" + str(hash(item)) + \"[ label = \\\"\" + item + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n return self.lista\n \n# Foreign Key Multiple Fields\nclass ForeignKeyMultipleFields(Instruccion):\n def __init__(self, listaPropia, otraTabla, listaOtraTabla):\n self.lista = listaPropia\n self.otraTabla = otraTabla\n self.listaOtraTabla = listaOtraTabla\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"FOREIGN KEY MULTIPLE\\\" ];\"\n\n nodo += \"\\nLOCAL\" + identificador + \"[ label = \\\"LOCAL FIELDS\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> LOCAL\" + identificador + 
\";\"\n\n for item in self.lista:\n nodo += \"\\nLOCAL\" + identificador + \" -> \" + str(hash(item)) + \";\"\n nodo += \"\\n\" + str(hash(item)) + \"[ label = \\\"\" + item +\" \\\" ];\"\n\n nodo += \"\\nFOREIGN\" + identificador + \"[ label = \\\"\" + self.otraTabla + \" FIELDS\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> FOREIGN\" + identificador + \";\"\n\n for item in self.listaOtraTabla:\n nodo += \"\\nFOREIGN\" + identificador + \" -> \" + str(hash(item)) + \";\"\n nodo += \"\\n\" + str(hash(item)) + \"[ label = \\\"\" + item +\" \\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n if not TRef.databaseExist(DB_ACTUAL.getName()):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_schema_not_found), 0)\n elif not TRef.tableExist(DB_ACTUAL.getName(),self.otraTabla):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_table_not_found), 0) \n\n #Comparamos que la misma cantidad de ids propios sea igual a la foranea\n if len(self.lista) != len(self.listaOtraTabla):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.data_exception), 0)\n\n for col in self.listaOtraTabla:\n if not TRef.columnExist(DB_ACTUAL.getName(), self.otraTabla, col):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_column_number), 0)\n\n listaSin = list()\n for i in range(len(self.lista)):\n listaSin.append( (self.lista[i], self.otraTabla, self.listaOtraTabla[i]) )\n\n return listaSin\n\n# Check Multiple Fields\nclass CheckMultipleFields(Instruccion):\n def __init__(self, campo, condiciones):\n self.campo = campo\n self.condiciones = condiciones\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"CHECK \" + self.campo + \"\\\" ];\"\n\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.condiciones)) + \";\"\n nodo += \"\\n\" + 
str(hash(self.condiciones)) + \"[ label = \\\"\" + self.condiciones.getExpresionToString() + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n return (self.campo, self.condiciones)\n\n# Alter Database\nclass AlterDatabase(Instruccion):\n def __init__(self, nombre, accion):\n self.nombre = nombre\n self.accion = accion\n\n def dibujar(self):\n identificador = str(hash(self))\n \n nodo = \"\\n\" + identificador + \"[ label = \\\"ALTER DATABASE\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.accion)) + \";\"\n \n if self.accion[0] == 'OWNER':\n nodo += \"\\n\" + str(hash(self.accion)) + \"[ label = \\\"OWNER: \" + self.accion + \"\\\" ];\"\n else:\n nodo += \"\\n\" + str(hash(self.accion)) + \"[ label = \\\"NAME: \" + self.accion + \"\\\" ];\"\n\n return nodo\n \n def ejecutar(self, ts):\n if not TRef.databaseExist(self.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_invalid_schema_name.invalid_schema_name), 0)\n\n if self.accion[0] == 'OWNER':\n pass\n else:\n #Comprobamos que no exista una base de datos con ese nombre\n if TRef.databaseExist(self.accion[1]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_database), 0)\n DBMS.alterDatabase(self.nombre, self.accion[1])\n TRef.alterDatabase(self.nombre, self.accion[1])\n return 'Successful alter database ' + self.nombre\n\n# Alter Table\nclass AlterTable(Instruccion):\n def __init__(self, tabla, accion):\n self.tabla = tabla\n self.accion = accion\n\n def dibujar(self):\n identificador = str(hash(self))\n \n nodo = \"\\n\" + identificador + \"[ label = \\\"ALTER TABLE\\\" ];\"\n\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.tabla + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n\n if isinstance(self.accion, list):\n for item in self.accion:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(item)) + 
\";\"\n nodo += item.dibujar()\n else:\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.accion)) + \";\"\n nodo += self.accion.dibujar()\n\n return nodo\n \n def ejecutar(self, ts):\n if DB_ACTUAL.getName() == None:\n return ErrorReport('Semantico', 'Not defined database to used', 0)\n elif not TRef.databaseExist(DB_ACTUAL.getName()):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_invalid_schema_name.invalid_schema_name), 0)\n elif not TRef.tableExist(DB_ACTUAL.getName(), self.tabla):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.undefined_table), 0)\n\n if isinstance(self.accion, list):\n for subaccion in self.accion:\n sint = subaccion.ejecutar(ts)\n #Si es un error, solo se retorna\n if isinstance(sint, ErrorReport):\n return sint\n\n if not TRef.columnExist(DB_ACTUAL.getName(),self.tabla, subaccion.campo):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.undefined_column), 0)\n \n if subaccion.cantidad:\n sint = subaccion.ejecutar(ts)\n if isinstance(sint, int):\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Type', TYPE_COLUMN.VARCHAR.value)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Lenght', sint)\n elif isinstance(sint, tuple):\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Type', sint[0].value)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Lenght', sint[1])\n else:\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Type', sint)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Lenght', None)\n else:\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, subaccion.campo, 'Null', False)\n elif isinstance(self.accion, AlterTableDrop):\n if self.accion.tipo == ALTER_TABLE_DROP.COLUMN:\n sint = self.accion.ejecutar(ts)\n #Comprobamos la existencia del 
campo\n if not TRef.columnExist(DB_ACTUAL.getName(),self.tabla,self.accion.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.undefined_column), 0)\n dropField = TRef.alterDropColumn(DB_ACTUAL.getName(), self.tabla, sint)\n if dropField == 1:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.data_exception), 0)\n elif dropField == 4:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n elif dropField == 6:\n return ErrorReport('Semantico', 'Error: A table cannot be empty', 0)\n else:\n if not TRef.constraintExist(DB_ACTUAL.getName(),self.accion.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n colPres = TRef.getConstraint(DB_ACTUAL.getName(),self.tabla, self.accion.nombre)\n if not isinstance(colPres, tuple):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.data_exception), 0)\n\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], colPres[1], None)\n if colPres[1] == 'PKConst':\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], 'PK', False)\n DBMS.alterDropPK(DB_ACTUAL.getName(), self.tabla)\n DBMS.alterAddPK(DB_ACTUAL.getName(), self.tabla, TRef.getIndexPK(DB_ACTUAL.getName(), self.tabla))\n elif colPres[1] == 'PKConst':\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], 'FK', False)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], 'References', None)\n elif colPres[1] == 'UniqueConst':\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], 'Unique', False)\n else:\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, colPres[0], 'Check', None)\n elif isinstance(self.accion, AlterTableAdd):\n colSint = self.accion.ejecutar(ts)\n if 
isinstance(colSint, ErrorReport):\n return colSint\n\n if self.accion.tipo == ALTER_TABLE_ADD.COLUMN:\n if TRef.columnExist(DB_ACTUAL.getName(), self.tabla, self.accion.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.duplicate_column), 0)\n tipo = None\n largo = None\n\n if isinstance(self.accion.accion, tuple):\n tipo = self.accion.accion[0].value\n if isinstance(self.tipo[1],tuple):\n largo = {'Precision': self.accion.accion[1][0],'Scale': self.accion.accion[1][1]}\n else:\n largo = self.accion.accion[1]\n elif isinstance(self.accion.accion, str):\n tipo = self.accion.accion\n else:\n tipo = self.accion.accion.value\n\n atributos = dict(\n {\n \"Type\": tipo,\n \"Lenght\": largo,\n \"Default\": None,\n \"Null\": True,\n \"PK\": False,\n \"PKConst\": None,\n \"FK\": False,\n \"References\": None,\n \"FKConst\": None,\n \"Unique\": False,\n \"UniqueConst\": None,\n \"Check\": None,\n \"CheckConst\": None\n }\n )\n\n TRef.alterAddColumn(DB_ACTUAL.getName(), self.tabla, self.accion.nombre, atributos)\n DBMS.alterAddColumn(DB_ACTUAL.getName(), self.tabla, None)\n elif self.accion.tipo == ALTER_TABLE_ADD.FOREIGN_KEY:\n \n\n if not TRef.columnExist(DB_ACTUAL.getName(),self.tabla,colSint[0]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference),0)\n elif TRef.getColumns(DB_ACTUAL.getName(),self.tabla)[colSint[0]]['Type'] != TRef.getColumns(DB_ACTUAL.getName(),colSint[1])[colSint[2]]['Type']:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_data_type), 0)\n TRef.alterAddFK(DB_ACTUAL.getName(),self.tabla,colSint[0],{'Table':colSint[1],'Field':colSint[2]})\n TRef.alterField(DB_ACTUAL.getName(),self.tabla,colSint[0],'FKConst',colSint[3])\n elif self.accion.tipo == ALTER_TABLE_ADD.MULTI_FOREIGN_KEY:\n # Procesamos por columna\n for i in 
range(len(colSint)):\n if not TRef.columnExist(DB_ACTUAL.getName(),self.tabla,colSint[i][0]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference),0)\n elif TRef.getColumns(DB_ACTUAL.getName(),self.tabla)[colSint[i][0]]['Type'] != TRef.getColumns(DB_ACTUAL.getName(),colSint[i][1])[colSint[i][2]]['Type']:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_data_type), 0)\n TRef.alterAddFK(DB_ACTUAL.getName(),self.tabla,colSint[i][0],{'Table':colSint[i][1],'Field':colSint[i][2]})\n elif self.accion.tipo == ALTER_TABLE_ADD.CHECKS:\n auxCols = TRef.getColumns(DB_ACTUAL.getName(),self.tabla)\n if not colSint[0] in auxCols:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n auxCols[colSint[0]]['Check'] = colSint[0] + ' != ' + colSint[1]\n else:\n if not TRef.columnExist(DB_ACTUAL.getName(),self.tabla,self.accion.nombre):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.invalid_column_reference), 0)\n elif TRef.constraintExist(DB_ACTUAL.getName(),self.accion.accion):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n elif TRef.getAttribute(DB_ACTUAL.getName(),self.tabla,self.accion.nombre, 'UniqueConst') != None:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, self.accion.nombre, 'UniqueConst', self.accion.accion)\n TRef.alterField(DB_ACTUAL.getName(), self.tabla, self.accion.nombre, 'Unique', True)\n\n return 'Alter table complete' \n\n# Alter Field: Cambia al tipo varchar o cambia ser nulo\nclass 
AlterField(Instruccion):\n def __init__(self, campo, cantidad = None):\n self.campo = campo\n self.cantidad = cantidad\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador \n\n if self.cantidad:\n nodo += \"[ label = \\\"ALTER COLUMN \" + self.campo + \" TYPE\\\" ];\"\n\n nodo += \"\\nTYPE\" + identificador + \"[ label = \\\"VARCHAR(\" + str(self.cantidad) + \")\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> TYPE\" + identificador + \";\\n\"\n else:\n nodo += \"[ label = \\\"ALTER COLUMN \" + self.campo + \" SET\\\" ];\"\n\n nodo += \"\\nVALUE\" + identificador + \"[ label = \\\"NOT NULL\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> VALUE\" + identificador + \";\\n\"\n\n return nodo\n \n def ejecutar(self, ts):\n # Verificar si existe la columna\n if self.cantidad:\n if isinstance(self.cantidad, int):\n if self.cantidad < 0:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.numeric_value_out_of_range),0)\n return self.cantidad\n elif isinstance(self.cantidad, tuple):\n return self.cantidad\n elif isinstance(self.cantidad, str):\n return self.cantidad\n else:\n return self.cantidad.value\n \n else:\n return False\n\n# Alter Table Drop: Encapsula tanto constraints como columna\nclass AlterTableDrop(Instruccion):\n def __init__(self, nombre, tipo):\n self.nombre = nombre\n self.tipo = tipo\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador\n\n if self.tipo == ALTER_TABLE_DROP.CONSTRAINT:\n nodo += \"[ label = \\\"DROP CONSTRAINT\\\" ];\"\n else:\n nodo += \"[ label = \\\"DROP COLUMN\\\" ];\"\n\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.nombre + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\\n\"\n\n return nodo \n\n def ejecutar(self, ts):\n return self.nombre\n\n# Alter add \nclass AlterTableAdd(Instruccion):\n def __init__(self, nombre, tipo, accion):\n self.nombre = nombre\n 
self.tipo = tipo\n self.accion = accion\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador \n\n if self.tipo == ALTER_TABLE_ADD.UNIQUE:\n nodo += \"[ label = \\\"ADD UNIQUE \" + self.nombre + \"\\\" ];\"\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.nombre + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n nodo += \"\\nID\" + identificador + \"[ label = \\\"\" + self.accion + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> ID\" + identificador + \";\\n\"\n elif self.tipo == ALTER_TABLE_ADD.FOREIGN_KEY:\n nodo += \"[ label = \\\"ADD CONSTRAINT \" + self.nombre + \" FOREIGN KEY\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.accion[0])) +\"\\n\"\n nodo += \"\\n\" + str(hash(self.accion[0])) + \"[ label = \\\"\" + self.accion[0] + \".\" + self.accion[1] + \"\\\" ]\"\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(self.accion[2])) +\"\\n\"\n nodo += \"\\n\" + str(hash(self.accion[2])) + \"[ label = \\\"CONSTRAINT: \" + self.accion[2] + \"\\\" ]\"\n elif self.tipo == ALTER_TABLE_ADD.MULTI_FOREIGN_KEY:\n nodo += \"[ label = \\\"ADD FOREIGN KEY\\\" ];\"\n for local in self.nombre:\n nodo += \"\\n\" + str(hash(local)) + \"[ label =\\\"\" + local + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> \" + str(hash(local)) + \";\"\n nodo += \"\\nTABLA\" + identificador + \"[ label = \\\"\" + self.accion[0] + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> TABLA\" + identificador + \";\"\n for foraneo in self.accion[1]:\n nodo += \"\\n\" + str(hash(foraneo)) + \"[ label =\\\"\" + foraneo + \"\\\" ];\"\n nodo += \"\\nTABLA\" + identificador + \" -> \" + str(hash(foraneo)) + \";\"\n elif self.tipo == ALTER_TABLE_ADD.CHECKS:\n nodo += \"[ label = \\\"ADD CHECKS\\\" ]\"\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.nombre + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n nodo += 
\"\\nACTION\" + identificador + \"[ label = \\\"\" + self.accion + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> ACTION\" + identificador + \";\\n\"\n else:\n aux = self.accion\n if isinstance(self.accion, tuple):\n aux = self.accion[0].value\n if isinstance(self.accion[1], tuple):\n aux += \"(\" + str(self.accion[1][0]) + \",\" + str(self.accion[1][1]) + \")\"\n else:\n aux += \"(\" + str(self.accion[1]) + \")\"\n elif isinstance(self.accion, str):\n pass\n else:\n aux = self.accion.value\n nodo += \"[ label = \\\"ADD COLUMN \" + self.nombre + \" \" + aux + \"\\\" ];\"\n return nodo\n\n def ejecutar(self, ts):\n if self.tipo == ALTER_TABLE_ADD.FOREIGN_KEY:\n if TRef.constraintExist(DB_ACTUAL.getName(),self.accion[2]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_integrity_constraint_violation.integrity_constraint_violation), 0) \n if not TRef.tableExist(DB_ACTUAL.getName(),self.accion[0]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_table_not_found), 0) \n if not TRef.columnExist(DB_ACTUAL.getName(), self.accion[0], self.accion[1]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_column_number), 0)\n return (self.nombre,self.accion[0],self.accion[1],self.accion[2])\n elif self.tipo == ALTER_TABLE_ADD.MULTI_FOREIGN_KEY:\n if not TRef.tableExist(DB_ACTUAL.getName(),self.accion[0]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_table_not_found), 0) \n\n #Comparamos que la misma cantidad de ids propios sea igual a la foranea\n if len(self.nombre) != len(self.accion[1]):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.data_exception), 0)\n\n for col in self.accion[1]:\n if not TRef.columnExist(DB_ACTUAL.getName(), self.accion[0], col):\n return ErrorReport('Semantico', 
sqlErrors.sqlErrorToString(sqlErrors.sql_error_fdw_error.fdw_invalid_column_number), 0)\n\n listaSin = list()\n for i in range(len(self.nombre)):\n listaSin.append( (self.nombre[i], self.accion[0], self.accion[1][i]) )\n\n return listaSin\n elif self.tipo == ALTER_TABLE_ADD.COLUMN:\n if isinstance(self.accion, tuple):\n if self.accion[1] < 1:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_data_exception.numeric_value_out_of_range), 0)\n elif isinstance(self.accion, str):\n #Comprobamos que el type a elegir exista\n if not TEnum.enumExist(self.accion):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.indeterminate_datatype), 0)\n return (self.nombre, self.accion)\n# Show Database\nclass ShowDatabase(Instruccion):\n def __init__(self, like = None):\n self.like = like\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"SHOW DATABASE\\\" ];\"\n nodo += \"\\nNAME\" + identificador + \"[ label = \\\"\" + self.db + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> NAME\" + identificador + \";\"\n if self.like:\n nodo += \"\\nLIKE\" + identificador + \"[ label = \\\"\" + self.like + \"\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> LIKE\" + identificador + \";\"\n return nodo\n\n def ejecutar(self, ts):\n display = 'Databases\\n---------------------\\n'\n databases = TRef.showDatabases()\n\n for db in databases:\n display += db + '\\n'\n\n return display\n\n# Drop Database\nclass DropDatabase(Instruccion):\n def __init__(self, db, existencia = False):\n self.db = db\n self.existencia = existencia\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"DROP DATABASE \" + self.db + \"\\\" ];\"\n if self.existencia:\n nodo += \"\\nLIKE\" + identificador + \"[ label = \\\"IF EXISTS\\\" ];\"\n nodo += \"\\n\" + identificador + \" -> LIKE\" + identificador + 
\";\"\n return nodo\n\n def ejecutar(self, ts):\n if not TRef.databaseExist(self.db):\n if self.existencia:\n return \"Drop Database: Database doesn't exist\"\n else:\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_invalid_schema_name.invalid_schema_name), 0)\n\n DBMS.dropDatabase(self.db)\n TRef.dropDatabase(self.db)\n\n return 'Successful database dropped'\n\nclass DropTable(Instruccion):\n def __init__(self, tabla):\n self.tabla = tabla\n\n def dibujar(self):\n identificador = str(hash(self))\n\n nodo = \"\\n\" + identificador + \"[ label = \\\"DROP TABLE \" + self.tabla + \"\\\" ];\"\n\n return nodo\n\n def ejecutar(self, ts):\n if DB_ACTUAL.getName() == None:\n return ErrorReport('Semantico', 'Not defined database to used', 0)\n elif not TRef.databaseExist(DB_ACTUAL.getName()):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_invalid_schema_name.invalid_schema_name), 0)\n elif not TRef.tableExist(DB_ACTUAL.getName(), self.tabla):\n return ErrorReport('Semantico', sqlErrors.sqlErrorToString(sqlErrors.sql_error_syntax_error_or_access_rule_violation.undefined_table), 0)\n\n DBMS.dropTable(DB_ACTUAL.getName(), self.tabla)\n TRef.dropTable(DB_ACTUAL.getName(), self.tabla)\n return 'Successful table dropped' \n","sub_path":"parser/team25/code/astDDL.py","file_name":"astDDL.py","file_ext":"py","file_size_in_byte":47105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"550444785","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nimport scipy.optimize as opt\nimport pandas as pd\nimport csv\nimport os\n\n\n\n# README: change atn & steps\n# cp CONFIG/REVCON to ./\n# module load python3\n# this is for extract certain atoms coordination \n\nfileo_hst = open('HISTORY', 'r')\nfileo_raw = open('CONFIG', 'r')\nfileo_opt = open('REVCON', 'r')\n\nlines_hst = fileo_hst.readlines()\nlines_hst_len = 
len(lines_hst)\n\nlines_raw = fileo_raw.readlines()\nlines_raw_len = len(lines_raw)\nlines_opt = fileo_opt.readlines()\nlines_opt_len = len(lines_opt)\n\n# Li: 1-1601-4801-5057 (5920 total)\natn = 13920 # atom number of model\natn_1f = 1 # from atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_1t = 10 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_2f = 1601 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_2t = 1610 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_3f = 4801 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_3t = 4810 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_4f = 5057 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\natn_4t = 5066 # to atom number !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\natn_extract = atn_1t - atn_1f + atn_2t - atn_2f + atn_3t - atn_3f + atn_4t - atn_4f + 4 # how many atoms extract out\n\nframe_len = atn * 2 + 4 # each frame lines length, coordinations 4 lines\ncir = int((lines_hst_len - 2) / frame_len) # circulation times\n\n##################################\n### final_frame_f cell box side length\n##################################\n\nfinal_frame_f = 2 + (cir-1) * frame_len\nside_a = float(lines_hst[final_frame_f+1].split()[0])\nside_b = float(lines_hst[final_frame_f+2].split()[1])\nside_c = float(lines_hst[final_frame_f+3].split()[2])\n\n# in each frame, Li lines from & to:\nline_1f = 4 + (atn_1f - 1) * 2 + 1 # from line number (in real number) without HISTORY head 2 lines\nline_1t = line_1f + (atn_1t - atn_1f) * 2 + 1 # to line number\nline_2f = 4 + (atn_2f - 1) * 2 + 1 # from line number (in real number)\nline_2t = line_2f + (atn_2t - atn_2f) * 2 + 1 # to line number\nline_3f = 4 + (atn_3f - 1) * 2 + 1 # from line number (in real number)\nline_3t = line_3f + (atn_3t - atn_3f) * 2 + 1 # to line number\nline_4f = 4 + (atn_4f - 1) * 2 + 1 # from line number (in real 
number)\nline_4t = line_4f + (atn_4t - atn_4f) * 2 + 1 # to line number\n\nprint('atn : ' + str(atn))\nprint('atn from: (change) ' + str(atn_1f))\nprint('atn to: (change) ' + str(atn_1t))\nprint('total length: ' + str(lines_hst_len))\nprint('each frame length ' + str(frame_len))\nprint('circulation times: ' + str(cir))\nprint('from line: ' + str(line_1f))\nprint('to line: ' + str(line_1t))\nprint('final cell length: ' + str(side_a) + ' ' + str(side_b) + ' ' + str(side_c))\n\n########################## read & write ################################################\n\ncoordinates_mat = np.zeros(shape=(cir + 2, 1)) # top 2 lines, CONFIG, REVCON\n\na1 = np.arange(atn_1f, atn_1t+1, 1)\na2 = np.arange(atn_2f, atn_2t+1, 1)\na3 = np.arange(atn_3f, atn_3t+1, 1)\na4 = np.arange(atn_4f, atn_4t+1, 1)\na_all = np.hstack((a1, a2, a3, a4))\n\n\nfor j in a_all:\n\n # CONFIG - raw\n this_line = lines_raw[2 + line_1f + (j-1) * 2 - 1] # CONFIG -1 line than HISTORY\n x0 = float(this_line.split()[0])\n y0 = float(this_line.split()[1])\n z0 = float(this_line.split()[2])\n\n # REVCON - opt\n this_line = lines_opt[2 + line_1f + (j-1) * 4 - 1] # REVCON -1 line than HISTORY, REVCON each atom 4 lines\n x1 = float(this_line.split()[0])\n y1 = float(this_line.split()[1])\n z1 = float(this_line.split()[2])\n\n coord_one_atm = np.vstack(( np.mat([x0, y0, z0]), np.mat([x1, y1, z1]) )) # ['x', 'y', 'z']\n\n # data\n for i in range(cir):\n this_line = lines_hst[ 2 + i*frame_len + line_1f + (j-1) * 2 ]\n\n x = float(this_line.split()[0])\n y = float(this_line.split()[1])\n z = float(this_line.split()[2])\n\n # periodic boundary conditions\n if abs(x-x1) > 30:\n if x > x1:\n x = x - side_a\n else:\n x = x + side_a\n\n if abs(y-y1) > 30:\n if y > y1:\n y = y - side_b\n else:\n y = y + side_b\n\n if abs(z-z1) > 30:\n if z > z1:\n z = z - side_c\n else:\n z = z + side_c\n\n coord_one_atm = np.vstack(( coord_one_atm, np.mat([x, y, z]) ))\n\n coordinates_mat = np.hstack(( coordinates_mat, coord_one_atm 
))\n\ncoordinates_mat = np.delete(coordinates_mat, [0], axis=1) # del first col\n\n# for head in csv:\nheading_xyz = [] # lst\nfor i in a_all:\n for j in range(3):\n heading_xyz += [i]\n\n\ndf = pd.DataFrame(coordinates_mat, columns=heading_xyz) # heading_r or heading_xyzr\ndf.to_csv('4atms_coordinate.csv', index=False)\n\nfileo_hst.close()\nfileo_raw.close()\nfileo_opt.close()\n\n\n\n\n","sub_path":"HISTORY_to_csv.py","file_name":"HISTORY_to_csv.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"102979567","text":"#!/usr/bin/python\nimport sys\n\ndef calc_pri(s1, s2):\n\tl1 = [int(i) for i in s1]\n\tl2 = [int(i) for i in s2]\n\tl1.reverse();\tl2.reverse()\n\tcarry=0;\tnum_carry=0\t\n\tfor i, j in map(None, l1, l2):\n\t\ti = i if i !=None else 0\n\t\tj = j if j !=None else 0\n\t\tif i+j+carry>=10:\n\t\t\tcarry=1\n\t\t\tnum_carry+=1\n\t\telse:\n\t\t\tcarry=0\n\t\n\tif num_carry==0:\tprint(\"No carry operation.\")\n\telse:\t\t\t\tprint(\"%d carry operations.\" % num_carry)\n\nif __name__ == \"__main__\":\n\tif len(sys.argv)==1:\n\t\tprint(\"%s INPUT_FILE\" % sys.argv[0])\n\t\tsys.exit(0)\n\n\tf = open(sys.argv[1], 'r')\n\tfor l in f:\n\t\tdata = l.strip().split()\n\t\tif data[0]=='0' and data[1]=='0':\tbreak\n\t\tcalc_pri(data[0], data[1])\n","sub_path":"2nd/primary_arithmetic.py","file_name":"primary_arithmetic.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"518101050","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile(1):\n\n ret, frame = cap.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # absolute thresholding\n # ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)\n # adaptive thresholding, calcualate a different threshold value for different\n # region in image\n thresh = cv2.adaptiveThreshold(gray, 255, 
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n\n # otsu binarization, find an optimal threshold value on a bimodal image\n retVal, thresh2 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n\n cv2.imshow('thresh', thresh)\n cv2.imshow('otsu', thresh2)\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv2.destroyAllWindows()\n","sub_path":"python/threshold_image.py","file_name":"threshold_image.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"247496601","text":"#-*- coding:utf-8 -*-\n\nimport logging\n\nfrom flask.ext import script\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app import factory\nfrom app.core.parsers import RutrackerParser\nfrom app.torrents.models import Torrent\n\n\nmanager = script.Manager()\n\n\n@manager.option('--id', dest='torrent_id')\n@manager.option('--download', dest='download', action='store_true')\n@manager.option('--remove', dest='remove', action='store_true')\n@manager.option('--start', dest='start', action='store_true')\n@manager.option('--stop', dest='stop', action='store_true')\ndef add(**kwargs):\n logger = logging.getLogger('app')\n\n torrent_id = kwargs['torrent_id']\n\n try:\n torrent = Torrent.query.filter(Torrent.id == torrent_id).one()\n except NoResultFound:\n logger.warning('Torrent does not exists')\n return\n\n tc = factory.get_torrent_client()\n\n if kwargs['download']:\n parser = RutrackerParser(login='clayman174', password='0836798')\n parser.authenticate()\n\n torrent.download(parser=parser, client=tc)\n\n if kwargs['start']:\n torrent.start(client=tc)\n\n if kwargs['stop']:\n torrent.stop(client=tc)\n\n if kwargs['remove']:\n torrent.remove(client=tc)\n","sub_path":"app/torrents/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"349562917","text":"from vpython import *\nimport numpy as np\n\n\ndef findPoints(X1, X2, X3):\n # Find the normal of the plane containing three points\n N = np.cross((X2 - X1), (X3 - X1))\n N_normal = N / sqrt(N[0]**2 + N[1]**2 + N[2]**2)\n b = np.dot(N_normal, ((X2 + X1) / 2)) # Offset\n\n # Find the point of circumcenter\n Xm_12 = X2 - X1\n Xm_23 = X3 - X2\n\n # L_12 = X_1 + X_12 * t\n # L_23 = X_2 + X_23 * t\n\n d1 = np.cross(N_normal, Xm_12)\n d2 = np.cross(N_normal, Xm_23)\n\n node_12 = (X2 + X1) / 2\n node_23 = (X3 + X2) / 2\n\n points(pos=vec(node_12[0], node_12[1], node_12[2]), radius=5, color=color.red)\n points(pos=vec(node_23[0], node_23[1], node_23[2]), radius=5, color=color.red)\n\n A = ([d1[0], -d2[0]], [d1[1], -d2[1]])\n contant = [node_23[0] - node_12[0], node_23[1] - node_12[1]]\n A_inv = np.linalg.inv(A)\n st = A_inv.dot(contant)\n X0 = node_12 + d1.dot(st[0])\n points(pos=[vec(X0[0], X0[1], X0[2])], radius=5, color=color.red)\n arrow(pos=vec(node_12[0], node_12[1], node_12[2]), axis=vec(d1[0], d1[1], d1[2]), color=color.blue, shaftwidth=0.05)\n arrow(pos=vec(node_23[0], node_23[1], node_23[2]), axis=vec(d2[0], d2[1], d2[2]), color=color.blue, shaftwidth=0.05)\n arrow(pos=vec(X0[0], X0[1], X0[2]), axis=vec(N_normal[0], N_normal[1], N_normal[2]) * 10, color=color.orange, shaftwidth=0.05)\n arrow(pos=vec(X0[0], X0[1], X0[2]), axis=vec(-N_normal[0], -N_normal[1], -N_normal[2]) * 10, color=color.orange, shaftwidth=0.05)\n equation = np.poly1d([N_normal[0]**2 + N_normal[1]**2 + N_normal[2]**2, -(2 * (X0[0] - X1[0]) * N_normal[0] + 2 * (X0[1] - X1[1]) * N_normal[1] + 2 * (X0[2] - X1[2]) * N_normal[2]), (X1[0] - X0[0]) ** 2 + (X1[1] - X0[1]) ** 2 + (X1[2] - X0[2]) ** 2 - 100])\n ans_t = (equation.r)\n ans_1 = X0 + N_normal.dot(ans_t[0])\n ans_2 = X0 + N_normal.dot(ans_t[1])\n points(pos=[vec(ans_1[0], ans_1[1], ans_1[2])], radius=5, color=color.yellow)\n points(pos=[vec(ans_2[0], ans_2[1], ans_2[2])], radius=5, color=color.yellow)\n\n # Plot 
three points as a triangle\n y = plotTriangle(X1, X2, X3)\n\n # Plot 3 axes\n X_axis = arrow(pos=vec(0, 0, 0), axis=vec(10, 0, 0), color=color.cyan, shaftwidth=0.05)\n Y_axis = arrow(pos=vec(0, 0, 0), axis=vec(0, 10, 0), color=color.cyan, shaftwidth=0.05)\n Z_axis = arrow(pos=vec(0, 0, 0), axis=vec(0, 0, 10), color=color.cyan, shaftwidth=0.05)\n X_axisText = text(pos=vec(11, 0, 0), text='x', align='center', color=color.green)\n Y_axisText = text(pos=vec(0, 11, 0), text='y', align='center', color=color.green)\n Z_axisText = text(pos=vec(0, 0, 11), text='z', align='center', color=color.green)\n\n\ndef plotTriangle(tp1, tp2, tp3):\n # Plot four triangles to become tetrahedron\n tp1 = vertex(pos=vec(tp1[0], tp1[1], tp1[2]))\n tp2 = vertex(pos=vec(tp2[0], tp2[1], tp2[2]))\n tp3 = vertex(pos=vec(tp3[0], tp3[1], tp3[2]))\n triangle(v0=tp1, v1=tp2, v2=tp3)\n\n\nif __name__ == '__main__':\n findPoints(np.array([3, 5, 3]), np.array([4, 7, 8]), np.array([8, 3, 6]))\n","sub_path":"vPython/findPoints.py","file_name":"findPoints.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"15358363","text":"import csv\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom library.apps.authors.models import Author\n\n\nclass Command(BaseCommand):\n help = 'Import authors data to database'\n\n def add_arguments(self, parser):\n parser.add_argument('filename', help=\"Enter the .csv file\")\n\n def handle(self, *args, **options):\n filename = options['filename']\n with open(filename) as f:\n f_csv = csv.DictReader(f)\n authors = []\n for row in f_csv:\n try:\n author = Author(**row)\n except TypeError:\n raise CommandError('Invalid file')\n else:\n authors.append(author)\n Author.objects.bulk_create(authors)\n self.stdout.write(self.style.SUCCESS('Authors added 
successfully'))\n","sub_path":"library/apps/authors/management/commands/import_authors.py","file_name":"import_authors.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"329738358","text":"import sqlite3\nimport pandas as pd\nimport math\nconn = sqlite3.connect('ghi.db')\n\nconn.execute('''DROP TABLE IF EXISTS reports2010''')\nconn.execute('''DROP TABLE IF EXISTS reportsdetail2010''')\nconn.execute('''DROP TABLE IF EXISTS reports2013''')\nconn.execute('''DROP TABLE IF EXISTS reportsdata2013''')\n\nconn.execute(\n ''' CREATE TABLE reports2010 (id, year, company name, total impact score, rank overall, number of diseases) ''')\n\nconn.execute(\n ''' CREATE TABLE reportsdetail2010 (id, year, company name, drug, disease target, diseaseimpact, diseasepercent, companyrankdisease, percentdalycompanydisease) ''')\n\n\nconn.execute(\n ''' CREATE TABLE reports2013 (company name, total impact score, rank overall, number of diseases,drug) ''')\n\ndatasrc = 'ORS_Reports.csv'\ndf = pd.read_csv(datasrc)\nis_df_true = df.notnull()\n#print(df)\n\ndef cleanfloat(var):\n #print(var)\n if var == '#REF!':\n var = 0\n if var == '#DIV/0!' 
or var == 'No data':\n var = 0\n if type(var) != float and type(var) != int:\n if ',' in var:\n var = float(var.replace(',', '').replace('%', ''))\n if var != var:\n var = 0\n return var\n\nreports2010 = []\nreportsdetail2010 = []\nreports2013 = []\nreportsdata2013 = []\n\nid = 0;\nfor k in range(0, 44):\n if is_df_true.iloc[k, 1] == True:\n companyname = df.iloc[k, 1]\n print(companyname)\n if is_df_true.iloc[k, 2] == True:\n totalimpactscore = cleanfloat(df.iloc[k, 2])\n print(totalimpactscore)\n if is_df_true.iloc[k, 3] == True:\n companyrank = int(df.iloc[k, 3])\n print(companyrank)\n if is_df_true.iloc[k, 4] == True:\n numOfDisease = int(df.iloc[k, 4])\n print(numOfDisease)\n if is_df_true.iloc[k, 1] == True and is_df_true.iloc[k, 2] == True and is_df_true.iloc[k, 3] == True and is_df_true.iloc[k, 4] == True:\n id = id+1\n row = [id, 2010, companyname, totalimpactscore, companyrank, numOfDisease]\n reports2010.append(row)\n\nfor item in reports2010:\n print(item)\n conn.execute(' insert into reports2010 values (?,?,?,?,?,?) 
', item)\n\n_id = 0;\ntempcompanyname = \"\"\nfor k in range(0, 44):\n companyname = df.iloc[k,1]\n if is_df_true.iloc[k, 1] == False:\n companyname = tempcompanyname\n else:\n _id = _id + 1;\n tempcompanyname = companyname\n print(tempcompanyname)\n print(companyname)\n drug = df.iloc[k, 6]\n print(drug)\n diseaseTargeted = df.iloc[k, 7]\n print(diseaseTargeted)\n diseaseimpact = cleanfloat(df.iloc[k, 8])\n print(diseaseimpact)\n if is_df_true.iloc[k, 10] == True:\n diseasepercent = df.iloc[k, 10]\n print(diseasepercent)\n else:\n diseasepercent = ''\n if is_df_true.iloc[k, 11] == True:\n companyrankdisease = df.iloc[k, 11]\n print(companyrankdisease)\n else:\n companyrankdisease = ''\n if is_df_true.iloc[k, 12] == True:\n percentdalycompanydisease = df.iloc[k, 12]\n print(percentdalycompanydisease)\n else:\n percentdalycompanydisease = ''\n rowdata = [_id, 2010, companyname, drug, diseaseTargeted, diseaseimpact, diseasepercent,companyrankdisease, percentdalycompanydisease ]\n reportsdetail2010.append(rowdata)\n\nfor item in reportsdetail2010:\n print(item)\n conn.execute(' insert into reportsdetail2010 values (?,?,?,?,?,?,?,?,?) 
', item)\n\n\n\n\nconn.commit()\nprint(\"Database operation complete\")","sub_path":"reportsdb.py","file_name":"reportsdb.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"512768954","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 2 14:21:04 2018\n\n@author: lipchiz\n\"\"\"\n\nimport sys\n# sys.path.append(\"/home/nyuser/zlrmodeltest/datafetch\")\nsys.path.append(\"/home/nyuser/jianghan/FeatureAlgorithm/Tools\")\n# sys.path.append(r\"D:\\FeatureAlgorithm\\Tools\")\n#sys.path.append(\"/home/lipchiz/文档/pythonscripts/quant/datafetch\")\n# from loadSmallDataFromDB import loadData\nfrom loadSmallDataFromDBV2 import loadData\n# from loadDataFromDBVScreenStocks import loadData\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\n# from sklearn.externals import joblib\nimport pandas.io.sql as sql\nfrom sklearn.metrics import recall_score,precision_score,accuracy_score,roc_curve,classification_report,auc,confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom hyperopt import fmin, hp, tpe, STATUS_OK, Trials\nimport datetime\nimport pymysql\nimport gc\nfrom datetime import datetime, timedelta\nimport logging\nimport os\nimport pickle\n\n\ndef precision_1(preds, dtrain):\n label = dtrain.get_label()\n pred = [int(i >= 0.5) for i in preds]\n precision = precision_score(label, pred, pos_label=1, average='binary')\n return '1-precision', precision\n\n\ndef yload(table_name, starttime, endtime):\n connection = {'host': '10.46.228.175', 'port': 3306, 'user': 'alg',\n 'passwd': 'Alg#824', 'db': 'quant', 'charset': 'utf8'}\n sql_order = \"select * from %s where date>='%s' and date<='%s'\" % (table_name, starttime, endtime)\n con = pymysql.connect(**connection)\n y = sql.read_sql(sql_order, con)\n return y\n\ndef objective(args):\n params = {\n 'silent': 1, # 设置成1则没有运行信息输出,最好是设置为0.是否在运行升级时打印消息。\n 'booster': 
'gbtree',\n 'objective': 'binary:logistic',\n 'eval_metric': 'auc',\n 'nthread': 45,\n 'learning_rate': args['learning_rate'],\n 'colsample_bytree': args['colsample_bytree'],\n 'max_depth': args['max_depth'] + 6,\n 'subsample': args['subsample']\n }\n xgb1 = xgb.train(params, args['train_data'], evals_result={'eval_metric': 'auc'},\n num_boost_round=100000, evals=[(args['train_data'], 'train'), (args['val_data'], 'val')],\n verbose_eval=False, early_stopping_rounds=15)\n y_score = xgb1.predict(args['val_data'])\n # y_predict = np.int64(y_score>0.5)\n fpr, tpr, threshods = roc_curve(args['val_data'].get_label(), y_score, pos_label=1)\n aucscore = auc(fpr, tpr)\n print('searching auc-score:',aucscore)\n return {'loss':-aucscore, 'status':STATUS_OK, 'iteration': xgb1.best_iteration}\n\ndef model_training(day, data, x_list, params_space, logger, out_folder_path):\n# select the right y\n# tmp_yname = 'Y_%sD_%sPCT'%(day,rate)\n\n tmp_yname = 'Y_%sD'%(day)\n train_list = x_list.copy()\n train_list.append(tmp_yname)\n tmp_data = data[train_list]\n tmp_data = tmp_data.dropna(subset=[tmp_yname])\n\n y = tmp_data[tmp_yname]\n x = tmp_data.drop(tmp_yname,axis=1)\n # train_x, val_x, train_y, val_y = train_test_split(x,y,test_size=0.1,random_state=68)\n tmp_train_size = int(x.shape[0] * 0.7)\n train_x = x.iloc[:tmp_train_size]\n train_y = y.iloc[:tmp_train_size]\n val_x = x.iloc[tmp_train_size:]\n val_y = y.iloc[tmp_train_size:]\n del tmp_data\n gc.collect()\n\n # check data\n length_1 = len(y[y==1])\n length_0 = len(y[y==0])\n\n logger.info('The size of data used for modeling: ' + str(len(train_x)))\n logger.info('The number of y=1 is: ' + str(length_1))\n logger.info('The number of y=0 is: ' + str(length_0))\n logger.info('feature num: %d' % len(x_list))\n logger.info('train data types:')\n logger.info(x.dtypes.unique())\n logger.info(y.unique())\n\n\n params_space['train_data'] = xgb.DMatrix(train_x,label=train_y,feature_names=train_x.columns)\n params_space['val_data'] = 
xgb.DMatrix(val_x,label=val_y,feature_names=val_x.columns)\n\n tmp_trial = Trials()\n best_sln = fmin(objective, space=params_space, algo=tpe.suggest, max_evals=10, trials=tmp_trial)\n\n # get best boost rounds\n tmp_idx = np.argmin(np.array(tmp_trial.losses()))\n best_boost_num = tmp_trial.results[tmp_idx]['iteration']\n\n # train_y = tmp_data[tmp_yname]\n # train_x = tmp_data.drop(tmp_yname,axis=1)\n\n # del params_space['train_data']\n # del params_space['val_data']\n\n params = {\n 'silent':1 , #设置成1则没有运行信息输出,最好是设置为0.是否在运行升级时打印消息。\n 'booster': 'gbtree',\n 'objective': 'binary:logistic',\n 'eval_metric': 'auc',\n 'nthread': 45,\n 'learning_rate': best_sln['learning_rate'],\n 'colsample_bytree': best_sln['colsample_bytree'],\n 'max_depth': best_sln['max_depth'] + 6,\n 'subsample': best_sln['subsample']\n }\n\n\n # re-divide train/validation sets\n # train_x, val_x, train_y, val_y = train_test_split(x,y,test_size=0.1,random_state=68)\n\n # use all data to train\n xgbdata = xgb.DMatrix(x,label=y,feature_names=train_x.columns)\n\n # use the num of boost get from early stopping\n clf = xgb.train(params, xgbdata, num_boost_round=best_boost_num)\n fea_imp = clf.get_score(importance_type='gain')\n importance = pd.DataFrame(fea_imp,index=['importance'])\n importance = pd.DataFrame({'feature':importance.columns.tolist(),'importance':importance.values[0].tolist()})\n # importance.to_excel('feature_importance_%sD_%sPCT.xlsx'%(str(day),str(rate)))\n importance.to_excel('%s/feature_importance_%dD.xlsx'%(out_folder_path, day))\n# save the model which has been trained\n# joblib.dump(clf,'train_model_%sD.m'%(str(day),str(rate)))\n clf.save_model('%s/train_model_%dD.m'% (out_folder_path, day))\n report = 'day = %dD'% day\n logger.info(report)\n logger.info('-----------------------------------------------------')\n\ndef model_testing_old(day,data,x_list,season, logger, out_folder_path):\n # select the right y\n tmp_yname = 'Y_%sD'%(day)\n test_list = x_list.copy()\n 
test_list.append(tmp_yname)\n tmp_data = data[test_list]\n tmp_data = tmp_data.dropna(subset=[tmp_yname])\n\n test_y = tmp_data[tmp_yname]\n test_x = tmp_data.drop(tmp_yname,axis=1)\n xgbdata = xgb.DMatrix(test_x,label=test_y,feature_names=x_list)\n logger.info('The size of data used for testing: ' + str(len(test_x)))\n logger.info('The number of y is: ' + str(len(test_y[test_y==1])))\n\n logger.info('feature num: %d' % len(x_list))\n logger.info('test data types:')\n logger.info(test_x.dtypes.unique())\n logger.info(test_y.unique())\n\n# evaluating the model\n xgb1 = xgb.Booster()\n xgb1.load_model('%s/train_model_%dD.m' % (out_folder_path, day))\n # clf = joblib.load('%s/train_model_%dD.m'%(out_folder_path, day))\n y_score = xgb1.predict(xgbdata)\n y_predict = np.int64(y_score>0.5)\n accuracyscore = accuracy_score(test_y, y_predict)\n# f1 = f1_score(test_y,y_predict,pos_label=0)\n fpr,tpr,threshods = roc_curve(test_y,y_score,pos_label = 1)\n ks = np.max(np.abs(tpr-fpr))\n aucscore = auc(fpr,tpr)\n precision = precision_score(test_y,y_predict,average='binary')\n recall = recall_score(test_y,y_predict,average='weighted')\n logger.info('precision: %f' % precision)\n logger.info('recall: %f' % recall)\n logger.info('auc: %f' % aucscore)\n logger.info('accuracyscore: %f' % accuracyscore)\n logger.info('K-S: %f' % ks)\n logger.info(classification_report(test_y,y_predict))\n logger.info(confusion_matrix(test_y,y_predict,labels=[0,1]))\n logger.info(confusion_matrix(test_y,y_predict,labels=[1,0]))\n report = 'day = %dD'%(day)\n logger.info(report)\n logger.info('-------------------------------------------------------')\n# save the evaluating result\n resultscore = [precision,recall,aucscore,accuracyscore,ks,str(classification_report(test_y,y_predict)),str(confusion_matrix(test_y,y_predict)),'%dD'%(day),'s%d'%season]\n columnname = ['precision','recall','auc','accuracyscore','K-S','classification_report','confusion_matrix','modeltype','season']\n result 
=pd.DataFrame(np.array(resultscore).reshape(1,9),columns = columnname)\n return result\n\ndef model_testing_new(day,data,x_list,season, logger, out_folder_path):\n # select the right y\n tmp_yname = 'Y_%sD'%(day)\n test_list = x_list.copy()\n test_list.append(tmp_yname)\n tmp_data = data[test_list]\n # tmp_data = tmp_data.dropna(subset=[tmp_yname])\n\n # fill y'nan with 0\n tmp_data.loc[:, tmp_yname] = tmp_data[tmp_yname].fillna(0)\n tmp_data.loc[:, tmp_yname] = tmp_data[tmp_yname].astype('int')\n\n test_y = tmp_data[tmp_yname]\n test_x = tmp_data.drop(tmp_yname,axis=1)\n xgbdata = xgb.DMatrix(test_x,label=test_y,feature_names=x_list)\n logger.info('The size of data used for testing: ' + str(len(test_x)))\n logger.info('The number of y is: ' + str(len(test_y[test_y==1])))\n\n# evaluating the model\n xgb1 = xgb.Booster()\n xgb1.load_model('%s/train_model_%dD.m' % (out_folder_path, day))\n # clf = joblib.load('%s/train_model_%dD.m'%(out_folder_path, day))\n y_score = xgb1.predict(xgbdata)\n y_predict = np.int64(y_score>0.5) ############## threshold to be predicted as 1\n accuracyscore = accuracy_score(test_y, y_predict)\n# f1 = f1_score(test_y,y_predict,pos_label=0)\n fpr,tpr,threshods = roc_curve(test_y,y_score,pos_label = 1)\n ks = np.max(np.abs(tpr-fpr))\n aucscore = auc(fpr,tpr)\n precision = precision_score(test_y,y_predict,average='binary')\n recall = recall_score(test_y,y_predict,average='weighted')\n logger.info('precision: %f' % precision)\n logger.info('recall: %f' % recall)\n logger.info('auc: %f' % aucscore)\n logger.info('accuracyscore: %f' % accuracyscore)\n logger.info('K-S: %f' % ks)\n logger.info(classification_report(test_y,y_predict))\n logger.info(confusion_matrix(test_y,y_predict,labels=[0,1]))\n logger.info(confusion_matrix(test_y,y_predict,labels=[1,0]))\n\n # check score under different thresholds\n threshold_list = list(range(50,100, 5))\n threshold_list = [round(x * 0.01,2) for x in threshold_list]\n scores_list = {}\n for tmp_thrhd in 
threshold_list:\n tmp_y_predict = np.int64(y_score > tmp_thrhd)\n tmp_precision = precision_score(test_y, tmp_y_predict, average='binary')\n tmp_recall = recall_score(test_y, tmp_y_predict, average='weighted')\n scores_list[tmp_thrhd] = [tmp_precision, tmp_recall]\n scores_list = pd.DataFrame(scores_list, index=['precision', 'recall'])\n logger.info(\"scores under different thresholds:\")\n logger.info(scores_list)\n report = 'day = %dD'%(day)\n logger.info(report)\n logger.info('-------------------------------------------------------')\n# save the evaluating result\n resultscore = [precision,recall,aucscore,accuracyscore,ks,str(classification_report(test_y,y_predict)),str(confusion_matrix(test_y,y_predict)),'%dD'%(day),'s%d'%season]\n columnname = ['precision','recall','auc','accuracyscore','K-S','classification_report','confusion_matrix','modeltype','season']\n result =pd.DataFrame(np.array(resultscore).reshape(1,9),columns = columnname)\n return result\n\n\n\ndef run(year, season, out_folder_path, out_predict_path):\n # ------------------ setting --------------------------------\n # season_start_date ={\n # 1: '-01-01',\n # 2: '-04-01',\n # 3: '-07-01',\n # 4: '-10-01'\n # }\n # season_end_date = {\n # 1: '-03-31',\n # 2: '-06-30',\n # 3: '-09-30',\n # 4: '-12-31'\n # }\n season_start_date = {\n 1: '-01-01',\n 2: '-03-01',\n 3: '-05-01',\n 4: '-07-01',\n 5: '-09-01',\n 6: '-11-01'\n }\n season_end_date = {\n 1: '-02-31',\n 2: '-04-31',\n 3: '-06-31',\n 4: '-08-31',\n 5: '-10-31',\n 6: '-12-31'\n }\n starttime_train = str(year) + season_start_date[season]\n endtime_train = str(year+1) + season_start_date[season]\n endtime_train = datetime.strftime(datetime.strptime(endtime_train, '%Y-%m-%d') - timedelta(days=30), '%Y-%m-%d') # drop 30 days to avoid using future data in training\n\n starttime_test = str(year+1) + season_start_date[season]\n endtime_test = str(year+1) + season_end_date[season]\n\n # starttime_train = '2012-01-01'\n # endtime_train = '2012-01-14'\n 
#\n # starttime_test = '2013-01-01'\n # endtime_test = '2013-01-04'\n\n # starttime_train1 = '%s-01-01'%year\n # endtime_train1 = '%s-06-30'%year\n # # endtime_train1 = '%s-01-04' % year\n # starttime_train2 = '%s-07-01'%year\n # endtime_train2 = '%s-12-31'%year\n # # endtime_train2 = '%s-07-04' % year\n # starttime_q1 = '%s-01-01'%(year+1)\n # endtime_q1 = '%s-03-31'%(year+1)\n # # endtime_q1 = '%s-01-04' % (year + 1)\n # # starttime_q2 = '%s-04-01'%(year+1)\n # # endtime_q2 = '%s-06-30'%(year+1)\n # # excel_h = 'resultscore_%s.xlsx'%(year)\n\n Y_table_name = 'STOCK_TOP_BOTTOM_Y'\n Y_days = [2,5,10,20]\n\n #starttime_train = '%s-06-21'%year\n #endtime_train = '%s-06-21'%year\n #starttime_q1 = '%s-06-21'%year\n #endtime_q1 = '%s-06-21'%year\n #starttime_q2 = '%s-06-21'%year\n #endtime_q2 = '%s-06-21'%year\n #excel_h = 'resultscore_%s.xlsx'%(year)\n\n #the scope of paremeters of model\n params_space = {\n 'booster': 'gbtree',\n 'objective': 'binary:logistic',\n 'eval_metric': 'auc',\n 'nthread': 50,\n 'learning_rate': hp.uniform(\"learning_rate\", 0.05, 0.15),\n 'max_depth': hp.randint('max_depth', 10),\n 'subsample': hp.uniform(\"subsample\", 0.5, 0.9),\n 'colsample_bytree': hp.uniform(\"colsample_bytree\", 0.5, 0.9),\n }\n\n # parameters = {\n # 'silent':1 , #设置成1则没有运行信息输出,最好是设置为0.是否在运行升级时打印消息。\n # 'nthread':30, # cpu 线程数 默认最大\n # 'learning_rate':0.1, # 如同学习率\n # #min_child_weight=0.5, # 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言\n # #,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。\n # #这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。\n # 'max_depth':6, # 构建树的深度,越大越容易过拟合\n # 'gamma':0, # 树的叶子节点上作进一步分区所需的最小损失减少,越大越保守,一般0.1、0.2这样子。\n # 'subsample':0.9, # 随机采样训练样本 训练实例的子采样比\n # 'max_delta_step':0, #最大增量步长,我们允许每个树的权重估计。\n # 'colsample_bytree':0.9, # 生成树时进行的列采样\n # 'reg_lambda':1, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。\n # #reg_alpha=0, # L1 正则项参数\n # #scale_pos_weight=1.3, #如果取值大于0的话,在类别样本不平衡的情况下有助于快速收敛。平衡正负权重\n # #objective= 'multi:softmax', 
#多分类的问题 指定学习任务和相应的学习目标\n # #num_class=10, # 类别数,多分类与 multisoftmax 并用\n # ' n_estimators':500, #树的个数\n # 'seed':100, #随机种子\n # 'eval_metric': 'auc'\n # }\n\n #the return rate of stocks\n # return_rate = {'rate_2':[1,2,3,4,5],\n # 'rate_5':[2,3,5,7,10],\n # 'rate_10':[3,5,7,10,15],\n # 'rate_20':[4,7,10,15,20],\n # 'rate_30':[5,10,15,20,25]\n # }\n\n # ynamelist = []\n # for day in [2,5,10,20,30]:\n # for rate in return_rate['rate_%s'%(str(day))]:\n # ynamelist.append('Y_%sD_%sPCT'%(day,rate))\n\n # create logger\n logging.basicConfig(level=logging.INFO,\n format='[%(asctime)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='%s/%d_s%d.log' % (out_folder_path, year, season),\n filemode='w')\n\n logger = logging.getLogger('%d_s%d'%(year, season))\n\n\n '''---------------------------- training -----------------------------------'''\n # prepare training data\n logger.info('training has been started.')\n\n tmp_dt_start = datetime.strptime(starttime_train, '%Y-%m-%d')\n tmp_dt_end = datetime.strptime(endtime_train, '%Y-%m-%d')\n tmp_dt_mid = tmp_dt_start + (tmp_dt_end - tmp_dt_start) / 2\n end1_train = datetime.strftime(tmp_dt_mid, '%Y-%m-%d')\n start2_train = datetime.strftime(tmp_dt_mid + timedelta(days=1), '%Y-%m-%d')\n\n train_x1 = loadData(starttime_train, end1_train)\n train_x2 = loadData(start2_train, endtime_train)\n train_x = train_x1.append(train_x2)\n del train_x1, train_x2\n gc.collect()\n\n # train_x = loadData(starttime_train,endtime_train)\n train_y = yload(Y_table_name, starttime_train,endtime_train)\n train_y.drop('time_stamp',axis=1,inplace=True)\n xnamelist = train_x.columns.tolist() # feature names (without code & date)\n xnamelist.remove('code')\n xnamelist.remove('date')\n train_data = pd.merge(train_x,train_y,on = ['date','code'],how='left')\n\n del train_x, train_y\n gc.collect()\n\n # save training feature name\n out_path = '%s/xnamelist.pcl' % out_folder_path\n with open(out_path, 'wb') as out_file:\n pickle.dump(xnamelist, out_file)\n\n 
# preprocessing training data\n try:\n train_data.drop_duplicates(['code','date'],inplace = True)\n train_data = train_data.sort_values('date', ascending=True)\n except:\n logger.error('train_data error')\n\n train_data.drop(['date','code'],axis=1,inplace=True) # drop code & date\n\n #training the model\n # for day in [2,5,10,20,30]:\n for day in Y_days:\n model_training(day, train_data, xnamelist,params_space, logger, out_folder_path)\n #delete all the variables\n del day,params_space,train_data\n gc.collect()\n\n logger.info('training has finished')\n '''---------------------------- testing S1 -----------------------------------'''\n #S1\n # nowtime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logger.info('testing_q1 has been started')\n # logger.info(nowtime)\n\n # load training feature name\n feature_list_path = '%s/xnamelist.pcl' % out_folder_path\n with open(feature_list_path, 'rb') as in_file:\n xnamelist = pickle.load(in_file)\n\n #load the test data\n # test_x = loadData(starttime_q1,endtime_q1)\n # test_y = yload(Y_table_name, starttime_q1,endtime_q1)\n test_x = loadData(starttime_test, endtime_test)\n test_y = yload(Y_table_name, starttime_test, endtime_test)\n test_y.drop('time_stamp',axis=1,inplace=True)\n test_data = pd.merge(test_x,test_y,on = ['date','code'],how='left')\n\n del test_x,test_y\n gc.collect()\n\n #preprocessing testing data\n try:\n test_data.drop_duplicates(['code','date'],inplace = True)\n if 'index' in test_data.columns.tolist():\n test_data = test_data.drop(['index'],axis = 1)\n except:\n logger.error('test_data error')\n\n # stock_index_q1 = test_data[['date','code']]\n test_data.drop(['date','code'],axis=1,inplace=True) # drop code & date\n\n #dataframe to save the result\n resultscoredf_h = pd.DataFrame()\n\n for day in Y_days:\n result = model_testing_new(day,test_data,xnamelist,season, logger, out_folder_path)\n # y_score = pd.DataFrame(y_score)\n # y_score.columns = [\"y_1_%sD_%sPCT\"%(day,rate)]\n # stock_index_q1 
= pd.concat([stock_index_q1,y_score],axis=1)\n resultscoredf_h = resultscoredf_h.append(result)\n\n # nowtime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logger.info('testing s%d has finished' % season)\n # print(nowtime)\n\n '_________________________________ Record Prediction __________________________________'\n # nowtime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logger.info('backtest data generating.')\n # print(nowtime)\n\n test_data = loadData(starttime_test,endtime_test)\n try:\n test_data.drop_duplicates(['code','date'],inplace = True)\n if 'index' in test_data.columns.tolist():\n test_data = test_data.drop(['index'],axis = 1)\n test_data.reindex()\n except:\n logger.error('train_data error')\n\n stock_index = test_data[['date','code']]\n test_data.drop(['date','code'],axis=1,inplace=True)\n test_data = xgb.DMatrix(test_data,feature_names=xnamelist)\n\n for day in Y_days:\n xgb1 = xgb.Booster()\n xgb1.load_model('%s/train_model_%dD.m'%(out_folder_path, day))\n y_score = xgb1.predict(test_data)\n y_score = pd.DataFrame(y_score,columns=['proba_1_%dD'%day])\n stock_index = pd.concat([stock_index,y_score],axis=1)\n logger.info('day = %sD'%day)\n stock_index.to_csv(\"%s/stockscore_%ds%d.csv\" % (out_predict_path, year, season),index=False,sep=',')\n\n # nowtime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n logger.info('backtest data has generated. 
')\n # print(nowtime)\n\n\nif __name__ == '__main__':\n # result_folder_path = '/home/konghon/Documents/model_results/2012'\n year = 2012\n season = 1\n result_folder_path = '/home/nyuser/jianghan/FeatureAlgorithm/model_results/%d_s%d' % (year, season)\n # result_folder_path = r'D:\\model_results\\top_bottom\\test_v2'\n if not os.path.exists(result_folder_path):\n os.makedirs(result_folder_path)\n result_predict_path = '/home/nyuser/jianghan/FeatureAlgorithm/model_results/prediction'\n # result_predict_path = r'D:\\model_results\\top_bottom\\test_v2'\n run(year, season,result_folder_path, result_predict_path)","sub_path":"Models/xgb_trainingV2.py","file_name":"xgb_trainingV2.py","file_ext":"py","file_size_in_byte":22365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"122723024","text":"from distutils.core import setup, Extension\n\nmodule1 = Extension('_normaldist',\n sources = ['normaldist_wrap.c', 'normaldist.c'])\n\nsetup (name = 'normaldist',\n version = '1.0',\n description = 'This is a normaldist package',\n ext_modules = [module1],\n py_modules = [\"normaldist\"],)\n","sub_path":"C_extention/setup_normaldist.py","file_name":"setup_normaldist.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"70337635","text":"\"\"\" QuArK - Quake Army Knife\r\n\r\nPython macros available for direct call by QuArK\r\n\"\"\"\r\n#\r\n# Copyright (C) 1996-2000 Armin Rigo\r\n# THIS FILE IS PROTECTED BY THE GNU GENERAL PUBLIC LICENCE\r\n# FOUND IN FILE \"COPYING.TXT\"\r\n#\r\n#$Header$\r\n\r\n#\r\n# Macros are called by QuArK based on name. These are the\r\n# only direct calls that QuArK can make to Python. 
Usually,\r\n# Python provides callback to QuArK.\r\n#\r\n\r\n\r\nimport quarkx\r\nimport qtoolbar\r\nimport qutils\r\n\r\n#\r\n# Macros called when there is an object to display in a window.\r\n#\r\n\r\ndef MACRO_displaymap(self, what=None):\r\n \"Called when there is a map to display.\"\r\n qutils.loadmapeditor(what)\r\n import mapeditor\r\n if isinstance(self.info, mapeditor.MapEditor):\r\n self.info.ReopenRoot(self)\r\n else:\r\n mapeditor.MapEditor(self) # new map editor\r\n\r\ndef MACRO_displaybsp(self):\r\n MACRO_displaymap(self,'bsp')\r\n\r\n\r\ndef MACRO_displaymdl(self):\r\n \"Called when there is a model to display.\"\r\n qutils.loadmdleditor()\r\n import mdleditor\r\n if isinstance(self.info, mdleditor.ModelEditor):\r\n self.info.ReopenRoot(self)\r\n else:\r\n mdleditor.ModelEditor(self) # new model editor\r\n\r\n\r\n\r\n#\r\n# Macro called when QuArK needs the images of a Duplicator.\r\n#\r\n\r\ndef MACRO_duplicator(dup):\r\n \"Computes Duplicator images.\"\r\n qutils.loadmapeditor()\r\n import mapduplicator\r\n return mapduplicator.DupManager(dup).buildimages()\r\n\r\n\r\n#\r\n# Macro called when a linear operation is applied.\r\n#\r\n\r\ndef MACRO_applylinear(entity, matrix):\r\n \"Applies a linear distortion (rotate, zoom, etc) on an entity or a Duplicator.\"\r\n # Note : \"origin\" is updated by QuArK before it calls this macro.\r\n qutils.loadmapeditor()\r\n import mapentities\r\n mapentities.CallManager(\"applylinear\", entity, matrix)\r\n\r\n\r\n#\r\n# Macro called when the mouse is over a control with a hint\r\n#\r\n\r\ndef MACRO_hint(form, text=None):\r\n if form is None:\r\n return \"\"\r\n import qbaseeditor\r\n if not isinstance(form.info, qbaseeditor.BaseEditor):\r\n return\r\n return form.info.showhint(text)\r\n\r\n\r\n#\r\n# Macro called to build a map (when the big GO! 
button is pressed).\r\n#\r\n\r\ndef MACRO_buildmaps(maps, mode, extracted, cfgfile=\"\", defaultbsp=None):\r\n \"Builds maps and runs Quake.\"\r\n\r\n if mode is None:\r\n code = \"P\"\r\n text = \"Play\"\r\n else:\r\n code = quarkx.buildcodes[mode]\r\n text = quarkx.buildmodes[mode]\r\n forcepak = \"K\" in code\r\n runquake = \"P\" in code\r\n build = quarkx.newobj(\":\")\r\n\r\n if \"C\" in code: #\r\n build[\"Textures\"] = \"1\" # Complete rebuild\r\n build[\"QCSG1\"] = \"1\" #\r\n build[\"QBSP1\"] = \"1\"\r\n build[\"VIS1\"] = \"1\"\r\n build[\"LIGHT1\"] = \"1\"\r\n build[\"LIGHTCmd\"] = \"-extra\"\r\n\r\n elif \"F\" in code: #\r\n build[\"Textures\"] = \"1\" # Fast rebuild\r\n build[\"QCSG1\"] = \"1\" #\r\n build[\"QBSP1\"] = \"1\"\r\n\r\n else: #\r\n pass # Don't build maps\r\n #\r\n maplist = []\r\n for map in maps:\r\n root = map['Root']\r\n if root is None: continue\r\n root = map.findname(root)\r\n if root is None: continue\r\n maplist.append((map, root, build))\r\n\r\n qutils.loadmapeditor()\r\n import mapquakemenu\r\n mapquakemenu.RebuildAndRun(maplist, None, runquake, text, forcepak, extracted, cfgfile, defaultbsp)\r\n\r\n\r\n\r\n#\r\n# Macro called to \"pack\" a model.\r\n#\r\n\r\ndef MACRO_pack_model(model):\r\n import mdlpack\r\n return mdlpack.PackModel(model)\r\n\r\n#\r\n# Macro called when a model component is modified.\r\n#\r\n\r\ndef MACRO_update_model(component):\r\n import mdlpack\r\n mdlpack.UpdateModel(component)\r\n\r\n\r\n#\r\n# Macro called when an item in the '?' 
menu is selected.\r\n#\r\n\r\nhelpfn = {}\r\ndef MACRO_helpmenu(text):\r\n import qeditor\r\n getattr(qeditor, helpfn[text])()\r\n\r\n\r\n#\r\n# Macro called to open the OpenGL window in background\r\n#\r\n\r\ndef MACRO_OpenGL(minx, miny):\r\n import qopengl, qeditor\r\n qopengl.open(qeditor.mapeditor(), minx, miny, bkgnd=1) #, force=1)\r\n\r\ndef MACRO_shutdown(text):\r\n# quitfile=open(quarkx.exepath+'quit.txt','w')\r\n# quitfile.write('quitting\\n')\r\n import qutils\r\n\r\n del qutils.ico_objects\r\n del qutils.ico_editor\r\n \r\n for key in qutils.ico_dict.keys():\r\n del qutils.ico_dict[key]\r\n# quitfile.write('zapping '+key+'\\n')\r\n del qutils.ico_dict\r\n\r\n# quitfile.write('done\\n')\r\n# quitfile.close()\r\n \r\n#\r\n# ---- Dialog Boxes ----\r\n#\r\n\r\ndialogboxes = {}\r\n\r\ndef closedialogbox(name):\r\n try:\r\n dialogboxes[name].close()\r\n del dialogboxes[name]\r\n except KeyError:\r\n pass\r\n\r\n\r\n#\r\n# The class \"dialogbox\" is a base for actual dialog boxes.\r\n# See qeditor.py and mapfindreptex.py for examples.\r\n#\r\n\r\nclass dialogbox:\r\n\r\n dlgdef = \"\"\r\n size = (300,170)\r\n begincolor = None\r\n endcolor = None\r\n name = None\r\n dfsep = 0.6\r\n dlgflags = qutils.FWF_KEEPFOCUS | qutils.FWF_POPUPCLOSE\r\n\r\n def __init__(self, form, src, **buttons):\r\n name = self.name or self.__class__.__name__\r\n closedialogbox(name)\r\n f = quarkx.newobj(\"Dlg:form\")\r\n f.loadtext(self.dlgdef)\r\n self.f = f\r\n for pybtn in f.findallsubitems(\"\", ':py'):\r\n pybtn[\"sendto\"] = name\r\n self.buttons = buttons\r\n dlg = form.newfloating(self.dlgflags, f[\"Caption\"])\r\n dialogboxes[name] = dlg\r\n dlg.windowrect = self.windowrect()\r\n if self.begincolor is not None: dlg.begincolor = self.begincolor\r\n if self.endcolor is not None: dlg.endcolor = self.endcolor\r\n dlg.onclose = self.onclose\r\n dlg.info = self\r\n self.dlg = dlg\r\n self.src = src\r\n df = dlg.mainpanel.newdataform()\r\n self.df = df\r\n df.header = 0\r\n df.sep = 
self.dfsep\r\n df.setdata(src, f)\r\n df.onchange = self.datachange\r\n df.flags = 8 # DF_AUTOFOCUS\r\n dlg.show()\r\n\r\n def windowrect(self):\r\n x1,y1,x2,y2 = quarkx.screenrect()\r\n cx = (x1+x2)/2\r\n cy = (y1+y2)/2\r\n size = self.size\r\n return (cx-size[0]/2, cy-size[1]/2, cx+size[0]/2, cy+size[1]/2)\r\n\r\n def datachange(self, df):\r\n pass # abstract\r\n\r\n def onclose(self, dlg):\r\n dlg.info = None\r\n dlg.onclose = None # clear refs\r\n if self.df is not None:\r\n self.df.onchange = None\r\n self.df = None\r\n self.dlg = None\r\n del self.buttons\r\n\r\n def close(self, reserved=None):\r\n self.dlg.close()\r\n\r\n\r\ndef MACRO_pybutton(pybtn):\r\n dlg = dialogboxes[pybtn[\"sendto\"]]\r\n return dlg.info.buttons[pybtn.shortname]\r\n\r\ndef MACRO_makeaddon(self):\r\n import qutils\r\n a = quarkx.getqctxlist()\r\n a.reverse()\r\n i = 0\r\n while (a[i][\"GameDir\"] == None):\r\n i = i + 1\r\n a[i].makeentitiesfromqctx();\r\n\r\ndef MACRO_makeaddon_tex(self):\r\n import qutils\r\n a = quarkx.getqctxlist()\r\n a.reverse()\r\n i = 0\r\n while (a[i][\"GameDir\"] == None):\r\n i = i + 1\r\n a[i].maketexturesfromqctx();\r\n\r\nentfn = {}\r\n\r\ndef MACRO_loadentityplugins(self):\r\n import plugins\r\n plugins.LoadPlugins(\"ENT\")\r\n global MACRO_loadentityplugins\r\n MACRO_loadentityplugins = lambda x: None # next calls to loadmdleditor() do nothing\r\n\r\ndef MACRO_ent_convertfrom(text):\r\n import qeditor\r\n import qutils\r\n a = quarkx.getqctxlist()\r\n a.reverse()\r\n # Decker - Some menuitem-captions contains a '&'-character (you know, the one which tells what mnemonic-key can be used)\r\n # These '&'-characters has to be removed, for the entfn[text] to work properly.\r\n import string\r\n text = string.replace(text, \"&\", \"\")\r\n entf = entfn[text]\r\n if entf is not None:\r\n files = quarkx.filedialogbox(\"Select File\", text, entf[0], 0)\r\n if len(files) != 0:\r\n file = files[0]\r\n gn = a[0][\"GameDir\"]\r\n if (gn is None) or (gn == \"\"):\r\n 
gn = file\r\n entf[1](a[0].parent, file, gn)\r\n\r\n\r\n# ----------- REVISION HISTORY ------------\r\n#\r\n#$Log$\r\n#Revision 1.15 2001/10/22 10:28:20 tiglari\r\n#live pointer hunt, revise icon loading\r\n#\r\n#Revision 1.14 2001/10/20 02:13:18 tiglari\r\n#live pointer hunt: redo shutdown macro\r\n#\r\n#Revision 1.13 2001/07/27 11:31:47 tiglari\r\n#bsp study: plane viewing, faces in treeview\r\n#\r\n#Revision 1.12 2001/06/18 20:30:12 decker_dk\r\n#Replace all '&'-characters with nothing, for menuitem-captions used as indexes into python-style dictionaries.\r\n#\r\n#Revision 1.11 2001/06/13 23:01:13 aiv\r\n#Moved 'Convert From' stuff to python code (plugin type)\r\n#\r\n#Revision 1.10 2001/03/28 19:23:15 decker_dk\r\n#Added '(*.fgd)' to the filedialogbox-call.\r\n#\r\n#Revision 1.9 2001/03/15 21:09:01 aiv\r\n#moved .fgd reading to menu, sepearted texture & entity reading\r\n#\r\n#Revision 1.5 2000/06/02 16:00:22 alexander\r\n#added cvs headers\r\n#\r\n","sub_path":"runtime/tags/Q6_4_0alpha1/quarkpy/qmacro.py","file_name":"qmacro.py","file_ext":"py","file_size_in_byte":8945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"147878018","text":"\"\"\"\n\tUsage: \n\t\t\tdojo add_person []\n\t\t\tdojo create_room ...\n\t\t\tdojo print_room \n\t\t\tdojo print_allocations []\n\t\t\tdojo print_unallocated []\n\n\"\"\"\nfrom docopt import docopt\nfrom app.dojo import Dojo\nfrom cmd import Cmd\n\nnairobi=Dojo()\n\ndef start():\n\t\targuments = docopt(__doc__)\n\t\t\"\"\"Creating an instance of DOJO\"\"\"\n\n\t\t\"\"\"Parsing commandline arguments for commands\"\"\"\n\n\t\tcreate_room=arguments.get('create_room')\n\t\tadd_person=arguments.get('add_person')\n\t\tprint_room=arguments.get('print_room')\n\t\tprint_allocations=arguments.get('print_allocations')\n\t\tprint_unallocated=arguments.get('print_unallocated')\n\n\t\t\"\"\"if the parsed argument is create room, dojo proceeds to call the create room 
function\"\"\"\n\t\tif create_room:\n\t\t\trooms=arguments['']\n\t\t\troom_type=arguments['']\n\n\t\t\tif room_type=='livingSpace':\n\t\t\t\troom_type='living_space'\n\t\t\telif room_type=='office':\n\t\t\t\troom_type='office'\n\n\t\t\t\"\"\"Looping through a list of room names and calling dojo's create room function to create a room\"\"\"\n\t\t\tfor room in rooms:\n\t\t\t\tcreated=nairobi.create_room(room,room_type)\n\t\t\t\tif created !=False:\n\t\t\t\t\tprint(\"An {} called {} has been successfully created\".format(room_type,created.room_name))\n\n\t\t\"\"\"if the command is add_person, dojo calls its add_person method to add a person either staff or fellow\"\"\"\n\t\tif add_person:\n\t\t\tif arguments.get('') and arguments['']=='staff':\n\t\t\t\t\"\"\"Concatinate first name and last name\"\"\"\n\t\t\t\tperson=nairobi.add_person(''.join([arguments[''],arguments['']]),'staff')\n\t\t\t\t\n\t\t\t\tif person!=False:\n\t\t\t\t\tprint(\"Staff {} has been successfully added\".format(person.name))\n\t\t\t\t\tif len(person.office_allocation.strip())>0:\n\t\t\t\t\t\tprint(\"{} has been allocate office {}\".format(person.name,person.office_allocation))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"No offices to allocate {}\".format(person.name))\n\n\t\t\telif arguments.get('') and arguments['']=='fellow':\n\t\t\t\tneeds_space=False\n\t\t\t\tif arguments['']=='Y':\n\t\t\t\t\tneeds_space=True\n\t\t\t\tperson=nairobi.add_person(' '.join([arguments[''],arguments['']]),'fellow',needs_space)\n\t\t\t\t\n\t\t\t\tif person!=False:\n\t\t\t\t\tprint(\"Fellow {} has been successfully added\".format(person.name))\n\n\t\t\t\t\tif len(person.office_allocation.strip())>0:\n\t\t\t\t\t\tprint(\"{} has been allocate office {}\".format(person.name,person.office_allocation))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"No offices to allocate {}\".format(person.name))\n\n\t\tif print_room:\n\t\t\toccupants=nairobi.get_occupants(arguments[''])\n\t\t\tif len(occupants)>0:\n\t\t\t\tnames=[occupant.name for 
occupant in occupants]\n\t\t\t\tprint(\"{} occupy room {}\".format(','.join(names),arguments['']))\n\t\t\telse:\n\t\t\t\tprint(\"{} does not have occupants\".format(arguments['']))\n\n\t\tif print_allocations:\n\t\t\tallocations={**nairobi.office_allocations,**nairobi.living_space_allocations}\n\t\t\tif len(allocations)>0:\n\t\t\t\tprint(\"Allocations:\\n\")\n\t\t\t\tfor allocation,people in allocations.items():\n\t\t\t\t\tprint(allocation)\n\t\t\t\t\tprint(\"\\n\")\n\n\t\t\t\t\tfor person in people:\n\t\t\t\t\t\tprint(person.name)\n\n\t\t\t\tif arguments[''] and len(arguments[''])>0:\n\t\t\t\t\tprint(\"Writting to file\")\n\t\t\t\t\ttarget_file = open(arguments[''], 'w')\n\n\t\t\t\t\tfor person in unallocated:\n\t\t\t\t\t\ttarget_file.write(person)\n\t\t\telse:\n\t\t\t\tprint(\"Sorry,There are no allocations currently\")\n\n\t\tif print_unallocated:\n\t\t\tunallocated=nairobi.find_unallocated_people()\n\n\t\t\tprint(\"Unallocated People \\n\")\n\t\t\tfor person in unallocated:\n\t\t\t\tprint(person)\n\n\t\t\tif arguments[''] and len(arguments[''])>0:\n\t\t\t\tprint(\"Writting to file\")\n\t\t\t\ttarget_file = open(arguments[''], 'w')\n\n\t\t\t\tfor person in unallocated:\n\t\t\t\t\ttarget_file.write(person)\n\n\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"290916406","text":"__author__ = 'anna'\n#One card war game\n#From 1 to 4 players\n\nimport cards2, games\n\nclass W_Card(cards2.Card):\n \"\"\"A war game playing card\"\"\"\n\n @property\n def value(self):\n v = W_Card.RANKS.index(self.rank) + 2\n return v\nclass W_Deck(cards2.Deck):\n \"\"\"A war game deck\"\"\"\n def populate(self):\n for suit in W_Card.SUITS:\n for rank in W_Card.RANKS:\n self.cards.append(W_Card(rank, suit))\nclass W_Hand(cards2.Hand):\n \"\"\"A war game hand\"\"\"\n def __init__(self, name):\n super(W_Hand, self).__init__()\n self.name = 
name\n\n def __str__(self):\n rep = self.name + \":\\t\" + super(W_Hand, self).__str__() + \"(\" + str(self.total) + \")\"\n return rep\n @property\n def total(self):\n t = 0\n for card in self.cards:\n t += card.value\n return t\n\nclass W_Player(W_Hand):\n \"\"\"A war game player\"\"\"\n\n def win(self):\n print(self.name, \"wins.\")\n\n def lose(self):\n print(self.name, \"loses.\")\n\n def push(self):\n print(self.name, \"pushes.\")\n\nclass W_Game(object):\n \"\"\"A war game\"\"\"\n def __init__(self, names):\n self.players = [W_Player(name) for name in names]\n self.deck = W_Deck()\n self.deck.populate()\n self.deck.shuffle()\n\n\n def play(self):\n #deal 1 card to everyone\n self.deck.deal(self.players, per_hand=1)\n\n records = dict()\n for player in self.players:\n records[player.name] = player.total\n print(\"Records\", records)\n\n sort = sorted(records, key=records.get, reverse=True)\n print(\"Sorted keys\", sort)\n #winner rating using set\n st = sorted(set(sorted(records.values())), reverse=True)\n print(\"Set: \", st)\n win_rating = st[0]\n\n #announce winner(s)\n\n #this code is much easear and good but it may announce winners/users in a wrong order\n # for player in self.players:\n # if player.total == win_rating:\n # player.win()\n # else:\n # player.lose()\n\n for item in sorted(records.items()):\n if item[1] == win_rating:\n print(item[0], \"wins.\")\n\n #keys of winners to delete from records in order to left only losers\n to_del = [item[0] for item in records.items() if item[1] == win_rating]\n\n #deleting winners, only losers left\n for i in to_del:\n del records[i]\n\n #announce losers\n for loser in sorted(records):\n print(loser, \"loses.\")\n\n #remove everyone cards\n for player in self.players:\n player.clear()\n\ndef main():\n print(\"\\t\\tWelcome to War Game!\\n\")\n\n number = games.ask_number(\"How many players? 
(1-4): \", low=1, high=5)\n names = [input(\"Enter player name: \") for i in range(number)]\n print(names)\n print()\n\n game = W_Game(names)\n count = 0\n again = None\n while again != \"n\":\n count +=1\n if count > 1:\n game.deck.populate()\n game.deck.shuffle()\n game.play()\n again = games.ask_yes_no(\"\\nDo you want to play again?: \")\n\nmain()\ninput(\"\\nPress the enter key to exit.\")\n\n\n\n","sub_path":"Python GameBook/ch9_2.py","file_name":"ch9_2.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"78361","text":"from collections import OrderedDict\nimport cv2\nimport numpy as np\nfrom scipy.spatial import distance as dist\nimport imutils\n\n\nclass ShapeDetector:\n def __init__(self):\n pass\n\n def detect(self, contour):\n # initialize the shape name and approximate the contour\n perimeter = cv2.arcLength(curve=contour, closed=True)\n approx = cv2.approxPolyDP(\n curve=contour,\n epsilon=0.02 * perimeter,\n closed=True\n )\n\n # if the shape has 3 vertices, it is a triangle\n if len(approx) == 3:\n shape = 'triangle'\n\n # if the shape has 4 vertices, it is a rectangle\n elif len(approx) == 4:\n shape = 'rectangle'\n\n # otherwise, assume the shape is a circle\n else:\n shape = 'circle'\n\n # return the name of the shape\n return shape\n\n\nclass ColorLabeler:\n def __init__(self):\n # initialize the colors dictionary, containing the color\n # name as the key and the RGB tuple as the value\n colors = OrderedDict({\n 'red': (255, 0, 0),\n 'green': (0, 255, 0),\n 'blue': (0, 0, 255)\n })\n\n # allocate memory for the L*a*b* image, then initialize\n # the color names list\n self.lab = np.zeros(shape=(len(colors), 1, 3), dtype='uint8')\n self.color_names = []\n\n # loop over the colors dictionary\n for (color_number, (color_name, rgb)) in enumerate(colors.items()):\n # update the L*a*b* array and the color names list\n self.lab[color_number] = rgb\n 
self.color_names.append(color_name)\n\n # convert the L*a*b* array from the RGB color space\n # to L*a*b*\n self.lab = cv2.cvtColor(src=self.lab, code=cv2.COLOR_RGB2LAB)\n\n def label(self, image, contour):\n # construct a mask for the contour, then compute the\n # average L*a*b* value for the masked region\n mask = np.zeros(shape=image.shape[:2], dtype='uint8')\n cv2.drawContours(\n image=mask,\n contours=[contour],\n contourIdx=-1,\n color=255,\n thickness=-1\n )\n mask = cv2.erode(mask, None, iterations=2)\n mean = cv2.mean(image, mask=mask)[:3]\n\n # initialize the minimum distance found thus far\n min_dist = (np.inf, None)\n\n # loop over the known L*a*b* color values\n for (lab_num, color) in enumerate(self.lab):\n # compute the distance between the current L*a*b*\n # color value and the mean of the image\n distance = dist.euclidean(color[0], mean)\n\n # if the distance is smaller than the current distance,\n # then update the bookkeeping variable\n if distance < min_dist[0]:\n min_dist = (distance, lab_num)\n\n # return the name of the color with the smallest distance\n return self.color_names[min_dist[1]]\n\n\ndef _main():\n # load image and define template for counting figures\n image = cv2.imread(r'input.png')\n count_dict = OrderedDict((\n ('red rectangle', 0),\n ('blue triangle', 0),\n ('green circle', 0),\n ))\n\n # prepossess image\n blurred = cv2.GaussianBlur(\n src=image,\n ksize=(5, 5),\n sigmaX=0\n )\n gray = cv2.cvtColor(src=blurred, code=cv2.COLOR_BGR2GRAY)\n thresh = cv2.threshold(\n src=gray,\n thresh=200,\n maxval=255,\n type=cv2.THRESH_BINARY_INV\n )[1]\n lab = cv2.cvtColor(src=blurred, code=cv2.COLOR_BGR2LAB)\n # show_img(thresh)\n\n # find contours in the thresholded image\n contours = cv2.findContours(\n image=thresh.copy(),\n mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_SIMPLE\n )\n contours = contours[0] if imutils.is_cv2() else contours[1]\n\n # initialize the shape detector and color labeler\n shape_detector = ShapeDetector()\n 
color_labeler = ColorLabeler()\n\n # loop over the contours\n for contour in contours:\n # compute the center of the contour\n M = cv2.moments(contour)\n centerX = int(M[\"m10\"] / M[\"m00\"])\n centerY = int(M[\"m01\"] / M[\"m00\"])\n\n # detect the shape of the contour and label the color\n shape = shape_detector.detect(contour)\n color = color_labeler.label(lab, contour)\n\n # draw the contours and the name of the shape and labeled\n # color on the image\n contour_label = '{} {}'.format(color, shape)\n cv2.drawContours(\n image=image,\n contours=[contour],\n contourIdx=-1,\n color=(255, 0, 255), # magenta\n thickness=2\n )\n cv2.putText(\n img=image,\n text=contour_label,\n org=(centerX - 20, centerY - 20),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.5,\n color=(255, 0, 255), # magenta\n thickness=2\n )\n\n # show_img(image)\n count_contour(shape, color, count_dict)\n\n print_dictionary(count_dict)\n\n\ndef show_img(img):\n cv2.namedWindow('output', cv2.WINDOW_NORMAL)\n cv2.imshow('output', img)\n cv2.waitKey(0) # press any key to continue\n\n\ndef count_contour(shape, color, count_dict):\n try:\n count_dict['{} {}'.format(color, shape)] += 1\n except KeyError:\n pass\n\n\ndef print_dictionary(dictionary):\n for key, value in dictionary.items():\n print(key, value)\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"find_shapes.py","file_name":"find_shapes.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"190690858","text":"import math,time\ndef fun(n):\n n_sq = math.sqrt(n)\n m = 0\n d = 1\n a = math.floor(n_sq)\n a0 = a\n l = [a0]\n while a!= 2*a0:\n m = d*a-m\n d = (n - m**2)/d\n a = (a0+m)//d\n l.append(int(a))\n return l\n\ndef fun2(N):\n period = 0\n for n in range(2,N+1):\n if math.sqrt(n)==int(math.sqrt(n)):\n continue\n elif len(fun(n)[1:]) % 2 != 0:\n period += 1\n else:\n continue\n return period\n\nif __name__ == '__main__':\n st = 
time.clock()\n x = fun2(10000)\n print(x,time.clock()-st)","sub_path":"Projecteuler/archives 64.py","file_name":"archives 64.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"596200547","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nprint(\"start importing\")\n\nimport os\nimport sys\n\nfrom pylab import *\nimport pandas, numpy\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier\n\nfrom hep_ml import uboost, gradientboosting as ugb, losses\n\nprint('importing rep')\nfrom rep.estimators import TMVAClassifier, SklearnClassifier, XGBoostClassifier\nfrom rep.report.metrics import RocAuc\nfrom rep.metaml import ClassifiersFactory\nprint('rep imported')\n\nprint('importing Root and root_pandas')\nfrom root_pandas import read_root, to_root\nfrom ROOT import TGraphErrors, TCanvas\nprint('root imported')\n\nprint(\"importing completed\")\n\nprimFoldnumber = int(sys.argv[1])\nn_threads = int(sys.argv[2])\n\nfoldnameList = []\n\nprint('reading root files')\nfor n in range(0,10):\n if n == primFoldnumber:\n continue\n foldnameList.append('/disk/users/odahme/KstarSelection/folds/fold'+ str(n) +'/fold'+ str(n) +'.root')\n\nfor f in range(0,len(foldnameList)):\n if f==0:\n trainD = read_root(foldnameList[f],'default')\n continue\n temp = read_root(foldnameList[f],'default')\n trainD = pandas.concat([trainD,temp],ignore_index=True)\n print('added fold number '+str(f))\n del temp\ntestD = read_root('/disk/users/odahme/KstarSelection/folds/fold'+ str(primFoldnumber) +'/fold'+ str(primFoldnumber) +'.root')\nprint('reading finished')\n#trainD = trainD.iloc[range(0,0000)]\n#testD = testD.iloc[range(0,10000)]\ntrainY = trainD['label']\ntestY = testD['label']\ntrainX = trainD.drop('label',axis=1)\ntestX = 
testD.drop('label',axis=1)\n\n#print(trainY)\n#print(testY)\n\nbool_index = np.logical_or(np.logical_or(\n np.logical_or(trainX['B0_M'] >= 6000., trainX['B0_M'] <= 5380.),\n\n np.logical_or(\n np.logical_and(trainX['J_psi_M']*trainX['J_psi_M']*1e-6 >= 8.,\n trainX['J_psi_M']*trainX['J_psi_M']*1e-6 <= 11.),\n np.logical_and(trainX['J_psi_M']*trainX['J_psi_M']*1e-6 >= 12.5,\n trainX['J_psi_M']*trainX['J_psi_M']*1e-6 <= 15.)\n )\n ),\n np.logical_or(trainX['J_psi_M']*trainX['J_psi_M']*1e-6 >= 19. , trainX['J_psi_M']*trainX['J_psi_M']*1e-6 <= 0.1)\n )\ncut_indexList = trainX.index[bool_index]\nprint(str(len(cut_indexList))+' values had been cut off.')\ntrainX = trainX.drop(cut_indexList)\nprint(str(len(trainX))+' values remain for training.')\ntrainY = trainY.drop(cut_indexList)\n\n\ntrain_features = [ \"B0_ENDVERTEX_CHI2\",\"B0_IP_OWNPV\",\"B0_IPCHI2_OWNPV\", \"B0_FD_OWNPV\",\"B0_FDCHI2_OWNPV\",\"B0_P\",\"B0_PT\",\n \"B0_relinfo_VTXISOBDTHARDFIRSTVALUE\" ]\nuniform_features = ['B0_M', \"B0_ThetaK\", \"B0_ThetaL\",\"B0_Phi\"]\nn_estimators = 500\nbase_estimator = DecisionTreeClassifier(max_depth=6)\n\n\nclassifiers = ClassifiersFactory()\n\nuboost_clf = uboost.uBoostClassifier(uniform_features=uniform_features, uniform_label=1,\n base_estimator=base_estimator,\n n_estimators=n_estimators, train_features=train_features,\n efficiency_steps=12, n_threads=n_threads)\n\nclassifiers['uBoost'] = SklearnClassifier(uboost_clf)\n\nclassifiers['sk_bdt'] = SklearnClassifier( AdaBoostClassifier( n_estimators=n_estimators), features=train_features)\n\n\n\nclasNameList = ['uBoost','sk_bdt']\n\n\nprint('start training')\nclassifiers.fit(trainX, trainY, parallel_profile='threads-2')\nreport = classifiers.test_on(testX, testY)\nsig_prob = classifiers.predict_proba(testX)\npred = zeros(len(testX))\nfor clasN in clasNameList:\n pred += sig_prob[clasN][:,1]\npred = 
pred/len(clasNameList)\ntestX.insert(len(testX.iloc[0]),'bdt',pred)\n\ntestX.to_root('/disk/users/odahme/KstarSelection/folds/fold'+ str(primFoldnumber) +'/clas_fold'+ str(primFoldnumber) +'.root')\n\n\nc1 = TCanvas('c1','fold_'+str(primFoldnumber)+'_roc',1920,1080)\nc1.cd()\nmultiGr, leg = report.roc()._plot_tmva()\nmultiGr.GetXaxis().SetTitle('false positive rate')\nmultiGr.GetYaxis().SetTitle('true positive rate')\nmultiGr.SetTitle('ROC curves')\nmultiGr.Draw()\nleg.Draw()\nc1.SaveAs('/disk/users/odahme/KstarSelection/folds/fold'+ str(primFoldnumber) +'/fold'+ str(primFoldnumber) +'_roc.pdf')\n","sub_path":"farm_selection/farm_test/clas_fold.py","file_name":"clas_fold.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"502780199","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'ipetrash'\n\n\n# SOURCE: https://stackoverflow.com/a/8551810/5909792\n\n\nfrom PyQt5 import Qt\n\n# A QApplication instance is necessary if fonts are used in the SVG\napp = Qt.QApplication([])\n\n# Load your SVG\nrenderer = Qt.QSvgRenderer(\"input.svg\")\n\nfor width, height in [(32, 32), (64, 64), (512, 512), (4096, 4096)]:\n # Prepare a QImage with desired characteritisc\n image = Qt.QImage(width, height, Qt.QImage.Format_ARGB32)\n\n # Partly transparent red-ish background\n image.fill(Qt.Qt.transparent)\n\n # Get QPainter that paints to the image\n painter = Qt.QPainter(image)\n renderer.render(painter)\n\n # Save, image format based on file extension\n image.save(\"output_{}x{}.png\".format(width, height))\n","sub_path":"qt__pyqt__pyside__pyqode/render_image_from_svg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"30880660","text":"class Node:\n def __init__(self, value):\n self._left = None\n self._right = None\n self._value = 
value\n\ndef dfs(root, startNode, endNode):\n stack = [(startNode, [startNode])]\n seen = set()\n seen.add(startNode)\n\n populateNodeToParentDict = populateNodeToParent(root)\n\n while stack:\n for i in range(len(stack)):\n (currentNode, path) = stack.pop()\n\n if currentNode == endNode:\n return path\n\n if currentNode is not None:\n if (currentNode._left is not None) and (currentNode._left not in seen):\n seen.add(currentNode._left)\n stack.append((currentNode._left, path + [currentNode._left]))\n\n if (currentNode._right is not None) and (currentNode._right not in seen):\n seen.add(currentNode._right)\n stack.append((currentNode._right, path + [currentNode._right]))\n\n parrentOfCurrentNode = populateNodeToParentDict.get(currentNode)\n if (parrentOfCurrentNode is not None) and (parrentOfCurrentNode not in seen):\n seen.add(parrentOfCurrentNode)\n stack.append((parrentOfCurrentNode, path + [parrentOfCurrentNode]))\n\ndef populateNodeToParent(root):\n res = {}\n if root is not None:\n return _populateNodeToParent(res, root, None)\n\ndef _populateNodeToParent(res, currentNode, parent):\n if currentNode is not None:\n res.update({currentNode: parent}) \n _populateNodeToParent(res, currentNode._left, currentNode)\n _populateNodeToParent(res, currentNode._right, currentNode)\n \n return res\n\nroot = Node(10)\nnode1 = Node(11)\nnode2 = Node(12)\nnode3 = Node(13)\nnode4 = Node(14)\nnode5 = Node(15)\n\nroot._left = node1\nroot._right = node2\nnode1._left = node3\nnode1._right = node4\nnode4._right = node5\n\npath = dfs(root, root, node4)\n\nfor i in path:\n print(i._value)\n","sub_path":"data-structures/trees/depthFirstSearch.py","file_name":"depthFirstSearch.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"114276314","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Views.\"\"\"\n\nimport logging\n\nfrom django.contrib import messages\nfrom django.core.exceptions import 
PermissionDenied\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.encoding import force_text\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views.generic import (\n CreateView,\n DeleteView,\n DetailView,\n ListView,\n UpdateView,\n View\n)\n\nfrom .forms import (\n VcnAccountCreationForm\n)\nfrom .models import (\n VcnAccount,\n)\nfrom .tokens import (\n account_activation_token\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass VcnAccountListView(ListView):\n \"\"\"View that returns the list of VCN accounts.\"\"\"\n\n model = VcnAccount\n staff = False\n webmaster = False\n\n def get_queryset(self):\n \"\"\"Get queryset.\n\n Staff user and superuser can see all the account (active and inactive)\n \"\"\"\n qs = VcnAccount.objects.all()\n\n if not self.request.user.is_staff and not self.request.user.is_superuser:\n qs = qs.filter(is_active=True)\n\n if self.staff:\n qs = qs.filter(is_staff=self.staff)\n\n if self.webmaster:\n qs = qs.filter(is_superuser=self.webmaster)\n\n return qs\n\n\nclass VcnAccountDetailView(DetailView):\n \"\"\"View that returns the detail of VCN account.\"\"\"\n\n model = VcnAccount\n # use username instead of pk\n slug_field = \"username\"\n\n\nclass VcnAccountCreateView(CreateView):\n \"\"\"View that creates a new VCN account.\"\"\"\n\n model = VcnAccount\n form_class = VcnAccountCreationForm\n\n def get(self, request, *args, **kwargs):\n \"\"\".\"\"\"\n if request.user.is_authenticated:\n return redirect(reverse(\"dj-vcn-accounts:update\", kwargs={'slug': request.user.username}))\n\n return super().get(request, args, kwargs)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Override POST method.\n\n User authenticated and tried to update the informations about an other user -> 403\n User is not authenticated -> 403\n \"\"\"\n if request.user.is_authenticated:\n raise PermissionDenied\n\n return super().post(request, 
*args, **kwargs)\n\n def form_valid(self, form):\n \"\"\"...\"\"\"\n self.user = form.save(commit=False)\n self.user.is_active = False\n self.user.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n \"\"\"Get the URL after the success.\"\"\"\n messages.success(self.request, \"Your account has successfully been created.\"\n \"Go to your email account to finish the activation.\")\n return reverse('dj-vcn-accounts:detail', kwargs={'slug': self.object.username})\n\n\nclass VcnAccountUpdateView(UpdateView):\n \"\"\"View that updates a VCN account.\"\"\"\n\n model = VcnAccount\n fields = ['first_name', 'last_name', 'email', 'phone']\n # use username instead of pk\n slug_field = \"username\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Override GET method.\n\n User authenticated and tried to update the informations about an other user -> 403\n User is not authenticated -> 403\n \"\"\"\n self.object = self.get_object()\n\n # If user is superuser\n if request.user.is_superuser:\n logger.info(\"Superuser {} accessed (GET) the UpdateView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n # If user is part of staff\n elif request.user.is_staff:\n logger.info(\"Staff user {} accessed (GET) the UpdateView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n # Anonymous user can not update account\n elif request.user.is_anonymous:\n logger.error(\"Anonymous user tried to GET the UpdateView of {}'s account.\".format(self.object.username))\n raise PermissionDenied\n # Authenticated user can not update an other user account\n elif request.user.id != self.object.id:\n logger.error(\"User {} tried to GET the UpdateView of {}'s account.\".format(\n request.user.username, self.object.username))\n raise PermissionDenied\n\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Override POST method.\n\n User authenticated and tried to update the 
informations about an other user -> 403\n User is not authenticated -> 403\n \"\"\"\n self.object = self.get_object()\n\n # If user is superuser\n if request.user.is_superuser:\n logger.info(\"Superuser {} accessed (POST) the UpdateView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n # If user is part of staff\n elif request.user.is_staff:\n logger.info(\"Staff user {} accessed (POST) the UpdateView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n elif request.user.is_anonymous:\n logger.error(\n \"Anonymous user tried to POST to the DeleteView of {}'s account.\".format(self.object.username)\n )\n raise PermissionDenied\n elif request.user.id != self.object.id:\n logger.error(\"User {} tried to POST to the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n raise PermissionDenied\n\n return super().post(request, *args, **kwargs)\n\n def get_success_url(self):\n \"\"\"Get the URL after the success.\"\"\"\n messages.success(self.request, \"You successfully updated your account.\")\n return reverse('dj-vcn-accounts:detail', kwargs={'slug': self.object.username})\n\n\nclass VcnAccountDeleteView(DeleteView):\n \"\"\"View that deletes a VCN account.\"\"\"\n\n model = VcnAccount\n # use username instead of pk\n slug_field = \"username\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Override GET method.\n\n User authenticated and tried to update the informations about an other user -> 403\n User is not authenticated -> 403\n \"\"\"\n self.object = self.get_object()\n\n # If user is superuser\n if request.user.is_superuser:\n logger.info(\"Superuser {} accessed (GET) the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n # If user is part of staff\n elif request.user.is_staff:\n logger.info(\"Staff user {} accessed (GET) the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n 
# Anonymous user can not update account\n elif request.user.is_anonymous:\n logger.error(\"Anonymous user tried to GET the DeleteView of {}'s account.\".format(self.object.username))\n raise PermissionDenied\n # Authenticated user can not update an other user account\n elif request.user.id != self.object.id:\n logger.error(\"User {} tried to GET the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n raise PermissionDenied\n\n return super().get(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"Override DELETE method (in DeleteView, post function calls delete function).\n\n User authenticated and tried to update the informations about an other user -> 403\n User is not authenticated -> 403\n \"\"\"\n self.object = self.get_object()\n\n # If user is superuser\n if request.user.is_superuser:\n logger.info(\"Superuser {} accessed (DELETE) the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n # If user is part of staff\n elif request.user.is_staff:\n logger.info(\"Staff user {} accessed (DELETE) the DeleteView of {}'s account.\".format(\n request.user.username, self.object.username))\n pass\n elif request.user.is_anonymous:\n logger.error(\"Anonymous user tried to delete {}'s account.\".format(self.object.username))\n raise PermissionDenied\n elif request.user.id != self.object.id:\n logger.error(\"User {} tried to delete {}'s account.\".format(\n request.user.username, self.object.username))\n raise PermissionDenied\n\n logger.info(\"VcnAccount {} deactivated\".format(self.object.get_username()))\n self.object.is_active = False\n self.object.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n \"\"\"Get the URL after the success.\"\"\"\n messages.success(self.request, \"You successfully deactivated your account.\")\n return reverse('dj-vcn-accounts:list')\n\n\nclass VcnAccountActivationView(View):\n \"\"\"View 
handled when the user activates its account.\n\n See https://medium.com/@frfahim/django-registration-with-confirmation-email-bb5da011e4ef for documentation\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"...\"\"\"\n try:\n user_id = force_text(urlsafe_base64_decode(kwargs['uidb64']))\n user = VcnAccount.objects.get(pk=user_id)\n logging.info(\"Activation requested for user {}\".format(user.get_username()))\n except(TypeError, ValueError, OverflowError, VcnAccount.DoesNotExist) as e:\n logging.exception(\"Failed to decode user during VcnAccountActivationView: \" + str(e))\n user = None\n\n if user is not None and account_activation_token.check_token(user, kwargs['token']):\n user.is_active = True\n user.save()\n else:\n messages.error(self.request, \"Activation link is invalid!\")\n raise Http404\n\n messages.success(self.request, 'Thank you for your email confirmation. Now you can login your account.')\n return redirect(reverse(\"dj_vcn_accounts:list\"))\n","sub_path":"dj_vcn_accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"450535933","text":"import numpy as np\r\nfrom pandas import *\r\nfrom matplotlib import pyplot as plt\r\nfrom pylab import *\r\nimport time\r\nimport logging\r\nimport os.path\r\nimport seaborn as sns\r\nfrom IPython.core.pylabtools import figsize\r\nfrom collections import defaultdict\r\nimport networkx as nx\r\n\r\n#insert paper into db\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.INFO)\r\n\r\n# 定义handler的输出格式\r\n#logger to console\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.INFO)\r\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\r\n#fh.setFormatter(formatter)\r\nch.setFormatter(formatter)\r\nlogger.addHandler(ch)\r\n#logger.addHandler(fh)Hs_Hr\r\ndir = \"D:\\\\zico's conference & 
presentation\\\\201806BOSTON\\\\ms\\\\data\\\\\"\r\n\r\nsrc_file = dir+\"Hs_Hr.txt\"\r\n\r\nsrc_weibo_file = dir+\"Weibo_RT_2.txt\"\r\n\r\nfig_file = dir+\"output\\\\Hr_level_size_al.png\"\r\ncolors= ['black','bisque','lightgreen','slategrey','lightcoral','gold',\r\n 'c','cornflowerblue','blueviolet','tomato','olivedrab',\r\n 'lightsalmon','sage','lightskyblue','orchid','hotpink',\r\n 'silver', 'slategray', 'indigo', 'darkgoldenrod','orange']\r\n\r\nplt.rcParams['figure.figsize'] = (10, 8)\r\n\r\nf = open(src_file,encoding='UTF-8', mode='r',errors='ignore')\r\nline= f.readline()\r\n\r\nx = []\r\ny = []\r\n\r\nnum_line_count=0\r\n\r\nuser_hr= defaultdict(str)\r\nhr_index= defaultdict(int)\r\nnum_hr=defaultdict(int)\r\ntmp_x1 = []\r\ntime_start=time.time()\r\n\r\nwhile line:\r\n# logger.info(tmp_src_file)\r\n num_line_count+=1\r\n if(num_line_count==1):\r\n line =f.readline()\r\n continue\r\n words = line.replace('\\n', '').split('\\t')\r\n# Hs = int(words[2].strip())\r\n Hr = int(words[1].strip())\r\n id= words[0].strip()\r\n user_hr[id] = Hr\r\n# sum_hr[Hr]+=Hs\r\n if(hr_index.get(Hr) is None):\r\n hr_index[Hr]=Hr\r\n\r\n if(num_line_count%100000==0):\r\n logger.info(num_line_count)\r\n\r\n num_hr[Hr]+=1\r\n# x.append(Hs)\r\n# y.append(Hr)\r\n\r\n line = f.readline()\r\nf.close()\r\n\r\ntime_end=time.time()\r\nlogger.info(\"Read H_R Data Done! 
Time Cost:%d s.\", time_end-time_start)\r\n\r\nx=[]\r\n#print(hr_index)\r\n#for xi in hr_index:\r\n# x.append(xi)\r\n#print(x)\r\n#exit()\r\ndefault_motif_size =5 -1\r\ndefault_popular_size =500\r\nerror_count = 0\r\nnum_line_count=0\r\n\r\nf_weibo = open(src_weibo_file,encoding='UTF-8', mode='r',errors='ignore')\r\nline =f_weibo.readline()\r\n\r\nnum_hr_cascade_size= defaultdict(int)\r\nnum_hr_level= defaultdict(int)\r\nnum_hr_count=defaultdict(int)\r\n\r\nwhile line:\r\n retweets=line.replace('\\n','').split(';')\r\n num_line_count+=1\r\n #logger.warn(len(retweets))\r\n# if(len(retweets)= default_motif_size):\r\n# break\r\n g.add_edge(id1,id2)\r\n# graph.add_one_edge(g, id1, id2)\r\n tmp_tweet_num_count+=1\r\n\r\n if(chk_t0==chk_t1 and chk_id0!=chk_id1):\r\n # logger.info(\"%s,%s\",tmp_retweets[0],tmp_retweets[1])\r\n error_count+=1\r\n# elif(tmp_tweet_num_count>=default_motif_size):\r\n else:\r\n for node in g.nodes():\r\n if node!=chk_id0: #not root\r\n# dist= nx.shortest_path_length(g,node,chk_id0)\r\n level = nx.shortest_path_length(g, chk_id0, node)\r\n hr=user_hr[node]\r\n num_hr_level[hr]+=level\r\n num_hr_count[hr]+=1\r\n num_hr_cascade_size[hr]+=len(tmp_retweets)\r\n# logger.info(\"%s -> %s: %d\",chk_id0,node,dist)\r\n line=f_weibo.readline()\r\n if(num_line_count%10000==0):\r\n time_end = time.time()\r\n logger.info(\"reading lines:%d,Time Cost:%d s\",num_line_count,time_end - time_start)\r\nlogger.error(\"error lines:%d/%d\",error_count,num_line_count)\r\ntime_end=time.time()\r\nlogger.info(\"Read Weibo Data Done! 
Time Cost:%d s.\", time_end-time_start)\r\nf_weibo.close()\r\n\r\n#tmp_x1 = sorted(tmp_x1, key=lambda x2: x2[0])\r\n\r\n\r\n\r\nx=[]\r\ny1=[]\r\ny2=[]\r\n\r\nfor xi in hr_index:\r\n x.append(xi)\r\nx = sorted(x)\r\nfor xi in x:\r\n y1.append(num_hr_level[xi]*1.0/num_hr_count[xi])\r\n y2.append(num_hr_cascade_size[xi] * 1.0 / num_hr_count[xi])\r\n\r\n\r\nfig = plt.figure()\r\n\r\nax1 = fig.add_subplot(111)\r\nax1.plot(x, y1,'o-',label=('Layer vs. $H_R$'),color=colors[0], linewidth='5')\r\nax1.set_ylabel('Layer',size ='30')\r\nax1.set_title(\"Cascade Size and Layer vs. $H_R$\")\r\nax1.set_xlabel('$H_R$',size ='30')\r\nplt.legend(loc=1)\r\n\r\nax2 = ax1.twinx() # this is the important function\r\nax2.plot(x, y2,'o-',label=('Cascade Size vs. $H_R$'),color=colors[2], linewidth='5')\r\nax2.set_ylabel('Cascade Size',size ='30')\r\nplt.legend(loc=1)\r\n\r\n\r\n#plt.gca().xaxis.set_major_locator(plt.NullLocator())$$\r\n#plt.gca().yaxis.set_major_locator(plt.NullLocator())\r\n#plt.subplots_adjust(top = 2, bottom = 2, right = 2, left = 20, hspace = 2, wspace = 2)\r\n#plt.margins(0,0)\r\n\r\nplt.savefig(fig_file,dpi=400,bbox_inches='tight')\r\n#plt.draw()\r\nplt.show()\r\n","sub_path":"cn/edu/hznu/initialreaction/PlotHsHrCascadeRelation.py","file_name":"PlotHsHrCascadeRelation.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"458325627","text":"#Structure from SA Lecture\nimport sys,re,random,math\nsys.dont_write_bytecode = True\n \nfrom options import *\nfrom utils import *\nfrom sk import *\n\nmyOpt = Options()\n\nclass Analyzer:\n n = 50\n old = [1 for i in range (0, n)]\n new = [1 for i in range (0, n)]\n era_lives = myOpt.era_lives;\n\n def bettered(self, new, old):\n\n def quartiles(value):\n return value*.25, value*.5, value*.75\n \n def betterifless():\n p1, median1, p3 = quartiles(new)\n IQR1=p3-p1\n p1, median2, p3 = quartiles(old)\n IQR2=p3-p1\n return median1 key:\n 
return self.limits[key] + self.answer(i - (key + 1))\n return i * 'I'\n","sub_path":"RomanNumerals.py","file_name":"RomanNumerals.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"86107586","text":"# -*- coding: utf-8 -*-\n\nimport random\n\ndef typoglycemia(a: str) -> str:\n return ' '.join(map(shuffle, s.split(' ')))\n\ndef shuffle(s: str) -> str:\n if len(s) <= 4:\n return s\n else:\n f = s[0]\n l = s[-1]\n m = s[1:-1]\n m = ''.join(random.sample(m, len(m)))\n return f + m + l\n\nif __name__ == '__main__':\n s = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\n print(s)\n s = typoglycemia(s)\n print(s)\n","sub_path":"src/sec01/q09.py","file_name":"q09.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"314005913","text":"import numpy as np\n\ndef main():\n # nw[[count, weight_max]]\n nw = list(map(int, input().split(' ')))\n # num[[weight, value]]\n num = [list(map(int, input().split(' '))) for i in range(nw[0])]\n num = np.array(num)\n # dp[[i_until, weight_level]]\n dp = np.zeros([nw[0]+1, nw[1]+1], dtype=int)\n\n for i in range(nw[0]):\n for w in range(nw[1]+1):\n if num[i,0] <= w:\n dp[i+1,w] = max(dp[i, w-num[i,0]]+num[i,1], dp[i,w])\n else:\n dp[i+1,w] = dp[i,w]\n print(dp)\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/dp/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"311117875","text":"##################################################################################################\n# In this section, we set the user authentication, user and app ID, model details, and the URL\n# of the image we want as an input. 
Change these strings to run your own example.\n#################################################################################################\n\n# Your PAT (Personal Access Token) can be found in the portal under Authentification\nPAT = 'YOUR_PAT_HERE'\n# Specify the correct user_id/app_id pairings\n# Since you're making inferences outside your app's scope\nUSER_ID = 'clarifai'\nAPP_ID = 'main'\n# Change these to whatever model and image URL you want to use\nMODEL_ID = 'general-image-recognition'\nMODEL_VERSION_ID = 'aa7f35c01e0642fda5cf400f543e7c40'\nIMAGE_URL = 'https://samples.clarifai.com/metro-north.jpg'\n\n############################################################################\n# YOU DO NOT NEED TO CHANGE ANYTHING BELOW THIS LINE TO RUN THIS EXAMPLE\n############################################################################\n\nfrom clarifai_grpc.channel.clarifai_channel import ClarifaiChannel\nfrom clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc\nfrom clarifai_grpc.grpc.api.status import status_code_pb2\n\nchannel = ClarifaiChannel.get_grpc_channel()\nstub = service_pb2_grpc.V2Stub(channel)\n\nmetadata = (('authorization', 'Key ' + PAT),)\n\nuserDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)\n\npost_model_outputs_response = stub.PostModelOutputs(\n service_pb2.PostModelOutputsRequest(\n user_app_id=userDataObject, # The userDataObject is created in the overview and is required when using a PAT\n model_id=MODEL_ID,\n version_id=MODEL_VERSION_ID, # This is optional. 
Defaults to the latest model version\n inputs=[\n resources_pb2.Input(\n data=resources_pb2.Data(\n image=resources_pb2.Image(\n url=IMAGE_URL\n )\n )\n )\n ]\n ),\n metadata=metadata\n)\nif post_model_outputs_response.status.code != status_code_pb2.SUCCESS:\n print(post_model_outputs_response.status)\n raise Exception(\"Post model outputs failed, status: \" + post_model_outputs_response.status.description)\n\n# Since we have one input, one output will exist here\noutput = post_model_outputs_response.outputs[0]\n\nprint(\"Predicted concepts:\")\nfor concept in output.data.concepts:\n print(\"%s %.2f\" % (concept.name, concept.value))\n\n# Uncomment this line to print the full Response JSON\n#print(output)","sub_path":"code_snippets/api-guide/predict/python/images_via_url.py","file_name":"images_via_url.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"104347762","text":"\"\"\"\nDjango settings for testprodapp project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\nimport os\nimport random\nimport django\nimport djangae.environment\nfrom djangae.settings_base import * # noqa\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/\n\nif djangae.environment.is_production_environment():\n DEBUG = False\n SECRET_KEY = ''.join([\n random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789')\n for i in range(50)\n ])\n ALLOWED_HOSTS = ['.appspot.com']\n CSRF_COOKIE_SECURE = True\n SESSION_COOKIE_SECURE = True\nelse:\n DEBUG = True\n SECRET_KEY = 
'&x$ts1u)tx#5zsi84555$(@mydbz06&q23p8=c6fs1!d4%1a^u'\n\n# Application definition\n\nINSTALLED_APPS = (\n 'djangae',\n 'django.contrib.admin',\n 'djangae.contrib.gauth_datastore',\n 'djangae.contrib.security',\n 'django.contrib.contenttypes',\n 'djangae.contrib.contenttypes',\n 'django.contrib.sessions',\n 'testapp',\n)\n\nMIDDLEWARE = (\n 'djangae.contrib.security.middleware.AppEngineSecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'djangae.contrib.gauth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'session_csrf.CsrfMiddleware',\n)\n\nif tuple(django.VERSION[:2]) < (1, 10):\n MIDDLEWARE_CLASSES = MIDDLEWARE\n\n\nROOT_URLCONF = 'testapp.urls'\nSITE_ID = 1\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'djangae.db.backends.appengine',\n },\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/dev/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/dev/howto/static-files/\n\nSTATIC_ROOT = BASE_DIR + 'static'\nSTATIC_URL = '/static/'\n\nAUTH_USER_MODEL = 'gauth_datastore.GaeDatastoreUser'\nAUTHENTICATION_BACKENDS = (\n 'djangae.contrib.gauth_datastore.backends.AppEngineUserAPIBackend',\n)\n\n# Here because of \"You haven't defined a TEMPLATES setting\" deprecation message\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': [\n 'templates',\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth'\n ],\n 'debug': DEBUG,\n },\n },\n]\n\nfrom 
djangae.contrib.gauth.settings import * # noqa\n","sub_path":"testprodapp/testapp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"582272636","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport capnp\n\nthis_dir = os.path.dirname(__file__)\nout_path = os.path.join(this_dir, 'addressbook.bin')\naddressbook = capnp.load(os.path.join(this_dir, 'addressbook.capnp'))\n\ndef writeAddressBook(file):\n addresses = addressbook.AddressBook.newMessage()\n people = addresses.init('people', 2)\n\n alice = people[0]\n alice.id = 123\n alice.name = 'Alice'\n alice.email = 'alice@example.com'\n alicePhones = alice.init('phones', 1)\n alicePhones[0].number = \"555-1212\"\n alicePhones[0].type = 'mobile'\n alice.employment.school = \"MIT\"\n\n bob = people[1]\n bob.id = 456\n bob.name = 'Bob'\n bob.email = 'bob@example.com'\n bobPhones = bob.init('phones', 2)\n bobPhones[0].number = \"555-4567\"\n bobPhones[0].type = 'home'\n bobPhones[1].number = \"555-7654\"\n bobPhones[1].type = 'work'\n bob.employment.unemployed = None\n\n addresses.writeTo(file)\n\n\nif __name__ == '__main__':\n f = open(out_path, 'w')\n writeAddressBook(f)\n","sub_path":"examples/create_test_data.py","file_name":"create_test_data.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"154671668","text":"#***************************************************************************\n#* *\n#* Copyright (c) 2012 Sebastian Hoogen *\n#* *\n#* This program is free software; you can redistribute it and/or modify *\n#* it under the terms of the GNU Lesser General Public License (LGPL) *\n#* as published by the Free Software Foundation; either version 2 of *\n#* the License, or (at your option) any later version. *\n#* for detail see the LICENCE text file. 
*\n#* *\n#* This program is distributed in the hope that it will be useful, *\n#* but WITHOUT ANY WARRANTY; without even the implied warranty of *\n#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\n#* GNU Library General Public License for more details. *\n#* *\n#* You should have received a copy of the GNU Library General Public *\n#* License along with this program; if not, write to the Free Software *\n#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *\n#* USA *\n#* *\n#***************************************************************************\n\n__title__=\"FreeCAD OpenSCAD Workbench - Utility Fuctions\"\n__author__ = \"Sebastian Hoogen\"\n__url__ = [\"http://www.freecadweb.org\"]\n\n'''\nThis Script includes various pyhton helper functions that are shared across\nthe module\n'''\n\ndef translate(context,text):\n \"convenience function for Qt translator\"\n from PySide import QtGui\n return QtGui.QApplication.translate(context, text, None, \\\n QtGui.QApplication.UnicodeUTF8)\n\nclass OpenSCADError(Exception):\n def __init__(self,value):\n self.value= value\n #def __repr__(self):\n # return self.msg\n def __str__(self):\n return repr(self.value)\n\ndef searchforopenscadexe():\n import os,sys,subprocess\n if sys.platform == 'win32':\n testpaths = [os.path.join(os.environ.get('Programfiles(x86)','C:'),\\\n 'OpenSCAD\\\\openscad.exe')]\n if 'ProgramW6432' in os.environ:\n testpaths.append(os.path.join(os.environ.get('ProgramW6432','C:')\\\n ,'OpenSCAD\\\\openscad.exe'))\n for testpath in testpaths:\n if os.path.isfile(testpath):\n return testpath\n elif sys.platform == 'darwin':\n #test the default path\n testpath=\"/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD\"\n if os.path.isfile(testpath):\n return testpath\n else: #unix\n p1=subprocess.Popen(['which','openscad'],stdout=subprocess.PIPE)\n if p1.wait() == 0:\n opath=p1.stdout.read().split('\\n')[0]\n return opath\n\ndef workaroundforissue128needed():\n '''sets the 
import path depending on the OpenSCAD Verion\n for versions <= 2012.06.23 to the current working dir\n for versions above to the inputfile dir\n see https://github.com/openscad/openscad/issues/128'''\n vdate=getopenscadversion().split(' ')[2].split('.')\n year,mon=int(vdate[0]),int(vdate[1])\n return (year<2012 or (year==2012 and (mon <6 or (mon == 6 and \\\n (len(vdate)<3 or int(vdate[2]) <=23)))))\n #ifdate=int(vdate[0])+(int(vdate[1])-1)/12.0\n #if len(vdate)>2:\n # fdate+=int((vdate[2])-1)/12.0/31.0\n #return fdate < 2012.4759\n\ndef getopenscadversion(osfilename=None):\n import os,subprocess,time\n if not osfilename:\n import FreeCAD\n osfilename = FreeCAD.ParamGet(\\\n \"User parameter:BaseApp/Preferences/Mod/OpenSCAD\").\\\n GetString('openscadexecutable')\n if osfilename and os.path.isfile(osfilename):\n p=subprocess.Popen([osfilename,'-v'],\\\n stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)\n p.wait()\n stdout=p.stdout.read().strip()\n stderr=p.stderr.read().strip()\n return (stdout or stderr)\n\ndef newtempfilename():\n import os,time\n formatstr='fc-%05d-%06d-%06d'\n count = 0\n while True:\n count+=1\n yield formatstr % (os.getpid(),int(time.time()*100) % 1000000,count)\n\ntempfilenamegen=newtempfilename()\n\ndef callopenscad(inputfilename,outputfilename=None,outputext='csg',keepname=False):\n '''call the open scad binary\n returns the filename of the result (or None),\n please delete the file afterwards'''\n import FreeCAD,os,subprocess,tempfile,time\n def check_output2(*args,**kwargs):\n kwargs.update({'stdout':subprocess.PIPE,'stderr':subprocess.PIPE})\n p=subprocess.Popen(*args,**kwargs)\n stdoutd,stderrd = p.communicate()\n if p.returncode != 0:\n raise OpenSCADError('%s %s\\n' % (stdoutd.strip(),stderrd.strip()))\n #raise Exception,'stdout %s\\n stderr%s' %(stdoutd,stderrd)\n if stderrd.strip():\n FreeCAD.Console.PrintWarning(stderrd+u'\\n')\n if stdoutd.strip():\n FreeCAD.Console.PrintMessage(stdoutd+u'\\n')\n return 
stdoutd\n\n osfilename = FreeCAD.ParamGet(\\\n \"User parameter:BaseApp/Preferences/Mod/OpenSCAD\").\\\n GetString('openscadexecutable')\n if osfilename and os.path.isfile(osfilename):\n if not outputfilename:\n dir1=tempfile.gettempdir()\n if keepname:\n outputfilename=os.path.join(dir1,'%s.%s' % (os.path.split(\\\n inputfilename)[1].rsplit('.',1)[0],outputext))\n else:\n outputfilename=os.path.join(dir1,'%s.%s' % \\\n (tempfilenamegen.next(),outputext))\n check_output2([osfilename,'-o',outputfilename, inputfilename])\n return outputfilename\n else:\n raise OpenSCADError('OpenSCAD executeable unavailable')\n\ndef callopenscadstring(scadstr,outputext='csg'):\n '''create a tempfile and call the open scad binary\n returns the filename of the result (or None),\n please delete the file afterwards'''\n import os,tempfile,time\n dir1=tempfile.gettempdir()\n inputfilename=os.path.join(dir1,'%s.scad' % tempfilenamegen.next())\n inputfile = open(inputfilename,'w')\n inputfile.write(scadstr)\n inputfile.close()\n outputfilename = callopenscad(inputfilename,outputext=outputext,\\\n keepname=True)\n os.unlink(inputfilename)\n return outputfilename\n\ndef reverseimporttypes():\n '''allows to search for supported filetypes by module'''\n\n def getsetfromdict(dict1,index):\n if index in dict1:\n return dict1[index]\n else:\n set1=set()\n dict1[index]=set1\n return set1\n\n importtypes={}\n import FreeCAD\n for key,value in FreeCAD.getImportType().iteritems():\n if type(value) is str:\n getsetfromdict(importtypes,value).add(key)\n else:\n for vitem in value:\n getsetfromdict(importtypes,vitem).add(key)\n return importtypes\n\n\ndef fcsubmatrix(m):\n \"\"\"Extracts the 3x3 Submatrix from a freecad Matrix Object\n as a list of row vectors\"\"\"\n return [[m.A11,m.A12,m.A13],[m.A21,m.A22,m.A23],[m.A31,m.A32,m.A33]]\n\ndef multiplymat(l,r):\n \"\"\"multiply matrices given as lists of row vectors\"\"\"\n rt=zip(*r) #transpose r\n mat=[]\n for y in range(len(rt)):\n mline=[]\n for x in 
range(len(l)):\n mline.append(sum([le*re for le,re in zip(l[y],rt[x])]))\n mat.append(mline)\n return mat\n\ndef isorthogonal(submatrix,precision=4):\n \"\"\"checking if 3x3 Matrix is ortogonal (M*Transp(M)==I)\"\"\"\n prod=multiplymat(submatrix,zip(*submatrix))\n return [[round(f,precision) for f in line] \\\n for line in prod]==[[1,0,0],[0,1,0],[0,0,1]]\n\ndef detsubmatrix(s):\n \"\"\"get the determinant of a 3x3 Matrix given as list of row vectors\"\"\"\n return s[0][0]*s[1][1]*s[2][2]+s[0][1]*s[1][2]*s[2][0]+\\\n s[0][2]*s[1][0]*s[2][1]-s[2][0]*s[1][1]*s[0][2]-\\\n s[2][1]*s[1][2]*s[0][0]-s[2][2]*s[1][0]*s[0][1]\n\ndef isspecialorthogonalpython(submat,precision=4):\n return isorthogonal(submat,precision) and round(detsubmatrix(submat),precision)==1\n\ndef isrotoinversionpython(submat,precision=4):\n return isorthogonal(submat,precision) and round(detsubmatrix(submat),precision)==-1\n\ndef isspecialorthogonal(mat,precision=4):\n return abs(mat.submatrix(3).isOrthogonal(10**(-precision))-1.0) < \\\n 10**(-precision) and \\\n abs(mat.submatrix(3).determinant()-1.0) < 10**(-precision)\n\ndef decomposerotoinversion(m,precision=4):\n import FreeCAD\n rmat = [[round(f,precision) for f in line] for line in fcsubmatrix(m)]\n cmat = FreeCAD.Matrix()\n if rmat ==[[-1,0,0],[0,1,0],[0,0,1]]:\n cmat.scale(-1,1,1)\n return m*cmat,FreeCAD.Vector(1)\n elif rmat ==[[1,0,0],[0,-1,0],[0,0,1]]:\n cmat.scale(1,-1,1)\n return m*cmat, FreeCAD.Vector(0,1)\n elif rmat ==[[1,0,0],[0,1,0],[0,0,-1]]:\n cmat.scale(1,1,-1)\n return m*cmat, FreeCAD.Vector(0,0,1)\n else:\n cmat.scale(1,1,-1)\n return m*cmat, FreeCAD.Vector(0,0,1)\n\ndef mirror2mat(nv,bv):\n import FreeCAD\n \"\"\"calculate the transformation matrix of a mirror feature\"\"\"\n mbef=FreeCAD.Matrix()\n mbef.move(bv * -1)\n maft=FreeCAD.Matrix()\n maft.move(bv)\n return maft*vec2householder(nv)*mbef\n\ndef vec2householder(nv):\n \"\"\"calculated the householder matrix for a given normal vector\"\"\"\n import FreeCAD\n 
lnv=nv.dot(nv)\n l=2/lnv if lnv > 0 else 0\n hh=FreeCAD.Matrix(nv.x*nv.x*l,nv.x*nv.y*l,nv.x*nv.z*l,0,\\\n nv.y*nv.x*l,nv.y*nv.y*l,nv.y*nv.z*l,0,\\\n nv.z*nv.x*l,nv.z*nv.y*l,nv.z*nv.z*l,0,0,0,0,0)\n return FreeCAD.Matrix()-hh\n\n\ndef angneg(d):\n return d if (d <= 180.0) else (d-360)\n\ndef shorthexfloat(f):\n s=f.hex()\n mantisse, exponent = f.hex().split('p',1)\n return '%sp%s' % (mantisse.rstrip('0'),exponent)\n\n\ndef comparerotations(r1,r2):\n import FreeCAD\n '''compares two rotations\n a value of zero means that they are identical'''\n r2c=FreeCAD.Rotation(r2)\n r2c.invert()\n return r1.multiply(r2c).Angle\n\ndef findbestmatchingrotation(r1):\n import FreeCAD\n vangl = \\\n(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 11.25, 12.0, 13.0,\n14.0, 15.0, 16.0, (180.0/11.0), 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 22.5,\n23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, (360.0/11.0),\n33.0, 33.75, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0,\n44.0, 45.0, 46.0, 47.0, 48.0, 49.0,(540.0/11.0), 50.0, 51.0, (360.0/7.0),\n52.0, 53.0, 54.0, 55.0, 56.0, 56.25, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0,\n63.0, 64.0, 65.0,(720.0/11.0), 66.0, 67.0, 67.5, 68.0, 69.0, 70.0, 71.0,\n72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 78.75, 79.0, 80.0, 81.0,(900.0/11.0),\n82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0,\n95.0, 96.0, 97.0, 98.0,(1080.0/11.0), 99.0, 100.0, 101.0, 101.25, 102.0,\n(720.0/7.0), 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0,\n112.0, 112.5, 113.0, 114.0, (1260.0/11), 115.0, 116.0, 117.0, 118.0, 119.0,\n120.0, 121.0, 122.0, 123.0, 123.75, 124.0, 125.0, 126.0, 127.0, 128.0,\n 129.0, 130.0,(1440.0/11.0), 131.0, 132.0, 133.0, 134.0, 135.0, 136.0,\n137.0, 138.0, 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, 146.25,\n147.0, (1620.0/11.0), 148.0, 149.0, 150.0, 151.0, 152.0, 153.0, 154.0,\n(1080.0/7.0), 155.0, 156.0, 157.0, 157.5, 158.0, 159.0, 160.0, 161.0, 162.0,\n163.0, (1800.0/11.0), 
164.0, 165.0, 166.0, 167.0, 168.0, 168.75, 169.0, 170.0,\n171.0, 172.0, 173.0, 174.0, 175.0, 176.0, 177.0,178.0, 179.0,180.0,\n-179.0, -178.0, -177.0, -176.0, -175.0, -174.0, -173.0, -172.0, -171.0, -170.0,\n-169.0, -168.75, -168.0, -167.0, -166.0, -165.0, -164.0, (-1800.0/11.0),\n-163.0, -162.0, -161.0, -160.0, -159.0, -158.0, -157.5, -157.0, -156.0,\n-155.0, (-1080.0/7.0), -154.0, -153.0, -152.0, -151.0, -150.0, -149.0, -148.0,\n(-1620.0/11.0), -147.0, -146.25, -146.0, -145.0, -144.0, -143.0, -142.0,\n-141.0, -140.0, -139.0,-138.0, -137.0, -136.0, -135.0, -134.0, -133.0, -132.0,\n -131.0, (-1440/11.0), -130.0, -129.0, -128.0,-127.0, -126.0, -125.0, -124.0,\n -123.75, -123.0, -122.0, -121.0, -120.0, -119.0, -118.0, -117.0, -116.0,\n-115.0,(-1260.0/11.0), -114.0, -113.0, -112.5, -112.0, -111.0, -110.0, -109.0,\n-108.0, -107.0, -106.0, -105.0,-104.0, -103.0,(-720.0/7.0), -102.0, -101.25,\n-101.0, -100.0, -99.0, (-1080.0/11.0), -98.0, -97.0, -96.0, -95.0, -94.0,\n-93.0, -92.0, -91.0, -90.0, -89.0, -88.0, -87.0, -86.0, -85.0, -84.0, -83.0,\n-82.0,(-900.0/11.0), -81.0, -80.0, -79.0, -78.75, -78.0, -77.0, -76.0, -75.0,\n-74.0, -73.0, -72.0, -71.0, -70.0, -69.0, -68.0, -67.5, -67.0, -66.0,\n(-720.0/11.0), -65.0, -64.0, -63.0, -62.0, -61.0, -60.0, -59.0, -58.0, -57.0,\n-56.25, -56.0, -55.0, -54.0, -53.0, -52.0,(-360.0/7.0), -51.0, -50.0,\n(-540.0/11.0), -49.0, -48.0, -47.0, -46.0, -45.0, -44.0, -43.0, -42.0, -41.0,\n-40.0, -39.0, -38.0, -37.0, -36.0, -35.0, -34.0, -33.75, -33.0,(-360.0/11.0),\n-32.0, -31.0, -30.0, -29.0, -28.0, -27.0, -26.0, -25.0, -24.0, -23.0, -22.5,\n-22.0, -21.0, -20.0, -19.0, -18.0, -17.0,(-180.0/11.0), -16.0, -15.0, -14.0,\n-13.0, -12.0, -11.25, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,\n-2.0, -1.0)\n def tup2nvect(tup):\n \"\"\"convert a tuple to a normalized vector\"\"\"\n v=FreeCAD.Vector(*tup)\n v.normalize()\n return v\n\n def wkaxes():\n \"\"\"well known axes for rotations\"\"\"\n vtupl=((1,0,0),(0,1,0),(0,0,1),\n 
(1,1,0),(1,0,1),(0,1,1),(-1,1,0),(-1,0,1),(0,1,-1),\n (1,1,1),(1,1,-1),(1,-1,1),(-1,1,1))\n return tuple(tup2nvect(tup) for tup in vtupl)\n\n bestrot=FreeCAD.Rotation()\n dangle = comparerotations(r1,bestrot)\n for axis in wkaxes():\n for angle in vangl:\n for axissign in (1.0,-1.0):\n r2=FreeCAD.Rotation(axis*axissign,angle)\n dangletest = comparerotations(r1,r2)\n if dangletest < dangle:\n bestrot = r2\n dangle = dangletest\n return (bestrot,dangle)\n\ndef roundrotation(rot,maxangulardistance=1e-5):\n '''guess the rotation axis and angle for a rotation\n recreated from rounded floating point values\n (from a quaterion or transformation matrix)'''\n def teststandardrot(r1,maxangulardistance=1e-5):\n '''test a few common rotations beforehand'''\n import FreeCAD,itertools\n eulers = []\n for angle in (90,-90,180,45,-45,135,-135):\n for euler in itertools.permutations((0,0,angle)):\n eulers.append(euler)\n for euler in itertools.product((0,45,90,135,180,-45,-90,-135),repeat=3):\n eulers.append(euler)\n for euler in eulers:\n r2 = FreeCAD.Rotation(*euler)\n if comparerotations(r1,r2) < maxangulardistance:\n return r2\n\n if rot.isNull():\n return rot\n firstguess = teststandardrot(rot,maxangulardistance)\n if firstguess is not None:\n return firstguess\n #brute force\n bestguess,angulardistance = findbestmatchingrotation(rot)\n if angulardistance < maxangulardistance: #use guess\n return bestguess\n else: #use original\n return rot\n\ndef callopenscadmeshstring(scadstr):\n \"\"\"Call OpenSCAD and return the result as a Mesh\"\"\"\n import Mesh,os\n tmpfilename=callopenscadstring(scadstr,'stl')\n newmesh=Mesh.Mesh()\n newmesh.read(tmpfilename)\n try:\n os.unlink(tmpfilename)\n except OSError:\n pass\n return newmesh\n\ndef meshopinline(opname,iterable1):\n \"\"\"uses OpenSCAD to combine meshes\n takes the name of the CGAL operation and an iterable (tuple,list) of \n FreeCAD Mesh objects\n includes all the mesh data in the SCAD file\n \"\"\"\n from exportCSG import 
mesh2polyhedron\n return callopenscadmeshstring('%s(){%s}' % (opname,' '.join(\\\n (mesh2polyhedron(meshobj) for meshobj in iterable1))))\n\ndef meshoptempfile(opname,iterable1):\n \"\"\"uses OpenSCAD to combine meshes\n takes the name of the CGAL operation and an iterable (tuple,list) of \n FreeCAD Mesh objects\n uses stl files to supply the mesh data\n \"\"\"\n import os,tempfile\n dir1=tempfile.gettempdir()\n filenames = []\n for mesh in iterable1:\n outputfilename=os.path.join(dir1,'%s.stl' % tempfilenamegen.next())\n mesh.write(outputfilename)\n filenames.append(outputfilename)\n #absolute path causes error. We rely that the scad file will be in the dame tmpdir\n meshimports = ' '.join(\"import(file = \\\"%s\\\");\" % \\\n #filename \\\n os.path.split(filename)[1] for filename in filenames)\n result = callopenscadmeshstring('%s(){%s}' % (opname,meshimports))\n for filename in filenames:\n try:\n os.unlink(filename)\n except OSError:\n pass\n return result\n\ndef meshoponobjs(opname,inobjs):\n \"\"\"\n takes a string (operation name) and a list of Feature Objects\n returns a mesh and a list of objects that were used\n Part Objects will be meshed\n \"\"\"\n objs=[]\n meshes=[]\n for obj in inobjs:\n if obj.isDerivedFrom('Mesh::Feature'):\n objs.append(obj)\n meshes.append(obj.Mesh)\n elif obj.isDerivedFrom('Part::Feature'):\n #mesh the shape\n import FreeCAD\n params = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/OpenSCAD\")\n objs.append(obj)\n if False: # disabled due to issue 1292\n import MeshPart\n meshes.append(MeshPart.meshFromShape(obj.Shape,params.GetFloat(\\\n 'meshmaxlength',1.0), params.GetFloat('meshmaxarea',0.0),\\\n params.GetFloat('meshlocallen',0.0),\\\n params.GetFloat('meshdeflection',0.0)))\n else:\n import Mesh\n meshes.append(Mesh.Mesh(obj.Shape.tessellate(params.GetFloat(\\\n 'meshmaxlength',1.0))))\n else:\n pass #neither a mesh nor a part\n if len(objs) > 0:\n return (meshoptempfile(opname,meshes),objs)\n else:\n return 
(None,[])\n\ndef process2D_ObjectsViaOpenSCADShape(ObjList,Operation,doc):\n import FreeCAD,importDXF\n import os,tempfile\n dir1=tempfile.gettempdir()\n filenames = []\n for item in ObjList :\n outputfilename=os.path.join(dir1,'%s.dxf' % tempfilenamegen.next())\n importDXF.export([item],outputfilename,True,True)\n filenames.append(outputfilename)\n dxfimports = ' '.join(\"import(file = \\\"%s\\\");\" % \\\n #filename \\\n os.path.split(filename)[1] for filename in filenames)\n tmpfilename = callopenscadstring('%s(){%s}' % (Operation,dxfimports),'dxf')\n from OpenSCAD2Dgeom import importDXFface\n # TBD: assure the given doc is active\n face = importDXFface(tmpfilename,None,None)\n #clean up\n filenames.append(tmpfilename) #delete the ouptut file as well\n try:\n os.unlink(tmpfilename)\n except OSError:\n pass\n return face\n\ndef process2D_ObjectsViaOpenSCAD(ObjList,Operation,doc=None):\n import FreeCAD\n doc = doc or FreeCAD.activeDocument()\n face=process2D_ObjectsViaOpenSCADShape(ObjList,Operation,doc)\n obj=doc.addObject('Part::Feature',Operation)\n obj.Shape=face\n # Hide Children\n if FreeCAD.GuiUp:\n for index in ObjList :\n index.ViewObject.hide()\n return(obj)\n\ndef process3D_ObjectsViaOpenSCADShape(ObjList,Operation,maxmeshpoints=None):\n import FreeCAD,Mesh,Part\n params = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/OpenSCAD\")\n if False: # disabled due to issue 1292\n import MeshPart\n meshes = [MeshPart.meshFromShape(obj.Shape,params.GetFloat(\\\n 'meshmaxlength',1.0), params.GetFloat('meshmaxarea',0.0),\\\n params.GetFloat('meshlocallen',0.0),\\\n params.GetFloat('meshdeflection',0.0)) for obj in ObjList]\n else:\n meshes = [Mesh.Mesh(obj.Shape.tessellate(params.GetFloat(\\\n 'meshmaxlength',1.0))) for obj in ObjList]\n if max(mesh.CountPoints for mesh in meshes) < \\\n (maxmeshpoints or params.GetInt('tempmeshmaxpoints',5000)):\n stlmesh = meshoptempfile(Operation,meshes)\n sh=Part.Shape()\n 
sh.makeShapeFromMesh(stlmesh.Topology,0.1)\n solid = Part.Solid(sh)\n solid=solid.removeSplitter()\n if solid.Volume < 0:\n solid.complement()\n return solid\n\ndef process3D_ObjectsViaOpenSCAD(doc,ObjList,Operation):\n solid = process3D_ObjectsViaOpenSCADShape(ObjList,Operation)\n if solid is not None:\n obj=doc.addObject('Part::Feature',Operation) #non parametric objec\n obj.Shape=solid#.removeSplitter()\n if FreeCAD.GuiUp:\n for index in ObjList :\n index.ViewObject.hide()\n return(obj)\n\ndef process_ObjectsViaOpenSCADShape(doc,children,name,maxmeshpoints=None):\n if all((not obj.Shape.isNull() and obj.Shape.Volume == 0) \\\n for obj in children):\n return process2D_ObjectsViaOpenSCADShape(children,name,doc)\n elif all((not obj.Shape.isNull() and obj.Shape.Volume > 0) \\\n for obj in children):\n return process3D_ObjectsViaOpenSCADShape(children,name,maxmeshpoints)\n else:\n import FreeCAD\n FreeCAD.Console.PrintError( unicode(translate('OpenSCAD',\\\n \"Error all shapes must be either 2D or both must be 3D\"))+u'\\n')\n\ndef process_ObjectsViaOpenSCAD(doc,children,name):\n if all((not obj.Shape.isNull() and obj.Shape.Volume == 0) \\\n for obj in children):\n return process2D_ObjectsViaOpenSCAD(children,name,doc)\n elif all((not obj.Shape.isNull() and obj.Shape.Volume > 0) \\\n for obj in children):\n return process3D_ObjectsViaOpenSCAD(doc,children,name)\n else:\n import FreeCAD\n FreeCAD.Console.PrintError( unicode(translate('OpenSCAD',\\\n \"Error all shapes must be either 2D or both must be 3D\"))+u'\\n')\n\ndef removesubtree(objs):\n def addsubobjs(obj,toremoveset):\n toremove.add(obj)\n for subobj in obj.OutList:\n addsubobjs(subobj,toremoveset)\n\n import FreeCAD\n toremove=set()\n for obj in objs:\n addsubobjs(obj,toremove)\n checkinlistcomplete =False\n while not checkinlistcomplete:\n for obj in toremove:\n if (obj not in objs) and (frozenset(obj.InList) - toremove):\n toremove.remove(obj)\n break\n else:\n checkinlistcomplete = True\n for obj in 
toremove:\n obj.Document.removeObject(obj.Name)\n\ndef applyPlacement(shape):\n if shape.Placement.isNull():\n return shape\n else:\n import Part\n if shape.ShapeType == 'Solid':\n return Part.Solid(shape.childShapes()[0])\n elif shape.ShapeType == 'Face':\n return Part.Face(shape.childShapes())\n elif shape.ShapeType == 'Compound':\n return Part.Compound(shape.childShapes())\n elif shape.ShapeType == 'Wire':\n return Part.Wire(shape.childShapes())\n elif shape.ShapeType == 'Shell':\n return Part.Shell(shape.childShapes())\n else:\n return Part.Compound([shape])\n","sub_path":"freecad-0.14.3702/src/Mod/OpenSCAD/OpenSCADUtils.py","file_name":"OpenSCADUtils.py","file_ext":"py","file_size_in_byte":23519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"607890851","text":"import tkinter as tk \r\nimport math\r\nfrom tkinter import messagebox\r\n\r\nclass currency_trans(tk.Toplevel):\r\n def __init__(self, master, name):\r\n self.name=name\r\n self.path_customers = \"customers\\\\\"\r\n tk.Toplevel.__init__(self, master)\r\n global file\r\n with open(self.path_customers+self.name, 'r') as file:\r\n lines=file.readlines()\r\n self.title(\"Add funds to currency\")\r\n self.geometry(\"350x250\")\r\n self.balance = tk.StringVar(self, \"Available balance: \"+lines[3])\r\n self.info = tk.Label(self, textvariable=self.balance, fg=\"green\", font = (\"Algerian\", 13))\r\n self.info.pack()\r\n tk.Label(self, text=\"\").pack()\r\n tk.Label(self, text=\"\").pack()\r\n self.funds = tk.Entry(self)\r\n self.funds.pack()\r\n tk.Button(self, text=\"proceed\", command=self.mechanism).pack()\r\n self.info2 = tk.StringVar(self)\r\n tk.Label(self, textvariable=self.info2, fg=\"green\").pack()\r\n\r\n def mechanism(self):\r\n self.amount = float(self.funds.get())\r\n if self.amount < 0:\r\n messagebox.showerror('error', 'cannot proceed negative amount')\r\n else: \r\n with open(self.path_customers+self.name, 'r') as file:\r\n 
lines=file.readlines()\r\n if float(lines[3]) < self.amount:\r\n messagebox.showerror('error', 'No funds')\r\n else:\r\n with open(self.path_customers+self.name, 'w') as file2:\r\n lines[3]=str(float(lines[3])-self.amount)+'\\n'\r\n lines[4]=str(float(lines[4])+round(self.amount/3.70,2))+'\\n'\r\n for x in lines:\r\n file2.write(x)\r\n self.balance.set('%s' % (float(lines[3]) - self.amount))\r\n self.info2.set(\"Succes, \"+'%s' % (round(self.amount/3.70, 2))+\" usd added\")\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n","sub_path":"currency_transfer.py","file_name":"currency_transfer.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"525384682","text":"#!/usr/bin/python\r\n#\r\n# Copyright (c) 2020 GuopengLin, (@t-glin)\r\n#\r\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n__metaclass__ = type\r\n\r\n\r\nANSIBLE_METADATA = {'metadata_version': '1.1',\r\n 'status': ['preview'],\r\n 'supported_by': 'community'}\r\n\r\n\r\nDOCUMENTATION = '''\r\n---\r\nmodule: azure_rm_bandwidthschedule\nversion_added: '2.9'\nshort_description: Manage Azure BandwidthSchedule instance.\ndescription:\n - 'Create, update and delete instance of Azure BandwidthSchedule.'\noptions:\n device_name:\n description:\n - The device name.\n required: true\n type: str\n name:\n description:\n - The bandwidth schedule name.\n - The bandwidth schedule name which needs to be added/updated.\n required: true\n type: str\n resource_group_name:\n description:\n - The resource group name.\n required: true\n type: str\n start:\n description:\n - The start time of the schedule in UTC.\n type: str\n stop:\n description:\n - The stop time of the schedule in UTC.\n type: str\n rate_in_mbps:\n description:\n - The bandwidth rate in Mbps.\n type: integer\n days:\n description:\n - The days of the 
week when this schedule is applicable.\n type: list\n state:\n description:\n - Assert the state of the BandwidthSchedule.\n - >-\n Use C(present) to create or update an BandwidthSchedule and C(absent) to\n delete it.\n default: present\n choices:\n - absent\n - present\nextends_documentation_fragment:\n - azure\nauthor:\n - GuopengLin (@t-glin)\n\r\n'''\r\n\r\nEXAMPLES = '''\r\n - name: BandwidthSchedulePut\r\n azure_rm_bandwidthschedule: \r\n name: bandwidth-1\r\n device_name: testedgedevice\r\n resource_group_name: GroupForEdgeAutomation\r\n properties:\r\n days:\r\n - Sunday\r\n - Monday\r\n rate_in_mbps: 100\r\n start: '0:0:0'\r\n stop: '13:59:0'\r\n \r\n\r\n - name: BandwidthScheduleDelete\r\n azure_rm_bandwidthschedule: \r\n name: bandwidth-1\r\n device_name: testedgedevice\r\n resource_group_name: GroupForEdgeAutomation\r\n \r\n\r\n'''\r\n\r\nRETURN = '''\r\nid:\r\n description:\r\n - The path ID that uniquely identifies the object.\r\n returned: always\r\n type: str\r\n sample: null\r\nname:\r\n description:\r\n - The object name.\r\n returned: always\r\n type: str\r\n sample: null\r\ntype:\r\n description:\r\n - The hierarchical type of the object.\r\n returned: always\r\n type: str\r\n sample: null\r\nstart:\r\n description:\r\n - The start time of the schedule in UTC.\r\n returned: always\r\n type: str\r\n sample: null\r\nstop:\r\n description:\r\n - The stop time of the schedule in UTC.\r\n returned: always\r\n type: str\r\n sample: null\r\nrate_in_mbps:\r\n description:\r\n - The bandwidth rate in Mbps.\r\n returned: always\r\n type: integer\r\n sample: null\r\ndays:\r\n description:\r\n - The days of the week when this schedule is applicable.\r\n returned: always\r\n type: list\r\n sample: null\r\n\r\n'''\r\n\r\nimport time\r\nimport json\r\nimport re\r\nfrom ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt\r\nfrom copy import deepcopy\r\ntry:\r\n from msrestazure.azure_exceptions import CloudError\r\n from azure.mgmt.data import 
DataBoxEdgeManagementClient\r\n from msrestazure.azure_operation import AzureOperationPoller\r\n from msrest.polling import LROPoller\r\nexcept ImportError:\r\n # This is handled in azure_rm_common\r\n pass\r\n\r\n\r\nclass Actions:\r\n NoAction, Create, Update, Delete = range(4)\r\n\r\n\r\nclass AzureRMBandwidthSchedule(AzureRMModuleBaseExt):\r\n def __init__(self):\r\n self.module_arg_spec = dict(\r\n device_name=dict(\r\n type='str',\r\n required=True\r\n ),\r\n name=dict(\r\n type='str',\r\n required=True\r\n ),\r\n resource_group_name=dict(\r\n type='str',\r\n required=True\r\n ),\r\n start=dict(\r\n type='str',\r\n disposition='/start'\r\n ),\r\n stop=dict(\r\n type='str',\r\n disposition='/stop'\r\n ),\r\n rate_in_mbps=dict(\r\n type='integer',\r\n disposition='/rate_in_mbps'\r\n ),\r\n days=dict(\r\n type='list',\r\n disposition='/days',\r\n elements='str'\r\n ),\r\n state=dict(\r\n type='str',\r\n default='present',\r\n choices=['present', 'absent']\r\n )\r\n )\r\n\r\n self.device_name = None\r\n self.name = None\r\n self.resource_group_name = None\r\n self.body = {}\r\n\r\n self.results = dict(changed=False)\r\n self.mgmt_client = None\r\n self.state = None\r\n self.to_do = Actions.NoAction\r\n\r\n super(AzureRMBandwidthSchedule, self).__init__(derived_arg_spec=self.module_arg_spec,\r\n supports_check_mode=True,\r\n supports_tags=True)\r\n\r\n def exec_module(self, **kwargs):\r\n for key in list(self.module_arg_spec.keys()):\r\n if hasattr(self, key):\r\n setattr(self, key, kwargs[key])\r\n elif kwargs[key] is not None:\r\n self.body[key] = kwargs[key]\r\n\r\n self.inflate_parameters(self.module_arg_spec, self.body, 0)\r\n\r\n old_response = None\r\n response = None\r\n\r\n self.mgmt_client = self.get_mgmt_svc_client(DataBoxEdgeManagementClient,\r\n base_url=self._cloud_environment.endpoints.resource_manager,\r\n api_version='2019-08-01')\r\n\r\n old_response = self.get_resource()\r\n\r\n if not old_response:\r\n if self.state == 'present':\r\n self.to_do 
= Actions.Create\r\n else:\r\n if self.state == 'absent':\r\n self.to_do = Actions.Delete\r\n else:\r\n modifiers = {}\r\n self.create_compare_modifiers(self.module_arg_spec, '', modifiers)\r\n self.results['modifiers'] = modifiers\r\n self.results['compare'] = []\r\n if not self.default_compare(modifiers, self.body, old_response, '', self.results):\r\n self.to_do = Actions.Update\r\n\r\n if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):\r\n self.results['changed'] = True\r\n if self.check_mode:\r\n return self.results\r\n response = self.create_update_resource()\r\n elif self.to_do == Actions.Delete:\r\n self.results['changed'] = True\r\n if self.check_mode:\r\n return self.results\r\n self.delete_resource()\r\n else:\r\n self.results['changed'] = False\r\n response = old_response\r\n\r\n return self.results\r\n\r\n def create_update_resource(self):\r\n try:\r\n response = self.mgmt_client.bandwidth_schedules.create_or_update(device_name=self.device_name,\r\n name=self.name,\r\n resource_group_name=self.resource_group_name,\r\n parameters=self.body)\r\n if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):\r\n response = self.get_poller_result(response)\r\n except CloudError as exc:\r\n self.log('Error attempting to create the BandwidthSchedule instance.')\r\n self.fail('Error creating the BandwidthSchedule instance: {0}'.format(str(exc)))\r\n return response.as_dict()\r\n\r\n def delete_resource(self):\r\n try:\r\n response = self.mgmt_client.bandwidth_schedules.delete(device_name=self.device_name,\r\n name=self.name,\r\n resource_group_name=self.resource_group_name)\r\n except CloudError as e:\r\n self.log('Error attempting to delete the BandwidthSchedule instance.')\r\n self.fail('Error deleting the BandwidthSchedule instance: {0}'.format(str(e)))\r\n\r\n return True\r\n\r\n def get_resource(self):\r\n try:\r\n response = self.mgmt_client.bandwidth_schedules.get(device_name=self.device_name,\r\n name=self.name,\r\n 
resource_group_name=self.resource_group_name)\r\n except CloudError as e:\r\n return False\r\n return response.as_dict()\r\n\r\n\r\ndef main():\r\n AzureRMBandwidthSchedule()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"tmp/azure_rm_bandwidthschedule.py","file_name":"azure_rm_bandwidthschedule.py","file_ext":"py","file_size_in_byte":9132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"101021567","text":"from django.contrib import admin\nfrom test_calendar.models import *\n\n# Register your models here.\n\nclass SchoolClassAdmin(admin.ModelAdmin):\n\tlist_per_page = 500\n\tsearch_fields = ['name']\n\tlist_display = ['name']\n\nclass CourseAdmin(admin.ModelAdmin):\n\tlist_per_page = 500\n\tsearch_fields = ['name', 'code']\n\tlist_display = ['name', 'code']\n\nclass TestAdmin(admin.ModelAdmin):\n\tlist_per_page = 500\n\tsearch_fields = ['description', 'course']\n\tlist_display = ['description', 'course']\n\nadmin.site.register(SchoolClass, SchoolClassAdmin)\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(Test, TestAdmin)","sub_path":"apps/test_calendar/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"561387678","text":"from flask import *\nimport os\nfrom werkzeug.utils import secure_filename\nfrom keras.models import load_model\nimport numpy as np\nfrom PIL import Image\n\napp = Flask(__name__)\n\n# Classes of trafic signs\nclasses = { 0:'Humerus',\n 1:'Pelvis', \n 2:'Shoulder',\n 3:'Forearm',\n 4:'Wrist',\n 5:'Femur',\n 6:'Knee',\n 7:'Tibia',\n 8:'Ankle',\n 9:'Foot',\n }\n\ndef image_processing(img):\n model = load_model('./model/model.h5')\n data=[]\n image = Image.open(img)\n image = image.resize((30,30))\n data.append(np.array(image))\n X_test=np.array(data)\n Y_pred = model.predict_classes(X_test)\n return Y_pred\n\n@app.route('/')\ndef 
index():\n return render_template('index.html')\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['file']\n file_path = secure_filename(f.filename)\n f.save(file_path)\n # Make prediction\n result = image_processing(file_path)\n s = [str(i) for i in result]\n a = int(\"\".join(s))\n result = \"Predicted X-Ray is : \" +classes[a]\n os.remove(file_path)\n return result\n return None\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"494626913","text":"import psycopg2\nimport csv\nimport numpy as np\n\npath = 'crime.csv'\nconn = psycopg2.connect(host=\"localhost\", dbname=\"pdt_database\", user=\"postgres\", password=\"postgres\")\ncur = conn.cursor()\n\nwith open('crime.csv', encoding='windows-1250') as f:\n reader = csv.reader(f)\n next(reader)\n for id, row in enumerate(reader):\n row = np.array(row)\n row = np.concatenate((row, [id]))\n data = tuple(row[[17,0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15]])\n try:\n command = \" INSERT INTO crimes (id,incident_number,offence_code,offence_code_group,offence_description,district,reporting_area,occurred_on_date,year,month,day_of_week,hour,ucr_part,street,lat,long)\" \\\n \"VALUES (%s, '%s', %s,'%s','%s','%s',%s,'%s',%s,%s,'%s',%s,'%s','%s',%s,%s)\" \\\n % data\n cur.execute(command)\n conn.commit()\n except Exception as e:\n print(id, e)\n cur.execute(\"ROLLBACK\")\n conn.commit()\n","sub_path":"import/imoprt_crimes.py","file_name":"imoprt_crimes.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"110275253","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport globalFunctions\nimport re\nimport os\nimport 
logging\n\n\nclass MangaFox(object):\n def __init__(self, manga_url, **kwargs):\n\n current_directory = kwargs.get(\"current_directory\")\n self.logging = kwargs.get(\"log_flag\")\n self.sorting = kwargs.get(\"sorting_order\")\n self.comic_name = self.name_cleaner(manga_url)\n url_split = str(manga_url).split(\"/\")\n\n if len(url_split) is 6:\n self.full_series(comic_url=manga_url, comic_name=self.comic_name, sorting=self.sorting)\n else:\n self.single_chapter(manga_url, self.comic_name)\n\n\n def name_cleaner(self, url):\n initial_name = str(url).split(\"/\")[4].strip()\n safe_name = re.sub(r\"[0-9][a-z][A-Z]\\ \", \"\", str(initial_name))\n manga_name = str(safe_name.title()).replace(\"_\", \" \")\n\n return manga_name\n\n def single_chapter(self, comic_url, comic_name):\n source, cookies_main = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)\n\n current_chapter_volume = str(re.search(r\"current_chapter=\\\"(.*?)\\\";\", str(source)).group(1))\n chapter_number = re.search(r\"c(\\d+)\", current_chapter_volume).group(1)\n series_code = str(re.search(r\"series_code=\\\"(.*?)\\\";\", str(source)).group(1))\n current_page_number = int(str(re.search(r'current_page=(.*?)\\;', str(source)).group(1)).strip())\n last_page_number = int(str(re.search(r'total_pages=(.*?)\\;', str(source)).group(1)).strip())\n\n\n file_directory = str(comic_name) + '/' + str(chapter_number) + \"/\"\n\n directory_path = os.path.realpath(file_directory)\n\n globalFunctions.GlobalFunctions().info_printer(comic_name, chapter_number)\n\n if not os.path.exists(file_directory):\n os.makedirs(file_directory)\n\n for file_name in range(current_page_number, last_page_number +1):\n # http://mangafox.me/manga/colette_wa_shinu_koto_ni_shita/v03/c019/2.html\n chapter_url = \"http://mangafox.me/manga/\" + str(series_code) + \"/\" + str(current_chapter_volume) + \"/%s.html\" % str(file_name)\n logging.debug(\"Chapter Url : %s\" % chapter_url)\n\n source_new, cookies_new = 
globalFunctions.GlobalFunctions().page_downloader(manga_url=chapter_url, cookies=cookies_main)\n image_link_finder = source_new.findAll('div', {'class': 'read_img'})\n for link in image_link_finder:\n x = link.findAll('img')\n for a in x:\n image_link = a['src']\n\n file_name = \"0\" + str(file_name) + \".jpg\"\n logging.debug(\"Image Link : %s\" % image_link)\n globalFunctions.GlobalFunctions().downloader(image_link, file_name, chapter_url, directory_path, log_flag=self.logging)\n\n return 0\n\n def full_series(self, comic_url, comic_name, sorting, **kwargs):\n source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)\n\n all_links = re.findall(r\"href=\\\"(.*?)\\\" title=\\\"Thanks for\", str(source))\n logging.debug(\"All Links : %s\" % all_links)\n\n if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:\n for chap_link in all_links:\n self.single_chapter(comic_url=str(chap_link), comic_name=comic_name)\n\n elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:\n for chap_link in all_links[::-1]:\n self.single_chapter(comic_url=str(chap_link), comic_name=comic_name)\n\n print(\"Finished Downloading\")\n return 0\n","sub_path":"comic_dl/sites/mangaFox.py","file_name":"mangaFox.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"344847916","text":"import time, socket, sys\r\nfrom datetime import datetime as dt\r\nimport paho.mqtt.client as paho\r\nimport signal\r\nimport mraa\r\n\r\n# Init LEDs\r\nleds = []\r\nfor i in range(2,10):\r\n led = mraa.Gpio(i)\r\n # led.dir(mraa.DIR_OUT)\r\n leds.append(led)\r\n\r\nclass Philosopher(object):\r\n \"\"\"docstring for Philosopher\"\"\"\r\n def __init__(self, philosopher_id, led_no, left_fork, right_fork):\r\n self.philosopher_id = philosopher_id\r\n self.led_no = int(led_no)\r\n self.left_fork = left_fork\r\n self.right_fork = right_fork\r\n\r\n self.isAccepted = 
False\r\n self.isWaiting = False\r\n self.isRightForkAccepted = False\r\n self.isRightForkUsed = False\r\n\r\n\r\n # Handle Ctrl+C\r\n signal.signal(signal.SIGINT, self.control_c_handler)\r\n\r\n # MQTT initialization\r\n self.mqtt_client = paho.Client()\r\n self.mqtt_client.on_connect = self.on_connect\r\n self.mqtt_client.on_message = self.on_message\r\n self.mqtt_client.on_disconnect = self.on_disconnect\r\n self.mqtt_client.on_log = self.on_log\r\n self.mqtt_topic = 'kappa/philosopher'\r\n self.mqtt_client.will_set(self.mqtt_topic, '___Will of '+self.philosopher_id+' ___\\n\\n', 0, False)\r\n self.mqtt_client.connect('sansa.cs.uoregon.edu', '1883')\r\n self.mqtt_client.subscribe('kappa/butler')\r\n self.mqtt_client.loop_start()\r\n\r\n # Start process\r\n self.sendSitRequest()\r\n\r\n # Deal with control-c\r\n def control_c_handler(self, signum, frame):\r\n self.isDisconnected = True\r\n self.mqtt_client.disconnect()\r\n self.mqtt_client.loop_stop() # waits until DISCONNECT message is sent out\r\n print (\"Exit\")\r\n sys.exit(0)\r\n\r\n # MQTT Handlers\r\n def on_connect(self, client, userdata, flags, rc):\r\n pass\r\n\r\n def on_disconnect(self, client, userdata, rc):\r\n pass\r\n\r\n def on_log(self, client, userdata, level, buf):\r\n pass\r\n\r\n def on_message(self, client, userdata, msg):\r\n print(msg.payload)\r\n philosopher_id, content = msg.payload.split('.')\r\n if '_' in philosopher_id:\r\n philosopher_id, fork_id = philosopher_id.split('_')\r\n\r\n\r\n if philosopher_id == self.philosopher_id:\r\n # print(msg.payload)\r\n # print(\"Before: isAccepted\", self.isAccepted)\r\n # print('Before: isWaiting',self.isWaiting)\r\n # print('Before: isRightForkAccepted',self.isRightForkAccepted)\r\n if content == 'sitRequestAccepted':\r\n self.isAccepted = True\r\n self.turnOnLED()\r\n self.sendForkRequest(self.right_fork)\r\n if content == 'inQueue':\r\n self.isWaiting = True\r\n if content == 'forkAccepted':\r\n if not self.isRightForkAccepted:\r\n 
self.isRightForkAccepted = True\r\n self.sendForkRequest(self.left_fork)\r\n else:\r\n self.start_eat()\r\n if content == 'forkDoneUsing':\r\n if not self.isRightForkUsed:\r\n self.isRightForkUsed = True\r\n self.sendPutFork(self.right_fork, 'right')\r\n else:\r\n self.sendArise()\r\n if content == 'ariseAccepted':\r\n self.isAccepted = False\r\n self.isRightForkAccepted = False\r\n self.isRightForkUsed = False\r\n self.turnOffLED()\r\n # print(self.philosopher_id+'.sitRequest')\r\n self.mqtt_client.publish(self.mqtt_topic, self.philosopher_id+'.sitRequest')\r\n\r\n\r\n # print(\"After: isAccepted\", self.isAccepted)\r\n # print('After: isWaiting',self.isWaiting)\r\n # print('After: isRightForkAccepted',self.isRightForkAccepted)\r\n\r\n # LED functions\r\n def turnOnLED(self):\r\n leds[self.led_no].write(0)\r\n\r\n def turnOffLED(self):\r\n leds[self.led_no].write(1)\r\n\r\n def blinkLED(self):\r\n for x in xrange(1,10):\r\n self.turnOffLED()\r\n time.sleep(0.5)\r\n self.turnOnLED()\r\n time.sleep(0.5)\r\n\r\n # Philosoher functions\r\n def sendSitRequest(self):\r\n # reset values\r\n self.isAccepted = False\r\n self.isWaiting = False\r\n self.isRightForkAccepted = False\r\n self.isRightForkUsed = False\r\n self.turnOffLED()\r\n while not self.isAccepted and not self.isWaiting:\r\n # print(self.philosopher_id+'.sitRequest')\r\n self.mqtt_client.publish(self.mqtt_topic, self.philosopher_id+'.sitRequest')\r\n time.sleep(5)\r\n\r\n def sendForkRequest(self, fork):\r\n while True:\r\n user_input = raw_input('Press y to pick {} Fork\\n'.format(fork))\r\n if user_input.lower() == 'y':\r\n # print(self.philosopher_id+'_'+fork+'.forkRequest')\r\n self.mqtt_client.publish(self.mqtt_topic, self.philosopher_id+'_'+fork+'.forkRequest')\r\n return\r\n\r\n def start_eat(self):\r\n while True:\r\n user_input = raw_input('Press y to Eat\\n')\r\n if user_input.lower() == 'y':\r\n self.mqtt_client.publish(self.mqtt_topic, 
self.philosopher_id+'_'+str(self.led_no)+'.startedEating')\r\n self.mqtt_client.loop()\r\n self.blinkLED()\r\n self.mqtt_client.publish(self.mqtt_topic, self.philosopher_id+'_'+str(self.led_no)+'.stoppedEating')\r\n self.mqtt_client.loop()\r\n self.sendPutFork(self.left_fork, 'left')\r\n return\r\n\r\n def sendPutFork(self, fork_id, fork_side):\r\n while True:\r\n user_input = raw_input('Press y to put {} fork down\\n'.format(fork_id))\r\n if user_input.lower() == 'y':\r\n # print(fork_id+'.putFork')\r\n self.mqtt_client.publish(self.mqtt_topic,\r\n self.philosopher_id+'_'+fork_id+'.putFork')\r\n return\r\n \r\n def sendArise(self):\r\n while True:\r\n user_input = raw_input('Press y to arise\\n')\r\n if user_input.lower() == 'y':\r\n # print(self.philosopher_id + '.arise')\r\n self.mqtt_client.publish(self.mqtt_topic, self.philosopher_id + '.arise')\r\n return\r\n\r\ndef main():\r\n arr = sys.argv\r\n if len (arr) != 5 :\r\n print ('Please enter valid input, e.g. python Philosopher.py ')\r\n sys.exit(1)\r\n if arr[2] not in '12345678' or len(arr[2]) > 1:\r\n print ('Please enter valid led number between 1 to 8')\r\n sys.exit(1)\r\n Philosopher(arr[1], arr[2], arr[3], arr[4])\r\n while True:\r\n time.sleep(10)\r\n\r\nmain()","sub_path":"Philosopher.py","file_name":"Philosopher.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"248921613","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2_objectDetect\nfrom time import ctime\n\n\n\ndef color_filter(hsv_frame, color):\n\n\tif (color == 'blue'):\n\t\tlower_blue = np.array([110,50,50])\n\t\tupper_blue = np.array([130,255,255])\n\tif (color == 'orange'):\n\t\tlower_orange = np.array([0,150,210])\n\t\tupper_orange = np.array([44,291,286])\n\tif (color == 'red'):\n\t\tlower_orange = np.array([150,150,50])\n\t\tupper_orange = np.array([180,255,150])\t\n\t\n\t\"\"\"\n\t#creating a mask to 
filter only orange, if any pixel falls in the range, it will be white\n\t#And this mask basically can be used for all purposes. It is 0 or 255 image with white \n\t#as anywhere it matched the color window\"\"\"\n\tmask = cv2.inRange(hsv_frame,lower_orange, upper_orange)\n\t#cv2.imshow('mask',mask)\n\t\"\"\"\n\t#filtering the orange. when white from mask ANDs with orange of hsv_frame, it will be orange\n\t#and everything else will be black\n\tres_frame = cv2.bitwise_and(hsv_frame, hsv_frame, mask = mask)\n\t#cv2.imshow('res',res_frame)\n\t\n\t#splitting the matrix into its components because i only need the gray which is similar to value array\n\t#h,s,gray_frame = cv2.split(res_frame) \n\t#cv2.imshow('gray',gray_frame)\n\t\"\"\"\n\treturn mask\n\n\ndef smooth(frame_to_smooth, size_erode, size_dilate):\n\tkernel_erode = np.ones((size_erode,size_erode),np.uint8)\n\tkernel_dilate = np.ones((size_dilate,size_dilate),np.uint8)\n\tkernel = np.ones((15,15),np.float32)/225\n\t\n\t\"\"\"\n\t#below are different ways to blur. Median blur seems to be the best\n\t\n\tsmoothed = cv2.filter2D(gray_frame, -1, kernel)\n\tcv2.imshow('smooth',smoothed)\n\tblur = cv2.GaussianBlur(gray_frame, (15,15), 0)\n\tcv2.imshow('blur',blur)\n\t\n\tmedian = cv2.medianBlur(gray_frame, 15)\n\tcv2.imshow('median',median)\n\t\"\"\"\n\t\n\t\"\"\"eroding the edges to smoothen the pictures by eroding the boundaries, \n\tuse the kernel to do this \"\"\"\n\terosion = cv2.erode(frame_to_smooth, kernel_erode, iterations =1)\n\t\n\t\"\"\"\n\t#expand the pixels inside the orange object that I want to track\n\t#used because sometimes there are holes inside a full object. this will make it whole\"\"\"\n\tsmoothed_frame= cv2.dilate(erosion, kernel_dilate, iterations =1)\n\t#cv2.imshow('dilation',smoothed_frame)\t\n\t\n\t\"\"\"\n\topening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n\tclosing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\t#cv2.imshow('open',opening) #removes false negative. 
removes white in the background\n\tcv2.imshow('close',closing) #removes false positives. removes holes within the object\n\n\tclosing = closing.copy()\n\t\"\"\"\n\t\t\n\treturn smoothed_frame\n\n\ndef draw_contour(frame, contours, contour_type):\n\t\tx_center = -1\n\t\ty_center = -1\n\t\t\n\t\tl= len(contours)\n\t\tstart_area = 0\n\t\tif l>0:\n\t\t\t#print \"found orange\"\n\t\t\tfor i in range(l):\n\t\t\t\tarea = cv2.contourArea(contours[i])\n\t\t\t\tif (area > start_area): \n\t\t\t\t\tbiggest_contour = cv2.contourArea(contours[i])\n\t\t\t\t\tstart_area = area\n\t\t\t\t\tindex = i\n\t\t\tcnt = contours[index]\n\t\t\t\n\t\t\tif contour_type == 'circle':\n\t\t\t\t(x,y),radius = cv2.minEnclosingCircle(cnt)\n\t\t\t\tcenter = (int(x),int(y))\n\t\t\t\tradius = int(radius)\n\t\t\t\timg = cv2.circle(frame,center,radius, (0,255,0),2,8)\n\n\t\t\telif contour_type == 'poly':\n\t\t\t\tperi = cv2.arcLength(cnt, True)\n\t\t\t\trect = cv2.approxPolyDP(cnt, 0.1 * peri, True) \n\t\t\t\tcv2.drawContours(frame, [rect], -1, (0,255,0), 2,8)\n\t\t\t\n\t\t\telif contour_type == 'rect':\n\t\t\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\t\t\tcv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2,8)\t\n\t\t\t\n\t\t\telif contour_type == 'ellipse':\n\t\t\t\tellipse = cv2.fitEllipse(cnt)\n\t\t\t\timg = cv2.ellipse(frame, ellipse, (0,255,0),2)\n\t\t\t\t\n\t\t\telif contour_type == 'none':\n\t\t\t\tcv2.drawContours(frame, contours, -1, (0,255,0), 2,8)\n\t\t\t\t\n\t\t\tleftmost = tuple(cnt[cnt[:,:,0].argmin()][0]) \n\t\t\trightmost = tuple(cnt[cnt[:,:,0].argmax()][0]) \n\t\t\ttopmost = tuple(cnt[cnt[:,:,1].argmin()][0]) \n\t\t\tbottomtmost = tuple(cnt[cnt[:,:,1].argmax()][0]) \t\n\t\t\tx_center = (leftmost[0]+rightmost[0])/2\n\t\t\ty_center = (topmost[1]+bottomtmost[1])/2\n\t\t\t\n\t\t\t\n\t\treturn frame, x_center, y_center\n\t\ndef write_frame(frame, string, date, color):\n\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\tcv2.putText(frame, string, (120,40), font, 0.5 , color, 2)\n\tcv2.putText(frame, date, 
(0,470), font, 0.5 , (0,0,0), 2)\n\treturn frame\n\t\n\n\t\ndef discard_t_sec(cap,t): #not using the first 10 sec as the camera does some focussing \n\ti =0\n\tindex = 20*t #frames/sec * time\n\twhile i in range(index):\n\t\tret , frame = cap.read()\n\t\t#cv2.imshow('frame',frame)\n\t\ti = i+1 \n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\n\n\"\"\"\ndetecting difference between two consective images \nif the pixel difference is less than 20, then force them to 0, so that \ncv2.countNonZero(diff) will not false trigger. if pixel difference is greater than \n10, must be a real movement and then force them to 255.255 is black and 0 will appear as white\n\"\"\"\ndef diffImg(t0,t1,t2):\n\tret, ThreshImage = cv2.threshold(cv2.absdiff(t2,t1), 10 ,255, cv2.THRESH_BINARY)\n\t#cv2.imshow('diffimage',cv2.absdiff(t2,t1)) # this is the grey image difference\n\t#cv2.imshow('threshimage',ThreshImage) \n\t#ThreshImage = cv2.GaussianBlur(ThreshImage, (5,5),0)\n\tsmooth_image = smooth(ThreshImage, 5, 5)\n\t#cv2.imshow('smoothImage',smooth_image)\n\treturn smooth_image\n\n\n\n\n\t\n\n","sub_path":"openCVLib.py","file_name":"openCVLib.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"133428023","text":"from datetime import datetime, timezone\nfrom unittest import mock, TestCase\n\nimport flask\nfrom freezegun import freeze_time\n\nfrom listle.models import Record\n\nnow = datetime.now(timezone.utc)\n\n\nclass TestRecord(TestCase):\n def setUp(self):\n self.app = flask.Flask(__name__)\n self.req_context = {\n 'path': '/blah',\n 'base_url': 'http://listle.test',\n 'json': {\n 'test1': 'value1',\n 'test2': 'value2'\n }\n }\n\n @freeze_time(now)\n def test_as_dict(self):\n expected_dict = {\n 'id': mock.ANY,\n 'meta': {\n 'charset': 'utf-8',\n 'url': f\"{self.req_context['base_url']}{self.req_context['path']}\",\n 'datetime': now.isoformat(),\n 'headers': {'Host': 
'listle.test', 'Content-Type': 'application/json', 'Content-Length': '38'},\n 'user_agent': {'string': '', 'platform': None, 'browser': None, 'version': None, 'language': None}\n },\n 'fields': {'test1': 'value1', 'test2': 'value2'},\n }\n\n with self.app.test_request_context(**self.req_context):\n r = Record(flask.request)\n actual_dict = r.as_dict()\n self.assertDictEqual(actual_dict, expected_dict)\n\n @freeze_time(now)\n @mock.patch('listle.models.uuid')\n def test_iter(self, uuid_mock):\n uuid_mock.uuid4.return_value = 'test-uuid'\n expected_dict = {\n 'id': 'test-uuid',\n 'meta': {\n 'charset': 'utf-8',\n 'url': f\"{self.req_context['base_url']}{self.req_context['path']}\",\n 'datetime': now.isoformat(),\n 'headers': {'Host': 'listle.test', 'Content-Type': 'application/json', 'Content-Length': '38'},\n 'user_agent': {'string': '', 'platform': None, 'browser': None, 'version': None, 'language': None}\n },\n 'fields': {'test1': 'value1', 'test2': 'value2'},\n }\n\n with self.app.test_request_context(**self.req_context):\n r = Record(flask.request)\n actual_dict = dict(r)\n self.assertDictEqual(actual_dict, expected_dict)\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229735468","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql://{}:{}@{}/{}\".format('noob', '123','localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the 
current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \"\"\"\n TODO (each true test add . to result)\n Write at least one test for each test for successful operation and for expected errors.\n \"\"\"\n\n\n # test categories success\n def test_success_categories(self):\n # use this once take care when add add new cate one_or_none do not accept 2 categories\n #new_cat = Category(type='yor_category_here3')\n #new_cat.insert()\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n # In Python, empty lists evaluate to False and non-empty lists evaluate to True\n self.assertTrue(data['categories'])\n\n # test questions success without query parameters\n def test_get_all_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n\n\n # test categories success without query parameters\n def test_success_questions_with_parameter(self):\n res = self.client().get('/categories/html/questions?page=1')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n self.assertTrue(data['questions'])\n self.assertEqual(data['current_category'],'html')\n\n #Test delete missing question\n def test_missing_question(self):\n res = self.client().delete('/questions/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'resource not found')\n\n\n #Test delete missing question\n def test_success_delete(self):\n # dynamic delete\n question_to_delete = Question(question='How we add Line 
break in HTML?', answer='
',category='html',difficulty=5)\n question_to_delete.insert()\n q_id = question_to_delete.id\n res = self.client().delete('/questions/{id}'.format(id=q_id))\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted'], q_id)\n\n # ---------------------------- POST TEST --------------------------------------------------\n \"\"\"\n Before use this test Notes Make sure to use the same question\n in test_success and test_unsuccess functions just add 1 to the html\n \"\"\"\n def test_success_post_question(self):\n success_and_unsucess_post_question = 'is there HTML57 version?'\n res = self.client().post('/questions',json={\n 'question':success_and_unsucess_post_question,\n 'answer':'False',\n 'category':'html',\n 'difficulty':1\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n\n #Test Repeated Question To Do that You need to use success_and_unsucess_post_question var as your question\n # this used to check if the repeated question can be submited\n def test_unsuccess_post_question(self):\n success_and_unsucess_post_question = 'is there HTML57 version?'\n res = self.client().post('/questions',json={\n 'question':success_and_unsucess_post_question,\n 'answer':'False',\n 'category':'html',\n 'difficulty':1\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 409)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'the question cannot be added, because it already exists')\n \"\"\"\n the previous 2 functions are premade used togther\n \"\"\"\n\n # test bad post request question can not asked\n def test_unprocessable_post_request(self):\n res = self.client().post('/questions',json={\n 'question':'IS Flask Bad?',\n 'answer':False,\n 'category':'python',\n 'difficulty':1})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], 
False)\n self.assertEqual(data['message'], 'unprocessable')\n\n################################# Search Tests #####################################\n\n # test search conflict with adding new question 1 action can happend per request\n def test_conflict_post_request(self):\n res = self.client().post('/questions',json={\n 'question':'Can we Search and add new question in same request?',\n 'answer':'False',\n 'category':'python',\n 'difficulty':1,\n 'searchTerm':'Make confilit in Search'\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n # test success earch\n def test_success_search(self):\n res = self.client().post('/questions',json={\n 'searchTerm':'html'\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n #empty lists return false by default\n self.assertTrue(data['questions'])\n\n # test unsuccess earch\n def test_unsuccess_search(self):\n res = self.client().post('/questions',json={\n 'searchTerm':'rnsnufh1rkhytuwh'\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['message'])\n #empty lists return false by default\n self.assertEqual(data['questions'], [])\n\n########################### last tests #########################\n\n # test success category by id\n def test_success_categories_by_id(self):\n # do not add duplicated category\n new_cat = Category(type='test_cat5')\n new_cat.insert()\n cat_id = new_cat.id\n cat_type = 'test_cat5'\n print(cat_id)\n question_to_delete = Question(\n # do not add duplicated question\n question='How we add Line break in HTML8?',\n answer='
',\n category='test_cat5',\n difficulty=5\n )\n question_to_delete.insert()\n res = self.client().get('/categories/{cat_paramter}/questions'.format(cat_paramter=cat_id))\n #get_that_cat = Category.query.filter_by(id=1).one_or_none()\n #if get_that_cat:\n # get_that_cat = get_that_cat.type\n #else:\n # get_that_cat = None\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['questions'])\n self.assertTrue(data['categories'])\n self.assertEqual(data['current_category'], cat_type)\n\n\n # test success quizzes without category\n def test_quiz_success_without_category(self):\n res = self.client().post('/quizzes',json={\n \"previous_questions\":\"['what is larget HTML heading']\"\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['question'])\n self.assertTrue(data['visibleAnswer'])\n self.assertTrue(data['answer'])\n\n # test success quizzes without category\n def test_quiz_success_with_category(self):\n res = self.client().post('/quizzes',json={\n \"previous_questions\":\"['what is larget HTML heading']\",\n \"quiz_category\":\"html\"\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['question'])\n self.assertTrue(data['visibleAnswer'])\n self.assertTrue(data['answer'])\n\n # test questions success with string query paramters\n def test_success_questions(self):\n res = self.client().get('/categories/html/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n self.assertTrue(data['questions'])\n self.assertEqual(data['current_category'],'html')\n\n #Test delete an existing question\n #def delete_an_existing_question(self):\n # res = self.client().delete('/questions/1')\n # data = json.loads(res.data)\n # self.assertEqual(res.status_code, 200)\n # self.assertEqual(data['success'], True)\n # self.assertEqual(data['deleted'], 
'1')\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"371465399","text":"import logging\nimport sys\nimport json\nimport os\n\nlog = logging.getLogger()\nlog.setLevel(10)\nconsoleHandler = logging.StreamHandler(sys.stdout)\nlog.addHandler(consoleHandler)\n\ndef runAlgorithm(x, y, outputDir):\n z = int(x) + int(y)\n log.info(str(x)+\" + \"+str(y)+\" = \" +str(z))\n\n if not outputDir == \"\":\n fname = os.path.join(outputDir, str(x) + \"_\" + str(y) + \"_output.txt\")\n if os.path.exists(fname):\n append_write = 'a'\n else:\n append_write = 'w+'\n target = open(fname, 'w+')\n target.write(str(x)+\" + \"+str(y)+\" = \" +str(z))\n target.write(\"\\n\")\n target.close()\n\n # write to seed manifest\n if not outputDir == \"\":\n target = open(os.path.join(outputDir, \"seed.outputs.json\"), 'a')\n target.write(\"\\\"x\\\": \" + str(x) + \",\\n\")\n target.write(\"\\\"y\\\": \" + str(y) + \",\\n\")\n target.write(\"\\\"total\\\": \" + str(z))\n target.close()\n return z\n\nif __name__ == '__main__':\n sys_stdout = sys.stdout\n argv = sys.argv\n if argv is None:\n log.error('No inputs passed to job')\n sys.exit(2)\n argc = len(argv) - 1\n\n # Must always have an input file\n inputStr = argv[1]\n\n # output file is optional\n outputStr = \"\"\n if len(argv) > 2:\n outputStr = argv[2]\n if not os.path.exists(outputStr):\n os.makedirs(outputStr)\n\n if os.path.isdir(inputStr):\n inputs = []\n for filename in os.listdir(inputStr):\n input_obj = open(os.path.join(inputStr, filename))\n inputs.extend(input_obj.readlines())\n else:\n input_obj = open(inputStr, \"r\")\n inputs = input_obj.readlines()\n\n if not outputStr == \"\":\n fname = os.path.join(outputStr, \"seed.outputs.json\")\n if os.path.exists(fname):\n 
target = open(fname, 'w')\n else:\n target = open(fname, 'w+')\n\n target.write('{\\n')\n target.flush()\n target.close()\n\n for idx, line in enumerate(inputs):\n xy = line.split()\n total = runAlgorithm(xy[0], xy[1], outputStr)\n if idx < len(inputs)-1 and outputStr != \"\":\n target = open(fname, 'a')\n target.write(\",\\n\")\n target.close()\n else:\n target = open(fname, 'a')\n target.write(\"\\n\")\n target.flush()\n target.close()\n\n if not outputStr == \"\":\n target = open(os.path.join(outputStr, \"seed.outputs.json\"), 'a')\n target.write(\"}\")\n target.close\n\n log.info('Completed Python Wrapper')\n\n sys.exit(0)\n","sub_path":"examples/multi-addition-job/my_alg.py","file_name":"my_alg.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"441418921","text":"from openpyxl import load_workbook\nimport json\ndef main():\n print(\"读取文件\")\n try:\n with open(\"../res/xlsx/video.xlsx\", \"rb\") as f:\n print(f.read())\n except FileNotFoundError as e:\n print('指定的文件无法打开.')\n except IOError as e:\n print('读写文件时出现错误.')\n\n\ndef main1():\n wb = load_workbook('../res/xlsx/video.xlsx')\n for sheet in wb:\n print(sheet.rows)\n header = next(sheet.rows)\n video = dict()\n for row in sheet.rows:\n for i,h in enumerate(header):\n print(row[i])\n if h == 'IP':\n video[\"ip\"] = row[i]\n print(video[\"ip\"])\n\n\n wb.close()\n\n\n\ndef import_data(file_path: str, sheets=[]):\n wb = load_workbook(file_path)\n if not sheets:\n sheets = wb.sheetnames\n for sheet in sheets:\n print(f'--- begin execute [{sheet}]')\n ws = wb[sheet]\n rows = ws.values\n header = next(rows)\n print(f'|-- header: {header}')\n lithest = []\n for row in rows:\n video = dict()\n video[\"areaId\"] = '321200'\n for i, h in enumerate(header):\n if h == 'IP':\n video[\"ip\"] = row[i]\n if h == '路口/路段名称':\n video[\"name\"] = str(row[i])\n if h == '建设方向/点位':\n # excel里如果没有路段路口名称,就用建设点位方向\n if video[\"name\"] 
is None:\n video[\"name\"] = row[i]\n # excel里只读取简短方向的\n elif 0 < len(str(row[i])) < 4:\n video[\"name\"] = str(video[\"name\"]) + row[i]\n # excel里带特定标识的\n elif \"智能卡口\" in str(video[\"name\"]):\n video[\"name\"] = str(video[\"name\"]) + row[i]\n else:\n video[\"name\"] = row[i]\n if h == '设备类型':\n if row[i] == '电子警察' or row[i] == '普通监控':\n video[\"modelId\"] = '0001000300010000'\n if row[i] == '车辆卡口' or row[i] == '测速卡口':\n video[\"modelId\"] = '0001000300020000'\n if h == '经度':\n video['longitude'] = row[i]\n if h == '纬度':\n video[\"latitude\"] = row[i]\n if h == '设备状态':\n if row[i] == \"正常\":\n video[\"statusId\"] = \"0\"\n elif row[i] == \"维修\":\n video[\"statusId\"] = \"1\"\n elif row[i] == \"拆除\":\n video[\"statusId\"] = \"2\"\n else:\n video[\"statusId\"] = \"\"\n if h == '运维单位':\n if row[i] is None:\n print(\"empty maintain\")\n else:\n if '电信' in str(row[i]):\n video[\"maintainId\"] = ''\n if '南京蓝泰' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b98845b44000d'\n if '江苏尤特斯' in str(row[i]):\n video[\"maintainId\"] = ''\n if '南京中盾安防' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b988b3a6b0013'\n if '泰州诚安' in str(row[i]):\n video[\"maintainId\"] = ''\n if '上海宝康' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b9884a655000e'\n if '南京凌云' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b988294d9000a'\n if '河北中岗' in str(row[i]):\n video[\"maintainId\"] = ''\n if '南京洛普' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b988411c9000c'\n if '东南智能' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b988602730011'\n if '长天智远' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b9885687e0010'\n if '金中天' in str(row[i]):\n video[\"maintainId\"] = ''\n if '海阳' in str(row[i]):\n video[\"maintainId\"] = '2c40288b6b78ba5e016b98917c5e0014'\n if '兴泰' in str(row[i]):\n video[\"maintainId\"] = ''\n if '隆鼎' in str(row[i]):\n video[\"maintainId\"] = ''\n if '宏达' in str(row[i]):\n 
video[\"maintainId\"] = '2c40288b6b78ba5e016b98832d6a000b'\n lithest.append(video)\n print(lithest)\n try:\n with open(\"../res/out/video.json\",\"w\", encoding='utf-8') as fs:\n json.dump(lithest, fs,ensure_ascii=False)\n fs.close()\n except IOError as e:\n print(e)\n\n\n\nif __name__ == \"__main__\":\n import_data(\"../res/xlsx/video.xlsx\")\n","sub_path":"code/fs/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"328156517","text":"import requests\n\n'''\nSimple stock simulator:\nGame simulates real stock market data provided by Alpha Vantage\nAPI key can be obtained freely please use your own key on their web site\nhttps://www.alphavantage.co/documentation/\n\nThis is the second version of this game, first version was text based.\nNew version features GUI using Tkinter library. I focused on functinality,\ncosmetics was not priority therefore excuse the apearance.\n\nCurrent version is hardcoded to request past 30 days of IBM stock data.\nFutere versions will provide ability to use different stock options and\ntimeframes.\n\nAlphaVantageAPI.py is the code making the request and running the simulation\nAlphaVantageTK.py is the main app with GUI.\n\nEnjoy.\n'''\n\n\nclass DayTrader:\n def __init__(self):\n self.bank_account = 10000\n self.num_of_shares = 0\n self.day_counter = 0\n\n def __str__(self):\n return \"Bank account balance $\" + str(\"{:.2f}\".format(self.bank_account)) + \" and You own {} shares\".format(self.num_of_shares)\n\n def buy(self, shares, price):\n if shares * price > self.bank_account:\n print(\"Sorry not enough cash in your bank account.\")\n else:\n self.bank_account -= shares * price\n self.num_of_shares += shares\n return True\n\n def sell(self, shares, price):\n if shares > self.num_of_shares:\n print(\"You dont have {} shares to sell\".format(shares))\n else:\n self.bank_account += shares * price\n 
self.num_of_shares -= shares\n return True\n\ndef load_data():\n MYFUNCTION = \"TIME_SERIES_DAILY\" # \"GLOBAL_QUOTE\"\n MYKEY = \"A7SW5BBTJ4PNLKAK\"\n MYSYMBOL = \"IBM\"\n #print(\"NOT Making a request.!!!!!\")\n #print(\"Data made up and sent.\")\n #data = [('2021-04-12', 134.59), ('2021-04-13', 144.59), ('2021-04-14', 154.59), ('2021-04-15', 184.59), ('2021-04-16', 194.59)]\n #return data\n r = requests.get(\"https://www.alphavantage.co/query?function=\"+ MYFUNCTION +\"&symbol=IBM&apikey=\" + MYKEY)\n data = r.json()\n\n load_prices = []\n counter = 0\n for k , v in data[\"Time Series (Daily)\"].items():\n load_prices.append((k, float(v[\"4. close\"])))\n counter += 1\n if counter == 30:\n break\n print(\"Date: \", k ,\" price \", v[\"4. close\"])\n\n print(\"Price Load complete... Lets play\")\n load_prices.reverse()\n return load_prices\n","sub_path":"AlphaVantageAPI.py","file_name":"AlphaVantageAPI.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"599347041","text":"from django.db import models\nfrom decimal import Decimal\nfrom django.db.models import Count, Q\n\n# Create your models here.\n\n\nclass Insurance_company (models.Model):\n name = models.CharField(max_length=200)\n long_name = models.CharField(max_length=200, default='', blank=False)\n\n logo = models.ImageField(upload_to='./')\n\n def __str__(self):\n return \"%s\" % (self.name)\n\n\nclass Types_of_insurance (models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=200)\n image = models.ImageField(upload_to='uploads/')\n\n def __str__(self):\n return \"%s\" % (self.name)\n\n\nclass Generic_coverage (models.Model):\n type = models.ForeignKey(\n Types_of_insurance, on_delete=models.CASCADE, null=True)\n coverage = models.CharField(max_length=200)\n sub_coverage = sub_coverage = models.CharField(\n max_length=200, null=False, blank=True, default=\"\")\n 
what_is_about = models.CharField(\n max_length=200, blank=True, null=False, default=\"\")\n priority = models.DecimalField(\n max_digits=5, decimal_places=0, default=Decimal('1'))\n\n def __str__(self):\n return \"%s %s %s\" % (self.type, self.coverage, self.sub_coverage)\n\n def get_sub_coverage(self):\n if self.sub_coverage:\n return self.sub_coverage\n else:\n return \"\"\n\n\nclass Package (models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=200)\n plan = models.CharField(max_length=200, default='', blank=True)\n offered_by = models.ForeignKey(Insurance_company, on_delete=models.CASCADE)\n type = models.ForeignKey(\n Types_of_insurance, on_delete=models.CASCADE, null=True)\n base_price_internal_use = models.DecimalField(\n max_digits=8, decimal_places=2, default=Decimal('0.00'))\n minimum_Premium_internal_use = models.DecimalField(\n max_digits=8, decimal_places=2, default=Decimal('0.00'))\n\n def __str__(self):\n return \"%s,%s,%s\" % (self.name, self.plan, self.offered_by)\n\n @property\n def insurance_company(self):\n return self.offered_by\n\n\nclass SectorPackagePrice (models.Model):\n sectorname = models.CharField(max_length=200)\n sectorprice = models.DecimalField(\n max_digits=8, decimal_places=2, default=Decimal('0.00'))\n SectorPackage = models.ForeignKey(\n Package, on_delete=models.CASCADE, null=True)\n sectorminprice = models.DecimalField(\n max_digits=8, decimal_places=2, default=Decimal('0.00'))\n\n def __str__(self):\n return \"%s,%s,%s\" % (self.sectorname, self.sectorprice, self.SectorPackage)\n\n\nclass Package_feature (models.Model):\n coverage = models.CharField(max_length=200)\n sub_coverage = models.CharField(\n max_length=200, null=False, blank=True, default=\"\")\n what_is_about = models.CharField(\n max_length=200, blank=True, null=False, default=\"\")\n generic_unit_of_measure = models.CharField(\n max_length=200, blank=True, null=False, default='$')\n uom_prefix = 
models.BooleanField(default=True, null=False)\n base_insured = models.DecimalField(\n max_digits=10, decimal_places=2, null=False, blank=True, default=Decimal('0.00'))\n decimal_places = models.DecimalField(\n max_digits=2, decimal_places=0, default=Decimal(0))\n base_insured_text = models.TextField(\n max_length=200, blank=True, null=False, default='')\n base_insured_detail_text = models.TextField(\n max_length=400, blank=True, null=False, default='')\n package = models.ForeignKey(Package, on_delete=models.CASCADE, null=True)\n not_actual = models.BooleanField(default=False, null=False)\n\n addon = models.BooleanField(default=False, null=False)\n addon_mandatory = models.BooleanField(default=False, null=False)\n addon_insured_text = models.CharField(\n max_length=200, blank=True, null=False, default=\"\")\n addon_multiplier = models.DecimalField(\n max_digits=12, decimal_places=2, default=Decimal('1.00'))\n addon_default_price_per_uom = models.DecimalField(\n max_digits=14, decimal_places=10, default=Decimal('0.000000'))\n addon_min_number_of_uom = models.DecimalField(\n max_digits=16, decimal_places=2, default=Decimal('0.00'))\n addon_max_no_of_uom = models.DecimalField(\n max_digits=12, decimal_places=2, default=Decimal('0.00'))\n generic_coverage = models.ForeignKey(\n Generic_coverage, on_delete=models.CASCADE, null=True, blank=True, related_name='general_coverage')\n addon_price_per_uom_for_sector = models.CharField(\n max_length=200, blank=True, null=False, default=\"[{'Office': 0.000000}]\")\n\n actual_coverage = models.CharField(max_length=200, blank=True, default=\"\")\n actual_subcoverage = models.CharField(\n max_length=200, null=False, blank=True, default=\"\")\n actual_insured_text_part1 = models.TextField(\n max_length=200, blank=True, null=False, default='')\n actual_insured_text_part2 = models.TextField(\n max_length=200, blank=True, null=False, default='')\n actual_sequence = models.DecimalField(\n max_digits=3, decimal_places=0, 
default=Decimal(1))\n actual_uom = models.CharField(\n max_length=200, blank=True, null=False, default='$\"')\n calculated_addon_units = models.DecimalField(\n max_digits=16, decimal_places=2, default=Decimal('0.00'))\n calculated_addon_totalpremium = models.DecimalField(\n max_digits=16, decimal_places=2, default=Decimal('0.00'))\n calculated_total_insured = models.DecimalField(\n max_digits=16, decimal_places=2, default=Decimal('0.00'))\n\n def adjusted_addon_multiplier(self):\n if (self.addon_multiplier > 1):\n return self.addon_multiplier\n m = [1, 5, 10, 20, 50, 100, 200, 500, 100, 200, 500, 1000, 2000,\n 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]\n for x in m:\n if ((self.addon_max_no_of_uom / x) <= 100):\n return x\n print('return: from :', self.addon_multiplier, 'max :',\n self.addon_max_no_of_uom, 'to :', 1000000)\n\n return x\n\n def max_number_of_uom(self):\n f = '{0:.' + str(self.decimal_places) + 'f}'\n\n g = f.format(self.addon_max_no_of_uom)\n # print ('maxnumber of uom :' ,self.addon_max_no_of_uom, g,type(g))\n return g\n\n def uom(self):\n return self.generic_unit_of_measure\n\n def get_sub_coverage(self):\n if self.sub_coverage:\n return self.sub_coverage\n else:\n return \"\"\n\n def get_generic_coverage(self):\n return self.generic_coverage\n\n def based_insured_value(self):\n c = self.based_insured_display()\n if (len(c)):\n return Decimal(c)\n else:\n return Decimal(0.00)\n\n def based_insured_display(self):\n if (self.base_insured is None):\n myfield = Package_feature._meta.get_field(\n 'base_insured').get_default()\n f = '{0:.' + str(self.decimal_places) + 'f}'\n# \t\t\tprint (myfield)\n\n return str(f.format(myfield))\n\n aaa = ''\n\n if not(self.base_insured is None):\n f = '{0:.' 
+ str(self.decimal_places) + 'f}'\n bbb = f.format(self.base_insured)\n aaa = aaa + bbb\n# \t\tprint (aaa)\n return aaa\n\n def based_insured_text_display(self):\n ccc = ''\n if not (self.base_insured_text is None):\n if (len(self.base_insured_text) > 0):\n ccc = self.base_insured_text\n return ccc\n\n def base_insured_display_full(self):\n return self.based_insured_display() + '\\n' + self.based_insured_text_display()\n\n def addon_insured_display(self):\n if (not self.addon):\n return ''\n aaa = ''\n uom = self.generic_unit_of_measure\n if not(self.addon_insured_text is None):\n uom_suffix = self.addon_insured_text\n else:\n uom_suffix = ''\n aaa = uom + '(' + str(self.addon_min_number_of_uom) + \\\n ' to ' + str(self.addon_max_no_of_uom) + ')'\n aaa = aaa + '@' + str(self.addon_default_price_per_uom)\n if not (self.addon_multiplier == 1.00):\n aaa = aaa + ' per ' + uom + str(self.addon_multiplier)\n\n def init_addon_value(self):\n f = '{0:.' + str(self.decimal_places) + 'f}'\n return f.format(self.addon_min_number_of_uom)\n\n def init_total_value(self):\n f = '{0:.' + str(self.decimal_places) + 'f}'\n total = self.base_insured + self.addon_min_number_of_uom\n return f.format(total)\n\n def calculated_addon_totalpremium_display(self):\n f = '{0:.' + str(self.decimal_places) + 'f}'\n return f.format(self.calculated_addon_totalpremium)\n\n def calculated_total_insured_display(self):\n f = '{0:.' + str(self.decimal_places) + 'f}'\n print (self.calculated_total_insured)\n return f.format(self.calculated_total_insured)\n\n def init_premium(self):\n f = '{0:.' 
+ str(self.decimal_places) + 'f}'\n total = self.addon_min_number_of_uom * self.addon_default_price_per_uom\n return f.format(total)\n\n def package_from(self):\n return self.package.insurance_company.name\n\n def package_name(self):\n return self.package.name\n\n def package_plan(self):\n return self.package.plan\n","sub_path":"insureapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"357157205","text":"\n\ndef merge_sort(l1):\n\tarr_len=len(l1)\n\tif arr_len==1:\n\t\t#print(l1)\n\t\treturn l1\n\t\n\ta=merge_sort(l1[:arr_len//2])\n\tb=merge_sort(l1[arr_len//2:])\n\tc=[]\n\ti,j=0,0\n\twhile i None:\n \"\"\"\n Initialize this Playstyle with BattleQueue as its battle queue.\n \"\"\"\n self.battle_queue = battle_queue\n self.is_manual = True\n\n def select_attack(self, parameter: Any = None) -> str:\n \"\"\"\n Return the attack for the next character in this Playstyle's\n battle_queue to perform.\n\n Return 'X' if a valid move cannot be found.\n \"\"\"\n raise NotImplementedError\n\n def copy(self, new_battle_queue: 'BattleQueue') -> 'Playstyle':\n \"\"\"\n Return a copy of this Playstyle which uses the BattleQueue\n new_battle_queue.\n \"\"\"\n raise NotImplementedError\n\n\nclass ManualPlaystyle(Playstyle):\n \"\"\"\n The ManualPlaystyle. 
Inherits from Playstyle.\n \"\"\"\n\n def select_attack(self, parameter: Any = None) -> str:\n \"\"\"\n Return the attack for the next character in this Playstyle's\n battle_queue to perform.\n\n parameter represents a key pressed by a player.\n\n Return 'X' if a valid move cannot be found.\n \"\"\"\n if parameter in ['A', 'S']:\n return parameter\n\n return 'X'\n\n def copy(self, new_battle_queue: 'BattleQueue') -> 'Playstyle':\n \"\"\"\n Return a copy of this ManualPlaystyle which uses the\n BattleQueue new_battle_queue.\n \"\"\"\n return ManualPlaystyle(new_battle_queue)\n\n\nclass RandomPlaystyle(Playstyle):\n \"\"\"\n The Random playstyle. Inherits from Playstyle.\n \"\"\"\n def __init__(self, battle_queue: 'BattleQueue') -> None:\n \"\"\"\n Initialize this RandomPlaystyle with BattleQueue as its battle queue.\n \"\"\"\n super().__init__(battle_queue)\n self.is_manual = False\n\n def select_attack(self, parameter: Any = None) -> str:\n \"\"\"\n Return the attack for the next character in this Playstyle's\n battle_queue to perform.\n\n Return 'X' if a valid move cannot be found.\n \"\"\"\n actions = self.battle_queue.peek().get_available_actions()\n\n if not actions:\n return 'X'\n\n return random.choice(actions)\n\n def copy(self, new_battle_queue: 'BattleQueue') -> 'Playstyle':\n \"\"\"\n Return a copy of this RandomPlaystyle which uses the\n BattleQueue new_battle_queue.\n \"\"\"\n return RandomPlaystyle(new_battle_queue)\n\n\ndef get_state_score(battle_queue: 'BattleQueue') -> int:\n \"\"\"\n Return an int corresponding to the highest score that the next player in\n battle_queue can guarantee.\n\n For a state that's over, the score is the HP of the character who still has\n HP if the next player who was supposed to act is the winner. If the next\n player who was supposed to act is the loser, then the score is -1 * the\n HP of the character who still has HP. If there is no winner (i.e. 
there's\n a tie) then the score is 0.\n\n >>> from a2_battle_queue import BattleQueue\n >>> from a2_characters import Rogue, Mage\n >>> bq = BattleQueue()\n >>> r = Rogue(\"r\", bq, ManualPlaystyle(bq))\n >>> m = Mage(\"m\", bq, ManualPlaystyle(bq))\n >>> r.enemy = m\n >>> m.enemy = r\n >>> bq.add(r)\n >>> bq.add(m)\n >>> get_state_score(bq)\n 30\n >>> m.set_hp(3)\n >>> get_state_score(bq)\n 100\n >>> r.set_hp(40)\n >>> get_state_score(bq)\n 40\n >>> bq.remove()\n r (Rogue): 40/100\n >>> bq.add(r)\n >>> get_state_score(bq)\n -10\n >>> m.set_hp(100)\n >>> m.set_sp(30)\n >>> r.set_hp(40)\n >>> r.set_sp(10)\n >>> get_state_score(bq)\n 79\n >>> r.set_hp(30)\n >>> r.set_sp(3)\n >>> m.set_hp(7)\n >>> m.set_sp(30)\n >>> get_state_score(bq)\n 7\n >>> r.set_sp(0)\n >>> r.set_hp(20)\n >>> m.set_hp(88)\n >>> m.set_sp(25)\n >>> bq.remove()\n m (Mage): 88/25\n >>> bq.add(m)\n >>> get_state_score(bq)\n 88\n \"\"\"\n # TODO: Implement the get_state_score function (which will be used in\n score = []\n current_player = battle_queue.peek()\n if battle_queue.is_over():\n if current_player == battle_queue.get_winner():\n return current_player.get_hp()\n elif current_player != battle_queue.get_winner():\n return current_player.enemy.get_hp() * -1\n elif battle_queue.get_winner() is None or battle_queue.is_empty():\n return 0\n else:\n new_bq_attack = battle_queue.copy()\n new_bq_special = battle_queue.copy()\n for move in battle_queue.peek().get_available_actions():\n if move == 'A':\n current_attacker = new_bq_attack.peek()\n # if isinstance(current_attacker, Sorcerer):\n # current_attacker = current_attacker.copy(new_bq_attack)\n new_bq_attack.peek().attack()\n new_bq_attack.peek()\n if not new_bq_attack.is_empty():\n new_bq_attack.remove()\n later_attacker = new_bq_attack.peek()\n if current_attacker == later_attacker:\n score.append(get_state_score(new_bq_attack))\n else:\n score.append(get_state_score(new_bq_attack) * -1)\n if move == 'S':\n current_attacker = 
new_bq_special.peek()\n new_bq_special.peek().special_attack()\n new_bq_special.peek()\n if not new_bq_special.is_empty():\n new_bq_special.remove()\n later_attacker = new_bq_special.peek()\n if current_attacker == later_attacker:\n score.append(get_state_score(new_bq_special))\n else:\n score.append(get_state_score(new_bq_special) * -1)\n return max(score)\n\n\nclass RecursiveMinimax(Playstyle):\n \"\"\"\n A class representing the RecursiveMinimax strategy.\n \"\"\"\n def __init__(self, battle_queue: 'BattleQueue') -> None:\n \"\"\"\n Initialize this RecursiveMinimax strategy.\n \"\"\"\n self.battle_queue = battle_queue\n self.is_manual = False\n\n def select_attack(self, parameter: Any = None) -> str:\n \"\"\"\n Return a move that will guarantee the best outcome using\n RecursiveMinimax.\n \"\"\"\n bq_attack = self.battle_queue.copy()\n bq_special = self.battle_queue.copy()\n if bq_attack.peek().get_available_actions() == ['A']:\n return 'A'\n elif ('A' in bq_attack.peek().get_available_actions()\n and 'S' in bq_special.peek().get_available_actions()):\n cur_bq_attack = bq_attack.peek()\n # if isinstance(cur_bq_attack, Sorcerer):\n # cur_bq_attack = cur_bq_attack.copy(bq_attack)\n bq_attack.peek().attack()\n bq_attack.peek()\n if not bq_attack.is_empty():\n bq_attack.remove()\n cur_bq_attack1 = bq_attack.peek()\n if cur_bq_attack == cur_bq_attack1:\n attack_score = get_state_score(bq_attack)\n else:\n attack_score = get_state_score(bq_attack) * -1\n cur_bq_special = bq_special.peek()\n bq_special.peek().special_attack()\n bq_special.peek()\n if not bq_special.is_empty():\n bq_special.remove()\n cur_bq_special1 = bq_special.peek()\n if cur_bq_special == cur_bq_special1:\n special_score = get_state_score(bq_special)\n else:\n special_score = get_state_score(bq_special) * -1\n if special_score < attack_score:\n return 'A'\n elif special_score > attack_score:\n return 'S'\n return 'A'\n return 'X'\n\n def copy(self, new_battle_queue: 'BattleQueue') -> 
'RecursiveMinimax':\n \"\"\"\n Return a copy of the RecursiveMinimax with a new_battle_queue.\n \"\"\"\n return RecursiveMinimax(new_battle_queue)\n\n\nclass IterativeMinimax(Playstyle):\n \"\"\"\n A class representing the IterativeMinimax.\n \"\"\"\n def __init__(self, battle_queue: 'BattleQueue') -> None:\n \"\"\"\n Initialize this IterativeMinimax strategy.\n \"\"\"\n self.battle_queue = battle_queue\n self.is_manual = False\n\n def select_attack(self, parameter: Any = None) -> str:\n \"\"\"\n Return a move that will guarantee the best outcome using\n IterativeMinimax.\n \"\"\"\n bq_attack = self.battle_queue.copy()\n bq_special = self.battle_queue.copy()\n if bq_attack.peek().get_available_actions() == ['A']:\n return 'A'\n elif ('A' in bq_attack.peek().get_available_actions()\n and 'S' in bq_special.peek().get_available_actions()):\n cur_bq_attack = bq_attack.peek()\n # if isinstance(cur_bq_attack, Sorcerer):\n # cur_bq_attack = cur_bq_attack.copy(bq_attack)\n bq_attack.peek().attack()\n bq_attack.peek()\n if not bq_attack.is_empty():\n bq_attack.remove()\n cur_bq_attack1 = bq_attack.peek()\n if cur_bq_attack == cur_bq_attack1:\n attack_score = iterative_helper(bq_attack)\n else:\n attack_score = iterative_helper(bq_attack) * -1\n cur_bq_special = bq_special.peek()\n bq_special.peek().special_attack()\n bq_special.peek()\n if not bq_special.is_empty():\n bq_special.remove()\n cur_bq_special1 = bq_special.peek()\n if cur_bq_special == cur_bq_special1:\n special_score = iterative_helper(bq_special)\n else:\n special_score = iterative_helper(bq_special) * -1\n if special_score < attack_score:\n return 'A'\n elif special_score > attack_score:\n return 'S'\n return 'A'\n return 'X'\n\n def copy(self, new_battle_queue: 'BattleQueue') -> 'IterativeMinimax':\n \"\"\"\n Return a copy of the Iterative Minimax with a new_battle_queue.\n \"\"\"\n return IterativeMinimax(new_battle_queue)\n\n\ndef iterative_helper(battle_queue: 'BattleQueue') -> int:\n \"\"\"\n Return an 
int corresponding to the highest score that the next player in\n battle_queue can guarantee, iteratively.\n\n >>> from a2_battle_queue import BattleQueue\n >>> from a2_characters import Rogue, Mage\n >>> bq = BattleQueue()\n >>> r = Rogue(\"r\", bq, ManualPlaystyle(bq))\n >>> m = Mage(\"m\", bq, ManualPlaystyle(bq))\n >>> r.enemy = m\n >>> m.enemy = r\n >>> bq.add(r)\n >>> bq.add(m)\n >>> iterative_helper(bq)\n 30\n >>> m.set_hp(3)\n >>> iterative_helper(bq)\n 100\n >>> r.set_hp(40)\n >>> iterative_helper(bq)\n 40\n >>> bq.remove()\n r (Rogue): 40/100\n >>> bq.add(r)\n >>> iterative_helper(bq)\n -10\n >>> m.set_hp(100)\n >>> m.set_sp(30)\n >>> r.set_hp(40)\n >>> r.set_sp(10)\n >>> iterative_helper(bq)\n 79\n >>> r.set_hp(30)\n >>> r.set_sp(3)\n >>> m.set_hp(7)\n >>> m.set_sp(30)\n >>> iterative_helper(bq)\n 7\n >>> r.set_sp(0)\n >>> r.set_hp(20)\n >>> m.set_hp(88)\n >>> m.set_sp(25)\n >>> bq.remove()\n m (Mage): 88/25\n >>> bq.add(m)\n >>> iterative_helper(bq)\n 88\n \"\"\"\n stack = Stack()\n state = State(battle_queue)\n state.score = 0\n stack.add(state)\n # for move in battle_queue.peek().get_available_actions():\n # if move == 'A':\n # new_bq_attack = battle_queue.copy()\n # # if isinstance(cur_player, Sorcerer):\n # # cur_player.copy(new_bq_attack)\n # new_bq_attack.peek().attack()\n # new_bq_attack.peek()\n # if not new_bq_attack.is_empty():\n # new_bq_attack.remove()\n # state = State(new_bq_attack)\n # stack.add(state)\n # if move == 'S':\n # new_bq_special = battle_queue.copy()\n # new_bq_special.peek().special_attack()\n # new_bq_special.peek()\n # if not new_bq_special.is_empty():\n # new_bq_special.remove()\n # state = State(new_bq_special)\n # stack.add(state)\n while not stack.is_empty():\n current_state = stack.remove()\n first_bq = current_state.bq\n if first_bq.is_over():\n current_player = current_state.bq.peek()\n if current_player == first_bq.get_winner():\n current_state.score = current_player.get_hp()\n elif current_player != 
first_bq.get_winner():\n current_state.score = first_bq.peek().enemy.get_hp() * -1\n elif (first_bq.get_winner() is None or\n first_bq.is_empty()):\n current_state.score = 0\n elif current_state.children == []:\n stack.add(current_state)\n for move in first_bq.peek().get_available_actions():\n if move == 'A':\n second_bq_attack = first_bq.copy()\n # if isinstance(cur_player, Sorcerer):\n # cur_player.copy(second_bq_attack)\n cur_player = second_bq_attack.peek()\n second_bq_attack.peek().attack()\n second_bq_attack.peek()\n if not second_bq_attack.is_empty():\n second_bq_attack.remove()\n child_state = State(second_bq_attack)\n child_state.cur_player = cur_player\n current_state.children.append(child_state)\n if move == 'S':\n second_bq_special = first_bq.copy()\n cur_player = second_bq_special.peek()\n second_bq_special.peek().special_attack()\n second_bq_special.peek()\n if not second_bq_special.is_empty():\n second_bq_special.remove()\n child_state = State(second_bq_special)\n child_state.cur_player = cur_player\n current_state.children.append(child_state)\n for child in current_state.children:\n stack.add(child)\n else:\n score_first_bq = []\n for child in current_state.children:\n if child.bq.peek() != child.cur_player:\n score_first_bq.append(child.score * -1)\n else:\n score_first_bq.append(child.score)\n max_score = max(score_first_bq)\n current_state.score = max_score\n return state.score\n\n\nclass State:\n \"\"\"\n A class representing a State of the game.\n \"\"\"\n def __init__(self, battle_queue: 'BattleQueue', children=None) -> None:\n \"\"\"\n Initialize the State class\n \"\"\"\n self.bq = battle_queue\n self.children = [] if children is None else children[:]\n self.score = 0\n self.cur_player = None\n\n\nclass Stack:\n \"\"\"\n A class representing a Stack ADT.\n \"\"\"\n def __init__(self) -> None:\n \"\"\"\n Initiate the Stack class.\n \"\"\"\n self._content = []\n\n def __str__(self) -> str:\n \"\"\"\n Return a string representation of the 
Stack.\n \"\"\"\n return \", \".join(self._content) + \"<- Top\"\n\n def is_empty(self) -> bool:\n \"\"\"\n Return whether the stack is empty or not\n \"\"\"\n return self._content == []\n\n def add(self, item: Any) -> None:\n \"\"\"\n Add the item in the stack.\n \"\"\"\n self._content.append(item)\n\n def remove(self) -> Any:\n \"\"\"\n Remove the item on top of the Stack and return it\n \"\"\"\n return self._content.pop()\n# TODO: Implement classes for Recursive Minimax and Iterative Minimax\n\n\nif __name__ == '__main__':\n import python_ta\n python_ta.check_all(config='a2_pyta.txt')\n","sub_path":"a2/a2_playstyle.py","file_name":"a2_playstyle.py","file_ext":"py","file_size_in_byte":16409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"201675679","text":"\"\"\"Contains tests for the model abstractions and different models.\"\"\"\nimport unittest\nimport numpy as np\nimport tensorflow as tf\nimport random\nfrom gym.spaces import Box\nfrom hbaselines.hiro.tf_util import get_trainable_vars\nfrom hbaselines.hiro.policy import ActorCriticPolicy, FeedForwardPolicy\nfrom hbaselines.hiro.policy import GoalDirectedPolicy\n\n\nclass TestActorCriticPolicy(unittest.TestCase):\n \"\"\"Test the FeedForwardPolicy object in hbaselines/hiro/policy.py.\"\"\"\n\n def setUp(self):\n self.policy_params = {\n 'sess': tf.Session(),\n 'ac_space': Box(low=-1, high=1, shape=(1,), dtype=np.float32),\n 'ob_space': Box(low=-2, high=2, shape=(2,), dtype=np.float32),\n 'co_space': Box(low=-3, high=3, shape=(3,), dtype=np.float32),\n }\n\n def tearDown(self):\n self.policy_params['sess'].close()\n del self.policy_params\n\n def test_init(self):\n \"\"\"Validate that the graph and variables are initialized properly.\"\"\"\n policy = ActorCriticPolicy(**self.policy_params)\n\n # Check that the abstract class has all the required attributes.\n self.assertEqual(policy.sess, self.policy_params['sess'])\n self.assertEqual(policy.ac_space, 
self.policy_params['ac_space'])\n self.assertEqual(policy.ob_space, self.policy_params['ob_space'])\n self.assertEqual(policy.co_space, self.policy_params['co_space'])\n\n # Check that the abstract class has all the required methods.\n self.assertTrue(hasattr(policy, \"initialize\"))\n self.assertTrue(hasattr(policy, \"update\"))\n self.assertTrue(hasattr(policy, \"get_action\"))\n self.assertTrue(hasattr(policy, \"value\"))\n self.assertTrue(hasattr(policy, \"store_transition\"))\n self.assertTrue(hasattr(policy, \"get_stats\"))\n\n\nclass TestFeedForwardPolicy(unittest.TestCase):\n \"\"\"Test the FeedForwardPolicy object in hbaselines/hiro/policy.py.\"\"\"\n\n def setUp(self):\n self.policy_params = {\n 'sess': tf.Session(),\n 'ac_space': Box(low=-1, high=1, shape=(1,), dtype=np.float32),\n 'ob_space': Box(low=-2, high=2, shape=(2,), dtype=np.float32),\n 'co_space': Box(low=-3, high=3, shape=(3,), dtype=np.float32),\n 'buffer_size': 1e6,\n 'batch_size': 100,\n 'actor_lr': 1e-3,\n 'critic_lr': 1e-4,\n 'clip_norm': 0,\n 'critic_l2_reg': 0,\n 'verbose': 2,\n 'tau': 0.005,\n 'gamma': 0.001,\n 'normalize_observations': False,\n 'observation_range': (-5, 5),\n 'normalize_returns': False,\n 'return_range': (-5, 5),\n 'layer_norm': False,\n 'reuse': False,\n 'layers': None,\n 'act_fun': tf.nn.relu,\n 'scope': None\n }\n\n def tearDown(self):\n self.policy_params['sess'].close()\n del self.policy_params\n\n def test_init(self):\n \"\"\"Validate that the graph and variables are initialized properly.\"\"\"\n policy = FeedForwardPolicy(**self.policy_params)\n\n # Check that the abstract class has all the required attributes.\n self.assertEqual(policy.buffer_size, self.policy_params['buffer_size'])\n self.assertEqual(policy.batch_size, self.policy_params['batch_size'])\n self.assertEqual(policy.actor_lr, self.policy_params['actor_lr'])\n self.assertEqual(policy.critic_lr, self.policy_params['critic_lr'])\n self.assertEqual(policy.clip_norm, self.policy_params['clip_norm'])\n 
self.assertEqual(\n policy.critic_l2_reg, self.policy_params['critic_l2_reg'])\n self.assertEqual(policy.verbose, self.policy_params['verbose'])\n self.assertEqual(policy.tau, self.policy_params['tau'])\n self.assertEqual(policy.gamma, self.policy_params['gamma'])\n self.assertEqual(\n policy.normalize_observations,\n self.policy_params['normalize_observations'])\n self.assertEqual(\n policy.observation_range, self.policy_params['observation_range'])\n self.assertEqual(\n policy.normalize_returns, self.policy_params['normalize_returns'])\n self.assertEqual(\n policy.return_range, self.policy_params['return_range'])\n self.assertEqual(policy.layer_norm, self.policy_params['layer_norm'])\n self.assertEqual(policy.reuse, self.policy_params['reuse'])\n self.assertListEqual(policy.layers, [300, 300])\n self.assertEqual(policy.activ, self.policy_params['act_fun'])\n\n # Check that all trainable variables have been created in the\n # TensorFlow graph.\n self.assertListEqual(\n sorted([var.name for var in get_trainable_vars()]),\n ['model/pi/fc0/bias:0',\n 'model/pi/fc0/kernel:0',\n 'model/pi/fc1/bias:0',\n 'model/pi/fc1/kernel:0',\n 'model/pi/pi/bias:0',\n 'model/pi/pi/kernel:0',\n 'model/qf/fc0/bias:0',\n 'model/qf/fc0/kernel:0',\n 'model/qf/fc1/bias:0',\n 'model/qf/fc1/kernel:0',\n 'model/qf/qf_output/bias:0',\n 'model/qf/qf_output/kernel:0',\n 'target/pi/fc0/bias:0',\n 'target/pi/fc0/kernel:0',\n 'target/pi/fc1/bias:0',\n 'target/pi/fc1/kernel:0',\n 'target/pi/pi/bias:0',\n 'target/pi/pi/kernel:0',\n 'target/qf/fc0/bias:0',\n 'target/qf/fc0/kernel:0',\n 'target/qf/fc1/bias:0',\n 'target/qf/fc1/kernel:0',\n 'target/qf/qf_output/bias:0',\n 'target/qf/qf_output/kernel:0']\n )\n\n # Check that all the input placeholders were properly created.\n self.assertEqual(\n tuple(v.__int__() for v in policy.critic_target.shape),\n (None, 1))\n self.assertEqual(\n tuple(v.__int__() for v in policy.terminals1.shape),\n (None, 1))\n self.assertEqual(\n tuple(v.__int__() for v in 
policy.rew_ph.shape),\n (None, 1))\n self.assertEqual(\n tuple(v.__int__() for v in policy.action_ph.shape),\n (None, self.policy_params['ac_space'].shape[0]))\n self.assertEqual(\n tuple(v.__int__() for v in policy.obs_ph.shape),\n (None, self.policy_params['ob_space'].shape[0] +\n self.policy_params['co_space'].shape[0]))\n self.assertEqual(\n tuple(v.__int__() for v in policy.obs1_ph.shape),\n (None, self.policy_params['ob_space'].shape[0] +\n self.policy_params['co_space'].shape[0]))\n\n # Clear the graph.\n tf.reset_default_graph()\n\n def test_normalization(self):\n \"\"\"Test the normalizers for the observations and reward.\"\"\"\n pass\n\n def test_optimization(self):\n \"\"\"Test the losses and gradient update steps.\"\"\"\n pass\n\n def test_update_target(self):\n \"\"\"Test the soft and init target updates.\"\"\"\n pass\n\n def test_store_transition(self):\n \"\"\"Test the `store_transition` method.\"\"\"\n pass\n\n\nclass TestGoalDirectedPolicy(unittest.TestCase):\n \"\"\"Test the GoalDirectedPolicy object in hbaselines/hiro/policy.py.\"\"\"\n\n def setUp(self):\n self.policy_params = {\n 'sess': tf.Session(),\n 'ac_space': Box(low=-1, high=1, shape=(1,), dtype=np.float32),\n 'ob_space': Box(low=-2, high=2, shape=(2,), dtype=np.float32),\n 'co_space': Box(low=-3, high=3, shape=(3,), dtype=np.float32),\n 'buffer_size': 1e6,\n 'batch_size': 100,\n 'actor_lr': 1e-3,\n 'critic_lr': 1e-4,\n 'clip_norm': 0,\n 'critic_l2_reg': 0,\n 'verbose': 2,\n 'tau': 0.005,\n 'gamma': 0.001,\n 'normalize_observations': False,\n 'observation_range': (-5, 5),\n 'normalize_returns': False,\n 'return_range': (-5, 5),\n 'layer_norm': False,\n 'reuse': False,\n 'layers': None,\n 'act_fun': tf.nn.relu,\n 'meta_period': 10,\n 'relative_goals': False,\n 'off_policy_corrections': False,\n 'use_fingerprints': False,\n 'centralized_value_functions': False,\n 'connected_gradients': False\n }\n\n def tearDown(self):\n self.policy_params['sess'].close()\n del self.policy_params\n\n # 
Clear the graph.\n tf.reset_default_graph()\n\n def test_init(self):\n \"\"\"Validate that the graph and variables are initialized properly.\"\"\"\n policy = GoalDirectedPolicy(**self.policy_params)\n\n # Check that the abstract class has all the required attributes.\n self.assertEqual(policy.meta_period,\n self.policy_params['meta_period'])\n self.assertEqual(policy.relative_goals,\n self.policy_params['relative_goals'])\n self.assertEqual(policy.off_policy_corrections,\n self.policy_params['off_policy_corrections'])\n self.assertEqual(policy.use_fingerprints,\n self.policy_params['use_fingerprints'])\n self.assertEqual(policy.centralized_value_functions,\n self.policy_params['centralized_value_functions'])\n self.assertEqual(policy.connected_gradients,\n self.policy_params['connected_gradients'])\n\n # Check that all trainable variables have been created in the\n # TensorFlow graph.\n self.assertListEqual(\n sorted([var.name for var in get_trainable_vars()]),\n ['Manager/model/pi/fc0/bias:0',\n 'Manager/model/pi/fc0/kernel:0',\n 'Manager/model/pi/fc1/bias:0',\n 'Manager/model/pi/fc1/kernel:0',\n 'Manager/model/pi/pi/bias:0',\n 'Manager/model/pi/pi/kernel:0',\n 'Manager/model/qf/fc0/bias:0',\n 'Manager/model/qf/fc0/kernel:0',\n 'Manager/model/qf/fc1/bias:0',\n 'Manager/model/qf/fc1/kernel:0',\n 'Manager/model/qf/qf_output/bias:0',\n 'Manager/model/qf/qf_output/kernel:0',\n 'Manager/target/pi/fc0/bias:0',\n 'Manager/target/pi/fc0/kernel:0',\n 'Manager/target/pi/fc1/bias:0',\n 'Manager/target/pi/fc1/kernel:0',\n 'Manager/target/pi/pi/bias:0',\n 'Manager/target/pi/pi/kernel:0',\n 'Manager/target/qf/fc0/bias:0',\n 'Manager/target/qf/fc0/kernel:0',\n 'Manager/target/qf/fc1/bias:0',\n 'Manager/target/qf/fc1/kernel:0',\n 'Manager/target/qf/qf_output/bias:0',\n 'Manager/target/qf/qf_output/kernel:0',\n 'Worker/model/pi/fc0/bias:0',\n 'Worker/model/pi/fc0/kernel:0',\n 'Worker/model/pi/fc1/bias:0',\n 'Worker/model/pi/fc1/kernel:0',\n 'Worker/model/pi/pi/bias:0',\n 
'Worker/model/pi/pi/kernel:0',\n 'Worker/model/qf/fc0/bias:0',\n 'Worker/model/qf/fc0/kernel:0',\n 'Worker/model/qf/fc1/bias:0',\n 'Worker/model/qf/fc1/kernel:0',\n 'Worker/model/qf/qf_output/bias:0',\n 'Worker/model/qf/qf_output/kernel:0',\n 'Worker/target/pi/fc0/bias:0',\n 'Worker/target/pi/fc0/kernel:0',\n 'Worker/target/pi/fc1/bias:0',\n 'Worker/target/pi/fc1/kernel:0',\n 'Worker/target/pi/pi/bias:0',\n 'Worker/target/pi/pi/kernel:0',\n 'Worker/target/qf/fc0/bias:0',\n 'Worker/target/qf/fc0/kernel:0',\n 'Worker/target/qf/fc1/bias:0',\n 'Worker/target/qf/fc1/kernel:0',\n 'Worker/target/qf/qf_output/bias:0',\n 'Worker/target/qf/qf_output/kernel:0']\n )\n\n # Test the worker_reward function.\n self.assertAlmostEqual(\n policy.worker_reward(\n states=np.array([1, 2, 3]),\n goals=np.array([3, 2, 1]),\n next_states=np.array([0, 0, 0])\n ),\n -3.7416573867873044\n )\n\n # Clear the graph.\n tf.reset_default_graph()\n\n def test_store_transition(self):\n \"\"\"Test the `store_transition` method.\"\"\"\n pass\n\n def test_meta_period(self):\n \"\"\"Verify that the rate of the Manager is dictated by meta_period.\"\"\"\n # Test for a meta period of 5.\n policy_params = self.policy_params.copy()\n policy_params['meta_period'] = 5\n policy = GoalDirectedPolicy(**policy_params)\n\n # FIXME: add test\n del policy\n\n # Clear the graph.\n tf.reset_default_graph()\n\n # Test for a meta period of 10.\n policy_params = self.policy_params.copy()\n policy_params['meta_period'] = 10\n policy = GoalDirectedPolicy(**policy_params)\n\n # FIXME: add test\n del policy\n\n def test_relative_goals(self):\n \"\"\"Validate the functionality of relative goals.\n\n This should affect the worker reward function as well as transformation\n from relative goals to absolute goals.\n \"\"\"\n policy_params = self.policy_params.copy()\n policy_params[\"relative_goals\"] = True\n policy = GoalDirectedPolicy(**policy_params)\n\n # Test the goal_xsition_model method.\n states = np.array([1, 2, 3])\n 
goals = np.array([4, 5, 6])\n next_states = np.array([7, 8, 9])\n new_goal = policy.goal_xsition_model(states, goals, next_states)\n np.testing.assert_array_almost_equal(new_goal, np.array([-2, -1, 0]))\n\n # Test the updated reward function. FIXME\n\n def test_off_policy_corrections(self):\n \"\"\"Validate the functionality of the off-policy corrections.\"\"\"\n # Set a random variable seed.\n np.random.seed(1)\n random.seed(1)\n tf.set_random_seed(1)\n\n policy_params = self.policy_params.copy()\n policy_params[\"relative_goals\"] = True\n policy_params[\"off_policy_corrections\"] = True\n policy = GoalDirectedPolicy(**policy_params)\n\n # Initialize the variables of the policy.\n policy.sess.run(tf.global_variables_initializer())\n\n # Test the _sample method.\n states = np.array(\n [[1, 2],\n [3, 4],\n [5, 6],\n [7, 8],\n [9, 10],\n [11, 12],\n [13, 14],\n [15, 16],\n [17, 18],\n [19, 20]]\n )\n next_states = -states\n num_samples = 10\n orig_goals = np.array(\n [[1, 1],\n [1, 1],\n [0, 0],\n [1, 1],\n [1, 1],\n [0, 0],\n [1, 1],\n [1, 1],\n [0, 0],\n [1, 1]]\n )\n samples = policy._sample(states, next_states, num_samples, orig_goals)\n\n # Check that the shape is correct.\n self.assertTupleEqual(\n samples.shape, (states.shape[0], states.shape[1], num_samples))\n\n # Check the last few elements are the deterministic components that\n # they are expected to be.\n np.testing.assert_array_almost_equal(\n samples[:, :, -2:].reshape(states.shape[0] * states.shape[1], 2).T,\n np.vstack(\n [np.array([-2] * states.shape[0] * states.shape[1]),\n orig_goals.flatten()]\n )\n )\n\n # Test the _log_probs method.\n manager_obs = np.array([[1, 2], [3, -1], [0, 0]])\n worker_obs = np.array([[1, 1], [2, 2], [3, 3]])\n actions = np.array([[1], [-1], [0]])\n goals = np.array([[0, 0], [-1, -1], [-2, -2]])\n error = policy._log_probs(manager_obs, worker_obs, actions, goals)\n np.testing.assert_array_almost_equal(\n error, [-3.912313e-03, -3.885057e-03, -7.010017e-07])\n\n # Test 
the _sample_best_meta_action method. FIXME\n\n def test_fingerprints(self):\n \"\"\"Validate the functionality of the fingerprints.\n\n This feature should add a fingerprint dimension to the manager and\n worker observation spaces, but NOT the context space of the worker or\n the action space of the manager. The worker reward function should also\n be ignoring the fingerprint elements during its computation. The\n fingerprint elements are passed by the algorithm, and tested under\n test_algorithm.py\n \"\"\"\n # Create the policy.\n policy_params = self.policy_params.copy()\n policy_params['use_fingerprints'] = True\n policy = GoalDirectedPolicy(**policy_params)\n\n # Test the observation spaces of the manager and worker, as well as the\n # context space of the worker and action space of the manager.\n self.assertTupleEqual(policy.manager.ob_space.shape, (3,))\n self.assertTupleEqual(policy.manager.ac_space.shape, (2,))\n self.assertTupleEqual(policy.worker.ob_space.shape, (3,))\n self.assertTupleEqual(policy.worker.co_space.shape, (2,))\n\n # Test worker_reward method within the policy.\n self.assertAlmostEqual(\n policy.worker_reward(states=np.array([1, 2, 3]),\n goals=np.array([0, 0]),\n next_states=np.array([1, 2, 3])),\n -np.sqrt(1**2 + 2**2)\n )\n\n def test_centralized_value_functions(self):\n \"\"\"Validate the functionality of the centralized value function.\n\n TODO: describe content\n \"\"\"\n pass\n\n def test_connected_gradients(self):\n \"\"\"Validate the functionality of the connected-gradients feature.\n\n TODO: describe content\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/fast_tests/test_policy.py","file_name":"test_policy.py","file_ext":"py","file_size_in_byte":17848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"441057770","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 11 23:22:00 2018\r\n\r\n@author: Customer\r\n\"\"\"\r\nimport 
pandas as pd\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\ndata=pd.read_csv('E:\\\\processed_tweets.csv')\r\n#data.loc[:,'Class']='Neutral'\r\ncount_sn=0\r\ncount_sp=0\r\ncount_wn=0\r\ncount_wp=0\r\ncount_n=0\r\n\r\nfor i in range(0,len(data)):\r\n sie=SentimentIntensityAnalyzer()\r\n diff=sie.polarity_scores(data.iloc[:,0][i])['compound']\r\n #diff=sie.polarity_scores(data.iloc[:,0][i])['pos']-sie.polarity_scores(data.iloc[:,0][i])['neg']\r\n #print(diff)\r\n if diff<-0.2:\r\n #data.iloc[:,-1][i]='Strongly Negative'\r\n count_sn+=1\r\n elif diff>0.2:\r\n #data.iloc[:,-1][i]='Strongly Positive'\r\n count_sp+=1\r\n elif diff<0.2 and diff>0.0:\r\n #data.iloc[:,-1][i]='Weakly Positive'\r\n count_wp+=1\r\n elif diff>-0.2 and diff<0.0:\r\n #data.iloc[:,-1][i]='Weakly Negative'\r\n count_wn+=1\r\n else:\r\n #data.iloc[:,-1][i]='Neutral'\r\n count_n+=1\r\n\r\nsn_perc=count_sn/len(data)*100\r\nsp_perc=count_sp/len(data)*100\r\nwn_perc=count_wn/len(data)*100\r\nwp_perc=count_wp/len(data)*100\r\nn_perc=count_n/len(data)*100\r\ntot_pos=sp_perc+wp_perc\r\ntot_neg=wn_perc+sn_perc\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nlabels = ['Strongly Negative','Strongly Positive','Weakly Positive','Weakly Negative','Neutral']\r\nsizes = [count_sn,count_sp,count_wp,count_wn,count_n]\r\ncolors=['red','yellowgreen','lightskyblue','gold','green']\r\nplt.pie(sizes, labels=labels,colors=colors)\r\nplt.title('Analysis of PTCL related tweets')\r\nplt.savefig('E:\\\\PTCL.png')\r\n\r\nprint(\"Detailed Report\")\r\nprint(\"Percentage of tweets that are Strongly Negative : \"+str(sn_perc)+'%')\r\nprint(\"Percentage of tweets that are Strongly Positive : \"+str(sp_perc)+'%')\r\nprint(\"Percentage of tweets that are Weakly Negative : \"+str(wn_perc)+'%')\r\nprint(\"Percentage of tweets that are Weakly Positive : \"+str(wp_perc)+'%')\r\nprint(\"Percentage of tweets that are Neutral : \"+str(n_perc)+'%')\r\nprint(\"Percentage of tweets that are Positive : 
\"+str(tot_pos)+'%')\r\nprint(\"Percentage of tweets that are Negative : \"+str(tot_neg)+'%')\r\n\r\n\r\n \r\n ","sub_path":"Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"244464477","text":"import asyncio\nfrom urllib.parse import quote_plus, quote\nfrom aiohttp.helpers import BasicAuth\nimport json\nimport re\nimport time\n\nfrom .tool import xml, jsonxml, htmlparse\n\n\n@asyncio.coroutine\ndef arxiv(arg, send):\n print('arxiv')\n\n arg.update({\n 'n': arg['n'] or '5',\n 'url': 'http://export.arxiv.org/api/query',\n 'xpath': arg['xpath'] or '//ns:entry',\n })\n params = {\n 'search_query': arg['query'],\n 'max_results': arg['n'],\n 'sortBy': 'lastUpdatedDate',\n 'sortOrder': 'descending',\n }\n field = [('./ns:id', 'text', '{}'), ('./ns:title', 'text', '{}')]\n def format(l):\n def f(e):\n return '[\\\\x0302{0}\\\\x0f] {1}'.format(e[0][21:], e[1].replace('\\n', ' '))\n return map(f, l)\n\n return (yield from xml(arg, [], send, params=params, field=field, format=format))\n\n\n@asyncio.coroutine\ndef wolfram(arg, send):\n print('wolfram')\n\n arg.update({\n 'n': arg['n'] or '2',\n 'url': 'http://api.wolframalpha.com/v2/query',\n 'xpath': arg['xpath'] or '//pod',\n })\n params = {\n 'appid': arg['meta']['bot'].key['wolfram'],\n 'units': 'metric',\n 'format': 'plaintext',\n 'input': arg['query'],\n }\n field = [('.', 'title', '\\\\x0300{}:\\\\x0f'), ('.//plaintext', 'text', '{}')]\n def format(l):\n #r = re.compile(r\"(? 
0:\n #print(e)\n yield from self.request(e, get)\n pos = self.getpos(e, get.len)\n e = e[pos:]\n return get.l\n\n @asyncio.coroutine\n def __call__(self, pinyin, send):\n print('im')\n\n l = []\n pos = 0\n for m in self.comment.finditer(pinyin):\n l.extend(self.sep.split(pinyin[pos:m.start()]))\n #l.append(\"'\" + m.group()[2:-2])\n l.append(m.group()[1:-2])\n pos = m.end()\n l.extend(self.sep.split(pinyin[pos:]))\n #l = self.sep.split(pinyin)\n print(l)\n\n coros = [self.getitem(e) for e in l]\n lines = yield from asyncio.gather(*coros)\n line = ''.join(lines) if lines else 'Σ(っ °Д °;)っ 怎么什么都没有呀'\n\n return send(line)\n\n\nclass BIM(IM):\n\n def __init__(self):\n IM.__init__(self)\n self.arg = {\n 'n': '1',\n 'url': 'http://olime.baidu.com/py',\n 'xpath': '//result/item[1]/item[child::item]',\n }\n self.params = {\n 'inputtype': 'py',\n 'bg': '0',\n 'ed': '5',\n 'result': 'hanzi',\n 'resultcoding': 'unicode',\n 'ch_en': '0',\n 'clientinfo': 'web',\n 'version': '1',\n 'input': '',\n }\n self.field = [('./item[1]', 'text', '{}'), ('./item[2]', 'text', '{}')]\n self.format = lambda x: x\n\n @asyncio.coroutine\n def request(self, e, get):\n params = self.params.copy()\n params['input'] = e\n yield from jsonxml(self.arg, [], get, params=params, field=self.field, format=self.format)\n\n def getpos(self, e, l):\n if not (0 < l and l < len(e)):\n return len(e)\n for (i, c) in enumerate(self.letter.finditer(e)):\n if i == l:\n return c.start()\n return len(e)\n\n @asyncio.coroutine\n def __call__(self, arg, send):\n yield from IM.__call__(self, arg['pinyin'], send)\n\nbim = BIM()\n\n\nclass GIM(IM):\n\n def __init__(self):\n IM.__init__(self)\n self.arg = {\n 'n': '1',\n 'url': 'https://inputtools.google.com/request',\n # is always well formed?\n 'xpath': '/root/item[2]/item[1]',\n }\n self.params = {\n 'itc': 'zh-t-i0-pinyin',\n 'num': '1',\n 'cp': '0',\n 'cs': '0',\n 'ie': 'utf-8',\n 'oe': 'utf-8',\n 'app': 'demopage',\n 'text': '',\n }\n self.field = [\n 
('./item[2]/item[1]', 'text', '{}'),\n ('./item[3]/item[1]', 'text', '{}'),\n ]\n self.format = lambda x: x\n\n @asyncio.coroutine\n def request(self, e, get):\n params = self.params.copy()\n params['text'] = e\n yield from jsonxml(self.arg, [], get, params=params, field=self.field, format=self.format)\n\n def getpos(self, e, l):\n if not (0 < l and l < len(e)):\n return len(e)\n return l\n\n @asyncio.coroutine\n def __call__(self, arg, send):\n yield from IM.__call__(self, arg['pinyin'], send)\n\ngim = GIM()\n\n# qq\n\n#@asyncio.coroutine\n#def qim(arg, send):\n# print('qim')\n# pinyin = arg['pinyin']\n# url = 'http://ime.qq.com/fcgi-bin/getword?q={0}'\n# xpath = '//result/item[1]/item'\n# field = [('./item[1]', 'text', '{}'), ('./item[2]', 'text', '{}')]\n#\n# class qimGet:\n# def __init__(self):\n# self.l = ''\n# self.len = 0\n# def __call__(self, l, n=-1, **kw):\n# if n < 0:\n# self.l += l\n# else:\n# l = list(l)[0]\n# self.l += l[0]\n# self.len = int(l[1])\n#\n# return (yield from im(pinyin, url, xpath, field, qimGet, send))\n\n\n# microsoft\n\n#class Microsoft:\n# class Get:\n# def __init__(self):\n# self.key = ''\n# self.expire = 0\n# def __call__(self, l, n=-1, **kw):\n# e = list(l)[0]\n# self.key = e[0]\n# self.expire = int(e[1])\n# def __init__(self, client, scope, type):\n# self.arg = {\n# 'url': 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13',\n# 'xpath': '/root',\n# }\n# self.field = [('./access_token', 'text', '{}'), ('./expires_in', 'text', '{}')]\n# self.format = lambda x: x\n# self.headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n# self.data = 'client_id={0}&client_secret={1}&scope={2}&grant_type={3}'.format(quote_plus(client[0]), quote_plus(client[1]), quote_plus(scope), quote_plus(type))\n# self.key = ''\n# self.time = 0\n# self.expire = 0\n# @asyncio.coroutine\n# def getkey(self):\n# t = time.time()\n# if (t - self.time) > self.expire:\n# yield from self.renew()\n# return self.key\n# @asyncio.coroutine\n# def 
renew(self):\n# get = Microsoft.Get()\n# yield from jsonxml(self.arg, [], get, method='POST', data=self.data, headers=self.headers, field=self.field, format=self.format)\n# self.time = time.time()\n# self.expire = get.expire - 60\n# self.key = get.key\n\n@asyncio.coroutine\ndef bing(arg, lines, send):\n print('bing')\n\n arg.update({\n 'n': arg['n'] or '1',\n 'url': 'https://api.datamarket.azure.com/Bing/Search/v1/Composite',\n 'xpath': '//d/results/item/Web/item',\n })\n params = {\n '$format': 'json',\n #'Sources': \"'web+image+video+news+spell'\",\n 'Sources': \"'web'\",\n 'Adult': \"'Off'\",\n 'Market': \"'en-US'\",\n 'Query': \"'{0}'\".format(' '.join(lines) or arg['query'] or ''),\n }\n key = arg['meta']['bot'].key['microsoft']\n auth = BasicAuth(key, key)\n field = [\n ('./Title', 'text', '{}'),\n ('./Url', 'text', '[\\\\x0302 {} \\\\x0f]'),\n ('./Description', 'text', '{}'),\n ]\n\n return (yield from jsonxml(arg, [], send, params=params, auth=auth, field=field))\n\n#class Mtran(Microsoft):\n# def __init__(self):\n# super().__init__(arg['meta']['bot'].key['microsoft'], 'http://api.microsofttranslator.com', 'client_credentials')\n# @asyncio.coroutine\n# def __call__(self, arg, send):\n# print('mtran')\n# f = arg['from'] or ''\n# t = arg['to'] or 'zh-CHS'\n# url = 'http://api.microsofttranslator.com/V2/Http.svc/Translate?format=json&text={0}&from={1}&to={2}'.format(quote_plus(arg['text']), quote_plus(f), quote_plus(t))\n#\n# key = yield from self.getkey()\n# headers = {'Authorization': 'Bearer ' + key}\n#\n# arg['n'] = 1\n# arg['url'] = url\n# arg['xpath'] = '/ns:string'\n#\n# return (yield from xml(arg, [], send, headers=headers))\n#\n#mtran = Mtran()\n\n\n@asyncio.coroutine\ndef mtran(arg, lines, send):\n print('mtran')\n\n arg.update({\n 'n': '1',\n 'url': 'https://api.datamarket.azure.com/Bing/MicrosoftTranslator/v1/Translate',\n 'xpath': '//d/results/item',\n })\n params = {\n '$format': 'json',\n 'To': \"'{0}'\".format(arg['to'] or 'zh-CHS'),\n 'Text': 
\"'{0}'\".format(' '.join(lines) or arg['text'] or ''),\n }\n if arg['from']:\n params['From'] = \"'{0}'\".format(arg['from'])\n key = arg['meta']['bot'].key['microsoft']\n auth = BasicAuth(key, key)\n field = [('./Text', 'text', '{}')]\n\n return (yield from jsonxml(arg, [], send, params=params, auth=auth, field=field))\n\n\n@asyncio.coroutine\ndef couplet(arg, send):\n print('couplet')\n\n shanglian = arg['shanglian']\n if len(shanglian) > 10:\n send('最多十个汉字喔')\n return\n\n arg.update({\n 'n': arg['n'] or '1',\n 'url': 'http://couplet.msra.cn/app/CoupletsWS_V2.asmx/GetXiaLian',\n 'xpath': '//d/XialianSystemGeneratedSets/item/XialianCandidates/item',\n })\n data = json.dumps({\n 'shanglian': shanglian,\n 'xialianLocker': '0' * len(shanglian),\n 'isUpdate': False,\n })\n headers = {'Content-Type': 'application/json'}\n\n return (yield from jsonxml(arg, [], send, method='POST', data=data, headers=headers))\n\n\n@asyncio.coroutine\ndef mice(arg, send):\n print('mice')\n url = 'http://www.msxiaoice.com/v2/context'\n\n input = arg['input']\n\n arg['n'] = '1'\n arg['url'] = url\n arg['xpath'] = '//d/XialianSystemGeneratedSets/item/XialianCandidates/item'\n\n data = {\n 'requirement': 1,\n 'input': input,\n 'args': '',\n }\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n\n return (yield from jsonxml(arg, [], send, method='POST', data=data, headers=headers))\n\n\n# google\n\n@asyncio.coroutine\ndef google(arg, lines, send):\n print('google')\n\n #type = arg.get('type') or 'web'\n #url = 'https://www.googleapis.com/customsearch/v1?key={0}&cx={1}&searchType={2}&q={3}'.format(quote_plus(key), quote_plus(cx), quote_plus(type), quote_plus(arg['query']))\n arg.update({\n 'n': arg['n'] or '1',\n 'url': 'https://www.googleapis.com/customsearch/v1',\n 'xpath': '//items/item',\n })\n params = {\n 'key': arg['meta']['bot'].key['google'],\n 'cx': arg['meta']['bot'].key['googleseid'],\n 'q': ' '.join(lines) or arg['query'] or '',\n }\n field = [\n ('./title', 
'text', '{}'),\n ('./link', 'text', '[\\\\x0302 {} \\\\x0f]'),\n ('./snippet', 'text', '{}'),\n ]\n\n return (yield from jsonxml(arg, [], lambda m, **kw: send(m, newline=' ', **kw), params=params, field=field))\n\n\n@asyncio.coroutine\ndef gtran(arg, lines, send):\n print('google')\n\n arg.update({\n 'n': '1',\n 'url': 'https://translate.google.com/translate_a/single?dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&dt=at',\n 'xpath': '/root/item/item/item',\n })\n params = {\n 'client': 't',\n 'ie': 'UTF-8',\n 'oe': 'UTF-8',\n 'sl': arg['from'] or 'auto',\n 'tl': arg['to'] or 'zh-CN',\n 'hl': 'en',\n 'q': ' '.join(lines) or arg['text'] or '',\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',\n }\n field = [('.', 'text', '{}')]\n\n return (yield from jsonxml(arg, [], send, params=params, field=field, headers=headers))\n\n\n@asyncio.coroutine\ndef dictg(arg, send):\n print('dictg')\n\n arg.update({\n 'url': 'https://glosbe.com/gapi/translate',\n 'xpath': '//tuc/item/meanings/item/text',\n })\n params = {\n 'format': 'json',\n 'from': arg['from'],\n 'dest': arg['to'],\n 'phrase': arg['text'],\n }\n\n return (yield from jsonxml(arg, [], send, params=params))\n\n\n@asyncio.coroutine\ndef cdict(arg, send):\n print('cdict')\n\n arg.update({\n 'url': 'https://api.collinsdictionary.com/api/v1/dictionaries/{0}/search/first/'.format(arg['dict'] or 'english'),\n 'xpath': '//entryContent',\n })\n params = {'format': 'html', 'q': arg['text']}\n headers = {'accessKey': arg['meta']['bot'].key['collins']}\n transform = lambda l: htmlparse(l[0].text).xpath('//span[@class = \"pos\"] | //span[@class = \"def\"]')\n\n return (yield from jsonxml(arg, [], send, params=params, transform=transform, headers=headers))\n\n\n@asyncio.coroutine\ndef urban(arg, send):\n print('urban')\n\n # unofficial\n arg.update({\n 'n': arg['n'] or '1',\n 'url': 
'https://mashape-community-urban-dictionary.p.mashape.com/define',\n 'xpath': '//list/item',\n })\n params = {'term': arg['text']}\n headers = {'X-Mashape-Key': arg['meta']['bot'].key['mashape']}\n field = [\n ('./definition', 'text', '{}'),\n ('./permalink', 'text', '[\\\\x0302 {} \\\\x0f]'),\n ]\n\n return (yield from jsonxml(arg, [], send, params=params, field=field, headers=headers))\n\n\n@asyncio.coroutine\ndef breezo(arg, send):\n print('breezo')\n\n arg.update({\n 'n': '1',\n 'url': 'http://api-beta.breezometer.com/baqi/',\n 'xpath': '/root',\n })\n params = {'key': arg['meta']['bot'].key['breezo'], 'location': arg['city']}\n field = [('./' + x, 'text', '{}') for x in ['breezometer_description', 'breezometer_aqi', 'dominant_pollutant_text/main', 'random_recommendations/health']]\n\n return (yield from jsonxml(arg, [], send, params=params, field=field))\n\n\n@asyncio.coroutine\ndef speak(arg, send):\n print('speak')\n\n arg.update({\n 'n': '1',\n 'url': 'http://howtospeak.org:443/api/e2c',\n 'xpath': '//chinglish',\n })\n params = {\n 'user_key': arg['meta']['bot'].key['howtospeak'],\n 'notrans': '0',\n 'text': arg['text'],\n }\n\n return (yield from jsonxml(arg, [], send, params=params))\n\n\n@asyncio.coroutine\ndef watson(arg, send):\n pass\n\nhelp = [\n ('ip' , 'ip '),\n #('whois' , 'whois '),\n ('aqi' , 'aqi [all]'),\n ('bip' , 'bip '),\n ('bweather' , 'bweather '),\n ('btran' , 'btran [source lang:target lang] (text)'),\n ('bim' , 'bim (a valid pinyin starts with a lower case letter, followed by lower case letters or \\'; use \\'\\' in pair for comment)'),\n ('gim' , 'gim (a valid pinyin starts with a lower case letter, followed by lower case letters or \\'; use \\'\\' in pair for comment)'),\n #('bing' , 'bing [#max number][+offset]'),\n ('bing' , 'bing (query) [#max number][+offset]'),\n #('bing' , 'bing [#max number][+offset] (query)'),\n ('mtran' , 'mtran [source lang:target lang] (text)'),\n ('couplet' , 'couplet [#max number][+offset] -- 公门桃李争荣日 
法国荷兰比利时'),\n #('google' , 'google [#max number][+offset]'),\n ('google' , 'google (query) [#max number][+offset]'),\n #('google' , 'google [#max number][+offset] (query)'),\n ('gtran' , 'gtran [source lang:target lang] (text)'),\n ('urban' , 'urban [#max number][+offset]'),\n ('speak' , 'speak '),\n ('wolfram' , 'wolfram [#max number][+offset]'),\n]\n\nfunc = [\n (ip , r\"ip\\s+(?P.+)\"),\n (whois , r\"whois\\s+(?P.+)\"),\n (aqi , r\"aqi\\s+(?P.+?)(\\s+(?Pall))?\"),\n (bip , r\"bip\\s+(?P.+)\"),\n (bid , r\"bid\\s+(?P.+)\"),\n (bphone , r\"bphone\\s+(?P.+)\"),\n (baqi , r\"baqi\\s+(?P.+)\"),\n (bweather , r\"bweather\\s+(?P.+)\"),\n #(btran , r\"btran(\\s+(?!:\\s)(?P\\S+)?:(?P\\S+)?)?\\s+(?P.+)\"),\n (btran , r\"btran(\\s+(?!:\\s)(?P\\S+)?:(?P\\S+)?)?(\\s+(?P.+))?\"),\n #(bim , r\"bim\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n (bim , r\"bim\\s+(?P.+)\"),\n (gim , r\"gim\\s+(?P.+)\"),\n #(qim , r\"qim\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(bing , r\"bing(\\s+type:(?P\\S+))?\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n (bing , r\"bing(?:\\s+(?![#\\+])(?P.+?))?(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(bing , r\"bing(\\s+type:(?P\\S+))?(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?(\\s+(?P.+))?\"),\n #(mtran , r\"mtran(\\s+(?!:\\s)(?P\\S+)?:(?P\\S+)?)?\\s+(?P.+)\"),\n (mtran , r\"mtran(\\s+(?!:\\s)(?P\\S+)?:(?P\\S+)?)?(\\s+(?P.+))?\"),\n (couplet , r\"couplet\\s+(?P\\S+)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(mice , r\"mice\\s+(?P.+)\"),\n #(google , r\"google(\\s+type:(?P(web|image)))?\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(google , r\"google\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n (google , r\"google(?:\\s+(?![#\\+])(?P.+?))?(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(google , r\"google(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?(\\s+(?P.+))?\"),\n (gtran , r\"gtran(\\s+(?!:\\s)(?P\\S+)?:(?P\\S+)?)?(\\s+(?P.+))?\"),\n (dictg , r\"dict\\s+(?P\\S+):(?P\\S+)\\s+(?P.+?)(\\s+#(?P\\d+))?\"),\n (cdict , 
r\"collins(\\s+d:(?P\\S+))?\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n (breezo , r\"breezo\\s+(?P.+)\"),\n (speak , r\"speak\\s+(?P.+)\"),\n (urban , r\"urban\\s+(?P.+?)(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n #(arxiv , r\"arxiv\\s+(?P.+?)(\\s+xpath:(?P.+?))?(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n (wolfram , r\"wolfram\\s+(?P.+?)(\\s+xpath:(?P.+?))?(\\s+(#(?P\\d+))?(\\+(?P\\d+))?)?\"),\n]\n","sub_path":"modules/commands/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":24731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"23582970","text":"t=int(input())\r\nfor j in range(t):\r\n\ts=input()\r\n\tlns=len(s)\r\n\tc=0\r\n\tfor i in range(0,lns-1):\r\n\t\tif (s[i]!=s[i+1]):\r\n\t\t\tc+=1\r\n\tif s[-1]=='-':\r\n\t\tc=c+1\r\n\tprint(\"Case #\" + str(j+1) +\": \"+str(c))\r\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_bhanodaig_revenge of pancakes.py","file_name":"16_0_2_bhanodaig_revenge of pancakes.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229702814","text":"import sys\nimport logging\nimport argparse\nfrom Queue import Queue, Empty\nfrom bokeh.plotting import figure, output_server, cursession, show, VBox\nimport seaborn as sns\n\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"rtfmri\")\n\n\nfrom rtfmri import ScannerInterface, MotionAnalyzer, setup_exit_handler\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-hostname\", default=\"cnimr\")\n parser.add_argument(\"-port\", default=21, type=int)\n parser.add_argument(\"-username\", default=\"\")\n parser.add_argument(\"-password\", default=\"\")\n parser.add_argument(\"-base_dir\", default=\"/export/home1/sdc_image_pool/images\")\n parser.add_argument(\"-debug\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n 
output_server(\"rtfmri_prototype\")\n\n rot_p = figure(plot_height=250, plot_width=700,\n tools=\"\", title=\"Rotation\")\n\n rot_colors = map(sns.mpl.colors.rgb2hex, sns.color_palette(\"Reds_d\", 3))\n for ax, color in zip(\"xyz\", rot_colors):\n rot_p.line([], [], name=\"rot_\" + ax,\n color=color, line_width=2, legend=ax)\n\n trans_p = figure(plot_height=250, plot_width=700,\n tools=\"\", title=\"Translation\")\n\n trans_colors = map(sns.mpl.colors.rgb2hex, sns.color_palette(\"Blues_d\", 3))\n for ax, color in zip(\"xyz\", trans_colors):\n trans_p.line([], [], name=\"trans_\" + ax,\n color=color, line_width=2, legend=ax)\n\n rms_p = figure(plot_height=250, plot_width=700,\n tools=\"\", title=\"Displacement\")\n\n rms_colors = map(sns.mpl.colors.rgb2hex, sns.color_palette(\"Greens_d\", 2))\n for kind, color in zip([\"ref\", \"pre\"], rms_colors):\n rms_p.line([], [], name=\"rms_\" + kind,\n color=color, line_width=2, legend=kind)\n\n scanner = ScannerInterface(hostname=args.hostname, port=args.port,\n username=args.username, password=args.password,\n base_dir=args.base_dir)\n result_q = Queue()\n rtmotion = MotionAnalyzer(scanner, result_q)\n\n setup_exit_handler(scanner, rtmotion)\n\n scanner.start()\n rtmotion.start()\n\n show(VBox(rot_p, trans_p, rms_p))\n\n while True:\n try:\n result = result_q.get(timeout=1)\n next_x = result[\"vol_number\"]\n\n for fig, kind in zip([rot_p, trans_p], [\"rot\", \"trans\"]):\n for ax in \"xyz\":\n\n ds = fig.select({\"name\": kind + \"_\" + ax})[0].data_source\n\n if result[\"new_acquisition\"]:\n x = [next_x]\n else:\n x = ds.data[\"x\"]\n x.append(next_x)\n ds.data[\"x\"] = x\n\n name = kind + \"_\" + ax\n next_y = result[name]\n if result[\"new_acquisition\"]:\n y = [result[name]]\n else:\n y = ds.data[\"y\"]\n y.append(result[name])\n ds.data[\"y\"] = y\n\n cursession().store_objects(ds)\n\n fig.x_range.end = result[\"ntp\"]\n cursession().store_objects(fig)\n\n for kind in [\"ref\", \"pre\"]:\n\n ds = 
rms_p.select({\"name\": \"rms_\" + kind})[0].data_source\n\n if result[\"new_acquisition\"]:\n x = [next_x]\n else:\n x = ds.data[\"x\"]\n x.append(next_x)\n ds.data[\"x\"] = x\n\n name = \"rms_\" + kind\n next_y = result[name]\n if result[\"new_acquisition\"]:\n y = [result[name]]\n else:\n y = ds.data[\"y\"]\n y.append(result[name])\n ds.data[\"y\"] = y\n\n cursession().store_objects(ds)\n\n rms_p.x_range.end = result[\"ntp\"]\n cursession().store_objects(rms_p)\n\n except Empty:\n pass\n #except:\n # scanner.shutdown()\n # raise\n","sub_path":"interface_prototype.py","file_name":"interface_prototype.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"531380278","text":"\"\"\"\r\nMediasite client class for channel-sepcific actions\r\n\r\nLast modified: May 2018\r\nBy: Dave Bunten\r\n\r\nLicense: MIT - see license.txt\r\n\"\"\"\r\n\r\nimport logging\r\n\r\n\r\nclass channel():\r\n def __init__(self, mediasite, *args, **kwargs):\r\n self.mediasite = mediasite\r\n self.channels = list()\r\n\r\n def get_all_channels(self):\r\n \"\"\"\r\n Gathers all mediasite channels\r\n\r\n returns:\r\n list of mediasite channels\r\n \"\"\"\r\n\r\n logging.info(\"Gathering all channels.\")\r\n\r\n if not self.channels:\r\n channels = list()\r\n current = 0\r\n increment = 100\r\n\r\n next_page = f'?$select=full&$skip={current}&$top={increment}'\r\n while next_page:\r\n result = self.mediasite.api_client.request(\"get\", \"MediasiteChannels\", next_page)\r\n if not self.mediasite.experienced_request_errors(result):\r\n result = result.json()\r\n if 'odata.error' in result:\r\n logging.error(result[\"odata.error\"][\"code\"] + \": \" + result[\"odata.error\"][\"message\"][\"value\"])\r\n else:\r\n data = result.get('value')\r\n channels.extend(data)\r\n next_link = result.get('odata.nextLink')\r\n next_page = next_link.split('?')[-1] if next_link else None\r\n\r\n 
self.mediasite.model.set_channels(channels)\r\n self.channels = channels\r\n\r\n return self.channels\r\n\r\n def get_channels_presentations(self, channel_id):\r\n route = f'MediasiteChannels/(\\'{channel_id}\\')/Presentations'\r\n result = self.mediasite.api_client.request('get', route)\r\n\r\n if not self.mediasite.experienced_request_errors(result):\r\n result = result.json()\r\n if \"odata.error\" in result:\r\n logging.error(result[\"odata.error\"][\"code\"] + \": \" + result[\"odata.error\"][\"message\"][\"value\"] + ' Channel ID : ' + channel_id)\r\n return result\r\n\r\n def enable_channel_downloads(self, channel_id):\r\n \"\"\"\r\n Enables mediasite channel downloads using provided channel ID\r\n\r\n Note: only returns a 204 http code on success\r\n\r\n params:\r\n channel_id: mediasite channel ID to enable downloads on\r\n\r\n returns:\r\n resulting response from the mediasite web api request to enable downloads on the folder\r\n \"\"\"\r\n\r\n logging.info(\"Enabling channel downloads for channel: '\"+channel_id)\r\n\r\n #prepare patch data to be sent to mediasite\r\n patch_data = {\"AllowPresentationDownload\":\"True\"}\r\n\r\n #make the mediasite request using the channel id and the patch data found above to enable downloads\r\n result = self.mediasite.api_client.request(\"patch\", \"MediasiteChannels('\"+channel_id+\"')/Settings\", \"\", patch_data)\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n return result\r\n\r\n def disable_channel_allow_links(self, channel_id):\r\n \"\"\"\r\n Disables mediasite channel links using provided channel ID\r\n\r\n Note: only returns a 204 http code on success\r\n\r\n params:\r\n channel_id: mediasite channel ID to disable links on\r\n\r\n returns:\r\n resulting response from the mediasite web api request\r\n \"\"\"\r\n\r\n logging.info(\"Disabling channel links for channel: '\"+channel_id)\r\n\r\n #prepare patch data to be sent to mediasite\r\n patch_data = 
{\"AllowChannelLinks\":\"False\"}\r\n\r\n #make the mediasite request using the channel id and the patch data found above to enable downloads\r\n result = self.mediasite.api_client.request(\"patch\", \"MediasiteChannels('\"+channel_id+\"')/Settings\", \"\", patch_data)\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n return result\r\n\r\n def add_module_to_channel(self, channel_id, module_guid):\r\n \"\"\"\r\n Add mediasite module to channel by channel id and module guid\r\n\r\n params:\r\n channel_id: mediasite channel id which will have the module added\r\n module_guid: mediasite module GUID (not to be confused with a module ID)\r\n\r\n returns:\r\n resulting response from the mediasite web api request\r\n \"\"\"\r\n\r\n logging.info(\"Associating channel: \"+channel_id+\" to module: \"+module_guid)\r\n\r\n #prepare patch data to be sent to mediasite\r\n post_data = {\"MediasiteId\":channel_id}\r\n\r\n #make the mediasite request using the channel id and the patch data found above to enable downloads\r\n result = self.mediasite.api_client.request(\"post\", \"Modules('\"+module_guid+\"')/AddAssociation\", \"\", post_data)\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n return result\r\n\r\n def create_channel(self, channel_name, description=\"\", parent_id=None):\r\n \"\"\"\r\n Creates mediasite channel using provided channel name, description, and parent folder id\r\n\r\n params:\r\n channel_name: name which will appear for the channel\r\n description: description which will appear for the channel (beneath name)\r\n folder_id: mediasite folder ID associated with the channel\r\n\r\n returns:\r\n resulting response from the mediasite web api request\r\n \"\"\"\r\n\r\n logging.info(\"Creating channel '\"+channel_name+\"' under parent folder \"+str(parent_id))\r\n\r\n post_data = {\"Name\":channel_name,\r\n \"Description\":description,\r\n \"LimitSearchToChannel\":True\r\n 
}\r\n\r\n if parent_id:\r\n post_data[\"LinkedFolderId\"] = parent_id\r\n\r\n result = self.mediasite.api_client.request(\"post\", \"MediasiteChannels\", \"\", post_data).json()\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n if \"odata.error\" in result:\r\n logging.error(result[\"odata.error\"][\"code\"]+\": \"+result[\"odata.error\"][\"message\"][\"value\"])\r\n\r\n return result\r\n\r\n def delete_channel(self, channel_id):\r\n \"\"\"\r\n Deletes mediasite schedule given schedule guid\r\n\r\n params:\r\n presentation_id: guid of a mediasite schedule\r\n\r\n returns:\r\n resulting response from the mediasite web api request\r\n \"\"\"\r\n\r\n logging.info(\"Deleting Mediasite channel: \"+channel_id)\r\n\r\n #request mediasite folder information on the \"Mediasite Users\" folder\r\n result = self.mediasite.api_client.request(\"delete\", \"MediasiteChannels('\"+channel_id+\"')\", \"\",\"\")\r\n\r\n if self.mediasite.experienced_request_errors(result):\r\n return result\r\n else:\r\n #if there is an error, log it\r\n if \"odata.error\" in result:\r\n logging.error(result[\"odata.error\"][\"code\"]+\": \"+result[\"odata.error\"][\"message\"][\"value\"])\r\n\r\n return result\r\n","sub_path":"assets/mediasite/modules/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"437101661","text":"\nimport uuid\nimport logging\nfrom datetime import datetime as dt\n\nfrom ruqs.config import AppConfig\nfrom ruqs.resources.aws import QueueClient\nfrom ruqs.manifests.writer import ManifestWriter\n\n\napp_config = AppConfig()\n\n\ndef build_staged_upload_job_message(job_id, table_name, manifest_s3_uri):\n return dict(\n stage_upload_job_id=job_id,\n table_name=table_name,\n manifest_s3_uri=manifest_s3_uri\n )\n\n\ndef stage_upload(\n source_data_queue_name=app_config.raw_data_queue_configs[0]['queue_name'],\n 
upload_job_queue_name=app_config.upload_job_queue_name,\n s3_object_router=None,\n archive_bucket_name=app_config.s3_archive_bucket,\n archive_manifest_key_prefix=app_config.s3_archive_manifest_key_prefix,\n get_manifest_writer=ManifestWriter,\n logger=logging.getLogger(__name__)):\n\n job_id = str(uuid.uuid4())\n logger.info('START JOB_ID {} JOB_NAME stage_upload '.format(job_id))\n\n try:\n next_messages = [dict(Id=str(uuid.uuid4()), MessageBody='i am a message at ' + str(dt.utcnow()))]\n if dt.utcnow().second % 2:\n next_messages.append(dict(next_messages[0], Id=str(uuid.uuid4())))\n\n source_queue = QueueClient(source_data_queue_name)\n\n if source_queue.load_queue().receive_messages().have_messages():\n next_messages = source_queue.get_latest_s3_message_records()\n\n manifest_writer = get_manifest_writer(s3_object_router=s3_object_router)\n manifest_archive_resp_bool = (\n manifest_writer\n .stage_s3_event_records(next_messages)\n .persist_all_manifests(archive_bucket_name, prefix=archive_manifest_key_prefix)\n )\n\n logger.info('All archives were successful: ' + str(manifest_archive_resp_bool))\n logger.info('Created and archived manifests: ' + str(manifest_writer.written_manifest_log))\n logging.info('Failed to archive: ' + str({\n m: lg for m, lg in manifest_writer.written_manifest_log.items() if lg['is_error']\n }))\n\n if manifest_writer.written_manifest_log:\n QueueClient(upload_job_queue_name).load_queue().send_messages([\n build_staged_upload_job_message(job_id, table_name, manifest_resp['manifest_s3_uri'])\n for table_name, manifest_resp in manifest_writer.written_manifest_log.items()\n if not manifest_resp['is_error']\n ])\n else:\n logger.info('No messages written to manifests.')\n\n source_queue.delete_all_retrieved_messages()\n else:\n logger.info('NO NEW MESSAGES IN QUEUE ' + source_data_queue_name)\n except Exception as e:\n logger.error('WTF: ' + 
str(e))\n","sub_path":"web/ruqs/tasks/stageupload.py","file_name":"stageupload.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"539675519","text":"# -*- coding:utf-8 -*-\n# !/usr/bin/Python3\nimport cv2\nimport json\nimport time\nimport requests\nimport os\n\npic_name = 'logo.png'\n\noasis_type_submit_url = 'http://222.185.251.62:22027/api/UpdateLvZhouPicCheck'\nxhs_submit_url = 'http://222.185.251.62:22027/api/UpdateRedBookPicCheck'\n\n# 测试url\nrb_hot_submit_url = 'http://222.185.251.62:22027/api/redbook/submithotcomment'\n\n# 微博下拉账号提交\nweibo_drop_submit = 'http://222.185.251.62:22027/api/PostWeiBoDdaData'\n\n\nclass PicHandle(object):\n def __init__(self):\n self.get_task_url = 'http://222.185.251.62:22027/api/phone/getpichandle'\n # self.get_task_url = 'http://localhost:22027/api/phone/getpichandle'\n self.headers = {'Content-Type': 'application/json'}\n\n # 获取任务\n def get_task(self):\n while True:\n try:\n # 获取图片处理\n req = requests.post(url=self.get_task_url, headers=self.headers)\n if req.status_code == 200:\n res = json.loads(req.text)\n if '任务为空' in res:\n time.sleep(60 * 1)\n continue\n res = json.loads(res)\n print(res)\n req = requests.get(res['PicUrl'], stream=True)\n if req.status_code == 200:\n with open(pic_name, 'wb') as f:\n for chunk in req:\n f.write(chunk)\n img = cv2.imread(pic_name)\n # 坐标(左上,右下)\n cv2.rectangle(img, (res['Left'], res['LeftTop']), (res['Right'], res['RightBottom']), (0, 0, 255),\n 5)\n cv2.imwrite('001_new3.png', img)\n b = PicHandle.pic_to_byte(self, '001_new3.png')\n body = {\n 'Id': res['LogId'],\n 'TaskId': res['TaskId'],\n 'TypeCode': res['TypeCode'],\n 'PicCheck': json.dumps(b),\n 'Message': '成功',\n }\n if res['TypeCode'] == 'JQHOT_0300': # 绿洲SEO\n re = requests.post(oasis_type_submit_url, headers=self.headers, data=json.dumps(body))\n print(re.text)\n if res['TypeCode'] == 'JQHOT_0200': # 小红书SEO\n 
requests.post(xhs_submit_url, headers=self.headers, data=json.dumps(body))\n if res['TypeCode'] == 'JQHOT_0202': # 小红书热评\n body = {\n 'LogId': res['LogId'],\n 'Pic': json.dumps(b)\n }\n requests.post(rb_hot_submit_url, headers=self.headers, data=json.dumps(body))\n if res['TypeCode'] == 'JQHOT_0106':\n body = {\n 'LogId': res['LogId'],\n 'PicStr': json.dumps(b)\n }\n requests.post(weibo_drop_submit, headers=self.headers, data=json.dumps(body))\n os.remove(pic_name)\n os.remove('001_new3.png')\n print('成功提交一条任务')\n else:\n print('状态码不为200:%s' % req.status_code)\n except Exception as e:\n print(e)\n\n def pic_to_byte(self, pic_name):\n b = []\n with open(pic_name, 'rb') as f:\n for i in f.read():\n b.append(i)\n return b\n\n\nif __name__ == '__main__':\n pic = PicHandle()\n pic.get_task()\n","sub_path":"业务截图/图片处理/pichandle.py","file_name":"pichandle.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"409789813","text":"#sum of Squ of Digits\ndef sumofdig(n):\n sum=0\n while n>0:\n digit=n%10\n square=digit*digit\n sum+=square\n n=int(n/10)\n print(sum)\ntry:\n n=int(input())\n sumofdig(n)\nexcept:\n print(\"invalid\")\n","sub_path":"Square_of_digit.py","file_name":"Square_of_digit.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"395139218","text":"#SELECT v.name, v.favorite_song, m.source, m.buy \r\n# FROM villagers AS v, music AS m \r\n# WHERE v.favorite_song=m.name \r\n# ORDER BY (m.name, v.name);\r\n\r\nimport redis\r\nimport timeit\r\n\r\nr = redis.Redis()\r\n\r\n#Keys corresponding to tables\r\n\r\nvillagers = range(15049,15440)\r\nmusic = range(8055,8153)\r\n\r\n#Query 35\r\n\r\nResults35 = []\r\n\r\nfor key_v in villagers:\r\n elem_decoded1 = r.hmget(key_v,'Favorite_Song')[0].decode()\r\n for key_m in music:\r\n elem_decoded2 = r.hmget(key_m,'Name')[0].decode()\r\n 
if (elem_decoded1 == elem_decoded2):\r\n Result35 = []\r\n Result35.append(r.hmget(key_v,'Name')[0].decode())\r\n Result35.append(r.hmget(key_v,'Favorite_Song')[0].decode())\r\n Result35.append(r.hmget(key_m,'Source')[0].decode())\r\n Result35.append(r.hmget(key_m,'Buy')[0].decode())\r\n Result35.append(r.hmget(key_m,'Name')[0].decode())\r\n Results35.append(Result35)\r\n\r\nL= sorted(Results35, key = lambda x: x[4])\r\n\r\nfor x in L:\r\n del x[-1]\r\n \r\n#print(\"Query 35 :\", L)\r\n\r\ntimeit.timeit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n ","sub_path":"SDD/Specialite_SDD/Big_Data/animal-crossing/query_35/query_35.py","file_name":"query_35.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"80998689","text":"from datetime import datetime\r\nfrom functools import reduce\r\nfrom operator import and_\r\nfrom urllib import request\r\n\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.db.models import Q\r\nfrom django.http import Http404\r\nfrom django.shortcuts import redirect, get_object_or_404, render\r\nfrom django.urls import reverse_lazy\r\nfrom django.views import generic\r\n\r\nfrom .forms import InquiryCreateForm\r\n\"\"\"from .forms import CommentCreateForm, ReplyCreateForm\"\"\"\r\nfrom .models import Article\r\n\"\"\"from .models import Post, Comment, Reply\"\"\"\r\n\r\nUser = get_user_model()\r\n\r\n\r\nclass Index(generic.ListView):\r\n \"\"\" TOPページ \"\"\"\r\n template_name = 'article/index.html'\r\n queryset = Article.objects.order_by('-created_at').filter(is_published=True)\r\n context_object_name = 'object_list'\r\n\r\n def get_context_data(self, **kwargs):\r\n ctx = super().get_context_data(**kwargs)\r\n # topRecommendedArticlesで最新の記事を1つ渡す\r\n ctx['first'] = Article.objects.filter(is_published=True).order_by('-created_at').first()\r\n # 2つ目以降の記事をリストで渡す\r\n ctx['list'] = Article.objects.filter(is_published=True).order_by('-created_at')[1:5]\r\n 
\"\"\"\r\n # Postモデルから値を貰う\r\n post = Post.objects.order_by('-created_at')\r\n ctx['post'] = post\r\n \"\"\"\r\n # 検索されたクエリを取り出す\r\n ctx['query'] = self.request.GET.get('q', '')\r\n # 検索されたクエリを集計する\r\n query_list = {}\r\n with open('./article/log/query.csv', encoding='UTF-8')as f:\r\n for item in f:\r\n columns = item.rstrip().split(',')\r\n query = columns[0]\r\n if query in query_list:\r\n query_list[query] += 1\r\n else:\r\n query_list[query] = 1\r\n # 検索されたクエリでトレンドワード作る\r\n trend_words = []\r\n for k, v in sorted(query_list.items(), key=lambda x: x[1], reverse=True):\r\n trend_words.append(str(k))\r\n ctx['trend_word1'] = trend_words[0]\r\n ctx['trend_word2'] = trend_words[1]\r\n ctx['trend_word3'] = trend_words[2]\r\n ctx['trend_word4'] = trend_words[3]\r\n ctx['trend_word5'] = trend_words[4]\r\n # 記事ランキングを作る\r\n ranking = Article.objects.filter(is_published=True).order_by('-views')[:5]\r\n ctx['ranking'] = ranking\r\n return ctx\r\n\r\n\r\nclass SearchResult(generic.ListView):\r\n \"\"\" 検索結果の表示 \"\"\"\r\n template_name = 'article/search_result.html'\r\n queryset = Article.objects.order_by('-created_at').filter(is_published=True)\r\n context_object_name = 'object_list'\r\n paginate_by = 8\r\n\r\n def get_queryset(self):\r\n queryset = Article.objects.order_by('-created_at').filter(is_published=True)\r\n keyword = self.request.GET.get('q')\r\n if keyword:\r\n exclusion = set([' ', ' '])\r\n q_list = ''\r\n for i in keyword:\r\n if i in exclusion:\r\n pass\r\n else:\r\n q_list += i\r\n query = reduce(\r\n and_, [Q(title__icontains=q) |\r\n Q(content__icontains=q) |\r\n Q(themes__theme__icontains=q)\r\n for q in q_list]\r\n )\r\n queryset = queryset.filter(query)\r\n # 検索されたクエリを書き込む\r\n with open('./article/log/query.csv', 'a', encoding='UTF-8')as f:\r\n today = datetime.today()\r\n f.write(keyword)\r\n f.write(',')\r\n f.write(str(today) + '\\n')\r\n return queryset\r\n\r\n def get_context_data(self, **kwargs):\r\n ctx = super().get_context_data(**kwargs)\r\n 
\"\"\"\r\n # Postモデルから値を貰う\r\n post = Post.objects.order_by('-created_at')\r\n ctx['post'] = post\r\n \"\"\"\r\n # 検索されたクエリを取り出す\r\n ctx['query'] = self.request.GET.get('q', '')\r\n # 検索結果後、記事数をカウントする\r\n keyword = self.request.GET.get('q', '')\r\n count = Article.objects.filter(\r\n Q(title__icontains=keyword) |\r\n Q(content__icontains=keyword) |\r\n Q(themes__theme__icontains=keyword)).filter(is_published=True).count()\r\n ctx['count'] = count\r\n # 検索されたクエリを集計する\r\n query_list = {}\r\n with open('./article/log/query.csv', encoding='UTF-8')as f:\r\n for item in f:\r\n columns = item.rstrip().split(',')\r\n query = columns[0]\r\n if query in query_list:\r\n query_list[query] += 1\r\n else:\r\n query_list[query] = 1\r\n # 検索されたクエリでトレンドワード作る\r\n trend_words = []\r\n for k, v in sorted(query_list.items(), key=lambda x: x[1], reverse=True):\r\n trend_words.append(str(k))\r\n ctx['trend_word1'] = trend_words[0]\r\n ctx['trend_word2'] = trend_words[1]\r\n ctx['trend_word3'] = trend_words[2]\r\n ctx['trend_word4'] = trend_words[3]\r\n ctx['trend_word5'] = trend_words[4]\r\n # ページネーション「●件ー●件の表示」\r\n page = self.request.GET.get('page')\r\n ctx['page'] = page\r\n if page is None or int(page) == 1:\r\n if count < 8:\r\n ctx['pagecountstart'] = 1\r\n ctx['pagecountend'] = count\r\n else:\r\n ctx['pagecountstart'] = 1\r\n ctx['pagecountend'] = 8\r\n else:\r\n ctx['pagecountstart'] = int(page) * 8 - 8\r\n ctx['pagecountend'] = int(page) * 8\r\n # 記事ランキングを作る\r\n ranking = Article.objects.filter(is_published=True).order_by('-views')[:5]\r\n ctx['ranking'] = ranking\r\n return ctx\r\n\r\n\r\nclass Detail(generic.DetailView):\r\n \"\"\" 詳細ページ \"\"\"\r\n template_name = 'article/detail.html'\r\n model = Article\r\n\r\n def get_context_data(self, **kwargs):\r\n # オーバーライド\r\n ctx = super().get_context_data(**kwargs)\r\n # 検索されたクエリを取り出す\r\n ctx['query'] = self.request.GET.get('q', '')\r\n # 検索されたクエリを集計する\r\n query_list = {}\r\n with open('./article/log/query.csv', encoding='UTF-8')as 
f:\r\n for item in f:\r\n columns = item.rstrip().split(',')\r\n query = columns[0]\r\n if query in query_list:\r\n query_list[query] += 1\r\n else:\r\n query_list[query] = 1\r\n # 検索されたクエリでトレンドワード作る\r\n trend_words = []\r\n for k, v in sorted(query_list.items(), key=lambda x: x[1], reverse=True):\r\n trend_words.append(str(k))\r\n ctx['trend_word1'] = trend_words[0]\r\n ctx['trend_word2'] = trend_words[1]\r\n ctx['trend_word3'] = trend_words[2]\r\n ctx['trend_word4'] = trend_words[3]\r\n ctx['trend_word5'] = trend_words[4]\r\n # カウンターをつける\r\n pk = self.kwargs['pk'] # PKを取得する\r\n count = Article.objects.get(pk=pk)\r\n count.views += 1\r\n count.save()\r\n # 記事ランキングを作る\r\n ranking = Article.objects.filter(is_published=True).order_by('-views')[:5]\r\n ctx['ranking'] = ranking\r\n return ctx\r\n\r\n\r\nclass Inquiry(generic.CreateView):\r\n \"\"\"問い合わせフォーム\"\"\"\r\n template_name = 'article/inquiry.html'\r\n form_class = InquiryCreateForm\r\n success_url = reverse_lazy('article:inquiry_done')\r\n\r\n\r\nclass InquiryDone(generic.TemplateView):\r\n \"\"\"問い合わせ完了\"\"\"\r\n template_name = 'article/inquiry_done.html'\r\n\r\n\r\nclass PrivacyPolicy(generic.TemplateView):\r\n \"\"\" プライバシーポリシー \"\"\"\r\n template_name = 'article/privacy_policy.html'\r\n","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"642697084","text":"import os\nimport csv\nimport sys\nimport string\n\n\ndef get_path(filename):\n curr_dir = os.getcwd()\n file_path = os.path.join(curr_dir, filename)\n return file_path\n\n\ndef read_csv(file_path):\n with open(file_path, 'r') as f:\n reader = csv.reader(f, delimiter=' ')\n for row in reader:\n print(row)\n\n\ndef read_write_csv(file_path):\n reading_file = open(file_path, 'r')\n reader = csv.reader(reading_file, delimiter=' ')\n writing_file = open(sys.argv[2], 'wt')\n writer = csv.writer(writing_file)\n for 
row in reader:\n writer.writerow(row)\n writing_file.close()\n reading_file.close()\n print(open(sys.argv[2], 'rt').read())\n\n\nread_write_csv(get_path(sys.argv[1]))\n","sub_path":"SamplePackage.py","file_name":"SamplePackage.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"80963669","text":"from django import forms\nfrom django.forms import ModelForm, Textarea\nfrom wikiblog.models import Page, Comment\n\n\nclass page_form(ModelForm):\n class Meta:\n model = Page\n fields = ('title', 'bodytext',)\n #fields = '__all__'\n \n labels = {\n 'title': 'Title',\n 'bodytext': 'Content',\n }\n \n widgets = {\n 'title': Textarea(attrs={'cols': 60, 'rows': 2, 'value': 'name'},),\n 'bodytext': Textarea(attrs={'cols': 80, 'rows': 20}),\n }\n\n\n \nclass upload_file_form(forms.Form):\n data = forms.FileField(label=\"\")\n\nclass choices_form(forms.Form):\n choices = forms.MultipleChoiceField(required=False,\n widget=forms.CheckboxSelectMultiple,)\n\n\nclass comment_form(ModelForm):\n class Meta:\n model = Comment\n fields = ('user_name', 'content',)\n #fields = '__all__'\n \n labels = {\n 'user_name': 'Name',\n 'content': 'Content',\n }\n \n widgets = {\n 'user_name': Textarea(attrs={'cols': 20, 'rows': 1,}),\n 'content': Textarea(attrs={'cols': 20, 'rows': 3}),\n }\n","sub_path":"LibraryProjectCode/littlelibrary/wikiblog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"568600355","text":"from lib.hand_eval import convert_string_to_int, score_best_five, eval_hand\nfrom Global import State\nfrom random import random\n\n\ndef quick_check_if_hole_helps(score, board):\n if len(board) == 3:\n board = board + [-1, -6]\n elif len(board) == 4:\n board = board + [-1]\n that_score = eval_hand(board)\n if that_score[0] == score[0]:\n return False\n return 
True\n\ndef split_raise(legal_actions):\n raising_action = [x for x in legal_actions if 'RAISE' in x or 'BET' in x]\n if not raising_action:\n return False, False\n r, lo, hi = raising_action[0].split(':')\n lo = int(lo)\n hi = int(hi)\n return lo, hi\n\n\nclass Flop(object):\n\n @classmethod\n def get_action(cls, data):\n # GETACTION potSize numBoardCards [boardCards] [stackSizes] numActivePlayers [activePlayers] numLastActions [lastActions] numLegalActions [legalActions] timebank\n data = data.split()\n getaction = data.pop(0)\n potSize = int(data.pop(0))\n numBoardCards = int(data.pop(0))\n\n board_cards = []\n for _ in range(numBoardCards):\n board_cards.append(convert_string_to_int(data.pop(0)))\n\n stack1 = int(data.pop(0))\n stack2 = int(data.pop(0))\n stack3 = int(data.pop(0))\n\n numActivePlayers = int(data.pop(0))\n active1 = data.pop(0)\n active2 = data.pop(0)\n active3 = data.pop(0)\n\n numLastActions = int(data.pop(0))\n\n prev_actions = []\n for _ in range(numLastActions):\n prev_actions.append(data.pop(0))\n\n numLegalActions = int(data.pop(0))\n\n legal_actions = []\n for _ in range(numLegalActions):\n legal_actions.append(data.pop(0))\n\n if numLegalActions == 1:\n return legal_actions[0]\n\n State.timebank = float(data.pop(0))\n\n\n # These are the variables based on position\n seat = State.seat\n numActivePlayers = numActivePlayers\n score = score_best_five(board_cards + State.hole_cards)\n\n\n # CHECK / BET 1\n # CALL / FOLD / RAISE 2\n\n\n # Case 1\n #######################################################################\n # Nobody else has acted\n # TODO: consider fold equity for betting and reverse pot odds\n if any([x for x in legal_actions if 'CHECK' in x]):\n # If we have a hand, then bet, if we don't then do not\n bet_prob = 0\n\n # We bet if we have more than a pair\n if score[0] > 1 and quick_check_if_hole_helps(score, board_cards):\n bet_prob = 1\n elif score[0] == 1 and quick_check_if_hole_helps(score, board_cards):\n val_of_pair = 
score[1]\n # val_of_pair goes from 0 - 12\n bet_prob += .28\n bet_prob += val_of_pair * .03\n elif score[0] == 0:\n bet_prob += score[1][0] * .01\n else:\n # This is our kicker to a pair on the board\n bet_prob = State.hole_cards[0] / 4 * .01\n\n # Do not bet if we do not beat the board\n if not quick_check_if_hole_helps(score, board_cards):\n guessed_win_prob = .1\n\n if random() < bet_prob:\n lo, hi = split_raise(legal_actions)\n\n # BET\n if score[0] >= 4:\n # Max bet with a straight or better\n bet_amt = hi\n return 'BET:%d' % bet_amt\n\n if score[0] >= 2:\n bet_amt = max(min(int((.25 + random()) * hi * State.aggressiveness), hi), lo)\n return 'BET:%d' % bet_amt\n\n if score[0] >= 1:\n bet_amt = max(min(int((.05 * score[1]) * hi * State.aggressiveness), hi), lo)\n return 'BET:%d' % bet_amt\n\n bet_amt = lo\n return 'BET:%d' % bet_amt\n else:\n return 'CHECK'\n\n\n # Case 2\n #######################################################################\n # Need to decide if we should FOLD / CALL / RAISE\n # TODO: Consider if we are facing multiple bets. 
Tune this\n\n if any([x for x in legal_actions if 'CALL' in x]):\n # Compute pot odds\n call_action = [x for x in legal_actions if 'CALL' in x][0]\n call_amt = int(call_action.split(':')[-1])\n pot_size = potSize\n\n pot_odds = float(call_amt) / (2 * call_amt + potSize)\n\n # Determine what the odds of winning are by guessing\n guessed_win_prob = 0\n if score[0] == 0:\n guessed_win_prob = float(score[1][0] / 13) / 40\n\n if score[0] <= 2:\n # PAIR\n if score[0] == 1:\n guessed_win_prob += .05 * score[1]\n\n # TWO PAIR\n if score[0] == 2:\n guessed_win_prob += .7\n guessed_win_prob += .05 * score[1]\n\n # If we are playing the board, we are not good\n if not quick_check_if_hole_helps(score, board_cards):\n guessed_win_prob = .1\n\n if pot_odds < guessed_win_prob:\n prev_bets = [x for x in prev_actions if 'RAISE' in x or 'BET' in x]\n multibet = len(prev_bets) >= 2\n if pot_odds < 2 * guessed_win_prob and not multibet:\n lo, hi = split_raise(legal_actions)\n if not lo:\n return call_action\n\n if pot_odds > 4 * guessed_win_prob:\n bet_amt = max(min(int(random() * 2 * lo * State.aggressiveness), hi), lo)\n else:\n bet_amt = max(min(int(random() * hi * State.aggressiveness), hi), lo)\n return 'RAISE:%d' % bet_amt\n\n return call_action\n\n return 'FOLD'\n\n lo, hi = split_raise(legal_actions)\n if not lo:\n return call_action\n\n # FULL HOUSE or better is always max raise\n if score[0] >= 6:\n return 'RAISE:%d' % hi\n\n # FLUSH\n if score[0] == 5:\n # If the kicker is high enough\n if score[1] >= 10:\n return 'RAISE:%d' % hi\n else:\n if random() < score[1] * .1:\n bet_amt = max(min(int(random() * hi * State.aggressiveness), hi), lo)\n return 'RAISE:%d' % bet_amt\n else:\n return call_action\n\n # STRAIGHT\n if score[0] == 4:\n # If the kicker is high enough\n if score[1] >= 10:\n return 'RAISE:%d' % hi\n return call_action\n\n # Otherwise we want to get the pot bigger\n if pot_odds < 2 * guessed_win_prob:\n bet_amt = max(min(int(random() * hi * State.aggressiveness), 
hi), lo)\n return 'RAISE:%d' % bet_amt\n\n return call_action\n\n return 'CHECK'\n","sub_path":"players/v0/Flop.py","file_name":"Flop.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"248683796","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Implement a Depth-First Search explorer.\n# Works in combination with lj_laser_heading, nj_laser, and nj_escape_crossing\n# jockeys.\n\nfrom __future__ import print_function, division\n\nfrom math import pi\n\nimport rospy\nimport actionlib\nfrom std_msgs.msg import Float32\n\nfrom lama_jockeys.msg import NavigateAction\nfrom lama_jockeys.msg import NavigateGoal\nfrom lama_jockeys.msg import LocalizeAction\nfrom lama_jockeys.msg import LocalizeGoal\nfrom lama_msgs.srv import GetCrossing\nfrom lama_interfaces.srv import ActOnMap\nfrom lama_interfaces.srv import ActOnMapRequest\nfrom lama_interfaces.core_interface import MapAgentInterface\n# import lama_interfaces.cleartext_interface_factory as li_cif\n# interface_factory = li_cif.cleartext_interface_factory\nimport lama_interfaces.interface_factory as li_if\ninterface_factory = li_if.interface_factory\nfrom lama_interfaces.graph_builder import get_edges_with_vertices\nfrom lama_interfaces.graph_builder import get_directed_graph_index\n\nfrom graph_transformer import GraphTransformer\n\n_max_dissimilarity_for_same = 0.05\n\n\ndef normalize_angles(angles):\n def normalize_angle(angle):\n return (angle + pi) % (2 * pi) - pi\n return [normalize_angle(a) for a in angles]\n\n\ndef debug(msg):\n rospy.logdebug('{}: {}'.format(rospy.get_name(), msg))\n\n\ndef jockey_client(jockey_name, action_type):\n client = actionlib.SimpleActionClient(jockey_name, action_type)\n while not client.wait_for_server(rospy.Duration(5)):\n rospy.loginfo(('{}: waiting for the jockey action ' +\n 'server ({})').format(rospy.get_name(), jockey_name))\n debug('communicating with the jockey ' +\n 'action 
server {}'.format(jockey_name))\n return client\n\n\nclass Edge:\n def __init__(self, id_, preceeding):\n \"\"\"An edge with link to its predecessor\n\n Parameters\n ----------\n id_: vertex id\n preceeding: Edge instance of preceeding edge\n \"\"\"\n self.id = id_\n self.preceeding = preceeding\n\n def path_to_start(self):\n if self.preceeding is None:\n # We return an empty list because the start node should not be\n # part of the final path.\n return []\n return self.preceeding.path_to_start() + [self.id]\n\n\nclass ExplorerNode(object):\n def __init__(self):\n # Node and server initialization.\n rospy.init_node('dfs_explorer', log_level=rospy.DEBUG)\n navigating_jockey_name = rospy.get_param('~navigating_jockey_name',\n 'navigating_jockey')\n localizing_jockey_name = rospy.get_param('~localizing_jockey_name',\n 'localizing_jockey')\n escape_jockey_name = rospy.get_param('~escape_jockey_name',\n 'nj_escape_jockey')\n\n # Navigate jockey server.\n self.navigate = jockey_client(navigating_jockey_name, NavigateAction)\n\n # Localize jockey server.\n self.localize = jockey_client(localizing_jockey_name, LocalizeAction)\n\n # Crossing escape jockey server.\n self.escape = jockey_client(escape_jockey_name, NavigateAction)\n\n # Map agent server.\n iface = MapAgentInterface(start=False)\n self.map_agent = rospy.ServiceProxy(iface.action_service_name,\n ActOnMap)\n\n # Descriptor getter for Crossing.\n self.crossing_interface_name = rospy.get_param('~crossing_interface',\n 'crossing')\n crossing_getter_name = self.crossing_interface_name + '_getter'\n self.crossing_getter = rospy.ServiceProxy(crossing_getter_name,\n GetCrossing)\n debug('waiting for service {}'.format(crossing_getter_name))\n self.crossing_getter.wait_for_service()\n debug('service {} available'.format(crossing_getter_name))\n\n # Exit angles getter and setter (double).\n self.exit_angles_interface_name = rospy.get_param(\n '~exit_angles_interface_name',\n 'dfs_explorer_exit_angle')\n exits_iface = 
interface_factory(\n self.exit_angles_interface_name,\n 'lama_interfaces/GetDouble',\n 'lama_interfaces/SetDouble')\n self.exit_angles_getter = exits_iface.getter_service_proxy\n self.exit_angles_setter = exits_iface.setter_service_proxy\n\n # Exit angle topic advertiser.\n self.exit_angle_topic_name = rospy.get_param(\"exit_angle_topic\",\n \"~exit_angle\")\n\n self.first_crossing_reached = False\n # Last visited node, also current node if the robot is on a node.\n self.last_vertex = None\n # Exit taken, represented by its angle, when leaving the last node.\n self.exit_taken = None\n self.next_vertex = None\n self.next_exit = None\n # The graph is organized as a map\n # vertex: [[vertex, exit_angle], [vertex, exit_angle], ...].\n # Where the second vertex is the vertex that will be at the next\n # crossing center when traversing edge (corridor) at absolute angle\n # exit_angle.\n # When starting to traverse an edge, vertex is set to None. A vertex\n # will be visited when all its neighbor vertices are not None.\n # The graph is then an oriented graph where the information for edge\n # a to b is the exit angle that was taken from a to reach b.\n self.graph_transformer = GraphTransformer(\n self.map_agent,\n self.crossing_getter,\n self.crossing_interface_name,\n self.exit_angles_getter,\n self.exit_angles_interface_name)\n\n debug('initialized')\n\n def move_to_crossing(self):\n \"\"\"Move the robot to the first crossing\n Move the robot to the first crossing so that we can have a descriptor\n list to start with with the DFS algorithm.\n \"\"\"\n debug('moving to crossing')\n nav_goal = NavigateGoal()\n nav_goal.action = nav_goal.TRAVERSE\n self.navigate.send_goal(nav_goal)\n self.navigate.wait_for_result()\n nav_result = self.navigate.get_result()\n if nav_result.final_state == nav_result.DONE:\n debug(('traversed to crossing center in {:.2f} s').format(\n nav_result.completion_time.to_sec()))\n else:\n err = '{}: something wrong happened, exiting!'.format(\n 
rospy.get_name())\n rospy.logerr(err)\n raise Exception(err)\n self.first_crossing_reached = True\n\n def loop(self):\n \"\"\"Run the DFS algorithm until all vertices are visited\n\n Procedure:\n 1. Get a new vertex descriptor when finished traversing, in the case\n that the vertex was not already visited.\n 2. Choose the vertex with the next exit to visit and the direction to\n move with DFS.\n 3. Move to that vertex.\n 4. Let the robot escape from the node in the chosen direction.\n 5. Let the navigating jockey move to the next crossing.\n 6. Repeat from 1. indefinitely.\n \"\"\"\n if not self.first_crossing_reached:\n rospy.logerr('Go to first crossing first')\n raise Exception('Go to first crossing first')\n\n while True:\n # 1. Get a new vertex descriptor (robot should be at crossing\n # center).\n debug('getting descriptor')\n if not self.get_current_descriptor():\n rospy.logwarn('No descriptor, exiting')\n break\n\n # 2. Choose the vertex with the next exit to visit\n vertex_and_angle = self.get_next_vertex_to_visit()\n if vertex_and_angle is None:\n rospy.loginfo('I visisted all crossings, successfully exiting')\n break\n self.next_vertex, self.next_exit = vertex_and_angle\n debug('next vertex to visit {} (exit angle: {})'.format(\n self.next_vertex, self.next_exit))\n\n # 3. Move to that vertex.\n self.move_to_next_crossing()\n\n # 4. Let the robot escape from the node in the chosen direction.\n # The edge does not exists yet, set the direction through a topic.\n self.escape_from_crossing()\n\n # 5. Let the navigating jockey move to the next crossing.\n self.move_to_crossing()\n\n def get_current_descriptor(self):\n \"\"\"Get the descriptors from the current crossing center\n\n Get the descriptors (i.e. 
write them into the database).\n Push the vertex if not already existent.\n Assign the descriptors to this vertex.\n\n Parameters\n ----------\n - origin_angle: float, absolute angle of the exit the robot took\n when starting from origin_vertex.\n \"\"\"\n loc_goal = LocalizeGoal()\n loc_goal.action = loc_goal.GET_VERTEX_DESCRIPTOR\n self.localize.send_goal_and_wait(loc_goal, rospy.Duration(0.5))\n loc_result = self.localize.get_result()\n if not loc_result:\n rospy.logerr('Did not receive vertex descriptor within ' +\n '0.5 s, exiting')\n return False\n rospy.logdebug('Received {} vertex descriptors'.format(\n len(loc_result.descriptor_links)))\n self.handle_vertex(loc_result.descriptor_links)\n return True\n\n def handle_vertex(self, descriptor_links):\n \"\"\"Push a new vertex into the database, if needed\n\n Get the dissimilarity of the current descriptor (unsaved) with\n descriptors saved in the database.\n Save the vertex and associate the given descriptors, if the vertex is\n new.\n \"\"\"\n vertex_come_from = self.last_vertex\n vertices, dissimilarities = self.get_dissimilarity()\n vertex_is_new = True\n if (dissimilarities and\n (min(dissimilarities) < _max_dissimilarity_for_same)):\n vertex_is_new = False\n if vertex_is_new:\n # Add vertex to map.\n map_action = ActOnMapRequest()\n map_action.action = map_action.PUSH_VERTEX\n response = self.map_agent(map_action)\n new_vertex = response.objects[0].id\n debug('new vertex {}'.format(new_vertex))\n # Assign descriptors.\n map_action = ActOnMapRequest()\n map_action.object.id = new_vertex\n map_action.action = map_action.ASSIGN_DESCRIPTOR_VERTEX\n for link in descriptor_links:\n map_action.descriptor_id = link.descriptor_id\n map_action.interface_name = link.interface_name\n self.map_agent(map_action)\n if link.interface_name == self.crossing_interface_name:\n self.current_crossing_id = link.descriptor_id\n # Get the exit_angles from the map.\n crossing_resp = self.crossing_getter(self.current_crossing_id)\n 
rospy.logdebug('Exit count: {}'.format(\n len(crossing_resp.descriptor.frontiers)))\n self.last_vertex = new_vertex\n else:\n # TODO: delete the redundant descriptors (quality-based\n # if possible)\n index_vertex_same = dissimilarities.index(min(dissimilarities))\n vertex_same = vertices[index_vertex_same]\n debug('already known vertex: {}'.format(vertex_same))\n self.last_vertex = vertex_same\n self.current_crossing_id = self.get_crossing_desc_id(vertex_same)\n # Add the edge to the map.\n if vertex_come_from is not None:\n self.add_edge_to_map(vertex_come_from, self.last_vertex,\n self.exit_taken)\n\n def get_dissimilarity(self):\n \"\"\"Return two lists: indexes and dissimilarities\"\"\"\n loc_goal = LocalizeGoal()\n loc_goal.action = loc_goal.GET_DISSIMILARITY\n debug('Requested GET_DISSIMILARITY')\n self.localize.send_goal_and_wait(loc_goal, rospy.Duration(0.5))\n loc_result = self.localize.get_result()\n if not loc_result:\n rospy.logerr('Did not received vertex descriptor within ' +\n '0.5 s, exiting')\n return None, None\n debug('received {} dissimilarities'.format(len(loc_result.idata)))\n return loc_result.idata, loc_result.fdata\n\n def add_edge_to_map(self, v0, v1, exit_angle):\n \"\"\"Add an edge and its associated descriptor to the map\n\n The oriented edge is from v0 to v1.\n The edge descriptor is the exit angle to take at v0 to go to v1.\n \"\"\"\n # Add edge.\n debug('adding edge ({}, {})'.format(v0, v1))\n map_action = ActOnMapRequest()\n map_action.action = map_action.PUSH_EDGE\n map_action.object.type = map_action.object.EDGE\n map_action.object.references.append(v0)\n map_action.object.references.append(v1)\n edge_response = self.map_agent(map_action)\n if not edge_response.objects:\n rospy.logerr('Database error')\n return\n edge_id = edge_response.objects[0].id\n debug('edge {} ({} -> {}) added'.format(edge_id, v0, v1))\n # Add descriptor.\n debug('adding descriptor')\n desc_response = self.exit_angles_setter(exit_angle)\n debug('descriptor 
{} added, angle: {}'.format(desc_response.id,\n exit_angle))\n # Assign descriptor.\n debug('assigining exit_angle descriptor {} to edge {}'.format(\n desc_response.id, edge_id))\n map_action = ActOnMapRequest()\n map_action.action = map_action.ASSIGN_DESCRIPTOR_EDGE\n map_action.object.id = edge_id\n map_action.descriptor_id = desc_response.id\n map_action.interface_name = self.exit_angles_interface_name\n self.map_agent(map_action)\n debug('descriptor assigned')\n\n def get_next_vertex_to_visit(self):\n \"\"\"Return the tuple (vertex, angle), which is the next unvisited vertex\n\n Return None if all vertices were visited.\n\n A vertex is unvisited if one of its exit_angles has no associated\n vertex.\n\n vertex is None if the robot never explored the exit with angle angle.\n \"\"\"\n def is_discovered(nodes):\n for v, a in nodes:\n if v is None:\n return False, (v, a)\n return True, (None, None)\n\n graph = self.graph_transformer.graph_from_map()\n stack = [(self.last_vertex, self.exit_taken)]\n discovered = []\n while stack:\n v = stack.pop(0)\n nodes = graph[v[0]]\n this_is_discovered, node = is_discovered(nodes)\n if this_is_discovered:\n if v not in discovered:\n discovered.append(v)\n for new_node in nodes:\n stack.append(new_node)\n else:\n return (v[0], node[1])\n\n def move_to_next_crossing(self):\n if self.next_vertex is None:\n # The current vertex is unvisited, don't need to move to another\n # vertex.\n return\n path = self.find_path_to_next_vertex()\n for vertex in path:\n # Escape from crossing center.\n goal = NavigateGoal()\n goal.action = goal.TRAVERSE\n goal.edge = self.first_edge(self.last_vertex, vertex)\n if goal.edge.id is None:\n err = 'No edge from {} to {}'.format(self.last_vertex, vertex)\n rospy.logfatal(err)\n return False\n debug('escaping along edge {}'.format(goal.edge.id))\n self.escape.send_goal_and_wait(goal)\n result = self.navigate.get_result()\n if result.final_state != result.DONE:\n err = 'Escape jockey did not succeed'\n 
rospy.logerr(err)\n return False\n # Go to next crossing.\n goal = NavigateGoal()\n goal.action = goal.TRAVERSE\n debug('moving to next crossing')\n self.navigate.send_goal_and_wait(goal)\n result = self.navigate.get_result()\n if result.final_state != result.DONE:\n err = 'Escape jockey did not succeed'\n rospy.logerr(err)\n return False\n self.last_vertex = vertex\n return True\n\n def first_edge(self, v0, v1):\n \"\"\"Return the first found edge (LamaObject) from v0 to v1\"\"\"\n edges = get_edges_with_vertices(v0, v1)\n if edges:\n return edges[0]\n return None\n\n def find_path_to_next_vertex(self):\n \"\"\"Return a list of vertices defining a path to self.next_vertex\n\n Return a list of vertices defining a path from the robot current\n position to self.next_vertex.\n self.last_vertex (the crossing the robot presently is in) will not be\n part of the path. The last vertex will be self.next_vertex.\n \"\"\"\n graph = get_directed_graph_index()\n start = self.last_vertex\n end = self.next_vertex\n queue = [Edge(start, None)]\n discovered = set()\n while queue:\n edge = queue.pop(0)\n vertex = edge.id\n if vertex == end:\n rospy.loginfo('Found path (from {}): {}'.format(\n start, edge.path_to_start()))\n return edge.path_to_start()\n if vertex not in discovered:\n discovered.add(vertex)\n for adjacent_vertex in graph[vertex]:\n queue.append(Edge(adjacent_vertex, edge))\n return None\n\n def get_crossing_desc_id(self, vertex):\n \"\"\"Return the id of the first Crossing associated with a vertex\"\"\"\n map_action = ActOnMapRequest()\n map_action.action = map_action.GET_DESCRIPTOR_LINKS\n map_action.object.id = vertex\n map_action.interface_name = self.crossing_interface_name\n response = self.map_agent(map_action)\n return response.descriptor_links[0].descriptor_id\n\n def escape_from_crossing(self):\n \"\"\"Escape from crossing towards an unknown edge and return when done\"\"\"\n exit_angle_publisher = rospy.Publisher(self.exit_angle_topic_name,\n Float32,\n 
queue_size=1,\n latch=True)\n exit_angle_publisher.publish(self.next_exit)\n nav_goal = NavigateGoal()\n nav_goal.action = nav_goal.TRAVERSE\n nav_goal.descriptor_link.descriptor_id = self.current_crossing_id\n debug('escaping from crossing {} with direction {}'.format(\n nav_goal.descriptor_link.descriptor_id, self.next_exit))\n self.escape.send_goal_and_wait(nav_goal)\n escape_result = self.escape.get_result()\n if escape_result.final_state != escape_result.DONE:\n err = 'Escape jockey did not succeed'\n rospy.logerr(err)\n raise Exception(err)\n self.exit_taken = self.next_exit\n\nnode = ExplorerNode()\nnode.move_to_crossing()\nnode.loop()\n","sub_path":"dfs_explorer/scripts/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":19365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"193302726","text":"\"\"\"\nThis code is adapted from the Jupyter notebook by Zihao Zhang, Stefan Zohren and Stephen Roberts\n'deepLOB: Deep Convolutional Neural Networks for Limit Order Books'\nOxford-Man Institute of Quantitative Finance, Department of Engineering Science, University of Oxford\n\nThe model is DeepOF from paper [2]. We apply it to LOBSTER data [3].\n\n[1] Zhang Z, Zohren S, Roberts S. deepLOB: Deep convolutional neural networks for limit order books.\n IEEE Transactions on Signal Processing. 2019 Mar 25;67(11):3001-12.\n https://arxiv.org/abs/1808.03668\n[2] Kolm, P N and Turiel, J and Westray, N. Deep Order Flow Imbalance: Extracting Alpha at Multiple Horizons from the Limit Order Book.\n (August 5, 2021). Available at SSRN: https://ssrn.com/abstract=3900141 or http://dx.doi.org/10.2139/ssrn.3900141\n[3] Huang, R and Polak, T. LOBSTER: Limit Order Book Reconstruction System. (December 27, 2011). 
\n Available at SSRN: https://ssrn.com/abstract=1977207 or http://dx.doi.org/10.2139/ssrn.1977207\n\"\"\"\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport keras\nimport multiprocessing as mp\nimport time\nimport os\nfrom keras import backend as K\nfrom keras.models import load_model, Model\nfrom keras.layers import Flatten, Dense, Dropout, Activation, Input, LSTM, CuDNNLSTM, Reshape, Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.utils import np_utils\n\nfrom sklearn.metrics import classification_report, accuracy_score\nimport matplotlib.pyplot as plt\n\n\nclass DataGenerator(tf.keras.utils.Sequence):\n def __init__(self, dir, batch_size, shuffle=True, samples_per_file=1, XYsplit=True, multiprocess=False):\n \"\"\"Initialization.\n :param dir: directory of files, contains folder \"X\" and \"Y\"\n :param batch_size:\n :param samples_per_file: how many samples are in each file\n :param shuffle\n Need batch_size to be divisible by samples_per_file\n \"\"\"\n self.dir = dir\n\n if XYsplit:\n self.Xfiles = os.listdir(os.path.join(dir, \"X\"))\n self.Yfiles = os.listdir(os.path.join(dir, \"Y\"))\n else:\n self.files = os.listdir(dir)\n\n self.batch_size = batch_size\n self.samples_per_file = samples_per_file\n self.files_per_batch = (self.batch_size // self.samples_per_file)\n self.shuffle = shuffle\n\n self.multiprocess = multiprocess\n self.XYsplit = XYsplit\n self.n_proc = mp.cpu_count()\n self.chunksize = batch_size // self.n_proc\n\n self.on_epoch_end()\n\n def __len__(self):\n # Denotes the number of batches per epoch\n return len(self.indices) // self.files_per_batch\n\n def __getitem__(self, index):\n # Generate indexes of the batch\n file_indices = self.indices[index * self.files_per_batch:(index + 1) * self.files_per_batch]\n\n # Generate data\n x, y = self.__data_generation(file_indices)\n\n 
return x, y\n\n def on_epoch_end(self):\n 'Shuffles indexes after each epoch'\n if self.XYsplit:\n assert (len(self.Xfiles) == len(self.Yfiles))\n self.indices = np.arange(len(self.Xfiles))\n else:\n self.indices = np.arange(len(self.files))\n if self.shuffle:\n np.random.shuffle(self.indices)\n\n def load_chunk(self, file_indices):\n x_list = []\n y_list = []\n for file_index in file_indices:\n if self.XYsplit:\n x_list.append(tf.convert_to_tensor(np.load(os.path.join(self.dir, \"X\", self.Xfiles[file_index]))))\n y_list.append(tf.convert_to_tensor(np.load(os.path.join(self.dir, \"Y\", self.Yfiles[file_index]))))\n else:\n with np.load(os.path.join(self.dir, self.files[file_index])) as data:\n x_list.append(tf.convert_to_tensor(data[\"X\"]))\n y_list.append(tf.convert_to_tensor(data[\"Y\"]))\n # data = np.load(os.path.join(self.dir, self.files[file_index]))\n # x_list.append(tf.convert_to_tensor(data[\"X\"]))\n # y_list.append(tf.convert_to_tensor(data[\"Y\"]))\n if self.samples_per_file==1:\n x = tf.stack(x_list)\n y = tf.stack(y_list)\n else:\n x = tf.concat(x_list, axis=0)\n y = tf.concat(y_list, axis=0)\n return x, y\n\n def __data_generation(self, file_indices):\n if self.multiprocess:\n # parallelize\n file_indices_chunks = np.array_split(file_indices, self.chunksize)\n\n with mp.Pool(processes=self.n_proc) as pool:\n # starts the sub-processes without blocking\n # pass the chunk to each worker process\n proc_results = [pool.apply_async(self.load_chunk, args=(file_indices_chunk,))\n for file_indices_chunk in file_indices_chunks]\n\n # blocks until all results are fetched\n results = [r.get() for r in proc_results]\n x = tf.concat(list(zip(*results))[0], axis=0)\n y = tf.concat(list(zip(*results))[1], axis=0)\n\n else:\n x, y = self.load_chunk(file_indices)\n\n return x, y\n\ndef create_deepOF(T, NF, number_of_lstm):\n input_lmd = Input(shape=(T, NF, 1))\n\n # build the convolutional block\n conv_first1 = Conv2D(32, (1, 2), strides=(1, 2))(input_lmd)\n 
conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)\n conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)\n conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n\n conv_first1 = Conv2D(32, (1, 10))(conv_first1)\n conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)\n conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n conv_first1 = Conv2D(32, (4, 1), padding='same')(conv_first1)\n conv_first1 = keras.layers.LeakyReLU(alpha=0.01)(conv_first1)\n\n # build the inception module\n convsecond_1 = Conv2D(64, (1, 1), padding='same')(conv_first1)\n convsecond_1 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_1)\n convsecond_1 = Conv2D(64, (3, 1), padding='same')(convsecond_1)\n convsecond_1 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_1)\n\n convsecond_2 = Conv2D(64, (1, 1), padding='same')(conv_first1)\n convsecond_2 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_2)\n convsecond_2 = Conv2D(64, (5, 1), padding='same')(convsecond_2)\n convsecond_2 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_2)\n\n convsecond_3 = MaxPooling2D((3, 1), strides=(1, 1), padding='same')(conv_first1)\n convsecond_3 = Conv2D(64, (1, 1), padding='same')(convsecond_3)\n convsecond_3 = keras.layers.LeakyReLU(alpha=0.01)(convsecond_3)\n\n convsecond_output = keras.layers.concatenate([convsecond_1, convsecond_2, convsecond_3], axis=3)\n conv_reshape = Reshape((int(convsecond_output.shape[1]), int(convsecond_output.shape[3])))(convsecond_output)\n conv_reshape = keras.layers.Dropout(0.2, noise_shape=(None, 1, int(conv_reshape.shape[2])))(conv_reshape,\n training=True)\n\n # build the last LSTM layer\n conv_lstm = LSTM(number_of_lstm, batch_input_shape=(32, T, int(conv_reshape.shape[2])))(conv_reshape)\n\n # build the output layer\n out = Dense(3, 
activation='softmax')(conv_lstm)\n model = Model(inputs=input_lmd, outputs=out)\n adam = Adam(learning_rate=0.01, epsilon=1)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return model\n\n\nif __name__ == '__main__':\n # limit gpu memory\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n# # use only one gpu\n# gpus = tf.config.list_physical_devices('GPU')\n# if gpus:\n# # Restrict TensorFlow to use only the first GPU\n# try:\n# tf.config.set_visible_devices(gpus[0], 'GPU')\n# logical_gpus = tf.config.list_logical_devices('GPU')\n# except RuntimeError as e:\n# # Visible devices must be set before GPUs have been initialized\n# print(e)\n\n # set random seeds\n np.random.seed(1)\n tf.random.set_seed(2)\n\n k = 4\n # which prediction horizon (k = (0, 1, 2, 3, 4) -> (10, 20, 30, 50, 100) order book events)\n T = 100\n # the length of a sample sequence. 
Even though this is a single long time series, LSTMs usually work with\n # input sequences of max length 200-400, we hence split the time series into sequences of length 100\n # rolling forward by one time-step each time.\n n_hiddens = 64\n # number of hidden states in LSTM\n \n checkpoint_filepath = './model_weights/deepOF_weights_AAL_W1_100/weights'\n\n # data\n val_generator = DataGenerator(r\"data/AAL_OF_W1_batch32/val\", batch_size=32, \n XYsplit=False, samples_per_file=32)\n train_generator = DataGenerator(r\"data/AAL_OF_W1_batch32/train\", batch_size=32, \n XYsplit=False, samples_per_file=32)\n test_generator = DataGenerator(r\"data/AAL_OF_W1_batch32/test\", batch_size=32, \n XYsplit=False, samples_per_file=32, shuffle=False)\n\n# # Create a MirroredStrategy.\n# strategy = tf.distribute.MirroredStrategy()\n# print('Number of devices: {}'.format(strategy.num_replicas_in_sync))\n# \n# # Open a strategy scope.\n# with strategy.scope():\n# # Everything that creates variables should be under the strategy scope.\n# \n# # build model\n# deepOF = create_deepOF(T, 40, n_hiddens)\n \n deepOF = create_deepOF(T, 20, n_hiddens)\n \n deepOF.summary()\n \n # load weights\n deepOF.load_weights(checkpoint_filepath)\n \n# # train model\n# model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n# filepath=checkpoint_filepath,\n# save_weights_only=True,\n# monitor='val_loss',\n# mode='auto',\n# save_best_only=True)\n# \n# early_stopping = EarlyStopping(monitor='val_accuracy', patience=20)\n# \n# deepOF.fit(train_generator, validation_data=val_generator,\n# epochs=50, verbose=1, workers=8,\n# callbacks=[model_checkpoint_callback, early_stopping])\n \n # evaluate model performance \n pred = deepOF.evaluate(test_generator, workers=8)\n \n # print('accuracy_score:', accuracy_score(np.argmax(testY_CNN, axis=1), np.argmax(pred, axis=1)))\n # print(classification_report(np.argmax(testY_CNN, axis=1), np.argmax(pred, axis=1), digits=4))\n 
","sub_path":"auxiliary_code/old_code/v0/deepOF_LOBSTER.py","file_name":"deepOF_LOBSTER.py","file_ext":"py","file_size_in_byte":11321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"160895417","text":"import sys, math;\r\n\r\ndef isP(x):\r\n s = str(x)\r\n #print(\"s=\",s,\" back=\",s[::-1],\" true?=\",s[::-1]==s)\r\n return s[::-1]==s\r\ndef searchNotLargeThan(x):\r\n if x < fairsquares[0]:\r\n return 0\r\n L, R = 0, len(fairsquares)\r\n while R - L > 1:\r\n M = (L + R) // 2\r\n if fairsquares[M] <= x:\r\n L = M\r\n else:\r\n R = M\r\n return L+1\r\n \r\ndef solve():\r\n #return len([fs for fs in fairsquares if fs >= a and fs <= b])\r\n return searchNotLargeThan(b) - searchNotLargeThan(a-1)\r\n\r\ninputFile = sys.argv[1] if (len(sys.argv) > 1) else \"input.txt\";\r\noutputFile = sys.argv[2] if (len(sys.argv) > 2) else (inputFile + \"out.txt\") if (len(sys.argv) > 1) else \"output.txt\";\r\n#print(inputFile, outputFile)\r\nfile = open(outputFile, \"w\")\r\n\r\nbound = 10**25\r\nprint(bound)\r\nprint(bound+1)\r\nfairsquares = []\r\n\r\ndef rec(x):\r\n if x >= bound:\r\n return\r\n y = int(str(x) + str(x)[-2::-1])\r\n #print(\"Y:\",y)\r\n a = sum(x**2 for x in map(int,list(str(y)))) <= 9\r\n if a:\r\n z = y**2\r\n #print(\"X:\",z)\r\n if isP(z):\r\n #print(\"FS1: \",z)\r\n fairsquares.append(z)\r\n y = int(str(x) + str(x)[-1::-1])\r\n #print(\"Y :\",y)\r\n b = sum(x**2 for x in map(int,list(str(y)))) <= 9\r\n if b:\r\n z = y**2\r\n #print(\"X:\",z)\r\n if isP(z):\r\n #print(\"FS2: \",z)\r\n fairsquares.append(z)\r\n if a or b:\r\n for i in range(0,4):\r\n x = x * 10 + i\r\n rec(x)\r\n \r\n\r\nfor i in range(1,4):\r\n rec(i)\r\n\r\nprint(\"fairsquares are calculated\")\r\nfairsquares.sort()\r\nprint(\"fairsquares are sorted\")\r\nfasf = open(\"fas.txt\", \"w\")\r\nfor fs in fairsquares:\r\n fasf.write(str(fs) + \"\\n\")\r\nfasf.close() \r\n\r\n #print(fs)\r\n\r\nwith open(inputFile, 'r') as f:\r\n t = 
int(f.readline())\r\n #print(t)\r\n for i in range(1, t + 1):\r\n file.write(\"Case #\" + str(i) + \": \")\r\n a,b=map(int,f.readline().split())\r\n file.write(str(solve()) + \"\\n\")\r\nfile.close() \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"delphi/CodeJam/Practice/2013/QRC/fs/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"58852669","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\n\npygame.init()\n\nwinSize = [1200, 800]\npotInit = (0, 0)\npotCar = [10, 10]\ndropTime = 0\naccTime = 0\nspeed = 3\nmoveDict = {pygame.K_LEFT: 0, pygame.K_RIGHT: 0, pygame.K_UP: 0, pygame.K_DOWN: 0}\n\nthreeCar1 = pygame.image.load('img/twr1.png')\nthreeCar2 = pygame.image.load('img/twr2.png')\nflagCar = [0]\n\nstrIn = 'in'\nstrOut = 'out'\n\nwin = pygame.display.set_mode(winSize)\npygame.display.set_caption('今晚,我在秋名山等你')\nbackGround = pygame.image.load('img/bg2.jpg')\n\ndef showCar(flag):\n if flag[0] == 1000:\n flag[0] = 0\n if flag[0] % 50 < 25:\n win.blit(threeCar1, potCar)\n else:\n win.blit(threeCar2, potCar)\n flag[0] += 1\n\ndef moveObject(pot):\n global dropTime\n moveDict[pygame.K_UP] = 0\n pot[0] += (moveDict[pygame.K_RIGHT] - moveDict[pygame.K_LEFT])\n pot[1] += (moveDict[pygame.K_DOWN] - moveDict[pygame.K_UP])\n if pot[0] < 0:\n pot[0] = 0\n if pot[1] < 0:\n pot[1] = 0\n if pot[0] > 1000:\n pot[0] = 1000\n if pot[1] > 680:\n pot[1] = 680\n dropTime = 0\n if pot[1] < 680:\n pot[1] += 5\n if dropTime > 20:\n pot[1] -= dropTime/2\n dropTime -= 3\n elif dropTime > 0:\n pot[1] += 3\n\ndef jump(pot):\n global dropTime\n if pot[1] == 680:\n dropTime = 50\n\ndef acc():\n global accTime, speed\n speed = 5\n accTime = 100\n\ndef showText(text):\n font = pygame.font.Font('freesansbold.ttf', 16)\n surText = font.render(text, True, (0, 0, 0), (255, 255, 255))\n win.blit(surText, potInit)\n pygame.display.update()\n\nisCon = 
True\nwhile isCon:\n win.blit(backGround, potInit)\n showCar(flagCar)\n\n pygame.display.update()\n\n for i in pygame.event.get():\n if i.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if i.type == pygame.KEYDOWN:\n if i.key in moveDict:\n moveDict[i.key] = speed\n if i.key == pygame.K_SPACE:\n jump(potCar)\n if i.key == pygame.K_LCTRL:\n acc()\n\n if i.type == pygame.KEYUP:\n if i.key in moveDict:\n moveDict[i.key] = 0\n\n moveObject(potCar)\n if accTime == 0:\n speed = 3\n else:\n accTime -= 1","sub_path":"codee/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"594909094","text":"import json, requests, datetime, pytest\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import NoSuchElementException\r\n\r\nurl = 'http://localhost:9999/statements'\r\n\r\n# Account for test_search_by_account\r\nselected_account = \"Peter Peterson\" \r\n# Date range for test_search_by_date & test_balance_before\r\ndate1 = \"2020-11-16 00:00:00\"\r\ndate2 = \"2020-11-19 00:00:00\"\r\n\r\ndef getstamp(input):\r\n stamp = datetime.datetime.strptime(input, '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\r\n return int(stamp.timestamp())\r\n\r\n# POST data to endpoint\r\nwith open('data.json') as json_file:\r\n data = json.load(json_file)\r\n for item in data:\r\n myjson = {\"statement\": {\"account_id\": item['account_id'], \"amount\": item['amount'], \"currency\": item['currency'], \"date\": getstamp(item['date'])} }\r\n requests.post(url, json = myjson)\r\n\r\n# Browser will open and close for each def_test\r\n@pytest.fixture(autouse=True)\r\ndef open_browser():\r\n global driver\r\n driver = webdriver.Firefox()\r\n driver.get(url)\r\n yield\r\n driver.quit()\r\n\r\ndef test_data_table():\r\n for iteration, item in enumerate(data):\r\n # Check id matches with table 
data\r\n pit = iteration + 1 # (since first iteration = 0)\r\n pid = int(driver.find_element_by_xpath(\"//tbody/tr[%s+1]/th[1]\" % iteration).text)\r\n assert pit == pid\r\n # Check if account_id matches with table data\r\n account = driver.find_element_by_xpath(\"//tbody/tr[%s+1]/td[1]\" % iteration).text\r\n assert item['account_id'] == account\r\n # Check if amount & currency matches with table data\r\n amount = driver.find_element_by_xpath(\"//tbody/tr[%s+1]/td[2]\" % iteration).text\r\n assert item['amount'] + \" \" + item['currency'] == amount\r\n # Check if date matches with table data\r\n date = driver.find_element_by_xpath(\"//tbody/tr[%s+1]/td[3]\" % iteration).text\r\n assert item['date'] == date\r\n\r\ndef test_search_by_account():\r\n driver.find_element_by_id(\"search_account_id\").send_keys(selected_account)\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n counter = 0\r\n\r\n for item in data:\r\n if item['account_id'] == selected_account:\r\n counter += 1\r\n\r\n elements = driver.find_elements_by_xpath(\"//*[contains(text(), '%s')]\" % selected_account)\r\n # Check if number of found account elements match counter\r\n assert counter == len(elements)\r\n\r\ndef test_search_by_date():\r\n driver.find_element_by_xpath(\"//input[@name='from_date']\").send_keys(date1)\r\n driver.find_element_by_xpath(\"//input[@name='to_date']\").send_keys(date2)\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n\r\n start = datetime.datetime.strptime(date1, '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\r\n end = datetime.datetime.strptime(date2, '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\r\n\r\n for iteration, _ in enumerate(data):\r\n\r\n try:\r\n date = driver.find_element_by_xpath(\"//tbody/tr[%s+1]/td[3]\" % iteration).text\r\n dateobj = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\r\n\r\n if start <= dateobj <= end:\r\n # This means test 
pass\r\n assert True\r\n else:\r\n # This means wrong date filtered, test fail\r\n assert False\r\n\r\n except NoSuchElementException:\r\n print(\"No more elements\")\r\n\r\ndef test_balance_before():\r\n balance = 0.0\r\n\r\n # Navigate to filtered date range to retrieve ID of filtered firstid\r\n driver.find_element_by_xpath(\"//input[@name='from_date']\").send_keys(date1)\r\n driver.find_element_by_xpath(\"//input[@name='to_date']\").send_keys(date2)\r\n driver.find_element_by_xpath(\"//button[@type='submit']\").click()\r\n\r\n firstid = driver.find_element_by_xpath(\"//tbody/tr[1]/th[1]\").text\r\n\r\n # Calculate sum of amounts until firstid\r\n for i in range(int(firstid)-1):\r\n balance += float(data[i]['amount'])\r\n\r\n # Check balance before on filtered page\r\n balance_before = driver.find_element_by_xpath(\"//div[@class='container' and contains(text(), 'Balance before')]\").text\r\n assert \"Balance before: \" + str(\"%.2f\" % balance) + \" \" + item['currency'] == balance_before\r\n\r\ndef test_balance_after():\r\n balance = 0.0\r\n \r\n # Calculate sum of amounts in whole data.json\r\n for item in data:\r\n balance += float(item['amount'])\r\n \r\n # Check balance after on main page (string comparison)\r\n balance_after = driver.find_element_by_xpath(\"//div[@class='col-sm']/..\").text.splitlines()[-1]\r\n assert \"Balance after \" + str(\"%.2f\" % balance) + \" \" + item['currency'] == balance_after","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"628536364","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Jul 11, 2016\nModifies passed text for file name\n@author: Levan Tsinadze\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport os\n\n# Static parameters\nSEPARATOR = '_'\n\nSPCS = {' ', '-', '–', '+', '.', ',', ':', ';', '!', '?', '\"', '\\'', '\\\\', 
\n '/', '(', ')', '{', '}', '[', ']', '\\\\n', '\\\\t'}\nDLTS = {'\\'', '-', '–', '+'}\nACCP = {f(chr(x)) for x in range(ord('a'), ord('z') + 1) for \n f in [lambda x: x, lambda x: x.upper()]}.union({str(x) for \n x in range(0, 10)})\n\n\nclass FileNameGenerator(object):\n \"\"\"File name generator\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(FileNameGenerator, self).__init__(*args, **kwargs)\n\n @staticmethod\n def _iterate_text(tx: str) -> str:\n \"\"\"\n Iterate over text and replace specian characters\n Args:\n tx: text for refinement\n\n Returns:\n next_ch: generator for next character\n \"\"\"\n sep_ch = False\n for idx, ch in enumerate(tx):\n if ch in ACCP:\n next_ch = ch.lower()\n sep_ch = True\n elif idx < len(tx) - 1 and sep_ch:\n next_ch = SEPARATOR\n sep_ch = False\n else:\n next_ch = ''\n sep_ch = False\n\n yield next_ch\n\n @staticmethod\n def generate_item(tx: str) -> str:\n \"\"\"\n Generates item from string\n Args:\n tx: input text to modify\n\n Returns:\n itm: next item from existing text\n \"\"\"\n return (''.join(ch for \n ch in FileNameGenerator._iterate_text(tx))).strip()\n\n def generate_name(self, *raw_texts: str) -> str:\n \"\"\"\n Generates appropriated file name from raw text\n Args:\n raw_texts: texts for file name generator\n\n Returns:\n generated file name\n \"\"\"\n return (''.join(self.generate_item(' '.join(raw_texts)))).strip()\n\n def __call__(self, *args, **kwargs):\n return self.generate_name(*args, **kwargs)\n\n\nclass PDFNameGenerator(FileNameGenerator):\n \"\"\"Generates PDF file name from article\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PDFNameGenerator, self).__init__(*args, **kwargs)\n\n def generate_pdf_name(self, raw_text: str) -> str:\n \"\"\"\n Generates file name for PDF file type\n Args:\n raw_text: input text\n\n Returns:\n generated PDF file name\n \"\"\"\n return f'{FileNameGenerator.generate_name(self, raw_text)}.pdf'\n\n\ndef config():\n \"\"\"\n Configure for input text\n Returns:\n 
conf: command line configuration parameters\n \"\"\"\n parser = argparse.ArgumentParser('File name generator from header', \n formatter_class=\n argparse.RawTextHelpFormatter)\n parser.add_argument('--header',\n nargs='+',\n type=str,\n required=True,\n help='Text to generate file name')\n parser.add_argument('--pdf',\n dest='pdf',\n action='store_true',\n help='Flag to generate file name with PDF extension')\n conf = parser.parse_args()\n\n return conf\n\n\nif __name__ == '__main__':\n cf = config()\n gen = FileNameGenerator()\n file_name = gen(*cf.header).strip()\n os.system(f'echo {file_name}| pbcopy')\n print(file_name)\n if cf.pdf:\n pdf_name = f'{file_name}.pdf'\n print(pdf_name)\n","sub_path":"header_modifier.py","file_name":"header_modifier.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"409189801","text":"#python3\n\"\"\"\nCoursera Specialization: Data Structure and Algorithm\nCourse: Algorithm Toolbox\n\nweek 5-5 Longest Common Subsequence of 3 sequences\n\nCompute the length of a longest common subsequence of three sequences.\n\n\"\"\"\ndef longest_comm_subseq_of2(n, m, p, a, b, c):\n # p == 0:\n for i in range(n+1):\n for j in range(m+1):\n if i == 0:\n memo[i][j][0] = 0\n elif j == 0:\n memo[i][j][0] = 0\n elif a[i-1] == b[j-1]:\n memo[i][j][0] = 1 + memo[i-1][j-1][0]\n else:\n memo[i][j][0] = max(memo[i-1][j][0], memo[i][j-1][0], memo[i-1][j-1][0])\n # m == 0:\n for i in range(n+1):\n for j in range(p+1):\n if i == 0:\n memo[i][0][j] = 0\n elif j == 0:\n memo[i][0][j] = 0\n elif a[i-1] == c[j-1]:\n memo[i][0][j] = 1 + memo[i-1][0][j-1]\n else:\n memo[i][0][j] = max(memo[i-1][0][j], memo[i][0][j-1], memo[i-1][0][j-1])\n\n # n == 0:\n for i in range(m+1):\n for j in range(p+1):\n if i == 0:\n memo[0][i][j] = 0\n elif j == 0:\n memo[0][i][j] = 0\n elif b[i-1] == c[j-1]:\n memo[0][i][j] = 1 + memo[0][i-1][j-1]\n else:\n memo[0][i][j] = 
max(memo[0][i-1][j], memo[0][i][j-1], memo[0][i-1][j-1])\n\n\ndef longest_comm_subseq_of3(n, m, p, a, b, c):\n longest_comm_subseq_of2(n, m, p, a, b, c)\n for i in range(n+1):\n for j in range(m+1):\n for k in range(p+1):\n if a[i-1] == b[j-1] == c[k-1]:\n memo[i][j][k] = 1 + memo[i-1][j-1][k-1]\n else:\n memo[i][j][0] = max(memo[i-1][j][k], memo[i][j-1][k], memo[i][j][k-1], memo[i][j-1][k-1], memo[i-1][j][k-1], memo[i-1][j-1][k], memo[i-1][j-1][k-1])\n\ndef longest_comm_subseq_of3_2(n, m, p, a, b, c):\n memo = [[[0 for k in range(p+1)] for j in range(m+1)] for i in range(n+1)]\n for i in range(n+1):\n for j in range(m+1):\n for k in range(p+1):\n if i==0 or j==0 or k==0:\n memo[i][j][k] = 0\n elif a[i-1] == b[j-1] == c[k-1]:\n memo[i][j][k] = memo[i-1][j-1][k-1] + 1\n else:\n memo[i][j][k] = max(max(memo[i-1][j][k], memo[i][j-1][k]), memo[i][j][k-1])\n return memo[n][m][p]\n\n\nif __name__ == '__main__':\n n = int(input())\n a = input().split()\n m = int(input())\n b = input().split()\n p = int(input())\n c = input().split()\n\n print(longest_comm_subseq_of3_2(n, m, p, a, b, c))\n\n\n","sub_path":"AlgorithmToolBox/Challenges/week_5/55_longestCommonSubseq_of3.py","file_name":"55_longestCommonSubseq_of3.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"570857205","text":"from django.test import TestCase\nfrom django.core.urlresolvers import resolve\nfrom .views import index\nimport unittest\nfrom django.shortcuts import render_to_response\n\nclass MainPageTests(TestCase):\n\n @classmethod\n def setUpClass(cls):\n from django.test import RequestFactory\n request_factory = RequestFactory()\n cls.request = request_factory.get('/')\n cls.request.session = {}\n \n\n def test_root_resolves_to_main_view(self ):\n main_page = resolve('/')\n self.assertEqual(main_page.func, index)\n\n def test_returns_appropriate_html_respos_code(self):\n resp = index(self.request)\n 
self.assertEquals(resp.status_code,200)\n\n def test_returns_exact_html(self):\n resp = index(self.request)\n self.assertEquals(resp.content,\n render_to_response(\"index.html\").content)\n\n def test_index_handles_logged_in_user(self):\n #create a session that appears to have a logged in user\n self.request.session = {\"user\" : \"1\"}\n \n import mock\n with mock.patch('main.views.User') as user_mock:\n \n #tell the mock what to do when called\n config = {'get_by_id.return_value':mock.Mock()}\n user_mock.configure_mock(**config)\n\n #run the test\n resp = index(self.request)\n\n #ensure we return the state of the session back to normal \n self.request.session = {}\n \n expected_html = render_to_response('user.html',{'user': user_mock.get_by_id(1)})\n self.assertEquals(resp.content, expected_html.content)\n\n","sub_path":"django_ecommerce/main/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"590359971","text":"\"\"\"\napi.twitch\n----------\n\nTwitch.tv service module.\nProvides a Stream class for Twitch TV Streams.\n\"\"\"\n\nimport requests\n\nfrom streaman.common import *\nfrom streaman.stream import Stream\n\nAPI_ROOT = \"https://api.twitch.tv/kraken\"\nURL_ROOT = \"http://www.twitch.tv\"\n#URL_ROOT = \"//www.twitch.tv\"\nSTREAMS_RESOURCE = \"/streams\"\n\nclass TwitchStream(Stream):\n \"\"\"\n Stream class used for twitch.tv streasms.\n \"\"\"\n\n def __init__(self, stream_uri):\n stream_name = stream_uri.split('/')[-1] \n url = self.generate_url(stream_name)\n super(TwitchStream, self).__init__(SERVICE_TWITCH, stream_name, stream_uri, url)\n self.__set_defaults()\n\n @staticmethod\n def generate_uri(stream_id):\n \"\"\" Generates a URI for an API Resource. 
\"\"\"\n stream_name = stream_id.split('/')[-1]\n return \"{0}{1}/{2}\".format(API_ROOT, STREAMS_RESOURCE, stream_name)\n\n @staticmethod\n def generate_url(stream_name):\n \"\"\" Generates a URL for a stream name on the Twitch service. \"\"\"\n return \"{0}/{1}\".format(URL_ROOT, stream_name)\n\n @staticmethod\n def is_valid(stream_uri):\n \"\"\" Ensures a stream is a valid Twitch.tv Stream. \"\"\"\n r = requests.get(stream_uri)\n if r.status_code == requests.codes.ok:\n return True\n return False\n\n def __set_defaults(self):\n \"\"\" Clears variables which are used when the stream is live. \"\"\"\n self.channel = None\n self.game = \"\"\n self.viewers = 0\n\n def __parse_stream_data(self, stream_data):\n \"\"\" Parses the JSON data recieved when the stream becomes live. \"\"\"\n self.channel = TwitchChannel(stream_data[\"channel\"])\n self.game = stream_data[\"game\"]\n self.viewers = stream_data[\"viewers\"]\n\n def __clear_attributes(self):\n \"\"\" Clears all of the attributes for this Stream when it is offline. 
\"\"\"\n self.channel = None\n\n def update(self, response, callback, index):\n \"\"\"\n Re-implemented from `Stream`.\n Checks if the status has changed.\n If the new status is live, then call `__parse_stream_data()`\n If the new status is offline, then call `__clear_attributes()`\n \"\"\"\n stream_data = response.json[\"stream\"]\n new_status = 1 ^ (not stream_data)\n if new_status != self.status:\n # Something changed!\n self.update_status(new_status)\n if new_status == STATUS_ONLINE:\n self.__parse_stream_data(stream_data)\n else:\n self.__clear_attributes()\n super(TwitchStream, self).update(callback, index)\n\n def get_viewers(self):\n return self.viewers \n\nclass TwitchChannel(object):\n \"\"\"\n A `Channel` provides additional graphical data for a `Stream`.\n \"\"\"\n\n def __init__(self, channel_data):\n self.name = channel_data[\"name\"]\n self.display_name = channel_data[\"display_name\"]\n self.game = channel_data[\"game\"]\n self.banner = channel_data[\"banner\"]\n self.logo = channel_data[\"logo\"]\n self.status = channel_data[\"status\"]\n self.created_at = channel_data[\"created_at\"]\n self.updated_at = channel_data[\"updated_at\"]\n\n","sub_path":"streaman/api/twitch.py","file_name":"twitch.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"120895595","text":"#!/usr/bin/env python3\n\nimport OrigemRegistro\nimport OrigemRegistroDAO\n\norigemRegistro = OrigemRegistro.OrigemRegistro()\norigemRegistroDAO = OrigemRegistroDAO.OrigemRegistroDAO()\n\nprint(\"Content-type: text/html\")\nprint(\"\")\nprint(\n\"\"\"\n\n\n\t\n\t\t\n\t\n\t\n\t\t

Origens dos Registros!!!

\n\t\tNova

\n\"\"\")\n\nfor origemRegistro in origemRegistroDAO.getLista():\n\t\n\tprint(\\\n\t\t\"\t\t{}
\".format(\\\n\t\t\torigemRegistro.getCOD_ORIGEM_REGISTRO(),\\\n\t\t\torigemRegistro.getORIGEM_REGISTRO(),\\\n\t\t))\n\nprint(\n\"\"\"\n\n\n\"\"\")\n","sub_path":"cgi-bin/origensRegistro (cópia).py","file_name":"origensRegistro (cópia).py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"238873687","text":"import scipy as sp\n\neps = 1e-9\n\ndef lambdaF(a,k):\n return lambda x,y: sp.tan(a*x+y) - a*x*y-0.3\n\ndef lambdaFDerivativeList(a,k): #return [f'x, f'y]\n return [lambda x,y: a*(1/(sp.cos(a*x+y)**2)-y), lambda x,y: 1/(sp.cos(a*x+y)**2)-a*x]\n\ndef lambdaG(a,k):\n return lambda x,y: x**2 + y**2 - k\n\ndef lambdaGDerivativeList(a,k): #return [g'x, g'y]\n return [lambda x,y: 2*x, lambda x,y: 2*y]\n\ndef getDeterminantList(f,g,a,k):\n fx,fy = lambdaFDerivativeList(a,k)\n gx,gy = lambdaGDerivativeList(a,k)\n \n det = lambda x,y: fx(x,y) * gy(x,y) - gx(x,y) * fy(x,y)\n detx = lambda x,y: g(x,y) * fy(x,y) - f(x,y) * gy(x,y)\n dety = lambda x,y: f(x,y) * gx(x,y) - g(x,y) * fx(x,y)\n \n return [det,detx,dety]\n\ndef findRoots(a,k,xarray,yarray):\n f = lambdaF(a,k)\n g = lambdaG(a,k)\n \n #find determinant functions\n det,detx,dety = getDeterminantList(f,g,a,k)\n \n minx, maxx = xarray\n miny, maxy = yarray\n \n stepx, stepy = (maxx - minx)*1e-2, (maxy-miny)*1e-2\n# roots = []\n \n y = miny\n for j in xrange(100):\n x = minx\n for i in xrange(100):\n if abs(f(x,y)) <= 0.1 and abs(g(x,y)) <= 0.1:\n #root resolver\n return approximateRoot(f,g,det,detx,dety,x,y,eps)\n x+=stepx\n y+=stepy\n \n# return roots \n \ndef approximateRoot(f,g,det,detx,dety,x0,y0,eps):\n x,y = x0, y0\n deltax, deltay = detx(x0,y0)/det(x0,y0), dety(x0,y0)/det(x0,y0)\n count = 0\n \n print('iteration step = '+str(count))\n print(' x = '+str(x)+' y = '+str(y)+' f(x,y) = '+str(f(x,y))+' g(x,y) = '+str(g(x,y))+'\\n')\n while abs(deltax) > eps and abs(deltay) > eps:\n count+=1\n x += deltax\n y += deltay\n 
current_det = det(x,y)\n deltax, deltay = detx(x,y)/current_det, dety(x,y)/current_det\n print('iteration step = '+str(count))\n print(' x = '+str(x)+' y = '+str(y)+' f(x,y) = '+str(f(x,y))+' g(x,y) = '+str(g(x,y))+'\\n')\n return (count,x,y) \n \n\nif __name__ == '__main__':\n inputValues=[[-1.2,-1.0],\n [-1.0,1.3],\n [-0.8,1.7],\n [-0.6,1.9],\n [-0.4,2.1]]\n for a,k in inputValues:\n print('\\na = '+str(a)+'; k = '+str(k)+':')\n roots = []\n roots.append(findRoots(a, k, [-2,0],[-2,0]))\n roots.append(findRoots(a, k, [-2,0],[0,2]))\n roots.append(findRoots(a, k, [0,2],[0,2]))\n roots.append(findRoots(a, k, [0,2],[-2,0]))\n if roots:\n for root in roots:\n if root:\n count, x, y = root\n print('all iteration steps = '+str(count))\n print('result: x = '+str(x)+'; y = '+str(y)+'\\n') \n else:\n print('\\nNo roots\\n')\n","sub_path":"SystemOfEquations/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"298483823","text":"#The Riddler Classic 2018-02-16: Fast Dodgeball Duel\n#Monte-Carlo simulation of optimal strategies\n\nimport random\n\nNsim = 1000000 #number of simulations\ncounts = [0]*3\n\n#accuracies\na = 1\nb = 0.8\nc = 0.5\n\nfor N in range(Nsim):\n\n #1 if alive, 0 if eliminated\n players = [1]*3\n\n while(sum(players) > 1):\n #generate throw successes\n throw = [random.random() < a, \\\n random.random() < b, \\\n random.random() < c]\n \n if (sum(players) == 3):\n\n #resolve a and b\n if (throw[0] and throw[1]): #a or b randomly lose\n players[random.randint(0, 1)] = 0\n elif (throw[0]): #b lose\n players[1] = 0\n elif (throw[1]): #a lose\n players[0] = 0\n\n #resolve c\n if (throw[2]): #a lose\n players[0] = 0\n \n elif (sum(players) == 2):\n \n #indices of alive players\n indices = [i for i, x in enumerate(players) if x == 1]\n i1 = indices[0]\n i2 = indices[1]\n \n if (throw[i1] and throw[i2]): #1 or 2 randomly lose\n 
players[random.sample(indices, 1)[0]] = 0\n elif (throw[i1]): #2 lose\n players[i2] = 0\n elif (throw[i2]): #1 lose\n players[i1] = 0\n \n #increment winner tally\n counts[players.index(1)] += 1\n\nprint('Winning probabilities of [a, b, c]')\nprint([count/Nsim for count in counts])\n","sub_path":"classic/fast_dodgeball_duel.py","file_name":"fast_dodgeball_duel.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"492818397","text":"#!/Users/rajanchaudhari/anaconda/bin/python\n# This script filters cns restraints if residue positions are 100 residues apart\n#USAGE: python filter_restraints.py \nfrom __future__ import print_function\nfrom math import fabs\nimport sys\ncns_file=sys.argv[1]\nresfile=open(\"inter_filtered.tbl\", 'w')\nwith open(cns_file, 'r') as infile:\n\tfor line in infile:\n\t\tres1=int(line.split()[3])\n\t\tres2=int(line.split()[8])\n\t\tif fabs(res2-res1)<100:\n\t\t\tprint(line.strip(), file=resfile)\n\t\t\t\t\t","sub_path":"Improving_HM/filter_restraints.py","file_name":"filter_restraints.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"224532392","text":"from collections import defaultdict\nimport random\nimport numpy as np\nfrom copy import deepcopy\n\n\nclass DynaQAgent:\n def __init__(self, alpha, epsilon, discount, n_steps, get_legal_actions, is_array=False):\n self.get_legal_actions = get_legal_actions\n self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))\n self._memoryModel = []\n self.alpha = alpha\n self.epsilon = epsilon\n self.discount = discount\n self.n_steps = n_steps\n self.is_array = is_array\n self.states = {}\n\n def array_to_int(self, state):\n if self.is_array:\n for name, value in self.states.items():\n if np.array_equal(value, state):\n return name\n self.states[str(len(self.states.keys()))] = deepcopy(state)\n return 
str(len(self.states.keys()) - 1)\n else:\n return state\n\n def get_qvalue(self, state, action):\n return self._qvalues[state][action]\n\n def set_qvalue(self, state, action, value):\n self._qvalues[state][action] = value\n\n def get_value(self, state):\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return 0.0\n if len(possible_actions) == 0:\n return 0.0\n\n value = max(self.get_qvalue(state, action) for action in possible_actions)\n\n return value\n\n def play(self, env, t_max=10**4):\n total_reward = 0.0\n state = env.reset()\n for t in range(t_max):\n a = self.get_action(state)\n next_state, r, done, _ = env.step(a)\n self.update(state, a, r, next_state)\n state = next_state\n total_reward += r\n if done:\n break\n return total_reward\n\n\n def update(self, state, action, reward, next_state):\n alpha = self.alpha\n gamma = self.discount\n q_update = self.get_qvalue(self.array_to_int(state), action) + alpha * \\\n (reward + gamma * self.get_value(self.array_to_int(next_state)) -\n self.get_qvalue(self.array_to_int(state), action))\n self.set_qvalue(self.array_to_int(state), action, q_update)\n self._memoryModel.append((self.array_to_int(state), action, reward, self.array_to_int(next_state)))\n self.search()\n\n def search(self):\n n_steps = self.n_steps\n alpha = self.alpha\n gamma = self.discount\n for _ in range(n_steps):\n state, action, reward, next_state = random.choice(self._memoryModel)\n\n q_update = self.get_qvalue(state, action) + alpha * (\n reward + gamma * self.get_value(next_state) - self.get_qvalue(state, action))\n self.set_qvalue(state, action, q_update)\n\n def get_best_action(self, state):\n\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n array_qvalues = [self.get_qvalue(self.array_to_int(state), action) for action in possible_actions]\n return 
possible_actions[array_qvalues.index(max(array_qvalues))]\n\n def get_action(self, state):\n # Pick Action\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n # agent parameters:\n epsilon = self.epsilon\n\n if random.random() > epsilon:\n chosen_action = self.get_best_action(state)\n else:\n chosen_action = random.choice(possible_actions)\n\n return chosen_action\n","sub_path":"agents/dyna_q.py","file_name":"dyna_q.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"308036086","text":"# -*- coding: utf-8 -*-\n\"\"\"A redis reader plugin with builtin redis client.\"\"\"\nimport sys\nimport time\nimport socket\nfrom collections import deque\nfrom collections import defaultdict\n\nimport plumd\nfrom plumd.util import Differential\n\n__author__ = 'Kenny Freeman'\n__email__ = 'kenny.freeman@gmail.com'\n__license__ = \"ISCL\"\n__docformat__ = 'reStructuredText'\n\nPY3 = sys.version_info > (3,)\n\n\nclass RedisError(Exception):\n \"\"\"A generic Redis error.\"\"\"\n\n pass\n\n\nclass RedisClient(object):\n \"\"\"A minimal Redis client.\"\"\"\n\n def __init__(self, log, addr, sfamily, timeout):\n \"\"\"A minimal Redis client.\n\n :param log: A logger created from loggging.getLogger\n :type log: logging.RootLogger\n :param addr: Either a tuple of ('host', port) or a path to a unix socket\n :type addr: str or tuple(str, int)\n :param sfamily: The socket family eg. 
socket.AF_INET or AF_UNIX\n :type sfamily: int\n :param timeout: The timeout in seconds for all socket operations\n :type timeout: float or int\n \"\"\"\n self.log = log\n self.net = RedisNet(log, addr, sfamily, timeout)\n\n def info(self, section=None):\n \"\"\"Return redis info.\n\n :param section: The info section to request from Redis\n :type section: str\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: dict\n \"\"\"\n ret = {}\n section = \"all\" if section is None else section\n self.net.send(\"INFO {0}\\r\\n\".format(section))\n for info_str in RedisResponse(self.net):\n for line in info_str.split(\"\\n\"):\n if not line or line[0] == \"#\" or line == '\\r':\n continue\n if line.find(\":\") >= 0:\n key, val = line.split(\":\")\n ret[key] = val\n return ret\n\n def config_get_multi(self, globs):\n \"\"\"Return redis config.\n\n :param globs: An containing glob search strings\n :type globs: iterable\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: dict\n \"\"\"\n for glob_str in globs:\n self.net.send(\"CONFIG GET {0}\\r\\n\".format(glob_str))\n vals = [val.strip() for val in RedisResponse(self.net)]\n yield dict(zip(vals[0::2], vals[1::2]))\n\n def scan(self, prefix, count=None):\n \"\"\"Return a deque of key names that match the requested prefix.\n\n :param prefix: The key prefix/glob to search for\n :type prefix: str\n :param count: The number of keys to request on each iteration\n :type count: int\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: deque\n \"\"\"\n scan_cmd = \"scan {0} match {1} count {2}\\r\\n\"\n count = 10 if count is None else count\n cursor = 0\n while True:\n # send scan request\n self.net.send(scan_cmd.format(cursor, prefix, count))\n # response is the next cursor followed by a list of matches\n resp = RedisResponse(self.net)\n 
cursor = int(next(resp))\n for key in resp:\n yield key.strip()\n if cursor == 0:\n break\n\n def get_multi(self, keys):\n \"\"\"Return the total value of all matching keys.\n\n :param keys: The iterable of keys to return the total for.\n :type keys: iterable\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :raise ValueError: when a key doesn't cast to int\n :rtype: int\n \"\"\"\n get_cmd = \"get {0}\\r\\n\"\n total = 0\n for key in keys:\n # send scan request\n self.net.send(get_cmd.format(key))\n # response should just be an int\n resp = RedisResponse(self.net)\n total += int(next(resp))\n return total\n\n def llen_multi(self, keys):\n \"\"\"Return the total length of each key provided.\n\n :param keys: The iterable of keys to return the total length of.\n :type keys: iterable\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: int\n \"\"\"\n llen_cmd = \"llen {0}\\r\\n\"\n total = 0\n for key in keys:\n # send scan request\n self.net.send(llen_cmd.format(key))\n # response should just be an int\n resp = RedisResponse(self.net)\n total += int(next(resp))\n return total\n\n def zcard_multi(self, keys):\n \"\"\"Return the total cardinality of each key provided.\n\n :param keys: The iterable of keys to return the total cardinality of.\n :type keys: iterable\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: int\n \"\"\"\n zcard_cmd = \"zcard {0}\\r\\n\"\n total = 0\n for key in keys:\n # send scan request\n self.net.send(zcard_cmd.format(key))\n # response should just be an int\n resp = RedisResponse(self.net)\n total += int(next(resp))\n return total\n\n def scard_multi(self, keys):\n \"\"\"Return the total cardinality of each key provided.\n\n :param keys: The iterable of keys to return the total cardinality of.\n :type keys: iterable\n :raises RedisError: for any socket 
related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: int\n \"\"\"\n scard_cmd = \"scard {0}\\r\\n\"\n total = 0\n for key in keys:\n # send scan request\n self.net.send(scard_cmd.format(key))\n # response should just be an int\n resp = RedisResponse(self.net)\n total += int(next(resp))\n return total\n\n def pfcount_multi(self, keys):\n \"\"\"Return the total cardinality of each key provided.\n\n :param keys: The iterable of keys to return the total cardinality of.\n :type keys: iterable\n :raises RedisError: for any socket related exceptions\n :raises RedisError: for unexpcted server responses\n :rtype: int\n \"\"\"\n pfcount_cmd = \"pfcount {0}\\r\\n\"\n total = 0\n for key in keys:\n # send scan request\n self.net.send(pfcount_cmd.format(key))\n # response should just be an int\n resp = RedisResponse(self.net)\n total += int(next(resp))\n return total\n\n\nclass RedisResponse(object):\n \"\"\"An iterable of redis command responses.\"\"\"\n\n def __init__(self, reader):\n \"\"\"An iterable of redis command responses.\n\n :param reader: A RedisNet reader instance\n :type reader: RedisNet\n\n :raises RedisError: for any socket related errors\n :raises RedisError: for any unknown responses\n :raises RedisError: for any Redis Errors returned\n :raises RedisError: for any ValueErrors encountered when casting\n \"\"\"\n self.reader = reader\n # handlers consume responses and add them to self.vals\n self.func = defaultdict(lambda: RedisResponse.h_unknown)\n self.func[\"*\"] = lambda buff: self.parse(int(buff))\n self.func[\"+\"] = lambda buff: self.vals.append(str(buff))\n # remove the \\r from the string\n self.func[\"$\"] = lambda buff: \\\n self.vals.append(self.reader.readnbytes(int(buff) + 2))\n self.func[\":\"] = lambda buff: self.vals.append(int(buff))\n self.func[\"-\"] = RedisResponse.h_error\n self.vals = deque()\n self.parse()\n\n def parse(self, nitems=None):\n \"\"\"Read the full response from self.sock.\n\n :raises RedisError: 
for any socket related Exceptions\n :raises RedisError: for any unknown types read\n :raises RedisError: for any redis protocol errors\n :rtype: varies\n \"\"\"\n nitems = 1 if nitems is None else nitems\n for i in xrange(nitems):\n try:\n buff = self.reader.readline()\n self.func[buff[0]](buff[1:])\n except (ValueError, IndexError) as exc:\n msg = \"could not parse response: {0}: {1}\"\n raise RedisError(msg.format(buff, exc))\n\n @staticmethod\n def h_unknown(buff):\n \"\"\"Uknown response handler.\n\n :param buff: A response buffer read from Redis\n :type buff: str\n :raises RedisError: this function always raises a RedisError\n \"\"\"\n raise RedisError(\"unknown command: {0}\".format(buff))\n\n @staticmethod\n def h_error(buff):\n \"\"\"Raise a RedisError with unknown command buff.\n\n :param buff: A response buffer read from Redis\n :type buff: str\n :raises RedisError: on any socket related exceptions\n :rtype: str\n \"\"\"\n msg = \"RedisResponse: h_error({0})\"\n raise RedisError(msg.format(buff))\n\n def __iter__(self):\n \"\"\"A Redis command response iterator.\n\n :rtype: iterator\n \"\"\"\n return self\n\n def __next__(self):\n \"\"\"Return the next response, if any.\n\n :rtype: object\n \"\"\"\n if not self.vals:\n raise StopIteration()\n return self.vals.popleft()\n\n def next(self):\n \"\"\"Return the next response, if any.\n\n :rtype: object\n \"\"\"\n if not self.vals:\n raise StopIteration()\n return self.vals.popleft()\n\n\nclass RedisNet(object):\n \"\"\"A helper class that talks to Redis on a unix/tcp socket.\"\"\"\n\n BUFF_LEN = 8192\n\n def __init__(self, log, addr, sfamily, timeout):\n \"\"\"A helper class that talks to Redis on a unix/tcp socket.\n\n :param log: A logger created from loggging.getLogger\n :type log: logging.RootLogger\n :param addr: Either a tuple of ('host', port) or a path to a unix socket\n :type addr: str or tuple(str, int)\n :param sfamily: The socket family eg. 
socket.AF_INET or AF_UNIX\n :type sfamily: int\n :param timeout: The timeout in seconds for all socket operations\n :type timeout: float or int\n \"\"\"\n self.log = log\n # addr can be unix socket or (host, port) tuple\n self.addr = addr\n # socket.AF_INET or socket.AF_UNIX\n self.sfamily = sfamily\n # all socket operations timeout\n self.timeout = timeout\n self.sock = None\n # read from our socket into this buffer\n # keep an index in the buffer that we've read up to\n # and record the total number of bytes in the buffer\n self.buff = \"\"\n self.buff_end = -1\n self.buff_i = -1\n\n def connect(self):\n \"\"\"Connect to Redis.\n\n :raises RedisError: for any socket related exceptions\n :rtype: Exception or None\n \"\"\"\n if self.sock:\n self.disconnect()\n try:\n # create the socket\n self.sock = socket.socket(self.sfamily, socket.SOCK_STREAM)\n # set timeout for socket operations\n self.sock.settimeout(self.timeout)\n self.sock.connect(self.addr)\n msg = \"RedisNet: connected: {0}:{1}\"\n self.log.info(msg.format(self.addr[0], self.addr[1]))\n except Exception as exc:\n msg = \"RedisNet: Exception during connect: {0}\"\n self.log.error(msg.format(exc))\n raise RedisError(msg.format(exc))\n return True\n\n def disconnect(self):\n \"\"\"Disconnect from Redis.\n\n :raises RedisError: for any socket related exceptions\n \"\"\"\n self.log.debug(\"RedisNet: disconnect\")\n if self.sock:\n try:\n self.sock.close()\n self.sock = None\n except Exception as exc:\n msg = \"RedisNet: exception during disconnect: {0}\"\n self.log.error(msg.format(exc))\n raise RedisError(msg.format(exc))\n\n def read(self):\n \"\"\"Read RedisNet.BUFF_LEN bytes from our socket into self.buff.\n\n Calls here overwrite self.buff and reset self.buff_i and\n self.buff_end.\n\n :raises RedisError: for any socket related exceptions\n \"\"\"\n if not self.sock and not self.connect():\n msg = \"RedisNet: unable to connect to: {0}\"\n raise RedisError(msg.format(self.addr))\n\n try:\n self.buff = 
self.sock.recv(RedisNet.BUFF_LEN)\n self.buff_end = len(self.buff)\n self.buff_i = 0\n except Exception as exc:\n msg = \"RedisNet: Exception during readline: {0}\"\n self.log.error(msg.format(exc))\n self.disconnect()\n raise RedisError(msg.format(exc))\n\n def recv(self, nbytes):\n \"\"\"Read nbytes from our socket and return it.\n\n :param nbytes: The number of bytes to read\n :type nbytes: int\n :raises RedisError: for any socket related exceptions\n :rytpe: str\n \"\"\"\n if not self.sock and not self.connect():\n msg = \"RedisNet: unable to connect to: {0}\"\n raise RedisError(msg.format(self.addr))\n\n ret = \"\"\n try:\n ret = self.sock.recv(nbytes)\n except Exception as exc:\n msg = \"RedisNet: Exception during recv: {0}\"\n self.log.error(msg.format(exc))\n self.disconnect()\n raise RedisError(msg.format(exc))\n return ret\n\n def readline(self):\n \"\"\"Get the next available line.\n\n :raises RedisError: for any socket related exceptions\n :rytpe: str\n \"\"\"\n if not self.sock and not self.connect():\n msg = \"RedisNet: unable to connect to: {0}\"\n raise RedisError(msg.format(self.addr))\n\n buffs = deque()\n while True:\n # do we have any data available?\n if self.buff_end < 0 or self.buff_i >= self.buff_end:\n # read data, reset buffer state\n while self.buff_end < 1:\n self.read()\n # now we have data, do we have a newline?\n i = self.buff[self.buff_i:].find(\"\\n\")\n if i > -1:\n # return line, advance buffer past it\n # move i past the newline\n # also need to find\n buff_i = self.buff_i\n buffs.append(self.buff[buff_i:buff_i+i])\n # advance beyond i\n self.buff_i = buff_i + i + 1\n # reset if we have no buffer left\n if self.buff_i >= self.buff_end:\n self.buff_i = -1\n self.buff_end = -1\n break\n # no newline yet, record and keep reading\n buffs.append(self.buff[self.buff_i:])\n self.buff_end = -1\n self.buff_i = -1\n ret = \"\".join(buffs)\n return ret\n\n def readnbytes(self, nbytes):\n \"\"\"Read nbytes from our socket.\n\n :param nbytes: 
The number of bytes to read\n :type nbytes: int\n :raises RedisError: for any socket related exceptions\n :rytpe: str\n \"\"\"\n\n # any bytes in our buffer?\n ret = \"\"\n buffs = deque()\n if self.buff_end and self.buff_i < self.buff_end:\n # do we have enough buffer to fullfill the request?\n nbytes_left = self.buff_end - self.buff_i\n if nbytes_left >= nbytes:\n # yes, advance our pointer\n buffi = self.buff_i\n buffs.append(self.buff[buffi:buffi+nbytes])\n self.buff_i += nbytes\n nbytes = 0\n else:\n # no, consume all of the buffer and then get remaining\n buffs.append(self.buff[self.buff_i:])\n # reset so next access on buffer forces a read\n self.buff_i = -1\n self.buff_end = -1\n nbytes -= nbytes_left\n # do we need more bytes?\n if nbytes:\n # just do a recv - don't use our buffer\n buffs.append(self.recv(nbytes))\n\n # join the buffers\n ret = \"\".join(buffs)\n return ret\n\n def send(self, cmd):\n \"\"\"Send the supplied string to the redis server.\n\n :param cmd: The string to send to the redis server\n :type cmd: str\n :raises RedisError: for any socket related exceptions\n \"\"\"\n if not self.sock and not self.connect():\n msg = \"RedisNet: unable to connect to: {0}\"\n raise RedisError(msg.format(self.addr))\n # send info request\n try:\n self.sock.sendall(cmd)\n except Exception as exc:\n msg = \"RedisNet: exception sending to server: {0}\"\n self.log.error(msg.format(exc))\n self.disconnect()\n raise RedisError(msg.format(exc))\n\n\nclass Redis(plumd.Reader):\n \"\"\"Plugin to record redis metrics.\"\"\"\n\n # default config values\n defaults = {\n 'poll.interval': 10,\n 'gauges': [\n \"aof_current_rewrite_time_sec\",\n \"aof_enabled\",\n \"aof_last_rewrite_time_sec\",\n \"aof_rewrite_in_progress\",\n \"aof_rewrite_scheduled\",\n \"blocked_clients\",\n \"client_biggest_input_buf\",\n \"client_longest_output_list\",\n \"connected_clients\",\n \"connected_slaves\",\n \"evicted_keys\",\n \"expired_keys\",\n \"instantaneous_input_kbps\",\n 
\"instantaneous_ops_per_sec\",\n \"instantaneous_output_kbps\",\n \"keyspace_hits\",\n \"keyspace_misses\",\n \"latest_fork_usec\",\n \"loading\",\n \"master_repl_offset\",\n \"mem_fragmentation_ratio\",\n \"pubsub_channels\",\n \"pubsub_patterns\",\n \"rdb_bgsave_in_progress\",\n \"rdb_changes_since_last_save\",\n \"rdb_current_bgsave_time_sec\",\n \"rdb_last_bgsave_time_sec\",\n \"rdb_last_save_time\",\n \"rejected_connections\",\n \"repl_backlog_active\",\n \"repl_backlog_first_byte_offset\",\n \"repl_backlog_histlen\",\n \"repl_backlog_size\",\n \"sync_full\",\n \"sync_partial_err\",\n \"sync_partial_ok\",\n \"total_commands_processed\",\n \"total_connections_received\",\n \"total_net_input_bytes\",\n \"total_net_output_bytes\",\n \"uptime_in_days\",\n \"uptime_in_seconds\",\n \"used_cpu_sys\",\n \"used_cpu_sys_children\",\n \"used_cpu_user\",\n \"used_cpu_user_children\",\n \"used_memory\",\n \"used_memory_lua\",\n \"used_memory_peak\",\n \"used_memory_rss\",\n \"master_last_io_seconds_ago\",\n \"master_sync_in_progress\",\n \"slave_repl_offset\",\n \"slave_priority\",\n \"slave_read_only\",\n \"connected_slaves\",\n \"master_repl_offset\",\n \"repl_backlog_active\",\n \"repl_backlog_size\",\n \"repl_backlog_first_byte_offset\",\n \"repl_backlog_histlen\"\n \"connected_slaves\"\n ],\n 'rates': [],\n 'configs': [\n 'maxmemory'\n ],\n 'keys': {\n # 'type': { metric_prefix: [key_prefix*, ...] 
}\n 'lists': {},\n 'zsets': {},\n 'sets': {},\n 'hlls': {}\n },\n 'addr': '127.0.0.1:6379',\n 'addr_type': 'inet',\n 'timeout': 10\n }\n\n def __init__(self, log, config):\n \"\"\"Plugin to record redis metrics.\n\n :param log: A logger\n :type log: logging.RootLogger\n :param config: a plumd.config.Conf configuration helper instance.\n :type config: plumd.config.Conf\n \"\"\"\n super(Redis, self).__init__(log, config)\n self.config.defaults(Redis.defaults)\n\n # metrics to record\n self.gauges = self.config.get('gauges')\n self.rates = self.config.get('rates')\n self.configs = self.config.get('configs')\n self.keys = self.config.get('keys')\n\n # Redis connection - either unix socket or tcp\n addr = self.config.get('addr')\n addr_type = self.config.get('addr_type').lower()\n if addr_type == \"unix\":\n sfamily = socket.AF_UNIX\n elif addr_type == \"inet\":\n try:\n host, port = addr.split(\":\")\n except AttributeError:\n msg = \"Redis: invalid address: {0}, (host:port)\"\n raise plumd.ConfigError(msg.format(addr))\n addr = (host, int(port))\n sfamily = socket.AF_INET\n else:\n msg = \"Redis: unsupported connection type: {0} (unix, inet)\"\n raise plumd.ConfigError(msg.format(addr_type))\n timeout = config.get('timeout')\n self.client = RedisClient(self.log, addr, sfamily, timeout)\n self.calc = Differential()\n\n def poll(self):\n \"\"\"Query Redis for metrics.\n\n :rtype: ResultSet\n \"\"\"\n # catch exceptions - simply skip the poll on error\n try:\n result = plumd.Result(\"redis\")\n\n # config values\n self.record_configs(result)\n\n # key sizes\n self.record_sizes(result)\n\n # get server metrics\n stats = self.client.info()\n\n # record gauges, rates\n self.record_metrics(stats, result)\n\n # replication, if any slaves are connected\n if \"slave0\" in stats:\n self.record_slaves(stats, result)\n\n # db metrics, maxmem\n self.record_dbs(stats, result)\n\n # record lists, zsets, sets and hll sizes\n self.record_sizes(result)\n\n # and finally command stats - 
if available\n self.record_cmdstats(result)\n\n except RedisError as exc:\n msg = \"Redis: exception during poll: {0}\"\n self.log.error(msg.format(exc))\n return plumd.ResultSet([result])\n\n def record_cmdstats(self, result):\n \"\"\"Record the stats from info commandstats.\n\n :param result: A result object to add metrics to\n :type result: ResultSet\n \"\"\"\n name = self.name\n infos = self.client.info(\"commandstats\")\n for key in sorted(infos.keys()):\n vals = infos[key].split(\",\")\n cstat, cname = key.split(\"_\")\n for val in vals:\n mname, mval = val.split(\"=\")\n metric = \"{0}.{1}.{2}.{3}\".format(name, cstat, cname, mname)\n result.add(plumd.Float(metric, mval))\n\n def record_metrics(self, stats, result):\n \"\"\"Record the configured gauges and metrics.\n\n :param stats: Dictionary returned from info command\n :type stats: dict\n :param result: A result object to add metrics to\n :type result: ResultSet\n \"\"\"\n timest = time.time()\n name = self.name\n\n # record gauges\n for stat in self.gauges:\n if stat in stats:\n mname = \"{0}.{1}\".format(name, stat)\n result.add(plumd.Float(mname, stats[stat]))\n\n # record rates\n for stat in self.rates:\n if stat in stats:\n mname = \"{0}.{1}\".format(name, stat)\n mval = self.calc.per_second(mname, float(stats[stat]), timest)\n result.add(plumd.Float(mname, mval))\n\n def record_dbs(self, stats, result):\n \"\"\"Record per database metrics into result.\n\n :param stats: Dictionary returned from info command\n :type stats: dict\n :param result: A result object to add metrics to\n :type result: ResultSet\n \"\"\"\n # db0:keys=1,expires=0,avg_ttl=0\n name = self.name\n db_fmt = \"db{0}\"\n metric_fmt = \"{0}.db.{1}.{2}\"\n\n for i in xrange(0, len(stats.keys())):\n dbname = db_fmt.format(i)\n if dbname not in stats:\n break\n try:\n vals = stats[dbname].split(\",\")\n dbmetrics = dict((k, v)\n for k, v in (v.split('=') for v in vals))\n for key, val in dbmetrics.items():\n metric_str = 
metric_fmt.format(name, i, key)\n result.add(plumd.Int(metric_str, val))\n except KeyError:\n self.log.error(\"Redis: invalid db entry: {0}\".format(dbname))\n\n def record_slaves(self, stats, result):\n \"\"\"Record slave metrics into result.\n\n :param stats: A dictionary returned from info command\n :type stats: dict\n :param result: A ResultSet object to add metrics to\n :type result: ResultSet\n \"\"\"\n # slave0:ip=127.0.0.1,port=6399,state=online,offset=239,lag=1\n name = self.name\n slave_str = \"slave{0}\"\n moffstr = 'master_repl_offset'\n moffset = 0\n try:\n moffset = int(stats[moffstr])\n except(TypeError, KeyError):\n self.log.error(\"Redis: no {0} value\".format(moffstr))\n\n # for each slave entry\n for i in xrange(0, len(stats.keys())):\n sname = slave_str.format(i)\n if sname not in stats:\n break\n try:\n vals = stats[sname].split(\",\")\n smetrics = dict((k, v)\n for k, v in (v.split('=') for v in vals))\n sip = smetrics['ip'].replace(\".\", \"_\")\n smname = \"{0}_{1}\".format(sip, smetrics['port'])\n\n # record offset and lag\n mname = \"{0}.slave.{1}.offset\".format(name, smname)\n soffset = moffset - int(smetrics['offset'])\n result.add(plumd.Int(mname, soffset))\n mname = \"{0}.slave.{1}.lag\".format(name, sname)\n result.add(plumd.Int(mname, smetrics['lag']))\n\n # if slave is online set online = 1, otherwise 0\n sonline = 1 if smetrics['state'] == \"online\" else 0\n mname = \"{0}.slave.{1}.online\".format(name, sname)\n result.add(plumd.Int(mname, sonline))\n except(TypeError, KeyError, ValueError):\n self.log.error(\"Redis: invalid slave entry: {0}\".format(sname))\n\n def record_configs(self, result):\n \"\"\"Record the configured configuration values.\n\n :param result: A ResultSet to record max mem to.\n :type result: plumd.ResultSet\n \"\"\"\n configs = self.configs\n if not configs:\n return\n name = self.name\n for config in self.client.config_get_multi(configs):\n for key, val in config.items():\n mstr = 
\"{0}.configs.{1}\".format(name, key)\n result.add(plumd.Float(mstr, val))\n\n def record_sizes(self, result):\n \"\"\"Record the total sizes of the configured keys.\n\n For each type of key (list, zset, set, hyperloglog) record\n total number of items for all keys.\n\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n if not self.keys:\n return\n keys = self.config.get(\"keys\")\n if \"strings\" in keys:\n self.record_strings(keys['strings'], result)\n if \"lists\" in keys:\n self.record_lists(keys['lists'], result)\n if \"zsets\" in keys:\n self.record_zsets(keys['zsets'], result)\n if \"sets\" in keys:\n self.record_sets(keys['sets'], result)\n if \"hlls\" in keys:\n self.record_hlls(keys['hlls'], result)\n\n def record_strings(self, lconfig, result):\n \"\"\"Record the total size of the configured string keys.\n\n eg. lconfig: {\"metric_name\": [ \"list\", \"of\", \"keys\"]}\n\n :param lconfig: A dict of metric name => list of key names\n :type lconfig: dict\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n name = self.name\n for mprefix, keys in lconfig.items():\n total = 0\n # get the total for this prefix\n try:\n total = self.client.get_multi(keys)\n except RedisError as exc:\n msg = \"ERROR: redis: record_strings: are {0} strings? {1}\"\n print(msg.format(mprefix, exc))\n else:\n mstr = \"{0}.sizes.strings.{1}\".format(name, mprefix)\n result.add(plumd.Int(mstr, total))\n\n def record_lists(self, lconfig, result):\n \"\"\"Record the total length of the configured lists.\n\n eg. 
lconfig: {\"metric_name\": [ \"list\", \"of\", \"keys\"]}\n\n :param lconfig: A dict of metric name => list of key names\n :type lconfig: dict\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n name = self.name\n for mprefix, keys in lconfig.items():\n total = 0\n # get the total for this prefix\n try:\n total = self.client.llen_multi(keys)\n except RedisError as exc:\n msg = \"ERROR: redis: record_lists: are {0} lists? {1}\"\n print(msg.format(mprefix, exc))\n mstr = \"{0}.sizes.lists.{1}\".format(name, mprefix)\n result.add(plumd.Int(mstr, total))\n\n def record_zsets(self, zconfig, result):\n \"\"\"Record the total length of the configured zsets.\n\n eg. zconfig: {\"metric_name\": [ \"list\", \"of\", \"keys\"]}\n\n :param zconfig: A dict of metric name => list of key names\n :type zconfig: dict\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n name = self.name\n for mprefix, keys in zconfig.items():\n # get the total for this prefix\n try:\n total = self.client.zcard_multi(keys)\n except RedisError as exc:\n msg = \"ERROR: redis: record_zsets: are {0} zsets? {1}\"\n print(msg.format(mprefix, exc))\n else:\n mstr = \"{0}.sizes.zset.{1}\".format(name, mprefix)\n result.add(plumd.Int(mstr, total))\n\n def record_sets(self, sconfig, result):\n \"\"\"Record the total length of the configured sets.\n\n eg. sconfig: {\"metric_name\": [ \"list\", \"of\", \"keys\"]}\n\n :param sconfig: A dict of metric name => list of key names\n :type sconfig: dict\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n name = self.name\n for mprefix, keys in sconfig.items():\n # get the total for this prefix\n try:\n total = self.client.scard_multi(keys)\n except RedisError as exc:\n msg = \"ERROR: redis: record_sets: are {0} sets? 
{1}\"\n print(msg.format(mprefix, exc))\n else:\n mstr = \"{0}.sizes.set.{1}\".format(name, mprefix)\n result.add(plumd.Int(mstr, total))\n\n def record_hlls(self, hllconfig, result):\n \"\"\"Record the total length of the configured hlls.\n\n eg. sconfig: {\"metric_name\": [ \"list\", \"of\", \"keys\"]}\n\n :param hllconfig: A dict of metric name => list of key names\n :type hllconfig: dict\n :param result: A ResultSet to record into.\n :type result: plumd.ResultSet\n \"\"\"\n name = self.name\n for mprefix, keys in hllconfig.items():\n # get the total for this prefix\n try:\n total = self.client.pfcount_multi(keys)\n except RedisError as exc:\n msg = \"ERROR: redis: record_hlls: are {0} hyperloglogs? {1}\"\n print(msg.format(mprefix, exc))\n else:\n mstr = \"{0}.sizes.hll.{1}\".format(name, mprefix)\n result.add(plumd.Int(mstr, total))\n","sub_path":"plumd/readers/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":32052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"608538576","text":"matrix1 = [[1, 3, 5], [2, 4, 6]]\nmatrix2 = [[5, 2, 4], [1, 0, 4]]\nmatrix1_lst = []\nmatrix2_lst = []\nnew_lst = []\nfor lst in matrix1:\n for num in lst:\n matrix1_lst.append(num)\nfor lst in matrix2:\n for num in lst:\n matrix2_lst.append(num)\nmatrix1_len = len(matrix1_lst)\nfor num in range(matrix1_len):\n product = matrix1_lst[num] + matrix2_lst[num]\n new_lst.append(product)\nresult_matrix_row1 = new_lst[:len(new_lst)//2]\nresult_matrix_row2 = new_lst[len(new_lst)//2:]\nprint(result_matrix_row1)\nprint(result_matrix_row2)","sub_path":"ds_matrix_add.py","file_name":"ds_matrix_add.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"71230075","text":"from lxml import etree\n\nroot_element = etree.Element(\"books\",interesting=\"totally\")\nele01 
=etree.SubElement(root_element,\"book\",name=\"name01\",price=\"32.23\")\nprint(etree.tostring(root_element, encoding=\"utf-8\", pretty_print=True).decode(\"utf-8\"))\n\n\n\"\"\"\nbooksElement = document.addElement(\"books\");\n/ ** 加入一行注释 * /\nbooksElement.addComment(\"This is a test for dom4j, holen, 2004.9.11\");\n/ ** 加入第一个book节点 * /\nElement\nbookElement = booksElement.addElement(\"book\");\n/ ** 加入show属性内容 * /\nbookElement.addAttribute(\"show\", \"yes\");\n/ ** 加入title节点 * /\nElement\ntitleElement = bookElement.addElement(\"title\");\n/ ** 为title设置内容 * /\ntitleElement.setText(\"Dom4j Tutorials\");\n\n/ ** 类似的完成后两个book * /\nbookElement = booksElement.addElement(\"book\");\nbookElement.addAttribute(\"show\", \"yes\");\ntitleElement = bookElement.addElement(\"title\");\ntitleElement.setText(\"Lucene Studing\");\nbookElement = booksElement.addElement(\"book\");\nbookElement.addAttribute(\"show\", \"no\");\ntitleElement = bookElement.addElement(\"title\");\ntitleElement.setText(\"Lucene in Action\");\n\n\"\"\"","sub_path":"ex06_xml/x002.py","file_name":"x002.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"22759798","text":"#!/usr/bin/python\n\nimport sys\nimport re\nimport csv\nimport traceback\nimport operator\n\nimport argparse\n\nimport gotolong.cutil.cutil\n\nfrom gotolong.database.database import *\n\n\nclass Gweight(Database):\n def __init__(self):\n super(Gweight, self).__init__()\n self.gweight_captype_dict = {}\n self.gweight_table_truncate = False\n self.gweight_table_name = \"global_weight\"\n self.gweight_table_dict = {\n \"cap_type\": \"text\",\n \"cap_weight\": \"int\"\n }\n self.debug_level = 0\n\n def set_debug_level(self, debug_level):\n self.debug_level = debug_level\n\n def gweight_table_reload(self, truncate=False):\n self.gweight_table_truncate = truncate\n\n def gweight_table_create(self):\n # dump the sql for creation of table\n create_sql = 
gotolong.cutil.cutil.get_create_sql(self.gweight_table_name, self.gweight_table_dict)\n if self.debug_level > 0:\n print(create_sql)\n\n def gweight_load_row(self, row):\n try:\n row_list = row\n if len(row_list) == 0:\n print('ignored empty row', row_list)\n return\n\n cap_type = row_list[0]\n cap_weight = row_list[1]\n\n self.gweight_captype_dict[cap_type] = cap_weight\n\n if self.debug_level > 1:\n print('cop_type : ', cap_type, '\\n')\n print('cap_weight : ', cap_weight, '\\n')\n\n except IndexError:\n print('except ', row)\n except:\n print('except ', row)\n traceback.print_exc()\n\n def gweight_load_data(self, in_filename):\n table = self.gweight_table_name\n\n if self.gweight_table_truncate:\n self.db_table_truncate(table)\n\n row_count = self.db_table_count_rows(table)\n if row_count == 0:\n self.gweight_insert_data(in_filename)\n else:\n print('gweight data already loaded in db', row_count)\n print('display db data')\n self.gweight_load_data_from_db()\n\n def gweight_get_insert_row(self, line, row_bank):\n\n # split on comma\n row_list = line.split(',')\n\n if self.debug_level > 1:\n print('row_list', row_list)\n print('len row_list', len(row_list))\n\n (cap_type, cap_weight) = row_list\n\n if cap_type == 'Cap Type' or cap_weight == \"Cap Weight\":\n if self.debug_level > 0:\n print('skipped header line', row_list)\n return\n\n # remove any un-required stuff\n new_row = (cap_type, cap_weight)\n row_bank.append(new_row)\n\n def gweight_insert_data(self, in_filename):\n insert_sql = gotolong.cutil.cutil.get_insert_sql(self.gweight_table_name, self.gweight_table_dict)\n\n cursor = self.db_conn.cursor()\n with open(in_filename, 'rt') as csvfile:\n # insert row\n row_bank = []\n for line in csvfile:\n self.gweight_get_insert_row(line, row_bank)\n print('loaded gweight : ', len(row_bank))\n # insert row\n cursor.executemany(insert_sql, row_bank)\n # commit db changes\n self.db_conn.commit()\n\n def gweight_load_data_from_db(self):\n table_name = 
self.gweight_table_name\n cursor = self.db_table_load(table_name)\n for row in cursor.fetchall():\n if self.debug_level > 1:\n print(row)\n self.gweight_load_row(row)\n\n def gweight_dump_report_full(self, out_filename):\n\n fh = open(out_filename, \"w\")\n fh.write('cap_type, cap_weight\\n')\n for cap_type, cap_weight in self.gweight_captype_dict.items():\n p_str = str(cap_type)\n p_str += ', '\n p_str += str(cap_weight)\n p_str += '\\n'\n fh.write(p_str);\n fh.close()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Process arguments')\n # dest= not required as option itself is the destination in args\n parser.add_argument('-l', '--log_level', default='INFO', help='DEBUG|INFO|WARNING|ERROR|CRITICAL', type=str,\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n parser.add_argument('-d', '--debug_level', default='0', help='debug level 0|1|2|3', type=int,\n choices=[0, 1, 2, 3])\n parser.add_argument('-t', '--truncate_table', default='False', help='specify to truncate', action='store_true')\n parser.add_argument('-i', '--in_files', required=True, nargs='+', dest='in_files', help='in files')\n parser.add_argument('-o', '--out_files', required=True, nargs='+', dest='out_files', help='out files')\n\n args = parser.parse_args()\n\n debug_level = args.debug_level\n truncate_table = args.truncate_table\n\n # dummy assignment\n in_filename_phase = []\n out_filename_phase = []\n # use the argument as pattern\n for index, filename in enumerate(args.in_files):\n print('index = ', index, filename);\n in_filename_phase.append(filename)\n\n for index, filename in enumerate(args.out_files):\n print('index = ', index, filename);\n out_filename_phase.append(filename)\n\n # Main caller\n program_name = sys.argv[0]\n\n if len(sys.argv) < 4:\n print(\"usage: \" + program_name + \" ... 
\")\n sys.exit(1)\n\n if debug_level > 1:\n print('args :', len(sys.argv))\n\n gweight = Gweight()\n\n gweight.set_debug_level(debug_level)\n\n if truncate_table:\n gweight.gweight_table_reload(truncate_table)\n\n gweight.gweight_load_data(in_filename_phase[0])\n\n gweight.gweight_dump_report_full(out_filename_phase[0])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gotolong/gweight/gweight.py","file_name":"gweight.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"609924164","text":"# *_*coding:utf-8 *_*\n\"\"\"\nAuthor:szj\nDescri:\n\"\"\"\nimport sys, os\nfrom pathlib import Path\n\nROOT = Path(os.path.abspath(__file__)).parent.parent\nsys.path.append(str(ROOT))\nimport traceback\n\nimport click\nfrom critical_investing import mysql\nfrom tenacity import retry, wait_fixed, wait_random\n\nfrom settings import files\nfrom settings.sqls import tushare_fina_indicator, tushare_income\n\nimport tushare as ts\nimport datetime\nfrom random import choice\nfrom sqlitedict import SqliteDict\nfrom settings.envs import *\nimport doger\nlogger = doger.guru(LOG_LEVEL, __file__)\nfrom settings.envs import CONFIG\n\ntoken_list = CONFIG[\"tushare_tokens\"]\ntoken = choice(token_list) # 随机选择一个token\npro = ts.pro_api(token)\n\ntoday = datetime.datetime.now().strftime(\"%Y%m%d\")\nyesterday = (datetime.datetime.now() + datetime.timedelta(days=-1)).strftime(\"%Y%m%d\")\n\n\"\"\"\n输入参数\n\n名称\t类型\t必选\t描述\nts_code\tstr\tY\t股票代码\nann_date\tstr\tN\t公告日期\nstart_date\tstr\tN\t公告开始日期\nend_date\tstr\tN\t公告结束日期\nperiod\tstr\tN\t报告期(每个季度最后一天的日期,比如20171231表示年报)\nreport_type\tstr\tN\t报告类型: 参考下表说明\ncomp_type\tstr\tN\t公司类型:1一般工商业 2银行 3保险 4证券\n输出参数\n\n名称\t类型\t默认显示\t描述\nts_code\tstr\tY\tTS代码\nann_date\tstr\tY\t公告日期\nf_ann_date\tstr\tY\t实际公告日期\nend_date\tstr\tY\t报告期\nreport_type\tstr\tY\t报告类型 1合并报表 2单季合并 3调整单季合并表 4调整合并报表 5调整前合并报表 6母公司报表 7母公司单季表 8 母公司调整单季表 9母公司调整表 10母公司调整前报表 11调整前合并报表 
12母公司调整前报表\ncomp_type\tstr\tY\t公司类型(1一般工商业2银行3保险4证券)\nbasic_eps\tfloat\tY\t基本每股收益\ndiluted_eps\tfloat\tY\t稀释每股收益\ntotal_revenue\tfloat\tY\t营业总收入\nrevenue\tfloat\tY\t营业收入\nint_income\tfloat\tY\t利息收入\nprem_earned\tfloat\tY\t已赚保费\ncomm_income\tfloat\tY\t手续费及佣金收入\nn_commis_income\tfloat\tY\t手续费及佣金净收入\nn_oth_income\tfloat\tY\t其他经营净收益\nn_oth_b_income\tfloat\tY\t加:其他业务净收益\nprem_income\tfloat\tY\t保险业务收入\nout_prem\tfloat\tY\t减:分出保费\nune_prem_reser\tfloat\tY\t提取未到期责任准备金\nreins_income\tfloat\tY\t其中:分保费收入\nn_sec_tb_income\tfloat\tY\t代理买卖证券业务净收入\nn_sec_uw_income\tfloat\tY\t证券承销业务净收入\nn_asset_mg_income\tfloat\tY\t受托客户资产管理业务净收入\noth_b_income\tfloat\tY\t其他业务收入\nfv_value_chg_gain\tfloat\tY\t加:公允价值变动净收益\ninvest_income\tfloat\tY\t加:投资净收益\nass_invest_income\tfloat\tY\t其中:对联营企业和合营企业的投资收益\nforex_gain\tfloat\tY\t加:汇兑净收益\ntotal_cogs\tfloat\tY\t营业总成本\noper_cost\tfloat\tY\t减:营业成本\nint_exp\tfloat\tY\t减:利息支出\ncomm_exp\tfloat\tY\t减:手续费及佣金支出\nbiz_tax_surchg\tfloat\tY\t减:营业税金及附加\nsell_exp\tfloat\tY\t减:销售费用\nadmin_exp\tfloat\tY\t减:管理费用\nfin_exp\tfloat\tY\t减:财务费用\nassets_impair_loss\tfloat\tY\t减:资产减值损失\nprem_refund\tfloat\tY\t退保金\ncompens_payout\tfloat\tY\t赔付总支出\nreser_insur_liab\tfloat\tY\t提取保险责任准备金\ndiv_payt\tfloat\tY\t保户红利支出\nreins_exp\tfloat\tY\t分保费用\noper_exp\tfloat\tY\t营业支出\ncompens_payout_refu\tfloat\tY\t减:摊回赔付支出\ninsur_reser_refu\tfloat\tY\t减:摊回保险责任准备金\nreins_cost_refund\tfloat\tY\t减:摊回分保费用\nother_bus_cost\tfloat\tY\t其他业务成本\noperate_profit\tfloat\tY\t营业利润\nnon_oper_income\tfloat\tY\t加:营业外收入\nnon_oper_exp\tfloat\tY\t减:营业外支出\nnca_disploss\tfloat\tY\t其中:减:非流动资产处置净损失\ntotal_profit\tfloat\tY\t利润总额\nincome_tax\tfloat\tY\t所得税费用\nn_income\tfloat\tY\t净利润(含少数股东损益)\nn_income_attr_p\tfloat\tY\t净利润(不含少数股东损益)\nminority_gain\tfloat\tY\t少数股东损益\noth_compr_income\tfloat\tY\t其他综合收益\nt_compr_income\tfloat\tY\t综合收益总额\ncompr_inc_attr_p\tfloat\tY\t归属于母公司(或股东)的综合收益总额\ncompr_inc_attr_m_s\tfloat\tY\t归属于少数股东的综合收益总额\nebit\tfloat\tY\t息税前利润\nebitda\tfloat\tY\t息税折旧摊销前利润\ninsurance_exp\tfloat\tY\t保险业务支出\nundist_profit\tfl
oat\tY\t年初未分配利润\ndistable_profit\tfloat\tY\t可分配利润\nupdate_flag\tstr\tN\t更新标识,0未修改1更正过\n主要报表类型说明\n\n代码\t类型\t说明\n1\t合并报表\t上市公司最新报表(默认)\n2\t单季合并\t单一季度的合并报表\n3\t调整单季合并表\t调整后的单季合并报表(如果有)\n4\t调整合并报表\t本年度公布上年同期的财务报表数据,报告期为上年度\n5\t调整前合并报表\t数据发生变更,将原数据进行保留,即调整前的原数据\n6\t母公司报表\t该公司母公司的财务报表数据\n7\t母公司单季表\t母公司的单季度表\n8\t母公司调整单季表\t母公司调整后的单季表\n9\t母公司调整表\t该公司母公司的本年度公布上年同期的财务报表数据\n10\t母公司调整前报表\t母公司调整之前的原始财务报表数据\n11\t调整前合并报表\t调整之前合并报表原数据\n12\t母公司调整前报表\t母公司报表发生变更前保留的原数据\n\"\"\"\n\n\n@retry(wait=wait_fixed(3) + wait_random(3, 5))\ndef get_stock_income(stock):\n token = choice(token_list) # 随机选择一个token\n pro = ts.pro_api(token)\n # 获取单只股票的利润表数据\n df = pro.income(ts_code=stock, start_date=\"20130101\", end_date=today)\n # time.sleep(np.random.randint(0, 3))\n return df\n\n\n@click.command()\n@click.option(\n \"-t\",\n \"--table\",\n default=tushare_income.TABLE,\n show_default=True,\n type=str,\n help=\"需要插入数据的表名.\",\n)\n@click.option(\n \"-s\",\n \"--start_from_scratch\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Whether or not start from scratch, 1 represent True and 0 is False.\",\n)\ndef run(table, start_from_scratch):\n if start_from_scratch: # bool 是否从头开始爬,False则从上次结束的地方开始\n dictionarydb[SCRAPYED_ITEMS_KEYNAME] = []\n try:\n # 获取A股上市股票列表\n stock_basic = pro.query(\n \"stock_basic\",\n exchange=\"\",\n list_status=\"L\",\n fields=\"ts_code,symbol,name,fullname,enname,area,industry,market,exchange,is_hs,list_date\",\n )\n\n scrapyed_items = dictionarydb.get(SCRAPYED_ITEMS_KEYNAME, []) # 记录处理过的股票数据\n\n # 遍历股票\n for i, row in stock_basic.iterrows():\n stock = row[\"ts_code\"]\n # 如果股票没有爬取过,则进行处理\n if not stock in scrapyed_items:\n logger.info(f'第{i}只股票:{stock}')\n # 描述:获取上市公司财务指标数据,为避免服务器压力,现阶段每次请求最多返回60条记录,可通过设置日期多次请求获取更多数据。\n df = get_stock_income(stock) # 获取股票财务指标数据\n if df.empty: # 如果财务数据为空,则不进行下一步\n logger.info(\"该股票没有利润数据:\", row)\n else:\n col_name = df.columns.tolist()\n col_name.insert(col_name.index(\"ts_code\"), \"id\") # 在 ts_code 列前面插入\n df = 
df.reindex(columns=col_name)\n df[\"id\"] = df[\"ts_code\"].map(str) + df[\"end_date\"].map(str)\n\n cols = df.columns.tolist()\n mysql.insert_df(table=table, df=df, cols=cols)\n\n # 把保存到数据库的股票代码记录一下\n scrapyed_items.append(stock)\n dictionarydb[SCRAPYED_ITEMS_KEYNAME] = scrapyed_items\n else:\n pass # 如果股票保存过,则跳过\n logger.info(\"A股所有利润数据爬取完成!\")\n dictionarydb[SCRAPYED_ITEMS_KEYNAME] = [] # 把爬取过的股票清空,下次继续从头爬取\n except:\n error = traceback.format_exc()\n sys.stderr.write(error)\n logger.info(\"Token:\", token)\n\n\nif __name__ == \"__main__\":\n dictionarydb = SqliteDict(\n files.SQLITE_DICT, tablename=\"tushare\", autocommit=True\n ) # access the db\n\n TUSHARE_INCOME = tushare_income.TABLE\n SCRAPYED_ITEMS_KEYNAME = TUSHARE_INCOME # 存放爬取过的股票代码的字典名\n\n run()\n","sub_path":"investing/stock_income.py","file_name":"stock_income.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"47794177","text":"import os\nimport sys\nimport threading\n\nfrom cocos.layer import Layer\nfrom cocos.sprite import Sprite\nfrom cocos.text import Label\n\nfrom pyglet.image import load\n\n\ndef get_cwd() -> str:\n try:\n return sys._MEIPASS\n except AttributeError:\n return os.path.abspath(\".\")\n\n\nclass MessageDisplay(Layer):\n def __init__(self):\n super(MessageDisplay, self).__init__()\n\n self.message = Label(\"\", position=(610, 50), anchor_x='center', anchor_y='center')\n self.add(self.message)\n\n self.show_message = []\n self.shown_msg_ind = None\n\n self.alert_msg = Label(\"\", position=(640, 360), anchor_x='center', anchor_y='center')\n self.add(self.alert_msg)\n\n def add_to_msg_list(self, x_min, x_max, y_min, y_max, msg):\n min_x = min(x_min, x_max)\n max_x = max(x_min, x_max)\n min_y = min(y_min, y_max)\n max_y = max(y_min, y_max)\n self.show_message.append(\n {\n 'x_min': min_x,\n 'x_max': max_x,\n 'y_min': min_y,\n 'y_max': max_y,\n 'msg': msg\n }\n )\n\n def update_msg(self, 
message: str) -> None:\n self.message.element.text = message\n\n def alert(self, message: str, timeout: float=2.0, x: int=640, y: int=360) -> None:\n # Display a message temporarily\n if self.alert_msg.element.text == \"\":\n # Only alert if other alert is not already present\n self.alert_msg.element.text = message\n self.alert_msg.position = (x, y)\n threading.Timer(timeout, self.clear_alert).start()\n\n def clear_alert(self):\n self.alert_msg.element.text = \"\"\n\n def check_location(self, pos: (float, float)) -> int:\n x = int(pos[0])\n y = int(pos[1])\n\n message = None\n i = None\n for ind, check in enumerate(self.show_message):\n if check[\"x_min\"] <= x <= check[\"x_max\"] and check[\"y_min\"] <= y <= check[\"y_max\"]:\n message = check['msg']\n i = ind\n\n if message:\n self.update_msg(message)\n else:\n self.update_msg(\"\")\n\n self.shown_msg_ind = i\n\n return i\n\n\nclass EventDisplay(MessageDisplay):\n def __init__(self):\n super(EventDisplay, self).__init__()\n\n self.pending_action = None\n\n def add_to_msg_list(self, x_min: int, x_max: int, y_min: int, y_max: int, msg: str, func=None) -> None:\n super(EventDisplay, self).add_to_msg_list(x_min, x_max, y_min, y_max, msg)\n self.show_message[-1]['action'] = func\n\n def check_location(self, pos: (float, float)) -> int:\n ind = super(EventDisplay, self).check_location(pos)\n if self.message.element.text == \"\":\n self.pending_action = None\n else:\n self.pending_action = self.show_message[ind]['action']\n\n return ind\n\n\nclass HUD(EventDisplay):\n def __init__(self):\n super(HUD, self).__init__()\n\n self.piece_count_display = Label(\"0\", position=(1260, 20))\n self.add(self.piece_count_display)\n\n piece_img = load(os.path.join(get_cwd(), \"assets\", \"art\", \"piece.png\"))\n piece_sprite = Sprite(piece_img, position=(1250, 20), scale=0.1)\n self.add(piece_sprite)\n\n def add_to_inventory(self, inventory: list, img: str, name: str) -> None:\n x = 20 + (45 * (len(inventory) - 1))\n img_img = 
load(os.path.join(get_cwd(), img))\n item_sprite = Sprite(img_img, position=(x, 40), scale=0.1)\n self.add(item_sprite)\n self.add(Label(name, position=(x, 5)))\n","sub_path":"smallentists/MessageDisplay.py","file_name":"MessageDisplay.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"228119879","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 17 10:25:58 2020\n\n@author: hanyl\n\"\"\"\nfrom collections import namedtuple\nfrom random import sample\n\nfrom Sentence import posGather\nfrom FilterDev import EntityInD\n\n__all__ = [\n \"evaluation_sen\",\n \"evaluation_sens\",\n \"causeFN\",\n \"stat_enum\"\n]\n\n'====evaluation===='\ndef evaluation_sen(sen_target, sen_pred):\n confusion = {}\n se_target = set( (entity.start, entity.end) for entity in sen_target.entities)\n se_pred = set( (entity.start, entity.end) for entity in sen_pred.entities)\n confusion['tp'] = [ (entity, sen_target)\n for entity in sen_target.entities\n if (entity.start, entity.end) in se_pred\n ]\n confusion['fn'] = [ (entity, sen_pred)\n for entity in sen_target.entities\n if not (entity.start, entity.end) in se_pred\n ]\n confusion['fp'] = [ (entity, sen_target, sen_pred)\n for entity in sen_pred.entities\n if not (entity.start, entity.end) in se_target\n ]\n confusion['tn'] = []\n return confusion\n\ndef evaluation_sens(sens_target, sens_pred, bl_stat = True):\n confusion = {'tp':[],\n 'fn':[],\n 'fp':[],\n 'tn':[]}\n for sen_target, sen_pred in zip(sens_target, sens_pred):\n sen_conf = evaluation_sen(sen_target, sen_pred)\n for key in confusion.keys():\n confusion[key].extend(sen_conf[key])\n if bl_stat:\n evaluation_stat(confusion)\n return confusion\n\n'=====分析====='\ndef evaluation_stat(confusion):\n tps = len(confusion['tp'])\n fns = len(confusion['fn'])\n fps = len(confusion['fp'])\n tns = len(confusion['tn'])\n print(' '*6 + 'Pred-Pos | Pred-Neg | Total')\n print('Pos {:8} 
{:8} {:8}'.format(tps, fns, tps+fns)) \n print('Neg {:8} {:8} {:8}'.format(fps, tns, fps+tns))\n print('Total {:8} {:8} {:8}'.format(tps+fps, fns+tns, tps+fns+fps+tns))\n print('{:.1%}'.format(tps/(tps+fns)))\n print('{:.1%}'.format(tps/(tps+fps)))\n\n# def overlapFN(confusion):\n# ofn = {'ppAA':[],\n# 'pApA':[],\n# 'pAAp':[],\n# 'AppA':[],\n# 'ApAp':[],\n# 'pAAp':[]}\n# overlaps, keys = _overlap()\n# for key in keys:\n# ofn[keys]\n# return ofn\n\n\n\nentityFN = namedtuple('entityFN', ['Entity', 'Sentence', 'pos', 'cA', 'Ac', 'FalsePOS', 'len4', 'len27', 'InDict', 'Other'])\ndef causeFN(confusion, myfilter, bl_pure = True):\n count_truth = len(confusion['tp']) + len(confusion['fn'])\n entitiesFN = []\n for item in confusion['fn']:\n entity = item[0]\n sentence = item[1]\n _, pos = posGather(sentence.POS_char[entity.start:entity.end]) \n # -A\n b3 = (entity.start - 1 >= 0 and sentence.text[entity.start-1] != ' ')\n b4 = (entity.end + 1 < len(sentence.text) and sentence.text[entity.end] not in ' .,')\n b5 = pos not in myfilter.sifter.list_pos\n b6 = len(entity.mention) < myfilter.sifter.min_length\n b6_2 = len(entity.mention) >= myfilter.sifter.max_length\n b7 = EntityInD(entity.mention, pos) in myfilter.dictLookuper.dict.keys() and \\\n myfilter.dictLookuper.dict[EntityInD(entity.mention, pos)] == 'COMMON'\n b8 = not any([b3, b4, b5, b6, b7])\n entitiesFN.append(entityFN(entity, sentence, pos, b3, b4, b5, b6, b6_2, b7, b8))\n records = []\n for i in range(3,len(entitiesFN[0])):\n records.append((entityFN._fields[i], sum([item[i] for item in entitiesFN]), sum([item[i] for item in entitiesFN])/count_truth))\n for record in records: \n print('{:>10} {:4} {:.1%}'.format(*record))\n if bl_pure:\n print('Stat of Pure Causes for FN:')\n stat_pureFN(entitiesFN)\n return entitiesFN\n\ndef stat_pureFN(entitiesFN):\n msg ='{:>10} {:4}'\n total = 0\n for key in entityFN._fields[3:]:\n sum_key = sum([ entityfn._asdict()[key] for entityfn in entitiesFN if sum(entityfn[3:]) == 
1])\n print(msg.format(key,sum_key))\n total += sum_key\n print(msg.format('Total', total))\n\ndef stat_enum(entitiesFN, myfilter, num_samples = 20, key = 'Ac'):\n records = []\n for index, entityfn in enumerate(entitiesFN):\n if entityfn._asdict()[key] and sum(entityfn[3:]) == 1:\n records.append((index, entityfn))\n for index, entityfn in sample(records, min(len(records), num_samples)):\n entity = entityfn.Entity\n sentence = entityfn.Sentence\n space_counts = 5\n sli = slice(max(entity.start - space_counts, 0), min(entity.end + space_counts, len(sentence.text)))\n print(index)\n print(entity)\n if key in ['FalsePOS']:\n print(entityfn.pos)\n print(sentence.POS_char[sli])\n if key in ['Ac','cA']:\n print((' '*min(space_counts, entity.start)+'{}').format(entity.mention))\n print(sentence.text[sli])\n if key in ['InDict']:\n print(EntityInD(entity.mention, entityfn.pos),\n myfilter.dictLookuper.dict[EntityInD(entity.mention, entityfn.pos)])\n if key in ['len4', 'len27']:\n print('len:{}'.format(entity.end - entity.start))\n","sub_path":"Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"637284178","text":"import sqlite3\r\nconn = sqlite3.connect(\"Employee.db\")\r\ncursor = conn.cursor()\r\n\r\n# a) Create a Table Employee which will have the four columns - Name, ID, salary, and Department_id.\r\n\r\ncursor.execute(\"DROP TABLE IF EXISTS EMPLOYEE\")\r\n\r\ncursor.execute(\"\"\"CREATE TABLE EMPLOYEE (NAME TEXT, ID INTEGER,SALARY INTEGER,DEPARTMENT_ID INTEGER)\"\"\")\r\n\r\nprint(\"Table created\")\r\n\r\n# b) Add a new column ‘City’ to the Table Employee.\r\n\r\ncursor.execute(\"ALTER TABLE EMPLOYEE ADD CITY TEXT\")\r\n\r\nprint(\"Table altered\")\r\n\r\nconn.commit()\r\n\r\n# c) Insert 5 records into this table.\r\nfor i in range(0, 5):\r\n employee_det = (input(\"Enter the ID of Employee:\"), input('Enter the Name of the 
Employee:'),\r\n input(\"Enter the Salary of the Employee:\"), input(\"Enter the Department_Id of the employee:\"))\r\n cursor.execute(\"\"\"INSERT INTO EMPLOYEE(ID,Name,salary,Department_id)VALUES(?,?,?,?)\"\"\", employee_det)\r\n\r\ncursor.execute(\"\"\"SELECT rowid,* FROM EMPLOYEE\"\"\")\r\nresult = cursor.fetchall()\r\n\r\nprint(\"\\n ID,Name,salary And Department_id of the employee:\")\r\nfor item in result:\r\n print(item[0], item[1], item[2], item[3],item[4])\r\n\r\nconn.commit()\r\n# values = [('sandra', 101, 20000, 1, 'New York'), ('Peter', 102, 30000, 2, 'Washington'),\r\n# ('Sammy', 103, 35000, 2, 'Los Angles'),\r\n# ('Linda', 104, 25000, 3, 'california'), ('John', 105, 30000, 1, 'Georgia')]\r\n# cursor.executemany(\"INSERT INTO EMPLOYEE VALUES (?,?,?,?,?)\", values)\r\n# conn.commit()\r\n\r\n\r\n# d) Read the Name, ID, and Salary from the Employee table and print it\r\n\r\ndef employee_record():\r\n cursor.execute(\"SELECT NAME,ID,SALARY FROM EMPLOYEE\")\r\n\r\n record = cursor.fetchall()\r\n\r\n for row in record:\r\n print(\"\\nName:\", row[0])\r\n\r\n print(\"ID:\", row[1])\r\n\r\n print(\"Salary:\", row[2])\r\n\r\n\r\n# e) Print the details of employees whose names start with ‘j’ (or any letter input by the user)\r\n\r\ndef employee_details(e_name):\r\n cursor.execute(\"SELECT * FROM EMPLOYEE WHERE UPPER(NAME) LIKE '\" + e_name + \"%'\")\r\n\r\n result = cursor.fetchall()\r\n\r\n # print(result)\r\n\r\n if len(result) == 0:\r\n\r\n print(f\"No employee whose name starts with {e_name}\")\r\n\r\n else:\r\n\r\n print(f\"Employee details whose name starts with {e_name} are:\")\r\n\r\n for row in result:\r\n print(\"\\nName:\", row[0])\r\n\r\n print(\"ID:\", row[1])\r\n\r\n print(\"Salary:\", row[2])\r\n\r\n print(\"Department_id:\", row[3])\r\n\r\n print(\"City:\", row[4])\r\n\r\n\r\n# f)Print the details of employees with ID’s inputted by the user.\r\n\r\ndef employee_details_id(id):\r\n cursor.execute(\"SELECT * FROM EMPLOYEE WHERE ID =:e_id\", 
{\"e_id\": id})\r\n\r\n result = cursor.fetchall()\r\n\r\n print(f\"Employee details whose name starts with {id} are:\")\r\n\r\n for row in result:\r\n print(\"\\nName:\", row[0])\r\n\r\n print(\"ID:\", row[1])\r\n\r\n print(\"Salary:\", row[2])\r\n\r\n print(\"Department_id:\", row[3])\r\n\r\n print(\"City:\", row[4])\r\n\r\n\r\n# g)Change the name of the employee whose ID is input by the user.\r\n\r\ndef change_name(em_id, e_name):\r\n cursor.execute(\"SELECT NAME FROM EMPLOYEE WHERE ID =:id\", {\"id\": em_id})\r\n\r\n result = cursor.fetchone()\r\n\r\n print(\"Name of Employee before Update:\", result[0])\r\n\r\n cursor.execute(\"UPDATE EMPLOYEE SET NAME =:name WHERE ID =:id\", {\"name\": e_name, \"id\": em_id})\r\n\r\n print(\"Name changed\")\r\n\r\n cursor.execute(\"SELECT NAME FROM EMPLOYEE WHERE ID =:id\", {\"id\": em_id})\r\n\r\n result = cursor.fetchone()\r\n\r\n print(\"Name of Employee after Update:\", result[0])\r\n\r\n\r\nvar = True\r\n\r\nwhile var:\r\n\r\n print(\r\n\r\n \"\\n 1.Employee Record\\n 2..Employee details\\n 3.Employee details with ID\\n 4.To Change the Employee Name\\n \"\r\n\r\n \"5.exit\")\r\n\r\n ch = int(input(\"Enter the choice:\"))\r\n\r\n if ch == 1:\r\n employee_record()\r\n\r\n elif ch == 2:\r\n\r\n user_input = input(\"Enter the letter:\")\r\n employee_details(user_input.capitalize())\r\n\r\n elif ch == 3:\r\n\r\n user_input = int(input(\"Enter the ID:\"))\r\n employee_details_id(user_input)\r\n\r\n elif ch == 4:\r\n user_input = int(input(\"Enter the id of employee to change the name:\"))\r\n name = input(\"Enter the name you want:\")\r\n change_name(user_input, name.capitalize())\r\n\r\n elif ch == 5:\r\n var = False\r\n\r\nconn.commit()\r\nconn.close()\r\n","sub_path":"week4-DB-question1.py","file_name":"week4-DB-question1.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"130446152","text":"'''\nCreated on 4 Dec 
2010\n\n@author: pjl\n'''\n\nfrom simulation import *\nimport pygame \n\n#\n# Manual drive a car around a track\n#\n\n\nclass CursorControl:\n\n def process(self,sensor,state,dt):\n control=Control()\n keyinput = pygame.key.get_pressed()\n\n if keyinput[pg.K_LEFT]:\n control.left=1\n\n if keyinput[pg.K_RIGHT]:\n control.right=1\n\n if keyinput[pg.K_UP]:\n control.up=1\n\n if keyinput[pg.K_DOWN]:\n control.down=1\n if state.ang == 180 and state.x == 60:\n control.up=1\n elif state.y == 600 and 60 < state.x < 240:\n control.left =1\n \n return control\n\n\n\ndt =.1\nbrain = CursorControl()\nnSensors = 4\nsensorRange = 2000\npod = CarPod(nSensors,sensorRange,brain,(255,0,0))\n#pod = GravityPod(nSensors,sensorRange,brain,(255,0,0))\npods = [pod]\nworld = World(\"../rect_world.txt\",pods)\nsim = Simulation(world,dt)\n\n#uncomment the next line to hide the walls.\n\n\n#sim.world.blind=True\n#sim.frameskipfactor=10\n\n\nsim.run()\n","sub_path":"PythonPodSim/attic/broken/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"95265000","text":"# coding = UTF-8\n\n'''\nSimple linear regression using Tensorflow\n\ncode for python 3\n'''\n\nimport tensorflow as tf\n\n# training data\nx_data = [1, 2, 3]\ny_data = [1, 2, 3]\n\n'''\nTry to find values for W and b that compute y_data = W * x_data + b\n(We know that W should be 1 and b 0, but Tensorflow will figure that out for us.)\n'''\n\nW = tf.Variable(tf.random_uniform([1], -10.0, 10.0))\nb = tf.Variable(tf.random_uniform([1], -10.0, 10.0))\n\n# linear hypothesis\nhypothesis = W * x_data + b\n\n# simplified cost function\ncost = tf.reduce_mean(tf.square(hypothesis - y_data))\n\n# minimize\na = tf.Variable(0.1) # learning rate, alpha: size of one step\noptimizer = tf.train.GradientDescentOptimizer(a) # method of optimization\ntrain = optimizer.minimize(cost) # what to minimize\n\n# initialize the 
variables\ninit = tf.initialize_all_variables()\n\n# launch\nsess = tf.Session()\nsess.run(init)\n\n# line fitting steps\nfor step in range(1001):\n sess.run(train)\n if step % 50 == 0:\n print (step, sess.run(cost), sess.run(W), sess.run(b))\n","sub_path":"lab02-1.py","file_name":"lab02-1.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"338014779","text":"from .flesh import Flesh\n\n\nclass FastFlesh(Flesh):\n \"\"\"\n 此类的目的是通过字段数组和值数组快速生成一个Flesh类对象\n \"\"\"\n\n def __init__(self, fields=(), values=(), opt=None):\n dct = {}\n for i in range(len(fields)):\n dct[fields[i].name] = values[i] if opt is None else opt(values, i)\n Flesh.__init__(self, **dct)\n","sub_path":"longan_sqlite/fast_flesh.py","file_name":"fast_flesh.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"542352901","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 24 14:07:15 2018\r\n\r\n@author: Frantz\r\n\"\"\"\r\n\r\nclass Solution:\r\n def removeElement(self, nums, val):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type val: int\r\n :rtype: int\r\n \"\"\"\r\n if not nums:\r\n return 0\r\n j = 0\r\n for i in range(len(nums)):\r\n if nums[i] != val:\r\n nums[j] = nums[i]\r\n j += 1\r\n return j\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nnums = [0,0,1,1,1,2,2,3,3,4]\r\nval = 2\r\nsol = Solution()\r\nresult = sol.removeElement(nums, val)\r\nprint(result)\r\n\r\n","sub_path":"LeetCode/27Remove Element.py","file_name":"27Remove Element.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"292064659","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build/bdist.linux-x86_64/egg/eddington_core/print_util.py\n# Compiled at: 2020-04-04 11:50:41\n# Size of source mod 2**32: 446 bytes\nimport math\n\ndef to_relevant_precision(a):\n if a == 0:\n return (0, 0)\n precision = 0\n abs_a = math.fabs(a)\n while abs_a < 1.0:\n abs_a *= 10\n precision += 1\n\n if a < 0:\n return (\n -abs_a, precision)\n return (\n abs_a, precision)\n\n\ndef to_precise_string(a, n):\n new_a, precision = to_relevant_precision(a)\n if precision < 3:\n return f\"{a:.{n + precision}f}\"\n return f\"{new_a:.{n}f}e-0{precision}\"","sub_path":"pycfiles/eddington_core-0.0.4-py3.7/print_util.cpython-37.py","file_name":"print_util.cpython-37.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"623492238","text":"import itertools\n\nimport pytest\n\nimport light_the_torch as ltt\nfrom light_the_torch._pip.common import InternalLTTError\nfrom light_the_torch._pip.find import maybe_add_option\nfrom light_the_torch.computation_backend import ComputationBackend\n\n\n@pytest.fixture\ndef patch_extract_dists(mocker):\n def patch_extract_dists_(return_value=None):\n if return_value is None:\n return_value = []\n return mocker.patch(\n \"light_the_torch._pip.find.extract_dists\", return_value=return_value\n )\n\n return patch_extract_dists_\n\n\n@pytest.fixture\ndef patch_run(mocker):\n def patch_run_():\n return mocker.patch(\"light_the_torch._pip.find.run\")\n\n return patch_run_\n\n\n@pytest.fixture\ndef computation_backends():\n return (\"cpu\", \"cu92\", \"cu101\", \"cu102\")\n\n\n@pytest.fixture\ndef channels():\n return (\"stable\", \"test\", \"nightly\")\n\n\n@pytest.fixture\ndef platforms():\n return (\"linux_x86_64\", \"macosx_10_9_x86_64\", \"win_amd64\")\n\n\n@pytest.fixture\ndef python_versions():\n return (\"3.6\", \"3.7\", \"3.8\")\n\n\n@pytest.fixture\ndef wheel_properties(computation_backends, platforms, python_versions):\n properties = []\n for 
properties_ in itertools.product(\n computation_backends, platforms, python_versions\n ):\n # macOS binaries don't support CUDA\n computation_backend, platform, _ = properties_\n if platform.startswith(\"macosx\") and computation_backend != \"cpu\":\n continue\n\n properties.append(\n dict(\n zip((\"computation_backend\", \"platform\", \"python_version\"), properties_)\n )\n )\n return tuple(properties)\n\n\ndef test_maybe_add_option_already_set(subtests):\n args = [\"--foo\", \"bar\"]\n assert maybe_add_option(args, \"--foo\",) == args\n assert maybe_add_option(args, \"-f\", aliases=(\"--foo\",)) == args\n\n\ndef test_find_links_internal_error(patch_extract_dists, patch_run):\n patch_extract_dists()\n patch_run()\n\n with pytest.raises(InternalLTTError):\n ltt.find_links([])\n\n\ndef test_find_links_computation_backend_detect(mocker, patch_extract_dists, patch_run):\n class GenericComputationBackend(ComputationBackend):\n @property\n def local_specifier(self):\n return \"generic\"\n\n computation_backend = GenericComputationBackend()\n mocker.patch(\n \"light_the_torch._pip.find.detect_computation_backend\",\n return_value=computation_backend,\n )\n\n patch_extract_dists()\n run = patch_run()\n\n with pytest.raises(InternalLTTError):\n ltt.find_links([], computation_backend=None)\n\n args, _ = run.call_args\n cmd = args[0]\n assert cmd.computation_backend == computation_backend\n\n\ndef test_find_links_computation_backend_str(\n subtests, patch_extract_dists, patch_run, computation_backends\n):\n patch_extract_dists()\n run = patch_run()\n\n for computation_backend in computation_backends:\n with subtests.test(computation_backend=computation_backend):\n run.reset()\n\n with pytest.raises(InternalLTTError):\n ltt.find_links([], computation_backend=computation_backend)\n\n args, _ = run.call_args\n cmd = args[0]\n assert cmd.computation_backend == ComputationBackend.from_str(\n computation_backend\n )\n\n\ndef test_find_links_unknown_channel():\n with 
pytest.raises(ValueError):\n ltt.find_links([], channel=\"channel\")\n\n\ndef test_find_links_platform(subtests, patch_extract_dists, patch_run, platforms):\n patch_extract_dists()\n run = patch_run()\n\n for platform in platforms:\n with subtests.test(platform=platform):\n run.reset()\n\n with pytest.raises(InternalLTTError):\n ltt.find_links([], platform=platform)\n\n args, _ = run.call_args\n options = args[2]\n assert options.platform == platform\n\n\ndef test_find_links_python_version(\n subtests, patch_extract_dists, patch_run, python_versions\n):\n patch_extract_dists()\n run = patch_run()\n\n for python_version in python_versions:\n python_version_tuple = tuple(int(v) for v in python_version.split(\".\"))\n with subtests.test(python_version=python_version):\n run.reset()\n\n with pytest.raises(InternalLTTError):\n ltt.find_links([], python_version=python_version)\n\n args, _ = run.call_args\n options = args[2]\n assert options.python_version == python_version_tuple\n\n\n@pytest.mark.slow\ndef test_find_links_torch_smoke(subtests, wheel_properties):\n dist = \"torch\"\n\n for properties in wheel_properties:\n with subtests.test(**properties):\n assert ltt.find_links([dist], **properties)\n\n\n@pytest.mark.slow\ndef test_find_links_torchaudio_smoke(subtests, wheel_properties):\n dist = \"torchaudio\"\n\n for properties in wheel_properties:\n # torchaudio has no published releases for Windows\n if properties[\"platform\"].startswith(\"win\"):\n continue\n with subtests.test(**properties):\n a = ltt.find_links([dist], **properties)\n assert a\n\n\n@pytest.mark.slow\ndef test_find_links_torchtext_smoke(subtests, wheel_properties):\n dist = \"torchtext\"\n\n for properties in wheel_properties:\n with subtests.test(**properties):\n assert ltt.find_links([dist], **properties)\n\n\n@pytest.mark.slow\ndef test_find_links_torchvision_smoke(subtests, wheel_properties):\n dist = \"torchvision\"\n\n for properties in wheel_properties:\n with 
subtests.test(**properties):\n assert ltt.find_links([dist], **properties)\n\n\n@pytest.mark.slow\ndef test_find_links_torch_channel_smoke(subtests, channels):\n dist = \"torch\"\n\n for channel in channels:\n with subtests.test(channel=channel):\n assert ltt.find_links([dist], computation_backend=\"cpu\", channel=channel)\n","sub_path":"tests/unit/pip/test_find.py","file_name":"test_find.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"644905674","text":"import matplotlib as mpl\nmpl.use('Qt5Agg')\nimport argparse\nimport powder_tSNE\n\nif __name__ == '__main__':\n parser=argparse.ArgumentParser()\n parser.add_argument('csv',nargs = '*',)\n\n args = parser.parse_args()\n for f in args.csv:\n print(f)\n system = powder_tSNE.visualize()\n","sub_path":"learning scripts/powder_manifold.py","file_name":"powder_manifold.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"540388051","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nimport datetime\nfrom .models import (MoscowPythonMeetup, LearnPythonCourse, GraduateProjects,\n LearnPythonCoursePrices, Feedback, Curators, GraduateStories)\n\n\ndef index(request):\n '''Docstring testc'''\n template = loader.get_template('mainpage/index.html')\n\n # Course data\n # Fixes #3 LearnPythonCourse matching query does not exist.\n try:\n current_course = LearnPythonCourse.objects.latest('course_index')\n except LearnPythonCourse.DoesNotExist:\n current_course = LearnPythonCourse()\n\n online_prices = LearnPythonCoursePrices.objects.filter(\n course_type='Online').order_by('price_range_price')\n offline_prices = LearnPythonCoursePrices.objects.filter(\n course_type='Offline').order_by('price_range_price')\n\n # Student projects data\n student_projects = 
list(GraduateProjects.objects.all())\n\n # Program dates data\n course_day_2_date = LearnPythonCourse.objects.all()[:1].get().course_day_2\n course_day_3_date = LearnPythonCourse.objects.all()[:1].get().course_day_3\n course_day_4_date = LearnPythonCourse.objects.all()[:1].get().course_day_4\n course_day_5_date = LearnPythonCourse.objects.all()[:1].get().course_day_5\n course_day_6_date = LearnPythonCourse.objects.all()[:1].get().course_day_6\n course_day_7_date = LearnPythonCourse.objects.all()[:1].get().course_day_7\n course_day_8_date = LearnPythonCourse.objects.all()[:1].get().course_day_8\n course_day_9_date = LearnPythonCourse.objects.all()[:1].get().course_day_9\n\n # User stories\n graduate_stories_list = list(GraduateStories.objects.all())\n\n # Curators data\n curators_list = Curators.objects.filter(curator_status=True)\n\n # Feedback data\n student_feedback = list(Feedback.objects.all())\n\n # Meetup data\n # Fixes #3 MoscowPythonMeetup matching query does not exist.\n try:\n current_meetup = MoscowPythonMeetup.objects.latest('meetup_number')\n except MoscowPythonMeetup.DoesNotExist:\n current_meetup = MoscowPythonMeetup()\n\n context = {\n 'meetup': current_meetup,\n 'course': current_course,\n 'projects': student_projects,\n 'online_price_ranges': online_prices,\n 'offline_price_ranges': offline_prices,\n 'course_day_1': current_course.course_start_date.strftime('%d.%m'),\n 'course_day_2': course_day_2_date.strftime('%d.%m'),\n 'course_day_3': course_day_3_date.strftime('%d.%m'),\n 'course_day_4': course_day_4_date.strftime('%d.%m'),\n 'course_day_5': course_day_5_date.strftime('%d.%m'),\n 'course_day_6': course_day_6_date.strftime('%d.%m'),\n 'course_day_7': course_day_7_date.strftime('%d.%m'),\n 'course_day_8': course_day_8_date.strftime('%d.%m'),\n 'course_day_9': course_day_9_date.strftime('%d.%m'),\n 'course_day_10': current_course.course_end_date.strftime('%d.%m'),\n 'registration_closes_date': current_course.end_registration_date.strftime(\n '%b 
%d, %Y %H:%M:%S'\n ),\n 'student_feedback': student_feedback,\n 'curators_list': curators_list,\n 'graduate_stories': graduate_stories_list\n\n }\n return HttpResponse(template.render(context, request))\n\n\ndef online(request):\n return render(request, 'mainpage/page3759545.html')","sub_path":"landing_page/mainpage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"395843990","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n# method 2, based on method 1, but the edge cutting is much smarter\n# time O(n), space O(depth), and save forest (O(n))\n# idea: if a node has no parent and is not deleted, then add to the forest\n\n\nclass Solution2(object):\n def delNodes(self, root, to_delete):\n \"\"\"\n :type root: TreeNode\n :type to_delete: List[int]\n :rtype: List[TreeNode]\n \"\"\"\n to_delete = set(to_delete)\n forest = []\n self.delNodesHelper(None, root, to_delete, forest)\n return forest\n \n def delNodesHelper(self, parent, root, to_delete, forest):\n if not root:\n return None\n \n if root.val in to_delete: # this block of codes can be simplified to make code cleaner\n root.left = self.delNodesHelper(None, root.left, to_delete, forest)\n root.right = self.delNodesHelper(None, root.right, to_delete, forest)\n return None # important: this will cut connection of root with its parent, but not with its children\n else:\n if not parent:\n forest.append(root)\n root.left = self.delNodesHelper(root, root.left, to_delete, forest)\n root.right = self.delNodesHelper(root, root.right, to_delete, forest)\n return root\n\n\n# add candidates from parent level\nclass Solution1(object):\n def delNodes(self, root, to_delete):\n candidates = []\n to_delete = set(to_delete)\n if root and root.val not in to_delete:\n candidates.append(root)\n 
self.trim(None, root, to_delete, candidates)\n return candidates\n\n def trim(self, parent, root, to_delete, candidates):\n if not root:\n return\n if root.val not in to_delete:\n self.trim(root, root.left, to_delete, candidates)\n self.trim(root, root.right, to_delete, candidates)\n else:\n # store candidates\n if root.left and root.left.val not in to_delete:\n candidates.append(root.left)\n if root.right and root.right.val not in to_delete:\n candidates.append(root.right)\n # cut edge with parent\n if parent:\n if parent.left is root:\n parent.left = None\n else:\n parent.right = None\n # cut edge with children\n left = root.left\n right = root.right\n root.left = None\n root.right = None\n self.trim(None, left, to_delete, candidates)\n self.trim(None, right, to_delete, candidates)\n\n\n\"\"\"\nGiven the root of a binary tree, each node in the tree has a distinct value.\n\nAfter deleting all nodes with a value in to_delete, we are left with a forest (a disjoint union of trees).\n\nReturn the roots of the trees in the remaining forest. You may return the result in any order.\n\n \n\nExample 1:\n\n\n\nInput: root = [1,2,3,4,5,6,7], to_delete = [3,5]\nOutput: [[1,2,null,4],[6],[7]]\n \n\nConstraints:\n\nThe number of nodes in the given tree is at most 1000.\nEach node has a distinct value between 1 and 1000.\nto_delete.length <= 1000\nto_delete contains distinct values between 1 and 1000.\n\"\"\"\n","sub_path":"Templates/1110. Delete Nodes And Return Forest.py","file_name":"1110. 
Delete Nodes And Return Forest.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"255554546","text":"N = int(input())\nA = [list(map(int, input().split())) for i in range(N)]\n\ndi = [0,1,0,-1]\ndj = [1,0,-1,0]\ni = 1\nj = 1\nsumall = 0\nfor i in range(N):\n for j in range(N):\n # print(i, j, end=' ')\n sum = 0\n for k in range(4):\n ni = i + di[k]\n nj = j + dj[k]\n if ni>=0 and ni=0 and nj origin_list[i + 1]:\n # swap\n origin_list[i], origin_list[i + 1] = origin_list[i + 1], origin_list[i]\n i += 1\n j += 1\n return origin_list\n\n\nprint(sort_to_max([2, 10, -12, 2.5, 20, -11, 4, 4, 0]))\n\n# Задача-3:\n# Напишите собственную реализацию стандартной функции filter.\n# Разумеется, внутри нельзя использовать саму функцию filter.\n\nages = [5, 18, 17, 19, 14, 32]\n\n\n# функция ключ для фильтрации\ndef my_func(x):\n if x < 18:\n return False\n else:\n return True\n\n\n# фильтрация\ndef my_filter(func, lst):\n i = 0\n while i < len(lst):\n if func(lst[i]):\n pass\n print(\"F true\")\n else:\n lst.pop(i)\n i -= 1\n print(\"F false\")\n i += 1\n return lst\n\n\nprint(my_filter(my_func, ages))\n\n# Задача-4:\n# Даны четыре точки А1(х1, у1), А2(x2 ,у2), А3(x3 , у3), А4(х4, у4).\n# Определить, будут ли они вершинами параллелограмма.\n\nimport math\n\nA1 = [2, 1]\nA2 = [3, 5]\nA3 = [6, 6]\nA4 = [5, 2]\n\nB1 = [1, 1]\nB2 = [1, 5]\nB3 = [5, 5]\nB4 = [5, 1]\n\n# не параллелограмм\nC1 = [1, 1]\nC2 = [5, 5]\nC3 = [1, 5]\nC4 = [5, 1]\n\n\n# Функция проверяет является ли четырехугольник параллелограммом\ndef is_paral(a1, a2, a3, a4):\n # Определим функцию для проверки косинуса угла\n def cos_angle(b1, b2, b3):\n a = (b1[0] - b2[0]) * (b1[0] - b3[0]) + (b1[1] - b2[1]) * (b1[1] - b3[1])\n b = math.sqrt((b1[0] - b2[0]) ** 2 + (b1[1] - b2[1]) ** 2)\n c = math.sqrt((b1[0] - b3[0]) ** 2 + (b1[1] - b3[1]) ** 2)\n return a / (b * c)\n\n cos_a1 = cos_angle(a1, a2, a4)\n cos_a2 = 
cos_angle(a2, a3, a1)\n cos_a3 = cos_angle(a3, a4, a2)\n cos_a4 = cos_angle(a4, a3, a1)\n\n # проверяем равенство углов и сумму соседних углов,\n # т.е. проверяем свойства параллелограма\n if cos_a1 == cos_a3 and cos_a2 == cos_a4 and (cos_a1 + cos_a2) == 0.0 and (cos_a3 + cos_a4) == 0.0:\n return True\n else:\n return False\n\n\nprint(is_paral(A1, A2, A3, A4))\nprint(is_paral(B1, B2, B3, B4))\nprint(is_paral(C1, C2, C3, C4))\n","sub_path":"hw03_normal.py","file_name":"hw03_normal.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"634268044","text":"from .header import *\nfrom .pointers_array import *\n\nclass mark_sweep(object):\n def __init__(self, heap):\n self.heap = heap\n # This function should collect the memory in the heap\n def collect(self):\n\n self.mark(0)\n\n header_ptr = 0\n while(header_ptr != -1):\n if not header_get_garbage_flag(self.heap.data, header_ptr):\n header_ptr = self.heap.disallocate(header_ptr)\n\n header_ptr = self.heap.get_next_header_pointer(header_ptr)\n\n def __rec_collect(self, pointer):\n pass\n\n def mark(self, root_ptr):\n if header_get_garbage_flag(self.heap.data, root_ptr):\n return\n\n self.heap.data = header_set_garbage_flag(self.heap.data, root_ptr, 1)\n\n if not header_is_pointers_array(self.heap.data, root_ptr):\n return\n\n root_ptr_count = pointer_array_count(self.heap.data, root_ptr)\n for i in range(root_ptr_count):\n root_obj = pointer_array_get(self.heap.data, root_ptr, i)\n print(root_obj)\n self.mark(root_obj)\n","sub_path":"Lab5/Lab5/src/GC/mark_sweep.py","file_name":"mark_sweep.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"50355541","text":"import copy\nimport logging\nfrom itertools import chain\n\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\n\nfrom rltoolkit import config\nfrom 
rltoolkit.algorithms.ddpg.models import Actor, Critic\nfrom rltoolkit.buffer import ReplayBuffer\nfrom rltoolkit.rl import RL\nfrom rltoolkit.utils import measure_time\n\nlogger = logging.getLogger(__name__)\n\n\nclass DDPG(RL):\n def __init__(\n self,\n actor_lr: float = config.DDPG_LR,\n critic_lr: float = config.DDPG_LR,\n tau: float = config.TAU,\n update_batch_size: int = config.UPDATE_BATCH_SIZE,\n buffer_size: int = config.BUFFER_SIZE,\n random_frames: int = config.RANDOM_FRAMES,\n update_freq: int = config.UPDATE_FREQ,\n grad_steps: int = config.GRAD_STEPS,\n act_noise: float = config.ACT_NOISE,\n obs_norm: bool = config.OBS_NORM,\n *args,\n **kwargs,\n ):\n f\"\"\"Deep Deterministic Policy Gradient implementation\n\n Args:\n actor_lr (float, optional): Learning rate of the actor.\n Defaults to { config.DDPG_LR }.\n critic_lr (float, optional): Learning rate of the critic.\n Defaults to { config.DDPG_LR }.\n tau (float, optional): Tau coefficient for polyak averaging.\n Defaults to { config.TAU }.\n update_batch_size (int, optional): Batch size for gradient step.\n Defaults to { config.UPDATE_BATCH_SIZE }.\n buffer_size (int, optional): Size of replay buffer.\n Defaults to { config.BUFFER_SIZE }.\n random_frames (int, optional): Number of frames with random actions at\n the beggining. Defaults to { config.RANDOM_FRAMES }.\n update_freq (int, optional): Freqency of SAC updates (in frames).\n Defaults to { config.UPDATE_FREQ }.\n grad_steps (int, optional): Number of SAC updates for one step.\n Defaults to { config.GRAD_STEPS }.\n act_noise (float, optional): Actions noise multiplier.\n Defaults to { config.ACT_NOISE }.\n obs_norm (bool, optional): Observation normalization.\n Defaults to { False }.\n env_name (str, optional): Name of the gym environment.\n Defaults to { config.ENV_NAME }.\n gamma (float, optional): Discount factor. 
Defaults to { config.GAMMA }.\n stats_freq (int, optional): Frequency of logging the progress.\n Defaults to { config.STATS_FREQ }.\n batch_size (int, optional): Number of frames used for one algorithm step\n (could be higher because batch collection stops when rollout ends).\n Defaults to { config.BATCH_SIZE }.\n iterations (int, optional): Number of algorithms iterations.\n Defaults to { config.ITERATIONS }.\n max_frames (int, optional): Limit of frames for training.\n Defaults to { None }.\n return_done (Union[int, None], optional): target return, which will stop\n training if reached. Defaults to { config.RETURN_DONE }.\n log_dir (str, optional): Path for basic logs which includes final model.\n Defaults to { config.LOG_DIR }.\n use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.\n tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.\n Defaults to { config.TENSORBOARD_DIR }.\n tensorboard_comment (str, optional): Comment for tensorboard files.\n Defaults to { config.TENSORBOARD_COMMENT }.\n verbose (int, optional): Verbose level. 
Defaults to { config.VERBOSE }.\n render (bool, optional): Render rollouts to tensorboard.\n Defaults to { config.RENDER }.\n\n \"\"\"\n super().__init__(*args, **kwargs)\n assert not self.discrete, \"DDPG works only on continuous actions space\"\n self._actor = None\n self.actor_optimizer = None\n self._actor_targ = None\n self._critic = None\n self.critic_optimizer = None\n self.critic_targ = None\n\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.tau = tau\n self.update_batch_size = update_batch_size\n self.buffer_size = buffer_size\n self.random_frames = random_frames\n self.update_freq = update_freq\n self.grad_steps = grad_steps\n self.act_noise = act_noise\n self.obs_norm = obs_norm\n self.obs_mean, self.obs_std = self._get_initial_obs_mean_std(self.obs_norm)\n\n self.actor = Actor(self.ob_dim, self.ac_lim, self.ac_dim)\n self.critic = Critic(self.ob_dim, self.ac_dim)\n\n self.replay_buffer = ReplayBuffer(\n self.buffer_size,\n self.ob_dim,\n self.ac_dim,\n discrete=self.discrete,\n dtype=torch.float32,\n device=self.device,\n obs_norm=self.obs_norm,\n )\n\n self.loss = {\"actor\": 0.0, \"critic\": 0.0}\n new_hparams = {\n \"hparams/actor_lr\": self.actor_lr,\n \"hparams/critic_lr\": self.critic_lr,\n \"hparams/tau\": self.tau,\n \"hparams/update_batch_size\": self.update_batch_size,\n \"hparams/buffer_size\": self.buffer_size,\n \"hparams/random_frames\": self.random_frames,\n \"hparams/update_freq\": self.update_freq,\n \"hparams/grad_steps\": self.grad_steps,\n \"hparams/act_noise\": self.act_noise,\n \"hparams/obs_norm\": self.obs_norm,\n }\n self.hparams.update(new_hparams)\n\n def set_model(self, model, lr):\n model.to(device=self.device)\n optimizer = self.opt(model.parameters(), lr=lr)\n return model, optimizer\n\n @property\n def actor(self):\n return self._actor\n\n @actor.setter\n def actor(self, model: torch.nn.Module):\n self._actor, self.actor_optimizer = self.set_model(model, self.actor_lr)\n self.actor_targ = 
copy.deepcopy(self._actor)\n for p in self.actor_targ.parameters():\n p.requires_grad = False\n\n @property\n def critic(self):\n return self._critic\n\n @critic.setter\n def critic(self, model: torch.nn.Module):\n self._critic, self.critic_optimizer = self.set_model(model, self.critic_lr)\n self.critic_targ = copy.deepcopy(self._critic)\n for p in self.critic_targ.parameters():\n p.requires_grad = False\n\n @measure_time\n def perform_iteration(self):\n f\"\"\"Single train step of algorithm\n\n Returns:\n Memory: Buffer filled with one batch\n float: Time taken for evaluation\n \"\"\"\n self.collect_batch_and_train(self.batch_size)\n self.replay_buffer = self.update_obs_mean_std(self.replay_buffer)\n return self.replay_buffer.last_rollout()\n\n def noise_action(self, obs, act_noise):\n action, _ = self._actor.act(obs)\n action += act_noise * torch.randn(self.ac_dim, device=self.device)\n return np.clip(action.cpu(), -self.ac_lim.cpu(), self.ac_lim.cpu()).to(\n self.device\n )\n\n def initial_act(self, obs) -> torch.Tensor:\n action = torch.tensor(self.env.action_space.sample()).unsqueeze(0)\n return action\n\n def collect_batch_and_train(self, batch_size: int, *args, **kwargs):\n f\"\"\"Perform full rollouts and collect samples till batch_size number of steps\n will be added to the replay buffer\n\n Args:\n batch_size (int): number of samples to collect and train\n *args, **kwargs: arguments for make_update\n \"\"\"\n collected = 0\n while collected < batch_size:\n self.stats_logger.rollouts += 1\n\n obs = self.env.reset()\n # end - end of the episode from perspective of the simulation\n # done - end of the episode from perspective of the model\n end = False\n obs = self.process_obs(obs)\n prev_idx = self.replay_buffer.add_obs(obs)\n ep_len = 0\n\n while not end:\n obs = self.replay_buffer.normalize(obs)\n if self.stats_logger.frames < self.random_frames:\n action = self.initial_act(obs)\n else:\n action = self.noise_action(obs, self.act_noise)\n action_proc = 
self.process_action(action, obs)\n obs, rew, done, _ = self.env.step(action_proc)\n ep_len += 1\n end = done\n done = False if ep_len == self.max_ep_len else done\n\n obs = self.process_obs(obs)\n next_idx = self.replay_buffer.add_obs(obs)\n self.replay_buffer.add_timestep(\n prev_idx, next_idx, action, rew, done, end\n )\n prev_idx = next_idx\n self.stats_logger.frames += 1\n collected += 1\n\n self.make_update(*args, **kwargs)\n\n def update_condition(self):\n return (\n len(self.replay_buffer) > self.update_batch_size\n and self.stats_logger.frames % self.update_freq == 0\n )\n\n def make_update(self):\n if self.update_condition():\n for _ in range(self.grad_steps):\n batch = self.replay_buffer.sample_batch(\n self.update_batch_size, self.device\n )\n self.update(*batch)\n\n def compute_qfunc_targ(\n self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor\n ):\n \"\"\"Compute targets for Q-functions\n\n Args:\n reward (torch.Tensor): batch of rewards\n next_obs (torch.Tensor): batch of next observations\n done (torch.Tensor): batch of done\n\n Returns:\n torch.Tensor: Q-function targets for the batch\n \"\"\"\n with torch.no_grad():\n next_action, _ = self.actor_targ(next_obs)\n q_target = self.critic_targ(next_obs, next_action)\n\n qfunc_target = reward + self.gamma * (1 - done) * q_target\n\n return qfunc_target\n\n def compute_pi_loss(self, obs):\n \"\"\"Loss for the policy\n\n Args:\n obs (torch.Tensor): batch of observations\n\n Returns:\n torch.Tensor: policy loss\n \"\"\"\n action, _ = self._actor(obs)\n loss = -self._critic(obs, action).mean()\n return loss\n\n def update_target_nets(self):\n \"\"\"Update target networks with Polyak averaging\n \"\"\"\n with torch.no_grad():\n # Polyak averaging:\n learned_params = chain(self._critic.parameters(), self._actor.parameters())\n targets_params = chain(\n self.critic_targ.parameters(), self.actor_targ.parameters()\n )\n for params, targ_params in zip(learned_params, targets_params):\n 
targ_params.data.mul_(1 - self.tau)\n targ_params.data.add_((self.tau) * params.data)\n\n def update(\n self,\n obs: torch.Tensor,\n next_obs: torch.Tensor,\n action: torch.Tensor,\n reward: torch.Tensor,\n done: torch.Tensor,\n ):\n \"\"\"DDPG update step\n\n Args:\n obs (torch.Tensor): observations tensor\n next_obs (torch.Tensor): next observations tensor\n action (torch.Tensor): actions tensor\n reward (torch.Tensor): rewards tensor\n done (torch.Tensor): dones tensor\n \"\"\"\n y = self.compute_qfunc_targ(reward, next_obs, done)\n\n # Update Q-function by one step\n y_q = self._critic(obs, action)\n loss_q = F.mse_loss(y_q, y)\n\n self.loss[\"critic\"] = loss_q.item()\n\n self.critic_optimizer.zero_grad()\n loss_q.backward()\n self.critic_optimizer.step()\n\n # Update policy by one step\n self._critic.eval()\n\n loss = self.compute_pi_loss(obs)\n self.loss[\"actor\"] = loss.item()\n\n self.actor_optimizer.zero_grad()\n loss.backward()\n self.actor_optimizer.step()\n\n # Update target networks\n\n self.update_target_nets()\n\n self._critic.train()\n\n def collect_params_dict(self):\n params_dict = {}\n params_dict[\"actor\"] = self.actor.state_dict()\n params_dict[\"critic\"] = self.critic.state_dict()\n params_dict[\"obs_mean\"] = self.replay_buffer.obs_mean\n params_dict[\"obs_std\"] = self.replay_buffer.obs_std\n return params_dict\n\n def apply_params_dict(self, params_dict):\n self.actor.load_state_dict(params_dict[\"actor\"])\n self.critic.load_state_dict(params_dict[\"critic\"])\n self.replay_buffer.obs_mean = params_dict[\"obs_mean\"]\n self.replay_buffer.obs_std = params_dict[\"obs_std\"]\n\n def save_model(self, save_path=None):\n if self.filename is None and save_path is None:\n raise AttributeError\n elif save_path is None:\n save_path = str(self.log_path)\n\n torch.save(self._actor.state_dict(), save_path + \"_actor_model.pt\")\n torch.save(self._critic.state_dict(), save_path + \"_critic_model.pt\")\n return save_path\n\n def process_obs(self, 
obs):\n f\"\"\"Pre-processing of observation before it will go to the policy\n\n Args:\n obs (iter): original observation from env\n\n Returns:\n torch.Tensor: processed observation\n \"\"\"\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n obs = torch.unsqueeze(obs, dim=0)\n return obs\n\n def process_action(self, action: torch.Tensor, obs: torch.tensor, *args, **kwargs):\n f\"\"\"Pre-processing of action before it will go the env.\n It will not be saved to the buffer.\n\n Args:\n action (torch.Tensor): action from the policy\n obs (torch.tensor): observations for this actions\n\n Returns:\n np.array: processed action\n \"\"\"\n action = action.cpu().numpy()[0]\n return action\n\n def test(self, episodes=None):\n f\"\"\"Run deterministic policy and log average return\n\n Args:\n episodes (int, optional): Number of episodes for test. Defaults to { 10 }.\n\n Returns:\n float: mean episode reward\n \"\"\"\n if episodes is None:\n episodes = self.test_episodes\n returns = []\n for j in range(episodes):\n obs = self.env.reset()\n done = False\n ep_ret = 0\n while not done:\n obs = self.process_obs(obs)\n obs = self.replay_buffer.normalize(obs)\n action, _ = self._actor.act(obs, deterministic=True)\n action_proc = self.process_action(action, obs)\n obs, r, done, _ = self.env.step(action_proc)\n ep_ret += r\n returns.append(ep_ret)\n\n return np.mean(returns)\n\n\nif __name__ == \"__main__\":\n with torch.cuda.device(0):\n model = DDPG(\n env_name=\"HalfCheetah-v2\",\n buffer_size=int(1e6),\n iterations=5,\n gamma=0.99,\n batch_size=200,\n stats_freq=1,\n test_episodes=2,\n use_gpu=True,\n obs_norm=True,\n # tensorboard_dir=\"tb_logs_tanh\",\n # tensorboard_comment=\"no_tanh\",\n log_dir=\"optional_logs\",\n )\n model.train()\n # 
model.save(\"tmp_norb.pkl\")\n","sub_path":"rltoolkit/algorithms/ddpg/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":15524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"274930293","text":"#Scott Floam\r\n#3/31/2015\r\n\r\nimport random\r\n\r\ndef main():\r\n choice =''\r\n list0=[]\r\n while choice != 6:\r\n menu()\r\n choice=getUserChoice()\r\n if choice !=6:\r\n list0=process_choice(choice,list0)\r\n print(list0)\r\n \r\n print(\"Goodbye!\")\r\n\r\ndef process_choice(choice,list1):\r\n list2=list1\r\n if choice == 0:\r\n list2=create_list()\r\n elif choice == 1:\r\n list2=add_to_end(list1)\r\n elif choice == 2:\r\n find_position(list1)\r\n elif choice == 3:\r\n list2=insert_at_position(list1)\r\n elif choice == 4:\r\n list2=remove_item(list1)\r\n elif choice == 5:\r\n list2=remove_at_position(list1)\r\n else:\r\n print(choice,\"is not a valid choice.\")\r\n return list2\r\n\r\n\r\ndef create_list():\r\n print(\"0. create_list()\")\r\n print()\r\n\r\n mylist=[]\r\n\r\n list_size = int(input(\"Enter the size of your list: \"))\r\n\r\n for i in range(list_size):\r\n mylist.append(random.randint(0,99))\r\n \r\n return mylist\r\n \r\n\r\ndef add_to_end(list1):\r\n print(\"1. add_to_end(list1)\")\r\n print()\r\n repeat = 'y'\r\n while repeat == 'y' or repeat == 'Y':\r\n\r\n\r\n mylist = list1\r\n print()\r\n newNumber = int(input(\"Add a number to the end of this list: \"))\r\n mylist.append(newNumber)\r\n print()\r\n repeat = input(\" Enter 'Y' or 'y' to add another number. Enter anything else to stop\\n adding numbers: \")\r\n print()\r\n print()\r\n return mylist\r\n\r\n\r\ndef find_position(list1):\r\n \r\n print(\"2. 
find_position(list1)\")\r\n print()\r\n \r\n mylist=list1\r\n\r\n\r\n repeat = 'y'\r\n while repeat == 'y' or repeat == 'Y':\r\n print()\r\n findNumber = int(input(\"Enter the number you would like to find the position for: \"))\r\n print()\r\n \r\n mylist_length = len(mylist)\r\n\r\n i = 0\r\n counter = 0\r\n list_validator = False\r\n \r\n for i in range(mylist_length):\r\n \r\n list_value = mylist[i]\r\n\r\n if list_value == findNumber:\r\n list_validator = True\r\n position = i\r\n counter += 1\r\n print(\"Position #\"+str(counter)+\" for the number, \" + str(findNumber)+\", is:\",position)\r\n print()\r\n if list_validator == False:\r\n print(\"This number is not in the list!\")\r\n print()\r\n print()\r\n repeat = input(\" Enter 'Y' or 'y' to find the position of another number. Enter\\n anything else to stop finding the position of numbers numbers: \")\r\n print()\r\n print()\r\n \r\n \r\ndef insert_at_position(list1):\r\n print(\"3. insert_at_position(list1)\")\r\n print()\r\n \r\n mylist=list1\r\n \r\n mylist_length = len(mylist)\r\n \r\n repeat = 'y'\r\n while repeat == 'y' or repeat == 'Y':\r\n print()\r\n new_number = int(input(\"Enter a number that you would like to add to the list: \"))\r\n print()\r\n position_choice = int(input(\"Enter the position in the list where you would like to add the new number: \"))\r\n print()\r\n\r\n if position_choice > 0 and position_choice < mylist_length:\r\n mylist.insert(position_choice,new_number)\r\n print(mylist)\r\n print()\r\n print()\r\n else:\r\n print(\"You entered an invalid position.\")\r\n print()\r\n print()\r\n print() \r\n repeat = input(\"Enter 'Y' or 'y' to insert another number. Enter anything else to stop inserting numbers: \")\r\n print()\r\n print()\r\n\r\n mylist_length =len(mylist)\r\n\r\n return mylist\r\n\r\n\r\ndef remove_item(list1):\r\n print(\"4. 
remove_item(list1)\")\r\n print()\r\n mylist=list1\r\n repeat = 'y'\r\n while repeat == 'y' or repeat == 'Y':\r\n print()\r\n findNumber = int(input(\"Enter the number that you would like to remove from the list: \"))\r\n print()\r\n \r\n list_validator = False\r\n \r\n for value in mylist:\r\n\r\n if value == findNumber:\r\n \r\n list_validator = True\r\n \r\n mylist.remove(findNumber) \r\n\r\n print()\r\n \r\n if list_validator == False:\r\n print(\"This number is not in the list!\")\r\n print()\r\n print() \r\n repeat = input(\"Enter 'Y' or 'y' to remove another number. Enter anything else to stop removing numbers: \")\r\n print()\r\n print()\r\n\r\n mylist_length =len(mylist)\r\n\r\n return mylist\r\n\r\ndef remove_at_position(list1):\r\n print(\"5. remove_at_position(list1)\")\r\n print()\r\n \r\n mylist=list1\r\n\r\n mylist_length = len(mylist)\r\n \r\n repeat = 'y'\r\n \r\n while repeat == 'y' or repeat == 'Y':\r\n print()\r\n position_choice = int(input(\"Enter the position in the list where you would like to remove a number: \"))\r\n print()\r\n\r\n if position_choice > 0 and position_choice < mylist_length:\r\n del mylist[position_choice]\r\n print(mylist)\r\n print()\r\n print()\r\n else:\r\n print(\"You entered an invalid position.\")\r\n print()\r\n print()\r\n \r\n repeat = input(\" Enter 'Y' or 'y' to remove another number by its position. Enter anything \\nelse to stop removing numbers by position:\")\r\n print()\r\n print()\r\n\r\n mylist_length =len(mylist)\r\n\r\n return mylist\r\n\r\ndef menu():\r\n print('''\r\n 0. Create list\r\n 1. Add a number to the end\r\n 2. Find the position of a number\r\n 3. Insert number at position\r\n 4. Remove number\r\n 5. Remove number at position\r\n 6. 
Quit\r\n ''')\r\n\r\ndef getUserChoice():\r\n choice=-1\r\n while choice <0 or choice > 6:\r\n print(\"Please select 0-6: \",end='')\r\n choice=int(input())\r\n return choice\r\n\r\nmain()\r\n","sub_path":"Program 14/Floam_Prog_14_List_Menu_Proj_4.py","file_name":"Floam_Prog_14_List_Menu_Proj_4.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"186207128","text":"import io\nimport sys\nimport asn1\nimport codecs\nfrom time import strptime, strftime, gmtime\n\n\ndef pretty_print(input_stream, output_stream, indent=0):\n \"\"\"Pretty print ASN.1 data.\"\"\"\n while not input_stream.eof():\n tag = input_stream.peek()\n if tag.typ == asn1.Types.Primitive:\n tag, value = input_stream.read()\n output_stream.write(' ' * indent)\n output_stream.write('[{}] {}: {}\\n'.format(class_id_to_string(tag.cls), tag_id_to_string(tag.nr), value_to_string(tag.nr, value, indent, output_stream, tag.cls)))\n # output_stream.write('{}\\n'.format(value_to_string(tag.nr, value, indent, output_stream)))\n elif tag.typ == asn1.Types.Constructed:\n output_stream.write(' ' * indent)\n output_stream.write('[{}] {}\\n'.format(class_id_to_string(tag.cls), tag_id_to_string(tag.nr)))\n input_stream.enter()\n pretty_print(input_stream, output_stream, indent + 2)\n input_stream.leave()\n\n\ntag_id_to_string_map = {\n asn1.Numbers.Boolean: \"BOOLEAN\",\n asn1.Numbers.Integer: \"INTEGER\",\n asn1.Numbers.BitString: \"BIT STRING\",\n asn1.Numbers.OctetString: \"OCTET STRING\",\n asn1.Numbers.Null: \"NULL\",\n asn1.Numbers.ObjectIdentifier: \"OBJECT\",\n asn1.Numbers.PrintableString: \"PRINTABLESTRING\",\n asn1.Numbers.IA5String: \"IA5STRING\",\n asn1.Numbers.UTCTime: \"UTCTIME\",\n asn1.Numbers.Enumerated: \"ENUMERATED\",\n asn1.Numbers.Sequence: \"SEQUENCE\",\n asn1.Numbers.Set: \"SET\",\n asn1.Numbers.UTF8String: \"UTF8 STRING\"\n}\n\nclass_id_to_string_map = {\n asn1.Classes.Universal: \"U\",\n 
asn1.Classes.Application: \"A\",\n asn1.Classes.Context: \"C\",\n asn1.Classes.Private: \"P\"\n}\n\nobject_id_to_string_map = {\n \"2.5.4.3\": \"commonName\",\n \"2.5.4.4\": \"surname\",\n \"2.5.4.5\": \"serialNumber\",\n \"2.5.4.6\": \"countryName\",\n \"2.5.4.7\": \"localityName\",\n \"2.5.4.8\": \"stateOrProvinceName\",\n \"2.5.4.9\": \"streetAddress\",\n \"2.5.4.10\": \"organizationName\",\n \"2.5.4.11\": \"organizationalUnitName\",\n \"2.5.4.12\": \"title\",\n \"2.5.4.13\": \"description\",\n \"2.5.4.42\": \"givenName\",\n\n \"2.5.29.14\": \"X509v3 Subject Key Identifier\",\n \"2.5.29.15\": \"X509v3 Key Usage\",\n \"2.5.29.16\": \"X509v3 Private Key Usage Period\",\n \"2.5.29.17\": \"X509v3 Subject Alternative Name\",\n \"2.5.29.18\": \"X509v3 Issuer Alternative Name\",\n \"2.5.29.19\": \"X509v3 Basic Constraints\",\n \"2.5.29.30\": \"X509v3 Name Constraints\",\n \"2.5.29.31\": \"X509v3 CRL Distribution Points\",\n \"2.5.29.32\": \"X509v3 Certificate Policies Extension\",\n \"2.5.29.33\": \"X509v3 Policy Mappings\",\n \"2.5.29.35\": \"X509v3 Authority Key Identifier\",\n \"2.5.29.36\": \"X509v3 Policy Constraints\",\n \"2.5.29.37\": \"X509v3 Extended Key Usage\",\n\n \"1.3.14.3.2.26\": \"hashAlgorithmIdentifier\",\n\n \"1.3.6.1.5.5.7.1.1\": \"authorityInfoAccess\",\n \"1.3.6.1.5.5.7.3.9\": \"id-kp-OCSPSigning\",\n \"1.3.6.1.5.5.7.48.2\": \"caIssuers\",\n \"1.3.6.1.5.5.7.48.1.1\": \"basic-response\",\n \"1.3.6.1.5.5.7.48.1.2\": \"nonce-extension\",\n \"1.3.6.1.5.5.7.48.1.3\": \"crl\",\n \"1.3.6.1.5.5.7.48.1.5\": \"no-check\",\n\n \"1.2.840.10045.2.1\": \"EC\",\n \"1.2.840.10045.4.3.3\": \"SHA384withECDSA\",\n\n \"1.2.840.113549.1.1.1\": \"rsaEncryption\",\n \"1.2.840.113549.1.1.2\": \"md2WithRSAEncryption\",\n \"1.2.840.113549.1.1.3\": \"md4WithRSAEncryption\",\n \"1.2.840.113549.1.1.4\": \"md5WithRSAEncryption\",\n \"1.2.840.113549.1.1.5\": \"sha1-with-rsa-signature\",\n \"1.2.840.113549.1.1.6\": \"rsaOAEPEncryption\",\n \"1.2.840.113549.1.1.7\": 
\"id-RSAES-OAEP\",\n \"1.2.840.113549.1.1.8\": \"id-mgfl\",\n \"1.2.840.113549.1.1.9\": \"id-pSpecified\",\n \"1.2.840.113549.1.1.10\": \"rsassa-pss\",\n \"1.2.840.113549.1.1.11\": \"sha256WithRSAEncryption\", \n \"1.2.840.113549.1.9.1\": \"emailAddress\",\n\n \"2.16.840.1.101.3.2.1.3.1\": \"id-fpki-certpcy-rudimentaryAssurance\",\n \"2.16.840.1.101.3.2.1.3.2\": \"id-fpki-certpcy-basicAssurance\",\n \"2.16.840.1.101.3.2.1.3.3\": \"id-fpki-certpcy-mediumAssurance\",\n \"2.16.840.1.101.3.2.1.3.4\": \"id-fpki-certpcy-highAssurance\",\n \"2.16.840.1.101.3.2.1.3.5\": \"id-fpki-certpcy-testAssurance\",\n \"2.16.840.1.101.3.2.1.3.6\": \"id-fpki-common-policy\",\n \"2.16.840.1.101.3.2.1.3.7\": \"id-fpki-common-hardware\",\n \"2.16.840.1.101.3.2.1.3.8\": \"id-fpki-common-devices\",\n \"2.16.840.1.101.3.2.1.3.12\": \"id-fpki-certpcy-mediumHardware\",\n \"2.16.840.1.101.3.2.1.3.13\": \"id-fpki-common-authentication\",\n \"2.16.840.1.101.3.2.1.3.14\": \"id-fpki-certpcy-medium-CBP\",\n \"2.16.840.1.101.3.2.1.3.15\": \"id-fpki-certpcy-mediumHW-CBP\",\n \"2.16.840.1.101.3.2.1.3.16\": \"id-fpki-common-high\",\n \"2.16.840.1.101.3.2.1.3.17\": \"id-fpki-common-cardAuth\",\n \"2.16.840.1.101.3.2.1.3.18\": \"id-fpki-certpcy-pivi-hardware\",\n \"2.16.840.1.101.3.2.1.3.19\": \"id-fpki-certpcy-pivi-cardAuth\",\n \"2.16.840.1.101.3.2.1.3.20\": \"id-fpki-certpcy-pivi-contentSigning\",\n \"2.16.840.1.101.3.2.1.3.39\": \"id-fpki-common-piv-contentSigning\",\n \"2.16.840.1.101.3.2.1.3.40\": \"id-fpki-common-derived-pivAuth\",\n\n # Verified from https://iase.disa.mil/pki-pke/Documents/unclass-dod_cp_v10-5.pdf\n \"2.16.840.1.101.2.1.11.2\": \"id-US-dod-basic\",\n \"2.16.840.1.101.2.1.11.4\": \"id-US-dod-FORTEZZA\",\n \"2.16.840.1.101.2.1.11.5\": \"id-US-dod-medium\",\n \"2.16.840.1.101.2.1.11.6\": \"id-US-dod-type1\",\n \"2.16.840.1.101.2.1.11.9\": \"id-US-dod-mediumHardware\",\n \"2.16.840.1.101.2.1.11.10\": \"id-US-dod-PIV-Auth\",\n \"2.16.840.1.101.2.1.11.17\": \"id-US-dod-mediumNPE\",\n 
\"2.16.840.1.101.2.1.11.18\": \"id-US-dod-medium-2048\",\n \"2.16.840.1.101.2.1.11.19\": \"id-US-dod-mediumHardware-2048\",\n \"2.16.840.1.101.2.1.11.20\": \"id-US-dod-PIV-Auth-2048\",\n \"2.16.840.1.101.2.1.11.31\": \"id-US-dod-peerInterop\",\n \"2.16.840.1.101.2.1.11.36\": \"id-US-dod-mediumNPE-112\",\n \"2.16.840.1.101.2.1.11.37\": \"id-US-dod-mediumNPE-128\",\n \"2.16.840.1.101.2.1.11.39\": \"id-US-dod-medium-112\",\n \"2.16.840.1.101.2.1.11.40\": \"id-US-dod-medium-128\",\n \"2.16.840.1.101.2.1.11.42\": \"id-US-dod-mediumHardware-112\",\n \"2.16.840.1.101.2.1.11.43\": \"id-US-dod-mediumHardware-128\"\n \n}\n\n\ndef tag_id_to_string(identifier):\n \"\"\"Return a string representation of a ASN.1 id.\"\"\"\n if identifier in tag_id_to_string_map:\n return tag_id_to_string_map[identifier]\n return '{:#02x}'.format(identifier)\n\n\ndef class_id_to_string(identifier):\n \"\"\"Return a string representation of an ASN.1 class.\"\"\"\n if identifier in class_id_to_string_map:\n return class_id_to_string_map[identifier]\n raise ValueError('Illegal class: {:#02x}'.format(identifier))\n\n\ndef object_identifier_to_string(identifier):\n if identifier in object_id_to_string_map:\n return object_id_to_string_map[identifier]\n return identifier\n\ndef value_to_string(tag_number, value, indent, output_stream, tag_class):\n if tag_number == asn1.Numbers.ObjectIdentifier:\n # Reads in URLs that aren't properly translated\n # These lines are just the characters printed with '.' 
in between each\n if tag_class == asn1.Classes.Context:\n asciiList = value.split('.')\n charList = []\n for ch in asciiList:\n charList.append(chr(int(ch)))\n return 'h{}'.format(''.join(charList[2:]))\n else:\n return object_identifier_to_string(value)\n elif tag_number == asn1.Numbers.OctetString:\n octDecoder = asn1.Decoder()\n octDecoder.start(value)\n try:\n pretty_print(octDecoder, output_stream, indent)\n except:\n try:\n return codecs.decode(value, \"ascii\")\n except:\n return value.hex().upper()\n \n elif tag_number == asn1.Numbers.UTF8String:\n try:\n return codecs.decode(value, \"ascii\")\n except:\n return 'Failed UTF8: {}'.format(value)\n elif tag_number == asn1.Numbers.Integer:\n idFill = hex(value).lstrip('0x')\n # Adds a leading zero if the hex values is odd in length. 0xA becomes 0x0A\n return idFill if len(idFill) % 2 == 0 else idFill.zfill(len(idFill)+1)\n elif tag_number == asn1.Numbers.UTCTime:\n return strftime('%B %d %Y %H:%M:%S UTC', strptime(str(value), \"%y%m%d%H%M%SZ\"))\n elif isinstance(value, bytes):\n return value.hex().upper()\n elif isinstance(value, str):\n return value\n else:\n return repr(value)\n\n\n\n","sub_path":"printLargeOctetImport.py","file_name":"printLargeOctetImport.py","file_ext":"py","file_size_in_byte":8346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"138347789","text":"from tw2.core import Validator, validation, ValidationError\n\n\nclass StartEndDateValidator(Validator):\n\n def __init__(self, start_date, end_date, **kw):\n self.ids = {\n 'start': start_date,\n 'end': end_date\n }\n super(StartEndDateValidator, self).__init__(**kw)\n\n def _validate_python(self, values, state=None):\n\n # at least one date are already failed validation, do nothing\n if values.get(self.ids['start']) == validation.Invalid or values.get(self.ids['end']) == validation.Invalid:\n return\n\n if values.get(self.ids['start']) >= values.get(self.ids['end']):\n raise 
ValidationError(\"start_date can not be greater than end_date\")\n","sub_path":"scrumify/scrumify/lib/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530855376","text":"import random\nfrom typing import Tuple\n\nfrom brain_memory.majorsystem import word_to_number, _load_wordlist\n\n\ndef _random_bool():\n return bool(random.getrandbits(1))\n\n\ndef _random_number() -> str:\n i = random.randint(0, 99)\n if (i < 10):\n return str(i) if _random_bool() else str(f'0{i}')\n return str(i)\n\n\ndef _random_word() -> str:\n wordlist = _load_wordlist()\n key = random.choice(list(wordlist.keys()))\n word = random.choice(wordlist[key])\n return word\n\n\ndef _ask_input(header: str, prefix: str):\n print(header)\n return input(prefix)\n\n\ndef _print_response(expr: bool, correct: str = 'correct!', wrong: str = 'nope'):\n if expr:\n print(correct)\n else:\n print(wrong)\n\n\ndef practice_to_word():\n n = _random_number()\n answer = _ask_input(header=f'convert: {n}',\n prefix='a word: ')\n converted_answer = word_to_number(answer)\n\n _print_response(expr=converted_answer == n)\n\n\ndef practice_to_number():\n w = _random_word()\n answer = _ask_input(header=f'convert: {w}',\n prefix='the number: ')\n converted_question = word_to_number(w)\n\n _print_response(answer == converted_question, wrong=f'not quite, correct number {converted_question}')\n","sub_path":"brain_memory/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"35361782","text":"\"\"\"\nCreated on Feb 2020\n\n@author: Yibo Yang\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n#np.random.seed(1234)\n \nif __name__ == \"__main__\":\n \n\t# Question 1\n #######################\n\t# functions for removeing the 
punctuations and stop words\n\t#######################\n\t# All possible punctuations\n\tpunctuations = '''!()-=+|[]{};:'\"\\,<>./?@#$%^&*_~'''\n\n\t# All possible stop words\n\tstop_words_list = []\n\twith open('stop_words_list.txt') as data_file:\n\t data=data_file.read()\n\t for i in data.split('\\n'):\n\t stop_words_list.append(i)\n\t \n\t# Parse the input sentence by removing all punctuations and stop words (one sentence each time)\n\tdef parse_sentence(sentence, punctuations, stop_words_list):\n\t # First remove punctuations by looping each char\n\t no_punct = \"\"\n\t for i in sentence:\n\t if i not in punctuations:\n\t no_punct = no_punct + i\n\t \n\t # Second remove stop words by looping each words\n\t final_sentence = \"\"\n\t for word in no_punct.split():\n\t if word not in stop_words_list:\n\t # See if we are at the start of the sentence \n\t if final_sentence == \"\":\n\t final_sentence = word\n\t else:\n\t final_sentence = final_sentence + \" \" + word\n\t \n\t return final_sentence\n\n\n\t#######################\n\t# load the data set\n\t#######################\n\twith open('amazonReviews.json') as data_file:\n\t data=data_file.read()\n\t jdata = pd.read_json(data, lines = True)\n\t \n\t \n\tnum_sentences = len(jdata)\n\t# List to store all the needed and parsed information\n\there = []\n\n\tprint(\"Parsing the data and monitor the process\")\n\tcount = 0\n\tfor i in range(num_sentences):\n\t # Get the ID of the review and lower the letters\n\t ID = jdata['reviewerID'][i]\n\n\t # Get the parsed review text \n\t Sentence = parse_sentence(jdata['reviewText'][i].lower(), punctuations, stop_words_list)\n\n\t # Append the ID and review sentence together as one data point (remove the short ones)\n\t if len(Sentence) > 2:\n\t here.append((ID, Sentence))\n\n\t # Count sentences parsed\n\t count += 1\n\t if count % 1000 == 0:\n\t print(count)\n\t \n\tnum_sentences = len(here)\n\n\n\n\t#######################\n\t# save the useful informations in the 
dataset\n\t#######################\n\tdataset=pd.DataFrame(here,columns=['ID','Text'])\n\n\tnum_sentences = dataset.shape[0]\n\t#dataset\n\tdataset.to_pickle(\"dataset.pkl\")\n\n\t#######################\n\t# choose the shingle length and build the dictionary mapping shingle to index\n\t#######################\n\tk_num = 5\n\n\tk_shingles_dict = {}\n\tnumber_of_seen_shingles = 0\n\n\tprint(\"Building the dictionary of shingle and monitor the process\")\n\t# Loop through each sentence\n\tfor i in range(num_sentences):\n\t # Print the process\n\t if i % 1000 == 0:\n\t print(i)\n\n\t # Load one sentence \n\t sentence = dataset[\"Text\"][i]\n\t \n\t # Compute the length of the sentence\n\t length = len(sentence)\n\t \n\t # If the sentence length is less than 50, do padding with 50 space\n\t if length < k_num:\n\t sentence = sentence + \" \" * k_num\n\t length += k_num\n\t \n\t # Loop the sentence\n\t for j in range(length - k_num + 1):\n\t temp = sentence[j:j+k_num]\n\t if temp not in k_shingles_dict:\n\t k_shingles_dict[temp] = number_of_seen_shingles\n\t number_of_seen_shingles += 1\n\n\n\n\t# Question 2\n\t#######################\n\t# Constructing the binary matrix (here I commented the code because creating this huge matrix will blow up some computer's memory\n\t# but this code runs well on my computer. As we do not use this part later on, I do not include it here. 
If you want to see the results,\n\t# you can uncomment this part and run)\n\t#######################\n\tn_row = len(k_shingles_dict)\n\tn_col = num_sentences\n\n\tprint(n_row, n_col)\n\n\t# Build the Binary data matrix\n\n\t#Binary_data_matrix = np.zeros((n_row, n_col))\n\n\t# Loop through each sentence\n\t#for i in range(num_sentences):\n\t # Print the process\n\t# if i % 1000 == 0:\n\t# print(i)\n\n\t # Load one sentence \n\t# sentence = dataset[\"Text\"][i]\n\t \n\t # Compute the length of the sentence\n\t# length = len(sentence)\n\t \n\t # If the sentence length is less than 50, do padding with 50 space\n\t# if length < k_num:\n\t# sentence = sentence + \" \" * k_num\n\t# length += k_num\n\t \n\t # Loop the sentence\n\t# for j in range(length - k_num + 1):\n\t# temp = sentence[j:j+k_num]\n\t# if temp in k_shingles_dict:\n\t# Binary_data_matrix[k_shingles_dict[temp], i] = 1\n\n\n\n\n\n\t# Question 3 \n\t#######################\n\t# design a better way to store the data by only using their non zero index\n\t#######################\n\n\t# generate vector for sentence index = i\n\tdef generate_binary_vector(i):\n\t # Load one sentence \n\t sentence = dataset[\"Text\"][i]\n\t \n\t Binary_data_vector = np.zeros((n_row, 1))\n\t # Compute the length of the sentence\n\t length = len(sentence)\n\t \n\t # If the sentence length is less than 50, do padding with 50 space\n\t if length < k_num:\n\t sentence = sentence + \" \" * k_num\n\t length += k_num\n\t \n\t # Loop the sentence\n\t for j in range(length - k_num + 1):\n\t temp = sentence[j:j+k_num]\n\t if temp in k_shingles_dict:\n\t Binary_data_vector[k_shingles_dict[temp]] = 1\n\n\t return Binary_data_vector\n\n\n\t#######################\n\t# 10,000 random pairs of Jaccard distance\n\t#######################\n\t# 10,000 pairs \n\tnum_pairs = 10000\n\tJaccard_distance = np.zeros((num_pairs, 1))\n\n\tprint(\"Computing the Jaccard distance in 10,000 random paris and monitor the process\")\n\tfor i in range(num_pairs):\n\t 
if i % 100 == 0:\n\t print(i)\n\t idx = np.random.choice(num_sentences, 2, replace = False)\n\t sentence_1 = generate_binary_vector(idx[0])\n\t sentence_2 = generate_binary_vector(idx[1])\n\t \n\t count_and = np.sum(sentence_1 * sentence_2)\n\t count_or = np.sum(sentence_1) + np.sum(sentence_2) - count_and\n\t \n\t Jaccard_distance[i] = 1 - count_and / count_or\n\t \n\t# plot the histogram\n\tplt.figure(1, figsize=(8,5))\n\tplt.hist(Jaccard_distance, label = \"Jaccard_distance_histogram\", bins = 100, density=True, alpha = 0.6)\n\tplt.xlabel('Jaccard distance',fontsize=13)\n\tplt.ylabel('Number of pairs',fontsize=13)\n\tplt.legend(loc='upper left', frameon=False, prop={'size': 10})\n\tplt.savefig('./10000Jaccard_distance', dpi = 600)\n\n\t# min of these Jaccard distance\n\tprint(\"min of these Jaccard distance\", min(Jaccard_distance))\n\t# mean of these Jaccard distance\n\tJaccard_distance = np.asarray(Jaccard_distance)\n\tprint(\"mean of these Jaccard distance\", np.mean(Jaccard_distance))\n\n\n\n\t# Question 4\n\t#######################\n\t# design a better way to store the data by only using their non zero index\n\t# loop through all the data and for each one, construct a vector storing its non-zero indices\n\t# the final data set we have is a list of list (mimic sparse matrix)\n\t#######################\n\n\n\tData_list = []\n\tprint(\"Constructing a better data structur to store the data by only storing the non zero indices and monitor the process\")\n\n\t# Loop through each sentence\n\tfor i in range(num_sentences):\n\t # Print the process\n\t if i % 1000 == 0:\n\t print(i)\n\n\t temp_list = []\n\t seen = set()\n\t # Load one sentence \n\t sentence = dataset[\"Text\"][i]\n\t \n\t # Compute the length of the sentence\n\t length = len(sentence)\n\t \n\t # If the sentence length is less than 50, do padding with 50 space\n\t if length < k_num:\n\t sentence = sentence + \" \" * k_num\n\t length += k_num\n\t \n\t # Loop the sentence\n\t for j in range(length - 
k_num + 1):\n\t temp = sentence[j:j+k_num]\n\t if temp in k_shingles_dict and temp not in seen:\n\t seen.add(temp)\n\t temp_list.append(k_shingles_dict[temp])\n\t \n\t Data_list.append(temp_list)\n\n\n\n\t# Question 5\n\t#######################\n\t# Computing the smallest prime number larger than R (number of total shingles) and call this prime number R\n\t#######################\n\n\t# Data_list contains all the sentence and the indices of shingle they include\n\n\t# Find the smallest prime numbergreater than n_row (number of shingles) in order to make good permutations\n\tR = n_row\n\n\tdef prime_number(num):\n\t # Iterate from 2 to n / 2 \n\t for i in range(2, num//2): \n\t # If num is divisible by any number between \n\t # 2 and n / 2, it is not prime \n\t if (num % i) == 0: \n\t return False \n\t else:\n\t return True\n\n\tprint(\"total number of shingles\", R)\n\t# If given number is greater than 1 \n\twhile prime_number(R) == False:\n\t R += 1\n\t \n\tprint(\"Prime number we use for the min-hashing\", R)\n\n\t#######################\n\t# constructing signature matrix\n\t#######################\n\n\tM = 600\n\tpi = np.random.randint(1, R - 1, size=(M, 2))\n\tprint(pi.shape)\n\n\n\tdef h_pi(data, pi, R, M):\n\t data = np.asarray(data)[None,:]\n\t aa = pi[:,0:1]\n\t bb = pi[:,1:2]\n\t hash_vector = np.min((aa * data + bb) % R ,axis=1)[:,None]\n\t return hash_vector\n\n\tHash_matrix = np.zeros((M, num_sentences))\n\n\tprint(\"Computing the signature matrix and monitor the process\")\n\n\tfor i in range(num_sentences):\n\t if i % 1000 == 0:\n\t print(i)\n\t data = Data_list[i]\n\t Hash_matrix[:, i:i+1] = h_pi(data, pi, R, M)\n\n\n\t#######################\n\t# determine the number of bands and number of elements r in each band\n\t#######################\n\n\t# Here we can conclude if we would like to catch all possible 80 % similar sentences, set m = 1000, r = 15, b = 40\n\t# If we just want to reduce the number of making mistakes \n\n\tx = np.linspace(0, 1, 
100)[:,None]\n\n\tdef function_curve(x, r, b):\n\t return 1 - (1 - x**r)**b\n\n\tplt.figure(10)\n\tfor r in [5, 10, 15, 20, 40, 50, 100]:\n\t y = function_curve(x, r, 600//r)\n\t plt.plot(x, y, label = \"r=\" +str(r))\n\tplt.axvline(x=0.8)\n\tplt.axvline(x=0.5)\n\t \n\tplt.legend(loc='upper left', frameon=False, prop={'size': 10})\n\tplt.xlabel('Similarity',fontsize=13)\n\tplt.ylabel('Pr(hit)',fontsize=13)\n\tplt.savefig('./Determine_M', dpi = 600)\n\n\t# set the value for r and b\n\tr = 10\n\tb = 60\n\n\t# initially set the second prime number close to 100 times the shingles size\n\tP = R * 100\n\n\t#print(P)\n\t# If given number is greater than 1 \n\twhile prime_number(P) == False:\n\t P += 1\n\t \n\tprint(\"prime number we use for the second min-hashing assigning the bucket values\", P)\n\n\tpi_r = np.random.randint(1, P - 1, size=(r, 2))\n\t#print(pi_r.shape)\n\n\t#######################\n\t# functions for the second min-hashing\n\t#######################\n\n\t# function for returning the similar pairs of reviews in band # b\n\tdef find_similar_in_band(pi_r, b, r, P):\n\t buckets = {}\n\t a_v = pi_r[:, 0:1]\n\t b_v = pi_r[:, 1:2]\n\t for i in range(num_sentences):\n\t data = Hash_matrix[b*r:(b+1)*r,i:i+1]\n\t temp = np.sum((data * a_v + b_v) % P)\n\t if temp not in buckets:\n\t buckets[temp] = [i]\n\t else:\n\t buckets[temp].append(i)\n\t \n\t return buckets #pairs\n\t \n\t \n\tdef get_pairs(buckets):\n\t pairs = []\n\t for key in buckets:\n\t length = len(buckets[key])\n\t if length >= 2:\n\t for i in range(length):\n\t for j in range(i+1, length):\n\t pairs.append((buckets[key][i], buckets[key][j]))\n\n\t return pairs\n\n\n\t#######################\n\t# performing the second min-hashing and get the final candidate pairs \n\t#######################\n\tprint(\"Do the second min-hashing and assigne values for each bands into different buckets\")\n\n\t# Store all the buckets information for all the bands\n\tlist_buckets = []\n\n\t# Compute the bucket for the 
first band\n\tbuckets = find_similar_in_band(pi_r, 0, r, P)\n\n\t# Append the computed bucket in the list\n\tlist_buckets.append(buckets)\n\n\t# Parse the bucket information to get final pairs\n\tband_pair = get_pairs(buckets)\n\n\t# Build a dictionary of final candidate pairs (to avoid repeat count)\n\tfinal_pair = set()\n\tfor i in range(len(band_pair)):\n\t if band_pair[i] not in final_pair:\n\t final_pair.add(band_pair[i])\n\n\tprint(\"Going through the data band by band with total number of band\", b)\n\n\t# Compute the bucket for all the rest bands and parse the data\n\tprint(len(final_pair))\n\tfor i in range(1, b):\n\t print(i)\n\t buckets = find_similar_in_band(pi_r, i, r, P)\n\t list_buckets.append(buckets)\n\t get = get_pairs(buckets)\n\t for j in range(len(get)):\n\t if get[j] not in final_pair:\n\t final_pair.add(get[j])\n\n\tprint(\"number of final candidate pairs\", len(final_pair))\n\t \n\t#######################\n\t# Looping through the candidate pairs and compute the Jaccard distance for determining the similarity\n\t#######################\n\n\tfinal_length = len(final_pair)\n\n\n\toutput_pair = []\n\tcount = 0\n\n\t# Total Jaccard distance\n\tJaccard_distance_new_1 = []\n\n\t# Accepted Jaccard distance\n\tJaccard_distance_new_2 = []\n\n\tprint(\"Going through all the candidate pairs and check their Jaccard distance to determine whether to accept or not\")\n\n\tfor x in final_pair:\n\t count += 1\n\t if count % 100 == 0:\n\t print(count)\n\t idx1, idx2 = x\n\t sentence_1 = generate_binary_vector(idx1)\n\t sentence_2 = generate_binary_vector(idx2)\n\t \n\t count_and = np.sum(sentence_1 * sentence_2)\n\t count_or = np.sum(sentence_1) + np.sum(sentence_2) - count_and\n\t \n\t Jaccard_distance = 1 - count_and / count_or\n\t Jaccard_distance_new_1.append(Jaccard_distance)\n\t \n\t if Jaccard_distance <= 0.2:\n\t output_pair.append([idx1, idx2])\n\t Jaccard_distance_new_2.append(Jaccard_distance)\n\n\tplt.figure(3, 
figsize=(8,6))\n\tplt.hist(Jaccard_distance_new_2, label = \"Accepted Jaccard_distance\", bins = 100, density=True, alpha = 0.6)\n\tplt.legend(loc='upper right', frameon=False, prop={'size': 10})\n\tplt.savefig('./Accepted_Jaccard_Distance', dpi = 600)\n\t\n\tprint(\"final number of similar paris in the data set\", len(output_pair))\n\n\t# store all the pairs into csv file\n\timport csv\n\n\tnum_final_output_paris = len(output_pair)\n\n\tw = csv.writer(open(\"close_pairs.csv\", \"w\"))\n\tfor i in range(num_final_output_paris):\n\t idx_sim_1, idx_sim_2 = output_pair[i]\n\t w.writerow([dataset[\"ID\"][idx_sim_1], dataset[\"ID\"][idx_sim_2]])\n\n\n\t# Question 6\n\t#######################\n\t# finding the nearest reviewID given a new queried review \n\t#######################\n\n\n\n\n\t# relax the similarity to be 0.2 for the later work\n\tr = 2\n\tb = 300\n\n\tpi_r = np.random.randint(1, P - 1, size=(r, 2))\n\n\tprint(\"Building a relax threshold for finding nearest neighbor that has at least similarity 0.5\")\n\n\t# Store all the buckets information for all the bands\n\tlist_buckets = []\n\n\t# Compute the bucket for the first band\n\tbuckets = find_similar_in_band(pi_r, 0, r, P)\n\n\t# Append the computed bucket in the list\n\tlist_buckets.append(buckets)\n\n\n\tprint(\"Going through the data band by band for the relaxed case, with total number of band\", b)\n\n\t# Compute the bucket for all the rest bands and parse the data\n\tprint(len(final_pair))\n\tfor i in range(1, b):\n\t print(i)\n\t buckets = find_similar_in_band(pi_r, i, r, P)\n\t list_buckets.append(buckets)\n\n\n\t######### store all the datas needed from prediction \n\n\t\n\tnp.save(\"pi_r\", pi_r)\n\tnp.save(\"pi\", pi)\n\tnp.save(\"P\", P)\n\tnp.save(\"R\", R)\n\n\n\tw = csv.writer(open(\"shingle_dict.csv\", \"w\"))\n\tfor key, val in k_shingles_dict.items():\n\t\tw.writerow([key, val])\n\n\n\tnum_buckets = len(list_buckets)\n\n\timport json\n\t\n\twith open(\"list_buckets.json\", \"w\") as 
f:\n\t\tjson.dump(list_buckets, f)\n\n\n\t# generate vector storing the non zero indices for a given sentence \n\tdef convert_binary_vector(sentence):\n\t # Load one sentence \n\t \n\t Binary_data_vector = np.zeros((n_row, 1))\n\t # Compute the length of the sentence\n\t length = len(sentence)\n\t \n\t # If the sentence length is less than 50, do padding with 50 space\n\t if length < k_num:\n\t sentence = sentence + \" \" * k_num\n\t length += k_num\n\t \n\t # Loop the sentence\n\t for j in range(length - k_num + 1):\n\t temp = sentence[j:j+k_num]\n\t if temp in k_shingles_dict:\n\t Binary_data_vector[k_shingles_dict[temp]] = 1\n\n\t return Binary_data_vector\n\n\n\t#######################\n\t# function for finding the nearest reviewID given a new queried review \n\t#######################\n\n\tdef find_the_nearest(list_buckets, queried_review, b, r):\n\t # parsing the review by removing the punctuations and stop words\n\t sentence = parse_sentence(queried_review.lower(), punctuations, stop_words_list)\n\t \n\t # compute the indexs of the none zeros elements and put it into a list\n\t temp_list = []\n\t seen = set()\n\t \n\t length = len(sentence)\n\t \n\t if length < k_num:\n\t sentence = sentence + \" \" * k_num\n\t length += k_num\n\t \n\t for j in range(length - k_num + 1):\n\t temp = sentence[j:j+k_num]\n\t if temp in k_shingles_dict and temp not in seen:\n\t seen.add(temp)\n\t temp_list.append(k_shingles_dict[temp])\n\t \n\t # compute the signature vector \n\t signature_vector = h_pi(temp_list, pi, R, M)\n\t \n\t # compute the bucket value for each band and search in the bucket_list we got from the previous question\n\t nearest_index = []\n\t \n\t a_v = pi_r[:, 0:1]\n\t b_v = pi_r[:, 1:2]\n\t for i in range(b):\n\t data = signature_vector[i*r:(i+1)*r,0:1]\n\t temp = np.sum((data * a_v + b_v) % P)\n\t bucket = list_buckets[i]\n\t if temp in bucket:\n\t num = len(bucket[temp])\n\t for j in range(num):\n\t nearest_index.append(bucket[temp][j])\n\t \n\t # find 
the nearest index and concatenate them together and return \n\t return_ID = []\n\t num_nearest = len(nearest_index)\n\t for i in range(num_nearest):\n\t if dataset[\"ID\"][nearest_index[i]] not in return_ID:\n\t return_ID.append(dataset[\"ID\"][nearest_index[i]])\n\t \n\t length = len(nearest_index)\n\t if length > 0:\n\t sentence_1 = convert_binary_vector(sentence)\n\t Jaccard_distance_min = 1.\n\t id_min = nearest_index[0]\n\n\t for i in range(length):\n\t idx2 = nearest_index[i]\n\t sentence_2 = generate_binary_vector(idx2)\n\n\t count_and = np.sum(sentence_1 * sentence_2)\n\t count_or = np.sum(sentence_1) + np.sum(sentence_2) - count_and\n\n\t Jaccard_distance = 1 - count_and / count_or\n\t Jaccard_distance_new_1.append(Jaccard_distance)\n\n\t if Jaccard_distance < Jaccard_distance_min:\n\t Jaccard_distance_min = Jaccard_distance\n\t id_min = idx2\n\t else:\n\t id_min = -1\n\t \n\t if id_min == -1:\n\t final_nearest_ID = \"Cannot find any similar review having Jaccard distance less than 0.8 with the given one\"\n\t else:\n\t final_nearest_ID = dataset[\"ID\"][id_min]\n\t \n\t return return_ID, id_min, final_nearest_ID\n\t \n\n\tprint(\"Show some test cases for finding the nearest neighbor\")\n\n\tprint(\"\\n\")\n\n\t# test some different cases\n\tqueried_review = \"good quality dental chew less give one day freshens dogs breath quality edible ingredients\"\n\n\treturn_ID, idx, id_min = find_the_nearest(list_buckets, queried_review, b, r)\n\tprint(\"Inpute queried_review is:\", queried_review)\n\tprint(\"The closest reviewID of the given queried_review is:\", id_min)\n\tif idx != -1:\n\t\tprint(\"It is:\", dataset[\"Text\"][idx])\n\n\n\tprint(\"\\n\")\n\n\tqueried_review = \"dog loved birthday loves chance havent gotten see durability yet since days floating far hasnt chewed\"\n\n\treturn_ID, idx, id_min = find_the_nearest(list_buckets, queried_review, b, r)\n\tprint(\"Inpute queried_review is:\", queried_review)\n\tprint(\"The closest reviewID of the given 
queried_review is:\", id_min)\n\tif idx != -1:\n\t\tprint(\"It is:\", dataset[\"Text\"][idx])\n\n\tprint(\"\\n\")\n\n\tqueried_review = \"I have a nice good cat walk around me.\"\n\n\treturn_ID, idx, id_min = find_the_nearest(list_buckets, queried_review, b, r)\n\tprint(\"Inpute queried_review is:\", queried_review)\n\tprint(\"The closest reviewID of the given queried_review is:\", id_min)\n\tif idx != -1:\n\t\tprint(\"It is:\", dataset[\"Text\"][idx])\n\n\n\tprint(\"\\n\")\n\n\tqueried_review = \"I like to have enough time walking my dog run for long time\"\n\n\treturn_ID, idx, id_min = find_the_nearest(list_buckets, queried_review, b, r)\n\tprint(\"Inpute queried_review is:\", queried_review)\n\tprint(\"The closest reviewID of the given queried_review is:\", id_min)\n\tif idx != -1:\n\t\tprint(\"It is:\", dataset[\"Text\"][idx])\n\n\n\n\n","sub_path":"AmazonReview/Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"228846071","text":"import numpy as np\n\nclass ReciprocalToNormal:\n def __init__(self,\n primitive,\n frequencies,\n eigenvectors,\n cutoff_frequency=0,\n cutoff_hfrequency=10000.0,\n cutoff_delta = None):\n self._primitive = primitive\n self._frequencies = frequencies\n self._eigenvectors = eigenvectors\n self._cutoff_frequency = cutoff_frequency\n self._cutoff_hfrequency = cutoff_hfrequency\n self._cutoff_delta = cutoff_delta\n self._masses = self._primitive.get_masses()\n\n self._fc3_normal = None\n self._fc3_reciprocal = None\n\n def run(self, fc3_reciprocal, grid_triplet, g_skip=None, is_sym_fc3q=True):\n num_band = self._primitive.get_number_of_atoms() * 3\n self._fc3_reciprocal = fc3_reciprocal\n self._fc3_normal = np.zeros((num_band,) * 3, dtype='double')\n # fc3_normal_all = np.zeros((6, num_band, num_band, num_band), dtype='double')\n # if is_sym_fc3q:\n # for e, index in enumerate([[0, 1, 2], [0, 2, 1], [1, 0, 
2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]):\n # self._fc3_normal = np.zeros((num_band,) * 3, dtype='double')\n # if e == 0:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip)\n # fc3_normal_all[e] = self._fc3_normal\n # elif e == 1:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip.swapaxes(1, 2))\n # fc3_normal_all[e] = self._fc3_normal.swapaxes(1, 2)\n # elif e == 2:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip.swapaxes(0, 1))\n # fc3_normal_all[e] = self._fc3_normal.swapaxes(0, 1)\n # elif e == 3:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip.swapaxes(1,2).swapaxes(0,2))\n # fc3_normal_all[e] = self._fc3_normal.swapaxes(0,2).swapaxes(1,2)\n # elif e == 4:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip.swapaxes(1,2).swapaxes(0, 1))\n # fc3_normal_all[e] = self._fc3_normal.swapaxes(0,1).swapaxes(1,2)\n # elif e == 5:\n # self._reciprocal_to_normal(grid_triplet[index], g_skip=g_skip.swapaxes(0,2))\n # fc3_normal_all[e] = self._fc3_normal.swapaxes(0,2)\n # print np.abs(fc3_normal_all - fc3_normal_all[0]).max()\n self._reciprocal_to_normal(grid_triplet, g_skip=g_skip)\n\n\n def get_reciprocal_to_normal(self):\n return self._fc3_normal\n\n def _reciprocal_to_normal(self, grid_triplet, g_skip=None):\n e1, e2, e3 = self._eigenvectors[grid_triplet]\n f1, f2, f3 = self._frequencies[grid_triplet]\n num_band = len(f1)\n cutoff = self._cutoff_frequency\n for (i, j, k) in list(np.ndindex((num_band,) * 3)):\n if g_skip is not None and g_skip[i,j,k]:\n continue\n if f1[i] > cutoff and f1[i] < self._cutoff_hfrequency \\\n and f2[j] > cutoff and f3[k] > cutoff:\n f=self._frequencies[grid_triplet]\n fc3_elem = self._sum_in_atoms((i, j, k), (e1, e2, e3))\n fff = f1[i] * f2[j] * f3[k]\n self._fc3_normal[i, j, k] = np.abs(fc3_elem) ** 2 / fff\n\n def _sum_in_atoms(self, band_indices, eigvecs):\n num_atom = self._primitive.get_number_of_atoms()\n (e1, e2, e3) = eigvecs\n (b1, b2, b3) = 
band_indices\n\n sum_fc3 = 0j\n for (i, j, k) in list(np.ndindex((num_atom,) * 3)):\n sum_fc3_cart = 0\n for (l, m, n) in list(np.ndindex((3, 3, 3))):\n sum_fc3_cart += (e1[i * 3 + l, b1] *\n e2[j * 3 + m, b2] *\n e3[k * 3 + n, b3] *\n self._fc3_reciprocal[i, j, k, l, m, n])\n mass_sqrt = np.sqrt(np.prod(self._masses[[i, j, k]]))\n sum_fc3 += sum_fc3_cart / mass_sqrt\n\n return sum_fc3\n","sub_path":"anharmonic/phonon3/reciprocal_to_normal.py","file_name":"reciprocal_to_normal.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"523344371","text":"from netCDF4 import Dataset\nimport urllib\nimport glob\nimport json\nimport pandas as pd\n\nfnames=glob.glob(\"../netcdfs/annClim/PMIP4/*_annClim*nc\")\nnumncfiles=len(fnames)\n\nfor i in range(0,numncfiles):\n file = fnames[i]\n #print(file)\n #Step 1 load the netcdf file and get the values of several global attributes\n r = Dataset(file, mode='r')\n mip_era = getattr(r,'mip_era')\n pid = getattr(r,'tracking_id')\n activity_id = getattr(r,'activity_id')\n institution_id = getattr(r,'institution_id')\n source_id = getattr(r,'source_id')\n experiment_id = getattr(r,'experiment_id')\n sub_experiment_id = getattr(r,'sub_experiment_id')\n table_id = getattr(r,'table_id')\n variable_id = getattr(r,'variable_id')\n variant_label = getattr(r,'variant_label')\n grid_label = getattr(r,'grid_label')\n r.close()\n \n data_ref_syntax = mip_era+'.'+activity_id+'.'+institution_id+'.'+source_id+'.'+experiment_id\n \n #Remove 'hdl:' from the start of the pid\n if pid[0:4] == 'hdl:':\n pid = pid[4:]\n \n #Go to the webpage for the PID\n \n pidurl = 'https://handle-esgf.dkrz.de/lp/handles/'+pid\n \n readurl = urllib.request.urlopen(pidurl)\n data = json.loads(readurl.read())\n \n try:\n version_number = data[\"VERSION_NUMBER\"]\n except KeyError:\n try:\n datasetpid = data[\"URL_ORIGINAL_DATA\"][0][\"dataset\"]\n if datasetpid[0:4] == 
'hdl:':\n datasetpid = datasetpid[4:]\n\n datasetpidurl = 'https://handle-esgf.dkrz.de/lp/handles/'+datasetpid\n \n readurl_dataset = urllib.request.urlopen(datasetpidurl)\n data_dataset = json.loads(readurl_dataset.read())\n try:\n version_number = data_dataset[\"VERSION_NUMBER\"]\n except KeyError:\n version_number = \"UNKNOWN\"\n except KeyError:\n version_number = \"UNKNOWN\"\n\n if i == 0:\n data = {'DATA_REF_SYNTAX': [data_ref_syntax],\n 'SUB_EX_ID': ['none'],\n 'ENS_MEMBER': [variant_label],\n 'TABLE_ID': [table_id],\n 'VAR_NAME': [variable_id],\n 'GRID_LABEL':[grid_label],\n 'VERSION_NO':[version_number],\n 'HANDLE':['hdl:'+pid],\n 'SUBPANEL':['b']}\n #df=pd.DataFrame(data,columns=['DATA_REF_SYNTAX','SUB_EX_ID','ENS_MEMBER','TABLE_ID','VAR_NAME','GRID_LABEL','VERSION_NO','HANDLE','SUBPANEL'])\n print('DATA_REF_SYNTAX,SUB_EX_ID,ENS_MEMBER,TABLE_ID,VAR_NAME,GRID_LABEL,VERSION_NO,HANDLE,SUBPANEL')\n print(data_ref_syntax+',none,'+variant_label+','+table_id+','+variable_id+','+grid_label+','+version_number+','+'hdl:'+pid+',')\n else:\n data = {'DATA_REF_SYNTAX': [data_ref_syntax],\n 'SUB_EX_ID': ['none'],\n 'ENS_MEMBER': [variant_label],\n 'TABLE_ID': [table_id],\n 'VAR_NAME': [variable_id],\n 'GRID_LABEL':[grid_label],\n 'VERSION_NO':[version_number],\n 'HANDLE':['hdl:'+pid],\n 'SUBPANEL':['b']}\n #df2=pd.DataFrame(data,columns=['DATA_REF_SYNTAX','SUB_EX_ID','ENS_MEMBER','TABLE_ID','VAR_NAME','GRID_LABEL','VERSION_NO','HANDLE','SUBPANEL'])\n #df.append(df2,ignore_index=True)\n print(data_ref_syntax+',none,'+variant_label+','+table_id+','+variable_id+','+grid_label+','+version_number+','+'hdl:'+pid+',')\n \n#df.to_csv('fig3.11_md_cmip6.csv')\n","sub_path":"scripts/scrape_together_CMIP6_metadata_file.py","file_name":"scrape_together_CMIP6_metadata_file.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"480567148","text":"# coding:utf8\n\nimport pandas\nfrom pandas 
import read_csv\n\ndata1 = read_csv(\n \"E:\\Workspace\\idata\\lab_data_analysis\\data\\lab18_01\\data1.csv\",\n sep=\"|\"\n)\n\ndata2 = read_csv(\n \"E:\\Workspace\\idata\\lab_data_analysis\\data\\lab18_01\\data2.csv\",\n sep=\"|\"\n)\n\ndata3 = read_csv(\n \"E:\\Workspace\\idata\\lab_data_analysis\\data\\lab18_01\\data3.csv\",\n sep=\"|\"\n)\n\n# 合并\ndata = pandas.concat([data1, data2, data3])\n\n# 不同的列是否能合并(缺的列会用na来补齐)\ndata = pandas.concat([\n data1[[\"id\", \"comments\"]],\n data2[[\"comments\", \"title\"]],\n data3[[\"id\", \"title\"]]\n])\n","sub_path":"lab_data_analysis/lab18_01.py","file_name":"lab18_01.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"305473874","text":"from lib.util import *\nfrom lib.relations import *\nfrom learner import *\nimport math\n\ndef random_subset(set, size):\n\tX = list(set)\n\tY = list()\n\tfor i in range(size):\n\t\tif len(X) > 0:\n\t\t\tindex = rr(len(X))\n\t\t\tvalue = X[index]\n\t\t\tdel X[index]\n\t\t\tY.append(value)\n\t\telse:break\n\treturn Y\n\nclass Recognizer:\n\tdef __init__(self, variables, learners, threshold, increment):\n\t\tself.variables = variables\n\t\tself.threshold = threshold\n\t\tself.increment = increment\n\t\tself.urgencies = list()\n\t\tself.utilities = list()\n\t\tself.learners = list()\n\n\t\tfor i in range(learners):\n\t\t\tself.learners.append(Learner(threshold=threshold, increment=increment))\n\t\t\tself.urgencies.append(.1)\n\t\t\tself.utilities.append(0)\n\t\tself.init()\n\n\tdef init(self):\n\t\tfor\ti in range(len(self.learners)):\n\t\t\tsize = rr(int(len(self.variables) * .115), int(len(self.variables)))\n\t\t\tself.learners[i].init(pattern=random_subset(self.variables, size))\n\n\tdef rank(self, example):\n\t\tutilities = []\n\t\tfor i in range(len(self.learners)):\n\t\t\tutilities.append(self.learners[i].test(example))\n\t\treturn utilities\n\n\tdef revise(self, index, 
utility):\n\t\tself.urgencies[index] += self.increment * utility * self.urgencies[index]\n\n\tdef states(self, utilities):\n\t\tstates = []\n\t\tfor i in range(len(utilities)):\n\t\t\tstate = utilities[i]\n\t\t\tdelta = -self.urgencies[i]\n\t\t\tfor j in range(len(utilities)):\n\t\t\t\tif i != j:\n\t\t\t\t\tdelta -= utilities[j]\n\t\t\tstate += delta * self.increment\n\t\t\tstates.append(state)\n\t\treturn states\n\n\tdef train(self, example):\n\t\tranks = self.rank(example)\n\t\tstates = self.states(ranks)\n\t\torder = reverse(sort(states))\n\t\tindex = order[0]\n\t\tbest = states[index]\n\n\t\tif best < self.threshold:\n\t\t\tempty = []\n\t\t\tsizes = []\n\t\t\tfor i in range(len(self.learners)):\n\t\t\t\tsizes.append(len(self.learners[i].pattern))\n\t\t\torder = sort(sizes)\n\t\t\tsample = order[0:int(len(order)/3)]\n\t\t\tindex = order[rr(len(sample))]\n\n\t\tperformance = 0\n\t\tfor i in range(len(self.learners)):\n\t\t\trank = ranks[i]\n\t\t\tplace = order.index(i)\n\t\t\ttotal = len(order)\n\t\t\t\n\t\t\tif i == index: \n\t\t\t\tobservation = example\n\t\t\telif self.urgencies[i] < 0:\n\t\t\t\tobservation = []\n\t\t\telse:observation = self.learners[i].example\n\t\t\tself.learners[i].train(observation)\n\n\t\t\tutility = self.learners[i].utility()\n\t\t\tself.utilities[i] = utility\n\t\t\tself.revise(i, utility)\n\t\t\tperformance += utility\n\n\t\treturn performance\n\n","sub_path":"src/recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"63918840","text":"import numpy as np\nfrom typing import Dict, Union\n\n# import from package\nfrom resistics.dataObjects.timeData import TimeData\nfrom resistics.utilities.utilsPrint import errorPrint\n\n\ndef polarityReversal(\n timeData: TimeData, reversal: Dict[str, bool], inplace: bool = True\n) -> TimeData:\n \"\"\"Multiply the data by -1 (polarity reversal)\n \n Parameters\n 
----------\n timeData : TimeData\n timeData to normalise\n reversal : Dict[str, bool]\n Keys are channels and values are boolean flags for reversing\n inplace : bool, optional\n Whether to manipulate the data inplace\n\n Returns\n -------\n TimeData\n Normalised time data\n \"\"\"\n\n if not inplace:\n timeData = timeData.copy()\n timeData.data = polarityReversalData(timeData.data, reversal)\n timeData.addComment(\"Polarity reversal with parameters: {}\".format(reversal))\n return timeData\n\n\ndef polarityReversalData(\n data: Dict[str, np.ndarray], reversal: Dict[str, bool]\n) -> Dict[str, np.ndarray]:\n \"\"\"Polarity reverse data or simply multiply by -1\n \n Parameters\n ----------\n data : Dict\n Dictionary with channel as keys and data as values\n reversal : Dict[str, bool]\n Keys are channels and values are boolean flags for reversing\n \n Returns\n -------\n Dict\n Dictionary with channel as keys and normalised data as values\n \"\"\"\n\n for c in data:\n if c in reversal and reversal[c]:\n data[c] = data[c] * -1\n return data\n\n\ndef scale(\n timeData: TimeData, scalars: Dict[str, bool], inplace: bool = True\n) -> TimeData:\n \"\"\"Scale the data by an arbitrary amount\n \n Parameters\n ----------\n timeData : TimeData\n timeData to normalise\n scalars : Dict[str, float]\n Keys are channels and values are boolean flags for reversing\n inplace : bool, optional\n Whether to manipulate the data inplace\n\n Returns\n -------\n TimeData\n Normalised time data\n \"\"\"\n\n if not inplace:\n timeData = timeData.copy()\n timeData.data = scaleData(timeData.data, scalars)\n timeData.addComment(\"Time data scaled with scalars: {}\".format(scalars))\n return timeData\n\n\ndef scaleData(\n data: Dict[str, np.ndarray], scalars: Dict[str, bool]\n) -> Dict[str, np.ndarray]:\n \"\"\"Polarity reverse data or simply multiply by -1\n \n Parameters\n ----------\n data : Dict\n Dictionary with channel as keys and data as values\n scalars : Dict[str, float]\n Keys are channels 
and values are flaots\n \n Returns\n -------\n Dict\n Dictionary with channel as keys and normalised data as values\n \"\"\"\n\n for c in data:\n if c in scalars:\n data[c] = data[c] * scalars[c]\n return data\n\n\ndef intdiv(nom: Union[int, float], div: Union[int, float]) -> int:\n \"\"\"Return an integer result of division\n\n The division is expected to be exact and ensures an integer return rather than float.\n Code execution will exit if division is not exact\n \n Parameters\n ----------\n nom : int, float\n Nominator\n div : int, float\n Divisor \n\n Returns\n -------\n out : int\n Result of division\n \"\"\"\n\n if nom % div == 0:\n return nom // div\n else:\n errorPrint(\n \"utilsMath::intdiv\",\n \"intdiv assumes exits upon having a remainder to make sure errors are not propagated through the code\",\n quitRun=True,\n )\n return 0\n\n","sub_path":"resistics/utilities/utilsMath.py","file_name":"utilsMath.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"15077571","text":"from .model import Model\nimport cv2\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport math\n\n\nclass ColorMoments(Model):\n def __init__(self, widthOfWindow=100, heightOfWindow=100):\n self._widthOfWindow = widthOfWindow\n self._heightOfWindow = heightOfWindow\n\n def extractFeatures(self, img):\n # Convert to YUV first.\n imgYUV = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\n\n # Get the height / width / number of channels\n h = imgYUV.shape[0]\n w = imgYUV.shape[1]\n c = imgYUV.shape[2]\n\n # The number of windows\n numOfRows = int((h + 1) / self._heightOfWindow)\n numOfColumns = int((w + 1) / self._widthOfWindow)\n\n # The results we wanted.\n resultMean = np.zeros(shape=(numOfRows, numOfColumns, c))\n resultStd = np.zeros(shape=(numOfRows, numOfColumns, c))\n resultSkew = np.zeros(shape=(numOfRows, numOfColumns, c))\n\n for hIdx in range(numOfRows):\n # Find 
the end index of row window.\n hEndIdx = (\n ((hIdx + 1) * self._heightOfWindow) if hIdx < numOfRows - 1 else None\n )\n\n for wIdx in range(numOfColumns):\n # Find the end index of column window.\n wEndIdx = (\n ((wIdx + 1) * self._widthOfWindow)\n if wIdx < numOfColumns - 1\n else None\n )\n\n window = imgYUV[\n hIdx * self._heightOfWindow : hEndIdx,\n wIdx * self._widthOfWindow : wEndIdx,\n :,\n ]\n\n resultMean[hIdx, wIdx, :] = np.mean(window, axis=(0, 1))\n resultStd[hIdx, wIdx, :] = np.std(window, axis=(0, 1))\n # I tried scipy.stats.skew, but it seems the result is not correct.\n # Implement it by myself.\n resultSkew[hIdx, wIdx, 0] = np.sum(\n np.power(window[:, :, 0] - resultMean[hIdx, wIdx, 0], 3)\n ) / (self._heightOfWindow * self._widthOfWindow)\n resultSkew[hIdx, wIdx, 1] = np.sum(\n np.power(window[:, :, 1] - resultMean[hIdx, wIdx, 1], 3)\n ) / (self._heightOfWindow * self._widthOfWindow)\n resultSkew[hIdx, wIdx, 2] = np.sum(\n np.power(window[:, :, 2] - resultMean[hIdx, wIdx, 2], 3)\n ) / (self._heightOfWindow * self._widthOfWindow)\n\n resultSkew[hIdx, wIdx, 0] = np.sign(resultSkew[hIdx, wIdx, 0]) * (\n np.abs(resultSkew[hIdx, wIdx, 0])\n ) ** (1 / 3)\n resultSkew[hIdx, wIdx, 1] = np.sign(resultSkew[hIdx, wIdx, 1]) * (\n np.abs(resultSkew[hIdx, wIdx, 1])\n ) ** (1 / 3)\n resultSkew[hIdx, wIdx, 2] = np.sign(resultSkew[hIdx, wIdx, 2]) * (\n np.abs(resultSkew[hIdx, wIdx, 2])\n ) ** (1 / 3)\n\n return (resultMean, resultStd, resultSkew)\n\n def getSimilarity(self, feature1, feature2, distanceFunction):\n # Calculate distance.\n mean1, std1, skew1 = feature1\n mean2, std2, skew2 = feature2\n\n # Make the features to one-dimension vectors.\n feature1 = np.concatenate((mean1, std1, skew1), axis=2).flatten()\n feature2 = np.concatenate((mean2, std2, skew2), axis=2).flatten()\n\n return distanceFunction(feature1, feature2)\n\n def getSimilarityScore(self, data):\n return data\n\n def serializeFeature(self, featuresData):\n return featuresData\n\n def 
deserializeFeature(self, data):\n return data\n\n def extendChannel(self, channel):\n return np.reshape(\n np.repeat(channel, 3), (channel.shape[0], channel.shape[1], 3)\n )\n\n def visualizeFeatures(self, img, feature):\n # If we do not assign TKAgg, the fig.canvas.tostring_rgb() would raise error\n # since MacOS version does not implement it.\n matplotlib.use(\"TKAgg\", force=True)\n\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n imgYUV = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\n\n # Print the matrix of mean, std and skew\n with np.printoptions(precision=3, suppress=True):\n print(\"The mean of Y, U and V channel are:\")\n print(feature[0][:, :, 0])\n print(feature[0][:, :, 1])\n print(feature[0][:, :, 2])\n\n print(\"The std of Y, U and V channel are:\")\n print(feature[1][:, :, 0])\n print(feature[1][:, :, 1])\n print(feature[1][:, :, 2])\n\n print(\"The skew of Y, U and V channel are:\")\n print(feature[2][:, :, 0])\n print(feature[2][:, :, 1])\n print(feature[2][:, :, 2])\n\n mean = feature[0] / 255.0\n std = feature[1] / np.max(feature[1], axis=(0, 1))\n\n skew = feature[2] - np.min(feature[2], axis=(0, 1))\n skew = skew / np.max(skew, axis=(0, 1))\n\n # https://stackoverflow.com/questions/25862026/turn-off-axes-in-subplots/25864515\n # https://stackoverflow.com/questions/37723963/broadcast-one-channel-in-numpy-array-into-three-channels\n fig, axarr = plt.subplots(4, 4)\n fig.set_size_inches(8, 8, forward=True)\n\n fig.suptitle(\"Color Moments\")\n\n axarr[0, 0].imshow(imgRGB)\n axarr[0, 0].set_title(\"Origin image\")\n axarr[0, 0].axis(\"off\")\n\n axarr[0, 1].axis(\"off\")\n axarr[0, 2].axis(\"off\")\n axarr[0, 3].axis(\"off\")\n\n # Generate all visualized result for each channel and each moment.\n channels = [\"Y\", \"U\", \"V\"]\n dataPair = [(\"Channel\", imgYUV), (\"mean\", mean), (\"std\", std), (\"skew\", skew)]\n for i, channel in enumerate(channels):\n for j, data in enumerate(dataPair):\n axarr[i + 1, j].imshow(self.extendChannel(data[1][:, :, 
i]))\n axarr[i + 1, j].set_title(f\"{channel} {data[0]}\")\n axarr[i + 1, j].axis(\"off\")\n\n # https://stackoverflow.com/questions/43099734/combining-cv2-imshow-with-matplotlib-plt-show-in-real-time\n fig.canvas.draw()\n\n # Convert to ndarray to that we can use opencv to process it.\n buf = fig.canvas.tostring_rgb()\n img = np.fromstring(buf, dtype=np.uint8, sep=\"\").reshape(\n fig.canvas.get_width_height()[::-1] + (3,)\n )\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n return img\n\n def visualizeSimilarityResult(\n self, img1, features1, img2, features2, similarityData, rank=0, score=0\n ):\n matplotlib.use(\"TKAgg\", force=True)\n\n img1RGB = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)\n img1YUV = cv2.cvtColor(img1, cv2.COLOR_BGR2YUV)\n\n img2RGB = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)\n img2YUV = cv2.cvtColor(img2, cv2.COLOR_BGR2YUV)\n\n fig, axarr = plt.subplots(2, 4)\n fig.set_size_inches(8, 4, forward=True)\n fig.suptitle(f\"Rank {rank} distance {score}\")\n\n axarr[0, 0].imshow(img1RGB)\n axarr[0, 0].set_title(\"Target image\")\n axarr[0, 0].axis(\"off\")\n\n axarr[0, 1].imshow(self.extendChannel(img1YUV[:, :, 0]))\n axarr[0, 1].set_title(\"Y Channel\")\n axarr[0, 1].axis(\"off\")\n\n axarr[0, 2].imshow(self.extendChannel(img1YUV[:, :, 1]))\n axarr[0, 2].set_title(\"U Channel\")\n axarr[0, 2].axis(\"off\")\n\n axarr[0, 3].imshow(self.extendChannel(img1YUV[:, :, 2]))\n axarr[0, 3].set_title(\"V Channel\")\n axarr[0, 3].axis(\"off\")\n\n axarr[1, 0].imshow(img2RGB)\n axarr[1, 0].set_title(\"Query image\")\n axarr[1, 0].axis(\"off\")\n\n axarr[1, 1].imshow(self.extendChannel(img2YUV[:, :, 0]))\n axarr[1, 1].set_title(\"Y Channel\")\n axarr[1, 1].axis(\"off\")\n\n axarr[1, 2].imshow(self.extendChannel(img2YUV[:, :, 1]))\n axarr[1, 2].set_title(\"U Channel\")\n axarr[1, 2].axis(\"off\")\n\n axarr[1, 3].imshow(self.extendChannel(img2YUV[:, :, 2]))\n axarr[1, 3].set_title(\"V Channel\")\n axarr[1, 3].axis(\"off\")\n\n fig.canvas.draw()\n\n # Convert to ndarray to 
that we can use opencv to process it.\n buf = fig.canvas.tostring_rgb()\n img = np.fromstring(buf, dtype=np.uint8, sep=\"\").reshape(\n fig.canvas.get_width_height()[::-1] + (3,)\n )\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n return img\n\n def sortSimilarityScoreReverse(self):\n return False\n\n def dimensionReduction(self, featureList, dimRed, k=None):\n\n if type(dimRed).__name__ == \"LDA\":\n featureMatrix = []\n\n for feature in featureList:\n count = self.flattenFecture(feature, type(dimRed).__name__)\n\n featureMatrix = (\n (count)\n if len(featureMatrix) == 0\n else (np.concatenate((featureMatrix, count), axis=0))\n )\n\n return dimRed(featureMatrix, k)\n\n else:\n flatFeatureList = []\n for feature in featureList:\n flatFeatures = self.flattenFecture(feature, type(dimRed).__name__)\n\n flatFeatureList.append(flatFeatures)\n\n featureMatrix = np.concatenate(flatFeatureList)\n\n return dimRed(featureMatrix, k)\n\n def flattenFecture(self, feature, dimRedName=None):\n if dimRedName is not None and dimRedName.lower() == \"lda\":\n flat_mean_Y = np.reshape(feature[0][:, :, 0], -1)\n flat_mean_U = np.reshape(feature[0][:, :, 1], -1)\n flat_mean_V = np.reshape(feature[0][:, :, 2], -1)\n normalized_flat_mean_Y = (flat_mean_Y - np.min(flat_mean_Y)) / (\n np.max(flat_mean_Y) - np.min(flat_mean_Y)\n )\n normalized_flat_mean_U = (flat_mean_U - np.min(flat_mean_U)) / (\n np.max(flat_mean_U) - np.min(flat_mean_U)\n )\n normalized_flat_mean_V = (flat_mean_V - np.min(flat_mean_V)) / (\n np.max(flat_mean_V) - np.min(flat_mean_V)\n )\n\n flat_std_Y = np.reshape(feature[1][:, :, 0], -1)\n flat_std_U = np.reshape(feature[1][:, :, 1], -1)\n flat_std_V = np.reshape(feature[1][:, :, 2], -1)\n normalized_flat_std_Y = (flat_std_Y - np.min(flat_std_Y)) / (\n np.max(flat_std_Y) - np.min(flat_std_Y)\n )\n normalized_flat_std_U = (flat_std_U - np.min(flat_std_U)) / (\n np.max(flat_std_U) - np.min(flat_std_U)\n )\n normalized_flat_std_V = (flat_std_V - np.min(flat_std_V)) / (\n 
np.max(flat_std_V) - np.min(flat_std_V)\n )\n\n flat_skew_Y = np.reshape(feature[2][:, :, 0], -1)\n flat_skew_U = np.reshape(feature[2][:, :, 1], -1)\n flat_skew_V = np.reshape(feature[2][:, :, 2], -1)\n normalized_flat_skew_Y = (flat_skew_Y - np.min(flat_skew_Y)) / (\n np.max(flat_skew_Y) - np.min(flat_skew_Y)\n )\n normalized_flat_skew_U = (flat_skew_U - np.min(flat_skew_U)) / (\n np.max(flat_skew_U) - np.min(flat_skew_U)\n )\n normalized_flat_skew_V = (flat_skew_V - np.min(flat_skew_V)) / (\n np.max(flat_skew_V) - np.min(flat_skew_V)\n )\n\n count = np.zeros((9, 10), dtype=int)\n\n for (\n ele_normalized_flat_mean_Y,\n ele_normalized_flat_mean_U,\n ele_normalized_flat_mean_V,\n ele_normalized_flat_std_Y,\n ele_normalized_flat_std_U,\n ele_normalized_flat_std_V,\n ele_normalized_flat_skew_Y,\n normalized_flat_skew_U,\n normalized_flat_skew_V,\n ) in zip(\n normalized_flat_mean_Y,\n normalized_flat_mean_U,\n normalized_flat_mean_V,\n normalized_flat_std_Y,\n normalized_flat_std_U,\n normalized_flat_std_V,\n normalized_flat_skew_Y,\n normalized_flat_skew_U,\n normalized_flat_skew_V,\n ):\n\n if int(ele_normalized_flat_mean_Y) == 1:\n count[0][\n int(math.floor(ele_normalized_flat_mean_Y * 10 - 1))\n ] += 1\n else:\n count[0][int(math.floor(ele_normalized_flat_mean_Y * 10))] += 1\n\n if int(ele_normalized_flat_mean_U) == 1:\n count[1][\n int(math.floor(ele_normalized_flat_mean_U * 10 - 1))\n ] += 1\n else:\n count[1][int(math.floor(ele_normalized_flat_mean_U * 10))] += 1\n\n if int(ele_normalized_flat_mean_V) == 1:\n count[2][\n int(math.floor(ele_normalized_flat_mean_V * 10 - 1))\n ] += 1\n else:\n count[2][int(math.floor(ele_normalized_flat_mean_V * 10))] += 1\n\n if int(ele_normalized_flat_std_Y) == 1:\n count[3][\n int(math.floor(ele_normalized_flat_std_Y * 10 - 1))\n ] += 1\n else:\n count[3][int(math.floor(ele_normalized_flat_std_Y * 10))] += 1\n\n if int(ele_normalized_flat_std_U) == 1:\n count[4][\n int(math.floor(ele_normalized_flat_std_U * 10 - 1))\n ] += 
1\n else:\n count[4][int(math.floor(ele_normalized_flat_std_U * 10))] += 1\n\n if int(ele_normalized_flat_std_V) == 1:\n count[5][\n int(math.floor(ele_normalized_flat_std_V * 10 - 1))\n ] += 1\n else:\n count[5][int(math.floor(ele_normalized_flat_std_V * 10))] += 1\n\n if int(ele_normalized_flat_skew_Y) == 1:\n count[6][\n int(math.floor(ele_normalized_flat_skew_Y * 10 - 1))\n ] += 1\n else:\n count[6][int(math.floor(ele_normalized_flat_skew_Y * 10))] += 1\n\n if int(normalized_flat_skew_U) == 1:\n count[7][int(math.floor(normalized_flat_skew_U * 10 - 1))] += 1\n else:\n count[7][int(math.floor(normalized_flat_skew_U * 10))] += 1\n\n if int(normalized_flat_skew_V) == 1:\n count[8][int(math.floor(normalized_flat_skew_V * 10 - 1))] += 1\n else:\n count[8][int(math.floor(normalized_flat_skew_V * 10))] += 1\n\n count = np.reshape(count, (-1))\n # count = np.squeeze(count)\n return count\n else:\n flatFeature1 = np.reshape(feature[0], (1, -1))\n flatFeature2 = np.reshape(feature[1], (1, -1))\n if dimRedName is not None and dimRedName.lower() == \"nmf\":\n flatFeatures = np.concatenate(\n (flatFeature1, flatFeature2), axis=1\n )\n else:\n flatFeature3 = np.reshape(feature[2], (1, -1))\n flatFeatures = np.concatenate(\n (flatFeature1, flatFeature2, flatFeature3), axis=1\n )\n\n return np.squeeze(flatFeatures)\n","sub_path":"Code/models/cm.py","file_name":"cm.py","file_ext":"py","file_size_in_byte":15414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"521553866","text":"from django.conf import settings\r\nfrom allauth.account.adapter import DefaultAccountAdapter\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom .models import UserInfo\r\nimport django.contrib.auth\r\nfrom django.core.cache import cache\r\nfrom django import forms\r\nfrom .utils import (is_phone_number_valid, generate_new_phone_verification_code)\r\n\r\n\r\nclass MyAccountAdapter(DefaultAccountAdapter):\r\n\r\n # def 
get_login_redirect_url(self, request):\r\n # path = '/accounts/{username}/'\r\n # return path.format(username=request.user.username)\r\n\r\n # def is_open_for_signup(self, request):\r\n # return False\r\n\r\n\r\n def authenticate(self, request, **credentials):\r\n \"\"\"Only authenticates, does not actually login. See `login`\"\"\"\r\n self.pre_authenticate(request, **credentials)\r\n number = credentials['username']\r\n # the following if is hacked by me haha\r\n if len(number) == 11 and number.isdigit() and number[0] == '1' \\\r\n and UserInfo.objects.filter(\r\n phonenumber = number,\r\n phone_is_verified = True,\r\n ).exists():\r\n credentials['username'] = UserInfo.objects.filter(\r\n phonenumber=number,\r\n phone_is_verified=True\r\n )[0].user.username\r\n user = django.contrib.auth.authenticate(**credentials)\r\n if user:\r\n cache_key = self._get_login_attempts_cache_key(\r\n request, **credentials)\r\n cache.delete(cache_key)\r\n else:\r\n self.authentication_failed(request, **credentials)\r\n return user\r\n\r\nclass MySignupForm(forms.Form):\r\n phonenumber = forms.CharField(label=\"手机号码\", max_length = 11)\r\n yanzhengma = forms.CharField(label=\"手机验证码\", max_length=4)\r\n # captcha = CaptchaField(label=\"图形验证码\")\r\n\r\n\r\n def clean_phonenumber(self):\r\n phonenumber = self.cleaned_data['phonenumber']\r\n if not is_phone_number_valid(phonenumber):\r\n raise forms.ValidationError(\"手机号码不正确\")\r\n return phonenumber\r\n\r\n def clean_yanzhengma(self):\r\n yanzhengma = self.cleaned_data['yanzhengma']\r\n if len(yanzhengma) != 4 or (not yanzhengma.isdigit()):\r\n raise forms.ValidationError(\"手机验证码格式不正确\")\r\n return yanzhengma\r\n\r\n def clean(self):\r\n super(MySignupForm, self).clean()\r\n key = 'phone_reg_code'+str(self.cleaned_data.get('phonenumber'))\r\n code = cache.get(key)\r\n # logger.warning('key:'+key)\r\n # logger.warning('cleaned_data:'+str(self.cleaned_data))\r\n # logger.warning('code from cache:'+code)\r\n if code is None:\r\n raise 
forms.ValidationError(\"手机验证none\")\r\n if code != self.cleaned_data.get('yanzhengma'):\r\n raise forms.ValidationError(\"手机验证不通过\")\r\n\r\n def signup(self, request, user):\r\n \"\"\" Required, or else it throws deprecation warnings \"\"\"\r\n user.profile = UserInfo.objects.create(user = user)\r\n user.profile.phonenumber = self.cleaned_data['phonenumber']\r\n user.profile.phone_is_verified = True\r\n user.profile.save()\r\n","sub_path":"lstngsvc/myadapter.py","file_name":"myadapter.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"585024595","text":"import boto3\r\nfrom tkinter import *\r\nimport datetime\r\nimport time\r\n#负责进行发送\r\n# Get the service resource\r\nsqs = boto3.client('sqs')\r\n\r\n\r\n# 发送消息给队列\r\ndef send_msg(text_msg):\r\n sqs.send_message(QueueUrl='https://sqs.us-east-1.amazonaws.com/332427856354/test', MessageBody=text_msg+'\\n')\r\n\r\n\r\n# 发送按钮\r\ndef send_msg_b():\r\n # 在聊天内容上方加一行 显示发送人及发送时间\r\n msgcontent = ('我:'.encode(\"utf-8\").decode(\"utf-8\")) + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + '\\n '\r\n text_msglist.insert(END, msgcontent, 'green')\r\n text_msglist.insert(END, text_msg.get('0.0', END))\r\n send_msg(text_msg.get('0.0', END))\r\n text_msg.delete('0.0', END)\r\n\r\n\r\nroot = Tk()\r\nroot.title(('发送'.encode(\"utf-8\").decode(\"utf-8\")))\r\n\r\n\r\n# 创建几个frame作为容器\r\nframe_left_top = Frame(width=380, height=260, bg='white')\r\nframe_left_center = Frame(width=380, height=100, bg='white')\r\nframe_left_bottom = Frame(width=380, height=30)\r\n# 创建需要的几个元素\r\ntext_msglist = Text(frame_left_top)\r\ntext_msg = Text(frame_left_center)\r\nbutton_sendmsg = Button(frame_left_bottom, text=('发送'.encode(\"utf-8\").decode(\"utf-8\")), command=send_msg_b)\r\n\r\n# 创建一个绿色的tag\r\ntext_msglist.tag_config('green', foreground='#008B00')\r\n# 使用grid设置各个容器位置\r\nframe_left_top.grid(row=0, column=1, padx=2, 
pady=5)\r\nframe_left_center.grid(row=1, column=1, padx=2, pady=5)\r\nframe_left_bottom.grid(row=2, column=1)\r\n\r\nframe_left_top.grid_propagate(0)\r\nframe_left_center.grid_propagate(0)\r\nframe_left_bottom.grid_propagate(0)\r\n# 把元素填充进frame\r\ntext_msglist.grid()\r\ntext_msg.grid()\r\nbutton_sendmsg.grid()\r\n# 主事件循环\r\nroot.mainloop()\r\n","sub_path":"sqs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"269101755","text":"import pandas as pd\nimport numpy as np\nimport glob\nnp.random.seed(0)\nimport seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\nimport click\n\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n@click.command(context_settings=CONTEXT_SETTINGS)\n@click.version_option(version='0.1')\n\n# Required arguments\n@click.option('-fv', '--fold_value', type=str,\n help='fold value for criterion for p value change')\n@click.option('-s', '--statistic', type=str,\n help='string denoting type of analysis')\n@click.option('-m', '--multi_corr', type=str,\n help='string denoting type of multiple corrections')\n@click.option('-p', '--param', type=str,\n help='string denoting params used')\n@click.option('-c', '--corr_compare', type=str,\n help='boolean denoting whether performing cooksd or not')\n@click.option('-i', '--input_dir', type=click.Path(exists=True),\n help='input dir with .txt files of data')\n@click.option('-o', '--output_dir', type=click.Path(exists=True),\n help='output dir to put config files')\n\n\ndef analyze_simulations_real(fold_value, statistic, multi_corr, param,\n corr_compare, input_dir, output_dir):\n '''\n Script for analysis of real data by CUTIE\n '''\n\n def parse_log(f, cookd):\n lines = [l.strip() for l in f.readlines()]\n defaulted = False\n if cookd == 'True':\n for l in lines:\n if \"defaulted\" in l:\n defaulted = True\n elif \"initial_corr\" in l:\n initial_corr = 
float(l.split(' ')[-1])\n elif \"false correlations according to cookd\" in l:\n false_corr = float(l.split(' ')[-1])\n elif \"true correlations according to cookd\" in l:\n true_corr = float(l.split(' ')[-1])\n elif \"runtime\" in l:\n runtime = float(l.split(' ')[-1])\n rs_false = np.nan\n rs_true = np.nan\n\n else:\n # check if FDR correction defaulted\n for l in lines:\n if \"defaulted\" in l:\n defaulted = True\n elif \"initial_corr\" in l:\n initial_corr = float(l.split(' ')[-1])\n elif \"false correlations\" in l:\n false_corr = float(l.split(' ')[-1])\n elif \"true correlations\" in l:\n true_corr = float(l.split(' ')[-1])\n elif \"FP/TN1\" in l:\n rs_false = float(l.split(' ')[-1])\n elif \"TP/FN1\" in l:\n rs_true = float(l.split(' ')[-1])\n elif \"runtime\" in l:\n runtime = float(l.split(' ')[-1])\n\n return defaulted, initial_corr, false_corr, true_corr, rs_false, rs_true, runtime\n\n\n headers = [\n 'analysis_id',\n 'parameter',\n 'distribution',\n 'statistic',\n 'mc_used', #NEW\n 'fold_value', # NEW\n 'pointwise', #NEW\n 'defaulted', # binary\n 'initial_corr',\n 'true_corr(TP_FN)',\n 'false_corr(FP_TN)',\n 'rs_true_corr_TP_FN',\n 'rs_false_corr_FP_TN',\n 'runtime'\n ]\n\n # populate df\n results_df = pd.DataFrame()\n\n mcs = multi_corr.split(',')\n fvs = fold_value.split(',')\n stats = statistic.split(',')\n cds = corr_compare.split(',')\n ds = ['lungtx', 'lungpt', 'lungc','who','hdac']\n for p in param:\n for mc in mcs:\n for fv in fvs:\n for s in stats:\n for cd in cds:\n for d in ds:\n # nomc_10_pearson_True_lungpt\n analysis_id = '_'.join([p, mc, fv, s, cd, d])\n path = input_dir + analysis_id + '/'\n files = sorted(glob.glob(path + '*.txt'))\n # grab most recent log file\n try:\n rel_logfile = files[-1]\n with open(rel_logfile, 'r') as f:\n defaulted, initial_corr, false_corr, \\\n true_corr, rs_false, rs_true, runtime = parse_log(f,cd)\n\n new_row = pd.DataFrame([[analysis_id, p, d, s,\n mc, fv, cd, defaulted,\n initial_corr, true_corr,\n 
false_corr, rs_true,\n rs_false, runtime]],\n columns=headers)\n\n results_df = results_df.append(new_row)\n except:\n print(analysis_id)\n print('Failed parsing')\n if cd == 'True':\n if s == 'pearson':\n print(analysis_id)\n else:\n print(analysis_id)\n\n colnames = ['LungTranscriptomics', 'Micrometa', 'Microbiome', 'Gene Expression', 'WHO']\n\n col_to_corr = {\n 'LungTranscriptomics': 292 * 97, #depends on sum vs unsum\n 'Micrometa': 83 * 897,\n 'Microbiome': 748 * 747 / 2,\n 'Gene Expression': 1000 * 999 / 2,\n 'WHO': 354 * 353 / 2\n }\n\n dists = ['lungtx', 'lungpt', 'lungc', 'hdac', 'who']\n\n dist_to_corr = {\n 'lungtx': 292 * 97,\n 'lungpt': 83 * 897,\n 'lungc': 748 * 747 / 2,\n 'hdac': 1000 * 999 / 2,\n 'who': 354 * 353 / 2\n }\n results_df.to_csv(output_dir + 'real_df.txt', sep='\\t')\n\n # populate indices and ids for the dfs\n for p in param:\n for fv in fvs:\n for mc in mcs:\n indices = []\n ids = []\n indices.append('_'.join(['pearson', 'cd', fv, mc, p]))\n indices.append('Pct initial corr')\n ids.append('_'.join([mc, fv, 'pearson', 'True', p]))\n for stat in stats:\n indices.append('_'.join([stat, fv, mc]))\n indices.append('Pct initial corr')\n ids.append('_'.join([mc, fv, stat, 'False']))\n\n # populate df\n df_array = []\n for i, (idstring, index) in enumerate(zip(ids, indices)):\n row_fracs = []\n mc, fv, s, cd = idstring.split('_')\n for dist in dists:\n row = results_df[(results_df['parameter'] == p) & (results_df['distribution'] == dist) & (results_df['statistic'] == s) \\\n & (results_df['mc_used'] == mc) & (results_df['fold_value'] == fv) & (results_df['pointwise'] == cd)]\n try:\n row_fracs.append(float(row['true_corr(TP_FN)'] /row['initial_corr'].values)) # correctly id tp\n except:\n row_fracs.append(np.nan)\n print('nan in row fracs')\n print(dist, idstring)\n\n df_array.append(row_fracs)\n\n initial_sig_fracs = []\n for dist in dists:\n row = results_df[(results_df['distribution'] == dist) & (results_df['statistic'] == s) \\\n & 
(results_df['mc_used'] == mc) & (results_df['fold_value'] == fv) & (results_df['pointwise'] == cd)]\n # change number 249500 to n_corr depending on dataset\n try:\n initial_sig_fracs.append(float(row['initial_corr'] / dist_to_corr[dist]))\n except:\n initial_sig_fracs.append(np.nan)\n\n df_array.append(initial_sig_fracs)\n\n pie_df = pd.DataFrame(data = df_array, index = indices, columns = colnames)\n pie_df = pie_df.rename_axis('Statistic')\n pie_df = pie_df.apply(pd.to_numeric).round(2)\n\n # parse the reverse sign shenanigans\n df_array = []\n\n # cut out the cookd parts\n rs_ids = ids[-len(stats):]\n rs_indices = indices[-2*len(stats):]\n for i, (idstring, index) in enumerate(zip(rs_ids, rs_indices)):\n # stat = 'Pearson'\n row_fracs = []\n mc, fv, s, cd, p = idstring.split('_')\n for dist in dists:\n row = results_df[(results_df['parameter'] == p) & (results_df['distribution'] == dist) & (results_df['statistic'] == s) \\\n & (results_df['mc_used'] == mc) & (results_df['fold_value'] == fv) & (results_df['pointwise'] == 'False')]\n try:\n row_fracs.append(float(row['rs_true_corr_TP_FN'] /row['initial_corr'].values)) # correctly id tp\n except:\n row_fracs.append(np.nan)\n print('failed to parse rs')\n print(dist, idstring)\n\n df_array.append(row_fracs)\n\n initial_sig_fracs = []\n for dist in dists:\n row = results_df[(results_df['parameter'] == p) & (results_df['distribution'] == dist) & (results_df['statistic'] == s) \\\n & (results_df['mc_used'] == mc) & (results_df['fold_value'] == fv) & (results_df['pointwise'] == 'False')]\n # change number 249500 to n_corr depending on dataset\n try:\n initial_sig_fracs.append(float(row['initial_corr'] / dist_to_corr[dist]))\n except:\n initial_sig_fracs.append(np.nan)\n\n df_array.append(initial_sig_fracs)\n\n rs_df = pd.DataFrame(data = df_array, index = rs_indices, columns = colnames)\n rs_df = rs_df.rename_axis('Statistic')\n rs_df = rs_df.apply(pd.to_numeric).round(2)\n\n # currently the four dfs are\n # pie_df and 
rs_df\n # only pie_df has cookd info in it\n # the outer loop has mc and fv so when you save fig make sure to incl those\n\n # dictionary from which to get results for pie plots\n dd = {}\n\n # cut out micrometa dataset\n pie_df = pie_df.drop(['Micrometa'],axis=1)\n nocd_pie_df = pie_df.iloc[2:,:]\n rs_df = rs_df.drop(['Micrometa'],axis=1)\n sub_colnames = ['LungTranscriptomics', 'Microbiome', 'Gene Expression', 'WHO']\n\n # obtain indices without cook's D\n vals = list(nocd_pie_df.index.values)\n # skips by 2 (AKA every other)\n new_vals = vals[0::2]\n for v in new_vals:\n dd[v] = {}\n\n for v in new_vals:\n # v = 'pearson_1_fdr'\n # check to make sure forward direction\n if v.split('_')[0][0] != 'r':\n dd[v]['rsTP'] = rs_df.loc[v,:].values\n else:\n dd[v]['rsFN'] = rs_df.loc[v,:].values\n\n\n for v in new_vals:\n rows = nocd_pie_df.iloc[vals.index(v):vals.index(v)+2,:].values\n if v.split('_')[0][0] != 'r':\n dd[v]['TP'] = rows[0]\n dd[v]['initial_sig'] = rows[1]\n else:\n dd[v]['FN'] = rows[0]\n dd[v]['initial_insig'] = rows[1]\n\n\n for_vals = new_vals[::2]\n v_to_cd = {}\n # just get Cook's D\n # should be '_'.join(['pearson', 'cd', fv, mc])\n # cd_val = list(pie_df.index.values)[0::2][0]\n\n # first two rows are cd\n rows = pie_df.iloc[0:2,:].values\n v_to_cd['TP'] = rows[0]\n v_to_cd['initial_sig'] = rows[1]\n\n # create figure\n f, axarr = plt.subplots(len(for_vals) + 1,len(sub_colnames))\n print(dd)\n\n # iterate over dataset\n for d in range(len(sub_colnames)):\n labels = ['TP', 'FP', 'N']\n colors = ['#66b3ff','#ff9999','#FFC000']#,'#ffcc99']\n TP = v_to_cd['TP'][d]\n P = v_to_cd['initial_sig'][d]\n sizes = [TP * P, (1-TP)*P,1-P]\n\n axs = axarr[0, d]\n # note colnames = ['Micrometa', 'Microbiome', 'Gene Expression', 'WHO']\n title = sub_colnames[d] + ', ' + 'Cook\\'s D' + '\\n' + str(int(col_to_corr[sub_colnames[d]]))\n axs.set_title(title)\n patches, texts, autotexts = axs.pie(sizes, colors = colors, labels=None, autopct='%1.1f%%', startangle=0,\n 
labeldistance = 1, pctdistance = 1.2)\n #plt.legend(patches, autotexts, loc='center left', bbox_to_anchor=(-0.1, 1.),fontsize=8)\n fs = 12\n ts = 12\n #patches[0].set_fontsize(fs)\n #patches[1].set_fontsize(fs)\n #patches[2].set_fontsize(fs)\n texts[0].set_fontsize(fs)\n texts[1].set_fontsize(fs)\n texts[2].set_fontsize(fs)\n autotexts[0].set_fontsize(ts)\n autotexts[1].set_fontsize(ts)\n autotexts[2].set_fontsize(ts)\n\n #draw circle\n centre_circle = plt.Circle((0,0),0.50,fc='white')\n fig = plt.gcf()\n fig.set_size_inches(10,10)\n #fig.gca().add_artist(centre_circle)\n axs.add_artist(centre_circle)\n # Equal aspect ratio ensures that pie is drawn as a circle\n axs.axis('equal')\n plt.tight_layout()\n #plt.show()\n\n # iterate over statistic\n for v in range(len(for_vals)):\n val = for_vals[v]\n\n # labels = ['TP', 'rsTP', 'FP', 'FN', 'rsFN', 'TN']\n labels = ['TP', 'rsTP', 'FP', 'FN', 'TN']\n # TP is blue FP is red FN is green TN is purple\n # for rs case\n # reverse sign but still true FP is non reverse sign\n colors = ['#66b3ff','#ADD8E6','#ff9999','#99ff99','#8064A2']\n TP = dd[val]['TP'][d]\n rsTP = dd[val]['rsTP'][d]\n P = dd[val]['initial_sig'][d]\n FN = dd['r' + val]['FN'][d]\n rsFN = dd['r' + val]['rsFN'][d]\n N = dd['r' + val]['initial_insig'][d]\n # sizes = [(TP - rsTP) * P, rsTP * P,(1-TP)*P, (FN - rsFN) * N, rsFN * N, (1-FN)*N]\n sizes = [(TP - rsTP) * P, rsTP * P,(1-TP)*P, FN * N, (1-FN)*N]\n print(sub_colnames[d])\n print(val)\n print(labels)\n print(sizes)\n\n # plt.subplot(len(new_vals),len(colnames),i)\n axs = axarr[v + 1, d]\n # title = colnames[d] + ', ' + val.split('_')[0] + '\\n' + str(int(dist_to_corr[colnames[d]]))\n # axs.set_title(title)\n\n # def draw_pie(sizes, colors):\n patches, texts, autotexts = axs.pie(sizes, colors = colors, labels=None, autopct='%1.1f%%', startangle=0,\n labeldistance = 1, pctdistance = 1.2)\n fs = 12\n ts = 12\n texts[0].set_fontsize(fs)\n texts[1].set_fontsize(fs)\n texts[2].set_fontsize(fs)\n 
texts[3].set_fontsize(fs)\n texts[4].set_fontsize(fs)\n autotexts[0].set_fontsize(ts)\n autotexts[1].set_fontsize(ts)\n autotexts[2].set_fontsize(ts)\n autotexts[3].set_fontsize(ts)\n autotexts[4].set_fontsize(ts)\n\n #draw circle\n centre_circle = plt.Circle((0,0),0.50,fc='white')\n fig = plt.gcf()\n fig.set_size_inches(10,10)\n #fig.gca().add_artist(centre_circle)\n axs.add_artist(centre_circle)\n # Equal aspect ratio ensures that pie is drawn as a circle\n axs.axis('equal')\n plt.tight_layout()\n\n f.savefig(output_dir + 'pieplots_dfreal_combined_' + p + '_' + mc + '_' + fv + '.pdf')\n plt.close(fig)\n\n\nif __name__ == \"__main__\":\n analyze_simulations_real()\n\n\n\n\n","sub_path":"scripts/analyze_simulations_real.py","file_name":"analyze_simulations_real.py","file_ext":"py","file_size_in_byte":17580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"401005998","text":"import requests\nimport time\nimport logging\nimport multiprocessing\n\nl = logging.getLogger()\nh = logging.StreamHandler()\nf = logging.Formatter(\"%(asctime)s: %(message)s\")\nh.setFormatter(f)\nl.addHandler(h)\nl.setLevel(logging.INFO)\n\n\ndef request_process(pipe):\n url = pipe.recv()\n while url is not None:\n start_time = time.time()\n requests.get(url)\n l.info(f\"{url} took {time.time() - start_time:.1f}s\")\n url = pipe.recv()\n\n\nif __name__ == \"__main__\":\n urls = [\n \"https://google.com\",\n \"https://amazon.com\",\n \"https://ebay.com\",\n \"https://bbc.co.uk\",\n \"https://youtube.com\",\n \"https://wikipedia.org\",\n \"https://twitter.com\",\n \"https://spotify.com\",\n \"https://uber.com\",\n \"https://apple.com\",\n \"https://microsoft.com\",\n ]\n urls_parent, urls_child = multiprocessing.Pipe()\n proc = multiprocessing.Process(target=request_process, args=(urls_child,))\n proc.start()\n for url in urls:\n urls_parent.send(url)\n urls_parent.send(None)\n 
proc.join()\n","sub_path":"python-advanced-features-master/11-multiprocessing/exercise-03.py","file_name":"exercise-03.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"438713105","text":"from extruder_turtle import ExtruderTurtle\nimport math\nimport random\n\nHAIRLENGTH = 1 ## 2\nHAIR_ANGLE = math.pi/3\nHAIR_DENSITY = 0.16 ## 0.08\nEXT_DENSITY = 0.03 # 0.05\nFEEDRATE = 900\nLAYER_HEIGHT = 0.3\n\nDIAMETER = 40\nNUM_SIDES = 50\nLAYERS = 50\ndtheta = 2*math.pi/NUM_SIDES\ndx = DIAMETER*math.sin(dtheta/2)\n\nt = ExtruderTurtle()\n\n## Set up the turtle\nt.name(\"braille-circle.gcode\")\nt.setup(x=100, y=100)\nt.rate(FEEDRATE)\nt.set_density(EXT_DENSITY)\n\nfor l in range(LAYERS):\n for k in range(NUM_SIDES):\n t.right(dtheta)\n t.forward(dx)\n if random.random() < HAIR_DENSITY:\n t.left(HAIR_ANGLE)\n t.forward(HAIRLENGTH)\n t.forward(-HAIRLENGTH)\n t.right(HAIR_ANGLE)\n\n ## Move to the next layer\n t.lift(LAYER_HEIGHT)\n\n## Save to a GCODE file\nt.finish()\n","sub_path":"demos/beta/braille_circle.py","file_name":"braille_circle.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"568903997","text":"# Modules\nfrom bs4 import BeautifulSoup\nimport grequests\nimport requests\n\niex_url_base = \"https://api.iextrading.com/1.0/\"\n\n\ndef get_symbols():\n \"\"\"\n :return: Gets all symbols(tickers) from IEX API\n \"\"\"\n symbols_json = requests.get(iex_url_base + \"ref-data/symbols\").json()\n symbols = [symbol[\"symbol\"] for symbol in symbols_json if symbol[\"type\"] == \"cs\"]\n return symbols\n\n\ndef init_stock_scores(symbols):\n \"\"\"Set all stock scores to zero. 
\"\"\"\n stock_scores = {symbol: 0 for symbol in symbols}\n return stock_scores\n\n\ndef set_batches(symbols):\n \"\"\"\n :param symbols: Give list of stock symbols\n :return: Concatenates stock symbols in lumps of\n up to 100 to allow for batch GET requests from IEX API\n \"\"\"\n batches = [\",\".join(symbols[i : i + 100]) for i in range(0, len(symbols), 100)]\n return batches\n\n\ndef get_responses(payloads):\n\n \"\"\"\n :param payloads: list of payloads for GET request\n :return: Returns all batch GET requests from API for given url_end.\n \"\"\"\n batch_url = f\"{iex_url_base}stock/market/batch?\"\n rs = (grequests.get(batch_url, params=payload) for payload in payloads)\n result = grequests.map(rs)\n outputs = [r.json() for r in result]\n return outputs\n\n\ndef get_stats(batch_data):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :return: Gives dictionary of each symbol's statistics. To get individual\n statistic for individual stock, use something of the general form stats[symbol]['stats'][specific_stat].\n Note that 'stats' is fixed string.\n \"\"\"\n payloads = [{\"symbols\": batch, \"types\": \"stats\"} for batch in batch_data]\n outputs = get_responses(payloads=payloads)\n stats = {\n symbol: outputs[outputs.index(batch_dict)][symbol]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return stats\n\n\ndef get_company(batch_data):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :return: Gives dictionary with each symbols info (sector, industry, CEO name, etc.)\n \"\"\"\n payloads = [{\"symbols\": batch, \"types\": \"company\"} for batch in batch_data]\n outputs = get_responses(payloads=payloads)\n company = {\n symbol: outputs[outputs.index(batch_dict)][symbol][\"company\"]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return company\n\n\ndef get_chart(batch_data, 
time=\"1m\"):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :param time: Length of time (1m = 1 month, 1y = 1 year, etc.) can go up to 5y\n :return: Gives large list of statistics for symbols in batch_data. To get individual\n statistic for individual stock, use something of the general form stats[symbol]['stats'][specific_stat].\n Note that 'stats' is fixed string.\n \"\"\"\n payloads = [\n {\"symbols\": batch, \"types\": \"chart\", \"range\": time} for batch in batch_data\n ]\n outputs = get_responses(payloads=payloads)\n chart = {\n symbol: outputs[outputs.index(batch_dict)][symbol]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return chart\n\n\ndef get_financials(batch_data):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :return: Gives large list of statistics for symbols in batch_data. To get individual\n statistic for individual stock, use something of the general form stats[symbol]['stats'][specific_stat].\n Note that 'stats' is fixed string.\n \"\"\"\n payloads = [{\"symbols\": batch, \"types\": \"financials\"} for batch in batch_data]\n outputs = get_responses(payloads=payloads)\n financials = {\n symbol: outputs[outputs.index(batch_dict)][symbol]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return financials\n\n\ndef get_splits(batch_data, time=\"1y\"):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :param time: Length of time (1m = 1 month, 1y = 1 year, etc.) 
can go up to 5y\n :return: Dictionary of stock splits\n \"\"\"\n payloads = [\n {\"symbols\": batch, \"types\": \"splits\", \"range\": time} for batch in batch_data\n ]\n outputs = get_responses(payloads=payloads)\n splits = {\n symbol: outputs[outputs.index(batch_dict)][symbol]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return splits\n\n\ndef get_dividends(batch_data, time=\"5y\"):\n \"\"\"\n :param batch_data: List of concatenated symbols -- use get_symbols() and set_batches()\n functions to set batch_data\n :param time: Length of time (1m = 1 month, 1y = 1 year, etc.) can go up to 5y\n :return: Dictionary of stock dividends\n \"\"\"\n payloads = [\n {\"symbols\": batch, \"types\": \"dividends\", \"range\": time} for batch in batch_data\n ]\n outputs = get_responses(payloads=payloads)\n dividends = {\n symbol: outputs[outputs.index(batch_dict)][symbol]\n for batch_dict in outputs\n for symbol in batch_dict\n }\n return dividends\n\n\ndef total_setup():\n \"\"\"\n :return: Total setup returns symbols, stock_scores, and batch_symbols.\n \"\"\"\n symbols = get_symbols()\n stock_scores, batch_symbols = init_stock_scores(symbols), set_batches(symbols)\n return symbols, stock_scores, batch_symbols\n\n\ndef soup_it(url):\n \"\"\"\n :param url: Give url for HTML code to be copied\n :return: Returns parsed HTML code (to strip info from)\n \"\"\"\n page = requests.get(url).text.encode(\"utf-8\").decode(\"ascii\", \"ignore\")\n soup = BeautifulSoup(page, \"html.parser\")\n return soup\n\n\ndef return_top(dictionary, x=None):\n \"\"\"\n :param dictionary: Give a dictionary with numeric values (ex: {'Ticker1':200, 'Ticker2':300, 'Ticker3':450})\n :param x: # of keys to be returned. 
Function defaults to return entire dictionary sorted.\n :return: Will return top x values with keys.\n \"\"\"\n x = len(dictionary) if x is None else x\n sorted_array = sorted(dictionary.items(), key=lambda a: a[1], reverse=True)\n return sorted_array[0:x]\n","sub_path":"stockscore/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"290072211","text":"from pymongo import MongoClient\nfrom bson.objectid import ObjectId\nfrom lib.utils.config import Settings\nfrom datetime import datetime, timedelta\nimport ldap, sys, binascii, os, simplejson\n\nclass Base:\n def parse_cursor_object(self, cursor):\n if cursor == None or cursor == \"\":\n return\n\n output = {}\n for key, value in cursor.items():\n if key == \"_id\":\n output['id'] = str(value)\n \n else:\n output[key] = value\n\n return output\n\nclass Manager(Base):\n def __init__(self, db=None):\n self.settings = Settings()\n self.config = self.settings.get_config(\"mongodb\")\n\n self.host = self.config['net']['bindIp']\n self.port = self.config['net']['port']\n\n self.mongo_client = MongoClient(self.host, self.port)\n\n if db != None:\n self.__db = self.mongo_client[db]\n else:\n self.__db = self.mongo_client[\"cee-tools\"]\n \n self.sessions = Session(self.mongo_client[\"cee-tools\"]) \n\n def db(self, db=None):\n if db == None:\n return self.__db \n else:\n return self.mongo_client[db]\n\n def login(self, username, password):\n output = {}\n\n return output\n \n def ldap_lookup(self, look_up):\n connect = ldap.initialize(self.ldap_server)\n\n base_dn = \"ou=users,dc=redhat,dc=com\"\n\n search_filter = 'uid=' + look_up + \",\" + base_dn\n result = connect.search_ext_s(base_dn,ldap.SCOPE_SUBTREE,search_filter)\n\n return result\n \n def ldap_auth(self, username, password):\n user_dn = \"uid=\" + username + \",ou=users,dc=redhat,dc=com\"\n base_dn = \"dc=redhat,dc=com\"\n\n 
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)\n connect = ldap.open(self.ldap_server)\n \n connect.start_tls_s()\n search_filter = \"uid=\" + username\n\n try:\n connect.bind_s(user_dn, password)\n result = connect.search_s(base_dn, ldap.SCOPE_SUBTREE, search_filter)\n self.add_kerberos(result[0][1])\n return True, result[0][1]\n\n except :\n connect.unbind_s()\n return False, None\n \nclass Session(Base):\n def __init__(self, db):\n self.db = db\n \n self.admins = [\"mowens\"]\n self.developers = [\"pcarlson\", \"cww\", \"vanhoof\", \"mmezynsk\"]\n self.privileged = [\"jeffserv\", \"mknutson\", \"dwood\", \"jawilson\", \"vanhoof\", \"cww\"]\n \n def set(self, uid, ip):\n session = self.get(uid=uid, ip=ip)\n now = datetime.now()\n\n if session != None:\n doc = self.update(uid, ip) \n doc['ip'] = ip\n doc['uid'] = uid\n \n return doc\n\n else:\n doc = {\n \"ip\": ip,\n \"uid\": uid,\n \"expires\": now + timedelta(hours=24),\n \"token\": self.token(),\n }\n \n self.db.sessions.insert(doc)\n \n return doc\n\n def get(self, token=None, session_id=None, uid=None, ip=None):\n if token != None:\n session = self.db.sessions.find_one({\"token\": token})\n \n elif session_id != None:\n session = self.db.sessions.find_one({\"_id\": ObjectId(session_id)})\n\n elif uid != None and ip != None:\n session = self.db.sessions.find_one({\"uid\": uid, \"ip\": ip})\n \n elif uid != None and ip == None:\n sessions = []\n cursor = self.db.sessions.find({\"uid\": uid})\n for i in cursor:\n i['permissions'] = self.check_permissions(i)\n sessions.append(i)\n \n return sessions\n\n else:\n session = None\n \n output = self.parse_cursor_object(session)\n\n if output != None:\n output['permissions'] = self.check_permissions(output)\n \n return output\n \n def check_permissions(self, session):\n if session != None:\n permissions = self.get_permissions(session['uid'])\n\n if permissions == None:\n permissions = self.set_permissions(session['uid'], init=True)\n\n \n if 
session['uid'] in self.admins and \"admin\" not in permissions:\n permissions = self.set_permissions(session['uid'], \"admin\")\n #else:\n #permissions = self.remove_permission(session['uid'], \"admin\")\n\n if session['uid'] in self.developers and \"developers\" not in permissions: \n permissions = self.set_permissions(session['uid'], \"developer\")\n #else:\n #permissions = self.remove_permission(session['uid'], \"developer\")\n \n if session['uid'] in self.privileged and \"privileged\" not in permissions: \n permissions = self.set_permissions(session['uid'], \"privileged\")\n #else:\n #permissions = self.remove_permission(session['uid'], \"privileged\")\n \n return self.get_permissions(session['uid'])\n\n def get_permissions(self, uid):\n output = self.parse_cursor_object(self.db.permissions.find_one({\"uid\": uid}))\n if output != None:\n return output['permissions']\n \n def set_permissions(self, uid, permission=None, init=False):\n if init:\n self.db.permissions.insert({\"permissions\": [\"standard\"], \"uid\": uid})\n else:\n self.db.permissions.update({\"uid\": uid}, {\"$addToSet\": { \"permissions\": permission }})\n \n return self.get_permissions(uid) \n \n def remove_permission(self, uid, permission):\n self.db.permissions.update({\"uid\": uid}, {\"$pull\": { \"permissions\": permission }})\n \n return self.get_permissions(uid) \n\n def update(self, uid, ip):\n now = datetime.now()\n\n update = {\n \"expires\": now + timedelta(hours=24),\n \"token\": self.token(),\n }\n \n self.db.sessions.update({\"uid\": uid, \"ip\": ip}, {\"$set\": update})\n \n return self.get(uid=uid, ip=ip) \n\n def token(self):\n return binascii.hexlify(os.urandom(16))\n \n\n","sub_path":"python/lib/base/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"386659855","text":"import datetime\nimport base64\nimport os\n\nfrom flask import Flask, render_template, 
redirect, flash, abort, request, Markup, session, jsonify\nfrom flask_pymongo import PyMongo\nfrom flask_compress import Compress\nfrom flask_cors import CORS\nfrom werkzeug.exceptions import HTTPException\nfrom bson.objectid import ObjectId\nimport pyimgur\nfrom dotenv import load_dotenv\nfrom urllib.parse import quote\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\nif \"DYNO\" not in os.environ:\n load_dotenv()\napp.config.update(\n IMGUR_ID=os.environ[\"IMGUR_ID\"],\n MONGO_URI=os.environ[\"MONGO_URI\"],\n SECRET_KEY=os.environ[\"SECRET_KEY\"]\n)\napp.config[\"ImgurObject\"] = pyimgur.Imgur(app.config[\"IMGUR_ID\"])\nmongo = PyMongo(app)\nCompress(app)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\n@app.before_request\ndef before_request():\n if 'DYNO' in os.environ:\n if request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n return redirect(quote(url), code=301)\n\n\n@app.route(\"/\")\ndef blogs():\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n return render_template(\"blogs.html\", login_status=dict(session[\"logged_in\"]))\n else:\n return render_template(\"blogs.html\", login_status=None)\n\n\n@app.route(\"/post_blog\")\ndef post_blog():\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n return render_template(\"post_blog.html\", login_status=dict(session[\"logged_in\"]))\n else:\n flash(Markup(\"\"\"Please Login or Sign Up to Post a Blog\"\"\"))\n return redirect(\"/\")\n\n\n@app.route(\"/blog//\")\ndef return_blog(page):\n results = mongo.db.blogs.find_one({\"name\": page+\".html\"})\n if results is None:\n abort(404)\n else:\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n results[\"text\"] = Markup(results[\"text\"])\n return render_template(\n \"blog_template.html\",\n results=results,\n login_status=dict(session[\"logged_in\"])\n )\n else:\n return render_template(\n \"blog_template.html\",\n results=results,\n 
login_status=None\n )\n\n\n@app.route(\"/user//\")\ndef return_use(user):\n results = mongo.db.users.find_one({\"username\": user})\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n return render_template(\n \"user_template.html\",\n results_from_user=results,\n login_status=dict(session[\"logged_in\"])\n )\n else:\n return render_template(\n \"user_template.html\",\n results_from_user=results,\n login_status=None\n )\n\n\n@app.route(\"/sign_up\", methods=[\"GET\", \"POST\"])\ndef sign_up():\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n flash(\"Already Logged In\")\n return redirect(\"/\")\n if request.method == \"GET\":\n return render_template(\"sign_up.html\", login_status=None)\n elif request.method == \"POST\":\n if request.form[\"password\"] == request.form[\"confirm_password\"]:\n doc = {\n \"first_name\": request.form.get(\"first_name\"),\n \"last_name\": request.form.get(\"last_name\"),\n \"username\": request.form.get(\"username\"),\n \"email\": request.form.get(\"email\").lower(),\n \"password\": request.form.get(\"password\")\n }\n if mongo.db.users.find_one({\"email\": doc[\"email\"]}) is None:\n session[\"logged_in\"] = {\n \"first_name\": doc[\"first_name\"],\n \"last_name\": doc[\"last_name\"],\n \"email\": doc[\"email\"],\n \"username\": doc[\"username\"]\n }\n\n mongo.db.users.insert_one(doc)\n\n flash(\"Successfully Signed Up\")\n return redirect(\"/\")\n else:\n flash(\"An Account is Already Registered with that Email\")\n return redirect(\"/sign_up\")\n else:\n flash(\"Confirm Password Does Not Match Password\")\n return redirect(\"/sign_up\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n flash(\"Already Logged In\")\n return redirect(\"/\")\n\n if request.method == \"GET\":\n return render_template(\"login.html\", login_status=None)\n elif request.method == \"POST\":\n doc = {\n \"email\": 
request.form.get(\"email\").lower(),\n \"password\": request.form.get(\"password\")\n }\n found = mongo.db.users.find_one({\n \"email\": doc[\"email\"],\n \"password\": doc[\"password\"]}\n )\n if found is not None:\n session[\"logged_in\"] = {\n \"first_name\": found[\"first_name\"],\n \"last_name\": found[\"last_name\"],\n \"email\": found[\"email\"],\n \"username\": found[\"username\"]\n }\n flash(\"Successfully Logged In\")\n return redirect(\"/\")\n else:\n flash(\"Incorrect Email or Password\")\n return redirect(\"/login\")\n\n\n@app.route(\"/logout\")\ndef logout():\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n session[\"logged_in\"] = {}\n else:\n flash(\"Not Logged In\")\n\n return redirect(\"/\")\n\n\n@app.route(\"/api/blogs\")\ndef api_blogs():\n to_return = []\n for blog in reversed(sorted(list(mongo.db.blogs.find({})), key=lambda date: datetime.datetime.strptime(\n date[\"date_released\"] + date[\"time_released\"], \"%m/%d/20%y%H:%M:%S:%f\"\n ))):\n blog[\"_id\"] = str(blog[\"_id\"])\n blog[\"link\"] = \"https://blogger-101.herokuapp.com/\" + blog[\"link\"]\n to_return.append(blog)\n return jsonify(to_return)\n\n\n@app.route(\"/api/add_blog_new\", methods=[\"POST\"])\ndef add_blog_new():\n title = request.form.get(\"blog_title\")\n name = (\"_\".join(title.split(\" \"))).lower()\n to_upload_image = app.config[\"ImgurObject\"]._send_request(\n 'https://api.imgur.com/3/image',\n method='POST',\n params={\n 'image': base64.b64encode(request.files['file'].read())\n }\n )\n doc = {\n \"title\": title,\n \"user\": request.form.get(\"user\"),\n \"name\": name+\".html\",\n \"text\": request.form.get(\"blog_content\"),\n \"link\": \"/blog/%s\" % name,\n \"date_released\": datetime.datetime.utcnow().strftime(\"%m/%d/%Y\"),\n \"time_released\": datetime.datetime.utcnow().strftime(\"%H:%M:%S:%f\"),\n \"comments\": [],\n \"image\": to_upload_image[\"link\"]\n }\n mongo.db.blogs.insert_one(doc)\n return 
redirect(\"/\")\n\n\n@app.route(\"/api/check_user\", methods=[\"POST\"])\ndef check_user():\n email = (request.json[\"email\"]).lower()\n password = request.json[\"password\"]\n\n user_found = mongo.db.users.find_one({\"email\": email, \"password\": password})\n\n if user_found is not None:\n return {\"found\": True, \"user_found\": user_found[\"username\"]}\n else:\n return {\"found\": False}\n\n\n@app.route(\"/api/add_user\", methods=[\"POST\"])\ndef add_user():\n doc = {\n \"first_name\": request.json.get(\"first_name\"),\n \"last_name\": request.json.get(\"last_name\"),\n \"username\": request.json.get(\"username\"),\n \"email\": request.json.get(\"email\"),\n \"password\": request.json.get(\"password\")\n }\n if mongo.db.users.find_one({\"email\": doc[\"email\"]}) is None:\n if mongo.db.users.find_one({\"username\": doc[\"username\"]}) is None:\n mongo.db.users.insert_one(doc)\n return {\"success\": True, \"already\": None}\n else:\n return {\"success\": False, \"already\": \"username\"}\n else:\n if mongo.db.users.find_one({\"username\": doc[\"username\"], \"email\": doc[\"email\"]}) is not None:\n return {\"success\": False, \"already\": \"both\"}\n else:\n return {\"success\": False, \"already\": \"email\"}\n\n\n@app.route(\"/api/add_comment/\", methods=[\"POST\"])\ndef add_comment():\n blog = request.json[\"blog_title\"]\n comment_type = request.json[\"type\"]\n comment_content = \"‌\"+request.json[\"comment_content\"]\n blog_found = mongo.db.blogs.find_one({\"title\": blog})\n if blog_found is not None:\n if comment_type == \"main\":\n _id = mongo.db.comments.insert_one({\n \"comment\": comment_content,\n \"user\": request.json[\"user\"]\n })\n comments_tmp = blog_found[\"comments\"]\n comments_tmp.append([str(_id.inserted_id), []])\n mongo.db.blogs.update_one(\n {\"title\": blog},\n {\"$set\": {\"comments\": comments_tmp}}\n )\n return {\"worked\": True}\n else:\n id_of_comment = request.json[\"id\"]\n if [True for comment in blog_found[\"comments\"] if 
comment[0] == id_of_comment]:\n _id = mongo.db.comments.insert_one({\n \"comment\": comment_content,\n \"user\": request.json[\"user\"]\n })\n _id = str(_id.inserted_id)\n comments_tmp = blog_found[\"comments\"]\n comments_tmp[[comments_tmp.index(i) for i in comments_tmp if i[0] == request.json[\"id\"]][0]][1].append(_id)\n mongo.db.blogs.update_one(\n {\"title\": blog},\n {\"$set\": {\"comments\": comments_tmp}}\n )\n return {\"worked\": True}\n else:\n return {\"worked\": False}\n else:\n return {\"worked\": False}\n\n\n@app.route(\"/api/get_blog_comments\", methods=[\"POST\"])\ndef get_comments():\n if mongo.db.blogs.find_one({\"title\": request.json.get(\"blog_title\")}) is not None:\n comments = mongo.db.blogs.find_one({\"title\": request.json.get(\"blog_title\")})[\"comments\"]\n commentsToShow = []\n for comment in comments:\n returned = mongo.db.comments.find_one({\"_id\": ObjectId(str(comment[0]))})\n all_comments = [returned[\"comment\"], returned[\"user\"], str(returned[\"_id\"])]\n sub_comments = []\n for subComment in comment[1]:\n returned2 = mongo.db.comments.find_one({\"_id\": ObjectId(str(subComment))})\n sub_comments.append([returned2[\"comment\"], returned2[\"user\"]])\n all_comments.append(sub_comments)\n commentsToShow.append(all_comments)\n return {\"found\": commentsToShow, \"number_of_comments\": len(commentsToShow)}\n else:\n return {\"found\": False}\n\n\n@app.errorhandler(HTTPException)\ndef errorHandling(error):\n flash(\"Page Not Found\")\n if \"logged_in\" in session and session[\"logged_in\"] is not None:\n return render_template(\n \"error.html\",\n error=error,\n code=error.code,\n login_status=dict(session[\"logged_in\"])\n )\n else:\n return render_template(\n \"error.html\",\n error=error,\n code=error.code,\n login_status=None\n )\n\n\ndef list_blogs():\n return reversed(\n sorted(\n list(mongo.db.blogs.find({})), key=lambda date: datetime.datetime.strptime(date[\"date_released\"] + date[\"time_released\"], 
\"%m/%d/20%y%H:%M:%S:%f\")\n )\n )\n\n\napp.add_template_global(list_blogs, name=\"find_blogs\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=(\"DYNO\" not in os.environ), threaded=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"481451820","text":"from __future__ import division\nimport tensorflow as tf\nimport sys\n\ndef variable_summaries(var,var_name):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(var_name+'_summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\nclass Model(object):\n\n\tdef __init__(self,features,labels,seq_length,config,is_training):\n\n\t\tif is_training:\n\t\t\tbatch_size=config.batch_size\n\n\t\telse:\n\t\t\tbatch_size=1\n\n\t\tglobal_step = tf.Variable(0, trainable=False)\n\t\tself._global_step=global_step\n\n\t\t# lstm cells definition\n\t\twith tf.variable_scope('forward'):\n\n\t\t\tforward_cells = []\n\t\t\tfor i in range(config.num_layers):\n\t\t\t\twith tf.variable_scope('layer_{:d}'.format(i)):\n\t\t\t\t\tlstm_cell_forward = tf.contrib.rnn.LSTMCell(config.n_hidden,use_peepholes=True,\n forget_bias=1.0,activation=tf.tanh,\n initializer=tf.random_uniform_initializer(minval=-0.1,maxval=0.1))\n\t\t\t\t\tforward_cells.append(lstm_cell_forward)\n\n\t\t\t#initial_states_fw=forward_cells.zero_state(batch_size,tf.float32)\n\n\t\twith tf.variable_scope('backward'):\n\n\t\t\tbackward_cells = []\n\t\t\tfor i in range(config.num_layers):\n\t\t\t\twith tf.variable_scope('layer_{:d}'.format(i)):\n\t\t\t\t\tlstm_cell_backward = 
tf.contrib.rnn.LSTMCell(config.n_hidden,use_peepholes=True,\n forget_bias=1.0,activation=tf.tanh,\n initializer=tf.random_uniform_initializer(minval=-0.1,maxval=0.1))\n\t\t\t\t\tbackward_cells.append(lstm_cell_backward)\n\n\t\t\t#initial_states_bw=forward_cells.zero_state(batch_size,tf.float32)\n\n\t\twith tf.variable_scope('RNN'):\n\t\t\trnn_outputs, output_state_fw, output_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcells_fw=forward_cells,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcells_bw=backward_cells,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tinputs=features,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tinitial_states_fw=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tinitial_states_bw=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsequence_length=seq_length,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tparallel_iterations=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tscope=None)\n\n\t\t\trnn_output_fw , rnn_output_bw = tf.split(rnn_outputs,num_or_size_splits=2, axis=2)\n\n\t\twith tf.variable_scope('output'):\n\t\t\toutput_fw_weights = tf.get_variable('forward_weights',[config.n_hidden,config.audio_labels_dim],dtype=tf.float32,\n\t\t\t initializer=tf.random_uniform_initializer(minval=-0.1,maxval=0.1))\n\t\t\toutput_bw_weights = tf.get_variable('backward_weights',[config.n_hidden,config.audio_labels_dim],dtype=tf.float32,\n\t\t\t initializer=tf.random_uniform_initializer(minval=-0.1,maxval=0.1))\n\t\t\toutput_biases = tf.get_variable('biases',shape=[config.audio_labels_dim],dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\tinitializer=tf.random_uniform_initializer(minval=-0.1,maxval=0.1))\n\n\t\t\trnn_output_fw = tf.reshape(rnn_output_fw,[-1,config.n_hidden])\n\t\t\trnn_output_bw = tf.reshape(rnn_output_bw,[-1,config.n_hidden])\t\t\n\t\t\t\n\t\t\toutput = tf.matmul(rnn_output_fw,output_fw_weights) + tf.matmul(rnn_output_bw,output_bw_weights) + output_biases\n\t\t\n\t\t\tlogits = tf.reshape(output,[batch_size,-1,config.audio_labels_dim])\n\n\t\tif is_training:\n\t\t# 
evaluate cost and optimize\n\t\t\twith tf.name_scope('cost'):\n\t\t\t\tself._cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=labels))\n\t\t\t\ttf.summary.scalar('cost',self._cost)\n\n\t\t\twith tf.name_scope('optimizer'):\n\t\t\t\tlearning_rate = tf.train.exponential_decay(config.learning_rate, global_step,\n\t\t\t config.updating_step, config.learning_decay, staircase=True)\n\n\t\t\t\tself._learning_rate= learning_rate\n\n\t\t\t\tif \"momentum\" in config.optimizer_choice:\n\t\t\t\t\tself._optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)\n\t\t\t\telif \"adam\" in config.optimizer_choice:\n\t\t\t\t\tself._optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Optimizer must be either momentum or adam. Closing.\")\n\t\t\t\t\tsys.exit()\n\n\t\t\t\t# gradient clipping\n\t\t\t\tgradients , variables = zip(*self._optimizer.compute_gradients(self._cost))\n\t\t\t\tclip_grad = [None if gradient is None else tf.clip_by_norm(gradient, 10.0) for gradient in gradients] \n\t\t\t\tself._optimize = self._optimizer.apply_gradients(zip(clip_grad,variables),global_step=self._global_step)\n\n\t\telse:\n\n\t\t\tposteriors=tf.nn.softmax(logits)\n\t\t\tprediction=tf.argmax(logits, axis=2)\n\t\t\tcorrect = tf.equal(prediction,tf.to_int64(labels))\n\t\t\taccuracy=tf.reduce_mean(tf.cast(correct,tf.float32))\n\n\t\t\tself._posteriors=posteriors\n\t\t\tself._accuracy=accuracy\n\t\t\tself._labels = labels\n\t\t\tself._prediction = prediction\n\n\t@property\n\tdef cost(self):\n\t\treturn self._cost\n\n\t@property\n\tdef optimize(self):\n\t\treturn self._optimize\n\n\t@property\n\tdef correct(self):\n\t\treturn self._correct\n\n\t@property\n\tdef posteriors(self):\n\t\treturn self._posteriors\n\n\t@property\n\tdef accuracy(self):\n\t\treturn self._accuracy\n\n\t@property\n\tdef labels(self):\n\t\treturn self._labels\n\n\t@property\n\tdef learning_rate(self):\n\t\treturn 
self._learning_rate\n\n\t@property\n\tdef global_step(self):\n\t\treturn self._global_step\n\n\n\t@property\n\tdef prediction(self):\n\t\treturn self._prediction","sub_path":"BiLSTM/bi_rnn_model.py","file_name":"bi_rnn_model.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"210992890","text":"import re\n\nfrom django.forms import ValidationError\nfrom nose.tools import eq_\nfrom pyquery import PyQuery as pq\n\nfrom kitsune.sumo.tests import TestCase\nfrom kitsune.users.forms import ProfileForm, username_allowed\nfrom kitsune.users.tests import TestCaseBase\nfrom kitsune.users.validators import TwitterValidator\n\nFACEBOOK_URLS = (\n ('https://facebook.com/valid', True),\n ('http://www.facebook.com/valid', True),\n ('htt://facebook.com/invalid', False),\n ('http://notfacebook.com/invalid', False),\n ('http://facebook.com/', False),\n)\n\n\nclass ProfileFormTestCase(TestCaseBase):\n form = ProfileForm()\n\n def setUp(self):\n self.form.cleaned_data = {}\n\n def test_facebook_pattern_attr(self):\n \"\"\"Facebook field has the correct pattern attribute.\"\"\"\n fragment = pq(self.form.as_ul())\n facebook = fragment('#id_facebook')[0]\n assert 'pattern' in facebook.attrib\n\n pattern = re.compile(facebook.attrib['pattern'])\n for url, match in FACEBOOK_URLS:\n eq_(bool(pattern.match(url)), match)\n\n def test_clean_facebook(self):\n clean = self.form.clean_facebook\n for url, match in FACEBOOK_URLS:\n self.form.cleaned_data['facebook'] = url\n if match:\n clean() # Should not raise.\n else:\n self.assertRaises(ValidationError, clean)\n\n\nclass TwitterValidatorTestCase(TestCase):\n\n def setUp(self):\n\n def test_valid(self):\n TwitterValidator('a_valid_name')\n\n def test_has_number(self):\n TwitterValidator('valid123')\n\n def test_has_letter_number_underscore(self):\n TwitterValidator('valid_name_123')\n\n def test_has_slash(self):\n # Twitter usernames can not have 
slash \"/\"\n self.assertRaises(ValidationError, lambda: TwitterValidator('x/'))\n\n def test_has_at_sign(self):\n # Dont Accept Twitter Username with \"@\"\n self.assertRaises(ValidationError, lambda: TwitterValidator('@x'))\n\n\nclass Testusername_allowed(TestCase):\n def test_good_names(self):\n data = [\n ('ana', True),\n ('rlr', True),\n ('anal', False),\n ]\n\n for name, expected in data:\n eq_(username_allowed(name), expected)\n","sub_path":"kitsune/users/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"313647655","text":"#!/usr/bin/python\nimport praw\n\nreddit = praw.Reddit('bot1')\n\nsubreddit = reddit.subreddit(\"FCFLfootball\")\n\nreddit_base_url = \"http://www.reddit.com\"\n\nleague_name = 'FCFL'\n\nteams_flair = '[TEAMS]'\ndraft_flair = '[DRAFT]'\n\nedit_tag = 'EDIT'\n\nauctions_file = './pickle/auctions_file.pickle'\nleague_file = './pickle/league_file.pickle'\nplayer_bids_file = './pickle/player_bids_file.pickle'\nteam_assignments_file = './pickle/team_assignments_file.pickle'\n\nchars_to_strip = '$#!@%'\n\nmax_player_copies = 2\n\n# pick_time_seconds = 86400 # Real limit -- 24 Hours\npick_time_seconds = 43200 # Test Limit -- 12 hours\n\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"279375812","text":"\"\"\"Helper classes and functions for engines.py\"\"\"\nimport re\nimport keyword\nfrom collections.abc import MutableMapping\n\n\nclass InvalidQueryError(ValueError):\n def __init__(self, message):\n super().__init__(message)\n\n\nclass Results:\n \"\"\"\n Wraps query, query execution time, results, fields and provides method\n to convert results into json schema\n \"\"\"\n def __init__(self, query, results, execution_time):\n self._query = query\n 
self._data = results\n self._execution_time = execution_time * 1e-6\n\n @property\n def query(self):\n return self._query\n\n @property\n def data(self):\n return self._data\n\n @property\n def execution_time(self):\n return self._execution_time\n\n def to_json(self):\n obj = {\n 'meta': {\n 'query': self.query,\n 'execution_time_ms': self.execution_time\n },\n 'data': [\n dict(zip(document._fields, document)) for document in self.data\n ]\n }\n return obj\n\n def __len__(self):\n return len(self.data)\n\n\ndef make_python_identifier(string):\n s = string.lower()\n s = s.strip()\n # Make spaces into underscores\n s = re.sub('[\\\\s\\\\t\\\\n]+', '_', s)\n # Remove invalid characters\n s = re.sub('[^0-9a-zA-Z_]', '', s)\n # Remove leading characters until we find a letter or underscore\n s = re.sub('^[^a-zA-Z_]+', '', s)\n # Check that the string is not a python identifier\n while s in keyword.kwlist:\n s += '_1'\n return s\n\n\ndef convert_to_schema(json_like):\n \"\"\"\n Replaces all dict values to their types. 
Operates inplace.\n \"\"\"\n for key, value in json_like.items():\n if isinstance(value, MutableMapping):\n convert_to_schema(value)\n else:\n json_like[key] = type(value)\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"100817612","text":"import sys\nimport pygame\nimport random \n\npygame.init() \n\nclass Window: \n \"\"\"Window class contains the basic elements to launch a pygame window and refresh it\"\"\"\n\n TITLE = \"(MC)GYVR 2600 - (C) 1985 PLACEHOLDER INC\" \n SPRITE_SIZE = 30 \n RESOLUTION = [15*SPRITE_SIZE,15*SPRITE_SIZE] \n ICON_PTH = pygame.image.load(\"sprites/mur.png\") \n\n WINDOW = pygame.display.set_mode(RESOLUTION) \n WIN_TITLE = pygame.display.set_caption(TITLE)\n WIN_ICON = pygame.display.set_icon(ICON_PTH.convert_alpha())\n\n @classmethod \n def refresh(cls): \n pygame.display.flip() \n\n\nclass Level: \n \"\"\"Level class manages level creation on logical and graphical level\"\"\"\n \n def __init__(self, textver_pth):\n self.textver_pth = textver_pth\n self.backgr = pygame.image.load(\"sprites/fond_n.png\")\n self.wall_spr = pygame.image.load(\"sprites/mur2.png\")\n self.ligh_sprite = pygame.image.load(\"sprites/lig.png\")\n \n @property\n def listver (self): \n with open(self.textver_pth) as f: \n l = list()\n for x in f.readlines(): \n l += [list(x.strip('\\n'))] \n return l\n \n def display(self, guard): \n backgr_pf = self.backgr.convert()\n wall_spr_pf = self.wall_spr.convert()\n ligh_sprite_pf = self.ligh_sprite.convert_alpha()\n\n Window.WINDOW.blit(backgr_pf, [0,0]) \n\n for y, a in enumerate(self.listver): \n for x,b in enumerate(a): \n if b == 'W': \n Window.WINDOW.blit(wall_spr_pf, [x*Window.SPRITE_SIZE, y*Window.SPRITE_SIZE])\n if b == 'U' and guard.orientation == 'up': \n Window.WINDOW.blit(ligh_sprite_pf, [x*Window.SPRITE_SIZE, y*Window.SPRITE_SIZE])\n if b == 'L' and guard.orientation == 
'left':\n Window.WINDOW.blit(ligh_sprite_pf, [x*Window.SPRITE_SIZE, y*Window.SPRITE_SIZE])\n\n\nclass Character: \n \"\"\"Character class manages character sprites, positioning, orientation and display\"\"\"\n \n def __init__(self, sprite_pth, position, orientation):\n self.position = position \n self.sprite = pygame.image.load(sprite_pth)\n self.ko = False\n self.orientation = orientation\n self.count = 0\n \n @property\n def position_pf(self): \n return list(x*Window.SPRITE_SIZE for x in self.position)\n \n @property\n def X (self): \n return self.position[0] \n\n @property\n def Y (self): \n return self.position[1] \n\n def orient (self):\n self.count += 1\n\n if self.count % 30 == 0: \n if self.orientation == 'left': \n self.sprite = pygame.transform.rotate (self.sprite, -90)\n self.orientation = \"up\" \n elif self.orientation == 'up':\n self.sprite = pygame.transform.rotate(self.sprite, 90)\n self.orientation = \"left\" \n\n def display (self): \n sprite_pf = self.sprite.convert_alpha() \n if self.ko == False:\n Window.WINDOW.blit(sprite_pf, self.position_pf) \n\n\nclass Player (Character): \n \"\"\"Player class is a subclass of Character. 
It especially manages player movement, items collection, and ability to send a guard off to sleep\"\"\"\n \n def __init__(self, spriteG_pth, spriteD_pth, spriteH_pth, spriteB_pth, position, orientation, level): \n super().__init__(spriteD_pth, position, orientation) \n self.items = list()\n self.nl = level.listver\n self.sprite_pth_dict = dict(left = spriteG_pth, right = spriteD_pth, up = spriteH_pth, down = spriteB_pth) \n \n def move(self, direction): \n self.sprite = pygame.image.load(self.sprite_pth_dict[direction])\n self.orientation = direction\n\n if direction == 'right':\n if self.X+1= 0 and self.nl[self.Y-1][self.X] != 'W': \n self.position[1] -= 1\n\n elif self.Y-1 < 0 and self.nl[self.Y+14][self.X] != 'W': \n self.position[1] = 14\n\n if direction == 'left': \n if self.X-1 >=0 and self.nl[self.Y][self.X-1] != 'W': \n self.position[0] -= 1\n \n elif self.X-1 <0 and self.nl[self.Y][self.X+14] != 'W':\n self.position[0] = 14\n\n def gather(self, *items): \n if self.position in Item.VAL_POSITIONS:\n for item in items:\n if self.position == item.position and item.get == False:\n self.items += [item.rank]\n item.get = True\n print(self.items)\n\n def sleep (self, guard): \n monitored = dict(left = [guard.position,[13,14], [12,14], [11,14], [10,14], [9,14]], up = [guard.position, [14,13], [14,12], [14,11], [14,10], [14,9]])\n\n if self.position in monitored[guard.orientation]:\n\n if sorted(self.items) == [1,2,3] and self.position == guard.position:\n \n if self.orientation == 'right' and guard.orientation == 'up' or self.orientation == 'down' and guard.orientation == 'left':\n guard.ko = True\n print(\"Awesome! You won! Thanks for playing!\")\n pygame.quit()\n sys.exit()\n \n else:\n self.ko = True\n for x in range(15):\n print(\"What... What have you done...\")\n \n else: \n self.ko = True\n for x in range(15):\n print(\"What... 
What have you done...\")\n\n\nclass Item: \n \"\"\"Item class manages item positioning and display\"\"\"\n\n VAL_POSITIONS = list() \n \n def __init__(self, rank_item, sprite_pth):\n self.rank = rank_item\n self.position = self.VAL_POSITIONS[rank_item-1]\n self.get = False\n self.sprite = pygame.image.load(sprite_pth)\n \n @classmethod\n def get_positions(cls, nombre_item, level_listver):\n pos_positions = list()\n\n for y, a in enumerate(level_listver): \n for x, b in enumerate(a):\n if b == '0': \n pos_positions += [[x,y]]\n \n while len(cls.VAL_POSITIONS) < nombre_item: \n a = random.choice(pos_positions)\n if a not in cls.VAL_POSITIONS: \n cls.VAL_POSITIONS += [a] \n\n @property\n def position_pf(self): \n return list(x*Window.SPRITE_SIZE for x in self.position)\n \n def display(self):\n sprite_pf = self.sprite.convert_alpha() \n if not self.get: \n Window.WINDOW.blit(sprite_pf, self.position_pf)\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"385203075","text":"from z3 import *\r\nfrom Tkinter import *\r\nfrom openpyxl import load_workbook\r\nfrom tkFileDialog import askopenfilename\r\nimport time\r\n\r\n#################### Variable ######################\r\n\r\nx, y, z=Ints('x y z')\r\np, q, r=Bools('p q r')\r\n\r\n#################### Solve Enter ####################\r\n\r\ndef solve_e(event):\r\n solve()\r\n\r\n#################### Solve #########################\r\n\r\ndef solve():\r\n text1.delete(0.0, END)\r\n text2.delete(0.0, END)\r\n\r\n start_time=time.time() # time record start\r\n s=Solver()\r\n try:\r\n s.add(eval(ent.get()))\r\n\r\n if s.check()==sat:\r\n text1.insert(END, s)\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, s.check())\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, s.model()) \r\n else: #unsat\r\n text1.insert(END, s)\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 
s.check())\r\n \r\n end_time=time.time() # time record end\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'verification time : %f (s)' % (end_time-start_time))\r\n ent.delete(0, END)\r\n except:\r\n text1.insert(END, 'Syntax Error!')\r\n ent.delete(0, END)\r\n pass\r\n \r\n\r\n################## EXCEL #####################\r\n\r\ndef excel(): \r\n text1.delete(0.0, END)\r\n text2.delete(0.0, END)\r\n\r\n cnt=0 # all cases\r\n cntSat=0 # sat count\r\n\r\n posSat=list() # sat position\r\n posUnsat=list() # unsat position\r\n posSyntaxError=list() # syntax error position\r\n\r\n fname = askopenfilename(filetypes=((\"Microsoft Office Excel(.xlsx)\", \"*.xlsx\"),(\"All files\", \"*.*\") ))\r\n wb = load_workbook(filename =fname, use_iterators = True)\r\n\r\n start_time=time.time() # time record start\r\n\r\n for sheet in wb:\r\n for row in sheet.rows:\r\n s=Solver()\r\n cntSyntaxError=0\r\n cntTitle=0\r\n\r\n for cell in row:\r\n if cntTitle==0:\r\n title=cell.value\r\n cntTitle+=1\r\n else: \r\n rowVal=len(row)-1 \r\n try: \r\n if cell.value!=None:\r\n s.add(eval(cell.value))\r\n cnt+=1\r\n if cnt%rowVal==0 and cntSyntaxError==0:\r\n text2.insert(END, '(%d) %s' % (cnt/rowVal, title)) \r\n text2.insert(END, '\\n\\n')\r\n if s.check()==sat:\r\n cntSat+=1\r\n posSat.append(cnt/rowVal)\r\n text2.insert(END, s)\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, s.check())\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, s.model())\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, '--------------------------------------------------------------------------------\\n')\r\n else: #Unsat\r\n posUnsat.append(cnt/rowVal)\r\n text2.insert(END, s)\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, s.check())\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, '--------------------------------------------------------------------------------\\n')\r\n except:\r\n if cntSyntaxError==0:\r\n cntSyntaxError+=1\r\n text2.insert(END, '(%d) %s' % ((cnt/rowVal)+1, 
title))\r\n posSyntaxError.append((cnt/rowVal)+1)\r\n cnt+=1\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, 'Syntax Error!')\r\n text2.insert(END, '\\n\\n')\r\n text2.insert(END, '--------------------------------------------------------------------------------\\n') \r\n ent.delete(0, END)\r\n pass \r\n else:\r\n cnt+=1\r\n \r\n end_time=time.time() # time record end \r\n ent.delete(0, END)\r\n text1.delete(0.0, END)\r\n text2.insert(END, '--------------------------------------------------------------------------------\\n')\r\n text2.focus_set()\r\n \r\n if cntSat==cnt/rowVal: # For all, Sat\r\n text1.insert(END, '[[ RESULT ]]')\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'All cases are Satisfiable! (%d)\\n\\n' % (cnt/rowVal))\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'verification time : %f (s)' % (end_time-start_time)) \r\n else: # Exists, Unsat or Syntax error\r\n text1.insert(END, '[[ RESULT ]]')\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'All cases (%d)\\n\\n' % (cnt/rowVal))\r\n text1.insert(END, 'Sat(%d) / Unsat(%d) / Syntax Error(%d)' % ((cntSat), (cnt/rowVal-cntSat-len(posSyntaxError)), len(posSyntaxError)))\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'Sat position ')\r\n text1.insert(END, posSat) #Sat position\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'Unsat position ')\r\n text1.insert(END, posUnsat) #Unsat position\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'Syntax Error position ')\r\n text1.insert(END, posSyntaxError)\r\n text1.insert(END, '\\n\\n')\r\n text1.insert(END, 'verification time : %f (s)' % (end_time-start_time)) \r\n \r\n \r\n# GUI #############################################\r\nroot=Tk()\r\nroot.title(\"Z3 GUI Tool\")\r\n\r\n#####################################################\r\n\r\nframe1=Frame(root)\r\nframe1.pack()\r\n\r\nent=Entry(frame1, width=60)\r\nent.pack(side=LEFT, pady=20)\r\nent.focus_set()\r\nent.bind('', solve_e) #enter 
event\r\n\r\nbtn1=Button(frame1, text=\"Solve\", command=solve)\r\nbtn1.pack(side=LEFT)\r\n\r\nbtn2=Button(frame1, text=\"EXCEL\", command=excel)\r\nbtn2.pack(side=LEFT)\r\n\r\n#####################################################\r\n\r\nframe2=Frame(root)\r\nframe2.pack()\r\n\r\ntext1=Text(frame2, width=80, height=13)\r\ntext1.pack(side=LEFT, fill=Y)\r\ntext1.insert(END, \"Hello, Z3 GUI Tool!\")\r\ntext1.insert(END, '\\n\\n')\r\ntext1.insert(END, \"Specify property to verify.\")\r\n\r\n#####################################################\r\n\r\nframe4=Frame(root)\r\nframe4.pack()\r\nlabel=Label(frame4, width=80, height=1)\r\nlabel.pack()\r\n\r\n#####################################################\r\n\r\nframe3=Frame(root)\r\nframe3.pack()\r\n\r\ntext2=Text(frame3, width=80, height=20)\r\ntext2.pack(side=LEFT, fill=Y)\r\n\r\ns = Scrollbar(frame3)\r\ns.pack(side=RIGHT, fill=Y)\r\ns.config(command=text2.yview)\r\ntext2.config(yscrollcommand=s.set)\r\n\r\n#####################################################\r\nroot.mainloop()\r\n\r\n\r\n","sub_path":"Z3 GUI Tool.py","file_name":"Z3 GUI Tool.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"9413214","text":"import collections\n\nimport networkx as nx\nimport numpy as np\n\nfrom .scores import population_balance_score\nfrom .state import CENTROID_DIM_LENGTH, log_contested_edges\n\ntry:\n from .biconnected import calculate_com_inner\n cython_biconnected = True\nexcept ImportError:\n print(\"No Cython for you!\")\n cython_biconnected = False\n\n\n\ndef contested_edges_naive(state):\n # generate contested edges by testing each edge in the graph. 
it's brute force and definitely works\n contested = set()\n\n for edge in state.graph.edges:\n if state.node_to_color[edge[0]] != state.node_to_color[edge[1]]:\n contested.add((min(edge[0], edge[1]), max(edge[0], edge[1]))) # always store small, large\n # if state.graph.nodes[edge[0]]['coloring'] != graph.nodes[edge[1]]['coloring']:\n # contested.add((min(edge), max(edge))) # always store small, large\n return contested\n\n\ndef calculate_com_naive(state, weight_attribute=None):\n com = {}\n total_weight = {}\n for district_id, nodes in state.color_to_node.items():\n if weight_attribute is None:\n weights = {node_id: 1 for node_id in nodes}\n else:\n weights = {state.graph.nodes()[node_id][weight_attribute] for node_id in nodes}\n\n total_weight[district_id] = sum(weights.values())\n com[district_id] = np.array([sum([state.graph.nodes()[i]['Centroid'][j]*weights[i] for i in nodes]\n )/total_weight[district_id] for j in range(CENTROID_DIM_LENGTH)], dtype='d')\n return com, total_weight\n\n\ndef update_center_of_mass(state):\n\n if not hasattr(state, 'com_centroid'):\n state.com_centroid, state.com_total_weight = calculate_com_naive(state)\n state.com_updated = state.iteration\n\n for i in state.move_log[state.com_updated:]:\n if i is not None:\n node_id, old_color, new_color = i\n (state.com_centroid[new_color], state.com_centroid[old_color],\n state.com_total_weight[new_color], state.com_total_weight[old_color]) = calculate_com_one_step(state, i)\n\n state.com_updated = state.iteration\n\n\ndef create_district_boundary_naive(state):\n\n district_boundary = collections.defaultdict(set)\n\n for node_id, color in state.node_to_color.items():\n\n for neighbor in state.graph.neighbors(node_id):\n if state.node_to_color[neighbor]!= color:\n neighbor_color = state.node_to_color[neighbor]\n district_boundary[(min(color, neighbor_color), max(color, neighbor_color))].add(\n (min(node_id, neighbor), max(node_id, neighbor)))\n\n return district_boundary\n\n\ndef 
update_contested_edges(state):\n if not hasattr(state, 'contested_edges'):\n state.contested_edges = contested_edges_naive(state)\n state.contested_nodes = contested_nodes_naive(state) # maybe we should track this separately?\n state.contested_edges_updated = state.iteration # set to current iteration\n\n # this may be an empty list if it's already been updated\n for move in state.move_log[state.contested_edges_updated:]:\n\n if move is not None:\n node_id, old_color, new_color = move\n # move is provided as (node_id, color_id)\n neighbors = state.graph.edges(node_id)\n # edges to add\n state.contested_edges.update(\n {(min(u, v), max(u, v)) for u, v in neighbors if state.node_to_color[v] != new_color})\n # edges to remove\n state.contested_edges.difference_update(\n {(min(u, v), max(u, v)) for u, v in neighbors if state.node_to_color[v] == new_color})\n\n neighboring_nodes = set(nx.neighbors(state.graph, node_id))\n\n # add neighbors that aren't new_color\n state.contested_nodes.update(neighboring_nodes-state.color_to_node[new_color])\n\n # remove neighbors that are new_color\n state.contested_nodes.difference_update(neighboring_nodes.intersection(state.color_to_node[new_color]))\n\n if state.log_contested_edges:\n log_contested_edges(state) # separated into function so we can track how expensive this is\n\n # # at some point it will be more efficient to just naively reconstruct the contested edges, we should look out for this\n state.contested_edges_updated = state.iteration\n\n\ndef perimeter_naive(state):\n # TODO refactor\n dd = collections.defaultdict(int)\n\n for n0, n1 in state.contested_edges:\n shared_length = state.graph.edges()[(n0,n1)]['border_length']\n dd[state.node_to_color[n0]] += shared_length\n dd[state.node_to_color[n1]] += shared_length\n\n return dd\n\ndef area_naive(state):\n\n return {district_id: sum(state.graph.nodes()[node_id]['area'] for node_id in state.color_to_node[district_id])\n for district_id in 
state.color_to_node.keys()}\n\n\ndef update_perimeter_and_area(state):\n\n # this version assumes that this will get run EVERY time a node is flipped\n update_contested_edges(state) # guarantee contested edges updated before proceeding\n\n if not hasattr(state, 'district_to_perimeter'):\n state.district_to_perimeter = perimeter_naive(state)\n state.district_to_area = area_naive(state)\n state.perimeter_updated = state.iteration # set to current iteration\n\n for move in state.move_log[state.perimeter_updated:]:\n\n if move is None:\n continue\n\n node_id, old_color, new_color = move\n\n for neighbor in state.graph.neighbors(node_id):\n if neighbor in state.color_to_node[new_color]:\n # we need to reduce the perimeter of new_color by their shared amount\n state.district_to_perimeter[new_color] -= state.graph.edges[(node_id, neighbor)]['border_length']\n\n elif neighbor in state.color_to_node[old_color]:\n # we need to increase the perimeter of old_color by their shared amount\n state.district_to_perimeter[old_color] += state.graph.edges[(node_id, neighbor)]['border_length']\n\n else:\n # we need to increase the perimeter of new_color AND decrease of old color. 
no change to the perimeter of the 3rd district\n state.district_to_perimeter[new_color] += state.graph.edges[(node_id, neighbor)]['border_length']\n state.district_to_perimeter[old_color] -= state.graph.edges[(node_id, neighbor)]['border_length']\n\n\n if state.include_external_border:\n state.district_to_perimeter[old_color] -= state.graph.nodes()[node_id]['external_border']\n state.district_to_perimeter[new_color] += state.graph.nodes()[node_id]['external_border']\n\n state.district_to_area[old_color] -= state.graph.nodes()[node_id]['area']\n state.district_to_area[new_color] += state.graph.nodes()[node_id]['area']\n state.perimeter_computer.update(node_id, old_color, new_color)\n\n state.perimeter_updated = state.iteration\n\n\ndef update_population(state):\n\n if not hasattr(state, 'population_counter'):\n state.population_counter = calculate_population_naive(state)\n\n if state.ideal_pop is None:\n state.ideal_pop = sum(state.population_counter.values())/len(state.population_counter)\n\n state.population_deviation = population_balance_naive(state, state.ideal_pop)\n\n else:\n for move in state.move_log[state.population_counter_updated:]:\n if move is not None:\n node_id, old_color, new_color = move\n state.population_counter[old_color] -= state.graph.nodes()[node_id]['population']\n state.population_counter[new_color] += state.graph.nodes()[node_id]['population']\n state.population_deviation = population_balance_score(state, move)\n\n state.population_counter_updated = state.iteration\n\n\ndef check_population(state, node_id, old_color, new_color, minimum=400, maximum=1200):\n return ((state.population_counter[old_color] - state.graph.nodes()[node_id]['population']) > minimum\n and state.population_counter[new_color] + state.graph.nodes()[node_id]['population'] < maximum\n )\n\n\ndef population_balance_naive(state, ideal_pop):\n return np.sqrt(sum((sum([state.graph.nodes()[node]['population'] for node in nodes]) - ideal_pop)**2\n for nodes in 
state.color_to_node.values()))\n\n\ndef process_boundary_move(state, node_id, old_color, new_color, neighbor):\n neighbor_color = state.node_to_color[neighbor]\n if neighbor_color != new_color:\n key = (min(neighbor_color, new_color), max(neighbor_color, new_color))\n state.district_boundary[key].add((min(neighbor, node_id), max(neighbor, node_id)))\n if neighbor_color != old_color:\n key = (min(neighbor_color, old_color), max(neighbor_color, old_color))\n state.district_boundary[key].remove((min(neighbor, node_id), max(neighbor, node_id)))\n\n\ndef update_district_boundary(state):\n\n if not hasattr(state, 'district_boundary'):\n\n state.district_boundary = create_district_boundary_naive(state)\n state.district_boundary_updated = state.iteration\n\n moves_to_do = [i for i in state.move_log[state.district_boundary_updated:] if i is not None]\n\n # TODO we have repeated code in two places, rip out into function\n\n if len(moves_to_do) < 5:\n for node_id, old_color, new_color in moves_to_do:\n for neighbor in state.graph.neighbors(node_id):\n neighbor_color = state.node_to_color[neighbor]\n key = (min(neighbor_color, new_color), max(neighbor_color, new_color))\n # node_key = (min(neighbor, node_id), max(neighbor))\n\n if neighbor_color != new_color:\n state.district_boundary[key].add((min(neighbor, node_id), max(neighbor, node_id)))\n else: # color == new_color\n node_key = (min(neighbor, node_id), max(neighbor, node_id))\n key = (min(neighbor_color, old_color), max(neighbor_color, old_color))\n if node_key in state.district_boundary[key]:\n state.district_boundary[key].remove(node_key)\n\n else:\n perturbed_nodes = dict()\n for node_id, old_color, new_color in moves_to_do:\n\n if node_id in perturbed_nodes:\n perturbed_nodes[node_id][1] = new_color\n else:\n perturbed_nodes[node_id] = (old_color, new_color)\n\n for node_id, (old_color, new_color) in perturbed_nodes.items():\n\n for neighbor in state.graph.neighbors(node_id):\n neighbor_color = 
state.node_to_color[neighbor]\n key = (min(neighbor_color, new_color), max(neighbor_color, new_color))\n\n if neighbor_color != new_color:\n state.district_boundary[key].add((min(neighbor, node_id), max(neighbor, node_id)))\n else: # color == new_color\n # key = (min(neighbor_color, new_color), max(neighbor_color, old_color))\n state.district_boundary[key].remove((min(neighbor, node_id), max(neighbor, node_id)))\n\n state.district_boundary_updated = state.iteration\n\n\ndef update_boundary_nodes_naive(state):\n counter = collections.Counter([v for k,v in state.node_to_color.items() if state.graph.nodes()[k]['boundary']])\n return counter\n\n\ndef update_boundary_nodes(state):\n\n if not hasattr(state, 'boundary_node_counter'):\n state.boundary_node_counter = update_boundary_nodes_naive(state)\n\n else:\n for move in state.move_log[state.boundary_node_updated:]:\n if move is not None and state.graph.nodes()[move[0]]['boundary']:\n # if we flipped a boundary node\n state.boundary_node_counter[move[1]] -= 1\n state.boundary_node_counter[move[2]] += 1\n\n state.boundary_node_updated = state.iteration\n\n\ndef compactness_naive(state):\n\n perimeter_dict = perimeter_naive(state)\n\n area_dict = {district_id: sum(node['population'] for node in state.graph.nodes() if node in state.graph.color_to_node[district_id])\n for district_id in state.graph.color_to_node}\n\n return sum(perimeter_dict[district_id]**2/area_dict[district_id] for district_id in state.graph.color_to_node)\n\n\ndef calculate_com_one_step(state, proposal, weight_attribute=None):\n node_id, old_color, new_color = proposal\n\n # com_centroid = copy.deepcopy(state.com_centroid) # ugh, should this function just be side-effecting? 
how bad is this cost?\n # total_weight = copy.deepcopy(state.com_total_weight)\n node = state.graph.nodes()[node_id] # how expensive is this lookup, anyways?\n\n weight = node[weight_attribute] if weight_attribute is not None else 1\n\n if cython_biconnected:\n output_new = calculate_com_inner(node['Centroid'], weight, state.com_centroid[new_color],\n state.com_total_weight[new_color])\n output_old = calculate_com_inner(node['Centroid'], -weight, state.com_centroid[old_color],\n state.com_total_weight[old_color])\n\n return np.array(output_new[0:2], dtype='d'), np.array(output_old[0:2], dtype='d'), output_new[2], output_old[2]\n\n else:\n\n centroid_new_color = (node['Centroid'] * weight + state.com_centroid[new_color] * state.com_total_weight[new_color])/(\n state.com_total_weight[new_color] + weight)\n centroid_old_color = (-node['Centroid'] * weight + state.com_centroid[old_color] * state.com_total_weight[old_color])/(\n state.com_total_weight[old_color] - weight)\n\n total_weight_new_color = state.com_total_weight[new_color] + weight\n total_weight_old_color = state.com_total_weight[old_color] - weight\n\n return centroid_new_color, centroid_old_color, total_weight_new_color, total_weight_old_color\n\n\ndef calculate_population_naive(state):\n return {district_id: sum(state.graph.nodes()[node_id]['population']\n for node_id in state.color_to_node[district_id]) for district_id in state.color_to_node}\n\n\ndef contested_nodes_naive(state):\n contested = set()\n for node_id in state.graph.nodes():\n color = state.node_to_color[node_id]\n if not all(color==state.node_to_color[other_id] for other_id in nx.neighbors(state.graph, node_id)):\n contested.add(node_id)\n return contested\n","sub_path":"build/lib.win-amd64-3.7/nrmc/updaters.py","file_name":"updaters.py","file_ext":"py","file_size_in_byte":14444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"332465325","text":"# Copyright 2008-2018 Univa 
Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mock\n\nimport boto\nfrom moto import mock_ec2_deprecated\nfrom tortuga.db.hardwareProfilesDbHandler import HardwareProfilesDbHandler\nfrom tortuga.db.models.node import Node\nfrom tortuga.db.softwareProfilesDbHandler import SoftwareProfilesDbHandler\nfrom tortuga.resourceAdapter.aws.aws import Aws, ResourceAdapter\n\n\ndef test_instantiation():\n \"\"\"\n Simple test to ensure resource adapter can be instantiated\n \"\"\"\n\n assert Aws()\n\n\ndef test_instantiation_with_addHostSession():\n \"\"\"\n Simple test to ensure resource adapter can be instantiated\n \"\"\"\n\n adapter = Aws(addHostSession=123)\n\n assert adapter.addHostSession == 123\n\n\ndef test_installer_public_ipaddress():\n with mock.patch(\n 'tortuga.resourceAdapter.aws.Aws.installer_public_ipaddress',\n new_callable=mock.PropertyMock) \\\n as installer_public_ipaddress_mock:\n installer_public_ipaddress_mock.return_value = '1.2.3.4'\n\n assert Aws()._get_installer_ip() == '1.2.3.4'\n\n\ndef test_installer_public_ipaddress_with_hardwareprofile():\n class DummyNic:\n def __init__(self, ip):\n self.ip = ip\n\n class DummyHardwareProfile:\n def __init__(self):\n self.nics = [\n DummyNic('1.2.3.4'),\n DummyNic('2.3.4.5'),\n ]\n\n ip = Aws()._get_installer_ip(hardwareprofile=DummyHardwareProfile())\n\n assert ip == '1.2.3.4'\n\n\n@mock.patch.object(Aws, '_load_config_from_database')\ndef test_deleteNode(load_config_dict_mock, dbm):\n 
load_config_dict_mock.return_value = {\n 'awsaccesskey': 'the_key',\n 'awssecretkey': 'the_secret'\n }\n\n with mock_ec2_deprecated():\n with dbm.session() as session:\n adapter = Aws()\n\n node = session.query(Node).filter(\n Node.name == 'ip-10-10-10-1.ec2.internal').one()\n\n adapter.deleteNode([node])\n\n\n@mock.patch.object(Aws, 'get_instance_size_mapping')\n@mock.patch.object(Aws, 'fire_provisioned_event')\n@mock.patch.object(Aws, '_pre_add_host')\n@mock.patch.object(Aws, '_load_config_from_database')\n@mock_ec2_deprecated\ndef test_start(load_config_dict_mock, pre_add_host_mock,\n fire_provisioned_even_mock, get_instance_size_mapping_mock,\n dbm, valid_ami):\n \"\"\"\n Test ResourceAdapter.start() workflow\n \"\"\"\n\n get_instance_size_mapping_mock.return_value = 8\n\n load_config_dict_mock.return_value = {\n 'awsaccesskey': 'the_key',\n 'awssecretkey': 'the_secret',\n 'keypair': 'the_keypair',\n 'ami': valid_ami,\n 'use_instance_hostname': 'true',\n 'instancetype': 'the_instancetype'\n }\n\n with dbm.session() as session:\n adapter = Aws(addHostSession='123EXAMPLE')\n\n # override default sleep time\n adapter.LAUNCH_INITIAL_SLEEP_TIME = 0.0\n\n addNodesRequest = {\n 'count': 2,\n }\n\n hardwareprofile = HardwareProfilesDbHandler().getHardwareProfile(\n session, 'aws2'\n )\n\n softwareprofile = SoftwareProfilesDbHandler().getSoftwareProfile(\n session, 'compute'\n )\n\n nodes = adapter.start(\n addNodesRequest, session, hardwareprofile,\n dbSoftwareProfile=softwareprofile\n )\n\n assert nodes and isinstance(nodes, list) and \\\n isinstance(nodes[0], Node)\n\n assert nodes[0].instance.instance\n\n if len(nodes) > 1:\n assert nodes[1].instance.instance\n\n pre_add_host_mock.assert_called()\n\n fire_provisioned_even_mock.assert_called()\n\n\n@mock.patch.object(Aws, 'fire_provisioned_event')\n@mock.patch.object(Aws, '_pre_add_host')\n@mock.patch.object(Aws, '_load_config_from_database')\n@mock_ec2_deprecated\ndef test_start_update_node(load_config_dict_mock, 
pre_add_host_mock,\n fire_provisioned_event_mock, dbm, valid_ami):\n configDict = {\n 'awsaccesskey': 'the_key',\n 'awssecretkey': 'the_secret',\n 'ami': valid_ami,\n 'use_instance_hostname': 'true',\n }\n\n load_config_dict_mock.return_value = configDict\n\n with dbm.session() as session:\n addHostSession = '123EXAMPLE'\n\n adapter = Aws(addHostSession=addHostSession)\n\n # override default sleep time\n adapter.LAUNCH_INITIAL_SLEEP_TIME = 0.0\n\n count = 3\n\n hardwareprofile = HardwareProfilesDbHandler().getHardwareProfile(\n session, 'aws2'\n )\n\n softwareprofile = SoftwareProfilesDbHandler().getSoftwareProfile(\n session, 'compute'\n )\n\n # create instances to be associated with nodes\n conn = boto.connect_ec2(configDict['awsaccesskey'],\n configDict['awssecretkey'])\n\n conn.run_instances(\n configDict['ami'],\n min_count=count,\n max_count=count\n )\n\n # get newly created instances\n instances = conn.get_only_instances()\n\n # intialize 'addNodesRequest'\n addNodesRequest = {\n 'nodeDetails': [],\n }\n\n for instance in instances:\n addNodesRequest['nodeDetails'].append({\n 'name': instance.private_dns_name,\n 'metadata': {\n 'ec2_instance_id': instance.id,\n 'ec2_ipaddress': instance.private_ip_address,\n }\n })\n\n # call Aws.start() with instance metadata\n nodes = adapter.start(\n addNodesRequest, session, hardwareprofile,\n dbSoftwareProfile=softwareprofile\n )\n\n assert nodes and len(nodes) == count\n\n assert isinstance(nodes[0], Node)\n\n assert nodes[0].softwareprofile.name == softwareprofile.name\n\n assert nodes[0].hardwareprofile.name == hardwareprofile.name\n\n assert nodes[0].addHostSession == addHostSession\n\n fire_provisioned_event_mock.assert_called()\n\n pre_add_host_mock.assert_called()\n","sub_path":"tests/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"377000936","text":"#!/usr/bin/python\n# -*- 
coding: utf-8 -*-\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask import render_template\n\nimport time\nimport serial\nimport GetUserData\nimport GetSat\nimport GetLook\nimport Pelco_D\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef index():\n\tcontext = {\n\t\t'lat': 0,\n\t\t'lon': 0,\n\t\t'alt': 0,\n\t\t'az': 0,\n\t\t'el': 0\n\t}\n\treturn render_template('index.html', **context)\n\n@app.route('/update', methods=['POST', \"GET\"])\ndef update():\n\tGetUserData.update(\"gui\")\n\tres = {\n\t\t'az': \"Update Done\",\n\t\t'el': \"\t\",\n\t}\n\treturn jsonify(res)\n\n\n@app.route('/predict', methods=['POST', \"GET\"])\ndef predict():\n\treceive = request.json\n\n\tSat = str(receive['satname'])\n\tLat = float(receive['lat'])\n\tLon = float(receive['lon'])\n\tAlt = float(receive['alt'])\n\n\tline1, line2, Lat, Lon, Alt = GetUserData.get_user_data(\"gui\", Sat, Lat, Lon, Alt)\n\tGetSat.generate(line1, line2)\n\tGetLook.generate(Lat, Lon, Alt)\n\n\ttt = time.time()\n\teciSat = GetSat.get_eciSat(tt)\n\tAZ, EL = GetLook.GetLook(tt, eciSat)\n\n\tres = {\n\t\t'az': AZ,\n\t\t'el': EL\n\t}\n\treturn jsonify(res)\n\n@app.route('/newTracker', methods=['POST', \"GET\"])\ndef newTracker():\n\treceive = request.json\n\tglobal Tracker\n\tif receive['cmd'] == \"y\":\n\t\tTracker = Pelco_D.Tracker(\"/dev/ttyUSB0\",2400)\n\tif receive['cmd'] == \"n\":\n\t\tTracker.close()\n\t\tdel Tracker\n\treturn \"ok\"\n\n\n@app.route('/setstep', methods=['POST', \"GET\"])\ndef setstep():\n\n\treceive = request.json\n\n\tif receive['cmd'] == \"LD\":\n\t\tTracker.down()\n\t\ttime.sleep(0.5)\n\t\tTracker.left()\n\tif receive['cmd'] != \"LD\":\n\t\tif receive['cmd'] == \"up\":\n\t\t\tTracker.up()\n\t\tif receive['cmd'] == \"down\":\n\t\t\tTracker.down()\n\t\tif receive['cmd'] == \"left\":\n\t\t\tTracker.left()\n\t\tif receive['cmd'] == \"right\":\n\t\t\tTracker.right()\n\t\ttime.sleep(0.5)\n\t\tTracker.stop()\n\n\treturn 
\"ok\"\n\n@app.route('/track', methods=['POST', \"GET\"])\ndef track():\n\treceive = request.json\n\tif receive['lat'] == \"update\":\n\t\tGetUserData.update(\"gui\")\n\t\tres = {\n\t\t\t'az': \"Update Done\",\n\t\t\t'el': \"\t\",\n\t\t}\n\n\treturn jsonify(res)\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run('0.0.0.0', 8080)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"125288058","text":"import pytest\n\nfrom leetcode.medium.ex0101_0200.ex0209 import InitialSolution\n\n\n@pytest.mark.parametrize(\"target, nums, expected\", [\n (7, [2,3,1,2,4,3], 2),\n (4, [1, 4, 4], 1),\n (11, [1,1,1,1,1,1,1,1], 0),\n (10, [10, 0, 0, 9, 1], 1),\n (10, [10], 1),\n (3, [2], 0),\n (1, [0], 0),\n])\ndef test_initial_finds_subarray(target: int, nums: list[int], expected: int):\n result = InitialSolution().minSubArrayLen(target, nums)\n assert result == expected\n","sub_path":"python/leetcode/medium/ex0101_0200/test/test_ex0209.py","file_name":"test_ex0209.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"413543363","text":"# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.\nimport string\nfrom typing import Any, Dict, List\n\nimport numpy as np\nfrom cycler import cycler\nfrom matplotlib import pyplot as plt\n\nfrom .mplab_evaluation import inf_colors\nfrom .pandemic_viz import PandemicViz\nfrom ..environment import PandemicObservation, sorted_infection_summary, InfectionSummary, LocationParams, \\\n PandemicSimState\nfrom ..utils import checked_cast\n\n__all__ = ['MatplotLibViz']\n\n\nclass MatplotLibViz(PandemicViz):\n \"\"\"Pandemic19 reinforcement learning matplotlib visualization\"\"\"\n\n _num_persons: int\n _max_hospitals_capacity: int\n _num_stages: int\n _show_reward: bool\n _show_stages: 
bool\n\n _gis: List[np.ndarray]\n _gts: List[np.ndarray]\n _location_type_is: Dict[str, int]\n _stages: List[np.ndarray]\n _rewards: List[float]\n\n _gis_legend: List[str]\n _critical_index: int\n _stage_indices: np.ndarray\n _ncols: int\n _nrows: int\n\n def __init__(self, num_persons: int,\n hospital_params: LocationParams,\n num_stages: int,\n show_reward: bool = False,\n show_stages: bool = True):\n \"\"\"\n :param num_persons: total number of persons in the simulator\n :param hospital_params: hospital location params\n :param num_stages: number of stages in the environment\n :param show_reward: show cumulative reward plot\n :param show_stages: show stages plot\n \"\"\"\n self._num_persons = num_persons\n self._max_hospitals_capacity = hospital_params.num * hospital_params.visitor_capacity\n self._num_stages = num_stages\n self._show_reward = show_reward\n self._show_stages = show_stages\n\n self._gis = []\n self._gts = []\n self._location_type_is = {}\n self._stages = []\n self._rewards = []\n\n self._gis_legend = [summ.value for summ in sorted_infection_summary]\n self._critical_index = self._gis_legend.index(InfectionSummary.CRITICAL.value)\n self._stage_indices = np.arange(num_stages)[..., None]\n self._ncols = 4\n self._nrows = 1\n\n def record(self, data: Any, **kwargs: Any) -> None:\n if isinstance(data, PandemicSimState):\n state = checked_cast(PandemicSimState, data)\n obs = PandemicObservation.create_empty()\n obs.update_obs_with_sim_state(state)\n self._location_type_is = {k.__name__: v for k, v in state.location_type_infection_summary.items()}\n elif isinstance(data, PandemicObservation):\n obs = data\n else:\n raise ValueError('Unsupported data type')\n\n self._gis.append(obs.global_infection_summary)\n self._gts.append(obs.global_testing_summary)\n self._stages.append(obs.stage)\n if 'reward' in kwargs:\n self._rewards.append(kwargs['reward'])\n\n def plot(self) -> None:\n \"\"\"Make plots\"\"\"\n gis = np.vstack(self._gis).squeeze()\n gts = 
np.vstack(self._gts).squeeze()\n stages = np.concatenate(self._stages).squeeze()\n\n plt.figure(figsize=(12, 4 * self._nrows))\n plt.rc('axes', prop_cycle=cycler(color=inf_colors))\n\n axs = []\n\n axs.append(plt.subplot(self._nrows, self._ncols, 1))\n plt.plot(gis)\n plt.legend(self._gis_legend, loc=1)\n plt.ylim([-0.1, self._num_persons + 1])\n plt.title('Global Infection Summary')\n plt.xlabel('time (days)')\n plt.ylabel('persons')\n\n axs.append(plt.subplot(self._nrows, self._ncols, 2))\n plt.plot(gts)\n plt.legend(self._gis_legend, loc=1)\n plt.ylim([-0.1, self._num_persons + 1])\n plt.title('Global Testing Summary')\n plt.xlabel('time (days)')\n plt.ylabel('persons')\n\n axs.append(plt.subplot(self._nrows, self._ncols, 3))\n plt.plot(gis[:, self._critical_index])\n plt.plot(np.arange(gis.shape[0]), np.ones(gis.shape[0]) * self._max_hospitals_capacity, 'y')\n plt.legend([InfectionSummary.CRITICAL.value, 'Max hospital capacity'], loc=1)\n plt.ylim([-0.1, self._max_hospitals_capacity * 3])\n plt.title('Critical Summary')\n plt.xlabel('time (days)')\n plt.ylabel('persons')\n\n axs.append(plt.subplot(self._nrows, self._ncols, 4))\n y = np.arange(len(self._location_type_is.keys()))\n plt.barh(y, [v/self._num_persons for v in self._location_type_is.values()])\n plt.yticks(y, list(self._location_type_is.keys()))\n plt.xlim([-0.1, 1.1])\n plt.title('% Infections / Location Type')\n plt.xlabel('% infections')\n plt.ylabel('location type')\n\n if self._show_stages:\n axs.append(plt.subplot(self._nrows, self._ncols, 4))\n plt.plot(stages)\n plt.ylim([-0.1, self._num_stages + 1])\n plt.title('Stage')\n plt.xlabel('time (days)')\n\n if self._show_reward and len(self._rewards) > 0:\n axs.append(plt.subplot(self._nrows, self._ncols, 5))\n plt.plot(np.cumsum(self._rewards))\n plt.title('Cumulative Reward')\n plt.xlabel('time (days)')\n\n plot_ref_labels = string.ascii_lowercase\n plot_ref_label_i = 0\n for ax in axs:\n ax.annotate(f'({plot_ref_labels[plot_ref_label_i]})', 
(0.5, 0.), xytext=(0, -25 - 20),\n textcoords='offset points', xycoords='axes fraction',\n ha='center', va='center', size=14)\n plot_ref_label_i += 1\n\n plt.tight_layout()\n\n plt.show()\n","sub_path":"python/pandemic_simulator/viz/mplib_viz.py","file_name":"mplib_viz.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"333336608","text":"import logging\nimport apache_beam as beam\nfrom apache_beam.io import ReadFromText\nfrom apache_beam.io import WriteToText\n\nclass FormatTable(beam.DoFn):\n def process(self, element):\n record = element\n \n #Get values from record\n county = record.get('County')\n impD = record.get('Alcohol_Impaired_Driving_Deaths')\n deaths = record.get('Driving_Deaths')\n percentage = record.get('Alcohol_Impaired__percentage_')\n z = record.get('Z_Score')\n \n county = county.upper()\n \n new_record = {'County':county,'ImpDeaths':impD,'DrivingDeaths':deaths,'ImpDeathsP':percentage,'ZScore':z}\n\n return[new_record]\n\n#Project Id is needed for bigquery data source, even with local execution.\noptions = {\n 'project': 'axial-module-216302'\n}\n\nopts = beam.pipeline.PipelineOptions(flags=[], **options)\n\n#construct a pipeline object and set configuration options \n#(pipeline runner that will execute pipeline --> DirectRunner)\n#(contains project ID: axial-module-216302)\nwith beam.Pipeline('DirectRunner', options=opts) as p:\n\n #Create PCollection from Big Query dataset.\n deaths_pcoll = p | 'Read Deaths15' >> beam.io.Read(beam.io.BigQuerySource(query='SELECT * FROM IowaIAlcoholImpairedDrivingDeaths.2015Deaths WHERE County IS NOT NULL')) \n \n #Use ParDo to filter PCollection and select counties where impaired deaths higher than 10%. 
\n formated_pcoll = deaths_pcoll | beam.ParDo(FormatTable())\n\n # Write PCollection to a log file\n formated_pcoll | 'Write to File 1' >> WriteToText('deaths15.txt')\n \n #Create table in BigQuery\n qualified_table_name = 'axial-module-216302:beam.ParDo_AlcoholImpairedDrivingDeaths15'\n table_schema = 'County:STRING,ImpDeaths:INTEGER,DrivingDeaths:INTEGER,ImpDeathsP:INTEGER,ZScore:FLOAT'\n\n formated_pcoll | 'Write to BigQuery' >> beam.io.Write(beam.io.BigQuerySink(qualified_table_name,\n schema=table_schema, \n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED, \n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE))\nlogging.getLogger().setLevel(logging.ERROR)\n","sub_path":"IowaProject/ParDo_AlcoholImpairedDrivingDeaths15_single.py","file_name":"ParDo_AlcoholImpairedDrivingDeaths15_single.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"31549434","text":"from roomTile import RoomTile\nfrom functools import partial\n\n# author: Brendan Jang\n\n# unique player actions in this room:\n# * take mirror\n# * put mirror on shrine\n# * put picture on shrine\n# * light candles\n# * look at shrine\n# * press button\n# * tie rope to shrine\n# * climb down rope\n# * go north\n\n# Features in this room:\n# * Shrine that the player must place items on and tie a rope to.\n# * Candles that player must light\n# * Hidden doorway at the north\n\n\nclass HiddenRoom(RoomTile):\n def __init__(self, x, y):\n super().__init__(x, y)\n self.x = x\n self.y = y\n self.name = 'HiddenRoom'\n self.long_desc = \\\n 'You walk out of the wardrobe on the other side into a strange ' \\\n 'and confusing room.\\n ' \\\n 'It seems as if all the little creaks that you heard before are ' \\\n 'gone, leaving you in complete and utter dead silence.\\n' \\\n 'There are no visible lights for you to turn on in this room. 
' \\\n 'However, there is a circular skylight in the center of the ' \\\n 'room.\\n' \\\n 'A large beam of moonlight is gleaming in, bringing a faint ' \\\n 'glow to the whole room.\\n' \\\n 'You walk around and notice that the walls are filled with ' \\\n 'strange images.\\n' \\\n 'There is a strange shrine in the middle of the room. Maybe ' \\\n 'you should take a better look.\\n' \\\n 'There is a cracked circular mirror in the corner of the room.\\n' \\\n 'There are no other ways in and out of this room except through ' \\\n 'the door that you first came in through in the south.\\n' \\\n 'You wonder what secrets this room holds...\\n'\n self.short_desc = \\\n '\\nYou are in secret hidden room with a strange ' \\\n 'shrine in the middle. The moonlight is shining on it. There is ' \\\n 'a door to the south.\\n'\n self.items = [self.mirror, self.candle, self.shrine]\n self.tied = False\n self.visited = False\n\n def room_description(self, player):\n if self.visited:\n print(self.short_desc)\n self.print_items_in_room()\n else:\n self.visited = True\n print(self.long_desc)\n self.print_items_in_room()\n\n def print_items_in_room(self):\n num_items = len(self.items)\n print(f\"\\nThere are {num_items} items in this room: \\n\")\n for x in range(len(self.items)):\n print(self.items[x])\n print(\"\\n\")\n\n def light(self, item, player):\n lit = False\n for i in player.inventory_list:\n if i.name == item.name:\n lit = True\n self.candle.lit = True\n self.shrine.candles_lit = True\n print(self.shrine.candles_lit_desc())\n if not lit:\n print('\\nYou have nothing to light it with.\\n')\n\n def item_to_shrine(self, item, player):\n for i in player.inventory_list:\n if i.name == item.name:\n if i.name == 'mirror':\n self.shrine.items.append(i)\n player.inventory_list.remove(i)\n print(self.shrine.mirror_placed_desc())\n if self.shrine.button_revealed is True:\n self.items.append(self.button)\n elif i.name == 'third painting':\n self.shrine.items.append(i)\n 
def item_from_shrine(self, item, player):
    """Move *item* from the shrine back into the player's inventory.

    The candle can never be taken.  BUGFIX: the original removed elements
    from ``self.shrine.items`` while iterating the live list (which skips
    the element after each removal) and printed the failure message once
    per non-matching shrine item (the ``else`` was attached to the loop
    body).  We now scan a copy and report failure only once, using the
    same ``found``-flag pattern as ``tie_rope``.
    """
    found = False
    for shrine_item in list(self.shrine.items):
        if shrine_item.name == item.name and item.name != 'candle':
            player.inventory_list.append(shrine_item)
            self.shrine.items.remove(shrine_item)
            found = True
            print(f'\n{shrine_item} was removed from the shrine and added to '
                  'your inventory.\n')
    if not found:
        print('\nYou can\'t do that.\n')

def press_button(self, shrine):
    """Press the shrine's button (if revealed), opening the hidden door."""
    if shrine.button_revealed is True:
        shrine.button_pressed = True
        print('\nYou press the button and a hidden door suddenly appears '
              'on the north side of this room.\n')
    else:
        print('\nThere is no button to press.\n')

def go_north(self, player):
    """Describe the hidden chute once the shrine button has been pressed."""
    if self.shrine.button_pressed is True:
        print('\nYou walk towards the hidden doorway. It looks like an '
              'incomplete chute of some sort. It seems like you '
              'will need to get down it somehow...\n')
    else:
        print('\nYou cannot do that!.\n')

def tie_rope(self, rope, player):
    """Tie the rope to the shrine if the player is carrying it.

    NOTE(review): ``self.tied`` is set True even when the button has not
    been pressed (the "nowhere to put the other end" branch), which lets
    ``climb`` succeed without the chute being open — preserved as-is;
    confirm whether that is intended game logic.
    """
    found = False
    for carried in player.inventory_list:
        if carried.name == rope.name and self.shrine.button_pressed is True:
            self.tied = True
            found = True
            print('\nYou tie one end of the rope to the shrine and throw '
                  'the other end down the chute.\n')
        elif carried.name == rope.name and self.shrine.button_pressed is False:
            self.tied = True
            found = True
            print('\nYou tie one end of the rope to the shrine but you '
                  'have nowhere to put the other end.\n')
    if not found:
        print('\nYou cannot do that yet!\n')

def climb(self, player):
    """Climb down the chute and win the game, provided the rope is tied."""
    if self.tied is True:
        print('\nYou steadily climb down the chute with your rope and '
              'it come out behind the house. You have finally escaped '
              'from the wretched mansion.\n'
              '\nYou have won the game! Thank you for completing our '
              'haunted mansion escape adventure!\n'
              '\nThe game will now exit.\n')
        player.won = True
        exit()
    else:
        print('\nYou might want to tie the rope to something before '
              'climbing down.\n')

@staticmethod
def no_message():
    """Generic "blocked direction" message."""
    print('\nYou can not go that way.\n')

@staticmethod
def deny():
    """Generic "action not allowed" message."""
    print('\nYou can not do that!\n')

def available_actions(self, player, command):
    """Dispatch *command* to this room's handlers, falling back to the base room.

    Handlers are bound with ``functools.partial`` so the dict maps command
    strings directly to zero-argument callables.
    """
    actions_dict = {
        'look at mirror': partial(self.look_item, self.mirror, player),
        'light candles': partial(self.light, self.matches, player),
        'light the candles': partial(self.light, self.matches, player),
        'take candle': self.deny,
        'take candles': self.deny,
        'take shrine': self.deny,
        'put mirror on shrine': partial(self.item_to_shrine, self.mirror,
                                        player),
        'put first painting on shrine': partial(self.item_to_shrine,
                                                self.first_pic, player),
        'put second painting on shrine': partial(self.item_to_shrine,
                                                 self.second_pic, player),
        'put third painting on shrine': partial(self.item_to_shrine,
                                                self.third_pic, player),
        'look at candles': partial(self.look_item, self.candle, player),
        'press button': partial(self.press_button, self.shrine),
        'look at shrine': self.shrine.description,
        'take mirror from shrine': partial(self.item_from_shrine,
                                           self.mirror, player),
        # NOTE(review): "take mirror off shrine" maps to item_to_shrine
        # (putting it on) rather than item_from_shrine — looks like a bug
        # in the command table; confirm intended behavior before changing.
        'take mirror off shrine': partial(self.item_to_shrine, self.mirror,
                                          player),
        'take third painting from shrine': partial(self.item_from_shrine,
                                                   self.third_pic, player),
        'take second painting from shrine': partial(self.item_from_shrine,
                                                    self.second_pic,
                                                    player),
        'take first painting from shrine': partial(self.item_from_shrine,
                                                   self.first_pic, player),
        'go west': self.no_message,
        'move west': self.no_message,
        'go north': partial(self.go_north, player),
        'tie rope to shrine': partial(self.tie_rope, self.rope, player),
        'climb down rope': partial(self.climb, player),
        'climb rope': partial(self.climb, player)
    }

    if command in actions_dict.keys():
        actions_dict[command]()
    else:
        super().available_actions(player, command)
# NOTE: forward() below is a method of the Net module whose __init__
# (conv/bn/fc layer definitions) lies outside this excerpt.
def forward(self, x):
    """Net.forward: five conv blocks (conv -> pool -> bn -> relu), then
    flatten and three fully connected layers producing 10 class logits."""
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.pool1(x)
    x = self.bn1(x)
    x = self.relu1(x)

    x = self.conv3(x)
    x = self.conv4(x)
    x = self.pool2(x)
    x = self.bn2(x)
    x = self.relu2(x)

    x = self.conv5(x)
    x = self.conv6(x)
    x = self.conv7(x)
    x = self.pool3(x)
    x = self.bn3(x)
    x = self.relu3(x)

    x = self.conv8(x)
    x = self.conv9(x)
    x = self.conv10(x)
    x = self.pool4(x)
    x = self.bn4(x)
    x = self.relu4(x)

    x = self.conv11(x)
    x = self.conv12(x)
    x = self.conv13(x)
    x = self.pool5(x)
    x = self.bn5(x)
    x = self.relu5(x)
    # Flatten: assumes the conv stack reduces a 32x32 CIFAR image to
    # 512 channels of 4x4 — TODO confirm against the padded pool layers.
    x = x.view(-1, 512*4*4)
    x = F.relu(self.fc14(x))
    x = self.drop1(x)
    x = F.relu(self.fc15(x))
    x = self.drop2(x)
    x = self.fc16(x)

    return x


# Train the neural network (comments translated from Chinese).
def train(network, dev):
    """Train *network* on `train_loader` (module global), checkpointing to
    'weights.tar' every epoch and resuming from it when present."""
    # Optimizer and loss; resume from checkpoint if one exists.
    path = 'weights.tar'
    init_epoch = 0
    optimizer = optim.Adam(network.parameters(), lr=0.0001)
    if os.path.exists(path) is not True:
        loss = nn.CrossEntropyLoss()
    else:  # read an interrupted run's checkpoint and continue from it
        checkpoint = torch.load(path)
        network.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        init_epoch = checkpoint['epoch']
        loss = checkpoint['loss']
    # Training loop.
    for epoch in range(init_epoch, 100):
        time_start = time.time()  # wall-clock time of this epoch
        running_loss = 0.0
        total = 0
        correct = 0
        for i, data in enumerate(train_loader, 0):
            # Read a mini-batch and move it to the target device.
            inputs, labels = data
            inputs, labels = inputs.to(dev), labels.to(dev)
            # Zero gradients.
            optimizer.zero_grad()
            # Forward pass + backward pass + loss.
            outputs = network(inputs)
            l = loss(outputs, labels)
            l.backward()
            optimizer.step()
            # Report progress.
            running_loss += l.item()
            if i % 500 == 499:  # print once every 500 mini-batches
                print('[%d, %5d] loss: %.4f' % (epoch, i, running_loss / 500))
                running_loss = 0.0
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        # Per-epoch training accuracy (message text is user-facing Chinese).
        print('神经网络对于 %d 张训练图像的精度为: %.3f %%' % (total, 100.0 * correct / total))
        total = 0
        correct = 0
        # NOTE(review): saves the global `net`'s weights, not the `network`
        # argument — identical in __main__ but fragile; confirm intended.
        torch.save({'epoch': epoch,
                    'model_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss
                    }, path)

        print('epoch %d 用时 %3f s' % (epoch, time.time() - time_start))

    print('训练结束')


# Evaluate the neural network.
def test(network, dev):
    """Report classification accuracy of *network* over `test_loader`
    (module global); gradients disabled."""
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(dev), labels.to(dev)
            outputs = network(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('神经网络对于10000张测试图像的测试精度如下: %.3f %%' % (100.0 * correct / total))
# hdiutil invocation prefix that attaches a raw disk image (macOS only).
_attach = r'hdiutil', r'attach', r'-imagekey', r'diskimage-class=CRawDiskImage'
# Parses one "device-node<whitespace>mount-point" line of `hdiutil attach` output.
_dev_mnt = regex(r'(\S+)\s+(.+)$')

class OSXSparseDir(object):
    """Context manager providing a temporary UDF filesystem mounted from a
    raw disk image on macOS; exposes the device node and mount point."""
    def __init__(self, suffix='', prefix=r'tmp', dir_=None, mib=1,
                 verbose=True):
        # mib: size of the backing image in MiB; verbose controls whether
        # newfs_udf / hdiutil output is shown or sent to the null device.
        self._suffix, self._prefix, self._dir = suffix, prefix, dir_
        self._mib, self._verbose = mib, verbose
    def __enter__(self):
        # Create the backing file with delete=False so it outlives the
        # NamedTemporaryFile context; it is removed explicitly in __exit__.
        kwargs = dict(suffix=self._suffix, prefix=self._prefix, dir=self._dir,
                      delete=False
                      )
        with NamedTemporaryFile(**kwargs) as tmp:
            # Sparse-extend the file to the requested size without writing data.
            tmp.truncate(1024 * 1024 * self._mib)
            dmg = tmp.name
        self.dmg = dmg
        if self._verbose:
            kwargs = {}
        else:
            kwargs = dict(stdout=nulldev)
        # Format the image as UDF, then attach (mount) it.
        check_call((r'newfs_udf', dmg), **kwargs)
        p = Popen(_attach + (dmg,), stdout=PIPE)
        # NOTE(review): on Python 3, p.stdout yields bytes while _dev_mnt is
        # a str pattern, so this match would raise TypeError — presumably
        # written for Python 2; confirm the target interpreter.
        m = _dev_mnt.match(next(p.stdout))
        self._dev, self._mnt = m.groups()
        return self
    @property
    def dev(self):
        # Device node (e.g. /dev/diskN) returned by hdiutil.
        return self._dev
    @property
    def tmp(self):
        # Mount point of the attached filesystem.
        return self._mnt
    def __exit__(self, *_):
        # Detach the image and delete the backing file.
        command = 'hdiutil', 'detach', self._dev
        if self._verbose:
            kwargs = {}
        else:
            kwargs = dict(stdout=nulldev)
        check_call(command, **kwargs)
        Path(self.dmg).unlink()
    @staticmethod
    def new(*args, **kwargs):
        # Non-`with` entry point: construct and immediately enter.
        return OSXSparseDir(*args, **kwargs).__enter__()
    # Matching non-`with` exit point (alias of __exit__).
    release = __exit__
def page(baseurl, totalnum, currentpage):
    """Compute pagination bounds for a list of *totalnum* items.

    Comments translated from Chinese.  BUGFIX: the original computed
    ``totalpage`` and ``start`` and then discarded them (no return);
    the values are now returned as ``(start, totalpage)``, which is
    backward compatible for callers that ignored the old ``None``.

    :param baseurl: base URL of the paginated view (reserved, unused here)
    :param totalnum: total number of items
    :param currentpage: 1-based current page number
    :return: (start, totalpage) — first page number to display in the
             pager, and the total number of pages
    """
    perpage = 20   # items shown per page
    pagenum = 11   # pages shown in the pager widget (reserved, unused here)
    # divmod gives the quotient and remainder in one call.
    totalpage, remainder = divmod(totalnum, perpage)
    if remainder:  # a partial page still needs its own page
        totalpage += 1
    start = 1
    # Keep the current page roughly centered once we are past page 6.
    if currentpage > 6:
        start = currentpage - 5
    return start, totalpage
class Basket:
    """One basket line item: product id, display name, unit price, quantity.

    NOTE: the ``id`` parameter shadows the builtin but is kept unchanged
    for interface compatibility with existing positional callers
    (``Basket(item[0], product.g_name, product.g_price, item[1])``).
    """

    def __init__(self, id, name, price, quantity):
        self.id = id              # product primary key
        self.name = name          # product display name
        self.price = price        # unit price
        self.quantity = quantity  # number of units in the basket

    def __repr__(self):
        # Added for debuggability; purely additive, no caller relies on it.
        return (f'{type(self).__name__}(id={self.id!r}, name={self.name!r}, '
                f'price={self.price!r}, quantity={self.quantity!r})')
def basket(request):
    """Render the current session basket as shop/basket.html."""
    products = get_basket(request)
    return render(request, 'shop/basket.html', {'products': products})


# NOTE: two earlier commented-out versions of product_list were removed
# here for clarity; see version control history if needed.


def product_list(request):
    """Paginated product listing (30 items per page).

    Also initializes the session 'basket' key to an empty list on first
    visit, so later views can append to it unconditionally.
    """
    basket = request.session.get('basket', [])  # create session basket on first visit
    request.session['basket'] = basket
    # NOTE(review): debug prints left in; consider logging instead.
    print('product_list basket is %s' % basket)
    print(type(basket))

    # Requested page number (None falls back to page 1 inside get_page).
    page_Index = request.GET.get('page')
    # Query all products.
    products = Goods.objects.all()
    # Split into pages of 30.
    p = Paginator(products, 30)
    # Fetch the requested page.
    server_page_list = p.get_page(page_Index)
    # Pass the page object (items + page metadata) to the template.
    return render(request, 'shop/product_list.html', {'server_page_list': server_page_list})


def product_detail(request, id):
    """Detail page for one product; 404 when the id does not exist."""
    product = get_object_or_404(Goods, id=id)
    return render(request, 'shop/product_detail.html', context={'product': product})
def purchase(request):
    """Checkout summary: show basket contents and the grand total.

    Requires an authenticated user; anonymous users are sent to login.
    """
    if request.user.is_authenticated:
        user = request.user
        products = get_basket(request)
        total = 0
        for product in products:
            total += product.price * product.quantity
        return render(request, 'shop/purchase.html', {'products': products, 'user': user, 'total': total})
    else:
        return redirect('login')


# Save the order, clear the basket and thank the customer.
def payment(request):
    """Persist the session basket as an Order plus Cart rows, then clear it.

    NOTE(review): assumes request.user is authenticated and has a related
    `customer` — confirm this view is only reachable after purchase().
    """
    products = get_basket(request)
    user = request.user
    order = Order.objects.create(customer=user.customer)
    order.refresh_from_db()
    for product in products:
        product_item = get_object_or_404(Goods, id=product.id)
        cart = Cart.objects.create(product=product_item, quantity=product.quantity, user_id=user.id)
        cart.refresh_from_db()
    # Deleting the key (rather than clearing the list in place) marks the
    # session as modified so the change is persisted.
    del request.session['basket']
    return redirect('order')


def order(request):
    """List all Cart rows belonging to the current user."""
    user_id = request.user.id
    currentUsers = Cart.objects.filter(user_id=user_id)
    # locals() hands currentUsers (and user_id) straight to the template.
    return render(request, 'shop/order.html', locals())
# Shared state for the worker processes: each worker reads its batch by
# index instead of receiving the (large) streamline data as an argument.
# NOTE(review): this relies on child processes inheriting module globals,
# i.e. the 'fork' start method — on Windows/'spawn' these would be None in
# the workers; confirm the supported platforms.
global FIBER_BATCHES
FIBER_BATCHES = None

# Worker function for multithreaded compression.
def compress_fibers_worker_shared_mem(idx):
    """Compress one batch of streamlines (selected by *idx*) in a child process."""
    # Function that runs in parallel must be on top level (not in
    # class/function), otherwise it can not be pickled.
    streamlines_chunk = FIBER_BATCHES[idx]  # shared memory; by using indices each worker accesses only his part
    # `compress_streamlines` here is dipy's module-level import, not the
    # FiberUtils method of the same name.
    result = compress_streamlines(streamlines_chunk, tol_error=COMPRESSION_ERROR_THRESHOLD)
    logging.debug('PID {}, DONE'.format(getpid()))
    return result


class FiberUtils:
    """Static helpers for compressing, converting and resampling streamlines."""

    @staticmethod
    def compress_streamlines(streamlines, error_threshold=0.1):
        """Compress *streamlines* in parallel across all CPU cores.

        Returns the input unchanged when there are too few streamlines to
        split into per-process chunks.
        """
        nr_processes = psutil.cpu_count()
        number_streamlines = len(streamlines)

        # Never use more processes than streamlines; clamp to at least 1.
        if nr_processes >= number_streamlines:
            nr_processes = number_streamlines - 1
        if nr_processes < 1:
            nr_processes = 1

        chunk_size = int(number_streamlines / nr_processes)

        if chunk_size < 1:
            # Nothing to distribute — return the input as-is.
            return streamlines
        fiber_batches = list(Utils.chunks(streamlines, chunk_size))

        # Publish the work through module globals (see note above the worker).
        global COMPRESSION_ERROR_THRESHOLD
        global FIBER_BATCHES
        COMPRESSION_ERROR_THRESHOLD = error_threshold
        FIBER_BATCHES = fiber_batches

        pool = multiprocessing.Pool(processes=nr_processes)

        # Do not pass data in (doubles amount of memory needed), but only idx
        # of shared memory (needs only as much memory as single thread version;
        # only main thread needs memory, others almost 0).
        # Shared memory version also faster (around 20-30%?).
        # Needed otherwise memory problems when processing the raw tracking
        # output (on disk >10GB and in memory >20GB).
        result = pool.map(compress_fibers_worker_shared_mem, range(0, len(fiber_batches)))

        streamlines_c = Utils.flatten(result)
        return streamlines_c

    @staticmethod
    def save_streamlines_as_trk(filename, streamlines, affine):
        '''
        Write streamlines to a TrackVis .trk file in RAS+ mm space.

        streamlines: list of 2D ndarrays list(ndarray(N,3))
        affine: affine of reference img (e.g. brainmask)
        '''
        affine = np.abs(affine)  # entries must be positive for the trk header
        # Offset not needed (already part of streamline coordinates?).
        affine[0, 3] = 0
        affine[1, 3] = 0
        affine[2, 3] = 0
        # Make a trackvis header so we can save streamlines.
        trackvis_header = nib.trackvis.empty_header()
        trackvis_header['voxel_order'] = 'RAS'
        nib.trackvis.aff_to_hdr(affine, trackvis_header, pos_vox=False, set_order=False)
        # TrackVis expects (points, scalars, properties) triples per streamline.
        streamlines_trk_format = [(sl, None, None) for sl in streamlines]
        nib.trackvis.write(filename, streamlines_trk_format, trackvis_header, points_space="rasmm")

    @staticmethod
    def convert_tck_to_trk(filename_in, filename_out, reference_affine, compress_err_thr=0.1, smooth=None):
        '''
        Convert tck file to trk file and compress.

        :param filename_in: path of the .tck file to read
        :param filename_out: path of the .trk file to write
        :param reference_affine: affine of the reference image
        :param compress_err_thr: compress fibers if setting error threshold here (default: 0.1mm)
        :param smooth: smooth streamlines (default: None)
                       10: slight smoothing, 100: very smooth from beginning to end
        :return: None (writes filename_out)
        '''
        from dipy.tracking.metrics import spline

        streamlines = nib.streamlines.load(filename_in).streamlines  # load fibers (tck)

        if smooth is not None:
            streamlines_smooth = []
            for sl in streamlines:
                streamlines_smooth.append(spline(sl, s=smooth))
            streamlines = streamlines_smooth

        # Compressing is also good to remove checkerboard artefacts from
        # tracking on peaks.
        if compress_err_thr is not None:
            streamlines = FiberUtils.compress_streamlines(streamlines, compress_err_thr)
        FiberUtils.save_streamlines_as_trk(filename_out, streamlines, reference_affine)

    @staticmethod
    def resample_fibers(streamlines, nb_points=12):
        """Resample every streamline to exactly *nb_points* points."""
        streamlines_new = []
        for sl in streamlines:
            feature = ResampleFeature(nb_points=nb_points)
            streamlines_new.append(feature.extract(sl))
        return streamlines_new
def index():
    """
    Forwards an index request to the recents page.
    """
    redirect(URL('recents'))
    return dict()

# Columns selected by the listing/search actions below.
fields = [db.lioli_main.id, db.lioli_main.unique_id, db.lioli_main.body, db.lioli_main.loves, db.lioli_main.leaves, db.lioli_main.age, db.lioli_main.gender]

def recents():
    """
    Shows accepted submissions, newest first, 5 per page.

    NOTE(review): total_pages uses `/` — on Python 2 this is integer
    division; on Python 3 it yields a float. Confirm the template's
    expectation.
    """
    page = request.vars.page or 0
    items_per_page = 5
    total_pages = db(db.lioli_main.accepted == 1).count() / items_per_page
    page_min = int(page) * items_per_page
    page_max = page_min + items_per_page
    where_clause = (db.lioli_main.accepted == 1)
    rows = db(where_clause).select(limitby=(page_min, page_max), orderby=~db.lioli_main.id, *fields)
    return dict(rows=rows, total_pages=total_pages)

def random():
    """
    Shows a set of 10 accepted submissions for a user to vote on.

    NOTE(review): orderby='' looks like a stripped '<random>' literal
    (web2py's random-order idiom); as written the order is unspecified.
    """
    where_clause = (db.lioli_main.accepted == 1)
    rows = db(where_clause).select(limitby=(0, 10), orderby='', *fields)
    return dict(rows=rows)

def search():
    """
    Uses ajax to bring up a keyword search; results render into #target
    via bg_find().
    """
    return dict(form=FORM(INPUT(_id='keyword', _name='keyword',
                  _onkeyup="ajax('bg_find', ['keyword'], 'target');")),
                target_div=DIV(_id='target'))

def show():
    """
    Shows one accepted submission (looked up by unique_id) in detail.
    """
    u_id = request.args(0) or redirect(URL('search'))
    row = db((db.lioli_main.accepted==1) & (db.lioli_main.unique_id==u_id)).select().first()
    return dict(row=row)

def submit():
    """
    Gets user submissions (body, age, gender) and enters them into the
    database.

    NOTE(review): insert_into_my_db is not defined in this controller —
    presumably provided by a model file; verify.
    """
    message = 'Please input a submission'
    form = SQLFORM.factory(
        Field('body', 'text', requires=IS_NOT_EMPTY()),
        Field('age', requires= IS_IN_SET((range(12, 125))), widget = SQLFORM.widgets.options.widget),
        Field('gender', requires = IS_IN_SET((('F', 'M'))), widget = SQLFORM.widgets.radio.widget))
    if form.process().accepted:
        unique_id = insert_into_my_db(form.vars.body, form.vars.age, form.vars.gender)
        message = "Thank you for your submission. It is now awaiting moderator approval. Please refer to %s in the future to see how people have voted on it" % (unique_id)
        response.flash = 'Thank you.'
    return dict(form=form, message=message)

def about():
    """
    Leads to default/about.html which gives a description of the
    application.
    """
    return dict()

def bg_find():
    """
    Function called by ajax to display search results (body or unique_id
    LIKE the keyword, accepted submissions only, max 10).
    """
    pattern = '%' + request.vars.keyword.lower() + '%'
    where_clause = ((db.lioli_main.body.lower().like(pattern)) |(db.lioli_main.unique_id.like(pattern))) & (db.lioli_main.accepted==1)
    pages = db(where_clause).select(orderby=(''), limitby=(0,10))
    items = [DIV(A(row.unique_id, _href=URL('show', args=row.unique_id)), P(row.body, _class="search_preview")) for row in pages]
    return UL(*items).xml()


## Functions for voting:

def add_loves():
    """
    Called by AJAX; increments the 'loves' counter by one and returns the
    new count as a string.
    """
    row = db(db.lioli_main.id == request.vars.id).select().first()
    new_loves = row.loves + 1
    row.update_record(loves=new_loves)
    return str(new_loves)

def add_leaves():
    """
    Called by AJAX; increments the 'leaves' counter by one and returns the
    new count as a string.
    """
    row = db(db.lioli_main.id == request.vars.id).select().first()
    new_leaves = row.leaves + 1
    row.update_record(leaves=new_leaves)
    return str(new_leaves)
# --- kNN.py: 1-nearest-neighbour classification with TensorFlow 1.x ---
# NOTE(review): uses the TF1 placeholder/Session API; will not run under
# TF2 without tf.compat.v1 — confirm the intended TensorFlow version.

ccf_train_data = "train_dataset_trunc.csv"
ccf_test_data = "test_dataset_trunc.csv"

# Datasets live in a 'datasets' directory next to this script.
dataset_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'datasets'))
print(dataset_dir)

ccf_train_filepath = os.path.join(dataset_dir, ccf_train_data)
ccf_test_filepath = os.path.join(dataset_dir, ccf_test_data)

def load_data(filepath):
    """Read a CSV (header skipped); last column is the label, the rest are
    features. Returns (features ndarray, labels ndarray)."""
    from numpy import genfromtxt

    csv_data = genfromtxt(filepath, delimiter=",", skip_header=1)
    data = []
    labels = []

    for d in csv_data:
        data.append(d[:-1])
        labels.append(d[-1])

    return np.array(data), np.array(labels)

train_dataset, train_labels = load_data(ccf_train_filepath)
test_dataset, test_labels = load_data(ccf_test_filepath)

# Placeholders: the whole training set and a single 28-feature test sample.
train_pl = tf.placeholder("float", [None, 28])
test_pl = tf.placeholder("float", [28])

# L1 (Manhattan) distance from the test sample to every training sample.
# NOTE(review): tf.add(train, tf.negative(test)) is train - test; abs+sum
# along axis 1 gives the per-row L1 distance.
knn_prediction = tf.reduce_sum(tf.abs(tf.add(train_pl, tf.negative(test_pl))), axis=1)

# Index of the nearest training sample (1-NN).
pred = tf.argmin(knn_prediction, 0)

with tf.Session() as tf_session:
    missed = 0

    for i in range(len(test_dataset)):
        knn_index = tf_session.run(pred, feed_dict={train_pl: train_dataset, test_pl: test_dataset[i]})

        print("Predicted class {} -- True class {}".format(train_labels[knn_index], test_labels[i]))

        if train_labels[knn_index] != test_labels[i]:
            missed += 1

    # Dump the graph for TensorBoard inspection.
    tf.summary.FileWriter("../samples/article/logs", tf_session.graph)

print("Missed: {} -- Total: {}".format(missed, len(test_dataset)))


# --- typecheck.py: mypy type-checking cell magic for jupyter/ipython ---

@register_cell_magic
def typecheck(line, cell):
    """Run the following cell though mypy.

    Any parameters that would normally be passed to the mypy CLI can be passed on the
    first line, with the exception of the -c flag we use to pass the code from the cell
    we want to execute. For example:
    ```
    %%typecheck --ignore-missing-imports
    ...
    ```
    mypy stdout and stderr will print prior to output of cell. If there are no conflicts,
    nothing will be printed by mypy.
    """
    from IPython import get_ipython
    from mypy import api

    # Inserting a newline at the beginning of the cell ensures mypy's
    # output matches the line numbers in jupyter.
    cell = '\n' + cell

    mypy_result = api.run(['-c', cell] + line.split())

    if mypy_result[0]:  # print mypy stdout
        print(mypy_result[0])

    if mypy_result[1]:  # print mypy stderr
        print(mypy_result[1])

    # Execute the cell regardless of the mypy verdict.
    shell = get_ipython()
    shell.run_cell(cell)
def __init__(self, sc, ds, ds_reader, mol_db, fdr, ds_config):\n super(MSMBasicSearch, self).__init__(sc, ds, ds_reader, mol_db, fdr, ds_config)\n self.metrics = OrderedDict([('chaos', 0), ('spatial', 0), ('spectral', 0),\n ('total_iso_ints', [0, 0, 0, 0]),\n ('min_iso_ints', [0, 0, 0, 0]),\n ('max_iso_ints', [0, 0, 0, 0])])\n self.max_fdr = 0.5\n\n def search(self):\n \"\"\" Search for molecules in the dataset\n\n Returns\n -------\n : tuple\n (ion metrics DataFrame, ion image pyspark.RDD)\n \"\"\"\n logger.info('Running molecule search')\n ion_images = compute_sf_images(self._sc, self._ds_reader, self._mol_db.get_ion_peak_df(),\n self.ds_config['image_generation']['ppm'])\n all_sf_metrics_df = self.calc_metrics(ion_images)\n sf_metrics_fdr_df = self.estimate_fdr(all_sf_metrics_df)\n sf_metrics_fdr_df = self.filter_sf_metrics(sf_metrics_fdr_df)\n ion_images = self.filter_sf_images(ion_images, sf_metrics_fdr_df)\n\n return sf_metrics_fdr_df, ion_images\n\n def calc_metrics(self, sf_images):\n all_sf_metrics_df = sf_image_metrics(sf_images, self.metrics, self._ds, self._ds_reader,\n self._mol_db, self._sc)\n return all_sf_metrics_df\n\n def estimate_fdr(self, all_sf_metrics_df):\n sf_msm_df = self._mol_db.get_ion_sorted_df()\n sf_msm_df = sf_msm_df.join(all_sf_metrics_df.msm).fillna(0)\n sf_adduct_fdr = self._fdr.estimate_fdr(sf_msm_df)\n columns = list(self.metrics.keys()) + ['msm', 'fdr']\n sf_metrics_fdr_df = all_sf_metrics_df.join(sf_adduct_fdr, how='inner')[columns]\n return sf_metrics_fdr_df\n\n def filter_sf_metrics(self, sf_metrics_df):\n return sf_metrics_df[sf_metrics_df.fdr <= self.max_fdr]\n","sub_path":"sm/engine/msm_basic/msm_basic_search.py","file_name":"msm_basic_search.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"244259600","text":"import logging\nimport os\n\nfrom django.conf import settings\n\nfrom hiicart.models import Payment\nfrom 
hiicart.utils import call_func\n\nclass GatewayError(Exception):\n pass\n\n# Should these have a common base class? Seems excessive vs. code duplication.\n\nclass _SharedBase(object):\n \"\"\"Shared base class between IPNs and Gateways that accommodates their significant shared functionality.\"\"\"\n\n def __init__(self, name, default_settings={}):\n self.name = name.upper()\n self.log = logging.getLogger(\"hiicart.gateway.\" + self.name)\n if self.name not in settings.HIICART_SETTINGS:\n raise GatewayError(\"Settings not defined for %s\" % self.name)\n self.settings = default_settings.copy()\n self.settings.update(settings.HIICART_SETTINGS[self.name])\n # Copy down some settings, if not overridden locally\n if \"LIVE\" not in self.settings: # app-level setting with gateway-level override\n self.settings[\"LIVE\"] = settings.HIICART_SETTINGS[\"LIVE\"]\n if \"EXPIRATION_GRACE_PERIOD\" not in self.settings and \"EXPIRATION_GRACE_PERIOD\" in settings.HIICART_SETTINGS:\n self.settings[\"EXPIRATION_GRACE_PERIOD\"] = settings.HIICART_SETTINGS[\"EXPIRATION_GRACE_PERIOD\"]\n if \"CHARGE_RECURRING_GRACE_PERIOD\" not in self.settings and \"CHARGE_RECURRING_GRACE_PERIOD\" in settings.HIICART_SETTINGS:\n self.settings[\"CHARGE_RECURRING_GRACE_PERIOD\"] = settings.HIICART_SETTINGS[\"CHARGE_RECURRING_GRACE_PERIOD\"]\n\n def _create_payment(self, cart, amount, transaction_id, state):\n \"\"\"Record a payment.\"\"\"\n pmnt = Payment(amount=amount, gateway=self.name, cart=cart, \n state=state, transaction_id=transaction_id)\n pmnt.save()\n return pmnt\n\n def _update_with_cart_settings(self, hiicart):\n \"\"\"Pull cart-specific settings and update self.settings with them.\n We need an DI facility to get cart-specific settings in. 
This way,\n we're able to have different carts use different google accounts.\"\"\"\n if not settings.HIICART_SETTINGS.get(\"CART_SETTINGS_FN\", False):\n return\n s = call_func(settings.HIICART_SETTINGS[\"CART_SETTINGS_FN\"], hiicart)\n self.settings.update(s)\n\n def _require_files(self, filenames):\n \"\"\"Verify a file exists on disk. Usually use for key files.\"\"\"\n errors = []\n for filename in filenames:\n if not os.path.isfile(filename):\n errors.append(filename)\n if len(errors) > 0:\n raise GatewayError(\"The following files are required for %s: %s\" % (\n self.name, \", \".join(errors)))\n\n def _require_settings(self, settings):\n \"\"\"Verify that certain settings exist, raising an error if not.\"\"\"\n errors = []\n for setting in settings:\n if setting not in self.settings:\n errors.append(setting)\n if len(errors) > 0:\n raise GatewayError(\"The following settings are required for %s: %s\" % (\n self.name, \", \".join(errors)))\n\n\nclass IPNBase(_SharedBase):\n \"\"\"\n Base class for IPN handlers.\n\n Provides shared functionality among IPN implementations\n \"\"\"\n pass # All covered by _SharedBase for now\n\n\nclass PaymentGatewayBase(_SharedBase):\n \"\"\"\n Base class for all payment gateways.\n\n Provides a common interface for working with all payment gateways.\n \"\"\"\n def cancel_recurring(self, cart):\n \"\"\"Cancel recurring items with gateway. Returns a CancelResult.\"\"\"\n raise NotImplementedError\n\n def charge_recurring(self, cart, grace_period=None):\n \"\"\"\n Charge recurring purchases if necessary.\n \n Charges recurring items with the gateway, if possible. An optional\n grace period can be provided to avoid premature charging. 
This is\n provided since the gateway might be in another timezone, causing\n a mismatch between when an account can be charged.\n \"\"\"\n raise NotImplementedError\n\n def sanitize_clone(self, cart):\n \"\"\"Remove any gateway-specific changes to a cloned cart.\"\"\"\n raise NotImplementedError\n\n def submit(self, cart, collect_address=False):\n \"\"\"Submit a cart to the gateway. Returns a SubmitResult.\"\"\"\n raise NotImplementedError\n\n\nclass CancelResult(object):\n \"\"\"\n The result of a cancel operation.\n Currently supported result types are url and None.\n \n url: The user should to be redirected to result.url.\n None: type is set to None if no further action is required.\n \"\"\"\n def __init__(self, type, url=None):\n if type is not None and type != \"url\":\n raise GatewayError(\"Unknown return type %s\" % type)\n self.type = type\n self.url = url\n\n\nclass SubmitResult(object):\n \"\"\"\n The result of a submit operation.\n Currently supported result types are url, form, and None.\n \n url: The user should to be redirected to result.url.\n form: form_action is the target url; form_fields is a dict of form data.\n None: type is set to None if no further action is required.\n \"\"\"\n def __init__(self, type, url=None, form_data=None):\n self.type = type\n if url and form_data:\n raise GatewayError(\"Gateway returned url AND form data.\")\n self.url = url\n if type == \"form\":\n self.form_action = form_data[\"action\"]\n self.form_fields = form_data[\"fields\"]\n else:\n self.form_action = None\n self.form_fields = None\n","sub_path":"hiicart/gateway/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"510663065","text":"import json\nimport requests\nfrom pprint import pprint\nfrom yaml import load\nimport time\nfrom datetime import datetime, timedelta\nimport os\n\n\n\"\"\"\nСкрипт каллибри собирает статистику заданному 
порогу дат - не больше, не меньше.\n\n\"\"\"\n\n\nclass One_day_lead_collector:\n \n \n def __init__ (self, date, client):\n self.date = date\n self.client = client\n f = open(os.path.join('clients/'+client, 'config.yaml') , 'r')\n self.configuration = load(f)\n \n def work (self):\n \n date = self.date\n client = self.client \n configuration = self.configuration\n \n email = configuration['callibri_email']\n token = configuration['callibri_token']\n site_ids = configuration['callibri_id']\n startdate = date\n enddate = date\n\n #сюда собираем данные\n self.leads = []\n \n\n for site_id in site_ids:\n\n r = requests.get('https://api.callibri.ru/site_get_statistics?user_email={}&user_token={}&site_id={}&date1={}&date2={}'\\\n .format(email, token, site_id, startdate, enddate)).json()\n\n try:\n jdata = r['channels_statistics'][0]['calls']\n\n for lead in jdata:\n user_id = lead['id']\n channel_id = lead['channel_id']\n phone = lead['phone']\n region = lead['region']\n duration = lead['duration']\n traffictype = lead['traffic_type']\n landingpage = lead['landing_page']\n download = lead['link_download']\n try:\n utm_term = lead['utm_term']\n except:\n None\n\n\n my_lead = [user_id, channel_id, phone, region, traffictype, landingpage, duration, download ]\n self.leads += [my_lead]\n except:\n print ('Код ошибки - {}'.format(r['code']))\n if r['code'] == 200:\n print ('Запрос нормальный. Неизвестная ошибка. Если channels_statistics пуст - все хорошо.')\n print (r)","sub_path":"onedaycollect.py","file_name":"onedaycollect.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"165952873","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n__author__ = 'c8d8z8@gmail.com'\n\"\"\"pi6x URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\n#import event54.views\n#import event.views\nimport pi6x.views\n#import searchbysolr_app.views\n#import jquery_plugins_demo.views\n#admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n #url(r'^hello/$', pi6x.views.hello),\n # app-polls\n #url(r'^polls/', include('polls.urls', namespace='polls')),\n\n # account manage page\n #url('^app_list$', pi6x.views.app_list),\n #url('^auth/'),\n\n # jquery plugins demo\n #url(r'jquery_plugins_demo',)\n\n\n\n # 程序入口 的 登陆\n url(r'^$', 'pi6x.views.index', name='index'),\n #url(r'^login$',pi6x.views.login),\n #url(r'^logout$',signout),\n\n # import apps urls\n url(r'^solr/', include('searchbysolr_app.urls')),\n #url(r'^admin/', include(admin.site.urls)),\n url(r'^weibo/', include('weibo.urls')),\n #url(r'^event54/$', event54.views.index),\n #url(r'^event54/authorize', event54.views.authorize),\n #event\n #url(r'^event/$', event.views.index),\n #url(r'^event/create$', event.views.create),\n\n #url(r'^hello/$', hello),\n #url(r'^time/$', 'firstsite.view.current_datetime', name='current_datetime'),\n #url(r'^time/(\\d{1,2})/$', ctime),\n #url(r'^time/plus/(\\d{1,2})/$','firstsite.view.hours_add', name='time-plus'),\n #url(r'^person/$','firstsite.view.person', name='person'),\n #url(r'^.*$', 'firstsite.view.error', name='error'),\n\n # api\n #url('^api/auth/login$', auth_login),\n #url('^api/auth/callback$', auth_callback),\n #url('^api/weibo/home$', weibo_statuses_home_timeline),\n #url('^api/weibo/other/kownperson$', weibo_other_kownperson),\n 
#url('^api/weibo/post$', weibo_post),\n\n url(r'^sysctl$',pi6x.views.sysctl),\n\n ]\n","sub_path":"webapps/pi6x/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"160303258","text":"import math\nimport random\nimport pdb\n#export MPLBACKEND=TKAgg\nimport matplotlib\n#matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm_notebook\n#from celluloid import Camera\nimport phyre\nimport csv\nimport os\nimport imageio\n\n\n\ndef data_pic(tasks,simulator):\n for task_index in range(len(tasks)):\n task_id = simulator.task_ids[task_index]\n imgpath=os.path.join('datasets_pic',task_id)\n os.makedirs(imgpath)\n #pdb.set_trace()\n #TODO: save the initial scene\n initial_scene = simulator.initial_scenes[task_index]\n #plt.imshow(phyre.observations_to_float_rgb(initial_scene))\n plt.imshow((initial_scene))\n print(initial_scene.shape)\n #plt.title(f'Task {task_index}');\n plt.savefig('{}/ini{}.png'.format(imgpath,task_index))\n pdb.set_trace()\n # initial_featurized_objects = simulator.initial_featurized_objects[task_index]\n # print('Initial featurized objects shape=%s dtype=%s' % (initial_featurized_objects.features.shape, initial_featurized_objects.features.dtype))\n # bar=np.array([x for x in initial_featurized_objects.features[0] if x[5]==1])\n # bar=bar.reshape(1,bar.shape[0],-1)\n # ball=np.array([x for x in initial_featurized_objects.features[0] if x[4]==1])\n # ball=ball.reshape(1,ball.shape[0],-1)\n\n # bar=[x for x in initial_featurized_objects.features[0] if x[5]==1]\n # bar=bar.reshape(1,bar.shape[0],-1)\n # plt.imshow(phyre.observations_to_float_rgb(bar))\n # plt.savefig('ini_bar.png')\n # bar=[x for x in initial_featurized_objects.features[0] if x[5]==1]\n # bar=bar.reshape(1,bar.shape[0],-1)\n # plt.imshow(phyre.observations_to_float_rgb(bar))\n # plt.savefig('ini_bar.png')\n\n 
np.set_printoptions(precision=3)\n #print(initial_featurized_objects.features)\n\n actions = simulator.build_discrete_action_space(max_actions=1000)#escape the invalid situation\n #print('A random action:', actions[0])\n #print(actions)\n #pdb.set_trace()\n # The simulator takes an index into simulator.task_ids.\n #action = random.choice(actions)\n # Set need_images=False and need_featurized_objects=False to speed up simulation, when only statuses are needed.\n num=0\n try:\n for i in range(len(actions)):\n action=actions[i]\n simulation = simulator.simulate_action(task_index, action, need_images=True, need_featurized_objects=True,stride=1)\n if(simulation.status==0):continue\n img_ac=os.imgpath.join(imgpath,'act{}'.format(num))\n num+=1\n os.mkdir(img_ac) \n for j,image in enumerate(simulation.images):\n img = phyre.observations_to_float_rgb(image)\n plt.imsave('{}/img{}.jpg'.format(img_ac,j),img)\n #plt.savefig('{}/act{}_{}.jpg'.format(imgpath,action,j))\n #if(simulation.status!=0): break\n # May call is_* methods on the status to check the status.\n except(TypeError):\n pdb.set_trace()\ndef datasets(tasks,simulator):\n for task_index in range(len(tasks)):\n task_id = simulator.task_ids[task_index]\n task_id=list(task_id)\n for i in range(len(task_id)):\n if task_id[i] == ':':\n task_id[i] = '.'\n task_id=''.join(task_id)\n \n\n imgpath=os.path.join('datasets_img',task_id)\n os.makedirs(imgpath)\n vecpath=os.path.join('datasets_vec',task_id)\n os.makedirs(vecpath)\n gifpath=os.path.join('datasets_gif',task_id)\n os.makedirs(gifpath)\n actions = simulator.build_discrete_action_space(max_actions=1000)\n num=0\n imgs=[]\n try:\n for i in range(len(actions)):\n imgs=[]\n action=actions[i]\n simulation = simulator.simulate_action(task_index, action, need_images=True, need_featurized_objects=True,stride=1)\n if(simulation.status==0):continue\n img_ac=os.path.join(imgpath,'act{}'.format(num))\n vec_ac=os.path.join(vecpath,'act{}'.format(num))\n 
gif_ac=os.path.join(gifpath,'act{}'.format(num))\n num+=1\n os.mkdir(img_ac) \n os.mkdir(vec_ac)\n os.mkdir(gif_ac)\n fo=simulation.featurized_objects\n #phyre.save_observation_series_to_gif(simulation.images,'{}/gif{}.gif'.format(gif_ac,i))\n for j,image in enumerate(simulation.images):\n img = phyre.observations_to_float_rgb(image)\n plt.imsave('{}/img{}.png'.format(img_ac,j),img)\n imgs.append(imageio.imread('{}/img{}.png'.format(img_ac,j)))\n \n filename = '{}/vec{}.csv'.format(vec_ac,j)\n with open (filename,'w') as file_object:\n writer=csv.DictWriter(file_object,['x','y','angle','diameter','shape','color'])\n writer.writeheader()\n for k in range(fo.num_objects):\n \n writer.writerow({'x':fo.states[j][k][0],'y':fo.states[j][k][1],'angle':fo.states[j][k][2],'diameter':fo.diameters[k],'shape':fo.shapes[k],'color':fo.colors[k]})\n imageio.mimsave('{}/gif{}.gif'.format(gif_ac,i), imgs, 'GIF', duration = 0.1) \n #plt.savefig('{}/act{}_{}.jpg'.format(imgpath,action,j))\n #if(simulation.status!=0): break\n # May call is_* methods on the status to; check the status.\n except(TypeError):\n pdb.set_trace()\n\n\n # filename = 'featurized_objects.csv'\n # with open (filename,'w') as file_object:\n # writer=csv.writer(file_object)\n # writer.writerow([simulation.featurized_objects.num_objects])\n # writer.writerow([simulation.featurized_objects.num_scene_objects])\n # writer.writerow([simulation.featurized_objects.num_user_inputs])\n # writer.writerow([simulation.featurized_objects.colors])\n # writer.writerow([simulation.featurized_objects.diameters])\n # writer.writerow([simulation.featurized_objects.states[0]])\n # for i in range((simulation.featurized_objects.features).shape[0]):\n # for j in range((simulation.featurized_objects.features).shape[1]):\n # writer.writerow(simulation.featurized_objects.features[i][j])\n # print('Number of observations returned by simulator:', len(simulation.images))\n # #print(len(simulation.featurized_objects))\n # 
print(simulation.featurized_objects.shapes)\n # print(simulation.featurized_objects.diameters)\n # print(simulation.featurized_objects.states.shape)\n # print(simulation.featurized_objects.states.shape[0])\n # pdb.set_trace()\n # num_across = 5\n # height = int(math.ceil(len(simulation.images) / num_across))\n # fig, axs = plt.subplots(height, num_across, figsize=(20, 15))\n # fig.tight_layout()\n # plt.subplots_adjust(hspace=0.2, wspace=0.2)\ndef mk_gif(simulation):\n fig=plt.figure()\n camera=Camera(fig)\n for i,image in enumerate(simulation.images):\n img = phyre.observations_to_float_rgb(image)\n plt.imshow(img)\n camera.snap()\n\n animation = camera.animate()\n animation.save('pic/try{}.gif'.format(task_index),writer='pillow')\n # We can visualize the simulation at each timestep.\n # for i, (ax, image) in enumerate(zip(axs.flatten(), simulation.images)):\n # # Convert the simulation observation to images.\n # img = phyre.observations_to_float_rgb(image)\n # ax.imshow(img)\n # ax.title.set_text(f'Timestep {i}')\n # ax.get_xaxis().set_ticks([])\n # ax.get_yaxis().set_ticks([])\n # ax.figure.savefig('{}.png'.format(i))\n\nrandom.seed(0)\neval_setup = 'ball_cross_template'\n# fold_id = 0 # For simplicity, we will just use one fold for evaluation.\n# train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold_id)\n# print(*dev_tasks, sep=', ')\n# pdb.set_trace()\naction_tier = phyre.eval_setup_to_action_tier(eval_setup)\n# print('Action tier for', eval_setup, 'is', action_tier)\n# tasks = dev_tasks[:10]\n#tasks=['00123:097','00013:020','00016:194','00021:024','00111:023','00112:007']\ntasks=['00000:000']\n\n# Create the simulator from the tasks and tier.\nsimulator = phyre.initialize_simulator(tasks, action_tier)\ntask_index = 0 # Note, this is a integer index of task within 
simulator.task_ids.\ndatasets(tasks,simulator)","sub_path":"try_phyre.py","file_name":"try_phyre.py","file_ext":"py","file_size_in_byte":9363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"649337682","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n__author__ = 'IT小叮当'\n__time__ = '2021-03-14 16:11'\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\n\n#支持中文显示\nfrom pylab import *\nmpl.rcParams['font.sans-serif'] = ['SimHei']\n\ndf1 = pd.read_excel(\"G:\\\\AdaSGD-D\\\\resnet18_cmopare_cifar10\\\\resnet18_cifar10_adam.xlsx\")\ndf2 = pd.read_excel(\"G:\\\\AdaSGD-D\\\\resnet18_cmopare_cifar10\\\\resnet18_cifar10_sgd.xlsx\")\ndf3 = pd.read_excel(\"G:\\\\AdaSGD-D\\\\resnet18_cmopare_cifar10\\\\resnet18_cifar10_pid.xlsx\")\n\ndef plt_curve(objective):\n\n plt.plot(df1[objective],label='Adam',linewidth=1.5,c='b',ls='-')\n plt.plot(df2[objective],label='SGD-M',linewidth=1.5,c='r',ls='-.')\n plt.plot(df3[objective],label='AdaSGD-D',linewidth=1.5,c='g',ls='--')\n\n # plt.plot(df[\"blue1\"],df[\"blue2\"],label='较小学习率',linewidth=3,color='b',ls='-.')\n # plt.plot(df[\"red1\"],df[\"red2\"],label='适当学习率',linewidth=3,color='r')\n # plt.plot(df[\"black1\"],df[\"black2\"],label='较大学习率',linewidth=3,color='k',ls=':')\n #plt.xlabel(\"Epoch\")\n plt.xlabel(\"轮数\")\n if objective == \"Train Loss\":\n yname = \"训练损失\"\n plt.ylabel(yname)\n if objective == \"Valid Loss\":\n yname = \"验证损失\"\n plt.ylabel(yname)\n if objective == \"Train Acc\":\n yname = \"训练准确度\"\n plt.ylabel(yname + \"(%)\")\n if objective == \"Valid Acc\":\n yname = \"验证准确度\"\n plt.ylabel(yname + \"(%)\")\n #plt.ylabel(objective)\n #plt.xlim(0, 20)\n plt.xticks(range(0,101,5))\n plt.legend()\n # plt.grid()\n #plt.show(bbox_inches='tight')\n plt.savefig('G:\\\\AdaSGD-D\\\\compare_cifar10\\\\resnet18\\\\'+ objective+'.jpg',dpi=300,bbox_inches='tight')\n clf()\n time.sleep(3)\n print('cifar10'+objective+'绘制完成!')\n\nplt_curve('Train 
Loss')\n\nplt_curve('Valid Loss')\n\nplt_curve('Train Acc')\n\nplt_curve('Valid Acc')\n\n","sub_path":"plt_resnet18_compare_cifar10.py","file_name":"plt_resnet18_compare_cifar10.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"242720695","text":"\"\"\"Python program to chaeck nature of roots of a quadratic equation \"\"\"\nimport math\na = int(input(\"Enter the value of a in quadratic equation : \"))\nb = int(input(\"Enter the value of b in quadratic equation : \"))\nc = int(input(\"Enter the value of c in quadratic equation : \"))\ndiscriminant = (b**2)-(4*a*c)\nif(discriminant > 0):\n root1 = (-b + math.sqrt(discriminant)/ (2*a))\n root2 = (-b - math.sqrt(discriminant)/ (2*a))\n print(\"Two Discriminant Real ROOts Exits : root1 = %.2f and root2 = %.2f \" %(root1,root2))\nelif (discriminant==0):\n root1 = root2 = -b / (2*a)\n print(\"Two Discriminant Real ROOts Exits : root1 = %.2f and root2 = %.2f \" %(root1,root2))\nelif (discriminant<0):\n root1= root2 = -b / (2*a)\n imaginary = math.sqrt(-discriminant) / (2*a)\n print(\"Two Discriminant Real ROOts Exits : root1 = %.2f + %.2f and root2 = %.2f - %.2f \" %(root1,imaginary, root2, imaginary))\n","sub_path":"nature_of_roots_of_quadratic_equation.py","file_name":"nature_of_roots_of_quadratic_equation.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"215215521","text":"#!/usr/bin/env python\n\nimport os\n\n\ndef switchmode(var):\n if var in os.environ and os.environ.get(var).upper() == 'DEV':\n print('Running Development Server', flush=True)\n os.system('python identidock.py')\n else:\n print('Runnig Production Server', flush=True)\n os.system('uwsgi --http 0.0.0.0:9090 --wsgi-file /app/identidock.py --callable app --stats 0.0.0.0:9191')\n\n\nif __name__ == '__main__':\n 
switchmode('ENV')\n","sub_path":"cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"599800758","text":"import os\nfrom datetime import datetime, timezone\nfrom io import StringIO\n\nfrom django.core.management import call_command\nfrom django.test import TestCase\nfrom lxml import etree\n\nfrom rr.models.attribute import Attribute\nfrom rr.models.contact import Contact\nfrom rr.models.serviceprovider import ServiceProvider, SPAttribute\nfrom rr.models.usergroup import UserGroup\nfrom rr.utils.ldap_metadata_generator import ldap_metadata_generator_list\n\nTESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), \"../testdata/ldap_metadata.xml\")\n\n\nclass LdapMetadataTestCase(TestCase):\n def setUp(self):\n validation_time = datetime.strptime(\"20190102 12:01:02\", \"%Y%m%d %H:%M:%S\").astimezone(timezone.utc)\n self.sp = ServiceProvider.objects.create(\n entity_id=\"ldaptestservice\",\n service_type=\"ldap\",\n production=True,\n target_group=\"restricted\",\n service_account=True,\n service_account_contact=\"service.user@example.org +358501234567\",\n server_names=\"ldaptest.example.org\\nldaptest-2.example.org\",\n validated=validation_time,\n )\n attr_cn = Attribute.objects.create(\n friendlyname=\"cn\",\n name=\"urn:oid:cn\",\n attributeid=\"id-cn\",\n nameformat=\"urn:uri\",\n public_ldap=True,\n group=\"name\",\n )\n attr_mail = Attribute.objects.create(\n friendlyname=\"mail\", name=\"urn:oid:mail\", attributeid=\"id-mail\", nameformat=\"urn:uri\", public_ldap=True\n )\n UserGroup.objects.create(sp=self.sp, name=\"grp-gamma\", validated=validation_time)\n Contact.objects.create(sp=self.sp, email=\"contact@example.org\", type=\"support\")\n SPAttribute.objects.create(sp=self.sp, attribute=attr_cn, validated=validation_time)\n SPAttribute.objects.create(sp=self.sp, attribute=attr_mail, validated=validation_time)\n self.test_metadata = 
open(TESTDATA_FILENAME).read()\n self.maxDiff = None\n\n def test_ldap_metadata_generation(self):\n metadata_tree = ldap_metadata_generator_list(validated=True, production=True, include=None)\n metadata = etree.tostring(metadata_tree, pretty_print=True, encoding=\"UTF-8\")\n self.assertEqual(metadata.decode(\"utf-8\"), self.test_metadata)\n\n def test_exportldap_management_command(self):\n out = StringIO()\n call_command(\"exportldap\", \"-p\", stdout=out)\n self.assertEqual(out.getvalue(), '\\n' + self.test_metadata)\n","sub_path":"rr/tests/test_ldap_metadata.py","file_name":"test_ldap_metadata.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"587094417","text":"# -*- coding: utf8 -*-\n\nfrom unittest import TestCase\nimport operator\n\nopmap = {\n '+': operator.add, 'add': operator.add,\n '-': operator.sub, 'sub': operator.sub,\n '*': operator.mul, 'mul': operator.mul,\n '/': operator.truediv, 'div': operator.truediv,\n}\n\n\nclass aUnittest(object):\n test_type = None\n commutes = ['add', 'mul']\n str_equality = False\n equal_alternatives = True\n\n def names(self):\n return {}\n\n def bin_examples(self, name,\n commutes=False, alternatives=False, scalar=False):\n '''Retorna um iterador sobre os exemplos (a, b, c) onde\n bin(a, b) == c, para um operador binário fornecido'''\n\n # Extrai todos os resultados registrados\n prefix = '%s_' % name\n names = self.names()\n res = {k[4:]: v for (k, v) in names.items() if k.startswith(prefix)}\n\n # Itera sobre os resultados para selecionar os operandos\n obj_tt = self.test_type\n for k, res in res.items():\n a, b = [names[c] for c in k]\n\n # Caso scalar esteja ligado, só utiliza os resultados em que um\n # dos membros do par não seja de obj_tt\n if scalar:\n if isinstance(a, obj_tt) and isinstance(b, obj_tt):\n continue\n else:\n if ((not alternatives)\n and ((not isinstance(a, obj_tt)) or\n (not isinstance(b, 
obj_tt)))):\n continue\n\n if alternatives:\n name_a, name_b = k\n\n # Retorna todas permutações com a\n a_alts = [v for (k, v) in names.items()\n if k.startswith(name_a + '_')]\n for a_alt in a_alts:\n if isinstance(b, obj_tt) or isinstance(a_alt, obj_tt):\n if commutes:\n yield (b, a_alt, res)\n yield (a_alt, b, res)\n\n # Retorna todas permutações com b\n b_alts = [v for (k, v) in names.items()\n if k.startswith(name_b + '_')]\n for b_alt in b_alts:\n if isinstance(a, obj_tt) or isinstance(b_alt, obj_tt):\n if commutes:\n yield (b_alt, a, res)\n yield (a, b_alt, res)\n\n else:\n # Retorna a comutação\n if commutes:\n yield (b, a, res)\n yield (a, b, res)\n\n def bin_assert(self, op, a, b, res):\n value = opmap[op](a, b)\n msg = '%s + %s != %s, got %s' % (a, b, res, value)\n assert self.equals(value, res), msg\n\n def bin_worker(self, op, **kwds):\n commutes = op in self.commutes\n for a, b, res in self.bin_examples(op, commutes=commutes):\n self.bin_assert(op, a, b, res)\n\n def equals(self, a, b):\n if a == b:\n return True\n elif hasattr(a, 'almost_equal'):\n if a.almost_equal(b):\n return True\n elif hasattr(b, 'almost_equal'):\n if b.almost_equal(a):\n return True\n elif self.str_equality and str(a) == str(b):\n return True\n else:\n return False\n\n # Operações binárias de tipos iguais ######################################\n def test_add(self):\n self.bin_worker('add')\n\n def test_sub(self):\n self.bin_worker('sub')\n\n def test_mul(self):\n self.bin_worker('mul')\n\n def test_div(self):\n self.bin_worker('div')\n\n # Operações binárias com tipos escalares ##################################\n def test_add_scalar(self):\n self.bin_worker('add', scalar=True)\n\n def test_sub_scalar(self):\n self.bin_worker('sub', scalar=True)\n\n def test_mul_scalar(self):\n self.bin_worker('mul', scalar=True)\n\n def test_div_scalar(self):\n self.bin_worker('div', scalar=True)\n\n # Operações binárias de tipos alternativos ################################\n def 
test_add_alts(self):\n self.bin_worker('add', alternatives=True)\n\n def test_sub_alts(self):\n self.bin_worker('add', alternatives=True)\n\n def test_mul_alts(self):\n self.bin_worker('add', alternatives=True)\n\n def test_div_alts(self):\n self.bin_worker('add', alternatives=True)\n\n # Testa igualdade com alternativas ########################################\n def test_equal_alternatives(self):\n if self.equal_alternatives:\n names = self.names()\n objs = [(k, v) for (k, v) in names.items() if len(k) == 1 and\n isinstance(v, self.test_type)]\n for name, obj in objs:\n prefix = name + '_'\n alts = [v for (k, v) in names.items() if k.startswith(prefix)]\n for alt in alts:\n msg = '%s != %s' % (obj, alt)\n assert obj == alt, msg\n\nif __name__ == '__main__':\n from unittest import TestCase\n from FGAme import Vec2\n\n class VectorTest(aUnittest, TestCase):\n test_type = Vec2\n\n def names(self):\n a = Vec2(1, 2)\n b = Vec2(3, 4)\n c = Vec2(5, 6)\n a_tuple = (1, 2)\n a_list = [1, 2]\n m = 2\n\n add_ab = (4, 6)\n sub_ab = (-2, -2)\n sub_ba = (2, 2)\n\n mul_ma = (2, 4)\n div_am = (0.5, 1)\n\n return locals()\n\n t = VectorTest()\n t.test_mul_alts()\n\n import nose\n nose.runmodule('__main__')\n","sub_path":"src/mathtools/base/unittests.py","file_name":"unittests.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"161089637","text":"try:\n archivo = open('CreacionArchivo.txt' ,'w', encoding='utf8')\n archivo.write('aqui estamos agregando informacion\\n')\n archivo.write('Saliendo')\n\nexcept Exception as e:\n print(e)\nfinally:\n archivo.close()\n print('fin del archivo')\n","sub_path":"CreacionArchivos.py","file_name":"CreacionArchivos.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"643121459","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 14 13:40:23 
2019\n\n@author: yongjie.su\n\"\"\"\n\n\nimport os\nfrom collections import defaultdict\n\n\nclass UAESUtils:\n \n def __init__(self):\n \n pass\n \n # 读取A2L 返回行号和Line的字典\n @staticmethod\n def readLineDictFromA2L(a2lPath):\n \n lineDict = defaultdict(str)\n with open(a2lPath, 'r', encoding=\"ISO-8859-1\") as fileObj:\n num = 1\n line = fileObj.readline()\n while line:\n lineDict[num] = line.rstrip('\\n')\n try:\n line = fileObj.readline()\n except:\n # logSP.addLog(\"Warning: row number %d can not prase\"%num)\n print(\"Warning: row number %d can not prase in %s\"%(num, a2lPath))\n line = \"CANT NOT PRASE\"\n num += 1\n \n return lineDict\n \n @staticmethod\n def outputDict(outputPath, outputDict, append=False):\n \n if not append:\n if os.path.exists(outputPath):\n # 简单粗暴的做法 存在就删除再创建\n os.remove(outputPath)\n \n with open(outputPath, 'a+', encoding=\"ISO-8859-1\") as fileObj:\n \n for rowNumber, line in outputDict.items():\n fileObj.write(line + \"\\n\")\n \n @staticmethod\n def outputListDict(outputPath, outputLD, append=False):\n \n if not append:\n if os.path.exists(outputPath):\n os.remove(outputPath)\n \n with open(outputPath, \"a+\", encoding=\"ISO-8859-1\") as fileObj:\n for i in range(len(outputLD)):\n for key, line in outputLD[i].items():\n fileObj.write(line + \"\\n\")\n \n @staticmethod\n def outputSingleLine(outputPath, outputCont, append=True):\n \n if not append:\n if os.path.exists(outputPath):\n # 简单粗暴的做法 存在就删除再创建\n os.remove(outputPath)\n \n with open(outputPath, 'a+', encoding=\"utf-8\") as fileObj:\n fileObj.write(outputCont + \"\\n\")\n \n @staticmethod\n def outputLines(outputPath, outputContList, append=False):\n \n if not append:\n if os.path.exists(outputPath):\n # 简单粗暴的做法 存在就删除再创建\n os.remove(outputPath)\n \n with open(outputPath, 'a+', encoding=\"utf-8\") as fileObj:\n for i in range(len(outputContList)):\n fileObj.write(outputContList[i] + \"\\n\")\n\n # 更新数据 \n @staticmethod \n def updateDF(df, compareCol, compareValue, modifyCol, 
modifyValue):\n \n index = df[df[compareCol] == compareValue].index[0]\n df.loc[index][modifyCol] = modifyValue\n \n # 插入一条数据\n @staticmethod\n def insertDF(df, index, valueList):\n \n df.loc[index] = valueList\n\n\n#UAESUtils.readLineDictFromA2L(\"E:\\\\06.pyprogram\\\\04.Marelli\\\\Origin_A2L\\\\UAES.a2l\")","sub_path":"A2LConvertSWSH/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"225092693","text":"import re, sys, os\nimport pandas as pd \nclass data_filter():\n def __init__(self, ctx): \n self.ctx=ctx \n self.lst=[] \n def apply(self, df):\n if 'data_filters' not in self.ctx.config.keys(): return df\n self.lst=df.to_dict('records')\n lst=[]\n for i,rec in enumerate(self.lst):\n rec=self._get_filter(rec) \n lst.append(rec)\n return pd.DataFrame(lst)\n def _get_filter(self, d): \n data_filters=self.ctx.config['data_filters'] \n for j,e in enumerate(data_filters):\n field=e['field'] \n if 'remove' in e.keys(): \n d[field]=re.sub(e['remove'],'',d[field])\n return d \n","sub_path":"CodeGenerators/lib/data_filter.py","file_name":"data_filter.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"152371261","text":"# Use a HAWKS and sklearn example, and show the performance (boxplot) for HAWKS, moons, and blobs\nfrom pathlib import Path\nfrom sklearn.datasets import make_blobs, make_moons\nimport hawks\n\nSEED_NUM = 42\nSAVE_FOLDER = Path.cwd()\nNUM_RUNS = 5\nNUM_CLUSTERS = 5\n\ngenerator = hawks.create_generator({\n \"hawks\": {\n \"seed_num\": SEED_NUM,\n \"num_runs\": NUM_RUNS\n },\n \"dataset\": {\n \"num_clusters\": NUM_CLUSTERS\n }\n})\ngenerator.run()\n# Analyse the hawks datasets\ndf, _ = hawks.analysis.analyse_datasets(\n generator=generator,\n source=\"HAWKS\",\n seed=SEED_NUM,\n save=False\n)\n# Make the blobs datasets\ndatasets = 
[]\nlabel_sets = []\nfor run in range(NUM_RUNS):\n data, labels = make_blobs(\n n_samples=1000,\n n_features=2,\n centers=NUM_CLUSTERS,\n random_state=SEED_NUM+run\n )\n datasets.append(data)\n label_sets.append(labels)\n# Analyse the blobs datasets\ndf, _ = hawks.analysis.analyse_datasets(\n datasets=datasets,\n label_sets=label_sets,\n source=\"SK-Blobs\",\n seed=SEED_NUM,\n save=False,\n prev_df=df\n)\n# Make the moons datasets\ndatasets = []\nlabel_sets = []\nfor run in range(NUM_RUNS):\n data, labels = make_moons(\n n_samples=1000,\n noise=2,\n random_state=SEED_NUM+run\n )\n datasets.append(data)\n label_sets.append(labels)\n# Analyse the moons datasets\ndf, _ = hawks.analysis.analyse_datasets(\n datasets=datasets,\n label_sets=label_sets,\n source=\"SK-Moons\",\n seed=SEED_NUM,\n save=False,\n prev_df=df\n)\n# Get the clustering algorithms into one column\ndf = df.melt(\n id_vars=[col for col in df if not col.startswith(\"c_\")],\n value_vars=[col for col in df if col.startswith(\"c_\")],\n var_name=\"Algorithm\",\n value_name=\"ARI\"\n)\n# Remove the c_ prefix to algorithm names\ndf['Algorithm'] = df['Algorithm'].map(lambda x: str(x)[2:])\n# Make the boxplot\nhawks.plotting.create_boxplot(\n df=df,\n x=\"source\",\n y=\"ARI\",\n hue=\"Algorithm\",\n show=True,\n fpath=SAVE_FOLDER / \"clustering_performance\",\n xlabel=\"Source\"\n)","sub_path":"examples/clustering_example.py","file_name":"clustering_example.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"410862542","text":"\"\"\"\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\n\nimport geodesic, predict, residual, compare\nreload(geodesic)\nreload(predict)\nreload(residual)\nreload(compare)\n\nfrom model.rxnnet.examples.MAI2 import net as net_mai\nfrom model.rxnnet.examples.MAR2 import net as net_mar\nfrom model.rxnnet.examples.MMI2 import net as net_mmi\nfrom model.rxnnet.examples.MMR2 
import net as net_mmr\nfrom model.rxnnet import experiments\nreload(experiments)\n\n\n# make data expts\ndexpts_mai = experiments.Experiments()\ndexpts_mai.add(((), 'S', np.inf))\n#dexpts_mai.add((('kf_R1',2), 'S', np.inf))\n#dexpts_mai.add((('kf_R2',2), 'S', np.inf))\ndexpts_mai.add(((), 'J_R1', np.inf))\n\ndexpts_mar = experiments.Experiments()\ndexpts_mar.add(((), 'S', np.inf))\ndexpts_mar.add((('k_R1',[0.25,0.5,2,4]), 'S', np.inf))\ndexpts_mar.add((('k_R2',[0.25,0.5,2,4]), 'S', np.inf))\ndexpts_mar.add(((), 'J_R1', np.inf))\ndexpts_mar.add((('k_R1',[0.25,0.5,2,4]), 'J_R1', np.inf))\ndexpts_mar.add((('k_R2',[0.25,0.5,2,4]), 'J_R1', np.inf))\n\ndexpts_mmi = experiments.Experiments()\ndexpts_mmi.add(((), 'S', np.inf))\ndexpts_mmi.add((('Vf_R1',2), 'S', np.inf))\ndexpts_mmi.add((('Vf_R2',2), 'S', np.inf))\ndexpts_mmi.add(((), 'J_R1', np.inf))\n\ndexpts_mmr = experiments.Experiments()\ndexpts_mmr.add(((), 'S', np.inf))\ndexpts_mmr.add((('V_R1',[0.25,0.5,2,4]), 'S', np.inf))\ndexpts_mmr.add((('V_R2',[0.25,0.5,2,4]), 'S', np.inf))\ndexpts_mmr.add(((), 'J_R1', np.inf))\ndexpts_mmr.add((('V_R1',[0.25,0.5,2,4]), 'J_R1', np.inf))\ndexpts_mmr.add((('V_R2',[0.25,0.5,2,4]), 'J_R1', np.inf))\n\n# make prediction expts\npexpts = experiments.Experiments()\npexpts.add(((), ('J_R1','v_R1'), np.inf))\npexpts.add(((), ('J_R1','v_R2'), np.inf))\npexpts.add(((), ('v_R1','S'), np.inf))\npexpts.add(((), ('v_R2','S'), np.inf))\n\n\ncmpn12 = compare.Comparison(net_mai, net_mar, dexpts=dexpts_mai, dexpts2=dexpts_mar, pexpts=pexpts)\ncmpn21 = compare.Comparison(net_mar, net_mai, dexpts=dexpts_mar, dexpts2=dexpts_mai, pexpts=pexpts)\ncmpn24 = compare.Comparison(net_mar, net_mmr, dexpts=dexpts_mar, dexpts2=dexpts_mmr, pexpts=pexpts)\na\np1 = [1,2]\ncost2, p2, z, z2 = cmpn12.cmp_prediction(p=p1, ens=False, p0=[1,1], in_logp=True, disp=0)\nenergies2, ps2, z, zs2 = cmpn12.cmp_prediction(p=p1, ens=True, in_logp=True,\n scheme='sigma', sigma0=0.1, \n p0=[1,1], cutoff_singval=1e-3,\n disp=0, 
nstep=1000, seed=1, \n interval_print_step=100)\n\n\n\n\n","sub_path":"tmp/test_compare.py","file_name":"test_compare.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"172399929","text":"import os \nimport time\nimport copy\nimport h5py\nimport numpy as np \n# from capy import *\nfrom utility import *\nfrom operators import *\nfrom qutip import Bloch\nfrom time import gmtime\nfrom IPython import embed\nimport scipy.linalg as sla\nimport matplotlib.pyplot as plt\n\n\n# define location of archive file\narchivepath = \"C:\\\\Users\\\\Joshua\\\\Documents\\\\Projects\\\\Code\\\\Python\\\\Modules\\\\qChain\\\\archive.h5\"\n\n# Add Latex physics package to preamble for matplotlib\nplt.rc('text', usetex=True)\nplt.rc('text.latex', preamble=r'\\usepackage{amsmath} \\usepackage{physics} \\usepackage{amssymb}')\n\n\nclass SpinSystem(object):\n \"\"\"\n class that defines a spin-[1/2, 1] system, keeping track of its state and evolution\n \"\"\"\n\n def __init__(self, spin: str=\"half\", init=None):\n # spin of system to be defined\n self.spin = spin\n # initial state of each particle [zero, one, super]\n self.init = init\n \n\n # initialise the system \n self.initialise()\n\n def initialise(self):\n \"\"\"\n initialises the spin system\n \"\"\"\n if self.spin == \"half\":\n # dimension of system\n self.dim = 2\n # initial state\n self.state = op1[\"pz\"]\n elif self.spin ==\"one\":\n # dimension of system\n self.dim = 3\n # initial state\n self.state = op2[\"po\"]\n\n if self.init is not None: \n if type(self.init) is str:\n if self.init is \"super\":\n self.state = op1[\"h\"] @ self.state\n elif self.init is \"isuper\":\n self.state = op1[\"s\"] @ op1[\"h\"] @ self.state\n elif self.init is \"isuperT\":\n self.state = op1[\"sdg\"] @ op1[\"h\"] @ self.state\n elif self.init is \"zero\":\n self.state = op1[\"pz\"]\n elif self.init is \"one\":\n self.state = op1[\"po\"]\n else:\n 
raise ValueError(\"Unrecognised initial state: {}\".format(self.init))\n else:\n # custom start state\n self.state = np.matrix(self.init)\n \n def evolve(self, unitary, save=False):\n \"\"\"\n evolve the spin system \n \"\"\"\n \n # evolve state \n if save:\n self.state = unitary @ self.state\n return self.state\n else:\n return unitary @ self.state\n\n def measure(self, state=None, project=np.asarray([[1,0]])):\n \"\"\"\n perform projective measurement in computational basis\n \"\"\"\n if state is None:\n state = self.state\n\n return np.abs(np.dot(project, state))**2\n\n def state_evolve(self, hamiltonian, t=[0,1,1e-3], cache=True, bloch=[False, 10], **kwargs):\n \"\"\"\n computes the probability of finding the system in the projection state over the given range\n \"\"\"\n if len(t)<3: t.append(1e-3)\n # time array\n time = np.arange(t[0], t[1], t[2]) \n self.time = time\n # preallocate probability array\n probs = np.zeros((len(time)), dtype=np.float64)\n # create list for snap shot states to plot\n if bloch:\n bloch_points = []\n\n # compute unitary given hamiltonian for 0 -> t_delta\n if callable(hamiltonian):\n # hamiltonian is time dependent\n flag = True\n else:\n flag = False \n unitary = sla.expm(-1j*hamiltonian*t[2]/hbar)\n \n if cache and flag:\n # create hamiltonian cache to reduce repeated call overhead\n h_cache = hamiltonian(time)\n u_cache = [myexpm(-1j*h_cache[:,:,i]*t[2]/hbar) for i in range(len(time)) if i!=0]\n state_cache = np.zeros((2,1,len(time)), dtype=np.complex128)\n\n # add initial state to caches\n state_cache[:,:,0] = self.state\n probs[0] = self.measure(state_cache[:,:,0], **kwargs)\n\n # iterate through cached hamiltonians\n for j,tstep in enumerate(time):\n if j!=0:\n # compute measurement probablity of projector\n state_cache[:,:,j] = self.evolve(u_cache[j-1], save=True)\n probs[j] = self.measure(state=state_cache[:,:,j], **kwargs)\n\n # add state to bloch plot\n if bloch[0] and j % bloch[1] == 0:\n 
bloch_points.append(self.get_bloch_vec(np.outer(self.state.H, self.state)))\n \n # save caches\n self.h_cache = h_cache\n self.u_cache = u_cache\n self.state_cache = state_cache\n\n else:\n # compute unitary in a piecewise fashion (this would be really easy to parellise but I probably shouldn't)\n for i,tstep in enumerate(time):\n print(tstep)\n if flag:\n unitary = expm_eig(-1j*hamiltonian(tstep)/hbar, t[2])\n \n # compute measurement probablity of projector\n probs[i] = self.measure(state=self.evolve(unitary, save=True), **kwargs)\n # add state to bloch plot\n if bloch[0] and i % bloch[1] == 0:\n bloch_points.append(self.get_bloch_vec(np.outer(self.state.H, self.state)))\n \n \n # plot evolution on the Bloch sphere\n if bloch:\n # convert to qutips annoying format\n # TODO: preallocate\n x,y,z = [],[],[]\n for vec in bloch_points:\n x.append(vec[0])\n y.append(vec[1])\n z.append(vec[2])\n\n bloch_points = [x,y,z]\n return time, probs, bloch_points\n else:\n return time, probs\n\n def frame_transform(self, cstate=None, frame=[\"interaction\", \"lab\"], project=meas1[\"0\"], bloch=[False, 10]):\n \"\"\"\n EXPERIMENTAL\n Transform a state or set of states to a specified reference frame from another. This method\n is still in the experimental phase. It works well for going from simpler reference frames to\n complicated oned but the reverse is prone to numerical instability. 
\n \"\"\"\n if cstate is None:\n cstate = np.copy(self.state_cache)\n\n # compute reference frame map\n if callable(frame):\n unitary_map = frame\n # determine transition case and define appropriate time dependent unitary operator\n elif frame[0] == \"lab\":\n if frame[1] == \"interaction\":\n def unitary_map(t, larmor=gyro): \n return np.asarray([[np.exp(1j*np.pi*larmor*t), 0], [0, np.exp(-1j*np.pi*larmor*t)]])\n elif frame == \"dressed\":\n # define dressed state transform\n def dressed(t, omega=1e4, detuning=0): return np.asarray([[np.cos(np.arctan(\n omega/detuning)/2), -np.sin(np.arctan(omega/detuning)/2)], [np.sin(np.arctan(omega/detuning)/2), np.cos(np.arctan(omega/detuning)/2)]])\n\n def unitary_map(t, larmor=larmor): return dressed(t) @ np.asarray([[np.exp(1j*np.pi*larmor*t), 0], [0, np.exp(-1j*np.pi*larmor*t)]])\n else:\n raise ValueError(\"Unrecognised output reference frame\")\n\n elif frame[0] == \"interaction\":\n if frame[1] == \"lab\":\n def unitary_map(t, larmor=gyro): \n return np.asarray([[np.exp(-1j*np.pi*larmor*t), 0], [0, np.exp(1j*np.pi*larmor*t)]])\n elif frame == \"dressed\":\n # define dressed state transform\n def unitary_map(t, omega=1e4, detuning=0): return np.asarray([[np.cos(np.arctan(\n omega/detuning)/2), np.sin(np.arctan(omega/detuning)/2)], [-np.sin(np.arctan(omega/detuning)/2), np.cos(np.arctan(omega/detuning)/2)]])\n else:\n raise ValueError(\"Unrecognised output reference frame\")\n\n elif frame[0] == \"dressed\":\n if frame[1] == \"interaction\":\n def unitary_map(t, larmor=gyro): \n return np.asarray([[np.cos(np.arctan(omega/detuning)/2), -np.sin(np.arctan(omega/detuning)/2)], [np.sin(np.arctan(omega/detuning)/2), np.cos(np.arctan(omega/detuning)/2)]])\n elif frame == \"lab\":\n # define dressed state transform\n def dressed(t, omega=1e4, detuning=0): return np.asarray([[np.cos(np.arctan(\n omega/detuning)/2), -np.sin(np.arctan(omega/detuning)/2)], [np.sin(np.arctan(omega/detuning)/2), 
np.cos(np.arctan(omega/detuning)/2)]])\n\n def unitary_map(t, larmor=larmor): return dressed(t) @ np.asarray([[np.exp(1j*np.pi*larmor*t), 0], [0, np.exp(-1j*np.pi*larmor*t)]])\n else:\n raise ValueError(\"Unrecognised output reference frame\")\n\n else:\n raise ValueError(\"Unrecognised input reference frame\")\n\n # apply transform to states\n if len(np.shape(cstate))==3:\n new_states = [unitary_map(t) @ cstate[:,:,step] for step,t in enumerate(self.time)]\n nprobs = np.squeeze([np.abs(project @ nstate)**2 for i,nstate in enumerate(new_states)])\n # save new states and projection probabilities\n self.state_cache = new_states\n self.probs = nprobs\n else:\n new_states = unitary_map(self.time[-1]) @ cstate\n return new_states\n\n\n\n\n def bloch_plot(self, filename, points=None, save=False):\n \"\"\"\n Plot the current state on the Bloch sphere using\n qutip. \n \"\"\"\n if points is None:\n # convert current state into density operator\n rho = np.outer(self.state.H, self.state)\n # get Bloch vector representation\n points = self.get_bloch_vec(rho)\n # Can only plot systems of dimension 2 at this time\n assert len(points) == 3, \"System dimension must be spin 1/2 for Bloch sphere plot\"\n \n # create instance of 3d plot\n bloch = Bloch(fig=1, figsize=[9,9], view=[190,10])\n # add state\n bloch.add_points(points)\n bloch.render()\n\n if save is True:\n print('Bloch plot saved to Sim Results folder')\n path = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/' + str(filename) + '.png'\n bloch.fig.savefig(path, dpi=800, transparent=True)\n\n def bloch_plot2(self, filename, save=False, vecList = [], vecColour = [], view = [190,10], points=None, folder = False, fig = False, ax = False):\n \"\"\"\n Plot the current state on the Bloch sphere using\n qutip. 
\n \"\"\"\n if points is None:\n # convert current state into density operator\n rho = np.outer(self.state.H, self.state)\n # get Bloch vector representation\n points = self.get_bloch_vec(rho)\n # Can only plot systems of dimension 2 at this time\n assert len(points) == 3, \"System dimension must be spin 1/2 for Bloch sphere plot\"\n \n # create instance of 3d plot\n if not fig or not ax:\n bloch = Bloch(figsize=[9,9], view=view)\n else:\n bloch = Bloch(fig=fig, axes = ax, view=view)\n # add state\n bloch.add_points(points)\n # bloch.zlabel = [r'$\\left|+z\\right>$',r'$\\left|-z\\right>$']\n bloch.zlabel = [r'$\\ket{+_z}$',r'$\\ket{-_z}$']\n # bloch.ylabel = [r'$\\ket{+_y}$',r'$\\ket{-_y}$']\n # bloch.ylabel = [r'$\\ket{+_y}$',r'$\\ket{-_y}$']\n # print(vecList.shape)\n\n # add vectors\n if vecList.shape[1] == 3:\n if len(vecColour) >= vecList.shape[0] and len(vecColour) > 0:\n bloch.vector_color = vecColour\n else:\n bloch.vector_color = ['#CC6600','royalblue','r','m','g']\n bloch.add_vectors(vecList)\n # add field vector\n # bloch.add_vectors([1,0,0.15])\n # bloch.add_vectors([0,0,1])\n # bloch.add_vectors([1,0,0])\n\n # render bloch sphere\n if not fig or not ax:\n bloch.render()\n else:\n # bloch.render(fig = fig, axes = ax)\n bloch.render(fig = fig)\n\n # save output\n if save is True:\n if not folder:\n folder = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/'\n print('Bloch plot saved to ' + str(folder))\n path1 = folder + str(filename) + '.png'\n path2 = folder + str(filename) + '.pdf'\n bloch.fig.savefig(path1, dpi=800, transparent=True)\n bloch.fig.savefig(path2, dpi=800, transparent=True)\n\n # return axes for annotations\n return bloch.fig, bloch.axes\n\n def get_bloch_vec(self, rho):\n \"\"\"\n compute the bloch vector for some 2 dimensional density operator rho\n \"\"\"\n u = 2*np.real(rho[0,1])\n v = 2*np.imag(rho[1,0])\n w = np.real(rho[0,0] - rho[1,1])\n return [u,v,w]\n\n def prob_plot(self, time, probs, filename, 
commitVersion,save=False):\n \"\"\"\n Formatted code for plot (why must plot code always be hideous?)\n \"\"\"\n\n commitText = 'commit: ' + str(commitVersion)\n\n plt.figure(2)\n plt.plot(time, probs)\n plt.ylim([np.min(probs)*1.05, np.max(probs)*1.05])\n plt.xlim([time[0], time[-1]])\n plt.grid()\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Probability\")\n plt.annotate(commitText, xy=(0.85,0.97), xycoords = 'axes fraction', fontsize=12)\n plt.title(filename)\n\n if save is True:\n path = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/' + str(filename) + '.png'\n print('Probabiblity plot saved to Sim Results folder')\n plt.savefig(path)\n\n # plt.show()\n\n def project_plot(self, time, projections, filename, commitVersion, save=False):\n \"\"\"\n Formatted code for plot (why must plot code always be hideous?)\n \"\"\"\n\n commitText = 'commit: ' + str(commitVersion)\n # projections = 2*probs - 1;\n\n plt.figure(3)\n plt.plot(time, projections)\n plt.ylim([np.min(projections)*1.05, np.max(projections)*1.05])\n plt.xlim([time[0], time[-1]])\n plt.grid()\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"$F_x$ projection ($$)\")\n # plt.annotate(commitText, xy=(0.85,0.97), xycoords = 'axes fraction', fontsize=12)\n # plt.title(filename)\n plt.title('Bloch Sphere x Axis Projection')\n\n if save is True:\n path = 'C:/Users/Boundsy/Desktop/Uni Work/PHS2360/Sim Results/' + str(filename) + '.png'\n print('Projection plot saved to Sim Results folder')\n plt.savefig(path)\n\n # plt.show()\n\n def bloch_animate(self, filename, pnts, save = False, name=\"Bloch_animate\"):\n \"\"\"\n Animates the path of a state through the set of pure states - requires ffmpeg\n \"\"\"\n from pylab import figure\n import matplotlib.animation as animation\n from mpl_toolkits.mplot3d import Axes3D\n\n # set up plot environment\n fig = figure()\n ax = Axes3D(fig, azim=-40, elev=30)\n sphere = Bloch(axes=ax)\n\n # define animation function (from qutip docs)\n def animate(i):\n sphere.clear()\n 
sphere.add_points([pnts[0][:i+1],pnts[1][:i+1],pnts[2][:i+1]])\n sphere.make_sphere()\n return ax\n\n def init():\n sphere.vector_color = ['r']\n return ax\n\n ani = animation.FuncAnimation(fig, animate, np.arange(len(pnts[0])),\n init_func=init, repeat=False)\n \n if save:\n file = 'C:/Users/ccbou2/GitHub/Honours2019/Final presentation files' + str(filename)\n ani.save(file + \".mp4\", fps=20)\n\n\ndef field_gen(field_params):\n \"\"\"\n Creates a 3 element list of functions that wholly defines a classical electromagnetic field \n given a dictionary of parameters\n \"\"\"\n\n # list of field functions\n field_vector = []\n for i,struct in enumerate(field_params[\"struct\"]):\n\n # generate a simple sinusoid function with amplitude and frequency\n if struct is \"sinusoid\":\n field_vector.append(lambda t,j=i: field_params[\"amp\"][j]*(np.cos(2*np.pi*field_params[\"freqb\"][j]*t))) \n # generate a constant DC bias field\n elif struct is \"constant\":\n field_vector.append(lambda t,j=i: field_params[\"amp\"][j]*t/t)\n # generate a pulsed sinusoid with frequency omega beginning at time tau (seconds) \n elif struct is \"pulse\":\n # generate field callable with num pulsed sinusoids\n field_vector.append(pulse_gen(field_params[\"freqb\"][i], field_params[\"tau\"][i], amp=field_params[\"amp\"][i]))\n elif struct is \"tchirp\":\n # define chirp component\n chirp = lambda t,j=i: np.heaviside(t-field_params[\"tau\"][j],1.0)*field_params[\"misc\"][j]*np.tanh(0.01*(t-field_params[\"tau\"][j])/field_params[\"tau\"][j])\n # generate field with time varying amplitude\n constant = lambda t,j=i: field_params[\"amp\"][j]\n # add to field vectors\n field_vector.append(lambda t: constant(t) + chirp(t)) \n elif struct is \"custom\":\n field_vector.append(field_params[\"misc\"][i])\n else:\n raise ValueError(\"Unrecognised field type: {}\".format(struct))\n\n\n return field_vector\n\ndef field_plot(field_vector, time=np.linspace(0,0.5,1e4)):\n \"\"\"\n Plots the signal components of 
some field vector over the specified time doman\n \"\"\"\n fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=False)\n for i, row in enumerate(ax):\n row.plot(time, field_vector[i](time))\n row.set_title(\"Field vector along {} axis\".format(['x','y','z'][i]))\n plt.xlabel(\"Time (s)\")\n #plt.ylabel(\"Ampltiude ($Hz/Gauss$)\")\n plt.show()\n\n\nclass Hamiltonian(object):\n \"\"\"\n Defines the Hamiltonian that acts on a SpinSystem class - dimension of Hilbert spaces of the two must match\n \"\"\"\n def __init__(self, spin=\"half\", freq=500):\n # spin of system\n self.spin = spin\n # define oscillation frequency \n self.freq = freq\n # define energy seperation\n self.energy = hbar*self.freq\n # define internal Hamiltonian for spin system\n if self.spin == \"half\":\n # two energy eigenstates (0,1)\n self.free_ham = 0.5*hbar*np.asarray([[-self.freq, 0],[0, self.freq]], dtype=np.complex128)\n elif self.spin == \"one\":\n # TODO: three energy eigenstates (0,1,2)\n pass\n else:\n print(\"Unrecognised spin system {}: aborting\".format(self.spin))\n\n\n def generate_simple(self, potential=None,):\n \"\"\"\n Creates a simple Hamiltonian based off the input Potential or defaults\n \"\"\"\n # potential component of Hamiltonian\n self.potential = potential\n\n # check if potential is custom or one of defaults of style [coupling, phase, rwa]\n if self.potential is not None:\n if type(self.potential) == list:\n coupling = self.potential[0]\n tuning = self.potential[1]\n \n elif self.init is \"zero\":\n self.state = op1[\"pz\"]\n\n # apply rotating wave approximation\n if potential[2]:\n self.potential = 0.0\n self.hamiltonian = 0.5*hbar*np.asarray([[tuning, coupling],\n [coupling, -tuning]], dtype=np.complex128)\n # full hamiltonian in interaction picture (t dependency is annoying)\n else:\n # define the off diagonal components as lambda functions for t dependency \n offdiag = lambda t: coupling*(1 + np.exp(-2j*(self.freq+tuning)*t))\n self.hamiltonian = lambda t: 
0.5*hbar*np.asarray([[tuning, offdiag(t)],\n [np.conj(offdiag(t)), -tuning]],\n dtype=np.complex128)\n else:\n pass\n else:\n self.potential = 0.0\n\n\n def generate_field_hamiltonian(self, fields):\n \"\"\"\n generates an arbitrary magnetic field hamiltonian. Field must\n be a function with time as its only argument.\n \"\"\"\n # store field \n self.fields = fields\n # enforce function type\n for field in fields: assert callable(field), \"Field {} must be callable\".format(i)\n # redundant constants for clarity\n self.hamiltonian = lambda t: 0.5*hbar*2*np.pi*(fields[0](t)*op1[\"x\"] + fields[1](t)*op1[\"y\"] + fields[2](t)*op1[\"z\"])\n\n # cache version of hamiltonian\n def hamiltonian2(time):\n return 0.5*hbar*2*np.pi*np.asarray([[fields[2](time), fields[0](time)+1j*fields[1](time)],\n [fields[0](time)-1j*fields[1](time), -1*fields[2](time)]])\n self.hamiltonian_cache = hamiltonian2\n\n\n def bloch_field(self, time):\n \"\"\"\n Plot magnetic field vector normalised to the bloch sphere\n \"\"\"\n # seperate coordinates into axis set\n xb = self.fields[0](time)\n yb = self.fields[1](time)\n zb = self.fields[2](time)\n # add to list and normalise\n points = [list(xb), list(yb), list(zb)]\n sqsum = 0.0\n for i,point in enumerate(points[0]):\n # compute magnitude and store largest value\n sqsum_test = np.sqrt(points[0][i]**2 + points[1][i]**2 + points[2][i]**2)\n if sqsum_test > sqsum:\n sqsum = sqsum_test\n \n points = points/sqsum\n\n # create Bloch sphere instance\n bloch = Bloch()\n # add points and show\n bloch.add_points(points)\n bloch.show()\n\ndef data_retrieval(sim_params):\n \"\"\"\n Retrieves a data set from the archive if it has already been simulated with identical parameters\n \"\"\"\n\n #TODO: Fix this mess\n\n # define base name for retrieval\n root_group = \"Atomic_Sense\" \n \n # open archive and check if atomic sensor data exists\n with h5py.File(archivepath, 'a') as archive:\n # create group if it doesn't exist\n if root_group not in archive:\n 
atomic_sense = archive.create_group(root_group)\n print(\"No data found, exiting\")\n return None\n else:\n atomic_sense = archive[root_group]\n\n # iterate through parameter list\n for dataset in atomic_sense:\n flag = False\n for key,val in sim_params.items():\n\n # check for equivalency between floats\n if isinstance(val, float):\n if np.isclose(atomic_sense[dataset].attrs[key], val):\n flag = True\n else:\n flag = False\n break\n else:\n if atomic_sense[dataset].attrs[key] == val:\n flag = True\n else:\n flag = False\n break\n if flag:\n print(\"Data set found in archive\")\n # get sensor data\n print(dataset)\n return np.asarray(atomic_sense[dataset])\n else:\n print(\"Data set not found in archive\")\n\n return None\n\ndef data_store(sim_params, data, name=None, verbose=True):\n \"\"\"\n Stores a simulation instance \n \"\"\"\n\n # define base name for retrieval\n root_group = \"Atomic_Sense\"\n # turn parameter set into name string\n date = gmtime()\n root = sim_params[\"struct\"] + \"_{}_{}_{}_{}_{}_{}\".format(date.tm_sec, date.tm_min, date.tm_hour, date.tm_mday, date.tm_mon, date.tm_year)\n if name is not None:\n root += str(name)\n\n # open archive and check if atomic sensor data exists\n with h5py.File(archivepath, 'a') as archive:\n # create group if it doesn't exist\n if root_group not in archive:\n atomic_sense = archive.create_group(root_group)\n else:\n atomic_sense = archive[root_group]\n\n for dataset in atomic_sense:\n flag = False\n for key, val in sim_params.items():\n if atomic_sense[dataset].attrs[key] == val:\n flag = True\n else:\n flag = False\n break\n if flag:\n print(\"Simulation event already exists, ignoring save request\")\n return\n \n if verbose:\n print(\"Saving simulation results to archive file\")\n dataset = atomic_sense.create_dataset(root, data=np.asarray(data))\n # save attributes\n for key,val in sim_params.items():\n dataset.attrs[key] = val\n\n\ndef pseudo_fourier(struct, sig_amp=1, sig_freq=360, f_range=[250,450, 1], 
sig=None, tau=[0.01], t=0.5, noise=0.0, plot=False, verbose=True):\n \"\"\"\n Computes the projection |<1|psi>|^2 of a two level system for a given point in the signal parameter space with different frequency tunings\n \"\"\"\n\n # define magnetic fields\n pulse = pulse_gen(freq=341, tau=[1e-2], amp=20)\n\n # set detune max value\n detune_max = 5e4\n # time at which detuning sweep occurs\n detune_tau = 4e-4\n # time for sweep to occur\n swt = 0.01\n # bias field strength\n bias_amp = gyro\n\n # create parameter dictionary\n sim_params = {\"struct\": struct, \n \"sig_amp\": sig_amp,\n \"sig_freq\": sig_freq,\n \"f_range_low\": f_range[0],\n \"f_range_high\": f_range[1],\n \"f_range_step\": f_range[2],\n \"num\": len(tau)}\n\n result = None #data_retrieval(sim_params)\n if result is None:\n # projection array\n projs = []\n # frequencies to sample\n freqs = np.arange(f_range[0],f_range[1], f_range[2])\n # initiate Hamiltonian instance\n ham = Hamiltonian()\n for freq in freqs:\n if verbose:\n print(\"Computing evolution with tuning frequency: {:.2f} Hz\\r\".format(freq), end=\"\",flush=True)\n\n def Bx(t, omega_amp=freq): return omega_amp*(t/t)\n def By(t): return 0\n def Bz(t): return pulse(t)\n\n \n\n # define magnetic field vector parameters\n params = {\"struct\": [\"custom\", \"constant\", \"custom\"], \n \"freqb\": [sig_freq, 50, 0], # frequency in Hz\n \"tau\": [tau, None, None], # time event of pulse\n \"amp\":[sig_amp, 0, freq], # amplitude in Gauss -> 1 Gauss ~= 700000 Hz precession\n \"misc\": [Bz, None, Bx]} # misc parameters\n # generate magentic fields\n fields = field_gen(field_params=params)\n # compute Hamiltonian for updated magnetic field\n ham.generate_field_hamiltonian(fields) \n # redefine atom spin system\n atom = SpinSystem(init=\"super\")\n # evolve state using hamiltonian\n time, probs, pnts = atom.state_evolve(t=[1e-44, t, 1/2e5], hamiltonian=ham.hamiltonian_cache, project=meas1[\"1\"], bloch=[False, 5])\n 
#atom.frame_transform(project=meas1[\"0\"])\n projs.append(probs[-1])\n\n projs = np.asarray(projs)\n # format into single chunk for hdf5 compression \n data_matrix = np.zeros((2, len(projs)))\n signal_matrix = np.zeros((4, len(time)))\n data_matrix[0,:] = freqs\n data_matrix[1,:] = projs\n signal_matrix[0,:] = time\n signal_matrix[1,:] = fields[0](time)\n signal_matrix[2,:] = fields[1](time)\n signal_matrix[3,:] = fields[2](time)\n\n sim_params[\"name\"] = \"Fourier\"\n #data_store(sim_params, data=data_matrix, name=\"_Fourier_measurements\")\n sim_params[\"name\"] = \"Field\"\n #data_store(sim_params, data=data_matrix, name=\"_Field_signal\")\n\n else:\n freqs = result[0,:]\n projs = result[1,:]\n # define magnetic field vector parameters\n params = {\"struct\": [struct, \"constant\", \"constant\"], \n \"freqb\": [sig_freq, 0, 0], # frequency in Hz\n \"tau\": [tau, None, None], # time event of oulse\n \"amp\":[sig_amp, 0, 0], # amplitude in Gauss -> 1 Gauss ~= 700000 Hz precession\n \"misc\": [sig, None,None]} # misc parameters\n # generate magentic fields\n fields = field_gen(field_params=params)\n \n # set max of uniform distribution to be max of projector oscillations\n #noise_lvl = noise*np.max(projs-np.mean(projs))\n # add white noise to projector results\n #projs += np.random.normal(scale=noise_lvl,size=np.size(projs))\n if plot:\n plt.plot(freqs, projs, 'o--')\n plt.xlabel(\"Frequency of Bias field (Hz)\")\n plt.ylabel(\"$|<1|\\psi(t_1)>|^2$\")\n plt.title(\"Probability vs tuning frequency for {} Hz {} beginning at {} seconds\".format(sig_freq, params[\"struct\"][0], params[\"tau\"][0]))\n plt.grid()\n plt.show()\n\n return np.asarray(freqs), np.asarray(projs), fields[0]\n\n","sub_path":"qChain.py","file_name":"qChain.py","file_ext":"py","file_size_in_byte":29418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"505046536","text":"import configparser\nimport logging\nimport pathlib\nlogger = 
logging.getLogger(__name__)\n\nglobal config\nconfig = configparser.ConfigParser()\n\n\ndef init_config():\n logger.info(\"Reading configuration file\")\n\n cfgpath = pathlib.Path(pathlib.Path(__file__).resolve().parent, \"motd.cfg\")\n\n optionxform = config.optionxform\n config.optionxform = str\n\n config.read(cfgpath)\n\n default_sections = (\"motd\", \"update\")\n\n for s in default_sections:\n logger.debug(f\"Checking for {s}\")\n if s not in config.sections():\n logger.debug(f\"Creating {s} in config\")\n config[s] = {}\n\n defaults = ((\"motd\", \"HorizontalPad\", \"2\"),\n (\"update\", \"BufferLength\", \"400\"),\n (\"update\", \"MinimumDaysSinceUpdate\", \"2\"))\n\n for opt in defaults:\n tmp = config[opt[0]].get(opt[1], opt[2])\n config[opt[0]][opt[1]] = tmp\n\n with cfgpath.open(\"w\") as f:\n config.write(f)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"623344784","text":"import os\nimport pickle\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom pytorch_transformers import BertTokenizer\nfrom data_utils import pad_and_truncate\nimport pandas as pd\n\nclass CovData(Dataset):\n def __init__(self, data_path, tokenizer):\n cov_data = pd.read_csv(data_path, engine = 'python', encoding = 'utf-8')\n dim = cov_data.shape[0]\n all_data = []\n for line in range(dim):\n text = cov_data.at[line, 'text']\n label = cov_data.at[line, 'label']\n\n text_raw_indices = tokenizer.text_to_sequence(text)\n text_bert = '[CLS]{}[SEP]'.format(text)\n text_bert_indices = tokenizer.text_to_sequence(text_bert)\n bert_segments_ids = np.asarray([0] * (np.sum(text_raw_indices != 0) + 2))\n bert_segments_ids = pad_and_truncate(bert_segments_ids, tokenizer.max_seq_len)\n polarity = int(label) + 1\n \n\n data = {\n 'text_bert_indices': text_bert_indices,\n 'bert_segments_ids': bert_segments_ids,\n 'polarity': 
polarity,\n }\n all_data.append(data)\n \n self.data = all_data\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return len(self.data)","sub_path":"cov_data_utils.py","file_name":"cov_data_utils.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"87471312","text":"# Solution from modified Rabin Karp's algorithm. Keep completed list of any character.\n# And compare to the patterns. This is identical to using a hashset and incrementing/decrementing.\ndef problem(string, pattern):\n\n\tdef cmp(a, b):\n\t\tfor i in range(len(a)):\n\t\t\tif a[i] != b[i]:\n\t\t\t\treturn False\n\t\treturn True\n\n\tCHARACTERS = 256\n\tm = len(string)\n\tn = len(pattern)\n\tresult = []\n\n\tmain_count = [0 for _ in range(CHARACTERS)]\n\tpattern_count = [0 for _ in range(CHARACTERS)]\n\t\n\tfor i in range(n):\n\t\tmain_count[ord(string[i])] += 1\n\t\tpattern_count[ord(pattern[i])] += 1\n\n\tfor i in range(n, m):\n\t\t\n\t\tif cmp(main_count, pattern_count):\n\t\t\t# Back track to patterns initial start, not end.\n\t\t\tresult.append(i-n)\n\n\t\t# Remove old\n\t\tmain_count[ord(string[i-n])] -= 1\n\n\t\t# Update new\n\t\tmain_count[ord(string[i])] += 1\n\t\t\n\t# Doesn't update last, check again to make up for it\n\n\n\tif cmp(main_count, pattern_count):\n\t\tresult.append(m-n)\n\t\n\treturn result\n","sub_path":"Problem_111/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"580040252","text":"import csv, json, zipfile\nimport requests, PyPDF2, fitz\n\n# pip3 install PyMuPDF\n\n\n# Get ZIP file\nzip_file_url = 'https://disclosures-clerk.house.gov/public_disc/financial-pdfs/2021FD.ZIP'\npdf_file_url = 'https://disclosures-clerk.house.gov/public_disc/ptr-pdfs/2021/'\n\nr = requests.get(zip_file_url)\nzipfile_name = 
'2021.zip'\n\nwith open(zipfile_name, 'wb') as f:\n f.write(r.content)\n\n# Extract ZIP File\n# https://docs.python.org/3/library/zipfile.html#zipfile-objects\n\nwith zipfile.ZipFile(zipfile_name) as z:\n z.extractall('.')\n\n# Open File.txt\nwith open('2021FD.txt') as f:\n for line in csv.reader(f, delimiter='\\t'):\n if line[1] == 'Pelosi':\n print(line)\n date = line[7]\n doc_id = line[8]\n\n r = requests.get(f\"{pdf_file_url}{doc_id}.pdf\")\n\n with open(f\"{doc_id}.pdf\", 'wb') as pdf_file:\n pdf_file.write(r.content)\n\n# Open File.txt\nwith open('2021FD.txt') as f:\n for line in csv.reader(f, delimiter='\\t'):\n if line[1] == 'Peters':\n print(line)\n date = line[7]\n doc_id = line[8]\n\n r = requests.get(f\"{pdf_file_url}{doc_id}.pdf\")\n\n with open(f\"{doc_id}.pdf\", 'wb') as pdf_file:\n pdf_file.write(r.content)\n\n# https://pymupdf.readthedocs.io/en/latest/tutorial.html\n\n# doc = fitz.open(\"20018539.pdf\")\n\n# page = doc.load_page(page_id=0)\n\n# json_data = page.get_text('json')\n\n# json_data = json.loads(json_data)\n\n# print(json_data.keys())\n\n# for block in json_data['blocks']:\n# if 'lines' in block:\n# print(block)","sub_path":"nancy.py","file_name":"nancy.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"497123258","text":"#!/usr/bin/env python\n\n__author__ = \"B3mB4m\"\n__copyright__ = \"Copyright 2016 B3mB4m\"\n\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__email__ = \"b3mb4m@protonmail.com\"\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2016 B3mB4m\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished 
to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\n\n\nimport sys\nimport re\nfrom linux_32 import *\nfrom linux_64 import *\nfrom auxiliary import *\n\nclass stack(object):\n\tdef __init__(self, syntaxtype=None, target=None, bits=32, endian=\"little\"):\n\t\tself.bits = bits\n\t\tself.endian = endian.lower()\n\t\tself.mul = [4 if self.bits == 32 else 8]\n\t\tself.generate = target\n\t\tself.syntaxtype = [\"Intel\" if syntaxtype == None else syntaxtype][0]\n\t\tself.cout = 0\n\t\tself.registers = [\n\t\t\t\t\"rbx\",\n\t\t\t\t\"rdi\",\n\t\t\t\t\"rsi\",\n\t\t\t\t\"rdx\",\n\t\t\t\t\"r10\",\n\t\t\t\t\"r8\",\n\t\t\t\t\"r9\",\n\t\t]\n\t\tself.string = None\n\t\tself.raw = \"\"\n\t\tself.newlist = [x for x in range(len(str(self.generate)))]\n\t\tfor i in xrange(0, len(self.generate)):\n\t\t\tself.newlist[i] = self.generate[i]\n\t\t\tself.cout += 1;\t\n\n\t\tif \"/\" in self.generate:\n\t\t\tself.string = self.calculatorifpath( self.generate)\t\n\t\telse:\n\t\t\tself.string = self.calculatorifstring( self.generate)\n\n\n\n\t\tself.string = self.string[::-1]\n\t\tif self.bits == 64:\n\t\t\tself.register = {\n\t\t\t\t'r9':'',\n\t\t\t\t'r8':'',\n\t\t\t\t'r10':'',\n\t\t\t\t'rdx':'',\n\t\t\t\t'rsi':'',\n\t\t\t\t'rdi':'',\n\t\t\t\t'rbx':'',\n\t\t\t}\n\t\t\tfor x in self.string:\n\t\t\t\tif len(x) >= 2:\n\t\t\t\t\tx = x[::-1]\n\t\t\t\tif \"r9\" in x[0]:\n\t\t\t\t\tself.register['r9'] = 
self.rearrange(x)\n\t\t\t\telif \"r8\" in x[0]:\n\t\t\t\t\tself.register['r8'] = self.rearrange(x)\n\t\t\t\telif \"r10\" in x[0]:\n\t\t\t\t\tself.register['r10'] = self.rearrange(x)\n\t\t\t\telif \"rdx\" in x[0]:\n\t\t\t\t\tself.register['rdx'] = self.rearrange(x)\n\t\t\t\telif \"rsi\" in x[0]:\n\t\t\t\t\tself.register['rsi'] = self.rearrange(x)\n\t\t\t\telif \"rdi\" in x[0]:\n\t\t\t\t\tself.register['rdi'] = self.rearrange(x)\n\t\t\t\telif \"rbx\" in x[0]:\n\t\t\t\t\tself.register['rbx'] = self.rearrange(x)\t\n\t\t\tself.string = [x[::-1] for x in self.string]\n\n\t\telse:\n\t\t\tfor x in self.string:\n\t\t\t\tx = x.replace(\"push 0x\", \"\").replace(\"push 0x\", \"\").replace(\"pushw 0x\", \"\").replace(\"push $0x\", \"\").replace(\"push word 0x\", \"\").replace(\"push 0x\", \"\")\n\t\t\t\tsecond = re.findall(\"..?\", x)[::-1]\n\t\t\t\tself.raw += \"\\\\x68\"+\"\\\\x\"+\"\\\\x\".join(second)\n\t\n\n\t\tdel self.newlist\n\t\tdel self.mul\n\t\tdel self.bits\n\t\tdel self.endian\n\t\tdel self.generate\n\t\tdel self.syntaxtype\n\t\tdel self.cout\n\t\tdel self.registers\n\n\tdef calculatorifstring(self, string):\n\t\tself.empty = []\n\t\tif len(string) == self.mul[0]:\n\t\t\tif self.syntaxtype == \"Intel\":\n\t\t\t\tstack = \"push 0x%s\" % (string[::-1].encode('hex'))\n\t\t\telif self.syntaxtype == \"AT&T\":\n\t\t\t\tstack = \"push $0x%s\" % (string[::-1].encode('hex'))\n\t\t\treturn stack\n\n\t\telif len(string) % self.mul[0] == 0:\n\t\t\treturn self.splitter( string)\n\t\telse:\n\t\t\tdwordpart = string[0:(len(string)-len(string)%4)]\n\t\t\twordpart = string[(len(string)-len(string)%4):len(string)]\n\t\t\tself.empty += self.splitter( dwordpart)\n\t\t\tself.empty += self.splitter( wordpart, \"WordTime\")\n\t\t\treturn self.empty\n\n\n\tdef calculatorifpath(self, hexme):\n\t\tself.fill = \"/\"\n\t\tstack = []\n\t\tif len(hexme) % 4 == 0:\n\t\t\tself.fill = self.fill \n\t\telif len(hexme) % 4 == 1:\n\t\t\tself.fill = self.fill * 4\n\t\telif len(hexme)\t% 4 == 
2:\n\t\t\tself.fill = self.fill * 3\n\t\telif len(hexme) % 4 == 3:\n\t\t\tself.fill = self.fill * 2\n\n\t\tif self.cout == self.mul[0]: \n\t\t\tif self.bits == 64\t:\n\t\t\t\tfor x in self.registers:\n\t\t\t\t\tif self.syntaxtype == \"Intel\":\n\t\t\t\t\t\tstack.append(\"mov %s,0x%s\" % (x,hexme[::-1].encode('hex')))\n\t\t\t\t\telif self.syntaxtype == \"AT&T\":\n\t\t\t\t\t\tx = x.replace(\"r\", \"%r\")\n\t\t\t\t\t\tstack.append(\"movabs $0x%s,%s\" % (hexme[::-1].encode('hex'), x))\n\t\t\t\treturn stack\n\t\t\telse:\n\t\t\t\tif self.syntaxtype == \"Intel\":\n\t\t\t\t\tstack.append(\"push 0x%s\" % (hexme[::-1].encode('hex')))\n\t\t\t\telif self.syntaxtype == \"AT&T\":\n\t\t\t\t\tstack.append(\"push $0x%s\" % (hexme[::-1].encode('hex')))\n\t\t\t\treturn stack\n\t\t\t\t\n\t\telif self.cout > 4:\n\t\t\tif len(hexme) % 4 == 0:\n\t\t\t\treturn self.hextime(self.fill, hexme)\n\t\t\telif len(hexme) % 4 == 1:\n\t\t\t\treturn self.hextime(self.fill, hexme)\n\t\t\telif len(hexme) % 4 == 2:\n\t\t\t\treturn self.hextime(self.fill, hexme)\n\t\t\telif len(hexme) % 4 == 3:\n\t\t\t\treturn self.hextime(self.fill, hexme)\t\t\n\n\n\n\tdef hextime(self, putmein, hexme):\n\t\tfor i in xrange(0, len(hexme)):\n\t\t\tif hexme[i] == \"/\":\n\t\t\t\tself.newlist[i] = putmein\n\t\t\t\tfixstring = self.complie( self.newlist)\n\t\t\t\treturn self.splitter(fixstring)\n\n\n\tdef complie(self, givemethatstring):\n\t\tcompliestring = \"\"\n\t\tfor i in givemethatstring:\n\t\t\tcompliestring += i\n\t\treturn compliestring\t\t\t\n\n\n\tdef splitter(self, hexdump, pushword=\"None\"):\n\t\tself.mylist = []\n\t\tif pushword == \"None\":\n\t\t\tfixmesempai = re.findall('....?', hexdump)\n\t\t\tfor x in fixmesempai:\n\t\t\t\tself.syntaxtyper( str(x[::-1].encode(\"hex\")), \"dword\")\n\t\telse:\n\t\t\tdot = \".\" * len(hexdump)\n\t\t\tfixmesempai = re.findall(dot+'?', hexdump)\n\t\t\tfor x in fixmesempai[::-1]:\n\t\t\t\tif dot > 2:\n\t\t\t\t\tself.syntaxtyper( str(x[::-1].encode(\"hex\")), 
\"dword\")\n\t\t\t\telse:\n\t\t\t\t\tself.syntaxtyper( str(x[::-1].encode(\"hex\")), \"word\")\n\n\t\tif self.bits == 64:\n\t\t\tself.mylist = [x.replace(\"push 0x\", \"\").replace(\"push $0x\", \"\") for x in self.mylist]\t\n\t\t\tcache = []\n\t\t\tfor reg in self.registers:\n\t\t\t\tif self.syntaxtype == \"Intel\":\n\t\t\t\t\tcache.append([(\"mov {0},0x{1}\").format(reg,\"\".join(x)) for x in [self.mylist[x:x+2][::-1] for x in range(0, len(self.mylist), 2)]])\t\n\t\t\t\telse:\n\t\t\t\t\tcache.append([(\"movabs $0x{0},%{1}\").format(\"\".join(x),reg) for x in [self.mylist[x:x+2][::-1] for x in range(0, len(self.mylist), 2)]])\n\t\t\treturn cache\n\t\telse:\n\t\t\treturn self.mylist\n\n\n\tdef syntaxtyper(self, getstring, dwordORword):\n\t\tif self.syntaxtype == \"Intel\":\n\t\t\tif dwordORword == \"dword\":\n\t\t\t\tgetstring = \"push 0x%s\" % getstring\n\t\t\t\tself.mylist.append(getstring)\n\t\t\telif dwordORword == \"word\":\n\t\t\t\tgetstring = \"push word 0x%s\" % getstring\n\t\t\t\tself.mylist.append(getstring)\n\t\telif self.syntaxtype == \"AT&T\":\n\t\t\tif dwordORword == \"dword\":\n\t\t\t\tgetstring = \"push $0x%s\" % getstring\n\t\t\t\tself.mylist.append(getstring)\n\t\t\telif dwordORword == \"word\":\n\t\t\t\tgetstring = \"pushw 0x%s\" % getstring\n\t\t\t\tself.mylist.append(getstring)\n\n\n\tdef rearrange(self, relist):\n\t\tpadd = \"\"\n\t\tcheck = relist[0]\n\n\t\tif \"r9\" in check:\n\t\t\tpadd = \"r9\"\n\t\telif \"r8\" in check:\n\t\t\tpadd = \"r8\"\n\t\telif \"r10\" in check:\n\t\t\tpadd = \"r10\"\n\t\telif \"rdx\" in check:\n\t\t\tpadd = \"rdx\"\n\t\telif \"rsi\" in check:\n\t\t\tpadd = \"rsi\"\n\t\telif \"rdi\" in check:\n\t\t\tpadd = \"rdi\"\n\t\telif \"rbx\" in check:\n\t\t\tpadd = \"rbx\"\n\n\n\t\txor = \"xor {},{}\".format(padd, padd)\n\t\tpush = \"push {}\".format(padd)\n\t\tlea = \"lea {}, [rsp]\".format(padd)\n\n\t\tcache = []\n\t\tfor x in relist:\n\t\t\tif x == 
relist[0]:\n\t\t\t\tcache.append(xor)\n\t\t\t\tcache.append(x)\n\t\t\telse:\n\t\t\t\tcache.append(push)\n\t\t\t\tcache.append(x)\n\n\t\t\tif x == relist[-1]:\n\t\t\t\tcache.append(push)\t\t\n\t\t\t\tcache.append(lea)\n\t\treturn cache\n\n\t\t","sub_path":"stack/stack/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"546779774","text":"import os\n\ndef runSimWithDifferentAngles(wingProfile, wingAngle, wingAngleIncrement, iterations):\n\n\twingProfileWithoutSTL = \"\"\n\tlengthCount = len(wingProfile) - 4\n\tcount = 0\n\t\n\tfor i in wingProfile:\n\t\tif count != lengthCount:\n\t\t\twingProfileWithoutSTL += i\n\t\telse:\n\t\t\tbreak\n\t\tcount += 1\n\t\t\n\n\tfor i in range(iterations):\n\t\t\n\t\tfolderName = wingProfileWithoutSTL + \"_\" + str(wingAngle)\n\n\t\t# Make new simulaion directory\n\t\tos.system(\"cp -r /home/mm/Documents/originalFolders/wing /home/mm/Documents/TESTFOLDER_CFD/\" + folderName)\n\t\tos.chdir(folderName)\n\t\tos.system(\"./Allclean\")\n\t\t\n\t\t# Import and transform wing to current wing angle\n\t\tos.chdir(\"constant/triSurface\")\n\t\tos.system(\"cp -r /home/mm/Documents/originalModels/\" + wingProfile + \" wing.stl\")\n\t\tos.system(\"surfaceTransformPoints -rollPitchYaw '(0 \" + str(wingAngle) + \" 0)' wing.stl wing.stl\")\n\t\t\n\t\t# Run simulation\n\t\tos.chdir(\"/home/mm/Documents/TESTFOLDER_CFD/\" + folderName)\n\t\tos.system('./Allrun')\n\t\t\n\t\t# Read coeffisients and append to final txt file\n\t\tfilehandle = open(\"log.simpleFoam\", \"r\")\n\t\tlogFile = filehandle.readlines()\n\t\tfilehandle.close()\n\n\t\tcoeffisients = []\n\n\t\tfor i in range(len(logFile)-9, len(logFile)-4):\n\t\t\tcoeffisients.append(logFile[i])\n\n\t\tos.chdir(\"/home/mm/Documents/TESTFOLDER_CFD\")\n\t\t\n\t\tresults = open(\"RESULTS.txt\", \"a\")\n\t\tresults.write(folderName + \"\\n\")\n\t\tfor i in 
coeffisients:\n\t\t\tresults.write(i)\n\t\t\t\n\t\tresults.write(\"\\n\")\n\t\tresults.close()\n\t\t\n\t\t# Update wing angle\n\t\twingAngle = wingAngle + wingAngleIncrement\n","sub_path":"HanSolo/openFoam/backup/runSimWithDifferentAngles.py","file_name":"runSimWithDifferentAngles.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"640139866","text":"import discord\r\nimport asyncio\r\nimport discord.abc\r\nimport os\r\nfrom discord.ext import commands\r\nimport random\r\n\r\n\r\nBot = commands.Bot(command_prefix=\"%\")\r\n\r\nBot.remove_command(\"help\")\r\n\r\nchannel = Bot.get_channel(608265615446245378) # тестовый канал\r\nBot.load_extension(\"jishaku\")\r\nclient = discord.Client()\r\nJISHAKU_NO_UNDERSCORE = True\r\n\r\n\r\n@Bot.command()\r\nasync def load(ctx, extensions):\r\n Bot.load_extension(f'cogs.{extensions}')\r\n await ctx.send(\"loaded\")\r\n\r\n\r\n@Bot.command()\r\nasync def unload(ctx, extensions):\r\n Bot.unload_extension(f'cogs.{extensions}')\r\n await ctx.send(\"unloaded\")\r\n\r\n\r\n@Bot.command()\r\nasync def reload(ctx, extensions):\r\n Bot.unload_extension(f'cogs.{extensions}')\r\n Bot.load_extension(f'cogs.{extensions}')\r\n await ctx.send(\"reloaded\")\r\n\r\n# выведет аватарку пользователя, если пользовател не задан, выведет аватарку автора\r\n@Bot.command()\r\nasync def avatar(ctx, member: discord.Member = None):\r\n user = ctx.message.author if (member == None) else member\r\n embed = discord.Embed(title=f\"Аватарка пользователя {user}\",\r\n description=f\"[Ссылка на изображение]({user.avatar_url})\", color=user.color)\r\n embed.set_footer(text=f'Вызвал: {ctx.message.author}', icon_url=str(ctx.message.author.avatar_url))\r\n embed.set_image(url=user.avatar_url)\r\n await ctx.message.delete()\r\n await ctx.send(embed=embed)\r\n\r\n\r\n# пинг ответа бота\r\n@Bot.command(pass_context=True, name='ping', brief='Показать текущий 
пинг')\r\n@commands.cooldown(1, 1, commands.BucketType.user)\r\nasync def ping(ctx):\r\n await ctx.message.delete()\r\n em = discord.Embed(title='**Текущая задержка:**', description=f'``{Bot.ws.latency * 1000:.0f} ms``', color=0x42f4f4)\r\n em.set_author(name=f'Ping', icon_url=Bot.user.avatar_url)\r\n em.set_footer(text=f'{ctx.author}', icon_url=ctx.author.avatar_url)\r\n await ctx.send(embed=em)\r\n\r\n\r\n@Bot.command()\r\nasync def hueta(ctx, *, args):\r\n await ctx.message.delete()\r\n await ctx.send(f\"{''.join(random.sample(args,len(args)))}\")\r\n\r\n \r\nfor filename in os.listdir('./cogs'):\r\n if filename.endswith('.py'):\r\n Bot.load_extension(f'cogs.{filename[:-3]}')\r\n\r\nBot.run('')\r\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"364638074","text":"# Import libraries\r\nimport os.path\r\nfrom bottle import route, run, response, static_file, request, error, Bottle, template\r\nfrom json import dumps, loads, load\r\nfrom jsonschema import validate\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom CC_price_model import whlsle_prc\r\n\r\nclass CC_API:\r\n def __init__(self, port, local):\r\n self._app = Bottle()\r\n self._route() #Launch route\r\n self._local = local\r\n self._port = port\r\n\t\t# will want to load the model from pickle file. 
Or better to load in prediction function or separate function?\r\n if local:\r\n self._host = '127.0.0.1'\r\n else:\r\n self._host = '0.0.0.0'\r\n\t\t# Set default dict structure and types\r\n self.schema={\"type\" : \"object\", \"properties\" : {\r\n \"date\" : {\"type\" : \"string\"},\r\n\t\t \"date_sessiontime\": {\"type\" : \"number\"},\r\n\t\t \"month\": {\"type\" : \"number\"},\r\n\t\t \"retailer\": {\"type\" : \"string\"},\r\n\t\t \"retailer_city\": {\"type\" : \"string\"},\r\n\t\t \"processor\": {\"type\" : \"string\"},\r\n\t\t \"processor_city\": {\"type\" : \"string\"},\r\n\t\t \"producer\": {\"type\" : \"string\"},\r\n\t\t \"producer_city\": {\"type\" : \"string\"},\r\n\t\t \"retailer_to_seattle\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_tacoma\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_bellingham\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_olympia\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_bellingham\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_vancouver\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_kennewick\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_yakima\": {\"type\" : \"number\"},\r\n\t\t \"retailer_to_spokane\": {\"type\" : \"number\"},\r\n\t\t \"strain_display_name\": {\"type\" : \"string\"},\r\n\t\t \"product_name\": {\"type\" : \"string\"},\r\n\t\t \"product_size_g\": {\"type\" : \"number\"},\r\n\t\t \"distance\": {\"type\" : \"number\"},\r\n\t\t \"total_lb_sold\": {\"type\" : \"number\"},\r\n\t\t \"units_sold\": {\"type\" : \"number\"},\r\n\t\t \"lab_name\": {\"type\" : \"string\"},\r\n\t\t \"thc\": {\"type\" : \"number\"},\r\n\t\t \"cbd\": {\"type\" : \"number\"},\r\n\t\t \"moisture\": {\"type\" : \"number\"},\r\n\t\t \"comp_1\": {\"type\" : \"number\"},\r\n\t\t \"comp_2\": {\"type\" : \"number\"},\r\n\t\t \"comp_3\": {\"type\" : \"number\"},\r\n\t\t \"comp_4\": {\"type\" : \"number\"},\r\n },\r\n }\r\n\r\n def start(self):\r\n self._app.run(server='paste', host=self._host, port=self._port)\r\n\r\n 
def _route(self):\r\n self._app.hook('before_request')(self._strip_path) # Needed to prevent errors.\r\n self._app.route('/', callback=self._homepage) # We tell to the API to listen on \"/\" and execute the action \"_homepage()\" when \"/\" is called\r\n\r\n # Response to Post\r\n self._app.route('/generate_wholesale_price', method=\"POST\", callback=self._doAction)\r\n\r\n # Response to Get. Return error.\r\n self._app.route('/generate_wholesale_price', method=\"GET\", callback=self._noinputerror)\r\n\r\n def _strip_path(self):\r\n request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')\r\n\r\n def _homepage(self):\r\n return static_file(\"index.html\", root=os.path.join(os.getcwd(),'html')) # Setup homepage\r\n\r\n def _noinputerror(self):\r\n rv = {\"success\": False,\"payload\": {\"pplb_pretax\":\"error no json input\"}}\r\n return dumps(rv)\r\n\t\t\r\n def _doAction(self):\r\n try:\r\n insamp_info = request.json['sample_info']\r\n valcheck=validate(insamp_info,self.schema) #validate schema\r\n except:\r\n rv = {\"success\": False,\"payload\": {\"pplb_pretax\":\"incomplete json request\"}}\r\n return dumps(rv)\r\n response.content_type = 'application/json'\r\n outsam_pplb = whlsle_prc(insamp_info) #model goes here\r\n rv = {\"success\": True,\"payload\": {\"pplb_pretax\":outsam_pplb}}\r\n return dumps(rv) # We dump the dictionary into json file and return it.","sub_path":"CC_api.py","file_name":"CC_api.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"350918664","text":"from django.contrib import admin\n\n# Register your models here.\n\nfrom .models import FamilyInvite, Invite\n\nclass InviteInline(admin.StackedInline):\n\tmodel = Invite\n\textra = 0\n\nclass FamilyAdmin(admin.ModelAdmin):\n\tfieldsets = [\n\t\t(None, {'fields': ['name']}),\n\t\t('Don\\'t touch this', {'fields': ['code', 'override_code'], 'classes': ['collapse']}),\n\t]\n\tinlines 
= [InviteInline]\n\nadmin.site.register(FamilyInvite, FamilyAdmin)\n","sub_path":"maxmort/wedding/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"638019745","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom searchapp.models import Product\nfrom searchapp.forms import DataForm\nimport django_filters\n\n\n# Create your views here.\n\n\ndef home(request):\n if request.method==\"POST\":\n datas=request.POST.get('data')\n pro_id=Product.objects.filter(data__icontains=datas)[:20]\n\n if not pro_id:\n html = '

Data Not Found.


'\n return HttpResponse(html)\n else:\n myData=[]\n for i in pro_id:\n myData.append(i.data)\n html = '

Here i have printed Maximum 20 matched data.


%s ' % myData\n return HttpResponse(html)\n else:\n return render(request, 'home.html')\n","sub_path":"searchapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"359647458","text":"import pytest\nfrom pathlib import Path\n\nimport PySAM.ResourceTools as tools\nimport PySAM.Windpower as wp\n\n\ndef test_solar():\n solar = str(Path(__file__).parent / \"blythe_ca_33.617773_-114.588261_psmv3_60_tmy.csv\")\n data = tools.TMY_CSV_to_solar_data(solar)\n assert(data['lat'] == 33.61)\n assert(data['lon'] == -114.58)\n assert(data['dn'][7] == 262)\n assert(data['df'][7] == 16)\n assert(data['gh'][7] == 27)\n assert(data['tdry'][7] == pytest.approx(8.96, 0.1))\n\n\ndef test_wind():\n wind = str(Path(__file__).parent / \"AR Northwestern-Flat Lands.srw\")\n data = tools.SRW_to_wind_data(wind)\n assert(data['fields'] == [1, 2, 4, 3, 1, 2, 4, 3, 1, 2, 4, 3, 1, 2, 4, 3])\n assert(data['heights'] == [50, 50, 50, 50, 80, 80, 80, 80, 110, 110, 110, 110, 140, 140, 140, 140])\n assert(data['data'][0] == [9.587, 0.953420183, 173, 9.466, 10.247, 0.950086356, 174, 11.637, 10.627, 0.946649889,\n 175, 13.249, 10.997, 0.94340982, 175, 14.509])\n\n wind_model = wp.new()\n wind_model.Resource.wind_resource_data = data\n returned_data = wind_model.Resource.wind_resource_data['data'][0]\n for i, d in enumerate(data['data'][0]):\n assert(d == pytest.approx(returned_data[i], 1e-3))\n","sub_path":"tests/test_ResourceTools.py","file_name":"test_ResourceTools.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"341971277","text":"import pygame\npygame.font.init()\n\n#Colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0 , 0)\nGREEN = (0 , 255, 0)\nBLUE = (90, 90 , 90)\nYELLOW = (255,255,0)\nGREY = (185, 185, 185)\nBACKGROUND = (50, 50, 50)\n\n#General 
setings\nFRAMES_PER_SECOND = 60\nTILE_SELECTOR_SIZE = 74\nTOP_BORDER_HEIGHT = 30\n\n#Fonts\nOPTIONS_FONT = pygame.font.SysFont('comicsans', 30)","sub_path":"map_generator/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"217073069","text":"import sys, os\nimport numpy as np\nimport statsmodels.stats.multitest\n\n\nif (len(sys.argv) < 4):\n print(\"python3 fdrPASTAAResult.py PASTAA_result, outputFile, pvalue\")\nelse:\n PASTAA_result = sys.argv[1]\n output_ = sys.argv[2]\n p = float(sys.argv[3])\n\n\n TFs = []\n pvalues = []\n #read result\n with open(PASTAA_result, 'r') as result:\n for line in result:\n line = line.strip().split('\\t')\n TFs.append(line[0]) \n if float(line[1]) > 1:\n pvalues.append(1.0)\n else:\n pvalues.append(float(line[1]))\n\t\n #determine fdr\n\n rec, cor_pvalue = statsmodels.stats.multitest.fdrcorrection(pvals = pvalues, alpha = p,is_sorted = True)\n#\tprint(rec)\n#\tprint(cor_pvalue)\n counter = 0\n with open(output_, 'w') as o:\n o.write(\"TF\\tpvalue(fdr correction)\\n\")\n for i in rec:\n if i == True:\n o.write(TFs[counter] + '\\t' + str(cor_pvalue[counter]) + '\\n')\n counter+=1\n\t\n","sub_path":"src/fdrPASTAAResult.py","file_name":"fdrPASTAAResult.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"153705251","text":"#!/usr/bin/env python3\n\"\"\"\nOpenOCD RPC example, covered by GNU GPLv3 or later\nCopyright (C) 2014 Andreas Ortmann (ortmann@finf.uni-hannover.de)\n\"\"\"\n\nimport socket\nimport itertools\nimport sys\n\ndef hexify(data):\n return \"\" if data is None else (\"0x%08x\" % data)\n\nclass OpenOcd:\n COMMAND_TOKEN = '\\x1a'\n def __init__(self, verbose=False):\n self.verbose = verbose\n self.tclRpcIp = \"127.0.0.1\"\n self.tclRpcPort = 6666\n self.bufferSize = 4096\n\n self.sock = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n def __enter__(self):\n self.sock.connect((self.tclRpcIp, self.tclRpcPort))\n return self\n\n def __exit__(self, type, value, traceback):\n try:\n self.send(\"exit\")\n finally:\n self.sock.close()\n\n def send(self, cmd):\n \"\"\"Send a command string to TCL RPC. Return the result that was read.\"\"\"\n data = ('capture \"' + cmd + '\"' + OpenOcd.COMMAND_TOKEN).encode(\"utf-8\")\n if self.verbose:\n print(\"<- \", data)\n\n self.sock.send(data)\n return self._recv()\n\n def _recv(self):\n \"\"\"Read from the stream until the token (\\x1a) was received.\"\"\"\n data = bytes()\n while True:\n chunk = self.sock.recv(self.bufferSize)\n data += chunk\n if bytes(OpenOcd.COMMAND_TOKEN, encoding=\"utf-8\") in chunk:\n break\n\n if self.verbose:\n print(\"-> \", data)\n\n data = data.decode(\"utf-8\").strip()\n data = data[:-1] # strip trailing \\x1a\n\n return data.strip()\n\nif __name__ == \"__main__\":\n if sys.argv[1] == 'start':\n asm = [\n 0x000104b7, # lui s1, 0x10\n 0x0404849b, # addiw s1, s1, 0x40\n ]\n else:\n asm = [\n 0x000104b7, # lui s1, 0x10\n ]\n asm += [\n 0x7b149073, # csrw dpc, s1\n 0x7b002473, # csrr s0, dcsr\n 0x00346413, # ori s0, s0 3 # change to m-mode\n 0x7b041073, # csrw dcsr, s0\n 0x00100073 # ebreak\n ]\n\n disable_vm = [\n 0x00000493, # li s1, 0x0\n 0x18049073, # csrw satp, s1\n 0x00100073 # ebreak\n ]\n\n print(asm)\n\n progbuf_cmds = [(i + 0x20, data) for i, data in enumerate(asm)]\n with OpenOcd() as ocd:\n # Halt\n ocd.send(\"riscv dmi_write 0x10 0x80000001\")\n # Progbuf\n for reg, data in progbuf_cmds:\n ocd.send(\"riscv dmi_write {} {}\".format(reg, data))\n # Access Register\n ocd.send(\"riscv dmi_write 0x17 0x361001\") # regno = sp, postexec, transfer, 64-bit\n abstractcs = ocd.send(\"riscv dmi_read 0x16\")\n if abstractcs != \"0x10000002\":\n print(\"Exec error: abstracts \" + abstractcs)\n # Resume\n ocd.send(\"riscv dmi_write 0x10 
0x40000001\")\n","sub_path":"apps/rbb-server/ocd_rpc_example.py","file_name":"ocd_rpc_example.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"54470667","text":"from .utils import *\r\nfrom collections import Counter\r\nimport random\r\n\r\nclass Sequence:\r\n \"\"\"DNA sequence class. Defalt value: ATCG, DNA, No label\"\"\"\r\n\r\n def __init__(self, sequence=\"ATCG\", sequence_type=\"DNA\", label='No Label'):\r\n \"\"\"Sequence initialization, validation.\"\"\"\r\n self.sequence = sequence.upper()\r\n self.label = label.capitalize()\r\n self.sequence_type = sequence_type.upper()\r\n self.is_valid = self.__validate()\r\n assert self.is_valid, f\"Provided data does not seem to be a correct {self.sequence_type} sequence\"\r\n \r\n def __validate(self):\r\n \"\"\"Check the sequence to make sure it is a valid DNA string\"\"\"\r\n return set(NUCLEOTIDE_BASE[self.sequence_type]).issuperset(self.sequence)\r\n \r\n def get_sequence_type(self):\r\n \"\"\"Returns sequence type\"\"\"\r\n return self.sequence_type\r\n\r\n def get_sequence_info(self):\r\n \"\"\"Returns 4 strings. 
Full sequence information\"\"\"\r\n return f\"[Label]: {self.label}\\n[Sequence]: {self.sequence}\\n[Biotype]: {self.sequence_type}\\n[Length]: {len(self.sequence)}\"\r\n\r\n def generate_sequence(self, length=10, sequence_type=\"DNA\"):\r\n \"\"\"Generate a random DNA sequence, provided the length\"\"\"\r\n sequence = ''.join([random.choice(NUCLEOTIDE_BASE[sequence_type]) for x in range(length)])\r\n self.__init__(sequence, sequence_type, \"Randomly generated sequence\")\r\n \r\n def get_sequence(self):\r\n \"\"\"Get a sequence strand, sequence type, and sequence length from user\"\"\"\r\n isReady = 'y'\r\n while isReady == 'y':\r\n sequence_strand = input(\"Enter sequence strand: \")\r\n sequence_type = input(\"Enter sequence type: \")\r\n sequence_label = input(\"Enter sequence label: \")\r\n try:\r\n self.__init__(sequence_strand, sequence_type, sequence_label)\r\n isReady = 'n'\r\n except:\r\n isReady = 'y'\r\n finally:\r\n isReady = input(\"\\nWould you like to enter a new one? (y or n) \").lower()\r\n\r\n def set_sequence(self, sequence_strand, sequence_type, sequence_label):\r\n \"\"\"Set sequence strand, sequence type, and sequence length from user input\"\"\"\r\n self.__init__(sequence_strand, sequence_type, sequence_label)\r\n \r\n def upload_sequence(self, path):\r\n \"\"\"Upload sequence strand, sequence type, and sequence length from file input\"\"\"\r\n sequence_strand, sequence_type, sequence_label = read_FASTA(path)\r\n self.__init__(sequence_strand, sequence_type, sequence_label)\r\n\r\n def nucleotide_frequency(self):\r\n \"\"\"Count nucleotides in a given sequence. Return a dictionary\"\"\"\r\n return f\"[Base Frequency]: {dict(Counter(self.sequence))}\"\r\n\r\n def transcription(self):\r\n \"\"\"DNA -> RNA Transcription. 
Replacing Thymine with Uracil\"\"\"\r\n if self.sequence_type == \"DNA\":\r\n return f\"[Transcription]: {self.sequence.replace('T', 'U')}\"\r\n return \"Not a DNA sequence\"\r\n\r\n def reverse_complement(self):\r\n \"\"\"\r\n Swapping adenine with thymine and guanine with cytosine.\r\n Reversing newly generated string\r\n \"\"\"\r\n if self.sequence_type == \"DNA\":\r\n mapping = str.maketrans('ATCG', 'TAGC')\r\n else:\r\n mapping = str.maketrans('AUCG', 'UAGC')\r\n return f\"[Reverse Complement]: {self.sequence.translate(mapping)[::-1]}\"\r\n\r\n def gc_content(self):\r\n \"\"\"GC Content in a DNA/RNA sequence\"\"\"\r\n return f\"[GC CONTENT]: {round((self.sequence.count('C') + self.sequence.count('G')) / len(self.sequence) * 100)}%\"\r\n\r\n def gc_content_subsec(self, k=20):\r\n \"\"\"GC Content in a DNA/RNA sub-sequence length k. k=20 by default\"\"\"\r\n res = []\r\n for i in range(0, len(self.sequence) - k + 1, k):\r\n subseq = self.sequence[i:i + k]\r\n res.append(\r\n round((subseq.count('C') + subseq.count('G')) / len(subseq) * 100))\r\n return res\r\n\r\n def translate_seq(self, init_pos=0):\r\n \"\"\"Translates a DNA sequence into an aminoacid sequence\"\"\"\r\n if self.sequence_type == \"DNA\":\r\n return [DNA_CODONS[self.sequence[pos:pos + 3]] for pos in range(init_pos, len(self.sequence) - 2, 3)]\r\n elif self.sequence_type == \"RNA\":\r\n return [RNA_CODONS[self.sequence[pos:pos + 3]] for pos in range(init_pos, len(self.sequence) - 2, 3)]\r\n\r\n def codon_usage(self, aminoacid):\r\n \"\"\"Provides the frequency of each codon encoding a given aminoacid in a DNA sequence\"\"\"\r\n tmpList = []\r\n if self.sequence_type == \"DNA\":\r\n for i in range(0, len(self.sequence) - 2, 3):\r\n if DNA_CODONS[self.sequence[i:i + 3]] == aminoacid:\r\n tmpList.append(self.sequence[i:i + 3])\r\n\r\n elif self.sequence_type == \"RNA\":\r\n for i in range(0, len(self.sequence) - 2, 3):\r\n if RNA_CODONS[self.sequence[i:i + 3]] == aminoacid:\r\n 
tmpList.append(self.sequence[i:i + 3])\r\n\r\n freqDict = dict(Counter(tmpList))\r\n totalWight = sum(freqDict.values())\r\n for sequence in freqDict:\r\n freqDict[sequence] = round(freqDict[sequence] / totalWight, 2)\r\n return freqDict\r\n\r\n def gen_reading_frames(self):\r\n \"\"\"Generate the six reading frames of a DNA sequence, including reverse complement\"\"\"\r\n frames = []\r\n frames.append(self.translate_seq(0))\r\n frames.append(self.translate_seq(1))\r\n frames.append(self.translate_seq(2))\r\n tmp_seq = Sequence(self.reverse_complement().split(\" \")[-1], self.sequence_type)\r\n frames.append(tmp_seq.translate_seq(0))\r\n frames.append(tmp_seq.translate_seq(1))\r\n frames.append(tmp_seq.translate_seq(2))\r\n del tmp_seq\r\n return frames\r\n\r\n def proteins_from_rf(self, aa_seq):\r\n \"\"\"Compute all possible proteins in an aminoacid sequence and return a list of possible proteins\"\"\"\r\n current_prot = []\r\n proteins = []\r\n for aa in aa_seq:\r\n if aa == \"_\":\r\n # STOP accumulating amino acids if _ - STOP was found\r\n if current_prot:\r\n for p in current_prot:\r\n proteins.append(p)\r\n current_prot = []\r\n else:\r\n # START accumulating amino acids if M - START was found\r\n if aa == \"M\":\r\n current_prot.append(\"\")\r\n for i in range(len(current_prot)):\r\n current_prot[i] += aa\r\n return proteins\r\n\r\n def all_proteins_from_orfs(self, startReadPos=0, endReadPos=0, ordered=False):\r\n \"\"\"Compute all possible proteins for all open reading frames\"\"\"\r\n \"\"\"Protine Search DB: https://www.ncbi.nlm.nih.gov/nuccore/NM_001185097.2\"\"\"\r\n \"\"\"API can be used to pull protein info\"\"\"\r\n if endReadPos > startReadPos:\r\n tmp_seq = Sequence(self.sequence[startReadPos: endReadPos], self.sequence_type)\r\n rfs = tmp_seq.gen_reading_frames()\r\n else:\r\n rfs = self.gen_reading_frames()\r\n\r\n res = []\r\n for rf in rfs:\r\n prots = self.proteins_from_rf(rf)\r\n for p in prots:\r\n res.append(p)\r\n\r\n if ordered:\r\n 
return sorted(res, key=len, reverse=True)\r\n return res","sub_path":"genesequence/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"321271860","text":"#coding=utf-8\nimport cv2\nimport random\nimport numpy as np\nimport semantic.visualization_utils as smv\nfrom PIL import Image\n\ncolors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef draw_rectangle(img, p1, p2, color=[255, 0, 0], thickness=2):\n cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)\n\ndef draw_bbox(img, bbox, shape, label, color=[255, 0, 0], thickness=2):\n p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))\n p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))\n cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)\n p1 = (p1[0]+15, p1[1])\n cv2.putText(img, str(label), p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)\n\ndef get_text_pos_fn(pmin,pmax,bbox,label):\n if bbox[0]<0.1:\n p1 = (pmax[0],pmin[1])\n else:\n p1 = pmin\n return (p1[0]-5,p1[1])\n\ndef random_color_fn(label):\n del label\n nr = len(colors_tableau)\n return colors_tableau[random.randint(0,nr-1)]\n\ndef default_text_fn(label,score):\n return str(label)\n\n'''\ncolor_fn: tuple(3) (*f)(label)\ntext_fn: str (*f)(label,score)\nget_text_pos_fn: tuple(2) (*f)(lt_corner,br_corner,bboxes,label)\n'''\ndef draw_bboxes(img, classes, scores=None, bboxes=None,\n color_fn=random_color_fn,\n 
text_fn=default_text_fn,\n get_text_pos_fn=get_text_pos_fn,\n thickness=4,show_text=True,font_scale=1.2,text_color=(0.,255.,0.),\n is_relative_coordinate=True,\n is_show_text=None,\n fill_bboxes=False):\n bboxes_thickness = thickness if not fill_bboxes else -1\n if is_relative_coordinate:\n shape = img.shape\n else:\n shape = [1.0,1.0]\n if len(img.shape)<2:\n print(f\"Error img size {img.shape}.\")\n return img\n img = np.array(img)\n if scores is None:\n scores = np.ones_like(classes,dtype=np.float32)\n if not isinstance(bboxes,np.ndarray):\n bboxes = np.array(bboxes)\n for i in range(bboxes.shape[0]):\n try:\n bbox = bboxes[i]\n if color_fn is not None:\n color = color_fn(classes[i])\n else:\n color = (random.random()*255, random.random()*255, random.random()*255)\n p10 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))\n p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))\n cv2.rectangle(img, p10[::-1], p2[::-1], color, bboxes_thickness)\n if show_text and text_fn is not None:\n f_show_text = True\n if is_show_text is not None:\n f_show_text = is_show_text(p10,p2)\n\n if f_show_text:\n s = text_fn(classes[i], scores[i])\n p = get_text_pos_fn(p10,p2,bbox,classes[i])\n cv2.putText(img, s, p[::-1], cv2.FONT_HERSHEY_DUPLEX,\n fontScale=font_scale,\n color=text_color,\n thickness=1)\n except:\n bbox = bboxes[i]\n p10 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))\n p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))\n if color_fn is not None:\n color = color_fn(classes[i])\n else:\n color = (random.random()*255, random.random()*255, random.random()*255)\n print(\"Error:\",img.shape,shape,bboxes[i],classes[i],p10,p2,color,thickness)\n \n\n return img\n\ndef draw_legend(labels,text_fn,img_size,color_fn,thickness=4,font_scale=1.2,text_color=(0.,255.,0.),fill_bboxes=True):\n '''\n Generate a legend image\n Args:\n labels: list[int] labels\n text_fn: str fn(label) trans label to text\n img_size: (H,W) the legend image size, the legend is drawed in 
veritical direction\n color_fn: tuple(3) fn(label): trans label to RGB color\n thickness: text thickness\n font_scale: font size\n text_color: text color\n Returns:\n\n '''\n boxes_width = max(img_size[1]//3,20)\n boxes_height = img_size[0]/(2*len(labels))\n def lget_text_pos_fn(pmin, pmax, bbox, label):\n p1 = (pmax[0]+5, pmax[1]+5)\n return p1\n\n bboxes = []\n for i,l in enumerate(labels):\n xmin = 5\n xmax = xmin+boxes_width\n ymin = int((2*i+0.5)*boxes_height)\n ymax = ymin + boxes_height\n bboxes.append([ymin,xmin,ymax,xmax])\n img = np.ones([img_size[0],img_size[1],3],dtype=np.uint8)\n def _text_fn(x,_):\n return text_fn(x)\n return draw_bboxes(img,labels,bboxes=bboxes,color_fn=color_fn,text_fn=_text_fn,\n get_text_pos_fn=lget_text_pos_fn,\n thickness=thickness,\n show_text=True,\n font_scale=font_scale,\n text_color=text_color,\n is_relative_coordinate=False,\n fill_bboxes=fill_bboxes)\n\n\n\n'''\nmask only include the area within bbox\n'''\ndef draw_bboxes_and_mask(img,classes,scores,bboxes,masks,color_fn=None,text_fn=None,thickness=4,show_text=False,fontScale=0.8):\n masks = masks.astype(np.uint8)\n for i,bbox in enumerate(bboxes):\n if color_fn is not None:\n color = list(color_fn(classes[i]))\n else:\n color = [random.random()*255, random.random()*255, random.random()*255]\n x = int(bbox[1]*img.shape[1])\n y = int(bbox[0]*img.shape[0])\n w = int((bbox[3]-bbox[1])*img.shape[1])\n h = int((bbox[2]-bbox[0])*img.shape[0])\n if w<=0 or h<=0:\n continue\n mask = masks[i]\n mask = cv2.resize(mask,(w,h))\n mask = np.expand_dims(mask,axis=-1)\n img[y:y+h,x:x+w,:] = (img[y:y+h,x:x+w,:]*(np.array([[[1]]],dtype=np.float32)-mask*0.4)).astype(np.uint8)+(mask*color*0.4).astype(np.uint8)\n\n img = draw_bboxes(img,classes,scores,bboxes,\n color_fn=color_fn,\n text_fn=text_fn,\n thickness=thickness,\n show_text=show_text,\n fontScale=fontScale)\n return img\n\n'''\nmask include the area of whole image\n'''\ndef 
draw_bboxes_and_maskv2(img,classes,scores,bboxes,masks,color_fn=None,text_fn=None,thickness=4,\n show_text=False,\n fontScale=0.8):\n if not isinstance(masks,np.ndarray):\n masks = np.array(masks)\n masks = masks.astype(np.uint8)\n for i,bbox in enumerate(bboxes):\n if color_fn is not None:\n color = list(color_fn(classes[i]))\n else:\n color = [random.random()*255, random.random()*255, random.random()*255]\n x = int(bbox[1]*img.shape[1])\n y = int(bbox[0]*img.shape[0])\n w = int((bbox[3]-bbox[1])*img.shape[1])\n h = int((bbox[2]-bbox[0])*img.shape[0])\n if w<=0 or h<=0:\n continue\n mask = masks[i]\n img = smv.draw_mask_on_image_array(img,mask,color=color,alpha=0.4)\n\n img = draw_bboxes(img,classes,scores,bboxes,\n color_fn=color_fn,\n text_fn=text_fn,\n thickness=thickness,\n show_text=show_text,\n fontScale=fontScale)\n return img\n\ndef convert_semantic_to_rgb(semantic,color_map,return_nparray=False):\n '''\n convert semantic label map to rgb PIL image or a np.ndarray\n Args:\n semantic: [H,W] label value\n color_map: list[int], [r0,g0,b0,r1,g1,b1,....]\n Returns:\n image: [H,W,3]\n '''\n new_mask = Image.fromarray(semantic.astype(np.uint8)).convert('P')\n new_mask.putpalette(color_map)\n if return_nparray:\n return np.array(new_mask.convert('RGB'))\n return new_mask\n\ndef draw_semantic_on_image(image,semantic,color_map,alpha=0.4,ignored_label=0):\n '''\n draw semantic on image\n Args:\n image:\n semantic: [H,W] label value\n color_map: list[int], [r0,g0,b0,r1,g1,b1,....]\n alpha:\n ignored_label:\n Returns:\n return image*(1-alpha)+semantic+alpha\n '''\n mask = convert_semantic_to_rgb(semantic,color_map=color_map,return_nparray=True)\n new_img = image.astype(np.float32)*(1-alpha)+mask.astype(np.float32)*alpha\n new_img = np.clip(new_img,0,255).astype(np.uint8)\n pred = np.expand_dims(semantic!=ignored_label,axis=-1)\n new_img = np.where(pred,new_img,image)\n return 
new_img\n","sub_path":"object_detection2/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"91484356","text":"### IMPORTS ###\n\nimport os\nimport re\nimport numpy as np\n\n### CHOOSE TARGET GENOME SUCH THAT ONLY THE APPROPRIATE FASTA AND PAXDB IS UNHASHED ###\n\n#A. ferrooxidans - CP001219\nfasta = './2_FASTA_CDS_paxdb/CP001219.txt'\nPaxDb = './3_PaxDb_data/1. A.ferrooxidans - 243159-WHOLE_ORGANISM-integrated.txt'\n\n#B. anthracis - AE017225\n# fasta = './2_FASTA_CDS_paxdb/AE017225.txt'\n# PaxDb = './3_PaxDb_data/2. B.anthracis - 260799-GPM_201408.txt'\n\n#B. henselae - BX897699\n# fasta = './2_FASTA_CDS_paxdb/BX897699.txt'\n# PaxDb = './3_PaxDb_data/3. B.henselae - 283166-Bhenselae_Albrethsen_2013.txt'\n\n#B. thetaiotaomicron - AE015928\n# fasta = './2_FASTA_CDS_paxdb/AE015928.txt'\n# PaxDb = './3_PaxDb_data/4. B.thetaiotaomicron VPI-5482 - 226186-GPM_201408.txt'\n\n#C. jejuni - AL111168\n# fasta = './2_FASTA_CDS_paxdb/AL111168.txt'\n# PaxDb = './3_PaxDb_data/5. C.jejuni - 192222-Campylobacter_jejuni_SC_biomart_17684_E__1reps.txt'\n\n#D. deserti - CP001114\n# fasta = './2_FASTA_CDS_paxdb/CP001114.txt'\n# PaxDb = './3_PaxDb_data/6. D.deserti - 546414-DeinococcusDeserti_PRIDE.txt'\n\n#D. vulgaris - AE017285\n# fasta = './2_FASTA_CDS_paxdb/AE017285.txt'\n# PaxDb = './3_PaxDb_data/7. D.vulgaris - 882-WHOLE_ORGANISM-integrated.txt'\n\n#E. coli - U00096\n# fasta = './2_FASTA_CDS_paxdb/U00096.txt'\n# PaxDb = './3_PaxDb_data/8. E.coli - 511145-Ecoli_iBAQ_arike_2012.txt'\n\n#H. pylori - AE000511\n# fasta = './2_FASTA_CDS_paxdb/AE000511.txt'\n# PaxDb = './3_PaxDb_data/9*. H.pylori 26695 - 85962-WHOLE_ORGANISM-integrated.txt'\n\n#L. interrogans - AE016823\n# fasta = './2_FASTA_CDS_paxdb/AE016823.txt'\n# PaxDb = './3_PaxDb_data/10. L.interrogans - 267671-PA_201308.txt'\n\n#L. 
lactis - AE005176\n# fasta = './2_FASTA_CDS_paxdb/AE005176.txt'\n# PaxDb = './3_PaxDb_data/11*. L.lactis - 272623-Lactococcus_lactis_Lahtvee_2014.txt'\n\n#L. pneumophila - AE017354\n# fasta = './2_FASTA_CDS_paxdb/AE017354.txt'\n# PaxDb = './3_PaxDb_data/12*. L.pneumophila - 272624-GPM_201408.txt'\n\n#M. aeruginosa - AP009552\n# fasta = './2_FASTA_CDS_paxdb/AP009552.txt'\n# PaxDb = './3_PaxDb_data/13. M.aeruginosa - 449447-WHOLE_ORGANISM-integrated.txt'\n\n#M. pneumoniae - CP002077*\n#fasta = './2_FASTA_CDS_paxdb/CP002077.txt'\n#PaxDb = './3_PaxDb_data/14. M.pneumoniae - 722438-Mycoplasma_pneumoniae_M129_Kuhner_et_al_Science2009.txt'\n\n#M. tuberculosis - AL123456\n# fasta = './2_FASTA_CDS_paxdb/AL123456.txt'\n# PaxDb = './3_PaxDb_data/15*. M.tuberculosis - 83332-WHOLE_ORGANISM-integrated.txt'\n\n#N. meningitidis - AE002098\n# fasta = './2_FASTA_CDS_paxdb/AE002098.txt'\n# PaxDb = './3_PaxDb_data/16*. N.meningitidis - 122586-GPM_201408.txt'\n\n#S. aureus - BA000017\n# fasta = './2_FASTA_CDS_paxdb/BA000017.txt'\n# PaxDb = './3_PaxDb_data/17*. S.aureus Mu50 - 158878-GPM_201408.txt'\n\n#S. oneidensis - AE014299\n# fasta = './2_FASTA_CDS_paxdb/AE014299.txt'\n# PaxDb = './3_PaxDb_data/18. S.oneidensis MR-1 - 211586-GPM_201408.txt'\n\n#S. pyogenes - AE004092\n# fasta = './2_FASTA_CDS_paxdb/AE004092.txt'\n# PaxDb = './3_PaxDb_data/19*. S.pyogenes M1GAS - 160490-StreptococcusPyogenes_M1GAS_PRIDE.txt'\n\n#S. enterica - AE006468\n# fasta = './2_FASTA_CDS_paxdb/AE006468.txt'\n# PaxDb = './3_PaxDb_data/20*. S.typhimurium - 99287-WHOLE_ORGANISM-integrated.txt'\n\n#Synechocystis sp. - AP012205\n# fasta = './2_FASTA_CDS_paxdb/AP012205.txt'\n# PaxDb = './3_PaxDb_data/21*. Synechocystis.sp. 6803 - 1148-SynechocystisSp_strain_PRIDE.txt'\n\n#Y. pestis - AL590842\n# fasta = './2_FASTA_CDS_paxdb/AL590842.txt'\n# PaxDb = './3_PaxDb_data/22*. 
Y.pestis - 214092-Yersinia_pestis_SC_biomart_18099_O__3reps.txt'\n\n### FUNCTIONS ###\n\ndef main(fasta, PaxDb):\n\n #Get file paths and load data\n raw_fasta = open(os.path.abspath(fasta)).read()\n raw_paxdb = open(os.path.abspath(PaxDb)).read()\n\n #Obtain Accession\n g = raw_fasta.split('>')\n g1 = g[1].split(';')\n accession = g1[0]\n\n #Get gene IDs for two groups\n HEGs, LEGs = get_expression(raw_paxdb)\n\n #If FASTA gene is in either list, append to two new output strings\n HEGs_fasta, LEGs_fasta = get_new_txt(raw_fasta, HEGs, LEGs)\n\n #Create two new CSVs\n get_CSVs(HEGs_fasta, LEGs_fasta, accession)\n\n\ndef get_expression(raw_paxdb):\n\n ''' Obtain the IDs of HEGs and LEGs '''\n\n #Create nested list for pax data, incl locus tag and protein expression\n raw = []\n\n #Create list, through which we can access the information for each gene\n if 'raw_spectral_count' in raw_paxdb:\n i = raw_paxdb.split('raw_spectral_count\\n')\n i1 = i[1].split('\\n')\n del i1[-1]\n\n for i in i1:\n chunked = i.split('\\t')\n internal_id = chunked[0]\n external_id = chunked[1]\n abundance = chunked[2]\n raw_count = chunked[3]\n raw.append([internal_id, external_id, float(abundance)])\n\n else:\n i = raw_paxdb.split('abundance\\n')\n i1 = i[1].split('\\n')\n del i1[-1]\n\n for i in i1:\n chunked = i.split('\\t')\n internal_id = chunked[0]\n external_id = chunked[1]\n abundance = chunked[2]\n raw.append([internal_id, external_id, float(abundance)])\n\n srted = sorted(raw, key = lambda x: float(x[2]))\n\n #Calculate top and bottom quartiles\n abundances = []\n\n for i in srted:\n abundances.append(i[2])\n\n array = np.array(abundances)\n top = np.percentile(array, 75)\n bottom = np.percentile(array, 25)\n\n #Create two lists - HEGs (top 25%) and LEGs (bottom 25%)\n HEGs_values = []\n HEGs_IDs = []\n\n for i in srted:\n if i[2] >= top:\n HEGs_values.append(i[2])\n\n for i in srted:\n if i[2] in HEGs_values:\n HEGs_IDs.append(i[1])\n\n LEGs_values = []\n LEGs_IDs = []\n\n for i in 
srted:\n if i[2] <= bottom:\n LEGs_values.append(i[2])\n\n for i in srted:\n if i[2] in LEGs_values:\n LEGs_IDs.append(i[1])\n\n return HEGs_IDs, LEGs_IDs\n\ndef get_new_txt(raw_fasta, HEGs, LEGs):\n\n ''' Build new FASTA files - one for HEGs and one for LEGs for each genome '''\n\n HEGs_fasta = ''\n LEGs_fasta = ''\n\n #Refine HEGs and LEGs to just numbers and letters\n refined_HEGs = [''.join(re.findall('[a-zA-Z\\d]', i)) for i in HEGs]\n refined_LEGs = [''.join(re.findall('[a-zA-Z\\d]', i)) for i in LEGs]\n\n #Get chunks to iterate through\n chunks = raw_fasta.split('>')\n chunks.pop(0)\n\n htest = []\n ltest = []\n\n for i in chunks:\n a = i.split('\\n')\n a1 = a[0].split(';')\n id = a1[4]\n refined_id = ''.join(re.findall('[a-zA-Z\\d]', id))\n\n for j in refined_HEGs:\n if refined_id in j:\n HEGs_fasta += \">\" + i\n\n for k in refined_LEGs:\n if refined_id in k:\n LEGs_fasta += \">\" + i\n\n return HEGs_fasta, LEGs_fasta\n\ndef get_CSVs(HEGs_fasta, LEGs_fasta, accession):\n\n ''' Create output files '''\n\n subdir1 = '2_FASTA_CDS_hegs'\n filename1 = accession + '.txt'\n filepath1 = os.path.join(subdir1, filename1)\n f1 = open(filepath1, 'a')\n f1.write(HEGs_fasta)\n f1.close()\n\n subdir2 = '2_FASTA_CDS_legs'\n filename2 = accession + '.txt'\n filepath2 = os.path.join(subdir2, filename2)\n f2 = open(filepath2, 'a')\n f2.write(LEGs_fasta)\n f2.close()\n\n### RUN ###\n\nif __name__ == '__main__':\n main(fasta, PaxDb)\n","sub_path":"0_Python_scripts/1.5_Filtering_cds_HEGs_LEGs.py","file_name":"1.5_Filtering_cds_HEGs_LEGs.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"609018397","text":"# LER DOIS NUMEROS REIS E IMPRIMIR O QUADRADO DA DIFERENÇA DO PRIMEIRO VALO PELO SEGUNDO E A DIFERENÇA DOS QUADRADOS.\n\nnum1 = float(input(\"Digite o primeiro numero: \"))\nnum2 = float(input(\"Digite o segundo numero: \"))\nd = float\nq = float\nd = (num1 - num2)**2\nq = 
num1**2 - num2**2\nprint(\"Diferença = \",int(d))\nprint(\"Diferença do quadrado = \",int(q))\n","sub_path":"Prog_77_pag_56.py","file_name":"Prog_77_pag_56.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"481744106","text":"\n# coding: utf-8\n\n# In[6]:\n\nimport sys\n\n\n# In[80]:\n\nf1=sys.argv[1]\nf2=sys.argv[2]\n\n\n# In[7]:\n\n#f1=\"Big5-ZhuYin.map\"\n#f2=\"test.map\"\n\n\n# In[8]:\n\nzy_big5=dict()\nwith open(f1,'r') as f:\n for line in f:\n line= line.split()\n big5=line[0]\n tokens=line[1].decode('big5').split('/')\n zys=set([x[0].encode('big5') for x in tokens])\n for zy in zys:\n if zy in zy_big5:\n zy_big5[zy].add(big5)\n else:\n zy_big5[zy]=set([big5])\n\n\n# In[9]:\n\nbig5set=set([])\nwith open(f2,'w') as f:\n for zy in zy_big5:\n f.write(zy+' '+' '.join(zy_big5[zy])+'\\n')\n for big5 in zy_big5[zy]:\n if big5 not in big5set:\n f.write(big5+' '+big5+'\\n')\n big5set.add(big5)\n\n\n# In[ ]:\n\n\n\n","sub_path":"sources/dsp_hw3/dsp_hw3_trigram/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"343226694","text":"import sys\n\nfrom flask_migrate import Migrate\nfrom flask_migrate import MigrateCommand\nfrom flask_script import Manager\n\nfrom shopyoapi.init import db\nfrom app import app\n\nfrom shopyoapi.cmd import clean\nfrom shopyoapi.cmd import initialise\nfrom shopyoapi.cmd import create_module\nfrom shopyoapi.database import autoload_models\n\nmigrate = Migrate(app, db, compare_type=True)\nmanager = Manager(app)\n\nmanager.add_command(\"db\", MigrateCommand)\n\n\ndef runserver():\n app.run()\n\n\ndef rundebug():\n app.run(debug=True, host=\"0.0.0.0\")\n\n\ndef custom_commands(args):\n # non migration commands\n if args[1] != \"db\":\n if args[1] == \"initialise\":\n autoload_models()\n initialise()\n elif args[1] == 
\"clean\":\n clean()\n elif args[1] == \"runserver\":\n runserver()\n elif args[1] == \"rundebug\":\n rundebug()\n elif args[1] == \"test\":\n print(\"test ok\")\n elif args[1] == 'startapp' and args[2]:\n create_module(args[2])\n sys.exit()\n elif args[1] == \"db\":\n autoload_models()\n\n\nif __name__ == \"__main__\":\n custom_commands(sys.argv)\n manager.run()\n","sub_path":"shopyo/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"261807930","text":"import importlib\nimport os\nimport sys\nfrom collections import OrderedDict\n\nimport easytrader\nfrom logbook import Logger, StreamHandler\n\nfrom .clock_engine import *\nfrom .event_engine import *\nfrom .event_type import EventType\nfrom .quotation_engine import *\nfrom strategy_wrapper import ProcessWrapper\n\nlog = Logger(os.path.basename(__file__))\nStreamHandler(sys.stdout).push_application()\n\nPY_VERSION = sys.version_info[:2]\nif PY_VERSION < (3, 5):\n raise Exception('Python 版本需要 3.5 或以上, 当前版本为 %s.%s 请升级 Python' % PY_VERSION)\n\n\nclass MainEngine:\n \"\"\"主引擎,负责行情 / 事件驱动引擎 / 交易\"\"\"\n\n def __init__(self, broker, need_data='me.json'):\n \"\"\"初始化事件 / 行情 引擎并启动事件引擎\n \"\"\"\n self.user = easytrader.use(broker)\n self.user.prepare(need_data)\n\n self.event_engine = EventEngine()\n self.quotation_engine = Quotation(self.event_engine)\n self.clock_engine = ClockEngine(self.event_engine)\n\n self.event_engine.register(EventType.TIMER, self.second_click)\n\n # 保存读取的策略类\n self.strategies = OrderedDict()\n self.process_list = list()\n\n print('启动主引擎')\n\n def second_click(self, event):\n pass\n\n def start(self):\n \"\"\"启动主引擎\"\"\"\n self.event_engine.start()\n self.quotation_engine.start()\n self.clock_engine.start()\n\n def stop(self):\n self.event_engine.stop()\n self.clock_engine.stop()\n for p in self.process_list:\n p.stop()\n\n def load_strategy(self):\n \"\"\"动态加载策略\"\"\"\n 
s_folder = 'strategies'\n strategies = os.listdir(s_folder)\n strategies = filter(lambda file: file.endswith('.py') and file != '__init__.py', strategies)\n importlib.import_module(s_folder)\n for strategy_file in strategies:\n strategy_module_name = os.path.basename(strategy_file)[:-3]\n log.info('加载策略: %s' % strategy_module_name)\n strategy_module = importlib.import_module('.' + strategy_module_name, 'strategies')\n strategy = getattr(strategy_module, 'Strategy')(self.user)\n process = ProcessWrapper(strategy)\n self.process_list.append(process)\n for process in self.process_list:\n\n self.event_engine.register(EventType.QUOTATION, process.on_event)\n self.event_engine.register(EventType.CLOCK, process.on_clock)\n log.info('加载策略完毕')\n","sub_path":"easyquant/main_engine.py","file_name":"main_engine.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"615978192","text":"\"\"\"\nLazy-evaluated prime lib made for Project Euler, v2\nAuthor: Spaceoff\n\"\"\"\nfrom math import sqrt, ceil\n\nprime_list = list([2])\nprime_set = set(prime_list)\n\ndef prime_cur():\n return prime_list[len(prime_list) - 1]\n\ndef prime_str():\n return str(prime_list)\n\ndef root(x):\n return ceil(sqrt(x)) + 1\n\ndef add_prime(x):\n prime_list.append(x)\n prime_set.add(x)\n\ndef is_next_prime(x):\n div_max = root(x) # max divisor before repetition\n for prime in prime_list:\n if prime > div_max:\n break\n elif x % prime == 0:\n return False\n return True\n\ndef compute_to(x):\n cur = prime_cur()\n while cur < x:\n cur += 1\n if is_next_prime(cur):\n add_prime(cur)\n\ndef compute_to_ordiv(x, val):\n cur = prime_cur()\n while cur < x:\n cur += 1\n if is_next_prime(cur):\n add_prime(cur)\n if val % cur == 0:\n return False\n return True\n\ndef compute_to_root(x):\n compute_to(root(x))\n\ndef compute_n(n):\n cur = prime_cur()\n while len(prime_list) < n:\n cur += 1\n if is_next_prime(cur):\n 
add_prime(cur)\n\ndef is_prime(x):\n if x in prime_set:\n return True\n elif x <= prime_cur():\n return False\n else:\n for prime in prime_list:\n if x % prime == 0:\n return False\n return compute_to_ordiv(x//2, x)\n\ndef prime_n(n):\n compute_n(n)\n return prime_list[n-1]\n\ndef prime_factors(x):\n if is_prime(x): return [x]\n\n factors = list()\n div_max = root(x)\n for i in range(2, div_max):\n if x % i == 0:\n if is_prime(i):\n factors.append(i)\n\n inv = x // i\n if is_prime(inv):\n factors.append(inv)\n return factors\n","sub_path":"Project Euler (spoiler warning)/problem51/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"640925132","text":"import tkinter as tk\n\n\nclass Game(tk.Frame):\n def __init__(self, master=None, colorCoded=False, title=\"\", changeText=False):\n super().__init__(master)\n \n self.title = title\n self.changeText = changeText\n self.master = master\n self.colorCoded = colorCoded\n self[\"bg\"] = \"#F3F1E9\"\n \n self.place(relwidth=1, relheight=1)\n \n \n def changedText(self, text):\n if self.changeText:\n if str(text) == \"1\":\n return \"O\"\n elif str(text) == \"2\":\n return \"X\"\n elif str(text) == \"0\":\n return \"\"\n \n \n def colorCoding(self,text):\n if text == \"2\":\n return \"#E7DED4\", \"#7B7269\"\n elif text == \"4\":\n return \"#E7DAC3\", \"#7B7269\"\n elif text == \"8\":\n return \"#EBAC76\", \"#7B7269\"\n elif text == \"16\":\n return \"#EE9160\",\"#EEEEEE\"\n elif text == \"32\":\n return \"#EF795C\",\"#EEEEEE\"\n elif text == \"64\":\n return \"#EC5E35\",\"#EEEEEE\"\n elif text == \"128\":\n return \"#EBAC76\", \"#7B7269\"\n elif text == \"256\":\n return \"#E5CA57\", \"#7B7269\"\n elif text == \"512\":\n return \"#E6C34E\", \"#7B7269\"\n else:\n return \"#C7BBAF\", \"#7B7269\"\n \n def draw(self, board):\n title = tk.Label(self, text = self.title, fg = \"#7B7269\")\n 
title.place(relwidth = 1, relheight = 0.15)\n title.config(font=(\"Arial\", 30))\n \n self.frame = tk.Frame(self)\n self.frame.place(relwidth = 0.8, relheight = 0.8, relx = 0.1, rely=0.15)\n\n a = 1 / len(board)\n for y in range(len(board)):\n for x in range(len(board[y])):\n text = self.changedText(board[y][x])\n \n if self.colorCoded == True:\n bgcolor, fgcolor = self.colorCoding(str(text))\n else:\n bgcolor, fgcolor = \"#C7BBAF\", \"#7B7269\"\n \n \n canvas = tk.Canvas(self.frame, bg=bgcolor, highlightthickness=3, highlightbackground=\"#B5A9A3\")\n canvas.place(relwidth = a, relheight = a, relx = a*x, rely = a*y)\n \n canvas.create_text(50, 50, anchor=\"center\", font=(\"Purisa\", 20), text=text, fill=fgcolor)","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"284216710","text":"import numpy as np\n\n\ndef approximate_lonlat_distance(dlon, dlat, lat0, how='manhattan'):\n deg_len = 110.25 # km\n dlat = dlat * np.cos(lat0 * np.pi / 180)\n\n dist_mat = None\n\n if how == 'manhattan':\n dist_mat = (np.abs(dlon) + np.abs(dlat)) * deg_len\n elif how == 'euclidean':\n dist_mat = np.sqrt(dlon ** 2 + dlat ** 2) * deg_len\n\n return dist_mat\n\n\ndef get_distance(dx, dy, how='manhattan'):\n if how == 'manhattan':\n dist_mat = np.abs(dx) + np.abs(dy)\n elif how == 'euclidean':\n dist_mat = np.sqrt(dx ** 2 + dy ** 2)\n elif how == 'L_infty_norm':\n dist_mat = np.maximum(np.abs(dx), np.abs(dy))\n\n return dist_mat\n\n\ndef get_dist_mat(pos_1, pos_2, islonlat, how='euclidean'):\n pos_diff_list = []\n for i in range(2):\n pos = np.meshgrid(pos_2[:, i], pos_1[:, i])\n pos_diff_list.append(np.diff(pos, axis=0)[0]) # pos[1] - pos[0]\n\n if i == 1:\n lat0 = np.sum(pos, axis=0)[0] / 2\n\n if islonlat:\n dist_mat = approximate_lonlat_distance(pos_diff_list[0], pos_diff_list[1], lat0, how)\n else:\n dist_mat = get_distance(pos_diff_list[0], 
pos_diff_list[1], how)\n\n return dist_mat\n","sub_path":"agarwal_paper/functions/get_dist_matrix.py","file_name":"get_dist_matrix.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"134245615","text":"import collections\nimport filecmp\nimport shutil\nimport tempfile\nimport time\nimport unittest\nimport uuid\nfrom os import path\n\nimport six\nfrom core_data_modules.util import PhoneNumberUuidTable\n\n\nclass TestPhoneNumberUuidTable(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_add_get_phone(self):\n lut = PhoneNumberUuidTable()\n uuid = lut.add_phone(\"01234123123\")\n self.assertEqual(lut.get_uuid(\"01234123123\"), uuid)\n self.assertEqual(lut.get_uuid(\"(01234) 123123\"), uuid)\n self.assertEqual(lut.add_phone(\"01234123123\"), uuid)\n self.assertEqual(lut.add_phone(\"+1234 123-123\"), uuid)\n self.assertRaises(KeyError, lambda: lut.get_uuid(\"01234000001\"))\n\n def test_numbers(self):\n lut = PhoneNumberUuidTable()\n lut.add_phone(\"1234000001\")\n lut.add_phone(\"1234000002\")\n\n if six.PY2:\n self.assertIs(type(lut.numbers()), list)\n if six.PY3:\n self.assertIsInstance(iter(lut.numbers()), collections.Iterable)\n\n self.assertSetEqual(set(lut.numbers()), {\"1234000001\", \"1234000002\"})\n\n def test_iternumbers(self):\n lut = PhoneNumberUuidTable()\n if six.PY2:\n lut.add_phone(\"1234000001\")\n lut.add_phone(\"1234000002\")\n self.assertSetEqual(set(lut.iternumbers()), {\"1234000001\", \"1234000002\"})\n if six.PY3:\n self.assertRaises(AttributeError, lambda: lut.iternumbers())\n\n def test_uuids(self):\n lut = PhoneNumberUuidTable()\n uuids = {lut.add_phone(\"01234000001\"), lut.add_phone(\"01234000002\")}\n\n if six.PY2:\n self.assertIs(type(lut.uuids()), list)\n if six.PY3:\n self.assertIsInstance(iter(lut.uuids()), collections.Iterable)\n\n 
self.assertSetEqual(set(lut.uuids()), uuids)\n\n def test_iteruuids(self):\n lut = PhoneNumberUuidTable()\n if six.PY2:\n uuids = {lut.add_phone(\"01234000001\"), lut.add_phone(\"01234000002\")}\n self.assertSetEqual(set(lut.iteruuids()), uuids)\n if six.PY3:\n self.assertRaises(AttributeError, lambda: lut.iteruuids())\n\n def test_dumps_loads(self):\n lut = PhoneNumberUuidTable()\n lut.add_phone(\"01234000001\")\n lut.add_phone(\"01234000002\")\n lut.add_phone(\"01234000003\")\n\n dumped = lut.dumps()\n loaded = lut.loads(dumped)\n\n self.assertEqual(lut, loaded)\n \n def get_dump_load_lut(self):\n table = {\n \"1234000001\": \"4bf3388a-039b-4ca7-8789-319cf8ee343c\",\n \"1234000002\": \"62815f71-2721-42a6-856c-9cd66b66d6b5\",\n \"1234000003\": \"6becf322-7819-44f1-b212-5a13066def17\"\n }\n\n lut = PhoneNumberUuidTable(table)\n self.assertEqual(lut.get_uuid(\"01234000003\"), \"6becf322-7819-44f1-b212-5a13066def17\")\n self.assertEqual(lut.get_phone(\"62815f71-2721-42a6-856c-9cd66b66d6b5\"), \"1234000002\")\n \n return lut\n\n def test_dump(self):\n file_path = path.join(self.test_dir, \"test_output.json\")\n lut = self.get_dump_load_lut()\n\n with open(file_path, \"w\") as f:\n lut.dump(f, sort_keys=True)\n\n self.assertTrue(filecmp.cmp(file_path, \"tests/util/resources/phone_number_table_sample.json\"))\n\n def test_load(self):\n with open(\"tests/util/resources/phone_number_table_sample.json\", \"r\") as f:\n lut = PhoneNumberUuidTable.load(f)\n\n expected = self.get_dump_load_lut()\n self.assertEqual(lut, expected)\n\n @staticmethod\n def time_table_operations():\n \"\"\"\n Times various PhoneNumberUuidTable options with 100k numbers.\n\n Not automatically run as part of the test suite.\n \"\"\"\n lut = PhoneNumberUuidTable()\n\n print(\"Times:\")\n\n # Generate 100k UUIDs\n start = time.time()\n for x in range(100000):\n str(uuid.uuid4())\n end = time.time()\n print(\"Generate 100k UUIDs\", end - start)\n\n # Generate some phone numbers\n numbers = []\n for x 
in range(100000):\n numbers.append(\"+44123456\" + str(x).zfill(6))\n\n # Add all of those phone numbers to the LUT.\n start = time.time()\n uuids = []\n for n in numbers:\n uuids.append(lut.add_phone(n))\n end = time.time()\n print(\"Add 100k numbers\", end - start)\n\n # Read all of the numbers in the LUT.\n start = time.time()\n for u in uuids:\n lut.get_phone(u)\n end = time.time()\n print(\"Lookup 100k numbers\", end - start)\n\n # Serialize\n start = time.time()\n dumped = lut.dumps()\n end = time.time()\n print(\"Serialize to json string\", end - start)\n\n # Deserialize\n start = time.time()\n PhoneNumberUuidTable.loads(dumped)\n end = time.time()\n print(\"Deserialize from json string\", end - start)\n","sub_path":"tests/util/test_phone_number_uuid_table.py","file_name":"test_phone_number_uuid_table.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"356697176","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nfrom . 
import views\n\nurlpatterns = patterns(\n '',\n url(\n r'^create$',\n views.news_create\n ),\n url(\n r'^list$',\n views.news_list\n ),\n url(\n r'^(?P[0-9]+)/(?P[-a-zA-Z]{0,255})$',\n views.news_view,\n name='news_view'\n ),\n url(\n r'^(?P[0-9]+)/(?P[-a-zA-Z]{0,255})/edit$',\n views.news_edit,\n name='news_edit'\n ),\n url(\n r'^(?P[0-9]+)/(?P[-a-zA-Z]{0,255})/delete$',\n views.news_delete,\n name='news_delete'\n )\n)","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"413605328","text":"from common import d_count, Watch\n\nmax_pan = 0\n\ndef pandigital9(n):\n if d_count(n) != 9: return False\n b = 0\n while n > 0:\n b |= 1 << (n % 10)\n n //= 10\n return b == 0b1111111110\n\nWatch.start()\nmax_pan = 0\nfor n in range(1, 10000):\n pandigital = 0\n for i in range(1, 9):\n p = n * i\n pandigital = pandigital * 10**d_count(p) + p\n if d_count(pandigital) >= 9:\n if pandigital9(pandigital):\n max_pan = max(max_pan, pandigital)\n else: break\nprint(max_pan)\nWatch.stop()\n\n\n\n ","sub_path":"1_49/src/task38/s38.py","file_name":"s38.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"469378348","text":"from abc import abstractmethod\nimport os\nimport json\nimport copy, collections\nimport boto3\nimport json\n\nfrom invoke import run, Context\nfrom time import sleep, time\nfrom enum import IntEnum\nfrom test import test_utils\nfrom test.test_utils import LOGGER, ecr as ecr_utils\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Any, List\nfrom pathlib import Path\n\n\n@dataclass\nclass VulnerablePackageDetails:\n \"\"\"\n VulnerablePackageDetails dataclass is used to represent the \"package_details\" for\n a single vulnerability in Allowlist format.\n \"\"\"\n\n file_path: str\n name: str\n package_manager: 
str\n version: str\n release: str\n\n def __init__(\n self,\n name: str,\n version: str,\n release: str = None,\n *args: Any,\n **kwargs: Any,\n ):\n self.file_path = kwargs.get(\"filePath\") or kwargs.get(\"file_path\")\n self.name = name\n self.package_manager = kwargs.get(\"packageManager\") or kwargs.get(\"package_manager\")\n self.version = version\n self.release = release\n\n\n@dataclass\nclass AllowListFormatVulnerabilityForEnhancedScan:\n \"\"\"\n AllowListFormatVulnerabilityForEnhancedScan represents how the data looks for a single vulnerability in the allowlist format.\n The data from the ECR Enhanced Results are deserialized into AllowListFormatVulnerabilityForEnhancedScan dataclass. In\n other words, vulnerabilities from the ecr format are directly deserialized into vulnerabilities in Allowlist\n format using AllowListFormatVulnerabilityForEnhancedScan dataclass.\n \"\"\"\n\n description: str\n vulnerability_id: str\n name: str\n package_name: str\n package_details: VulnerablePackageDetails\n remediation: dict\n cvss_v3_score: float\n cvss_v30_score: float\n cvss_v31_score: float\n cvss_v2_score: float\n cvss_v3_severity: str\n source_url: str\n source: str\n severity: str\n status: str\n title: str\n\n def __init__(\n self,\n description: str,\n remediation: dict,\n severity: str,\n status: str,\n title: str,\n *args: Any,\n **kwargs: Any,\n ):\n self.description = description\n packageVulnerabilityDetails = kwargs.get(\"packageVulnerabilityDetails\")\n self.vulnerability_id = (\n packageVulnerabilityDetails[\"vulnerabilityId\"]\n if packageVulnerabilityDetails\n else kwargs[\"vulnerability_id\"]\n )\n self.name = (\n packageVulnerabilityDetails[\"vulnerabilityId\"]\n if packageVulnerabilityDetails\n else kwargs[\"name\"]\n )\n self.package_name = None if packageVulnerabilityDetails else kwargs[\"package_name\"]\n self.package_details = (\n None\n if packageVulnerabilityDetails\n else VulnerablePackageDetails(**kwargs[\"package_details\"])\n )\n 
self.remediation = remediation\n self.source_url = (\n packageVulnerabilityDetails[\"sourceUrl\"]\n if packageVulnerabilityDetails\n else kwargs[\"source_url\"]\n )\n self.source = (\n packageVulnerabilityDetails[\"source\"]\n if packageVulnerabilityDetails\n else kwargs[\"source\"]\n )\n self.severity = severity\n self.status = status\n self.title = title\n self.cvss_v30_score = (\n self.get_cvss_score(packageVulnerabilityDetails, score_version=\"3.0\")\n if packageVulnerabilityDetails\n else kwargs[\"cvss_v30_score\"]\n )\n self.cvss_v31_score = (\n self.get_cvss_score(packageVulnerabilityDetails, score_version=\"3.1\")\n if packageVulnerabilityDetails\n else kwargs[\"cvss_v31_score\"]\n )\n self.cvss_v3_score = self.cvss_v31_score if self.cvss_v31_score > 0 else self.cvss_v30_score\n self.cvss_v2_score = (\n self.get_cvss_score(packageVulnerabilityDetails, score_version=\"2.0\")\n if packageVulnerabilityDetails\n else kwargs[\"cvss_v2_score\"]\n )\n self.cvss_v3_severity = (\n self.get_cvss_v3_severity(self.cvss_v3_score)\n if packageVulnerabilityDetails\n else kwargs[\"cvss_v3_severity\"]\n )\n\n def __eq__(self, other):\n assert type(self) == type(other), f\"Types {type(self)} and {type(other)} mismatch!!\"\n ## Ignore version key in package_details as it might represent the version of the package existing in the image\n ## and might differ from image to image, even when the vulnerability is same.\n ## Also ignore the title key of the vulnerablitiy, because, sometimes, 1 vulnerability impacts multiple packages.\n ## In that case, the title key is generated by ECR scans by mentioning the name of all packages in a random order. 
This fails during comparison.\n if test_utils.check_if_two_dictionaries_are_equal(\n dataclasses.asdict(self.package_details),\n dataclasses.asdict(other.package_details),\n ignore_keys=[\"version\"],\n ):\n return test_utils.check_if_two_dictionaries_are_equal(\n dataclasses.asdict(self),\n dataclasses.asdict(other),\n ignore_keys=[\"package_details\", \"title\"],\n )\n return False\n\n def get_cvss_score(self, packageVulnerabilityDetails: dict, score_version: str = \"3.1\"):\n \"\"\"\n The ECR Enhanced Scan returns the CVSS scores as a list under packageVulnerabilityDetails[\"cvss\"].\n The list looks like:\n \"packageVulnerabilityDetails\": {\n \"cvss\": [\n {\n \"baseScore\": 7.7,\n \"scoringVector\": \"CVSS:3.1/AV:N/AC:H/PR:H/UI:N/S:C/C:H/I:N/A:H\",\n \"source\": \"SNYK\",\n \"version\": \"3.1\"\n },\n {\n \"baseScore\": 6.5,\n \"scoringVector\": \"CVSS:2.0/AV:N/AC:H/PR:H/UI:N/.....\",\n \"source\": \"SNYK\",\n \"version\": \"2.0\"\n }\n ]\n }\n This method iterates through all the CVSS scores and returns the baseScore for a particular CVSS version.\n :param packageVulnerabilityDetails: dict, as described above\n :param score_version: str, desired CVSS version\n :return: float, CVSS score\n \"\"\"\n for cvss_score in packageVulnerabilityDetails[\"cvss\"]:\n if cvss_score[\"version\"] == score_version:\n return float(cvss_score[\"baseScore\"])\n return 0.0\n\n ## Taken from https://nvd.nist.gov/vuln-metrics/cvss and section 5 of first.org/cvss/specification-document\n def get_cvss_v3_severity(self, cvss_v3_score: float):\n if cvss_v3_score >= 9.0:\n return \"CRITICAL\"\n elif cvss_v3_score >= 7.0:\n return \"HIGH\"\n elif cvss_v3_score >= 4.0:\n return \"MEDIUM\"\n elif cvss_v3_score >= 0.1:\n return \"LOW\"\n return \"UNDEFINED\" # Used to represent None Severity as well\n\n def set_package_details_and_name(self, package_details: VulnerablePackageDetails):\n self.package_details = package_details\n self.package_name = self.package_details.name\n\n\nclass 
ECRScanFailureException(Exception):\n \"\"\"\n Base class for other exceptions\n \"\"\"\n\n pass\n\n\nclass CVESeverity(IntEnum):\n UNDEFINED = 0\n INFORMATIONAL = 1\n LOW = 2\n MEDIUM = 3\n HIGH = 4\n CRITICAL = 5\n\n\nclass ScanVulnerabilityList:\n \"\"\"\n ScanVulnerabilityList is a class that reads and stores a vulnerability list in the Allowlist format. The format in which\n the allowlist JSON files are stored on the DLC repo is referred as the Allowlist Format. This class allows easy comparison\n of 2 Allowlist formatted vulnerability lists and defines methods to convert ECR Scan Lists to Allowlist Format lists that\n can be stored within the class itself.\n \"\"\"\n\n def __init__(self, minimum_severity=CVESeverity[\"MEDIUM\"]):\n self.vulnerability_list = {}\n self.minimum_severity = minimum_severity\n\n @abstractmethod\n def are_vulnerabilities_equivalent(self, vulnerability_1, vulnerability_2):\n pass\n\n @abstractmethod\n def get_vulnerability_package_name_from_allowlist_formatted_vulnerability(self, vulnerability):\n pass\n\n @abstractmethod\n def construct_allowlist_from_allowlist_formatted_vulnerabilities(\n self, allowlist_formatted_vulnerability_list\n ):\n pass\n\n def get_flattened_vulnerability_list(self):\n \"\"\"\n Returns the vulnerability list in the flattened format. 
For eg., if a vulnerability list looks like\n {\"k1\":[{\"a\":\"b\"},{\"c\":\"d\"}], \"k2\":[{\"e\":\"f\"},{\"g\":\"h\"}]}, it would return the following:\n [{\"a\":\"b\"},{\"c\":\"d\"},{\"e\":\"f\"},{\"g\":\"h\"}]\n\n :return: List(dict)\n \"\"\"\n if self.vulnerability_list:\n return [\n vulnerability\n for package_vulnerabilities in self.vulnerability_list.values()\n for vulnerability in package_vulnerabilities\n ]\n return []\n\n def get_sorted_vulnerability_list(self):\n \"\"\"\n This method is specifically made to sort the vulnerability list which is actually a dict\n and has the following structure:\n {\n \"packge_name1\":[\n {\"name\":\"cve-id1\", \"uri\":\"http..\" ..},\n {\"name\":\"cve-id2\", \"uri\":\"http..\" ..}\n ],\n \"packge_name2\":[\n {\"name\":\"cve-id1\", \"uri\":\"http..\" ..},\n {\"name\":\"cve-id2\", \"uri\":\"http..\" ..}\n ]\n }\n We want to first sort the innermost list of dicts based on the \"name\" of each dict and then we sort the\n outermost dict based on keys i.e. 
package_name1 and package_name2.\n Note: We do not change the actual vulnerability list.\n :return: dict, sorted vulnerability list\n \"\"\"\n copy_dict = copy.deepcopy(self.vulnerability_list)\n for key, list_of_complex_types in copy_dict.items():\n uniquified_list = test_utils.uniquify_list_of_complex_datatypes(list_of_complex_types)\n uniquified_list.sort(\n key=lambda dict_element: dict_element[\"name\"]\n if isinstance(dict_element, dict)\n else dict_element.name\n )\n return dict(sorted(copy_dict.items()))\n\n def save_vulnerability_list(self, path):\n if self.vulnerability_list:\n sorted_vulnerability_list = self.get_sorted_vulnerability_list()\n with open(path, \"w\") as f:\n json.dump(sorted_vulnerability_list, f, indent=4)\n else:\n raise ValueError(\"self.vulnerability_list is empty.\")\n\n def __contains__(self, vulnerability):\n \"\"\"\n Check if an input vulnerability exists on the allow-list\n\n :param vulnerability: dict JSON object consisting of information about the vulnerability in the format\n presented by the ECR Scan Tool\n :return: bool True if the vulnerability is allowed on the allow-list.\n \"\"\"\n package_name = self.get_vulnerability_package_name_from_allowlist_formatted_vulnerability(\n vulnerability\n )\n if package_name not in self.vulnerability_list:\n return False\n for allowed_vulnerability in self.vulnerability_list[package_name]:\n if self.are_vulnerabilities_equivalent(vulnerability, allowed_vulnerability):\n return True\n return False\n\n def __cmp__(self, other):\n \"\"\"\n Compare two ScanVulnerabilityList objects for equivalence\n\n :param other: Another ScanVulnerabilityList object\n :return: True if equivalent, False otherwise\n \"\"\"\n if not other or not other.vulnerability_list:\n return not self.vulnerability_list\n\n if sorted(self.vulnerability_list.keys()) != sorted(other.vulnerability_list.keys()):\n return False\n\n for package_name, package_vulnerabilities in self.vulnerability_list.items():\n if 
len(self.vulnerability_list[package_name]) != len(\n other.vulnerability_list[package_name]\n ):\n return False\n for v1, v2 in zip(\n self.get_sorted_vulnerability_list()[package_name],\n other.get_sorted_vulnerability_list()[package_name],\n ):\n if not self.are_vulnerabilities_equivalent(v1, v2):\n return False\n return True\n\n def __eq__(self, other):\n \"\"\"\n Compare two ScanVulnerabilityList objects for equivalence.\n\n :param other: Another ScanVulnerabilityList object\n :return: True if equivalent, False otherwise\n \"\"\"\n return self.__cmp__(other)\n\n def __ne__(self, other):\n \"\"\"\n Reverse of __eq__\n\n :param other: Another ScanVulnerabilityList object\n :return: True if not equivalent, False otherwise\n \"\"\"\n return not self.__eq__(other)\n\n def __sub__(self, other):\n \"\"\"\n Difference between ScanVulnerabilityList objects\n\n :param other: Another ScanVulnerabilityList object\n :return: List of vulnerabilities that exist in self, but not in other\n \"\"\"\n if not self.vulnerability_list:\n return None\n if not other or not other.vulnerability_list:\n return self\n\n missing_vulnerabilities = [\n vulnerability\n for package_vulnerabilities in self.vulnerability_list.values()\n for vulnerability in package_vulnerabilities\n if vulnerability not in other\n ]\n if not missing_vulnerabilities:\n return None\n\n difference = type(self)(minimum_severity=self.minimum_severity)\n difference.construct_allowlist_from_allowlist_formatted_vulnerabilities(\n missing_vulnerabilities\n )\n return difference\n\n def __add__(self, other):\n \"\"\"\n Does Union between ScanVulnerabilityList objects\n\n :param other: Another ScanVulnerabilityList object\n :return: Union of vulnerabilites exisiting in self and other\n \"\"\"\n flattened_vulnerability_list_self = self.get_flattened_vulnerability_list()\n flattened_vulnerability_list_other = other.get_flattened_vulnerability_list()\n all_vulnerabilities = flattened_vulnerability_list_self + 
flattened_vulnerability_list_other\n if not all_vulnerabilities:\n return None\n union_vulnerabilities = test_utils.uniquify_list_of_complex_datatypes(all_vulnerabilities)\n\n union = type(self)(minimum_severity=self.minimum_severity)\n union.construct_allowlist_from_allowlist_formatted_vulnerabilities(union_vulnerabilities)\n return union\n\n\nclass ECRBasicScanVulnerabilityList(ScanVulnerabilityList):\n \"\"\"\n A child class of ScanVulnerabilityList that is specifically made to deal with ECR Basic Scans.\n \"\"\"\n\n def get_vulnerability_package_name_from_allowlist_formatted_vulnerability(self, vulnerability):\n \"\"\"\n Get Package Name from a vulnerability JSON object.\n For ECR Basic Scans, the format of the vulnerability is same in ecr format and allowlist format, so this function\n can be used interchangeably.\n\n :param vulnerability: dict JSON object consisting of information about the vulnerability in the Allowlist format data\n which is same as ECR Scan Tool data for ECR Basic Scanning.\n :return: str package name\n \"\"\"\n for attribute in vulnerability[\"attributes\"]:\n if attribute[\"key\"] == \"package_name\":\n return attribute[\"value\"]\n return None\n\n def construct_allowlist_from_file(self, file_path):\n \"\"\"\n Read JSON file and prepare the object with all allowed vulnerabilities\n\n :param file_path: Path to the allow-list JSON file.\n :return: dict self.vulnerability_list\n \"\"\"\n with open(file_path, \"r\") as f:\n file_allowlist = json.load(f)\n for package_name, package_vulnerability_list in file_allowlist.items():\n for vulnerability in package_vulnerability_list:\n if CVESeverity[vulnerability[\"severity\"]] >= self.minimum_severity:\n if package_name not in self.vulnerability_list:\n self.vulnerability_list[package_name] = []\n self.vulnerability_list[package_name].append(vulnerability)\n return self.vulnerability_list\n\n def construct_allowlist_from_allowlist_formatted_vulnerabilities(\n self, 
allowlist_formatted_vulnerability_list\n ):\n \"\"\"\n Read a vulnerability list and construct the vulnerability_list\n\n :param vulnerability_list: list ECR Scan Result results\n :return: dict self.vulnerability_list\n \"\"\"\n for vulnerability in allowlist_formatted_vulnerability_list:\n package_name = (\n self.get_vulnerability_package_name_from_allowlist_formatted_vulnerability(\n vulnerability\n )\n )\n if package_name not in self.vulnerability_list:\n self.vulnerability_list[package_name] = []\n if CVESeverity[vulnerability[\"severity\"]] >= self.minimum_severity:\n self.vulnerability_list[package_name].append(vulnerability)\n return self.vulnerability_list\n\n def construct_allowlist_from_ecr_scan_result(self, ecr_format_vulnerability_list):\n \"\"\"\n Read a vulnerability list and construct the vulnerability_list\n For Basic Scan, the ecr scan vulnerabilities and the allowlist vulnerabilities have the same format\n and hence we can use the same function.\n\n :param vulnerability_list: list ECR Scan Result results\n :return: dict self.vulnerability_list\n \"\"\"\n return self.construct_allowlist_from_allowlist_formatted_vulnerabilities(\n ecr_format_vulnerability_list\n )\n\n def are_vulnerabilities_equivalent(self, vulnerability_1, vulnerability_2):\n \"\"\"\n Check if two vulnerability JSON objects are equivalent\n\n :param vulnerability_1: dict JSON object consisting of information about the vulnerability in the format\n presented by the ECR Scan Tool\n :param vulnerability_2: dict JSON object consisting of information about the vulnerability in the format\n presented by the ECR Scan Tool\n :return: bool True if the two input objects are equivalent, False otherwise\n \"\"\"\n if (vulnerability_1[\"name\"], vulnerability_1[\"severity\"]) == (\n vulnerability_2[\"name\"],\n vulnerability_2[\"severity\"],\n ):\n # Do not compare package_version, because this may have been obtained at the time the CVE was first observed\n # on the ECR Scan, which would 
result in unrelated version updates causing a mismatch while the CVE still\n # applies on both vulnerabilities.\n if all(\n attribute in vulnerability_2[\"attributes\"]\n for attribute in vulnerability_1[\"attributes\"]\n if not attribute[\"key\"] == \"package_version\"\n ):\n return True\n return False\n\n\nclass ECREnhancedScanVulnerabilityList(ScanVulnerabilityList):\n \"\"\"\n A child class of ScanVulnerabilityList that is specifically made to deal with ECR Enhanced Scans.\n \"\"\"\n\n def get_vulnerability_package_name_from_allowlist_formatted_vulnerability(\n self, vulnerability: AllowListFormatVulnerabilityForEnhancedScan\n ):\n \"\"\"\n Get Package Name from a vulnerability JSON object\n :param vulnerability: dict JSON object consisting of information about the vulnerability in the Allowlist Format.\n :return: str package name\n \"\"\"\n return vulnerability.package_name\n\n def construct_allowlist_from_file(self, file_path):\n \"\"\"\n Read JSON file that has the vulnerability data saved in the Allowlist format itself and prepare the object with\n all the vulnerabilities in the Allowlist format as well.\n\n :param file_path: Path to the allow-list JSON file.\n :return: dict self.vulnerability_list\n \"\"\"\n with open(file_path, \"r\") as f:\n file_allowlist = json.load(f)\n for _, package_vulnerability_list in file_allowlist.items():\n allowlist_formatted_package_vulnerability_list = [\n AllowListFormatVulnerabilityForEnhancedScan(**vulnerability)\n for vulnerability in package_vulnerability_list\n ]\n self.construct_allowlist_from_allowlist_formatted_vulnerabilities(\n allowlist_formatted_package_vulnerability_list\n )\n return self.vulnerability_list\n\n def construct_allowlist_from_allowlist_formatted_vulnerabilities(\n self,\n allowlist_formatted_vulnerability_list: List[AllowListFormatVulnerabilityForEnhancedScan],\n ):\n \"\"\"\n Read a vulnerability list in the AllowListFormat and construct the vulnerability_list in the same format.\n\n :param 
vulnerability_list: list ECR Scan Result results\n :return: dict self.vulnerability_list\n \"\"\"\n for vulnerability in allowlist_formatted_vulnerability_list:\n package_name = (\n self.get_vulnerability_package_name_from_allowlist_formatted_vulnerability(\n vulnerability\n )\n )\n if CVESeverity[vulnerability.cvss_v3_severity] < self.minimum_severity:\n continue\n if package_name not in self.vulnerability_list:\n self.vulnerability_list[package_name] = []\n self.vulnerability_list[package_name].append(vulnerability)\n return self.vulnerability_list\n\n def allow_vendor_severity_override(self, vulnerability_obj):\n \"\"\"\n If package source is from an allowed vendor, allow the vendor's severity to take precedence\n Args:\n vulnerability_obj (AllowListFormatVulnerabilityForEnhancedScan): object representing the vulnerability\n Return:\n bool: Whether to allow the vulnerability or not\n \"\"\"\n allowed_vendors = {\"UBUNTU_CVE\"}\n return (\n vulnerability_obj.source in allowed_vendors\n and CVESeverity[vulnerability_obj.severity] < self.minimum_severity\n )\n\n def allow_cvss_v3_severity(self, vulnerability_obj):\n \"\"\"\n If CVSS v3 score is less than the threshold, return True, else return False\n Args:\n vulnerability_obj (AllowListFormatVulnerabilityForEnhancedScan): object representing the vulnerability\n Return:\n bool: Whether to allow the vulnerablity or not\n \"\"\"\n return CVESeverity[vulnerability_obj.cvss_v3_severity] < self.minimum_severity\n\n def construct_allowlist_from_ecr_scan_result(self, ecr_format_vulnerability_list):\n \"\"\"\n Read an ECR formatted vulnerability list and construct the Allowlist Formatted vulnerability_list\n\n :param vulnerability_list: list ECR Scan Result results\n :return: dict self.vulnerability_list\n \"\"\"\n for ecr_format_vulnerability in ecr_format_vulnerability_list:\n for vulnerable_package in ecr_format_vulnerability[\"packageVulnerabilityDetails\"][\n \"vulnerablePackages\"\n ]:\n 
allowlist_format_vulnerability_object = AllowListFormatVulnerabilityForEnhancedScan(\n **ecr_format_vulnerability\n )\n vulnerable_package_object = VulnerablePackageDetails(**vulnerable_package)\n allowlist_format_vulnerability_object.set_package_details_and_name(\n vulnerable_package_object\n )\n if self.allow_cvss_v3_severity(\n allowlist_format_vulnerability_object\n ) or self.allow_vendor_severity_override(allowlist_format_vulnerability_object):\n continue\n if (\n allowlist_format_vulnerability_object.package_name\n not in self.vulnerability_list\n ):\n self.vulnerability_list[allowlist_format_vulnerability_object.package_name] = []\n self.vulnerability_list[allowlist_format_vulnerability_object.package_name].append(\n allowlist_format_vulnerability_object\n )\n self.vulnerability_list = self.get_sorted_vulnerability_list()\n return self.vulnerability_list\n\n def are_vulnerabilities_equivalent(self, vulnerability_1, vulnerability_2):\n \"\"\"\n Check if two vulnerability JSON objects are equivalent\n\n :param vulnerability_1: dict, JSON object consisting of information about the vulnerability in the Allowlist Format\n :param vulnerability_2: dict, JSON object consisting of information about the vulnerability in the Allowlist Format\n :return: bool True if the two input objects are equivalent, False otherwise\n \"\"\"\n return vulnerability_1 == vulnerability_2\n\n def get_summarized_info(self):\n \"\"\"\n Gets summarized info regarding all the packages vulnerability_list and all the vulenrability IDs corresponding to them.\n \"\"\"\n summarized_list = []\n for package_name, vulnerabilities in self.vulnerability_list.items():\n for vulnerability in vulnerabilities:\n summarized_list.append(\n (package_name, vulnerability.vulnerability_id, vulnerability.severity)\n )\n summarized_list = sorted(list(set(summarized_list)))\n return summarized_list\n\n\ndef get_ecr_vulnerability_package_version(vulnerability):\n \"\"\"\n Get Package Version from a vulnerability 
JSON object\n\n :param vulnerability: dict JSON object consisting of information about the vulnerability in the format\n presented by the ECR Scan Tool\n :return: str package version\n \"\"\"\n for attribute in vulnerability[\"attributes\"]:\n if attribute[\"key\"] == \"package_version\":\n return attribute[\"value\"]\n return None\n\n\ndef get_ecr_scan_allowlist_path(image_uri):\n dockerfile_location = test_utils.get_dockerfile_path_for_image(image_uri)\n image_scan_allowlist_path = dockerfile_location + \".os_scan_allowlist.json\"\n if (\n not any(image_type in image_uri for image_type in [\"neuron\", \"eia\"])\n and test_utils.is_covered_by_ec2_sm_split(image_uri)\n and test_utils.is_ec2_sm_in_same_dockerfile(image_uri)\n ):\n if test_utils.is_ec2_image(image_uri):\n image_scan_allowlist_path = image_scan_allowlist_path.replace(\n \"Dockerfile\", \"Dockerfile.ec2\"\n )\n else:\n image_scan_allowlist_path = image_scan_allowlist_path.replace(\n \"Dockerfile\", \"Dockerfile.sagemaker\"\n )\n\n # Each example image (tied to CUDA version/OS version/other variants) can have its own list of vulnerabilities,\n # which means that we cannot have just a single allowlist for all example images for any framework version.\n if \"example\" in image_uri:\n # The extracted dockerfile_location in case of example image points to the base gpu image on top of which the\n # example image was built. 
The dockerfile_location looks like\n # tensorflow/training/docker/2.7/py3/cu112/Dockerfile.ec2.gpu.example.os_scan_allowlist.json\n # We want to change the parent folder such that it points from cu112 folder to example folder and\n # looks like tensorflow/training/docker/2.7/py3/example/Dockerfile.gpu.example.os_scan_allowlist.json\n dockerfile_location = dockerfile_location.replace(\".ec2.\", \".\")\n base_gpu_image_path = Path(dockerfile_location)\n image_scan_allowlist_path = os.path.join(\n str(base_gpu_image_path.parent.parent), \"example\", base_gpu_image_path.name\n )\n image_scan_allowlist_path += \".example.os_scan_allowlist.json\"\n return image_scan_allowlist_path\n\n\ndef _save_lists_in_s3(save_details, s3_bucket_name):\n \"\"\"\n This method takes in a list of filenames and the data corresponding to each filename and stores it in\n the s3 bucket.\n\n :param save_details: list[(string, list)], a lists of tuples wherein each tuple has a filename and the corresponding data.\n :param s3_bucket_name: string, name of the s3 bucket\n \"\"\"\n s3_client = boto3.client(\"s3\")\n for filename, data in save_details:\n with open(filename, \"w\") as outfile:\n json.dump(data, outfile, indent=4)\n s3_client.upload_file(Filename=filename, Bucket=s3_bucket_name, Key=filename)\n\n\ndef get_target_image_uri_using_current_uri_and_target_repo(\n image, target_repository_name, target_repository_region, append_tag=\"\"\n):\n \"\"\"\n This function helps formulate a target image uri for a given image such that the target uri retains\n the old uri info (i.e. 
old repo name and old repo tag).\n\n :param image: str, image uri\n :param target_repository_name: str, name of target repository\n :param target_repository_region: str, region of target repository\n :param append_tag: str, string that needs to be appended at the end of the tag\n :return: str, target image uri\n \"\"\"\n sts_client = boto3.client(\"sts\", region_name=target_repository_region)\n account_id = sts_client.get_caller_identity().get(\"Account\")\n registry = ecr_utils.get_ecr_registry(account_id, target_repository_region)\n (\n original_image_repository,\n original_image_tag,\n ) = test_utils.get_repository_and_tag_from_image_uri(image)\n if append_tag:\n upgraded_image_tag = f\"{original_image_repository}-{original_image_tag}-{append_tag}\"\n else:\n upgraded_image_tag = f\"{original_image_repository}-{original_image_tag}\"\n target_image_uri = f\"{registry}/{target_repository_name}:{upgraded_image_tag}\"\n return target_image_uri\n\n\ndef run_upgrade_on_image_and_push(image, new_image_uri):\n \"\"\"\n Creates a container for the image being tested. Runs apt update and upgrade on the container\n and the commits the container as new_image_uri. 
This new image is then pushed to the ECR.\n\n :param image: str\n :param new_image_uri: str\n \"\"\"\n max_attempts = 10\n ctx = Context()\n docker_run_cmd = f\"docker run -id --entrypoint='/bin/bash' {image}\"\n container_id = ctx.run(f\"{docker_run_cmd}\", hide=True).stdout.strip()\n apt_command = \"apt-get update && apt-get upgrade\"\n docker_exec_cmd = f\"docker exec -i {container_id}\"\n attempt_count = 0\n apt_ran_successfully_flag = False\n # When a command or application is updating the system or installing a new software, it locks the dpkg file (Debian package manager).\n # Since we have multiple processes running for the tests, there are cases when one of the process locks the dpkg file\n # In this scenario, we get error: ‘E: Could not get lock /var/lib/dpkg/lock’ while running apt-get update\n # That is why we need multiple tries to ensure that it succeeds in one of the tries.\n # More info: https://itsfoss.com/could-not-get-lock-error/\n while True:\n run_output = ctx.run(f\"{docker_exec_cmd} {apt_command}\", hide=True, warn=True)\n attempt_count += 1\n if not run_output.ok:\n test_utils.LOGGER.info(\n f\"Attempt no. {attempt_count} on image: {image}\"\n f\"Could not run apt update and upgrade. \\n\"\n f\"Stdout is {run_output.stdout} \\n\"\n f\"Stderr is {run_output.stderr} \\n\"\n f\"Failed status is {run_output.exited}\"\n )\n sleep(2 * 60)\n elif run_output.ok:\n apt_ran_successfully_flag = True\n break\n if attempt_count == max_attempts:\n break\n if not apt_ran_successfully_flag:\n raise RuntimeError(\n f\"Could not run apt update and upgrade on image: {image}. 
\\n\"\n f\"Stdout is {run_output.stdout} \\n\"\n f\"Stderr is {run_output.stderr} \\n\"\n f\"Failed status is {run_output.exited}\"\n )\n ctx.run(f\"docker commit {container_id} {new_image_uri}\", hide=True)\n ctx.run(f\"docker rm -f {container_id}\", hide=True)\n ctx.run(f\"docker push {new_image_uri}\", hide=True)\n\n\ndef _invoke_lambda(function_name, payload_dict={}):\n \"\"\"\n Asyncronously Invokes the passed lambda.\n\n :param function_name: str, name of the lambda function\n :param payload_dict: dict, payload to be sent to the lambda\n \"\"\"\n lambda_client = boto3.client(\"lambda\", region_name=test_utils.DEFAULT_REGION)\n response = lambda_client.invoke(\n FunctionName=function_name,\n InvocationType=\"Event\",\n LogType=\"Tail\",\n Payload=json.dumps(payload_dict),\n )\n status_code = response.get(\"StatusCode\")\n if status_code != 202:\n raise ValueError(\"Lambda call not made properly. Status code returned {status_code}\")\n\n\ndef get_apt_package_name(ecr_package_name):\n \"\"\"\n Few packages have different names in the ecr scan and actual apt. This function returns an\n apt name of an ecr package.\n :param ecr_package_name: str, name of the package in ecr scans\n :param apt_package_name: str, name of the package in apt\n \"\"\"\n name_mapper = {\n \"cyrus-sasl2\": \"libsasl2-2\",\n \"glibc\": \"libc6\",\n \"libopenmpt\": \"libopenmpt-dev\",\n \"fribidi\": \"libfribidi-dev\",\n }\n return name_mapper.get(ecr_package_name, ecr_package_name)\n\n\ndef create_and_save_package_list_to_s3(old_filepath, new_packages, new_filepath, s3_bucket_name):\n \"\"\"\n This method conducts the union of packages present in the original apt-get-upgrade\n list and new list of packages passed as an argument. 
It makes a new file and stores\n the results in it.\n :param old_filpath: str, path of original file\n :param new_packages: list[str], consists of list of packages\n :param new_filpath: str, path of new file that will have the results of union\n :param s3_bucket_name: string, name of the s3 bucket\n \"\"\"\n file1 = open(old_filepath, \"r\")\n lines = file1.readlines()\n current_packages = [line.strip() for line in lines]\n package_list = current_packages\n new_packages = [get_apt_package_name(new_package) for new_package in new_packages]\n union_of_old_and_new_packages = set(package_list).union(set(new_packages))\n unified_package_list = list(union_of_old_and_new_packages)\n unified_package_list.sort()\n unified_package_list_for_storage = [\n f\"{package_name}\\n\" for package_name in unified_package_list\n ]\n file1.close()\n run(f\"rm -rf {new_filepath}\")\n with open(new_filepath, \"w\") as file2:\n file2.writelines(unified_package_list_for_storage)\n s3_client = boto3.client(\"s3\")\n s3_client.upload_file(Filename=new_filepath, Bucket=s3_bucket_name, Key=new_filepath)\n\n\ndef save_scan_vulnerability_list_object_to_s3_in_json_format(\n image, scan_vulnerability_list_object, append_tag, s3_bucket_name\n):\n \"\"\"\n Saves the vulnerability list in the s3 bucket. 
It uses image to decide the name of the file on\n the s3 bucket.\n\n :param image: str, image uri\n :param vulnerability_list: ScanVulnerabilityList\n :param s3_bucket_name: string, name of the s3 bucket\n :return: str, name of the file as stored on s3\n \"\"\"\n processed_image_uri = image.replace(\".\", \"-\").replace(\"/\", \"-\").replace(\":\", \"-\")\n file_name = f\"{processed_image_uri}-{append_tag}.json\"\n scan_vulnerability_list_object.save_vulnerability_list(file_name)\n s3_client = boto3.client(\"s3\")\n s3_client.upload_file(Filename=file_name, Bucket=s3_bucket_name, Key=file_name)\n return file_name\n\n\ndef get_vulnerabilites_fixable_by_upgrade(\n image_allowlist, ecr_image_vulnerability_list, upgraded_image_vulnerability_list\n):\n \"\"\"\n Finds out the vulnerabilities that are fixable by apt-get update and apt-get upgrade.\n\n :param image_allowlist: ScanVulnerabilityList, Vulnerabities that are present in the respective allowlist in the DLC git repo.\n :param ecr_image_vulnerability_list: ScanVulnerabilityList, Vulnerabities recently detected WITHOUT running apt-upgrade on the originally released image.\n :param upgraded_image_vulnerability_list: ScanVulnerabilityList, Vulnerabilites exisiting in the image WITH apt-upgrade run on it.\n :return: ScanVulnerabilityList/NONE, either ScanVulnerabilityList object or None if no fixable vulnerability\n \"\"\"\n fixable_ecr_image_scan_vulnerabilites = (\n ecr_image_vulnerability_list - upgraded_image_vulnerability_list\n )\n fixable_allowlist_vulnerabilites = image_allowlist - upgraded_image_vulnerability_list\n vulnerabilities_fixable_by_upgrade = None\n if fixable_ecr_image_scan_vulnerabilites and fixable_allowlist_vulnerabilites:\n vulnerabilities_fixable_by_upgrade = (\n fixable_ecr_image_scan_vulnerabilites + fixable_allowlist_vulnerabilites\n )\n elif fixable_ecr_image_scan_vulnerabilites:\n vulnerabilities_fixable_by_upgrade = fixable_ecr_image_scan_vulnerabilites\n elif 
fixable_allowlist_vulnerabilites:\n vulnerabilities_fixable_by_upgrade = fixable_allowlist_vulnerabilites\n return vulnerabilities_fixable_by_upgrade\n\n\ndef conduct_failure_routine(\n image,\n image_allowlist,\n ecr_image_vulnerability_list,\n upgraded_image_vulnerability_list,\n s3_bucket_for_storage,\n):\n \"\"\"\n This method conducts the entire process that is supposed to be followed when ECR test fails. It finds all\n the fixable and non fixable vulnerabilities and all the packages that can be upgraded and finally invokes\n the Auto-Secure lambda for further processing.\n\n :param image: str, image uri\n :param image_allowlist: ScanVulnerabilityList, Vulnerabities that are present in the respective allowlist in the DLC git repo.\n :param ecr_image_vulnerability_list: ScanVulnerabilityList, Vulnerabities recently detected WITHOUT running apt-upgrade on the originally released image.\n :param upgraded_image_vulnerability_list: ScanVulnerabilityList, Vulnerabilites exisiting in the image WITH apt-upgrade run on it.\n :param s3_bucket_for_storage: s3 name of the bucket that would be used for saving all the important data that needs to be stored during failure routine.\n :return: dict, a dictionary consisting of the entire summary of the steps run within this method.\n \"\"\"\n s3_filename_for_allowlist = save_scan_vulnerability_list_object_to_s3_in_json_format(\n image, upgraded_image_vulnerability_list, \"allowlist\", s3_bucket_for_storage\n )\n s3_filename_for_current_image_ecr_scan_list = (\n save_scan_vulnerability_list_object_to_s3_in_json_format(\n image, ecr_image_vulnerability_list, \"current-ecr-scanlist\", s3_bucket_for_storage\n )\n )\n original_filepath_for_allowlist = get_ecr_scan_allowlist_path(image)\n edited_files = [\n {\n \"s3_filename\": s3_filename_for_allowlist,\n \"github_filepath\": original_filepath_for_allowlist,\n }\n ]\n vulnerabilities_fixable_by_upgrade = get_vulnerabilites_fixable_by_upgrade(\n image_allowlist, 
ecr_image_vulnerability_list, upgraded_image_vulnerability_list\n )\n newly_found_non_fixable_vulnerabilites = upgraded_image_vulnerability_list - image_allowlist\n fixable_list = {}\n if vulnerabilities_fixable_by_upgrade:\n fixable_list = vulnerabilities_fixable_by_upgrade.vulnerability_list\n apt_upgrade_list_filename = (\n f\"apt-upgrade-list-{test_utils.get_processor_from_image_uri(image)}.txt\"\n )\n s3_filename_for_apt_upgrade_list = s3_filename_for_allowlist.replace(\n \"allowlist.json\", apt_upgrade_list_filename\n )\n original_filepath_for_apt_upgrade_list = os.path.join(\n os.path.dirname(original_filepath_for_allowlist), apt_upgrade_list_filename\n )\n new_package_list = fixable_list if isinstance(fixable_list, list) else list(fixable_list.keys())\n create_and_save_package_list_to_s3(\n original_filepath_for_apt_upgrade_list,\n new_package_list,\n s3_filename_for_apt_upgrade_list,\n s3_bucket_for_storage,\n )\n edited_files.append(\n {\n \"s3_filename\": s3_filename_for_apt_upgrade_list,\n \"github_filepath\": original_filepath_for_apt_upgrade_list,\n }\n )\n newly_found_non_fixable_list = {}\n if newly_found_non_fixable_vulnerabilites:\n newly_found_non_fixable_list = newly_found_non_fixable_vulnerabilites.vulnerability_list\n message_body = {\n \"edited_files\": edited_files,\n \"fixable_vulnerabilities\": fixable_list,\n \"non_fixable_vulnerabilities\": newly_found_non_fixable_list,\n }\n ## TODO: Make the conditions below as if test_utils.is_canary_context() and test_utils.is_time_for_invoking_ecr_scan_failure_routine_lambda() and os.getenv(\"REGION\") == test_utils.DEFAULT_REGION:\n ## to make sure that we just invoke the ECR_SCAN_FAILURE_ROUTINE_LAMBDA once everyday\n if test_utils.is_canary_context() and os.getenv(\"REGION\") == test_utils.DEFAULT_REGION:\n # boto3.Session().region_name == test_utils.DEFAULT_REGION helps us invoke the ECR_SCAN_FAILURE_ROUTINE_LAMBDA\n # from just 1 account\n _invoke_lambda(\n 
function_name=test_utils.ECR_SCAN_FAILURE_ROUTINE_LAMBDA, payload_dict=message_body\n )\n return_dict = copy.deepcopy(message_body)\n return_dict[\"s3_filename_for_allowlist\"] = s3_filename_for_allowlist\n return_dict[\n \"s3_filename_for_current_image_ecr_scan_list\"\n ] = s3_filename_for_current_image_ecr_scan_list\n return return_dict\n\n\ndef process_failure_routine_summary_and_store_data_in_s3(failure_routine_summary, s3_bucket_name):\n \"\"\"\n This method is especially constructed to process the failure routine summary that is generated as a result of\n calling conduct_failure_routine. It extracts lists and calls the save lists function to store them in the s3\n bucket.\n\n :param failure_routine_summary: dict, dictionary returned as an outcome of conduct_failure_routine method\n :param s3_bucket_name: string, name of the s3 bucket\n :return s3_filename_for_fixable_list: string, filename in the s3 bucket for the fixable vulnerabilities\n :return s3_filename_for_non_fixable_list: string, filename in the s3 bucket for the non-fixable vulnerabilities\n \"\"\"\n s3_filename_for_allowlist = failure_routine_summary[\"s3_filename_for_allowlist\"]\n s3_filename_for_fixable_list = s3_filename_for_allowlist.replace(\n \"allowlist.json\", \"fixable-vulnerability-list.json\"\n )\n s3_filename_for_non_fixable_list = s3_filename_for_allowlist.replace(\n \"allowlist.json\", \"non-fixable-vulnerability-list.json\"\n )\n save_details = []\n save_details.append(\n (s3_filename_for_fixable_list, failure_routine_summary[\"fixable_vulnerabilities\"])\n )\n save_details.append(\n (s3_filename_for_non_fixable_list, failure_routine_summary[\"non_fixable_vulnerabilities\"])\n )\n _save_lists_in_s3(save_details, s3_bucket_name)\n return s3_filename_for_fixable_list, s3_filename_for_non_fixable_list\n\n\ndef run_scan(ecr_client, image):\n scan_status = None\n start_time = time()\n ecr_utils.start_ecr_image_scan(ecr_client, image)\n while (time() - start_time) <= 600:\n scan_status, 
scan_status_description = ecr_utils.get_ecr_image_scan_status(\n ecr_client, image\n )\n if scan_status == \"FAILED\" or scan_status not in [None, \"IN_PROGRESS\", \"COMPLETE\"]:\n raise ECRScanFailureException(\n f\"ECR Scan failed for {image} with description: {scan_status_description}\"\n )\n if scan_status == \"COMPLETE\":\n break\n sleep(1)\n if scan_status != \"COMPLETE\":\n raise TimeoutError(f\"ECR Scan is still in {scan_status} state. Exiting.\")\n\n\ndef wait_for_enhanced_scans_to_complete(ecr_client, image):\n \"\"\"\n For Continuous Enhanced scans, the images will go through `SCAN_ON_PUSH` when they are uploaded for the\n first time. During that time, their state will be shown as `PENDING`. From next time onwards, their status will show\n itself as `ACTIVE`.\n\n :param ecr_client: boto3 Client for ECR\n :param image: str, Image URI for image being scanned\n \"\"\"\n scan_status = None\n scan_status_description = \"\"\n start_time = time()\n while (time() - start_time) <= 45 * 60:\n try:\n scan_status, scan_status_description = ecr_utils.get_ecr_image_enhanced_scan_status(\n ecr_client, image\n )\n except ecr_client.exceptions.ScanNotFoundException as e:\n LOGGER.info(e.response)\n LOGGER.info(\n \"It takes sometime for the newly uploaded image to show its scan status, hence the error handling\"\n )\n if scan_status == \"ACTIVE\":\n break\n sleep(1 * 60)\n if scan_status != \"ACTIVE\":\n raise TimeoutError(\n f\"ECR Scan is still in {scan_status} state with description: {scan_status_description}. Exiting.\"\n )\n\n\ndef fetch_other_vulnerability_lists(image, ecr_client, minimum_sev_threshold):\n \"\"\"\n For a given image it fetches all the other vulnerability lists except the vulnerability list formed by the\n ecr scan of the current image. 
In other words, for a given image it fetches upgraded_image_vulnerability_list and\n image_scan_allowlist.\n\n :param image: str Image URI for image to be tested\n :param ecr_client: boto3 Client for ECR\n :param minimum_sev_threshold: string, determines the minimum severity threshold for ScanVulnerabilityList objects. Can take values HIGH or MEDIUM.\n :return upgraded_image_vulnerability_list: ScanVulnerabilityList, Vulnerabilites exisiting in the image WITH apt-upgrade run on it.\n :return image_allowlist: ScanVulnerabilityList, Vulnerabities that are present in the respective allowlist in the DLC git repo.\n \"\"\"\n new_image_uri_for_upgraded_image = get_target_image_uri_using_current_uri_and_target_repo(\n image,\n target_repository_name=test_utils.UPGRADE_ECR_REPO_NAME,\n target_repository_region=os.getenv(\"REGION\", test_utils.DEFAULT_REGION),\n append_tag=\"upgraded\",\n )\n run_upgrade_on_image_and_push(image, new_image_uri_for_upgraded_image)\n run_scan(ecr_client, new_image_uri_for_upgraded_image)\n scan_results_with_upgrade = ecr_utils.get_ecr_image_scan_results(\n ecr_client, new_image_uri_for_upgraded_image, minimum_vulnerability=minimum_sev_threshold\n )\n scan_results_with_upgrade = ecr_utils.populate_ecr_scan_with_web_scraper_results(\n new_image_uri_for_upgraded_image, scan_results_with_upgrade\n )\n upgraded_image_vulnerability_list = ECRBasicScanVulnerabilityList(\n minimum_severity=CVESeverity[minimum_sev_threshold]\n )\n upgraded_image_vulnerability_list.construct_allowlist_from_ecr_scan_result(\n scan_results_with_upgrade\n )\n image_scan_allowlist = ECRBasicScanVulnerabilityList(\n minimum_severity=CVESeverity[minimum_sev_threshold]\n )\n image_scan_allowlist_path = get_ecr_scan_allowlist_path(image)\n if os.path.exists(image_scan_allowlist_path):\n image_scan_allowlist.construct_allowlist_from_file(image_scan_allowlist_path)\n return upgraded_image_vulnerability_list, 
image_scan_allowlist\n","sub_path":"test/test_utils/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":47846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"464572680","text":"#!/usr/bin/env python2.7\n#Sholl Group / Phillips 66\n#Purpose: Window Size \n#Name: Ross Verploegh\n#GTaccountID: rverploegh3\n#Date Originated: February 09, 2015\n#Date Revised: February 09, 2015\n############################################################################################################\n\n###Import all the important modules\nimport sys\nimport subprocess\nimport os\nimport re\n#import support_functions as sp\nimport time\n#import xlwt\nimport math as m\nimport shutil\nimport fileinput\nimport numpy as np\nimport glob\nimport random\nfrom decimal import *\nfrom lammps import lammps\n#import pypar\nfrom mpi4py import MPI\nimport smtplib\nfrom email.MIMEText import MIMEText\nfrom scipy import constants\nfrom itertools import chain\nfrom copy import deepcopy\nimport scipy.io as io\nfrom collections import namedtuple\nimport copy\n\n\nfrom support_functions import *\nfrom sssr import *\nimport COMBINEDv2\nimport COMBINEDv3\n\n############################################################################################################\nclass NODES():\n def __init__(self,neighbors):\n self.neighbors = neighbors\n\n def neighbor_list(self,r):\n templist=list(self.neighbors[r,:])\n templist = [x-1 for x in templist if x != 0]\n return(templist )\n \n############################################################################################################\n\ndef f4(seq): \n # order preserving\n noDupes = []\n [noDupes.append(i) for i in seq if not noDupes.count(i)]\n return noDupes\n\n############################################################################################################\ndef CALCULATEMSD(TIME_ARRAY,MSD_ARRAY,STARTING_POS,CURRENT_POS,IMAGE_FLAGS,RT,n,ST):\n TIME_ARRAY[ST,n]=RT\n 
MSD=((CURRENT_POS.x+IMAGE_FLAGS.x*aLEN*x_expand)-STARTING_POS.x)**2+((CURRENT_POS.y+IMAGE_FLAGS.y*aLEN*y_expand)-STARTING_POS.y)**2+((CURRENT_POS.z+IMAGE_FLAGS.z*aLEN*z_expand)-STARTING_POS.z)**2\n MSD_ARRAY[ST,n]=MSD\n return(TIME_ARRAY,MSD_ARRAY)\n\n#########################\ndef expand_UC(myid,natoms,atom_type,coord_frac,x_expand,y_expand,z_expand):\n #First expand the atom_type vector\n nunitcells=x_expand*y_expand*z_expand\n\n #Then expand the actual unit cell\n lx=aLEN\n xytilt=bLEN*np.cos(np.radians(gammaD))\n xztilt=cLEN*np.cos(np.radians(betaD))\n ly=np.sqrt(bLEN**2-(xytilt**2))\n yztilt=(bLEN*cLEN*np.cos(np.radians(alphaD))-xytilt*xztilt)/float(ly)\n lz=np.sqrt(cLEN**2 - xztilt**2 - yztilt**2)\n print(lx,ly,lz)\n print(xytilt,xztilt,yztilt)\n\n atom_type_new=[]\n xpos_newf=[]\n ypos_newf=[]\n zpos_newf=[]\n for ucx in range(x_expand):\n for ucy in range(y_expand):\n for ucz in range(z_expand):\n for atom in range(natoms):\n\n if ucx < (x_expand-1) and ucy < (y_expand-1) and ucz < (z_expand-1):\n if float(coord_frac[atom,0]) <= 0.99 and float(coord_frac[atom,1]) <= 0.99 and float(coord_frac[atom,2]) <= 0.99: #if it is not on the border, add it\n atom_type_new.append(atom_type[atom])\n xpos_newf.append(float(coord_frac[atom,0])+ucx)\n ypos_newf.append(float(coord_frac[atom,1])+ucy)\n zpos_newf.append(float(coord_frac[atom,2])+ucz)\n\n else: #add border atoms if on the outside wall\n atom_type_new.append(atom_type[atom])\n xpos_newf.append(float(coord_frac[atom,0])+ucx)\n ypos_newf.append(float(coord_frac[atom,1])+ucy)\n zpos_newf.append(float(coord_frac[atom,2])+ucz)\n\n xpos_newf=np.array(xpos_newf,dtype='f')\n ypos_newf=np.array(ypos_newf,dtype='f')\n zpos_newf=np.array(zpos_newf,dtype='f')\n coord_fracn=np.vstack([xpos_newf,ypos_newf,zpos_newf])\n\n return(atom_type_new,coord_fracn)\n\ndef expand_UC_CC(myid,natoms,atom_type,coord_frac,x_expand,y_expand,z_expand):\n #First expand the atom_type vector\n nunitcells=x_expand*y_expand*z_expand\n 
atom_type_new=[]\n for n in range(nunitcells):\n for atom in range(len(atom_type)):\n atom_type_new.append(atom_type[atom])\n\n #Then expand the actual unit cell\n lx=aLEN\n xytilt=bLEN*np.cos(np.radians(gammaD))\n xztilt=cLEN*np.cos(np.radians(betaD))\n ly=np.sqrt(bLEN**2-(xytilt**2))\n yztilt=(bLEN*cLEN*np.cos(np.radians(alphaD))-xytilt*xztilt)/float(ly)\n lz=np.sqrt(cLEN**2 - xztilt**2 - yztilt**2)\n print(lx,ly,lz)\n print(xytilt,xztilt,yztilt)\n\n xpos_newf=[]\n ypos_newf=[]\n zpos_newf=[]\n for ucx in range(x_expand):\n for ucy in range(y_expand):\n for ucz in range(z_expand):\n for atom in range(natoms):\n xpos_newf.append(float(coord_frac[atom,0])+ucx)\n ypos_newf.append(float(coord_frac[atom,1])+ucy)\n zpos_newf.append(float(coord_frac[atom,2])+ucz)\n\n xpos_newf=np.array(xpos_newf,dtype='f')\n ypos_newf=np.array(ypos_newf,dtype='f')\n zpos_newf=np.array(zpos_newf,dtype='f')\n coord_fracn=np.vstack([xpos_newf,ypos_newf,zpos_newf])\n\n return(atom_type_new,coord_fracn)\n#########################\ndef transformation_matrix(aLEN,bLEN,cLEN,alphaD,betaD,gammaD,x_expand,y_expand,z_expand):\n#Calculating the transformation matrix \n#Fractional coordinates (a-axis is collinear with the x-axis)\n alpha_rad=alphaD*(np.pi/180)\n beta_rad=betaD*(np.pi/180)\n gamma_rad=gammaD*(np.pi/180)\n a=aLEN*x_expand\n b=bLEN*y_expand\n c=cLEN*z_expand\n \n c1 = c*m.cos(beta_rad)\n c2 = c*(m.cos(alpha_rad) - m.cos(gamma_rad) * m.cos(beta_rad))/m.sin(gamma_rad)\n c3 = (c**2 - c1**2 - c2**2)**0.5\n #C = [ a b*cos(gamma) c1 ; 0 b*sin(gamma) c2; 0 0 c3];\n C_mat=np.zeros([3,3])\n C_mat[0][0] = a\n C_mat[0][1] = b*m.cos(gamma_rad)\n C_mat[0][2] = c1\n C_mat[1][1] = b*m.sin(gamma_rad)\n C_mat[1][2] = c2\n C_mat[2][2] = c3\n C_mat_inv=np.linalg.inv(C_mat)\n #print C_mat\n #print C_mat_inv\n return(C_mat,C_mat_inv)\n#########################\ndef bonds(myid,natoms,conn_matrix_char,conn_matrix_nums,NN_number):\n print('part 1')\n BOND_A=[]\n BOND_B=[]\n \n bond_count=0\n for atom in 
range(natoms):\n length_index=0\n for j in range(0,NN_number): #4,8\n if conn_matrix_char[atom][j]!='':\n length_index+=1\n\n for k in range(length_index):\n bond_count+=1\n BOND_A.append(int(atom+1))\n BOND_B.append(int(conn_matrix_nums[atom][k]))\n print(bond_count)\n bond_count=(bond_count/2) #This is need since there are bond duplicates\n BOND_A=np.array(BOND_A,dtype='int')\n BOND_B=np.array(BOND_B,dtype='int')\n BOND_ARRAY=np.vstack((BOND_A,BOND_B))\n BOND_ARRAY=BOND_ARRAY.transpose()\n\n #There are duplicates in the bond array so I need to remove them\n print('part 2')\n count=1\n BOND_ARRAY_sorted=copy.deepcopy(np.sort(BOND_ARRAY)) #sort the array\n flag_array=np.zeros([len(BOND_ARRAY)]) #set flags for bonds that need to be removed\n BOND_ARRAY_SHORT=np.zeros([len(BOND_ARRAY)/2,2])\n\n flag_array=COMBINEDv2.bond_fix(BOND_ARRAY_sorted,flag_array) #######THIS IS FUCKKKKKED################\n#__________________________________________________\n #for i in range(len(BOND_ARRAY_sorted)):\n #for j in range(i+1,len(BOND_ARRAY_sorted)):\n #if BOND_ARRAY_sorted[i][0]==BOND_ARRAY_sorted[j][0]:\n #if BOND_ARRAY_sorted[i][1]==BOND_ARRAY_sorted[j][1]:\n #flag_array[i][0]=1\n#__________________________________________________\n\n counterblah = -1\n for i in range(len(BOND_ARRAY_sorted)):\n if str(flag_array[i])=='0':\n #if flag_array[i][0]==0:\n counterblah+=1\n BOND_ARRAY_SHORT[counterblah][0]=int(BOND_ARRAY[i][0])\n BOND_ARRAY_SHORT[counterblah][1]=int(BOND_ARRAY[i][1])\n\n print('part 3')\n BOND_ARRAYback=BOND_ARRAY_SHORT\n #print(len(BOND_ARRAY))\n #print(len(BOND_ARRAY_SHORT))\n BOND_ARRAY_sortedback=BOND_ARRAY_SHORT\n BOND_ARRAY_sortedback=np.sort(BOND_ARRAY_sortedback)\n BOND_ARRAYback=BOND_ARRAYback.astype(int)\n BOND_ARRAYback=BOND_ARRAY_sortedback.astype(int)\n 
return(bond_count,BOND_ARRAYback)\n\n\n############################################################################################################\n############################################################################################################\ndef measure_SOP(linker_composition,linker_binaryNNlist,n_linkerNNs):\n SOP_store=[]\n for clust in range(len(linker_binaryNNlist)):\n if linker_binaryNNlist[clust,0]==0: #WHY WOULD I STAND ON ZIF-90 LINKERS????????????????\n probAB=float(np.sum(linker_binaryNNlist[clust,1:n_linkerNNs+1]))/float(n_linkerNNs) #THIS IS THE PROBABILITY THAT I SEE A ZIF-90 (PD)\n if linker_composition != float(0):\n SOP=1-float(probAB)/float(linker_composition)\n else:\n SOP=0\n SOP_store.append(SOP)\n SOP_store=np.array(SOP_store,dtype='float')\n avg_SOP=np.average(SOP_store)\n return(avg_SOP)\n\ndef switch_linkers(rand_linker_1,rand_linker_2,NCL,linker_NNlist,linkersswitching,n_linkerNNs,LBNNlistB): #THIS NEEDS TO BE PLACED INTO FORTRAN\n #Replace the occurances of the linker 2 with linker 1 (it now becomes a 90)\n #print(rand_linker_1,rand_linker_2)\n #print(linkersswitching)\n LSlist=[rand_linker_1 if x==rand_linker_2 else x for x in linkersswitching]\n #print(LSlist)\n\n LBNNlist=copy.deepcopy(LBNNlistB)\n NNs_linkerswitch_1=linker_NNlist[rand_linker_1-1,:] #these get turned to 1's\n NNs_linkerswitch_2=linker_NNlist[rand_linker_2-1,:] #these get turned to 0's\n \n #print(rand_linker_1,rand_linker_2)\n #print(NNs_linkerswitch_1)\n #print(NNs_linkerswitch_2)\n\n\n #turning to 1's\n LBNNlist[rand_linker_1-1,0]=1\n for i in range(len(NNs_linkerswitch_1)): #this is always 6\n NNindex=int(NNs_linkerswitch_1[i])-1\n for neighbor in range(n_linkerNNs): #this is always 6\n if rand_linker_1==linker_NNlist[NNindex,neighbor]:\n LBNNlist[NNindex,neighbor+1]=1\n\n #turning to 0's\n LBNNlist[rand_linker_2-1,0]=0\n for i in range(len(NNs_linkerswitch_2)): #this is always 6\n NNindex=int(NNs_linkerswitch_2[i])-1\n for neighbor in 
range(n_linkerNNs): #this is always 6\n if rand_linker_2==linker_NNlist[NNindex,neighbor]:\n LBNNlist[NNindex,neighbor+1]=0\n\n #Do the same for each row of the linker_binaryNNlist\n #LBNNlist=np.zeros((NCL,n_linkerNNs+1),dtype='int')\n #for clust in range(NCL):\n # clustID=clust+1\n # if clustID in LSlist:\n # LBNNlist[clust,0]=1\n # for clustnum in range(NCL):\n # clustIDB=clustnum+1\n # for neighbor in range(n_linkerNNs):\n # if clustIDB==linker_NNlist[clust,neighbor] and (clustIDB in LSlist):\n # LBNNlist[clust,neighbor+1]=1\n return(LBNNlist,LSlist)\n############################################################################################################\n\ndef main(myid,main_argument): \n ###Set the main paths \n global running_dir\n running_dir=str(os.getcwd())\n global head_dir\n head_dir=str(os.getcwd()).replace('/src','')\n global input\n input=head_dir+'/input'\n global output\n output=head_dir+'/output'\n global src\n src=head_dir+'/src'\n global databases\n databases=head_dir+'/database' \n name='ZIF_FRAMEWORK'\n file_argument='8_justZN24.xyz'\n MAINOUTPUTFILE=open(output+'/MAIN_OUT_FILE.txt','w')\n\n\n ### Global INPUTS\n SEED= SETBASH_SEED\n np.random.seed(SEED)\n global x_expand\n global y_expand\n global z_expand\n global aLEN\n global bLEN\n global cLEN\n global alphaD\n global betaD\n global gammaD\n x_expand= SETBASH_XEXPAND\n y_expand= SETBASH_YEXPAND\n z_expand= SETBASH_ZEXPAND\n\n aLEN= 16.94302 #33.88604\n bLEN= 16.94302 #33.88604\n cLEN= 16.94302 #33.88604\n alphaD= 90.0\n betaD= 90.0\n gammaD= 90.0\n file_name=input+'/'+str(file_argument)\n\n set_SOP= SETBASH_SOP\n set_real_SOP= SETBASH_REALSOP\n max_iterations= SETBASH_MAXITERS\n MCbeta= SETBASH_BETA\n RMC_errorTOL= SETBASH_ERRORTOL\n SWITCH_FRAC= SETBASH_SWFRAC\n flag_SRO= SETBASH_SROFLAG\n\n Nmolecules= SETBASH_NMOLES\n MCSTEPS= SETBASH_MCSTEPS\n PRINTFREQ= SETBASH_PRINTFREQ\n\n\n #Create transformation matrixes (fractional coordinates)\n 
C_mat,C_mat_inv=transformation_matrix(aLEN,bLEN,cLEN,alphaD,betaD,gammaD,1,1,1)\n C_mat_LARGE,C_mat_inv_LARGE=transformation_matrix(aLEN,bLEN,cLEN,alphaD,betaD,gammaD,x_expand,y_expand,z_expand)\n\n\n #I need two layers \n #first I put down the Zn atoms and determine the SRO and mixing of the two hybrid species\n #second I lay down the Centers of the cages\n\n\n ###Read xyz file\n oneprint(myid,'XYZ read.')\n print(file_name)\n atom_type,xpos,ypos,zpos=xyz_read(file_name) #do file format checking to catch errors\n natoms=len(atom_type)\n\n ###Expand the unit cell\n xposnpc=np.array(xpos,dtype='f')\n yposnpc=np.array(ypos,dtype='f')\n zposnpc=np.array(zpos,dtype='f')\n coord_reg=np.vstack([xposnpc,yposnpc,zposnpc])\n coord_reg=coord_reg.T\n coord_frac=np.dot(C_mat_inv,coord_reg.T)\n coord_frac=np.array(coord_frac.T)\n\n #Expand the unit cell\n atom_type_new,coord_fracn=expand_UC(myid,natoms,atom_type,coord_frac,x_expand,y_expand,z_expand)\n natoms=len(atom_type_new)\n print('NATOMS: '+str(natoms))\n coord_frac=np.array(coord_fracn.T)\n print('FRAC COORDS: '+str(coord_frac.shape))\n coord_reg=np.dot(C_mat,coord_frac.T)\n coord_reg=coord_reg.T\n print('CART COORDS: '+str(coord_reg.shape))\n xpos=coord_reg[:,0]\n ypos=coord_reg[:,1]\n zpos=coord_reg[:,2]\n\n atom_type=np.array(atom_type_new,dtype='S7')\n atom_typeINT=[]\n for atom in range(natoms):\n if atom_type[atom]=='H' or atom_type[atom]=='H2' or atom_type[atom]=='H3':\n atom_typeINT.append(1)\n if atom_type[atom]=='C' or atom_type[atom]=='C1' or atom_type[atom]=='C2' or atom_type[atom]=='C3':\n atom_typeINT.append(2)\n if atom_type[atom]=='N':\n atom_typeINT.append(3)\n if atom_type[atom]=='O':\n atom_typeINT.append(4)\n if atom_type[atom]=='Zn':\n atom_typeINT.append(2) #this is to trick the system\n atom_typeINT=np.array(atom_typeINT,dtype='int')\n\n file=open(output+'/'+str(name)+'TEST_EXPAND'+str(x_expand)+str(y_expand)+str(z_expand)+'.xyz','w')\n file.write(str(len(atom_type))+'\\n')\n file.write(str(name)+' 
'+str(aLEN)+' '+str(bLEN)+' '+str(cLEN)+' '+str(alphaD)+' '+str(betaD)+' '+str(gammaD)+'\\n')\n for i in range(len(atom_type)):\n file.write(str(atom_type[i]).replace(\"['\",'').replace(\"']\",'')+' '+str(float(xpos[i]))+' '+str(float(ypos[i]))+' '+str(float(zpos[i]))+'\\n')\n file.write('\\r')\n file.close()\n\n file=open(output+'/TEST_FRAC'+str(x_expand)+str(y_expand)+str(z_expand)+'.xyz','w')\n file.write(str(natoms)+'\\n\\n')\n for x in range(natoms):\n #file.write(str(atom_type[x])+' '+str(coord_frac[x,0]*aLEN)+' '+str(coord_frac[x,1]*bLEN)+' '+str(coord_frac[x,2]*cLEN)+'\\n')\n file.write(str(atom_type[x])+' '+str(coord_frac[x,0]*1)+' '+str(coord_frac[x,1]*1)+' '+str(coord_frac[x,2]*1)+'\\n')\n file.close()\n\n ###Generate the connectivity matrix and bonds\n\n print (COMBINEDv3.__doc__)\n\n print('Removing duplicates.')\n bond_fix = 0.001\n radii_array=[]\n for atom in range(natoms):\n atom1=atom_type[atom]\n rad1=3\n radii_array.append(rad1)\n radii_array1=np.array(radii_array,dtype='f')\n remove_atoms=np.zeros([natoms,1],dtype='int')\n remove_atoms=COMBINEDv3.remove_duplicatesnp(atom_typeINT,radii_array1,coord_frac,C_mat,x_expand,y_expand,z_expand,remove_atoms,bond_fix)\n natoms2remove=np.sum(remove_atoms)\n print('CURRENT ATOMS:',natoms)\n print('REMOVE ATOMS:',natoms2remove)\n natomsOLD=natoms\n natoms=natoms-int(natoms2remove)\n print('NEW ATOMS:',natoms)\n\n atom_typenew=np.empty([natoms,1],dtype='a3')\n atom_typeINTnew=np.zeros([natoms,1],dtype='int')\n coord_regnew=np.empty([natoms,3],dtype='f')\n coord_fracnew=np.empty([natoms,3],dtype='f')\n count=0\n for atom in range(natomsOLD):\n if int(remove_atoms[atom,0]) == 0:\n #Update atom_typeCHAR and INT\n atom_typenew[count,0]=atom_type[atom]\n atom_typeINTnew[count,0]=atom_typeINT[atom]\n #Update regular and fractional coords\n coord_regnew[count,0]=coord_reg[atom,0]\n coord_regnew[count,1]=coord_reg[atom,1]\n coord_regnew[count,2]=coord_reg[atom,2]\n coord_fracnew[count,0]=coord_frac[atom,0]\n 
coord_fracnew[count,1]=coord_frac[atom,1]\n coord_fracnew[count,2]=coord_frac[atom,2]\n count+=1\n atom_type=copy.deepcopy(atom_typenew)\n atom_typeINT=copy.deepcopy(atom_typeINTnew)\n coord_reg=copy.deepcopy(coord_regnew)\n coord_frac=copy.deepcopy(coord_fracnew)\n xpos=coord_reg[:,0]\n ypos=coord_reg[:,1]\n zpos=coord_reg[:,2]\n\n############\n print('Sorting atoms.')\n #DO COMPLICATED SORTING OF THE ATOMS\n xflags=[]\n yflags=[]\n zflags=[]\n for atom in range(natoms):\n if coord_reg[atom,0] >= x_expand*aLEN-0.01:\n xflags.append(1)\n else:\n xflags.append(0)\n if coord_reg[atom,1] >= y_expand*bLEN-0.01:\n yflags.append(1)\n else:\n yflags.append(0)\n if coord_reg[atom,2] >= z_expand*cLEN-0.01:\n zflags.append(1)\n else:\n zflags.append(0)\n xBOUND_n=np.sum(xflags) \n yBOUND_n=np.sum(yflags) \n zBOUND_n=np.sum(zflags)\n BOUND_n_total=int(xBOUND_n)+int(yBOUND_n)+int(zBOUND_n) \n print(xBOUND_n,yBOUND_n,zBOUND_n)\n\n atom_typenew=np.empty([natoms,1],dtype='a3')\n atom_typeINTnew=np.zeros([natoms,1],dtype='int')\n coord_regnew=np.empty([natoms,3],dtype='f')\n coord_fracnew=np.empty([natoms,3],dtype='f')\n\n atom_typeREMOVED=np.empty([natoms-BOUND_n_total,1],dtype='a3')\n atom_typeINTREMOVED=np.zeros([natoms-BOUND_n_total,1],dtype='int')\n coord_regREMOVED=np.empty([natoms-BOUND_n_total,3],dtype='f')\n coord_fracREMOVED=np.empty([natoms-BOUND_n_total,3],dtype='f')\n\n count_reg=0\n count_partway=natoms-BOUND_n_total\n ixflags=np.empty([natoms,1],dtype='int')\n iyflags=np.empty([natoms,1],dtype='int')\n izflags=np.empty([natoms,1],dtype='int')\n for atom in range(natoms):\n if int(xflags[atom])==1 or int(yflags[atom])==1 or int(zflags[atom])==1:\n atom_typenew[count_partway,0]=atom_type[atom,0]\n atom_typeINTnew[count_partway,0]=atom_typeINT[atom,0]\n coord_regnew[count_partway,0]=coord_reg[atom,0]\n coord_regnew[count_partway,1]=coord_reg[atom,1]\n coord_regnew[count_partway,2]=coord_reg[atom,2]\n coord_fracnew[count_partway,0]=coord_frac[atom,0]\n 
coord_fracnew[count_partway,1]=coord_frac[atom,1]\n coord_fracnew[count_partway,2]=coord_frac[atom,2]\n if int(xflags[atom])==1:\n ixflags[count_partway,0]=1\n else:\n ixflags[count_partway,0]=0\n if int(yflags[atom])==1:\n iyflags[count_partway,0]=1\n else:\n iyflags[count_partway,0]=0\n if int(zflags[atom])==1:\n izflags[count_partway,0]=1\n else:\n izflags[count_partway,0]=0\n count_partway+=1\n else:\n atom_typenew[count_reg,0]=atom_type[atom,0]\n atom_typeINTnew[count_reg,0]=atom_typeINT[atom,0]\n coord_regnew[count_reg,0]=coord_reg[atom,0]\n coord_regnew[count_reg,1]=coord_reg[atom,1]\n coord_regnew[count_reg,2]=coord_reg[atom,2]\n coord_fracnew[count_reg,0]=coord_frac[atom,0]\n coord_fracnew[count_reg,1]=coord_frac[atom,1]\n coord_fracnew[count_reg,2]=coord_frac[atom,2]\n atom_typeREMOVED[count_reg,0]=atom_type[atom,0]\n atom_typeINTREMOVED[count_reg,0]=atom_typeINT[atom,0]\n coord_regREMOVED[count_reg,0]=coord_reg[atom,0]\n coord_regREMOVED[count_reg,1]=coord_reg[atom,1]\n coord_regREMOVED[count_reg,2]=coord_reg[atom,2]\n coord_fracREMOVED[count_reg,0]=coord_frac[atom,0]\n coord_fracREMOVED[count_reg,1]=coord_frac[atom,1]\n coord_fracREMOVED[count_reg,2]=coord_frac[atom,2]\n ixflags[count_reg,0]=0\n iyflags[count_reg,0]=0\n izflags[count_reg,0]=0\n count_reg+=1\n\n atom_type=copy.deepcopy(atom_typenew)\n atom_typeINT=copy.deepcopy(atom_typeINTnew)\n coord_reg=copy.deepcopy(coord_regnew)\n coord_frac=copy.deepcopy(coord_fracnew)\n xpos=coord_reg[:,0]\n ypos=coord_reg[:,1]\n zpos=coord_reg[:,2]\n xposREMOVED=coord_regREMOVED[:,0]\n yposREMOVED=coord_regREMOVED[:,1]\n zposREMOVED=coord_regREMOVED[:,2]\n\n file=open(output+'/'+str(name)+'TEST_EXPAND'+str(x_expand)+str(y_expand)+str(z_expand)+'.xyz','w')\n file.write(str(len(atom_type))+'\\n')\n file.write(str(name)+' '+str(aLEN)+' '+str(bLEN)+' '+str(cLEN)+' '+str(alphaD)+' '+str(betaD)+' '+str(gammaD)+'\\n')\n for i in range(len(atom_type)):\n 
file.write(str(atom_type[i]).replace(\"['\",'').replace(\"']\",'')+' '+str(float(xpos[i]))+' '+str(float(ypos[i]))+' '+str(float(zpos[i]))+'\\n')\n file.write('\\r')\n file.close()\n\n############\n print('Connectivity Map generating - PART 1.')\n #this will need the expansion in xyz directions when I expand unit cell\n conn_matrix1=np.zeros([natoms,9])\n conn_matrix_char=np.zeros([natoms,4],dtype='a3')\n conn_matrix_nums1=np.zeros([natoms,4],dtype='int')\n bond_fix = 0.001\n radii_array=[]\n for atom in range(natoms):\n atom1=atom_type[atom]\n rad1=3\n radii_array.append(rad1)\n radii_array1=np.array(radii_array,dtype='f')\n conn_matrix1,conn_matrix_nums1=COMBINEDv3.connectivity_mappingnp(atom_typeINT,radii_array1,coord_frac,C_mat,x_expand,y_expand,z_expand,conn_matrix1,conn_matrix_nums1,bond_fix)\n #conn_matrix,conn_matrix_nums,conn_matrix_char=connectivity_mapping(natoms,radii,atom_type,atype,coord_frac,C_mat,x_expand,y_expand,z_expand) \n #print(conn_matrix_char)\n print(conn_matrix_nums1)\n print(conn_matrix1)\n conn_matrix=copy.deepcopy(conn_matrix1)\n conn_matrix_nums=copy.deepcopy(conn_matrix_nums1)\n\n file=open(output+'/connect_check2_nums.xyz','w')\n for x in range(natoms):\n file.write(str(conn_matrix_nums[x,0])+' '+str(conn_matrix_nums[x,1])+' '+str(conn_matrix_nums[x,2])+' '+str(conn_matrix_nums[x,3])+'\\n')\n file.close()\n\n for atom in range(natoms):\n for i in range(4):\n if conn_matrix_nums[atom,i] != 0 and conn_matrix_nums[atom,i] <= natoms:\n #print(atom_type[int(conn_matrix_nums[atom,i])-1,0])\n conn_matrix_char[atom,i]=atom_type[int(conn_matrix_nums[atom,i])-1,0]\n #print(conn_matrix_char)\n\n print('Connectivity Map generated!')\n\n file=open(output+'/connect_check1_char.xyz','w')\n for x in range(natoms):\n file.write(str(conn_matrix_char[x,0])+' '+str(conn_matrix_char[x,1])+' '+str(conn_matrix_char[x,2])+' '+str(conn_matrix_char[x,3])+'\\n')\n file.close()\n##############\n print('Window Search Algorithm.')\n ###FIND THE WINDOWS USING THE 
SSSR ALGORITHM\n bond_count,BOND_ARRAYback=bonds(myid,natoms,conn_matrix_char,conn_matrix_nums,4)\n print('LINKER:',bond_count)\n #print(BOND_ARRAYback)\n \n METAL_DICT=defaultdict(list)\n for linker in range(bond_count):\n METAL_DICT[linker+1].append(BOND_ARRAYback[linker,0])\n METAL_DICT[linker+1].append(BOND_ARRAYback[linker,1])\n #print(METAL_DICT)\n\n ZNatoms=NODES(conn_matrix_nums)\n #print(ZNatoms.neighbors)\n print(ZNatoms.neighbor_list(0))\n\n rin=[]\n rings=[]\n for r0 in range(natoms): #ring member 0\n rin.append(r0+1)\n\n for r1 in ZNatoms.neighbor_list(r0):\n rin.append(r1+1) #ring member 1 \n\n for r2 in ZNatoms.neighbor_list(r1):\n if r2 == r1 or r2 == r0: continue # to avoid the case of a-b-a ...\n else: rin.append(r2+1)\n\n for r3 in ZNatoms.neighbor_list(r2):\n if r3 == r2 or r3 == r1 or r3==r0: continue\n else: rin.append(r3+1)\n\n for r4 in ZNatoms.neighbor_list(r3):\n if r4 == r3 or r4 == r2 or r4==r1 or r4==r0: continue\n else: rin.append(r4+1)\n\n for r5 in ZNatoms.neighbor_list(r4):\n if r5 == r4 or r5 == r3 or r5==r2 or r5==r1 or r5==r0: continue\n else: rin.append(r5+1)\n\n for r6 in ZNatoms.neighbor_list(r5):\n if r6 == r0: \n #rin.append(r6)\n rings.append(list(rin)) # find a ring, save it\n #rin.pop()\n else: continue \n rin.pop()\n rin.pop()\n rin.pop()\n rin.pop()\n rin.pop()\n rin.pop()\n\n print('N_WINDOWS_FAKE: ',len(rings))\n #print(rings)\n\n\n a = []\n sort=rings\n counter=0\n INDEXES=[]\n for q in sorted((set(q) for q in sort), key=len, reverse=True):\n if not any(q.issubset(Q) for Q in a):\n a.append(q)\n INDEXES.append(counter)\n counter+=1\n a = [list(q) for q in a]\n\n print(len(a))\n \n realrings=[]\n for index in INDEXES:\n realrings.append(rings[index])\n \n #print(realrings)\n print('N_WINDOWS_REAL:: ',len(realrings))\n\n\n file=open(output+'/ringcenters.xyz','w')\n for ring in realrings:\n #print(ring)\n x=0\n y=0\n z=0\n for element in ring:\n #print(element)\n x+=xpos[element-1]\n y+=ypos[element-1]\n 
z+=zpos[element-1]\n xmid=x/float(6)\n ymid=y/float(6)\n zmid=z/float(6)\n #print(xmid,ymid,zmid)\n file.write('O '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n file.close()\n\n \n#FIRST EXPAND CELL (24 atoms)\n#REMOVE DUPLICATE ZN ATOMS WITH NO PERIODIC BOUNDARY CONDITIONS\n#SORT ATOMS SO THAT THE ONES TO BE CUT WILL BE THE LAST ONES (JUST SET THE INPUT FILE TO THIS)\n#FIND WINDOWS: CURRENTLY TOO MANY BONDS (LINKERS) CANNOT ASSIGN YET\n#FIND ZN ATOMS ON THE EDGES OF THE BOX (X, Y, Z)\n#LINK THESE TO THEIR COUNTERPARTS ON THE OTHER SIDE\n print('Assigning Zn buddies.')\n ZN_buddies=np.empty([BOUND_n_total,2],dtype='int')\n ZN_buddies=COMBINEDv3.zn_buddiesp(atom_typeINT,radii_array1,coord_frac,C_mat,x_expand,y_expand,z_expand,ZN_buddies,bond_fix)\n ZN_buddies=np.array(ZN_buddies,dtype='int')\n #print(ZN_buddies)\n\n#UPDATE THE WINDOW DEFINITIONS BY SUBSITUTING IN THEIR INDICES (include a tag saying whether an ZN atom that is part of the ring is on the wrong side)\n print('Updating Ring Definitions.')\n realrings_newzns=[]\n RING_DICT=defaultdict(list)\n ring_counter=1\n for ring in realrings:\n newring=[]\n for element in ring:\n flag_break=0\n for buddies in ZN_buddies:\n if element == buddies[1]:\n #print(element,buddies[1])\n #Correct the ring\n newring.append(buddies[0])\n if ixflags[element-1,0]==1:\n RING_DICT[ring_counter].append(1)\n if iyflags[element-1,0]==1:\n RING_DICT[ring_counter].append(2)\n if izflags[element-1,0]==1:\n RING_DICT[ring_counter].append(3)\n flag_break=1\n break\n if flag_break == 0:\n newring.append(element)\n RING_DICT[ring_counter].append(0)\n\n realrings_newzns.append(newring)\n ring_counter+=1\n\n #print(realrings)\n #print(realrings_newzns)\n #print(RING_DICT)\n\n#SAVE THE POSITIONS OF EACH OF THESE ZN ATOMS\n print('Location Corrections to Zn buddies.')\n file=open(output+'/ringcentersNEW.xyz','w')\n ring_counter=1\n window_center_xpos=[]\n window_center_ypos=[]\n window_center_zpos=[]\n for ring in realrings_newzns:\n 
#print(ring)\n x=0\n y=0\n z=0\n #CORRECTIONS\n fixes=RING_DICT[ring_counter]\n #print(fixes)\n fix=0\n for element in ring:\n #print(element,fixes[fix])\n if fixes[fix] == 1:\n x+=(xpos[element-1]+aLEN*x_expand)\n else:\n x+=xpos[element-1]\n\n if fixes[fix] == 2:\n y+=(ypos[element-1]+bLEN*y_expand)\n else:\n y+=ypos[element-1]\n\n if fixes[fix] == 3:\n z+=(zpos[element-1]+cLEN*z_expand)\n else:\n z+=zpos[element-1]\n\n fix+=1\n xmid=x/float(6)\n ymid=y/float(6)\n zmid=z/float(6)\n window_center_xpos.append(xmid)\n window_center_ypos.append(ymid)\n window_center_zpos.append(zmid)\n\n #print(xmid,ymid,zmid)\n file.write('O '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n ring_counter+=1\n file.close()\n\n\n#REMOVE THE ZN ATOMS THAT ARE ON THE EDGES OF THE BOX\n#RE-UPDATE THE CONNECTIVITY MATRIX #THIS GIVES THE CORRECT NUMBER OF LINKERS AND THE RIGHT BOND DEIFNITIONS\n#CHECK TO MAKE SURE NONE OF THE ARRAYS GET OUT OF POSITION\n############\n print('Connectivity Map generating - PART 2 (PERIODIC).')\n natomsREMOVED=len(atom_typeREMOVED)\n #this will need the expansion in xyz directions when I expand unit cell\n conn_matrix1REMOVED=np.zeros([natomsREMOVED,9])\n conn_matrix_charREMOVED=np.zeros([natomsREMOVED,4],dtype='a3')\n conn_matrix_nums1REMOVED=np.zeros([natomsREMOVED,4],dtype='int')\n bond_fix = 0.001\n radii_array=[]\n for atom in range(natomsREMOVED):\n atom1=atom_typeREMOVED[atom]\n rad1=3\n radii_array.append(rad1)\n radii_array1=np.array(radii_array,dtype='f')\n conn_matrix1REMOVED,conn_matrix_nums1REMOVED=COMBINEDv3.connectivity_mappingp(atom_typeINTREMOVED,radii_array1,coord_fracREMOVED,C_mat,x_expand,y_expand,z_expand,conn_matrix1REMOVED,conn_matrix_nums1REMOVED,bond_fix)\n #print(conn_matrix_char)\n print(conn_matrix_nums1REMOVED)\n print(conn_matrix1REMOVED)\n conn_matrixREMOVED=copy.deepcopy(conn_matrix1REMOVED)\n conn_matrix_numsREMOVED=copy.deepcopy(conn_matrix_nums1REMOVED)\n\n file=open(output+'/connect_check4_nums.xyz','w')\n for x in 
range(natomsREMOVED):\n file.write(str(conn_matrix_numsREMOVED[x,0])+' '+str(conn_matrix_numsREMOVED[x,1])+' '+str(conn_matrix_numsREMOVED[x,2])+' '+str(conn_matrix_numsREMOVED[x,3])+'\\n')\n file.close()\n\n for atom in range(natomsREMOVED):\n for i in range(4):\n if conn_matrix_numsREMOVED[atom,i] != 0 and conn_matrix_numsREMOVED[atom,i] <= natoms:\n #print(atom_type[int(conn_matrix_numsREMOVED[atom,i])-1,0])\n conn_matrix_charREMOVED[atom,i]=atom_typeREMOVED[int(conn_matrix_numsREMOVED[atom,i])-1,0]\n #print(conn_matrix_char)\n\n print('Connectivity Map generated!')\n\n file=open(output+'/connect_check3_char.xyz','w')\n for x in range(natomsREMOVED):\n file.write(str(conn_matrix_charREMOVED[x,0])+' '+str(conn_matrix_charREMOVED[x,1])+' '+str(conn_matrix_charREMOVED[x,2])+' '+str(conn_matrix_charREMOVED[x,3])+'\\n')\n file.close()\n\n bond_countREMOVED,BOND_ARRAYbackREMOVED=bonds(myid,natomsREMOVED,conn_matrix_charREMOVED,conn_matrix_numsREMOVED,4)\n print('LINKER:',bond_countREMOVED)\n##############\n #print(BOND_ARRAYback)\n #print(BOND_ARRAYbackREMOVED)\n #print(realrings_newzns)\n\n#######1\n#ASSIGN EACH EDGE TO A PARTICULAR WINDOW (3 EDGES PER WINDOW): BASED ON SOME CRITERIA\n#Loop THROUGH WINDOWS \n#Store edges in an array\n#Find window that shares an edge that was assigned\n LINKER_WINDOW_DICT=defaultdict(list)\n LINKER_WINDOW_DICTpointer=defaultdict(list)\n edges_visted=[]\n edges_visted_EOflag=[]\n\n EDGE_QFILL=[]\n EDGE_QFILL_FLAG=[]\n\n windows_visited=[]\n print('Assigning edges to windows.')\n for edge in BOND_ARRAYbackREMOVED:#-----------------------------------------------------------> OUTER LOOP (length of edges) this obviously is not repeated on\n #print('EDGE:',edge)\n windowflag=0\n window_counter=1 #this says which windows are which\n\n if any(list(e)==list(np.sort(edge)) for e in edges_visted):\n blahblah=1\n #print('edge assigned')\n else:\n ASSIGNEDFLAG=0\n previousWINDOWS_EOSTATUS='None'\n for window in 
realrings_newzns:#--------------------------------------------------------> 1st INNER LOOP (length of windows) after 500 edges, reduce \n windowflag=0\n windowindex=0\n\n windowtemp=[]\n windowtemp=copy.deepcopy(window)\n windowtemp.append(windowtemp[0])\n edgestemp=[]\n for i in range(6):#--------------------------------------------------------> 2st INNER LOOP (length of 6) this cannot change\n windowedge=[windowtemp[i],windowtemp[i+1]]\n edgestemp.append(windowedge)\n if all(x in np.sort(windowedge) for x in list(edge)):\n #print(edge,window,i)\n windowflag=1\n\n #We need to set the window index based on windows already taken\n if window_counter in windows_visited: #we already have this window assigned\n tempblah=LINKER_WINDOW_DICT[window_counter]\n previousWINDOWS_EOSTATUS=tempblah[3]\n windowindex=2\n elif window_counter not in windows_visited: #we already have this window assigned\n windowindex=1\n windows_visited.append(window_counter)\n\n #we need the edge index for later\n saved_index_of_edge=i\n\n if windowflag == 1: #we have a window\n #print(edgestemp,windowindex)\n \n if windowindex == 1: #we have an unassigned window\n if (saved_index_of_edge)%2 == 0 or saved_index_of_edge==0: \n #Label all even edges as being part of that window 0,2,4\n edges_visted.append(np.sort(edgestemp[0]))\n edges_visted_EOflag.append('even')\n edges_visted.append(np.sort(edgestemp[2]))\n edges_visted_EOflag.append('even')\n edges_visted.append(np.sort(edgestemp[4]))\n edges_visted_EOflag.append('even')\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[0]))\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[2]))\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[4]))\n LINKER_WINDOW_DICT[window_counter].append('even')\n ASSIGNEDFLAG=1\n\n #save in visited edges\n if (saved_index_of_edge)%2 != 0 and saved_index_of_edge!=0: #we have an odd\n #Label all odd edges as being part of that window 1,3,5\n edges_visted.append(np.sort(edgestemp[1]))\n 
edges_visted_EOflag.append('odd')\n edges_visted.append(np.sort(edgestemp[3]))\n edges_visted_EOflag.append('odd')\n edges_visted.append(np.sort(edgestemp[5]))\n edges_visted_EOflag.append('odd')\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[1]))\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[3]))\n LINKER_WINDOW_DICT[window_counter].append(np.sort(edgestemp[5]))\n LINKER_WINDOW_DICT[window_counter].append('odd')\n ASSIGNEDFLAG=1\n #save in visited edges\n if windowindex == 2: #we have an assigned window\n #print('do nothing right now')\n blahblahblah=1\n if ASSIGNEDFLAG==1:\n #print('got here')\n break\n window_counter+=1\n\n\n if len(edges_visted)>10000000:\n break\n\n\n #print((edges_visted))\n #print((edges_visted_EOflag))\n #print((np.sort(windows_visited)))\n #print(realrings_newzns)\n #print((LINKER_WINDOW_DICT))\n\n#SAVE THE POSITIONS OF EACH OF THESE ZN ATOMS\n print('Untyped Assigned Linkers to Windows - VISUAL.')\n file=open(output+'/LINKER_WINDOW.xyz','w')\n file.write(str(len(edges_visted))+'\\n\\n')\n \n for i in range(len(LINKER_WINDOW_DICT)):\n #ZNVALUES=realrings[i]\n ZNVALUESINRING=realrings_newzns[i]\n fixes=RING_DICT[i+1]\n #print(ZNVALUES,ZNVALUESINRING,fixes)\n #print(i)\n templist=LINKER_WINDOW_DICT[i+1]\n edgelist=[templist[0],templist[1],templist[2]]\n flag=templist[3]\n fixcounter=0\n for edge in edgelist:\n fixcounter=0\n atom1=edge[0] \n for atom in ZNVALUESINRING:\n if atom==atom1:\n FIX=fixes[fixcounter]\n if FIX==1:\n ix1=1\n else: \n ix1=0\n if FIX==2:\n iy1=1\n else: \n iy1=0\n if FIX==3:\n iz1=1\n else: \n iz1=0\n fixcounter+=1\n \n atom2=edge[1] \n fixcounter=0\n atom2=edge[1] \n for atom in ZNVALUESINRING:\n if atom==atom2:\n FIX=fixes[fixcounter]\n if FIX==1:\n ix2=1\n else: \n ix2=0\n if FIX==2:\n iy2=1\n else: \n iy2=0\n if FIX==3:\n iz2=1\n else: \n iz2=0\n fixcounter+=1\n\n xmid=(xpos[atom1-1]+float(ix1)*x_expand*aLEN + xpos[atom2-1]+float(ix2)*x_expand*aLEN)/float(2)\n 
ymid=(ypos[atom1-1]+float(iy1)*y_expand*bLEN + ypos[atom2-1]+float(iy2)*y_expand*bLEN)/float(2)\n zmid=(zpos[atom1-1]+float(iz1)*z_expand*cLEN + zpos[atom2-1]+float(iz2)*z_expand*cLEN)/float(2)\n\n file.write('O'+str(i)+' '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n file.close()\n\n\n#######2\n print(' ')\n print(' ')\n print('STARTING THE MONTE CARLO PROCEDURE STEPS.')\n print(' ')\n print(' ')\n n_linkerNNs=6\n #set_SOP=1\n #MCbeta=1000000\n #RMC_errorTOL=0.01\n\n#GENERATE THE EDGE NEIGHBOR LISTS\n NCL=len(BOND_ARRAYbackREMOVED)\n linker_NNlist=np.ones((NCL,6),dtype='int')\n for clusta in range(NCL):\n MET_LISTA=BOND_ARRAYbackREMOVED[clusta]\n #print(MET_LISTA)\n counter = 0\n for metalindex in range(2):\n met_inA=[int(MET_LISTA[metalindex])]\n for clustb in range(NCL):\n MET_LISTB=BOND_ARRAYbackREMOVED[clustb]\n #print(str([i for i in met_inA if i in MET_LISTB]))\n if str([i for i in met_inA if i in MET_LISTB])!='[]' and clusta != clustb:\n #print(clusta,counter,clustb+1)\n linker_NNlist[clusta,counter]=(clustb+1)\n counter+=1\n\n #print(linker_NNlist)\n #print(BOND_ARRAYbackREMOVED)\n\n#ASSIGN EACH EDGE A TYPE\n PERFECT_NUMBER2SWITCH=int(round(SWITCH_FRAC*float(NCL),0))\n oneprint(myid,'Pick which linkers to switch.')\n linkersswitching=[]\n for clust in range(NCL):\n if len(linkersswitching)==PERFECT_NUMBER2SWITCH:\n break\n if np.random.ranf() < SWITCH_FRAC+0.2:\n linkersswitching.append(clust+1)\n print('#CLUSTERS CHOOSEN::'+str(len(linkersswitching)))\n print('FRACTION OF NEW LINKER::'+str(float(len(linkersswitching))/float(NCL)))\n print('CLUSTERS CHOOSEN::'+str(linkersswitching))\n linker_composition=(float(len(linkersswitching))/float(NCL)) #THIS HAS TO BE ZIF-90(PD) since I LOOK AWAY FROM ZIF-8(GOLD) LINKERS \n\n #oneprint(myid,'Pick which linkers to switch.')\n #linkersswitching=[]\n #for clust in range(NCL):\n # if np.random.ranf() < SWITCH_FRAC: #-----------------------------------------------------------> input\n # 
linkersswitching.append(clust+1)\n #print('#CLUSTERS CHOOSEN::'+str(len(linkersswitching)))\n #print('FRACTION OF NEW LINKER::'+str(float(len(linkersswitching))/float(NCL)))\n #print('CLUSTERS CHOOSEN::'+str(linkersswitching))\n #linker_composition=(float(len(linkersswitching))/float(NCL)) #THIS HAS TO BE ZIF-90(PD) since I LOOK AWAY FROM ZIF-8(GOLD) LINKERS\n\n MAINOUTPUTFILE.write('Composition '+str(linker_composition)+'\\n')\n MAINOUTPUTFILE.flush()\n\n #(4)for a certain composition, choose which linkers get a 0(old linker) or a 1(new linker) and update the binary NN list\n linker_binaryNNlist=np.zeros((NCL,7),dtype='int')\n for clust in range(NCL):\n clustID=clust+1\n if clustID in linkersswitching:\n linker_binaryNNlist[clust,0]=1\n for clustnum in range(NCL):\n clustIDB=clustnum+1\n for neighbor in range(6):\n if clustIDB==linker_NNlist[clust,neighbor] and (clustIDB in linkersswitching):\n linker_binaryNNlist[clust,neighbor+1]=1\n print('FIRST LINKER BINARY LIST')\n #print(linkersswitching)\n #print(linker_binaryNNlist)\n print(np.sum(linker_binaryNNlist[:,0]))\n\n#RUN REVERSE MONTE CARLO ON THE STRUCTURE GIVING EACH EDGE A NEW TYPE IF NEEDED\n #(5)perform the MC scheme to get a certain short range order parameter [alpha=(1-Probability of 0 given 1)/(composition of 1)]:: no 0s next to 1s (alpha==1) and all 0s next to 1s (alpha==-1)\n #(6)switch the linkers that have a 1 attached to their index\n oneprint(myid,'Run MC Scheme.')\n SOP_old=measure_SOP(linker_composition,linker_binaryNNlist,n_linkerNNs)\n SOP_new=SOP_old\n SOP_print=SOP_old\n error_SOP_old=np.abs(SOP_old-set_real_SOP)\n error_SOP_saved=[]\n\n SRO_file=open(output+'/SRO_OUT_FILE.txt','w')\n #for i in range(10000):#---------------------------------------------------------------------------------------------------------------------------> DO IN INPUT SCRIPT\n icounter=-1\n if len(linkersswitching) > 0:\n #while (error_SOP_old > RMC_errorTOL): #probably set a certain number of interations\n while 
(icounter <= max_iterations):\n icounter+=1\n if icounter%10000 == 0:\n SRO_file.write(str(icounter)+' '+str(SOP_print)+' '+str(error_SOP_old)+'\\n')\n SRO_file.flush()\n #--1--#Pick two random linkers (1 is old and 2 is new)\n linkerflag=0\n while (linkerflag==0): #so we don't pick the same linker\n rand_linker_1=np.random.random_integers(1,NCL)\n if (rand_linker_1 in linkersswitching): #one of these has to be an 8 and the other a 90\n linkerflag=0\n else:\n linkerflag=1\n\n linkerflag=0\n while (linkerflag==0): #so we don't pick the same linker\n rand_linker_2=np.random.random_integers(1,NCL)\n if (rand_linker_2 in linkersswitching): #one of these has to be an 8 and the other a 90\n linkerflag=1\n else:\n linkerflag=0\n #print(rand_linker_1,rand_linker_2)\n\n #--2--#Switch the two linkers we picked\n linker_binaryNNlist_new,linkersswitching_new=switch_linkers(rand_linker_1,rand_linker_2,NCL,linker_NNlist,linkersswitching,n_linkerNNs,linker_binaryNNlist)\n #print(np.sum(linker_binaryNNlist_new[:,0]))\n #time.sleep(2)\n #print(linkersswitching_new)\n \n #--3--#Calculate the new SOP\n SOP_new=measure_SOP(linker_composition,linker_binaryNNlist_new,n_linkerNNs)\n\n #--4--#Run the MC acceptance criteria\n error_SOP_new=np.abs(SOP_new-set_real_SOP)\n if error_SOP_new < error_SOP_old:\n #accept the move\n #print('A')\n linker_binaryNNlist=linker_binaryNNlist_new\n error_SOP_old=error_SOP_new\n linkersswitching=linkersswitching_new\n error_SOP_saved.append(error_SOP_old)\n SOP_print=SOP_new\n elif error_SOP_new >= error_SOP_old:\n randaccept=np.random.ranf()\n if randaccept < np.exp(-MCbeta*error_SOP_new):\n #accept the move\n #print('A')\n linker_binaryNNlist=linker_binaryNNlist_new\n error_SOP_old=error_SOP_new\n linkersswitching=linkersswitching_new\n error_SOP_saved.append(error_SOP_old)\n SOP_print=SOP_new\n elif randaccept >= np.exp(-MCbeta*error_SOP_new):\n #reject the move\n #print('R')\n linker_binaryNNlist=linker_binaryNNlist\n error_SOP_old=error_SOP_old\n 
linkersswitching=linkersswitching\n error_SOP_saved.append(error_SOP_old)\n SOP_print=SOP_print\n if flag_SRO == 'pos':\n if icounter > max_iterations:\n break\n elif flag_SRO == 'neg':\n if icounter > max_iterations:\n break\n print(SOP_new)\n print(error_SOP_old)\n SRO_file.close()\n MAINOUTPUTFILE.write('SRO '+str(SOP_new)+'\\n')\n MAINOUTPUTFILE.flush()\n #print(linkersswitching_new)\n #print(linker_binaryNNlist_new)\n #print(np.sum(linker_binaryNNlist))\n file=open(output+'/reverseMC.dat','w')\n for i in range(len(error_SOP_saved)):\n file.write(str(i+1)+' '+str(error_SOP_saved[i])+'\\n')\n file.close()\n\n#SAVE THE POSITIONS OF EACH OF THESE ZN ATOMS\n print('Giving a type to all the edges - VISUAL.')\n file=open(output+'/LINKER_WINDOW_TYPED.xyz','w')\n file.write(str(len(edges_visted))+'\\n\\n')\n \n window_types=[]\n for i in range(len(LINKER_WINDOW_DICT)):\n #ZNVALUES=realrings[i]\n ZNVALUESINRING=realrings_newzns[i]\n fixes=RING_DICT[i+1]\n #print(ZNVALUES,ZNVALUESINRING,fixes)\n #print(i)\n templist=LINKER_WINDOW_DICT[i+1]\n edgelist=[templist[0],templist[1],templist[2]]\n flag=0\n fixcounter=0\n edge_type_counter=0\n for edge in edgelist:\n #FIND WHETHER AN EDGE IS AN 8 or 90\n #LOOP THROUGH THE EDGE LIST AND FIND THE INDEX\n saved_index=0\n saved_type=0\n for edgeindex in range(len(BOND_ARRAYbackREMOVED)):\n check_this_edge=BOND_ARRAYbackREMOVED[edgeindex]\n if all(check_this_edge==edge):\n saved_index=edgeindex\n saved_type=int(linker_binaryNNlist[saved_index,0])\n #print(saved_index,saved_type)\n break\n #USE INDEX TO REFERENCE THE FIRST COLUMN OF THE BINARY NN LIST\n fixcounter=0\n atom1=edge[0] \n for atom in ZNVALUESINRING:\n if atom==atom1:\n FIX=fixes[fixcounter]\n if FIX==1:\n ix1=1\n else: \n ix1=0\n if FIX==2:\n iy1=1\n else: \n iy1=0\n if FIX==3:\n iz1=1\n else: \n iz1=0\n fixcounter+=1\n \n atom2=edge[1] \n fixcounter=0\n atom2=edge[1] \n for atom in ZNVALUESINRING:\n if atom==atom2:\n FIX=fixes[fixcounter]\n if FIX==1:\n ix2=1\n else: \n 
ix2=0\n if FIX==2:\n iy2=1\n else: \n iy2=0\n if FIX==3:\n iz2=1\n else: \n iz2=0\n fixcounter+=1\n\n xmid=(xpos[atom1-1]+float(ix1)*x_expand*aLEN + xpos[atom2-1]+float(ix2)*x_expand*aLEN)/float(2)\n ymid=(ypos[atom1-1]+float(iy1)*y_expand*bLEN + ypos[atom2-1]+float(iy2)*y_expand*bLEN)/float(2)\n zmid=(zpos[atom1-1]+float(iz1)*z_expand*cLEN + zpos[atom2-1]+float(iz2)*z_expand*cLEN)/float(2)\n\n if saved_type== 0: #RED IS SLOW RATE\n file.write('O'+str(i)+' '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n edge_type_counter+=saved_type\n else: #BLUE IS FAST RATE\n file.write('Cl'+str(i)+' '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n edge_type_counter+=saved_type\n\n if edge_type_counter==0:\n window_types.append(0)\n elif edge_type_counter==1:\n window_types.append(1)\n elif edge_type_counter==2:\n window_types.append(2)\n elif edge_type_counter==3:\n window_types.append(3)\n\n file.close()\n\n\n\n#######3 \n#Position of each window is the midpoint between each cage center\n\n#Lay down the cage centers\n natomsCC=2\n atom_typeCC=['C','C']\n xposnpcCC=np.array([8.47151,16.94302],dtype='f')\n yposnpcCC=np.array([8.47151,16.94302],dtype='f')\n zposnpcCC=np.array([8.47151,16.94302],dtype='f')\n coord_regCC=np.vstack([xposnpcCC,yposnpcCC,zposnpcCC])\n coord_regCC=coord_regCC.T\n coord_fracCC=np.dot(C_mat_inv,coord_regCC.T)\n coord_fracCC=np.array(coord_fracCC.T)\n\n #Expand the unit cell\n atom_type_newCC,coord_fracnCC=expand_UC_CC(myid,natomsCC,atom_typeCC,coord_fracCC,x_expand,y_expand,z_expand)\n natomsCC=len(atom_type_newCC)\n print('NATOMS: '+str(natomsCC))\n coord_fracCC=np.array(coord_fracnCC.T)\n print('FRAC COORDS: '+str(coord_fracCC.shape))\n coord_regCC=np.dot(C_mat,coord_fracCC.T)\n coord_regCC=coord_regCC.T\n print('CART COORDS: '+str(coord_regCC.shape))\n xposCC=coord_regCC[:,0]\n yposCC=coord_regCC[:,1]\n zposCC=coord_regCC[:,2]\n\n atom_typeCC=np.array(atom_type_newCC,dtype='S7')\n atom_typeINTCC=[]\n for atom in range(natomsCC):\n if 
atom_typeCC[atom]=='H' or atom_typeCC[atom]=='H2' or atom_typeCC[atom]=='H3':\n atom_typeINTCC.append(1)\n if atom_typeCC[atom]=='C' or atom_typeCC[atom]=='C1' or atom_typeCC[atom]=='C2' or atom_typeCC[atom]=='C3':\n atom_typeINTCC.append(2)\n if atom_typeCC[atom]=='N':\n atom_typeINTCC.append(3)\n if atom_typeCC[atom]=='O':\n atom_typeINTCC.append(4)\n if atom_typeCC[atom]=='Zn':\n atom_typeINTCC.append(5)\n atom_typeINTCC=np.array(atom_typeINTCC,dtype='int')\n\n file=open(output+'/'+str(name)+'_CAGECENTERS_TEST_EXPAND'+str(x_expand)+str(y_expand)+str(z_expand)+'.xyz','w')\n file.write(str(len(atom_typeCC))+'\\n')\n file.write(str(name)+' '+str(aLEN)+' '+str(bLEN)+' '+str(cLEN)+' '+str(alphaD)+' '+str(betaD)+' '+str(gammaD)+'\\n')\n for i in range(len(atom_typeCC)):\n file.write(str(atom_typeCC[i]).replace(\"['\",'').replace(\"']\",'')+str(i+1)+' '+str(float(xposCC[i]))+' '+str(float(yposCC[i]))+' '+str(float(zposCC[i]))+'\\n')\n file.write('\\r')\n file.close()\n\n file=open(output+'/CAGECENTERS_TEST_FRAC'+str(x_expand)+str(y_expand)+str(z_expand)+'.xyz','w')\n file.write(str(natomsCC)+'\\n\\n')\n for x in range(natomsCC):\n #file.write(str(atom_typeCC[x])+' '+str(coord_fracCC[x,0]*aLEN)+' '+str(coord_fracCC[x,1]*bLEN)+' '+str(coord_fracCC[x,2]*cLEN)+'\\n')\n file.write(str(atom_typeCC[x])+' '+str(coord_fracCC[x,0]*1)+' '+str(coord_fracCC[x,1]*1)+' '+str(coord_fracCC[x,2]*1)+'\\n')\n file.close()\n\n#Generate cage center connectivity\n print('Connectivity Map generating - PART 3 (CAGE CENTERS).')\n #this will need the expansion in xyz directions when I expand unit cell\n conn_matrix1CC=np.zeros([natomsCC,17])\n conn_matrix_charCC=np.zeros([natomsCC,8],dtype='a3')\n conn_matrix_nums1CC=np.zeros([natomsCC,8],dtype='int')\n bond_fixCC = 0.001\n radii_arrayCC=[]\n for atom in range(natomsCC):\n atom1=atom_typeCC[atom]\n rad1CC=8\n radii_arrayCC.append(rad1CC)\n radii_array1CC=np.array(radii_arrayCC,dtype='f')\n print (COMBINEDv2.connectivity_mapping.__doc__)\n 
conn_matrix1CC,conn_matrix_nums1CC=COMBINEDv2.connectivity_mapping(atom_typeINTCC,radii_array1CC,coord_fracCC,C_mat,x_expand,y_expand,z_expand,conn_matrix1CC,conn_matrix_nums1CC,bond_fixCC)\n #print(conn_matrix_char)\n print(conn_matrix_nums1CC)\n print(conn_matrix1CC)\n conn_matrixCC=copy.deepcopy(conn_matrix1CC)\n conn_matrix_numsCC=copy.deepcopy(conn_matrix_nums1CC)\n\n file=open(output+'/connect_check2_nums_CAGECENTERS.xyz','w')\n for x in range(natomsCC):\n file.write(str(conn_matrix_numsCC[x,0])+' '+str(conn_matrix_numsCC[x,1])+' '+str(conn_matrix_numsCC[x,2])+' '+str(conn_matrix_numsCC[x,3])+' '+\n str(conn_matrix_numsCC[x,4])+' '+str(conn_matrix_numsCC[x,5])+' '+str(conn_matrix_numsCC[x,6])+' '+str(conn_matrix_numsCC[x,7])+'\\n')\n file.close()\n\n for atom in range(natomsCC):\n for i in range(8):\n if conn_matrix_numsCC[atom,i] != 0 and conn_matrix_numsCC[atom,i] <= natomsCC:\n conn_matrix_charCC[atom,i]=atom_typeCC[int(conn_matrix_numsCC[atom,i])-1]\n #print(conn_matrix_char)\n\n print('Connectivity Map generated!')\n\n file=open(output+'/connect_check1_char_CAGECENTERS.xyz','w')\n for x in range(natomsCC):\n file.write(str(conn_matrix_charCC[x,0])+' '+str(conn_matrix_charCC[x,1])+' '+str(conn_matrix_charCC[x,2])+' '+str(conn_matrix_charCC[x,3])+' '+\n str(conn_matrix_charCC[x,4])+' '+str(conn_matrix_charCC[x,5])+' '+str(conn_matrix_charCC[x,6])+' '+str(conn_matrix_charCC[x,7])+'\\n')\n file.close()\n\n#Generate cage bonds\n bond_countCC,BOND_ARRAYbackCC=bonds(myid,natomsCC,conn_matrix_charCC,conn_matrix_numsCC,8)\n print('CAGE CENTER CONNECTIONS:',bond_countCC)\n print('WINDOWS:',len(LINKER_WINDOW_DICT))\n #print(windows_visited)\n #print(len(windows_visited))\n #print(window_types)\n #print(BOND_ARRAYbackCC)\n\n#Get the window distributions\n file=open(output+'/WINDOW_DISTRIBUTIONS.data','w')\n bins=np.array([0,1,2,3,4,5,6])\n window_probabilities=np.histogram(window_types, bins,density=True)\n probs=window_probabilities[0]\n file.write('0 
'+str(probs[0])+'\\n')\n file.write('1 '+str(probs[1])+'\\n')\n file.write('2 '+str(probs[2])+'\\n')\n file.write('3 '+str(probs[3])+'\\n')\n file.close()\n\n\n#Link each of the bonds to a window (equal number)\n file=open(output+'/WINDOW_CENTERS_ORDERED_TYPED.xyz','w')\n for w in range(len(windows_visited)):\n file.write('C'+str(w)+'_'+str(window_types[w])+' '+str(window_center_xpos[w])+' '+str(window_center_ypos[w])+' '+str(window_center_zpos[w])+'\\n')\n file.close()\n \n CC_window_index=[]\n CC_connection_type=[]\n CC_imagewrap_flag_x=[] \n CC_imagewrap_flag_y=[] \n CC_imagewrap_flag_z=[] \n\n file=open(output+'/CAGE_CENTERS_MD.xyz','w')\n for CC in BOND_ARRAYbackCC:\n CC_1=CC[0]-1\n CC_2=CC[1]-1\n #print(' ')\n #print(CC_1+1,CC_2+1)\n xposCC_1=xposCC[CC_1]\n yposCC_1=yposCC[CC_1]\n zposCC_1=zposCC[CC_1]\n xposCC_2=xposCC[CC_2]\n yposCC_2=yposCC[CC_2]\n zposCC_2=zposCC[CC_2]\n dx=np.abs(xposCC_1-xposCC_2)\n dy=np.abs(yposCC_1-yposCC_2)\n dz=np.abs(zposCC_1-zposCC_2)\n #print(dx,dy,dz)\n\n x_flag=0\n y_flag=0\n z_flag=0\n Aflag=0\n Bflag=0\n #Check to see if either on on the edge, if so move the one NOT on the edge over\n\n if dx >=9 and (xposCC_1 <= x_expand*aLEN+0.01 and xposCC_1 >= x_expand*aLEN-0.01):\n x_flag=1\n Aflag=1\n if dy >=9 and (yposCC_1 <= y_expand*bLEN+0.01 and yposCC_1 >= y_expand*bLEN-0.01):\n y_flag=1\n Aflag=1\n if dz >=9 and (zposCC_1 <= z_expand*cLEN+0.01 and zposCC_1 >= z_expand*cLEN-0.01):\n z_flag=1\n Aflag=1\n\n if dx >=9 and (xposCC_2 <= x_expand*aLEN+0.01 and xposCC_2 >= x_expand*aLEN-0.01):\n x_flag=1\n Bflag=1\n if dy >=9 and (yposCC_2 <= y_expand*bLEN+0.01 and yposCC_2 >= y_expand*bLEN-0.01):\n y_flag=1\n Bflag=1\n if dz >=9 and (zposCC_2 <= z_expand*cLEN+0.01 and zposCC_2 >= z_expand*cLEN-0.01):\n z_flag=1\n Bflag=1\n\n #print(Aflag,Bflag,x_flag,y_flag,z_flag)\n\n #Get midpoint\n if Aflag==1 or Bflag==1: #The first or second atom is on the edge\n if x_flag==1:\n CC_imagewrap_flag_x.append(1)\n 
xmid=(xposCC_1+(xposCC_2-x_expand*aLEN))/float(2)\n else:\n CC_imagewrap_flag_x.append(0)\n xmid=(xposCC_1+xposCC_2)/float(2)\n\n if y_flag==1:\n CC_imagewrap_flag_y.append(1)\n ymid=(yposCC_1+(yposCC_2-y_expand*bLEN))/float(2)\n else:\n CC_imagewrap_flag_y.append(0)\n ymid=(yposCC_1+yposCC_2)/float(2)\n\n if z_flag==1:\n CC_imagewrap_flag_z.append(1)\n zmid=(zposCC_1+(zposCC_2-z_expand*cLEN))/float(2)\n else:\n CC_imagewrap_flag_z.append(0)\n zmid=(zposCC_1+zposCC_2)/float(2)\n else: \n CC_imagewrap_flag_x.append(0)\n CC_imagewrap_flag_y.append(0)\n CC_imagewrap_flag_z.append(0)\n xmid=(xposCC_1+xposCC_2)/float(2)\n ymid=(yposCC_1+yposCC_2)/float(2)\n zmid=(zposCC_1+zposCC_2)/float(2)\n\n #print(xmid,ymid,zmid)\n file.write('C '+str(xmid)+' '+str(ymid)+' '+str(zmid)+'\\n')\n\n #Loop through the linker windows and find the closest one to the midpoint\n for window in range(len(window_types)):\n xwin=window_center_xpos[window]\n ywin=window_center_ypos[window]\n zwin=window_center_zpos[window]\n\n dwinx=np.abs(xwin-xmid)\n dwiny=np.abs(ywin-ymid)\n dwinz=np.abs(zwin-zmid)\n\n #Assign that CC to that window\n if dwinx<=0.01 and dwiny<=0.01 and dwinz<=0.01:\n CC_window_index.append(window)\n CC_connection_type.append(window_types[window])\n break\n file.close()\n #print(CC_connection_type)\n print(len(CC_connection_type))\n\n\n#RUN KMC WHICH IS BELOW!!!!\n ### KMC ALGORITHM FOR MOLECULES AT INFINITE DILUTION\n #Nmolecules=0\n vector = namedtuple('vector',['x','y','z'],verbose=False)\n #MCSTEPS=1000\n #PRINTFREQ=1\n\n ### 1. 
DEFINE THE RATES\n k_rates=[1e6,1e7,1e8,1e9] #the rates of a molecule through each type of window (pure0,one 1, two 1s,pure1) #hops per second 0 1 2 3 \n\n#Each window has a specific rate\n CC_rate_library=defaultdict(list)\n for cagecenter in range(natomsCC): #go through all the cages\n for N in range(8): #for each of the 8 neighbors\n NN=conn_matrix_numsCC[cagecenter,N]\n edge_1=np.sort([cagecenter+1,NN])\n for edge in range(len(BOND_ARRAYbackCC)):\n edge_2=np.sort(BOND_ARRAYbackCC[edge])\n if all(edge_1==edge_2):\n #this needs to include information about passing through boundaries if edge chosen\n #sign of flag will be picked later, if xyzleaving > xyzreceiving then flag is positive and vica versa\n CC_rate_library[cagecenter+1].append([NN,CC_window_index[edge],CC_connection_type[edge],k_rates[CC_connection_type[edge]],xposCC[NN-1],yposCC[NN-1],zposCC[NN-1],CC_imagewrap_flag_x[edge],CC_imagewrap_flag_y[edge],CC_imagewrap_flag_z[edge]])\n break\n #print(CC_rate_library)\n\n\n\n #########THIS IS WHERE I WILL BUILD MY PERCOLATION SCHEME###############\n #I WILL FIND THE NUMBER OF CLUSTERS RELATING TO EACH TYPE OF RATE EDGE\n\n ########################################################################\n\n #for i in range(natomsCC):\n # listtemp=CC_rate_library[i+1]\n # print(len(listtemp))\n\n ### BEGIN THE KMC SIMULATION\n\n TIME_ARRAY=np.zeros((MCSTEPS/PRINTFREQ+1,Nmolecules))\n MSD_ARRAY=np.zeros((MCSTEPS/PRINTFREQ+1,Nmolecules))\n for n in range(Nmolecules):\n print('ADSORBATE: ',n)\n\n #TRACK REAL TIME, SIMULATION TIME, AND THE MEAN SQUARED DISPLACEMENT\n moviefile=open(output+'/movie.xyz','w')\n ST=0\n RT=0\n MSD=0\n #Pick a random position/cage to start in\n CCstart=np.random.random_integers(1,natomsCC)\n CCcurrent=CCstart\n STARTING_POS=vector(xposCC[CCstart-1],yposCC[CCstart-1],zposCC[CCstart-1])\n CURRENT_POS=vector(xposCC[CCstart-1],yposCC[CCstart-1],zposCC[CCstart-1])\n moviefile.write('1 \\n\\n')\n moviefile.write('Ag '+str(CURRENT_POS.x)+' 
'+str(CURRENT_POS.y)+' '+str(CURRENT_POS.z)+'\\n')\n moviefile.flush()\n IMAGE_FLAGS=vector(0,0,0)\n TIME_ARRAY,MSD_ARRAY=CALCULATEMSD(TIME_ARRAY,MSD_ARRAY,STARTING_POS,CURRENT_POS,IMAGE_FLAGS,RT,n,ST)\n\n while ST < MCSTEPS:\n \n #EACH WINDOW TYPE REFERS TO A PARTICULAR RATE \n RATES=[] #always reset, get these from the defined rates above\n #print(CCcurrent)\n listofpossibleNNs=CC_rate_library[CCcurrent]\n #print(listofpossibleNNs)\n for k_index in range(8):\n temparray=listofpossibleNNs[k_index]\n #print(temparray)\n RATES.append(temparray[3])\n\n #FOR EACH WINDOW TYPE, GET THE SUM OF THE RATES\n ktotal=np.sum(RATES)\n\n #GET THE PROBABILITY OF CHOOSING ONE OF THE 8 RATES\n r1=np.random.rand()*ktotal\n\n #MAGIC HAPPENS HERE\n counter=0\n for rate in RATES:\n if counter==0:\n klower=0\n kupper=np.sum(RATES[0:counter+1])\n else:\n klower=np.sum(RATES[0:counter])\n kupper=np.sum(RATES[0:counter+1])\n\n if r1 > klower and r1<= kupper:\n window_chosen=counter+1 #THIS HAS TO BE A NUMBER BETWEEN 1 and 8\n #print('WINDOW: ',window_chosen)\n templistB=listofpossibleNNs[counter]\n CCcurrent=templistB[0]\n #print('CC: ',CCcurrent)\n pos_chosen=vector(templistB[4],templistB[5],templistB[6]) #THIS SAYS WHAT DIRECTION WE HOP IN\n img_chosen=vector(templistB[7],templistB[8],templistB[9])\n break\n\n counter+=1 #THIS WILL GO UP TO 7(8)\n\n #UPDATE THE POSITION OF THE ADSORBATE USING THE VECTOR CHOSEN--------------------------------------------------------------------------#FIX THIS PART\n if img_chosen.x == 1:\n #Forwards\n if pos_chosen.x < CURRENT_POS.x:\n x_holder=IMAGE_FLAGS.x+1\n #Backwards\n elif pos_chosen.x > CURRENT_POS.x:\n x_holder=IMAGE_FLAGS.x-1\n else:\n x_holder=IMAGE_FLAGS.x\n\n if img_chosen.y == 1:\n #Forwards\n if pos_chosen.y < CURRENT_POS.y:\n y_holder=IMAGE_FLAGS.y+1\n #Backwards\n elif pos_chosen.y > CURRENT_POS.y:\n y_holder=IMAGE_FLAGS.y-1\n else:\n y_holder=IMAGE_FLAGS.y\n\n if img_chosen.z == 1:\n #Forwards\n if pos_chosen.z < CURRENT_POS.z:\n 
z_holder=IMAGE_FLAGS.z+1\n #Backwards\n elif pos_chosen.z > CURRENT_POS.z:\n z_holder=IMAGE_FLAGS.z-1\n else:\n z_holder=IMAGE_FLAGS.z\n\n IMAGE_FLAGS=vector(x_holder,y_holder,z_holder)\n #print('FLAGS:',IMAGE_FLAGS)\n\n CURRENT_POS=pos_chosen\n #print(CURRENT_POS)\n moviefile.write('1 \\n\\n')\n moviefile.write('Ag '+str(CURRENT_POS.x)+' '+str(CURRENT_POS.y)+' '+str(CURRENT_POS.z)+'\\n')\n moviefile.flush()\n\n #UPDATE THE TIME (SIMULATION AND REAL) INCREMENT\n ST+=1\n r2=np.random.ranf()\n deltaT=float(-np.log(r2))/float(ktotal)\n RT+=deltaT\n\n if myid == 0 and ST%PRINTFREQ==0:\n TIME_ARRAY,MSD_ARRAY=CALCULATEMSD(TIME_ARRAY,MSD_ARRAY,STARTING_POS,CURRENT_POS,IMAGE_FLAGS,RT,n,ST)\n\n #PRINT OUT THE ARRAY\n moviefile.close()\n\n\n if myid == 0:\n TIME_ARRAY=np.mean(TIME_ARRAY,axis=1)\n MSD_ARRAY=np.mean(MSD_ARRAY,axis=1)\n TIME_ARRAY=TIME_ARRAY.reshape(len(TIME_ARRAY),1)\n MSD_ARRAY=MSD_ARRAY.reshape(len(MSD_ARRAY),1)\n MSD_ARRAY=MSD_ARRAY*(1e-20)\n\n slope,residual,rank,s=np.linalg.lstsq(TIME_ARRAY,MSD_ARRAY)\n R2 = 1 - residual/(MSD_ARRAY.size * MSD_ARRAY.var())\n if R2<0.97:\n print('WARNING::'+str(R2))\n diffusivity=slope/(float(6)) #this is in units of m2/s\n print(slope/float(8*(14.75**2)))\n print(diffusivity)\n\n file=open(output+'/diffusivity.data','a')\n file.write(str(diffusivity).replace('[','').replace(']','')+'\\n')\n file.close()\n\n\n file=open(output+'/MSD.data','w')\n for i in range(len(TIME_ARRAY)):\n file.write(str(TIME_ARRAY[i]).replace('[','').replace(']','')+' '+str(MSD_ARRAY[i]).replace('[','').replace(']','')+'\\n')\n file.close()\n\n\n MAINOUTPUTFILE.close()\n\n\n########################################################################################################################################################################\n print('REACHED STOPPING POINT')\n return()\n time.sleep(10000000000) #STOP 
HERE\n########################################################################################################################################################################\n\n\n \n\n\n\n\n\n############################################################################################################\nif __name__==\"__main__\":\n time.sleep(2)\n numprocs = MPI.COMM_WORLD.Get_size() # Number of processors\n myid = MPI.COMM_WORLD.Get_rank() # Id of this processor\n node = MPI.Get_processor_name()\n print (\"I am proc %d of %d on node %s\" %(myid, numprocs, node))\n time.sleep(3)\n\n if myid == 0:\n print('KMC Module is running as an independent program.')\n print('The following KMC Module input arugments are '+str(sys.argv[1::]))\n \n if myid > -1:\n main(myid,sys.argv[1])\n\nelse:\n print('KMC Module is running as an imported module.')","sub_path":"kmc_package/src_temp/kmc.py","file_name":"kmc.py","file_ext":"py","file_size_in_byte":65132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"70742499","text":"# -*- coding: utf-8 -*-\n\nimport sqlite3 as sql\nimport os, time\n\n#BHARF = \"ÇĞİIÖŞÜ\"\n#KHARF = \"çğiıöşü\"\n#string.lower() metodu I ve İ dönüşümlerinde sorunlu\nBHARFX = \"Iİ\"\nKHARFX = \"ıi\"\n\nAYRACLAR = \",\\.;«»!?-:/\\*+_=\\\"<>()'[]|º#&%\"\n\ndef kucukHarfYap(sozcuk):\n ss = ''\n for i in range(len(sozcuk)):\n ok = False\n for j in range(len(BHARFX)):\n if sozcuk[i]== BHARFX[j]:\n ss += KHARFX[j]\n ok = True\n break\n if ok == False:\n ss += sozcuk[i]\n ss = ss.lower()\n return ss\n\nclass Veritabani:\n def __init__(self, dosya=None):\n if dosya is None:\n self.vt = sql.connect(\":memory:\")\n else:\n self.vt = sql.connect(dosya)\n\n def sema(self, sema):\n cr = self.vt.cursor()\n cr.execute(sema)\n self.vt.commit()\n\n def sorgu(self, sorgu):\n cr = self.vt.cursor()\n cr.execute(sorgu)\n self.vt.commit()\n\n def cevap(self, sorgu):\n cr = self.vt.cursor()\n cr.execute(sorgu)\n return 
cr.fetchall()\n\n\nclass AnaSozluk(Veritabani):\n def __init__(self, dosya=\"anasozluk.db\", yeni=False):\n #def __init__(self, dosya=None, yeni=False):\n Veritabani.__init__(self, dosya)\n try:\n cevap = self.cevap(\"select * from sozcukler limit 1\")\n except:\n yeni = True\n\n if (yeni is True) or (os.path.isfile(dosya) is False):\n self.sema(\"CREATE TABLE sozcukler (sozcuk TEXT, frekans INT)\")\n\n def liste_ekle(self, liste):\n for kelime, sayi_ in liste.items():\n print (kelime, sayi_)\n self.ekle(kelime, sayi = sayi_)\n\n def ekle(self, sozcuk, sayi =1):\n kelime, frekans = self.kontrol(sozcuk)\n if kelime is not None:\n if frekans == 0:\n sorgu_cumlesi = 'insert into sozcukler values(\"%s\", 1) ' % kelime\n self.sorgu(sorgu_cumlesi)\n else:\n sorgu_cumlesi = \"update sozcukler set frekans = %d where sozcuk = '%s' \" % (frekans + sayi, kelime)\n self.sorgu(sorgu_cumlesi)\n\n def kontrol(self, sozcuk):\n cr = self.vt.cursor()\n cr.execute('select * from sozcukler where sozcuk = \"%s\" ' % sozcuk)\n cevaplar = cr.fetchall()\n if len(cevaplar) > 1:\n return None, \"Birden fazla sozcuk dondu, problem var\"\n elif len(cevaplar) == 1:\n return cevaplar[0]\n else:\n return sozcuk, 0\n\n def kapat(self):\n self.vt.commit()\n self.vt.close()\n\n\nclass Derlem:\n def __init__(self, icerik):\n self.anasozluk = AnaSozluk()\n self.icerik = icerik\n self.incele()\n\n def incele(self):\n def is_tek_tire_var(sozcuk_):\n if sozcuk_.count(\"-\") == 1:\n return True\n return False\n\n def is_tirnak_icinde(sozcuk_):\n if sozcuk_[0] == \"'\" or sozcuk_[-1] == \"'\":\n return True\n elif sozcuk_[0] == '\"' or sozcuk_[-1] == '\"':\n return True\n else:\n return False\n\n def is_tek_tirnak_alpha(sozcuk_):\n if is_tirnak_icinde(sozcuk_):\n return False\n var = sozcuk_.count(\"'\")\n if (sozcuk_[0] == \"'\") or (sozcuk_[-1] == \"'\"):\n return False\n say0 = sozcuk_.find(\"'\")\n\n if var == 1:\n s1 = sozcuk_[:say0]\n s2 = sozcuk_[say0 + 1:]\n if s1.isalpha() and s2.isalpha():\n 
return True\n else:\n return False\n else:\n return False\n\n sozcukler = []\n hatalar = []\n satir0 = \"\"\n for satir in self.icerik:\n satir0 += str(satir).strip()\n if len(satir0) > 0:\n if satir0[-1] == \"-\":\n satir0 = satir0[:-1]+' ' # Satır sonundaki tireyi boşluk yap\n continue\n else:\n for ayirac in AYRACLAR:\n satir0 = satir0.replace(ayirac, \" \")\n for kelime in satir0.split():\n if kelime.isalpha():\n sozcukler.append(kelime)\n elif kelime.isalnum() or kelime.isdigit():\n pass\n else:\n k = kelime.strip(AYRACLAR)\n sozcukler.append(k)\n satir0 = \"\"\n temp = {}\n for sozcuk in sozcukler:\n if (sozcuk == \"\") or (sozcuk.isdigit()):\n continue\n if is_tek_tirnak_alpha(sozcuk):\n pass\n elif is_tek_tire_var(sozcuk):\n pass\n elif sozcuk.isalpha():\n pass\n else:\n hatalar.append(sozcuk)\n continue\n #sozcuk = sozcuk.lower()\n sozcuk = kucukHarfYap(sozcuk)\n if sozcuk in temp:\n temp[sozcuk] += 1\n else:\n temp[sozcuk] = 1\n self.anasozluk.liste_ekle(temp)\n\nclass PDFDerlem(Derlem):\n def __init__(self, hedef):\n #TODO duzgun turkce metin cikarabilen bir pdf modulu bulmak gerek.\n #import pyPdf\n icerik = \"\"\n pdf = pyPdf.PdfFileReader(open(hedef,\"rb\"))\n for sayfa in pdf.pages:\n icerik += sayfa.extractText()\n Derlem.__init__(self, icerik.encode(\"utf8\").splitlines(True))\n\n\nclass HTMLDerlem(Derlem):\n def __init__(self, hedef):\n from bs4 import BeautifulSoup\n import urllib.request\n text = \"\"\n url = hedef\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html,\"lxml\")\n if soup is not None:\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n text = soup.getText()\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = '\\n'.join(chunk for chunk in chunks if chunk)\n\n #Derlem.__init__(self, text.encode(\"utf-8\").splitlines(True))\n 
Derlem.__init__(self, text.splitlines(True))\n\nclass TXTDerlem(Derlem):\n def __init__(self, hedef):\n icerik = \"\"\n with open(hedef,encoding=\"utf-8\") as fdosya:\n for sat in fdosya:\n icerik += sat\n Derlem.__init__(self, icerik.splitlines(True))\n\nif __name__ == '__main__':\n assert kucukHarfYap(\"ÇĞIİÖŞÜ\")==\"çğıiöşü\"\n basla = time.perf_counter()\n #htmltest = HTMLDerlem(\"http://manap.se/test.txt\")\n #pdftest = PDFDerlem(\"veri/test.pdf\")\n txttest = TXTDerlem(\"veri/txttest.txt\")\n print(\"Toplam çalışma süresi = {} saniye\".format(time.perf_counter()-basla))\n","sub_path":"derlem.py","file_name":"derlem.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"115061293","text":"\"\"\"\n64. Analogy data experiment\nDownload word analogy evaluation dataset. \nCompute the vector as follows: vec(word in second column) - vec(word in first column) + vec(word in third column). \nFrom the output vector, (1) find the most similar word and (2) compute the similarity score with the word. 
\nAppend the most similar word and its similarity to each row of the downloaded file.\n\"\"\"\nimport time\nimport gensim\nfrom tqdm import tqdm\n\n# ファイルの作成\ndef analogy_data_experiment(wv, dataset, new_files):\n with open(dataset, 'r') as f:\n with open(new_files, 'w') as g:\n for line in tqdm(f):\n line = line.strip().split(' ')\n if len(line) == 4:\n vector = wv.most_similar(positive = [line[1], line[2]], negative = [line[0]], topn = 1)\n result = ' '.join(line + [vector[0][0], str(vector[0][1])])\n g.write(f'{result}\\n')\n else:\n result = ' '.join(line)\n g.write(f'{result}\\n')\n\n\nif __name__ == '__main__':\n start = time.time()\n model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)\n analogy_data_experiment(model, 'questions-words-copy.txt', 'ch07_64-answer.txt')\n end = time.time()\n print(end - start, '[s]')\n\"\"\"\n19558it [1:07:01, 4.86it/s]\n4080.8371748924255 [s]\n\"\"\"","sub_path":"2023/honda/ch07/ch07_64.py","file_name":"ch07_64.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"76836448","text":"\"\"\"\nRuby Blueprint\n==============\n\n**Fabric environment:**\n\n.. code-block:: yaml\n\n blueprints:\n - blues.ruby\n\n settings:\n ruby:\n gems: # List of ruby gems to install (Optional)\n # - sass\n\n\"\"\"\nfrom fabric.decorators import task\n\nfrom refabric.api import run, info\nfrom refabric.context_managers import sudo\nfrom refabric.contrib import blueprints\n\nfrom . 
import debian\n\n__all__ = ['setup', 'configure']\n\n\nblueprint = blueprints.get(__name__)\n\n\n@task\ndef setup():\n \"\"\"\n Install Ruby and configured gems\n \"\"\"\n install()\n configure()\n\n\n@task\ndef configure():\n \"\"\"\n Install configured gems\n \"\"\"\n install_gems()\n\n\ndef install():\n with sudo():\n info('Installing Ruby v1.9.3')\n debian.apt_get('install', 'ruby1.9.3')\n\n info('Installing Bundler')\n gem('install', 'bundler')\n\n\ndef install_gems():\n info('Installing Gems')\n gems = blueprint.get('gems', [])\n gem('install', *gems)\n\n\ndef gem(command, *options):\n info('Running gem {}', command)\n with sudo():\n run('gem {} {} --no-ri --no-rdoc'.format(command, ' '.join(options)))\n","sub_path":"blues/ruby.py","file_name":"ruby.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"258856349","text":"import unittest\nimport os\nimport numpy as np\n\nfrom platform import python_implementation\n\nfrom sentinelhub import read_data, write_data, TestSentinelHub\n\n\nclass TestIO(TestSentinelHub):\n\n class IOTestCase:\n\n def __init__(self, filename, mean, shape=(2048, 2048, 3)):\n self.filename = filename\n self.mean = mean\n self.shape = shape\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.test_cases = [\n cls.IOTestCase('img.tif', 13577.494856),\n cls.IOTestCase('img.jpg', 52.41194),\n cls.IOTestCase('img.png', 52.33736),\n cls.IOTestCase('img-8bit.jp2', 47.09060, (343, 343, 3)),\n cls.IOTestCase('img-15bit.jp2', 0.3041897, (1830, 1830)),\n cls.IOTestCase('img-16bit.jp2', 0.3041897, (1830, 1830)),\n ]\n\n def test_img_read(self):\n\n for test_case in self.test_cases:\n with self.subTest(msg=test_case.filename):\n file_path = os.path.join(self.INPUT_FOLDER, test_case.filename)\n img = read_data(file_path)\n\n self.assertEqual(img.shape, test_case.shape,\n 'Expected shape {}, got {}'.format(test_case.shape, img.shape))\n\n 
if test_case.filename != 'img.jpg' or python_implementation() != 'PyPy':\n self.assertAlmostEqual(np.mean(img), test_case.mean, delta=1e-4,\n msg='Expected mean {}, got {}'.format(test_case.mean, np.mean(img)))\n\n self.assertTrue(img.flags['WRITEABLE'], msg='Obtained numpy array is not writeable')\n\n new_file_path = os.path.join(self.OUTPUT_FOLDER, test_case.filename)\n write_data(new_file_path, img)\n new_img = read_data(new_file_path)\n\n if not test_case.filename.endswith('jpg'):\n self.assertTrue(np.array_equal(img, new_img), msg=\"Original and new image are not the same\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_io_utils.py","file_name":"test_io_utils.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"451074402","text":"import requests\n\n\ndef geocode(address):\n geocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n\n geocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": address,\n \"format\": \"json\"}\n\n response = requests.get(geocoder_api_server, params=geocoder_params)\n\n if not response:\n raise RuntimeError(f'Ошибка выполнения запроса:\\n' \\\n f'{response.url}\\n' \\\n f'Статус: {response.status_code} {response.reason}')\n data = response.json()\n features = data[\"response\"][\"GeoObjectCollection\"][\"featureMember\"]\n return features[0][\"GeoObject\"] if features else None\n\n\ndef get_coordinates(address):\n toponym = geocode(address)\n if toponym is None:\n return None, None\n toponym_coordinates = toponym[\"Point\"][\"pos\"]\n toponym_longitude, toponym_lattitude = toponym_coordinates.split(\" \")\n return float(toponym_longitude), float(toponym_lattitude)\n\n\ndef get_ll_span(address):\n toponym = geocode(address)\n if toponym is None:\n return None, None\n toponym_coordinates = toponym[\"Point\"][\"pos\"]\n toponym_longitude, toponym_lattitude = 
toponym_coordinates.split(\" \")\n ll = ','.join([toponym_longitude, toponym_lattitude])\n\n envelope = toponym['boundedBy']['Envelope']\n left, bottom = map(float, envelope['lowerCorner'].split(' '))\n right, top = map(float, envelope['upperCorner'].split(' '))\n\n dx = abs(left - right) / 2\n dy = abs(bottom - top) / 2\n\n span = f'{dx},{dy}'\n\n return ll, span\n","sub_path":"geocoder.py","file_name":"geocoder.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"347250175","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 28 11:12:48 2018\r\n@author: eesungkim\r\n\"\"\"\r\nimport os\r\nimport librosa\r\nimport argparse\r\nimport numpy as np\r\nimport scipy.io.wavfile as wav\r\nimport librosa.display\r\nimport tflearn\r\nfrom utils.utils import * \r\nfrom utils.estnoise_ms import * \r\nimport tensorflow as tf\r\nfrom keras.layers import Input\r\nfrom keras.models import Model\r\nfrom keras import backend as k\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Activation, AveragePooling2D\r\nimport keras\r\nfrom keras import metrics\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras import initializers\r\nfrom keras.layers import LeakyReLU, PReLU, ELU\r\nfrom numpy.linalg import norm\r\n\r\n\r\ndef NMF_DNN(args):\r\n \"\"\"Speech Enhancement using NMF-DNN\r\n \"\"\"\r\n PATH_MATLAB='\"C:/Program Files/MATLAB/R2014a/bin/matlab.exe\"'\r\n PATH_ROOT = os.getcwd() \r\n PATH_MATLAB1 = os.path.join(PATH_ROOT , 'PESQ_MATLAB/execute_pesq.m')\r\n \r\n os.chdir(PATH_ROOT)\r\n\r\n #for NMF\r\n path_clean_train = os.path.join(PATH_ROOT, args.input_clean_train)\r\n (sr, clean_train) = wav.read(path_clean_train)\r\n stft_clean_train = librosa.stft(clean_train, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n # stft_clean_train = 
stft_clean_train[:,:10000]\r\n magnitude_clean_train, _ = divide_magphase(stft_clean_train, power=1)\r\n\r\n #for DNN\r\n path_dnn_clean_train = os.path.join(PATH_ROOT , args.input_dnn_clean_train)\r\n (sr, dnn_clean_train) = wav.read(path_dnn_clean_train)\r\n dnn_stft_clean_train = librosa.stft(dnn_clean_train, n_fft=args.num_FFT, hop_length=args.hop_size,window=args.window)\r\n dnn_magnitude_clean_train, _= divide_magphase(dnn_stft_clean_train, power=1)\r\n\r\n\r\n path_dnn_noisy_train = os.path.join(PATH_ROOT , args.input_dnn_noisy_train)\r\n (sr, dnn_noisy_train) = wav.read(path_dnn_noisy_train)\r\n dnn_stft_noisy_train = librosa.stft(dnn_noisy_train, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n dnn_magnitude_noisy_train, _= divide_magphase(dnn_stft_noisy_train, power=1)\r\n\r\n path_noise = os.path.join(PATH_ROOT, args.input_noise)\r\n (sr, noise_dnn) = wav.read(path_noise)\r\n dnn_stft_noise_train = librosa.stft(noise_dnn, n_fft=args.num_FFT, hop_length=args.hop_size,window=args.window)\r\n dnn_magnitude_noise_train, _= divide_magphase(dnn_stft_noise_train, power=1)\r\n\r\n #for Noise\r\n path_noise_1 = os.path.join(PATH_ROOT, args.input_noise_1)\r\n (sr, noise_1) = wav.read(path_noise_1)\r\n stft_noise_1 = librosa.stft(noise_1, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n magnitude_noise_1, _ = divide_magphase(stft_noise_1, power=1)\r\n\r\n\r\n path_noise_2 = os.path.join(PATH_ROOT, args.input_noise_2)\r\n (sr, noise_2) = wav.read(path_noise_2)\r\n stft_noise_2 = librosa.stft(noise_2, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n\r\n magnitude_noise_2, _ = divide_magphase(stft_noise_2, power=1)\r\n path_noise_3 = os.path.join(PATH_ROOT, args.input_noise_3)\r\n (sr, noise_3) = wav.read(path_noise_3)\r\n stft_noise_3 = librosa.stft(noise_3, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n magnitude_noise_3, _ = divide_magphase(stft_noise_3, power=1)\r\n\r\n\r\n 
path_clean_test = os.path.join(PATH_ROOT, args.input_clean_test)\r\n (sr, clean_test) = wav.read(path_clean_test)\r\n\r\n\r\n path_noisy_test = os.path.join(PATH_ROOT, args.input_noisy_test)\r\n (sr, noisy_test) = wav.read(path_noisy_test)\r\n stft_noisy_test = librosa.stft(noisy_test, n_fft=args.num_FFT, hop_length=args.hop_size, window=args.window)\r\n magnitude_noisy_test, phase_noisy_test = divide_magphase(stft_noisy_test, power=1)\r\n\r\n\r\n\r\n\r\n\r\n # NMF training stage\r\n #####################################################################################\r\n #obtain the basis matrix of clean_speech\r\n W_clean_train, H_clean_train = NMF_MuR(magnitude_clean_train,args.r,args.max_iter,args.display_step,const_W=False,init_W=0)\r\n\r\n # noise\r\n ##########################################################\r\n # 1) 각 노이즈 마다 3000 frame 씩 이어붙혀서 총 9000으로 만들어서 40 base 로 만들기\r\n magnitude_noise_1, magnitude_noise_2, magnitude_noise_3= magnitude_noise_1[:,:3000],magnitude_noise_2[:,:3000],magnitude_noise_3[:,:3000]\r\n nmf_magnitude_noise = np.concatenate((magnitude_noise_1, magnitude_noise_2, magnitude_noise_3),axis=1)\r\n #obtain the basis matrix of noise\r\n W_noise, H_noise = NMF_MuR(nmf_magnitude_noise,args.r,args.max_iter,args.display_step,const_W=False,init_W=0)\r\n\r\n\r\n # # 2) base 13,13,14로 이어 붙히기\r\n # # magnitude_noise_1, magnitude_noise_2, magnitude_noise_3 = magnitude_noise_1[:, :3000], magnitude_noise_2[:,:3000], magnitude_noise_3[:,:3000]\r\n #\r\n # #obtain the basis matrix of noise\r\n # W_noise_1, _ = NMF_MuR(magnitude_noise_1,13,args.max_iter,args.display_step,const_W=False,init_W=0)\r\n # W_noise_2, _ = NMF_MuR(magnitude_noise_2, 13, args.max_iter, args.display_step, const_W=False, init_W=0)\r\n # W_noise_3, _ = NMF_MuR(magnitude_noise_3, 14, args.max_iter, args.display_step, const_W=False, init_W=0)\r\n # ####################################################\r\n # W_noise = np.concatenate((W_noise_1, W_noise_2, W_noise_3),axis=1)\r\n\r\n # 
iH is the output of DNN\r\n _, H_NMF_encoding_estimated_clean = NMF_MuR(dnn_magnitude_clean_train,args.r,args.max_iter,args.display_step,const_W=True, init_W=W_clean_train)\r\n _, H_NMF_encoding_estimated_noise = NMF_MuR(dnn_magnitude_noise_train,args.r,args.max_iter,args.display_step,const_W=True, init_W=W_noise)\r\n\r\n\r\n # DNN training stage\r\n #####################################################################################\r\n X_train = dnn_magnitude_noisy_train.T\r\n y_train = np.concatenate([H_NMF_encoding_estimated_clean,H_NMF_encoding_estimated_noise], axis=0).T\r\n X_test = magnitude_noisy_test\r\n # DNN training stage\r\n #####################################################################################\r\n k.clear_session()\r\n def get_dnn_model(X_train, y_train, args):\r\n # LeakyReLU, PReLU, ELU, ThresholdedReLU, SReLU\r\n model = Sequential()\r\n model.add(Dense(args.n_hidden, input_dim=X_train.shape[1], init='glorot_normal')) # glorot_normal,he_normal\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(args.n_hidden, init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n # model.add(Activation('relu'))\r\n model.add(LeakyReLU(alpha=0.1))\r\n model.add(Dropout(args.drop_out))\r\n\r\n model.add(Dense(units=y_train.shape[1], init='glorot_normal'))\r\n model.add(BatchNormalization())\r\n model.add(Activation('linear'))\r\n\r\n model.compile(loss='mse',\r\n optimizer='adam',\r\n metrics=['mse'])\r\n # model.summary()\r\n return model\r\n\r\n model = get_dnn_model(X_train, y_train, args)\r\n\r\n with tf.device('/gpu:0'):\r\n model_info = model.fit(X_train, y_train,\r\n batch_size = args.n_batch,\r\n epochs = 
args.n_epoch,\r\n validation_split = 0.1)\r\n # plot_model_history(model_info)\r\n print(\"Training complete.\")\r\n\r\n #Enhancement stage\r\n #####################################################################################\r\n H_DNN_for_NMF_encoding_matrix = model.predict(X_test.T).T\r\n\r\n H_estimated_from_DNN_clean = H_DNN_for_NMF_encoding_matrix[:args.r,:]\r\n H_estimated_from_DNN_noise = H_DNN_for_NMF_encoding_matrix[args.r:,:]\r\n\r\n magnitude_estimated_from_DNN_clean = np.matmul(W_clean_train,H_estimated_from_DNN_clean)\r\n magnitude_estimated_from_DNN_noise = np.matmul(W_noise,H_estimated_from_DNN_noise)\r\n\r\n #Gain function similar to wiener filter to enhance the speech signal\r\n wiener_gain = np.power(magnitude_estimated_from_DNN_clean,args.p) / \\\r\n ( np.power(magnitude_estimated_from_DNN_clean,args.p) + np.power(magnitude_estimated_from_DNN_noise, args.p))\r\n magnitude_estimated_clean = wiener_gain * magnitude_noisy_test\r\n\r\n #Reconstruction\r\n stft_reconstructed_clean = merge_magphase(magnitude_estimated_clean, phase_noisy_test)\r\n signal_reconstructed_clean =librosa.istft(stft_reconstructed_clean, hop_length=args.hop_size, window=args.window)\r\n signal_reconstructed_clean = signal_reconstructed_clean.astype('int16')\r\n #####################################################################################\r\n output_path_estimated_noisy_test = os.path.join(PATH_ROOT, args.output_file)\r\n wav.write(output_path_estimated_noisy_test,sr,signal_reconstructed_clean)\r\n\r\n # Display signals, spectrograms\r\n show_signal(clean_test,noisy_test,signal_reconstructed_clean,sr)\r\n show_spectrogram(clean_test,noisy_test, signal_reconstructed_clean, sr, args.frame_length,args.hop_size)\r\n # # =============================================================================\r\n # # PESQ\r\n # # =============================================================================\r\n # from pymatbridge import Matlab\r\n # mlab = Matlab()\r\n # mlab = 
Matlab(executable=PATH_MATLAB)\r\n # mlab.start()\r\n # #PATH_MATLAB1 = os.path.join(PATH_ROOT , \"PESQ_MATLAB\",\"execute_pesq.m\")\r\n # result_PESQ = mlab.run_func(PATH_MATLAB1, {'arg1': sr})\r\n # noisy_original_PESQ = result_PESQ['result'][0][0]\r\n # enhanced_PESQ = result_PESQ['result'][1][0]\r\n # mlab.stop()\r\n # snr=args.input_noisy_test\r\n # name=snr[53:-9]\r\n # print(\"[%s]\\n Original: %.2f\\n NMF-DNN\\t: %.2f\"%(name,noisy_original_PESQ,enhanced_PESQ))\r\n #\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='NMF-DNN Speech Enhancement')\r\n parser.add_argument('--datasets_dir', type=str, default='datasets/')\r\n #for NMF\r\n parser.add_argument('--input_clean_train', type=str, default='datasets/timit_clean_selected_train_total.wav')\r\n parser.add_argument('--input_noisy_train', type=str, default='datasets/timit_noisy_selected_train_total.wav')\r\n\r\n parser.add_argument('--input_clean_test', type=str, default='datasets/timit_clean_selected/timit_clean_selected_test.wav')\r\n parser.add_argument('--input_noisy_test', type=str, default='datasets/timit_noisy_selected/test_match/timit_noisy_babble_snr10_test.wav')\r\n\r\n #for noise\r\n parser.add_argument('--input_noise_1', type=str, default='datasets/noise/NOISEX/babble.wav')\r\n parser.add_argument('--input_noise_2', type=str, default='datasets/noise/NOISEX/factory1.wav')\r\n parser.add_argument('--input_noise_3', type=str, default='datasets/noise/NOISEX/machinegun.wav')\r\n\r\n #for DNN\r\n parser.add_argument('--input_dnn_clean_train', type=str, default='datasets/timit_clean_selected_train_total.wav')\r\n parser.add_argument('--input_dnn_noisy_train', type=str, default='datasets/timit_noisy_selected_train_total.wav')\r\n parser.add_argument('--input_noise', type=str, default='datasets/timit_noise_selected_total.wav')\r\n\r\n parser.add_argument('--output_file', type=str, default='datasets/output/estimated_clean_NMF-DNN.wav')\r\n parser.add_argument('--num_FFT', 
type=int, default='512', help='')\r\n parser.add_argument('--hop_size', type=int, default='128', help='')\r\n parser.add_argument('--window', type=str, default='hamming',help='boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann, kaiser')\r\n parser.add_argument('--r', type=int, default='40', help='number of basis in NMF')\r\n parser.add_argument('--max_iter', type=int, default='50', help='number of maximum of NMF iteration')\r\n parser.add_argument('--display_step', type=int, default='10', help='display step in NMF interation')\r\n parser.add_argument('--p', type=int, default='2', help='parameter in wiener filter for gain')\r\n parser.add_argument('--n_epoch', type=int, default='50', help='number of DNN epoch')\r\n parser.add_argument('--n_hidden', type=int, default='400', help='hidden units of DNN')\r\n parser.add_argument('--drop_out', type=float, default='1', help='dropout of DNN')\r\n parser.add_argument('--n_batch', type=int, default='1024', help='mini batch size')\r\n\r\n return check_args(parser.parse_args())\r\n\r\ndef check_args(args):\r\n if not os.path.exists(args.datasets_dir):\r\n os.makedirs(args.datasets_dir)\r\n assert args.num_FFT >= 1, 'number of FFT size must be larger than or equal to one'\r\n assert args.hop_size < args.num_FFT, 'hop size must be smaller than number of FFT size'\r\n return args\r\n\r\nif __name__ == '__main__':\r\n args = parse_args()\r\n NMF_DNN(args)\r\n","sub_path":"main_NMF_DNN.py","file_name":"main_NMF_DNN.py","file_ext":"py","file_size_in_byte":13584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"50122170","text":"import tensorflow as tf\nimport tensorflow_addons as tfa\n\nimport sys\nimport os\nfrom glob import glob\nimport png\nsys.path.append(os.path.join(__file__,'..','..'))\n\nfrom tfDataIngest import tfDataSetParquet as tfDsParquet\n\ninputDataDir = sys.argv[1]\noutputDir = sys.argv[2]\n\nseed = 
3213434\n\n# test app\nif __name__ == \"__main__\":\n files = glob(os.path.join(inputDataDir,\"train*.parquet\"))\n print(\"Found {0} parquet files in input dir {1}\".format(len(files),inputDataDir))\n print(\"First is {0}\".format(files[0]))\n ds = tfDsParquet.create_parquet_dataset([files[0]])\n\n ds = ds.skip(4).take(1).repeat(100)\n\n def shear(sampleId, pixels):\n '''appling skeq transforms'''\n #shearFactor = tf.random.uniform([2],minval= -0.2,maxval=0.2,seed=seed)\n shearFactor = tf.random.truncated_normal([2],mean= 0.0,stddev=0.06,seed=seed)\n sX = shearFactor[0] # X skew factor\n sY = shearFactor[1] # Y skew factor\n tX = 236*0.5 # image centre X\n tY = 137*0.5 # image centre Y\n\n # translate -> shear -> translate back\n\n # [a0, a1, a2, b0, b1, b2, c0, c1]\n M = [1.0, sX , sX*tY, sY, 1.0, tX*sY, 0.0, 0.0]\n \n #pixInversed = pixels\n pixInversed = 255 - pixels # we need to work with inversed as shearing adds black color on new areas\n shearedInversed = tfa.image.transform(pixInversed, M, interpolation='BILINEAR')\n pixels = 255 - shearedInversed\n #pixels = shearedInversed\n return sampleId,pixels\n\n ds = ds.map(shear, num_parallel_calls=tf.data.experimental.AUTOTUNE).prefetch(8)\n\n idx = 0\n for element in ds.as_numpy_iterator(): \n #print(\"Iterating...\")\n sampleId,pixels = element\n sampleId = sampleId.decode(\"utf-8\")\n fileName = os.path.join(outputDir,\"{0}-{1}.png\".format(sampleId,idx))\n png.from_array(pixels, mode=\"L\").save(fileName)\n #print(element)\n #print(\"sample name is {0}\".format(sampleId))\n #print(sampleIds.shape)\n #print(pixels.shape)\n # a += 1\n # if a > 10:\n # break\n idx += 1\n print(\"Done\")\n #print(\"{0} elements in the dataset\".format(len(ds.)))","sub_path":"code/scripts/GeneratePNG_Preview_shear.py","file_name":"GeneratePNG_Preview_shear.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"321867930","text":"from PIL 
import Image\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport torch.utils.data as data\nfrom os import listdir\nfrom os.path import join\nimport numpy as np\nimport torch\nimport os\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\"])\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\nclass MultiStyleDataset(data.Dataset):\n def __init__(self, content_fname, styles_fpath, fine_size):\n super(MultiStyleDataset, self).__init__()\n assert is_image_file(content_fname), \\\n '{} must be an image file'.format(content_fname)\n self.style_images_basedir = styles_fpath\n self.content_fname = content_fname\n self.style_image_fnames = [\n fname for fname in listdir(styles_fpath) if is_image_file(fname)]\n self.fine_size = fine_size\n #self.normalize = transforms.Normalize(mean=[103.939,116.779,123.68],std=[1, 1, 1])\n #normalize = transforms.Normalize(mean=[123.68,103.939,116.779],std=[1, 1, 1])\n self.prep = transforms.Compose([\n transforms.Scale(fine_size),\n transforms.ToTensor(),\n #transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])]), #turn to BGR\n ])\n\n def __getitem__(self, index):\n styleImgPath = os.path.join(self.style_images_basedir, self.style_image_fnames[index])\n contentImg = default_loader(self.content_fname)\n styleImg = default_loader(styleImgPath)\n\n # resize\n if self.fine_size != 0:\n w, h = contentImg.size\n if(w > h):\n if w != self.fine_size:\n neww = self.fine_size\n newh = h * neww // w\n contentImg = contentImg.resize((neww, newh))\n styleImg = styleImg.resize((neww, newh))\n else:\n if h != self.fine_size:\n newh = self.fine_size\n neww = w * newh // h\n contentImg = contentImg.resize((neww, newh))\n styleImg = styleImg.resize((neww, newh))\n\n\n # Preprocess Images\n contentImg = transforms.ToTensor()(contentImg)\n styleImg = 
transforms.ToTensor()(styleImg)\n _out_fname_elems = self.content_fname.split('/')[-1].split('.')[:-1]\n out_fname = '{}_style_{}.jpg'.format('.'.join(_out_fname_elems), index)\n return contentImg.squeeze(0), styleImg.squeeze(0), out_fname\n\n def __len__(self):\n # You should change 0 to the total size of your dataset.\n return len(self.style_image_fnames)\n","sub_path":"Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"27611841","text":"# 题目要求将k个已经排序的链表进行合并,返回合并后的链表。\n# 方法1: 使用PQ\n# 思路为首先将lists中所有元素,也就是k个链表的头指针放入PQ当中.\n# 放入PQ的形式为一个tuple, tuple中为(该头指针的值, 插入顺序, 指针)。 注意heapq模块比较的时候,如果tuple中第一个元素相同,则会自动比较第二\n# 个元素,所以插入顺序在这里主要起到一个区别作用。使得使用头指针的值,插入顺序便可以在PQ中取出最值。\n# 如果不加入插入顺序,则当头指针的值相同,例如第一个链表有值为2,第二个链表也有值为2。则Python从PQ中取出元素,进行自动比较的时候\n# 由于指针所对应的ListNode的值相同,接下来进行ListNode类型的比较,由于是非原始数据类型会报错。\n\n# 建立一个dummyNode,并且设置cur指针指向目前结果链表的最后一个结点。\n# 在每次循环中,从PQ中每次取出最小值,也就是最小堆的堆顶元素。 将cur现在所指的结点的next赋值,使得cur结点指向该结点,并且右移动cur指针。\n# 并且如果弹出的指针还有下一个元素则再加入PQ当中。直到PQ为空。\n# 在加入元素的时候,只有cur指针移动,dummyNode指针没有移动,一直在结果链表的开头。所以结果返回dummyNode.next\n\n# 时间复杂度为: O(nlogk), 其中n为所有链表数字的数目总和,k为链表的数目。当遍历PQ的时候,要执行n次,每次弹出,放入都是log(k).\n# 空间复杂度为: O(n + k), 其中O(n)为返回结果的新链表,O(k)为PQ最多存k个元素\nimport heapq\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n PQ = []\n count = 1\n for eachHead in lists:\n if eachHead:\n heapq.heappush(PQ, (eachHead.val, count, eachHead))\n count += 1\n dummyNode = ListNode(None)\n cur = dummyNode\n while PQ:\n nextNode = heapq.heappop(PQ)[2]\n cur.next = nextNode\n cur = cur.next\n if nextNode.next:\n heapq.heappush(PQ, (nextNode.next.val, count, nextNode.next))\n count += 1\n return dummyNode.next\n\n\n# 方法2: 分治法\n# 我们首先将原列表不断切分直到只含两个元素。然后和mergeSort过程一样\n# Definition for 
singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n# 时间复杂度为: O(nlogk), 其中n为所有链表数字的数目总和,需要O(n)合并链表。分log(k)次\n# 空间复杂度为: O(1).不需要额外空间。\nclass Solution:\n def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:\n # 递归方法mergeSort\n def mergeSort(left, right):\n # 如果左右指针相等,直接返回该链表\n if left == right:\n return lists[left]\n else:\n mid = int((left + right) / 2)\n # 返回左边排序好的链表\n leftPart = mergeSort(left, mid)\n # 返回右边排序好的链表\n rightPart = mergeSort(mid + 1, right)\n\n # 迭代的将左右链表结合,建立dummyNode\n dummyNode = ListNode(None)\n cur = dummyNode\n # 如果左右链表都还有元素\n while leftPart and rightPart:\n # 如果左边链表小\n if leftPart.val < rightPart.val:\n # 将新构建的连到左边\n cur.next = leftPart\n # 左边链表向右移动一步\n leftPart = leftPart.next\n # 如果右边链表小\n else:\n # 将新构建的连到右边\n cur.next = rightPart\n # 右边链表向右移动一步\n rightPart = rightPart.next\n cur = cur.next\n # 如果左边还有剩余,联入结果链表\n while leftPart:\n cur.next = leftPart\n leftPart = leftPart.next\n cur = cur.next\n # 如果右边还有剩余,联入结果链表\n while rightPart:\n cur.next = rightPart\n rightPart = rightPart.next\n cur = cur.next\n\n return dummyNode.next\n\n if lists:\n return mergeSort(0, len(lists) - 1)\n else:\n return None\n","sub_path":"面试-LeetCode题/基础数据结构4-链表/LeetCode23(MergeKSortedLists)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"584143619","text":"from datetime import date, timedelta\nfrom time import sleep\n\nfrom osbot_aws.apis.IAM import IAM\n\nfrom osbot_aws.apis.Session import Session\n\nfrom gw_bot.helpers.Test_Helper import Test_Helper\nfrom osbot_aws.apis.API_Gateway import API_Gateway\nfrom osbot_aws.helpers.Rest_API import Rest_API\n\n\nclass test_API_Gateway(Test_Helper):\n\n def setUp(self):\n super().setUp()\n self.api_gateway = API_Gateway()\n self.rest_api = Rest_API('temp_rest_api').create()\n self.api_id = 
self.rest_api.id()\n\n #self.test_api_key_id = '24owcfrcjc'\n #self.test_resource_id = 'lf80sgepa5'\n #self.test_usage_plan_id = '7a6fl9'\n\n\n def test__init__(self):\n assert type(self.api_gateway.api_gateway).__name__ == 'APIGateway'\n\n def test_account(self):\n #from osbot_jupyter.utils.Trace_Call import Trace_Call\n #trace = Trace_Call()\n #trace.include_filter=['osbot*', 'gwbot*','boto*','ConnectionPool*']\n #self.result = trace.invoke_method(self.api_gateway.account)\n\n account = self.api_gateway.account()\n assert account.get('apiKeyVersion') == '4'\n assert account.get('cloudwatchRoleArn') == 'arn:aws:iam::311800962295:role/api-gateway-write-to-cloud-watch'\n assert account.get('features') == ['UsagePlans']\n assert account.get('throttleSettings') == {'burstLimit': 5000, 'rateLimit': 10000.0}\n\n def test_api_key(self):\n assert 'value' in set(self.api_gateway.api_key(self.test_api_key_id, include_value=True))\n\n def test_api_key_create__delete(self):\n key_name = 'temp new key'\n api_keys = self.api_gateway.api_keys() # store current api_keys\n api_key = self.api_gateway.api_key_create(key_name) # create key\n self.api_gateway.api_key_delete(api_key.get('id')) # delete it using `id`\n self.api_gateway.api_key_create(key_name) # create it again\n self.api_gateway.api_key_delete(key_name) # delete it using `name`\n assert api_keys == self.api_gateway.api_keys() # confirm api_keys are unchanged\n\n def test_api_keys(self):\n keys = self.api_gateway.api_keys('name',include_values=True)\n assert len(keys) > 1\n assert list(keys.values()).pop().get('enabled') == True\n\n def test_deployments(self):\n items = self.api_gateway.deployments(self.test_api_id)\n assert len(items) > 2\n\n def test_deployment_create(self):\n stage = 'Test_QA'\n api_id = Rest_API('temp_rest_api').create().id()\n self.result = self.api_gateway.deployment_create(api_id, stage)\n\n def test_domain_name_add_path_mapping(self):\n rest_api_id = 
self.api_gateway.rest_api_id('lambda-proxy')\n domain_name = '*.gw-proxy.com'\n base_path = ''\n self.result = self.api_gateway.domain_name_add_path_mapping(rest_api_id=rest_api_id,domain_name=domain_name,base_path=base_path)\n\n def test_domain_name_create(self):\n domain_name = '*.gw-proxy.com'\n certificate_arn = 'arn:aws:acm:eu-west-1:311800962295:certificate/1f191c3a-0214-4ef5-9f03-27cc0b46bef3'\n self.result = self.api_gateway.domain_name_create__regional(domain_name=domain_name, certificate_arn=certificate_arn)\n\n def test_domain_name_delete(self):\n domain_name = 'gw-proxy.com'\n self.result = self.api_gateway.domain_name_delete(domain_name=domain_name)\n\n\n def test_domain_names(self):\n self.result = self.api_gateway.domain_names(index_by='regionalDomainName')\n #assert len(self.api_gateway.domain_names()) > 0\n\n #not working: 'Invalid Method identifier specified'\n def test_integration(self):\n api_name = 'Slack GW-Bot' #'Jira Sync' #'VP-SaaS-Proxy'\n path = '/slack-handler' #'/jira-on-change' # '/{proxy+}'\n http_method = 'POST'\n api_id = self.api_gateway.rest_api_id(api_name)\n resource_id = self.api_gateway.resource_id(api_id, path)\n self.result = self.api_gateway.integration(api_id, resource_id, http_method)\n\n def test_method_invoke_test(self):\n rest_api = Rest_API('temp_rest_api').create()\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n method = 'GET'\n self.result = self.api_gateway.method_invoke_test(api_id,resource_id,method)\n\n def test_integration_create__http(self):\n uri = 'http://httpbin.org/robots.txt'\n rest_api = Rest_API('temp_rest_api').create()\n method = 'GET'\n integration_method = 'GET'\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n self.api_gateway.method_create(api_id, resource_id, method)\n self.result = self.api_gateway.integration_create__http(api_id=api_id, resource_id=resource_id, uri=uri, http_method=method, integration_http_method=integration_method)\n\n def 
test_integration_create__lambda(self):\n from osbot_aws.apis.Lambda import Lambda\n #_lambda = Lambda('gw_bot.lambdas.dev.hello_world')\n lambda_name = 'gw_bot_lambdas_dev_hello_world'\n #rest_api = Rest_API('temp_rest_api').create()\n #api_id = rest_api.id()\n resource_id = self.rest_api.resource_id('/')\n http_method = 'GET'\n self.api_gateway.method_create(self.api_id, resource_id, http_method)\n self.api_gateway.integration_create__lambda(api_id =self.api_id, resource_id=resource_id, lambda_name=lambda_name, http_method=http_method)\n self.integration_add_permission_to_lambda(lambda_name)\n\n # add method and integration responses to lambda function\n response_models = {'application/json': 'Empty'}\n self.api_gateway.method_response_create(self.api_id, resource_id, http_method, '200', response_models)\n\n # test method execution\n #self.result = self.api_gateway.method_invoke_test(self.api_id, resource_id, http_method)\n\n self.result = self.api_gateway.deployment_create(self.api_id,'QA-Lambda')\n\n# stage_url = self.api_gateway.stage_url(self.api_id,aws_region,'QA-Lambda')\n\n# from pbx_gs_python_utils.utils.Http import GET\n# self.result = GET(stage_url)\n #self.result = stage_url\n\n\n def test_integration_response(self):\n rest_api = Rest_API('temp_rest_api').create()\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n method = 'POST'\n status_code = '200'\n self.result = self.api_gateway.integration_response(api_id, resource_id,method, status_code)\n\n def test_integration_response_create(self):\n rest_api = Rest_API('temp_rest_api').create()\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n method = 'GET'\n status_code = '200'\n response_templates = {'application/json': ''}\n self.result = self.api_gateway.integration_response_create(api_id, resource_id,method, status_code, response_templates)\n\n\n\n def test_method(self):\n api_name = 'VP-SaaS-Proxy'\n path = '/{proxy+}'\n api_id = 
self.api_gateway.rest_api_id(api_name)\n resource_id = self.api_gateway.resource_id(api_id, path)\n resource_method = self.api_gateway.resource_methods(api_id, path).pop()\n method = self.api_gateway.method(api_id, resource_id, resource_method)\n assert method.get('httpMethod') == resource_method\n\n def test_method_create__delete(self):\n api_name = 'temp-unit-test-api'\n path = '/'\n method = 'POST'\n api_id = self.api_gateway.rest_api_create(api_name).get('id') # create api\n resource_id = self.api_gateway.resource(api_id, path).get('id') # get resource id\n self.result = self.api_gateway.method_create(api_id, resource_id,method) # create method\n assert method in self.api_gateway.resource_methods(api_id, path) # confirm it exists\n self.result = self.api_gateway.method_delete(api_id, resource_id,method) # delete method\n assert [] == self.api_gateway.resource_methods(api_id, path) # confirm it doesn't exist\n self.api_gateway.rest_api_delete(api_id) # delete api\n\n def test_method_response(self):\n rest_api = Rest_API('temp_rest_api').create()\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n method = 'GET'\n status_code = '200'\n self.result = self.api_gateway.method_response(api_id, resource_id,method, status_code)\n\n def test_method_response_create(self):\n rest_api = Rest_API('temp_rest_api').create()\n api_id = rest_api.id()\n resource_id = rest_api.resource_id('/')\n http_method = 'GET'\n status_code = '200'\n response_models = {'application/json': 'Empty'}\n self.result = self.api_gateway.method_response_create(api_id,resource_id,http_method,status_code,response_models)\n\n #self.result = self.api_gateway.method_invoke_test(api_id, resource_id, http_method)\n\n def test_models(self):\n assert len(self.api_gateway.models(self.api_id)) > 1\n\n def test_stage(self):\n stage_name = list(set(self.api_gateway.stages(self.api_id, index_by='stageName'))).pop()\n self.result = self.api_gateway.stage(self.api_id, stage_name)\n\n def 
test_stages(self):\n self.result = self.api_gateway.stages(self.api_id, index_by='stageName')\n\n def test_resource_create(self):\n api_name = 'temp-unit-test-api'\n new_path = 'test-path'\n\n # create Rest API\n rest_api = self.api_gateway.rest_api_create(api_name)\n api_id = rest_api.get('id')\n path_id = self.api_gateway.resource(api_id, '/').get('id')\n\n # create resource\n new_path_id = self.api_gateway.resource_create(api_id,path_id,new_path).get('id')\n\n # confirm resource exists\n assert self.api_gateway.resource(api_id, f'/{new_path}').get('id') == new_path_id\n\n # delete resource and Rest API\n self.api_gateway.resource_delete(api_id, new_path_id)\n sleep(2) # without this we get the botocore.errorfactory.TooManyRequestsException\n self.api_gateway.rest_api_delete(api_id)\n\n\n def test_resources(self):\n assert len(self.api_gateway.resources(self.api_id)) > 1\n assert self.api_gateway.resources('VP-SaaS-Proxy').get('id') == self.api_id\n assert self.api_gateway.resources('AAAA-BBB') == {'error': 'API not found: AAAA-BBB'}\n\n def test_rest_api_create__delete(self):\n rest_api = self.api_gateway.rest_api_create('temp test api ABC') # create rest_api\n sleep(1) # wait a little before deleting\n self.result = self.api_gateway.rest_api_delete(rest_api.get('id')) # delete it\n try:\n pass\n except Exception as error:\n self.result = f'{error}'\n\n def test_rest_apis(self):\n items = self.api_gateway.rest_apis(index_by='id')\n assert self.api_id in items\n\n def test_usage(self):\n usage_plan_id = self.api_gateway.usage_plans('name').get('1k month').get('id')\n days = 10\n usage = self.api_gateway.usage(usage_plan_id, days)\n assert len(usage) > 1\n #assert len(usage[usage_plan_id]) == days\n\n def test_usage__as_chart_data(self):\n days = 10\n print('-----')\n self.result = self.api_gateway.usage__as_chart_data(self.test_usage_plan_id, days)\n\n def test_usage_plan_keys(self):\n usage_plan_id = self.api_gateway.usage_plans('name').get('1k day').get('id')\n 
self.result = self.api_gateway.usage_plan_keys(usage_plan_id)\n\n def test_usage_plan_add_key(self):\n key_name = 'temp_key_name'\n usage_plan_id = self.api_gateway.usage_plans('name').get('1k day').get('id')\n temp_key_id = self.api_gateway.api_key_create(key_name).get('id')\n usage_plan_key = self.api_gateway.usage_plan_add_key(usage_plan_id, temp_key_id).get('id')\n\n assert usage_plan_key in self.api_gateway.usage_plan_keys(usage_plan_id)\n self.result = self.api_gateway.usage_plan_remove_key(usage_plan_id, temp_key_id)\n assert usage_plan_key not in self.api_gateway.usage_plan_keys(usage_plan_id)\n self.api_gateway.api_key_delete(key_name)\n\n def test_usage_plans(self):\n assert self.api_gateway.usage_plans('name').get('1k day').get('quota') == {'limit': 1000, 'offset': 0, 'period': 'DAY'}","sub_path":"tests/unit/apis/test_API_Gateway.py","file_name":"test_API_Gateway.py","file_ext":"py","file_size_in_byte":12959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"339211493","text":"import webbrowser\n\n##google = input(\"Google search: \")\n##webbrowser.open(\"http://www.google.com/search?btnG=1&q=%s\" %google)\n\nurl = 'http://www.python.org/'\n\n# Open URL in a new tab, if a browser window is already open.\nwebbrowser.open_new_tab(url + 'doc/')\n\n# Open URL in new window, raising the window if possible.\nwebbrowser.open_new(url)\n\n","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"443611525","text":"def solution(people, limit):\n answer = 0\n people.sort(reverse=True)\n left = 0\n right = len(people) - 1\n now_weight = 0\n while left <= right:\n now_weight += people[left]\n left += 1\n if now_weight + people[right] <= limit:\n right -= 1\n answer += 1\n now_weight = 0\n return 
answer","sub_path":"programmers_고득점_Kit/그리디/구명보트.py","file_name":"구명보트.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"167951873","text":"from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n\t# Examples:\n\turl(r'^$', 't42cc.about.views.index', name='index'),\n\turl(r'^edit/$', 't42cc.about.views.edit_data', name='edit_data'),\n\turl(r'^requests/$', 't42cc.about.views.requests', name='requests'),\n\t# url(r'^blog/', include('blog.urls')),\n\turl(r'^login/$', 'django.contrib.auth.views.login',name='login'),\n\turl(r'^logout/$', 'django.contrib.auth.views.logout',name='logout'),\n\turl(r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns += patterns('',\n\t(r'^(?Pmedia/.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT})\n)\n\n# vim: tabstop=4 fdm=marker expandtab\n","sub_path":"t42cc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"151169209","text":"import os\n\n #pipenv install environ python-dotenv\n # LOAD .env\nfrom environs import Env\nenv = Env()\nenv.read_env() \n\nfrom fastapi import FastAPI\n\n#import database settings\nfrom app.db import database, engine, metadata\n\n#import routes here\nfrom app.routes import user\n\n\napp = FastAPI( \n\t\ttitle=\"User API\",\n \tdescription=\"User API Documentation\",\n \tversion=\"1.0.0\",)\n\n#create databases.\nmetadata.create_all(engine)\n\n\n# connect to database on startup\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n#disconnect database on shutdown\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\n#Register routes 
here.\napp.include_router(user.router, prefix = \"/user\", tags = ['user'])\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"603388446","text":"# name: imgCompKMeans.py\n# author: molloykp (Nov 2019)\n# purpose: K-Means compression on an image\n\nimport numpy as np\n\nfrom matplotlib.image import imread\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import KMeans as kmeans\nfrom sklearn.preprocessing import StandardScaler\n\nimport argparse\n\n\ndef parseArguments():\n parser = argparse.ArgumentParser(\n description='KMeans compression of images')\n\n parser.add_argument('--imageFileName', action='store',\n dest='imageFileName', default=\"\", required=True,\n help='input image file')\n parser.add_argument('--k', action='store',\n dest='k', default=\"\", type=int, required=True,\n help='number of clusters')\n\n parser.add_argument('--outputFileName', action='store',\n dest='outputFileName', default=\"\", required=True,\n help='output imagefile name')\n\n return parser.parse_args()\n\ndef main():\n parms = parseArguments()\n\n img = imread(parms.imageFileName)\n kTimes = parms.k\n img_size = img.shape\n\n # Reshape it to be 2-dimension\n # in other words, its a 1d array of pixels with colors (RGB)\n\n X = img.reshape(img_size[0] * img_size[1], img_size[2])\n\n # Insert your code here to perform\n # -- KMeans clustering\n # -- replace colors in the image with their respective centroid\n\n # Init must be random and n_init must be 1\n kmeans.n_iter_ = kTimes # setting kmeans to iterate k times\n X_compressed = KMeans(init=\"random\", n_init=1, n_clusters=15, verbose=1).fit(X) # use loop to run 10 times?\n\n # Document Instructions:\n #\n # For one of the images that has been supplied, run kmeans 10 times with k = 15 and\n # report/plot the sum of the squared errors (inertia_).\n\n # 
Briefly explain why the results vary (1-2 sentences).\n\n\n # save modified image (code assumes new image in a variable\n # called X_compressed)\n # Reshape to have the same dimension as the original image \n\n X_compressed.reshape(img_size[0], img_size[1], img_size[2])\n\n fig, ax = plt.subplots(1, 1, figsize = (8, 8))\n\n ax.imshow(X_compressed)\n for ax in fig.axes:\n ax.axis('off')\n plt.tight_layout()\n plt.savefig(parms.outputFileName,dpi=400,bbox_inches='tight',pad_inches=0.05)\n plt.show()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"imgCompKMeans.py","file_name":"imgCompKMeans.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"628986172","text":"import sys\nimport ProjectSetup as setup\nimport http.client as http\nimport json\n\n#This function will attempt to setup an HTTP connection with the web server\n#and retrieve the data from the database. This data will be used to set the state\n#of the LED's \ndef getLightStates(piNo, hostName, url):\n data = {} #This will hold the retrieved data is sent in the\n #HTTP response's body\n key = url[1:] #key is \"GPIOStatuses\", slicing the URL b/c its the same as the key\n #minus the forward slash\n try:\n conn = http.HTTPConnection(hostName)#Setting up an HTTP connection\n conn.request(\"GET\", url, None, {\"rpino\":piNo})#Requesting the web server\n #with the Raspberry Pi's no. 
(specified when creating the RaspberryPiLight\n #object) sent as a header\n response = conn.getresponse() #Obtaining server response\n version = response.version\n status = response.status\n reason = response.reason\n print(\"HTTP version used: \" + str(version))\n print(\"HTTP status code: \" + str(status))\n print(\"Reason: \" + reason)\n\n if (status >= 200) & (status < 300): #If response is OK\n body = response.read()\n print(body)\n conn.close() #Read the response's body and close the connection\n data = json.loads(body.decode(\"utf-8\"))#The data sent from the server\n #is in the form of a Javascript document, this converts the document\n #to a Python object, The data is also converted from bytes into a\n #String using the utf-8 character encoding\n #data = json.loads(\"{\\\"\"+key+\"\\\"\"+\":\" + \"\\\"00000\\\"}\")\n print(data[key])\n\n except http.HTTPException:\n print(\"Error connecting to Web Server\")\n except ValueError:\n print(\"Potential error with JSON Decoding\")\n except:\n print(\"Unexpected error\", sys.exc_info()[0])\n \n #By enumerating the data, the index can be used to control the corresponding\n #GPIO output pin for the LED's, the state will either be equal to a 1 or 0\n #based on data sent by the response, this will drive the LED's high or low.\n #The state of each LED is also stored in the lightStatesList (Will be used\n #to help determine what the LED's should do when a button on the breadboard\n #is pressed.\n if key in data:\n for index, state in enumerate(data[key]):\n setup.GPIO.output(setup.gpioList[index], int(state))\n if len(setup.lightStatesList) <= index:\n setup.lightStatesList.append(state)\n else:\n setup.lightStatesList[index] = state\n\n \n \n","sub_path":"ProjectGetState.py","file_name":"ProjectGetState.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"604569537","text":"from unittest import TestCase\n\nfrom 
src.leilao.dominio import Usuario, Lance, Leilao\nfrom src.leilao.excecoes import LanceInvalido\n\n\nclass TestLeilao(TestCase):\n\n def setUp(self):\n self.beni = Usuario(\"Beni\", 500.0)\n self.lance_do_beni = Lance(self.beni, 100.0)\n self.leilao = Leilao(\"Celular\")\n\n def test_deve_retornar_o_maior_e_o_menor_numero_quando_adicionados_em_ordem_crescente(self):\n daniel = Usuario(\"Daniel\", 500.0)\n lance_do_daniel = Lance(daniel, 150.00)\n\n self.leilao.propoe(self.lance_do_beni)\n self.leilao.propoe(lance_do_daniel)\n\n menor_valor_esperado = 100.0\n maior_valor_esperado = 150.0\n\n self.assertEqual(menor_valor_esperado, self.leilao.menor_lance)\n self.assertEqual(maior_valor_esperado, self.leilao.maior_lance)\n\n def test_nao_deve_permitir_propor_lance_em_ordem_decrescente(self):\n\n with self.assertRaises(LanceInvalido):\n daniel = Usuario(\"Daniel\", 500.0)\n lance_do_daniel = Lance(daniel, 150.00)\n\n self.leilao.propoe(lance_do_daniel)\n self.leilao.propoe(self.lance_do_beni) #100\n\n def test_deve_retornar_o_mesmo_valor_para_maior_e_menor_quando_tiver_somente_um_lance(self):\n lance = Lance(self.beni, 150.0)\n\n self.leilao.propoe(lance)\n\n self.assertEqual(150, self.leilao.menor_lance)\n self.assertEqual(150, self.leilao.maior_lance)\n\n def test_deve_retornar_o_maior_e_o_menor_valor_quando_tiver_tres_lances(self):\n daniel = Usuario(\"Daniel\", 500.0)\n\n bruno = Usuario(\"Bruno\", 500.0)\n\n lance_do_bruno = Lance(bruno, 90.0)\n lance_do_beni = Lance(self.beni, 140.0)\n lance_do_daniel = Lance(daniel, 150.0)\n\n self.leilao.propoe(lance_do_bruno)\n self.leilao.propoe(lance_do_beni)\n self.leilao.propoe(lance_do_daniel)\n\n menor_valor_esperado = 90.0\n maior_valor_esperado = 150.0\n\n self.assertEqual(menor_valor_esperado, self.leilao.menor_lance)\n self.assertEqual(maior_valor_esperado, self.leilao.maior_lance)\n\n def test_deve_permitir_propor_lance_caso_o_leilao_nao_tenha_lances(self):\n self.leilao.propoe(self.lance_do_beni)\n\n 
quantidade_de_lances_recebida = len(self.leilao.lances)\n\n self.assertEqual(1, quantidade_de_lances_recebida)\n\n def test_deve_permitir_propor_um_lance_caso_o_ultimo_usuario_seja_diferente(self):\n yuri = Usuario(\"Yuri\", 500.0)\n lance_do_yuri = Lance(yuri, 200)\n\n self.leilao.propoe(self.lance_do_beni)\n self.leilao.propoe(lance_do_yuri)\n\n quantidade_de_lances_recebido = len(self.leilao.lances)\n\n self.assertEqual(2, quantidade_de_lances_recebido)\n\n def test_nao_deve_permitir_propor_lance_caso_usuario_seja_o_mesmo(self):\n lance_do_beni200 = Lance(self.beni, 200)\n\n #esperando a exceção\n with self.assertRaises(LanceInvalido):\n self.leilao.propoe(self.lance_do_beni)\n self.leilao.propoe(lance_do_beni200)\n","sub_path":"tests/test_leilao.py","file_name":"test_leilao.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"614838271","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import redirect\n\nfrom django.core.urlresolvers import reverse\n\nfrom django.contrib import messages\n\nfrom django.views.decorators.http import require_POST\nfrom .models import DiscountCoupon\nfrom .forms import DiscountCouponForm\n\nfrom orders.models import Order\n\nfrom django.core import serializers\n\n# Create your views here.\n\n@require_POST\ndef discountcoupon_apply(request):\n # now = timezone.now()\n user = request.user\n discount_coupon_form = DiscountCouponForm(request.POST or None, prefix=\"discount_coupon_form\")\n if discount_coupon_form.is_valid():\n data = discount_coupon_form.cleaned_data\n dogsize_name = data.get(\"dogsize_name\")\n item_name = data.get(\"item_name\")\n code = data.get(\"coupon_code\")\n code = code.upper()\n if code==\"\":\n request.session['discountcoupon_id'] = None\n request.session['affiliate_phonenumber'] = None\n return redirect(reverse('orders:order_subscribe', kwargs={'dogsize_name': 
dogsize_name, 'item_name': item_name}))\n elif _is_code_discountcoupon(code) :\n discountcoupon = DiscountCoupon.objects.get(coupon_code__exact=code, available=True)\n if discountcoupon.only_newcustomer :\n if Order.objects.filter(user=user).count() == 0 :\n request.session['affiliate_phonenumber'] = None\n request.session['discountcoupon_id'] = discountcoupon.id\n else :\n request.session['affiliate_phonenumber'] = None\n request.session['discountcoupon_id'] = None\n messages.error(request, 'Sorry, This Coupon can be used for new joiner only.')\n else :\n request.session['affiliate_phonenumber'] = None\n request.session['discountcoupon_id'] = discountcoupon.id\n\n else :\n request.session['discountcoupon_id'] = None\n request.session['affiliate_phonenumber'] = None\n messages.error(request, 'Coupon Code is Invalid or Expired')\n return redirect(reverse('orders:order_subscribe', kwargs={'dogsize_name': dogsize_name, 'item_name': item_name}))\n\n\ndef _is_code_discountcoupon(code) :\n try :\n DiscountCoupon.objects.get(coupon_code__exact=code,available=True)\n return True\n except DiscountCoupon.DoesNotExist:\n return False","sub_path":"_pawductbox/coupons/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"102026602","text":"# 给定m和r,使用拟人拟物法解不等圆packing问题\n\nimport numpy as np\nimport random\nfrom matplotlib import pyplot as plt\nimport seaborn\nimport heapq\n\n\nclass circle:\n\n def __init__(self, x, y, r):\n self.x = x\n self.y = y\n self.r = r\n self.nei = 0\n\n def show(self):\n plot_circle(self.x, self.y, self.r)\n\n\ndef plot_circle(xx, yy, r):\n theta = np.arange(0, 2 * np.pi, 0.01)\n x = xx + r * np.cos(theta)\n y = yy + r * np.sin(theta)\n plt.plot(x, y)\n\n\ndef dis(x1, x2, y1,y2): # 计算两点间的距离\n dis = ((x1 - x2)**2+(y1 - y2)**2)**0.5\n return dis\n\n\ndef find_min_nei(circles): # 找到圆集合中邻居最少的元素\n min = 10\n nei_num = 0\n for i in 
range(len(circles)):\n nei = 0\n for j in range(len(circles)):\n if i == j: continue\n if circles[i].r + circles[j].r - dis(circles[i].x, circles[j].x, circles[i].y, circles[j].y) == 0:\n nei = nei +1\n if abs(circles[i].x)+ circles[i].r >= 1:\n nei = nei +1\n if abs(circles[i].y) + circles[i].r >= 1:\n nei = nei +1\n if nei < min:\n min = nei\n nei_num = i\n return nei_num\n\n\ndef expand(circles): # 保证各个圆与边界与其他圆相切\n for i in range(len(circles)):\n bool = 1\n min = 3\n for j in range(len(circles)):\n if i == j : continue\n if circles[i].r + circles[j].r - dis(circles[i].x, circles[j].x, circles[i].y, circles[j].y)>=0:\n bool = 0\n else:\n if -(circles[i].r + circles[j].r - dis(circles[i].x, circles[j].x, circles[i].y, circles[j].y)) < min:\n min = -(circles[i].r + circles[j].r - dis(circles[i].x, circles[j].x, circles[i].y, circles[j].y))\n if abs(circles[i].x)+ circles[i].r >= 1:\n bool = 0\n else:\n if 1-abs(circles[i].x) - circles[i].r < min:\n min = 1-abs(circles[i].x) - circles[i].r\n if abs(circles[i].y) + circles[i].r >= 1:\n bool = 0\n else:\n if 1-abs(circles[i].y) - circles[i].r < min:\n min = 1-abs(circles[i].y) - circles[i].r\n if bool ==1:\n circles[i].r = circles[i].r + min\n\n\ndef area(circles):\n area=0\n for i in range(len(circles)):\n area = area + np.pi*(circles[i].r**2)\n return area\n\n\ndef nosmall_step(circles, UU): # 拟人法,防止陷入最小值陷阱\n UU = np.sum(UU, axis=0)\n for i in range(len(circles)):\n UU[i] = UU[i]/(circles[i].r**2)\n UU = list(UU)\n max_list = list(map(UU.index, heapq.nlargest(5, UU)))\n circles[max_list[0]].x = circles[max_list[0]].r-1\n circles[max_list[0]].y = 1-circles[max_list[0]].r\n circles[max_list[1]].x = 1-circles[max_list[1]].r\n circles[max_list[1]].y = 1 - circles[max_list[1]].r\n circles[max_list[2]].x = 1 - circles[max_list[2]].r\n circles[max_list[2]].y = circles[max_list[2]].r - 1\n circles[max_list[3]].x = circles[max_list[3]].r - 1\n circles[max_list[3]].y = circles[max_list[3]].r - 1\n circles[max_list[4]].x = 0\n 
circles[max_list[4]].y = 0\n\n\ndef update(circles, step): # 更新函数,梯度下降使系统势能最小\n U = [[0 for i in range(m)] for i in range(m)]\n UW = 0\n move_vector = [0., 0.]\n for i in range(len(circles)):\n if circles[i].x + circles[i].r > 1:\n L = circles[i].r - 1 + circles[i].x\n UW = UW + L ** 2\n force = [0, -1]\n if L < 0.001:\n L = L * 100\n move_vector[0] = move_vector[0] + L * force[0]\n move_vector[1] = move_vector[1] + L * force[1]\n if circles[i].x - circles[i].r < -1:\n L = circles[i].r - 1 - circles[i].x\n UW = UW + L ** 2\n force = [0, 1]\n if L < 0.001:\n L = L * 100\n move_vector[0] = move_vector[0] + L * force[0]\n move_vector[1] = move_vector[1] + L * force[1]\n if circles[i].y + circles[i].r > 1:\n L = circles[i].r - 1 + circles[i].y\n UW = UW + L ** 2\n force = [-1, 0]\n if L < 0.001:\n L = L * 100\n move_vector[0] = move_vector[0] + L * force[0]\n move_vector[1] = move_vector[1] + L * force[1]\n if circles[i].y - circles[i].r < -1:\n L = circles[i].r - 1 - circles[i].y\n UW = UW+ L ** 2\n force = [1, 0]\n if L < 0.01:\n L = L * 10\n move_vector[0] = move_vector[0] + L * force[0]\n move_vector[1] = move_vector[1] + L * force[1]\n for j in range(len(circles)):\n if i == j: continue\n L = circles[i].r + circles[j].r - dis(circles[i].x, circles[j].x, circles[i].y, circles[j].y)\n if L > 0:\n U[i][j] = U[i][j] + L**2\n force = [circles[i].x - circles[j].x, circles[i].y - circles[j].y]\n if force[0] != 0:\n force[0] = force[0]/np.linalg.norm(force, ord=2)\n if force[1] != 0:\n force[1] = force[1] / np.linalg.norm(force, ord=2)\n if L < 0.001:\n L = L*100\n move_vector[0] = move_vector[0] + L*force[0]\n move_vector[1] = move_vector[1] + L*force[1]\n circles[i].x = circles[i].x + step * move_vector[0]\n circles[i].y = circles[i].y + step * move_vector[1]\n if circles[i].x + circles[i].r > 1: circles[i].x = 1 - circles[i].r\n if circles[i].x - circles[i].r <-1: circles[i].x = circles[i].r - 1\n if circles[i].y + circles[i].r > 1: circles[i].y = 1 - circles[i].r\n if 
circles[i].y - circles[i].r <-1: circles[i].y = circles[i].r - 1\n return U, UW\n\n\nif __name__ == '__main__':\n m = 13\n r = [1, 0.17, 0.17, 0.17, 0.17, 0.085, 0.085, 0.085, 0.085, 0.085, 0.085, 0.085, 0.085]\n circles = []\n S = 4\n step = 0.1 # 拟物法步长\n times = 1 # 要优化的次数\n for i in range(0,m): # 随机初始化一组半径r\n x = random.randint(-100, 100) / 100\n y = random.randint(-100, 100) / 100\n circles = circles + [circle(x, y, r[i])]\n for i in range(0, times):\n step = 0.1\n # circles[find_min_nei(circles)].r = circles[find_min_nei(circles)].r + 0.005\n U, UW= update(circles, step)\n # expand(circles)\n U1 = U\n UW1 = UW\n while (np.sum(U)!= 0):\n U, UW = update(circles, step)\n # for j in range(len(circles)):\n # circles[j].show()\n # if np.sum(U)<0.01:\n # plt.xlim(-1, 1)\n # plt.ylim(-1, 1)\n # plt.show()\n if np.sum(U) + UW>= np.sum(U1) + UW1:\n step = step * 0.8\n if step<0.01:\n print(\"随机\")\n nosmall_step(circles, U)\n step = 0.1\n U1 = U\n UW1 = UW\n print(\"第%d次优化,U的值为%f\"%(i, np.sum(U)))\n for i in range(len(circles)):\n circles[i].show()\n plt.xlim(-1, 1)\n plt.ylim(-1, 1)\n plt.show()","sub_path":"unit3/Given_r,m=10.py","file_name":"Given_r,m=10.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"550205060","text":"from wiesel import wsl_distributions\nfrom wiesel import utils\nfrom wiesel import path\n\nimport os\n\n\nclass Path(object):\n pass\n\n\nclass WslMountedPath(object):\n def __init__(self, windows_path: str, distribution: wsl_distributions.RegisteredDistribution):\n self._path = os.path.abspath(windows_path)\n print(self._path)\n self._distro = distribution\n self._drive_mapping = self._read_mounts()\n\n def _read_mounts(self):\n drive_mapping = {}\n\n mount_process = self._distro.run(['mount'])\n\n if not mount_process.is_successful():\n raise Exception(\"Could not execute mount\")\n\n line: utils.CmdOutputLine\n for line in 
mount_process.complete_output:\n mount = path.linux_mount.LinuxMount(line.inner)\n if mount.fs_vfstype == \"9p\" and mount.opts.get('aname', False) == \"drvfs\":\n drive_mapping[mount.fs_spec] = mount.fs_file\n\n print(drive_mapping)\n return drive_mapping\n\n def translate(self):\n adjusted_path = self._path\n for mapping in self._drive_mapping.keys():\n if self._path.startswith(mapping):\n adjusted_path = adjusted_path.replace(mapping, self._drive_mapping[mapping] + \"/\")\n break\n return adjusted_path.replace('\\\\', '/')\n","sub_path":"wiesel/path/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"71458992","text":"class Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n # Create a set to keep track of seen values\n nums_cache = set()\n \n # Iterate over the list\n for num in nums:\n \n # If the value has not been seen, add it to the catch\n if num not in nums_cache:\n nums_cache.add(num)\n \n # If the value has already been seen, return true\n else:\n return True\n \n # If there were no duplicated, then return false\n return false","sub_path":"day_1/contains_duplicates/contains_dup.py","file_name":"contains_dup.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"349627099","text":"from django.conf.urls import patterns, include, url\r\nfrom django.views.generic import TemplateView\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\ndef to_template(template_name):\r\n return TemplateView.as_view(template_name=template_name)\r\n\r\nurlpatterns = patterns('core.views',\r\n url(r'^$', 'index', name='index'),\r\n url(r'^to-giver/$', to_template('to-giver.html'), name='to_giver'),\r\n url(r'^to-reader/$', to_template('to-reader.html'), name='to_reader'),\r\n url(r'^shipping/$', to_template('shipping.html'), 
name='shipping'),\r\n url(r'^help/$', to_template('help.html'), name='help'),\r\n url(r'^account/summary/$', 'account_summary', name='account_summary'),\r\n url(r'^account/orders/reading/$', 'account_reading_orders', name='account_reading_orders'),\r\n url(r'^account/orders/giving/$', 'account_giving_orders', name='account_giving_orders'),\r\n url(r'^account/material/$', 'account_material', name='account_material'),\r\n url(r'^account/material/new/$', 'account_material_edit', name='account_material_new'),\r\n url(r'^account/material/(?P\\d+)/edit/$', 'account_material_edit', name='account_material_edit'),\r\n url(r'^account/add/author/$', 'account_add_author', name='account_add_author'),\r\n url(r'^account/add/publisher/$', 'account_add_publisher', name='account_add_publisher'),\r\n \r\n url(r'^account/material/new/existing/$', to_template('account/material_new_from_existing.html'), name='material_new_from_existing'),\r\n url(r'^users/(?P[\\w.@+-]+)/$', 'user_profile', name='user_profile'),\r\n url(r'^check_out/$', 'check_out', name='check_out'),\r\n url(r'^order/(?P\\d+)/ship/$', 'ship_order', name='ship_order'),\r\n)\r\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"367290032","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom escalonador import Escalonador\nfrom termcolor import colored\n\n\nclass FIFO(Escalonador):\n def __init__(self, file):\n super().__init__(file)\n self.cabecalho = 'Tempo\\tProcessos (P_# / tempo_total ms / tempo_restante ms)'\n self.cabecalho_efeitos = (colored(self.cabecalho,'cyan', None, ['bold']))\n self.timeline_linhas.append(self.cabecalho_efeitos)\n\n def escalonar(self):\n if self.processo_executando == 0:\n self.processo_executando = 1\n self.processando[self.processo_executando].estado = 1\n return self.processando[self.processo_executando]\n\n if 
self.quantidade_estado('Pronto') == 0:\n return None\n\n key = (self.processo_executando+1)\n\n while self.processando[key].estado != 'Pronto':\n key += 1\n\n if ((key) == (len(self.processando))):\n return None\n\n if key != self.processo_executando:\n self.processo_executando = key\n\n return self.processando[key]\n\n def executar(self, processo):\n if processo.tipo == 'user':\n processo.tempo_de_entrada = self.timeline\n processo.estado = 1\n quant_processos = len(self.processando)\n atualizar_saida = True\n while processo.tempo_restante > 0:\n if atualizar_saida:\n atualizar_saida = False\n self.mostrar_timeline()\n self.timeline += 1\n self.verificar_entrada_de_processos()\n if (len(self.processando)) > quant_processos:\n quant_processos = len(self.processando)\n atualizar_saida = True\n processo.tempo_executado = 1\n if (processo.eventos_I_O > 0 and processo.tempo_restante > 0):\n processo.estado = 2\n processo.tempo_de_saida = self.timeline\n self.GER_I_O.estado = 1\n self.mostrar_timeline()\n atualizar_saida = False\n while processo.eventos_I_O > 0:\n processo._Processo__eventos_I_O -= 1\n # processo._Processo__tempo_de_espera += 1\n self.timeline += 1\n self.atraso_ms()\n processo.estado = 1\n self.GER_I_O.estado = 4\n processo.tempo_de_entrada = self.timeline\n self.mostrar_timeline()\n if processo.tempo_restante == 0:\n processo.estado = 3\n atualizar_saida = True\n self.atraso_ms()\n\n def montar_linhas(self):\n linha = '{:02d}\\t'.format(self.timeline)\n for i in range(len(self.processando)):\n if self.processando[i].estado:\n if self.processando[i].tipo == 'user':\n aux = 'P_{0.id} / {0.tempo_total:02d}ms / {0.tempo_restante:02d}ms'.format(self.processando[i])\n else:\n aux = 'P_{0.id} / Tratamento de I/O'.format(self.processando[i])\n if self.processando[i].estado == 'Pronto':\n aux = colored(aux, 'cyan', None, ['bold'])\n elif self.processando[i].estado == 'Executando':\n aux = colored(aux, 'yellow', None, ['underline', 'bold'])\n elif 
self.processando[i].estado == 'Bloqueado para I/O':\n aux = colored(aux, 'red', None, ['bold'])\n elif self.processando[i].estado == 'Concluído':\n aux = colored(aux, 'green', None, ['bold'])\n elif self.processando[i].estado == 'Suspenso':\n aux = colored(aux, 'cyan', None, ['dark'])\n linha += aux\n linha += ' | '\n self.timeline_linhas.append(linha)\n","sub_path":"fifo.py","file_name":"fifo.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"202563906","text":"#Code by Daniel V. Samarov, Ph.D. \n#National Instiute of Standards and Technology\n#Inofrmation Technology Laboratory\n#Statistical Engineering Division \n\n#Prepared for day 2 of the 2017 \"Machine Learning for Materials Research\" bootcamp and workshop at the U. of Maryland Nanocenter\n\n# We start off by loading the modules and libraries that we will be using. \n# Note, there are various \"best practices\" that are out there for this, \n# including selecting the specific functions with a module that will used \n# rather than selecting all of (say) numpy. 
We won't worry about this too much\n# here, but it's something to keep in mind.\n\n\n#%%\n# numpy is a module for a variety of numeric operations and helper functions, \n# but also linear algebra\nimport numpy as np\n# sklearn is a general machine learning and statistics module, for our purposes\n# we grab the linear regression module\nfrom sklearn.linear_model import LinearRegression, ElasticNetCV, \\\nRidge, Lasso, LassoCV, LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import svm\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n# Get performance metrics\nfrom sklearn.metrics import r2_score, mean_squared_error, roc_auc_score, \\\nconfusion_matrix, roc_curve\n# pandas has some similar functionality to numpy but provides some improved\n# data processing steps functionality.\nimport pandas as pd\n# scipy is a general purpose scientific computing module\nfrom scipy.linalg import svd\nfrom scipy.stats import f\n# matplotlibn is the primary plotting library for Python\nimport matplotlib.pyplot as plt\n# Plot style\nplt.style.use('ggplot')\n# Function for loading MATLAB files\nfrom scipy.io import loadmat\n\n# Another plotting library\nimport seaborn as sns\n#%%\n\n\n# Start by loading our regression example - start with the training data\n# Our files directory\ndata_dir = '/home/dan/Dropbox/MLMR_bootcamp/supervised_learning/examples.xlsx'\n# NOTE: the constant term b0 is included in the matrix, for most the of the \n# models in Python we can pass this in without issue, we'd just need to specify\n# that an intercept term should NOT be fit. 
For our purposes we're going to\n# drop that first column since for some of our functions it creates some issues\n# (more on this below).\nx_train = pd.read_excel(data_dir, sheetname='Training correlation matrix',\n header=None).values\ny_train = pd.read_excel(data_dir,sheetname='Training formation energies',\n header=None).values[:, 0]\n\n# Testing\nx_test = pd.read_excel(data_dir, sheetname='Test correlation matrix',\n header=None).values\ny_test = pd.read_excel(data_dir, sheetname='Test formation energies',\n header=None).values[:, 0]\n#%%\n \n# range() creates an ordered list starting with 0, try it out. Python uses 0\n# indexing in it's indexing (unlike R and MATLAB). Using the pandas \ncols = range(1, x_train.shape[1])\nx_train = x_train[:, cols]\nx_test = x_test[:, cols]\n\n# Look at structure of the data and it's distribution\nprint(x_train.shape)\nprint(x_train.mean(0))\nprint(x_train.var(0))\nprint(x_train.min(0))\nprint(x_train.max(0))\n\n#%%\n# Let's take a look at our training data\ncorr_fig = plt.figure()\ncorr_plot = corr_fig.add_subplot(1,1,1)\ncorr_plot.plot(x_train[:,0], y_train, 'o', label = 'Training')\n\n#%%\n# Next we can add some additional points from the test data set and see\n# how well the model generalizes (not too bad given that we're looking at just\n# one feature from 146)\ncorr_plot.plot(x_test[:,0], y_test, 'o', color='green', label = 'Testing')\n# Can easily add a legend, note the \"label\" argument is automatically passed in\ncorr_plot.legend(loc='upper right')\ncorr_plot.set_xlabel('Cluster Function 1')\ncorr_plot.set_ylabel('Formation Energy')\n#%%\ncorr_fig.savefig('plots/CF1_vs_FE.png')\n\n#%%\n# If we want to get a clearer visualization of what's going on can look at\n# several group values versus FE. This is probably not as transparent as what\n# you might find in R, but provides an illustration of some of the \"more\n# advanced\" plotting features. 
There is a manual way to do this using similar\n# syntax to MATPLOT's subplot. \nfig_data = plt.figure()\n\nfor i in range(9):\n ax_data = fig_data.add_subplot(3,3,i+1)\n ax_data.scatter(x_train[:,i], y_train, marker='o', s=10)\n plt.xlabel('CF = '+str(i+1))\n plt.ylabel('FE')\nplt.tight_layout()\n\n#%%\n# Some alternative approaches that provide easy access to more aesthetically\n# pleasing plots (but are syntactically a bit less clear)\nn_cols = range(9)\nx_col = x_train[:,n_cols].T.reshape((x_train[:,n_cols].shape[0]*len(n_cols), 1))[:,0]\ng_col = sum([[i]*x_train.shape[0] for i in n_cols], [])\ny_col = sum([y_train.tolist()]*len(n_cols), [])\ndata_df = pd.DataFrame({'Cluster Function': x_col, 'CF':g_col, 'FE': y_col})\nscatplt_mat = sns.FacetGrid(data_df, col = 'CF', col_wrap=int(np.sqrt(len(n_cols))), \n sharex=False)\nscatplt_mat.map(sns.regplot, 'Cluster Function', 'FE', fit_reg = False, scatter_kws={'s':10})\n\n#%%\nscatplt_mat.savefig('plots/scatplt_mat.png')\n\n#%%\n# Let's look at a simple linear model fit to the data. \n# The structure for fitting models in Python is a bit different then what\n# you might be accustomed to in R, MATLAB, etc. 
Not all models are fit in \n# the same way, but they tend to follow a similar syntax so it's pretty easy\n# to get things up and running once you have the hang of it.\nlm1 = LinearRegression()\nlm1.fit(x_train[:, [0]], y_train)\np1 = lm1.predict(x_train[:, [0]])\n#%%\n\n# Let's take a look at what information the \"ols\" object contains\nprint(lm1.coef_)\nprint(lm1.intercept_)\n\n#%%\n# Check the above\nrho = np.corrcoef(x_train[:,0],y_train)[0,1]\nsy = np.sqrt(y_train.var())\nsx = np.sqrt(x_train[:,0].var())\n# Slope\nprint(rho * sy / sx)\n# Intercept\nprint(np.mean(y_train - x_train[:,0] * rho * sy / sx))\n\n\n#%%\n# Add the regression line to the plot\ncorr_plot.plot(x_train[:,0], p1, color='r')\n\n#%%\ncorr_fig.savefig('plots/CF1_vs_FE_fit.png')\n\n#%%\n# Looking at the residuals we can see that there distribution isn't as \n# randomly distributed as we might like, suggesting that our model isn't quite\n# capturing everything that's going on. This is of course to be expected since\n# we're just using one of the available predictors.\nfig_plot, res_plot = plt.subplots()\nres_plot.plot(y_train, y_train - p1, \"o\")\nres_plot.axhline(0, color=\"r\")\nres_plot.set_xlabel('Observed')\nres_plot.set_ylabel('Residual')\n\n#%%\nfig_plot.savefig('plots/CF1_resid.png')\n\n#%%\n# Recall from our discussion in class, when p > n the problem becomes \"ill-posed\", \n# as such specific considerations need to be taken into account. 
Before getting \n# to that let's try selecting multiple columns to get used to the syntax of\n# both data manipulation, model fitting and output\nsub_cols = [0, 10, 30, 73]\nlm2 = LinearRegression()\n# Fit the model (you'll notice as we go through the examples that the structure\n# of fitting the model is fairly consistent)\nlm2.fit(x_train[:, sub_cols], y_train)\n# Get predictions\np2 = lm2.predict(x_test[:, sub_cols])\n\n# Look at the resulting coefficient estimate\nprint(lm2.coef_)\nprint(lm2.intercept_)\n\n#%%\n# NOTE: as stated when p > n standard regression runs into some problems\n# which requires care. in particular we note that the\n# LinearRegression function will still fit a model irrespective of the fact\n# that the inverse does not exist. To account for this it is likely using a \n# \"pseudo-inverse\". To see what the issue is more explicitly let's take a look \n# at the eigen values\n# NOTE: For this example we need to explicitly include the intercept term in\n# our design matrix\nx_tr = np.hstack((np.ones((x_train.shape[0],1)), x_train))\ne_vecs_l, e_vals, e_vecs_r = svd(x_tr.T.dot(x_tr))\neig_fig, eig_plot = plt.subplots()\neig_plot.plot(e_vals)\neig_plot.set_xlabel('Index')\neig_plot.set_ylabel('Value')\neig_plot.set_title('Eigen values of the covariance matrix')\n\n#%%\neig_fig.savefig('plots/eigen.png')\n\n\n#%%\n# And to see how this effects the prediction let's work through the rest of the\n# least squares solution\n# First compute (x^Tx)^-1\nprint(1/e_vals)\ne_vals_trunc_inv = 1/e_vals\ne_vals_trunc_inv[e_vals <= 1e-11] = 0.0\nxtx_inv = e_vecs_l.dot(np.diag(e_vals_trunc_inv)).dot(e_vecs_r)\n# Then multiply the latter by x^Ty\nb_full = xtx_inv.dot(x_tr.T).dot(y_train)\nx_te = np.hstack((np.ones((x_test.shape[0],1)), x_test))\np_full = x_te.dot(b_full)\n\n#%%\n# So, now let's take a look at what happens when we fit a regression without\n# accounting for the fact that p > n using sklearn's model. 
This should be\n# fairly similar to our results above. \nlm_tot = LinearRegression(fit_intercept=False)\nlm_tot.fit(x_tr, y_train)\np_full2 = lm_tot.predict(x_te)\n\n#%%\n# Fit with testing data, if our model is doing a good job the hope is that\n# the difference between coefficient estimates isn't too big\nlm_tot2 = LinearRegression(fit_intercept=False)\nlm_tot2.fit(x_te, y_test)\n\nprint(mean_squared_error(lm_tot2.coef_, lm_tot.coef_))\n\n#%%\n# Next look at a couple of performance metrics against training and testing. \n# First take a look at how well our model fit the observed (training) data \n# when using one CF\nprint(r2_score(y_train, p1))\nprint(mean_squared_error(y_train, p1))\n\n#%%\n# Of course what's of more interest to us though is the performance of our model on the\n# test data set, this tells us how well it generalizes\np1_test = lm1.predict(x_test[:,[0]])\nprint(r2_score(y_test, p1_test))\nprint(mean_squared_error(y_test, p1_test))\n\n#%%\n# Next let's look at the performance with 4 features\nprint( r2_score(y_test, p2))\nprint( mean_squared_error(y_test, p2))\n\n#%%\n# And the full model\nprint(r2_score(y_test, p_full))\nprint(mean_squared_error(y_test, p_full))\n\n#%%\n# NOTE: Depending on what your objectives are the sklearn interface may not\n# quite capture all your needs. For example, in the R function \"lm\" we get\n# a number of helpful summary statistics on the model fit (e.g. p-values,\n# F-statistics, etc.). There are some ways of getting at this but it isn't the\n# default. That said something like the F-statistic can easily be computed \n# manually\nmse1 = mean_squared_error(y_train, p1)\ndf1 = len(lm1.coef_) + 1\nmse4 = mean_squared_error(y_train, lm2.predict(x_train[:,sub_cols]))\ndf2 = len(lm2.coef_) + 1\nn = x_train.shape[0]\nf_stat = ((mse1 - mse4)/(df2 - df1))/(mse4/(n - df2))\nprint(f_stat)\nprint(1 - f.cdf(f_stat, df2 - df1, n - df2))\n\n#%%\n# Next let's try ridge regression and compare results. 
First we look at what \n# happens to the eigenvalues when we add an off set\nprint(1/(e_vals + 10.0))\n\n#%%\nlm_ridge = Ridge(alpha = 10.0)\nlm_ridge.fit(x_train, y_train)\np_ridge = lm_ridge.predict(x_test)\nprint(r2_score(y_test, p_ridge))\nprint(np.sqrt(mean_squared_error(y_test, p_ridge)))\n\n#%%\nlm_ridge2 = Ridge(alpha = 10.0)\nlm_ridge2.fit(x_test, y_test)\n\n#%%\n# See how consistent the estimates are\nprint(mean_squared_error(lm_ridge.coef_, lm_ridge2.coef_))\n\n#%%\n# How does this impact the coefficient estimates?\ncomp_fig = plt.figure()\ncomp_plot = comp_fig.add_subplot(1,1,1)\ncomp_plot.bar(np.arange(x_train.shape[1]), lm_tot.coef_[1:], label='LinReg')\ncomp_plot.bar(np.arange(x_train.shape[1]), lm_ridge.coef_, label='Ridge')\ncomp_plot.legend()\ncomp_plot.set_title('Linear Regression vs. Ridge Estimates')\n\n#%%\ncomp_fig.savefig('plots/LR_vs_RIDGE.png')\n\n#%%\n# We can also try out a number of alpha values and see how they each perform.\n# Create a series of alpha values to looks at\nalpha = np.arange(.1, 20.1, .1)\n# An array to store the results\nridge_res = np.empty((0, 3))\nfor a in alpha:\n lm_ridge = Ridge(alpha=a)\n lm_ridge.fit(x_train, y_train)\n p_ridge = lm_ridge.predict(x_test)\n r2 = r2_score(y_test, p_ridge)\n rmse = mean_squared_error(y_test, p_ridge)\n # These lines store the associated results into a 2D array\n res_a = [[a, r2, rmse]]\n ridge_res = np.append(ridge_res, res_a, axis = 0)\n# Look at the results\nprint(ridge_res)\n\n#%%\n\n# Let's now plot the results (and a slightly more complex plotting example)\nridge_fig = plt.figure()\nridge_r2 = ridge_fig.add_subplot(1,1,1)\nridge_r2_line = ridge_r2.plot(ridge_res[:, 0], ridge_res[:, 1], label='R2')\nridge_r2.set_ylabel('R2')\nridge_rmse = ridge_r2.twinx()\nridge_rmse_line = ridge_rmse.plot(ridge_res[:, 0], ridge_res[:, 2], \n label = 'MSE', color='blue')\nridge_r2.set_xlabel('gamma')\nridge_rmse.set_ylabel('MSE')\nridge_line = ridge_r2_line + ridge_rmse_line\nridge_labs = 
[l.get_label() for l in ridge_line]\nridge_r2.legend(ridge_line, ridge_labs, loc='center right')\n\n#%%\nridge_fig.savefig('plots/ridge_sol_path.png')\n#%%\n\n# Compare results of standard and ridge regression\nres_df = pd.DataFrame({'Method':['Standard', 'Ridge'], \n'MSE':[mean_squared_error(y_test, p_full), mean_squared_error(y_test, p_ridge)]})\n\n#%%\n# From these results we can see that ridge regression provides an improvement\n# over standard regression. \n\n# Next we consider the LASSO and Elastic Net models\n# Traing the lasso model\nlasso = Lasso(fit_intercept=True, alpha = 0.1)\nlasso.fit(x_train, y_train)\np_lasso = lasso.predict(x_test)\nprint(r2_score(y_test, p_lasso))\nprint(mean_squared_error(y_test, p_lasso))\nprint(sum(lasso.coef_ == 0))\n\n#%%\n\n# Next fitting the lasso and finding the optimal regularization using\n# built in cross-validation provides further improvement over the LASSO\nlasso_cv = LassoCV(fit_intercept=True, n_alphas=100, normalize=False)\nlasso_cv.fit(x_train, y_train)\np_lasso_cv = lasso_cv.predict(x_test)\nprint(r2_score(y_test, p_lasso_cv))\nprint(mean_squared_error(y_test, p_lasso_cv))\nprint(sum(lasso_cv.coef_ == 0) )\n\n#%%\n# Let's visualize the solution path\nlassop = lasso_cv.path(x_train, y_train)\nlassop_fig = plt.figure()\nlassop_plot = lassop_fig.add_subplot(1,1,1)\nlassop_plot.plot(np.log(lassop[0]),lassop[1].T)\nlassop_plot.set_xlabel('lambda value (log scale)')\nlassop_plot.set_ylabel('Coefficient estimate value')\nlassop_plot.set_title('lasso solution path')\n\n#%%\nlassop_fig.savefig('plots/lasso_path.png')\n#%%\n# Next let's try some different values of l1_ratio (which then incorporates\n# the l2 constraint)\n# Next fitting the lasso and finding the optimal regularization using\n# built in cross-validation provides further improvement over the LASSO\nen_cv = ElasticNetCV(fit_intercept=True, n_alphas=100, normalize=False, \n l1_ratio=0.01)\nen_cv.fit(x_train, y_train)\np_en_cv = 
en_cv.predict(x_test)\nprint(r2_score(y_test, p_en_cv))\nprint(mean_squared_error(y_test, p_en_cv))\nprint(sum(en_cv.coef_ == 0))\n\n#%%\nenp = en_cv.path(x_train, y_train)\nenp_fig = plt.figure()\nenp_plot = enp_fig.add_subplot(1,1,1)\nenp_plot.plot(np.log(enp[0]),enp[1].T)\nenp_plot.set_xlabel('lambda value (log scale)')\nenp_plot.set_ylabel('Coefficient estimate value')\nenp_plot.set_title('EN solution path')\n\n#%%\nenp_fig.savefig('plots/en_path.png')\n\n#%%\n# Update results\nres_df.loc[2,:] = [mean_squared_error(y_test, p_lasso_cv), 'lasso']\nres_df.loc[3,:] = [mean_squared_error(y_test, p_en_cv), 'EN']\nprint(res_df)\n#%%\n# Next we take a look at a model motivated by the underlying physics associated\n# with the material. A key takeaway here is how much better this approach does\n# than an \"out of the box\" machine learning technique.\nds_reg = pd.read_excel(data_dir, sheetname = 'Distance and Shape regularizer', \n header=None).values\n\n# Helper function for fitting our model\ndef mat_pow(x, p):\n u, d, v = svd(x)\n return u.dot(np.diag(d**p)).dot(v)\n \nsq_ds_reg = mat_pow(ds_reg, 0.5)\nx_train_aug = np.vstack((x_tr, sq_ds_reg))\n\n# Create augmented matrix\ny_train_aug = np.concatenate([y_train, np.zeros(x_train_aug.shape[1])])\nen_ds_reg = LinearRegression(fit_intercept=False, normalize=False)\nen_ds_reg.fit(x_train_aug, y_train_aug)\n# Predict on test\np_ds_reg_test = en_ds_reg.predict(x_te)\nprint(mean_squared_error(y_test, p_ds_reg_test))\nprint(r2_score(y_test, p_ds_reg_test))\n\n\n#%% \n###############################################################################\n# Next we'll look at a hyperspectral imaging (HSI) application\n###############################################################################\n# Load HSI data\n\nhsi = loadmat('HSIData.mat')\nkeys = hsi.keys()\n# Take a look at what's contained in the object\nprint(keys)\n\n# Grab the objects we'll be using\nY = hsi['Y']\nYarr = hsi['Yarr']\nlab = hsi['labels']\nlab2 = 
hsi['label2']\nall_indx = np.arange(len(lab2))\ntrain_indx = hsi['indx']\ntest_indx = np.delete(all_indx, train_indx)\n\n#%%\n# Take a look at the image\nimg_fig = plt.figure()\nimg_plot = img_fig.add_subplot(1,1,1)\nimg_plot.grid(False)\nimg_plot.imshow(Yarr[:,:,[100,50,10]])\nimg_plot.set_xticklabels(['']*Yarr.shape[0])\nimg_plot.set_yticklabels(['']*Yarr.shape[0])\n\n#%%\nimg_fig.savefig('plots/kidney_image.png')\n#%% \n# Grab two wavelengths and visualize the distribution of classes, kidney vs.\n# other\nkid2_fig = plt.figure(figsize=(8,8))\nkid2_class = kid2_fig.add_subplot(1,1,1)\nkid2_mark = ['o', 'x']\nkid2_col = ['red', 'blue']\nkid2_leg = ['kidney', 'other']\nfor i in range(2):\n ind = lab2 == i + 1\n kid2_class.scatter(Y[ind,80], Y[ind,122], color=kid2_col[i], \n marker=kid2_mark[i], label = kid2_leg[i], alpha=0.5)\nkid2_class.set_xlabel('Wavelength 81')\nkid2_class.set_ylabel('Wavelength 123')\nkid2_class.legend()\nkid2_fig\n\n#%%\nkid2_fig.savefig('plots/kid_2class.png')\n\n#%%\n# Next lets plot the image with the 2 class labels. 
Since we're going to be\n# reusing this set of plots let's write a simple function to simplify things\ndef plot_labels(l,s,cmap=plt.cm.hot_r):\n \n fig = plt.figure(figsize=(8,8))\n plot = fig.add_subplot(1,1,1)\n plot.grid(False)\n plot.imshow(np.reshape(l, s).T, cmap=cmap)\n plot.set_xticklabels(['']*s[0])\n plot.set_yticklabels(['']*s[0])\n plt.show()\n \n return fig\n\nclass_fig = plot_labels(lab2, (Yarr.shape[0],Yarr.shape[1]))\n\n#%%\nclass_fig.savefig('plots/img_2class.png')\n\n#%%\n# Run LDA\nlda = LinearDiscriminantAnalysis(solver='lsqr')\nlda.fit(Y[train_indx,:], lab2[train_indx])\np_lda = lda.predict_proba(Y[test_indx,:])\nl_lda = lda.predict(Y[test_indx,:])\nconfusion_matrix(lab2[test_indx], l_lda)\n\n#%% Visualize results\nlda_fig = plot_labels(lda.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\n\n#%%\nlda_fig.savefig('plots/lda_2class_pred.png')\n\n#%%\n# Try out sparse lda\nslda = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=0.9)\nslda.fit(Y[train_indx,:], lab2[train_indx])\np_slda = slda.predict_proba(Y[test_indx,:])\nl_slda = slda.predict(Y[test_indx,:])\nconfusion_matrix(lab2[test_indx], l_slda)\n\n#%%\nslda_fig = plot_labels(slda.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\n\n#%%\nslda_fig.savefig('plots/slda_2class_pred.png')\n\n#%%\n# Let's take a look at some performance metrics\naroc_lda = roc_auc_score(lab2[test_indx]-1, p_lda[:,1])\naroc_slda = roc_auc_score(lab2[test_indx]-1, p_slda[:,1])\n\n# We can always look at plots of the results\nroc_lda = roc_curve(lab2[test_indx]-1, p_lda[:,1])\nroc_slda = roc_curve(lab2[test_indx]-1, p_slda[:,1])\n\nroc_fig = plt.figure(figsize=(8,8))\nroc_plot = roc_fig.add_subplot(1,1,1)\nroc_plot.plot(roc_lda[0], roc_lda[1], label = 'LDA: AUC = ' + str(aroc_lda))\nroc_plot.plot(roc_slda[0], roc_slda[1], label = 'SLDA: AUC = ' + str(aroc_slda))\nroc_plot.legend()\nroc_plot.set_xlabel('False Positive Rate')\nroc_plot.set_ylabel('True Positive Rate')\n\n#%%\nroc_fig.savefig('plots/roc_lda_slda.png')\n\n#%%\n# 
Next running logistic regression\nlr = LogisticRegression(C=1e6, fit_intercept=False)\nlr.fit(Y[train_indx,:], lab2[train_indx])\np_lr = lr.predict_proba(Y[test_indx,:])\nl_lr = lr.predict(Y[test_indx,:])\naroc_lr = roc_auc_score(lab2[test_indx]-1, p_lr[:,1])\n\n#%% Can also try sparse logistic regression\nslr = LogisticRegression(C=1.0, fit_intercept=False, penalty='l1')\nslr.fit(Y[train_indx,:], lab2[train_indx])\np_slr = slr.predict_proba(Y[test_indx,:])\nl_slr = slr.predict(Y[test_indx,:])\naroc_slr = roc_auc_score(lab2[test_indx]-1, p_slr[:,1])\n\n#%% And visualize LR\nlr_fig = plot_labels(lr.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\n#%%\nlr_fig.savefig('plots/lr_2class_pred.png')\n\n#%% SLR\nslr_fig = plot_labels(slr.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\n\n#%%\nslr_fig.savefig('plots/slr_2class_pred.png')\n\n#%%\n# And look at the roc curves\n# We can always look at plots of the results\nroc_lr = roc_curve(lab2[test_indx]-1, p_lr[:,1])\nroc_slr = roc_curve(lab2[test_indx]-1, p_slr[:,1])\n\nroc_lrfig = plt.figure(figsize=(8,8))\nroc_lrplot = roc_lrfig.add_subplot(1,1,1)\nroc_lrplot.plot(roc_lda[0], roc_lda[1], \n label = 'LR: AUC = ' + str(aroc_lr))\nroc_lrplot.plot(roc_slda[0], roc_slda[1], \n label = 'SLR: AUC = ' + str(aroc_slr))\nroc_lrplot.legend()\nroc_lrplot.set_xlabel('False Positive Rate')\nroc_lrplot.set_ylabel('True Positive Rate')\n\n#%%\nroc_lrfig.savefig('plots/roc_lr_slr.png')\n\n#%% \n# Take a look at the coefficient estimates\ncoefs_fig = plt.figure()\nlr_coefs_plot = coefs_fig.add_subplot(2, 1, 1)\nlr_coefs_plot.bar(np.arange(Y.shape[1]), lr.coef_[0])\nlr_coefs_plot.set_title('Logistic Regression')\nlr_coefs_plot.set_ylabel('Value')\n\nslr_coefs_plot = coefs_fig.add_subplot(2, 1, 2)\nslr_coefs_plot.bar(np.arange(Y.shape[1]), slr.coef_[0])\nslr_coefs_plot.set_title('Sparse Logistic Regression')\nslr_coefs_plot.set_ylabel('Value')\nslr_coefs_plot.set_xlabel('Wavelength')\n\n#%%\ncoefs_fig.savefig('plots/lr_slr_coef.png')\n\n#%%\n# These 
models can also be easily extended to handle multiple classes\nmlr = LogisticRegression(C=1e6, fit_intercept=False, multi_class='multinomial',\n solver='lbfgs')\nmlr.fit(Y[train_indx,:], lab[train_indx])\np_mlr = mlr.predict(Y[test_indx,:])\npd.DataFrame(confusion_matrix(lab[test_indx], p_mlr))\n\n#%%\n# Sparse version\nsmlr = LogisticRegression(C=1000, fit_intercept=False,\n penalty='l1', solver='liblinear')\nsmlr.fit(Y[train_indx,:], lab[train_indx])\np_smlr = smlr.predict(Y[test_indx,:])\npd.DataFrame(confusion_matrix(lab[test_indx], p_smlr))\n\n#%% \n# Visualize the results\nmclass_fig = plot_labels(lab, (Yarr.shape[0],Yarr.shape[1]), \n cmap = plt.cm.Blues_r)\n\n#%%\nmclass_lr_fig = plot_labels(mlr.predict(Y), (Yarr.shape[0],Yarr.shape[1]), \n cmap = plt.cm.Blues_r)\n\n#%%\nmclass_slr_fig = plot_labels(smlr.predict(Y), (Yarr.shape[0],Yarr.shape[1]), \n cmap = plt.cm.Blues_r)\n\n#%%\nmclass_fig.savefig('plots/mclass.png')\nmclass_lr_fig.savefig('plots/lr.png')\nmclass_slr_fig.savefig('plots/slr.png')\n\n#%%\n# Try fitting SVM\nlsvc = svm.SVC(kernel='linear')\nlsvc.fit(Y[train_indx,:], lab2[train_indx])\nl_lsvc = lsvc.predict(Y[test_indx,:])\nlsvc_fig = plot_labels(lsvc.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\nconfusion_matrix(lab2[test_indx], l_lsvc)\n#%%\nksvc = svm.SVC(kernel='rbf', gamma = 0.5)\nksvc.fit(Y[train_indx,:], lab2[train_indx])\nl_ksvc = ksvc.predict(Y[test_indx,:])\nksvc_fig = plot_labels(ksvc.predict(Y), (Yarr.shape[0],Yarr.shape[1]))\nconfusion_matrix(lab2[test_indx], l_ksvc)\n\n#%%\n# Save images\nlsvc_fig.savefig('plots/lsvc.png')\nksvc_fig.savefig('plots/ksvc.png')\n\n#%%\n# Multi-class SVM\n\n# Try fitting SVM\nlmsvc = svm.SVC(kernel='linear', C=1000)\nlmsvc.fit(Y[train_indx,:], lab[train_indx])\nl_lmsvc = lmsvc.predict(Y[test_indx,:])\nlmsvc_fig = plot_labels(lmsvc.predict(Y), (Yarr.shape[0],Yarr.shape[1]), \n plt.cm.Blues_r)\npd.DataFrame(confusion_matrix(lab[test_indx], l_lmsvc))\n#%%\nkmsvc = svm.SVC(kernel='rbf', gamma = 0.5, 
C=1000)\nkmsvc.fit(Y[train_indx,:], lab[train_indx])\nl_kmsvc = kmsvc.predict(Y[test_indx,:])\nkmsvc_fig = plot_labels(kmsvc.predict(Y), (Yarr.shape[0],Yarr.shape[1]), \n plt.cm.Blues_r)\npd.DataFrame(confusion_matrix(lab[test_indx], l_kmsvc))\n\n#%%\n# Save images\nlmsvc_fig.savefig('plots/lmsvc.png')\nkmsvc_fig.savefig('plots/kmsvc.png')\n\n#%% \n# Next we try fitting a simple NN\nmlp = MLPClassifier(hidden_layer_sizes=(50,50,))\nmlp.fit(Y[train_indx,:], lab[train_indx])\nl_mlp = mlp.predict(Y[test_indx,:])\npd.DataFrame(confusion_matrix(lab[test_indx], l_mlp))\n\nmlp_fig = plot_labels(mlp.predict(Y), (Yarr.shape[0],Yarr.shape[1]), \n plt.cm.Blues_r)\n\n#%%\nmlp_fig.savefig('plots/mlp.png')","sub_path":"Supervised_Learning/supervised_learning.py","file_name":"supervised_learning.py","file_ext":"py","file_size_in_byte":24137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"274406622","text":"import time\nfrom collections import Counter\n\nclass _ProfEntry:\n def __init__(self, name: str, parent: 'Profiler'):\n self.name = name\n self.parent = parent\n\n def __enter__(self):\n self.parent.enter(self.name)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.parent.exit()\n\nclass Profiler:\n def __init__(self, disabled: bool = False):\n self.disabled = disabled\n self._timing = dict()\n self._counter = Counter()\n self._context = []\n self._start_time_by_context = dict()\n self.separator = '/'\n\n def __call__(self, *args, **kwargs):\n if args:\n return self.profile(args[0])\n\n def profile(self, name):\n return _ProfEntry(name, self)\n\n def enter(self, name: str):\n if not self.disabled:\n if self.separator in name:\n raise RuntimeError(\"Unsupported context name, don't use '%s': %s\" % (self.separator, name))\n self._context.append(name)\n self._start_time_by_context[self.separator.join(self._context)] = time.time()\n\n def exit(self):\n if not self.disabled:\n prev_context_name = 
self.separator.join(self._context)\n self._context.pop()\n delta = time.time() - self._start_time_by_context[prev_context_name]\n self._timing[prev_context_name] = (self._timing.get(prev_context_name) or 0.0) + delta\n self._counter[prev_context_name] += 1\n\n def clear(self):\n self._timing.clear()\n self._start_time_by_context.clear()\n self._context.clear()\n self._counter.clear()\n\n def tree(self):\n root_tree = dict()\n for path, value in self._timing.items():\n path = path.split(self.separator)\n tree = root_tree\n for p in path:\n if p in tree:\n tree = tree[p]\n else:\n tree[p] = dict()\n tree = tree[p]\n tree['__timing__'] = value\n tree['__counts__'] = self._counter[path]\n return root_tree\n\n def print(self):\n def traverse(tree):\n pass\n traverse(self.tree())\n\nprofiler = Profiler()\n","sub_path":"my/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"341348919","text":"\"\"\"\nAuthors: Swati Bhartiya, Victor Trejo, and Utkarsh Bali\nDescription: This file preprocessed data for the currency exchange problem.\n\t\t\t The data is sampled using two sample rates daily at closing day and\n\t\t\t hourly. 
Each sampling will yield three files for training, testing and cross validation.\n\t\t\t These files are stored under data/processed/currency_exchange folder.\n\t\t\t The data is normalized by dividing the rate output value by the max.\n\"\"\"\n\nimport sys, os, csv, pywt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import shuffle\n\n# Adding reference to the scripts folder\ncurrentFileDir = os.path.dirname(\\\n\tos.path.abspath(__file__)\n)\nsys.path.append(\\\n\tcurrentFileDir + '/../'\n)\n\nimport utilities as utl\n\n\ndef denoise(signal, levels = 3):\n\t\"\"\"\n\tApplies Haar Wavelet transforms to denoise the data signal\n\tParameters:\n\t\tsignal -> signal to be denoised\n\t\tlevels -> max level of decomposition.\n\treturns: the denoised signal.\n\t\"\"\"\n\twavelet = pywt.Wavelet('haar')\n\tn = levels\n\tthreshold = np.sqrt( 2*np.log( n*np.log2(n) ) )\n\tcoefficients = pywt.wavedec(\\\n\t\tsignal,\n\t\twavelet,\n\t\tlevel = n\n\t)\n\t# Soft Threshold function\n\tsoftThresholdFunction = lambda x: pywt.threshold(x, threshold)\n\tconservedCoefficients = map(softThresholdFunction, coefficients)\n\treturn pywt.waverec(conservedCoefficients, wavelet)\n\n\t\n\ndef getClosingDaySamples(data):\n\t\"\"\"\n\tSamples the data at sampling rate by date choosing the closing day value/\n\tparameters: \n\t\t\t\tdata -> data to sample\n\n\treturn: The sampled data\n\t\"\"\"\n\tdata = [tuple(r.split()) for r in data]\n\tsamples = []\n\tfor i in xrange(len(data)):\n\t\tif i == (len(data)-1) or data[i][0]!=data[i+1][0]:\n\t\t\tsamples.append(data[i])\n\treturn samples\n\n\ndef getHourlyDataSamples(data):\n\t\"\"\"\n\tSamples the data at sampling rate by hour\n\tparameters: \n\t\t\t\tdata -> data to sample\n\n\treturn: The sampled data\n\t\"\"\"\n\tdata = [tuple(r.split()) for r in data]\n\tsamples = []\n\tsamplesForHour = []\n\tfor i in xrange(len(data)):\n\t\tsamplesForHour.append(data[i])\n\t\tif i == (len(data)-1) or 
int(float(data[i][1]))!=int(float(data[i+1][1])):\n\t\t\tday = data[i][0]\n\t\t\thour = str(int(float(data[i][1])))\n\t\t\taverageForHour = str(float(sum(float(x[2]) for x in samplesForHour))/len(samplesForHour))\n\t\t\tsamples.append((day, hour, averageForHour))\n\t\t\tsamplesForHour = []\n\treturn samples\n\n\ndef plotTimeSeries(atClosingDaySampledData, hourlySampledData, extraTitle=\"\"):\n\t\"\"\"\n\tPlots the time series for the sampling by day and hour.\n\tParameters:\n\t\t\t atClosingDaySampledData -> sampled data by day\n\t\t\t hourlySampledData -> sampled data by hour\n\t\"\"\"\n\thourlyValues = [float(x[2]) for x in hourlySampledData]\n\tatClosingDayValues = [float(x[2]) for x in atClosingDaySampledData]\n\tplt.subplot(211)\n\tplt.title(\"Values Sampled at closing day {}\".format(extraTitle))\n\tplt.plot(atClosingDayValues)\n\tplt.subplot(212)\n\tplt.title(\"Values Sampled hourly {}\".format(extraTitle))\n\tplt.plot(hourlyValues)\n\tplt.show()\n\ndef processedFileNamesLocations(samplingType):\n\t\"\"\"\n\tGets the location of the destination file for training, cross validation and\n\ttesting, given a string that represents the type of sampling.\n\tParameters:\n\t\t\t\tsamplingType -> string that identifies the type of sampling\n\tReturns: three string values: training file location, cross validation file location and\n\t\t\t testing file location.\n\t\"\"\"\n\tdirName = '{}/../../data/processed/currency_exchange/'.format(currentFileDir)\n\ttraining = '{}{}_training.dat'.format(dirName, samplingType)\n\ttesting = '{}{}_testing.dat'.format(dirName, samplingType)\n\treturn training, testing \n\n\ndef storeProcessedData(data, samplingType, level):\n\t\"\"\"\n\tStores the preprocessed data.\n\tIt divides in to training(0.70 of data),\n\tand testing (30\\% of the data), and stores\n\tit in three different files for each split.\n\tParameters:\n\t\t\tdata -> data to be stored\n\t\t\tsamplingType -> to tag the files so it can \n\t\t\t\t\t\t\tbe differentiated from 
the other sampling \n\t\t\t\t\t\t\ttype files.\n\t\"\"\"\n\ttotal = len(data)\n\t# Splits the data into training, cross validation\n\t# and testing with the ratios 0.70, 0.20, 0.10 of the \n\t# total \n\ttrainingSize = (7 * total)/10\n\t# This value is going to be used as normalizer.\n\tmaxExchangeRateValue = max(float(x[2]) for x in data)\n\ttrainingSet = data[:trainingSize]\n\t# Normalizing hourly data\n\ttrainingSet = normalize(\\\n\t\ttrainingSet,\n\t\tmaxExchangeRateValue\n\t)\n\n\t# Denoising daily data\n\ttrainingSet = denoiseOutputData(\\\n\t\ttrainingSet, level\n\t)\n\n\ttestingSet = data[trainingSize:]\n\ttestingSet = normalize(\\\n\t\ttestingSet,\n\t\tmaxExchangeRateValue\n\t)\n\n\ttestingSet = denoiseOutputData(\\\n\t\ttestingSet, level\n\t)\n\n\t# Getting the names of the destination file.\n\ttrainLoc, testLoc = processedFileNamesLocations(samplingType)\n\ttrainingSet = [\" \".join(x) for x in trainingSet]\n\ttestingSet = [\" \".join(x) for x in testingSet]\n\t\n\t# Saving the split files.\n\tutl.saveFileAtLocation(trainingSet, trainLoc)\n\tutl.saveFileAtLocation(testingSet, testLoc)\n\ndef normalize(data, xMax):\n\t\"\"\"\n\tNormalizes the exchange rate values by dividing each one by the max.\n\tParameters:\n\t\tdata -> data to be normalized\n\t\txMax -> max value\n\treturns: the normalized data.\n\t\"\"\"\n\tdays, hours, outputs = zip(*data)\n\toutputs = map(lambda x: str(float(x)/xMax), outputs)\n\treturn zip(days, hours, outputs)\n\n\ndef denoiseOutputData(data, levels = 3):\n\t\"\"\"\n\tDenoises the output signal of the data.\n\tParameters:\n\t\tdata -> data that contain the signal to denoise\n\t\tlevels -> max level of decomposition of the signal\n\tReturns: the data with the signal denoised.\n\t\"\"\"\n\tdays, hours, outputs = zip(*data) \n\toutputs = denoise(outputs, levels)\n\toutputs = map(lambda x: str(x), outputs)\n\treturn zip(days, hours, outputs)\n\n\n\nif __name__ == \"__main__\":\n\tfirstSourceFileLocation = 
'{}/../../data/unprocessed/currency_exchange/C1-5.dat'.format(currentFileDir)\n\tsecondSourceFileLocation = '{}/../../data/unprocessed/currency_exchange/C6-10.dat'.format(currentFileDir)\t\n\tignoreLinesFunction = lambda l: 'set C part' in l\n\n\tdataFirstHalf = utl.readFileIgnoringLinesForCondition(\\\n\t\t\tfirstSourceFileLocation,\n\t\t\tignoreLinesFunction\n\t)\n\n\tdataSecondHalf = utl.readFileIgnoringLinesForCondition(\\\n\t\t\tsecondSourceFileLocation,\n\t\t\tignoreLinesFunction\n\t)\n\n\twholeData = []\n\twholeData.extend(dataFirstHalf)\n\twholeData.extend(dataSecondHalf)\n\t\n\t# Sampling at closing day and hourly.\n\t# For the hourly sampling an average for hour is calculated.\n\tatClosingDaySampledData = getClosingDaySamples(wholeData)\n\thourlySampledData = getHourlyDataSamples(wholeData)\n\t\n\t# Plotting the time series after sampling\n\tplotTimeSeries(\\\n\t\tatClosingDaySampledData,\n\t\thourlySampledData\n\t)\n\n\t# # Normalizing daily data\n\t# atClosingDaySampledDataNormalized = normalize(\\\n\t# \tatClosingDaySampledData,\n\t# \tmaxExchangeRateValue\n\t# )\n\n\t# # Normalizing hourly data\n\t# hourlySampledDataNormalized = normalize(\\\n\t# \thourlySampledData,\n\t# \tmaxExchangeRateValue\n\t# )\n\n\t# # Denoising daily data\n\t# atClosingDaySampledDataDenoised = denoiseOutputData(\\\n\t# \tatClosingDaySampledDataNormalized, 2\n\t# )\n\n\t# # Denoising hourly data\n\t# hourlySampledDataDenoised = denoiseOutputData(\\\n\t# \thourlySampledDataNormalized, 3\n\t# )\n\n\n\t# Plotting the normalized and denoised time series after sampling\n\t# plotTimeSeries(\\\n\t# \tatClosingDaySampledDataDenoised,\n\t# \thourlySampledDataDenoised,\n\t# \t\"Normalized and Denoised\"\n\t# )\n\n\t# Storing processed data\n\tstoreProcessedData(atClosingDaySampledData, 'at_closing_day', 2)\n\tstoreProcessedData(hourlySampledData, 'hourly', 
3)\n\t\n\t\n\n\n","sub_path":"scripts/currency_exchange/currency_exchange_preprocessing.py","file_name":"currency_exchange_preprocessing.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"369945885","text":"from crhd_generator import PlotCity, PlotCRHD\nimport geopandas as gpd\n\nsave_path = './data/Guangzhou/images/'\ngrid_path = './data/Guangzhou/grids/tianhe_gridded.shp'\n\ngrids = gpd.read_file(grid_path)\n\ngrids.crs = 'EPSG:4326'\n\ncoords = {\n 'x': [],\n 'y': []\n }\n\nimg = PlotCRHD(center_point=(23.222421, 113.346080), \n dist=1000, \n name='Guanghzou',\n save_path='./data/'\n ) ","sub_path":"crhd_plot_gz.py","file_name":"crhd_plot_gz.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"277030586","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.8/site-packages/MetaStalk/modules/GPSCheck.py\n# Compiled at: 2020-05-08 18:09:31\n# Size of source mod 2**32: 1351 bytes\n\"\"\"Makes geo chart with plots of GPS data\"\"\"\nimport logging\nimport plotly.express as px\nimport MetaStalk.utils as utils\nlog = logging.getLogger('MetaStalk')\n\ndef gps_check(photos: list) -> px.scatter_mapbox:\n \"\"\"GPS_Check\n\n Takes a list of photos and creates a geo plot of them\n\n Arguments:\n photos {list} -- A list of dictionaries with phot information.\n\n Returns\n px.scatter_mapbox -- Map plot with photos plotted.\n \"\"\"\n log.info('Starting GPS Chart')\n lats = []\n longs = []\n gps_photos = []\n for each in photos:\n if 'GPS GPSLatitudeRef' in each.keys():\n gps_photos.append(each['item'])\n gps_data = utils.gps_parse(each)\n lats.append(gps_data['latitude'])\n longs.append(gps_data['longitude'])\n log.debug('%s has GPS data', 
each['item'])\n else:\n log.info('%s has no GPS data', each['item'])\n else:\n points = []\n for x, _ in enumerate(gps_photos):\n points.append((lats[x], longs[x]))\n else:\n fig = px.scatter_mapbox(lon=longs, lat=lats,\n hover_name=gps_photos,\n title='Geo Locations')\n fig.update_layout(mapbox_style='open-street-map', title_x=0.5)\n return fig","sub_path":"pycfiles/MetaStalk-2.2.post1.linux-x86_64.tar/GPSCheck.cpython-38.py","file_name":"GPSCheck.cpython-38.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"572321244","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 22 23:34:20 2019\n\n@author: Carlos\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport statsmodels.formula.api as smf\n\nFOLDER = r'C:\\Users\\Carlos\\OneDrive - UC San Diego\\IRS\\outfiles'\nFILE = '\\FINALDATABASE.xlsx'\n\nSHOCK_YEARS = [1998, 2001, 2002, 2004, 2005, 2006, 2007,2008,2009,2010, 2011, 2012, 2013, 2014]\nRESPONSE_VARS = ['employmentpopratio']\n\nDF = pd.read_excel(FOLDER + FILE, index_col=0)\nDF.employmentpopratio = DF.employmentpopratio * 100\n\n\ndef generate_vars(DF,RESPONSE_VARS,SHOCK_YEARS,PANEL_ID='cz',YEAR_ID='year',PREFIX='shock_'):\n for RESPONSE in RESPONSE_VARS:\n for YEAR in SHOCK_YEARS:\n TEMP = DF[ DF[YEAR_ID] == YEAR ][[PANEL_ID,RESPONSE]]\n TEMP = TEMP.rename(columns={RESPONSE: 'd' + RESPONSE + '_' + str(YEAR)})\n DF = pd.merge(DF, TEMP, on='cz', how='left')\n DF['d' + RESPONSE + '_' + str(YEAR)] = (DF[RESPONSE]-DF['d' + RESPONSE + '_' + str(YEAR)])*100\n return DF\n\nDF = generate_vars(DF, RESPONSE_VARS, SHOCK_YEARS)\n\ndef irf(DF, RESPONSE_VARS, SHOCK_YEARS, PANEL_ID='cz',YEAR_ID='year',PREFIX='shock_'):\n import pandas as pd\n import numpy as np\n \n RESULTS = pd.DataFrame()\n for SHOCK in SHOCK_YEARS:\n for RESPONSE in RESPONSE_VARS:\n BETAS = []\n INTERCEPTS = []\n SE = []\n for YEAR in set(DF.year):\n if YEAR 
<= SHOCK:\n pass\n else:\n try:\n results = smf.ols(\n ('d' + str(RESPONSE) + '_' + str(SHOCK) +\n ' ~ 1 + shock_' + str(SHOCK) +\n ' + avg_initial_income_' + str(SHOCK)),\n data=DF[DF[YEAR_ID] == YEAR]).fit()\n INTERCEPTS.append(results.params[0])\n BETAS.append(results.params[1])\n SE.append(results.bse[1])\n except:\n BETAS.append(np.nan)\n SE.append(np.nan)\n BETAS = [np.nan] * (len(set(DF.year)) - len(BETAS)) + BETAS\n SE = [np.nan] * (len(set(DF.year)) - len(SE)) + SE\n RESULTS[str(RESPONSE) + str(SHOCK) + '_b'] = BETAS\n RESULTS[str(RESPONSE) + str(SHOCK) + '_se'] = SE\n RESULTS['year'] = set(DF.year)\n return RESULTS\n\nfor YEAR in SHOCK_YEARS:\n DF['shock_' + str(YEAR)] = DF['shock_' + str(YEAR)] * DF['avg_initial_income_' + str(YEAR)]\n\nRESULTS = irf(DF, RESPONSE_VARS, SHOCK_YEARS)\nRESULTS.to_excel(FOLDER + '/results2.xls')\n","sub_path":"regional_inequality_US/7.LPIRF_estimation_multiplier.py","file_name":"7.LPIRF_estimation_multiplier.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"288959957","text":"\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.base.catalog\n This module implements vManage API Catalog\n\"\"\"\nfrom collections import namedtuple\nfrom .models_base import IndexConfigItem, ConfigItem, RealtimeItem\nfrom .rest_api import is_version_newer\n\n\nCATALOG_TAG_ALL = 'all'\n\n# Catalog of configuration items\n_catalog = list() # [(, , , , ), ...]\n\nCatalogEntry = namedtuple('CatalogEntry', ['tag', 'info', 'index_cls', 'item_cls', 'min_version'])\n\n# Catalog of realtime items\n_rt_catalog = list() # [(, , , , ), ...]\n\nRTCatalogEntry = namedtuple('RTCatalogEntry', ['tag', 'selector', 'info', 'rt_cls', 'min_version'])\n\n#\n# Configuration catalog functions\n#\n# Order in which config items need to be deleted (i.e. 
reverse order in which they need to be pushed), considering\n# their high-level dependencies.\n_tag_dependency_list = [\n 'template_device',\n 'template_feature',\n 'policy_vsmart',\n 'policy_vedge',\n 'policy_security',\n 'policy_voice',\n 'policy_customapp',\n 'policy_definition',\n 'policy_profile',\n 'policy_list',\n]\n\n\ndef ordered_tags(tag, single=False, reverse=False):\n \"\"\"\n Generator which yields the specified tag plus any 'child' tags (i.e. dependent tags), following the order in which\n items need to be removed based on their dependencies (e.g. template_device before template_feature). The overall\n order is defined by _tag_dependency_list.\n If special tag 'all' is used, all items from _tag_dependency_list are yielded.\n :param tag: tag string or 'all'\n :param single: Optional, when True only a the one (first) tag is yielded. Used mainly for convenience of the caller.\n :param reverse: If true, yield tags in reverse order\n :return: Selected tags in order, as per _tag_dependency_list\n \"\"\"\n find_tag = (tag == CATALOG_TAG_ALL)\n for item in _tag_dependency_list if not reverse else reversed(_tag_dependency_list):\n if not find_tag:\n if item == tag:\n find_tag = True\n else:\n continue\n yield item\n\n if single:\n break\n\n\ndef register(tag, info, item_cls, min_version=None):\n \"\"\"\n Decorator used for registering config item index/handler classes with the catalog.\n The class being decorated needs to be a subclass of IndexConfigItem.\n :param tag: Tag string associated with this item. 
String 'all' is reserved and cannot be used.\n :param info: Item information used for logging purposes\n :param item_cls: The config item handler class, needs to be a subclass of ConfigItem\n :param min_version: (optional) Minimum vManage version that supports this catalog item.\n :return: decorator\n \"\"\"\n def decorator(index_cls):\n if not isinstance(index_cls, type) or not issubclass(index_cls, IndexConfigItem):\n raise CatalogException(f'Invalid config item index class register attempt: {index_cls.__name__}')\n if not isinstance(item_cls, type) or not issubclass(item_cls, ConfigItem):\n raise CatalogException(\n f'Invalid config item class register attempt {index_cls.__name__}: {item_cls.__name__}'\n )\n if not isinstance(tag, str) or tag.lower() == CATALOG_TAG_ALL:\n raise CatalogException(f'Invalid tag provided for class {index_cls.__name__}: {tag}')\n if tag not in _tag_dependency_list:\n raise CatalogException(f'Unknown tag provided: {tag}')\n\n _catalog.append(CatalogEntry(tag, info, index_cls, item_cls, min_version))\n\n return index_cls\n\n return decorator\n\n\ndef catalog_size():\n \"\"\"\n Return number of entries in the catalog\n :return: integer\n \"\"\"\n return len(_catalog)\n\n\ndef catalog_iter(*tags, version=None):\n \"\"\"\n Return an iterator of (, , , ) tuples matching the specified tag(s) and supported\n by vManage version.\n :param tags: tags indicating catalog entries to return\n :param version: Target vManage version. 
Only returns catalog items supported by the target vManage.\n If not specified or None, version is not verified.\n :return: iterator of (, , , ) tuples from the catalog\n \"\"\"\n def match_tags(catalog_entry):\n return CATALOG_TAG_ALL in tags or catalog_entry.tag in tags\n\n def match_version(catalog_entry):\n return catalog_entry.min_version is None or version is None or not is_version_newer(version,\n catalog_entry.min_version)\n\n return (\n (entry.tag, entry.info, entry.index_cls, entry.item_cls)\n for entry in _catalog if match_tags(entry) and match_version(entry)\n )\n\n\ndef catalog_tags():\n \"\"\"\n Return unique tags used by items registered with the catalog\n :return: Set of unique tags\n \"\"\"\n return {entry.tag for entry in _catalog}\n\n\n#\n# Realtime catalog functions\n#\ndef rt_register(tag, selector, info, min_version=None):\n \"\"\"\n Decorator used for registering realtime items with the realtime catalog.\n The class being decorated needs to be a subclass of RealtimeItem.\n :param tag: Tag string associated with this item. 
String 'all' is reserved and cannot be used.\n :param selector: String used to further filter entries that match the tags.\n :param info: Item information used for logging purposes\n :param min_version: (optional) Minimum vManage version that supports this catalog item.\n :return: decorator\n \"\"\"\n def decorator(realtime_cls):\n if not isinstance(realtime_cls, type) or not issubclass(realtime_cls, RealtimeItem):\n raise CatalogException(f'Invalid realtime item class register attempt: {realtime_cls.__name__}')\n if not isinstance(tag, str) or tag.lower() == CATALOG_TAG_ALL:\n raise CatalogException(f'Invalid tag provided for class {realtime_cls.__name__}: {tag}')\n if not isinstance(selector, str) or selector.lower() == CATALOG_TAG_ALL:\n raise CatalogException(f'Invalid selector provided for class {realtime_cls.__name__}: {selector}')\n\n _rt_catalog.append(RTCatalogEntry(tag, selector, info, realtime_cls, min_version))\n\n return realtime_cls\n\n return decorator\n\n\ndef rt_catalog_size():\n \"\"\"\n Return number of entries in the realtime catalog\n :return: integer\n \"\"\"\n return len(_rt_catalog)\n\n\ndef rt_catalog_iter(*tags, version=None):\n \"\"\"\n Return an iterator of (, ) tuples matching the specified tag(s), selector and supported\n by vManage version.\n :param tags: Tags to filter catalog entries to return. If 2 or more tags are provided, the last one is considered\n a selector.\n :param version: Target vManage version. 
Only returns catalog items supported by the target vManage.\n If not specified or None, version is not verified.\n :return: iterator of (, ) tuples from the realtime catalog\n \"\"\"\n if len(tags) > 1:\n group_list = tags[:-1]\n selector = tags[-1]\n else:\n group_list = tags\n selector = None\n\n def match_groups(catalog_entry):\n return CATALOG_TAG_ALL in group_list or catalog_entry.tag in group_list\n\n def match_selector(catalog_entry):\n return selector is None or catalog_entry.selector == selector\n\n def match_version(catalog_entry):\n return catalog_entry.min_version is None or version is None or not is_version_newer(version,\n catalog_entry.min_version)\n\n return (\n (entry.info, entry.rt_cls)\n for entry in _rt_catalog if match_groups(entry) and match_selector(entry) and match_version(entry)\n )\n\n\ndef rt_catalog_tags():\n \"\"\"\n Return unique tags used by items registered with the realtime catalog\n :return: Set of unique tags\n \"\"\"\n return {entry.tag for entry in _rt_catalog}\n\n\ndef rt_catalog_commands():\n \"\"\"\n Return set of commands registered with the realtime catalog. 
These are the combination of tags and selectors\n :return: Set of commands\n \"\"\"\n return {f'{entry.tag} {entry.selector}' for entry in _rt_catalog}\n\n\nclass CatalogException(Exception):\n \"\"\" Exception for config item catalog errors \"\"\"\n pass\n","sub_path":"cisco_sdwan/base/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"1460882","text":"import wx\nimport os\nimport os.path\n\nimport numpy as np\nimport PIL\nimport PIL.Image\n\nfrom skimage.data import astronaut\nfrom skimage.color import gray2rgb\nfrom skimage.filters import sobel\nfrom skimage.segmentation import felzenszwalb, slic, quickshift, watershed\nfrom skimage.segmentation import mark_boundaries\nimport skimage.util\n\nimport matplotlib.pyplot as plt\n# sys.path.append('./../../ControlPanel/')\n# import DefaultParams\n\nxsize = 256\nysize = 256\n# ----------------------------------------------------------------------\n# ----------------------------------------------------------------------\n# ----------------------------------------------------------------------\ndef MakeBitmapRGB(self, width, height):\n # Make a bitmap using an array of RGB bytes\n bpp = 3 # bytes per pixel\n bytes = array.array('B', [0] * width * height * bpp)\n\n for y in xrange(height):\n for x in xrange(width):\n offset = y * width * bpp + x * bpp\n r, g, b = self.GetRGB(x, y, bpp)\n bytes[offset + 0] = r\n bytes[offset + 1] = g\n bytes[offset + 2] = b\n\n self.rgbBmp = wx.BitmapFromBuffer(width, height, bytes)\n\ndef _Execute_Segmentation(self, img):\n\n scale_factor = img.shape[0] * img.shape[1] / (xsize * ysize)\n\n ID_Algorithm = self.SelectAlgorithm.GetSelection()\n if ID_Algorithm == 0: # Felzenszwalbs's method\n f_scale = self.f_scale.GetValue()\n f_sigma = self.f_sigma.GetValue()\n f_minsize = self.f_minsize.GetValue()\n segments = felzenszwalb(img, scale=f_scale, sigma=f_sigma, 
min_size=f_minsize)\n print(\"Felzenszwalb number of segments: {}\".format(len(np.unique(segments))))\n elif ID_Algorithm == 1: # SLIC\n s_nseg = self.s_nseg.GetValue() * scale_factor\n s_comp = self.s_comp.GetValue()\n s_sigma = self.s_sigma.GetValue()\n segments = slic(gray2rgb(img), n_segments=s_nseg, compactness=s_comp, sigma=s_sigma)\n print('SLIC number of segments: {}'.format(len(np.unique(segments))))\n elif ID_Algorithm == 2: # Quickshift\n q_ksize = self.q_ksize.GetValue()\n q_maxdist = self.q_maxdist.GetValue()\n q_ratio = self.q_ratio.GetValue()\n segments = quickshift(gray2rgb(img), kernel_size=q_ksize, max_dist=q_maxdist, ratio=q_ratio)\n print('Quickshift number of segments: {}'.format(len(np.unique(segments))))\n elif ID_Algorithm == 3: # Compact watershed\n w_nmark = self.w_nmark.GetValue() * scale_factor\n w_comp = self.w_comp.GetValue()\n gradient = sobel(img)\n segments = watershed(gradient, markers=w_nmark, compactness=w_comp)\n\n return segments\n\ndef SP_PrepareInitalPreview(self):\n\n img = PIL.Image.open('y=00000000,x=00000000.tif')\n img = img.crop((0, 0, xsize, ysize))\n img = img.convert(\"RGB\")\n w, h = img.size\n bimg = wx.Bitmap.FromBuffer(w, h, img.tobytes())\n\n return bimg\n\ndef SP_UpdatePreview(self, event):\n\n img = PIL.Image.open('y=00000000,x=00000000.tif')\n box = (0, 0, xsize, ysize)\n img = img.crop(box)\n img = skimage.util.img_as_float(img)\n segments = _Execute_Segmentation(self, img)\n img = mark_boundaries(img, segments, color=(1, 1, 0.5))\n img = (img * 255).astype('uint8')\n\n #print 'Max: ', np.max(img)\n #print 'Min: ', np.min(img)\n #print 'Segmant.shape: ', img.shape\n #print 'Segmant.shape: ', img.dtype\n\n\n\n #img = wx.ImageFromBuffer(img.shape[0], img.shape[1], img)\n #img = img.Scale(xsize, ysize, wx.IMAGE_QUALITY_HIGH)\n #bimg = img.ConvertToBitmap()\n # bimg = MakeBitmap2(img[:,:,0], img[:,:,1], img[:,:,2], 128)\n # bimg = wx.BitmapFromBuffer(img.shape[0], img.shape[1], img)\n # bimg = 
wx.BitmapFromBufferRGBA(img.shape[0], img.shape[1], img)\n\n image = PIL.Image.fromarray(img)\n bimg = wx.Bitmap.FromBuffer(img.shape[0], img.shape[1], image.tobytes())\n self.bitmap_1.SetBitmap(bimg)\n self.Refresh()\n self.Update()\n","sub_path":"Plugins/superpixel/Obs/wxDialogs.py","file_name":"wxDialogs.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"347647612","text":"# encoding: utf-8\n# Economic of co-firing in two power plants in Vietnam\n#\n#\n#\n# (c) Minh Ha-Duong, An Ha Truong 2016\n# minh.haduong@gmail.com\n# Creative Commons Attribution-ShareAlike 4.0 International\n#\n#\n\"\"\"Plot the air pollutants emissions and CO2 emissions figure.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nfrom manuscript1.parameters import MongDuong1System, NinhBinhSystem\nfrom model.utils import kt, Mt, y, array, concatenate\n\n#%%\n\n\ndef plot_emissions(system, axes):\n \"\"\"Plot to compare atmospheric pollution with and without cofiring.\"\"\"\n baseline = system.emissions_baseline() * y\n cofiring = system.emissions_cofiring() * y\n\n def emis(segment, pollutant=\"CO2\", unit=Mt):\n return (\n array(\n [\n baseline.at[\"Total_\" + segment, pollutant],\n cofiring.at[\"Total_\" + segment, pollutant],\n ]\n )\n / unit\n )\n\n def emis3(segment):\n return concatenate(\n [\n emis(segment, \"SO2\", kt),\n # emis(segment, \"PM10\", kt),\n emis(segment, \"PM2.5\", kt),\n emis(segment, \"NOx\", kt),\n ]\n )\n\n def barhstack(axes, bottom, width, height=0.48, xlabel=None):\n \"\"\"Plot a horizontal stacked bar, width is a sequence of three horizontal dimensions.\"\"\"\n axes.barh(bottom, width[0], height, color=\"darkred\", edgecolor=\"none\")\n axes.barh(\n bottom, width[1], height, width[0], color=\"mistyrose\", edgecolor=\"none\"\n )\n axes.barh(\n bottom,\n width[2],\n height,\n width[0] + width[1],\n color=\"salmon\",\n 
edgecolor=\"none\",\n )\n axes.tick_params(axis=\"y\", length=0)\n axes.set_xlabel(xlabel)\n\n bot1 = [0, 0.5]\n bot2 = [2, 2.5, 3.5, 4, 5, 5.5]\n\n barhstack(\n axes,\n bot1,\n [emis(\"plant\"), emis(\"transport\"), emis(\"field\")],\n xlabel=\"CO2 emission (Mt/y)\",\n )\n\n barhstack(\n axes.twiny(),\n bot2,\n [emis3(\"plant\"), emis3(\"transport\"), emis3(\"field\")],\n xlabel=\"Air pollutant Emission (kt/y)\",\n )\n\n plt.yticks(\n concatenate((bot1, bot2)),\n (\n \"CO2 ex ante\",\n \"CO2 ex post\",\n \"SO2 ex ante\",\n \"SO2 ex post\",\n # \"PM10 ex ante\",\n # \"PM10 ex post\",\n \"PM2.5 ex ante\",\n \"PM2.5 ex post\",\n \"NOx ex ante\",\n \"NOx ex post\",\n ),\n )\n\n legend_plant = mpatches.Patch(color=\"darkred\", label=\"Plant\")\n legend_transport = mpatches.Patch(color=\"mistyrose\", label=\"Reseller\")\n legend_field = mpatches.Patch(color=\"salmon\", label=\"Farmers\")\n axes.legend(\n handles=[legend_plant, legend_transport, legend_field],\n bbox_to_anchor=(0.98, 0.8),\n prop={\"size\": 9},\n title=system.plant.name + \" Emissions\",\n frameon=False,\n )\n\n\n# noinspection PyTypeChecker\nFIGURE, AXESS = plt.subplots(nrows=1, ncols=2, figsize=[12, 6])\nplot_emissions(MongDuong1System, AXESS[0])\nplot_emissions(NinhBinhSystem, AXESS[1])\nFIGURE.tight_layout()\n\nplt.savefig(\"figure_emissions.pdf\")\n","sub_path":"manuscript1/figure/emissions.py","file_name":"emissions.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"255079223","text":"import os\nimport shutil\nimport unittest\n\nimport cv2\nimport numpy as np\nimport requests\n\nimport paddlehub as hub\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\nclass TestHubModule(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n img_url = 'https://unsplash.com/photos/pg_WCHWSdT8/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjYyNDM2ODI4&force=true&w=640'\n if not os.path.exists('tests'):\n 
os.makedirs('tests')\n response = requests.get(img_url)\n assert response.status_code == 200, 'Network Error.'\n with open('tests/test.jpg', 'wb') as f:\n f.write(response.content)\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n img = cv2.imread('tests/test.jpg')\n video = cv2.VideoWriter('tests/test.avi', fourcc, 20.0, tuple(img.shape[:2]))\n for i in range(40):\n video.write(img)\n video.release()\n cls.module = hub.Module(name=\"humanseg_server\")\n\n @classmethod\n def tearDownClass(cls) -> None:\n shutil.rmtree('tests')\n shutil.rmtree('inference')\n shutil.rmtree('humanseg_server_output')\n shutil.rmtree('humanseg_server_video_result')\n\n def test_segment1(self):\n results = self.module.segment(paths=['tests/test.jpg'], use_gpu=False, visualization=False)\n self.assertIsInstance(results[0]['data'], np.ndarray)\n\n def test_segment2(self):\n results = self.module.segment(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=False)\n self.assertIsInstance(results[0]['data'], np.ndarray)\n\n def test_segment3(self):\n results = self.module.segment(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=True)\n self.assertIsInstance(results[0]['data'], np.ndarray)\n\n def test_segment4(self):\n results = self.module.segment(images=[cv2.imread('tests/test.jpg')], use_gpu=True, visualization=False)\n self.assertIsInstance(results[0]['data'], np.ndarray)\n\n def test_segment5(self):\n self.assertRaises(AssertionError, self.module.segment, paths=['no.jpg'])\n\n def test_segment6(self):\n self.assertRaises(AttributeError, self.module.segment, images=['test.jpg'])\n\n def test_video_stream_segment1(self):\n img_matting, cur_gray, optflow_map = self.module.video_stream_segment(frame_org=cv2.imread('tests/test.jpg'),\n frame_id=1,\n prev_gray=None,\n prev_cfd=None,\n use_gpu=False)\n self.assertIsInstance(img_matting, np.ndarray)\n self.assertIsInstance(cur_gray, np.ndarray)\n self.assertIsInstance(optflow_map, np.ndarray)\n 
img_matting, cur_gray, optflow_map = self.module.video_stream_segment(frame_org=cv2.imread('tests/test.jpg'),\n frame_id=2,\n prev_gray=cur_gray,\n prev_cfd=optflow_map,\n use_gpu=False)\n self.assertIsInstance(img_matting, np.ndarray)\n self.assertIsInstance(cur_gray, np.ndarray)\n self.assertIsInstance(optflow_map, np.ndarray)\n\n def test_video_stream_segment2(self):\n img_matting, cur_gray, optflow_map = self.module.video_stream_segment(frame_org=cv2.imread('tests/test.jpg'),\n frame_id=1,\n prev_gray=None,\n prev_cfd=None,\n use_gpu=True)\n self.assertIsInstance(img_matting, np.ndarray)\n self.assertIsInstance(cur_gray, np.ndarray)\n self.assertIsInstance(optflow_map, np.ndarray)\n img_matting, cur_gray, optflow_map = self.module.video_stream_segment(frame_org=cv2.imread('tests/test.jpg'),\n frame_id=2,\n prev_gray=cur_gray,\n prev_cfd=optflow_map,\n use_gpu=True)\n self.assertIsInstance(img_matting, np.ndarray)\n self.assertIsInstance(cur_gray, np.ndarray)\n self.assertIsInstance(optflow_map, np.ndarray)\n\n def test_video_segment1(self):\n self.module.video_segment(video_path=\"tests/test.avi\", use_gpu=False)\n\n def test_save_inference_model(self):\n self.module.save_inference_model('./inference/model')\n\n self.assertTrue(os.path.exists('./inference/model.pdmodel'))\n self.assertTrue(os.path.exists('./inference/model.pdiparams'))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"modules/image/semantic_segmentation/humanseg_server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"291957049","text":"from src.models.db import FetchFilterParams\nfrom src.models.db import AggregateDatasetModel, AggregateModelData\n\nclass SummarizeDatasetRepo(object):\n def __init__(self):\n self.fetchFilterParams = FetchFilterParams()\n self.aggregateDatasetModel = AggregateDatasetModel()\n self.aggregateModel = AggregateModelData() 
\n\n\n def search(self):\n corpus_stats = self.fetchFilterParams.search()\n return corpus_stats\n\n def aggregate(self, search_data):\n corpus_stats = []\n if search_data[\"type\"] == \"model\":\n corpus_stats,count = self.aggregateModel.data_aggregator(search_data)\n else:\n corpus_stats,count = self.aggregateDatasetModel.data_aggregator(search_data)\n if not corpus_stats:\n return corpus_stats,0\n return corpus_stats, count\n\n def aggregate_models(self, search_data):\n corpus_stats,count = self.aggregateModel.data_aggregator(search_data)\n if not corpus_stats:\n return corpus_stats,0\n return corpus_stats","sub_path":"backend/metric/ulca-metric-api/src/repositories/summarize_dataset.py","file_name":"summarize_dataset.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"461934572","text":"from io import BytesIO\nfrom PIL import Image, ImageDraw, ImageFont\nimport requests\nimport math\nimport os\n\ndefault_bg = os.path.join(os.path.dirname(__file__), 'assets', 'card.png')\nonline = os.path.join(os.path.dirname(__file__), 'assets', 'online.png')\noffline = os.path.join(os.path.dirname(__file__), 'assets', 'offline.png')\nidle = os.path.join(os.path.dirname(__file__), 'assets', 'idle.png')\ndnd = os.path.join(os.path.dirname(__file__), 'assets', 'dnd.png')\nstreaming = os.path.join(os.path.dirname(__file__), 'assets', 'streaming.png')\nfont1 = os.path.join(os.path.dirname(__file__), 'assets', 'font.ttf')\nfont2 = os.path.join(os.path.dirname(__file__), 'assets', 'font2.ttf')\n\nclass DisrankGenerator:\n def __init__(self, bg_image:str=None, profile_image:str=None, level:int=1, current_xp:int=0, user_xp:int=20, next_xp:int=100, user_position:int=1, user_name:str='AliTheKing#9129', user_status:str='online'):\n self.user_name = user_name\n self.user_position = user_position\n self.level = level\n self.current_xp = current_xp\n self.user_xp = user_xp\n self.next_xp = 
next_xp\n \n if not bg_image:\n card = Image.open(default_bg).convert(\"RGBA\")\n else:\n bg_bytes = BytesIO(requests.get(bg_image).content)\n card = Image.open(bg_bytes).convert(\"RGBA\")\n\n width, height = card.size\n if width == 900 and height == 238:\n pass\n else:\n x1 = 0\n y1 = 0\n x2 = width\n nh = math.ceil(width * 0.264444)\n y2 = 0\n\n if nh < height:\n y1 = (height / 2) - 119\n y2 = nh + y1\n\n card = card.crop((x1, y1, x2, y2)).resize((900, 238))\n self.card = card\n \n profile_bytes = BytesIO(requests.get(profile_image).content)\n profile = Image.open(profile_bytes)\n profile = profile.convert('RGBA').resize((180, 180))\n self.profile = profile\n \n if user_status == 'online':\n status = Image.open(online)\n if user_status == 'offline':\n status = Image.open(offline)\n if user_status == 'idle':\n status = Image.open(idle)\n if user_status == 'streaming':\n status = Image.open(streaming)\n if user_status == 'dnd':\n status = Image.open(dnd)\n status = status.convert(\"RGBA\").resize((40,40))\n self.status = status\n \n # ======== Fonts to use =============\n self.font_normal = ImageFont.truetype(font1, 36)\n self.font_small = ImageFont.truetype(font1, 20)\n self.font_signa = ImageFont.truetype(font2, 25)\n\n # ======== Colors ========================\n self.WHITE = (189, 195, 199)\n self.DARK = (252, 179, 63)\n self.YELLOW = (255, 234, 167)\n \n\n def generate(self):\n profile_pic_holder = Image.new(\n \"RGBA\", self.card.size, (255, 255, 255, 0)\n ) # Is used for a blank image so that i can mask\n\n # Mask to crop image\n mask = Image.new(\"RGBA\", self.card.size, 0)\n mask_draw = ImageDraw.Draw(mask)\n mask_draw.ellipse(\n (29, 29, 209, 209), fill=(255, 25, 255, 255)\n ) # The part need to be cropped\n\n # Editing stuff here\n def get_str(xp):\n if xp < 1000:\n return str(xp)\n if xp >= 1000 and xp < 1000000:\n return str(round(xp / 1000, 1)) + \"K\"\n if xp > 1000000:\n return str(round(xp / 1000000, 1)) + \"M\"\n\n draw = 
ImageDraw.Draw(self.card)\n draw.text((245, 22), self.user_name, self.DARK, font=self.font_normal)\n draw.text((245, 98), f\"Rank #{self.user_position}\", self.DARK, font=self.font_small)\n draw.text((245, 123), f\"Level {self.level}\", self.DARK, font=self.font_small)\n draw.text(\n (245, 150),\n f\"Exp {get_str(self.user_xp)}/{get_str(self.next_xp)}\",\n self.DARK,\n font=self.font_small,\n )\n\n # Adding another blank layer for the progress bar\n # Because drawing on card dont make their background transparent\n blank = Image.new(\"RGBA\", self.card.size, (255, 255, 255, 0))\n blank_draw = ImageDraw.Draw(blank)\n blank_draw.rectangle(\n (245, 185, 750, 205), fill=(255, 255, 255, 0), outline=self.DARK\n )\n\n xpneed = self.next_xp - self.current_xp\n xphave = self.user_xp - self.current_xp\n\n current_percentage = (xphave / xpneed) * 100\n length_of_bar = (current_percentage * 4.9) + 248\n\n blank_draw.rectangle((248, 188, length_of_bar, 202), fill=self.DARK)\n blank_draw.ellipse((20, 20, 218, 218), fill=(255, 255, 255, 0), outline=self.DARK)\n\n profile_pic_holder.paste(self.profile, (29, 29, 209, 209))\n\n pre = Image.composite(profile_pic_holder, self.card, mask)\n pre = Image.alpha_composite(pre, blank)\n\n # Status badge\n # Another blank\n blank = Image.new(\"RGBA\", pre.size, (255, 255, 255, 0))\n blank.paste(self.status, (169, 169))\n\n final = Image.alpha_composite(pre, blank)\n final_bytes = BytesIO()\n final.save(final_bytes, 'png')\n final_bytes.seek(0)\n return final_bytes\n","sub_path":"disrank/thkc_disrank.py","file_name":"thkc_disrank.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"578146113","text":"import scr.FormatFunctions as Format\nimport scr.SamplePathClasses as PathCls\nimport scr.FigureSupport as Figs\nimport scr.StatisticalClasses as Stat\nimport Parameters as P\n\n\ndef print_outcomes(sim_output, strategy_name):\n \"\"\" prints the 
outcomes of a simulated cohort under steady state\n :param sim_output: output of a simulated cohort\n :param strategy_name: the name of the selected therapy\n \"\"\"\n\n # mean and confidence interval text of patient survival time\n total_reward_CI_text = Format.format_estimate_interval(\n estimate=sim_output.get_ave_reward(),\n interval=sim_output.get_CI_reward(alpha=P.ALPHA),\n deci=1)\n\n # print survival time statistics\n print(strategy_name)\n print(\" Estimate of average reward and {:.{prec}%} confidence interval:\".format(1 - P.ALPHA, prec=0),\n total_reward_CI_text)\n\n\ndef draw_survival_curves_and_histograms(sim_output_no_drug, sim_output_with_drug):\n \"\"\" draws the survival curves and the histograms of survival time\n :param sim_output_no_drug: output of a cohort simulated when drug is not available\n :param sim_output_with_drug: output of a cohort simulated when drug is available\n \"\"\"\n\n # get survival curves of both treatments\n survival_curves = [\n sim_output_no_drug.get_survival_curve(),\n sim_output_with_drug.get_survival_curve()\n ]\n\n # graph survival curve\n PathCls.graph_sample_paths(\n sample_paths=survival_curves,\n title='Survival curve',\n x_label='Simulation time step',\n y_label='Number of alive patients',\n legends=['No Drug', 'With Drug']\n )\n\n # histograms of survival times\n set_of_survival_times = [\n sim_output_no_drug.get_survival_times(),\n sim_output_with_drug.get_survival_times()\n ]\n\n # graph histograms\n Figs.graph_histograms(\n data_sets=set_of_survival_times,\n title='Histogram of patient survival time',\n x_label='Survival time',\n y_label='Counts',\n bin_width=1,\n legend=['No Drug', 'With Drug'],\n transparency=0.6\n )\n\n\ndef print_comparative_outcomes(sim_output_fair, sim_output_unfair):\n \"\"\" prints expected and percentage increase in survival time when drug is available\n :param sim_output_no_drug: output of a cohort simulated when drug is not available\n :param sim_output_with_drug: output of a 
cohort simulated when drug is available\n \"\"\"\n\n # increase in survival time\n increase = Stat.DifferenceStatIndp(\n name='Increase in survival time',\n x=sim_output_unfair.get_rewards(),\n y_ref=sim_output_fair.get_rewards()\n )\n # estimate and CI\n estimate_CI = Format.format_estimate_interval(\n estimate=increase.get_mean(),\n interval=increase.get_t_CI(alpha=P.ALPHA),\n deci=1\n )\n print(\"Average increase in reward and {:.{prec}%} confidence interval:\".format(1 - P.ALPHA, prec=0),\n estimate_CI)\n","sub_path":"SupportSteady.py","file_name":"SupportSteady.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"467987413","text":"#!usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nfrom tkinter import *\r\n\r\n\r\na_total_money=10000\r\nb_total_money=10000\r\n\r\nminimum_profit=100\r\n\r\nb_return_ratio_max=10\r\n\r\ndef calculate_results(a_return_ratio):\r\n\tresult=\"\"\r\n\tcount = 0\r\n\tfor i in np.arange(0.1, b_return_ratio_max, 0.02):\r\n\t\tfor a_money in range(100,a_total_money,100):\r\n\t\t\tfor b_money in range(100,b_total_money,100):\r\n\r\n\t\t\t\ta_return_ratio = float(a_return_ratio)\r\n\t\t\t\ta_money = float(a_money)\r\n\t\t\t\tb_money = float(b_money)\r\n\r\n\r\n\t\t\t\ta_win=(a_return_ratio*a_money)-(a_money+b_money)\r\n\t\t\t\tb_win=(b_money*i)-(a_money+b_money)\r\n\r\n\t\t\t\tif a_win>minimum_profit and b_win>minimum_profit :\r\n\r\n\t\t\t\t\tresult+=\"B最小对冲倍数==>>\"+str(i)\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tresult+=\"A_money==>>\"+str(a_money)\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tresult+=\"B_money==>>\"+str(b_money)\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tresult+=\"如果A胜利==>>\"+str(a_win)\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tresult+=\"如果B胜利==>>\"+str(b_win)\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tresult+=\"================\"\r\n\t\t\t\t\tresult+=\"\\n\"\r\n\t\t\t\t\tcount+=1\r\n\t\t\t\t\tif 
count>=20:\r\n\t\t\t\t\t\treturn result\r\n\treturn result\r\n\r\ndef on_submit(event=None):\r\n\tinput_text = input_box.get()\r\n\tresult_text = calculate_results(input_text)\r\n\tresult_textbox.delete(1.0, END) # 清空文本框内容\r\n\tresult_textbox.insert(END, result_text)\r\n\t# 聚焦\r\n\tinput_box.focus_set()\r\n\tinput_box.select_range(0, END)\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\twindow = Tk()\r\n\twindow.title(\"倍率计算器\")\r\n\r\n\tframe_with_margin = Frame(window, pady=10)\r\n\tframe_with_margin.pack()\r\n\r\n\tlabel = Label(frame_with_margin, text=\"输入A倍率:\")\r\n\tlabel.grid(row=0, column=0)\r\n\t# label.pack()\r\n\r\n\tinput_box = Entry(frame_with_margin)\r\n\tinput_box.grid(row=0, column=1)\r\n\tinput_box.focus_set() # 设置输入框为默认聚焦\r\n\r\n\tsubmit_button = Button(frame_with_margin, text=\"计算\", command=on_submit)\r\n\tsubmit_button.grid(row=0, column=2)\r\n\r\n\tinput_box.bind(\"\", on_submit)\r\n\r\n\r\n\tresult_frame = Frame(frame_with_margin, padx=8, pady=10) # 创建带有外边距的 Frame\r\n\tresult_frame.grid(row=1, columnspan=3)\r\n\r\n\tresult_textbox = Text(result_frame, height=45, width=54)\r\n\tresult_textbox.pack()\r\n\r\n\t# 获取屏幕的宽高\r\n\tscreen_width = window.winfo_screenwidth()\r\n\tscreen_height = window.winfo_screenheight()\r\n\r\n\t# 计算窗口位置使其居中\r\n\twindow_width = 400\r\n\twindow_height = 650\r\n\tx = (screen_width - window_width) // 2\r\n\ty = (screen_height - window_height) // 2\r\n\twindow.geometry(f\"{window_width}x{window_height}+{x}+{y}\")\r\n\r\n\twindow.mainloop()","sub_path":"球/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"500426847","text":"#######################################################################\n# Copyright (C) #\n# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\n# 2016 Kenta Shimada(hyperkentakun@gmail.com) #\n# Permission given to modify the code as long as you keep this #\n# declaration at 
the top #\n#######################################################################\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\n\n\n\nclass CliffWalking:\n # world height\n WORLD_HEIGHT = 4\n \n # world width\n WORLD_WIDTH = 12\n \n # probability for exploration\n EPSILON = 0.1\n \n # all possible actions\n ACTION_UP = 0\n ACTION_DOWN = 1\n ACTION_LEFT = 2\n ACTION_RIGHT = 3\n ACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]\n \n # initial state action pair values\n START = [3, 0]\n GOAL = [3, 11]\n \n def __init__(self,penalty = -100):\n self.penalty = penalty\n \n def step(self,state, action):\n i, j = state\n if action == self.ACTION_UP:\n next_state = [max(i - 1, 0), j]\n elif action == self.ACTION_LEFT:\n next_state = [i, max(j - 1, 0)]\n elif action == self.ACTION_RIGHT:\n next_state = [i, min(j + 1, self.WORLD_WIDTH - 1)]\n elif action == self.ACTION_DOWN:\n next_state = [min(i + 1, self.WORLD_HEIGHT - 1), j]\n else:\n assert False\n \n reward = -1\n \n if (action == self.ACTION_DOWN and i == 2 and 1 <= j <= 10) or (\n action == self.ACTION_RIGHT and state == self.START):\n reward = self.penalty\n next_state = self.START\n \n \n return next_state, reward\n \n # choose an action based on epsilon greedy algorithm\n def choose_action(self,state, q_value):\n if np.random.binomial(1, self.EPSILON) == 1:\n return np.random.choice(self.ACTIONS)\n else:\n ## values here means Q(s',a')\n values_ = q_value[state[0], state[1], :]\n return np.random.choice([action for action, value_ in enumerate(values_) if value_ == np.max(values_)])\n \n # an episode with Sarsa\n # @q_value: values for state action pair, will be updated\n # @expected: if True, will use expected Sarsa algorithm\n # @step_size: step size for updating\n # @return: total rewards within this episode\n def sarsa(self,q_value, expected=False, step_size = 0.9):\n state = self.START\n action = self.choose_action(state, 
q_value)\n rewards = 0.0\n while state != self.GOAL:\n next_state, reward = self.step(state, action)\n next_action = self.choose_action(next_state, q_value)\n rewards += reward\n if not expected:\n target = q_value[next_state[0], next_state[1], next_action]\n else:\n # calculate the expected value of new state\n target = 0.0\n q_next = q_value[next_state[0], next_state[1], :]\n best_actions = np.argwhere(q_next == np.max(q_next))\n for action_ in self.ACTIONS:\n if action_ in best_actions:\n target += ((1.0 - self.EPSILON) / len(best_actions) + self.EPSILON / len(self.ACTIONS)) * q_value[next_state[0], next_state[1], action_]\n else:\n target += self.EPSILON / len(self.ACTIONS) * q_value[next_state[0], next_state[1], action_]\n target *= 0.7\n q_value[state[0], state[1], action] += step_size * (\n reward + target - q_value[state[0], state[1], action])\n state = next_state\n action = next_action\n return rewards\n \n # an episode with Q-Learning\n # @q_value: values for state action pair, will be updated\n # @step_size: step size for updating\n # @return: total rewards within this episode\n def q_learning(self,q_value,step_size = 0.9):\n state = self.START\n rewards = 0.0\n while state != self.GOAL:\n action = self.choose_action(state, q_value)\n next_state, reward = self.step(state, action)\n rewards += reward\n # Q-Learning update\n q_value[state[0], state[1], action] += step_size * (\n reward + step_size * np.max(q_value[next_state[0], next_state[1], :]) -\n q_value[state[0], state[1], action])\n state = next_state\n return rewards\n \n \n \n \n \n def double_q_learning(self,q_value_1,q_value_2, step_size=0.9):\n state = self.START\n rewards = 0.0\n while state != self.GOAL:\n action = self.choose_action(state, np.add(q_value_1,q_value_2))\n next_state, reward = self.step(state, action)\n rewards += reward\n if (np.random.binomial(1, 0.5) == 1):\n actionFromQ1 = np.argmax(q_value_1[next_state[0],next_state[1],:])\n q_value_1[state[0], state[1], action] += 
step_size * (\n reward + (q_value_2[next_state[0], next_state[1], actionFromQ1]) -\n q_value_1[state[0], state[1], action])\n else:\n actionFromQ2 = np.argmax(q_value_2[next_state[0],next_state[1],:])\n q_value_2[state[0], state[1], action] += step_size * (\n reward + (q_value_1[next_state[0], next_state[1], actionFromQ2]) -\n q_value_2[state[0], state[1], action])\n state = next_state\n return rewards\n \n \n # print optimal policy\n def print_optimal_policy(self,q_value):\n optimal_policy = []\n for i in range(0, self.WORLD_HEIGHT):\n optimal_policy.append([])\n for j in range(0, self.WORLD_WIDTH):\n if [i, j] == self.GOAL:\n optimal_policy[-1].append('G')\n continue\n bestAction = np.argmax(q_value[i, j, :])\n if bestAction == self.ACTION_UP:\n optimal_policy[-1].append('U')\n elif bestAction == self.ACTION_DOWN:\n optimal_policy[-1].append('D')\n elif bestAction == self.ACTION_LEFT:\n optimal_policy[-1].append('L')\n elif bestAction == self.ACTION_RIGHT:\n optimal_policy[-1].append('R')\n for row in optimal_policy:\n print(row)\n \n\n def TestSarsa(self,episodes = 10000,stepSize = 0.5):\n # episodes of each run\n rewards_sarsa = np.zeros(episodes)\n q_sarsa = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, 4))\n for i in range(0, episodes):\n rewards_sarsa[i] += self.sarsa(q_sarsa,step_size = stepSize)\n \n # averaging over independt runs\n# rewards_sarsa /= runs \n print('Sarsa Optimal Policy:')\n self.print_optimal_policy(q_sarsa)\n \n \n def TestExpectedSarsa(self,episodes = 5000,stepSize = 0.5):\n # episodes of each run\n rewards_sarsa = np.zeros(episodes)\n q_sarsa = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, 4))\n for i in range(0, episodes):\n rewards_sarsa[i] += self.sarsa(q_sarsa,expected = True,step_size = stepSize)\n \n # averaging over independt runs\n# rewards_sarsa /= runs \n print('Expected Sarsa Optimal Policy:')\n self.print_optimal_policy(q_sarsa)\n \n def TestQLearning(self,episodes = 5000,stepSize = 0.9):\n rewards_q_learning = 
np.zeros(episodes)\n q_q_learning = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, 4))\n for i in range(0, episodes):\n rewards_q_learning[i] += self.q_learning(q_q_learning,step_size = stepSize)\n# rewards_q_learning /= runs\n \n print('Q-Learning Optimal Policy:')\n self.print_optimal_policy(q_q_learning)\n \n \n \n def TestDoubleQLearning(self,episodes = 30000,stepSize = 0.9):\n rewards_q_double_learning = np.zeros(episodes)\n q_double_1 = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, 4))\n q_double_2 = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, 4))\n for i in range(0, episodes):\n rewards_q_double_learning += self.double_q_learning(q_double_1,q_double_2,step_size = stepSize)\n# rewards_q_double_learning /= runs\n \n print('Double-Q-Learning Optimal Policy:')\n self.print_optimal_policy(np.add(q_double_1,q_double_2))\n\n\nif __name__ == '__main__':\n# figure_6_6()\n game = CliffWalking()\n game.TestSarsa()\n game.TestExpectedSarsa()\n","sub_path":"Cliff-Walking/cliff_walking.py","file_name":"cliff_walking.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"179601924","text":"import pygame\r\nimport random\r\nimport math\r\nimport numpy as np\r\nfrom collections import namedtuple\r\n\r\nSize = namedtuple('Size', 'w h')\r\n\r\n# Window\r\nWINDOW = Size(400, 1000)\r\nWINDOW_RECT = pygame.Rect(0, 0, WINDOW.w, WINDOW.h)\r\nINDENT = 15\r\n\r\n# Game\r\nSPEED = 200\r\nBLACK = (0, 0, 0)\r\n\r\n\r\nclass CapsuleLanderMP:\r\n __size = Size(70, 70)\r\n __engine_power = 1\r\n __side_engine_power = 2\r\n __gravity = 2\r\n\r\n def __init__(self):\r\n # Render stuff\r\n self.display = None\r\n self.clock = None\r\n self.font = None\r\n self.is_init = False\r\n\r\n # Capsule\r\n self.img = None\r\n self.x = random.randint(self.__size.w + INDENT, WINDOW.w - self.__size.w - INDENT)\r\n self.y = self.__size.h + INDENT\r\n self.angle = random.randint(0, 359)\r\n self.distance = 
0.0\r\n self.falling_speed = 0\r\n self.state = np.array([self.x,\r\n self.y,\r\n self.angle,\r\n self.distance,\r\n self.falling_speed])\r\n self.capsule_rect = pygame.Rect(self.x, self.y, 70, 70)\r\n self.capsule_rect.center = (self.x, self.y)\r\n\r\n # World\r\n self.surface_rect = pygame.Rect(0, WINDOW.h - 100, WINDOW.w, WINDOW.h)\r\n self.action = np.zeros(4)\r\n\r\n def step(self, action):\r\n \"\"\"Play game step\"\"\"\r\n assert 0 <= action < self.action.size\r\n\r\n # Update capsule rect\r\n self.capsule_rect.center = (self.x, self.y)\r\n\r\n # Apply action\r\n prev_angle = self.angle\r\n prev_y = self.y\r\n if action == 0:\r\n self.__set_angle(self.__side_engine_power)\r\n self.falling_speed = self.__gravity\r\n elif action == 1:\r\n self.x, self.y = calculate_new_pos(self.x, self.y, self.__engine_power, self.angle)\r\n self.falling_speed = self.__gravity - (prev_y - self.y)\r\n elif action == 2:\r\n self.__set_angle(-self.__side_engine_power)\r\n self.falling_speed = self.__gravity\r\n else:\r\n self.falling_speed = self.__gravity\r\n\r\n # Calculate y position and distance to the ground\r\n self.y += self.__gravity\r\n self.distance = self.surface_rect.y - self.y\r\n\r\n # Calculate reward\r\n if self.distance < 200: # with distance 200 capsule will have time to land correctly\r\n reward = self.__get_reward(prev_angle)\r\n reward += 2 if self.__get_score() else -2\r\n else:\r\n reward = -self.__get_reward(prev_angle)\r\n if self.falling_speed > self.__gravity:\r\n reward += self.falling_speed - self.__gravity\r\n\r\n # Check if is terminal\r\n is_terminal = False\r\n if self.__is_colliding(self.surface_rect):\r\n is_terminal = True\r\n score = self.__get_score()\r\n reward = score if score else -100\r\n if self.__is_out_of_bounds():\r\n is_terminal = True\r\n reward = -100\r\n\r\n return self.__get_norm_state(), reward, is_terminal, {}\r\n\r\n def __set_angle(self, val):\r\n \"\"\"Set angle and save values from 0 to 359\"\"\"\r\n self.angle += val\r\n 
if self.angle > 359:\r\n self.angle = -1 + self.angle - 359\r\n if self.angle < 0:\r\n self.angle = 359 + self.angle\r\n\r\n def __is_colliding(self, rect):\r\n return self.capsule_rect.colliderect(rect)\r\n\r\n def __is_out_of_bounds(self):\r\n return not self.capsule_rect.colliderect(WINDOW_RECT)\r\n\r\n def __get_score(self):\r\n \"\"\"Get score based on capsule angle where 90 is North\"\"\"\r\n if 40 <= self.angle <= 140:\r\n if self.angle <= 90:\r\n return 100 * (self.angle / 90)\r\n else:\r\n return 100 * (1 - (self.angle - 90) / 90)\r\n else:\r\n return 0\r\n\r\n def __get_reward(self, previous_angle):\r\n \"\"\"Get reward based on capsule angle where 90 is North\"\"\"\r\n if self.angle != previous_angle: # check if capsule got rotated\r\n if 90 < self.angle <= 270: # if capsule nose points left\r\n return 1 if previous_angle > self.angle else -1 # we will reward going clockwise\r\n else: # if our nose points right\r\n if abs(self.angle - previous_angle) > self.__side_engine_power: # step through 359 0 threshold\r\n return 1 if previous_angle > self.angle else -1 # reward going counterclockwise\r\n return 1 if previous_angle < self.angle else -1 # reward going counterclockwise\r\n return 0\r\n\r\n def __get_norm_state(self):\r\n \"\"\"Normalize state values in range -1 to 1\"\"\"\r\n range = [-1, 1]\r\n n_state = [np.interp(self.x, [0 - self.__size.w, WINDOW.w + self.__size.w], range),\r\n np.interp(self.y, [0, self.surface_rect.y], range),\r\n np.interp(self.angle, [0, 359], range),\r\n np.interp(self.distance, [self.__size.h / 2, self.surface_rect.y], range),\r\n np.interp(self.falling_speed, [self.__gravity - self.__engine_power,\r\n self.__gravity + self.__engine_power], range)]\r\n return np.array(n_state, dtype=float)\r\n\r\n def get_random_action(self):\r\n return random.randint(0, self.action.size - 1)\r\n\r\n def reset(self):\r\n \"\"\"Reset the game\"\"\"\r\n self.x = random.randint(self.__size.w + INDENT, WINDOW.w - self.__size.w - INDENT)\r\n 
self.y = self.__size.h + INDENT\r\n self.angle = random.randint(0, 359)\r\n self.distance = 0.0\r\n self.falling_speed = 0.0\r\n self.state = np.array([self.x,\r\n self.y,\r\n self.angle,\r\n self.distance,\r\n self.falling_speed])\r\n self.capsule_rect.center = (self.x, self.y)\r\n\r\n return self.step(3)[0]\r\n\r\n def init_pygame(self):\r\n pygame.init()\r\n self.display = pygame.display.set_mode(WINDOW)\r\n pygame.display.set_caption('Space Landing')\r\n self.clock = pygame.time.Clock()\r\n self.font = pygame.font.Font('arial.ttf', 25)\r\n\r\n self.img = pygame.transform.scale(pygame.image.load('capsule.png'), self.__size)\r\n\r\n self.is_init = True\r\n\r\n def render(self):\r\n if not self.is_init:\r\n self.init_pygame()\r\n\r\n # For closing window without crushing\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n\r\n # Background\r\n self.display.fill((255, 255, 255))\r\n\r\n # Surface\r\n pygame.draw.rect(self.display, BLACK, self.surface_rect)\r\n\r\n # Capsule\r\n r_img, r_rect = rotate(self.img,\r\n self.angle - 90,\r\n (self.x, self.y))\r\n self.display.blit(r_img, r_rect)\r\n\r\n # Statistics\r\n self.__draw_statistics()\r\n\r\n # Pygame\r\n pygame.display.flip()\r\n self.clock.tick(SPEED)\r\n\r\n def close(self):\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n def __draw_statistics(self):\r\n font_h = self.font.size('S')[1] # font height\r\n self.display.blit(self.font.render('Angle: {:.2f}'.format(self.angle), True, BLACK),\r\n (0, 0 + font_h * 0))\r\n self.display.blit(self.font.render('Distance: {:.2f}'.format(self.distance), True, BLACK),\r\n (0, 0 + font_h * 1))\r\n self.display.blit(self.font.render('Falling Speed: {:.2f}'.format(self.falling_speed), True, BLACK),\r\n (0, 0 + font_h * 2))\r\n\r\n\r\n# ------------------------------------------------------------------\r\n# Helpers\r\n# ------------------------------------------------------------------\r\ndef rotate(surface, angle, 
pos):\r\n r_surface = pygame.transform.rotozoom(surface, angle, 1)\r\n r_rect = r_surface.get_rect(center=pos)\r\n return r_surface, r_rect\r\n\r\n\r\n# ------------------------------------------------------------------\r\ndef calculate_new_pos(x, y, speed, angle):\r\n angle_in_radians = angle * math.pi / 180\r\n new_x = x + speed * math.cos(angle_in_radians)\r\n new_y = y - speed * math.sin(angle_in_radians)\r\n return new_x, new_y\r\n","sub_path":"2 DuelingDDQN A3C/environment_mp.py","file_name":"environment_mp.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"217453694","text":"# Definition for a binary tree Link.\n# class TreeLink(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def pathSum(self, root, sum):\n \"\"\"\n :type root: TreeLink\n :type sum: int\n :rtype: List[List[int]]\n \"\"\"\n self.results = []\n self.recurse(root, sum, [])\n return self.results\n \n def recurse(self, Link, sum, partial):\n if not Link:\n return\n if sum==Link.val and Link.left is None and Link.right is None:\n self.results.append(partial+[Link.val])\n else:\n if Link.left:\n self.recurse(Link.left, sum-Link.val, partial+[Link.val])\n if Link.right:\n self.recurse(Link.right, sum-Link.val, partial+[Link.val])\n","sub_path":"leetcode-solutions-master/113_path_sum_ii/path_sum_ii.py","file_name":"path_sum_ii.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"529258846","text":"#! 
C:\\Python36\\python.exe\n# coding:utf-8\n'''\nabout what\n'''\nimport threading\nfrom socket import socket\n\nfrom os import system\n\nimport sys\n\nimport time\n\nhost = \"127.0.0.1\"\nport = 1234\n\n# 发送消息线程执行此方法\ndef sendOutput():\n\n # 源源不断地发送\n while True:\n try:\n msg = input(\"请输入:\")\n clientSocket.send(msg.encode(\"utf-8\"))\n print(\"客户端:%s\" % (msg))\n\n # 如果服务端已断线,则结束发送线程\n except ConnectionResetError:\n print(\"服务端已断开连\")\n break\n\n# 接收消息线程执行此方法\ndef recvInput():\n\n # 源源不断地接收消息\n while True:\n try:\n reply = clientSocket.recv(1024).decode(\"utf-8\")\n print(\"服务端:%s\" % (reply))\n\n # 如果服务端已断线,则结束消息接收线程\n except ConnectionResetError:\n print(\"服务端已断开连\")\n break\n\n\nif __name__ == \"__main__\":\n\n # 创建socket对象、连接服务端\n clientSocket = socket()\n clientSocket.connect((host,port))\n\n # 收发消息都在独立的线程中进行\n threading.Thread(target=sendOutput).start()\n threading.Thread(target=recvInput).start()\n\n # clientSocket.close()\n print(\"main over\")\n time.sleep(10)","sub_path":"day1/1025/demos/W5/day4/00HmClient.py","file_name":"00HmClient.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"347577512","text":"def do_math(left_term, operator, right_term):\n\tif operator == '*':\n\t\treturn left_term * right_term\n\telif operator == '+':\n\t\treturn left_term + right_term\n\telse:\n\t\traise Exception('Unknown operator: ' + operator)\n\t\ndef is_operator(c, operator_priorities):\n\tfor operators in operator_priorities:\n\t\tif c in operators:\n\t\t\treturn True\n\t\t\t\n\treturn False\n\t\ndef evaluate_expression(expression, operator_priorities):\n\tin_parenthesis = 0\n\tsubexpression = None\n\t\n\tterms = []\n\tfor c in expression:\n\t\tterm = None\n\t\t\n\t\tif c == ' ':\n\t\t\tcontinue\t\t\n\t\telif c == ')':\n\t\t\tin_parenthesis -= 1\n\t\t\tif in_parenthesis == 0:\n\t\t\t\tterm = evaluate_expression(' '.join(subexpression), 
operator_priorities)\n\t\t\telse:\n\t\t\t\tsubexpression.append(c)\n\t\telif c == '(':\n\t\t\tin_parenthesis += 1\n\t\t\tif in_parenthesis == 1:\n\t\t\t\tsubexpression = []\n\t\t\telse:\n\t\t\t\tsubexpression.append(c)\n\t\telif in_parenthesis != 0:\n\t\t\tsubexpression.append(c)\n\t\telif is_operator(c, operator_priorities):\n\t\t\tterm = c\n\t\telif c.isdigit():\n\t\t\tterm = int(c)\n\t\telse:\n\t\t\traise Exception(\"Unknown symbol in expression: \" + c)\n\t\t\t\n\t\tif term != None:\n\t\t\tterms.append(term)\n\t\n\tfor operators in operator_priorities:\n\t\tleft_term = None\n\t\tright_term = None\n\t\toperator = ''\n\t\t\n\t\tnew_terms = []\n\t\tfor i, term in enumerate(terms):\n\t\t\tif is_operator(term, operator_priorities):\n\t\t\t\toperator = term\n\t\t\telse:\n\t\t\t\tif left_term == None:\n\t\t\t\t\tleft_term = term\n\t\t\t\telse:\n\t\t\t\t\tright_term = term\n\t\t\t\t\t\n\t\t\tif left_term != None and right_term != None:\n\t\t\t\tif operator in operators:\n\t\t\t\t\tleft_term = do_math(left_term, operator, right_term)\n\t\t\t\t\tright_term = None\n\t\t\t\telse:\n\t\t\t\t\tnew_terms.append(left_term)\n\t\t\t\t\tnew_terms.append(operator)\n\t\t\t\t\t\n\t\t\t\t\tleft_term = right_term\n\t\t\t\t\tright_term = None\n\t\t\t\t\t\n\t\tif left_term != None:\n\t\t\tnew_terms.append(left_term)\n\t\t\t\n\t\tterms = new_terms\n\t\t\t\n\treturn terms[0]\t\t\t\n\t\nsum = 0\nwith open('input.txt') as fd:\n\tlines = fd.readlines()\n\t\n\tfor line in lines:\n\t\tsum += evaluate_expression(line.strip(), [['+', '*']])\n\t\t\nprint(sum)\n","sub_path":"day_18/advent_of_code_day_18_1.py","file_name":"advent_of_code_day_18_1.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"564917903","text":"\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import 
Rectangle\n\nfrom collections import defaultdict\n\nfrom sklearn.decomposition import PCA\n\nimport pickle as pk\nfrom datetime import datetime\n\nfrom skimage.io import imread\n\nfrom PIL import Image, ImageDraw \n\nfrom time import time\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom utils.COCO_Utils.COCO_like_dataset import CocoLikeDataset \n\nfrom query_finder_class import * \n\nimport json\n\n\n\nif __name__ == '__main__' :\n\n # GPU OPTIONS\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)\n\n parser = argparse.ArgumentParser()\n #parser.add_argument('-dataset_name', help='dataset name', type=str, choices=['DocExplore', 'flickrlogos_47'], default='flickrlogos_47')\n #parser.add_argument('-coco_images', help='image directory in coco format', type=str, default = '/mnt/BE6CA2E26CA294A5/Datasets/flickrlogos_47_COCO/images/train')\n #parser.add_argument('-annotation_json', help='image directory in coco format', type=str, default = '/mnt/BE6CA2E26CA294A5/Datasets/flickrlogos_47_COCO/annotations/instances_train.json')\n #parser.add_argument('-query_path', help='path to queries', type=str, default = '/mnt/BE6CA2E26CA294A5/Datasets/flickrlogos_47_COCO/images/queries_train/')\n parser.add_argument('-query_class', help='class of the query', type=str, default = 'adidas_symbol')\n parser.add_argument('-query_instance', help = 'filename of the query', type=str, default = 'random')\n #parser.add_argument('-feat_savedir', help='directory of features database', type=str, default='/home/jeancherubini/Documents/feature_maps')\n parser.add_argument('-principal_components', help='amount of components kept (depth of feature vectors)', type=int, default=64)\n parser.add_argument('-model', help='model used for the convolutional features', type=str, choices=['resnet', 'VGG16'], default='VGG16') \n 
parser.add_argument('-layer', help='resnet layer(s) used for extraction, they can be:\\n for VGG: {0}\\n for resnet:{1}\\n For multiple layers, a semicolon \",\" can be used to separate '.format(\n 'conv1_relu, conv2_block3_out, conv3_block4_out, conv4_block6_out, conv5_block3_out',\n 'block1_conv2, block2_conv2, block3_conv3, block4_conv3, block5_conv3'), type=str, default='block3_conv3') \n parser.add_argument('-p', help='max points collected from each heatmap', type=int, default=15)\n parser.add_argument('-cfg', help='config file with paths', type=str)\n parser.add_argument('-all', help='search all dataset queries or only one of them per class', type=int, default=0)\n\n \n params = parser.parse_args()\n\n #Complete argswith routes from config file\n with open(params.cfg) as json_data_file:\n cfg_data = json.load(json_data_file)\n \n params.dataset_name = cfg_data['dataset_name']\n params.coco_images = cfg_data['coco_images']\n params.annotation_json = cfg_data['annotation_json'] \n params.query_path = cfg_data['query_path']\n params.feat_savedir = cfg_data['feat_savedir']\n\n\n finder = query_finder()\n\n if not os.path.isdir(params.feat_savedir + '/' + params.dataset_name + '/' + params.model + '_transformations/' + str(params.principal_components) + '/detections'):\n os.makedirs(params.feat_savedir +'/' + params.dataset_name + '/' + params.model + '_transformations/' + str(params.principal_components) + '/detections')\n \n if not os.path.isdir(params.feat_savedir + '/' + params.dataset_name + '/' + params.model + '_' + params.layer + '_transformations/' + str(params.principal_components) + '/detections'):\n os.makedirs(params.feat_savedir +'/' + params.dataset_name + '/' + params.model + '_' + params.layer + '_transformations/' + str(params.principal_components) + '/detections')\n\n \n time_file_transformations = open('{0}/{1}/{2}/{3}/detections/time.txt'.format(params.feat_savedir, params.dataset_name, params.model + '_transformations', 
params.principal_components),'w')\n time_file_transformations.close()\n\n for query_class in os.listdir(params.query_path):\n for query_instance in sorted(os.listdir(params.query_path + '/' + query_class)):\n query = finder.get_query(params, query_class, query_instance)\n layer_to_use = finder.select_scale_query(params, query)\n params.layer = layer_to_use\n if not os.path.isdir(params.feat_savedir + '/' + params.dataset_name + '/' + params.model + '_' + params.layer + '_transformations/' + str(params.principal_components) + '/detections'):\n os.makedirs(params.feat_savedir +'/' + params.dataset_name + '/' + params.model + '_' + params.layer + '_transformations/' + str(params.principal_components) + '/detections')\n print('layer_to_use: ', layer_to_use)\n queries_transformated = finder.get_query_transformations(query)\n finder.search_query_transformations(params, query_class, query_instance, queries_transformated)\n #get detection results to resume in one folder\n results_query = open('{0}/{1}/{2}_transformations/{3}/detections/{4}/{5}.txt'.format(params.feat_savedir, params.dataset_name, params.model + '_' + params.layer, params.principal_components, query_class, query_instance.replace('.png','').replace('.jpg','')),'r')\n \n \n\n if not os.path.isdir(params.feat_savedir + '/' + params.dataset_name + '/' + params.model + '_transformations/' + str(params.principal_components) + '/detections/'+query_class):\n os.makedirs(params.feat_savedir + '/' + params.dataset_name + '/' + params.model + '_transformations/' + str(params.principal_components) + '/detections/'+query_class)\n\n results_transformations = open('{0}/{1}/{2}/{3}/detections/{4}/{5}.txt'.format(params.feat_savedir, params.dataset_name, params.model + '_transformations', params.principal_components, query_class, query_instance.replace('.png','').replace('.jpg','')),'w')\n for line in results_query.readlines():\n results_transformations.write(line)\n\n #Query times\n times_file = 
open('{0}/{1}/{2}_transformations/{3}/detections/time.txt'.format(params.feat_savedir, params.dataset_name, params.model + '_' + params.layer, params.principal_components),'r')\n time_file_transformations = open('{0}/{1}/{2}/{3}/detections/time.txt'.format(params.feat_savedir, params.dataset_name, params.model + '_transformations', params.principal_components),'a')\n\n\n for line in times_file.readlines():\n instance = line.split('\\t')[0]\n if instance == query_instance:\n time_file_transformations.write(params.layer + '\\t' + line)\n\n\n time_file_transformations.close()\n\n if not params.all:\n break\n \n\n","sub_path":"search_all_dataset_transformations.py","file_name":"search_all_dataset_transformations.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"464706716","text":"\r\n\r\nclass Category:\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n self.amount = 0\r\n self.ledger = []\r\n\r\n def deposit(self, amount, description=\"\"):\r\n self.amount += amount\r\n self.ledger.append({\"amount\": amount, \"description\": description})\r\n\r\n def withdraw(self, amount, description=\"\"):\r\n if self.check_funds(amount):\r\n self.amount -= amount\r\n self.ledger.append({\"amount\": amount * -1, \"description\": description})\r\n return True\r\n return False\r\n\r\n def get_balance(self):\r\n return self.amount\r\n\r\n def check_funds(self, amount):\r\n if self.get_balance() >= amount:\r\n return True\r\n return False\r\n\r\n\r\n def transfer(self, amount, category):\r\n if self.check_funds(amount):\r\n self.withdraw(amount, \"Transfer to \" + category.name)\r\n category.deposit(amount, \"Transfer from \" + self.name)\r\n return True\r\n return False\r\n\r\n def __str__(self):\r\n title = self.name.center(30, \"*\") + \"\\n\"\r\n item = ''\r\n i = 1\r\n for i in range (len(self.ledger)):\r\n item += '{:23.23}'.format(self.ledger[i][\"description\"]) + 
'{:>7.2f}'.format(self.ledger[i][\"amount\"]) + \"\\n\"\r\n\r\n total = self.get_balance()\r\n output = title + item + \"Total: \" + str(total)\r\n return output\r\n\r\n def get_withdrawls(self):\r\n total = 0\r\n for item in self.ledger:\r\n if item[\"amount\"] < 0:\r\n total += item[\"amount\"]\r\n return total\r\n\r\n\r\ndef truncate(n):\r\n multiplier = 10\r\n return int(n * multiplier) / multiplier\r\n\r\ndef getTotals(categories):\r\n total = 0\r\n breakdown = []\r\n for category in categories:\r\n total += category.get_withdrawls()\r\n breakdown.append(category.get_withdrawls())\r\n rounded = list(map(lambda x: truncate(x/total), breakdown))\r\n return rounded\r\n\r\ndef create_spend_chart(categories):\r\n \"\"\"Create spend chart that takes a list of categories as an argument. It should return\r\n a string that is a bar chart.\"\"\"\r\n title = \"Percentage spent by category\\n\"\r\n i = 100\r\n totals = getTotals(categories)\r\n while i >= 0:\r\n cat_spaces = \" \"\r\n for total in totals:\r\n if total * 100 >= i:\r\n cat_spaces += \"o \"\r\n else:\r\n cat_spaces += \" \"\r\n title += str(i).rjust(3) + \"|\" + cat_spaces + (\"\\n\")\r\n i -= 10\r\n\r\n dashes = \"-\" + \"---\"*len(categories)\r\n names = []\r\n x_axis = \"\"\r\n for category in categories:\r\n names.append(category.name)\r\n\r\n maxi = max(names, key=len)\r\n\r\n for x in range(len(maxi)):\r\n nameStr = ' '*5\r\n for name in names:\r\n if x >= len(name):\r\n nameStr += \" \"\r\n else:\r\n nameStr += name[x] + \" \"\r\n if(x != len(maxi) - 1):\r\n nameStr += '\\n'\r\n\r\n x_axis += nameStr\r\n\r\n title += dashes.rjust(len(dashes)+4) + \"\\n\" + x_axis\r\n return title\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"118840239","text":"#!/usr/local/bin/python2.7\n# encoding: utf-8\n\nimport sys\nfrom twisted.python.util 
import println\nfrom org.mbs3.pyrax.devops import Challenge6, Challenge8\n\ndef main(argv=None): # IGNORE:C0111\n '''Command line options.'''\n\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv)\n \n for a in argv:\n println(a)\n\n if(len(argv) <= 1):\n println(\"No arguments, exiting\")\n elif(argv[1] == \"challenge6\"):\n c = Challenge6.Challenge6()\n c.challenge(argv[2:])\n elif(argv[1] == \"challenge8\"):\n c = Challenge8.Challenge8()\n c.challenge(argv[2:])\n else:\n println(\"I don't recognize your argument %s \" % argv[0])\n \nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"python-pyrax/src/Console.py","file_name":"Console.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"389492393","text":"from django.db import models\n\n# Create your models here.\nfrom django.conf import settings\nfrom django.utils.timezone import now\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass OAuthUser(models.Model):\n author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Пользователь', blank=True, null=True,\n on_delete=models.CASCADE)\n openid = models.CharField(max_length=50)\n nikename = models.CharField(max_length=50, verbose_name='Ник')\n token = models.CharField(max_length=150, null=True, blank=True)\n picture = models.CharField(max_length=350, blank=True, null=True)\n type = models.CharField(blank=False, null=False, max_length=50)\n email = models.CharField(max_length=50, null=True, blank=True)\n matedata = models.TextField(null=True, blank=True)\n created_time = models.DateTimeField('Время создания', default=now)\n last_mod_time = models.DateTimeField('Время редактирования', default=now)\n\n def __str__(self):\n return self.nikename\n\n def get_info(self):\n return \"author: %s, openid: %s, nikename: %s, token: %s, picture: %s, type: %s, email: %s, matedata: %s\" % 
(self.author, self.openid,\n self.nikename, self.token, self.picture, self.type, self.email, self.matedata)\n\n class Meta:\n verbose_name = 'oauth user'\n verbose_name_plural = verbose_name\n ordering = ['-created_time']\n\n\nclass OAuthConfig(models.Model):\n TYPE = (\n ('weibo', 'Weibo'),\n ('google', 'Google'),\n ('github', 'GitHub'),\n ('vk', 'VK'),\n ('facebook', 'FaceBook'),\n ('qq', 'QQ'),\n )\n type = models.CharField('Тип', max_length=10, choices=TYPE, default='a')\n appkey = models.CharField(max_length=200, verbose_name='AppKey')\n appsecret = models.CharField(max_length=200, verbose_name='AppSecret')\n callback_url = models.CharField(max_length=200, verbose_name='Обратный адрес', blank=False, default='')\n is_enabled = models.BooleanField('Включен', default=True, blank=False, null=False)\n created_time = models.DateTimeField('Время создания', default=now)\n last_mod_time = models.DateTimeField('Время редактирования', default=now)\n\n def clean(self):\n if OAuthConfig.objects.filter(type=self.type).exclude(id=self.id).count():\n raise ValidationError(_(self.type + 'Уже существует'))\n\n def __str__(self):\n return self.type\n\n class Meta:\n verbose_name = 'oauth config'\n verbose_name_plural = verbose_name\n ordering = ['-created_time']\n","sub_path":"oauth/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"232100636","text":"import sys\nfrom collections import deque\n\ndef input_data():\n readl = sys.stdin.readline\n S, E = map(int, readl().split())\n return S, E\n\n\n\n# 입력받는 부분\nS,E = input_data()\n\n\n## 소수면 1 아니면 0\ndef is_prime(n):\n for i in range(2,n):\n if n%i==0:\n\n return 0\n\n return 1\n\n\ndef is_prime2(n):\n pass\n\n\n\n\n## 한자리수 차이나면 1 아니면 0\ndef digit(a,b):\n a = str(a)\n b = str(b)\n count = 0\n for i in range(4):\n if a[i]!=b[i]:\n count+=1\n if count>=2 : return 0\n return 1\n\n\nprime=[]\n\nfor x in 
range(1000,10000):\n if is_prime(x)==1:\n prime.append(x)\n\n\nchk = [False]*10000\n\n\ndef bfs():\n q = deque()\n\n q.append((S,0))\n\n chk[S]=True\n\n while q:\n n,d= q.popleft()\n\n for x in prime:\n\n ## 방문처리\n if chk[x]==True : continue\n ## 자리수 차이\n if digit(x,n)==0 : continue\n ## 소수처리\n if is_prime(x)==0: continue\n ## 종료처리\n if x == E: return d+1\n chk[x]=True\n q.append((x,d+1))\n\n return -1\n\n\nprint(bfs())\n\n\n","sub_path":"problems/소수와 함꼐하는 여행.py","file_name":"소수와 함꼐하는 여행.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"652129406","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-01-03 18:48:21\n# @Author : Ying Sun\n# @Link : Ying.example.com\n# @Version : 0.1\n\nimport commands\n\n\ndef monitor(first_invoke=1):\n monitor_dic = {\n 'SwapUsage': 'percentage',\n 'MemUsage': 'percentage',\n }\n\n shell_command = \"grep 'MemTotal\\|MemFree\\|Buffers\\|^Cached\\|SwapTotal\\|SwapFree' /proc/meminfo\"\n status,result = commands.getstatusoutput(shell_command)\n if status != 0:\n value_dic = {'status': status}\n else:\n value_dic = {'status': status}\n for i in result.split('kB\\n'):\n key = i.split()[0].strip(':')\n value = i.split()[1]\n value_dic[key] = value\n\n if monitor_dic['SwapUsage'] == 'percentage':\n pass\n value_dic['SwapUsage'] = int(\n value_dic['SwapTotal']) - int(value_dic['SwapFree'])\n MemUsage = int(value_dic['MemTotal']) - int(value_dic['MemFree']) + \\\n int(value_dic['Buffers']) + int(value_dic['Cached'])\n\n if monitor_dic['MemUsage'] == 'percentage':\n value_dic['MemUsage_p'] = str(int(MemUsage) * 100 / int(value_dic['MemTotal']))\n value_dic['MemUsage'] = MemUsage\n\n return value_dic\n","sub_path":"daya13/monitor_client/plugins/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"477232001","text":"#-*- coding: utf-8 -*-\nfrom django.shortcuts import render,render_to_response,RequestContext\nfrom django.http import HttpResponse,HttpResponseRedirect,Http404\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom register.forms import puppet_from,UserFrom,puppet_admin_modForm,salt_adminmodForm\nfrom register.models import puppet_admin,puppet_host,salt_admin\nfrom django.core.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.middleware.csrf import get_token\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\ndef login(request):\n if request.method == 'POST':\n form = UserFrom()\n if form.is_valid():\n username = request.POST.get('usernmae','')\n password = request.POST.get('password','')\n user = auth.authenticate(username=username,password=password)\n if user is not None and user.is_active:\n #request.session['user_id'] = user.id\n csrf_token = get_token(request)\n c = { }\n c.update(csrf(request))\n return render_to_response(\"index.html\", c)\n #return HttpResponseRedirect('/index/')\n else:\n return render_to_response('error.html')\n\n else:\n form = UserFrom()\n #\n return render_to_response('login.html',{'form':form},context_instance=RequestContext(request))\n #except UserFrom.DoesNotExist:\n # return HttpResponse(\"Your username and password didn't match.\")\ndef index(request):\n c = { }\n c.update(csrf(request))\n return render_to_response(\"index.html\", c)\n #return render_to_response('index.html')\ndef logout(request):\n return render_to_response('login.html')\n\ndef puppet_main(request):\n return render_to_response('puppet.html')\n\ndef puppet_admin_s(request):\n form = puppet_admin_modForm()\n '''\n if request.method == 'POST':\n form = puppet_admin_modForm(data=request.POST)\n if form.is_valid():\n csrf_token = get_token(request)\n puppet_model_path = form.cleaned_data['puppet_model_path']\n 
puppet_files_server_path = form.cleaned_data['puppet_files_server_path']\n puppet_config_path = form.cleaned_data['puppet_config_path']\n #port_user = request.user\n new = puppet_admin.objects.create(puppet_model_path=puppet_model_path,puppet_files_server_path=puppet_files_server_path,puppet_config_path=puppet_config_path)\n new.save()\n print(\"ok\")\n results = puppet_admin.objects.all()\n #new_portfolio = puppet_admin(puppet_model_path=puppet_model_path,puppet_files_server_path=puppet_files_server_path,puppet_config_path=puppet_config_path)\n #new_portfolio.save()\n c = {'form': form}\n c.update(csrf(request))\n return render_to_response(\"puppet_admin.html\", c)\n\n #return render_to_response('puppet_admin.html', {'csrf_token': csrf_token})\n else:\n form = puppet_admin_modForm()\n '''\n return render_to_response('puppet_admin.html', {'form': form},context_instance=RequestContext(request))\n\ndef puppet_admin_config(request):\n #if request.user.is_authenticated():\n if request.method == 'POST':\n form = puppet_admin_modForm(request.POST)\n\n if form.is_valid():\n csrf_token = get_token(request)\n puppet_model_path = form.cleaned_data['puppet_model_path']\n puppet_files_server_path = form.cleaned_data['puppet_files_server_path']\n puppet_config_path = form.cleaned_data['puppet_config_path']\n puppet_server_ip = form.cleaned_data['puppet_server_ip']\n obj = puppet_admin(puppet_model_path=puppet_model_path,puppet_files_server_path=puppet_files_server_path,puppet_config_path=puppet_config_path,puppet_server_ip=puppet_server_ip,id=1)\n obj.save()\n #c = { }\n #c.update(csrf(request))\n #return render_to_response(\"puppet_c_s.html\", c)\n return HttpResponseRedirect('/puppet_c_s/')\n else:\n form = puppet_admin_modForm()\n\n return render_to_response('puppet_admin.html', {'form': form},context_instance=RequestContext(request))\n #else:\n # return HttpResponseRedirect('/login/')\n#@login_required\ndef puppet_admin_c_s(request):\n lines = 
puppet_admin.objects.order_by(\"-id\")\n paginator = Paginator(lines, 10)\n page = request.GET.get('page')\n try:\n show_lines = paginator.page(page)\n except PageNotAnInteger:\n # # If page is not an integer, deliver first page.\n show_lines = paginator.page(1)\n except EmptyPage:\n # # If page is out of range (e.g. 9999), deliver last page of results.\n show_lines = paginator.page(paginator.num_pages)\n return render_to_response('puppet_c_s.html', RequestContext(request, {'blog': show_lines,}))\n\n\ndef salt_main(request):\n return render_to_response('salt_main.html')\n\ndef salt_admin_s(request):\n lines = salt_admin.objects.order_by(\"-id\")\n paginator = Paginator(lines, 10)\n page = request.GET.get('page')\n try:\n show_lines = paginator.page(page)\n except PageNotAnInteger:\n # # If page is not an integer, deliver first page.\n show_lines = paginator.page(1)\n except EmptyPage:\n show_lines = paginator.page(paginator.num_pages)\n return render_to_response('salt_c_s.html', RequestContext(request, {'salt_admin': show_lines,}))\n\ndef salt_admin_config(request):\n if request.method == 'POST':\n salt_form = salt_adminmodForm(request.POST)\n if salt_form.is_valid():\n csrf_token = get_token(request)\n ip = salt_form.cleaned_data['ip']\n name = salt_form.cleaned_data['name']\n idc = salt_form.cleaned_data['local']\n obj = salt_admin(ip=ip,name=name,local=idc,id=1)\n obj.save()\n\n return HttpResponseRedirect('/salt_c_s/')\n else:\n salt_form = salt_adminmodForm()\n return render_to_response('salt_admin.html', {'salt_form': salt_form },context_instance=RequestContext(request))\n\n\ndef cobbler_main(request):\n\n return render_to_response('cobbler_mian.html')\n\ndef cobbler_admin(request):\n\n return render_to_response('cobbler_admin.html')\n\n\n","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"109871039","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# from graph_6 import Graph\nfrom graph_6_extend import Graph\n\n\ndef build_word_graph(word_file):\n buckets = dict()\n g = Graph()\n count = 0\n\n with open(word_file, 'r') as f:\n for line in f:\n word = line[:-1]\n for i in range(len(word)):\n letter_list = list(word)\n letter_list[i] = '_'\n bucket = ''.join(letter_list)\n if bucket in buckets:\n buckets[bucket].append(word)\n else:\n buckets[bucket] = [word]\n # print(buckets)\n # break\n # 同一个 buckets key 下的 vertex 之间需要关联起来 无向\n for bucket in buckets.keys():\n for key_1 in buckets[bucket]:\n for key_2 in buckets[bucket]:\n if key_1 != key_2:\n g.add_edge(key_1, key_2)\n count += 1\n print(count)\n return g\n\n\nif __name__ == \"__main__\":\n build_word_graph('four_letter.txt')\n # my result is 73182 edges\n","sub_path":"algorithm/psads/7-chapter/graph_8.py","file_name":"graph_8.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"200950850","text":"#ben schackman\n#06/14/2021\n#game w/ menu\nimport os\nimport sys \nimport time\nimport random\nimport datetime\n\nos.system('cls')\n\nprint(\"1.- play the game \\n2.- Print top scores(print the score file)\\n3. Exit\")\nselection = int(input().strip())\nwhile selection!=3:\n if selection == 1:\n gameWords= ['Jump','Swim','Skateboard','computer','keyboard','Triangle','laptop','Square','charger','mouse','software','rectangle']\n name =input(\"What is your name?\")\n print(name, end = \" \")\n answer = input(\" Do you want to play? 
\").upper()\n #yes -> YES\n print(\"\\n \",gameWords) #delete when code works properly \n while \"Y\" in answer:\n print(name,\" Good luck\")\n word=random.choice(gameWords) #stores word to be guessed\n print(word)\n turns =10 # defines turns\n guesses=''\n counter=len(word)\n wrongGuesses= 0\n while turns >0 and counter>=0:\n for char in word: #graphic of guesses\n if char in guesses:\n print(char, end=' ')\n else:\n print('_', end=' ')\n newGuess=input(\"\\n Give me a letter \") #input\n if newGuess in word:\n ball= word.count(newGuess)\n counter= counter - ball\n guesses += newGuess #guesses= guesses+newGuesses\n print(\"You are Right!!\" )\n \n else:\n turns -= 1\n print (\"Sorry that is wrong you still have \", turns,\" turns\") \n wrongGuesses = wrongGuesses + 1\n if turns > 0: #if they win - add them to leader board file\n hsFile = open('hs.txt','a')\n hsFile.write(\"Name: \")\n hsFile.write(name)\n hsFile.write(\"Wrong Guesses: \")\n hsFile.close\n \n answer = input(\" Do you want to play? \").upper()\n def printscore():\n def updatescore():\n print(name,\"you score is: \", score)\n selection = 4\n \n if selection == 2:\n hsFile = open('hs.txt', 'r')\n print(hsFile.read())\n hsFile.close\n print(\"1.- play the game \\n2.- Print top scores(print the score file)\\n 3. Exit\")\n selection = int(input().strip())\n\n\n\n\n# print(\"hello... let me guess your name...\")\n# time.sleep(2) #basically a wait function - creates a pause in the program\n# print(\"...almost...\")\n# print(\"... yes, you are Ben\")\n# time.sleep(2)\n# os.system('cls') #clears console\n# file=input(\"Please add file name add extension of the file. 
Ex File.txt :\")\n# #check if the file exists\n# if os.path.exists(file):\n# f1= open('txtTest.txt','r' )\n# print(f1.read())\n# f1.close\n# else:\n# print(\"the file does not exist\")\n","sub_path":"HWCHALLENGE.py","file_name":"HWCHALLENGE.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6331980","text":"import unittest\nfrom pathlib import Path\n\nfrom blambda.utils.lambda_manifest import LambdaManifest\n\n\nclass TestFindFunc(unittest.TestCase):\n def test_manifest_naming(self):\n test_cases = [\n ['adwords/textad.json', 'adwords/textad', 'textad', 'adwords', 'adwords_textad'],\n ['timezone/timezone.json', 'timezone', 'timezone', 'timezone', 'timezone'],\n ['echo/echo_worker.json', 'echo_worker', 'echo_worker', 'echo', 'echo_worker'],\n ['worker/some_worker.json', 'worker/some_worker', 'some_worker', 'worker', 'worker_some_worker'],\n ]\n\n for (path, full, short, group, deployed) in test_cases:\n with self.subTest(manifest=path):\n m = LambdaManifest(path)\n self.assertEqual(m.full_name, full)\n self.assertEqual(m.short_name, short)\n self.assertEqual(m.group, group)\n self.assertEqual(m.deployed_name, deployed)\n\n def test_source_files(self):\n \"\"\"Make sure the 'source files' section of the manifest is properly handled in the 'blambda deps' case\"\"\"\n\n root = Path(__file__).parent / 'data' / 'manifests'\n path = root / 'source_files.json'\n manifest = LambdaManifest(path)\n\n expect = [\n ((root / '../shared/a.coffee'), (root / 'a.coffee')),\n ((root / '../shared/shared.txt'), (root / 'shared.txt')),\n ((root / '../shared/shared.txt'), (root / 'shared.txt')),\n ]\n for src, dest in manifest.source_files():\n src_expect, dest_expect = expect.pop(0)\n self.assertEqual(src.resolve(), src_expect.resolve())\n self.assertEqual(dest.resolve(), dest_expect.resolve())\n\n def test_source_files_deploy(self):\n \"\"\"Make sure the 'source files' section of the 
manifest is properly handled in the 'blambda deploy' case\"\"\"\n\n root = Path(__file__).parent / 'data' / 'manifests'\n path = root / 'source_files.json'\n manifest = LambdaManifest(path)\n\n outdir = Path('/tmp')\n\n expect = [\n ((root / 'testfile.coffee'), (outdir / 'source_files' / 'testfile.coffee')),\n ((root / 'plaincopy.txt'), (outdir / 'plaincopy.txt')),\n ((root / '../shared/a.coffee'), (outdir / 'a.coffee')),\n ((root / '../shared/shared.txt'), (outdir / 'shared.txt')),\n ((root / '../shared/shared.txt'), (outdir / 'shared.txt')),\n ]\n for src, dest in manifest.source_files(dest_dir=outdir):\n src_expect, dest_expect = expect.pop(0)\n self.assertEqual(src.resolve(), src_expect.resolve())\n self.assertEqual(dest, dest_expect)\n","sub_path":"tests/test_lambda_manifest.py","file_name":"test_lambda_manifest.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"530713043","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# dns.DNSZoneProfile tests\n# ----------------------------------------------------------------------\n# Copyright (C) 2007-2018 The NOC Project\n# See LICENSE for details\n# ----------------------------------------------------------------------\n\n# Python modules\nfrom __future__ import absolute_import\n# Third-party modules\nimport pytest\n# NOC modules\nfrom .base import BaseModelTest\nfrom noc.dns.models.dnszoneprofile import DNSZoneProfile\nfrom noc.dns.models.dnsserver import DNSServer\n\n\nclass TestTestDnsDNSZoneProfile(BaseModelTest):\n model = DNSZoneProfile\n\n @pytest.mark.parametrize(\"zoneprofile,dns_server,is_authoritative\", [\n (\"p1\", \"ns1.example.com\", True),\n (\"p1\", \"ns2.example.com\", True),\n (\"p2\", \"ns1.example.com\", True),\n (\"p2\", \"ns2.example.com\", True),\n (\"p3\", \"ns1.example.com\", True),\n (\"p3\", \"ns2.example.com\", False),\n ])\n def 
test_autoritative_servers(self, zoneprofile, dns_server, is_authoritative):\n zp = DNSZoneProfile.objects.get(name=zoneprofile)\n assert zp\n ns = DNSServer.objects.get(name=dns_server)\n assert ns\n assert (ns in zp.authoritative_servers) is is_authoritative\n","sub_path":"tests/models/test_dns_dnszoneprofile.py","file_name":"test_dns_dnszoneprofile.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"432115081","text":"import Iterador\nimport time\nimport argparse\n\n\n# Recurrencia\n# t(n, LC, LNC) = min(t(n - 1, LC, LNC), t(n - 1, LCappended, LNCremoved))\n# abs(sumaLC - sumaLNC) si n = 0\n\n\ndef minimumSumPartitionProblem(args, vector, num_of_elements):\n if len(vector) == 0:\n return 0\n\n print(str(num_of_elements) + \" ELEMENTOS\")\n timeStart = time.time()\n resultRecursive, s1, s2 = recursive(vector)\n timeEnd = time.time()\n print(\"[BFRecur] Solution (\" + str(resultRecursive) + \") || t (\" + str(round((timeEnd - timeStart), 3)) + \" s)\")\n # print(\"La mejor combinación es s1 = \" + str(s1) + \" y s2= \" + str(s2))\n # print(\"Ha tardado \" + str(round((timeEnd - timeStart), 3)) + \"\\n\")\n\n timeStart = time.time()\n resultItera, s1, s2 = iterative(vector)\n timeEnd = time.time()\n print(\"[BFItera] Solution (\" + str(resultItera) + \") || t (\" + str(round((timeEnd - timeStart), 3)) + \")\")\n # print(\"La mejor combinación es s1 = \" + str(s1) + \" y s2= \" + str(s2))\n # print(\"Ha tardado \" + str(round((timeEnd - timeStart), 3)) + \"\\n\")\n\n timeStart = time.time()\n resultGreedy, s1, s2 = greedy(vector)\n timeEnd = time.time()\n print(\"[Greedy] Solution (\" + str(resultGreedy) + \") || t (\" + str(round((timeEnd - timeStart), 3)) + \")\")\n # print(\"La mejor combinación es s1 = \" + str(s1) + \" y s2= \" + str(s2))\n # print(\"Ha tardado \" + str(round((timeEnd - timeStart), 3)) + \"\\n\")\n\n timeStart = time.time()\n resultMemoization, s1, 
s2 = memoization(vector)\n timeEnd = time.time()\n print(\n \"[DP-Memoization] Solution (\" + str(resultMemoization) + \") || t (\" + str(\n round((timeEnd - timeStart), 3)) + \" s)\")\n # print(\"La mejor combinación es s1 = \" + str(s1) + \" y s2= \" + str(s2))\n # print(\"Ha tardado \" + str(round((timeEnd - timeStart), 3)) + \"\\n\")\n\n\ndef recursive(vector: list):\n # Variables de apoyo.\n # LC = elementos seleccionados de vector\n # LNC = elementos no seleccionados de vector\n # s1, s2 = donde se guardarán las mejores combinaciones.\n LC, LNC, s1, s2 = list(), vector.copy(), list(), list()\n\n def t(n, LC: list, LNC: list):\n sumaLC, sumaLNC = sum(LC), sum(LNC)\n\n if n == 0:\n return abs(sumaLC - sumaLNC), LC, LNC\n\n # Se modifican las listas añadiendo un nuevo valor\n LCappended, LNCremoved = modify_lists(LC, LNC, n)\n\n return min(t(n - 1, LC, LNC), t(n - 1, LCappended, LNCremoved))\n\n # Método auxiliar\n def modify_lists(LC, LNC, n):\n LCappended, LNCremoved = LC.copy(), LNC.copy()\n LCappended.append(vector[n])\n LNCremoved.remove(vector[n])\n return LCappended, LNCremoved\n\n minimum_subtract, s1, s2 = t(len(vector) - 1, LC, LNC)\n\n return minimum_subtract, s1, s2\n\n\ndef iterative(vector: list):\n iterator = Iterador.Iterator(len(vector))\n\n minSubtract, bestComb_s1, bestComb_s2 = max(vector) + 1, 0, 0\n\n while iterator.hasNext():\n counter = iterator.getNext()\n\n # Listas para cada partición.\n s1, s2 = list(), vector.copy()\n\n # Elementos elegidos para la primera porción s1.\n for i in range(len(counter)):\n if counter[i] == 1:\n s1.append(vector[i])\n # Eliminamos el valor de s2\n s2.remove(vector[i])\n\n # Se realiza la resta y se compara\n resta = abs(sum(s1) - sum(s2))\n if resta < minSubtract:\n bestComb_s1, bestComb_s2, minSubtract = s1, s2, resta\n\n return minSubtract, bestComb_s1, bestComb_s2\n\n\ndef greedy(vector: list):\n # Se ordena de mayor a menor y se van añadiendo los valores alternativamente\n # entre las listas s1, s2 
según el tamaño de cada lista (si la suma de los valores\n # de s1 es mayor a la suma de s2, entonces el valor se meterá en s2 y viceversa).\n\n vector.sort(reverse=True)\n s1, s2 = list(), list()\n\n for i in vector:\n if sum(s1) > sum(s2):\n s2.append(i)\n else:\n s1.append(i)\n\n return abs(sum(s1) - sum(s2)), s1, s2\n\n\ndef memoization(vector: list):\n LC, LNC, s1, s2 = list(), vector.copy(), list(), list()\n cache = {}\n\n def memMiniumSumPartionProblem(n, LC: list, LNC: list):\n\n # La estructura de la clave es N|LC\n key = str(n) + \"|\" + str(LC)\n if key not in cache:\n sumaLC, sumaLNC = sum(LC), sum(LNC)\n\n if n == 0:\n r = abs(sumaLC - sumaLNC), LC, LNC\n else:\n LCappended, LNCremoved = modify_lists(LC, LNC, n)\n\n r = min(memMiniumSumPartionProblem(n - 1, LC, LNC),\n memMiniumSumPartionProblem(n - 1, LCappended, LNCremoved))\n\n # Memorizamos el resultado.\n cache[key] = r\n\n return cache[key]\n\n def modify_lists(LC, LNC, n):\n LCappended, LNCremoved = LC.copy(), LNC.copy()\n LCappended.append(vector[n])\n LNCremoved.remove(vector[n])\n return LCappended, LNCremoved\n\n return memMiniumSumPartionProblem(len(vector) - 1, LC, LNC)\n\n","sub_path":"minSum.py","file_name":"minSum.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"195688905","text":"import json\nimport hashlib\n\n\ndef make_genome_json(name):\n # Genomes\n return json.dumps(\n {\n \"data_category\": \"Annotated Sequence\",\n \"data_type\": \"Complete Genomic Sequence\",\n \"data_format\": \"gb\",\n \"source\": \"NCBI\",\n \"submitter_id\": name + \"_gb\",\n \"file_name\": name + \".gb\",\n }\n )\n\n\ndef make_sequence_json(obj, seqtype):\n # Sequences\n # Data Category: Protein or Nucleotide\n category = \"Protein\" if seqtype == \"aa\" else \"Nucleotide\"\n return json.dumps(\n {\n \"data_category\": category,\n \"data_type\": \"Sequence\",\n \"data_format\": \"fasta\",\n 
\"submitter_id\": obj.id + \"_fasta\",\n \"file_name\": obj.id + \".fasta\",\n }\n )\n\n\ndef make_alignment_json(file, aligner):\n # Alignments\n virus_sequence_alignment_id = file.replace(\".\", \"_\")\n # Data Category: Protein or Nucleotide\n category = \"Protein\" if \"-aa\" in file else \"Nucleotide\"\n return json.dumps(\n {\n \"data_category\": category,\n \"data_type\": \"Sequence Alignment\",\n \"data_format\": \"fasta\",\n \"submitter_id\": virus_sequence_alignment_id,\n \"file_name\": file,\n \"alignment_tool\": aligner,\n }\n )\n\n\ndef make_hmm_json(file):\n # HMMs\n virus_sequence_hmm_id = file.replace(\".\", \"_\")\n # Data Category: Protein or Nucleotide\n category = \"Protein\" if \"-aa\" in file else \"Nucleotide\"\n return json.dumps(\n {\n \"data_category\": category,\n \"data_type\": \"Sequence HMM\",\n \"data_format\": \"hmmer\",\n \"submitter_id\": virus_sequence_hmm_id,\n \"file_name\": file,\n }\n )\n\n\ndef checksum(self, filename):\n with open(filename, \"rb\") as f:\n bytes = f.read()\n return hashlib.md5(bytes).hexdigest()\n","sub_path":"covid_bio/make_json.py","file_name":"make_json.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"156868388","text":"#use this to read in a mallet topic model file and output a very similar csv file used for machine learning classification\nimport sys\n\nmallet_output=sys.argv[1]\nf=open(mallet_output,'r')\ndoctopic=f.read()\ndoctopic_list=doctopic.split('\\n')\nf.close()\nfilenames=[]\ndata=[]\ndata_str=[]\nfor line in doctopic_list:\n if line!='':\n line=line.split('\\t')\n filenames.append(line[1])\n line_float=[float(i) for i in line[2:]]\n data_str.append(line[2:])\n data.append(line_float) \n\n\nnames=[file.split('/')[-1].split('.')[0].split(\"_\")[0] for file in filenames]\nheader=range(20)\nheader.append('class')\nheader=[str(i) for i in 
header]\noutputname=mallet_output.split('.')[0]+\"-train.csv\"\n\nf=open(outputname,'w')\nf.write(','.join(header)+'\\n')\nfor i in range(len(data)):\n numbers=\",\".join(data_str[i])\n label=names[i]\n line=numbers + \",\" + label\n f.write(line+\"\\n\")\nf.close()","sub_path":"ismir-scripts/output_mallet_train.py","file_name":"output_mallet_train.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"483042961","text":"import datetime\n\nimport dateutil #requires: pip install python-dateutil\nfrom dateutil import tz\nimport json\nimport pkg_resources\n\nfrom gcf.geni.SecureThreadedXMLRPCServer import SecureThreadedXMLRPCRequestHandler\nfrom gcf.geni.SecureXMLRPCServer import SecureXMLRPCRequestHandler\n\nfrom terms_conditions import TermsAndConditionsDB\nfrom terms_conditions_helper import TermsAndConditionsHelper\n\n\nclass TermsAndConditionsSite(TermsAndConditionsHelper):\n _TC_SITE = None\n\n @classmethod\n def get(cls):\n if cls._TC_SITE is None:\n cls._TC_SITE = TermsAndConditionsSite()\n return cls._TC_SITE\n\n def __init__(self):\n super(TermsAndConditionsSite, self).__init__()\n self._html = pkg_resources.resource_string(__name__, 'terms_conditions.html')\n self._js = pkg_resources.resource_string(__name__, 'terms_conditions.js')\n self._css = pkg_resources.resource_string(__name__, 'terms_conditions.css')\n\n def html(self):\n return self._html\n\n def js(self):\n return self._js\n\n def css(self):\n return self._css\n\n def register_accept(self, user_urn, user_accepts):\n safe_accepts = {}\n keys = ['accept_main', 'accept_userdata']\n for key in keys:\n safe_accepts[key] = bool(user_accepts[key]) if key in user_accepts else False\n\n safe_accepts['testbed_access'] = self.derive_testbed_access(safe_accepts)\n\n accept_until = datetime.datetime.now(tz.tzutc()) + datetime.timedelta(days=365)\n\n self._db.register_user_accepts(user_urn,\n safe_accepts,\n 
accept_until.isoformat())\n # datetime.datetime.now(datetime.timezone.utc).isoformat())\n return\n\n def register_decline(self, user_urn):\n self._db.delete_user_accepts(user_urn)\n return\n\n def get_user_accepts(self, user_urn):\n return super(TermsAndConditionsSite, self).get_user_accepts(user_urn)\n\n\nclass SecureXMLRPCAndTermsAndConditionsSiteRequestHandler(SecureThreadedXMLRPCRequestHandler):\n def find_client_urn(self):\n cert_dict = self.request.getpeercert()\n # self.log_message(\"findClientUrn in: %s\", cert_dict)\n if cert_dict is None:\n return None\n if 'subjectAltName' in cert_dict:\n san = cert_dict['subjectAltName']\n for entry in san:\n (san_type, san_val) = entry\n if san_type == 'URI' and san_val.startswith('urn:publicid:IDN+'):\n return san_val\n return None\n\n def read_request_data(self, max_bytes=None):\n #copied from SimpleXMLRPCServer do_POST\n max_chunk_size = 10 * 1024 * 1024\n size_remaining = int(self.headers[\"content-length\"])\n\n if max_bytes is not None and size_remaining > max_bytes:\n self.send_error(400, \"Client is sending too much data\")\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n return None\n\n L = []\n while size_remaining:\n chunk_size = min(size_remaining, max_chunk_size)\n chunk = self.rfile.read(chunk_size)\n if not chunk:\n break\n L.append(chunk)\n size_remaining -= len(L[-1])\n\n if len(L) == 0:\n self.send_error(400, \"Required data missing\")\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n return None\n\n data = ''.join(L)\n return self.decode_request_content(data)\n\n def do_POST(self):\n \"\"\"Handles the HTTP POST request.\n\n Most calls will be forwarded because they are XML-RPC calls, and get forwarded to the real method.\n \"\"\"\n # we don't actually support any POST at the moment. 
If we did, we'd intercept it here, and do it instead of defering to XML-RPC\n\n #call super method\n # super(SecureXMLRPCRequestHandler, self).do_POST() # new style\n SecureXMLRPCRequestHandler.do_POST(self)\n\n def do_DELETE(self):\n \"\"\"Handles the HTTP DELETE request.\n \"\"\"\n self.log_message(\"Got server DELETE call: %s\", self.path)\n if self.path == '/terms_conditions' or self.path == '/terms_conditions/' or self.path == '/terms_conditions/accept':\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n TermsAndConditionsSite.get().register_decline(client_urn)\n self.send_response(204) # No Content\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n # self.wfile.close()\n return\n\n self.send_error(405, \"Method not allowed here\")\n\n def do_PUT(self):\n \"\"\"Handles the HTTP PUT request.\n \"\"\"\n self.log_message(\"Got server PUT call: %s\", self.path)\n if self.path == '/terms_conditions/accept':\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n data = self.read_request_data(max_bytes=1000) #These are always very small JSON messages\n if data is None:\n # we assume read_request_data has set the right error\n return\n try:\n user_accepts = json.loads(data)\n except ValueError:\n self.send_error(400, \"JSON parse exception\")\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n return\n\n TermsAndConditionsSite.get().register_accept(client_urn, user_accepts)\n self.send_response(204) # No Content\n self.send_header(\"Content-length\", \"0\")\n self.end_headers()\n # self.wfile.close()\n return\n\n self.send_error(405, \"Method not allowed here\")\n\n def do_GET(self):\n \"\"\"Handles the HTTP GET request.\n\n GET calls are never XML-RPC calls, so we should return 404 if we don't handle them\n \"\"\"\n self.log_message(\"Got server GET call: %s\", self.path)\n if self.path == '/terms_conditions' or self.path == 
'/terms_conditions/':\n self.send_response(301)\n self.send_header(\"Location\", \"/terms_conditions/index.html\")\n self.end_headers()\n return\n\n if self.path == '/terms_conditions/index.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n response = TermsAndConditionsSite.get().html()\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n return\n\n if self.path == '/terms_conditions/terms_conditions.js':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/javascript\")\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n response = TermsAndConditionsSite.get().js()\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n return\n\n if self.path == '/terms_conditions/terms_conditions.css':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/css\")\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n response = TermsAndConditionsSite.get().css()\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n return\n\n if self.path == '/terms_conditions/accept':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n client_urn = self.find_client_urn()\n if client_urn is None:\n self.report_forbidden()\n return\n response = json.dumps(TermsAndConditionsSite.get().get_user_accepts(client_urn), indent=4)\n if response is None:\n self.report_404()\n return\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n return\n\n self.report_404()\n\n def report_forbidden(self):\n self.send_response(403)\n response = 'Forbidden'\n self.send_header(\"Content-type\", 
\"text/plain\")\n self.send_header(\"Content-length\", str(len(response)))\n self.end_headers()\n self.wfile.write(response)\n","sub_path":"gcf_docker_plugin/terms_conditions/terms_conditions_site_request_handler.py","file_name":"terms_conditions_site_request_handler.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"213925432","text":"'''\r\nfrom flask import render_template, session, redirect, url_for, current_app\r\nfrom .. import db\r\nfrom ..models import User\r\nfrom ..email import send_email\r\nfrom . import main\r\nfrom .forms import NameForm\r\n\r\n\r\n@main.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n form = NameForm()\r\n if form.validate_on_submit():\r\n user = User.query.filter_by(username=form.name.data).first()\r\n if user is None:\r\n user = User(username = form.name.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n session['known'] = False\r\n if current_app.config['FLASKY_ADMIN']:\r\n send_email(current_app.config['FLASKY_ADMIN'], 'New User',\r\n 'mail/new_user', user=user)\r\n else:\r\n session['known'] = True\r\n session['name'] = form.name.data\r\n return redirect(url_for('.index'))\r\n return render_template('index.html',\r\n form=form, name=session.get('name'),\r\n known=session.get('known', False))\r\n'''\r\n\r\n#首页、用户页、编辑个人资料、博客文章单页、编辑单页文章\r\n\r\nfrom flask import render_template,flash,redirect,url_for,request,abort\r\nfrom flask_login import login_required,current_user\r\nfrom .. import db\r\nfrom .forms import EditProfileForm,PostForm\r\nfrom . 
import main\r\nfrom ..models import User,Post\r\n\r\n\r\n@main.route('/',methods = ['GET','POST'])\r\ndef index():\r\n form = PostForm()\r\n if form.validate_on_submit():\r\n post = Post(body = form.body.data,author = current_user._get_current_object())\r\n db.session.add(post)\r\n db.session.commit()\r\n return redirect(url_for('main.index'))\r\n #posts = Post.query.order_by(Post.timestamp.desc()).all()\r\n\r\n # 通过request.args.get获取一个url所带的参数,这里是获取参数\"paga\"的值,如果不存在则返回默认值1\r\n page = request.args.get('page',1,type = int)\r\n # paginate() return per_page items from page,pagination是当前页中的文章(只有5个),内容\r\n #随着页数page改变而变,它是个flask-sqlalchemy对象,下面的posts才是要显示的文章本体。\r\n pagination = Post.query.order_by(Post.timestamp.desc()).paginate(\r\n page,per_page=5,error_out=False\r\n )\r\n posts = pagination.items # items代表当前页面的项目\r\n return render_template('index.html',form = form , posts = posts, pagination = pagination)\r\n\r\n#filter_by(username = username).\r\n@main.route('/user/')\r\ndef user(username):\r\n user = User.query.filter_by(username = username).first()\r\n if user is None:\r\n abort(404)\r\n #posts = user.posts.order_by(Post.timestamp.desc()).all()\r\n page = request.args.get('page',1,type = int)\r\n pagination = user.posts.order_by(Post.timestamp.desc()).paginate(\r\n page,per_page=5,error_out=False\r\n )\r\n posts = pagination.items\r\n return render_template('user.html',user = user,posts = posts,pagination = pagination)\r\n\r\n@main.route('/edit-profile',methods = ['GET','POST'])\r\n@login_required\r\ndef edit_profile():\r\n form = EditProfileForm()\r\n if form.validate_on_submit():\r\n current_user.name = form.name.data\r\n current_user.location = form.location.data\r\n current_user.about_me = form.about_me.data\r\n\r\n db.session.add(current_user._get_current_object())\r\n db.session.commit()\r\n\r\n flash('Your profile has been updated.')\r\n return redirect(url_for('main.user',username = current_user.username))\r\n form.name.data = current_user.name\r\n 
form.location.data = current_user.location\r\n form.about_me.data = current_user.about_me\r\n return render_template('edit_profile.html',form = form)\r\n\r\n\r\n@main.route('/post/')\r\ndef post(id):\r\n post = Post.query.get_or_404(id)\r\n return render_template('post.html',posts = [post])\r\n\r\n\r\n@main.route('/edit/',methods = ['GET','POST'])\r\n@login_required\r\ndef edit(id):\r\n post = Post.query.get_or_404(id)\r\n if current_user != post.author:\r\n abort(404)\r\n form = PostForm()\r\n if form.validate_on_submit():\r\n post.body = form.body.data\r\n db.session.add(post)\r\n db.session.commit()\r\n flash('The post has been updated.')\r\n return redirect(url_for('.post',id = post.id))\r\n form.body.data = post.body\r\n return render_template('edit_post.html',form = form)\r\n\r\n@main.route('/follow/')\r\n@login_required\r\ndef follow(username):\r\n user = User.query.filter_by(username = username).first()\r\n if user is None:\r\n flash('Invalid user.')\r\n return redirect(url_for('.index'))\r\n if current_user.is_following(user):\r\n flash('你已经关注了这个用户。')\r\n return redirect(url_for('main.user',username = username))\r\n current_user.follow(user)\r\n flash('you are now following %s.' % username)\r\n return redirect(url_for('.user',username = username))\r\n\r\n@main.route('/unfollow/')\r\n@login_required\r\ndef unfollow(username):\r\n user = User.query.filter_by(username = username).first()\r\n if user is None:\r\n flash('Invalid user.')\r\n return redirect(url_for('.index'))\r\n if not current_user.is_following(user):\r\n flash('现在你没关注这个用户。')\r\n return redirect(url_for('.user',username = username))\r\n current_user.unfollow(user)\r\n flash('you are not following %s anymore.' 
% username)\r\n return redirect(url_for('.user',username = username))\r\n\r\n@main.route('/followers/')\r\ndef followers(username):\r\n user = User.query.filter_by(username = username).first()\r\n if user is None:\r\n flash('Invalid user.')\r\n return redirect(url_for('.index'))\r\n page = request.args.get('page',1,type = int)\r\n pagination = user.followers.paginate(page,per_page=5,error_out=False)\r\n follows = [{'user':item.follower,'timestamp':item.timestamp} for item in pagination.items]\r\n return render_template('followers.html',user = user,title = \"Followers of\",\r\n endpoint = '.followers',pagination = pagination,follows = follows)\r\n\r\n@main.route('/followed-by/')\r\ndef followed_by(username):\r\n user = User.query.filter_by(username = username).first()\r\n if user is None:\r\n flash('Invalid user.')\r\n return redirect(url_for('.index'))\r\n page = request.args.get('page',1,type = int)\r\n pagination = user.followed.paginate(page,per_page=5,error_out=False)\r\n follows = [{'user':item.followed,'timestamp':item.timestamp} for item in pagination.items]\r\n return render_template('followers.html',user = user,title = \"Followed by\",\r\n endpoint = '.followed_by',pagination = pagination,follows = follows)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Flask/app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"56341931","text":"import receiver as rec\nimport transmitter as trans\n\nACCESS_TOKEN = \"ACCESS_TOKEN\"\n\ndef main():\n receiver = rec.Receiver(ACCESS_TOKEN)\n transmitter = trans.Transmitter(\"localhost\", 5555)\n\n transmitter.send_conversations(\n receiver.conversations()\n )\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"receiver/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"52295773","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 30 18:13:41 2020\n\n@author: Vladimir Sivak\n\"\"\"\n\nimport os\nos.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"]='true'\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\n# append parent 'gkp-rl' directory to path \nimport sys\nsys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))\n\nimport qutip as qt\nimport tensorflow as tf\nimport numpy as np\nfrom math import sqrt, pi\nfrom rl_tools.agents import PPO\nfrom tf_agents.networks import actor_distribution_network\nfrom rl_tools.agents import actor_distribution_network_gkp\nfrom rl_tools.tf_env import helper_functions as hf\n\n\"\"\"\nTrain PPO agent to do GKP sensor state preparation with universal gate sequence\nconsisting of SNAP gates and oscillator displacements.\n\nThe episodes start from vacuum, and GKP stabilizer measurements are performed\nin the end to assign reward.\n\n\"\"\"\n\nroot_dir = r'E:\\data\\gkp_sims\\PPO\\examples\\gkp_S50'\ndeltas = [0.25]\nrandom_seeds = [2]\n\nfor Delta in deltas:\n # setup directory for each simulation\n delta_dir = os.path.join(root_dir,'delta'+str(Delta))\n if not os.path.isdir(delta_dir): os.mkdir(delta_dir)\n\n for seed in random_seeds:\n sim_dir = os.path.join(delta_dir,'seed'+str(seed))\n\n # Params for environment\n env_kwargs = {\n 'control_circuit' : 'snap_and_displacement',\n 'init' : 'vac',\n 'H' : 1,\n 'T' : 6, \n 'attn_step' : 1,\n 'N' : 200}\n \n # Params for reward function\n reward_kwargs = {'reward_mode' : 'stabilizers_v2',\n 'Delta' : Delta, 'beta' : sqrt(pi),\n 'sample' : True}\n \n reward_kwargs_eval = {'reward_mode' : 'stabilizers_v2',\n 'Delta' : Delta, 'beta' : sqrt(pi),\n 'sample' : False}\n \n # Params for action wrapper\n action_script = 'snap_and_displacements'\n 
action_scale = {'alpha':6, 'theta':pi}\n to_learn = {'alpha':True, 'theta':True}\n \n train_batch_size = 1000\n eval_batch_size = 1000\n \n train_episode_length = lambda x: env_kwargs['T']\n eval_episode_length = lambda x: env_kwargs['T']\n \n # Create drivers for data collection\n from rl_tools.agents import dynamic_episode_driver_sim_env\n \n collect_driver = dynamic_episode_driver_sim_env.DynamicEpisodeDriverSimEnv(\n env_kwargs, reward_kwargs, train_batch_size, \n action_script, action_scale, to_learn, train_episode_length)\n \n eval_driver = dynamic_episode_driver_sim_env.DynamicEpisodeDriverSimEnv(\n env_kwargs, reward_kwargs_eval, eval_batch_size, \n action_script, action_scale, to_learn, eval_episode_length)\n \n PPO.train_eval(\n root_dir = sim_dir,\n random_seed = seed,\n num_epochs = 10000,\n # Params for train\n normalize_observations = True,\n normalize_rewards = False,\n discount_factor = 1.0,\n lr = 1e-3,\n lr_schedule = None,\n num_policy_updates = 20,\n initial_adaptive_kl_beta = 0.0,\n kl_cutoff_factor = 0,\n importance_ratio_clipping = 0.25,\n value_pred_loss_coef = 0.005,\n gradient_clipping = 1.0,\n # Params for log, eval, save\n eval_interval = 200,\n save_interval = 200,\n checkpoint_interval = 100000,\n summary_interval = 10000,\n # Params for data collection\n train_batch_size = train_batch_size,\n eval_batch_size = eval_batch_size,\n collect_driver = collect_driver,\n eval_driver = eval_driver,\n replay_buffer_capacity = 7000,\n # Policy and value networks\n ActorNet = actor_distribution_network.ActorDistributionNetwork,\n actor_fc_layers = (),\n value_fc_layers = (),\n use_rnn = True,\n actor_lstm_size = (12,),\n value_lstm_size = (12,)\n )","sub_path":"examples/paper_state_prep_SNAP_displacements_gkp_finite_energy.py","file_name":"paper_state_prep_SNAP_displacements_gkp_finite_energy.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} 
+{"seq_id":"184615147","text":"\"\"\"\nALGORITMO: Multilayer Perceptron (MLP)\nREPRESENTACAO DE TEXTO: TF-IDF\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics.classification import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.neural_network import MLPClassifier\n\nfrom models.text_processing import TextProcessing\n\n# importacao do dataset do IMDb\n# file_train = pd.read_csv(\n# 'C:\\\\Users\\\\anaju\\\\PycharmProjects\\\\SentimentAnalysis-MovieReviews\\\\datasets\\\\imdb-train.csv'\n# ).drop(\"PhraseId\", axis=1)\n# file_test = pd.read_csv(\n# 'C:\\\\Users\\\\anaju\\\\PycharmProjects\\\\SentimentAnalysis-MovieReviews\\\\datasets\\\\imdb-test-labelled.csv'\n# ).drop(\"PhraseId\", axis=1)\n# dataset = file_train.append(file_test, ignore_index=True)\n\n# importacao do dataset do Rotten Tomatoes\ndataset = pd.read_csv(\n 'C:\\\\Users\\\\anaju\\\\PycharmProjects\\\\SentimentAnalysis-MovieReviews\\\\datasets\\\\rotten-tomatoes.tsv',\n sep='\\t', encoding='ISO-8859–1'\n)\ndataset = dataset.drop(['id', 'rating', 'critic', 'top_critic', 'publisher', 'date'], axis=1)\ndataset.dropna(inplace=True)\ndataset = dataset.reset_index(drop=True)\ndataset['fresh'] = dataset['fresh'].replace({'rotten': 0, 'fresh': 1})\n\n# esse codigo sera executado 10x, entao usarei essa variavel como seed\niteracao = 0\n\n# embaralhar dataset (pegar 10k exemplos apos o shuffle)\ndataset = dataset.sample(n=10000, random_state=iteracao).reset_index(drop=True)\n\n# processamento do texto\ncorpus = []\nprocessor = TextProcessing(reduction='S')\n# processor = TextProcessing(reduction='L')\n\nprint('Processando os textos...')\nfor sentence in range(len(dataset)):\n # processed_sentence = processor.process_text(dataset['Phrase'][sentence]) # IMDb\n processed_sentence = processor.process_text(dataset['review'][sentence]) # Rotten Tomatoes\n corpus.append(processed_sentence)\n\n# representacao em 
vetores e obtencao das classes\nprint('Gerando o TF-IDF...')\ntf_idf = processor.generate_tfidf(corpus)\nclasses = dataset.iloc[:, -1].values\n\n# k-Fold cross validation\nk_fold = KFold(n_splits=10, random_state=iteracao, shuffle=True)\naux_accuracy = 0\naux_f1_score = 0\naux_precision = 0\naux_recall = 0\nconf_matrices = np.zeros((2, 2))\ni = 1\n\n# treino, teste e avaliacao\nprint('Iniciando o k-Fold...')\nfor train_index, test_index in k_fold.split(tf_idf):\n x_train, x_test = tf_idf[train_index], tf_idf[test_index]\n y_train, y_test = classes[train_index], classes[test_index]\n\n # treino do modelo\n print(f'Gerando o Modelo {i}...')\n classifier = MLPClassifier(hidden_layer_sizes=50, max_iter=200, learning_rate_init=0.01).fit(x_train, y_train)\n\n # classificando o conjunto de teste\n y_pred = classifier.predict(x_test)\n\n # metricas de desempenho\n aux_accuracy += accuracy_score(y_test, y_pred)\n aux_f1_score += f1_score(y_test, y_pred)\n aux_precision += precision_score(y_test, y_pred)\n aux_recall += recall_score(y_test, y_pred)\n conf_matrices += np.asarray(confusion_matrix(y_test, y_pred))\n\n print(f'Modelo {i} finalizado e avaliado.')\n i += 1\n\n# resultados\nprint(f'\\nITERATION #{iteracao} -----------------------')\nprint(f'Accuracy = {aux_accuracy / k_fold.n_splits}')\nprint(f'F1 Score = {aux_f1_score / k_fold.n_splits}')\nprint(f'Precision = {aux_precision / k_fold.n_splits}')\nprint(f'Recall = {aux_recall / k_fold.n_splits}')\nprint(f'Examples x Attributes = {tf_idf.shape}')\nprint(f'Confusion Matrix = \\n{np.array(list(conf_matrices))}')\n","sub_path":"models/06-2-mlp-tfidf.py","file_name":"06-2-mlp-tfidf.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"622967606","text":"from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor\nfrom vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor\nfrom 
vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor\nfrom vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor\nfrom vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor\nimport cv2\nimport sys\n\nnet_type = \"mb1-ssd\"\nmodel_path = \"models/mobilenet-v1-ssd-mp-0_675.pth\"\nlabel_path = \"models/voc-model-labels.txt\"\n\nclass_names = [name.strip() for name in open(label_path).readlines()]\nnum_classes = len(class_names)\n\nif net_type == 'vgg16-ssd':\n net = create_vgg_ssd(len(class_names), is_test=True)\nelif net_type == 'mb1-ssd':\n net = create_mobilenetv1_ssd(len(class_names), is_test=True)\nelif net_type == 'mb1-ssd-lite':\n net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)\nelif net_type == 'mb2-ssd-lite':\n net = create_mobilenetv2_ssd_lite(len(class_names), is_test=True)\nelif net_type == 'sq-ssd-lite':\n net = create_squeezenet_ssd_lite(len(class_names), is_test=True)\nelse:\n print(\"The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.\")\n sys.exit(1)\nnet.load(model_path)\n\nif net_type == 'vgg16-ssd':\n predictor = create_vgg_ssd_predictor(net, candidate_size=200)\nelif net_type == 'mb1-ssd':\n predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)\nelif net_type == 'mb1-ssd-lite':\n predictor = create_mobilenetv1_ssd_lite_predictor(net, candidate_size=200)\nelif net_type == 'mb2-ssd-lite':\n predictor = create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200)\nelif net_type == 'sq-ssd-lite':\n predictor = create_squeezenet_ssd_lite_predictor(net, candidate_size=200)\nelse:\n print(\"The net type is wrong. 
It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.\")\n sys.exit(1)\n\nTEST_IMAGE = \"pix/cat1.jpg\"\n\n\norig_image = cv2.imread(TEST_IMAGE)\n\nimage = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)\nboxes, labels, probs = predictor.predict(image, 10, 0.4)\nprint(f\"Detect Objects: {labels.size(0)}\")\nfor i in range(boxes.size(0)):\n box = boxes[i, :]\n label = f\"{class_names[labels[i]]}: {probs[i]:.2f}\"\n boxes = f\"({box[0]},{box[1]}) --> ({box[2]},{box[3]})\"\n print(label)\n print(boxes)","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"306890932","text":"from StdSuites import string\nfrom string import ascii_letters\n\ntext = open('tweet.txt', 'r').read().split()\n\nfor line in text:\n for word in line:\n if word in ascii_letters or word in string.digits:\n if line in text:\n text.remove(line)\n\nf = open('formatted_text.txt', 'w')\nfor x in text:\n f.write(str(text) + '¥n')\n\nf.close()\n","sub_path":"tweet_formatter.py","file_name":"tweet_formatter.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"7176375","text":"import random\nfrom time import sleep\n\n## This is how we initially created the deck ##\n\"\"\"\nCLUB = \"\\u2663\"\nHEART = \"\\u2665\"\nDIAMOND = \"\\u2666\"\nSPADE = \"\\u2660\"\nsuits = ['\\u2663', '\\u2665', '\\u2666', '\\u2660']*13\nvalues = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']*4\ntrial_deck = {}\nfor i in range(1, 53):\n\ttrial_deck[i] = {\n\t\t\t\t\t\t'card': values[i-1] + ' ' + suits[i-1],\n\t\t\t\t\t\t'actual_value': []\n\t\t\t\t\t}\n\"\"\"\n\n# Dictionary for a deck of 52 cards\nDECK_DICT = {\n\t\t\t\t1: {'card': 'A ♣', 'value': [1, 11]},\n\t\t\t\t2: {'card': '2 ♥', 'value': [2]},\n\t\t\t\t3: {'card': '3 ♦', 'value': [3]},\n\t\t\t\t4: {'card': '4 ♠', 'value': 
[4]},\n\t\t\t\t5: {'card': '5 ♣', 'value': [5]},\n\t\t\t\t6: {'card': '6 ♥', 'value': [6]},\n\t\t\t\t7: {'card': '7 ♦', 'value': [7]},\n\t\t\t\t8: {'card': '8 ♠', 'value': [8]},\n\t\t\t\t9: {'card': '9 ♣', 'value': [9]},\n\t\t\t\t10: {'card': '10 ♥', 'value': [10]},\n\t\t\t\t11: {'card': 'J ♦', 'value': [10]},\n\t\t\t\t12: {'card': 'Q ♠', 'value': [10]},\n\t\t\t\t13: {'card': 'K ♣', 'value': [10]},\n\t\t\t\t14: {'card': 'A ♥', 'value': [1, 11]},\n\t\t\t\t15: {'card': '2 ♦', 'value': [2]},\n\t\t\t\t16: {'card': '3 ♠', 'value': [3]},\n\t\t\t\t17: {'card': '4 ♣', 'value': [4]},\n\t\t\t\t18: {'card': '5 ♥', 'value': [5]},\n\t\t\t\t19: {'card': '6 ♦', 'value': [6]},\n\t\t\t\t20: {'card': '7 ♠', 'value': [7]},\n\t\t\t\t21: {'card': '8 ♣', 'value': [8]},\n\t\t\t\t22: {'card': '9 ♥', 'value': [9]},\n\t\t\t\t23: {'card': '10 ♦', 'value': [10]},\n\t\t\t\t24: {'card': 'J ♠', 'value': [10]},\n\t\t\t\t25: {'card': 'Q ♣', 'value': [10]},\n\t\t\t\t26: {'card': 'K ♥', 'value': [10]},\n\t\t\t\t27: {'card': 'A ♦', 'value': [1, 11]},\n\t\t\t\t28: {'card': '2 ♠', 'value': [2]},\n\t\t\t\t29: {'card': '3 ♣', 'value': [3]},\n\t\t\t\t30: {'card': '4 ♥', 'value': [4]},\n\t\t\t\t31: {'card': '5 ♦', 'value': [5]},\n\t\t\t\t32: {'card': '6 ♠', 'value': [6]},\n\t\t\t\t33: {'card': '7 ♣', 'value': [7]},\n\t\t\t\t34: {'card': '8 ♥', 'value': [8]},\n\t\t\t\t35: {'card': '9 ♦', 'value': [9]},\n\t\t\t\t36: {'card': '10 ♠', 'value': [10]},\n\t\t\t\t37: {'card': 'J ♣', 'value': [10]},\n\t\t\t\t38: {'card': 'Q ♥', 'value': [10]},\n\t\t\t\t39: {'card': 'K ♦', 'value': [10]},\n\t\t\t\t40: {'card': 'A ♠', 'value': [1, 11]},\n\t\t\t\t41: {'card': '2 ♣', 'value': [2]},\n\t\t\t\t42: {'card': '3 ♥', 'value': [3]},\n\t\t\t\t43: {'card': '4 ♦', 'value': [4]},\n\t\t\t\t44: {'card': '5 ♠', 'value': [5]},\n\t\t\t\t45: {'card': '6 ♣', 'value': [6]},\n\t\t\t\t46: {'card': '7 ♥', 'value': [7]},\n\t\t\t\t47: {'card': '8 ♦', 'value': [8]},\n\t\t\t\t48: {'card': '9 ♠', 'value': [9]},\n\t\t\t\t49: {'card': '10 ♣', 
'value': [10]},\n\t\t\t\t50: {'card': 'J ♥', 'value': [10]},\n\t\t\t\t51: {'card': 'Q ♦', 'value': [10]},\n\t\t\t\t52: {'card': 'K ♠', 'value': [10]}}\n\nclass Player:\n\t\"\"\"Create a player with attributes and methods for player centered gameplay.\n\n\tHas attributes for player's current hand and overall balance. Has methods\n\tfor showing a hand or balance, getting a bet, drawing cards, checking for\n\taces in a hand, computing the value of the current hand, doubling\n\tthe bet, updating the running balance, and resetting the attributesself.\n\n\t\"\"\"\n\thand_value = 0\n\thas_ace = False\n\tbusted = False\n\thand_won = None\n\tbalance = 1000\n\tbet = 0\n\n\tdef __init__(self, name):\n\t\t\"\"\"Initialize Player class. Takes parameter 'name'.\"\"\"\n\t\tself.name = name\n\t\tself.player_hand = []\n\n\tdef show_balance(self):\n\t\t\"\"\"Print the current balance.\"\"\"\n\t\tprint(\"\\n{} currently has ${}.\".format(self.name, self.balance))\n\n\tdef show_hand(self):\n\t\t\"\"\"Print the player current hand.\"\"\"\n\t\tto_print = []\n\t\tfor i in range(len(self.player_hand)):\n\t\t\tto_print.append(self.player_hand[i][0])\n\t\treturn self.name + \": [\" + \"] [\".join(to_print) + \"]\"\n\n\tdef get_wager(self):\n\t\t\"\"\"Get a bet input and add to bet.\"\"\"\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\twager = int(input(\"\\nHow much would you like to bet?\" + \\\n\t\t\t\t\" (Minimum is $10).\" + \"\\n\" + \"Bet: $ \"))\n\t\t\t\tif wager < 10:\n\t\t\t\t\tprint(\"\\n\" + \"The minimum bet is $10. 
Please try again...\" \\\n\t\t\t\t\t+ \"\\n\")\n\t\t\t\t\tsleep(1)\n\t\t\t\telif wager > self.balance:\n\t\t\t\t\tprint(\"\\n\" + \"You cannot bet more money than you have!\" + \\\n\t\t\t\t\t\" You bet ${}, but you only have ${}.\".format(wager, self.balance) + \\\n\t\t\t\t\t\" Please try again...\" + \"\\n\")\n\t\t\t\t\tsleep(1)\n\t\t\t\telse:\n\t\t\t\t\tself.bet += wager\n\t\t\t\t\tprint(\"\\n\" + (\"=\" * 45) + \"\\n\" + \\\n\t\t\t\t\t\t\"{}'s bet for this hand is ${}.\".format(self.name, self.bet) + \\\n\t\t\t\t\t\t\"\\n\" + (\"=\" * 45) + \"\\n\")\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(\"\\n\" + \"Your bet must be an integar number.\" + \\\n\t\t\t\t\"Please try again...\" + \"\\n\")\n\t\t\t\tsleep(1)\n\n\tdef get_card(self):\n\t\t\"\"\"Get a card and append to hand.\"\"\"\n\t\tcard_key = shuffled_cards.give_one_card()\n\t\thand_card = [DECK_DICT[card_key][\"card\"], DECK_DICT[card_key][\"value\"]]\n\t\tself.player_hand.append(hand_card)\n\n\tdef check_if_ace(self):\n\t\t\"\"\"Check last card in hand for ace and set has_ace.\"\"\"\n\t\tif self.player_hand[len(self.player_hand)-1][0][0] == \"A\":\n\t\t\tself.has_ace = True\n\n\tdef set_hand_values(self):\n\t\t\"\"\"Add card value to hand value and ace value if ace is in hand.\"\"\"\n\t\tself.hand_value += self.player_hand[len(self.player_hand)-1][1][0]\n\n\tdef double_down_bet(self):\n\t\t\"\"\"Double player bet.\"\"\"\n\t\tself.bet = int(self.bet * 2)\n\t\treturn self.bet\n\n\tdef update_balance(self):\n\t\t\"\"\"Update running balance.\"\"\"\n\t\tif self.busted == True or self.hand_won\t== False:\n\t\t\tself.balance -= self.bet\n\t\telif self.hand_won == True:\n\t\t\tself.balance += self.bet\n\n\tdef reset_player_attr(self):\n\t\t\"\"\"Reset attributes except balance.\"\"\"\n\t\tself.player_hand = []\n\t\tself.hand_value = 0\n\t\tself.has_ace = False\n\t\tself.bet = 0\n\t\tself.hand_won = None\n\t\tself.busted = False\n\nclass Card_Deck:\n\t\"\"\"Create a deck of cards to be used by players and 
dealerself.\n\n\tCreate new deck as list of integars, shuffle deck list, remove card from\n\tdeck list and return.\n\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.deck = []\n\n\tdef new_deck(self):\n\t\t\"\"\"Create a new card deck list with 6 standard card decks.\"\"\"\n\t\tself.deck = [x for x in range(1,53)] * 6\n\n\tdef shuffle(self):\n\t\t\"\"\"Shuffle the deck list.\"\"\"\n\t\trandom.shuffle(self.deck)\n\n\tdef give_one_card(self):\n\t\t\"\"\"Remove card from deck list and return.\"\"\"\n\t\tcard = self.deck.pop()\n\t\treturn card\n\nclass Dealer():\n\t\"\"\"Create the dealer in Blackjack.\"\"\"\n\tdealer_cards = []\n\n\tdef dealer_get_initial_cards(self):\n\t\t\"\"\"Get dealer's initial two cards.\"\"\"\n\t\tfirst_card = shuffled_cards.give_one_card()\n\t\tsecond_card = shuffled_cards.give_one_card()\n\t\tself.dealer_cards.append(first_card)\n\t\tself.dealer_cards.append(second_card)\n\n\tdef get_dealer_face_up_card(self):\n\t\t\"\"\"Print out only the first of dealer's cards.\"\"\"\n\t\tprint('Dealer:\\t', \"[\", DECK_DICT[self.dealer_cards[0]]['card'], \"]\")\n\n\tdef get_dealer_two_card_sum(self):\n\t\t\"\"\"Get sum of dealer's two cards, and return their total along with\n\t\tTrue or False value for aces.\"\"\"\n\t\ttotal = 0\n\t\tcard_1_ace = dealer.check_if_ace(self.dealer_cards[0])\n\t\tcard_2_ace = dealer.check_if_ace(self.dealer_cards[1])\n\t\tif card_1_ace == False and card_2_ace == False: # neither is an ace\n\t\t\ttotal += DECK_DICT[self.dealer_cards[0]]['value'][0] + DECK_DICT[self.dealer_cards[1]]['value'][0] # sum of both cards\n\t\telif card_1_ace == True and card_2_ace == True: # both are aces - very rare\n\t\t\ttotal = 12\n\t\telif card_1_ace == True: # card 1 is an Ace\n\t\t\ttotal += DECK_DICT[self.dealer_cards[0]]['value'][1] + DECK_DICT[self.dealer_cards[1]]['value'][0] # sum of both cards (where first is Ace = 11)\n\t\telif card_2_ace == True: # card 2 is an Ace\n\t\t\ttotal += DECK_DICT[self.dealer_cards[0]]['value'][0] + 
DECK_DICT[self.dealer_cards[1]]['value'][1] # sum of both cards (where second is Ace = 11)\n\t\treturn total, card_1_ace, card_2_ace\n\n\tdef check_if_ace(self, card):\n\t\t\"\"\"Check if card is an Ace.\"\"\"\n\t\tif len(DECK_DICT[card]['value']) == 2:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef less_than_18(self, total, card_1_ace, card_2_ace):\n\t\t\"\"\"Direct dealer actions if card is a soft 17 or less.\"\"\"\n\t\tif (card_1_ace == False and card_2_ace == False): # make sure no Aces\n\t\t\tsoft_card = False\n\t\t\twhile True:\n\t\t\t\tcurr_card = shuffled_cards.give_one_card() # take card from deck\n\t\t\t\tself.dealer_cards.append(curr_card) # add to dealer's current hand\n\t\t\t\ttemp_total = total + DECK_DICT[curr_card]['value'][0]\n\n\t\t\t\tif dealer.check_if_ace(curr_card) and total < 11: # if new card is an ace and total is less than 11\n\t\t\t\t\ttotal += 11 # ace is 11, we have a soft hand\n\t\t\t\t\tsoft_card = True\n\t\t\t\telif dealer.check_if_ace(curr_card) and total >= 11: # if new card is an ace and total is >= 11\n\t\t\t\t\ttotal += 1 # ace can only be 1, otherwise dealer busts\n\t\t\t\telif soft_card == True and temp_total >= 22: # 'busted' w/ soft hand; not real bust\n\t\t\t\t\ttotal = temp_total - 10\n\t\t\t\t\tsoft_card = False\n\t\t\t\telse:\n\t\t\t\t\ttotal += DECK_DICT[curr_card]['value'][0]\n\n\t\t\t\tdealer.print_dealer_cards(running_total=total, turn=False) # print dealer cards after getting next card\n\n\t\t\t\tif total >= 17:\n\t\t\t\t\tbreak\n\t\t\treturn total\n\t\telse:\t# if ace exists in one/both of dealer's first two cards\n\t\t\tsoft_card = True # assume it is a 'soft hand' (since it is either AA, A2, A3, A4, A5, A6)\n\t\t\twhile True:\n\t\t\t\tcurr_card = shuffled_cards.give_one_card() # take card from deck\n\t\t\t\tself.dealer_cards.append(curr_card) # add to dealer's current hand\n\t\t\t\tcurr_card_value = DECK_DICT[curr_card]['value'][0] # Ace will always = 1 in this case\n\t\t\t\ttemp_total = total + 
curr_card_value\n\n\t\t\t\tif temp_total >= 22 and soft_card: # check if dealer busts with soft hand\n\t\t\t\t\ttotal = temp_total - 10 # we previously assumed Ace is 11, this brings it back down by 10 so A = 1\n\t\t\t\t\tsoft_card = False\n\t\t\t\telif temp_total >= 22 and soft_card == False: # dealer busts\n\t\t\t\t\ttotal = temp_total\n\t\t\t\telse:\n\t\t\t\t\ttotal = temp_total\n\n\t\t\t\tdealer.print_dealer_cards(turn = False, running_total=total) # print dealer cards after getting next card\n\n\t\t\t\tif total >= 17:\n\t\t\t\t\tbreak\n\t\t\treturn total\n\n\n\tdef dealer_cards_check_total(self):\n\t\t\"\"\"Check if total is hard 17, > hard 17, or less than 18.\"\"\"\n\t\ttotal, card_1_ace, card_2_ace = dealer.get_dealer_two_card_sum() # getting sum of first 2 cards in dealer's hand\n\t\tdealer.print_dealer_cards(running_total=total, turn = True) # Initial print out of dealer's 2 cards\n\t\tif total > 17 :\n\t\t\treturn total\n\t\telif total == 17 and card_1_ace == False and card_2_ace == False: # total is hard 17\n\t\t\treturn total\n\t\telif total <= 17: # soft 17 or less\n\t\t\treturn dealer.less_than_18(total, card_1_ace, card_2_ace)\n\n\n\tdef print_dealer_cards(self, running_total, turn):\n\t\t\"\"\"Print the dealer's current hand.\"\"\"\n\t\tif turn == True:\n\t\t\tprint('\\nDealer\\'s Hand:\\n-----------\\n') # header\n\t\t\tfor card in self.dealer_cards:\n\t\t\t\tprint(\"[\", DECK_DICT[card]['card'], \"]\")\n\t\t\t\tturn = False\n\t\t\t\tsleep(0.5)\n\t\t\tprint('Calculating...')\n\t\t\tsleep(0.5)\n\t\t\tprint('\\n\\tTotal: ', running_total)\n\t\t\tsleep(0.5)\n\t\telse:\n\t\t\tprint(\"[\", DECK_DICT[self.dealer_cards[-1]]['card'], \"]\")\n\t\t\tsleep(0.5)\n\t\t\tprint('Calculating...')\n\t\t\tsleep(0.5)\n\t\t\tprint('\\n\\tTotal: ', running_total)\n\t\t\tsleep(0.5)\n\n\tdef dealer_reset_attr(self):\n\t\tself.dealer_cards = []\n\n\tdef dealer_blackjack(self):\n\t\ttotal = 0\n\t\tfor card in self.dealer_cards:\n\t\t\tace = 
self.check_if_ace(card)\n\t\t\tif ace:\n\t\t\t\ttotal += 11\n\t\t\telse:\n\t\t\t\ttotal += DECK_DICT[card]['value'][0]\n\t\treturn total\n\n# Get total players\ndef get_num_players():\n while True:\n try:\n num_players = int(input(\"\\nPlease enter the number of players (1-4): \"))\n\n if not num_players:\n print(\"\\nYou must enter a number of players to play.\\n\")\n sleep(1)\n elif num_players <1:\n print(\"\\nAt least one player is needed to play.\\n\")\n elif num_players >4:\n print(\"\\nThe maximum number of players is 4.\\n\")\n else:\n return num_players\n break\n except:\n print(\"\\nNumber of players must be an integar.\\n\")\n\n# Get a player name\ndef get_player_name():\n\twhile True:\n\t\tuser_name = input(\"\\nPlease enter your name: \")\n\t\tif not user_name:\n\t\t\tprint(\"You must enter a name to play. Please try again...\\n\")\n\t\t\tsleep(1)\n\t\telse:\n\t\t\treturn user_name\n\t\t\tbreak\n\n# Give player one card and print value of hand if player doubles down\ndef double_down(player):\n\tplayer.get_card()\n\tplayer.check_if_ace()\n\tplayer.set_hand_values()\n\tprint(player.show_hand())\n\n\tif player.has_ace and player.busted == False:\n\t\tprint('\\nTotal: {} \\n'.format((player.hand_value+10)))\n\t\tsleep(.5)\n\telse:\n\t\tprint('\\nTotal: {} \\n'.format(player.hand_value))\n\t\tsleep(.5)\n\n# Get final outcome\ndef get_outcome(dealer_final_total, player_final_total, player):\n\tif dealer_final_total > 21:\n\t\tprint(\"{}: \".format(player.name), \"You win!\")\n\t\tprint(player.show_hand(), \" Total Value: \", player_final_total)\n\t\tplayer.hand_won = True\n\telif dealer_final_total < player_final_total:\n\t\tprint(\"{}: \".format(player.name), \"You win!\")\n\t\tprint(player.show_hand(), \" Total Value: \", player_final_total)\n\t\tplayer.hand_won = True\n\telif dealer_final_total > player_final_total:\n\t\tprint(\"{}: \".format(player.name), \"You lost.\")\n\t\tprint(player.show_hand(), \" Total Value: \", 
player_final_total)\n\t\tplayer.hand_won = False\n\telif dealer_final_total == player_final_total:\n\t\tprint(\"{}: \".format(player.name), \"Push!\")\n\t\tprint(player.show_hand(), \" Total Value: \", player_final_total)\n\n# Instructions for gameplay\nrules = \"\"\"\n\t\t\tInstructions:\n\t\t\tThe point of the game is to beat the dealer's hand without\n\t\t\tgoing over 21. You will be dealt 2 cards, as will the dealer\n\t\t\tbut only one of the dealer's cards will be shown. You can choose\n\t\t\tto stand (s) - stop being dealt cards, or hit (h) - continue\n\t\t\tto be dealth cards. All face cards are worth 10, an Ace can be\n\t\t\teither 1 or 11.\n\t\t\tThe Dealer must continue to take cards until they are at a hard 17\n\t\t\tor above (Please Note: Ace + 6 = Soft 17, dealer must hit).\n\n\t\t\tBetting:\n\t\t\tYou start off with $1000 and minimum bet is $10. If you have an\n\t\t\tinitial hand of 10 or 11, you have the option to 'double down (d)',\n\t\t\tdoubling your current bet, and receiving only 1 more card.\n\t\t\tIf you win against the dealer, you double your money; lose, you lose\n\t\t\tyour money, and tie (aka 'push') - keep your money.\n\t\t\tBlackjack pays 3 to 2.\n\t\t\t\"\"\"\n\nlogo = \"\"\"\n888888b. 888 888 d8b 888\n888 \"88b 888 888 Y8P 888\n888 .88P 888 888 888\n8888888K. 888 8888b. .d8888b 888 888 8888 8888b. .d8888b 888 888\n888 \"Y88b 888 \"88b d88P\" 888 .88P \"888 \"88b d88P\" 888 .88P\n888 888 888 .d888888 888 888888K 888 .d888888 888 888888K\n888 d88P 888 888 888 Y88b. 888 \"88b 888 888 888 Y88b. 
888 \"88b\n8888888P\" 888 \"Y888888 \"Y8888P 888 888 888 \"Y888888 \"Y8888P 888 888\n 888\n d88P\n 888P\"\n\"\"\"\n\n# Create an instance of Card_Deck()\nshuffled_cards = Card_Deck()\n\n# Create instance of Dealer()\ndealer = Dealer()\n\nprint(\"\\nWelcome to...\\n\")\nsleep(1)\nprint(logo)\nsleep(2)\nprint(\"\\n\", rules)\nsleep(1)\n\ntotal_players = get_num_players()\nplayer_list = []\n\nfor i in range(total_players):\n player_list.append(Player(get_player_name()))\n\nblackjack_running = True\n\n### START OF GAME/WHILE LOOP ###\nwhile blackjack_running == True:\n\n\t# Get new deck and shuffle\n\tshuffled_cards.new_deck()\n\tshuffled_cards.shuffle()\n\tbusted_players = 0\n\n\tprint()\n\tsleep(.5)\n\n\tfor player in player_list:\n\t\tprint(\"\\n♠ ♦ ♣ ♥ ♠ ♦ ♣ ♥ {}'s bet. ♠ ♦ ♣ ♥ ♠ ♦ ♣ ♥\".format(player.name))\n\t\tsleep(.5)\n\t\t# Reset player hand and quit_list\n\t\tplayer.reset_player_attr()\n\t\tquit_list = []\n\n\t\t# Get player bet\n\t\tplayer.get_wager()\n\t\tsleep(1)\n\n\t# Deal cards to dealer, and show dealer's first card\n\tdealer.dealer_get_initial_cards()\n\n\tfor player in player_list:\n\t\tprint()\n\t\tprint(\"\\n♦ ♣ ♥ ♠ ♦ ♣ ♥ {}'s turn. 
♦ ♣ ♥ ♠ ♦ ♣ ♥ \".format(player.name))\n\t\tsleep(1)\n\n\t\tprint()\n\t\tdealer.get_dealer_face_up_card()\n\t\tprint()\n\n\t\t# Deal a card to the player, check for ace,\n\t\t# increment value of hand, repeat for 2nd card\n\t\tplayer.get_card()\n\t\tplayer.check_if_ace()\n\t\tplayer.set_hand_values()\n\t\tplayer.get_card()\n\t\tplayer.check_if_ace()\n\t\tplayer.set_hand_values()\n\t\tsleep(1)\n\n\t\t#Show the player's hand and current hand value:\n\t\tprint(player.show_hand())\n\n\t\tif player.has_ace:\n\t\t\tprint('Total: {} or {}\\n'.format(player.hand_value, (player.hand_value + 10)))\n\t\telse:\n\t\t\tprint('Total: {}\\n'.format(player.hand_value))\n\t\t\tsleep(1)\n\n\t\t# Set variables for first turn blackjack\n\t\tturn_1 = True\n\t\tdealer_21 = False\n\n\t\t# Check for blackjack first turn\n\t\twhile True:\n\t\t\tif (player.hand_value + 10) == 21 and player.has_ace == True and turn_1:\n\t\t\t\tprint('\\n\\nBLACKJACK!\\n')\n\t\t\t\tplayer.hand_value += 10\n\t\t\t\tsleep(1)\n\t\t\t\tbreak\n\t\t\tif dealer.dealer_blackjack() == 21:\n\t\t\t\tdealer_21 = True\n\t\t\t\tbreak\n\n\t\t\t\t\t# Get user input for gameplay: hit, stand or double down\n\t\t\tif (player.hand_value == 10 or player.hand_value == 11) and turn_1 and (player.bet*2) < player.balance:\n\t\t\t\ths_input = input('Would you like to hit (h), stand (s), or double down (d)?\\n' )\n\t\t\telse:\n\t\t\t\ths_input = input('Would you like to hit (h) or stand (s)?\\n')\n\n\t\t\tif hs_input.lower() in ['d', 'dd', 'double', 'double down'] and turn_1:\n\t\t\t\tplayer.double_down_bet()\n\t\t\t\tsleep(.5)\n\t\t\t\tprint()\n\t\t\t\tdouble_down(player)\n\t\t\t\tif player.has_ace and (player.hand_value +10) <=21:\n\t\t\t\t\tplayer.hand_value +=10\n\t\t\t\telse:\n\t\t\t\t\tif player.hand_value > 21:\n\t\t\t\t\t\tprint('You busted!\\n')\n\t\t\t\t\t\tsleep(1)\n\t\t\t\t\t\tplayer.busted = True\n\t\t\t\t\t\tbusted_players += 1\n\t\t\t\t\t\tturn_1 = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tturn_1 = 
False\n\t\t\t\tbreak\n\t\t\telif hs_input.lower() in ['s', 'stand']:\n\t\t\t\tif (player.hand_value + 10) <= 21 and player.has_ace == True:\n\t\t\t\t\tplayer.hand_value += 10\n\t\t\t\tbreak\n\t\t\telif hs_input.lower() in ['h', 'hit']:\n\t\t\t\tplayer.get_card()\n\t\t\t\tplayer.check_if_ace()\n\t\t\t\tplayer.set_hand_values()\n\t\t\t\tprint(\"\\n\" + player.show_hand())\n\t\t\t\tturn_1 = False\n\t\t\t\tsleep(1)\n\n\t\t\t\tif (player.hand_value + 10) > 21 and player.hand_value <= 21:\n\t\t\t\t\tprint('Total: ', player.hand_value)\n\t\t\t\telif player.hand_value > 21:\n\t\t\t\t\tprint('Total: ', player.hand_value)\n\t\t\t\t\tsleep(1)\n\t\t\t\t\tprint('You busted!\\n')\n\t\t\t\t\tsleep(1)\n\t\t\t\t\tplayer.busted = True\n\t\t\t\t\tbusted_players += 1\n\t\t\t\t\tbreak\n\t\t\t\telif (player.hand_value + 10) <= 21 and player.has_ace:\n\t\t\t\t\tprint('Total: {} or {}'.format(player.hand_value, (player.hand_value + 10)))\n\t\t\t\t\tsleep(1)\n\t\t\t\telse:\n\t\t\t\t\tprint('Total: ', player.hand_value)\n\t\t\t\t\tsleep(1)\n\n\t\t\telse:\n\t\t\t\tsleep(1)\n\t\t\t\tprint('Please only enter hit (h) or stand (s)')\n\t\t\t\tsleep(1)\n\t\t\t\tcontinue\n\n\tif busted_players < total_players:\n\t\tdealer_final_total= dealer.dealer_cards_check_total()\n\telse:\n\t\tdealer_final_total = 20\n\n\tprint('\\n', '='*15, '\\n FINAL OUTCOME\\n', '='*15, \"\\n\")\n\tsleep(1)\n\n\tdealer_hand_print = []\n\tfor i in range(len(dealer.dealer_cards)):\n\t\tdealer_hand_print.append(DECK_DICT[dealer.dealer_cards[i]]['card'])\n\tprint(\"Dealer's final hand: \" + \"[\" + \"] [\".join(dealer_hand_print) + \"]\")\n\n\tif busted_players < total_players:\n\t\tprint(\"Dealer's hand value: \", dealer_final_total)\n\n\tif dealer_final_total > 21:\n\t\tprint('Dealer busts!\\n')\n\t\tsleep(1)\n\telif dealer_21 == True:\n\t\tprint(\"\\nDealer has Blackjack!\\n\")\n\t\tsleep(1)\n\n\tprint(\"=\"*50, \"\\n\")\n\n\tfor player in player_list:\n\t\tif player.hand_value == 21 and len(player.player_hand) == 
2:\n\t\t\tprint(\"{}: \".format(player.name), \"BLACKJACK!\")\n\t\t\tprint(player.show_hand(), \" Total Value: \", (player.hand_value))\n\t\t\tplayer.hand_won = True\n\t\t\tplayer.bet = int(player.bet * 1.5)\n\t\telif dealer_21 == True:\n\t\t\tprint(\"{}: \".format(player.name), \"You lost.\")\n\t\t\tprint(player.show_hand(), \" Total Value: \", player.hand_value)\n\t\t\tplayer.hand_won = False\n\t\telif player.busted == False:\n\t\t\tget_outcome(dealer_final_total, player.hand_value, player)\n\t\telse:\n\t\t\tprint(\"{}: \".format(player.name), \"You busted.\")\n\t\t\tprint(player.show_hand(), \" Total Value: \", player.hand_value)\n\n\t\t# Adjust player balance\n\t\tplayer.update_balance()\n\t\tplayer.show_balance()\n\t\tprint(\"=\"*50)\n\t\tprint()\n\t\tsleep(1)\n\n\t# Reset dealer attributes\n\tdealer.dealer_reset_attr()\n\n\tquit_list = []\n\t# Ask if player would like to play again\n\tfor player in player_list:\n\t\twhile True:\n\t\t\tif player.balance < 10:\n\t\t\t\tprint(\"\\n{}, your balance is \", player.balance, \\\n\t\t\t\t\" dollars which is below the minimum bet.\" + \\\n\t\t\t\t\"You do not have enough money to play again.\".format(player.name))\n\t\t\t\tquit_list.append(player)\n\t\t\t\tbreak\n\n\t\t\tplay_again = input(\"\\n{}, would you like to play again (yes/no)?\".format(player.name))\n\t\t\tif play_again.lower() in [\"y\", \"yes\"]:\n\t\t\t\tsleep(1)\n\t\t\t\tbreak\n\t\t\telif play_again.lower() in [\"n\", \"no\"]:\n\t\t\t\tprint(\"\\nThanks for playing!\")\n\t\t\t\tquit_list.append(player)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"\\nPlease only enter 'yes' or 'no'\\n\")\n\t\t\t\tsleep(1)\n\n\tfor q_player in quit_list:\n\t\tplayer_list.remove(q_player)\n\n\tif len(player_list) <1:\n\t\tblackjack_running = False\n","sub_path":"Final_Project_Multiplayer.py","file_name":"Final_Project_Multiplayer.py","file_ext":"py","file_size_in_byte":20769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"18762768","text":"# -*- encoding: utf-8 -*-\nimport asyncio\nimport serial_asyncio\nimport logging\nfrom enocean_async.communicators.communicator import Server\nfrom enocean_async.protocol.packet import Packet, RadioPacket\nfrom enocean_async.communicators.utils import Serial_to_Tcp\nfrom functools import partial\nfrom aioconsole import ainput\n\nclass TCPCommunicator():\n logger = logging.getLogger('enocean_async.communicators.TCPCommunicator')\n def __init__(self, host='', port=9637):\n self.host = host\n self.port = port\n self.loop = None\n self.server = None\n \n\n\n async def packet(self, recived):\n '''Override this async method to decide what to do with the packet recived by the TCPCommunicator'''\n self.logger.debug(recived)\n return \n\n\n async def close(self):\n await ainput(\"Press a button to close the Server\\n\")\n self.server.close()\n self.loop.stop()\n \n\n def start(self):\n '''This method will start the Enocean gateway in another asynchronous thread.\\n\n Call it to start the TCPCommunication'''\n self.loop = asyncio.get_event_loop()\n self.logger.info('TCPCommunicator started')\n Gateway = partial (Server, self.packet, self.loop, self.logger)\n self.server = self.loop.create_server(Gateway,host=self.host, port= self.port)\n self.loop.create_task(self.server)\n self.loop.create_task(self.close())\n self.loop.run_forever()\n self.loop.close()\n \n \n\n","sub_path":"enocean_async/communicators/tcpcommunicator.py","file_name":"tcpcommunicator.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"350812585","text":"import os\nimport cv2\nimport tqdm\nimport argparse\nimport multiprocessing as mp\nimport numpy as np\nfrom detectron2.config import get_cfg\nimport predictor\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_directory\", help=\"Path to input directory.\")\n 
parser.add_argument(\"output_directory\", help=\"Path to output directory.\")\n parser.add_argument(\"-resize\", type=int, default=512, help=\"Resolution to resize to.\")\n parser.add_argument(\"-padding\", type=int, default=16, help=\"Image padding around bbox.\")\n parser.add_argument(\"-gpu\", \"--gpu\", type=int,\n help=\"Gpu Number.\", default=2)\n\n return parser.parse_args()\n\n\ndef setup_cfg():\n # load config from file and command-line arguments\n cfg = get_cfg()\n\n config_file = \"./detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml\"\n cfg.merge_from_file(config_file)\n\n # opts = [\"MODEL.WEIGHTS\",\n # \"detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl\"]\n\n opts = [\"MODEL.WEIGHTS\", \"detectron_model.pkl\"]\n cfg.merge_from_list(opts)\n # Set score_threshold for builtin models\n confidence_threshold = 0.5\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold\n cfg.freeze()\n return cfg\n\n\ndef main():\n args = parse_args()\n cfg = setup_cfg()\n\n mp.set_start_method(\"spawn\", force=True)\n\n output_directory = args.output_directory\n\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n\n image_dir = os.path.join(output_directory, \"images\")\n if not os.path.exists(image_dir):\n os.mkdir(image_dir)\n\n inferencer = predictor.VisualizationDemo(cfg, parallel=True)\n item_counter = 0\n\n resolution = args.resize\n\n video_file = cv2.VideoWriter(\n filename=os.path.join(output_directory, \"video.mkv\"),\n # some installation of opencv may not support x264 (due to its license),\n # you can try other format (e.g. 
MPEG)\n fourcc=cv2.VideoWriter_fourcc(*\"x264\"),\n fps=float(60),\n frameSize=(resolution, resolution),\n isColor=True,\n )\n\n inputs = []\n inputs = list(os.path.join(args.input_directory, file) for file in os.listdir(\n args.input_directory) if os.path.exists(os.path.join(args.input_directory, file) and \"mp4\" in os.path.join(args.input_directory, file)))\n\n print(\"Processing the following inputs:\\n%s\" % \" \" + \"\\n \".join(inputs))\n\n for input_video in inputs:\n print(\"Processing:\", input_video)\n video = cv2.VideoCapture(input_video)\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n for vis_frame in tqdm.tqdm(inferencer.run_on_video(video, width, height, resolution, args.padding), total=num_frames):\n if vis_frame is not None:\n vis_frame.save(\n os.path.join(image_dir, \"%s.png\" % item_counter))\n video_file.write(np.asarray(vis_frame)[:, :, ::-1])\n item_counter += 1\n\n video_file.release()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"617707554","text":"import json\nimport os,re\nimport time\nimport asyncio\nfrom asammdf import MDF\nimport asyncio\nfrom multiprocessing import Pool\nimport numpy\nimport shutil\nimport pymysql\ndef get_FileSize(filePath):\n fsize = os.path.getsize(filePath)\n fsize = fsize/float(1024 * 1024)\n return round(fsize, 2)\ntime_1 = [0.5*x for x in range(1,21)]\ntime_1.append('10<')\n# 没有数据的文件的名称\nif os.path.exists('./loser_file.json'):\n with open('./loser_file.json', 'r') as f:\n loser_file_data = json.load(f)\nelse:\n loser_file_data = []\n\nif os.path.exists('./result_data.json'):\n with open('./result_data.json', 'r') as f:\n result_dict = json.load(f)\nelse:\n result_dict = {\n 
'0~40':{'left':[0,[]],'right':[0,[]]},\n '40-60':{'left':[0,[]],'right':[0,[]]},\n '60-80':{'left':[0,[]],'right':[0,[]]},\n '80-100':{'left':[0,[]],'right':[0,[]]}\n }\nif os.path.exists('./new_result_data.json'):\n with open('./new_result_data.json', 'r') as f:\n new_result_dict = json.load(f)\nelse:\n new_result_dict = {\n '0~40':[0 for x in time_1],\n '40-60':[0 for x in time_1],\n '60-80':[0 for x in time_1],\n '80-100':[0 for x in time_1]\n }\nimport copy\nif os.path.exists('./mysql_data.json'):\n with open('./mysql_data.json', 'r') as f:\n mysql_data = json.load(f)\n mysql_data_1 = copy.deepcopy(mysql_data)\nelse:\n mysql_data = []\n mysql_data_1 = copy.deepcopy(mysql_data)\nfrom decimal import Decimal\ndef index_number(li,defaultnumber):\n select = Decimal(str(defaultnumber)) - Decimal(str(li[0]))\n index = 0\n if defaultnumber < (li[-2] + 0.5):\n for i in range(1, len(li) - 2):\n select2 = Decimal(str(defaultnumber)) - Decimal(str(li[i]))\n if (abs(select) > abs(select2)):\n select = select2\n index = i\n else:\n index = len(li) -1\n return index\nif os.path.exists('./Pre_prepared_data.json'):\n with open('./mysql_data.json', 'r') as f:\n Pre_prepared_data = json.load(f)\nelse:\n Pre_prepared_data = []\nLine_pressing = []\n# 总里程和时间\nall_odometer = 0\nall_time = 0\n\n\nmy_list = []\n\n\n\n\nclass openMDF:\n def __init__(self, path, roadtype=0):\n self.filename = path.split('/')[-1].split('.')[0]\n self.roadType = roadtype\n if self.filename not in loser_file_data:\n self.open_file(path)\n self.task_1()\n def open_file(self, path):\n print('开启mf4文件')\n self.mf = MDF(path)\n def close_file(self):\n print('关闭mf4文件')\n self.mf.close()\n\n # 统计转向灯开启时间\n def task_1(self):\n signal_left = '_g_PL_AD_fw_PL_AD_FCT_RunnableSppHmi_RunnableSppHmi_m_sppHmiInput_out_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_dirIndL'\n signal_right = 
'_g_PL_AD_fw_PL_AD_FCT_RunnableSppHmi_RunnableSppHmi_m_sppHmiInput_out_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_dirIndR'\n signal_speed = '_g_PL_AD_fw_PL_AD_FCT_RunnableFsm_RunnableFsm._m_fsmController._m_fip._m_displayedSpeedCalculator._m_displaySpeed._m_value'\n # 轮端转角\n signal_RoadWheelAngle = '_g_PL_AD_fw_VMC_VMC_FW_MvpVse_VseSes_VseSes_m_portVehicleStateEstimation_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_estimation_RoadWheelAngle_Front._m_value'\n # 左航向角\n Left_heading_angle_signal = '_g_GAC_A18_NET_net_apl_g_netRunnable_rbCanRxLD_serializer_m_CNetVFC_Line_SenderPort_1_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_vfc_lineInformation._0_._VFC_Line_HeadingAngle'\n # 左侧车道线\n Left_lane_line_signal = '_g_GAC_A18_NET_net_apl_g_netRunnable_rbCanRxLD_serializer_m_CNetVFC_Line_SenderPort_1_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_vfc_lineInformation._0_._VFC_Line_Dy'\n # 右航向角\n Right_heading_angle_signal = '_g_GAC_A18_NET_net_apl_g_netRunnable_rbCanRxLD_serializer_m_CNetVFC_Line_SenderPort_1_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_vfc_lineInformation._1_._VFC_Line_HeadingAngle'\n # 右侧车道线\n Right_lane_line_signal = '_g_GAC_A18_NET_net_apl_g_netRunnable_rbCanRxLD_serializer_m_CNetVFC_Line_SenderPort_1_local.TChangeableMemPool._._._m_arrayPool._0_._elem._m_vfc_lineInformation._1_._VFC_Line_Dy'\n # vxvRef\n vxvRef_signal = '_g_PL_AD_fw_DACoreCyclic_HV_PerPmeRunnable_PerPmeRunnable_m_pmePort_out_local.TChangeableMemPool._._._m_arrayPool._0_._elem._vxvRef_sw'\n odometer_signal = 'ICM_TotalOdometer'\n\n\n # Line ID 255\n left_line_signal = 'VFC_Line01_LineID'\n right_line_signal = 'VFC_Line02_LineID'\n\n if self.mf:\n try:\n left = self.mf.get(signal_left)\n right = self.mf.get(signal_right)\n speed = self.mf.get(signal_speed)\n RoadWheelAngle = self.mf.get(signal_RoadWheelAngle).samples.tolist()\n Left_heading_angle = self.mf.get(Left_heading_angle_signal).samples.tolist()\n Left_lane_line = 
self.mf.get(Left_lane_line_signal).samples.tolist()\n print(len(Left_lane_line))\n print(len(left.samples.tolist()))\n Right_heading_angle = self.mf.get(Right_heading_angle_signal).samples.tolist()\n Right_lane_line = self.mf.get(Right_lane_line_signal).samples.tolist()\n vxvRef = self.mf.get(vxvRef_signal)\n odometer = self.mf.get(odometer_signal)\n left_line = self.mf.get(left_line_signal).samples.tolist()\n right_line = self.mf.get(right_line_signal).samples.tolist()\n except Exception as e:\n print(e)\n loser_file_data.append(self.filename)\n else:\n def func(data):\n num = data.samples.tolist()[0]\n index_list = [0]\n for index in range(len(data.samples.tolist())):\n if num != data.samples.tolist()[index]:\n num = data.samples.tolist()[index]\n index_list.append(index)\n if len(data.samples.tolist())-1 not in index_list:\n index_list.append(len(data.samples.tolist())-1)\n return index_list\n # 处理数据长度问题 speed, vxvRef\n st = speed.timestamps.tolist()\n start_time = 0\n stop_time = -2\n # 计算差值\n left_data = []\n left_time = left.timestamps.tolist()\n\n old_spd = speed.samples.tolist()\n old_vxv = vxvRef.samples.tolist()\n old_spd_time = speed.timestamps.tolist()\n old_vxvRef = vxvRef.samples.tolist()\n new_spd = ['' for x in left_time]\n new_vxvRef = ['' for x in left_time]\n for tt in range(len(old_spd_time)):\n this_index = index_number(left_time, old_spd_time[tt])\n new_spd[this_index] = old_spd[tt]\n new_vxvRef[this_index] = old_vxvRef[tt]\n for i in range(len(new_spd)):\n if new_spd[i] == '':\n if i==0:\n if new_spd[i+1] != 0:\n new_spd[i] = new_spd[i+1]\n else:\n new_spd[i] = 0\n else:\n new_spd[i] = new_spd[i-1]\n if new_vxvRef[i] == '':\n if i==0:\n if new_vxvRef[i+1] != 0:\n new_vxvRef[i] = new_vxvRef[i+1]\n else:\n new_vxvRef[i] = 0\n else:\n new_vxvRef[i] = new_vxvRef[i-1]\n left_index_list = func(left)\n right_index_list = func(right)\n if left_index_list == [] and right_index_list == []:\n self.close_file()\n return False\n new_data = {\n 'left': 
[],\n 'left_time': [],\n 'right': [],\n 'right_time': [],\n 'spd': [],\n 'wheel': [],\n 'Left_heading_angle': [],\n 'left_lane': [],\n 'Right_heading_angle': [],\n 'right_lane': [],\n 'vxvRef': [],\n 'left_line': [],\n 'right_line': []\n }\n # \n for index in range(len(left_index_list)-1):\n new_data['left'].append(left.samples.tolist()[left_index_list[index]:left_index_list[index+1]])\n new_data['left_time'].append(left.timestamps.tolist()[left_index_list[index]:left_index_list[index+1]])\n new_data['right'].append([])\n new_data['right_time'].append([])\n new_data['spd'].append(new_spd[left_index_list[index]:left_index_list[index+1]])\n new_data['wheel'].append(RoadWheelAngle[left_index_list[index]:left_index_list[index+1]])\n\n new_data['Left_heading_angle'].append(Left_heading_angle[left_index_list[index]:left_index_list[index+1]])\n new_data['left_lane'].append(Left_lane_line[left_index_list[index]:left_index_list[index+1]])\n new_data['Right_heading_angle'].append(Right_heading_angle[left_index_list[index]:left_index_list[index+1]])\n new_data['right_lane'].append(Right_lane_line[left_index_list[index]:left_index_list[index+1]])\n new_data['vxvRef'].append(new_vxvRef[left_index_list[index]:left_index_list[index+1]])\n new_data['left_line'].append(left_line[left_index_list[index]:left_index_list[index+1]])\n new_data['right_line'].append([])\n for index in range(len(right_index_list)-1):\n new_data['left'].append([])\n new_data['left_time'].append([])\n new_data['right'].append(right.samples.tolist()[right_index_list[index]:right_index_list[index+1]])\n new_data['right_time'].append(right.timestamps.tolist()[right_index_list[index]:right_index_list[index+1]])\n new_data['spd'].append(new_spd[right_index_list[index]:right_index_list[index+1]])\n new_data['wheel'].append(RoadWheelAngle[right_index_list[index]:right_index_list[index+1]])\n\n new_data['Left_heading_angle'].append(Left_heading_angle[right_index_list[index]:right_index_list[index+1]])\n 
new_data['left_lane'].append(Left_lane_line[right_index_list[index]:right_index_list[index+1]])\n new_data['Right_heading_angle'].append(Right_heading_angle[right_index_list[index]:right_index_list[index+1]])\n new_data['right_lane'].append(Right_lane_line[right_index_list[index]:right_index_list[index+1]])\n new_data['vxvRef'].append(new_vxvRef[right_index_list[index]:right_index_list[index+1]])\n new_data['left_line'].append([])\n new_data['right_line'].append(right_line[right_index_list[index]:right_index_list[index+1]])\n # for key,value in new_data.items():\n # print(len(value))\n # 车道线判断逻辑函数\n def func2(i,direction,speed,max_spd,min_spd):\n print(direction)\n return_data = 0\n lane_list = []\n overrun = []\n for lane_line in range(len(new_data[f'{direction}_lane'][i])-1):\n if new_data[f'{direction}_lane'][i][lane_line] == 2048:\n print('无信号')\n continue\n this_num = new_data[f'{direction}_lane'][i][lane_line] * 0.015625 - 32\n next_num = new_data[f'{direction}_lane'][i][lane_line + 1] * 0.015625 - 32\n print(next_num - this_num)\n if abs(next_num - this_num) >= 2.4:\n lane_list.append(lane_line)\n print(lane_list)\n if lane_list != []:\n if len(lane_list) > 1:\n print('多次变道, 不做计算')\n else:\n print(f'单次变道{direction}')\n new_lane = new_data[f'{direction}_lane'][i][lane_list[0]+1:]\n this_len = int(len(new_lane)/5)\n new_list = [sum(new_lane[:this_len])/len(new_lane[:this_len]),sum(new_lane[this_len:this_len*2])/len(new_lane[this_len:this_len*2]),sum(new_lane[this_len*2:this_len*3])/len(new_lane[this_len*2:this_len*3]),sum(new_lane[this_len*3:this_len*4])/len(new_lane[this_len*3:this_len*4]),sum(new_lane[this_len*4:])/len(new_lane[this_len*4:])]\n if new_list[0] > new_list[1] > new_list[2] > new_list[3] > new_list[4]:\n # 变道到车轮压线时间\n Time_from_lighting_to_pressing_line = new_data[f'{direction}_time'][i][lane_list[0]] - new_data[f'{direction}_time'][i][0]\n Line_pressing_data = [self.filename,speed,max_spd,min_spd,Time_from_lighting_to_pressing_line, 
self.roadType]\n if Line_pressing_data not in Line_pressing:\n Line_pressing.append(Line_pressing_data)\n return_data = 1\n else:\n this_len = int(len(new_data[f'{direction}_lane'][i])/5)\n new_list = [\n sum(new_data[f'{direction}_lane'][i][:this_len])/len(new_data[f'{direction}_lane'][i][:this_len]),\n sum(new_data[f'{direction}_lane'][i][this_len:this_len*2])/len(new_data[f'{direction}_lane'][i][this_len:this_len*2]),\n sum(new_data[f'{direction}_lane'][i][this_len*2:this_len*3])/len(new_data[f'{direction}_lane'][i][this_len*2:this_len*3]),\n sum(new_data[f'{direction}_lane'][i][this_len*3:this_len*4])/len(new_data[f'{direction}_lane'][i][this_len*3:this_len*4]),\n sum(new_data[f'{direction}_lane'][i][this_len*4:])/len(new_data[f'{direction}_lane'][i][this_len*4:])]\n if min(new_list) == new_list[-1] and new_data[f'{direction}_lane'][i] != new_data[f'{direction}_lane'][-1]:\n this_lane_list = new_data[f'{direction}_lane'][i][this_len*4:]\n if len(new_data[f'{direction}_lane'][i+1]) >= 200:\n this_lane_list.extend(new_data[f'{direction}_lane'][i+1][:200])\n else:\n this_lane_list.extend(new_data[f'{direction}_lane'][i+1])\n for lane_line in range(len(this_lane_list)-1):\n if this_lane_list[lane_line] == 2048:\n print('无信号')\n continue\n this_num = this_lane_list[lane_line] * 0.015625 - 32\n next_num = this_lane_list[lane_line + 1] * 0.015625 - 32\n print(next_num - this_num)\n if abs(next_num - this_num) >= 2.4:\n lane_list.append(lane_line)\n if lane_list != []:\n if len(lane_list) > 1:\n print('多次变道, 不做计算')\n else:\n print(f'单次变道{direction}')\n new_lane = new_data[f'{direction}_lane'][i][lane_list[0]+1:]\n this_len = int(len(new_lane)/5)\n new_list = 
[sum(new_lane[:this_len])/len(new_lane[:this_len]),sum(new_lane[this_len:this_len*2])/len(new_lane[this_len:this_len*2]),sum(new_lane[this_len*2:this_len*3])/len(new_lane[this_len*2:this_len*3]),sum(new_lane[this_len*3:this_len*4])/len(new_lane[this_len*3:this_len*4]),sum(new_lane[this_len*4:])/len(new_lane[this_len*4:])]\n if new_list[0] > new_list[1] > new_list[2] > new_list[3] > new_list[4]:\n # 变道到车轮压线时间\n Time_from_lighting_to_pressing_line = new_data[f'{direction}_time'][i][lane_list[0]] - new_data[f'{direction}_time'][i][0]\n Line_pressing_data = [self.filename,speed,max_spd,min_spd,Time_from_lighting_to_pressing_line, self.roadType]\n if Line_pressing_data not in Line_pressing:\n Line_pressing.append(Line_pressing_data)\n return_data = 1\n return return_data\n \n \n def func3(key,i,speed,max_spd,min_spd,direction):\n result_dict[key][direction][0] += 1\n this_time = new_data['{}_time'.format(direction)][i][-1] - new_data['{}_time'.format(direction)][i][0]\n #################################\n this_index = index_number(time_1,this_time)\n new_result_dict[key][this_index] += 1\n #################################\n mysql_data_sql = [self.filename,direction, speed,max_spd,min_spd, this_time, self.roadType]\n if mysql_data_sql not in mysql_data:\n mysql_data.append(mysql_data_sql)\n #################################\n result_dict[key][direction][1].append(this_time)\n # 数据规整完成, 长度一致,无变化\n for i in range(len(new_data['spd'])):\n judgment_basis = 0\n for j in new_data['wheel'][i]:\n if j > 0.1:\n judgment_basis = 1\n break\n if judgment_basis == 0:\n try:\n # 车速计算\n speed = int(sum(new_data['spd'][i]))/len(new_data['spd'][i]) * 3.6\n # 最大车速留档\n max_spd = int(max(new_data['spd'][i])) * 3.6\n # 最小车速留档\n min_spd = int(min(new_data['spd'][i])) * 3.6\n except IndexError:\n break\n else:\n if speed >=80:\n if 1 in new_data['left'][i] and 1 not in new_data['right'][i]:\n # 判断车道线情况\n # Time_from_lighting_to_pressing_line 为开启变道灯到压线的时间\n # 这里的判断逻辑需要重新计算, 
考虑变道中跳变仅存在一次,并且确实变道完成\n # 如:\n # 1.跨过车道线完成跳变之后, 继续变动完成变道操作\n # 2.跨过车到线之后的数据迅速达到正常车位, 即车道线扣除车身之后最大值应该为60公分,否则视为未变道成功, 需要继续查看后续数据来判断\n lane = func2(i,'left',speed,max_spd,min_spd)\n # 车道线检测\n if lane == 1:\n func3('80-100',i,speed,max_spd,min_spd,'left')\n elif 1 not in new_data['left'][i] and 1 in new_data['right'][i]:\n lane = func2(i,'right',speed,max_spd,min_spd)\n if lane == 1:\n \n func3('80-100',i,speed,max_spd,min_spd,'right')\n else:\n continue\n elif speed >=60:\n if 1 in new_data['left'][i] and 1 not in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'left',speed,max_spd,min_spd)\n if lane == 1:\n func3('60-80',i,speed,max_spd,min_spd,'left')\n elif 1 not in new_data['left'][i] and 1 in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'right',speed,max_spd,min_spd)\n if lane == 1:\n func3('60-80',i,speed,max_spd,min_spd,'right')\n else:\n continue\n elif speed >=40:\n if 1 in new_data['left'][i] and 1 not in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'left',speed,max_spd,min_spd)\n if lane == 1:\n func3('40-60',i,speed,max_spd,min_spd,'left')\n elif 1 not in new_data['left'][i] and 1 in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'right',speed,max_spd,min_spd)\n if lane == 1:\n func3('40-60',i,speed,max_spd,min_spd,'right')\n else:\n continue\n elif speed > 0:\n if 1 in new_data['left'][i] and 1 not in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'left',speed,max_spd,min_spd)\n if lane == 1:\n func3('0~40',i,speed,max_spd,min_spd,'left')\n elif 1 not in new_data['left'][i] and 1 in new_data['right'][i]:\n # 判断车道线情况\n lane = func2(i,'right',speed,max_spd,min_spd)\n if lane == 1:\n func3('0~40',i,speed,max_spd,min_spd,'right')\n # result_dict['0~40']['right'][1].append(this_time)\n else:\n continue\n else:\n continue\n \n this_odometer = odometer.samples.tolist()\n this_time = odometer.timestamps.tolist()\n odo = this_odometer[-1] - this_odometer[0]\n t = this_time[-1] - this_time[0]\n global all_odometer\n all_odometer += odo\n 
global all_time\n all_time += t\n self.close_file()\n return new_result_dict\n finally:\n self.close_file()\n \nclass OpenFile:\n def __init__(self,path):\n self.file_list = []\n for root,dirs,files in os.walk(path):\n for i in files:\n if i.endswith('.MF4'):\n self.file_list.append(os.path.join(root,i))\ndef created_db(data):\n conn = pymysql.connect(host='10.178.229.1', port=3306, user='root', password='rbac2020',database='file_system')\n cursor = conn.cursor()\n sql = 'insert into cornering_lamps (file_name,direction,speed,duration,is_delete) values(%s,%s,%s,%s,0)'\n select_sql = 'select * from cornering_lamps where file_name=%s and direction=%s and speed=%s and duration=%s'\n for i in data:\n try:\n \n select_data = cursor.fetchall()\n if not cursor.execute(sql, (i[0],i[1],i[2],i[3])):\n cursor.execute(sql, (i[0],i[1],i[2],i[3]))\n conn.commit()\n except Exception as e:\n conn.rollback()# 发生错误时回滚\n print('提交失败:', i, e)\ndef created_db_2(data):\n conn = pymysql.connect(host='10.178.229.1', port=3306, user='root', password='rbac2020',database='file_system')\n print(data)\n cursor = conn.cursor()\n sql = 'insert into line_pressing (file_name,speed,time,is_delete) values(%s,%s,%s,0)'\n select_sql = 'select * from line_pressing where file_name=%s and speed=%s and time=%s'\n for i in data:\n try:\n \n select_data = cursor.fetchall()\n if not cursor.execute(sql, (i[0],i[1],i[2])):\n cursor.execute(sql, (i[0],i[1],i[2]))\n conn.commit()\n except Exception as e:\n conn.rollback()# 发生错误时回滚\n print('提交失败:', i, e)\nif __name__ == \"__main__\":\n # base_file = [\n # r'\\\\abtvdfs.de.bosch.com\\ismdfs\\loc\\szh\\DA\\Radar\\05_Radar_ER\\01_GAC'\n # ]\n # all_file_list = []\n # if os.path.exists('./mf4_file_path.json'):\n # with open('./mf4_file_path.json', 'r') as f:\n # all_file_list = json.load(f)\n # else:\n # for i in base_file:\n # print('开始检索文件')\n # new_file = OpenFile(i)\n # file_list = new_file.file_list\n # print(file_list)\n # new_file_list = []\n # for i in 
file_list:\n # file_size = get_FileSize(i)\n # if file_size > 500:\n # # new_file_list.append(i)\n # all_file_list.append(i)\n # with open('./mf4_file_path.json', 'w', encoding='UTF-8') as f:\n # print('开始写入文件')\n # json.dump(all_file_list,f)\n file_list = [\n #{\n # 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_11-55_17_0016.MF4', 'roadType': '1'\n #},\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_13-52_35_0028.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_13-48_35_0026.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_14-33_19_0035.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_14-21_19_0029.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_13-56_49_0017.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_13-47_15_0028.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_14-31_19_0034.MF4', 'roadType': '1'\n },\n {\n 
'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_13-46_35_0025.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_13-44_35_0024.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_11-59_17_0018.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_12-01_17_0019.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-15//GAC_A18_2020-09-15_12-03_17_0020.MF4', 'roadType': '1'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-18//GAC_A18_2020-09-18_14-17_49_0036.MF4', 'roadType': '2'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-18//GAC_A18_2020-09-18_14-19_49_0037.MF4', 'roadType': '2'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_10-43_50_0033.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_10-45_50_0034.MF4', 'roadType': '3'\n },\n {\n 
'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_10-47_50_0035.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_10-49_50_0036.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_13-05_38_0037.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_13-50_58_0017.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-17//GAC_A18_2020-09-17_13-53_29_0018.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-18//GAC_A18_2020-09-18_14-15_49_0035.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-19//GAC_A18_2020-09-19_12-55_50_0046.MF4', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_10-09_42_0018', 'roadType': '3'\n },\n {\n 'path':'//abtvdfs.de.bosch.com//ismdfs//loc//szh//DA//Radar//05_Radar_ER//01_GAC//02_A18HF//7043C//20200915-20200923//A18-7043C_2020-09-20//GAC_A18_2020-09-20_10-11_42_0019.MF4', 'roadType': '3'\n }\n]\n for path in file_list:\n file_name = path['path'].split('//')[-1]\n print(file_name)\n if os.path.exists(path['path']):\n print('该文件已存在, 不需要重复复制')\n 
else:\n continue\n if os.path.exists('./{}'.format(file_name)):\n print('该文件已存在, 不需要重复复制')\n else:\n shutil.copy(path['path'], './')\n om = openMDF('./{}'.format(file_name), path['roadType'])\n print(mysql_data)\n print(Line_pressing)\n # with open('./result_data.json', 'w', encoding='utf-8') as f:\n # json.dump(result_dict, f)\n # with open('./new_result_data.json', 'w', encoding='utf-8') as f:\n # json.dump(new_result_dict, f)\n # list3=list(set(mysql_data).difference(set(mysql_data_1)))\n # created_db(list3)\n # with open('./mysql_data.json', 'w', encoding='utf-8') as f:\n # json.dump(mysql_data, f)\n # mysql_data_1 = copy.deepcopy(mysql_data)\n # created_db_2(Line_pressing)\n # with open('./Pre_prepared_data.json', 'w', encoding='utf-8') as f:\n # json.dump(Pre_prepared_data, f)\n # print('正在删除文件', file_name)\n # with open('./loser_file.json', 'w', encoding='utf-8') as f:\n # json.dump(loser_file_data, f)\n # os.remove('./{}'.format(file_name))\n print(my_list)\n with open('./ceshi.json') as f:\n json.dump(my_list,f)","sub_path":"新项目/openMF4 - 副本.py","file_name":"openMF4 - 副本.py","file_ext":"py","file_size_in_byte":33910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"48892321","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport logging\nimport os\n\nfrom azure_iot_device.device_client import DeviceClient\n\nlogging.basicConfig(level=logging.INFO)\n\nconn_str = os.environ.get(\"DEVICE_CONNECTION_STRING\")\nclient = DeviceClient.from_connection_string(conn_str)\n\ndef connection_state_callback(status):\n print(\"connection status: \" + status)\n if status == \"connected\":\n client.send(\"foo\")\n\ndef c2d_handler(msg):\n print(msg)\n\nclient.on_connection_state = connection_state_callback\nclient.on_c2d_message = c2d_handler\nclient.connect()\n\nwhile True:\n continue","sub_path":"samples/send_receive.py","file_name":"send_receive.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"393902830","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\" module devoted to build rss files \"\"\"\n\nimport os\nimport time\nimport hashlib\nimport fnmatch\nimport textwrap\nfrom time import localtime, strftime\n\nimport sgconf\nimport sgproc\nimport sgexternal\nimport sggen\n\nfrom sgglobals import *\n\nglobal listarss\nlistarss = []\n#\ndef rssaddtolist(nomefile, mydate):\n\tglobal listarss\n\tlistarefuse = []\n\tmaxnum = sgconf.cfgget(\"rsslistlength\")\n\n\tif os.path.splitext(nomefile)[1] == sgconf.cfgget(\"processingext\"):\n\t\tcln = nomefile[len(sgconf.cfgget(\"dirstart\")):]\n\t\t# nome = mydate + \" \" + cln\n\n\t\tif listarss:\n\t\t\tif (mydate + \" \" + nomefile) in listarss:\n\t\t\t\treturn\n\t\t\tif len(listarss) > maxnum * 2:\n\t\t\t\tlistarss = sorted(listarss, reverse=True)[:maxnum]\n\n\t\t# no upper files\n\t\tif os.path.dirname(cln) == \"\":\n\t\t\treturn\n\t\t# no files in site directory\n\t\tif fnmatch.fnmatch(cln, os.path.join(\"site\", \"*.*\")):\n\t\t\treturn\n\t\t# no image galleries\n\t\tif fnmatch.fnmatch(cln, 
os.path.join(sgconf.cfgget(\"dirimages\"), \"*\")):\n\t\t\treturn\n\t\tif os.path.dirname(cln) == sgconf.cfgget(\"dirposts\"):\n\t\t\treturn\n\t\tif os.path.dirname(cln) == sgconf.cfgget(\"dirperma\"):\n\t\t\treturn\n\t\tif fnmatch.fnmatch(cln, os.path.join(sgconf.cfgget(\"dirposts\"), \"*\", sgconf.cfgget(\"indexfile\") + \".*\")):\n\t\t\treturn\n\n\t\t# managing to no add some directories or files to rss.xml\n\t\tif sgconf.cfgget(\"privatepaths\") != \"\":\n\t\t\tlistarefuse = sgconf.cfgget(\"privatepaths\").split(\"|\")\n\t\tif sgconf.cfgget(\"rssnoadd\") != \"\":\n\t\t\tlistarefuse += sgconf.cfgget(\"rssnoadd\").split(\"|\")\n\t\tfor lritem in listarefuse:\n\t\t\tif nomefile.find(lritem) > 0:\n\t\t\t\treturn\n\n\t\tlistarss.append(mydate + \" \" + nomefile)\n\n\ndef rssgo():\n\t\"\"\" main function \"\"\"\n\n\tglobal listarss\n\tfilerss = os.path.join(sgconf.cfgget(\"dirstart\"), \"rss.xml\")\n\tres = []\n\tmp = sgproc.Pagina()\n\tlista = sorted(listarss, reverse=True)[:sgconf.cfgget(\"rsslistlength\")]\n\n\tif len(lista) == 0:\n\t\treturn\n\n\t# will avoid creation of a new rss file if there are no file added\n\tif os.path.exists(filerss):\n\t\tfileh = open(filerss)\n\t\tlines = fileh.readlines()\n\t\tfileh.close()\n\t\ttry:\n\t\t\toldfirst = os.path.splitext(os.path.basename(lines[14].replace(\"\", \"\")))[0]\n\t\t\tnewfirst = os.path.splitext(os.path.basename(sgconf.cfgget(\"lastpost\")))[0]\n\t\t\tif oldfirst == newfirst:\n\t\t\t\treturn\n\t\texcept:\n\t\t\tpass\n\n\tdt = strftime(\"%a, %d %b %Y %H:%M:%S %z\", localtime())\n\n\tres.append(\"\")\n\tres.append(\"\")\n\tres.append(\"\")\n\t#res.append(\"\")\n\tres.append(\"\" + sgconf.cfgget(\"sitename\") + \"\")\n\tres.append(\"http://\" + sgconf.cfgget(\"sitename\") + \"/\")\n\tres.append(\"\")\n\tres.append(\"\" + sgconf.cfgget(\"defaultlang\") + \"\")\n\tres.append(\"\" + dt + \"\")\n\tres.append(\"\" + dt + \"\")\n\tres.append(\"http://blogs.law.harvard.edu/tech/rss\")\n\tres.append(\"\" + 
sggen.getversion(True) + \"\")\n\n\tfor ele in lista:\n\t\tmyfile = ele[9:]\n\t\tfn = myfile.replace(sgconf.cfgget(\"dirstart\"), \"\")\n\t\tfn = os.path.splitext(fn)[0] + \".html\"\n\t\tdt = ele[:8]\n\n\t\tf = time.mktime((rssint(dt[0:4]), rssint(dt[4:6]), rssint(dt[6:8]), 0, 0, 1, 0, 0, 0))\n\t\tdt = time.strftime(\"%a, %d %b %Y %H:%M:%S %z\", time.localtime(f))\n\n\t\tsgproc.textget(myfile, mp)\n\t\tdescr = rsspreview(mp.text, mp.title, mp.filepath)\n\n\t\t#xtext, xtitle, xtags, xfilesize, xfilelastm, xauthor,xfilename\n\t\tsitelink = os.path.join(sgconf.cfgget(\"sitename\"), fn)\n\t\tres.append(\" \")\n\t\tres.append(\" <![CDATA[\" + mp.title + \"]]>\")\n\t\tres.append(\" \")\n\t\tres.append(\" http://\" + sitelink + \"\")\n\t\tres.append(\" \")\n\t\tres.append(\" \" + dt + \"\")\n\t\tres.append(\" http://\" + sitelink + \"\")\n\t\tres.append(\" \" + rsssetguid(ele) + \"\")\n\t\tres.append(\" \")\n\n\t#res.append(\"\")\n\tres.append(\"\")\n\tres.append(\"\")\n\n\tif os.path.exists(filerss):\n\t\tos.remove(filerss)\n\tfilecontent = \"\\n\".join(res)\n\n\tsgutils.file_write(filerss, filecontent, \"w\")\n\n\ndef rsssetauthor(proposedname, proposedmail):\n\t\"\"\" if author is not setted, than get defaults function should return an email address\n\n\t:param proposedname: default author mode\n\t:param proposedmail: default author mail\n\t:return: author name for rss feed\n\t\"\"\"\n\n\tif proposedname == \"\":\n\t\tres = sgconf.cfgget(\"rssauthor\")\n\telse:\n\t\tif proposedmail.find(\"@\") < 2 or proposedname == \"\":\n\t\t\tres = sgconf.cfgget(\"rssauthor\")\n\t\telse:\n\t\t\tres = proposedmail + \" (\" + proposedname + \")\"\n\n\treturn res\n\n\ndef rsssetguid(fromthis):\n\t\"\"\" creates a guid hashing the name of a file, removing path before doing it\n\t this is necessary due it's needed to avoid duplicates\n\n\t:param fromthis: original\n\t:return: the guid\n\t\"\"\"\n\n\tif fromthis.find(os.sep) >= 0:\n\t\tstringa = 
os.path.basename(fromthis)\n\telse:\n\t\tstringa = fromthis\n\treturn hashlib.md5(stringa.encode()).hexdigest()\n\n\ndef rssint(valore):\n\t\"\"\" make a value an integer\n\t:param valore: value to be processed\n\t:return: 'integerized' value\n\t\"\"\"\n\ttry:\n\t\ti = int(valore)\n\texcept:\n\t\ti = 0\n\n\treturn i\n\n\ndef rsspreview(text, alternative, linkf):\n\t\"\"\" get a bit of the text to be used in rss summary. Different from the other summary in index files, it doesn't consider\n\t\tactually the fact that languages for article can be more of one.\n\n\t:param text: text of article\n\t:param alternative: if text is none, generally title is\n\t:param linkf:\n\t:return: smaller text\n\t\"\"\"\n\tmisura = int(sgconf.cfgget(\"rsssummarylength\"))\n\tthumbpath = os.path.join(os.path.dirname(linkf), \"thumbnails\", os.path.splitext(os.path.basename(linkf))[0] + \".jpg\")\n\n\tif text != \"\":\n\t\ttext = sgutils.removehtml(text)\n\t\ttext = textwrap.shorten(text, width=misura, placeholder=\"\")\n\t\tif text.rfind(\".\") > misura - 10:\n\t\t\ttext = text[:text.rfind(\".\")]\n\t\telif text.rfind(\" \") > misura - 20:\n\t\t\ttext = text[:text.rfind(\" \")]\n\telse:\n\t\ttext = alternative\n\n\tres = text + \" [...]\"\n\n\tres = sgutils.removehtml(res)\n\tres = res.replace(\"<\", \"\")\n\tif os.path.exists(thumbpath):\n\t\tres = \"

\\n\" + res\n\n\treturn res\n\nif __name__ == \"__main__\":\n\tsgutils.showmsg(ERROR_LAUNCHED_SCRIPT, 0)","sub_path":"sgrss.py","file_name":"sgrss.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"575289279","text":"from flask import Blueprint, render_template, redirect, flash, url_for\n\nfrom app import db\nfrom app.decorators import require_role\nfrom app.forms import init_form\nfrom app.forms.redirect import RedirectForm\nfrom app.models.redirect import Redirect\nfrom app.roles import Roles\n\nblueprint = Blueprint('redirect', __name__, url_prefix='/redirect')\n\n\n@blueprint.route('/', methods=['GET', 'POST'])\n@blueprint.route('/edit//', methods=['GET', 'POST'])\n@require_role(Roles.REDIRECT_WRITE)\ndef view(redirect_id=None):\n redirection = Redirect.query.get(redirect_id) if redirect_id else None\n\n form = init_form(RedirectForm, obj=redirection)\n\n if form.validate_on_submit():\n\n fro = form.data['fro'].rstrip('/')\n to = form.data['to']\n\n old_redirection = Redirect.query.filter(Redirect.fro == fro).first()\n\n if old_redirection and old_redirection.id != redirect_id:\n flash('Er is al een omleiding vanaf dat pad gedefiniëerd.',\n 'danger')\n else:\n if redirection:\n redirection.fro = fro\n redirection.to = to\n else:\n redirection = Redirect(fro, to)\n\n db.session.add(redirection)\n db.session.commit()\n\n flash('De omleiding is succesvol opgeslagen.')\n\n return redirect(url_for('redirect.view',\n redirect_id=redirection.id))\n\n redirections = Redirect.query.order_by(Redirect.fro).all()\n\n return render_template('redirect.htm', redirections=redirections,\n redirection=redirection, form=form)\n\n\n@blueprint.route('/delete//', methods=['GET', 'POST'])\n@require_role(Roles.REDIRECT_WRITE)\ndef delete(redirect_id):\n redirection = Redirect.query.get_or_404(redirect_id)\n\n db.session.delete(redirection)\n db.session.commit()\n\n flash('De omleiding is 
succesvol verwijderd.')\n\n return redirect(url_for('redirect.view'))\n","sub_path":"app/views/redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"628165524","text":"# https://github.com/igormilovanovic/python-data-viz-cookbook/blob/master/3367OS_Code/3367OS_06_Code/ch06/pirates_temperature.csv\n# Sebastiaan Arendsen\n# 6060072\n#\n# Converts CSV2JSON\n\n# imports\nimport csv\nimport json\n\n# constants\nINPUT = 'data/gdppc.csv'\nOUTPUT = 'data/gdppc.json'\n\n# function to read csv file\ndef csv_reader(f):\n\n # init empty list for csv data\n data = []\n with open(f, 'r') as infile:\n\n # use DictReader to create dicts\n reader = csv.DictReader(infile)\n for row in reader:\n data.append(row)\n return data\n\ndef json_writer(f, data):\n with open(f, 'w') as outfile:\n json.dump(data, outfile)\n\nif __name__ == '__main__':\n\n # read the csv and produce the JSON\n json_writer(OUTPUT, csv_reader(INPUT))","sub_path":"Homework/week_4/convertCSV2JSON.py","file_name":"convertCSV2JSON.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"464429098","text":"# -*- coding: utf-8 -*-\r\n\r\nimport torch\r\nimport numpy as np\r\nimport pickle\r\nfrom os import makedirs\r\nfrom os.path import exists\r\n\r\nfrom .functional_torch import L21_norm\r\nfrom .utils import one_hot_encoder, classifier_array, min_max_scaler\r\n\r\nclass IDES_rv_torch(object):\r\n '''\r\n class IDES_rv_torch(object)\r\n \r\n Attributes:\r\n classifier_pool: ndarray of base learners, essay H.\r\n classifier_number: int, essay k.\r\n character_number: int, essay m.\r\n target_number: int, essay d.\r\n weight_martix: 2D-tensor, k * m weight martix, essay W.\r\n encoder: one_hot_encoder.\r\n '''\r\n def __init__(self, classifier_pool, base_label):\r\n '''\r\n 
IDES_rv_torch.__init__(self, classifier_pool)\r\n \r\n Parameters:\r\n classifier_pool: an ensemble or a list of base learners.\r\n base_label: lables that base classifiers use.\r\n '''\r\n self.classifier_pool = classifier_array(classifier_pool)\r\n self.classifier_number = len(classifier_pool)\r\n self.character_number = None\r\n self.target_number = None\r\n self.weight_martix = None\r\n self.encoder = one_hot_encoder()\r\n self.encoder.new_rule(base_label, return_one_hot = False)\r\n self.target_number = len(self.encoder)\r\n \r\n def ensemble_probability(self, sample_input):\r\n '''\r\n IDES_rv_torch._ensemble_probability(self, sample_input)\r\n \r\n Parameters:\r\n sample_input: 2D-tensor, n * m data martix of input sample.\r\n \r\n Return:\r\n probability_result: 3D-tensors, n * d * k, all probabilities predicted by base learners.\r\n '''\r\n probability_result = torch.tensor([base_classifier.predict_proba(sample_input) for base_classifier in self.classifier_pool])\r\n probability_result = probability_result.permute(1, 2, 0)\r\n return probability_result\r\n \r\n def ensemble_correlation(self, sample_input):\r\n '''\r\n IDES_rv_torch._ensemble_correlation(self, sample_input)\r\n \r\n Parameters:\r\n sample_input: 2D-tensor, n * m data martix of input sample.\r\n \r\n Return:\r\n correlation_result: 2D-tensor, n * d data martix, result of correlation, essay phi.\r\n '''\r\n sample_number = len(sample_input)\r\n probability_result = self.ensemble_probability(sample_input)\r\n correlation_result = torch.matmul(probability_result, self.weight_martix)\r\n correlation_result = torch.bmm(correlation_result, sample_input.view(-1, self.character_number, 1)).view(sample_number, -1)\r\n return correlation_result\r\n \r\n def criterion_cross_entropy(self, sample_input, sample_target, IDES_lambda):\r\n '''\r\n IDES_rv_torch._loss(self, sample_input, sample_target, IDES_lambda = 1.5, need_loss = True)\r\n Calculate loss and derivative of loss in mode cross entropy.\r\n 
\r\n Parameters:\r\n sample_input: 2D-tensor, n * m data martix of input sample.\r\n sample_target: 1D-tensor, n encoded lable vector of input sample.\r\n IDES_lambda: float, coefficient of regularization.\r\n \r\n Return:\r\n loss: 1 * 1 tensor, loss.\r\n '''\r\n correlation_result = self.ensemble_correlation(sample_input)\r\n cross_entropy_term = torch.nn.CrossEntropyLoss()(correlation_result, sample_target)\r\n regularization_term = torch.mm(self.weight_martix, sample_input.T)\r\n regularization_term = L21_norm(regularization_term)\r\n loss = cross_entropy_term + IDES_lambda * regularization_term\r\n return loss\r\n \r\n def criterion_mean_square(self, sample_input, sample_target, IDES_lambda):\r\n '''\r\n IDES_rv_torch._loss(self, sample_input, sample_target, IDES_lambda = 1.5, need_loss = True)\r\n Calculate loss and derivative of loss in mode mean square.\r\n \r\n Parameters:\r\n sample_input: 2D-tensor, n * m data martix of input sample.\r\n sample_target: 2D-tensor, n * d one hot encoded label martix of input sample.\r\n IDES_lambda: float, coefficient of regularization.\r\n \r\n Return:\r\n loss: 1 * 1 tensor, loss.\r\n '''\r\n correlation_result = self.ensemble_correlation(sample_input)\r\n softmax_result = torch.nn.functional.softmax(correlation_result, dim = -1)\r\n cross_entropy_term = torch.nn.MSELoss()(softmax_result, sample_target)\r\n regularization_term = torch.mm(self.weight_martix, sample_input.T)\r\n regularization_term = L21_norm(regularization_term)\r\n loss = cross_entropy_term + IDES_lambda * regularization_term\r\n return loss\r\n \r\n def fit_once(self, sample_input, sample_target, IDES_lambda, learning_rate, loss_mode):\r\n '''\r\n IDES_rv_torch.fit_once(self, sample_input, sample_target, IDES_lambda, learning_rate)\r\n An iteration during learning.\r\n \r\n Parameters:\r\n sample_input: 2D-tensor, n * m data martix of input sample.\r\n sample_target: 1D-tensor or 2D tensor, n encoded lable vector of input sample for cross entropy loss, 
or n * d one hot encoded label martix for mean square loss.\r\n IDES_lambda: float, coefficient of regularization.\r\n learning_rate: float, learning rate.\r\n loss_mode: str, if it is set as 'cross_entropy', use CrossEntropyLoss, else use MSELoss.\r\n \r\n Return:\r\n loss_value: float, value of loss.\r\n '''\r\n self.weight_martix.requires_grad_(True)\r\n loss = self.criterion_cross_entropy(sample_input, sample_target, IDES_lambda) if loss_mode == 'cross_entropy' else self.criterion_mean_square(sample_input, sample_target, IDES_lambda)\r\n loss.backward()\r\n self.weight_martix = self.weight_martix.detach() - learning_rate * self.weight_martix.grad\r\n loss_value = loss.item()\r\n return loss_value\r\n \r\n def fit(self, sample_input, sample_label, IDES_lambda = 1.5, iteration_number = 50, learning_rate = 1e-3, random_state = None, loss_mode = 'cross_entropy'):\r\n '''\r\n IDES_rv_torch.fit(self, sample_input, sample_label, IDES_lambda = 1.5, iteration_number = 50, learning_rate = 1e-3)\r\n Process to train weight martix.\r\n \r\n Parameters:\r\n sample_input: 2D-ndarray, n * m data martix of input sample.\r\n sample_label: 1D-ndarray, n lable vector of input sample.\r\n IDES_lambda: float, optional, default = 1.5, coefficient of regularization.\r\n iteration_number: int, optional, default = 50, number of iterations.\r\n learning_rate: float, optional, default = 1e-3, learning rate.\r\n random_state: int or None, optional, default = None, random state to initialize weight martix, if None, initialize it totally randomly.\r\n loss_mode: str, optional, default = 'cross_entropy', if it is set as 'mean_square', use MSELoss.\r\n \r\n Return:\r\n loss_trend: list of float, the values of loss in all iterations.\r\n '''\r\n if self.character_number == None:\r\n self.character_number = sample_input.shape[1]\r\n self.weight_martix = torch.from_numpy(np.random.RandomState(random_state).rand(self.classifier_number, self.character_number)).double() if not random_state is None 
else torch.rand(self.classifier_number, self.character_number).double()\r\n sample_input = torch.from_numpy(sample_input)\r\n sample_target = self.encoder.to_index(sample_label) if loss_mode == 'cross_entropy' else self.encoder.to_one_hot(sample_label)\r\n sample_target = torch.from_numpy(sample_target)\r\n loss_trend = [self.fit_once(sample_input, sample_target, IDES_lambda, learning_rate, loss_mode) for iteration in range(iteration_number)]\r\n return loss_trend\r\n \r\n def predict(self, sample_input, IDES_rou = 0.2, select_mode = 'value'):\r\n '''\r\n IDES_rv_torch.predict(self, sample_input, IDES_rou = 0.2)\r\n Predict according to input samples.\r\n \r\n Parameters:\r\n sample_input: 2D-ndarray, n1 * m data martix of input sample.\r\n IDES_rou: float, optional, default = 0.2, threshold of classitier selection.\r\n select_mode: str, optional, default = 'value', if set it 'value', rou will be the value threshold. IF set it 'ratio', rou will be the ratio threshold.\r\n \r\n Return:\r\n sample_prediction: 1D-ndarray, n1 label vector of predictions.\r\n '''\r\n sample_number = len(sample_input)\r\n sample_input = torch.from_numpy(sample_input)\r\n predict_rou = torch.mm(self.weight_martix, sample_input.T).numpy()\r\n if select_mode == 'value':\r\n predict_rou = min_max_scaler(predict_rou, axis = 0)\r\n index_prediction = []\r\n for sample_index in range(sample_number):\r\n classifier_selected = self.classifier_pool[predict_rou[:, sample_index] >= IDES_rou]\r\n classifier_prediction = np.array([classifier.predict(sample_input[[sample_index]]) for classifier in classifier_selected])\r\n prediction_unique, prediction_count = np.unique(classifier_prediction, return_counts = True)\r\n vote_prediction = prediction_unique[prediction_count.argmax()]\r\n index_prediction.append(vote_prediction)\r\n else:\r\n classifier_selected_number = round(len(self.classifier_pool) * IDES_rou)\r\n index_prediction = []\r\n for sample_index in range(sample_number):\r\n 
classifier_selected = self.classifier_pool[predict_rou[:, sample_index].argsort()[- classifier_selected_number :]]\r\n classifier_prediction = np.array([classifier.predict(sample_input[[sample_index]]) for classifier in classifier_selected])\r\n prediction_unique, prediction_count = np.unique(classifier_prediction, return_counts = True)\r\n vote_prediction = prediction_unique[prediction_count.argmax()]\r\n index_prediction.append(vote_prediction)\r\n index_prediction = np.array(index_prediction, dtype = np.int64)\r\n sample_prediction = self.encoder.from_index(index_prediction)\r\n return sample_prediction\r\n \r\n def score(self, sample_input, sample_label, IDES_rou = 0.2, select_mode = 'value'):\r\n '''\r\n IDES_rv_torch.predict(self, sample_input, IDES_rou = 0.2)\r\n Predict according to input samples.\r\n \r\n Parameters:\r\n sample_input: 2D-ndarray, n * m data martix of input sample.\r\n sample_label: 1D-ndarray, n lable vector of input sample.\r\n IDES_rou: float, optional, default = 0.2, threshold of classitier selection.\r\n select_mode: str, optional, default = 'value', if set it 'value', rou will be the value threshold. 
IF set it 'ratio', rou will be the ratio threshold.\r\n \r\n Return:\r\n accuracy: float, accuracy of predictions.\r\n '''\r\n sample_prediction = self.predict(sample_input, IDES_rou, select_mode)\r\n accuracy = np.sum(sample_prediction == sample_label) / len(sample_label)\r\n return accuracy\r\n \r\n def save_model(self, path):\r\n '''\r\n IDES_rv_torch.save_model(self, path)\r\n Save model as pkl file.\r\n \r\n Parameters:\r\n path: str, path to save the model.\r\n '''\r\n if '/' in path:\r\n root_path = '/'.join(path.split('/')[:-1])\r\n if not exists(root_path):\r\n makedirs(root_path)\r\n with open(path, 'wb') as model_file:\r\n pickle.dump(self, model_file)\r\n \r\ndef load_model(path):\r\n '''\r\n load_model(path)\r\n Load model from pkl file.\r\n \r\n Parameters:\r\n path: str, path to load the model.\r\n \r\n Return:\r\n IDES_model: IDES_rv_torch, the model.\r\n '''\r\n with open(path, 'rb') as model_file:\r\n IDES_model = pickle.load(model_file)\r\n return IDES_model","sub_path":"IDES_torch.py","file_name":"IDES_torch.py","file_ext":"py","file_size_in_byte":12075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"232281444","text":"\"\"\"\n\t@ricardoifc\n\tejercicio5\n\"\"\"\n\"\"\"\nManejo de colecciones y tuplas\n\n\n# Encontrar la siguiente estructura\n#\n\n[(16.333333333333332, 'Ángel'), (16.666666666666668, 'José'), (13.0, 'Ana')]\n(16.666666666666668, 'José')\n[(13.0, 'Ana'), (16.666666666666668, 'José'), (16.333333333333332, 'Ángel')]\n\n \n\nDadas las siguientes estructuras\n\n\"\"\"\n\nparaleloA = [(19, 10, 20), (20, 20, 10), (20, 10, 9)]\nnombres = [\"Ángel\", \"José\", \"Ana\"]\n# saco el promedio de las 3 notas y lo uno en un zip con nombre\nprom = (list(zip((list(map(lambda x: (x[0] + x[1] + x[2]) / 3, paraleloA))),\n nombres)))\n#imprimo el prom, el maximo y despues convierto en reversa y imprimo reversa\nprint(prom)\nprint(max(prom))\nprom.reverse()\nprint(prom)","sub_path":"Python/3er 
ciclo/1er BM/05/clase04-ricardoifc-master/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"397734419","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Api, Resource\napp = Flask(__name__)\n\napi=Api(app)\n\ndef checkposted(posted, function):\n if(function == \"add\" or function == \"subtract\" or function == \"multiply\"):\n if \"x\" not in posted or \"y\" not in posted:\n return 301\n else:\n return 200\n else:\n if \"x\" not in posted or \"y\" not in posted:\n return 301\n elif(int(posted[\"y\"]) == 0):\n return 302\n else:\n return 200\n\nclass Add(Resource):\n def post(self):\n posted = request.get_json()\n status = checkposted(posted, \"add\")\n if(status != 200):\n retJ = {\n \"Message\": \"An error happened\",\n \"Status code\": status\n }\n return jsonify(retJ)\n x = posted[\"x\"]\n y = posted[\"y\"]\n x = int(x)\n y = int(y)\n sum = x+y\n ret = {\n 'Message': sum,\n 'Status code': 200\n }\n return jsonify(ret)\n\nclass Subtract(Resource):\n def post(self):\n posted = request.get_json()\n status = checkposted(posted, \"subtract\")\n if(status != 200):\n retJ = {\n \"Message\": \"An error happened\",\n \"Status code\": status\n }\n return jsonify(retJ)\n x = posted[\"x\"]\n y = posted[\"y\"]\n x = int(x)\n y = int(y)\n diff = x-y\n ret = {\n 'Message': diff,\n 'Status code': 200\n }\n return jsonify(ret)\nclass Multiply(Resource):\n def post(self):\n posted = request.get_json()\n status = checkposted(posted, \"multiply\")\n if (status != 200):\n retJ = {\n \"Message\": \"An error happened\",\n \"Status code\": status\n }\n return jsonify(retJ)\n x = posted[\"x\"]\n y = posted[\"y\"]\n x = int(x)\n y = int(y)\n prod = x * y\n ret = {\n 'Message': prod,\n 'Status code': 200\n }\n return jsonify(ret)\nclass Division(Resource):\n def post(self):\n posted = request.get_json()\n status = 
checkposted(posted, \"divide\")\n if (status != 200):\n retJ = {\n \"Message\": \"An error happened\",\n \"Status code\": status\n }\n return jsonify(retJ)\n x = posted[\"x\"]\n y = posted[\"y\"]\n x = int(x)\n y = int(y)\n quo = x*1.0/ y\n ret = {\n 'Message': quo,\n 'Status code': 200\n }\n return jsonify(ret)\n\napi.add_resource(Add, \"/add\")\napi.add_resource(Subtract,\"/subtract\")\napi.add_resource(Multiply,\"/multiply\")\napi.add_resource(Division,\"/divide\")\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"308831512","text":"#!/usr/bin/python2 \n# -*- coding: utf-8 -*-\n\nfrom pyspark import SparkContext \nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import to_date,months_between,current_timestamp\n\ndata_enh=\"hdfs://10.190.2.112/data/data_dump.txt\"\ntrain_set=\"hdfs://10.190.2.112/data/train_set.txt\"\nval_set=\"hdfs://10.190.2.112/data/val_set.txt\"\ntest_set=\"hdfs://10.190.2.112/data/test.txt\"\n\n#获得数据\nspark = SparkSession.builder.master(\"spark://10.190.2.112:7077\").appName(\"pssql03\").getOrCreate()\ndf = spark.read.csv(data_enh,header=None,encoding=\"utf-8\",inferSchema=True,sep=\"\\t\").drop_duplicates() #去重\ndf=df.withColumn('Tdate',to_date(df[8], 'dd/MM/yyyy'))\ndf=df.filter(\"year(Tdate)>1893\")\n\n#执行查询\ndf1=df.filter(\"Tdate is not null\").withColumn('Cur_time',to_date(current_timestamp(),\"dd/MM/yyyy\"))\ndf2=df1.select(months_between('Cur_time','Tdate').alias(\"month_inv\")) #选定月份的差额作为新的列,取别名为month_inv\ndf2.createOrReplaceTempView(\"Tur_db3\")\nq=\"\"\"select a.flag,count(a.flag)\nfrom\n(select \ncase when month_inv<=228 and month_inv>=0 then '1' \nwhen month_inv>228 and month_inv<=348 then '2'\nwhen month_inv>348 and month_inv<=456 then '3'\nwhen month_inv>588 and month_inv<=672 then '4' \nwhen month_inv>720 then 
'5' ELSE NULL END flag\nfrom Tur_db3) a\nwhere a.flag is not null\ngroup by a.flag\norder by flag\"\"\"\nspark.sql(q).show()\n","sub_path":"sparke/spark3.py","file_name":"spark3.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"389677436","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# Number of positions marked off along the hallway\nNUM_POSITIONS = 33\n\n# Configure print options\nnp.set_printoptions(precision=1,linewidth=94,threshold=10)\n\n# Initialize the lists to store the address, signal strengths, and counts in\naddressList = []\nsignalStrengths = [[] for i in range(NUM_POSITIONS)]\naddressCount = [[] for i in range(NUM_POSITIONS)]\n\n# Boolean to control whether the strongest signals are included in the plot\nplotStrongest = False\n\n# Pull in the data from the nth csv file and put addresses into addressList, signal strengths\n# into the nth column of signalStrengths, and number of frames from that address into addressCount\nfor tile in range(NUM_POSITIONS):\n filename = 'Tile ' + str(tile) + '.csv'\n print(\"Parsing file '\", filename,\"'\",sep='')\n with open(filename,mode='r') as csvData:\n hallData = csv.reader(csvData, dialect='excel')\n rowNum = 0\n for row in hallData:\n if rowNum==0:\n header = row\n else:\n address = row[2]\n signalStrength = row[7]\n if signalStrength.endswith(' dBm'):\n signalStrength = int(signalStrength[:-4])\n elif signalStrength=='':\n rowNum += 1\n break\n try:\n position = addressList.index(address)\n signalStrengths[tile][position] += signalStrength\n addressCount[tile][position] += 1\n except ValueError:\n addressList.append(address)\n signalStrengths[tile].append(signalStrength)\n addressCount[tile].append(1)\n rowNum += 1\n # Divide the signal strengths by number observed to get the average strength for that address\n signalStrengths[tile] = [x/y if y!=0 else 0 for x,y in 
zip(signalStrengths[tile],addressCount[tile])]\n # Initialize the next position with a bunch of zeros\n if tile != NUM_POSITIONS-1:\n signalStrengths[tile+1] = [0 for i in signalStrengths[tile]]\n addressCount[tile+1] = [0 for i in addressCount[tile]]\n\n# Go back and put in -100dBm for all of the addresses with frames not observed at each location\nnumAddresses = len(addressList)\nfor i in range(len(signalStrengths)):\n for j in range(len(signalStrengths[i])):\n if signalStrengths[i][j] == 0:\n signalStrengths[i][j] = -1000\n while len(signalStrengths[i]) < numAddresses:\n signalStrengths[i].append(-1000)\n addressCount[i].append(0)\n\n# This will print the number of frames recieved from each address at the locations listed\n# for location in [0, 10, 21, 32]:\n# print(\"Stations observed at location \", location, \":\\n\", addressCount[location])\n\nfor i in range(numAddresses):\n factor = max([signalStrengths[j][i] for j in range(NUM_POSITIONS)])\n for j in range(NUM_POSITIONS):\n if signalStrengths[j][i]==-1000:\n signalStrengths[j][i] =-100\n # else:\n # signalStrengths[j][i] -= factor\n\n# Plot all of them in the first subplot\n# plt.subplot(2,1,1)\n\nfor i in range(len(addressList)):\n if (max([addressCount[j][i] for j in range(NUM_POSITIONS)]) > 600):\n plt.plot(range(NUM_POSITIONS),[signalStrengths[j][i] for j in range(NUM_POSITIONS)],label=addressList[i])\n\n\n# plt.ylim(ymax=0.0000001) # Adjust the maximum value of the y axis\nplt.title(\"RSSI Signal Powers Across Hallway in dBm\")\nplt.ylabel(\"Signal Power (dBm)\")\nplt.xlabel(\"Hallway Position (tile number)\")\nplt.legend(loc='lower left')\n\n# Plot the ones that had smaller signal strengths in the second subplot\n# plt.subplot(2,1,2)\n\n# for i in range(len(addressList)):\n# if (max([addressCount[j][i] for j in range(NUM_POSITIONS)]) > 600) and (max([signalStrengths[j][i] for j in range(NUM_POSITIONS)]) < 1e-6):\n# plt.plot(range(NUM_POSITIONS),[signalStrengths[j][i] for j in 
range(NUM_POSITIONS)],label=addressList[i])\n\n\n# plt.title(\"Signal Powers Across Hallway\")\n# plt.ylabel(\"Signal Power (mW)\")\n# plt.xlabel(\"Hallway Position (tile number)\")\n# # plt.legend(loc='center left')\n\n# plt.tight_layout()\nplt.show()\n","sub_path":"Trevin/DataSet1/HallwaySignalPlot.py","file_name":"HallwaySignalPlot.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"381678585","text":"\"\"\"\r\nAuthor : Abraham Flores\r\nFile : HW2.py\r\nLanguage : Python 3.5\r\nCreated : 2/22/2018\r\nEdited : 2/26/2018\r\n\r\nSan Digeo State University \r\nMTH 693b : Computational Partial Differential Equations\r\n\r\nStrikwerda 3.4.1 : Boundary Conditions\r\n\r\nOne Way Wave Equation \r\n U_t + U_x = 0\r\n x = [0,1]\r\n t = [0,6.3]\r\n \r\n U_0(x) = Alpha*e^(-beta*(x-delta)^2)\r\n Alpha = 5\r\n beta = 100\r\n delta = .5\r\n U(t,-1) = 0\r\n U(t,3) = U(t,3-h)\r\n \r\n h = 1/40\r\n lambda = .95\r\n \r\n A. u(t,0) = 0; u(t,1) = 2u(t,1-h) - u(t,1-2h)\r\n B. u(t,0) = 0; u(t,1) = 0\r\n C. 
u(t,0) = 2u(t,h) - u(t,2h); u(t,1) = u(t-k,1-h)\r\n D u(t,0) = 0; u(t,1) = u(t-k,1-h)\r\n \r\n\"\"\"\r\nimport os,glob\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport seaborn as sns\r\n\r\n#Generates intial value function\r\ndef intial_foo(x):\r\n return 5*np.exp(-100*(x-.5)**2)\r\n\r\ndef plot(x,U,time,title,annotation,fileLoc):\r\n sns.set(font_scale = 2)\r\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\r\n fig,ax = plt.subplots()\r\n fig.set_size_inches(16,10)\r\n plt.plot(x,U,linewidth=3.0,label=\"t = \"+ str(round(time,3)),color=\"r\")\r\n plt.axis([0, 1, 0, 8]) \r\n plt.xlabel('x (Spatial)')\r\n plt.ylabel('U(x,t)')\r\n plt.title(title)\r\n ax.annotate(annotation[0],xy=(0,0) ,xytext=(.05, 7.5))\r\n ax.annotate(annotation[1],xy=(0,0) ,xytext=(.05, 6.5))\r\n\r\n plt.legend()\r\n plt.savefig(fileLoc+\".png\")\r\n plt.close()\r\n \r\ndef makeGif(gifName):\r\n os.chdir('Figures')\r\n #Create txt file for gif command\r\n fileList = glob.glob('*.png') #star grabs everything,\r\n fileList.sort()\r\n #writes txt file\r\n file = open('FileList.txt', 'w')\r\n for item in fileList:\r\n file.write(\"%s\\n\" % item)\r\n file.close()\r\n\r\n os.system('convert -delay 10 @FileList.txt ' + gifName + '.gif')\r\n os.system('del FileList.txt')\r\n os.system('del *.png')\r\n os.chdir('..')\r\n \r\n#A.\r\n# u(t,0) = 0 \r\n# u(t,1) = 2u(t,1-h) - u(t,1-2h) \r\ndef A(h,Lamb):\r\n #generate array of intial values at t = 0\r\n X = np.arange(0,1+h,h)\r\n temp = []\r\n for dx in X: \r\n temp.append(intial_foo(dx))\r\n \r\n prev_ = np.array(temp)\r\n #Need first step from FTCS\r\n current_ = .5*Lamb*(np.roll(prev_,1)-np.roll(prev_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n current_[-1] = 2*current_[-2] - current_[-3]\r\n current_[0] = 0\r\n \r\n steps = int(3.14/(Lamb*h)) + 2\r\n for time in range(steps):\r\n #plot \r\n title = \"BC: A\"\r\n str_time = '0'*(4-len(str(time)))+str(time)\r\n outFile = \"Figures\\LF\" + str_time\r\n BC = [\"u(t,0) = 
0\",\"u(t,1) = 2u(t,1-h) - u(t,1-2h)\"]\r\n plot(X,prev_,time*Lamb*h,title,BC,outFile)\r\n \r\n #implement Scheme\r\n next_ = Lamb*(np.roll(current_,1)-np.roll(current_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n next_[-1] = 2*next_[-2] - next_[-3]\r\n next_[0] = 0\r\n \r\n prev_ = current_\r\n current_ = next_\r\n \r\n #makeGif\r\n makeGif(\"BC_a\")\r\n return 0\r\n\r\n#B. \r\n# u(t,0) = 0\r\n# u(t,1) = 0\r\n\r\ndef B(h,Lamb):\r\n #generate array of intial values at t = 0\r\n X = np.arange(0,1+h,h)\r\n temp = []\r\n for dx in X: \r\n temp.append(intial_foo(dx))\r\n \r\n prev_ = np.array(temp)\r\n #Need first step from FTCS\r\n current_ = .5*Lamb*(np.roll(prev_,1)-np.roll(prev_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n current_[-1] = 0\r\n current_[0] = 0\r\n \r\n steps = int(3.14/(Lamb*h)) + 2\r\n for time in range(steps):\r\n #plot \r\n title = \"BC: B\"\r\n str_time = '0'*(4-len(str(time)))+str(time)\r\n outFile = \"Figures\\LF\" + str_time\r\n BC = [\"u(t,0) = 0\",\"u(t,1) = 0\"]\r\n plot(X,prev_,time*Lamb*h,title,BC,outFile)\r\n \r\n #implement Scheme\r\n next_ = Lamb*(np.roll(current_,1)-np.roll(current_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n next_[-1] = 0\r\n next_[0] = 0\r\n \r\n prev_ = current_\r\n current_ = next_\r\n \r\n #makeGif\r\n makeGif(\"BC_b\")\r\n return 0\r\n\r\n#C. 
\r\n# u(t,0) = 2u(t,h) - u(t,2h)\r\n# u(t,1) = u(t-k,1-h)\r\ndef C(h,Lamb):\r\n #generate array of intial values at t = 0\r\n X = np.arange(0,1+h,h)\r\n temp = []\r\n for dx in X: \r\n temp.append(intial_foo(dx))\r\n \r\n prev_ = np.array(temp)\r\n #Need first step from FTCS\r\n current_ = .5*Lamb*(np.roll(prev_,1)-np.roll(prev_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n current_[-1] = prev_[-2]\r\n current_[0] = 2*current_[1] - current_[2]\r\n \r\n steps = int(3.14/(Lamb*h)) + 2\r\n for time in range(steps):\r\n #plot \r\n title = \"BC: C\"\r\n str_time = '0'*(4-len(str(time)))+str(time)\r\n outFile = \"Figures\\LF\" + str_time\r\n BC = [\"u(t,0) = 2u(t,h) - u(t,2h)\",\"u(t,1) = u(t-k,1-h)\"]\r\n plot(X,prev_,time*Lamb*h,title,BC,outFile)\r\n \r\n #implement Scheme\r\n next_ = Lamb*(np.roll(current_,1)-np.roll(current_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n next_[-1] = current_[-2]\r\n next_[0] = 2*next_[1] - next_[2]\r\n \r\n prev_ = current_\r\n current_ = next_\r\n \r\n #makeGif\r\n makeGif(\"BC_c\")\r\n return 0\r\n\r\n#D\r\n#u(t,0) = 0\r\n#u(t,1) = u(t-k,1-h)\r\ndef D(h,Lamb):\r\n #generate array of intial values at t = 0\r\n X = np.arange(0,1+h,h)\r\n temp = []\r\n for dx in X: \r\n temp.append(intial_foo(dx))\r\n \r\n prev_ = np.array(temp)\r\n #Need first step from FTCS\r\n current_ = .5*Lamb*(np.roll(prev_,1)-np.roll(prev_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n current_[-1] = prev_[-2]\r\n current_[0] = 0\r\n \r\n steps = int(3.14/(Lamb*h)) + 2\r\n for time in range(steps):\r\n #plot \r\n title = \"BC: D\"\r\n str_time = '0'*(4-len(str(time)))+str(time)\r\n outFile = \"Figures\\LF\" + str_time\r\n BC = [\"u(t,0) = 0\",\"u(t,1) = u(t-k,1-h)\"]\r\n plot(X,prev_,time*Lamb*h,title,BC,outFile)\r\n \r\n #implement Scheme\r\n next_ = Lamb*(np.roll(current_,1)-np.roll(current_,-1)) + prev_\r\n \r\n #Boundary Conditions\r\n next_[-1] = current_[-2]\r\n next_[0] = 0\r\n \r\n prev_ = current_\r\n current_ = next_\r\n \r\n #makeGif\r\n 
makeGif(\"BC_d\")\r\n return 0\r\n\r\nif __name__ == '__main__':\r\n h = 1.0/40\r\n L = 0.95\r\n A(h,L)\r\n B(h,L)\r\n D(h,L)\r\n \r\n#Change y-limit on plot to -8 : 8 to see error. annotate 6.5->5, 7.5->7\r\n #C(h,L) \r\n \r\n \r\n\"\"\"\r\nReport: \r\n The final boundary condition (D) is the only one that does not fall\r\n victim to the oscillations of leapfrog. D. makes use of leapfrog stability \r\n for the right boundary and trivialy ignores the left. As the solution is\r\n propagating to the right. All other conditions either amplfiy the parsitic\r\n solution (A) or amplfify error due to the oscillations (B(right),C(left))\r\n\r\n\"\"\"","sub_path":"Strikwerda-Problems/Chapter-3/Section-4/Problem-1/p341.py","file_name":"p341.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108849569","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields,models,api, _\nfrom odoo.exceptions import UserError\nfrom datetime import *\nimport re,json,urllib3,base64\n\n# Wizard para mostrar la preview de las guias de remision:\nclass Preview_Remission_Guide_Wizard(models.TransientModel):\n\t_name = 'preview.remission.guide.wizard'\n\tline_ids = fields.One2many('preview.remission.guide.wizard.line','wizard_id')\n\t\n\tpicking_id = fields.Many2one('stock.picking')\n\n\t####COMPAÑIA####\n\tcompany_id = fields.Many2one(related='picking_id.company_id',string=u'Compañía')\n\tcompany_ruc = fields.Char(related='company_id.partner_id.vat')\n\tcompany_name = fields.Char(related='company_id.name')\n\tcompany_image = fields.Binary(related='company_id.logo')\n\n\t# picking fields:\n\telectronic_guide = fields.Boolean(related='picking_id.electronic_guide') \n\tkardex_date = fields.Datetime(related='picking_id.kardex_date') \n\tpicking_type_id = fields.Many2one(related='picking_id.picking_type_id')\n\tname = fields.Char(related='picking_id.name')\n\tgross_weight = 
fields.Float(related='picking_id.total_gross_weight')\n\tnum_pieces = fields.Float(related='picking_id.number_of_packages')\n\tinvoice_id = fields.Many2one(related='picking_id.invoice_id')\n\ttransfer_reason_id = fields.Many2one(related='picking_id.reason_transfer')\n\tnumberg = fields.Char(related='picking_id.numberg')\n\tnote = fields.Text(related='picking_id.note')\n\tseries = fields.Char('Serie de Guia')\n\tnumber = fields.Char('Nro de Guía')\n\tpdf_url = fields.Char(related='picking_id.print_web_version_pdf',string='PDF guía de remisión')\n\tsuccess_message = fields.Char(string='*')\n\t\n\treceiver_partner_id = fields.Many2one('res.partner',related='picking_id.partner_id.commercial_partner_id')\n\t\n\t# Destinos:\n\tstart_point = fields.Char(string='Punto de Partida',related='picking_id.starting_point')\n\tubigeo_start = fields.Char(related='picking_id.picking_type_id.warehouse_id.partner_id.zip')\n\tpoint_arrival = fields.Char(string='Punto de llegada',related='picking_id.ending_point')\n\tubigeo_arrival= fields.Char(related='picking_id.partner_id.zip')\n\t\n\t# transportista\n\ttransporter_id = fields.Many2one(related='picking_id.carrier_id_it')\n\ttransporter_doc_id = fields.Many2one(related='transporter_id.l10n_latam_identification_type_id')\n\ttransporter_doc = fields.Char(related='transporter_id.vat')\n\ttransporter_type = fields.Selection(related='picking_id.type_of_transport')\n\n\t# conductor\n\tdriver_id = fields.Many2one(related='picking_id.driver_id',string='Conductor')\n\tdriver_doc_id = fields.Many2one(related='driver_id.l10n_latam_identification_type_id')\n\tdriver_doc = fields.Char(related='driver_id.vat')\n\t\n\tlicense_num = fields.Char(related='driver_id.number_driver_licence',string='Nro Licencia')\n\n\tvehicle_id = fields.Many2one(related='picking_id.vehicle_id')\n\tlicense_plate = fields.Char(related='vehicle_id.license_plate')\n\t\n\tdate_traslate = fields.Date(related='picking_id.transfer_date',string='Fecha de traslado')\n\n\tdef 
post_request(self):\n\t\tparameters = self.env['main.parameter'].search([('company_id','=',self.company_id.id)],limit=1)\n\t\tguide_line = next(filter(lambda s:s.series_id == self.picking_id.serie_guia,parameters.guide_series_ids))\n\t\tif not self.electronic_guide:\n\t\t\treturn \n\t\tseries = self.env['remission.guide.series'].search([('series_id','=',self.picking_id.serie_guia.id),('parameter_id.company_id','=',self.company_id.id)],limit=1)\n\t\t# Validaciones:\n\t\tif not series:\n\t\t\traise UserError(u'No se ha encontrado una configuración de parámetros para la serie de guía %s\\nVaya a Contabilidad->Configuración->Parámetros->Guía de remisión Electrónica para configurarlos.'%self.picking_id.serie_guia.name)\n\t\tnow = datetime.strftime(fields.Date.context_today(self),'%Y-%m-%d')\n\t\tdate_traslate_pick = self.date_traslate if self.date_traslate else self.kardex_date.date()\n\t\ttraslate = datetime.strftime(date_traslate_pick,'%Y-%m-%d')\n\n\n\n\t\t# valor opara poner la factura en la observación\n\t\tdocfact = \"\"\n\t\tif self.invoice_id.id:\n\t\t\tif self.invoice_id.state == 'posted': \n\t\t\t\tdocfact=' Fac/Bol.: '+self.invoice_id.ref\n\t\t\n\t\tif self.picking_id.sale_id.id:\n\t\t\tif self.picking_id.sale_id.client_order_ref:\n\t\t\t\tdocfact += ' Orden de Cliente: ' + self.picking_id.sale_id.client_order_ref\n\t\t\n\t\tif not self.line_ids:\n\t\t\traise UserError('No hay líneas de bienes disponibles')\n\n\t\tdata = {\n\t\t\t\"operacion\": \"generar_guia\",\n\t\t\t\"tipo_de_comprobante\": 7, # guia de rem remitente = 7\n\t\t\t\"serie\": self.series,\n\t\t\t\"numero\": self.number,\n\t\t\t\"cliente_tipo_de_documento\": self.receiver_partner_id.l10n_latam_identification_type_id.code_sunat,\n\t\t\t\"cliente_numero_de_documento\":self.receiver_partner_id.vat,\n\t\t\t\"cliente_denominacion\": self.receiver_partner_id.name,\n\t\t\t\"cliente_direccion\": self.receiver_partner_id.street,\n\t\t\t\"cliente_email\": self.receiver_partner_id.email or 
'',\n\t\t\t\"cliente_email_1\": \"\",\n\t\t\t\"cliente_email_2\": \"\",\n\t\t\t\"fecha_de_emision\":now,\n\t\t\t\"observaciones\": 'LIC. ' + self.license_num + ' ' +self.note if self.note else 'LIC. ' + self.license_num+ docfact,\n\t\t\t\"motivo_de_traslado\": self.transfer_reason_id.code,\n\t\t\t\"peso_bruto_total\": str(self.gross_weight) if self.gross_weight else \"0\",\n\t\t\t\"numero_de_bultos\": str(int(self.num_pieces)) if self.num_pieces else \"0\",\n\t\t\t\"tipo_de_transporte\": self.transporter_type,\n\t\t\t\"fecha_de_inicio_de_traslado\": traslate,\n\t\t\t\"transportista_documento_tipo\": self.transporter_doc_id.code_sunat,\n\t\t\t\"transportista_documento_numero\": self.transporter_doc,\n\t\t\t\"transportista_denominacion\": self.transporter_id.name,\n\t\t\t\"transportista_placa_numero\": self.license_plate or \"\",\n\t\t\t\"conductor_documento_tipo\": self.driver_doc_id.code_sunat,\n\t\t\t\"conductor_documento_numero\": self.driver_doc,\n\t\t\t\"conductor_denominacion\": self.driver_id.name,\n\t\t\t\"punto_de_partida_ubigeo\": self.ubigeo_start if self.ubigeo_start else self.picking_id.picking_type_id.warehouse_id.partner_id.district_id.code,\n\t\t\t\"punto_de_partida_direccion\": self.start_point,\n\t\t\t\"punto_de_llegada_ubigeo\": self.ubigeo_arrival if self.ubigeo_arrival else self.picking_id.partner_id.district_id.code,\n\t\t\t\"punto_de_llegada_direccion\": self.point_arrival,\n\t\t\t\"enviar_automaticamente_a_la_sunat\": \"true\",\n\t\t\t\"enviar_automaticamente_al_cliente\": \"true\",\n\t\t\t\"codigo_unico\": \"\",\n\t\t\t\"formato_de_pdf\": \"\"\n\t\t}\n\t\tg_items = []\n\t\tfor l in self.line_ids:\n\t\t\tif l.move_id.state == 'done':\n\t\t\t\tg_items.append({\n\t\t\t\t\t\"unidad_de_medida\":'ZZ' if l.product_id.type=='service' else 'NIU',\n\t\t\t\t\t\"codigo\": l.default_code,\n\t\t\t\t\t\"descripcion\": l.product_id.name_get()[0][1].replace(l.default_code,\"\").replace(\"[]\",\"\") if l.default_code else 
l.product_id.name_get()[0][1],\n\t\t\t\t\t\"cantidad\": l.quantity,\n\t\t\t\t})\n\t\tdata.update({\"items\":g_items})\n\t\t#raise UserError(str(data))\n\t\tself.picking_id.json_post = data\n\t\thttp = urllib3.PoolManager()\n\t\ttry:\n\t\t\tr = http.request('POST',\n\t\t\t\t\t\t\tguide_line.path,\n\t\t\t\t\t\t\theaders = {'Content-Type':'application/json',\n\t\t\t\t\t\t\t\t\t 'Authorization':'Token token = \"%s\"'%guide_line.token},\n\t\t\t\t\t\t\tbody = json.dumps(data))\n\t\texcept urllib3.exceptions.HTTPError as e:\n\t\t\traise UserError(u'Error al procesar datos de guía electrónica!\\nDetalles:\\n'+e.read())\n\t\tresponse = json.loads(r.data.decode('utf-8'))\n\t\tself.picking_id.json_get = response\n\t\tif 'errors' in response:\n\t\t\traise UserError('Respuesta del Facturador: ' + response['errors'])\n\t\tif 'enlace_del_pdf' in response:\n\t\t\tself.picking_id.print_web_version_pdf = response['enlace_del_pdf']\n\t\tif 'enlace_del_xml' in response:\n\t\t\tself.picking_id.print_web_version_xml = response['enlace_del_xml']\n\t\tif 'aceptada_por_sunat' in response:\n\t\t\tself.picking_id.sunat_state = '3'\n\t\t\tself.success_message = u'La transacción se realizó con éxito!'\n\n\t\treturn {\"type\": \"ir.actions.do_nothing\",}\n\n\tdef print_remission_guide(self):\n\t\t# Ya que cada empresa/cliente tiene un formato particular de guía de remisión,\n\t\t# el método print_remision de stock.picking debe encargarse de eso.\n\t\tmethod = getattr(self.picking_id,'print_remision',None)\n\t\tif callable(method):\n\t\t\treturn self.picking_id.print_remision()\n\t\treturn True\n\n\tdef download_pdf_file(self):\n\t\tif not self.electronic_guide or not self.pdf_url:\n\t\t\traise UserError('Archivo PDF no disponible')\n\t\ttry:\n\t\t\tif self.pdf_url != '':\n\t\t\t\thttp = urllib3.PoolManager()\n\t\t\t\tresponse = http.request('GET',self.pdf_url)\n\t\t\telse:\n\t\t\t\treturn\n\t\texcept urllib3.exceptions.HTTPError as err:\n\t\t\tif err.code == 404:\n\t\t\t\traise UserError(u'El 
archivo ha sido removido o no está disponible')\n\t\t\telse:\n\t\t\t\traise UserError(u'Ha ocurrido un error al intentar obtener su archivo.')\n\t\telse:\n\t\t\treturn self.env['popup.it'].get_file(u'Guia Electrónica %s.pdf'%self.numberg,base64.encodestring(response.data))\n\t\t\n\t\t\nclass PreviewRemissionGuideWizardLine(models.TransientModel):\n\t_name = 'preview.remission.guide.wizard.line'\n\twizard_id = fields.Many2one('preview.remission.guide.wizard')\n\tmove_id = fields.Many2one('stock.move')\n\tproduct_id = fields.Many2one(related='move_id.product_id')\n\tquantity = fields.Float(related='move_id.product_uom_qty')\n\tuom_id = fields.Many2one(related='move_id.product_uom')\n\tdefault_code = fields.Char(related='product_id.default_code')\n\n\n","sub_path":"remission_guide_it/wizard/view_remission_guide_wizard.py","file_name":"view_remission_guide_wizard.py","file_ext":"py","file_size_in_byte":9102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"460435965","text":"from flask import Flask, render_template, request\nfrom name_path import samat\nimport os, fnmatch\nfrom diff import textDiff\nfrom move import get_replaced_items\n\nMAX_FILE_SIZE = 1024 * 1024 + 1\n\napp = Flask(__name__)\n\ndef load_structure(filepath):\n with open(filepath, 'r') as file_handler:\n return (file_handler.read())\n\ndef load_structure_as_list(filepath):\n with open(filepath, 'r') as file_handler:\n return (file_handler.readlines())\n\n\n\n@app.route('/')\ndef form():\n huawei = os.getcwd()+'/compare/'\n print(huawei)\n listOfFiles = os.listdir(huawei)\n pattern = \"*.html\" \n samat = [entry for entry in listOfFiles if fnmatch.fnmatch(entry, pattern)]\n a = load_structure(huawei+samat[0])\n b = load_structure(huawei+samat[1])\n c = textDiff(a,b)\n d = load_structure_as_list(huawei+samat[0])\n e = load_structure_as_list(huawei+samat[1])\n f = get_replaced_items(d,e)\n return render_template('post.html', samat = c ,a = a, b =b, f 
= f)\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"35_diff_service/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462470633","text":"# PyPy3 정답, Python 3 시간 초과\nimport sys\ninput = sys.stdin.readline\n\n\ndef check(k, x, y):\n if k in sudoku[x]:\n return False\n\n # zip() 쓰면 시간 초과\n # if k in list(zip(*sudoku))[y]:\n # return False\n for i in range(9):\n if sudoku[i][y] == k:\n return False\n \n ii, jj = (x // 3) * 3, (y // 3) * 3\n\n for i in range(3):\n for j in range(3):\n if sudoku[ii + i][jj + j] == k:\n return False\n\n return True\n \n\ndef DFS(depth):\n if depth == len(zeroList): \n for line in sudoku:\n print(*line)\n sys.exit(0)\n \n for k in range(1, 10):\n x = zeroList[depth][0]\n y = zeroList[depth][1]\n \n if check(k, x, y):\n sudoku[x][y] = k\n DFS(depth + 1)\n sudoku[x][y] = 0\n\n\nsudoku = [list(map(int, input().split())) for _ in range(9)]\nzeroList = [(i, j) for j in range(9) for i in range(9) if sudoku[i][j] == 0]\n\nDFS(0)\n","sub_path":"BaekjoonOnlineJudge/2580/2580.py","file_name":"2580.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"514768773","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport logging\nimport sys\nimport traceback\nimport hashlib\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef error_message(e, message=None, cause=None):\n \"\"\"\n Formats exception message + cause\n :param e:\n :param message:\n :param cause:\n :return: formatted message, includes cause if any is set\n \"\"\"\n if message is None and cause is None:\n return None\n elif message is None:\n return '%s, caused by %r' % (e.__class__, cause)\n elif cause is None:\n return message\n else:\n return '%s, caused by %r' % (message, cause)\n\n\nclass Tracelogger(object):\n \"\"\"\n Prints 
traceback to the debugging logger if not shown before\n \"\"\"\n\n def __init__(self, logger=None):\n self.logger = logger\n self._db = set()\n\n def log(self, cause=None, do_message=True, custom_msg=None):\n \"\"\"\n Loads exception data from the current exception frame - should be called inside the except block\n :return:\n \"\"\"\n message = error_message(self, cause=cause)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback_formatted = traceback.format_exc()\n traceback_val = traceback.extract_tb(exc_traceback)\n\n md5 = hashlib.md5(traceback_formatted).hexdigest()\n\n if md5 in self._db:\n # self.logger.debug('Exception trace logged: %s' % md5)\n return\n\n if custom_msg is not None and cause is not None:\n self.logger.debug('%s : %s' % (custom_msg, cause))\n elif custom_msg is not None:\n self.logger.debug(custom_msg)\n elif cause is not None:\n self.logger.debug('%s' % cause)\n\n self.logger.debug(traceback_formatted)\n self._db.add(md5)\n\n\n","sub_path":"codesign/trace_logger.py","file_name":"trace_logger.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"520059764","text":"import math\nfrom typing import List\nfrom unittest import skip\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.test import TestCase as DjangoTestCase, override_settings\nfrom django.utils import timezone\n\nfrom django_comments_tree.models import (TreeComment)\nfrom django_comments_tree.tests.models import Article\n\nutime = lambda d: timezone.now() - timezone.timedelta(days=d)\n\n\nclass ArticleBaseTestCase(DjangoTestCase):\n def setUp(self):\n self.article_1 = Article.objects.create(\n title=\"September\", slug=\"september\", body=\"During September...\")\n self.article_2 = Article.objects.create(\n title=\"October\", slug=\"october\", body=\"What I did on October...\")\n\n\ndef calc_count(n, x):\n 
return pow(math.ceil(pow(n, 1 / x)), x)\n\n\ndef parent_for(node, path_to_node):\n basepath = node._get_basepath(node.path, node.depth - 1)\n if basepath in path_to_node:\n return path_to_node.get(basepath).id\n return None\n\n\ndef make_lots_of_comments(root: TreeComment,\n count: int = None,\n depth: int = 5,\n prefix: str = \"C\"):\n \"\"\" Make comments algorithmically and optionally randomly\n This can be called recursively, so that we can create\n a hierarchy of comments, for deep testing, and performance\n testing.\n\n root: node to attach comments to.\n count: int\n \"\"\"\n count = count if count is not None else 2 ** depth\n total_comments = 0\n children = []\n for x in range(count):\n comment = f\"{prefix}{x}\"\n child = root.add_child(comment=comment)\n # print(f\"Created comment {comment}\")\n total_comments += 1\n children.append(child)\n if depth > 1:\n n = make_lots_of_comments(child,\n count=count // 2,\n depth=depth - 1,\n prefix=comment + \"-R\")\n total_comments += n\n return total_comments\n\n\ndef make_comments(root, spec, default_kwargs=None):\n \"\"\" Make nested comments, based on provided spec\n spec format:\n [('Comment',), ('Comment2', [('Child',)],)]\n A comment tuple has 3 possible arguments:\n comment: string\n children: array of comments\n kwargs: dict of arguments to comment creation\n\n \"\"\"\n\n args_default = default_kwargs if default_kwargs is not None else {}\n for args in [list(x) for x in spec]:\n comment = args.pop(0) if len(args) else \"Default Comment\"\n child_spec = args.pop(0) if len(args) else []\n kwargs = args.pop(0) if len(args) else args_default\n\n child = root.add_child(comment=comment, **kwargs)\n make_comments(child, child_spec)\n\n\nclass TestTreeCommentQueries(ArticleBaseTestCase):\n\n def setUp(self):\n super().setUp()\n self.article_ct = ContentType.objects.get(app_label=\"tests\",\n model=\"article\")\n\n self.site1 = Site.objects.get(pk=1)\n self.site2 = Site.objects.create(domain='site2.com', 
name='site2.com')\n\n self.root_1 = TreeComment.objects.get_or_create_root(self.article_1)\n self.root_1_pk = self.root_1.pk\n self.root_2 = TreeComment.objects.get_or_create_root(\n self.article_1, site=self.site2)\n self.root_2_pk = self.root_2.pk\n\n r1 = TreeComment.objects.get(pk=self.root_1_pk)\n old = utime(10)\n new = utime(1)\n\n make_comments(r1, [\n ('Comment 1', [\n ('Comment 1, Reply 1',),\n ('Comment 1, Reply 2', [\n ('Comment 1, Reply 2, Reply 1', [], {'updated_on': utime(1)})\n ], {'updated_on': utime(1.5)})\n ],),\n ('Comment 2', [\n ('Comment 2, Reply 1', [], {'updated_on': utime(1.5)},),\n ('Comment 2, Reply 2', [\n ('Comment 2, Reply 2, Reply 1', [], {'updated_on': utime(0.5)},)\n ], {'updated_on': utime(1.3)},)\n ], {'updated_on': utime(1.6)}),\n ], {'updated_on': utime(5)})\n\n make_comments(self.root_2, [\n ('Comment 1', [\n ('Comment 1, Reply 1', [], {'is_public': False},),\n ('Comment 1, Reply 2', [\n ('Comment 1, Reply 2, Reply 1', [], {'is_public': False},)\n ], {'is_public': False},)\n ],),\n ('Comment 2', [\n ('Comment 2, Reply 1',),\n ('Comment 2, Reply 2', [\n ('Comment 2, Reply 2, Reply 1',)\n ],)\n ],),\n ], {'updated_on': utime(3)})\n\n def test_unfiltered_tree(self):\n # there is no comment posted yet to article_1 nor article_2\n self.root_1.refresh_from_db()\n print(self.root_1.get_descendant_count())\n print(self.root_1.get_children_count())\n tree = TreeComment.tree_from_comment(self.root_1)\n self.assertEqual(len(tree), 2,\n \"Expected 2 comments for this node\")\n self.assertEqual(len(tree[0]['children']), 2,\n \"Expected 2 replies to first comment\")\n self.assertEqual(len(tree[0]['children'][0]['children']), 0,\n \"Expected no replies to first comment reply\")\n self.assertEqual(len(tree[0]['children'][1]['children']), 1,\n \"Expected 1 reply to second comment reply\")\n\n def test_private_tree(self):\n # there is no comment posted yet to article_1 nor article_2\n self.root_2.refresh_from_db()\n 
print(self.root_2.get_descendant_count())\n print(self.root_2.get_children_count())\n tree = TreeComment.tree_from_comment(self.root_2)\n self.assertEqual(len(tree), 2,\n \"Expected 2 comments for this node\")\n self.assertEqual(len(tree[0]['children']), 0,\n \"Expected 0 public replies to first comment\")\n self.assertEqual(len(tree[1]['children'][1]['children']), 1,\n \"Expected 1 reply to second comment reply\")\n\n def test_public_filter(self):\n # there is no comment posted yet to article_1 nor article_2\n self.root_2.refresh_from_db()\n print(self.root_2.get_descendant_count())\n print(self.root_2.get_children_count())\n tree = TreeComment.tree_from_comment(self.root_2, filter_public=False)\n self.assertEqual(len(tree), 2,\n \"Expected 2 comments for this node\")\n self.assertEqual(len(tree[0]['children']), 2,\n \"Expected 2 replies to first comment\")\n self.assertEqual(len(tree[0]['children'][0]['children']), 0,\n \"Expected no replies to first comment reply\")\n self.assertEqual(len(tree[0]['children'][1]['children']), 1,\n \"Expected 1 reply to second comment reply\")\n\n @skip('Not ready yet')\n def test_filter_old_messages(self):\n # there is no comment posted yet to article_1 nor article_2\n self.root_1.refresh_from_db()\n print(self.root_1.get_descendant_count())\n print(self.root_1.get_children_count())\n tree = TreeComment.tree_from_comment(self.root_1,\n filter_public=False,\n start=utime(20),\n end=utime(2))\n\n self.assertEqual(len(tree), 1,\n \"Expected 2 comments for this node\")\n self.assertEqual(len(tree[0]['children']), 0,\n \"Expected one reply to first comment\")\n self.assertEqual(len(tree[1]['children']), 2,\n \"Expected 2 replies to second comment\")\n self.assertEqual(len(tree[0]['children'][0]['children']), 0,\n \"Expected 1 replies to first comment reply\")\n self.assertEqual(len(tree[0]['children'][0]['children']), 1,\n \"Expected 1 replies to first comment reply\")\n\n @skip('Not ready yet')\n def test_filter_new_messages(self):\n 
# there is no comment posted yet to article_1 nor article_2\n self.root_1.refresh_from_db()\n print(self.root_1.get_descendant_count())\n print(self.root_1.get_children_count())\n now = timezone.now()\n tree = TreeComment.tree_from_comment(self.root_1,\n filter_public=False,\n start=now - timezone.timedelta(days=2),\n end=now)\n\n self.assertEqual(len(tree), 1,\n \"Expected 2 comments for this node\")\n self.assertEqual(len(tree[0]['children']), 1,\n \"Expected 2 replies to first comment\")\n self.assertEqual(len(tree[0]['children'][0]['children']), 1,\n \"Expected no replies to first comment reply\")\n\n\nclass TestTreeCommentPerformance(ArticleBaseTestCase):\n\n def setUp(self):\n super().setUp()\n self.article_ct = ContentType.objects.get(app_label=\"tests\",\n model=\"article\")\n\n self.site1 = Site.objects.get(pk=1)\n self.site2 = Site.objects.create(domain='site2.com', name='site2.com')\n\n self.root_1 = TreeComment.objects.get_or_create_root(self.article_1)\n self.root_1_pk = self.root_1.pk\n self.root_2 = TreeComment.objects.get_or_create_root(\n self.article_1, site=self.site2)\n self.root_2_pk = self.root_2.pk\n\n r1 = TreeComment.objects.get(pk=self.root_1_pk)\n\n make_lots_of_comments(r1, count=4, depth=4)\n\n def test_unfiltered_tree(self):\n # there is no comment posted yet to article_1 nor article_2\n self.root_1.refresh_from_db()\n cnt = self.root_1.get_descendant_count()\n\n data = TreeComment.structured_tree_data(self.root_1)\n print(f\"Comment: {data.get('comment')}\")\n\n self.assertEqual(cnt, len(data.get('comments')),\n \"Expected count to match\")\n n_children = self.root_1.get_children_count()\n child_comments = [n for n in data.get('comments')\n if n.depth == 1]\n self.assertEqual(n_children, len(child_comments),\n \"Expected tree to have matching 
children\")\n","sub_path":"django_comments_tree/tests/test_queries.py","file_name":"test_queries.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"202160647","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 7 15:52:52 2018\n\n@author: owen\n\"\"\"\n\n# There are a total of n courses you have to take, labeled from 0 to n - 1.\n\n# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n\n# Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?\n\n# 是否能完成拓扑排序\n\nclass Solution:\n \"\"\"\n @param: numCourses: a total of n courses\n @param: prerequisites: a list of prerequisite pairs\n @return: true if can finish all courses or false\n \"\"\"\n def canFinish(self, numCourses, prerequisites):\n # write your code here\n # topological sort + BFS, time O(numCourses * len(pre)), space O(numCourses + len(pre))\n graph = collections.defaultdict(list)\n indegree = collections.defaultdict(int) # default = 0, or use indegree = [0] * numCourses\n for edge in prerequisites:\n graph[edge[1]].append(edge[0])\n indegree[edge[0]] += 1\n \n dq = collections.deque()\n for i in range(numCourses): # for these courses have no prerequisites, in-degree is 0\n if indegree[i] == 0:\n dq.append(i)\n \n cnt = 0 # count how many courses are visited\n while dq:\n curr = dq.popleft()\n cnt += 1\n for nei in graph[curr]:\n indegree[nei] -= 1\n if indegree[nei] == 0:\n dq.append(nei)\n \n return cnt == numCourses\n","sub_path":"Course Schedule.py","file_name":"Course Schedule.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"520267247","text":"import matplotlib.pyplot as plt\nfrom GraphStat.Graph import stat\ndef 
plot_nodes_weight(graph,file,RANGE=-1,heng=0):#dicdata:字典的数据。\n #RANGE:截取显示的字典的长度。\n #heng=0,代表条状图的柱子是竖直向上的。heng=1,代表柱子是横向的。考虑到文字是从左到右的,让柱子横向排列更容易观察坐标轴。\n dicdata=stat.get_weight_distribution(graph)\n by_value = sorted(dicdata.items(),key = lambda item:item[1],reverse=True)\n x = []\n y = []\n plt.yticks( fontsize=7)\n for d in by_value:\n x.append(d[0])\n y.append(d[1])\n if heng == 0:\n figure=plt.bar(x[0:RANGE], y[0:RANGE])\n plt.savefig(file)\n plt.show()\n return\n elif heng == 1:\n figure=plt.barh(x[0:RANGE],y[0:RANGE] )\n plt.savefig(file)\n plt.show()\n return\n else:\n return \"heng的值仅为0或1!\"\n","sub_path":"GraphStat/Visualization/plotnodes.py","file_name":"plotnodes.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"95911518","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0002_auto_20150719_1918'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='created_on',\n field=models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='product',\n name='updated_on',\n field=models.DateTimeField(default=django.utils.timezone.now, auto_now=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='product',\n name='slug',\n field=models.SlugField(max_length=20, blank=True),\n ),\n ]\n","sub_path":"products/migrations/0003_auto_20150721_1249.py","file_name":"0003_auto_20150721_1249.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"338390965","text":"from collections import defaultdict\nfrom util import *\nimport re\nfrom queue import Queue,Empty\nfrom threading import 
Thread\nimport sys\n\n#Problem code\n\nclass Interpreter:\n\n\t#array is an array of ints contains the program \n\t#inp is the input as an array\n\tdef __init__(self, array, inp, useDefaultInput=True):\n\t\tself.array = array.copy()\n\t\tself.inputs = Queue()\n\t\tfor i in inp:\n\t\t\tself.inputs.put(i)\n\t\tself.defaultInput = array[-1]\n\t\tself.useDefaultInput = useDefaultInput\n\t\tself.output = Queue()\n\t\tself.relativeBase = 0\n\t\tself.isRunning = False\n\n\tdef __repr__(self):\n\t\treturn str(self.__dict__)\n\n\tdef runProgram(self):\n\t\tself.isRunning = True\n\t\tops = {\n\t\t\t1:self.opAdd,\n\t\t\t2:self.opMul,\n\t\t\t3:self.opInput,\n\t\t\t4:self.opOutput,\n\t\t\t5:self.opJmpTrue,\n\t\t\t6:self.opJmpFalse,\n\t\t\t7:self.opLessThan,\n\t\t\t8:self.opEquals,\n\t\t\t9:self.opAdjustBase,\n\t\t\t99:self.opHalt\n\t\t}\n\t\tpc = 0\n\t\twhile True:\n\t\t\topcode,modes = self.parseOpcodeAndModes(pc)\n\t\t\tnewpc = ops[opcode](pc,modes)\n\t\t\tif newpc == -1:\n\t\t\t\tself.isRunning = False\n\t\t\t\treturn\n\t\t\tpc = newpc\n\n\tdef getInput(self):\n\t\tif self.useDefaultInput:\n\t\t\ttry:\n\t\t\t\ta = self.inputs.get(False)\n\t\t\t\treturn a\n\t\t\texcept:\n\t\t\t\treturn self.defaultInput\n\t\telse:\n\t\t\treturn self.inputs.get()\n\n\tdef opAdd(self, pc, modes):\n\t\tself.write(pc+3, modes[2], self.read(pc+1,modes[0]) + self.read(pc+2,modes[1]))\n\t\treturn pc+4\n\n\tdef opMul(self, pc, modes):\n\t\tself.write(pc+3, modes[2], self.read(pc+1,modes[0]) * self.read(pc+2,modes[1]))\n\t\treturn pc+4\n\n\tdef opInput(self, pc, modes):\n\t\tself.write(pc+1, modes[0], self.getInput())\n\t\treturn pc+2\n\n\tdef opOutput(self, pc, modes):\n\t\tself.output.put(self.read(pc+1,modes[0]))\n\t\treturn pc+2\n\n\tdef opJmpTrue(self, pc, modes):\n\t\tif self.read(pc+1, modes[0]) != 0:\n\t\t\treturn self.read(pc+2, modes[1])\n\t\treturn pc+3\n\n\tdef opJmpFalse(self, pc, modes):\n\t\tif self.read(pc+1, modes[0]) == 0:\n\t\t\treturn self.read(pc+2, modes[1])\n\t\treturn 
pc+3\n\n\tdef opLessThan(self, pc, modes):\n\t\tif self.read(pc+1,modes[0]) < self.read(pc+2,modes[1]):\n\t\t\tself.write(pc+3, modes[2],1)\n\t\telse:\n\t\t\tself.write(pc+3, modes[2],0)\n\t\treturn pc+4\n\n\tdef opEquals(self, pc, modes):\n\t\tif self.read(pc+1,modes[0]) == self.read(pc+2,modes[1]):\n\t\t\tself.write(pc+3, modes[2],1)\n\t\telse:\n\t\t\tself.write(pc+3, modes[2],0)\n\t\treturn pc+4\n\n\tdef opHalt(self, pc, modes):\n\t\treturn -1\n\n\tdef opAdjustBase(self, pc, modes):\n\t\tself.relativeBase += self.read(pc+1,modes[0])\n\t\treturn pc+2\n\n\tdef parseOpcodeAndModes(self,pc):\n\t\tinstruction = self.array[pc]\n\t\topcode = instruction % 100\n\t\tmodes = [0,0,0]\n\t\tfor x in range(len(modes)):\n\t\t\tmodes[x] = (instruction//pow(10,2+x)) % 10\n\t\treturn opcode,modes\n\n\tdef checkSize(self,index,mode):\n\t\tself.makeIndexValid(index)\n\t\tif mode == 0:\n\t\t\tself.makeIndexValid(self.array[index])\n\t\telif mode == 2:\n\t\t\tself.makeIndexValid(self.array[index]+self.relativeBase)\n\n\tdef makeIndexValid(self,index):\n\t\tsize = len(self.array)\n\t\tif index >= size:\n\t\t\tdif = index - len(self.array) + 1\n\t\t\tself.array.extend([0]*dif)\n\n\tdef read(self, index, mode):\n\t\tself.checkSize(index,mode)\n\t\tif mode == 0:\n\t\t\treturn self.array[self.array[index]]\n\t\telif mode == 1:\n\t\t\treturn self.array[index]\n\t\telif mode == 2:\n\t\t\treturn self.array[self.array[index]+self.relativeBase]\n\n\tdef write(self, index, mode, value):\n\t\tself.checkSize(index,mode)\n\t\tif mode == 0:\n\t\t\tself.array[self.array[index]] = value\n\t\telif mode == 1:\n\t\t\tself.array[index] = value\n\t\telif mode == 2:\n\t\t\tself.array[self.array[index]+self.relativeBase] = value\t\n\ndef part1(data):\n\trobot = Robot(data)\n\trobot.run()\n\treturn robot.countPaintedPanels()\n\ndef part2(data):\n\trobot = Robot(data)\n\trobot.run(1)\n\treturn robot.drawPanels()\n\nclass Robot:\n\n\tturnLeftLut = {\n\t\tVector2(0,1) : Vector2(1,0),\n\t\tVector2(1,0) : 
Vector2(0,-1),\n\t\tVector2(0,-1) : Vector2(-1,0),\n\t\tVector2(-1,0) : Vector2(0,1),\n\t\t}\n\n\tturnRightLut = {\n\t\tVector2(0,1) : Vector2(-1,0),\n\t\tVector2(1,0) : Vector2(0,1),\n\t\tVector2(0,-1) : Vector2(1,0),\n\t\tVector2(-1,0) : Vector2(0,-1),\n\t\t}\n\n\tdef __init__(self, program):\n\t\tself.program = Interpreter(program, [], False)\n\t\tself.input = self.program.inputs\n\t\tself.direction = Vector2(0,-1)\n\t\tself.position = Vector2(0,0)\n\t\tself.panels = defaultdict(int)\n\n\tdef parseOutput(self):\n\t\ttry:\n\t\t\tself.panels[self.position] = self.program.output.get(timeout=10)\n\t\t\trotation = self.program.output.get(timeout=1)\n\t\t\tif rotation == 0: #turn left\n\t\t\t\tself.direction = Robot.turnLeftLut[self.direction]\n\t\t\telif rotation == 1:\n\t\t\t\tself.direction = Robot.turnRightLut[self.direction]\n\t\t\tself.position = self.position.add(self.direction)\n\t\texcept Empty:\n\t\t\treturn #Ignored since only timout case should be after the program finished running\n\n\n\tdef run(self, initialPaint=0):\n\t\tself.panels[self.position] = initialPaint\n\t\tself.program.isRunning = True\n\t\tthread = Thread(target = self.program.runProgram)\n\t\tthread.start()\n\t\twhile self.program.isRunning:\n\t\t\tself.input.put(self.panels[self.position])\n\t\t\tself.parseOutput()\n\t\tthread.join()\n\n\tdef drawPanels(self):\n\t\tprintchars = [\"░\", \"▓\", \" \"]\n\t\tminx = sys.maxsize\n\t\tmaxx = -sys.maxsize\n\t\tminy = sys.maxsize\n\t\tmaxy = -sys.maxsize\n\t\tfor vector in self.panels.keys():\n\t\t\tminx = min(vector.x,minx)\n\t\t\tminy = min(vector.y,minx)\n\t\t\tmaxx = max(vector.x,maxx)\n\t\t\tmaxy = max(vector.y,maxy)\n\t\twidth = maxx - minx\n\t\theight = maxy - miny\n\t\tresult = \"\"\n\t\tfor y in range(miny,maxy+1):\n\t\t\tfor x in range(minx,maxx+1):\n\t\t\t\tpos = Vector2(x,y)\n\t\t\t\tif pos in self.panels:\n\t\t\t\t\tresult += printchars[self.panels[pos]]\n\t\t\t\telse:\n\t\t\t\t\tresult += printchars[2]\n\t\t\tresult += 
\"\\n\"\n\t\treturn result\t\n\n\tdef countPaintedPanels(self):\n\t\treturn len(self.panels.keys())\n\n#Execution stuff\n\ndef main():\n\trawInput = open(\"./input/11.txt\").read()\n\tdata = commaSeparatedLineToInts(rawInput)\n\tprint(part1(data))\n\tprint(part2(data))\n\treturn\n\nmain()\n\n#Python reminders\n#range(start, end+1, step), len\n#{}, for k in dict, for k,v in dict.items(), for v in dict.values() \n#set(), .add(x), .remove(x), .discard(x) no error if missing, x in s, |= union, &= intersect, -= difference, .copy()\n#[], .append(), .insert(i,x), .pop([i]), .remove(x), .reverse(), sort(arr) in place, sorted(arr) new arr \n#heap: heapq lib, heappush(list, value), heappop(list), value can be a tuple and gets sorted by 1st item\n#map(single param function, list)\n#filter(single param boolean returning function, list)\n#reduce(2 param function, list)\n#lambda x: x**2\n#// integer division in python3\n#sys.maxsize\n#common global functions: abs() max() min() len()\n#from queue import Queue ->thread safe, can be blocking, .put() .get()\n#from threading import Thread ->thread = Thread(target = f). 
.start(), .join()\n#from itertools import permutations -> permutations(list)\n#from types import SimpleNamespace -> objet style = SimpleNamespace(**dict)\t","sub_path":"Advent 2019/advent python/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"370995937","text":"#!/rnda10/home/kimdy/anaconda3/bin/python\nimport re\nf=open(\"PV2109K2_TOP.net\")\noutf=open(\"PV2109K2_TOP_INC.net\", \"w\")\nlines=f.readlines()\n\nfor line in lines:\n line=line.strip()\n\n if re.search(\"$\", line): # delete * comment line\n tmp=line.split(\"$\")\n line=tmp[0]\n\n if re.search(\"^\\.inc\", line): # delete * comment line\n datas=line.split('\"')\n print( datas[1] )\n tmp=open( datas[1] , \"r\")\n tmplines=tmp.readlines()\n for tmpline in tmplines:\n tmpline=tmpline.strip()\n\n if re.search(\"$\", tmpline): # delete * comment line\n tmp=tmpline.split(\"$\")\n tmpline=tmp[0]\n\n outf.writelines( tmpline.lower() + \"\\n\")\n# tmp.close()\n else:\n outf.writelines( line.lower() + \"\\n\")\n\nf.close()\noutf.close()\n\n","sub_path":"TieFind_inc/pyinc.py","file_name":"pyinc.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"554417829","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Author: Dennis Gunia\n# Date created: 26.05.2021\n# Python Version: 3.6\nfrom multiprocessing.connection import Client\nfrom buffer import HistoryBuffer\nimport sys, tty, termios\nimport os\nimport readline\nimport atexit\n\nSERVER_IP = 'localhost'\nSERVER_PORT = 6000\n\nprint('\\033[1m\\033[95mDNSProxy CLI v0.0.1 - 2021/05/26\\033[0m')\nprint(\"Trying to connect to Server on {}:{}\".format(SERVER_IP,SERVER_PORT))\n\n\n\nhistfile = os.path.join(os.path.expanduser(\"~\"), \".dnshist\")\ntry:\n readline.read_history_file(histfile)\n readline.set_history_length(9000)\nexcept IOError:\n 
pass\n\n\natexit.register(readline.write_history_file, histfile)\n\n\nclass SimpleCompleter:\n\n def __init__(self, options):\n self.options = sorted(options)\n self.options2 = sorted([\"get\", \"list\", \"save\", \"load\", \"check\"])\n\n def complete(self, text, state):\n response = None\n if state == 0:\n # This is the first time for this text,\n # so build a match list.\n if text:\n #print(\"Try \", text)\n self.matches = [\n s\n for s in self.options\n if s and s.startswith(text)\n ]\n \n else:\n #print(\"Try \", state)\n self.matches = self.options[:]\n\n\n # Return the state'th item from the match list,\n # if we have that many.\n try:\n response = self.matches[state]\n except IndexError:\n response = None\n\n return response\n\n\ndef input_loop():\n line = ''\n while line != 'stop':\n line = input('Prompt (\"stop\" to quit): ')\n print('Dispatch {}'.format(line))\n\n\n# Register the completer function\nOPTIONS = ['help', 'providers', 'status']\nreadline.set_completer(SimpleCompleter(OPTIONS).complete)\nreadline.parse_and_bind('tab: complete')\n\ndef readLine():\n return input('\\033[94m> \\033[0m')\n\ntry:\n address = (SERVER_IP, SERVER_PORT)\n conn = Client(address, authkey=b'secret password')\n # get server version\n conn.send('getver')\n msg = conn.recv()\n print(\"Connected! 
Server-Version: {}\\n\\nUse command 'exit' to close CLI.\\n\".format(msg[0]))\n\n\nexcept ConnectionRefusedError:\n print(\"Conenction Refused!\")\n exit(1)\n\n\nwhile True:\n \n line = \"cmd:{}\".format(readLine())\n #line = \"cmd:{}\".format(input('\\033[94m> \\033[0m'))\n #buffer.append(line)\n conn.send(line)\n if line == \"cmd:exit\":\n break\n else:\n while True:\n msg = conn.recv()\n if msg[0] == 9999:\n break\n elif msg[0] == 0:\n # success\n print(msg[1])\n else:\n # error\n print(\"\\033[93m{}\\033[0m\".format(msg[1]))\n\nconn.close()","sub_path":"DNSProxyCli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"649896713","text":"import abc\n\nfrom typing import Sequence, Callable, Awaitable\n\nfrom pyapp.events import AsyncEvent\n\n\nclass BaseQueue(abc.ABC):\n async def __aenter__(self) -> \"BaseQueue\":\n await self.open()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n async def open(self):\n \"\"\"\n Open queue\n \"\"\"\n\n async def close(self):\n \"\"\"\n Close Queue\n \"\"\"\n\n\nclass MessageQueue(BaseQueue, metaclass=abc.ABCMeta):\n \"\"\"\n Message Queue messaging pattern.\n\n Messages are delivered to the first listener to query for the next message\n eg::\n\n |--> [Listener 1]\n [Sender] -| [Listener 2]\n | [Listener 2]\n\n \"\"\"\n new_message = AsyncEvent[Callable[[str], Awaitable]]()\n\n @abc.abstractmethod\n async def send(self, message: str):\n \"\"\"\n Send a message to the task queue\n \"\"\"\n\n @abc.abstractmethod\n async def receive(self, count: int = 1) -> Sequence[str]:\n \"\"\"\n Receive a message (or messages) from the task queue\n \"\"\"\n\n @abc.abstractmethod\n async def listen(self):\n \"\"\"\n Start listening on the queue for messages\n \"\"\"\n\n\nclass PubSubQueue(BaseQueue, metaclass=abc.ABCMeta):\n \"\"\"\n Publish-Subscribe messaging 
pattern.\n\n Messages are broadcast to all subscribed listeners eg::\n\n |--> [Listener 1]\n [Sender] -|--> [Listener 2]\n |--> [Listener 3]\n\n \"\"\"\n @abc.abstractmethod\n async def publish(self, message: str, topic: str):\n \"\"\"\n Publish a message to queue\n \"\"\"\n\n @abc.abstractmethod\n async def subscribe(self, topic: str):\n \"\"\"\n Subscribe to a named topic\n \"\"\"\n\n @abc.abstractmethod\n async def cancel_subscription(self, topic: str):\n \"\"\"\n Unsubscribe from a topic\n \"\"\"\n","sub_path":"pyapp_ext/aiomq/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"259786379","text":"#!/usr/bin/python\n#coding=utf-8\nimport requests\nimport json\nimport re\nimport socket\nimport socks\n\n#\n#模块开发:河南师范大学 fgfxf\n#\n#模块名称:Python 品易代理模块\n#\n#官网:http://pc.py.cn/\n#\nproxies={'http':'http://127.0.0.1:8080','https':'https://127.0.0.1:8080'} # debug查看发包\ndef testProxies():\n #连接iptool.lu测试ip\n url=\"https://ip.tool.lu\"\n UA={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62'}\n response=requests.get(url=url,headers=UA)\n return response.text\n\ndef Str2IP(ipstr):\n ex=r\"\\b(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\\b\"\n ret=re.findall(ex,ipstr,re.S)\n return ret[0]\n\ndef setSocketProxies(ip,port):\n _socket=socket.socket #保存原始无代理状态\n socks.set_default_proxy(socks.SOCKS5, ip,port)\n socket.socket = socks.socksocket \n print(\"设置\"+ip+\":\"+str(port)+\"为socket代理\")\n return _socket\n\ndef RecoverSocket(_socket):\n socket.socket =_socket\n\ndef loginPYCNProxies(USERNAME,PASSWORD):\n IndexUrl=\"http://pc.py.cn/\"\n UA={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62'}\n print(requests.get(url=IndexUrl,headers=UA))\n 
loginUrl=\"http://pycn.yapi.3866866.com/login\"\n headers={\n 'POST':'/login HTTP/1.1',\n 'Host':'pycn.yapi.3866866.com',\n 'Content-Length': '47',\n 'Accept': 'text/html, */*; q=0.01',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Origin': 'http://pc.py.cn',\n 'Referer': 'http://pc.py.cn/',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',\n 'Connection': 'close'\n }\n data={\n 'phone':USERNAME,\n 'password':PASSWORD,\n 'remember':0\n }\n resp=requests.post(url=loginUrl,headers=headers,data=data)\n token=json.loads(resp.text)\n token=token.get('ret_data').get(\"token\")\n return token\n \ndef AddWhiteList(ip,token):\n url=\"http://pycn.yapi.3866866.com/user/save_white_ip\"\n headers={\n 'POST':'/user/save_white_ip HTTP/1.1',\n 'Host':'pycn.yapi.3866866.com',\n 'Content-Length': '10',\n 'Accept': 'text/html, */*; q=0.01',\n 'Authorization':'Bearer '+ token,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Origin': 'http://pc.py.cn',\n 'Referer': 'http://pc.py.cn/',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',\n 'Connection': 'close'\n\n }\n data={\n 'ip':ip\n }\n resp=requests.post(url=url,headers=headers,data=data)\n return resp.text\n\ndef GetProxiesIPlist(API):\n UA={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62'}\n response=requests.get(url=API,headers=UA).text\n return 
response\n\n\n","sub_path":"pycnProxies.py","file_name":"pycnProxies.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"27683686","text":"\"\"\"Script for generating the pickle file for the PlayerModel estimator\"\"\"\n\nfrom server.ml_models import PlayerModel\nfrom server.ml_models.player_model import PlayerModelData\n\n\ndef main():\n data = PlayerModelData()\n estimator = PlayerModel()\n estimator.fit(*data.train_data())\n estimator.save()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"backend/scripts/models/generate_player_model.py","file_name":"generate_player_model.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"16049867","text":"from src.BalancingRobot import *\nfrom src.Simulator import *\nfrom src.controllers.PIDController import *\n\n# This script shows the behavior of the robot controlled by a PID controller\n\nif __name__ == \"__main__\":\n\n # Import model\n model = BalancingRobot()\n \n # Set initial state [m, m/s, rad, rad/s]\n model.set_state(np.mat([[0.1], [0.0]]))\n \n # Define controller\n controller = Control(model)\n\n # Simulate\n sim = Simulator(model, controller)\n t, state_list = sim.simulate()\n\n # Plot data\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.plot(t, state_list[:, 0])\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"theta (rad)\")\n plt.title(\"Pendulum angle\")\n plt.grid()\n plt.subplot(1, 2, 2)\n plt.plot(t, state_list[:, 1])\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"thetadot (rad/s)\")\n plt.title(\"Pendulum angular velocity\")\n plt.grid()\n plt.tight_layout()\n\n # Save figure in data\n plt.savefig(\"../data/robot_PID_result.png\")\n 
plt.show()\n","sub_path":"scripts/robot_pid_control.py","file_name":"robot_pid_control.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"192740742","text":"# -*- coding: UTF-8 -*-\r\nimport hashlib\r\nimport requests\r\nimport re\r\nimport json\r\nimport os\r\n\r\n\r\nclass download:\r\n def __init__(self, save_path, code, num):\r\n # 定义请求头\r\n self.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'}\r\n self.save_path = save_path.replace('/', '\\\\')\r\n if code == 'aid':\r\n self.zipname = 'av'\r\n self.basicname = 'aid'\r\n elif code == 'bvid':\r\n self.zipname = 'bv'\r\n self.basicname = 'bvid'\r\n self.num = num\r\n\r\n def get_video_download_urls(self, part, quality = 80):\r\n print('正在获取下载链接...', end = '')\r\n url_list = []\r\n size_list = []\r\n # 取得并访问可以获取URL的网址\r\n cid = re.findall('\"cid\":(.*?),',\r\n requests.get(f'https://api.bilibili.com/x/player/pagelist?{self.basicname}={self.num}').text)[\r\n part]\r\n entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'\r\n appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')\r\n params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, quality, quality)\r\n chksum = hashlib.md5(bytes(params + sec, 'utf8')).hexdigest()\r\n url = 'https://interface.bilibili.com/v2/playurl?%s&sign=%s' % (params, chksum)\r\n # 设置referer项\r\n referer = f'https://www.bilibili.com/video/{self.zipname}{self.num}'\r\n return_url = json.loads(requests.get(url).text)['durl']\r\n for urls in return_url:\r\n url_list.append(urls['url'])\r\n size_list.append(urls['size'])\r\n # 返回数据\r\n return {'url': url_list, 'referer': referer, 'cid': cid}\r\n\r\n def video_downloader(self, url, referer):\r\n # 更新请求头,加上referer\r\n self.headers.update({'Referer': referer})\r\n video_download_name = []\r\n command = 'aria2c.exe \"{}\" --conf-path=./aria2.conf 
--referer={} -d \"{}/\" -o \"part{}\"'\r\n for range_var in range(len(url)):\r\n print(f'\\r下载中...[{range_var + 1}/{len(url)}]', end = '')\r\n os.system(command.format(url[range_var], referer, self.save_path, range_var + 1))\r\n video_download_name.append('part' + str(range_var + 1))\r\n # 返回下载的视频分段文件名列表\r\n return {'filename': video_download_name}\r\n\r\n def danmaku_downloader(self, cid):\r\n print('下载弹幕中...')\r\n danmaku_download_name = []\r\n # 获取弹幕\r\n danmaku_file = requests.get(f'https://comment.bilibili.com/{cid}.xml')\r\n # 写入数据\r\n with open(self.save_path + 'danmaku.xml', 'wb') as file:\r\n danmaku_download_name.append('danmaku')\r\n file.write(danmaku_file.content)\r\n file.flush()\r\n # 返回写入的弹幕名称\r\n return {'filename': danmaku_download_name}\r\n\r\n def xml2ass(self, filename = 'danmaku'):\r\n \"\"\"使用DanmakuFactory将弹幕XML文件转换为字幕ASS文件\"\"\"\r\n if 'bulidings(2)\n \"\"\"\n final_mask=np.zeros((height,width),np.uint8)\n for idx,file in enumerate(mask_pool):\n ret,img = load_img(input_path+file,grayscale=True)\n assert (ret == 0)\n label_value=0\n if 'road' in file:\n label_value =ROAD_VALUE\n elif 'building' in file:\n label_value=BUILDING_VALUE\n # label_value = idx+1\n for i in tqdm(range(height)):\n for j in range(width):\n if img[i,j]>=FOREGROUND:\n print (\"img[{},{}]:{}\".format(i,j,img[i,j]))\n if label_value==ROAD_VALUE:\n final_mask[i,j]=label_value\n elif label_value==BUILDING_VALUE and final_mask[i,j]!=ROAD_VALUE:\n final_mask[i,j]=label_value\n # print (\"final_mask[{},{}]:{}\".format(i, j, final_mask[i, j]))\n\n\n return final_mask\n\n\n\nif __name__=='__main__':\n\n x,y=check_input_file(input_path,mask_pool)\n\n result_mask=combine_all_mask(x,y,input_path,mask_pool)\n\n plt.imshow(result_mask, cmap='gray')\n plt.title(\"combined mask\")\n plt.show()\n\n 
cv2.imwrite(output_file,result_mask)","sub_path":"postprocess/combine_diffclass_for_singlemodel_result.py","file_name":"combine_diffclass_for_singlemodel_result.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"343640724","text":"\"\"\"\nFile: major.py\nAuthor: Vu Nguyen\nDate: 9/16/2020\nSection: 31\nDescription:\n\n\"\"\"\n\n\ndef major():\n academic_major = input(\"What is your major? \").upper()\n if (academic_major == \"CMSC\") or (academic_major == \"CMPE\"):\n print(\"You need to earn at least a B for CMSC 201 to count.\")\n else:\n print(\"You need to earn at least a C for CMSC 201 to count.\")\n\n\nmajor()\n","sub_path":"major.py","file_name":"major.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"115827285","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx=ssl.create_default_context()\nctx.check_hostname=False\nctx.verify_mode= ssl.CERT_NONE\nc=0\ns=0\n\nurl=input(\"Enter-\")\nhtml=urllib.request.urlopen(url, context=ctx).read()\nsoup=BeautifulSoup(html,\"html.parser\")\n\ntags=soup('span')\nfor tag in tags:\n y=int(tag.text)\n s=s+y\n c=c+1\n\nprint(\"Count\",c)\nprint(\"Sum\",s)\n","sub_path":"webscrap.py","file_name":"webscrap.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"647452702","text":"import datetime\nimport os\n\nimport requests\n\nurl = \"https://api.hypr.cl/raw/\"\n\nheaders = {\n 'x-api-key': os.getenv('HYPR_API_KEY'),\n 'command': \"list\",\n 'time_start': \"2019-08-01T00:00:00Z\", 'time_stop': \"2019-08-01T00:00:00Z\", 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\", 'Host': \"api.hypr.cl\", 'Accept-Encoding': \"gzip, deflate\", 'Content-Length': \"0\",\n 'Connection': 
\"keep-alive\",\n 'cache-control': \"no-cache\"\n}\n\n\ndef get_datetime_str(dt):\n return dt.replace(microsecond=0).isoformat() + \"Z\"\n\n\n# EXAMPLE: get_heat_map(datetime.datetime(2019, 8, 1, 0, 58, 0), datetime.datetime(2019, 8, 1, 0, 58, 30))\ndef get_raw(time_start, time_stop):\n headers['time_start'] = get_datetime_str(time_start)\n headers['time_stop'] = get_datetime_str(time_stop)\n response = requests.request(\"POST\", url, headers=headers)\n if response.status_code != 200:\n print(f\"Error {response.status_code} for {time_start} - {time_stop} interval\")\n return response.text\n\n\ndef sample_day_per_minute(year, month, day, sec_per_minute=3, sec=0, output_to_multiple_files=True):\n cur_time = datetime.datetime(year, month, day, 0, 0, sec)\n day_end = datetime.datetime(year, month, day, 23, 59, sec)\n\n while cur_time <= day_end:\n time_stop = cur_time + datetime.timedelta(seconds=sec_per_minute)\n result = get_raw(cur_time, time_stop)\n if output_to_multiple_files:\n filename = cur_time.strftime('%H%M')\n text_file = open(filename + \".json\", \"w\")\n text_file.write(result)\n text_file.close()\n cur_time += datetime.timedelta(minutes=1)\n\n print(\"THE END\")\n\n\nif __name__ == \"__main__\":\n sample_day_per_minute(2019, 8, 1)\n","sub_path":"util/coll_raw.py","file_name":"coll_raw.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"347240229","text":"'''\nThis is a demo python program to demo python and the important module for Image Recogniton using CNN\n\n\nAuthor: Bryan Chen\n\n'''\nfrom keras.models import load_model\nimport numpy as np\nfrom keras.preprocessing.image import img_to_array, load_img\nimport matplotlib.pyplot as plt \n\n# 載入模型\nmodel = load_model('model_3.h5') # trained by large data\nmodel.summary() #print model summary\n\ndef plot_a_image(image,title,result):\n fig = plt.gcf()\n fig.set_size_inches(5, 6)\n plt.imshow(x) # RGB type 
0~255 int or 0~1 float\n plt.title(title,fontsize=12)\n plt.text(20,170,'The prediction result --> '+ result,fontsize=15)\n plt.show()\n\nwhile 1:\n\n i_str=input('Input Image (index:1~12500, out of range for exit):')\n i=int(i_str)\n if (i<1 or i>12500):\n break\n Img_path = 'data/test2/'+str(i)+'.jpg'\n print('Imge file path:',Img_path)\n \n img_org = load_img(Img_path) # PIL image\n print('The original image!')\n img_org.show()\n img = load_img(Img_path,target_size=(150,150)) # PIL image\n # print('The target_size image!')\n # img.show()\n x = img_to_array(img) # this is a Numpy array with shape ( Y, X , 3)\n print(x.shape)\n print(x.dtype)\n x = x.astype('float32') / 255.0\n\n x1=x.reshape(1,150,150,3)\n print('Image after Reshape = ', x1.shape)\n\n\n prediction=model.predict(x1)\n print('The prediction value is', prediction[0])\n if (prediction[0] > 0.5): \n print('It is a dog image!')\n else:\n print('It is a cat image!') \n\n prediction=np.rint(prediction)\n print(prediction)\n print(prediction.shape)\n print(prediction.dtype)\n prediction=prediction.astype(int)\n print(prediction)\n print(prediction.shape)\n print(prediction.dtype)\n print('prediction[0]=',prediction[0])\n label_dict={0:'cat', 1:'Dog'}\n\n in_title='input Image:' + Img_path \n\n i=prediction[0]\n i=int(i)\n in_result=label_dict[i]\n print(in_result)\n plot_a_image(x,in_title,in_result)\n\n\n'''\nimport numpy as np\nx_4d=np.zeros((10,150,150,3),dtype=float) # create 4 dimention ndarray with elements of zero\nprint('Dimmentions =',x_4d.ndim) \n# x_4d[0]=x\n# print(x_4d)\n\nindex_str= input('Input the first index(1~12490) of test image :') #input is a str\nbatch_size = 10\nfor i in range (batch_size):\n index= int(index_str) + i # transfer index type to int and plus i (0~9)\n Img_path = 'data/test2/'+ str(index) +'.jpg'\n img = load_img(Img_path,target_size=(150,150)) \n x = img_to_array(img) # this is a Numpy array with shape ( Y, X , 3)\n x = x.astype('float32') / 255.0\n x_4d[i]=x 
\nprediction=model.predict(x_4d)\nprediction=np.rint(prediction)\nprint('The prediction value is', prediction[:10])\n'''","sub_path":"demo_02.py","file_name":"demo_02.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"581682003","text":"import sprites\nimport pygame\nimport graphics\n\nROSA, BLACK, WHITE, GREEN, RED, BROWN, YELLOW, BLUE, NIGHTBLUE, STARBLUE, GREY, SCREEN_HEIGHT, SCREEN_WIDTH = graphics.färger()\n\ndef öppna_intro():\n\n # Objekt och data\n ridå = sprites.Ridå()\n ridå.x = 0\n ridå.y = -500\n ridå.bredd = 1366\n ridå.höjd = 1268\n ridå.change_y = 3\n ridå.färg = WHITE\n\n\"\"\"\n # spelnamn = sprites.Text()\n # spelnamn.text = \"RYMDSPEL\"\n # spelnamn.x = 200\n # spelnamn.y = 300\n # spelnamn.colour = BLACK\n # spelnamn.font = 100\n\n done = False\n clock = pygame.time.Clock()\n parameter = 0\n\n # Main loop\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n done = True\n if event.type == pygame.QUIT:\n print(\"User has asked to quit.\")\n done = True\n\n screen.fill(NIGHTBLUE)\n\n spelnamn.skriv(screen)\n\n ridå.rita(screen)\n ridå.rörelse()\n\n pygame.display.flip()\n clock.tick(60)\n\"\"\"\n\n","sub_path":"Projekt/Rest o Test/Intro.py","file_name":"Intro.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"428968643","text":"#!/bin/python\n\n#recursively read netcdf files for plasma profile and FWR2D output\n#create the data input file for VisIt to make the movie\n\nimport sdp.visualization.visit as vi\nimport numpy as np\nimport os\n\nfluc_file_head = 'fluctuation'\nTstart = 1\nTend =760\nTstep = 1\nreflect_file = 'schradi.cdf'\npara_out_file_head = 'para_out'\nfull_wave_out_file_head = 'fullw_out'\n\nrun_dir = '../runs/'\nvi_out_dir = '../vtk_small_files/'\n\nwavefreq = 73 # in GHz\n\nfor i in 
range(Tstart,Tend+1,Tstep):\n flucfname = run_dir+str(i)+'/'+fluc_file_head+str(i)+'.cdf'\n reffname = run_dir+str(i)+'/'+str(wavefreq)+'/'+reflect_file\n flucoutfname = vi_out_dir + fluc_file_head+str(i)+'.vtk'\n paraoutfname = vi_out_dir + para_out_file_head + str(i) + '.vtk'\n fullwoutfname = vi_out_dir + full_wave_out_file_head +str(i) + '.vtk'\n\n fwr = vi.FWR_Loader(freq = wavefreq*1E9, flucfname = flucfname, fwrfname = reffname, mode = 'X')\n\n flucmesh = fwr.load_profile()\n flucmesh.output_vtk(fname = flucoutfname)\n del flucmesh\n\n para_mesh = fwr.load_paraxial()\n para_mesh.output_vtk(fname = paraoutfname)\n del para_mesh\n\n fullw_mesh = fwr.load_fullwave()\n fullw_mesh.output_vtk(fname = fullwoutfname)\n del fullw_mesh\n del fwr\n\n\n\n\n","sub_path":"src/python2/sdp/scripts/FWR_Postprocess/make_all_vtk.py","file_name":"make_all_vtk.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"341914634","text":"primes=[]\ndef findp(k):\n if k<=2:\n return 0\n else:\n flag=0\n j=int((k-1)**(1/2))\n if k-1 in primes:\n return k-1\n for i in range(2,j+1):\n if (k-1)%i==0:\n flag=1\n break\n if flag==0:\n primes.append(k-1)\n return k-1\n else:\n return findp(k-1)\n\nn=int(input())\nq=int(input())\na=list(map(int,input().split()))\nfor _ in range(q):\n k,l,r=map(int,input().split())\n if k==1:\n for m in range(l-1,r):\n p=findp(a[m])\n a[m]-=p\n else:\n sum=0\n for m in range(l-1,r):\n sum+=a[m]\n print(sum)","sub_path":"codechef/loc_may18/primeclo.py","file_name":"primeclo.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"288712974","text":"from django.contrib.auth.mixins import LoginRequiredMixin\r\nfrom django.contrib.auth.models import User, Group\r\nfrom django.http import JsonResponse, QueryDict\r\nfrom django.views.generic import ListView, View\r\n\r\nfrom 
accounts.mixins import PermissionRequiredMixin\r\nfrom accounts.models import Profile\r\nfrom accounts.user.forms import AddUserForm\r\nfrom dashboard.common import get_errors_message\r\nimport logging\r\nlogger = logging.getLogger(\"myself\")\r\n# 查看用来列表\r\nclass UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):\r\n permission_required = \"auth.view_user\"\r\n permission_redirect_field_name = \"index\"\r\n template_name = \"user/userlist.html\"\r\n model = User\r\n paginate_by = 8\r\n before_range_num = 4\r\n after_range_num = 4\r\n ordering = \"id\"\r\n\r\n def get_queryset(self):\r\n queryset = super(UserListView, self).get_queryset()\r\n queryset = queryset.filter(is_superuser=False)\r\n username = self.request.GET.get(\"search_username\", None)\r\n logger.debug(queryset)\r\n if username:\r\n queryset = queryset.filter(username__icontains=username)\r\n return queryset\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super(UserListView, self).get_context_data(**kwargs)\r\n\r\n # 当前页 的前7条\r\n context['page_range'] = self.get_pagerange(context['page_obj'])\r\n # 处理搜索条件\r\n context['search_username']=\"\" # 设置默认为空\r\n search_data = self.request.GET.copy()\r\n try:\r\n search_data.pop(\"page\")\r\n except:\r\n pass\r\n context.update(search_data.dict())\r\n context['search_data'] = \"&\" + search_data.urlencode()\r\n return context\r\n\r\n def get_pagerange(self, page_obj):\r\n current_index = page_obj.number\r\n start = current_index - self.before_range_num\r\n end = current_index + self.after_range_num\r\n if start <= 0:\r\n start = 1\r\n if end >= page_obj.paginator.num_pages:\r\n end = page_obj.paginator.num_pages\r\n return range(start, end + 1)\r\n\r\n # @method_decorator(permission_required(\"auth.add_user\", login_url=reverse(\"error\",kwargs={\"next\":\"dashboard\", \"msg\":\"没有权限,请联系管理员\"})))\r\n # @method_decorator(permission_required(\"auth.add_user\",login_url=\"/\"))\r\n def get(self, request, *args, **kwargs):\r\n return 
super(UserListView, self).get(request, *args, **kwargs)\r\n\r\n\r\n# 修改用户状态\r\nclass ModifyUserStatusView(LoginRequiredMixin, PermissionRequiredMixin, View):\r\n permission_required = \"auth.change_user\"\r\n permission_redirect_field_name = \"index\"\r\n\r\n def post(self, request):\r\n uid = request.POST.get(\"uid\", \"\")\r\n ret = {\"status\": 0}\r\n try:\r\n user_obj = User.objects.get(id=uid)\r\n # user_obj.is_active = False if user_obj.is_active else True\r\n if user_obj.is_active:\r\n user_obj.is_active = False\r\n else:\r\n user_obj.is_active = True\r\n user_obj.save()\r\n except User.DoesNotExist:\r\n ret['status'] = 1\r\n ret['errmsg'] = \"用户不存在\"\r\n\r\n return JsonResponse(ret)\r\n\r\n\r\n# 修改用户组\r\nclass ModifyUserGroupView(LoginRequiredMixin, PermissionRequiredMixin, View):\r\n permission_required = (\"auth.change_user\", \"auth.delete_user\")\r\n permission_redirect_field_name = \"index\"\r\n\r\n def get(self, request):\r\n print(request.GET)\r\n uid = request.GET.get('uid', \"\")\r\n group_objs = Group.objects.all()\r\n try:\r\n user_obj = User.objects.get(id=uid)\r\n except User.DoesNotExist:\r\n pass\r\n else:\r\n group_objs = group_objs.exclude(id__in=user_obj.groups.values_list(\"id\"))\r\n return JsonResponse(list(group_objs.values(\"id\", \"name\")), safe=False)\r\n\r\n def put(self, request):\r\n ret = {\"status\": 0}\r\n data = QueryDict(request.body)\r\n uid = data.get(\"uid\", \"\")\r\n gid = data.get(\"gid\", \"\")\r\n try:\r\n user_obj = User.objects.get(id=uid)\r\n except User.DoesNotExist:\r\n ret['status'] = 1\r\n ret['errmsg'] = \"用户不存在\"\r\n return JsonResponse(ret)\r\n try:\r\n group_obj = Group.objects.get(id=gid)\r\n except Group.DoesNotExist:\r\n ret['status'] = 1\r\n ret['errmsg'] = \"用户组不存在\"\r\n return JsonResponse(ret)\r\n user_obj.groups.add(group_obj)\r\n return JsonResponse(ret)\r\n\r\n def delete(self, request):\r\n ret = {\"status\": 0}\r\n data = QueryDict(request.body)\r\n try:\r\n user_obj = 
User.objects.get(id=data.get('uid', \"\"))\r\n group_obj = Group.objects.get(id=data.get('gid', \"\"))\r\n user_obj.groups.remove(group_obj)\r\n\r\n # group_obj.user_set.remove(user_obj)\r\n except User.DoesNotExist:\r\n ret['status'] = 1\r\n ret['errmsg'] = \"用户不存在\"\r\n except Group.DoesNotExist:\r\n ret['status'] = 1\r\n ret['errmsg'] = \"用户组不存在\"\r\n return JsonResponse(ret)\r\n\r\n\r\n# 新增用户类\r\nclass AddUserView(View):\r\n def post(self, request):\r\n res = {\"status\": 0}\r\n user_data = AddUserForm(request.POST)\r\n if user_data.is_valid():\r\n try:\r\n user = {\"username\": user_data.cleaned_data[\"username\"], \"email\": user_data.cleaned_data[\"email\"],}\r\n user_obj = User(**user)\r\n user_obj.set_password(user_data.cleaned_data[\"password\"])\r\n user_obj.save()\r\n profile = {\"user\": user_obj, \"name\": user_data.cleaned_data[\"name\"],\r\n \"phone\": user_data.cleaned_data[\"phone\"], \"weixin\": user_data.cleaned_data[\"weixin\"]}\r\n Profile(**profile).save()\r\n except Exception as e:\r\n print(e)\r\n res[\"status\"] = 1\r\n res[\"errmsg\"] = \"保存发布发生异常,请查看后台日志\"\r\n else:\r\n err = user_data.errors.as_data()\r\n res[\"status\"] = 1\r\n res[\"errmsg\"] = get_errors_message(err)\r\n return JsonResponse(res)\r\n","sub_path":"accounts/user/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"227284160","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : Lysander Tseng\n# Email : qingshan.tseng@gmail.com\n# Time : 2020/2/18 16:41\n# User : Magic\n# Product : PyCharm\n# Project : 030\n# File : 030.4.py\n# Intro : 编写一个程序,用户输入关键字,查找当前文件夹内\n# (如果当前文件夹内包含文件夹,则进入文件夹继续搜索)所有含有\n# 该关键字的文本文件(.txt后缀),要求显示该文件所在的位置以\n# 及关键字在文件中的具体位置(第几行第几个字符)\nimport os\n\n\ndef line_retrieve(line, key):\n \"\"\"检索并记录一行中关键词的出现次数和具体位置\"\"\"\n\n line_len = len(line)\n key_len = len(key)\n column = []\n for i in range(line_len):\n if 
line[i:i+key_len] == key:\n column.append(i)\n return column\n\n\ndef exact_search(aim_dir, key_word):\n \"\"\"定义一个精确搜索函数,用于查找文件内部信息\"\"\"\n\n os.chdir(aim_dir)\n key_location_list = []\n for cur_path, in_dir, in_file in os.walk(aim_dir):\n for each_file in in_file:\n if each_file.endswith('.txt'):\n with open(each_file, ) as f:\n for line_number, each_line in enumerate(f.readlines()):\n column_number = line_retrieve(each_line, key_word)\n final_path = os.path.join(cur_path, each_file)\n key_location_list.append([final_path, line_number, column_number])\n for path, line, column in key_location_list:\n print(f\"在文件【{path}】中找到关键字【{key_word}】\\n\"\n f\"关键字出现在第{line}行,第{column}个位置。\\n\")\n\n\nactual_aim_dir = input(\"请输入目标文件夹:\")\nactual_key_word = input(\"请输入关键字:\")\n\nexact_search(actual_aim_dir, actual_key_word)\n","sub_path":"Python/Homework/030/030.4.py","file_name":"030.4.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"319816406","text":"# class Movie:\n# def __init__(self,id=None,name=None,year=None,rank=None):\n# self.id=id\n# self.name=name\n# self.year=year\n# self.rank=rank\n\n# for key,value in kwargs.items():\n# parameters=key.split('__')\n# operation_dict={\"lt\":'<',\"gt\":'>',\"lte\":'<=',\"gte\":'>=',\"neq\":'!=',\"in\":'in'}\n# if parameters[0] not in columns:\n# raise InvalidField\n# if len(parameters)==1:\n# if parameters[0] in columns:\n# condition='''{}=\"{}\"'''.format(key,value)\n# elif parameters[1]==\"contains\":\n# if not isinstance(value,str):\n# raise ValueError\n# condition='''name like \"%{}%\"'''.format(value)\n# elif parameters[1]==\"in\":\n# p=tuple(value)\n# condition='''{} in {}'''.format(parameters[0],p)\n# else:\n# if not isinstance(value,int):\n# raise ValueError\n# condition='''{} {} {}'''.format(parameters[0],operation_dict[parameters[1]],value)\n# multiple_conditions.append(condition)\n \n# multiple_conditions=' AND 
'.join(multiple_conditions)\n\n\n\n# import sqlite3,random,string\n# connection = sqlite3.connect(\"movie_booking_slots.sqlite3\")\n# crsr=connection.cursor()\n# names=[]\n# for i in range(20):\n# alphabets=list(string.ascii_lowercase)\n# name=''\n# for i in range(random.randint(1,20)):\n# name+=random.choice(alphabets)\n# names.append(name)\n\n# for i in range(1000):\n# name=random.choice(names)\n# gender=random.choice(['M','F'])\n# age=random.randint(5,100)\n# movie_id=random.randint(1,100)\n# theater_id=random.randint(1,100)\n# crsr.execute('''INSERT INTO Audience(name,gender,age,movie_id,theater_id) values(?,?,?,?,?)''',(name,gender,age,movie_id,theater_id))\n \n# connection.commit()\n# connection.close()\ndef read_data(sql_query):\n\timport sqlite3\n\tconnection = sqlite3.connect(\"imdb.sqlite3\")\n\tcrsr = connection.cursor() \n\tcrsr.execute(sql_query) \n\tans= crsr.fetchall() \n\tconnection.close() \n\treturn ans\n\nans=read_data('''select distinct(name) from movie inner join cast on id=mid limit 10''')\nfor i in ans:\n print(i)\n\ndef write_data(sql_query):\n\timport sqlite3\n\tconnection = sqlite3.connect(\"selected_students.sqlite3\")\n\tcrsr = connection.cursor() \n\tcrsr.execute(\"PRAGMA foreign_keys=on;\") \n\tcrsr.execute(sql_query) \n\tconnection.commit() \n\tconnection.close()\n\treturn crsr","sub_path":"dbms_resources/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"428745941","text":"\"\"\"Project Euler Question 5\n\n 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\n\"\"\"\n\n### makes a first guess\n\nmax_int = 20\n\nnumber = 1 * 2 * 3 * 5 * 7 * 11 * 13 * 17 * 19\nnumber = number - (number % max_int)\n\nwhile True:\n divided_by_all_in_range = True\n for i in 
range(2, max_int):\n if number % i != 0:\n divided_by_all_in_range = False\n break\n if divided_by_all_in_range:\n print(number)\n exit()\n\n number += max_int # since the number has to end in 0, this incriments by 0's\n","sub_path":"Python/Q000x/Q0005.py","file_name":"Q0005.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"170877869","text":"#coding:utf-8\nimport os\nimport pandas as pd\nimport numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nDIR_IMAGES = 'sansan-001/images'\nIMG_SIZE = 100\n\ndef load(df):\n X = []\n for i, row in df.iterrows():\n img = Image.open(os.path.join(DIR_IMAGES, row.filename))\n img = img.crop((row.left, row.top, row.right, row.bottom))\n img = img.convert('L')\n img = img.resize((IMG_SIZE, IMG_SIZE), resample=Image.BICUBIC)\n\n x = np.asarray(img, dtype=np.float32)\n x = x.flatten()\n X.append(x)\n\n X = np.array(X)\n return X\n","sub_path":"Kaggle/Sansan/image_load.py","file_name":"image_load.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"434905487","text":"#=========================================================================\n#=========================================================================\n'''\nauthor Benjamin Lee Hinchliffe\ncommented and modified by Gabriele Luigi Mura\ncommented and modified by Jed Hollom\ncommented and modified by Anthony Stannard\n\nthis script outputs the \"vertex normal\" for each mesh points\nalthough normal are defined with respect to a face the definition can \nbe forced and it can be possible to assigne a normal to a vertex\nsee TAU user guide\n'''\n#=========================================================================\n#=========================================================================\n\n\n\n\n#=================\n#IMPORTING 
MODULES\n#=================\nimport sys\nfrom scipy.io import *\nimport read_taumesh as readm\nimport numpy as np\nimport cPickle as pkl\nimport lcst\nfrom math import *\nfrom TwoDfuns import *\nimport time\n\n\n\n#====================\n#USER DEFINE FUNCTION\n#====================\n\ndef SUMM(sequence, start=0):\n for value in sequence:\n start = start + value\n return start\n\n\ndef DOT(AA, BB):\n return SUMM(((A*B) for A, B in zip(AA, BB)), start=0)\n\ndef delaunay_deform(new_a,name):\n\t#===================================================================================\n\t#READING & SETTING THE VARIABLES FROM THE RELATIVE VOLUME COEFFICIENTS (NETCDF FILES)\n\t#===================================================================================\n\t\n\tprint ('\\n')\n\tprint('[1] reading relative volume coefficients')\n\tstart = time.clock()\n\t#Read in solution of 1st step\n\tf = netcdf_file('relvolcoef_netcdf','r')\n\teco = f.variables['Rel_volume_coeffs'][:] #deluanay \"e\" coefficient\n\tvert = f.variables['Simplex_vert_index'][:] #vertices of the delaunay triangulation\n\tpo_ref = f.variables['Point_index'][:] #point reference\n\tf.close()\n\t\n\t#================\n\t#READING THE MESH\n\t#================\n\t#Automatic mesh path finding\n\t#import os\n\t#for file in os.listdir('../'):\n\t#\tif file.endswith('.taumesh') or file.endswith('.grid'):\n\t#\t\tmesh = '../'+file\n\t#chord=1.0\n\t\n\t\n\t#bds=[[3],[4],[5],[6]] #Defining the boundaries\n\t\n\t'''\n\tRAE2822 2D Wing\n\t[3] ff\n\t[4] upper_surface\n\t[5] lower_surface\n\t'''\n\t\n\tmesh = 'rae2822.taumesh'\n\tread_in = readm.readTaumesh() #Read mesh in\n\tno_of_bounds = 5\n\tdata_inner0, data_inner1, data_ff, data_us, data_ls, mesh_points = read_in.read2(mesh,no_of_bounds)\n\tmesh_points0,mesh_points1,na,na = separateMeshPoints(mesh_points) # Only need mesh points on y=0 plane\n\t\n\t# Store Node IDs for y = 0 plane and y = 1 plane\n\tNodeIDs0 = data_inner0[:,3]\n\tNodeIDs1 = 
data_inner1[:,3]\n\t'''\n\t-data- are all the points along with xyz marker and ID on the boundaries -ff,up,down and\n\t-mesh_points- these are all the points including the volume internal mesh point\n\t'''\n\t\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#==================================\n\t#READING FROM PICKLE THE BOUNDARIES\n\t#==================================\n\t'''\n\there we are reading from a pickle file, althogh not necessary for such a small mesh it will save space later on\n\twhen using large mesh\n\tif you run the command \n\tprint ('to compare'), data_up[0]\n\tyou will see that tha data are the same, what changes is only the format of the file\n\t'''\n\t\n\t#Load the pickle file (the mesh file but in a different format)\n\tprint('[3] reading pickle file')\n\tbdry_data = pkl.load(open('bdry_data.p','rb'))\n\t\n\tdata_ff = bdry_data[\"Data_ff\"]\n\tdata_ls = bdry_data[\"Data_ls\"]\n\tdata_us = bdry_data[\"Data_us\"]\n\t\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#=====================\n\t#ROTATING SURFACE GRID \n\t#=====================\n\t'''\n\tinitialisation of the new point, so the new point have the same coordinates execpt for the one we are moving. 
this make sense \n\tonly if we are going to move each mesh point at the time\n\tech of this point has three column for x y z + ID but not sure about this one, it depends on how the data were extracted\n\t'''\n\t\n\tprint('[4] deforming the surface')\n\t\n\t#Make copy of original data\n\t#Upper surface\n\toriginal_data_us0,original_data_us1,na,na = separateSymmetryPlanes(data_us)\n\t#Lower surface\n\toriginal_data_ls0,original_data_ls1,na,na = separateSymmetryPlanes(data_ls)\n\t\n\t# Original a values\n\t# au: [ 0.12919237 0.12013562 0.17774294 0.07507853 0.29833847 -0.02940065 0.39216305 0.04469565 0.26027172 0.17421293 0.19707076 0.20970594]\n\t# al: [-0.12846913 -0.13816236 -0.12472814 -0.21670836 0.0306742 -0.53413441 0.27668371 -0.45672158 0.11430043 -0.14970269 0.01428077 0.05581 ]\n\t\n\t# Get new coordinates of airfoil\n\t# Create instance of lcst class\n\tLCST = lcst.lcst()\n\tupperSurf,lowerSurf = LCST.get_profile(new_a,data_us[:,0],data_ls[:,0])\n\t\n\t# New coordinates of airfoil after change in the parameters\n\tdata_ls[:,2] = lowerSurf[1]\n\tdata_us[:,2] = upperSurf[1]\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#=====================\n\t#COMPUTING SENSITIVITY\n\t#=====================\n\t'''\n\tnow we have made a manual deformation sign epsilon value\n\tas we are using the eco (e coefficients), we need to use them to find the new point\n\tthis is done by using a inner point product between the e coeffiecient and the new coordinate of the new points\n\t'''\n\t\n\t\n\tprint('[5] computing surface sensitivities')\n\t#Farfield\n\tdata_ff0,data_ff1,na,na = separateSymmetryPlanes(data_ff)\n\t\n\t#Upper surface\n\tdata_us0,data_us1,na,na = separateSymmetryPlanes(data_us)\n\t\n\t#Lower surface\n\tdata_ls0,data_ls1,na,na = separateSymmetryPlanes(data_ls)\n\t\n\t#Orig_points0 must be in the same order points0 in Find Vol ratios script for vertices info to be accurate\n\torig_points0 = 
np.row_stack([original_data_ls0[:,:4],original_data_us0[:,:4],data_ff0[:,:4]])\n\tdef_points0 = np.row_stack([data_ls0[:,:4],data_us0[:,:4],data_ff0[:,:4]])\n\t#orig_points1 = np.row_stack([original_data_ls1[:,:4],original_data_us1[:,:4],data_ff1[:,:4]])\n\t#def_points1 = np.row_stack([data_ls1[:,:4],data_us1[:,:4],data_ff1[:,:4]])\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#======================================================\n\t#PROPAGATING THE DEFORMATION USING THE DELAUNAY MAPPING\n\t#======================================================\n\t\n\t'''\n\tinitialising the new vector that will containt all the mesh internal points\n\t'''\n\tdef_mesh_points = mesh_points0.copy() # In the correct node order\n\t\n\t'''\n\tdo that along x,y and z using user defined function DOT as we are working with small number it is preferred to work without using python built-in functions\n\t'''\n\tprint('')\n\tprint('[6] propagating the deformation')\n\tfor i in po_ref:\n\t\n\t # i is the Node ID\n\t def_mesh_points[i,0] = DOT(eco[i],def_points0[vert[i],0]) \n\t def_mesh_points[i,1] = DOT(eco[i],def_points0[vert[i],1])\n\t def_mesh_points[i,2] = DOT(eco[i],def_points0[vert[i],2])\n\t\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#=============================\n\t#PRINTING STATISTICS ON SCREEN\n\t#=============================\n\t\n\tprint('')\n\tprint ('[7] check on the original mesh')\n\tcounter=0\n\t\n\tfor j in range(3):\n\t\tfor i in range(len(def_mesh_points)):\n\t\t\ta=mesh_points[i,j] \n\t\t\tb=def_mesh_points[i,j]\n\t\t\n\t\t\tif a!=b and abs(a-b)>1e-12:\t\n\t\t\t\t#print a, b\n\t\t\t\t#print ('difference'), abs(a-b)\n\t\t\t\tcounter +=1\n\tprint ('point different'), counter/3\n\tprint ('point in total'), len(def_mesh_points)\n\t\n\tprint('')\n\t\n\t\n\t\n\t\n\tsys.stdout.flush() #Force Printout\n\t\n\t#========================================================================\n\t#SAVING THE UPDATED PICKLE FILE & THE NEW MESH FILE CONTAINING 
DEFORMATION\n\t#========================================================================\n\t# Add the y=1 plane mesh points\n\tdef_mesh_points1 = def_mesh_points.copy()\n\tdef_mesh_points1[:,1] = 1\n\tdef_mesh_points1[:,3] = mesh_points1[:,3]\n\tdef_mesh_points = np.row_stack([def_mesh_points,def_mesh_points1])\n\t\n\t# Get boundary ID info for every mesh point\n\tNodeIDs1 = data_inner0[:,3]\n\tNodeIDs2 = data_inner1[:,3]\n\tNodeIDsff = data_ff[:,3]\n\tNodeIDsus = data_us[:,3]\n\tNodeIDsls = data_ls[:,3]\n\t\n\tprint('')\n\tprint ('[8] saving')\n\tread_in.write_tau2D_2(mesh,name,def_mesh_points)\n\t#read_in.write_tau2D_4(mesh+'_def3',def_mesh_points)\n\t\n\tsys.stdout.flush() #Force Printout\n\tend = time.clock()\n\tprint ('this script took seconds'), end-start\n\t#===\n\t#EOF\n\t#===\n","sub_path":"Mesh deformation/delaunay_deform.py","file_name":"delaunay_deform.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"647044427","text":"\"\"\"\nAll movements and turns are by increments of 5.\nRight arror key = move forward\nLeft arrow key = move backward\nr = turn right\nl = turn left\nu = pen up\nd = pen down\nh = go home\nc= clear\n\"\"\"\n\nfrom tkinter import *\nfrom turtle import *\n\nroot = Tk()\nT = Text(root, root.title(\"Controls\"),height=8, width=60)\nT.pack()\nT.insert(END, \"Right arrow key = move forwar\\nleft arrow key = move backward\\nr = turn right\\n1 = turn left\\nu = pen up\\nd = pen down\\nh = go home\\nc = clear\")\n\n\ndef main():\n width(2)\n speed(0)\n pencolor(\"blue\")\n onkey(up, \"u\")\n onkey(down, \"d\")\n onkey(clear, \"c\")\n onkey(home, \"h\")\n onkey(lambda: forward(5), \"Right\")\n onkey(lambda: back(5), \"Left\")\n onkey(lambda: left(5), \"l\")\n onkey(lambda: right(5), \"r\")\n listen()\n return \"Done!\"\nif __name__ == \"__main__\":\n msg = main()\n print(msg)\n 
mainloop()\n","sub_path":"KeybDraw.py","file_name":"KeybDraw.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"201362344","text":"import time\n\ndef fibo(num):\n if num <= 1:\n return num\n return fibo(num-1) + fibo(num-2)\n\ndef interfibo(num):\n pre_Fibo_1 = 0\n pre_Fibo_2 = 1\n result = 0 \n\n if num <= 1:\n return num \n \n for i in range(num-1):\n result = pre_Fibo_1 + pre_Fibo_2\n pre_Fibo_1 = pre_Fibo_2\n pre_Fibo_2 = result\n\n return result\n\nwhile True:\n nbr = int(input(\"Enter a number : \"))\n if nbr == -1:\n break\n ts = time.time()\n fibonumber = interfibo(nbr)\n ts = time.time() - ts\n print(\"InterFibo(%d) = %d, time %.6f\" %(nbr, fibonumber, ts))\n ts = time.time()\n fibonumber = fibo(nbr)\n ts = time.time() - ts\n print(\"Fibo(%d) = %d, time %.6f\\n\" %(nbr, fibonumber, ts))\n","sub_path":"software-project2/assignments/assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"216729867","text":"# -*- coding: utf-8 -*-\n#~ #----------------------------------------------------------------------\n\nimport os, shutil, time\nimport xml.dom.minidom\n#LMT path\nLMT_PATH = 'C:\\\\Users\\\\admin\\\\Downloads\\\\LMT_LTE'\n#OSP path\nOSP_PATH = 'C:\\\\Users\\\\admin\\\\Downloads\\\\OSP_STUDIO_X64\\\\OSP_STUDIO_X64\\\\log'\n# 文件传输类型字典\nFILE_TRANS_TYPE_DIC = {\n 'operationLog':1 # 操作日志\n ,'alterLog':2 # 变更日志\n ,'omSecurityLog':3 # 安全日志\n ,'alarmLog':4 # 告警日志文件\n ,'omKeyLog':5 # 重要过程日志\n ,'updateLog':6 # 升级日志\n ,'debugLog':7 # 黑匣子日志\n ,'statelessAlarmLog':8 # 异常日志\n ,'eventLog':9 # 事件日志\n ,'userLog':10 # 用户日志\n ,'cfgDataConsistency':11 # 配置数据一致性文件\n ,'stateDataConsistency':12 # 状态数据一致性文件\n ,'dataConsistency':13 # 数据一致性文件\n ,'curConfig':14 # 当前运行配置文件\n ,'planConfig':15 # 期望配置文件\n ,'equipSoftwarePack':16 # 主设备软件包\n ,'coldPatchPack':17 
# 主设备冷补丁包\n ,'hotPatchPack':18 # 主设备热补丁包\n ,'rruEquipSoftwarePack':19 # RRU软件包\n ,'relantEquipSoftwarePack':20 # 电调天线软件包\n ,'enviromentEquipSoftwarePackPack':21 # 环境监控软件包\n ,'gpsEquipSoftwarePack':22 # GPS软件包\n ,'equip1588SoftwarePack':23 # 1588软件包\n ,'cnssEquipSoftwarePackPack':24 # 北斗软件包\n ,'generalFile':25 # 普通文件\n ,'lmtMDBFile':26 # 数据库文件\n ,'activeAlarmFile':27 # 活跃告警文件\n ,'performanceFile':28 # 性能文件\n ,'cfgPatchFile':29 # 数据补丁文件\n ,'snapshotFile':30 # 快照配置文件\n ,'cdlFile':31 # CDL文件\n ,'sctpLogFile':32 # sctp快照日志文件\n ,'dumpLogFile':33 # dump快照日志文件\n ,'ocuEquipSoftwarePack':34 # ocu软件包\n ,'servicecdlFile':35 # 业务CDL文件\n ,'mroFile':36 # MRO文件\n ,'mrsFile':37 # MRS文件\n ,'mreFile':38 # MRE文件\n ,'mmlOpLog':39 # 直连接口操���日志\n ,'mmlPmFile':40 # 直连接口性能文件\n ,'gtsaLog':41 # GTSA远程日志\n ,'iotLog':42 # IOT测量日志\n ,'traceUserLog':43 # 跟踪用户日志\n ,'pcapLog':44 # PCAP日志\n ,'immediatMdt':45 # ImmediatMdt文件\n ,'loggedMdt':46 # LoggedMdt文件\n ,'rlf':47 # RlfMdt文件\n ,'ripLog':48 # RIP测量日志\n ,'rncDisa':49 # RNC容灾配置数据文件\n ,'raeFile':50 # RAE相关文件\n ,'riaLog':51 # RIA检测日志\n ,'slotRipFile':52 # 时隙级干扰统计文件\n ,'symbolRipFile':53 # 符号级干扰统计文件\n#添加RRU日志内容\n ,'rrualarm':0 #告警日志\n ,'rruuser':1 #用户日志\n ,'rrusys':2 #系统日志\n #,'all':3 #全部rru日志\n }\n\n# 日志文件类型与文件名称关系,用于检查日志文件是否上传成功(目前只添加了公共日志部分,其他日志后续实现时需添加)\nLOG_FILE_TYPE_AND_NAME_DIC = {\n 'operationLog':'_operatelog.lgz' # 操作日志\n ,'alterLog':2 # 变更日志\n ,'omSecurityLog':3 # 安全日志\n ,'alarmLog':'_alarmlog.lgz' # 告警日志文件\n ,'omKeyLog':'_omkeylog.lgz' # 重要过程日志\n ,'updateLog':6 # 升级日志\n ,'debugLog':'_debuglog.lgz' # 黑匣子日志\n ,'statelessAlarmLog':'_exceptionlog.lgz' # 异常日志\n ,'eventLog':'eventlog.lgz' # 事件日志\n ,'userLog':10 # 用户日志\n ,'cfgDataConsistency':'_configconsistency.dcb' # 配置数据一致性文件\n ,'stateDataConsistency':'_stateconsistency.dcb' # 状态数据一致性文件\n ,'dataConsistency':'_dataconsistency.dcb' # 数据一致性文件\n ,'curConfig':'_cur.cfg' # 当前运行配置文件\n ,'planConfig':15 # 期望配置文件\n ,'equipSoftwarePack':16 # 主设备软件包\n ,'coldPatchPack':17 # 主设备冷补丁包\n ,'hotPatchPack':18 # 
主设备热补丁包\n ,'rruEquipSoftwarePack':19 # RRU软件包\n ,'relantEquipSoftwarePack':20 # 电调天线软件包\n ,'enviromentEquipSoftwarePackPack':21 # 环境监控软件包\n ,'gpsEquipSoftwarePack':22 # GPS软件包\n ,'equip1588SoftwarePack':23 # 1588软件包\n ,'cnssEquipSoftwarePackPack':24 # 北斗软件包\n ,'generalFile':25 # 普通文件\n ,'lmtMDBFile':'lm.dtz' # 数据库文件\n ,'activeAlarmFile':'_activealarm.lgz' # 活跃告警文件\n ,'performanceFile':28 # 性能文件\n ,'cfgPatchFile':29 # 数据补丁文件\n ,'snapshotFile':30 # 快照配置文件\n ,'cdlFile':31 # CDL文件\n ,'sctpLogFile':32 # sctp快照日志文件\n ,'dumpLogFile':'XXXXX' # dump快照日志文件\n ,'ocuEquipSoftwarePack':34 # ocu软件包\n ,'servicecdlFile':35 # 业务CDL文件\n ,'mroFile':36 # MRO文件\n ,'mrsFile':37 # MRS文件\n ,'mreFile':38 # MRE文件\n ,'mmlOpLog':39 # 直连接口操作日志\n ,'mmlPmFile':40 # 直连接口性能文件\n ,'gtsaLog':41 # GTSA远程日志\n ,'iotLog':42 # IOT测量日志\n ,'traceUserLog':'XXXXX' # 跟踪用户日志\n ,'pcapLog':44 # PCAP日志\n ,'immediatMdt':45 # ImmediatMdt文件\n ,'loggedMdt':46 # LoggedMdt文件\n ,'rlf':47 # RlfMdt文件\n ,'ripLog':48 # RIP测量日志\n ,'rncDisa':49 # RNC容灾配置数据文件\n ,'raeFile':50 # RAE相关文件\n ,'riaLog':51 # RIA检测日志\n ,'slotRipFile':52 # 时隙级干扰统计文件\n ,'symbolRipFile':53 # 符号级干扰统计文件\n#添加RRU日志内容\n ,'rrualarm': '_00rrualarm.lgz'#告警日志\n ,'rruuser':'_00rruuser.lgz' #用户日志\n ,'rrusys':'_00rrusys.lgz' #系统日志\n #,'all':['_00rrualarm.lgz', '_00rruuser.lgz', '_00rrusys.lgz'] #全部rru日志\n }\n#Board Log\nHSCTD_PROCID_AND_FILETYPE = {\n '0' : '1;11;13;15;16;17;18;44;45;49;50;51;55;56;57;58;59;61;70;71;74;81',\n #'1' : 'all',\n '2' : '1;44;45;49;50;51;55;56;57;58;59;61;71;81',\n '3' : '1;44;45;49;50;51;55;56;57;58;59;61;71;81',\n '4' : '1;44;45;49;50;51;55;56;57;58;59;61;71;81',\n}\n#cell log\nHBPODD_cell_FILETYPE = {\n 'cell' : '_0000060001.lgz'\n}\n\nHBPOD_PROCID_AND_FILETYPE = {\n '0' : '1;10;14;71;74;81',\n '1' : '1;60;64;66;71;81',\n '2' : '1;60;64;66;71;81',\n '3' : '1;60;64;66;71;81',\n '4' : '1;60;64;66;71;81',\n '5' : '1;60;64;66;71;81',\n '6' : '1;61;63;65;67;71;81',\n '8' : '1;71;81',\n '10': '1;71;81',\n}\n#~ 
#----------------------------------------------------------------------\ndef IsSubString_bak(SubStrList,Str):\n '''\n #判断字符串Str是否包含序列SubStrList中的每一个子字符串\n #>>>SubStrList=['F','EMS','txt']\n #>>>Str='F06925EMS91.txt'\n #>>>IsSubString(SubStrList,Str)#return True (or False)\n '''\n flag=True\n for substr in SubStrList:\n print(\"substr:{0}\".format(substr))\n if not(substr in Str):\n flag=False\n\n return flag\n\n#~ #----------------------------------------------------------------------\ndef IsSubString(SubStrList,Str):\n '''\n #判断字符串Str是否包含序列SubStrList中的每一个子字符串\n #>>>SubStrList=['F','EMS','txt']\n #>>>Str='F06925EMS91.txt'\n #>>>IsSubString(SubStrList,Str)#return True (or False)\n '''\n flag=True\n print(\"SubStrList:{0}\".format(SubStrList))\n if not(SubStrList in Str):\n flag=False\n\n return flag\n\n#~ #----------------------------------------------------------------------\ndef IsLogFileExists(FindPath,FlagStr=[]):\n '''\n #获取目录中指定的文件名\n #>>>FlagStr=['F','EMS','txt'] #要求文件名称中包含这些字符\n #>>>FileList=IsLogFileExists(FindPath,FlagStr) #\n '''\n import os\n if (type(FlagStr) == int):\n FlagStr = str(FlagStr)\n\n flag=False\n FileList=[]\n FileNames=os.listdir(FindPath)\n print(FileNames)\n if (len(FileNames)>0):\n for fn in FileNames:\n if (len(FlagStr)>0):\n #返回指定类型的文件名\n if (IsSubString(FlagStr,fn)):\n flag=True\n break\n else:\n #默认直接返回所有文件名\n flag=False\n return flag\n#~ #----------------------------------------------------------------------\ndef isRRULogFileExists(ftp_dir, name_key):\n \"\"\"\n 包含name_key的文件是否都出现在dir中\n 返回bool\n \"\"\"\n wait_seconds = 0\n while True:\n time.sleep(1)\n wait_seconds += 1\n try:\n listdir = os.listdir(ftp_dir)\n except FileNotFoundError:\n listdir = os.listdir(os.makedirs(ftp_dir))\n for file in listdir:\n if name_key in file:\n return True\n elif wait_seconds == 200:\n return False\n#~ #----------------------------------------------------------------------\ndef GetFileList(FindPath,FlagStr=[]):\n '''\n #获取目录中指定的文件名\n 
#>>>FlagStr=['F','EMS','txt'] #要求文件名称中包含这些字符\n #>>>FileList=GetFileList(FindPath,FlagStr) #\n '''\n import os\n FileList=[]\n FileNames=os.listdir(FindPath)\n if (len(FileNames)>0):\n for fn in FileNames:\n if (len(FlagStr)>0):\n #返回指定类型的文件名\n if (IsSubString(FlagStr,fn)):\n fullfilename=os.path.join(FindPath,fn)\n FileList.append(fullfilename)\n else:\n #默认直接返回所有文件名\n fullfilename=os.path.join(FindPath,fn)\n FileList.append(fullfilename)\n\n #对文件名排序\n if (len(FileList)>0):\n FileList.sort()\n\n return FileList\n\n\n#~ #----------------------------------------------------------------------\ndef GetFileTransTypeIndexByName(strName):\n '''\n #根据名称获取传输文件类型的索引\n #>>>GetFileTransTypeIndexByName('operationLog') #Return 1\n '''\n return FILE_TRANS_TYPE_DIC[strName]\n\ndef GetLogFileKeyNameByType(strType):\n '''\n #根据日志文件类型获取文件名称的关键字,用于检查日志文件是否上传成功。\n #如:文件名为'enb_0_20090110011032+8_operatelog.lgz',关键字为'_operatelog.lgz'\n #>>>GetLogFileKeyNameByType('operationLog') # Return '_operatelog.lgz'\n\t'''\n return LOG_FILE_TYPE_AND_NAME_DIC[strType]\n\ndef Getcell_LogFileByType(strType):\n '''\n #根据日志文件类型获取文件名称的关键字,用于检查日志文件是否上传成功。\n #如:文件名为'enb_0_20090110011032+8_operatelog.lgz',关键字为'_operatelog.lgz'\n #>>>GetLogFileKeyNameByType('operationLog') # Return '_operatelog.lgz'\n '''\n return HBPODD_cell_FILETYPE[strType]\n#~ #majingwei-------------------------------------------------------------------\ndef get_index_by_slotNo(slotNo):\n '''根据槽位号获取实例列表\n 例如:slotNo = 6 return [ 0.0.6.0 | 0.0.6.1 | 0.0.6.2 | 0.0.6.3 | 0.0.6.4 | 0.0.6.5 | 0.0.6.6 | 0.0.6.7 | 0.0.6.8 | 0.0.6.9 | 0.0.6.10 ]\n '''\n index_list = list()\n hsctd_slotNo_range = (0, 1)\n hbpod_slotNo_range = (6, 7, 8, 9, 10, 11)\n if int(slotNo) in hsctd_slotNo_range:\n for procId in read_xml_of_board_log(LMT_PATH, 'HSCTD', '0-0.23').keys():\n index_list.append('0.0.' + str(slotNo) + '.' 
+ str(procId))\n return index_list\n elif int(slotNo) in hbpod_slotNo_range:\n for procId in read_xml_of_board_log(LMT_PATH, 'HBPOD', '22').keys():\n index_list.append('0.0.' + str(slotNo) + '.' + str(procId))\n return index_list\n else:\n return 'error: slotNo is wrong!'\n\ndef parse_board_log_idx(board_log_idx):\n '''根据实例索引获取debugUplpadType取值\n 例如:0.0.6.2 返回'1;44;45;49;50;51;55;56;57;58;59;61;71;81'\n '''\n idx_list = board_log_idx.split('.')\n hsctd_slotNo_range = (0, 1, )\n hbpod_slotNo_range = (6, 7, 8, 9, 10, 11, )\n if int(idx_list[2]) in hsctd_slotNo_range:\n #return HSCTD_PROCID_AND_FILETYPE[str(idx_list[3])]\n return read_xml_of_board_log(LMT_PATH, 'HSCTD', '0-0.23')[str(idx_list[3])]\n elif int(idx_list[2]) in hbpod_slotNo_range:\n #return HBPOD_PROCID_AND_FILETYPE[str(idx_list[3])]\n return read_xml_of_board_log(LMT_PATH, 'HBPOD', '22')[str(idx_list[3])] \n return 'error'\n\ndef Clear_folder(folder_path):\n '''清空指定文件夹'''\n shutil.rmtree(folder_path)\n os.makedirs(folder_path)\n\ndef Create_folder(folder_path):\n if os.path.exists(folder_path):\n shutil.rmtree(folder_path)\n os.makedirs(folder_path)\n else:\n os.makedirs(folder_path)\n\ndef get_file_keyname(index):\n index_list = index.split('.')\n slot_str = str(index_list[2])\n procId_str = str(index_list[3])\n if 1 == len(str(index_list[2])):\n slot_str = '0' + str(index_list[2])\n if 1 == len(str(index_list[3])):\n procId_str = '0' + str(index_list[3])\n return '0000' + slot_str + procId_str\n\ndef isBoardLogFileExists(ftp_dir, index, typeNo):\n \"\"\"检查0.0.6.1的'1;10;14;71;74;81'这些日志是否存在\n \"\"\"\n wait_seconds = 0\n type_list = typeNo.split(';')\n\n file_keyname_list = list()\n for subTypeNo in type_list:\n if 1 == len(subTypeNo):\n file_keyname_list.append(get_file_keyname(index) + '0' + str(subTypeNo))\n else: \n file_keyname_list.append(get_file_keyname(index) + str(subTypeNo))\n print(file_keyname_list)\n while True:\n time.sleep(1)\n wait_seconds += 1\n try:\n listdir = os.listdir(ftp_dir)\n 
except FileNotFoundError:\n listdir = os.listdir(os.makedirs(ftp_dir))\n for file in listdir:\n for name_key in file_keyname_list:\n if name_key in file:\n print(name_key)\n file_keyname_list.remove(name_key)\n\n if 0 == len(file_keyname_list):\n return True\n elif wait_seconds == 200:\n return False\n\ndef read_xml_of_board_log(lmt_path, board_describe, board_type):\n #example: board_describe is \"HSCTD\",board_type is \"0-0.23\"\n board_type_dict = dict()\n\n dom = xml.dom.minidom.parse(lmt_path + '\\\\LMT\\\\config\\\\LMTBoardLogType5216.xml')\n basebandtoDsp = dom.documentElement\n boards = basebandtoDsp.getElementsByTagName('BOARD')\n for board in boards:\n if board.hasAttribute('DESCRIBE'):\n if (board_describe == board.getAttribute('DESCRIBE') and board_type == board.getAttribute('TYPE')):\n processers = board.getElementsByTagName('PROCESSOR')\n for processer in processers:\n cores = processer.getElementsByTagName('CORE')\n for core in cores:\n if core.hasAttribute('TYPE'):\n logshows = core.getElementsByTagName('LOGSHOW')\n board_type_value = ''\n for logshow in logshows:\n board_type_value +=logshow.getAttribute('TYPE') + ';'\n #print(core.getAttribute('TYPE'))\n #print(board_type_value)\n if '6' == processer.getAttribute('TYPE') and 'ZU21DR' == processer.getAttribute('DESCRIBE'):\n board_type_dict['8'] = board_type_value\n board_type_dict['10'] = board_type_value\n else:\n board_type_dict[str(core.getAttribute('TYPE'))] = board_type_value\n return board_type_dict\n\ndef Copy_Files_To_SpecifiedDir(osp_path, specified_path):\n alllist = os.listdir(osp_path)\n\n for file in alllist:\n old_name = osp_path + '\\\\' + str(file)\n new_name = specified_path + '\\\\' + str(file)\n shutil.copyfile(old_name, new_name)\n#~#xinjinadd\ndef get_index_by_slotNo_two(slotNo):\n '''根据槽位号获取实例列表\n 例如:slotNo = 6 return 0.0.6\n '''\n index_num = '0.0.'\n hsctd_slotNo_range = (0, 1)\n hbpod_slotNo_range = (6, 7, 8, 9, 10, 11)\n if int(slotNo) in hsctd_slotNo_range:\n index_num 
+= str(slotNo)\n return index_num\n elif int(slotNo) in hbpod_slotNo_range:\n index_num += str(slotNo)\n return index_num\n else:\n return 'error: slotNo is wrong!'\n\ndef member_decode(item):\n \"\"\"TSP测试用 引用参数接收后解码\"\"\"\n ret = hex(int.from_bytes(item[0], byteorder=\"big\"))\n return ret\n\ndef member_decode_to_integer_little(item):\n ret = int(int.from_bytes(item[0], byteorder=\"little\"))\n return ret\n\ndef member_decode_to_binary_little(item):\n ret = bin(int.from_bytes(item[0], byteorder=\"little\"))\n return ret\n\ndef member_decode_to_hex_little(item):\n ret = hex(int.from_bytes(item[0], byteorder=\"little\"))\n return ret\n\ndef member_decode_to_integer_negative(item):\n import string\n ret = int(int.from_bytes(item[0], byteorder=\"little\"))\n\n if(0x80000000 == (ret & 0x80000000)):\n ret_cvt = ~(ret - 1) & 0xffff\n ret = ret_cvt\n return (-ret)\n\ndef set_DownloadTime():\n\n import time\n import string\n\n #软件本版下载时间需要特殊格式\n oid_time_formart = (7, 226, 9, 27, 6, 33, 17, 0, 43, 0, 0)\n oid_time_src = list(oid_time_formart)\n\n local_time = time.localtime(time.time())\n\n time_Year = local_time[0]\n time_Month = local_time[1]\n time_Day = local_time[2]\n time_Hour = local_time[3]\n time_Minite = local_time[4]\n time_Second = local_time[5]\n\n oid_time_src[0] = time_Year / (256)\n oid_time_src[1] = time_Year % (256)\n oid_time_src[2] = time_Month\n oid_time_src[3] = time_Day\n oid_time_src[4] = (time_Hour - 8)\n oid_time_src[5] = time_Minite\n oid_time_src[6] = time_Second\n oid_time_src[7] = 0\n\n return tuple(oid_time_src)\n\ndef member_decode_str_to_list(item):\n \"\"\"sysiic用 结构体引用参数接收后解码\"\"\"\n import struct\n print(type(item[0]))\n ret = struct.unpack('BBBBi', item[0])\n return ret\n \ndef get_boardIndex_by_slotNo(slotNo):\n\tboardIndex_list = list()\n\thsctd_slotNo_range = (0, 1)\n\thbpod_slotNo_range = (6, 7 , 8, 9, 10, 11)\n\n\tif int(slotNo) in hsctd_slotNo_range:\n\t\tindex_str = '0.0.' 
+ str(slotNo)\n\t\tboardIndex_list.append(index_str)\n\t\treturn boardIndex_list\n\telif int(slotNo) in hbpod_slotNo_range:\n\t\tindex_str = '0.0.' + str(slotNo)\n\t\tboardIndex_list.append(index_str)\t\n\t\treturn boardIndex_list\n\telse:\n\t\treturn 'error'\t\n\treturn boardIndex_list\n\ndef get_procIndex_by_slotNo(slotNo):\n\tboardIndex_list = list()\n\thsctd_slotNo_range = (0, 1)\n\thbpod_slotNo_range = (6, 7 , 8, 9, 10, 11)\n\n\tif int(slotNo) in hsctd_slotNo_range:\n\t\tboardIndex_list.append('0.0.' + str(slotNo) + '.0')\n\t\treturn boardIndex_list\n\telif int(slotNo) in hbpod_slotNo_range:\n\t\tfor loop in range(0, 6):\n\t\t\tboardIndex_list.append('0.0.' + str(slotNo) + '.' + str(loop))\n\t\treturn boardIndex_list\n\telse:\n\t\treturn 'error'\t\n\treturn boardIndex_list\n\ndef should_be_equal_for_str(str1, str2):\n if str(str1) == str(str2):\n return True\n return False\n\ndef get_host_ip():\n import socket\n IP_List = list()\n Target_Ip = '0.0.0.0'\n\n #获取包含主机名和IP在内的IP信息表\n HostInfo_List = socket.gethostbyname_ex(socket.gethostname())\n #获取我们需要的内外网IP集合\n IP_List = HostInfo_List[2]\n \n #遍历列表,查找是否存在外网IP网段\n for ip_loop in IP_List:\n ip = ip_loop[0:10]\n if(ip == '172.27.45.' 
or ip == '172.27.245' or ip == '172.27.246'):\n Target_Ip = ip_loop\n break\n\n return Target_Ip\n\n#~ #AAU DD函数中AIU_WRITE_FPGA函数需要将寄存去地址和值进行转换-------------------\ndef fpga_addregval_to_buff(u16Addr, u32RegVal):\n buff = list()\n u16Addr &= ~0x8000; #/*最高位0表示读FPGA寄存器*/\n buff.append(int(u16Addr)) #buff[0]\n buff.append((u32RegVal >> 16) & (0xffff)) #buff[1]\n buff.append(u32RegVal & (0xffff)) #buff[2]\n return buff\n\ndef file_path_prepare(fullPath, fename):\n ''' 检查指定路径文件是否存在、检查是否为指定后缀的文件\n fullPath:文件路径 例:D:\\\\Git\\\\NSA_80_20\\\\cur.cfg\n fename:文件扩展名 例:.cfg\n '''\n\n if os.path.exists(fullPath) != True:\n return 'error:path not exist', None, None\n\n if os.path.isfile(fullPath) != True:\n return 'error:path is not file', None, None\n\n [dirName, fileName] = os.path.split(fullPath)\n [fname, fexName] = os.path.splitext(fileName)\n\n if fename != fexName:\n return 'error:fename not exist', None, None\n\n return True, dirName, fileName\n\ndef get_oid_index(str):\n oid_list = str.split('.')\n return oid_list[-1]\n\n","sub_path":"OM/utils/CiUtils.py","file_name":"CiUtils.py","file_ext":"py","file_size_in_byte":20662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"307402002","text":"import sys\nfrom collections import Counter\nsys.stdin = open(\"7453_합이 0인 네 정수.txt\", \"rt\")\n\n\ndef solution(N, A, B, C, D):\n AB, CD = [], []\n for i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(-(C[i] + D[j]))\n\n answer = 0\n cntr = Counter(CD)\n for num in AB:\n answer += cntr[num]\n print(answer)\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.readline\n N = int(input())\n A, B, C, D = [], [], [], []\n for _ in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\n solution(N, A, B, C, D)\n","sub_path":"BaekJoon/단계별로 풀어보기/투 포인터/7453_합이 0인 네 정수.py","file_name":"7453_합이 0인 네 
정수.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"515853073","text":"import sys\nimport os\n\nfrom classifier import Classifier\n\n\ndef main():\n classifier = Classifier()\n match = error = 0\n for root, dirs, files in os.walk(\"test\"):\n for f in files:\n if os.path.basename(f) == '.DS_Store':\n continue\n\n full_path = root + os.sep + f\n\n res = classifier.classify(full_path)[0]\n\n expected = root.split(os.sep)[1]\n\n # import pdb; pdb.set_trace()\n\n if res[0] == expected:\n print('MATCH act={} file={} score={}'.format(res[0], f, res[1]))\n match += 1\n else:\n print('ERR act={} exp={} file={} score={}'.format(res[0], expected, f, res[1]))\n error += 1\n\n print('RESULT: SUCCESS=' + str(match / (match + error)))\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"classify_test.py","file_name":"classify_test.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"378691772","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom random import choice\nfrom random import uniform\n\nfrom time import sleep\nfrom datetime import datetime\n\nimport csv\n\n\n# def get_html(url, useragent=None, proxy=None):\ndef get_html(url):\n\n\tsleeptime = uniform(3,5)\n\tsleep(sleeptime)\n\n\t# print('\\tget_html')\n\tproxy, useragent = get_url_data()\n\n\tr = requests.get(url, headers=useragent, proxies=proxy)\n\treturn r.text\n\n\ndef get_ip(html):\n\t# print('\\tget_ip')\n\n\tsoup = BeautifulSoup(html, 'lxml')\n\terror = 'none'\n\n\ttry:\n\t\tip = soup.find('span', class_='ip').text.strip()\n\texcept Exception as e:\n\t\tip='none'\n\t\terror = 'error:{}\\tip'.format(type(e))\n\t\t# print('error:{}\\tip'.format(type(e)))\n\n\ttry:\n\t\tua = soup.find('span', class_='ip').find_next_sibling('span').text.strip()\n\texcept Exception as e:\n\t\tua='none'\n\t\terror = 
'error:{}\\tuser agent'.format(type(e))\n\t\t# print('error:{}\\tuser agent'.format(type(e)))\n\n\t# print('{}\\n{}\\n---------------'.format(ip, ua))\n\treturn ip, error\n\n\ndef write_csv(data):\n\twith open('change_proxy_useragent.csv', 'a') as f:\n\t\twriter = csv.writer(f)\n\t\t\n\t\twriter.writerow((\n\t\t\t\t\tdata['proxy'],\n\t\t\t\t\tdata['useragent'],\n\t\t\t\t\tdata['error'],\n\t\t\t\t\tdata['datetime']\n\t\t\t\t\t))\n\ndef write_console(data, counter):\n\t# print('#{}\\nproxy: {}\\nuseragent: {}\\nerror: {}\\nip: {}\\ndatetime: {}\\n'.format(\n\t# \tcounter,\t\t\n\t# \tdata['proxy'],\n\t# \tdata['useragent'],\n\t# \tdata['error'],\n\t# \tdata['ip'],\n\t# \tdata['datetime']\n\t# \t))\n\tprint('#{}\\nip: {}\\n'.format(\n\t\tcounter,\t\n\t\tdata['ip']\n\t\t))\n\ndef to_log(data, counter):\n\t#write_csv(data)\n\twrite_console(data, counter)\n\n\ndef get_url_data():\n\tuseragents = open('useragents.txt').read().split('\\n')\n\tproxies = open('proxies.txt').read().split('\\n')\n\n\tproxy = {'http':'http://' + choice(proxies)}\n\tuseragent = {'User-Agent':choice(useragents)}\n\n\treturn proxy, useragent\n\ndef main():\n\tprint('begin\\n--------------------')\n\n\turl = 'http://sitespy.ru/my-ip'\n\n\t# useragents = open('useragents.txt').read().split('\\n')\n\t# proxies = open('proxies.txt').read().split('\\n')\n\n\t#print(proxies[-1])\n\n\tfor i in range(10):\n\n\t\t# sleeptime = uniform(3,5)\n\t\t# sleep(sleeptime)\n\n\t\t# proxy = {'http':'http://' + choice(proxies)}\n\t\t# useragent = {'User-Agent':choice(useragents)}\n\t\tproxy, useragent = get_url_data()\n\n\t\tdt = datetime.now()\n\n\n\t\t# print('#{} [{}]\\n{}\\n********\\n{}\\n{}\\n********'.format(i, sleeptime, dt, proxy, useragent))\n\n\t\ttry:\n\t\t\thtml = get_html(url)\n\t\texcept Exception as e:\n\t\t\tto_log({\n\t\t\t\t'proxy':proxy,\n\t\t\t\t'useragent':useragent,\n\t\t\t\t'error':type(e),\n\t\t\t\t'ip':'none',\n\t\t\t\t'datetime':dt\n\t\t\t}, i)\n\t\t\tcontinue\n\t\tip, error = 
get_ip(html)\n\n\t\tto_log({\n\t\t\t'proxy':proxy,\n\t\t\t'useragent':useragent,\n\t\t\t'error':error,\n\t\t\t'ip':ip,\n\t\t\t'datetime':dt\n\t\t}, i)\n\n\n\t\t# data = {\n\t\t# \t'proxy':proxy,\n\t\t# \t'useragent':useragent,\n\t\t# \t'error':out['error']\n\t\t# }\n\n\t\t# write_csv(data)\n\n\tprint('--------------------\\nbetti')\n\n\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"else/change_proxy_useragent.py","file_name":"change_proxy_useragent.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"408125339","text":"\"\"\"\nfind the number of 1 in a binary number\n\n\"\"\"\n\n\nclass Solution(object):\n # O(n)\n def findN(self, n):\n res = 0\n while n != 0:\n if n & 1 == 1:\n res += 1\n n = n >> 1\n return res\n\n # O(n) n is the number if 1\n def findN2(self, n):\n res = 0\n while n != 0:\n n = n & (n - 1)\n res += 1\n return res\n\n\ntest = Solution()\nprint(test.findN2(4))\n","sub_path":"find_ones_in_binary_number.py","file_name":"find_ones_in_binary_number.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"150173440","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/pouch/__init__.py\n# Compiled at: 2008-09-12 16:45:19\nfrom urlparse import urlparse\nimport httplib, datetime, copy, urllib, simplejson\n\nclass SchemeError(Exception):\n pass\n\n\nclass HTTPInteraction(object):\n\n def __init__(self, uri):\n self.url = urlparse(uri)\n\n def __getattr__(self, name):\n if name.upper() in ('HEAD', 'GET', 'PUT', 'DELETE', 'POST'):\n return lambda path, body=None, headers=None: self.request(name.upper(), path, body, headers)\n return\n\n def request(self, method, path, body=None, 
headers=None):\n if self.url.scheme == 'http':\n connection = httplib.HTTPConnection(self.url.netloc)\n elif self.url.scheme == 'https':\n connection = httplib.HTTPSConnection(self.url.netloc)\n else:\n raise SchemeError(self.url.scheme + ' is not a valid scheme.')\n if headers is None:\n headers = {'content-type': 'application/json', 'user-agent': 'pouch-0.1pre'}\n connection.request(method, path, body=body, headers=headers)\n response = connection.getresponse()\n response.body = response.read()\n return response\n\n\ndef make_path(dbname, *args):\n return '/' + ('/').join([dbname] + list(args))\n\n\nclass Connection(object):\n \"\"\"Handles the connection to a given server.\"\"\"\n\n def __init__(self, uri):\n self.http = HTTPInteraction(uri)\n\n def create_db(self, name):\n response = self.http.put('/' + name.lower())\n assert simplejson.loads(response.body)['ok']\n return Database(self, name.lower())\n\n def delete_db(self, name):\n response = self.http.delete('/' + name.lower())\n assert simplejson.loads(response.body)['ok']\n\n def db_info(self, name):\n response = self.http.get('/' + name.lower())\n return simplejson.loads(response.body)\n\n\nclass Database(object):\n \"\"\"Handles interaction with a single couchdb database.\"\"\"\n\n def __init__(self, name, connection=None):\n global GLOBAL_CONNECTION\n self.name = name\n if connection is None:\n self.connection = GLOBAL_CONNECTION\n else:\n self.connection = connection\n self.views = Views(self)\n return\n\n def create_doc(self, obj):\n response = self.connection.http.post('/' + self.name, body=simplejson.dumps(obj))\n assert response.status == 201\n response_obj = simplejson.loads(response.body)\n assert response_obj['ok']\n return response_obj\n\n def update_doc(self, obj):\n response = self.connection.http.put(make_path(self.name, obj['_id']), body=simplejson.dumps(obj))\n assert response.status == 201\n response_obj = simplejson.loads(response.body)\n assert response_obj['ok']\n return 
response_obj\n\n def get_doc(self, _id):\n return simplejson.loads(self.connection.http.get(make_path(self.name, _id)).body)\n\n\nALL_MODEL_CLASSES = {}\nGLOBAL_CONNECTION = None\nGLOBAL_DB = None\n\ndef set_globals(uri, dbname):\n global GLOBAL_CONNECTION\n global GLOBAL_DB\n GLOBAL_CONNECTION = Connection(uri)\n GLOBAL_DB = Database(dbname, GLOBAL_CONNECTION)\n for cls in ALL_MODEL_CLASSES.values():\n cls.db = GLOBAL_DB\n\n\nclass ModelMeta(type):\n\n def __init__(cls, name, bases, attrdict):\n if cls.__name__ != 'Model':\n super(ModelMeta, cls).__init__(name, bases, dict([ (k, attrdict[k]) for k in ['__module__'] ]))\n assert cls.__name__ not in ALL_MODEL_CLASSES.keys()\n cls.__restrictions__ = dict([ (k, v) for (k, v) in attrdict.items() if not k.startswith('__') if type(v).__name__ != 'function' ])\n assert cls.__name__ not in ALL_MODEL_CLASSES.keys()\n cls.type = cls.__name__\n ALL_MODEL_CLASSES[cls.type] = cls\n else:\n super(ModelMeta, cls).__init__(name, bases, attrdict)\n\n\nclass Model(object):\n __metaclass__ = ModelMeta\n db = GLOBAL_DB\n __reserved_words__ = [\n 'db', 'save']\n\n def __init__(self, **kwargs):\n if self.db is None:\n db = GLOBAL_DB\n else:\n db = self.db\n self.__dict__ = {}\n self.db = db\n for (k, v) in self.__restrictions__.items():\n if hasattr(v, 'auto_add') and v.auto_add is True:\n kwargs[k] = v.auto()\n if hasattr(v, 'cast') and k in kwargs.keys():\n kwargs[k] = v.cast(kwargs[k])\n\n for (key, value) in kwargs.items():\n self.__setattr__(key, value)\n\n if not kwargs.has_key('type'):\n self.__dict__['type'] = self.type\n return\n\n def __setattr__(self, key, value):\n if key.startswith('__'):\n return object.__setattr__(self, key, value)\n if self.__restrictions__.has_key(key):\n self.__restrictions__[key].validate(value)\n self.__dict__[key] = value\n\n def __str__(self):\n return self.__class__.__name__ + '(pouch.Model): ' + str(self.__dict__)\n\n def save(self):\n upload_dict = copy.copy(self.__dict__)\n for r in 
self.__reserved_words__:\n upload_dict.pop(r, None)\n\n for key in self.__restrictions__.keys():\n if upload_dict.has_key(key):\n upload_dict[key] = self.__restrictions__[key].marshall(upload_dict[key])\n\n if hasattr(self, '_id'):\n response_dict = self.db.update_doc(upload_dict)\n else:\n response_dict = self.db.create_doc(upload_dict)\n self.__dict__.update({'_id': response_dict['id'], '_rev': response_dict['rev']})\n return\n\n @classmethod\n def get(cls, _id):\n db = cls.db or GLOBAL_DB\n return cls(**dict([ (str(k), v) for (k, v) in db.get_doc(_id).items() ]))\n\n def __cmp__(self, other):\n if other.__dict__ == self.__dict__:\n return 0\n\n\nclass ModelRestrictionValidationError(Exception):\n pass\n\n\nclass Restriction(object):\n\n def validate(self, value):\n if hasattr(self, 'type'):\n if type(value) != self.type:\n raise ModelRestrictionValidationError('Value of ' + str(value) + ' is not ' + self.type.__name__)\n else:\n return\n\n marshall = lambda self, value: value\n\n\nclass LooserRestriction(object):\n\n def validate(self, value):\n if hasattr(self, 'type'):\n try:\n self.type(value)\n except:\n raise ModelRestrictionValidationError('Value of ' + str(value) + ' is not ' + self.type.__name__)\n else:\n return\n\n marshall = lambda self, value: self.type(value)\n\n\nclass Unicode(LooserRestriction):\n type = unicode\n\n\nclass Int(Restriction):\n type = int\n\n\nclass Float(Restriction):\n type = float\n\n\nclass List(LooserRestriction):\n type = list\n\n\nclass Dict(Restriction):\n type = dict\n\n\nclass Bool(Restriction):\n type = bool\n\n\nclass DateTime(Restriction):\n\n def __init__(self, auto_now=False):\n self.auto_now = auto_now\n self.auto_add = auto_now\n\n type = datetime.datetime\n\n def marshall(self, value):\n return value.isoformat()\n\n def auto(self):\n return datetime.datetime.now()\n\n\nclass View(object):\n\n def __init__(self, db, path):\n self.db, self.path = db, path\n\n def __call__(self, **kwargs):\n for (k, v) in 
kwargs.items():\n if type(v) is bool:\n kwargs[k] = str(v).lower()\n if k in ('key', 'startkey', 'endkey'):\n kwargs[k] = simplejson.dumps(v)\n\n query_string = urllib.urlencode(kwargs)\n if len(query_string) is not 0:\n path = self.path + '?' + query_string\n else:\n path = self.path\n result = self.db.connection.http.get(path).body\n return simplejson.loads(result)\n\n\nclass DesignDocument(object):\n\n def __init__(self, db, path):\n self.db, self.path = db, path\n\n def __getattr__(self, name):\n return View(self.db, self.path + '/' + name)\n\n\nclass Views(object):\n\n def __init__(self, db):\n self.db = db\n\n def __getattr__(self, name):\n return DesignDocument(self.db, make_path(self.db.name, '_view', name))","sub_path":"pycfiles/Poulda-0.9.tar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"176509010","text":"# -*- coding: utf-8 -*-\nimport dateutil.parser\nfrom datetime import datetime\nfrom pytz import timezone\nimport os\nimport urllib\n\ndef etc_convertdate(isodate):\n date = dateutil.parser.parse(isodate)\n return date.strftime(\"%Y-%m-%d %H:%M\")\n\ndef add_timezone_to_date(date_str):\n new_date = datetime.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev')\n new_date_timezone = TZ.localize(new_date)\n return new_date_timezone.strftime(\"%Y-%m-%d %H:%M:%S%z\")\n\ndef convert_etc_date_to_iso_format(date_time_from_ui):\n new_timedata = datetime.strptime(date_time_from_ui, '%d-%m-%Y\\n%H:%M')\n new_date_time_string = new_timedata.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n return new_date_time_string\n\ndef etc_download_file(url, file_name, output_dir):\n urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))\n\ndef convert_etc_string(string):\n return {\n 'True': '1',\n 'False': '0',\n u\"Так\": True,\n u\"Hi\": False,\n u'Очікування пропозицій': 
'active.tendering',\n u'Період аукціону': 'active.auction',\n u'Кваліфікація переможця': 'active.qualification',\n u'Пропозиції розглянуто': 'active.awarded',\n u'Аукціон не відбувся': 'unsuccessful',\n u'Аукціон завершено': 'complete',\n u'Аукціон відмінено': 'cancelled',\n u'Чорновик': 'draft',\n u'Майна банків': 'dgfOtherAssets',\n u'Прав вимоги за кредитами': 'dgfFinancialAssets',\n u'Вперше': 1,\n u'Вдруге': 2,\n u'Втретє': 3,\n u'Вчетверте': 4,\n u'Грн.': 'UAH',\n u'(включно з ПДВ)': True,\n u'(без ПДВ)': False,\n u'[переможець розглядається кваліфікаційною комісією]': 'pending',\n u'[Oчікування кінця кваліфікації переможця]': 'pending.waiting',\n u'[Учасник достроково забрав гарантійний внесок]': 'cancelled',\n u'[Очікується протокол]': 'pending.verification',\n u'[Очікується оплата]': 'pending.payment',\n u'[Оплачено, очікується підписання договору]': 'active',\n u'[Кваліфікаційна комісія відмовила переможцю]': 'unsuccessful',\n }.get(string, string)\n\n","sub_path":"etc_helper.py","file_name":"etc_helper.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"530437582","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\nclass fosa_accounts(models.Model):\n\t_name = 'sacco.fosa.accounts'\n\n\tname = fields.Char()\n\taccount_type = fields.Many2one('sacco.fosa.account.types')\n\tstate = fields.Selection([('draft',\"Draft\"),('pending',\"Pending\"),('active',\"Active\"),('suspended',\"Suspended\")], default = 'draft')\n\tmember = fields.Many2one('sacco.member')\n\tbalance = fields.Float()\n\tledger_ids = fields.One2many('sacco.member.ledger.entry','member_no')\n\n\t@api.one\n\t@api.onchange('name')\n\tdef get_sequence(self):\n\t\tsetup = self.env['sacco.setup'].search([('id','=',1)])\n\t\tsequence = self.env['ir.sequence'].search([('id','=',setup.fosa_account_nos.id)])\n\t\tself.name = sequence.next_by_id(sequence.id, context = 
None)\n\n\nclass account_types(models.Model):\n\t_name = 'sacco.fosa.account.types'\n\n\tname = fields.Char()\n\tdescription = fields.Char()\n\tdefault_debit = fields.Many2one('account.account')\n\tdefault_credit = fields.Many2one('account.account')\n\tminimum_balance = fields.Float()\n\nclass teller_transactions(models.Model):\n\t_name = 'sacco.fosa.teller.treasury.transactions'\n\n\tname = fields.Char()\n\tdescription = fields.Text()\n\tdate = fields.Date()\n\ttransaction_type = fields.Selection([('issue',\"Issue to Teller\"),('return',\"Return to Treasury\"),('inter',\"Inter-Teller Transfer\")])\n\ttransfer_from = fields.Many2one('res.bank')\n\ttransfer_from_balance = fields.Float()\n\ttransfer_to = fields.Many2one('transfer_to')\n\ttransfer_to_balance = fields.Float()\n\tcashier = fields.Many2one('res.users', default = lambda self:self.env.user)\n\tamount = fields.Float()\n\n\t@api.one\n\t@api.onchange('name')\n\tdef get_sequence(self):\n\t\tsetup = self.env['sacco.setup'].search([('id','=',1)])\n\t\tsequence = self.env['ir.sequence'].search([('id','=',setup.treasury_teller_transction_nos.id)])\n\t\tself.name = sequence.next_by_id(sequence.id, context = None)\n\nclass fosa_transactions(models.Model):\n\t_name = 'sacco.fosa.teller.transactions'\n\n\tname = fields.Char()\n\tdescription = fields.Text()\n\tdate = fields.Date()\n\t#member = fields.Many2one('sacco.member')\n\tamount = fields.Float()\n\tline_ids = fields.One2many('sacco.fosa.teller.transaction.lines','header_id')\n\n\t@api.one\n\t@api.onchange('name')\n\tdef get_sequence(self):\n\t\tsetup = self.env['sacco.setup'].search([('id','=',1)])\n\t\tsequence = self.env['ir.sequence'].search([('id','=',setup.teller_transaction_nos.id)])\n\t\tself.name = sequence.next_by_id(sequence.id, context = None)\n\nclass fosa_transaction_lines(models.Model):\n\t_name = 'sacco.fosa.teller.transaction.lines'\n\n\theader_id = fields.Many2one('sacco.fosa.teller.transactions')\n\ttransaction_type = 
fields.Selection([('receipt',\"Receipt\"),('payment',\"Payment\")])\n\tmember = fields.Many2one('sacco.member')\n\taccount = fields.Many2one('sacco.fosa.accounts')\n\tamount = fields.Float()\n\nclass fosa_setup(models.Model):\n\t_inherit = 'sacco.setup'\n\n\ttreasury_teller_transction_nos = fields.Many2one('ir.sequence')\n\tteller_transaction_nos = fields.Many2one('ir.sequence')\n\tfosa_account_nos = fields.Many2one('ir.sequence')\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"108182468","text":"import numpy as np\nfrom matplotlib import cm\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as mtransforms\nfrom matplotlib.mlab import griddata\nimport pprint\n\n\nl1=np.loadtxt('circular-light-matr-elem.dat')\nN=int(np.sqrt((len(l1)))-1)\n\nlData=[]\nfor i in range(1,N+1):\n\tfor j in range(1,N+1):\n\t\ter=l1[(i)*(N+1)+j,1]\n\t\tlData.append(er)\nlData=np.array(lData)\nlData=lData.reshape(N,N)\nx=np.linspace(-1*N,1*N,4*N)\ny=np.linspace(-1*N,1*N,4*N)\n\nZ=lData\nZ=np.append(Z,Z,axis=0)\nZ=np.append(Z,Z,axis=0)\nZ=np.append(Z,Z,axis=1)\nZ=np.append(Z,Z,axis=1)\n\nf=interpolate.interp2d(x,y,Z,kind='cubic')\nZ=f(x,y)\n\nfx=f.x\nfy=f.y\nnewf=np.zeros((4*N,4*N))\nfor i in range(len(fx)):\n\tfor j in range(len(fy)):\n\t\tnewf[i,j]=f(fx[i], -0.5*fx[i]+0.866025*fy[j])\n\n\n\nx=np.linspace(-1*N,1*N,4*N)\ny=np.linspace(-1*N,1*N,4*N)\nf=interpolate.interp2d(x,y,newf,kind='cubic')\n\n\n\nx=np.linspace(-0.66667,0.66667,4*N)\ny=np.linspace(-0.66667,0.66667,4*N)\nx=x*50\ny=y*50\nZ=f(x,y)\n\nfig, ax=plt.subplots()\n\n#im=ax.imshow(Z,interpolation='spline16',origin='lower',extent=[-1.1547,1.1547,-1.1547,1.1547],cmap='jet')\nx=np.linspace(-0.66667,0.66667,4*N)\ny=np.linspace(-0.66667,0.66667,4*N)\nnewZ=np.zeros((4*N,4*N))\nnew_x=[]\nnew_y=[]\nfor i in range(len(x)):\n\tif -0.66667 < x[i] and x[i] 
< 0.66667:\n\t\tfor j in range(len(y)):\n\t\t\tif -0.57735 < y[j] and y[j] < 0.57735:\n\n\t\t\t\tif np.abs(y[j])<(-1.73205*np.abs(x[i])+1.1547):\n\n\t\t\t\t\tnewZ[j,i]=Z[i,j]\n\n\nnewZ=np.ma.masked_where(newZ ==0,newZ)\nnewZ=np.ma.array(newZ)\n#pprint.pprint(msk_Z)\n#Z=Z.reshape((345,300))\n\nim=ax.imshow(newZ,interpolation='spline16',origin='upper',extent=[-1.1547,1.1547,-1.1547,1.1547],cmap='jet')\n\n#im=ax.pcolormesh(x, y, z)\ntransform=mtransforms.Affine2D().rotate_deg(90)\ntrans_data = transform + ax.transData\n#im=ax.contourf(x, y, Z, 8, alpha=.75, cmap='jet')\n\n#im.set_transform(trans_data)\n\nY, X = np.ogrid[-1:1:500j, -1.141:1.141:500j]\nax.contour(X.ravel(),Y.ravel(),abs(Y)<=-1.73205*np.abs(X)+1.97,colors='black',linewidth=1.2,interpolation='spline')\nax.hlines(.9903,-0.5668,0.5668,linewidth=2.)\nax.hlines(-.9903,-0.5668,0.5668,linewidth=2.)\n\nplt.axis(\"off\")\nplt.show()\n","sub_path":"MtrixElems_stdrd.py","file_name":"MtrixElems_stdrd.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"331961225","text":"# ------------------------------ Базы данных -----------------------------\n# Импортируем библиотеку, соответствующую типу нашей базы данных\nimport sqlite3\n\nfrom icecream import ic\n\n# Создаем соединение с нашей базой данных\n# В нашем примере у нас это просто файл базы\nwith sqlite3.connect(\"company.db3\") as conn:\n conn.row_factory = sqlite3.Row\n # Создаем курсор - это специальный объект который делает запросы и получает их результаты\n cursor = conn.cursor()\n\n # ТУТ БУДЕТ НАШ КОД РАБОТЫ С БАЗОЙ ДАННЫХ\n # КОД ДАЛЬНЕЙШИХ ПРИМЕРОВ ВСТАВЛЯТЬ В ЭТО МЕСТО\n\n cursor.execute(\n \"\"\"\n create table if not exists Terminal (\n id INTEGER primary key,\n title TEXT,\n configuration TEXT\n );\n \"\"\"\n )\n #\n try:\n cursor.execute(\n \"\"\"\n insert into Terminal (id, title, configuration)\n VALUES (?, ?, ?);\"\"\",\n (13, \"Terminal Bingo\", 
'{\"simle\":\"nothing\"}'),\n )\n except sqlite3.IntegrityError:\n pass\n #\n cursor.execute(\"SELECT * FROM Terminal where id = :id\", {\"id\": 13})\n #\n z = cursor.fetchone()\n while z:\n print(z)\n ic(z[0])\n ic(z[1])\n ic(z[2])\n z = cursor.fetchone()\n\n cursor.execute(\"SELECT * FROM Terminal where id = ?\", (13,))\n for row in cursor:\n ic(row[\"id\"])\n ic(row[\"title\"])\n ic(row[\"configuration\"])\n #\n items = [(1, \"aaa\", \"config1\"), (100, \"bbb\", \"config2\"), (444, \"ttt\", \"config3\")]\n try:\n cursor.executemany(\n \"\"\"\n insert into Terminal (id, title, configuration)\n VALUES (?, ?, ?);\"\"\",\n items,\n )\n except sqlite3.IntegrityError:\n pass\n\n cursor.execute(\"SELECT * FROM Terminal\")\n for row in cursor:\n for value in row:\n ic(value)\n\n print()\n #\n cursor.executescript(\n \"\"\"\n create table if not exists Terminal (\n id INTEGER primary key,\n title TEXT,\n configuration TEXT\n );\n\n\n SELECT * FROM Terminal\n \"\"\"\n )\n\n term_id = \"13 OR 1 = 1\"\n cursor.execute(f\"SELECT * FROM Terminal where id = {term_id}\")\n # cursor.execute(\"SELECT * FROM Terminal where id = ?\", (term_id,))\n for row in cursor:\n ic(row)\n\n# Не забываем закрыть соединение с базой данных\n# conn.close()\n","sub_path":"lesson_3/codes_3/01_db.py","file_name":"01_db.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"641685350","text":"#coding:utf-8\nimport urlparse\nfrom proto.constant_pb2 import *\nfrom util.protoutil import *\nfrom datetime import *\nimport time\nimport json\n\n\ndef set_normal(pb,item, fields = None, not_fields = None):\n copy_simple_field(pb,item,fields,not_fields)\n\ndef set_brief(pb,brief):\n set_normal(pb,brief, fields=['avatar','gold','nick'])\n pb.uid = brief.id\n pb.room_card = 3\n\ndef set_player_for_hall(pb,player):\n pb.uid = player.id\n pb.nick = player.nick\n pb.avatar = player.avatar\n pb.seat = int(player.seat)\n 
pb.ip = '-1'\n pb.is_connect = -1\n pb.score = -1\n\ndef set_announcement(pb,announcement):\n set_normal(pb,announcement)\n\ndef set_game_setting(pb,item, items):\n set_normal(pb,item, fields=['id','label','desc','sort'])\n pb.conf = json.dumps(set_game_setting_json(item, items))\n\ndef set_game_setting_json(item, items, is_children = False):\n conf = []\n for level2_item in items:\n if level2_item.level == 2 and item.id == level2_item.pid:\n conf_item = {\n \"id\":level2_item.id,\n \"label\":level2_item.label,\n \"required\":level2_item.required,\n \"key\":level2_item.key,\n \"sort\":level2_item.sort\n }\n conf_item_childs = []\n for level3_item in items:\n if level3_item.level == 3 and level2_item.id == level3_item.pid:\n conf_item_childs.append({\n 'id':level3_item.id,\n 'type':level3_item.type,\n 'label':level3_item.label,\n 'key':level3_item.key,\n 'default':level3_item.default,\n 'desc':level3_item.desc,\n 'sort':level3_item.sort,\n 'disabled':map(lambda x:int(x), level3_item.disabled.split(',') ) if level3_item.disabled != None and len(level3_item.disabled)>0 else [],\n 'enabled':map(lambda x:int(x), level3_item.enabled.split(',') ) if level3_item.enabled != None and len(level3_item.enabled)>0 else [],\n })\n\n conf_item['items'] = conf_item_childs\n conf.append(conf_item)\n return conf\n\ndef set_player(pb,user,user_gf,gifts = None):\n\n copy_simple_field(pb,user,not_fields = [\"birthday\",\"best\"])\n copy_simple_field(pb,user_gf)\n if user.birthday != None:\n pb.birthday = user.birthday.strftime('%Y-%m-%d')\n pb.uid = user.id\n\n if user_gf.best != None and user_gf.best.strip() != \"\":\n pks = user_gf.best.split(\",\")\n for pk in pks:\n pb_poker = pb.best.add()\n f,v = pk.split(\"-\")\n pb_poker.flower = int(f)\n pb_poker.value = int(v)\n\n if gifts != None:\n for gift in gifts:\n pb_gift = pb.gifts.add()\n copy_simple_field(pb_gift,gift)\n\n\n\ndef set_reward(pb,reward,reward_logs):\n copy_simple_field(pb,reward, not_fields = 
[\"is_daily\",\"params\"])\n pb.state = -1\n for item in reward_logs:\n if reward.id == item.task_id:\n pb.state = item.state\n break\n\ndef set_signs(pb, sign):\n copy_simple_field(pb,sign)\n\ndef set_shop_item(pb, shopitem, items):\n copy_simple_field(pb,shopitem,not_fields =[\"item_type\"])\n pb.item_type = SHOP_ITEM if shopitem.type == SHOP_ITEM else SHOP_GOLD\n if items != None:\n for im in items:\n if shopitem.item_id == im.id:\n pb.item.id = im.id\n pb.item.icon = im.icon\n pb.item.name = im.name\n pb.item.count = 1\n pb.item.description = im.description\n break;\n\ndef set_trades(pb,trade,seller):\n copy_simple_field(pb,trade, not_fields = [\"sell_time\",\"buyer\",\"buy_time\"])\n pb.type = SHOP_GOLD\n pb.seller_name = seller.nick\n\ndef set_bag_item(pb,user_item, items):\n for item in items:\n if user_item.item_id == item.id:\n pb.id = item.id\n pb.name = item.name\n pb.icon = item.icon\n pb.description = item.description\n pb.count = user_item.countof\n break;\n\ndef set_bag_gift(pb,user_gift, gifts):\n for gift in gifts:\n if user_gift.gift_id == gift.id:\n pb.id = gift.id\n pb.name = gift.name\n pb.icon = gift.icon\n pb.count = user_gift.countof\n break;\n\ndef set_mail(pb,mail):\n copy_simple_field(pb,mail)\n\ndef set_gifts_str(redis,pb,gifts):\n if gifts != None:\n for gift in gifts.split(','):\n conf = redis.hget('conf_gift', gift[0])\n pb_gift = pb.gifts.add()\n if conf == None:\n continue\n conf = json.loads(conf)\n pb_gift.id = conf['id']\n pb_gift.name = conf['name']\n pb_gift.icon = conf['icon']\n pb_gift.count = int(gift[2])\n\ndef set_friend_apply(pb, friend_apply, gifts = None):\n pb.id = friend_apply.id\n pb.apply_from = friend_apply.uid1\n pb.apply_from_nick = friend_apply.uid1_nick\n pb.to = friend_apply.uid2\n pb.time = int(time.mktime(time.strptime(friend_apply.apply_time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')))\n pb.message = friend_apply.message\n pb.apply_from_avatar = friend_apply.avatar\n if gifts != None:\n for gift in 
gifts:\n pb_gift = pb.gifts.add()\n copy_simple_field(pb_gift,gift)\n\ndef set_room_table(pb,uid,redis):\n rooms = redis.keys('room_users_*')\n for room in rooms:\n table_id = redis.hget(room,uid)\n if table_id != None:\n pb.table_id = int(table_id)\n pb.room_id = int(room[11:])\n return\n\n\n\ndef get_params(pb_params):\n params = urlparse.parse_qs(pb_params)\n for k in params.keys():\n params[k] = params[k][0]\n return params\n \"\"\"\n params = {}\n for pb_param in pb_params:\n params[pb_param.key] = pb_param.value\n return params\n \"\"\"\n\ndef set_proto_params(pb_params,params):\n if params == None or len(params) == 0:\n return\n for k,v in params.items():\n pb_param = pb_params.add()\n pb_param.key = k\n pb_param.value = str(v)\n\n\ndef set_proto_results(pb_results,results):\n if results == None or len(results) == 0:\n return\n for k,v in results.items():\n pb_param = pb_results.add()\n pb_param.key = k\n pb_param.value = str(v)","sub_path":"code/helper/protohelper.py","file_name":"protohelper.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"590792433","text":"# 27.4.2020\n# file contains functions used in EMM minim process\n\n## packages\nimport numpy as np\n# from scipy.optimize import fsolve\n# from scipy.special import j0, j1\nfrom scipy.optimize import minimize\nfrom scipy.optimize import least_squares\n\n\n## defining a function calculating the fluorescence signal for given parameters\n\ndef get_hist_fit(fotkor, voltages, t_res, t_measure, background_photocounts, hist_sigma, max_phi_unc = 0.3, phi0 = 1.1,\n sign_DeltaS = False):\n # function returns DeltaS_S_ratio, Delta_S_S_ratio_sigma, fot_phi, fot_phi_sigma, hist_sigma\n\n nu = (voltages[:,0] - voltages[:,1]) / (voltages[:,0] + voltages[:,1])\n # pomocne promene\n fotkor_shape = np.shape(fotkor)\n # casova skala foton-kor. 
dat\n t_scale = np.array(range(0, fotkor_shape[0])) * t_res\n\n# # ----- odecet pozadi\n# bg_ph_sum = background_photocounts * t_measure # celkovy pocet fotonu pozadi za cas mereni\n# last_bin_ratio = fotkor[fotkor_shape[0] - 2, :] / fotkor[fotkor_shape[0] - 3, :] # pomer mezi county v poslednim/predposlednim binu\n# bg_ph_per_bin = bg_ph_sum / (fotkor_shape[0] - 2 + last_bin_ratio)\n\n# fotkor[:fotkor_shape[0] - 2, :] = fotkor[:fotkor_shape[0] - 2, :] - bg_ph_per_bin\n# fotkor[fotkor_shape[0] - 2, :] = fotkor[fotkor_shape[0] - 2, :] - bg_ph_per_bin * last_bin_ratio\n# #-----------------------------\n\n #----- pro vice ruznych casu mereni a pozadi\n # ----- odecet pozadi\n for i in range(fotkor_shape[1]):\n bg_ph_sum = background_photocounts[i] * t_measure[i] # celkovy pocet fotonu pozadi za cas mereni\n last_bin_ratio = fotkor[fotkor_shape[0] - 2, i] / fotkor[fotkor_shape[0] - 3, i] # pomer mezi county v poslednim/predposlednim binu\n bg_ph_per_bin = bg_ph_sum / (fotkor_shape[0] - 2 + last_bin_ratio)\n\n fotkor[:fotkor_shape[0] - 2, i] = fotkor[:fotkor_shape[0] - 2, i] - bg_ph_per_bin\n fotkor[fotkor_shape[0] - 2, i] = fotkor[fotkor_shape[0] - 2, i] - bg_ph_per_bin * last_bin_ratio\n #-----------------------------\n\n #------- odhad RF frekvence i s nejistotou\n # odhad periody triggeru\n T_trig = (fotkor[fotkor_shape[0] - 2, :] / fotkor[fotkor_shape[0] - 3, :]) * t_res + t_scale[fotkor_shape[0] - 2]\n\n T_trig_sigma = t_res\n # frekvence buzeni pasti\n\n drive_freq = 1 / T_trig\n drive_freq_sigma = 1 / T_trig ** 2 * T_trig_sigma\n Omega = 2 * np.pi * np.mean( drive_freq )\n Omega_sigma = 2 * np.pi * np.sqrt( sum( drive_freq_sigma**2) / fotkor_shape[1] )\n\n ####### definice fce, ktera vraci likehood, pomoci ktereho budu fitovat\n\n def likehood_transform(x, Omega, S, time_step, sigma):\n # definuju funkci vracejici logaritmus pravdepodobnosti, ze z distrubuce dane sinusovkou, co fituji vyberu pozorovane body\n # predpokladam, ze kazdy bod je normalne rozdelen 
kolem sinusovky\n\n len_S = len(S)\n # print(len_S)\n S_fit = x[0] * (1 + x[1] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2]))\n\n sum_term = ((S - S_fit) / sigma) ** 2\n log_term = np.log(np.ones(len_S) * sigma * np.sqrt(2 * np.pi))\n\n return (0.5 * np.sum(sum_term) + np.sum(log_term)) # vraci -log( likehood)\n\n def likehood_transform_jac(x, Omega, S, time_step, sigma):\n # vektor jacob. likehood fce\n len_S = len(S)\n sum_term0 = 2 / sigma ** 2 * (1 + x[1] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2])) * (\n x[0] * x[1] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2]) + x[0] - S)\n\n sum_term1 = 2 / sigma ** 2 * x[0] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2]) * (\n x[0] * x[1] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2]) + x[0] - S)\n\n sum_term2 = (-2 / sigma ** 2) * x[0] * x[1] * (\n x[0] * x[1] * np.cos(Omega * time_step * np.arange(0, len_S) + x[2]) + x[0] - S) * np.sin(\n Omega * time_step * np.arange(0, len_S) + x[2])\n\n return (0.5 * np.array([np.sum(sum_term0), np.sum(sum_term1), np.sum(sum_term2)]))\n ############## fitovani\n\n # cyklus fitujici vsechny foton-korelacni data\n\n x = np.zeros((3, fotkor_shape[1]))\n DeltaS_S_ratio = []\n sigmas = np.zeros((3, fotkor_shape[1]))\n Delta_S_S_ratio_sigma = []\n fot_phi = []\n fot_phi_sigma = []\n\n\n\n for i in range(fotkor_shape[1]):\n\n ##############------------- cast kodu maximalizujici likehood\n # -----\n # podminky urcujici prijimuti reseni\n # max_phi_unc = 0.3\n # phi0 = 1.1\n\n # --- zde budu zkouset postupne ruzne pocatecni body tak, aby minimalizace vybrala globalni minimum\n x0 = [fotkor[:fotkor_shape[0] - 2, i].mean(), 0.5 * (fotkor[:fotkor_shape[0] - 2, i].max()\n - fotkor[:fotkor_shape[0] - 2,\n i].min()) / fotkor[:fotkor_shape[0] - 2,\n i].mean(), phi0]\n # x0 = [fotkor[:fotkor_shape[0]-2,i].mean(), 0.5* ( fotkor[:fotkor_shape[0]-2,i].max()\n # -fotkor[:fotkor_shape[0]-2,i].min() )/fotkor[:fotkor_shape[0]-2,i].mean(), phi0, 100]\n # 
fit = minimize(likehood_transform, x0, args=(Omega, fotkor[:fotkor_shape[0]-2,i], t_res, hist_sigma[i] ), tol=1e-10 )\n fit = minimize(likehood_transform, x0, args=(Omega, fotkor[:fotkor_shape[0] - 2, i], t_res, hist_sigma[i]),\n tol=1e-10,\n jac=likehood_transform_jac)\n # fit = minimize(likehood_transform_sigma, x0, args=(Omega, fotkor[:fotkor_shape[0]-2,i], t_res), tol=1e-10)\n # print(fit)\n # print('\\n')\n if (np.sqrt(fit.hess_inv[2, 2]) > max_phi_unc) or (fit.x[1] < 0):\n x0 = [fotkor[:fotkor_shape[0] - 2, i].mean(), 0.5 * (fotkor[:fotkor_shape[0] - 2, i].max()\n - fotkor[:fotkor_shape[0] - 2,\n i].min()) / fotkor[\n :fotkor_shape[0] - 2, i].mean(),\n -phi0]\n # x0 = [fotkor[:fotkor_shape[0]-2,i].mean(), 0.5* ( fotkor[:fotkor_shape[0]-2,i].max()\n # -fotkor[:fotkor_shape[0]-2,i].min() )/fotkor[:fotkor_shape[0]-2,i].mean(), -phi0, 100]\n fit = minimize(likehood_transform, x0,\n args=(Omega, fotkor[:fotkor_shape[0] - 2, i], t_res, hist_sigma[i]), tol=1e-10,\n jac=likehood_transform_jac)\n # fit = minimize(likehood_transform_sigma, x0, args=(Omega, fotkor[:fotkor_shape[0]-2,i], t_res), tol=1e-10)\n # print(fit)\n # print('\\n')\n\n # ---- odhad nejistot parametru----\n # C = fit.hess_inv # variancni-kovariancni matice\n C = fit.hess_inv\n # -------------------------------\n x[:, i] = fit.x\n sigmas[:, i] = np.sqrt(np.diagonal(C))\n DeltaS_S_ratio.append(x[1, i])\n\n # ---- faze fot-kor signalu\n fot_phi.append(np.angle(DeltaS_S_ratio[i] * np.exp(1j * x[2, i])))\n\n # ---- sigma delta s ku s\n Delta_S_S_ratio_sigma.append(np.sqrt(C[1, 1]))\n\n # ---- sigma fot_phi\n fot_phi_sigma.append(np.sqrt(C[2, 2]))\n\n # S_0 = x[0, :] # parametry S_0\n # DeltaS = x[1, :] * S_0 # delta S\n DeltaS_S_ratio = np.array(DeltaS_S_ratio)\n # doplneni zamenek, pokud skoci faze fotkor signalu o vice nez pi/2\n if sign_DeltaS:\n signchange_ind = np.argwhere(np.abs( np.angle( np.exp(1j*np.array(fot_phi) ) * np.exp(-1j * fot_phi[0]) ) )> np.pi/2)\n try:\n DeltaS_S_ratio[signchange_ind] 
= - DeltaS_S_ratio[signchange_ind]\n except:\n pass\n\n return( DeltaS_S_ratio, Delta_S_S_ratio_sigma, fot_phi, fot_phi_sigma, x, Omega, Omega_sigma, nu, t_scale )\n## fce vracejici hodnoty pro vykresleni fitu histogramu\ndef get_hist_fit_values(t_scale, x, Omega):\n # input: casova osa histogramu, x=[S_0, DeltaS_S, fot_phi], Omega\n def fit_func(x, Omega, time_points):\n return x[0]*( 1 + x[1] * np.cos(Omega * time_points + x[2]) )\n\n time_fit = np.linspace(0, t_scale.max(), 200)\n fotkor_fit = fit_func(x, Omega, time_fit)\n return(time_fit, fotkor_fit)\n\n## fce vracejici koeficienty fitu zavislosti deltaS_S_ratio na \\nu, prusecik s nulou, interval napeti pro dalsi iteraci\ndef get_DeltaS_S_nu_fit(DeltaS_S_ratio, nu, U_avg=500, iter_coef=0.25):\n # input: amplitudy modulace, prislusejici nu, hodnota napeti na axialnich el. kolem ktere hledam interval,\n# koef. pro dalsi iteraci napeti\n\n def MM_resid(x, deltaS_S, nu):\n return (deltaS_S - x[0] - x[1] * nu)\n\n def MM_line(x, nu):\n return (x[0] + x[1] * nu)\n\n x0 = [0.06, +0.1]\n fit = least_squares(MM_resid, x0, args=(DeltaS_S_ratio, nu),\n ftol=1e-10, xtol=1e-10)\n linfit = fit.x\n\n nu_MM_zero = - linfit[0] / linfit[1] # expected MM zero\n\n #--- vypocet napeti, kde budu hledat v pristi iteraci\n # naleznu interval ve kterem lezi minimum a urcim napeti, pro ktere bych mel merit v dalsi iteraci\n min_inverv_leng = np.abs(min(nu) - nu_MM_zero) * iter_coef\n min_interv = np.array([nu_MM_zero - min_inverv_leng, nu_MM_zero + min_inverv_leng])\n\n # prepocet na napeti\n U_5 = U_avg * (1 + min_interv)\n U_6 = U_avg * (1 - min_interv)\n\n # plot fitu\n nu_fit = np.linspace( nu_MM_zero - min_inverv_leng/iter_coef, nu_MM_zero + min_inverv_leng/iter_coef, 200)\n DeltaS_S_fit_nu = MM_line(linfit, nu_fit)\n\n return( U_5, U_6, min_interv, linfit, nu_fit, DeltaS_S_fit_nu )\n##\ndef get_DeltaS_S_xz_fit(DeltaS_S_ratio, fot_phi, U_komp_x, DeltaS_S_min_z, fot_phi_min_z, gamma, iter_coef=0.25):\n # input: modulace, napeti na 
kompenzacni el., iteracni koef.\n # pouze pro dva body\n # funkce vraci napeti pro dalsi iteraci, fit\n\n def MM_resid(x, deltaS_S, nu):\n return (deltaS_S - x[0] - x[1] * nu)\n\n def MM_line(x, nu):\n return (x[0] + x[1] * nu)\n\n x0 = [0.06, +0.1]\n\n\n fit = least_squares(MM_resid, x0, args=(DeltaS_S_ratio, U_komp_x),\n ftol=1e-10, xtol=1e-10)\n linfit = fit.x\n\n # ----- hledani bodu s odpovidajici pozadovanou hodnotou modulace a faze\n # gamma = 45 / 180 * np.pi # uhel mezi smerem z a svazkem Sxz\n\n DeltaS_S_ratio_xz_teor = DeltaS_S_min_z * np.cos(gamma)\n fot_phi_xz_teor = fot_phi_min_z\n\n U_komp_x_mozne_res = np.array(\n [(DeltaS_S_ratio_xz_teor - linfit[0]) / linfit[1], (-DeltaS_S_ratio_xz_teor - linfit[0]) / linfit[1]])\n\n sign_of_points_res = np.sign(MM_line(linfit, U_komp_x_mozne_res))\n sign_of_data_points = np.sign(MM_line(linfit, U_komp_x)) # zde jsem zjistil, na ktere strane se nachazi hledany bod\n\n # ---- obema moznym resenim priradim komplexni cisla podle toho, na ktere strane od nuly jsou\n # pak spocitam rozdil mezi timto prirazenym uhlem a pozadovanym uhlem a vyberu z moznych reseni nejlepsi schodu\n\n phase_dif = np.abs(\n np.angle(sign_of_data_points * sign_of_points_res * np.exp(1j *( np.array(fot_phi) - fot_phi_xz_teor) )) )\n\n # rozdil fazi mezi moznym resenim a pozadovanou fazi\n U_komp_x_res = U_komp_x_mozne_res[np.argmin(phase_dif)]\n\n # vyberu interval, pro dalsi iteraci\n U_komp_x_interval = [U_komp_x_res - np.abs(U_komp_x_res - U_komp_x).min() * iter_coef,\n U_komp_x_res + np.abs(U_komp_x_res - U_komp_x).min() * iter_coef]\n\n # fit ke vraceni\n U_komp_x_fit = np.linspace(min(U_komp_x_mozne_res) - np.abs(U_komp_x - U_komp_x_res).max(),\n max(U_komp_x_mozne_res) + np.abs(U_komp_x - U_komp_x_res).max(), 200)\n DeltaS_S_fit = MM_line(linfit, U_komp_x_fit)\n\n return( U_komp_x_interval, linfit, U_komp_x_fit, DeltaS_S_fit, DeltaS_S_ratio_xz_teor, fot_phi_xz_teor )\n##\ndef get_DeltaS_S_xy_fit(DeltaS_S_ratio, fot_phi, U_komp_y, 
DeltaS_S_min_z,\n DeltaS_S_min_xz, fot_phi_min_z, fot_phi_min_xz, gamma, epsilon, iter_coef=0.25):\n # input: modulace, napeti na kompenzacni el., iteracni koef.\n # pouze pro dva body\n # funkce vraci napeti pro dalsi iteraci, fit\n\n def MM_resid(x, deltaS_S, nu):\n return (deltaS_S - x[0] - x[1] * nu)\n\n def MM_line(x, nu):\n return (x[0] + x[1] * nu)\n\n x0 = [0.06, +0.1]\n\n\n fit = least_squares(MM_resid, x0, args=(DeltaS_S_ratio, U_komp_y),\n ftol=1e-10, xtol=1e-10)\n linfit = fit.x\n\n # ----- hledani bodu s odpovidajici pozadovanou hodnotou modulace a faze\n # gamma = 45 / 180 * np.pi # uhel mezi smerem z a svazkem Sxz\n DeltaS_S_ratio_xy_teor_complex = np.sin(epsilon)/np.sin(gamma) * ( DeltaS_S_min_xz*np.exp(1j*fot_phi_min_xz) -\n DeltaS_S_min_z*np.cos(gamma)*np.exp(1j*fot_phi_min_z))\n DeltaS_S_ratio_xy_teor = np.abs(DeltaS_S_ratio_xy_teor_complex)\n fot_phi_xy_teor = np.angle(DeltaS_S_ratio_xy_teor_complex)\n\n U_komp_y_mozne_res = np.array(\n [(DeltaS_S_ratio_xy_teor - linfit[0]) / linfit[1], (-DeltaS_S_ratio_xy_teor - linfit[0]) / linfit[1]])\n\n sign_of_points_res = np.sign(MM_line(linfit, U_komp_y_mozne_res))\n sign_of_data_points = np.sign(MM_line(linfit, U_komp_y)) # zde jsem zjistil, na ktere strane se nachazi hledany bod\n\n # ---- obema moznym resenim priradim komplexni cisla podle toho, na ktere strane od nuly jsou\n # pak spocitam rozdil mezi timto prirazenym uhlem a pozadovanym uhlem a vyberu z moznych reseni nejlepsi schodu\n\n phase_dif = np.abs(\n np.angle(sign_of_data_points * sign_of_points_res * np.exp(1j *( np.array(fot_phi) - fot_phi_xy_teor) )) )\n\n # rozdil fazi mezi moznym resenim a pozadovanou fazi\n U_komp_y_res = U_komp_y_mozne_res[np.argmin(phase_dif)]\n\n # vyberu interval, pro dalsi iteraci\n U_komp_y_interval = [U_komp_y_res - np.abs(U_komp_y_res - U_komp_y).min() * iter_coef,\n U_komp_y_res + np.abs(U_komp_y_res - U_komp_y).min() * iter_coef]\n\n # fit ke vraceni\n U_komp_y_fit = np.linspace(min(U_komp_y_mozne_res) - 
np.abs(U_komp_y - U_komp_y_res).max(),\n max(U_komp_y_mozne_res) + np.abs(U_komp_y - U_komp_y_res).max(), 200)\n DeltaS_S_fit = MM_line(linfit, U_komp_y_fit)\n\n return( U_komp_y_interval, linfit, U_komp_y_fit, DeltaS_S_fit, DeltaS_S_ratio_xy_teor, fot_phi_xy_teor )\n\n## funkce vracejici bety v ortogonalni bazi xyz\n# muze byt dosazeno i DeltaS_S\ndef get_beta_xyz_phi_xyz(variables):\n # input: beta = [beta_z, beta_xz, beta_xy] variables = [beta, fot_phi, gamma, epsilon]\n # fot_phi = [phi_z, phi_xz, phi_xy]\n\n # output: beta_xyz = [beta_x, beta_y, beta_z]\n beta = variables[:3]\n fot_phi = variables[3:6]\n gamma = variables[6]\n epsilon = variables[7]\n\n beta_x_comp = 1 / np.sin(gamma) * (\n beta[1] * np.exp(1j * fot_phi[1]) - beta[0] * np.cos(gamma) * np.exp(1j * fot_phi[0]))\n beta_y_comp = 1 / np.cos(epsilon) * (np.sin(epsilon) * beta_x_comp - beta[2] * np.exp(1j * fot_phi[2]))\n\n return (np.array([np.abs(beta_x_comp), np.abs(beta_y_comp)]),\n np.array([np.angle(beta_x_comp), np.angle(beta_y_comp)])\n )\n\n#--------------- numericke reseni deltaS/S -> beta\n\nfrom scipy.optimize import fsolve\nfrom scipy.special import j0, j1\n\ndef get_A(decay_rate, detun):\n # returns driven oscillator amplitude in point given by detun param.\n return 1 / 2 * (decay_rate / 2 - detun * 1j) / (detun ** 2 + (decay_rate / 2) ** 2)\n\ndef fl_signal(beta, laser_detun, Omega, decay_rate):\n # function calculates deltaS/S0 photon-correlation signal\n # input: beta, laser detuning, RF drive freq, decay rate\n A_minus = get_A(decay_rate, laser_detun - Omega)\n A_plus = get_A(decay_rate, laser_detun + Omega)\n A = get_A(decay_rate, laser_detun)\n\n numer = 2 * j0(beta) * j1(beta) * np.abs(np.conj(A) * A_plus - A * np.conj(A_minus))\n denom = j0(beta) ** 2 * np.abs(A) ** 2 + j1(beta) ** 2 * (np.abs(A_plus) ** 2 + np.abs(A_minus) ** 2)\n\n return numer / denom\n\ndef get_beta(Omega, decay_rate, laser_detun, norm_mod_amp):\n # function calculates the corresponding beta for given known 
parameters\n # input: drive frequency, decay_rate, laser detuning, normalized modulation amplitude given by ph_corr_signal\n # output: float value of beta\n def root_func(beta, laser_detun, Omega, decay_rate, norm_mod_amp):\n return fl_signal(beta, laser_detun, Omega, decay_rate) - norm_mod_amp\n\n sol = fsolve(root_func, np.array([0.01]), args=(laser_detun, Omega, decay_rate, norm_mod_amp), full_output=1)\n return float(sol[0])\n\n\n#-------------------------------------","sub_path":"micromotion_measurements/EMM_min_func.py","file_name":"EMM_min_func.py","file_ext":"py","file_size_in_byte":16861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"365259168","text":"#\n# Copyright (c) 2019 Nordic Semiconductor ASA\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# 3. Neither the name of Nordic Semiconductor ASA nor the names of other\n# contributors to this software may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# 4. This software must only be used in or with a processor manufactured by Nordic\n# Semiconductor ASA, or in or with a processor manufactured by a third party that\n# is used in combination with a processor manufactured by Nordic Semiconductor.\n#\n# 5. 
Any software provided in binary or object form under this license must not be\n# reverse engineered, decompiled, modified and/or disassembled.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\nfrom driver_setup import Settings, setup_adapter\nimport logging\nlogger = logging.getLogger(__name__)\nimport random\nimport string\nimport time\nimport unittest\nfrom queue import Queue\nfrom threading import Condition, Thread\n\nimport xmlrunner\nfrom pc_ble_driver_py.ble_driver import (\n BLEAdvData,\n BLEConfig,\n BLEConfigConnGatt,\n BLEDriver,\n BLEEnableParams,\n BLEGapIOCaps,\n BLEGapSecKDist,\n BLEGapSecParams,\n BLEGapSecStatus,\n driver,\n util,\n)\nfrom pc_ble_driver_py.observers import BLEAdapterObserver, BLEDriverObserver\n\n\n\npasskeyQueue = Queue()\nauthStatusQueue = Queue()\n\n\nclass Central(BLEDriverObserver, BLEAdapterObserver):\n def __init__(self, adapter):\n self.adapter = adapter\n logger.info(\"Central adapter is %d\", self.adapter.driver.rpc_adapter.internal)\n self.conn_q = Queue()\n self.adapter.observer_register(self)\n self.adapter.driver.observer_register(self)\n self.conn_handle = None\n self.connecting = False\n\n def start(self, connect_with):\n self.connect_with = connect_with\n self.connecting = False\n 
logger.info(\"scan_start, trying to find %s\", self.connect_with)\n self.adapter.driver.ble_gap_scan_start()\n self.conn_handle = self.conn_q.get(timeout=10)\n Thread(\n target=self.adapter.authenticate,\n args=(self.conn_handle, None),\n kwargs={\"bond\": True, \"mitm\": True, \"io_caps\": BLEGapIOCaps.keyboard_only},\n ).start()\n\n def stop(self):\n self.connecting = False\n\n if self.conn_handle:\n self.adapter.driver.ble_gap_disconnect(self.conn_handle)\n\n def on_gap_evt_adv_report(\n self, ble_driver, conn_handle, peer_addr, rssi, adv_type, adv_data\n ):\n if BLEAdvData.Types.complete_local_name in adv_data.records:\n dev_name_list = adv_data.records[BLEAdvData.Types.complete_local_name]\n elif BLEAdvData.Types.short_local_name in adv_data.records:\n dev_name_list = adv_data.records[BLEAdvData.Types.short_local_name]\n else:\n return\n\n dev_name = \"\".join(chr(e) for e in dev_name_list)\n\n if dev_name == self.connect_with and self.connecting == False:\n self.connecting = True\n address_string = \"\".join(\"{0:02X}\".format(b) for b in peer_addr.addr)\n logger.info(\n \"Trying to connect to peripheral advertising as %s, address: 0x%s\",\n dev_name,\n address_string,\n )\n\n self.adapter.connect(peer_addr, tag=1)\n\n def on_gap_evt_connected(\n self, ble_driver, conn_handle, peer_addr, role, conn_params\n ):\n self.conn_q.put(conn_handle)\n\n def on_gap_evt_auth_key_request(self, ble_driver, conn_handle, **kwargs):\n passkey = passkeyQueue.get(timeout=10)\n pk = util.list_to_uint8_array(passkey)\n\n driver.sd_ble_gap_auth_key_reply(\n ble_driver.rpc_adapter,\n conn_handle,\n kwargs[\"key_type\"],\n pk.cast(),\n )\n\n\nclass Peripheral(BLEDriverObserver, BLEAdapterObserver):\n def __init__(self, adapter):\n self.adapter = adapter\n logger.info(\n \"Peripheral adapter is %d\", self.adapter.driver.rpc_adapter.internal\n )\n self.conn_q = Queue()\n self.adapter.observer_register(self)\n self.adapter.driver.observer_register(self)\n\n def start(self, 
adv_name):\n adv_data = BLEAdvData(complete_local_name=adv_name)\n self.adapter.driver.ble_gap_adv_data_set(adv_data)\n self.adapter.driver.ble_gap_adv_start()\n\n def on_gap_evt_connected(\n self, ble_driver, conn_handle, peer_addr, role, conn_params\n ):\n self.conn_q.put(conn_handle)\n\n def on_gap_evt_sec_params_request(self, ble_driver, conn_handle, **kwargs):\n sec_params = BLEGapSecParams.from_c(kwargs[\"peer_params\"])\n\n sec_params.io_caps = BLEGapIOCaps.display_only\n sec_params.min_key_size = 7\n sec_params.kdist_own = BLEGapSecKDist(False, False, False, False)\n sec_params.kdist_peer = BLEGapSecKDist(False, False, False, False)\n\n self.adapter.driver.ble_gap_sec_params_reply(\n conn_handle, BLEGapSecStatus.success, sec_params=sec_params\n )\n\n def on_gap_evt_passkey_display(self, ble_driver, conn_handle, passkey):\n passkeyQueue.put(passkey)\n\n def on_gap_evt_auth_status(\n self,\n ble_driver,\n conn_handle,\n error_src,\n bonded,\n sm1_levels,\n sm2_levels,\n kdist_own,\n kdist_peer,\n auth_status,\n ):\n authStatusQueue.put(auth_status)\n\n\nclass Passkey(unittest.TestCase):\n def setUp(self):\n settings = Settings.current()\n\n central = setup_adapter(\n settings.serial_ports[0],\n False,\n settings.baud_rate,\n settings.retransmission_interval,\n settings.response_timeout,\n settings.driver_log_level,\n )\n\n self.central = Central(central)\n\n peripheral = setup_adapter(\n settings.serial_ports[1],\n False,\n settings.baud_rate,\n settings.retransmission_interval,\n settings.response_timeout,\n settings.driver_log_level,\n )\n\n # Advertising name used by peripheral and central\n # to find peripheral and connect with it\n self.adv_name = \"\".join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(20)\n )\n self.peripheral = Peripheral(peripheral)\n\n def test_passkey(self):\n self.peripheral.start(self.adv_name)\n self.central.start(self.adv_name)\n authStatus = authStatusQueue.get(timeout=200)\n self.assertTrue(authStatus 
== BLEGapSecStatus.success)\n\n def tearDown(self):\n self.central.adapter.close()\n self.peripheral.adapter.close()\n\n\ndef test_suite():\n return unittest.TestLoader().loadTestsFromName(__name__)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=Settings.current().log_level,\n format=\"%(asctime)s [%(thread)d/%(threadName)s] %(message)s\",\n )\n unittest.main(\n testRunner=xmlrunner.XMLTestRunner(\n output=Settings.current().test_output_directory\n ),\n argv=Settings.clean_args(),\n )\n","sub_path":"tests/test_passkey.py","file_name":"test_passkey.py","file_ext":"py","file_size_in_byte":8371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"36496452","text":"import pytest\r\nfrom selenium import webdriver\r\nfrom pages.pages import MainPage\r\nfrom models.user import User\r\n\r\n\r\n@pytest.fixture()\r\ndef driver():\r\n driver = webdriver.Chrome()\r\n driver.implicitly_wait(5)\r\n yield driver\r\n driver.quit()\r\n\r\n\r\n@pytest.fixture(autouse=True)\r\ndef app(driver):\r\n base_url = \"http://127.0.0.1/oxwall/\"\r\n driver.get(base_url)\r\n return\r\n\r\n\r\n@pytest.fixture()\r\ndef logged_user(driver):\r\n user = User(username='admin', password='pass', real_name=\"Admin\")\r\n main_page = MainPage(driver)\r\n main_page.login_as(user)\r\n yield user\r\n main_page.logout()\r\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"173301915","text":"from typing import List\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n if max(nums) < 0:\n return max(nums)\n\n dp = [0] * (len(nums))\n dp[0] = max(dp[0], nums[0])\n\n for i in range(1, len(nums)):\n dp[i] = max(0, dp[i - 1] + nums[i])\n print(dp)\n return max(dp)\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n pre = 0\n res = float(\"-inf\")\n for num in 
nums:\n if pre > 0:\n res = max(res, pre + num)\n pre += num\n else:\n res = max(res, num)\n pre = num\n return res\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n current_sum = nums[0]\n res = nums[0]\n\n for i in range(1, len(nums)):\n current_sum = max(nums[i], current_sum + nums[i])\n res = max(res, current_sum)\n\n return res\n\n\nif __name__ == \"__main__\":\n # numbers = [-2,1,-3,4,-1,2,1,-5,4]\n numbers = [-2, 1]\n result = Solution().maxSubArray(numbers)\n print(result)\n","sub_path":"Leetcode/0053-Maximum-Subarray.py","file_name":"0053-Maximum-Subarray.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"306373712","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n这个脚本仅用来获取股票更多方面的信息\n\"\"\"\n\nimport random\nimport time\nimport datetime\nimport logging\nimport json\n\nfrom bs4 import BeautifulSoup\n\nfrom models import StockInfo\nfrom config import core_concept, company_survey, exchange_market, stock_value\nfrom logger import setup_logging\nfrom collector.collect_data_util import send_request\n\n\nquery_step = 100 # 每次查询数据库的步长,以防出现cursor超时的错误\n\n\ndef estimate_market(stock_number):\n market = ''\n for i in exchange_market:\n if stock_number[:2] in i.get('pattern'):\n market = i.get('market')\n break\n\n if not market:\n raise Exception('Wrong stock number %s' % stock_number)\n return market\n\n\ndef collect_company_survey(stock_info):\n query_id = estimate_market(stock_info.stock_number)+stock_info.stock_number\n\n company_survey_url = company_survey.format(query_id)\n retry = 5\n survey_table = ''\n\n while retry:\n try:\n survey_html = send_request(company_survey_url)\n survey_soup = BeautifulSoup(survey_html, 'lxml')\n survey_table = survey_soup.find('table', id='Table0').find_all('td')\n break\n except Exception:\n retry -= 1\n time.sleep(1)\n\n if not survey_soup or not survey_table:\n return\n\n 
stock_info.stock_name = survey_table[4].text.strip()\n stock_info.company_name_cn = survey_table[0].text.strip()\n stock_info.company_name_en = survey_table[1].text.strip()\n stock_info.used_name = survey_table[2].text.strip()\n stock_info.account_firm = survey_table[30].text.strip()\n stock_info.law_firm = survey_table[29].text.strip()\n stock_info.industry_involved = survey_table[10].text.strip()\n stock_info.business_scope = survey_table[32].text.strip()\n stock_info.company_introduce = survey_table[31].text.strip()\n stock_info.area = survey_table[23].text.strip()\n\n core_concept_url = core_concept.format(query_id)\n concept_html = send_request(core_concept_url)\n market_plate = BeautifulSoup(concept_html, 'lxml').find('div', class_='summary').find('p').text\\\n .replace(u'要点一:所属板块 ', '').replace(u'。', '').strip()\n stock_info.market_plate = market_plate\n\n if 'sh' in query_id:\n q_id = stock_info.stock_number + '1'\n elif 'sz' in query_id:\n q_id = stock_info.stock_number + '2'\n\n if q_id:\n stock_value_url = stock_value.format(q_id)\n try:\n res = send_request(stock_value_url)\n data = json.loads(res.replace('callback(', '').replace(')', ''))['Value']\n if data:\n circulated_value = int(data[45])\n total_value = int(data[46])\n stock_info.circulated_value = circulated_value\n stock_info.total_value = total_value\n except Exception as e:\n logging.error('Error when get %s value:%s' % (stock_info.stock_number, e))\n\n stock_info.update_time = datetime.datetime.now()\n stock_info.save()\n\n\ndef start_collect_detail():\n try:\n all_stocks = StockInfo.objects()\n except Exception as e:\n logging.error('Error when query StockInfo:' + str(e))\n raise e\n\n stocks_count = len(all_stocks)\n skip = 0\n\n while skip < stocks_count:\n try:\n stocks = StockInfo.objects().skip(skip).limit(query_step)\n except Exception as e:\n logging.error('Error when query skip %s StockInfo:%s' % (skip, e))\n stocks = []\n\n for i in stocks:\n try:\n collect_company_survey(i)\n 
except Exception as e:\n logging.error('Error when collect %s data: %s' % (i.stock_number, e))\n time.sleep(random.random())\n skip += query_step\n\n\nif __name__ == '__main__':\n setup_logging(__file__, logging.WARNING)\n logging.info('Start to collect stock detail info')\n start_collect_detail()\n logging.info('Collect stock detail info Success')\n","sub_path":"collector/collect_stock_detail_info.py","file_name":"collect_stock_detail_info.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"430135346","text":"from pyramid.response import Response\nfrom pyramid.httpexceptions import (\n HTTPFound,\n HTTPNotFound,\n )\n\nfrom pyramid.view import view_config\n\nfrom sqlalchemy.exc import DBAPIError\n\nfrom sqlalchemy import (\n asc,\n and_\n)\n\nfrom poab.helpers.fractions import (\n Fraction\n)\n\nfrom poab.helpers.timetools import (\n timediff\n)\n\nimport markdown\n\nfrom decimal import Decimal, ROUND_HALF_UP\n\nfrom poab.models import (\n DBSession,\n Log,\n Track,\n Trackpoint,\n Image,\n Timezone,\n Country,\n Continent\n )\n\nimport re\n\ndef fetch_images_for_trackpoints(q):\n trackpoints = q.all()\n trkpt_list=list()\n for trackpoint in trackpoints:\n trkpt_list.append(trackpoint.id)\n q = DBSession.query(Image).filter(and_(Image.trackpoint.in_(trkpt_list)))\n images = q.order_by(asc(Image.timestamp_original)).all()\n return images\n\n\n\n@view_config(\n route_name='view',\n renderer='/view/view.mako',\n)\n\n@view_config(\n route_name='view:action',\n renderer='/view/view.mako',\n)\ndef view_view(request):\n try:\n action=request.matchdict['action']\n except:\n action='c'\n try:\n id=int(request.matchdict['id'])\n except:\n id=0\n try:\n page_number=int(request.matchdict['page'].replace('/',''))\n except:\n page_number=None\n if id==0 and page_number==None:\n q = DBSession.query(Image).order_by(Image.timestamp_original)\n image_count=q.count()\n 
page_fract=float(Fraction(str(image_count)+'/10'))\n if int(str(page_fract).split('.')[1])==0:\n page=int(str(page_fract).split('.')[0])-1\n else: \n page=str(page_fract).split('.')[0]\n elif page_number==None:\n page=0\n else:\n page=page_number\n #navstring=countryDetails(model,id)\n curr_page=int(page)\n #return { 'bla': log_count}\n if id==0:\n ##TODO what was the idea behind \"country_id!=None\"?\n ##q = DBSession.query(Trackpoint).filter(Trackpoint.country_id!=None)\n #q = DBSession.query(Trackpoint)\n #images=fetch_images_for_trackpoints(q)\n images=Image.get_images()\n #print '\\n\\n\\n\\n\\n'\n #print images\n #print '\\n\\n\\n\\n\\n'\n elif action=='c':\n #q = DBSession.query(Trackpoint).filter(and_(Trackpoint.country_id==id))\n #images=fetch_images_for_trackpoints(q)\n images=Image.get_images()\n elif action=='log':\n #q = DBSession.query(Trackpoint).filter(and_(Trackpoint.id==id))\n #images=fetch_images_for_trackpoints(q)\n log = DBSession.query(Log).filter(Log.id==id).one()\n images = DBSession.query(Image).filter(Image.logs.contains(log)).order_by(Image.timestamp_original).all()\n elif action=='id':\n images = DBSession.query(Image).filter(Image.id==id).order_by(Image.timestamp_original).all()\n\n elif action=='infomarker':\n images = DBSession.query(Image).filter(Image.trackpoint==id).order_by(Image.timestamp_original).all()\n page_list=list()\n pages_list=list()\n i=0\n for image in images:\n page_list.append(image)\n i=i+1\n if i==10:\n page_list.reverse()\n pages_list.append(page_list)\n page_list=list()\n i=0\n if i<10 and i>0:\n page_list.reverse()\n pages_list.append(page_list)\n viewlist=list()\n #print page_list\n #print pages_list\n #print curr_page\n #print pages_list[curr_page]\n if len(pages_list) < curr_page:\n curr_page=len(pages_list)-1\n for image in pages_list[curr_page]:\n if image.trackpoint:\n trackpoint_id=image.trackpoint\n else:\n trackpoint_id=3572 #TODO\n prefix='near '\n q = 
DBSession.query(Trackpoint).filter(Trackpoint.id==trackpoint_id)\n try:\n trackpointinfo=q.one()\n #print '\\n\\n\\n\\n'\n #print trackpointinfo.location_ref[0].name\n except:\n trackpointinfo = Trackpoint(\n track_id = None,\n latitude = None,\n longitude = None,\n altitude = None,\n velocity = None,\n temperature = None,\n direction = None,\n pressure = None,\n timestamp = None,\n uuid = None\n )\n #print image.location.replace('/srv','')\n #print '\\n\\n\\n\\n'\n ##TODO: fix timezone\n ##q = DBSession.query(Timezone).filter(Timezone.id==trackpointinfo.timezone_id)\n q = DBSession.query(Timezone).filter(Timezone.id==8)\n timezone = q.one()\n localtime = image.timestamp_original+timezone.utcoffset\n deltaseconds=round(timezone.utcoffset.days*86400+timezone.utcoffset.seconds)\n #TODO THIS SUCKS!\n class Viewdetail(object):\n def __init__(self, image, photoid, name, location, title, comment, alt, aperture, shutter, focal_length, iso, trackpointinfo, localtime, timezone, utcoffset, log, author):\n self.image = image\n self.photoid=photoid\n self.name=name\n self.location=location\n self.title=title\n self.comment=comment\n self.alt=alt\n self.aperture= image.aperture\n self.shutter= image.shutter\n self.focal_length= image.focal_length\n self.iso= image.iso\n #logdate=c.loginfo.created.strftime('%Y-%m-%d') #needed for the imagepath\n self.trackpointinfo=trackpointinfo\n self.localtime=localtime\n self.timezone=timezone\n #calculate the offset in seconds\n self.utcoffset=utcoffset\n self.log = log\n self.author = image.author_img_ref\n viewdetail = Viewdetail(image, image.id, image.name, image.location.replace('/srv',''), image.title, image.comment, image.alt, image.aperture, image.shutter, image.focal_length, image.iso, trackpointinfo, localtime.strftime('%Y-%m-%d %H:%M:%S'), timezone, timediff(deltaseconds), image.log, image.author)\n viewlist.append(viewdetail)\n\n return {\n 'pages_list': pages_list,\n 'curr_page': int(curr_page),\n 'viewlist': viewlist,\n 
'request': request,\n 'action': action,\n 'id': id,\n }\n\n","sub_path":"poab/views/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"447655394","text":"import numpy as np\nfrom PIL import ImageFont, ImageDraw, Image\nimport textwrap\nimport cv2\n\n# Create a black image\n\n# Write some Text\ndef puttext(height, x, y, text, filename):\n print(filename)\n lines = textwrap.wrap(text, width=90)\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (x,y)\n fontScale = 1\n fontColor = (0,0,0)\n lineType = 2\n b,g,r,a = 0,0,0,0\n img = cv2.imread(filename)\n fontpath = \"utils/Lohit-Devanagari.ttf\" \n font = ImageFont.truetype(fontpath, height)\n img_pil = Image.fromarray(img)\n draw = ImageDraw.Draw(img_pil)\n y_text = y\n text_w = x\n for line in lines:\n width, height = font.getsize(line)\n draw.text((x, y_text), line, font=font, fill=(b,g,r,a))\n text_w, text_h = draw.textsize(line, font)\n y_text += height\n # draw.text((x, y), text, font = font, fill = (b,g,r,a))\n img = np.array(img_pil)\n # cv2.putText(img,text, \n # bottomLeftCornerOfText, \n # font, \n # fontScale,\n # fontColor,\n # lineType)\n\n #Display the image\n # cv2.imshow(\"img\",img)\n\n #Save image\n cv2.imwrite(filename, img)\n return (text_w, y_text)\n # cv2.waitKey(0)","sub_path":"python/utils/puttext.py","file_name":"puttext.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"321085587","text":"n = int(input())\nnums = []\ntimes = 0\nfor i in range(n):\n opt, num = input().split(' ')\n num = int(num)\n if opt == '1':\n nums.append(num)\n elif opt == '2':\n nums.remove(num)\n elif opt == '3':\n index = 0\n for j in range(len(nums)):\n if nums[j] == num:\n print(j + 1)\n break\n elif opt == '4':\n print(nums[num - 1])\n elif opt == '5':\n for j in range(1, len(nums)):\n 
if nums[j - 1] < num and nums[j] >= num:\n print(nums[j - 1])\n break\n \n else:\n for j in range(len(nums)):\n if num < nums[j]:\n print(nums[j])\n break\n nums.sort()\n","sub_path":"Code/CodeRecords/2236/60587/317589.py","file_name":"317589.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"319692430","text":"import numpy as np\n\nclass Perceptron:\n\n\t\"\"\"docstring for Perceptron:\n\t\tTakes 2 arguments one is the length of the input compulsory\n\t\tThe second argumnet is weight which is optional\n\t\tIf weights are given \"they are taken else these are allocated based on the input length passed with .5 as weights\"\"\"\n\tdef __init__(self, input_length,weights=None):\n\t\tif weights is None:\n\t\t\tself.weights = np.ones(input_length) * .5\n\t\telse:\n\t\t\tself.weights = weights\n\t@staticmethod\n\tdef unit_step_function(x):\n\t\t# if x > .5 then 1 is returned else 0 is 1 \n\t\tif x > .5:\n\t\t\treturn 1\n\t\treturn 0\n\tdef __call__(self,in_data):\n\t\t# weighted_input is calculated to product of weights and input leads to an array \n\t\tweighted_input = self.weights * in_data\n\t\t# the weight_input vector is summed up and stored in the weighted_sum\n\t\tweighted_sum = weighted_input.sum()\n\t\t# the weighted sum is passed to unit_step_function which returns a sinlge value number either 0 or 1\n\t\treturn Perceptron.unit_step_function(weighted_sum)\n\n\np = Perceptron(2,np.array([1,1]))\ninputs = np.array([[0,0],[0,1],[1,0],[1,1]])\nfor x in inputs:\n\tp(np.array(x))\n\nprint(p(np.array([0,0])))\nprint(p(np.array([0,1])))\nprint(p(np.array([1,0])))\nprint(p(np.array([1,1])))","sub_path":"and_gate_in_neural_network.py","file_name":"and_gate_in_neural_network.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"194789518","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Sat Jun 9 09:46:25 2018\n\n@author: yjwu\n\nPython 3.5.2\n\n\"\"\"\n\nfrom __future__ import print_function\nimport math\nimport cv2\nimport dataloader\nimport CKA\n\nimport torch\nfrom torch import optim\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\n\nfrom spiking_model import*\nfrom graphviz import Digraph\nfrom args import get_parser\n\nparser = get_parser()\nargs = parser.parse_args()\n\n# device set\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ncuda = torch.cuda.is_available()\nif cuda:\n device_num = torch.cuda.device_count()\n snn = torch.nn.DataParallel(SDDC(), device_ids=list(range(device_num)))\nelse:\n device_num = 1\n snn = SDDC()\n\nsnn.to(device)\nbatch_size = args.base_batch_size * device_num\n\n# data loader set\nsource_loader, target_test_loader = dataloader.load_data(args.ROOT_PATH, args.SOURCE_NAME, batch_size)\ntarget_train_loader = dataloader.load_training(args.ROOT_PATH, args.TARGET_NAME, batch_size)\n\n\n# train prepare\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\nacc_record = list([])\nloss_train_record = list([])\nloss_test_record = list([])\n\ncriterion = nn.MSELoss()\n\ncriterion_dict = {\"linear_CKA\": CKA.linear_CKA,\n \"KL\": torch.nn.KLDivLoss()}\nmmd_criterion = criterion_dict[args.mmd_function]\noptimizer = torch.optim.Adam(snn.parameters(), lr=args.learning_rate)\n\n\n# get laplace image from resize image\ndef get_lap(tensor_data):\n shape = tensor_data.size()\n out = []\n for i in range(shape[0]):\n img = transforms.ToPILImage()(tensor_data[i]).convert('RGB')\n cv_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2GRAY)\n cv_img_lap = cv2.Laplacian(cv_img, cv2.CV_8U, ksize=args.laplace_size)\n tensor_out = transforms.ToTensor()(cv_img_lap)\n tensor_out = tensor_out.reshape((1, 1, shape[2], shape[3]))\n out.append(tensor_out)\n return torch.cat(out, 0)\n\n\ndef 
calculate_error_classification(spikes, des_spikes):\n err = (torch.sum(spikes, -1, keepdim=True) - des_spikes)\n return err\n\n\ndef calculate_l2_loss_classification(spikes, des_spikes):\n return torch.sum(calculate_error_classification(spikes, des_spikes) ** 2) / 2 * 1\n\n\ndef step_decay(epoch, learning_rate):\n \"\"\"\n learning rate step decay\n :param epoch: current training epoch\n :param learning_rate: initial learning rate\n :return: learning rate after step decay\n \"\"\"\n initial_lrate = learning_rate\n drop = 0.8\n epochs_drop = 10.0\n lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))\n return lrate\n\n\ndef make_dot(var, params=None):\n \"\"\" Produces Graphviz representation of PyTorch autograd graph\n Blue nodes are the Variables that require grad, orange are Tensors\n saved for backward in torch.autograd.Function\n Args:\n var: output Variable\n params: dict of (name, Variable) to add names to node that\n require grad (TODO: make optional)\n \"\"\"\n if params is not None:\n assert isinstance(params.values()[0], Variable)\n param_map = {id(v): k for k, v in params.items()}\n\n node_attr = dict(style='filled',\n shape='box',\n align='left',\n fontsize='12',\n ranksep='0.1',\n height='0.2')\n dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"12,12\"))\n seen = set()\n\n def size_to_str(size):\n return '(' + (', ').join(['%d' % v for v in size]) + ')'\n\n def add_nodes(var):\n if var not in seen:\n if torch.is_tensor(var):\n dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')\n elif hasattr(var, 'variable'):\n u = var.variable\n name = param_map[id(u)] if params is not None else ''\n node_name = '%s\\n %s' % (name, size_to_str(u.size()))\n dot.node(str(id(var)), node_name, fillcolor='lightblue')\n else:\n dot.node(str(id(var)), str(type(var).__name__))\n seen.add(var)\n if hasattr(var, 'next_functions'):\n for u in var.next_functions:\n if u[0] is not None:\n dot.edge(str(id(u[0])), str(id(var)))\n 
add_nodes(u[0])\n if hasattr(var, 'saved_tensors'):\n for t in var.saved_tensors:\n dot.edge(str(id(t)), str(id(var)))\n add_nodes(t)\n\n add_nodes(var.grad_fn)\n return dot\n\n\ndef train_ddcnet(epoch, model, learning_rate, source_loader, target_loader):\n \"\"\"\n train source and target domain on ddcnet\n :param epoch: current training epoch\n :param model: defined ddcnet\n :param learning_rate: initial learning rate\n :param source_loader: source loader\n :param target_loader: target train loader\n :return:\n \"\"\"\n log_interval = 10\n # LEARNING_RATE = step_decay(epoch, learning_rate)\n # print('Learning Rate: ', LEARNING_RATE)\n # optimizer = optim.SGD([\n # # {'params': model.features.parameters()},\n # # {'params': model.classifier.parameters()},\n # # {'params': model.bottleneck.parameters(), 'lr': LEARNING_RATE},\n # # {'params': model.final_classifier.parameters(), 'lr': LEARNING_RATE},\n # {'params': model.parameters(), 'lr': LEARNING_RATE}\n # ], lr=LEARNING_RATE / 10, momentum=momentum, weight_decay=l2_decay)\n\n # enter training mode\n model.train()\n\n iter_source = iter(source_loader)\n iter_target = iter(target_loader)\n num_iter = len(source_loader)\n\n correct = 0\n total_loss = 0\n\n for i in range(1, num_iter):\n source_data, source_label = iter_source.next()\n target_data, _ = iter_target.next()\n\n source_lap = get_lap(source_data)\n target_lap = get_lap(target_data)\n\n if i % len(target_loader) == 0:\n iter_target = iter(target_loader)\n if cuda:\n source_data, source_lap, source_label = source_data.cuda(), source_lap.cuda(), source_label.cuda()\n target_data, target_lap = target_data.cuda(), target_lap.cuda()\n\n source_data, source_lap, source_label = Variable(source_data), Variable(source_lap), Variable(source_label)\n target_data, target_lap = Variable(target_data), Variable(target_lap)\n\n model.zero_grad()\n optimizer.zero_grad()\n\n # source_preds, mmd_loss = model(source_data, source_lap, target_data, target_lap, epoch, i)\n 
source_preds, source_feature, target_feature = model(source_data, source_lap, target_data, target_lap, epoch, i)\n mmd_loss = torch.abs(mmd_criterion(source_feature.detach(), target_feature.detach()))\n\n source_label_ = torch.zeros(batch_size, args.num_classes).cuda().scatter_(1, source_label.view(-1, 1), 1)\n _, predicted = source_preds.max(1)\n correct += float(predicted.eq(source_label).sum().item())\n\n clf_loss = criterion(source_preds, source_label_) # clf_criterion(source_preds, source_label)\n\n loss = clf_loss\n total_loss += clf_loss.item()\n\n loss.backward()\n\n optimizer.step()\n\n if i % log_interval == 4:\n print('Train Epoch {}: [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tsoft_Loss: {:.6f}\\tmmd_Loss: {:.6f}'.format(\n epoch, i * len(source_data), len(source_loader) * batch_size,\n 100. * i / len(source_loader), loss, clf_loss.item(), mmd_loss.item()))\n\n total_loss /= len(source_loader)\n acc_train = float(correct) * 100. / (len(source_loader) * batch_size)\n\n print('{} set: Average classification loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n args.SOURCE_NAME, total_loss, correct, len(source_loader.dataset), acc_train))\n\n\ndef test_ddcnet(model, target_loader):\n \"\"\"\n test target data on fine-tuned alexnet\n :param model: trained alexnet on source data set\n :param target_loader: target dataloader\n :return: correct num\n \"\"\"\n\n model.eval()\n test_loss = 0\n correct = 0\n\n for data, target in target_test_loader:\n data_lap = get_lap(data) # > torch.ones((1, 227, 227), device=device) * 0.3\n\n if cuda:\n data, data_lap, target = data.cuda(), data_lap.cuda(), target.cuda()\n data, data_lap, target = Variable(data), Variable(data_lap), Variable(target)\n target_preds, _1, _2 = model(data, data_lap, data, data_lap, 0, 0)\n\n _, predicted = target_preds.max(1)\n correct += float(predicted.eq(target).sum().item())\n\n target_ = torch.zeros(batch_size, args.num_classes).cuda().scatter_(1, target.view(-1, 1), 1)\n test_loss += 
criterion(target_preds, target_) # sum up batch loss\n\n # test_loss += clf_criterion(target_preds, target) # sum up batch loss\n # pred = target_preds.data.max(1)[1] # get the index of the max log-probability\n # correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(target_loader)\n print('{} set: Average classification loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n args.TARGET_NAME, test_loss.item(), correct, len(target_loader.dataset),\n # TARGET_NAME, test_loss.data[0], correct, len(target_loader.dataset),\n 100. * correct / len(target_loader.dataset)))\n\n return correct\n\n\nif __name__ == '__main__':\n for epoch in range(1, args.num_epochs + 1):\n print('Train Epoch: ', epoch)\n train_ddcnet(epoch, snn, args.learning_rate, source_loader, target_train_loader)\n with torch.no_grad():\n correct = test_ddcnet(snn, target_test_loader)\n # acc_record.append(correct)\n # if epoch % 5 == 0:\n # print(correct)\n # print('Saving..')\n # state = {\n # 'net': snn.state_dict(),\n # 'acc': correct,\n # 'epoch': epoch,\n # 'acc_record': acc_record,\n # }\n # if not os.path.isdir('checkpoint'):\n # os.mkdir('checkpoint')\n # torch.save(state, './checkpoint/ckpt' + args.names + '.t7')\n # best_acc = correct\n","sub_path":"SDDC_train.py","file_name":"SDDC_train.py","file_ext":"py","file_size_in_byte":10272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"405083660","text":"import os, pyaes, sys, argparse\r\n\r\ndef encrypt(pub_key, plain_text, secret_cip):\r\n n, e = open(pub_key, 'r').read().split(',')\r\n plain_text = open(plain_text, 'rb').read()\r\n aes_key = os.urandom(16)\r\n cipher_text = pyaes.AESModeOfOperationCTR(aes_key).encrypt(plain_text)\r\n p = int.from_bytes(aes_key, sys.byteorder)\r\n e, n = int(e), int(n)\r\n cipher_key = pow(p, e, n)\r\n cipher_key = bytes(str(cipher_key).encode())\r\n secret_file = open(secret_cip, 'wb')\r\n secret_file.write(b'%b %b' % (cipher_text, 
cipher_key))\r\n secret_file.close()\r\n #return plain_text\r\n\r\ndef decrypt(prv_key, secret_cip, plain_text):\r\n n, d = open(prv_key, 'r').read().split(',')\r\n c_items = open(secret_cip, 'rb').read().split(b' ')\r\n cipher_text = b' '.join(c_items[:-1])\r\n cipher_key = c_items[-1]\r\n cipher_key, d, n = int(cipher_key), int(d), int(n)\r\n aes_key = pow(cipher_key, d, n)\r\n aes = pyaes.AESModeOfOperationCTR(aes_key.to_bytes(16, sys.byteorder))\r\n decrypted = aes.decrypt(cipher_text)\r\n plain_text_file = open(plain_text, 'wb')\r\n plain_text_file.write(decrypted)\r\n plain_text_file.close()\r\n #return decrypted\r\n\r\nparser = argparse.ArgumentParser(prog=\"My RSA Encryptor/Decryptor.\")\r\ngroup = parser.add_mutually_exclusive_group(required=True)\r\ngroup.add_argument(\"-e\", help=\"Encrypt with public key\")\r\ngroup.add_argument(\"-d\", help=\"Decrypt with private key\")\r\n\r\nparser.add_argument(\"source\", help=\"Source File\")\r\nparser.add_argument(\"destination\", help=\"Destination File\")\r\n\r\nargs = parser.parse_args()\r\n\r\nif args.e:\r\n encrypt(args.e, args.source, args.destination)\r\nelif args.d:\r\n decrypt(args.d, args.source, args.destination)\r\n","sub_path":"crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"527508906","text":"import numpy as np\nfrom typing import List\nfrom Appearance import Appearance\nfrom Sphere import Sphere\n\nVec = List[float]\n\n\nclass Shape:\n def __init__(self,\n name: str = \"\",\n appearance: Appearance = Appearance(),\n geometry: Sphere = Sphere()):\n self.name: str = name\n self.appearance: Appearance = appearance\n self.geometry: Sphere = geometry\n def __str__(self):\n template = \"{0} Shape {{\\n\\\n\\tappearance\\t{1}\\n\\\n\\tgeometry\\t{2}\\n\\\n}}\"\n return template.format(\n \"DEF \" + self.name if self.name != \"\" else self.name,\n str(self.appearance),\n 
str(self.geometry)\n )\npass","sub_path":"vrml/vrml/Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"361599033","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Elena'\n\nimport unittest\nimport a_driver_config as config\n\n\nclass vip_chat_landscape(unittest.TestCase):\n\n def setUp(self):\n self.driver = config.driver\n\n\n def test_vip_chat_landcsape(self):\n self.driver.find_element_by_name(u'Войти').click()\n fields = self.driver.find_elements_by_tag_name(\"textfield\")\n fields[0].send_keys(\"elena-liatris@yandex.ru\")\n fields[1].send_keys(\"worktops911\")\n self.driver.find_element_by_name(u'Войти').click()\n self.driver.find_elements_by_tag_name('FrameLayout')[2].click()\n self.driver.find_element_by_name(u'Диалоги').click()\n self.driver.find_elements_by_tag_name('RelativeLayout')[3].click()\n # доработать - как сделать горизонтальную ориентацию\n # self.driver.orientation('landscape')\n self.driver.find_element_by_name(u'Написать сообщение').send_keys(\"Hi! 
How are you?\")\n self.driver.find_element_by_name(u'Отпр.').click()\n\n\n\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Tests_work/classes/test_vip_chat_landscape.py","file_name":"test_vip_chat_landscape.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"397296520","text":"from curses import wrapper\nimport curses\nimport os\nimport sys,time,random\n\n\n# Gibt eine Datei in eine Fenster aus \ndef boxOutput(datei,fenster):\n x = 1\n y = 1\n file = open(datei, \"r\")\n text = file.read()\n for letter in text:\n if( xmax -5 == x):\n y = y + 1\n x = 1\n else:\n fenster.addch(y, x,letter)\n fenster.refresh()\n fenster.border(0)\n fenster.refresh()\n x = x + 1\n time.sleep(0.008)\n file.close()\n\n\n\ndef main(stdscr):\n def rufusAnzeige():\n rufusPanel.addstr(1, 1, \"Rufus\")\n rufusPanel.refresh()\n# Clear screen\n stdscr.clear()\n screen = curses.initscr()\n screen.border(0)\n\n#Info über die Felder auslesen\n global ymax\n global xmax\n global ylow\n global xlow\n ymax, xmax = screen.getmaxyx()\n ylow, xlow = screen.getbegyx()\n \n# legt ein neuens Feld an (height, width, begin_y, begin_x)\n box1 = curses.newwin(int(ymax *0.3) ,xmax - 4,int(ymax - ymax * 0.31 ),xlow + 2)\n box2 = curses.newwin(int(ymax *0.6) ,xmax - 30 ,int(ylow + 2),xlow + 2)\n rufusPanel = curses.newwin(int(ymax *0.2) ,xlow + 26,int(ylow + 2),xmax - 28)\n inventa = curses.newwin(int(ymax *0.4) ,xlow + 26,int(ymax *0.2+2),xmax - 28)\n\n##Läd die Boxen das Erste mal das sie angzeigt wereden#######\n box1.box()\n box2.box()\n rufusPanel.box()\n inventa.box()\n screen.refresh()\n box2.refresh()\n box1.refresh()\n rufusPanel.refresh()\n inventa.refresh()\n######################################################\n rufusAnzeige()\n boxOutput(\"text.txt\",box1)\n\n stdscr.refresh()\n 
stdscr.getkey()\n\nwrapper(main)\n","sub_path":"basisScreen.py","file_name":"basisScreen.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"7116050","text":"#!/usr/bin/env python\n\nimport sys\nfrom struct import unpack, Struct\nimport os.path\nfrom common import Section, BFile\n\nfpms = 0.02997\ndef frameToHMSMS(frame):\n totalms = frame/fpms\n ms = totalms%1000\n seconds = (totalms/1000)%60\n minutes = (totalms/60000)%60\n hours = totalms/3600000\n return (hours,minutes,seconds,ms)\n\nclass Inf1(Section):\n header = Struct('>HH4x')\n def read(self, fin, start, chunksize):\n count, self.size = self.header.unpack(fin.read(8))\n assert chunksize-16 >= self.size*count, (chunksize, self.size, count)\n self.inf = [None]*count\n for j in range(count):\n if self.size == 24:\n self.inf[j] = unpack('>LHHLLLL', fin.read(self.size))\n elif self.size == 12:\n self.inf[j] = unpack('>LHHL', fin.read(self.size))\n elif self.size == 4:\n self.inf[j] = unpack('>L', fin.read(self.size))\n elif self.size == 8:\n self.inf[j] = unpack('>LL', fin.read(self.size))\n else:\n raise Exception(\"Unknown size %d\" % self.size)\n #self.inf.sort(key=lambda a: a[0])\n\nclass Dat1(Section):\n def read(self, fin, start, size):\n self.data = fin.read(size-8)\n\nclass BMessages(BFile):\n sectionHandlers = {b'INF1': Inf1, b'DAT1': Dat1}\n def readHeader(self, fin):\n super(BMessages, self).readHeader(fin)\n assert self.signature == b'MESGbmg1', self.signature\n\nif len(sys.argv) != 2:\n sys.stderr.write(\"Usage: %s \\n\"%sys.argv[0])\n exit(1)\n\nfin = open(sys.argv[1], 'rb')\nbmg = BMessages()\nbmg.read(fin)\nfin.close()\n\nif bmg.inf1.size == 12:\n # subtitle format\n srtout = open(os.path.splitext(sys.argv[1])[0]+\".srt\", 'w')\n for j, (offset, start, end, unknown) in enumerate(bmg.inf1.inf):\n srtout.write(u\"%d\\n\"%(j+1))\n srtout.write(u\"%02d:%02d:%02d,%03d --> \"%frameToHMSMS(start))\n 
srtout.write(u\"%02d:%02d:%02d,%03d\\n\"%frameToHMSMS(end))\n if j+1 < len(bmg.inf1.inf):\n nextOffset = bmg.inf1.inf[j+1][0]\n else:\n nextOffset = len(bmg.dat1.data)\n srtout.write(bmg.dat1.data[offset:bmg.dat1.data.find(b'\\0', offset)].decode('shift-jis'))\n srtout.write(u\"\\n\\n\")\n srtout.close()\nelse:\n txtout = open(os.path.splitext(sys.argv[1])[0]+\".txt\", 'wb')\n for j, indices in enumerate(bmg.inf1.inf):\n offset = indices[0]\n if j+1 < len(bmg.inf1.inf):\n nextOffset = bmg.inf1.inf[j+1][0]\n else:\n nextOffset = len(bmg.dat1.data)\n end = bmg.dat1.data.find(b'\\0', offset)\n data = bmg.dat1.data[offset:end]\n txtout.write(data)#.decode('shift-jis'))\n txtout.write(b\"\\n\\n\")\n txtout.close()\n","sub_path":"bmg.py","file_name":"bmg.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"443351807","text":"from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nfrom SimpleCV import *\nimport pykemon\nimport pickle\nimport json\nimport sys\nimport cgi\n\n# Load classification model\nclassifier = pickle.load(file('model.pkl'))\n\nclass Handler(BaseHTTPRequestHandler):\n # Get Request handler\n def do_GET(self):\n if self.path == '/':\n f = open(\"index.html\")\n self.send_response(200)\n self.send_header('Content-type','text-html')\n self.end_headers()\n self.wfile.write(f.read())\n f.close()\n\n else:\n self.send_response(200)\n self.send_header('Content-type','text-html')\n self.end_headers()\n self.wfile.write('Page not found')\n return\n\n # Post Request handler\n def do_POST(self):\n if self.path == '/':\n # Read form\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n })\n filename = \"test\"\n data = form.file.read()\n open(\"/tmp/%s\"%filename, \"wb\").write(data)\n frame = Image(\"/tmp/%s\"%filename)\n # Classify image\n cls = 
classifier.classify(frame)\n if cls:\n print(cls)\n # Get pokemon data from Poke Api\n pokemon = pykemon.get(pokemon=cls)\n description = pykemon.get(description_id=pokemon.descriptions[sorted(pokemon.descriptions.keys())[-1]][20:-1])\n pokemon_type = pykemon.get(type_id=pokemon.types[list(pokemon.types.keys())[0]][13:-1])\n sprite = pykemon.get(sprite_id=pokemon.sprites[sorted(pokemon.sprites.keys())[-1]][15:-1])\n self.send_response(200)\n self.send_header(\"Content-Type\", \"application/json\")\n self.send_header(\"Connection\", \"close\")\n self.end_headers()\n # Respond with Pokemon data in JSON\n self.wfile.write(json.dumps({'name':pokemon.name,'description':description.description,'type':pokemon_type.name,'sprite':'http://pokeapi.co/'+sprite.image}).encode())\n\n\ndef run(port=8000):\n print('http server is starting...')\n server_address = ('127.0.0.1', port)\n httpd = HTTPServer(server_address, Handler)\n print('http server is running...listening on port %s' %port)\n httpd.serve_forever()\n\nif __name__ == '__main__':\n from optparse import OptionParser\n op = OptionParser(__doc__)\n\n op.add_option(\"-p\", default=8000, type=\"int\", dest=\"port\",\n help=\"port #\")\n\n opts, args = op.parse_args(sys.argv)\n\n run(opts.port)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"464023650","text":"from string import Template\nfrom gen_config_utils import *\nfrom netaddr import IPAddress\n\ndef genFfrl(gw, instance,config):\n if not \"ffrl_ipv4\" in config['gws'][\"%i,%i\"%(gw,instance)]:\n return\n with open(\"ffrl.tpl\",\"r\") as fp:\n tmpl = Template(fp.read())\n data = \"\"\n for ep in ffrlEndpoints:\n localv4 = config['gws'][\"%i,%i\"%(gw,instance)][\"internalipv4\"]\n natv4 = config['gws'][\"%i,%i\"%(gw,instance)][\"ffrl_ipv4\"]\n data += tmpl.substitute(IFACE=ep,\n 
TUN_LOCAL_V4=str(IPAddress(config['gws'][\"%i,%i\"%(gw,instance)][\"ffrlv4\"][ep])+1),\n TUN_REMOTE_V4=config['gws'][\"%i,%i\"%(gw,instance)][\"ffrlv4\"][ep],\n TUN_LOCAL_V6=str(IPAddress(config['gws'][\"%i,%i\"%(gw,instance)][\"ffrlv6\"][ep])+1),\n GRE_REMOTE=ffrlEndpoints[ep],\n GRE_LOCAL=localv4,\n NAT_V4=natv4)\n\n with open(\"etc/network/interfaces.d/ffrl\",\"w\") as fp:\n fp.write(data)\n\n","sub_path":"gen_config_ffrl.py","file_name":"gen_config_ffrl.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"509724537","text":"#! /usr/bin/env python\n\n\"\"\"\nLibrary of functions that can be used for co-registration of raster data\n\nFor many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom pygeotools.lib import malib, iolib\n\ndef apply_xy_shift(ds, dx, dy, createcopy=True):\n \"\"\"\n Apply horizontal shift to GDAL dataset GeoTransform\n \n Returns:\n GDAL Dataset copy with updated GeoTransform\n \"\"\"\n print(\"X shift: \", dx)\n print(\"Y shift: \", dy)\n \n #Update geotransform\n gt_orig = ds.GetGeoTransform()\n gt_shift = np.copy(gt_orig)\n gt_shift[0] += dx \n gt_shift[3] += dy\n\n print(\"Original geotransform:\", gt_orig)\n print(\"Updated geotransform:\", gt_shift)\n\n #Update ds Geotransform\n if createcopy:\n ds_align = iolib.mem_drv.CreateCopy('', ds, 1)\n else:\n #Update in place, assume ds is opened as GA_Update\n ds_align = ds\n ds_align.SetGeoTransform(gt_shift)\n return ds_align\n\ndef apply_z_shift(ds, dz, createcopy=True):\n print(\"Z shift: \", dz)\n if createcopy:\n ds_shift = iolib.mem_drv.CreateCopy('', ds, 1)\n else:\n ds_shift = ds\n b = ds_shift.GetRasterBand(1)\n a = iolib.b_getma(b)\n a += dz\n b.WriteArray(a.filled())\n return ds_shift\n\ndef compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):\n \"\"\"Compute 
subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method\n \"\"\"\n #This defines the search window size\n #Use half-pixel stride?\n #Note: stride is not properly implemented \n #stride = 1\n #ref = dem1[::stride,::stride]\n #kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]\n kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]\n #Want to pad evenly on both sides, so add +1 here\n m = np.zeros((pad[0]*2+1, pad[1]*2+1))\n \n #Find integer pixel offset\n i = j = 0\n for i in range(m.shape[0]):\n print(i)\n for j in range(m.shape[1]):\n print(j)\n ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]\n diff = ref - kernel\n \n #Remove outliers beyond IQR\n diff_iqr = malib.calcperc(diff, (25,75))\n diff = np.ma.masked_outside(diff, *diff_iqr)\n \"\"\" \n diff_med = np.ma.median(diff)\n diff_mad = malib.mad(diff)\n diff_madr = (diff_med - mad, diff_med + mad)\n diff = np.ma.masked_outside(diff, diff_madr) \n \"\"\"\n #Masked areas will decrease sum! 
Normalize by count of valid pixels\n m[i,j] = np.ma.abs(diff).sum()/diff.count()\n \n #Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement \n m = -m \n\n int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))\n int_offset = int_argmax - pad\n \n sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))\n sp_offset = sp_argmax - pad\n\n if plot:\n plt.figure()\n plt.title('Sum of Absolute Differences')\n plt.imshow(m)\n plt.scatter(*sp_argmax[::-1])\n plt.show()\n\n return m, int_offset, sp_offset\n\n#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement\ndef compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False): \n \"\"\"Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method\n \"\"\"\n\n #Apply edge detection filter up front - improves results when input DEMs are same resolution\n if prefilter:\n print(\"Applying LoG edge-detection filter to DEMs\")\n sigma = 1\n import scipy.ndimage\n #Note, ndimage alone propagates Nans and greatly reduces valid data area\n #Use the malib.nanfill wrapper to avoid this\n dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma) \n dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma) \n\n import scipy.signal\n #Compute max offset given dem spatial resolution\n #Should implement arbirary x and y search space\n #xsearch = (20, 41)\n #ysearch = (-10, 1)\n stride = 1\n ref = dem1[::stride,::stride]\n kernel = dem2[pad[0]:-pad[1]:stride, pad[0]:-pad[1]:stride]\n #kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]\n\n #Normalize\n ref = (ref - ref.mean()) / ref.std()\n kernel = (kernel - kernel.mean()) / kernel.std()\n\n #Consider using astropy.convolve here instead of scipy.correlate?\n\n print(\"Adding random noise to masked regions\")\n #Generate random noise to fill gaps before correlation in frequency domain\n #Normal distribution 
N(mean, std^2)\n #ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()\n #kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()\n #This provides noise in proper range, but noise propagates to m, peak is in different locations!\n #ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))\n #kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))\n\n #This provides a proper normal distribution with mean=0 and std=1\n ref_noise = ref.mask * (np.random.randn(*ref.shape))\n kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))\n #Add the noise\n ref = ref.filled(0) + ref_noise\n kernel = kernel.filled(0) + kernel_noise\n\n print(\"Running 2D correlation with search window (x,y): %i, %i\" % (pad[1], pad[0]))\n m = scipy.signal.correlate2d(ref, kernel, 'valid')\n #This has memory issues, but ndimage filters can handle nan\n #m = scipy.ndimage.filters.correlate(ref, kernel)\n \n print(\"Computing sub-pixel peak\")\n int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))\n int_offset = int_argmax*stride - pad\n #int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]]) \n\n print(m.argmax())\n print(m.shape)\n print(int_argmax)\n print(int_offset)\n\n #Find sub-pixel peak\n sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))\n #May need to split this into integer and decimal components, multipy stride*int and add decimal\n #sp_offset = int_offset + (sp_argmax - int_argmax)\n sp_offset = sp_argmax - pad\n #sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]]) \n\n print(sp_argmax)\n print(sp_offset)\n\n if plot: \n fig, ax = plt.subplots()\n ax.set_title('NCC offset, parabolic SPR')\n ax.imshow(m)\n #plt.scatter(*int_argmax[::-1])\n ax.scatter(*sp_argmax[::-1])\n else:\n fig = None\n\n return m, int_offset, sp_offset, fig\n\n#Function for fitting Nuth and Kaab (2011)\ndef nuth_func(x, a, b, c):\n y = a * 
np.cos(np.deg2rad(b-x)) + c\n #Per Suki suggestion, can use Phasor addition\n #y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c\n return y\n\n#This is the Nuth and Kaab (2011) method\ndef compute_offset_nuth(dh, slope, aspect):\n \"\"\"Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method\n \"\"\"\n import scipy.optimize as optimization\n\n #mean_dh = dh.mean()\n #mean_slope = slope.mean()\n #c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope))) \n \n med_dh = malib.fast_median(dh)\n med_slope = malib.fast_median(slope)\n c_seed = (med_dh/np.tan(np.deg2rad(med_slope))) \n\n x0 = np.array([0.0, 0.0, c_seed])\n \n print(\"Computing common mask\")\n common_mask = ~(malib.common_mask([dh, aspect, slope]))\n\n xdata = aspect[common_mask]\n ydata = dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))\n\n #Generate synthetic data to test curve_fit\n #xdata = np.arange(0,360,0.01)\n #ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))\n \n #Limit sample size\n #n = 10000\n #idx = random.sample(range(xdata.size), n)\n #xdata = xdata[idx]\n #ydata = ydata[idx]\n\n \"\"\"\n #Fit to original, unfiltered data\n fit = optimization.curve_fit(nuth_func, xdata, ydata, x0)[0]\n print(fit) \n genplot(xdata, ydata, fit) \n \"\"\"\n\n \"\"\"\n #Filter to remove outliers \n #Compute median absolute difference\n y_med = np.median(ydata)\n y_mad = malib.mad(ydata)\n mad_factor = 3\n y_perc = [y_med - y_mad*mad_factor, y_med + y_mad*mad_factor]\n\n y_idx = ((ydata >= y_perc[0]) & (ydata <= y_perc[1]))\n ydata_clip = ydata[y_idx]\n xdata_clip = xdata[y_idx]\n\n fit = optimization.curve_fit(nuth_func, xdata_clip, ydata_clip, x0)[0]\n print(fit)\n genplot(xdata_clip, ydata_clip, fit) \n \"\"\"\n #Compute robust statistics for 1-degree bins\n nbins = 360\n bin_range = (0., 360.)\n bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', \\\n nbins=nbins, bin_range=bin_range)\n bin_med, bin_edges, 
bin_centers = malib.bin_stats(xdata, ydata, stat='median', \\\n nbins=nbins, bin_range=bin_range)\n\n \"\"\"\n #Mask bins in grid directions, can potentially contain biased stats\n badbins = [0, 45, 90, 180, 225, 270, 315]\n bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)\n bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)\n \"\"\"\n\n #Remove any empty bins\n #idx = ~(np.ma.getmaskarray(bin_med))\n\n #Remove any bins with only a few points\n min_count = 9\n idx = (bin_count.filled(0) >= min_count) \n\n bin_med = bin_med[idx]\n bin_centers = bin_centers[idx]\n\n fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]\n f = genplot(bin_centers, bin_med, fit, xdata=xdata, ydata=ydata) \n plt.show()\n #genplot(xdata, ydata, fit) \n\n print(fit)\n return fit, f\n\ndef genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):\n bin_range = (0, 360)\n a = (np.arange(*bin_range))\n f_a = nuth_func(a, fit[0], fit[1], fit[2])\n nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)\n if xdata.size > maxpts:\n import random\n idx = random.sample(list(range(xdata.size)), 10000)\n else:\n idx = np.arange(xdata.size)\n f, ax = plt.subplots(figsize=(6,6))\n ax.set_xlabel('Aspect (deg)')\n ax.set_ylabel('dh/tan(slope) (m)')\n ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')\n ax.plot(x, y, 'ro', label='Bin median')\n ax.axhline(color='gray')\n ax.plot(a, f_a, 'b', label=nuth_func_str)\n ax.set_xlim(*bin_range)\n abs_ylim = np.max([np.abs(y.min()), np.abs(y.max())])\n pad = 0.2 * abs_ylim \n ylim = (-abs_ylim - pad, abs_ylim + pad)\n #ylim = (y.min() - pad, y.max() + pad)\n minylim = (-10,10)\n if ylim[0] > minylim[0]:\n ylim = minylim\n ax.set_ylim(*ylim)\n ax.legend(prop={'size':8})\n return f \n\n#Function copied from from openPIV pyprocess\ndef find_first_peak(corr):\n \"\"\"\n Find row and column indices of the first correlation peak.\n \n Parameters\n ----------\n corr : np.ndarray\n the 
correlation map\n \n Returns\n -------\n i : int\n the row index of the correlation peak\n \n j : int\n the column index of the correlation peak \n \n corr_max1 : int\n the value of the correlation peak\n \n Original code from openPIV pyprocess\n\n \"\"\" \n ind = corr.argmax()\n s = corr.shape[1] \n \n i = ind // s \n j = ind % s\n \n return i, j, corr.max()\n\n#Function copied from from openPIV pyprocess\ndef find_subpixel_peak_position(corr, subpixel_method='gaussian'):\n \"\"\"\n Find subpixel approximation of the correlation peak.\n \n This function returns a subpixels approximation of the correlation\n peak by using one of the several methods available. If requested, \n the function also returns the signal to noise ratio level evaluated \n from the correlation map.\n \n Parameters\n ----------\n corr : np.ndarray\n the correlation map.\n \n subpixel_method : string\n one of the following methods to estimate subpixel location of the peak: \n 'centroid' [replaces default if correlation map is negative], \n 'gaussian' [default if correlation map is positive], \n 'parabolic'.\n \n Returns\n -------\n subp_peak_position : two elements tuple\n the fractional row and column indices for the sub-pixel\n approximation of the correlation peak.\n\n Original code from openPIV pyprocess\n\n \"\"\"\n # initialization\n default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)\n\n # the peak locations\n peak1_i, peak1_j, dummy = find_first_peak(corr)\n \n try:\n # the peak and its neighbours: left, right, down, up\n c = corr[peak1_i, peak1_j]\n cl = corr[peak1_i-1, peak1_j]\n cr = corr[peak1_i+1, peak1_j]\n cd = corr[peak1_i, peak1_j-1] \n cu = corr[peak1_i, peak1_j+1]\n \n # gaussian fit\n if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':\n subpixel_method = 'centroid'\n \n try: \n if subpixel_method == 'centroid':\n subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),\n ((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))\n 
\n elif subpixel_method == 'gaussian':\n subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),\n peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu)))) \n \n elif subpixel_method == 'parabolic':\n subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),\n peak1_j + (cd-cu)/(2*cd-4*c+2*cu)) \n \n except: \n subp_peak_position = default_peak_position\n \n except IndexError:\n subp_peak_position = default_peak_position\n \n return subp_peak_position[0], subp_peak_position[1]\n\n","sub_path":"demcoreg/coreglib.py","file_name":"coreglib.py","file_ext":"py","file_size_in_byte":13999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"172756541","text":"import sys\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4 import QtGui\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtSql import *\r\n\r\nfrom main_window import *\r\nfrom profile_toolbar import *\r\nfrom profile_picture import *\r\n\r\nclass DisplayProfileWidget(QWidget):\r\n \"\"\"A class to display the model and represent a view on the profile tab\"\"\"\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.ToolBarWidgetLayout=QVBoxLayout()\r\n self.LayoutWidget=QWidget()\r\n self.HBoxLayout=QHBoxLayout()\r\n self.LeftWidgetsLayout=QHBoxLayout()\r\n self.RightWidgetsLayout=QVBoxLayout()\r\n self.display_profile_layout()\r\n self.display_profile_toolbar_widget()\r\n self.setLayout(self.ToolBarWidgetLayout)\r\n self.model=None\r\n \r\n \r\n \r\n \r\n \r\n\r\n def display_profile_layout(self):\r\n self.HBoxLayout.addLayout(self.LeftWidgetsLayout)\r\n self.HBoxLayout.addLayout(self.RightWidgetsLayout)\r\n if not hasattr(self,\"profile_picture\"):\r\n self.profile_picture=ProfilePicture()\r\n self.LeftWidgetsLayout.addWidget(self.profile_picture)\r\n if not hasattr(self,\"first_name\"):\r\n self.first_name=QLabel(\"FirstName\")\r\n self.LeftWidgetsLayout.addWidget(self.first_name)\r\n if not 
hasattr(self,\"last_name\"):\r\n self.last_name=QLabel(\"LastName\")\r\n self.LeftWidgetsLayout.addWidget(self.last_name)\r\n\r\n self.HBoxLayout.addLayout(self.LeftWidgetsLayout)\r\n\r\n if not hasattr(self,\"user_email\"):\r\n self.user_email=QLabel(\"Email\")\r\n self.RightWidgetsLayout.addWidget(self.user_email)\r\n if not hasattr(self,\"recent_tricks\"):\r\n self.recent_tricks=QLabel(\"Recently Completed Tricks\")\r\n self.RightWidgetsLayout.addWidget(self.recent_tricks)\r\n if not hasattr(self,\"recent_tricks_list\"):\r\n self.recent_tricks_list=QLabel(\"Recently Completed Tricks List\")\r\n self.RightWidgetsLayout.addWidget(self.recent_tricks_list)\r\n\r\n self.HBoxLayout.addLayout(self.RightWidgetsLayout)\r\n self.LayoutWidget.setLayout(self.HBoxLayout)\r\n \r\n \r\n\r\n def display_profile_toolbar_widget(self):\r\n if not hasattr(self,\"profile_tool_bar\"):\r\n self.profile_tool_bar=DisplayProfileToolbar()\r\n self.ToolBarWidgetLayout.addWidget(self.profile_tool_bar)\r\n self.ToolBarWidgetLayout.addWidget(self.LayoutWidget)\r\n \r\n \r\n\r\n \r\n \r\n \r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"Implementation/profile_widget.py","file_name":"profile_widget.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"31978403","text":"\"\"\"\nChallenge 5\n\n*** To run both Parts 1 and 2 of this test together, specify the following in a Terminal:\n # Show stdcalls for print statements, loggins calls, etc., by disabling stdout/stderr capturing\n pytest challenges\\test_challenge5.py -s\nor, for fuller output:\n pytest challenges\\test_challenge5.py -rP \n\n*** Part 1 - Create test_challenge5.py and write a test that does the following:\n\n1. Go to copart.com\n2. Search for \"porsche\"\n3. Change Show Entries to 100\n4. 
Print the number of occurrences for each Model\nExample: There might be x3 PANAMERA T and x11 CAYENNE\n\nTo run just Part 1 of this test, specify the following in a Terminal:\n pytest -k print_models_for_given_make -s\nor, for fuller output:\n pytest -k print_models_for_given_make -rP \nThe -rP reporting option gives the full output of passing tests [including print() statements]\n\n*** Part 2 - Using the same, first three steps of Part 1, write a test that then does the following:\n\n1. Count the number of occurrences of each Damage type\n2. However, you need to map the Damage types to these:\n• REAR END\n• FRONT END\n• MINOR DENT/SCRATCHES\n• UNDERCARRIAGE\n3. Any Damage type that does NOT match the above types should be grouped into a MISC Damage type\n• Example: SIDE and ALL OVER would each count towards MISC\n• Example Output: REAR END: 2, FRONT END: 7, MINOR DENT/SCRATCHES: 22, UNDERCARRIAGE: 0, MISC: 4\n\nTo run just Part 2 of this test, specify the following in a Terminal:\n pytest -k print_damage_for_given_make -s\nor, for fuller output:\n pytest -k print_damage_for_given_make -rP\n\"\"\"\n\nfrom collections import Counter\n\n# Custom imports\n\nfrom pages.copart_home import CopartHomePage\nfrom services.header_search import HeaderSearch\nfrom services.copart_search_results_page import CopartSearchResultsPage\n\n\n# Because the DAMAGE column is scrolled out of view,\n# (with no scrollbar to easily scroll it back into view)\n# 'span.text' just returns blank text for it.\n# So, we need to use 'span.get_attribute(\"textContent\")' instead.\n# See https://sqa.stackexchange.com/questions/42907/ +\n# how-to-get-text-from-an-element-when-gettext-fails\ndef get_counts(spans) -> Counter:\n counts = Counter()\n for span in spans:\n key = span.get_attribute(\"textContent\")\n counts.update({key: 1})\n return counts\n\n\n# Challenge 5, Part 1\ndef test_print_models_for_given_make(driver, wait):\n\n # GIVEN the Copart home page is displayed\n\n search_page = 
CopartHomePage(driver, wait)\n search_page.display()\n\n # WHEN the user searches the Copart home page for \"porsche\"\n # AND then changes Show Entries to 100\n\n query = \"porsche\"\n\n HeaderSearch(search_page).search(query)\n\n results_per_page = CopartSearchResultsPage(search_page)\n results_per_page.set_results_per_page(100)\n\n # THEN Print the number of occurrences for each Model\n # Example: ... CAYENNE: 16, ... PANAMERA T: 2, ...\n\n # Get the span elements for the \"Model\" column of the search results table\n spans = search_page.driver.find_elements(\n *CopartHomePage.td_span_locator(\"model\"))\n\n # Count the number of entries of each model in the table rows\n models = get_counts(spans)\n\n # Print models Counter, but ordered alphabetically (except with \"ALL OTHER\" at the end)\n models_list = sorted(list(set(models) - {\"ALL OTHER\"})) + [\"ALL OTHER\"]\n print()\n print(\", \".join('{}: {}'.format(model, models[model])\n for model in models_list))\n print()\n\n\n# Challenge 5, Part 2\ndef test_print_damage_for_given_make(driver, wait):\n \"\"\" Summarize damage information for all vehicles of the specified make \"\"\"\n\n # GIVEN the Copart home page is displayed\n\n search_page = CopartHomePage(driver, wait)\n search_page.display()\n\n # WHEN the user searches the Copart home page for \"porsche\"\n # AND then changes Show Entries to 100\n\n query = \"porsche\"\n\n HeaderSearch(search_page).search(query)\n\n results_per_page = CopartSearchResultsPage(search_page)\n results_per_page.set_results_per_page(100)\n\n # THEN Print the number of occurrences of each Damage type\n # Example: FRONT END: 49, REAR END: 12, MINOR DENT/SCRATCHES: 5, UNDERCARRIAGE: 2, MISC: 32\n\n # Get the span elements for the \"Damage\" column of the search results table\n spans = driver.find_elements(\n *CopartHomePage.td_span_locator(\"damagedescription\"))\n\n MAIN_DAMAGE_TYPES = [\n \"FRONT END\", \"REAR END\", \"MINOR DENT/SCRATCHES\", \"UNDERCARRIAGE\"\n ]\n\n damages = 
get_counts(spans)\n\n # Group all damages not contained in MAIN_DAMAGE_TYPES as \"MISC\"\n damages_grouped = Counter()\n for damage in damages:\n damages_grouped.update({\n (damage if damage in MAIN_DAMAGE_TYPES else \"MISC\"):\n damages[damage]\n })\n\n # Print damages Counter (but in the order of MAIN_DAMAGE_TYPES) + f\", MISC: {misc_count}\"\n print()\n print(\", \".join('{}: {}'.format(damage_type, damages_grouped[damage_type])\n for damage_type in (MAIN_DAMAGE_TYPES + [\"MISC\"])))\n print()\n","sub_path":"challenges/test_challenge5.py","file_name":"test_challenge5.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"195180326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 10 18:18:29 2017\n\n@author: Harvey\n\"\"\"\nfrom random import random as rand, gauss\n\nclass Sensor:\n # accuracy, misses, false alarms\n # location\n def __init__(self, sensor_id, position, lane, accuracy, p_d, fa=None, freq = 1):\n self.id = sensor_id\n self.position = position\n self.lane = lane # can this be a list???\n self.accuracy = accuracy\n self.p_d = p_d\n self.fa = fa\n self.frequency = freq\n \n def genMeasurements(self, truth, measure):\n # truth is a traci instance of the true traffic state\n # measure is a function which retrieves the measured state from truth\n m = measure(self,truth)\n # remove some measurements based on probability of detection\n if m:\n m = [i for i in m if rand() < self.p_d]\n # add Gaussian error to each component of each measurement\n for i in m:\n i['time'] += self.accuracy[0] * gauss(0,1)\n if i['speed'] and (len(self.accuracy) > 1):\n i['speed'] += self.accuracy[1] * gauss(0,1)\n return m\n \n def measure_T_and_V(self,truth):\n m = []\n if truth.inductionloop.getLastStepVehicleNumber(self.id) > 0:\n vd = truth.inductionloop.getVehicleData(self.id)\n for v in vd:\n mm = {}\n mm['sensor'] = self.id\n mm['lane'] = 
truth.inductionloop.getLaneID(self.id)\n mm['time'] = v[2]\n mm['speed'] = truth.inductionloop.getLastStepMeanSpeed(self.id)\n m.append(mm)\n return m\n \nclass Association:\n \n def __init__(self, belief):\n self.belief = belief\n pass\n \n def associate(self, measures):\n pass\n \nclass FusionArchitecture:\n \n def __init__(self,sensors,fusion_nodes,edges):\n self.sensors = sensors\n self.f_nodes = fusion_nodes\n self.f_edges = edges\n \n def genDetectorFile(self,file):\n with open(file, 'w+') as f:\n f.write('\\n')\n for s in self.sensors:\n f.write(' \\n'\n % (s.id, s.lane, s.position, s.frequency))\n f.write('')","sub_path":"utils/fusion_arch.py","file_name":"fusion_arch.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"393695454","text":"from calendar import HTMLCalendar\nfrom django.utils.html import conditional_escape as esc\nfrom django.utils.safestring import mark_safe\nfrom itertools import groupby\nfrom calendar import HTMLCalendar, monthrange\nfrom datetime import date\n\nclass DatelineCalendar(HTMLCalendar):\n\n def __init__(self, labs):\n super(DatelineCalendar, self).__init__()\n self.labs = self.group_by_day(labs)\n\n def formatday(self, day, weekday):\n if day != 0:\n cssclass = self.cssclasses[weekday]\n if date.today() == date(self.year, self.month, day):\n cssclass += ' today'\n if day in self.labs:\n cssclass += ' filled'\n body = ['

    ']\n for workout in self.labs[day]:\n body.append('
  • ')\n body.append(esc(workout.name))\n body.append('
  • ')\n body.append('
')\n return self.day_cell(cssclass, '%d %s' % (day, ''.join(body)))\n return self.day_cell(cssclass, day)\n return self.day_cell('noday', ' ')\n\n def formatmonth(self, year, month):\n self.year, self.month = year, month\n return super(DatelineCalendar, self).formatmonth(year, month)\n\n def group_by_day(self, labs):\n field = lambda workout: workout.close_Date.day\n return dict(\n [(day, list(items)) for day, items in groupby(labs, field)]\n )\n\n def day_cell(self, cssclass, body):\n return '%s' % (cssclass, body)\n","sub_path":"src/django_project/rfwa/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"364465127","text":"from queue import Queue\n\n\"\"\"\nDijkstra's #Preet \n\"\"\"\ndef dijkstra(map, office,target):\n # make an empty dictionary for the answers\n paths = {}\n # make adjacency list dictionary\n adj = {}\n # make dictionary for distances\n distances = {}\n # make weights dictionary of dictionaries\n weights = {}\n for road in map:\n start = road[0]\n end = road[1]\n length = road[2]\n\n if start not in distances:\n if start == office:\n distances[start] = 0\n else:\n distances[start] = float('inf')\n if end not in distances:\n if end == office:\n distances[end] = 0\n else:\n distances[end] = float('inf')\n if start in adj:\n adj[start].append(end)\n else:\n adj[start] = [end]\n if end in adj:\n adj[end].append(start)\n else:\n adj[end] = [start]\n\n if start not in weights:\n weights[start] = {}\n weights[start][end] = length\n if start in weights:\n if end not in weights[start]:\n weights[start][end] = length\n\n if end not in weights:\n weights[end] = {}\n weights[end][start] = length\n if end in weights:\n if start not in weights[end]:\n weights[end][start] = length\n # make predecessors dictionary\n pre = {}\n for key in distances:\n pre[key] = None\n # make empty list\n Q = []\n # insert key value pair (d[v],v) into\n 
# list for each vertex\n for vertex in distances:\n Q.append((distances[vertex], vertex))\n # sort the list in decreasing order\n Q.sort(reverse=True)\n # iterate as long as Q isn't empty\n while Q:\n (dist, u) = Q.pop(-1)\n for v in adj[u]:\n if ((distances[v] > (distances[u] + weights[v][u])) and ((distances[v], v) in Q)): #\n Q.remove((distances[v], v))\n distances[v] = distances[u] + weights[v][u]\n Q.append((distances[v], v))\n Q.sort(reverse=True)\n pre[v] = u\n\n # now add answers to the dictionary\n for key in distances:\n k = key\n path = []\n while k != office:\n path.append(k)\n k = pre[k]\n path.append(office)\n path.reverse()\n paths[key] = path\n if key == target:\n break\n return paths\nclass Package:\n def __init__(self, id):\n self.id = id\n self.address = \"\"\n self.office = \"\"\n self.ownerName = \"\"\n self.collected = False\n self.delivered = False\n\n\nclass Truck:\n def __init__(self, id, n, loc):\n self.id = id\n self.size = n\n self.location = loc\n self.packages = {}\n\n def collectPackage(self, pk):\n if self.location == pk.office and len(self.packages) < self.size:\n # Add package to packages dictionary\n self.packages[pk.id] = pk\n\n # Set package pickedUp to true\n pk.collected = True\n\n def deliverPackage(self, pk):\n # Find delivery location\n # drive to required location\n if pk.id in self.packages:\n # remove the package from truck\n del self.packages[pk.id]\n\n # set delivery status to true for package\n pk.delivered = True\n\n\n def driveTo(self, loc1, loc2):\n # set truck's location to location 2\n if self.location == loc1 and loc1 != loc2:\n self.location = loc2\n\n\n\n\n\"\"\"\ndeliveryService\n\"\"\"\ndef deliveryService(map, truck, packages):\n deliveredTo = {}\n stops = []\n #store all the packages info to make a while loop\n pk_dic = {}\n for package in packages:\n if package.office in pk_dic:\n pk_dic[package.office] = pk_dic[package.office] + [package]\n else:\n pk_dic[package.office] = [package]\n for keys in 
pk_dic:\n pk_dic[keys].sort(key = lambda x: x.address)\n # keep running until all packages are gone\n while pk_dic:\n # check to see if we are at an office\n if truck.location in pk_dic:\n #counter to remove from pk_dic\n s_space = len(truck.packages)\n for pk in pk_dic[truck.location]:\n if pk.delivered != True:\n truck.collectPackage(pk)\n #if counter is greater than 0\n new_space = len(truck.packages) - s_space\n # we will reduce diction\n if new_space > 0:\n pk_dic[truck.location] = pk_dic[truck.location][new_space:]\n if pk_dic[truck.location] == []:\n del pk_dic[truck.location]\n #check for path of eath packages and drive there and delivers\n q = Queue()\n for key in truck.packages:\n q.put([truck.packages[key],truck.packages[key].address])\n while not q.empty():\n pk, loc = q.get()\n path = dijkstra(map, truck.location,loc)\n for i in path[loc][1:]:\n truck.driveTo(truck.location, i)\n stops.append(i)\n truck.deliverPackage(pk)\n deliveredTo[pk.id] = truck.location\n\n #drive to new UPS stop in pk_dic\n if pk_dic != {}:\n loc = list(pk_dic.keys())[0]\n path = dijkstra(map, truck.location,loc)\n for i in path[loc][1:]:\n truck.driveTo(truck.location, i)\n stops.append(i)\n return (deliveredTo, stops)\n","sub_path":"Truck-simulator/part2Task2.py","file_name":"part2Task2.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"455776077","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport inspect\nimport yaml\nimport pycurl\nimport logging\nimport re\nfrom xml.etree import cElementTree as ET # For JUnit output\n\n# Python 3 compatibility\nif sys.version_info[0] > 2:\n from past.builtins import basestring\n\n# Dirty hack to allow for running this as a script :-/\nif __name__ == '__main__':\n sys.path.append(os.path.dirname(os.path.dirname(\n os.path.realpath(__file__))))\n from pyresttest.six import text_type\n from pyresttest.binding import Context\n from 
pyresttest import generators\n from pyresttest import validators\n from pyresttest import tests\n from pyresttest.parsing import *\n\n from pyresttest.validators import Failure\n from pyresttest.tests import Test, DEFAULT_TIMEOUT\n from pyresttest.benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark\n from pyresttest.macros import *\nelse: # Normal imports\n from . import six\n from .six import text_type\n\n # Pyresttest internals\n from . import binding\n from .binding import Context\n from . import generators\n from . import parsing\n from .parsing import *\n from . import validators\n from .validators import Failure\n from . import tests\n from .tests import Test, DEFAULT_TIMEOUT\n from . import benchmarks\n from .benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark\n from . import macros\n from .macros import *\n\n\"\"\"\nExecutable class, ties everything together into the framework.\nModule responsibilities:\n- Read & import test test_files\n- Parse test configs\n- Provide executor methods for sets of tests and benchmarks\n- Collect and report on test/benchmark results\n- Perform analysis on benchmark results\n\"\"\"\nLOGGING_LEVELS = {'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL}\n\nlogging.basicConfig(format='%(levelname)s:%(message)s')\nlogger = logging.getLogger('pyresttest')\n\n\nclass cd:\n \"\"\"Context manager for changing the current working directory\"\"\"\n # http://stackoverflow.com/questions/431684/how-do-i-cd-in-python/13197763#13197763\n\n def __init__(self, newPath):\n self.newPath = newPath\n\n def __enter__(self):\n if self.newPath: # Don't CD to nothingness\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n if self.newPath: # Don't CD to nothingness\n os.chdir(self.savedPath)\n\ndef read_test_file(path):\n \"\"\" Read test file at 'path' in YAML \"\"\"\n # TODO allow use 
of safe_load_all to handle multiple test sets in a given\n # doc\n teststruct = yaml.safe_load(read_file(path))\n return teststruct\n\n\ndef parse_testsets(base_url, test_structure, test_files=set(), working_directory=None, vars=None):\n \"\"\" Convert a Python data structure read from validated YAML to a set of structured testsets\n The data structure is assumed to be a list of dictionaries, each of which describes:\n - a tests (test structure)\n - a simple test (just a URL, and a minimal test is created)\n - or overall test configuration for this testset\n - an import (load another set of tests into this one, from a separate file)\n - For imports, these are recursive, and will use the parent config if none is present\n\n Note: test_files is used to track tests that import other tests, to avoid recursive loops\n\n This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets\n \"\"\"\n\n tests_out = list()\n testset_config = TestSetConfig()\n testsets = list()\n benchmarks = list()\n\n if working_directory is None:\n working_directory = os.path.abspath(os.getcwd())\n\n if vars and isinstance(vars, dict):\n testset_config.variable_binds = vars\n\n # returns a testconfig and collection of tests\n for node in test_structure: # Iterate through lists of test and configuration elements\n if isinstance(node, dict): # Each config element is a miniature key-value dictionary\n node = lowercase_keys(node)\n for key in node:\n if key == u'import':\n importfile = node[key] # import another file\n if importfile not in test_files:\n logger.debug(\"Importing test sets: \" + importfile)\n test_files.add(importfile)\n import_test_structure = read_test_file(importfile)\n with cd(os.path.dirname(os.path.realpath(importfile))):\n import_testsets = parse_testsets(\n base_url, import_test_structure, test_files, vars=vars)\n testsets.extend(import_testsets)\n elif key == u'url': # Simple test, just a GET to a URL\n mytest = Test()\n val = node[key]\n 
assert isinstance(val, basestring)\n mytest.url = base_url + val\n tests_out.append(mytest)\n elif key == u'test': # Complex test with additional parameters\n with cd(working_directory):\n child = node[key]\n mytest = Test.parse_test(base_url, child)\n tests_out.append(mytest)\n elif key == u'benchmark':\n benchmark = parse_benchmark(base_url, node[key])\n benchmarks.append(benchmark)\n elif key == u'config' or key == u'configuration':\n testset_config = parse_configuration(\n node[key], base_config=testset_config)\n testset = TestSet()\n testset.tests = tests_out\n testset.config = testset_config\n testset.benchmarks = benchmarks\n testsets.append(testset)\n return testsets\n\ndef read_file(path):\n \"\"\" Read an input into a file, doing necessary conversions around relative path handling \"\"\"\n with open(path, \"r\") as f:\n string = f.read()\n f.close()\n return string\n\ndef log_failure(failure, context=None, testset_config=TestSetConfig()):\n \"\"\" Log a failure from a test \"\"\"\n logger.error(\"Test Failure, failure type: {0}, Reason: {1}\".format(\n failure.failure_type, failure.message))\n if failure.details:\n logger.error(\"Validator/Error details:\" + str(failure.details))\n\nclass LoggerCallbacks(MacroCallbacks):\n \"\"\" Uses a standard python logger \"\"\"\n def log_status(self, input):\n logger.info(str(input))\n def log_intermediate(self, input):\n logger.debug(str(input))\n def log_failure(self, input):\n logger.error(str(input))\n def log_success(self, input):\n logger.info(str(input))\n\n\nclass JUnitCallback(MacroCallbacks):\n \"\"\" Uses junit standard xml output \"\"\"\n\n def __init__(self):\n self.el_test_suites = None\n self.test_suite_current_id = 0\n self.group_test_suite_map = None\n self.working_directory = os.path.abspath(os.getcwd())\n self.path = 'test-results.xml'\n\n def start_testset(self, input):\n self.el_test_suites = ET.Element('testsuites')\n self.el_test_suites.set('name', self.camelizeStr(str(input)))\n 
self.test_suite_current_id = 0\n self.group_test_suite_map = dict()\n\n def end_testset(self, input):\n self.write_file(self.el_test_suites)\n\n def log_status(self, input):\n logger.info(str(input))\n\n def log_intermediate(self, input):\n logger.debug(\"LOGGER INTERMEDIATE: \" + str(input))\n\n def log_failure(self, input):\n if isinstance(input, TestResponse):\n el_test_suite = self.get_test_suite(input.test.group)\n try:\n num_tests = int(el_test_suite.get('tests', '0'))\n except ValueError:\n num_tests = 0\n el_test_suite.set('tests', str(num_tests + 1))\n try:\n num_failures = int(el_test_suite.get('failures', '0'))\n except ValueError:\n num_failures = 0\n el_test_suite.set('failures', str(num_failures + 1))\n el_test_case = self.start_test_case(el_test_suite, input.test, \"Ko\")\n failure_messages = []\n for idx, failure in enumerate(input.failures):\n el_failure = ET.SubElement(el_test_case, 'failure')\n if failure.message:\n el_failure.set('message', failure.message)\n if failure.failure_type:\n el_failure.set('type', str(failure.failure_type))\n if failure.details:\n failure_messages.append(\"\\n\\n====================================== FAILURE \")\n failure_messages.append(str(idx))\n failure_messages.append(\" DETAILS ======================================\\n\")\n failure_messages.append(failure.details)\n el_system_err = ET.SubElement(el_test_case, 'system-err')\n el_system_err.text = ''.join(failure_messages)\n else:\n logger.error(str(input))\n\n def log_success(self, input):\n if isinstance(input, TestResponse):\n el_test_suite = self.get_test_suite(input.test.group)\n try:\n num_tests = int(el_test_suite.get('tests', '0'))\n except ValueError:\n num_tests = 0\n el_test_suite.set('tests', str(num_tests + 1))\n el_test_case = self.start_test_case(el_test_suite, input.test, \"Ok\")\n else:\n logger.info(str(input))\n\n def get_test_suite(self, group_name):\n \"\"\" Return the test suite for group_name. If it does'nt exist, it will be created. 
\"\"\"\n if group_name in self.group_test_suite_map.keys():\n el_test_suite = self.group_test_suite_map[group_name]\n else:\n el_test_suite = ET.SubElement(self.el_test_suites, 'testsuite')\n el_test_suite.set('id', str(self.test_suite_current_id))\n self.test_suite_current_id += 1\n suite_name = self.aggregate_name(self.el_test_suites.get('name',''), self.camelizeStr(group_name))\n el_test_suite.set('name', suite_name)\n el_test_suite.set('failures', '0')\n self.group_test_suite_map[group_name] = el_test_suite\n return el_test_suite\n\n def start_test_case(self, el_suite, test, status):\n \"\"\" Start a test case and return the Element \"\"\"\n el_test_case = ET.SubElement(el_suite,'testcase')\n el_test_case.set('name', test.name)\n num_assertion = 1 # At least one assertion least on status\n if test.validators:\n num_assertion += len(test.validators)\n el_test_case.set('assertions', str(num_assertion))\n testcase_classname = self.aggregate_name(el_suite.get('name',''), self.camelizeStr(test.name))\n el_test_case.set('classname', testcase_classname)\n el_test_case.set('status', status)\n return el_test_case\n\n def set_o_path(self, path, default_name='test-results.xml'):\n \"\"\" Set output path, where the JUnit results willbe written\n If path is incorrect (directory does not exists), an error is logged and the path stay unchanged.\n\n path -- the path where to write JUnit output\n default_name -- the default name of the file, if not present in the path (default 'test-results.xml')\n \"\"\"\n with cd(self.working_directory):\n if os.path.isdir(path):\n self.path = os.path.join(path, default_name) # Default file name\n else:\n dir_path, filename = os.path.split(path)\n if not os.path.isdir(dir_path): # The directory does not exit, log error\n logger.error('JUnit Error: ouput dir {0} does not exist. 
File will be writed to default path ({1}).'.format(dir_path, self.path))\n else:\n self.path = path\n \n def camelizeStr(self, mystr):\n \"\"\" Return a string formatted to camelCase \"\"\"\n camelized = ''\n if mystr:\n pattern = re.compile('[\\W_]+')\n camelized = pattern.sub('', mystr.title())\n camelized = camelized[0].lower() + camelized[1:]\n return camelized\n \n def aggregate_name(self, *args):\n \"\"\" Aggrgate name to format a java-like package name \"\"\"\n return \".\".join(args)\n \n def set_working_directory(self, dir_path):\n if os.path.isdir(dir_path):\n self.working_directory = dir_path\n else:\n logger.error('Junit Error: setting working dir to {0} : directory does not exist.'.format(dir_path))\n\n def write_file(self, root):\n \"\"\" Write root elemet to file \"\"\"\n tree = ET.ElementTree(root)\n with cd(self.working_directory):\n logger.debug(\"Writing junit output to: {0}\".format(self.path))\n tree.write(self.path, encoding=\"UTF-8\", xml_declaration=True)\n\n\ndef run_testsets(testsets):\n \"\"\" Execute a set of tests, using given TestSet list input \"\"\"\n group_results = dict() # results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n curl_handle = pycurl.Curl()\n myconfig = TestSetConfig()\n if len(testsets) > 0:\n myconfig = testsets[0].config\n testset_name = myconfig.name\n\n # Invoked during macro execution to report results\n # FIXME I need to set up for logging before/after/during requests\n if myconfig.junit:\n callbacks = JUnitCallback()\n if myconfig.working_directory is not None:\n callbacks.set_working_directory(myconfig.working_directory)\n if myconfig.junit_path is not None:\n callbacks.set_o_path(myconfig.junit_path)\n else:\n callbacks = LoggerCallbacks()\n\n callbacks.start_testset(testset_name)\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n context = Context()\n\n # Bind variables & add generators if 
pertinent\n if myconfig.variable_binds:\n context.bind_variables(myconfig.variable_binds)\n if myconfig.generators:\n for key, value in myconfig.generators.items():\n context.add_generator(key, value)\n\n # Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next\n # test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n # Run tests, collecting statistics as needed\n for test in mytests:\n # Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = test.execute_macro(callbacks=callbacks, testset_config=myconfig, context=context, curl_handle=curl_handle)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: # Print failure, increase failure counts for that test group\n # Use result test URL to allow for templating\n logger.error('Test Failed: ' + test.name + \" URL=\" + result.test.url +\n \" Group=\" + test.group + \" HTTP Status Code: \" + str(result.response_code))\n\n # Print test failure reasons\n if result.failures:\n for failure in result.failures:\n log_failure(failure, context=context,\n testset_config=myconfig)\n\n # Increment test failure counts for that group (adding an entry\n # if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: # Test passed, print results\n logger.info('Test Succeeded: ' + test.name +\n \" URL=\" + test.url + \" Group=\" + test.group)\n\n # Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print(\n 'STOP ON FAILURE! 
stopping test set execution, continuing with other test sets')\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logger.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logger.info(\"Benchmark Starting: \" + benchmark.name +\n \" Group: \" + benchmark.group)\n benchmark_result = benchmark.execute_macro(callbacks=callbacks, testset_config=myconfig, context=context)\n print(benchmark_result)\n logger.info(\"Benchmark Done: \" + benchmark.name +\n \" Group: \" + benchmark.group)\n\n if benchmark.output_file: # Write file\n logger.debug(\n 'Writing benchmark to file in format: ' + benchmark.output_format)\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logger.debug(\"Benchmark writing to file: \" +\n benchmark.output_file)\n write_method(my_file, benchmark_result,\n benchmark, testset_config=myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print(\"===================================\")\n\n # Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n\n passfail = {True: u'SUCCEEDED: ', False: u'FAILED: '}\n output_string = \"Test Group {0} {1}: {2}/{3} Tests Passed!\".format(group, passfail[failures == 0], str(test_count - failures), str(test_count))\n\n if myconfig.skip_term_colors:\n print(output_string)\n else:\n if failures > 0:\n print('\\033[91m' + output_string + '\\033[0m')\n else:\n print('\\033[92m' + output_string + '\\033[0m')\n\n callbacks.end_testset(testset_name)\n return total_failures\n\n\ndef register_extensions(modules):\n \"\"\" Import the modules and register their respective extensions \"\"\"\n if isinstance(modules, basestring): # Catch supplying just a string arg\n modules = [modules]\n for ext 
in modules:\n # Get the package prefix and final module name\n segments = ext.split('.')\n module = segments.pop()\n package = '.'.join(segments)\n # Necessary to get the root module back\n module = __import__(ext, globals(), locals(), package)\n\n # Extensions are registered by applying a register function to sets of\n # registry name/function pairs inside an object\n extension_applies = {\n 'VALIDATORS': validators.register_validator,\n 'COMPARATORS': validators.register_comparator,\n 'VALIDATOR_TESTS': validators.register_test,\n 'EXTRACTORS': validators.register_extractor,\n 'GENERATORS': generators.register_generator\n }\n\n has_registry = False\n for registry_name, register_function in extension_applies.items():\n if hasattr(module, registry_name):\n registry = getattr(module, registry_name)\n for key, val in registry.items():\n register_function(key, val)\n if registry:\n has_registry = True\n\n if not has_registry:\n raise ImportError(\n \"Extension to register did not contain any registries: {0}\".format(ext))\n\n# AUTOIMPORTS, these should run just before the main method, to ensure\n# everything else is loaded\ntry:\n import jsonschema\n register_extensions('pyresttest.ext.validator_jsonschema')\nexcept ImportError as ie:\n logging.debug(\n \"Failed to load jsonschema validator, make sure the jsonschema module is installed if you wish to use schema validators.\")\n\ntry:\n import jmespath\n register_extensions('pyresttest.ext.extractor_jmespath')\nexcept ImportError as ie:\n logging.debug(\n \"Failed to load jmespath extractor, make sure the jmespath module is installed if you wish to use jmespath extractor.\")\n\ndef main(args):\n \"\"\"\n Execute a test against the given base url.\n\n Keys allowed for args:\n url - REQUIRED - Base URL\n test - REQUIRED - Test file (yaml)\n print_bodies - OPTIONAL - print response body\n print_headers - OPTIONAL - print response headers\n log - OPTIONAL - set logging level {debug,info,warning,error,critical} 
(default=warning)\n interactive - OPTIONAL - mode that prints info before and after test exectuion and pauses for user input for each test\n absolute_urls - OPTIONAL - mode that treats URLs in tests as absolute/full URLs instead of relative URLs\n skip_term_colors - OPTIONAL - mode that turn off the output term colors\n \"\"\"\n\n if 'log' in args and args['log'] is not None:\n logger.setLevel(LOGGING_LEVELS.get(\n args['log'].lower(), logging.INFO))\n else:\n logger.setLevel(logging.INFO)\n\n if 'import_extensions' in args and args['import_extensions']:\n extensions = args['import_extensions'].split(';')\n\n # We need to add current folder to working path to import modules\n working_folder = args['cwd']\n if working_folder not in sys.path:\n sys.path.insert(0, working_folder)\n register_extensions(extensions)\n\n test_file = args['test']\n test_structure = read_test_file(test_file)\n\n my_vars = None\n if 'vars' in args and args['vars'] is not None:\n my_vars = yaml.safe_load(args['vars'])\n if my_vars and not isinstance(my_vars, dict):\n raise Exception(\"Variables must be a dictionary!\")\n\n # Set up base URL\n base_url = args['url']\n\n if 'absolute_urls' in args and args['absolute_urls']:\n base_url = ''\n\n tests = parse_testsets(base_url, test_structure,\n working_directory=os.path.dirname(test_file), vars=my_vars)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None and bool(args['print_bodies']):\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'print_headers' in args and args['print_headers'] is not None and bool(args['print_headers']):\n t.config.print_headers = safe_to_bool(args['print_headers'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n if 'verbose' in args and args['verbose'] is not None:\n t.config.verbose = safe_to_bool(args['verbose'])\n\n if 'ssl_insecure' in 
args and args['ssl_insecure'] is not None:\n t.config.ssl_insecure = safe_to_bool(args['ssl_insecure'])\n\n if 'skip_term_colors' in args and args['skip_term_colors'] is not None:\n t.config.skip_term_colors = safe_to_bool(args['skip_term_colors'])\n\n if 'junit' in args and args['junit'] is not None:\n t.config.junit = safe_to_bool(args['junit'])\n if 'junit_path' in args and args['junit_path'] is not None:\n t.config.junit_path = args['junit_path']\n\n t.config.working_directory = os.path.dirname(test_file)\n\n # Execute all testsets\n failures = run_testsets(tests)\n\n sys.exit(failures)\n\n\ndef command_line_run(args_in):\n args = parse_command_line_args(args_in)\n main(args)\n\n# Allow import into another module without executing the main method\nif(__name__ == '__main__'):\n command_line_run(sys.argv[1:])\n","sub_path":"pyresttest/resttest.py","file_name":"resttest.py","file_ext":"py","file_size_in_byte":24455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"611792017","text":"import pygame\nfrom pygame.locals import *\nimport time\nimport random\nSIZE=40\nBG_COLOR=(255,0,0)\nclass Apple:\n def __init__(self,screen):\n self.food=pygame.image.load('resources/food.png').convert_alpha()\n self.food=pygame.transform.scale(self.food,(40,40))\n self.screen=screen\n self.x=SIZE*random.randint(0,15)\n self.y=SIZE*random.randint(0,15)\n \n def draw(self):\n self.screen.blit(self.food,(self.x,self.y))\n pygame.display.flip()\n \n def move(self):\n self.x=SIZE*random.randint(0,15)\n self.y=SIZE*random.randint(0,15)\n self.draw()\n\nclass Snake:\n def __init__(self,screen,length):\n self.length=length\n self.screen=screen\n self.block=pygame.image.load('resources/snake.png').convert()\n self.x=[SIZE]*length\n self.y=[SIZE]*length\n self.direction='down'\n\n def draw(self):\n self.screen.fill((255,0,0))\n for i in range(0,self.length):\n self.screen.blit(self.block,(self.x[i],self.y[i]))\n pygame.display.flip()\n \n 
def increase_length(self):\n self.length+=1\n self.x.append(0)\n self.y.append(0)\n\n def move_down(self):\n self.direction='down'\n def move_up(self):\n self.direction='up'\n def move_left(self):\n self.direction='left'\n def move_right(self):\n self.direction='right'\n \n def walk(self):\n for i in range(self.length-1,0,-1):\n self.x[i]=self.x[i-1]\n self.y[i]=self.y[i-1]\n \n if(self.direction=='down'):\n self.y[0]=self.y[0]+SIZE\n if(self.direction=='up'):\n self.y[0]=self.y[0]-SIZE\n if(self.direction=='left'):\n self.x[0]=self.x[0]-SIZE\n if(self.direction=='right'):\n self.x[0]=self.x[0]+SIZE\n\n self.draw()\n\n \nclass Game:\n def __init__(self):\n pygame.init()\n pygame.display.set_caption('Snake by Pratik')\n programIcon = pygame.image.load('resources/icon.png')\n pygame.display.set_icon(programIcon)\n pygame.mixer.init()\n #self.play_bg()\n self.surface=pygame.display.set_mode((680, 680))\n self.surface.fill((255,0,0))\n self.snake=Snake(self.surface,1)\n self.snake.draw() \n self.food=Apple(self.surface)\n self.food.draw()\n\n\n def play_sound(self,sound):\n pygame.mixer.music.load(f'resources/{sound}.mp3')\n pygame.mixer.music.play()\n \n def play_bg(self):\n pygame.mixer.music.load('resources/bg.mp3')\n pygame.mixer.music.play()\n\n def play(self):\n self.snake.walk()\n self.food.draw()\n self.display_score()\n pygame.display.flip()\n \n if self.check_collision(self.snake.x[0],self.snake.y[0],self.food.x,self.food.y):\n self.play_sound('sound')\n self.snake.increase_length()\n self.food.move()\n\n if self.border_collision(self.snake.x[0],self.snake.y[0]):\n self.play_sound('gameover')\n raise 'Game Over'\n \n for i in range(3,self.snake.length):\n if self.check_collision(self.snake.x[0],self.snake.y[0],self.snake.x[i],self.snake.y[i]):\n self.play_sound('gameover')\n raise \"Game Over\"\n\n\n def check_collision(self,x1,y1,x2,y2):\n if x1>=x2 and x1=y2 and y1 0:\n\n for token in tokenized_seq:\n if token not in vocab:\n vocab[token] = nextValue\n 
int_to_word_map[nextValue] = token\n nextValue += 1\n count += 1\n if count >= sizes[idx]:\n break\n if count >= sizes[idx]:\n break\n print(\"finished\")\n return vocab, int_to_word_map\n\n\ndef translate_int_to_words(sample, int_to_word_map):\n answer = []\n for i in range(0, len(sample)):\n answer.append(int_to_word_map.get(sample[i]))\n return answer\n\n\ndef build_test_samples(paths, sample_size, vocab):\n samples = []\n sizes = [sample_size/2, sample_size-sample_size/2]\n for idx, path in enumerate(paths):\n l = os.listdir(path)\n for i in range(0, int(sizes[idx])):\n with open(os.path.join(path, l[i]), encoding='utf-8') as fh:\n sample = convert_words_to_ints(fh.read(), vocab)\n sample = [i for i in sample if i != -1] # Remove all UNK\n samples.append(sample)\n return samples\n\n# Same as above, but for character models.\n\n\ndef build_vocab_chars(paths, sample_size):\n vocab = {}\n nextValue = 0\n count = 0\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n sequence = fh.read()\n if len(sequence) < 500 and len(sequence) > 0:\n for character in sequence:\n if character not in vocab:\n vocab[character] = nextValue\n nextValue += 1\n count += 1\n if count == sample_size:\n break\n if count == sample_size:\n break\n print(\"finished\")\n return vocab\n\n\n# Sample is a plain string - not a list -- UNK token has value zero --> changing it to -1\n# Convert the sample to a integer representation, which is an Nx1 array of ints,\n# where N is the number of tokens in the sequence.\ndef convert_words_to_ints(sample, vocab):\n sequence = sample.split()\n answer = np.zeros(len(sequence), dtype=np.int64)\n for n, token in enumerate(sequence):\n answer[n] = vocab.get(token, -1)\n return answer\n\n\ndef convert_test_seq_into_ints(sample, vocab):\n sequence = sample.split()\n answer = []\n for token in sequence:\n if vocab.get(token, -1) is not None:\n answer.append(vocab.get(token, -1))\n return 
answer\n\n# Same as above, but for characters.\n\n\ndef convert_chars_to_ints(sample, vocab):\n answer = np.zeros(len(sample), dtype=np.uint)\n for n, token in enumerate(sample):\n answer[n] = vocab.get(token, 0)\n return answer\n\n\n# Sample is a plain string - not a list -- UNK token has value zero.\n# Convert the sample to a one-hot representation, which is an NxV matrix,\n# where N is the number of tokens in the sequence and V is the vocabulary\n# size observed on the training data.\ndef convert_words_to_onehot(sample, vocab):\n sequence = sample.split()\n onehot = np.zeros((len(sequence), len(vocab)+1), dtype=np.uint)\n for n, token in enumerate(sequence):\n onehot[n, vocab.get(token, 0)] = 1\n return onehot\n\n# Same as above, but for characters.\n\n\ndef convert_chars_to_onehot(sample, vocab):\n onehot = np.zeros((len(sample), len(vocab)+1), dtype=np.uint)\n for n, token in enumerate(sample):\n onehot[n, vocab.get(token, 0)] = 1\n return onehot\n\n\n# Read every file located at given path, convert to one-hot OR integer representation,\n# and collect the results into a python list.\ndef load_and_convert_data_words_to_onehot(paths, vocab):\n data = []\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n data.append(convert_words_to_onehot(fh.read(), vocab))\n return data\n\n# Same as above, but uses a character model\n\n\ndef load_and_convert_data_chars_to_onehot(paths, vocab):\n data = []\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n data.append(convert_chars_to_onehot(fh.read(), vocab))\n return data\n\n\ndef load_and_convert_data_words_to_ints(paths, vocab, sample_size):\n data = []\n count = 0\n sizes = [sample_size/2, sample_size-sample_size/2]\n for idx, path in enumerate(paths):\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n sample = 
convert_words_to_ints(fh.read(), vocab)\n if len(sample) > 0 and len(sample) < 100:\n data.append(sample)\n count += 1\n if count >= sizes[idx]:\n break\n if count >= sizes[idx]:\n break\n print(\"finished\")\n return data\n\n\ndef load_and_convert_test_data_to_ints(paths, vocab, sample_size):\n data = []\n count = 0\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n sample = convert_test_seq_into_ints(fh.read(), vocab)\n if len(sample) > 0 and len(sample) < 100:\n data.append(sample)\n count += 1\n if count >= sample_size/2:\n break\n if count >= sample_size/2:\n break\n print(\"finished\")\n return data\n\n\ndef load_and_convert_data_words_to_ints(paths, vocab, sample_size):\n data = []\n count = 0\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n sample = convert_words_to_ints(fh.read(), vocab)\n if len(sample) > 0 and len(sample) < 100:\n data.append(sample)\n count += 1\n if count >= sample_size/2:\n break\n if count >= sample_size/2:\n break\n print(\"finished\")\n return data\n\n# Same as above, but uses a character model\n\n\ndef load_and_convert_data_chars_to_ints(paths, vocab, sample_size):\n data = []\n count = 0\n for path in paths:\n for filename in os.listdir(path):\n with open(os.path.join(path, filename), encoding='utf-8') as fh:\n sequence = fh.read()\n if len(sequence) < 500 and len(sequence) > 0:\n data.append(convert_chars_to_ints(sequence, vocab))\n count += 1\n if count == sample_size:\n break\n if count == sample_size:\n break\n return data\n\n\nif __name__ == '__main__':\n print(\"NLP Util smoketest.\")\n paths = ['../data/imdbFor246/train/pos', '../data/imdbFor246/train/neg']\n print(\"Begin loading vocab... \", end='')\n sys.stdout.flush()\n begin = time()\n vocab = build_vocab_chars(paths)\n end = time()\n print('done in', end-begin, 'seconds. 
Found', len(vocab), 'unique tokens.')\n print('Begin loading all data and converting to ints... ', end='')\n sys.stdout.flush()\n begin = time()\n data = load_and_convert_data_chars_to_ints(paths, vocab)\n end = time()\n print('done in', end-begin, 'seconds.')\n\n print(\"Data[0] = \", data[0])\n print('Press enter to quit.')\n input()\n print('Quitting.. may take some time to free memory.')\n","sub_path":"starterCode/nlputil.py","file_name":"nlputil.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"427703882","text":"import gettext\n\nfrom gi.repository import GObject, GLib, Gio\n\nimport grpc\n\nimport transfers\nimport prefs\nimport util\nimport notifications\nfrom util import OpStatus, OpCommand, TransferDirection\n\n_ = gettext.gettext\n\nclass CommonOp(GObject.Object):\n __gsignals__ = {\n \"status-changed\": (GObject.SignalFlags.RUN_LAST, None, ()),\n \"initial-setup-complete\": (GObject.SignalFlags.RUN_LAST, None, ()),\n \"op-command\": (GObject.SignalFlags.RUN_LAST, None, (int,)),\n \"progress-changed\": (GObject.SignalFlags.RUN_LAST, None, ())\n }\n def __init__(self, direction, sender, uris=None):\n super(CommonOp, self).__init__()\n self.uris = uris\n self.sender = sender\n self.direction = direction\n self.status = OpStatus.INIT\n self.start_time = GLib.get_monotonic_time() # for sorting in the op list\n\n self.total_size = 0\n self.total_count = 0\n self.size_string = \"\"\n self.description = \"\"\n self.name_if_single = None\n self.mime_if_single = \"application/octet-stream\" # unknown\n self.gicon = Gio.content_type_get_symbolic_icon(self.mime_if_single)\n\n self.error_msg = \"\"\n\n self.progress_tracker = None\n\n def progress_report(self, report):\n self.current_progress_report = report\n\n if report.progress == 1.0:\n self.status = OpStatus.FINISHED\n self.emit_status_changed()\n return\n\n self.emit(\"progress-changed\")\n\n def 
get_progress_text(self):\n try:\n return self.current_progress_report.progress_text\n except AttributeError:\n return \"\"\n\n def get_progress(self):\n try:\n return self.current_progress_report.progress\n except AttributeError:\n return 0\n\n def set_error(self, e=None):\n if e == None:\n self.error_msg = \"\"\n return\n\n if isinstance(e, GLib.Error):\n self.error_msg = e.message\n elif isinstance(e, grpc.RpcError):\n self.error_msg = e.details()\n else:\n self.error_msg = str(e)\n\n @util._idle\n def emit_initial_setup_complete(self):\n self.emit(\"initial-setup-complete\")\n\n @util._idle\n def emit_status_changed(self):\n self.emit(\"status-changed\")\n\n def set_status(self, status):\n pass\n\nclass SendOp(CommonOp):\n def __init__(self, sender=None, receiver=None, receiver_name=None, uris=None):\n super(SendOp, self).__init__(TransferDirection.TO_REMOTE_MACHINE, sender, uris)\n self.receiver = receiver\n self.sender_name = GLib.get_real_name()\n self.receiver_name = receiver_name\n\n self.resolved_files = []\n self.first_missing_file = None\n\n self.file_send_cancellable = None\n\n self.current_progress_report = None\n\n # These are the first-level base names (no path, just the filename) that we'll send to the server\n # to check for pre-existence. We know that if these files/folders don't exist, none of their children\n # will. 
This is a bit simple, but until we need more, it's fine.\n self.top_dir_basenames = []\n\n def set_status(self, status):\n self.status = status\n self.emit_status_changed()\n\n def prepare_send_info(self):\n self.status = OpStatus.CALCULATING\n self.emit_status_changed()\n\n error = transfers.gather_file_info(self)\n\n self.update_ui_info(error)\n\n def update_ui_info(self, error):\n if error == None:\n self.size_string = GLib.format_size(self.total_size)\n print(\"Calculated %d files, with a size of %s\" % (self.total_count, self.size_string))\n\n if self.total_count > 1:\n # Translators: Don't need to translate singular, we show the filename if there's only one\n self.description = gettext.ngettext(\"%d file\",\n \"%d files\", self.total_count) % (self.total_count,)\n self.gicon = Gio.ThemedIcon.new(\"edit-copy-symbolic\")\n else:\n self.description = self.resolved_files[0].basename\n self.gicon = Gio.content_type_get_symbolic_icon(self.mime_if_single)\n\n self.set_status(OpStatus.WAITING_PERMISSION)\n else:\n if isinstance(error, GLib.Error) and error.code == Gio.IOErrorEnum.NOT_FOUND:\n self.status = OpStatus.FILE_NOT_FOUND\n self.description = \"\"\n self.error_msg = \"\"\n self.first_missing_file = self.top_dir_basenames[-1]\n self.gicon = Gio.ThemedIcon.new(\"dialog-error-symbolic\")\n else:\n self.status = OpStatus.FAILED_UNRECOVERABLE\n self.description = \"\"\n self.set_error(error)\n\n self.emit_initial_setup_complete()\n self.emit_status_changed()\n\n # Widget handlers\n\n def cancel_transfer_request(self):\n self.emit(\"op-command\", OpCommand.CANCEL_PERMISSION_BY_SENDER)\n\n def retry_transfer(self):\n self.emit(\"op-command\", OpCommand.RETRY_TRANSFER)\n\n def pause_transfer(self):\n pass\n\n def stop_transfer(self):\n self.emit(\"op-command\", OpCommand.STOP_TRANSFER_BY_SENDER)\n\n def remove_transfer(self):\n self.emit(\"op-command\", OpCommand.REMOVE_TRANSFER)\n\n# This represents a send or receive 'job', there would be potentially many of 
these.\nclass ReceiveOp(CommonOp):\n def __init__(self, sender):\n super(ReceiveOp, self).__init__(TransferDirection.FROM_REMOTE_MACHINE, sender)\n self.sender_name = self.sender\n self.receiver_name = GLib.get_real_name()\n\n # If there's insufficient disk space, always ask for permission\n # If we're overwriting, there's a preference to check whether we need to ask or not.\n self.have_space = False\n self.existing = False\n\n # This is set when a transfer starts - it's a grpc.Future that we can cancel() if the user\n # wants the transfer to stop.\n self.file_iterator = None\n self.current_progress_report = None\n # These are the first-level base names (no path, just the filename) that we'll send to the server\n # to check for pre-existence. We know that if these files/folders don't exist, none of their children\n # will. This is a bit simple, but until we need more, it's fine.\n self.top_dir_basenames = []\n\n def set_status(self, status):\n self.status = status\n\n if status == OpStatus.FINISHED:\n notifications.TransferCompleteNotification(self)\n\n self.emit_status_changed()\n\n def prepare_receive_info(self):\n self.size_string = GLib.format_size(self.total_size)\n print(\"Transfer request received for %d files, with a size of %s\" % (self.total_count, self.size_string))\n\n self.have_space = util.have_free_space(self.total_size)\n self.existing = util.files_exist(self.top_dir_basenames) and prefs.prevent_overwriting()\n self.update_ui_info()\n\n def update_ui_info(self):\n if self.total_count > 1:\n # Translators: Don't need to translate singular, we show the filename if there's only one\n self.description = gettext.ngettext(\"%d file\",\n \"%d files\", self.total_count) % (self.total_count,)\n self.gicon = Gio.ThemedIcon.new(\"edit-copy-symbolic\")\n else:\n self.description = self.name_if_single\n self.gicon = Gio.content_type_get_symbolic_icon(self.mime_if_single)\n\n self.status = OpStatus.WAITING_PERMISSION\n\n 
notifications.NewOpUserNotification(self)\n self.emit_initial_setup_complete()\n\n # Widget handlers\n def accept_transfer(self):\n self.emit(\"op-command\", OpCommand.START_TRANSFER)\n\n def decline_transfer_request(self):\n self.emit(\"op-command\", OpCommand.CANCEL_PERMISSION_BY_RECEIVER)\n\n def stop_transfer(self):\n self.emit(\"op-command\", OpCommand.STOP_TRANSFER_BY_RECEIVER)\n\n def remove_transfer(self):\n self.emit(\"op-command\", OpCommand.REMOVE_TRANSFER)\n\n","sub_path":"src/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"354442942","text":"'''\n\tFile name: utils.py\n\tAuthor(s): Kevin Lyons, Alex Aubuchon\n\tDate created: 5/12/2017\n\tDate last modified: 6/11/2017\n\tPython Version: 3.5\n\tPurpose: Simple utils script to be used alongside our server, among other files. Various\n\t\ttasks, including model serialization and math operations.\n\tTODO:\n\t\t- None at this time.\n'''\n\n# General imports\nimport sys, json, os, time, pickle, traceback, logging, atexit, subprocess, threading, copy\nimport smtplib, base64, glob, random, numpy as np\nfrom email.mime.text import MIMEText\nfrom enum import Enum\n\n# Prevent TensorFlow log statements\n# Taken from https://github.com/tensorflow/tensorflow/issues/8340\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Keras import for JSON functionality\nfrom keras.models import model_from_json\n\n# Custom imports\nfrom cityiograph import City\nfrom config import *\n\ndef serialize_model(model, root_filename):\n\t'''\n\tSerializes a Keras model to a JSON and h5 data file\n\tInput: \tmodel - instance of Keras model to be serialized\n\troot_filename - string representing root of JSON and h5 data file for model\n\tOutput:\tNone - simply write the model to the files\n\t'''\n\n\t# Convert to JSON\n\tmodel_in_json = model.to_json()\n\n\t# Write to file\n\twith open(root_filename + \".json\", \"w\") 
as json_file:\n\t\tjson_file.write(model_in_json)\n\n\t# Save weights\n\tmodel.save_weights(root_filename + \".h5\")\n\ndef deserialize_model(root_filename):\n\t'''\n\tDeserialze data in .json and .h5 files into a Keras model that can be used for ML prediction\n\tInput: \troot_filename - string representing root of JSON and h5 data file for model\n\tOutput:\tmodel - instance of Keras model taken from data\n\t'''\n\n\t# Read JSON string\n\twith open(root_filename + '.json', 'r') as f:\n\t\tmodel_in_json = f.read()\n\n\t# Load model with architecture and weights\n\tmodel = model_from_json(model_in_json)\n\tmodel.load_weights(root_filename + '.h5')\n\n\t# Compile the model with loss, optimizer and metrics and return\n\tmodel.compile(loss = LOSS_FUNCTION, optimizer = OPTIMIZER, metrics = KERAS_METRICS)\n\treturn model\n\ndef compute_accuracy(true, pred):\n\t'''\n\tCompute percent accuracy between 2 input matrices (true and predicted values)\n\tInput: \ta, b - np array n x ( )\n\tOutput: accuracy - scalar that represents (1 - percent error) between a and b, in range [0, 1]\n\t'''\n\n\t# Simple solution taken from http://stackoverflow.com/questions/20402109/calculating-percentage-error-by-comparing-two-arrays\n\treturn 1 - np.mean(true != pred)\n\ndef cell_features(cell):\n\t'''\n\tGet the 2 input features for a given cell\n\tCurrently using population and is road\n\tInput: \tcell - instance of cityiograph.Cell\n\tOutput:\tfeats - list of input features for this cell\n\t'''\n\tfeats = [ cell.population ]\n\tfeats.append(0) if (cell.type_id == ROAD_ID) else feats.append(1)\n\treturn feats\n\ndef cell_results(cell):\n\t'''\n\tGet the 2 output features for a given cell\n\tCurrently using traffic score and wait time\n\tInput: \tcell - instance of cityiograph.Cell\n\tOutput:\tfeats - list of output features for this cell\n\t'''\n\treturn [ cell.data[\"traffic\"], cell.data[\"wait\"] ]\n\ndef get_features(city):\n\t'''\n\tGet the input feature vector for a given city\n\tInput: 
\tcity - instance of cityiograph.City\n\tOutput:\tfeats - np array of input features for this city\n\t'''\n\tfeatures = []\n\tfor i in range(city.width):\n\t\tfor j in range(city.height):\n\t\t\tcell = city.cells.get((i, j))\n\t\t\tfeatures += cell_features(cell)\n\treturn np.array(features)\n\ndef get_results(city):\n\t'''\n\tGet the output feature vector for a given city\n\tInput: \tcity - instance of cityiograph.City\n\tOutput:\tfeats - np array of output features for this city\n\t'''\n\tresults = []\n\tfor i in range(city.width):\n\t\tfor j in range(city.height):\n\t\t\tcell = city.cells.get((i, j))\n\t\t\tresults += cell_results(cell)\n\treturn np.array(results)\n\ndef output_to_city(city, output):\n\t'''\n\tCustom method to write new data to city object for later serialization\n\tInput:\tcity - instance of cityiograph.City\n\toutput - list of traffic and wait scores, alternating\n\tOutput:\tcity - simply write this data to the existing city object and return\n\t'''\n\ti = 0\n\tfor x in range(city.width):\n\t\tfor y in range(city.height):\n\t\t\tcell = city.cells.get((x, y))\n\t\t\tcell.data[\"traffic\"] = int(round(output[i]))\n\t\t\tcell.data[\"wait\"] = int(round(output[i + 1]))\n\t\t\ti += 2\t\n\treturn city\n\ndef write_city(city, timestamp = None):\n\t'''\n\tWrite a city to a JSON file\n\tInput: \tcity - instance of simCity - city to be logged OR dictionary ready to be logged\n\tOutput: None - write city as JSON to specified filename for later ML purposes\n\t'''\n\n\tif isinstance(city, dict):\n\t\t# Get filename\n\t\tfilename = os.path.join(PREDICTED_CITIES_DIRECTORY, 'city_predicted_output_' + timestamp + '.json')\n\n\t\t# Write to JSON\n\t\twith open(filename, 'w') as f:\n\t\t\tf.write(json.dumps(city))\n\n\telse:\n\t\t# Get filename\n\t\tfilename = city.filename\n\n\t\t# Handle full city object case\n\t\t# Convert to dictionary object for editing\n\t\td = city.cityObject.to_dict()\n\n\t\t# Add UNIX timestamp to JSON\n\t\td['objects']['timestamp'] = 
city.timestamp\n\n\t\t# Write dictionary to JSON\n\t\twith open(filename, 'w') as f:\n\t\t\tf.write(json.dumps(d))\n\n\tlog.info(\"City data written at filename = {}.\".format(os.path.abspath(filename)))\n\n# Set up logging functionality\nlog = logging.getLogger('__main__')\n\n# First time log file initialized\n# Taken from http://stackoverflow.com/questions/82831/how-do-i-check-whether-a-file-exists-using-python\nif not os.path.isfile(LOGGER_FILENAME):\n\tfirst = True\nelse:\n\tfirst = False\n\n# Set up logger to file AND console\n# Taken from multiple sources\n# http://stackoverflow.com/questions/13733552/logger-configuration-to-log-to-file-and-print-to-stdout\n# https://docs.python.org/2/howto/logging.html\nlogFormatter = logging.Formatter(\"%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s\", '%m/%d/%Y %I:%M:%S %p')\nlog.setLevel(logging.DEBUG)\n\nfileHandler = logging.FileHandler(LOGGER_FILENAME)\nfileHandler.setFormatter(logFormatter)\nlog.addHandler(fileHandler)\n\nconsoleHandler = logging.StreamHandler(sys.stdout)\nconsoleHandler.setFormatter(logFormatter)\nlog.addHandler(consoleHandler)\n\nif first:\n\tlog.info(\"Successfully initialized log file at {}.\".format(LOGGER_FILENAME))\n\n# Configure log to handle all uncaught exceptions and reboot server, if needed\n# Taken from http://stackoverflow.com/questions/8050775/using-pythons-logging-module-to-log-all-exceptions-and-errors\n# Also from http://stackoverflow.com/questions/4564559/get-exception-description-and-stack-trace-which-caused-an-exception-all-as-a-st\ndef handler(t, value, tb):\n\t# Log exception\n\tmessage = str(value) + \"\\n\" + \"\\n\".join(traceback.format_tb(tb))\n\tlog.exception(message)\n\n\t# Determine the source and act accordingly\n\tfilename = traceback.extract_tb(tb)[0].filename\n\tif t != KeyboardInterrupt and filename == os.path.basename(SERVER_FILENAME):\n\t\tlog.warning(SERVER_NAME + \" has been stopped.\")\n\t\tif AUTO_RESTART:\n\t\t\tlog.warning(\"Attempting to reboot 
{}...\".format(SERVER_NAME))\n\t\t\ttime.sleep(5) # Small delay\n\t\t\trestart(message)\n\t\telse:\n\t\t\tnotify(message, False)\n\n# Set default hook for system\nsys.excepthook = handler\n\ndef restart(message):\n\t'''\n\tRestarts the CityMatrixServer after some fatal error. Notifies of any error message via e-mail.\n\tInput: \tmessage - string describing the error message\n\tOutput:\tNone - restart server and send e-mail accordingly\n\t'''\n\n\tdid_restart = False\n\ttry:\n\t\tsubprocess.Popen([PYTHON_VERSION, SERVER_FILENAME, \"FALSE\"])\n\texcept:\n\t\tlog.exception(\"Unable to restart \" + SERVER_NAME + \".\")\n\telse:\n\t\tdid_restart = True\n\tfinally:\n\t\tnotify(message, did_restart)\n\ndef notify(message, did_restart):\n\t'''\n\tSends notification of server crash and reboot to users.\n\tInput:\tmessage - string describing the error message\n\t\t\tdid_restart - bool indiciating success of restart operation\n\tOutput:\tNone - send e-mail to users\n\t'''\n\n\ttry:\n\t\t# Retreive data from credentials file\n\t\tcred = pickle.load(open(CREDENTIALS_FILENAME, 'rb'))\n\t\tusername, password = tuple(base64.b64decode(cred[k]).decode() for k in ['u', 'p'])\n\t\t\n\t\t# Set up STMP connection\n\t\tserver = smtplib.SMTP(SMTP_HOSTNAME, SMTP_PORT)\n\t\tserver.ehlo()\n\t\tserver.starttls()\n\t\tserver.login(username, password)\n\n\t\t# Prepare e-mail message\n\t\tbody = 'This is a notice that {} has been stopped. 
See below for stack trace information.\\n\\n{}\\n\\n'.format(SERVER_NAME, message)\n\t\tif did_restart:\n\t\t\tbody += '{} was able to successfully restart.'.format(SERVER_NAME)\n\t\telse:\n\t\t\tbody += '{} could not restart at this time.'.format(SERVER_NAME)\n\n\t\tmsg = MIMEText(body)\n\t\tmsg['Subject'] = '{} has been stopped.'.format(SERVER_NAME)\n\t\tmsg['From'] = username\n\t\tmsg['To'] = \", \".join(EMAIL_LIST)\n\n\t\t# Send message and log\n\t\tserver.sendmail(username, EMAIL_LIST, msg.as_string())\n\t\tserver.close()\n\t\tlog.info(\"Successfully notified users via e-mail.\")\n\n\texcept Exception as e:\n\t\tlog.exception(e)\n\t\tlog.warning(\"Unable to notify users via e-mail.\")\n\nclass CityChange(Enum):\n\t'''\n\tCustom enum to describe the difference between two cities.\n\t'''\n\tNO = -1, # Exact same cities\n\tFIRST = 0, # First city in our directory\n\tDENSITY = 1, # Some change in the density array\n\tCELL = 2 # Some change in a road/building cell on the grid\n\ndef diff_cities(current_city, prev_city = None):\n\t'''\n\tDetermine if a new city is different from the existing one in memory, and if so, how?\n\tInput: \tcurrent_city - instance of cityiograph.City object - incoming city to server\n\t\t\tprev_city - instance of cityiograph.City object - may be given if we are doing direct comparison\n\tOutput:\tReturn the difference between current city and previouly saved one\n\t'''\n\n\tif prev_city is None:\n\t\t# First, get the most recent city from our saved set\n\t\t# Taken from http://stackoverflow.com/questions/39327032/how-to-get-the-latest-file-in-a-folder-using-python\n\t\tfiles = glob.glob(INPUT_CITIES_DIRECTORY + '*')\n\n\t\t# If this is the first city, return need for prediction\n\t\tif len(files) == 0: return ( CityChange.FIRST , True )\n\n\t\t# Run comparison on this city and most recent one\n\t\twith open(max(files, key = os.path.getctime), 'r') as f:\n\t\t\t# Load prev_city from JSON\n\t\t\tprev_city = City(f.read())\n\t\t\n\t# Now, 
compare directly for densities, size and cells\n\tif prev_city.equals(current_city):\n\t\treturn ( CityChange.NO, False ) # No difference\n\telse: # Yes, we have a difference, let's explore\n\t\tresult = []\n\t\tif prev_city.densities != current_city.densities:\n\t\t\tfor i, d in enumerate(prev_city.densities):\n\t\t\t\tif current_city.densities[i] != d:\n\t\t\t\t\tresult.append(i)\n\t\t\treturn ( CityChange.DENSITY , [ result, prev_city ] )\n\t\telse:\n\t\t\t# We likely have some cell mismatch(es) - need to find\n\t\t\tfor x in range(prev_city.width):\n\t\t\t\tfor y in range(prev_city.height):\n\t\t\t\t\told = prev_city.cells.get((x, y))\n\t\t\t\t\tnew = current_city.cells.get((x, y))\n\t\t\t\t\tif not old.equals(new):\n\t\t\t\t\t\tresult.append( (x, y) )\n\t\t\treturn ( CityChange.CELL , [ result, prev_city ] )\n\n# Set up our exception handler on this new thread\n# Taken from https://bugs.python.org/issue1230540\nrun_old = threading.Thread.run\ndef run(*args, **kwargs):\n\ttry:\n\t\trun_old(*args, **kwargs)\n\texcept (KeyboardInterrupt, SystemExit):\n\t\traise\n\texcept:\n\t\tsys.excepthook(*sys.exc_info())\n\t\tthreading.Thread.run = run\n\ndef async_process(commands, hook, log, city):\n\t'''\n\tCustom method to run process on new thread and notify with hook when returned\n\tTaken from http://stackoverflow.com/questions/2581817/python-subprocess-callback-when-cmd-exits\n\tInput: \tcommands - list of strings of commands to be send to subprocess module\n\thook - function to be called once simulation is complete\n\tlog - instance of CityLogger that we can write to on new thread\n\tcity - instance of sim.SimCity that we are simulating\n\tOutput:\tNone - simply run the process and notify when complete\n\t'''\n\tdef run(commands, hook, log, city):\n\t\t# Run process with commands and streams\n\t\tp = subprocess.Popen(commands, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n\t\t\n\t\t# Taken from 
http://stackoverflow.com/questions/15535240/python-popen-write-to-stdout-and-log-file-simultaneously\n\t\t# Also, rstrip from http://stackoverflow.com/questions/275018/how-can-i-remove-chomp-a-newline-in-python\n\t\tfor line in p.stdout: log.info(line.decode('utf-8').rstrip())\n\t\t\n\t\tstatus = p.wait() # Wait for command to complete\n\t\thook(city, status) # Call our hook with city object\n\t\treturn\n\n\t# Set up threading functionality\n\tthread = threading.Thread(target = run, args = (commands, hook, log, city))\n\tthread.start()\n\ndef metrics_dictionary(metrics):\n\t'''\n\tHelper method to convert list of tuples to dictionary for JSON submission.\n\tInput:\tmetrics - list of tuples of the form [ ('Population Density Performance', 0.11217427049946581, 1) , ... ]\n\tOutput:\td - dictionary mapping metric name -> value\n\t'''\n\n\treturn { name : [ value , weight ] for name, value, weight in metrics }","sub_path":"global/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"531646236","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport time\nimport sys\n\ndef get_logger(name, console_log = True, file_log = True):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG) # 设置默认日志等级\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')\n if console_log:\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.formatter = formatter\n logger.addHandler(console_handler)\n if file_log:\n # file_handler = logging.FileHandler(name + time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()) + \".log\")\n file_handler = logging.FileHandler(name + \".log\")\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return 
logger","sub_path":"lib/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"224547938","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score\r\nmsg = pd.read_csv('document.csv', names=['message', 'label'])\r\nprint(\"Total Instances of Dataset: \", msg.shape[0])\r\nmsg['labelnum'] = msg.label.map({'pos': 1, 'neg': 0})\r\nX = msg.message\r\ny = msg.labelnum\r\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y)\r\ncount_v = CountVectorizer()\r\nXtrain_dm = count_v.fit_transform(Xtrain)\r\nXtest_dm = count_v.transform(Xtest)\r\n#df = pd.DataFrame(Xtrain_dm.toarray(),columns=count_v.get_feature_names())\r\nclf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2), random_state=1)\r\nclf.fit(Xtrain_dm, ytrain)\r\npred = clf.predict(Xtest_dm)\r\nprint('Accuracy Metrics:')\r\nprint('Accuracy: ', accuracy_score(ytest, pred))\r\nprint('Recall: ', recall_score(ytest, pred))\r\nprint('Precision: ', precision_score(ytest, pred))\r\nprint('Confusion Matrix: \\n', confusion_matrix(ytest, pred))\r\n\r\n","sub_path":"backpropagation.py","file_name":"backpropagation.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6341867","text":"from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom sentry import roles\nfrom sentry.web.frontend.base import BaseView\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.utils.encoding import force_str\nfrom 
django.core.signing import BadSignature, SignatureExpired\nfrom sentry.utils.signing import unsign\nfrom sentry.models import AuditLogEntryEvent, OrganizationMember, Organization, Team, TeamStatus, Project\n\n\nclass AcceptProjectTransferForm(forms.Form):\n team = forms.ChoiceField(choices=[])\n\n def __init__(self, request, *args, **kwargs):\n super(AcceptProjectTransferForm, self).__init__(*args, **kwargs)\n teams = []\n for o in Organization.objects.get_for_user(request.user):\n # getting ALL the teams for the organization - not scoped to organizationmember\n for t in Team.objects.filter(organization=o, status=TeamStatus.VISIBLE):\n option = \" %s - %s\" % (t.name, o.name)\n teams.append([t.id, option])\n\n self.fields['team'].choices = teams\n self.fields['team'].widget.choices = self.fields['team'].choices\n\n\nclass AcceptProjectTransferView(BaseView):\n required_scope = 'org:admin'\n sudo_required = True\n\n def get_form(self, request):\n if request.method == 'POST':\n return AcceptProjectTransferForm(request, request.POST, initial=request.POST)\n return AcceptProjectTransferForm(request)\n\n def handle(self, request, *args, **kwargs):\n try:\n d = request.GET['data']\n except KeyError:\n raise Http404\n\n try:\n data = unsign(force_str(d))\n except BadSignature:\n messages.add_message(\n request, messages.ERROR,\n _(u'Could not approve transfer, please make sure link is valid.')\n )\n return HttpResponseRedirect(\n reverse('sentry')\n )\n except SignatureExpired:\n messages.add_message(\n request, messages.ERROR,\n _(u'Project transfer link has expired!')\n )\n return HttpResponseRedirect(\n reverse('sentry')\n )\n\n project_id = data['project_id']\n user_id = data['user_id']\n transaction_id = data['transaction_id']\n from_organization_id = data['from_organization_id']\n if user_id != request.user.id:\n messages.add_message(\n request, messages.ERROR,\n _(u'Invalid permissions!')\n )\n return HttpResponseRedirect(\n reverse('sentry')\n )\n\n # check if 
user is still an owner\n if not OrganizationMember.objects.filter(\n role=roles.get_top_dog().id,\n user__is_active=True,\n user_id=user_id,\n ).exists():\n return HttpResponseRedirect(\n reverse('sentry')\n )\n\n try:\n project = Project.objects.get(id=project_id, organization_id=from_organization_id)\n except Project.DoesNotExist:\n messages.add_message(\n request, messages.ERROR,\n _(u'Project no longer exists')\n )\n return HttpResponseRedirect(\n reverse('sentry')\n )\n\n form = self.get_form(request)\n if form.is_valid():\n # transfer the project\n team_id = form.cleaned_data.get('team')\n new_team = Team.objects.get(id=team_id)\n project.transfer_to(new_team)\n\n self.create_audit_entry(\n request=request,\n organization=project.organization,\n target_object=project_id,\n event=AuditLogEntryEvent.PROJECT_ACCEPT_TRANSFER,\n data=project.get_audit_log_data(),\n transaction_id=transaction_id,\n )\n\n return HttpResponseRedirect(\n reverse('sentry-organization-home', args=[new_team.organization.slug])\n )\n\n context = {\n 'project': project,\n 'form': form,\n }\n return self.respond('sentry/projects/accept_project_transfer.html', context)\n","sub_path":"src/sentry/web/frontend/accept_project_transfer.py","file_name":"accept_project_transfer.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"446624458","text":"import sqlite3\nfrom bottle import route, run, debug, template, request, static_file, error\n\n# only needed when you run Bottle on mod_wsgi\nfrom bottle import default_app\n\n\n@route('/orders')\ndef todo_list():\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM orders JOIN menu ON orders.id = menu.id WHERE status LIKE '1' \")\n result = c.fetchall()\n c.close()\n\n output = template('make_table', rows=result)\n return output\n\n@route('/menu')\ndef menu_list():\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n 
c.execute(\"SELECT id, name, quantity FROM menu WHERE quantity >= 0\")\n result = c.fetchall()\n c.close()\n\n output = template('make_table', rows=result)\n return output\n\n\n@route('/new', method='GET')\ndef new_item():\n\n if request.GET.save:\n\n new = request.GET.task.strip()\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n\n c.execute(\"INSERT INTO orders (id, quantity, status) VALUES (?,?)\", (new, 1, 1))\n new_id = c.lastrowid\n\n conn.commit()\n c.close()\n\n return '

The new item was inserted into the database, the ID is %s

' % new_id\n\n else:\n return template('new_task.tpl')\n\n\n@route('/edit/', method='GET')\ndef edit_item(no):\n\n if request.GET.save:\n edit = request.GET.task.strip()\n status = request.GET.status.strip()\n\n if status == 'open':\n status = 1\n else:\n status = 0\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n c.execute(\"UPDATE orders SET item = ?, status = ? WHERE id LIKE ?\", (edit, status, no))\n conn.commit()\n\n return '

The item number %s was successfully updated

' % no\n else:\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n c.execute(\"SELECT item FROM orders WHERE id LIKE ?\", (str(no)))\n cur_data = c.fetchone()\n\n return template('edit_task', old=cur_data, no=no)\n\n\n@route('/item')\ndef show_item(item):\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM orders JOIN menu ON orders.id = menu.id WHERE id LIKE ?\", (item,))\n result = c.fetchall()\n c.close()\n\n if not result:\n return 'This item number does not exist!'\n else:\n return 'item: %s' % result[0]\n\n\n@route('/help')\ndef help():\n\n static_file('index.html', root='')\n\n\n@route('/json')\ndef show_json(json):\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM orders JOIN menu ON orders.id = menu.id WHERE status LIKE '1'\", (json,))\n result = c.fetchall()\n c.close()\n\n if not result:\n return {'item': 'This item number does not exist!'}\n else:\n return {'item': result[0]}\n\n@route('/open')\ndef show_open():\n\n conn = sqlite3.connect('order.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM orders JOIN menu ON orders.id = menu.id WHERE status LIKE '1'\")\n result = c.fetchall()\n c.close()\n\n if not result:\n return {'Data': 'No open orders exist!'}\n else:\n return {'Open orders': result}\n\n@error(403)\ndef mistake403(code):\n return 'There is a mistake in your url!'\n\n\n@error(404)\ndef mistake404(code):\n return 'Sorry, this page does not exist!'\n\n\ndebug(True)\nrun(reloader=True, host='0.0.0.0', port=8080)\n# run(reloader=True)\n# remember to remove reloader=True and debug(True) when you move your\n# application from development to a productive environment\n","sub_path":"menubot/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"33350706","text":"# from itertools import chain\r\n\r\nclass Solution:\r\n # @param strs, a list of 
strings\r\n # @return a list of strings\r\n def anagrams(self, strs):\r\n table = {} \r\n for s in strs: \r\n code = self.hash_code(s)\r\n table.setdefault(code, []).append(s)\r\n \r\n res = [lst for lst in table.values() if len(lst) >= 2]\r\n return list(itertools.chain.from_iterable(res))\r\n \r\n def hash_code(self, s): \r\n code = [0] * 26 \r\n for c in s: \r\n code[ord(c) - ord('a')] += 1\r\n return ''.join(map(str, code)) # LIST is unhashable.\r\n \r\n ","sub_path":"leetcode/20120319_anagrams.py","file_name":"20120319_anagrams.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"465818335","text":"import numpy as np\n# GMG 를 이용한 배경제거 시도\n# Author : 박재현\nimport cv2\nfont = cv2.FONT_HERSHEY_COMPLEX # normal size serif font\ncap = cv2.VideoCapture('videos\\\\newvtest.avi')\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1))\nfgbg = cv2.bgsegm.createBackgroundSubtractorGMG()\nwhile(1):\n ret, frame = cap.read()\n if ret == 1:\n #frame = cv2.resize(frame, None, fx=0.3, fy=0.3, interpolation=cv2.INTER_AREA)\n fgmask = fgbg.apply(frame)\n fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n cv2.putText(frame, 'Original', (210, 30), font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.putText(fgmask, 'GMG', (290, 30), font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow('GMG',fgmask)\n cv2.imshow('ORIGINAL', frame)\n cv2.moveWindow('ORIGINAL', 100, 100)\n cv2.moveWindow('GMG', 500, 100)\n k = cv2.waitKey(1) & 0xff\n if k == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()","sub_path":"src/GMG.py","file_name":"GMG.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"194480311","text":"from config import *\r\ndef new_game_click():\r\n return NEW_GAME\r\n\r\ndef exit_click():\r\n return EXIT\r\n\r\nclass Button:\r\n def __init__(self, pos, size, color, 
shader):\r\n self.clickAction = None\r\n self.label = None\r\n self.pos = pos\r\n self.size = size\r\n self.color = color\r\n self.invertedColor = [0,0,0]\r\n for channel in range(3):\r\n self.invertedColor[channel] = abs(1 - self.color[channel])\r\n self.shader = shader\r\n\r\n self.vertices = (\r\n pos[0] - size[0]/2, pos[1] + size[1]/2, color[0], color[1], color[2],\r\n pos[0] - size[0]/2, pos[1] - size[1]/2, color[0], color[1], color[2],\r\n pos[0] + size[0]/2, pos[1] - size[1]/2, color[0], color[1], color[2],\r\n\r\n pos[0] - size[0]/2, pos[1] + size[1]/2, color[0], color[1], color[2],\r\n pos[0] + size[0]/2, pos[1] - size[1]/2, color[0], color[1], color[2],\r\n pos[0] + size[0]/2, pos[1] + size[1]/2, color[0], color[1], color[2]\r\n )\r\n self.vertices = np.array(self.vertices, dtype=np.float32)\r\n \r\n glUseProgram(self.shader)\r\n self.vao = glGenVertexArrays(1)\r\n glBindVertexArray(self.vao)\r\n self.vbo = glGenBuffers(1)\r\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\r\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\r\n\r\n glEnableVertexAttribArray(0)\r\n glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(0))\r\n\r\n glEnableVertexAttribArray(1)\r\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(8))\r\n \r\n def inside(self, pos):\r\n for i in range(2):\r\n if pos[i] < (self.pos[i] - self.size[i]) or pos[i] > (self.pos[i] + self.size[i]):\r\n return False\r\n return True\r\n \r\n def handle_mouse_movement(self, pos):\r\n if self.inside(pos):\r\n newColor = self.invertedColor\r\n if self.label is not None:\r\n self.label.color = np.array(self.color,dtype=np.float32)\r\n else:\r\n newColor = self.color\r\n if self.label is not None:\r\n self.label.color = np.array(self.color,dtype=np.float32)\r\n \r\n for i in range(6):\r\n self.vertices[5 * i + 2] = newColor[0]\r\n self.vertices[5 * i + 3] = newColor[1]\r\n self.vertices[5 * i + 4] = newColor[2]\r\n \r\n 
glBindBuffer(GL_ARRAY_BUFFER,self.vbo)\r\n memoryHandle = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)\r\n ctypes.memmove(ctypes.c_void_p(memoryHandle), ctypes.c_void_p(self.vertices.ctypes.data), self.vertices.nbytes)\r\n glUnmapBuffer(GL_ARRAY_BUFFER)\r\n \r\n def handle_mouse_click(self, pos):\r\n if self.inside(pos):\r\n if self.clickAction is not None:\r\n return self.clickAction()\r\n return CONTINUE\r\n\r\n def draw(self):\r\n glUseProgram(self.shader)\r\n glBindVertexArray(self.vao)\r\n glDrawArrays(GL_TRIANGLES,0,6)\r\n \r\n def destroy(self):\r\n glDeleteBuffers(1, (self.vbo,))\r\n glDeleteVertexArrays(1, (self.vao,))\r\n\r\nclass TextLine:\r\n def __init__(self, font, text, shader, fontsize, startPos, color):\r\n self.font = font\r\n self.text = text\r\n self.shader = shader\r\n self.vertices = []\r\n self.vertexCount = 0\r\n self.fontsize = fontsize\r\n self.startPos = startPos\r\n self.color = np.array(color, dtype=np.float32)\r\n\r\n glUseProgram(self.shader)\r\n self.vao = glGenVertexArrays(1)\r\n self.vbo = glGenBuffers(1)\r\n self.build_text()\r\n\r\n def build_text(self):\r\n self.vertices = []\r\n self.vertexCount = 0\r\n\r\n for i in range(len(self.text)):\r\n character = self.text[i]\r\n if character in FONT_TEX_COORDS:\r\n #top left pos\r\n self.vertices.append(self.startPos[0] + i * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] + self.fontsize[1])\r\n #top left tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][0])\r\n self.vertices.append(FONT_TEX_COORDS[character][1] - 0.15/16)\r\n #top right pos\r\n self.vertices.append(self.startPos[0] + (i + 1) * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] + self.fontsize[1])\r\n #top right tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][2])\r\n self.vertices.append(FONT_TEX_COORDS[character][1] - 0.15/16)\r\n #bottom right pos\r\n self.vertices.append(self.startPos[0] + (i + 1) * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] - 
self.fontsize[1])\r\n #bottom right tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][2])\r\n self.vertices.append(FONT_TEX_COORDS[character][3] - 0.15/16)\r\n\r\n #bottom right pos\r\n self.vertices.append(self.startPos[0] + (i + 1) * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] - self.fontsize[1])\r\n #bottom right tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][2])\r\n self.vertices.append(FONT_TEX_COORDS[character][3] - 0.15/16)\r\n #bottom left pos\r\n self.vertices.append(self.startPos[0] + i * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] - self.fontsize[1])\r\n #bottom left tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][0])\r\n self.vertices.append(FONT_TEX_COORDS[character][3] - 0.15/16)\r\n #top left pos\r\n self.vertices.append(self.startPos[0] + i * self.fontsize[0])\r\n self.vertices.append(self.startPos[1] + self.fontsize[1])\r\n #top left tex coord\r\n self.vertices.append(FONT_TEX_COORDS[character][0])\r\n self.vertices.append(FONT_TEX_COORDS[character][1] - 0.15/16)\r\n self.vertexCount += 6\r\n\r\n self.vertices = np.array(self.vertices,dtype=np.float32)\r\n glUseProgram(self.shader)\r\n glBindVertexArray(self.vao)\r\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\r\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\r\n\r\n glEnableVertexAttribArray(0)\r\n glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 16, ctypes.c_void_p(0))\r\n\r\n glEnableVertexAttribArray(1)\r\n glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 16, ctypes.c_void_p(8))\r\n\r\n def draw(self):\r\n glUseProgram(self.shader)\r\n self.font.use()\r\n glUniform3fv(glGetUniformLocation(self.shader, \"color\"), 1, self.color)\r\n glBindVertexArray(self.vao)\r\n glDrawArrays(GL_TRIANGLES, 0, self.vertexCount)\r\n\r\n def destroy(self):\r\n glDeleteBuffers(1, (self.vbo,))\r\n glDeleteVertexArrays(1, (self.vao,))\r\n","sub_path":"pyopengl/19 - 
Shadows/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"390244106","text":"import PyPDF2\nimport string\nimport math\nfrom collections import Counter\n\ndef extract_text(FILE_PATH):\n pdfFileObject = open(FILE_PATH, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObject)\n count = pdfReader.numPages\n for i in range(count):\n page = pdfReader.getPage(i)\n write_text(page.extractText())\n\ndef write_text(text):\n f = open(\"text.txt\", \"a+\")\n text = text.rstrip(\"\\n\")\n f.write(text)\n f.close()\n\ndef get_words(FILE_PATH):\n words = []\n with open(FILE_PATH,'r') as f:\n for line in f:\n for word in line.split():\n word.rstrip(\"\\n\")\n\n for x in word.lower(): \n if x in string.punctuation: \n word = word.replace(x, \"\") \n \n if word not in string.punctuation:\n words.append(word)\n words_counter = Counter(words)\n return words_counter\n\n\ndef counter_cosine_similarity(c1, c2):\n terms = set(c1).union(c2)\n dotprod = sum(c1.get(k, 0) * c2.get(k, 0) for k in terms)\n magA = math.sqrt(sum(c1.get(k, 0)**2 for k in terms))\n magB = math.sqrt(sum(c2.get(k, 0)**2 for k in terms))\n return dotprod / (magA * magB)\n\nif __name__ == \"__main__\":\n PDF_FILE_PATH = 'lect5/lect5.pdf'\n TEXT_FILE_PATH = 'text.txt'\n NOTES_FILE_PATH = 'lect5/lect5_otp.txt'\n\n extract_text(PDF_FILE_PATH)\n\n words_counter1 = get_words(TEXT_FILE_PATH)\n words_counter2 = get_words(NOTES_FILE_PATH)\n\n sim_score = counter_cosine_similarity(words_counter1, words_counter2)\n print(\"Similarity score : \", sim_score )\n ","sub_path":"testing/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"124530226","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom gerador_modelo import get_model\n\nmodel, X_train, X_test, y_train, 
y_test = get_model(['temp_max', 'chuva', 'fds'])\n\ny_preview_train = model.predict(X_train)\nerrors = y_train - y_preview_train\n\nax = sns.scatterplot(x=y_preview_train, y=y_train)\nax.set_title('Previsão x Real')\nax.set_xlabel('Consumo de Cerveja (litros) - Previsão')\nax.set_ylabel('Consumo de Cerveja (litros) - Real')\nplt.show()\n\nax = sns.scatterplot(x=y_preview_train, y=errors, s=150)\nax.set_title('Previsão Resíduos')\nax.set_xlabel('Consumo de Cerveja (litros) - Previsão')\nax.set_ylabel('Resíduos')\nplt.show()\n\n# Checando se a variância dos resíduos é constante\nax = sns.scatterplot(x=y_preview_train, y=errors**2, s=150)\nax.set_title('Previsão Resíduos²')\nax.set_xlabel('Consumo de Cerveja (litros) - Previsão')\nax.set_ylabel('Resíduos²')\nplt.show()\n\nax = sns.distplot(errors)\nax.set_title('Distribuição de Frequências dos Resíduos')\nax.set_xlabel('Litros')\nplt.show()\n","sub_path":"visualizando_graficos_modelo.py","file_name":"visualizando_graficos_modelo.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"443069888","text":"import random\nimport csv\nimport gym\nimport numpy as np\nimport gym_notif # Requires import even though IDE says it is unused\nfrom timeit import default_timer\nfrom gym_notif.envs.mobile_notification import MobileNotification\nfrom ml_metrics import MLMetrics, OverallMetrics\n\n\ndef get_q_state_index(possible_values: dict, notif: MobileNotification):\n # inputs\n # possible_values: dict values are a list of all possible values for their key category\n # (e.g. 
possible_states[\"time_of_day_states\"] = [\"morn\", \"afternoon\", \"evening\"]\n\n # Q-State-Index is calculated of the combination of the indices of the three features in their possible value list\n q_state_index = 0\n q_state_index += possible_values[\"package_states\"].index(notif.appPackage) * len(possible_values[\"category_states\"]) * \\\n len(possible_values[\"time_of_day_states\"]) # List of package states\n q_state_index += possible_values[\"category_states\"].index(notif.category) * len(possible_values[\"time_of_day_states\"]) # List of package states\n q_state_index += possible_values[\"time_of_day_states\"].index(notif.postedTimeOfDay) # List of package states\n return q_state_index\n\n\ndef split(a, n):\n k, m = divmod(len(a), n) # Returns quotient and remainder for len(a) / n\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)) # Returns a generator\n\n\nif __name__ == \"__main__\":\n start_time = default_timer()\n\n # Cross Validation k value\n K_VALUE = 10\n k_fold_average_reward = 0\n k_metrics = []\n training_metrics = []\n\n # Create Environment (ensure this is outside of the cross-validation loop, otherwise the dataset will be randomly\n # shuffled between k values\n env = gym.make('notif-v0')\n env.render()\n\n action_size = env.action_space.n\n print(\"MAIN: Action size \", action_size)\n\n state_size = env.observation_space.n\n print(\"MAIN: State size\", state_size)\n\n # Divide notification list into 10 equal parts\n k_parts_list = list(split(env.notification_list, K_VALUE))\n\n end_time = default_timer()\n\n print(\"Setup time: {}\".format(end_time - start_time))\n\n # For k in 10-fold cross validation\n for k_step in range(0, K_VALUE):\n env.training_data = []\n env.testing_data = []\n\n # Create training data for all groups except the testing data group\n for group in k_parts_list:\n if group != k_parts_list[k_step]:\n env.training_data += group\n\n # Set testing data group\n env.testing_data = 
k_parts_list[k_step]\n\n start_time = default_timer()\n\n # Create Q-Table\n qtable = np.zeros((state_size, action_size))\n\n # Create the hyper parameters\n total_training_episodes = 1000 # Was 50000\n total_test_episodes = 100\n max_training_steps = len(env.training_data) # Number of notifications per training episode\n max_testing_steps = len(env.testing_data) # Number of notifications per testing episode\n\n learning_rate = 0.7 # Was 0.7\n gamma = 0.618 # Discount rate\n\n # Exploration parameters\n epsilon = 1.0 # Exploration rate\n max_epsilon = 1.0 # Exploration probability at the start\n min_epsilon = 0.01 # Min exploration probability\n decay_rate = 0.005 # Exponential decay rate for exploration, was 0.01\n\n env.training = True\n\n # ----- The Q-Learning Algorithm -----\n print(\"Training...\")\n\n for episode in range(total_training_episodes):\n # Reset the environment\n state = env.reset()\n done = False\n total_reward = 0\n # Each step changes the state to another notification\n for step in range(max_training_steps):\n # Get random number for exploration/exploitation\n exp_exp_tradeoff = random.uniform(0, 1)\n\n # If this random number > epsilon --> exploitation (take largest Q value from the Q-table)\n if exp_exp_tradeoff > epsilon:\n action = np.argmax(qtable[get_q_state_index(env.info, state), :])\n\n # Else do a random choice --> exploration\n else:\n action = env.action_space.sample()\n\n # Take the action (a) and observe the outcome state (s') and reward (r)\n new_state, reward, done, info = env.step(bool(action))\n total_reward += reward\n\n # Update Q(s,a) using the Bellman equation\n qtable[get_q_state_index(env.info, state), action] = qtable[get_q_state_index(env.info, state), action] + \\\n learning_rate * (reward + gamma * np.max(qtable[get_q_state_index(env.info, new_state), :]) - qtable[get_q_state_index(env.info, state), action])\n\n # Update to the new state\n state = new_state\n\n # If done (i.e. 
passed through all states in the training set) then finish episode\n if done:\n print(\"TRAINING: k:{}, episode: {}/{}, total reward: {}, steps: {}, epsilon: {}\"\n .format(k_step, episode, total_training_episodes, total_reward, step, epsilon))\n if k_step == 0:\n training_metrics.append([episode, total_reward/step, epsilon])\n break\n\n episode += 1\n # Reduce epsilon (to reduce exploration over time)\n epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)\n\n end_time = default_timer()\n\n training_time = end_time - start_time\n print(\"Training time: {}\".format(training_time))\n # print(qtable)\n\n # ----- Using the Trained Q-Table -----\n start_time = default_timer()\n env.training = False\n env.reset()\n list_tot_rewards = []\n metric_list = OverallMetrics()\n\n for episode in range(total_test_episodes):\n state = env.reset()\n done = False\n total_reward = 0\n metr = MLMetrics()\n print(\"EPISODE \", episode)\n for step in range(max_testing_steps):\n # env.render\n # Take the action (index) that have the maximum expected future reward given that state\n action = np.argmax(qtable[get_q_state_index(env.info, state), :])\n new_state, reward, done, info = env.step(bool(action))\n # Actual action equals X-NOR(predicted, reward)\n metr.update(bool(action), not(bool(action) != bool(reward)))\n total_reward += reward\n if done:\n metric_list.update(metr)\n print(metr)\n break\n state = new_state\n\n end_time = default_timer()\n testing_time = end_time - start_time\n print(\"Testing time: {}\".format(testing_time))\n print(\"Average accuracy: {}\".format(metric_list.average_list(metric_list.accuracy_list)))\n k_metrics.append(metric_list.get_average_metrics(k_step) + [training_time, testing_time])\n\n csv_name = env.CSV_FILE.split('/')[1].split('.')[0] # Removes directory and file extension from the env's CSV name\n env.close()\n\n # ----- Write Average ML metrics for each k-step to csv -----\n file_1 = open(\"csv_output/\" + csv_name + 
\"_QTable.csv\", \"w\", newline='') # Newline override to prevent blank rows in Windows\n writer = csv.writer(file_1)\n writer.writerow([\"k_value\", \"Precision\", \"Accuracy\", \"Recall\", \"F1 Score\", \"Click_Through\", \"Train time\", \"Test time\"])\n for row in k_metrics:\n writer.writerow(row)\n file_1.close()\n\n # ----- Write reward and epsilon values across episodes to csv -----\n file_1 = open(\"csv_output/\" + csv_name + \"_k0traindata_QTable.csv\", \"w\", newline='')\n writer = csv.writer(file_1)\n writer.writerow([\"Episode\", \"Percentage Reward\", \"Epsilon\"])\n for row in training_metrics:\n writer.writerow(row)\n file_1.close()\n","sub_path":"q_table.py","file_name":"q_table.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"600796631","text":"### Michael DeCero\r\n### HomeWork 0502\r\n### 10/17/19\r\n### I have not given or received any unauthorized assistance on this assignment.\r\n### YouTube: https://youtu.be/I82PcJ7ByJM\r\n\r\nfrom diceSettings import *\r\nimport random\r\n\r\ndef HW0502():\r\n 'Function that facilitates the game of Cups & Dice'\r\n Name = GreetUser() #Greets User and gets his or her name\r\n UserBalance = 100 #Estabilishes a \"bank\" balance of $100\r\n UserAnswer = AskUserGame() #Asks user if he or she wants to play a game\r\n while UserAnswer.upper() == 'Y': #Will continue to play the game if user inputs 'Y'\r\n print ('\\n')\r\n print ('Your Balance is $' + str(UserBalance)) #Tells the user how much they have to bet\r\n Bet = GetUserBet() #Obtains and saves the amount the user wants to bet\r\n Goal = GenerateGoal() #Obtains and saves what the goal number is that the user is striving (rolling) for\r\n DiceTotal, Roll = DisplayRoll() #Rolls the amount of dice the user input\r\n BetResult = CalcWinLoss(Bet, Goal, DiceTotal) #Obtains and saves the bet result depending on the roll + the result of the cup roll\r\n UserBalance += 
BetResult\r\n PrtAnswer(Name, UserBalance, BetResult, Goal, DiceTotal, Roll) #Tells the user if he/she won, the results of the roll, and how much he/she won/lost\r\n UserAnswer = AskUserGame() #If user wants to continue, loop to play another game\r\n print ('\\n' + 'You do not want to play any more? Bummer. See ya later.')\r\n\r\ndef GreetUser():\r\n \"Function that greets the user and takes in the input of the user's first name\"\r\n Name = input(\"Hello. Welcome to the Cups & Dice Game! Please enter your first name in quotes (''): \")\r\n print ('\\n')\r\n return Name\r\n\r\ndef AskUserGame():\r\n 'Function that asks the user if he or she wants to play a game of Cups & Dice'\r\n UserAnswer = eval(input(\"Are you ready to play another game of Cups & Dice? ('Y' or 'N') \"))\r\n return UserAnswer\r\n\r\ndef GetUserBet():\r\n 'Function that asks the user how much of their balance they want to wager'\r\n Bet = eval(input('How much of your balance would you like to wager? '))\r\n while Bet <= 0: #Raise Error if a negative number is entered\r\n print ('\\n')\r\n print (ValueError('You must enter a positive integer for your bet.'))\r\n print ('\\n')\r\n Bet = eval(input('How much of your balance would you like to wager? '))\r\n return Bet\r\n\r\ndef GenerateGoal():\r\n 'Function that generates a random number between 1 and 100 - this number will be the goal the user is striving for'\r\n Goal = random.randrange(1,100)\r\n print ('\\n')\r\n print ('The number you are rolling for is ' + str(Goal))\r\n print ('\\n')\r\n return Goal\r\n\r\ndef GetDiceQuantity():\r\n 'Function that asks the user how many of each type of dice they want to roll in the cup'\r\n SixDice, TenDice, TwentyDice = map(int,input('What is the quantity of each type of die that you would like to roll? 
(Quantity of 6 sided die, Quantity of 10 sided die, Quantity of 20 sided die) ').split(','))\r\n return SixDice, TenDice, TwentyDice\r\n\r\ndef DisplayRoll():\r\n 'Function that rolls cup of dice'\r\n SixDice, TenDice, TwentyDice = GetDiceQuantity() #Pulls user input of quantities of dice from the GetDiceQuantity function above\r\n cup = Cup(SixDice, TenDice, TwentyDice) #Sets the quantities of six sided dice, ten sided dice, and twenty sided dice provided as input by user from the GetDiceQuantity function\r\n cup.roll() #Rolls dice\r\n DiceTotal = cup.getSum() #Sums the face values of the dice\r\n Roll = cup #Captures the face values of each die rolled\r\n return DiceTotal, Roll\r\n\r\ndef CalcWinLoss(Bet, Goal, DiceTotal):\r\n \"Function that calculates the user's total win or loss as well as updated balance depending on the bet and the results of the dice roll\"\r\n if DiceTotal == Goal: #Sum of dice = goal\r\n Bet *= 10 #User wins 10 times his or her bet\r\n elif (Goal - 3) <= DiceTotal < Goal: #Sum of dice is within 3 or less of goal\r\n Bet *= 5 #User wins 5 times his or her bet\r\n elif (Goal - 10) <= DiceTotal < Goal: #Sum of dice is within 10 or less of goal\r\n Bet *= 2 #User wins twice his or her bet\r\n else:\r\n Bet = 0 - Bet #User loses bet\r\n return Bet #Return the result of the wager +/- the result of the roll\r\n\r\ndef PrtAnswer(Name, UserBalance, BetResult, Goal, DiceTotal, Roll):\r\n 'Function that tells the user if he/she won, the results of the roll, and how much he/she won/lost'\r\n if BetResult < 0: #If BetResult < 0, user lost - print the total face value result and the face value of each die\r\n print ('\\n')\r\n print ('Sorry ' + Name + '. You lost your bet.')\r\n print ('Your updated balance is $' + str(UserBalance) + '\\n')\r\n else: #If BetResult > 0, user won! - print the total face value result and the face value of each die\r\n print ('\\n')\r\n print ('Congratulations ' + str(Name) + '! 
You won $' + str(BetResult))\r\n print ('Your updated balance is $' + str(UserBalance) + '\\n')\r\n print ('Below are the results of your roll:' + '\\n')\r\n print (str(Roll) + '\\n') \r\n print ('Goal = ' + str(Goal)) \r\n print ('Total = ' + str(DiceTotal))\r\n print ('\\n')\r\n \r\nHW0502()\r\n","sub_path":"HW0502_MAD.py","file_name":"HW0502_MAD.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"496158641","text":"##made 박소연\n#2 voice 생성 출력 통합 voicefunc\nimport datetime\nimport naver\nimport pygame\nimport time\n#import weather\nfrom Holiday import Holiday\nfrom TnHdev import I2Cth\nfrom weather import *\nweekDay = ['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일']\n#engine = pyttsx3.init()\n\nclass voiceNot:\n def __init__(self):\n self.hol = Holiday()#생성 오래 걸림, 미리 생성자 부르기\n pygame.init()\n print(\"hol\")\n self.weather_list = weather_info\n def voiceFunc(self,content , any = None):\n #content = WEATHER , TH 습도, DATE \n #default = ANY any:내용\n #음성 겹치지 않도록 함\n while True:\n if pygame.mixer.music.get_busy():\n time.sleep(0.5)\n continue\n break\n \n #텍스트 생성\n txt = \"\"\n \n if content == \"WEATHER\":\n #날씨 api 이용해서 구함.\n \n txt = \"오늘 날씨는 \" + self.weather_list[0] + \", 온도는 \" + self.weather_list[1] + \"도 입니다.\"\n elif content == \"TH\":\n tnh = I2Cth()\n txt = \"현재 실내온도는 \" + str(round(tnh.checkTemp(),1)) + \", 습도는 \" + str(round(tnh.checkHumi(),1)) + \" 입니다.\"\n elif content == \"DATE\":\n \n dt = datetime.datetime.now()\n wd = datetime.datetime.weekday(dt)\n txt = str(dt.year) + \"년 \" + str(dt.month) + \"월 \"+ str(dt.day)+\"일 \"+weekDay[wd]+\" 입니다.\"\n index = self.hol.isHoliday()\n if index > -1:\n #공휴일 api 이용\n txt += \"오늘은 \"+self.hol.getDayName(index)+\" 입니다.\"\n elif content == \"ANY\":\n txt = any\n else:\n raise AttributeError\n \n #네이버 api\n naver.naverVoiceApi(txt)\n \n #음성 로드 출력\n pygame.mixer.music.load(\"result.mp3\")\n pygame.mixer.music.play(1)\n 
\n ","sub_path":"voiceFunc.py","file_name":"voiceFunc.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"516583687","text":"import configparser\nfrom .parser import Parser\n\nclass INI_Setup_Parser(Parser):\n \n def get_dict_from_file(self):\n config = configparser.ConfigParser()\n config.read(self._file)\n return_struct = self._get_dict_from_config_parser(config)\n return return_struct\n \n def _get_dict_from_config_parser(self, config):\n return_struct = {}\n for conf in config.sections():\n return_struct[conf] = self._get_dict_from_section(conf, config[conf])\n return return_struct\n \n def _get_dict_from_section(self, section_key, section):\n return_struct = {}\n for key in section: \n value = section[key]\n try:\n _type = self._configuration[section_key]['fields'][key]['type']\n if _type and 'dict' in _type:\n value = INI_Setup_Parser._parse_filed_config_dict(value)\n elif _type and 'list-comma' in _type:\n value = INI_Setup_Parser._parse_filed_config_list(value, ',')\n elif _type and 'list-semi' in _type:\n value = INI_Setup_Parser._parse_filed_config_list(value, ';')\n return_struct[key] = value\n except:\n pass\n return return_struct\n \n @staticmethod\n def _parse_filed_config_dict(value):\n return_struct = {}\n value = value.replace(' ', '')\n value = value.replace('\\t', '')\n value = value.replace('\\s', '')\n value = value.replace('\\n', '', 1)\n value = value.replace('\\n', ',')\n tmp_pairs = value.split(',')\n for tmp_pair_str in tmp_pairs:\n tmp_pair = tmp_pair_str.split('=')\n if tmp_pair != [''] and tmp_pair != []:\n return_struct[tmp_pair[0]] = tmp_pair[1] \n return return_struct\n \n @staticmethod\n def _parse_filed_config_list(value, seperator):\n return_struct = []\n value = value.replace(' ', '')\n value = value.replace('\\t', '')\n value = value.replace('\\s', '')\n value = value.replace('\\n', '', 1)\n value = value.replace('\\n', seperator)\n 
tmp_value = value.split(seperator)\n for tmp_value in tmp_value:\n return_struct.append(tmp_value) \n return return_struct\n \n def write_dict_to_file(self, dict_to_write):\n config = configparser.ConfigParser()\n for section_key, section_value in dict_to_write.items():\n config[section_key] = {}\n for field_key, field_value in section_value.items():\n _type = self._configuration[section_key]['fields'][field_key]['type']\n value_string = ''\n if _type and 'dict' in _type:\n for sub_k, sub_v in field_value.items():\n value_string += f\"\\n{sub_k}={sub_v}\"\n elif _type and 'list-comma' in _type:\n for sub in field_value:\n value_string += f\"\\n{sub}\"\n elif _type and 'list-semi' in _type:\n for sub in field_value:\n value_string += f\"\\n{sub}\"\n else:\n value_string = str(field_value)\n config[section_key][field_key] = value_string\n with open(self._file, 'w') as configfile:\n config.write(configfile)\n","sub_path":"src/sg_testing/parser/ini_setup_parser.py","file_name":"ini_setup_parser.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"86226983","text":"#!usr/bin/env python\n\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\nLED1=21\nLED2=20\nLED3=16\nLED4=12\nchannels = [LED1, LED2, LED3, LED4]\n\nSW1=7\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(channels, GPIO.OUT)\nGPIO.setup(SW1, GPIO.IN)\n\nstatus = False\nwhile 1:\n if status:\n while 1:\n GPIO.output(channels, GPIO.HIGH)\n time.sleep(0.15)\n if GPIO.input(SW1) == True:\n status = not status\n break\n else:\n while 1:\n GPIO.output(channels, GPIO.LOW)\n time.sleep(0.15)\n if GPIO.input(SW1) == True:\n status = not status\n break\n\nGPIO.cleanup()\n","sub_path":"IoT/0j02017-2.py","file_name":"0j02017-2.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"351966014","text":"\n# coding: utf-8\n\nfrom mxnet import 
autograd\nfrom mxnet import ndarray as nd\nimport numpy as np\nfrom mxnet import gluon\nimport mxnet as mx\n\n# 选择默认的计算设备\ndef try_gpu():\n \"\"\"If GPU is available, return mx.gpu(0); else return mx.cpu()\"\"\"\n try:\n ctx = mx.gpu()\n _ = nd.array([0], ctx=ctx)\n except:\n ctx = mx.cpu()\n return ctx\n\ndef SGD(params, lr):\n for param in params:\n # param[:] 可以覆盖原内存更新值,不需要开辟新的存储空间\n param[:] = param - lr * param.grad\n\ndef accuracy(output, label):\n return nd.mean(output.argmax(axis= 1) == label).asscalar()\n\ndef _get_batch(batch, ctx):\n \"\"\"return data and label on ctx\"\"\"\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])\n\ndef evaluate_accuracy(data_iterator, net, ctx=[mx.cpu()]):\n if isinstance(ctx, mx.Context):\n ctx = [ctx]\n acc = nd.array([0])\n n = 0.\n if isinstance(data_iterator, mx.io.MXDataIter):\n data_iterator.reset()\n for batch in data_iterator:\n data, label, batch_size = _get_batch(batch, ctx)\n for X, y in zip(data, label):\n acc += nd.sum(net(X).argmax(axis=1)==y).copyto(mx.cpu())\n n += y.size\n acc.wait_to_read() # don't push too many operators into backend\n return acc.asscalar() / n\n\nclass DataLoader(object):\n \"\"\"similiar to gluon.data.DataLoader, but might be faster.\n The main difference this data loader tries to read more exmaples each\n time. 
But the limits are 1) all examples in dataset have the same shape, 2)\n data transfomer needs to process multiple examples at each time\n \"\"\"\n def __init__(self, dataset, batch_size, shuffle, transform=None):\n self.dataset = dataset\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.transform = transform\n\n def __iter__(self):\n data = self.dataset[:]\n X = data[0]\n y = nd.array(data[1])\n n = X.shape[0]\n if self.shuffle:\n idx = np.arange(n)\n np.random.shuffle(idx)\n X = nd.array(X.asnumpy()[idx])\n y = nd.array(y.asnumpy()[idx])\n\n for i in range(n//self.batch_size):\n if self.transform is not None:\n yield self.transform(X[i*self.batch_size:(i+1)*self.batch_size], \n y[i*self.batch_size:(i+1)*self.batch_size])\n else:\n yield (X[i*self.batch_size:(i+1)*self.batch_size],\n y[i*self.batch_size:(i+1)*self.batch_size])\n\n def __len__(self):\n return len(self.dataset)//self.batch_size\n\ndef load_data_fashion_mnist(batch_size, resize= None, root = \"~/.mxnet/datasets/fashion-mnist\"):\n \"\"\"download the fashion mnist dataset then load it into memory \"\"\"\n def transform_mnist(data, label):\n # Transform a batch of examples.\n if resize:\n n = data.shape[0]\n new_data = nd.zeros((n, resize, resize, data.shape[3]))\n for i in range(n):\n new_data[i] = image.imresize(data[i], resize, resize)\n data = new_data\n # change data from batch x height x width x channel to batch x channel x height x width\n return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')\n\n mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)\n mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)\n # Transform later to avoid memory explosion. 
\n train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)\n test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)\n return (train_data, test_data)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"598876775","text":"import numpy as np\nimport cv2\n\nimage = cv2.imread(\"../assets/img.png\")\nimage = cv2.resize(image, (256, 256))\n\ncolors = {'red': (0, 0, 255), 'blue': (255, 0, 0), 'yellow': (0, 255, 255)}\ncv2.line(image, (0, 0), (300, 300), colors['red'], 3)\ncv2.rectangle(image, (0, 0), (100, 100), colors['blue'], 3)\nret, p1, p2 = cv2.clipLine((0, 0, 100, 100), (0, 0), (300, 300))\nif ret:\n cv2.line(image, p1, p2, colors['yellow'], 3)\ncv2.imshow(\"ClipLine\", image)\ncv2.waitKey(0)\ncv2.destroyAl1lWindows()\n","sub_path":"drawing-shapes/drawing-cliplines.py","file_name":"drawing-cliplines.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"405910101","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 7 13:32:00 2020\n\n@author: admin\n\"\"\"\n\nimport glob \nimport re \nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle \nimport cv2 \nfrom mylab.miniscope.Mplot import *\n#%%\nanimal_id = \"191126\"\ntsFileList = glob.glob(os.path.join(r'W:\\qiushou\\miniscope\\2019*',animal_id,\"H*/timestamp.dat\")) \ndef sort_key(s): \n if s: \n try: \n date = re.findall('\\d{8}', s)[0]\n except: \n date = -1 \n try: \n H = re.findall('H(\\d+)',s)[0]\n except: \n H = -1 \n try: \n M = re.findall('M(\\d+)',s)[0]\n except: \n M = -1 \n try: \n S = re.findall('S(\\d+)',s)[0]\n except: \n S = -1 \n try: \n ms = re.findall('msCam(\\d+)',s)[0]\n except: \n ms = -1 \n return [int(date),int(H),int(M),int(S),int(ms)]\n 
\ntsFileList.sort(key=sort_key)\nmsCamFileList.sort(key=sort_key)\nts_lens = []\nframenums = []\nfor tsFile in tsFileList:\n print(tsFile)\n print(\">\",end=\"\")\n ts = pd.read_csv(tsFile,sep = \"\\t\", header = 0)\n ts_len=ts.shape[0] \n videoFileList=glob.glob(os.path.dirname(tsFile)+'\\msCam*.avi') \n framenum=[]\n for video in videoFileList:\n# print(video)\n print(\"<\",end=\"\")\n cap = cv2.VideoCapture(video)\n framenum.append(int(cap.get(7)))\n cap.release()\n print([ts_len,sum(framenum)])\n ts_lens.append(ts_len)\n framenums.append(sum(framenum))\nprint(sum(ts_lens),sum(framenums))\n#%%\n\n","sub_path":"miniscope/miniscope_02_videots_vs_videoframes.py","file_name":"miniscope_02_videots_vs_videoframes.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"402785962","text":"#!/usr/bin/env python\n'''\n@name: plot_omega_comp3.py\n@author: Matt\n@date: 25 March, 2014\n@version: 1.0\n@description: A python script for plotting the effective collision strengths for \n transitions from different calculations on the same graph.\n\nInput: plot_upsilon_comp.py [inputfile1] [inputfile2] [\"title\"] { -s [outfile]}\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport math\n\n\ndef main(argv):\n '''Main function for module plot_transition'''\n if len(argv) < 3:\n raise Exception(\"Insufficient number of arguments\")\n print(argv, len(argv))\n infile1 = argv[0]\n infile2 = argv[1]\n infile3 = argv[2]\n title = argv[3]\n save = False\n if len(argv) == 6: \n if argv[4] == '-s':\n save = True\n outfile = argv[5]\n\n # Load the input files and plot the appropriate columns\n data1 = np.loadtxt(infile1)\n data2 = np.loadtxt(infile2)\n data3 = np.loadtxt(infile3)\n x1 = data1[0:-2, 1]\n y1 = data1[0:-2, 3]\n # For comparing upsilons from a DW calculation, use the conversion of the\n # temperature to energies in Ryd below\n x2 = 6.336273823529412e-06 * 
data2[0:-2, 1]\n x3 = 6.336273823529412e-06 * data3[0:-2, 0]\n # Otherwise, just keep things normal as below\n #x2 = data2[0:-2, 1]\n y2 = data2[0:-2, 3]\n y3 = data3[0:-2, 1]\n\n # Create the desired uncertainty region\n # err = 0.2 * y2\n fig, ax = plt.subplots(1)\n ax.plot(x1, y1, 'r-') \n ax.plot(x2, y2, 'b--')\n ax.plot(x3, y3, 'g.-')\n # ax.fill_between(x2, y2 - err, y2 + err, alpha=0.5, edgecolor='red', \n # facecolor='red')\n ax.set_ylabel(\"$\\Omega$, Collision Strength\")\n ax.set_xlabel(\"Scattered Energy, $E$ (Ryd)\")\n ax.set_yscale(\"log\")\n ax.set_ylim((1e-4,2))\n ax.set_xlim((0,450))\n ax.set_title(title)\n #ax.legend(('DARC (serial, MXE=6601)', 'DARC (parallel, MXE=54401)'), loc='upper right', prop={'size':10})\n #ax.legend(('DARC', 'BP ICFT'), loc='upper right', prop={'size':10})\n ax.legend(('BP ICFT $\\Omega$', 'BP ICFT $\\\\Upsilon$','AUTOS DW $\\\\Upsilon$'), loc='upper right', prop={'size':10})\n if save:\n fig.set_size_inches(11.89,8.27)\n plt.savefig(outfile)\n else:\n plt.show()\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"DARC/plotting/plot_omega_comp3.py","file_name":"plot_omega_comp3.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"99800838","text":"class Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n def isValidTwoDigit(s):\n # Must be in 10 - 26.\n if s[0] not in '12':\n return False\n if s[0] == '2' and s[1] not in '0123456':\n return False\n return True\n \n def isValidOneDigit(s):\n # Must be in 1 - 9.\n return s != '0'\n \n if not s:\n return 0\n \n # Num of ways to decode s[:i].\n dp = [0 for _ in range(len(s)+1)]\n dp[0] = 1\n if isValidOneDigit(s[0]):\n dp[1] = 1\n \n for i in range(2, len(dp)):\n if isValidOneDigit(s[i-1]):\n dp[i] += dp[i-1]\n if isValidTwoDigit(s[i-2:i]):\n dp[i] += dp[i-2]\n\n return 
dp[-1]\n\n","sub_path":"python2/l0091_decode_ways.py","file_name":"l0091_decode_ways.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"592564386","text":"'''\nIl tris e' un popolarissimo gioco. Si gioca su una griglia quadrata di 3x3 caselle.\nA turno, i due giocatori scelgono una cella vuota e vi disegnano il proprio simbolo \n(un giocatore ha come simbolo una \"o\" e l'avversario una 'x'). \nVince il giocatore che riesce a disporre tre dei propri simboli in linea retta \norizzontale, verticale o diagonale. Se la griglia viene riempita \nsenza che nessuno dei giocatori sia riuscito a completare una linea \nretta di tre simboli, il gioco finisce in parita'. Nel caso in cui il gioco \nfinisse in parita', la partita e' detta \"patta\". \nPer convenzione a griglia vuota la prima mossa spetta sempre al giocatore 'o'\n\nUna configurazione del gioco e' dunque univocamente determinata dal contenuto della griglia.\n\nNel seguito assumiamo che il contenuto della griglia sia rappresentato tramite lista di liste.\nLa dimensione della lista di liste M e' 3x3 ed M[i][j] contiene '', 'x', o 'o' a seconda \nche la cella della griglia appartenente all'iesima riga e j-ma colonna sia ancora libera, \ncontenga il simbolo 'x' o contenga il simbolo 'o'. \n\nData una configurazione C del gioco, l'albero di gioco per C e' l'albero che \nsi ottiene ricorsivamente partendo dalla configurazione C e assegnando come figli le configurazioni \nche e' possibile ottenere da C con una mossa ulteriore del gioco. 
Ovviamente risulteranno \nfoglie dell'albero i possibili esiti della partita vale a dire le diverse configurazioni cui e' \npossibile arrivare partendo da C e che rappresentano patte, vittorie per 'o' o vittorie per 'x'.\nSe veda ad esempio l'immagine albero_di_gioco.png che mostra l' albero di gioco che si ottiene a partire \ndalla configurazione rappresentata da [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\n \n\nSi consideri la seguente Classe di oggetti:\n\n\nclass NodoTris:\n def __init__(self, griglia):\n self.nome = griglia\n self.lista_figli = [] \n\n\nBisogna progettare le seguente funzione \n\ngen_tree(griglia)\nche, data la configurazione di gioco griglia, costruisce l'albero di gioco che si ottiene a partire \ndalla configurazione griglia e ne restituisce la radice. I nodi dell'albero devono essere \noggetti della classe NodoTris.\n\nPer testare la correttezza della vostra implementazione di gen_tree() il grade utilizzera' quattro metodi \ndella classe NodoTris che dovete comunque implementare: \n\n1)\ntipo(self)\nche, dato un nodo NodoTris, restituisce:\n 'o' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'o'\n 'x' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'x'\n '-' se la configurazione rappresentata dal nodo e' una configurazione di patta\n '?' se la configurazione rappresentata dal nodo e' una configurazione di gioco non ancora terminato\n\n2)\nesiti(self)\nche, dato un nodo radice di un albero di gioco, restituisce una tripla con i possibili \nesiti della partita che ha come configurazione iniziale quella rappresentata dal nodo. 
\nPiu' precisamente: il primo elemento della tripla e' il numero di patte possibili, \nil secondo e' il numero di possibili vittorie per il giocatore 'o' mentre il terzo elemento \ne' il numero di possibili vittorie per il giocatore 'x'.\n\n3)\nvittorie_livello(self, giocatore, h)\nche, dato un nodo radice di un albero di gioco, uno dei due giocatori ed un intero h,\nrestituisce il numero di nodi che rappresentano una vittoria per il giocatore e si \ntrovano ad altezza h nell'albero. In altri termini restituisce il numero di vittorie possibili \nper giocatore in esattamente h mosse, nella partita che ha come configurazione iniziale \nquella rappresentata dalla radice dell'albero.\n\n4)\nstrategia_vincente(self,giocatore)\nche, dato un nodo radice di un albero di gioco ed uno dei due giocatori, restituisce True o False. \nRestituisce True se giocatore ha una strategia vincente nella partita \nche ha come configurazione iniziale quella rappresentata dal nodo radice, False altrimenti.\n\nNota che un giocatore ha una strategia vincente rispetto ad una certa configurazione se, \nqualunque siano le mosse dell'avversario ha sempre la possibilita' di rispondere in modo \nche la partita termini con la sua vittoria.\n\nPotete ovviamente definire ulteriori funzioni e altri metodi per la Classe NodiTris \nse li ritenete utili al fine della risoluzione del compito.\n\nPotete assumere che le configurazioni di gioco rappresentate da griglia siano sempre configurazioni \nlecite (vale a dire ottenute dopo un certo numero di mosse a parire dalla griglia vuota).\n\n\nAVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non\nimportare moduli che non sono nella libreria standard.\n\nATTENZIONE: i test vengono eseguiti con un timeout globale di 2*N secondi (se il grader esegue N test).\n'''\n\nimport copy \nclass NodoTris:\n def __init__(self, griglia):\n self.nome = griglia\n self.lista_figli = [] #lista dei nodi figli\n\n \n def tipo(self):\n 
l=[[self.nome[0][0],self.nome[1][1],self.nome[2][2]],[self.nome[2][0],self.nome[1][1],self.nome[0][2]]]\n for i in range(3):\n l.append([self.nome[0][i],self.nome[1][i],self.nome[2][i]])\n l+=self.nome\n \n c=pieni(self.nome)[0]\n \n for el in l:\n \n if el[0]==el[1]==el[2] and el[0]!='':\n \n return str(el[0])\n if c==9:\n return'-'\n else:\n return '?'\n \n def esiti(self):\n return tuple(self.es_ric([0,0,0])) \n \n def vittorie_livello(self, giocatore, h):\n return self.lev(0,h,giocatore,0)\n \n def es_ric(self,l):\n if self.tipo()=='?': \n for el in self.lista_figli: \n el.es_ric(l)\n if self.tipo()=='-':\n l[0]+=1\n \n if self.tipo()=='o':\n l[1]+=1\n \n if self.tipo()=='x':\n l[2]+=1\n \n return l \n\n def ramo(self,l,turno):\n if self.tipo()=='x' or self.tipo()=='o' or self.tipo()=='-':\n return \n for i in l:\n griglia=copy.deepcopy(self.nome)\n l_nuova=copy.deepcopy(l)\n y=i[0]\n x=i[1]\n if turno:\n griglia[y][x]='o'\n boo=False\n else:\n griglia[y][x]='x'\n boo=True\n l_nuova.remove((y,x))\n nodo=NodoTris(griglia)\n self.lista_figli.append(nodo)\n nodo.ramo(l_nuova,boo) \n \n\n \n\n \n def lev(self,cont,alt,giocatore,l): \n if self.tipo()==giocatore and l==alt:\n cont+=1 \n if alt>l:\n l+=1\n for i in self.lista_figli:\n cont=i.lev(cont,alt,giocatore,l)\n \n return cont\n \n \n \n \n \n def chivince(self,M,giocatore): \n if self.tipo()==giocatore:\n return True\n if M>0:\n self.strategia_vincente(giocatore) \n \n def strategia_vincente(self,giocatore):\n if self.tipo()==giocatore:\n return True\n M=len(self.lista_figli)\n for indx in range(1,M):\n if self.lista_figli[indx].chivince(M,giocatore)==True and self.tipo()!='-':\n return True\n return False \n \n \n\ndef turno(griglia):\n c=9-pieni(griglia)[0]\n if c%2==0:\n return 'o' \n else: \n return 'x'\n \ndef pieni(griglia):\n c=0\n vuoti=[]\n for y in range(3):\n for x in range(3):\n if griglia[y][x]!='':\n c+=1\n else: vuoti+=[(y,x)]\n return c,vuoti\n\ndef gen_tree(griglia):\n l=pieni(griglia)[1] \n 
t=turno(griglia)\n albero=NodoTris(griglia)\n albero.ramo(l,t)\n return albero\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\ng1=[['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\ng2=[['x', 'o', 'o'], ['x', 'x', 'o'], ['o', 'x', 'o']]\ng3=[['x', 'o', 'o'], ['x', 'x', 'o'], ['o', '', 'x']]\ng4=[['o', 'x', 'x'], ['x', 'o', 'o'], ['o', 'o', 'x']]\n\nlista=[g1, g2, g3, g4]\nlista1=[gen_tree(x) for x in lista] \nprint([x.esiti() for x in lista1])\n\n\n'''g5=[['', 'x', ''], ['', 'o', ''], ['', '', '']]\ng6=[['', 'o', ''], ['', 'x', ''], ['', '', '']]\ng7=[['', 'x', 'o'], ['', '', ''], ['', '', '']]\ng8=[['', 'o', 'x'], ['', '', ''], ['', '', '']]\nlistab=[g5, g6, g7, g8]\nlista1=[gen_tree(x) for x in listab]\nprint([y.strategia_vincente('o') for y in lista1])''' \n","sub_path":"students/1815194/homework04/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"71913064","text":"from functools import partial\nfrom typing import Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom model.layer import AppendClassToken, PositionalEncoding, AppendMaskTokens, Normalize, ReorderToBlockWiseMask, ShufflePatches, Lambda, \\\n Unfold\nfrom utilities.train import IPretrainer\n\n\nclass MAE(nn.Module, IPretrainer):\n def __init__(self, *,\n encoder: nn.Module,\n decoder: nn.Module,\n masking_ratio: float,\n kernel_size: int,\n normalize: bool,\n masking_strategy: str = 'random-sampling',\n which_unfolder: str = 'unfold'):\n super(MAE, self).__init__()\n encoder_dim = encoder.d_model\n decoder_dim = decoder.d_model\n\n self.trained = False\n\n self.masking_ratio = masking_ratio\n self.normalize = normalize\n\n self.encoder = encoder\n self.decoder = decoder\n\n self.kernel_size = kernel_size\n self.unfold = Unfold(patch_size=kernel_size, which_impl=which_unfolder)\n self.fold = Lambda(partial(F.fold, kernel_size=kernel_size, 
stride=kernel_size))\n self.patch_projection = nn.Linear(3 * kernel_size ** 2, encoder_dim)\n self.append_cls_token = AppendClassToken(d_model=encoder_dim)\n self.encoder_pos_enc = PositionalEncoding(d_model=encoder_dim, max_len=1000)\n self.decoder_pos_enc = PositionalEncoding(d_model=decoder_dim, max_len=1000)\n self.append_mask_tokens = AppendMaskTokens(decoder_dim)\n self.latent_projection = nn.Linear(encoder_dim, decoder_dim)\n self.output_projection = nn.Linear(decoder_dim, 3 * kernel_size ** 2)\n self.patch_wise_normalize = Normalize(dim=-1)\n\n if masking_strategy == 'random-sampling':\n self.reorder_tokens = ShufflePatches()\n elif masking_strategy == 'block-wise':\n self.reorder_tokens = ReorderToBlockWiseMask(masking_ratio)\n else:\n assert False, f\"Unknown masking strategy setting: \\\"{masking_strategy}\\\"\"\n\n @property\n def d_model(self) -> int:\n return self.encoder.d_model\n\n def forward(self, x: torch.Tensor, blocks=None):\n tokens, original_patches = self.tokenize(x)\n\n # Randomly shuffle the tokens and remove some of them\n tokens, indices, mask_indices = self.generate_mask(tokens, blocks)\n\n z = self.encode(tokens)\n\n reconstruction, reconstructed_patches = self.decode(z, indices, n_drop=mask_indices.shape[1],\n img_shape=x.shape[2:])\n\n return reconstruction, reconstructed_patches, original_patches, mask_indices\n\n def tokenize(self, x):\n patches = self.unfold(x).swapaxes(1, 2) # BCHW -> BN(3*k*k)\n original_patches = patches.detach().clone() # Save copy of original patches (to be used in loss calculations)\n if self.normalize:\n original_patches = self.patch_wise_normalize(original_patches)\n tokens = self.patch_projection(patches) # BN(3*k*k) -> BD(3*k*k)\n tokens = self.encoder_pos_enc(tokens)\n return tokens, original_patches\n\n def generate_mask(self, tokens, blocks):\n tokens, indices = self.reorder_tokens(tokens, blocks)\n\n # Remove the last n_drop tokens, where n_drop depends on the masking ratio.\n n_drop = 
int(tokens.shape[1] * self.masking_ratio)\n tokens = tokens[:, :-n_drop]\n mask_indices = indices[:, -n_drop:]\n\n return tokens, indices, mask_indices\n\n def encode(self, tokens):\n # Append class token to encoder input sequence, as per He (2022)\n tokens = self.append_cls_token(tokens)\n\n z = self.encoder(tokens)\n return z\n\n def decode(self, z, indices, n_drop, img_shape: Tuple[int, int]):\n # Drop class token\n z_without_class_token = z[:, :-1, :]\n\n tokens = self.latent_projection(z_without_class_token)\n tokens = self.append_mask_tokens(tokens, n_dropped=n_drop)\n # Un-shuffle\n tokens.scatter_(dim=1, index=indices[:, :, :tokens.shape[2]], src=tokens.clone())\n tokens = self.decoder_pos_enc(tokens)\n tokens = self.decoder(tokens)\n reconstructed_patches = self.output_projection(tokens)\n\n # Fold reconstructed patches into original image dimensions\n reconstruction = self.fold(reconstructed_patches.swapaxes(1, 2), img_shape)\n return reconstruction, reconstructed_patches\n\n def freeze_encoder(self, freeze_n_layers: int or str or None = None):\n if freeze_n_layers is None:\n return\n\n # - freeze projection\n for _, p in self.patch_projection.named_parameters():\n p.requires_grad = False\n self.patch_projection.eval()\n\n # - freeze encoder layers\n self.encoder.freeze_layers(freeze_n_layers)\n\n\nclass ClassifierEncoder(nn.Module):\n def __init__(self, model, freeze_n_layers='all'):\n super(ClassifierEncoder, self).__init__()\n # TODO the model should be cloned if we reuse the same MAE model for different fine-tunings, like if we ablate\n # the number of fine-tuned layers.\n self.model = model\n self.model.freeze_encoder(freeze_n_layers)\n\n def forward(self, x: torch.Tensor):\n tokens, _ = self.model.tokenize(x)\n\n z = self.model.encode(tokens)\n\n class_token_result = z[:, -1, :]\n return class_token_result\n\n @property\n def d_model(self):\n return self.model.d_model\n\n\nclass FPNBottomUpEncoder(nn.Module):\n def __init__(self, model):\n 
super(FPNBottomUpEncoder, self).__init__()\n self.model = model\n\n @property\n def backbone(self):\n return self.model.encoder.enc_layers\n\n @property\n def tokenize(self):\n return self.model.tokenize\n\n @property\n def kernel_size(self):\n return self.model.kernel_size\n\n @property\n def d_model(self):\n return self.model.d_model\n\n\nif __name__ == '__main__':\n from utilities.config import ConfigReader\n\n mae_ = ConfigReader.load_all(model_key='mae', model_config='debug')[0]\n x_ = torch.rand(1, 3, 224, 224)\n x_hat_, x_hat_patches_, x_patches_, mask_ = mae_(x_)\n print(x_hat_.shape, x_hat_patches_.shape)\n","sub_path":"src/model/mae.py","file_name":"mae.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"123452378","text":"# Copyright 2021 The CLU Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple checkpointing library for TF2/Flax.\n\nThe class `Checkpoint` is a simple wrapper around `tf.train.Checkpoint` that\nalso stores a `flax.struct.dataclass` instance in the same directory.\n\nSynopsis:\n\n from clu import checkpoint\n import flax\n\n @flax.struct.dataclass\n class TrainState:\n optimizer: flax.optim.Optimizer\n step: int\n\n ds = load_tf_dataset()\n ds_iter = iter(ds)\n ckpt = checkpoint.MultihostCheckpoint(base_directory, dict(ds_iter=ds_iter))\n optimizer = create_flax_optimizer()\n state = TrainState(optimizer=optimizer, 
step=0)\n state = ckpt.restore_or_initialize(state) # Also restores `ds_iter`.\n initial_step = int(state.step) + 1\n # Need to replicate all data when training with multiple accelerators.\n state = flax.jax_utils.replicate(state)\n\n for step in range(initial_step, steps + 1):\n state = update_step(state, next(ds_iter))\n ckpt.save(flax.jax_utils.unreplicate(state))\n\nLoading the model e.g. in a Colab:\n\n from clu import checkpoint\n import flax\n from . import mnist_lib\n\n state_dict = checkpoint.load_state_dict(base_directory)\n params = state_dict['optimizer']['target']['params']\n module = mnist_lib.MyArchitecture.partial(num_classes=10)\n model = flax.nn.Model(module, params)\n\"\"\"\n\nimport collections\nimport re\nfrom typing import Any, Dict, Optional, TypeVar\n\nfrom absl import logging\n\nfrom clu.internal import utils\nimport flax\nimport jax\nimport tensorflow as tf\n\n# TODO(b/200953513): Migrate away from logging imports (on module level)\n# to logging the actual usage. See b/200953513.\n\n\n\nT = TypeVar(\"T\")\n\n\ndef load_state_dict(base_directory) -> Dict[str, Any]:\n \"\"\"Restores `state` as dictionary from the latest checkpoint.\n\n Synopsis:\n\n data = checkpoint.load_state_dict(base_directory)\n params = data['optimizer']['target']['params']\n module = mnist_lib.MyArchitecture.partial(num_classes=10)\n model = flax.nn.Model(module, params)\n\n Args:\n base_directory: Directory from which the checkpoints should be restored. 
See\n `Checkpoint.__init__()`.\n\n Returns:\n The deserialized Flax data, as a dictionary.\n\n Raises:\n FileNotFoundError: If there is no checkpoint to restore.\n \"\"\"\n ckpt = Checkpoint(base_directory)\n if not ckpt.latest_checkpoint:\n raise FileNotFoundError(f\"No checkpoint found in {base_directory}\")\n with utils.log_activity(\"load_state_dict\"):\n with tf.io.gfile.GFile(ckpt.latest_checkpoint_flax, \"rb\") as f:\n return flax.serialization.msgpack_restore(f.read())\n\n\nclass CheckpointInfo(\n collections.namedtuple(\"CheckpointInfo\", (\"prefix\", \"number\"))):\n \"\"\"Helper class to parse a TensorFlow checkpoint path.\"\"\"\n\n CHECKPOINT_REGEX = r\"^(?P.*)-(?P\\d+)\"\n\n @classmethod\n def initialize(cls, base_directory, checkpoint_name: str) -> \"CheckpointInfo\":\n \"\"\"Creates a first CheckpointInfo (number=1).\"\"\"\n return cls(f\"{base_directory}/{checkpoint_name}\", 1)\n\n @classmethod\n def from_path(cls, checkpoint: str) -> \"CheckpointInfo\":\n \"\"\"Parses a checkpoint.\n\n Args:\n checkpoint: A checkpoint prefix, as can be found in the\n `.latest_checkpoint` property of a `tf.train.CheckpointManager`.\n\n Returns:\n An instance of `CheckpointInfo` that represents `checkpoint`.\n \"\"\"\n m = re.match(cls.CHECKPOINT_REGEX, checkpoint)\n if m is None:\n RuntimeError(f\"Invalid checkpoint format: {checkpoint}\")\n d = m.groupdict() # pytype: disable=attribute-error\n return cls(d[\"prefix\"], int(d[\"number\"]))\n\n def increment(self) -> \"CheckpointInfo\":\n \"\"\"Returns a new CheckpointInfo with `number` increased by one.\"\"\"\n return CheckpointInfo(self.prefix, self.number + 1)\n\n def __str__(self):\n \"\"\"Does the opposite of `.from_path()`.\"\"\"\n return f\"{self.prefix}-{self.number}\"\n\n\nclass Checkpoint:\n \"\"\"A utility class for storing and loading TF2/Flax checkpoints.\n\n Both the state of a `tf.data.Dataset` iterator and a `flax.struct.dataclass`\n are stored on disk in the following files:\n\n - 
{directory}/checkpoint\n - {directory}/ckpt-{number}.index\n - {directory}/ckpt-{number}.data@*\n - {directory}/ckpt-{number}.flax\n\n Where {number} starts at 1 is then incremented by 1 for every new checkpoint.\n The last file is the `flax.struct.dataclass`, serialized in Messagepack\n format. The other files are explained in more detail in the Tensorflow\n documentation:\n\n https://www.tensorflow.org/api_docs/python/tf/train/Checkpoint\n \"\"\"\n\n def __init__(self,\n base_directory: str,\n tf_state: Optional[Dict[str, Any]] = None,\n *,\n max_to_keep: int = 5,\n checkpoint_name: str = \"ckpt\"):\n \"\"\"Initializes a Checkpoint with a dictionary of TensorFlow Trackables.\n\n Args:\n base_directory: Directory under which the checkpoints will be stored. Use\n a different base_directory in every task.\n tf_state: A dictionary of TensorFlow `Trackable` to be serialized, for\n example a dataset iterator.\n max_to_keep: Number of checkpoints to keep in the directory. If there are\n more checkpoints than specified by this number, then the oldest\n checkpoints are removed.\n checkpoint_name: Prefix of the checkpoint files (before `-{number}`).\n \"\"\"\n if tf_state is None:\n tf_state = dict()\n self.base_directory = base_directory\n self.max_to_keep = max_to_keep\n self.checkpoint_name = checkpoint_name\n self.tf_checkpoint = tf.train.Checkpoint(**tf_state)\n self.tf_checkpoint_manager = tf.train.CheckpointManager(\n self.tf_checkpoint,\n base_directory,\n max_to_keep=max_to_keep,\n checkpoint_name=checkpoint_name)\n\n def get_latest_checkpoint_to_restore_from(self):\n \"\"\"Returns the latest checkpoint to restore from.\n\n In the current implementation, this method simply returns the attribute\n `latest_checkpoint`.\n\n Subclasses can override this method to provide an alternative checkpoint to\n restore from, for example for synchronization across multiple checkpoint\n directories.\n \"\"\"\n return self.latest_checkpoint\n\n @property\n def 
latest_checkpoint(self) -> Optional[str]:\n \"\"\"Latest checkpoint, see `tf.train.CheckpointManager.latest_checkpoint`.\n\n Returns:\n A string to the latest checkpoint. Note that this string is path-like but\n it does not really describe a file, but rather a set of files that are\n constructed from this string, by appending different file extensions. The\n returned value is `None` if there is no previously stored checkpoint in\n `base_directory` specified to `__init__()`.\n \"\"\"\n return self.tf_checkpoint_manager.latest_checkpoint\n\n @property\n def latest_checkpoint_flax(self) -> Optional[str]:\n \"\"\"Path of the latest serialized `state`.\n\n Returns:\n Path of the file containing the serialized Flax state. The returned value\n is `None` if there is no previously stored checkpoint in `base_directory`\n specified to `__init__()`.\n \"\"\"\n if self.latest_checkpoint is None:\n return None\n return self._flax_path(self.latest_checkpoint)\n\n def _flax_path(self, checkpoint: str) -> str:\n return \"{}.flax\".format(checkpoint)\n\n def _next_checkpoint(self, checkpoint: Optional[str]) -> str:\n if checkpoint is None:\n return str(\n CheckpointInfo.initialize(self.base_directory, self.checkpoint_name))\n return str(CheckpointInfo.from_path(checkpoint).increment())\n\n def _checkpoint_number(self, checkpoint: Optional[str]) -> Optional[int]:\n if checkpoint is not None:\n return CheckpointInfo.from_path(checkpoint).number\n\n @utils.logged_with(\"Checkpoint.save()\")\n def save(self, state) -> str:\n \"\"\"Saves a new checkpoints in the directory.\n\n Args:\n state: Flax checkpoint to be stored.\n\n Returns:\n The checkpoint identifier ({base_directory}/ckpt-{number}).\n\n Raises:\n RuntimeError: If tf_checkpoint.save_counter does not match\n tf_checkpoint_manager.latest_checkpoint.\n \"\"\"\n latest_checkpoint_num = self._checkpoint_number(self.latest_checkpoint) or 0\n if latest_checkpoint_num != self.tf_checkpoint.save_counter.numpy():\n raise RuntimeError(\n 
f\"Expected save_counter={self.tf_checkpoint.save_counter.numpy()} \"\n f\"to match latest_checkpoint={self.latest_checkpoint}. Make sure \"\n f\"the checkpoint is initialized via `.restore_or_initialize()` \"\n f\"before it's stored and that no other process writes to the same \"\n f\"checkpoint directory.\")\n next_checkpoint = self._next_checkpoint(self.latest_checkpoint)\n flax_path = self._flax_path(next_checkpoint)\n if not tf.io.gfile.exists(self.base_directory):\n tf.io.gfile.makedirs(self.base_directory)\n with tf.io.gfile.GFile(flax_path, \"wb\") as f:\n f.write(flax.serialization.to_bytes(state))\n checkpoints = set(self.tf_checkpoint_manager.checkpoints)\n # Write Tensorflow data last. This way Tensorflow checkpoint generation\n # logic will make sure to only commit checkpoints if they complete\n # successfully. A previously written `flax_path` would then simply be\n # overwritten next time.\n self.tf_checkpoint_manager.save()\n for checkpoint in checkpoints.difference(\n self.tf_checkpoint_manager.checkpoints):\n tf.io.gfile.remove(self._flax_path(checkpoint))\n if next_checkpoint != self.latest_checkpoint:\n raise AssertionError( # pylint: disable=g-doc-exception\n \"Expected next_checkpoint to match latest_checkpoint: \"\n f\"{next_checkpoint} != {self.latest_checkpoint}\")\n return self.latest_checkpoint\n\n @utils.logged_with(\"Checkpoint.restore_or_initialize()\")\n def restore_or_initialize(self,\n state: T,\n checkpoint: Optional[str] = None) -> T:\n \"\"\"Restores from the latest checkpoint, or creates a first checkpoint.\n\n Args:\n state : A flax checkpoint to be stored or to serve as a template. If the\n checkoint is restored (and not initialized), then the fields of `state`\n must match the data previously stored.\n checkpoint: A flax checkpoint to be restored. If not specified, the\n latest checkpoint is restored.\n\n Returns:\n The restored `state` object. 
Note that all TensorFlow `Trackable`s in\n `tf_state` (see `__init__()`) are also updated.\n \"\"\"\n if checkpoint:\n checkpoint_to_restore = checkpoint\n else:\n logging.info(\"No checkpoint specified. Restore the latest checkpoint.\")\n checkpoint_to_restore = self.get_latest_checkpoint_to_restore_from()\n if not checkpoint_to_restore:\n logging.info(\"Checkpoint %s does not exist.\", checkpoint_to_restore)\n self.save(state)\n return state\n logging.info(\"Restoring checkpoint: %s\", checkpoint_to_restore)\n self.tf_checkpoint.restore(checkpoint_to_restore)\n flax_path = self._flax_path(checkpoint_to_restore)\n with tf.io.gfile.GFile(flax_path, \"rb\") as f:\n state = flax.serialization.from_bytes(state, f.read())\n logging.info(\"Restored save_counter=%d restored_checkpoint=%s\",\n self.tf_checkpoint.save_counter.numpy(),\n checkpoint_to_restore)\n return state\n\n def restore(self, state: T, checkpoint: Optional[str] = None) -> T:\n \"\"\"Restores from the latest checkpoint.\n\n Similar to `restore_or_initialize()`, but raises a `FileNotFoundError` if\n there is no checkpoint.\n\n Args:\n state : A flax checkpoint to be stored or to serve as a template. If the\n checkoint is restored (and not initialized), then the fields of `state`\n must match the data previously stored.\n checkpoint: A flax checkpoint path to be restored. If not specified, the\n latest checkpoint is restored.\n\n Returns:\n The restored `state` object. 
Note that all TensorFlow `Trackable`s in\n `tf_state` (see `__init__()`) are also updated.\n\n Raises:\n FileNotFoundError: If specified checkpoint does not exist, or if there\n is no checkpoint to restore in case no checkpoint was specified.\n \"\"\"\n checkpoint = self._check_or_get_latest_checkpoint(checkpoint)\n return self.restore_or_initialize(state, checkpoint)\n\n def restore_dict(self, checkpoint: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"Restores from the checkpoint as state dict.\n\n Args:\n checkpoint: A flax checkpoint path to be restored. If not specified, the\n latest checkpoint is restored.\n\n Returns:\n The restored state dict.\n Note that all TensorFlow `Trackable`s in `tf_state` (see `__init__()`) are\n also updated.\n\n Raises:\n FileNotFoundError: If specified checkpoint does not exist, or if there\n is no checkpoint to restore in case no checkpoint was specified.\n \"\"\"\n checkpoint = self._check_or_get_latest_checkpoint(checkpoint)\n logging.info(\"Restoring checkpoint as state dict from %s\", checkpoint)\n self.tf_checkpoint.restore(checkpoint)\n flax_path = self._flax_path(checkpoint)\n with tf.io.gfile.GFile(flax_path, \"rb\") as f:\n state = flax.serialization.msgpack_restore(f.read())\n return state\n\n def _check_or_get_latest_checkpoint(self, checkpoint: Optional[str]) -> str:\n if checkpoint:\n if not tf.io.gfile.exists(self._flax_path(checkpoint)):\n raise FileNotFoundError(f\"Checkpoint {checkpoint} does not exist\")\n else:\n checkpoint = self.get_latest_checkpoint_to_restore_from()\n if not checkpoint:\n raise FileNotFoundError(f\"No checkpoint found at {self.base_directory}\")\n return checkpoint\n\n\nclass MultihostCheckpoint(Checkpoint):\n \"\"\"An subclass of `Checkpoint` that synchronizes between multiple JAX hosts.\n\n If the training split across multiple hosts, then the following race condition\n can occur : If a host is pre-empted while writing a checkpoint, then the other\n hosts will only be restarted with a 
small delay, and at that point they\n probably already have finished writing their checkpoint. Upon restart, the\n host that was interrupted while writing the checkpoint will load the latest\n fully written checkpoint, which will be out of sync with the other hosts that\n successfully wrote one more checkpoint.\n\n This class also allows to specify a `multihost_base_directory` that is\n identical for all hosts and will be used to drive a host-specific directory.\n \"\"\"\n\n def __init__(self,\n multihost_base_directory: str,\n tf_state: Optional[Dict[str, Any]] = None,\n *,\n host_id: Optional[int] = None,\n max_to_keep: int = 5,\n checkpoint_name: str = \"ckpt\"):\n \"\"\"Initializes a MultihostCheckpoint with a dict of TensorFlow Trackables.\n\n Args:\n multihost_base_directory: Directory that will be used to construct a\n host-specific `base_directory` under which the checkpoints will be\n stored. Usually a directory *within* the work unit's workdirectory\n (e.g. `f\"{workdir}/checkpoints`). One directory per host will be created\n at the same level as this base directory labeled\n `f\"{multihost_base_directory}-{host_id}\"`.\n tf_state: A dictionary of TensorFlow `Trackable` to be serialized, for\n example a dataset iterator.\n host_id: Host ID used to construct the `base_directory`. Taken from\n `jax.process_index()` if not specified.\n max_to_keep: Number of checkpoints to keep in the directory. 
If there are\n more checkpoints than specified by this number, then the oldest\n checkpoints are removed.\n checkpoint_name: Prefix of the checkpoint files (before `-{number}`).\n \"\"\"\n if max_to_keep < 2:\n raise ValueError(\"Requires multiple checkpoints (max_to_keep>=2).\")\n multihost_base_directory = multihost_base_directory.rstrip(\"/\")\n self.multihost_base_directory = multihost_base_directory\n if host_id is None:\n host_id = jax.process_index()\n base_directory = f\"{multihost_base_directory}-{host_id}\"\n super().__init__(\n base_directory,\n tf_state,\n max_to_keep=max_to_keep,\n checkpoint_name=checkpoint_name)\n\n @utils.logged_with(\n \"MultihostCheckpoint.get_latest_checkpoint_to_restore_from()\")\n def get_latest_checkpoint_to_restore_from(self) -> Optional[str]:\n \"\"\"Returns the latest checkpoint available on all hosts.\"\"\"\n base_directory_glob = f\"{self.multihost_base_directory}-*\"\n base_directories = tf.io.gfile.glob(base_directory_glob)\n if self.base_directory not in base_directories:\n return None\n checkpoints = {}\n common_numbers = None\n all_numbers = set()\n for base_directory in base_directories:\n checkpoint_manager = tf.train.CheckpointManager(\n tf.train.Checkpoint(),\n base_directory,\n max_to_keep=self.max_to_keep,\n checkpoint_name=self.checkpoint_name)\n numbers = [\n CheckpointInfo.from_path(checkpoint).number\n for checkpoint in checkpoint_manager.checkpoints\n ]\n checkpoints[base_directory] = dict(\n zip(numbers, checkpoint_manager.checkpoints))\n numbers = set(numbers)\n if common_numbers is None:\n common_numbers = numbers\n else:\n common_numbers &= numbers\n all_numbers |= numbers\n logging.info(\n \"Checked checkpoint base_directories: %s - common_numbers=%s \"\n \"- exclusive_numbers=%s\", base_directories, common_numbers,\n all_numbers.difference(common_numbers))\n if not common_numbers:\n return None\n highest_number = sorted(common_numbers)[-1]\n return 
checkpoints[self.base_directory][highest_number]\n","sub_path":"clu/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":18241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"501317390","text":"import time\r\n\r\nimport random\r\nfrom selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager as CM\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nwith open('tag.txt','r') as f:\r\n tags = [line.strip() for line in f]\r\n\r\ndef doesnt_exist(driver,xpath):\r\n try:\r\n driver.find_element_by_xpath(xpath)\r\n except NoSuchElementException:\r\n return True\r\n else:\r\n return False\r\ndef random_comment():\r\n with open('comments.txt','r') as f:\r\n comments = [line.strip() for line in f]\r\n comment = random.choice(comments) \r\n return comment\r\n\r\nOptions =webdriver.ChromeOptions()\r\n\r\nmobile_emulation = {\r\n\r\n \"deviceMetrics\": { \"width\": 360, \"height\": 640, \"pixelRatio\": 3.0 },\r\n\r\n \"userAgent\": \"Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19\" }\r\n\r\nOptions.add_experimental_option(\"mobileEmulation\", mobile_emulation)\r\nOptions.add_argument(\"--log-level=3\")\r\nbot = webdriver.Chrome(options=Options, executable_path=CM().install())\r\nbot.set_window_size(500,950)\r\nbot.get('https://www.instagram.com/')\r\ntime.sleep(3)\r\nprint(\"Logging in....\")\r\nbot.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/article/div/div/div/div[3]/button[1]').click()\r\ntime.sleep(2)\r\n\r\nusername_field 
=bot.find_element_by_xpath('//*[@id=\"loginForm\"]/div[1]/div[3]/div/label/input')\r\nusername_field.send_keys('username')\r\ntime.sleep(1)\r\n\r\npassword_field=bot.find_element_by_xpath('//*[@id=\"loginForm\"]/div[1]/div[4]/div/label/input')\r\npassword_field.send_keys('password')\r\ntime.sleep(1)\r\n\r\nbot.find_element_by_xpath('//*[@id=\"loginForm\"]/div[1]/div[6]/button').click()\r\ntime.sleep(13)\r\n#==================\r\n# fethching=========\r\nprint(\"Fething ...\")\r\ntag = random.choice('tags')\r\nlink = \"https://www.instagram.com/explore/tags/\" + tag\r\n\r\nbot.get(link)\r\ntime.sleep(4)\r\nfor i in range(1):\r\n ActionChains(bot).send_keys(Keys.END).perform()\r\n time.sleep(2)\r\nrow1 =bot.find_element_by_xpath('//*[@id=\"react-root\"]/div/div/section/main/article/div[1]')\r\nrow2 =bot.find_element_by_xpath('//*[@id=\"react-root\"]/div/div/section/main/article/div[2]')\r\nr_link1= row1.find_elements_by_tag_name('a')\r\nr_link2= row1.find_elements_by_tag_name('a')\r\nlinks = r_link1+r_link2\r\n\r\nurls = []\r\n\r\nfor i in links:\r\n if i.get_attribute('href') != None:\r\n urls.append(i.get_attribute('href'))\r\n\r\n\r\n\r\n\r\n#============\r\n\r\n#comments======== \r\nfor url in urls:\r\n comment = random_comment()\r\n bot.get(url)\r\n bot.implicitly_wait(1)\r\n time.sleep(3)\r\n\r\n\r\n bot.find_element_by_xpath(\r\n '//*[@id=\"react-root\"]/div/div/section/main/div/div/article/div/div[3]/div/div/section[1]/span[2]/button').click()\r\n\r\n if doesnt_exist(bot,'//*[@id=\"react-root\"]/div/div/section/main/section/div'):\r\n print('Skiped - comments disabled')\r\n else:\r\n find_textarea= (\r\n By.XPATH,'//*[@id=\"react-root\"]/div/div/section/main/section/div/form/textarea')\r\n WebDriverWait(bot,50).until(\r\n EC.presence_of_element_located(find_textarea)\r\n\r\n )\r\n comment_box = bot.find_element(*find_textarea)\r\n WebDriverWait(bot,50).until(\r\n EC.element_to_be_clickable(find_textarea)\r\n\r\n )\r\n comment_box.click()\r\n \r\n 
comment_box.send_keys(comment)\r\n time.sleep(2)\r\n\r\n\r\n find_button= (\r\n By.XPATH,'//*[@id=\"react-root\"]/div/div/section/main/section/div/form/button')\r\n WebDriverWait(bot,50).until(\r\n EC.presence_of_element_located(find_button)\r\n\r\n )\r\n button = bot.find_element(*find_button)\r\n WebDriverWait(bot,50).until(\r\n EC.element_to_be_clickable(find_button)\r\n\r\n )\r\n button.click()\r\n\r\n time.sleep(random.randint(7,30))\r\n\r\n\r\ntime.sleep(100)\r\n\r\nbot.close()\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"431249662","text":"class Solution(object):\n def countPrimes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # num = 0\n \n # for i in range(2,n+1):\n # if self.isPrime(i):\n # num+=1\n if n <= 2:\n return 0\n \n prime = [True] * n\n prime[:2] = [False, False]\n for base in range(2, int((n - 1) ** 0.5) + 1):\n if prime[base]:\n prime[base ** 2::base] = [False] * len(prime[base ** 2::base])\n return sum(prime) \n \ns= Solution()\n\nprint (s.countPrimes(100))","sub_path":"workspace/Leetcode/easy/primeNum.py","file_name":"primeNum.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"149189178","text":"#!/usr/bin/env python2\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\n\nimport recognizer.cnn as cnn\nimport recognizer.data as data\n\n# GLOBAL VARIABLE\nWHALE_TRAIN_DATA = \"../train.csv\"\nWHALE_TEST_DATA = \"../test.csv\"\n\n\nif __name__ == \"__main__\":\n # load data\n X_train, Y_train, X_test, Y_test = data.load_whale_data(\n WHALE_TRAIN_DATA,\n WHALE_TEST_DATA\n )\n\n print(\"X_test.shape == {};\".format(X_test.shape))\n print(\"Y_test.shape == {};\".format(Y_test.shape))\n\n # configuration\n kwargs = {\n 
\"X_train\": X_train,\n \"Y_train\": Y_train,\n \"X_test\": X_test,\n \"Y_test\": Y_test,\n \"input_shape\": (1, 96, 96),\n \"nb_classes\": 447,\n \"data_augmentation\": True,\n\n \"nb_convo_layers\": 3,\n \"nb_filters\": [32, 64, 128],\n \"nb_conv\": [3, 3, 3],\n\n \"convo_activations\": [\"relu\", \"relu\", \"relu\"],\n \"maxpools\": [True, True, True],\n \"pool_sizes\": [2, 2, 2],\n \"convo_dropouts\": [None, None, None],\n\n \"nb_dense_layers\": 3,\n \"dense_hidden_neurons\": [1000, 1000, 447],\n \"dense_activations\": [\"relu\", \"relu\", \"softmax\"],\n \"dense_dropouts\": [0.5, 0.5, None],\n\n \"loss\": \"categorical_crossentropy\",\n \"optimizer\": \"adadelta\",\n \"nb_epoch\": 200,\n \"batch_size\": 32,\n\n \"model_file\": \"model.json\",\n \"weights_file\": \"weights.dat\",\n \"results_file\": \"results.dat\"\n }\n\n # run cnn\n cnn.cnn(**kwargs)\n","sub_path":"experiments/cnn/scripts/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326756470","text":"from django.test import TestCase\nfrom retailapp.models import Product\nimport retailapp.sales\nfrom retailexcept import ValidationError\n\nclass SalesTest(TestCase):\n\n def setUp(self):\n self.phone_A = Product.objects.create(\n prod_id='PROD1',\n prod_name='Phone_A',\n prod_desc='Phone A')\n \n def test_validate_sales(self):\n sales_data = {}\n #Test exception\n self.assertRaises(\n ValidationError,\n retailapp.sales.validate_sales,\n sales_data)\n","sub_path":"retailapp/testsales.py","file_name":"testsales.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"112091538","text":"# 利用10个特征预测window_forward天的确诊人数\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport torch.utils.data as Data\r\nimport matplotlib.pyplot as plt\r\nimport LSTM_DataLoad as 
preprocess\r\nimport math\r\nimport os\r\nimport random\r\n\r\n\r\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\r\n\r\n\r\ndef set_seed(seed):\r\n '''\r\n fix the seed\r\n :param seed: seed\r\n :return: None\r\n '''\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n torch.manual_seed(seed)\r\n\r\n\r\n# 画出模型的效果图\r\ndef loss_res(epochs, train_loss, val_loss, learning_rate, hidden_num):\r\n plt.clf()\r\n plt.plot(list(range(1, epochs+1)), train_loss, 'r', label='train loss')\r\n plt.plot(list(range(1, epochs+1)), val_loss, 'b', label='val loss')\r\n plt.grid(True)\r\n plt.xlabel('epoch')\r\n plt.ylabel('loss')\r\n plt.legend(loc=\"upper right\")\r\n plt.savefig('../result/figure/India/5/LSTM/loss_' + country +\r\n '_' + str(learning_rate) + '_' + str(hidden_num) + '_' + str(epochs) + '_' + str(window_backward) + '.png')\r\n\r\n\r\ndef model_res(test_pred, ind, learning_rate, hidden_num):\r\n real = disease[country]['confirmed_delta'].values.reshape(-1, 1)\r\n test_pred = test_pred.reshape(-1, 1)\r\n test_pred_merge = np.concatenate((train_val_pred[-1:], test_pred))\r\n\r\n plt.clf()\r\n plt.scatter(list(range(len(real))), real, s=5, c='r', label='Real Count')\r\n plt.plot(list(range(ind+window_backward, ind+window_backward+len(train_val_pred))), train_val_pred,\r\n c='b', label='LSTM fitting')\r\n plt.plot(list(range((len(real)-(test_num+1)), len(real))), test_pred_merge,\r\n c='orange', label='LSTM prediction')\r\n plt.legend(loc='best')\r\n plt.title('Number of confirmed for ' + country + ' (' + str(window_forward) + ' days)')\r\n plt.vlines(ind + window_backward, min(real), max(real), colors=\"grey\", linestyles=\"dashed\")\r\n # plt.vlines(len(real)-5, min(real), max(real), colors=\"grey\", linestyles=\"dashed\")\r\n plt.savefig('../result/figure/India/5/LSTM/confirmed_delta_' + country +\r\n '_' + str(learning_rate) + '_' + str(hidden_num) + '_' + str(epochs) + '_' + str(window_backward) + '.png')\r\n\r\n\r\ndef rescaled(x, minval, maxval):\r\n '''\r\n :param 
x:\r\n :param minval: the minimal number of historical confirmed cases\r\n :param maxval: the maximum number of historical confirmed cases\r\n :return: rescaled value\r\n '''\r\n x = np.array(x)\r\n return (maxval-minval) * x + minval\r\n\r\n\r\ndef rmse(arr1, arr2):\r\n return math.sqrt(((arr1 - arr2) ** 2).mean())\r\n\r\n\r\ndef mae(y_true, y_pred):\r\n y_true = np.array(y_true)\r\n y_pred = np.array(y_pred)\r\n return np.mean(abs(y_true - y_pred))\r\n\r\n\r\ndef mape(y_true, y_pred):\r\n return np.mean(abs(y_true - y_pred) / y_true)\r\n\r\n\r\nclass lstm_model(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_layers, output_size, batch_size):\r\n super(lstm_model, self).__init__()\r\n self.input_size = input_size # number of features OF input\r\n self.hidden_size = hidden_size # number of features in the hidden state\r\n self.num_layers = num_layers # number of layer\r\n self.output_size = output_size # number of pred day\r\n self.batch_size = batch_size\r\n\r\n # (seq_len, batch, input_size)\r\n self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers)\r\n self.fc = nn.Linear(self.hidden_size, self.output_size)\r\n\r\n def forward(self, input):\r\n input = input.transpose(0, 1) # (seq_len, batch_size, n_feature)\r\n lstm_out, self.hidden_cell = self.lstm(input) # lstm_out: (seq_len, batch_size, hidden_size)\r\n y_pred = self.fc(lstm_out)\r\n return y_pred[-1]\r\n\r\n\r\ndef model_train(model):\r\n '''\r\n train the model.\r\n :param model: model.\r\n :return: trained LSTM.\r\n '''\r\n model.train()\r\n loss_func = nn.MSELoss()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n\r\n train_loss_list = [] # train_loss for each epoch\r\n val_loss_list = [] # val_loss for each epoch\r\n\r\n for i in range(epochs):\r\n train_loss = 0\r\n val_loss = 0\r\n for seq, labels in train_loader:\r\n optimizer.zero_grad()\r\n train_pred = model(seq)\r\n\r\n single_loss = loss_func(train_pred, labels)\r\n 
single_loss.backward()\r\n optimizer.step()\r\n train_loss += single_loss * len(labels)\r\n\r\n for seq, labels in val_loader:\r\n val_pred = model(seq)\r\n single_loss = loss_func(val_pred, labels)\r\n val_loss += single_loss * len(labels)\r\n\r\n train_loss_list.append(train_loss / len(train_y))\r\n val_loss_list.append(val_loss / len(val_y))\r\n\r\n print(f'epoch: {i:3}, train_loss: {train_loss / len(train_y):.6f}, val_loss: {val_loss / len(val_y):.6f}')\r\n\r\n loss_res(epochs, train_loss_list, val_loss_list, learning_rate, model.hidden_size)\r\n\r\n return model\r\n\r\n\r\ndef model_eval(model):\r\n '''\r\n predict the model for the train_val data.\r\n :param model: trained model.\r\n :return: predicted number of confirmed cases.\r\n '''\r\n model.eval()\r\n train_val_pred = []\r\n train_val_real = []\r\n for (id,(seq, labels)) in enumerate(train_val_loader):\r\n # if is not the last sample\r\n if id != len(train_val_loader)-1:\r\n # select the first value as the prediction\r\n train_val_pred.append([model(seq).detach().numpy()[0][0]])\r\n train_val_real.append([labels.detach().numpy()[0][0]])\r\n # if is the last sample\r\n else:\r\n train_val_pred.append(model(seq).detach().numpy().reshape(-1).tolist())\r\n train_val_real.append(labels.detach().numpy().reshape(-1).tolist())\r\n\r\n return train_val_pred\r\n\r\n\r\ndef model_pred(model):\r\n test_pred_set = []\r\n test_real_set = []\r\n for i in range(1, 7):\r\n if i != 6:\r\n test_data = feature_country.iloc[-test_num:-(test_num-window_forward*i), 2:]\r\n else:\r\n test_data = feature_country.iloc[-(test_num-window_forward*i):, 2:]\r\n # 0-1 normalize the test_X\r\n test_data_scaled = scaler.transform(test_data.values.reshape(-1, n_features))\r\n # normalized train_val_test_X\r\n data_scaled = np.concatenate((train_val_data_scaled, test_data_scaled), axis=0)\r\n test_y = data_scaled[-window_forward:,0] # take the latest window_forward days as the label\r\n test_X = 
torch.tensor(data_scaled[-(window_forward+window_backward):-window_forward,:], dtype=torch.float).reshape(\r\n -1, window_backward, n_features) # take the previous days as the X\r\n pre_temp = model(test_X).detach().numpy() # predict window_forward days\r\n test_pred_set.append(pre_temp.reshape(-1).tolist())\r\n test_real_set.append(test_y.tolist())\r\n return test_pred_set, test_real_set\r\n\r\n\r\nif __name__ == '__main__':\r\n os.chdir('../../')\r\n set_seed(100) # fix the seed\r\n\r\n\r\n batch_size = 8\r\n learning_rate = 0.0001 # lr\r\n epochs = 300\r\n window_backward = 10 # 历史窗口\r\n window_forward = 5 # 预测窗口\r\n test_num = 30 # 测试集的样本量\r\n n_features = 10 # number of features\r\n\r\n\r\n country = 'India'\r\n\r\n\r\n disease = preprocess.disease() # disease-related data\r\n mobility = preprocess.mobility(country) # mobility info\r\n feature_country, ind = preprocess.merge_hist(mobility, disease, country) # merged features\r\n\r\n\r\n train_val_data = feature_country.iloc[:-test_num, 2:] # train-validation data\r\n # 0-1 normalize all the features\r\n train_val_data_scaled, minval, maxval, scaler = preprocess.maxmin_scale(train_val_data)\r\n # generate the sequence for train and val\r\n train_val_data_scaled_sample = preprocess.series_to_supervised(\r\n train_val_data_scaled, seq_len=window_backward, n_out=window_forward)\r\n\r\n\r\n col_ind = list(range(window_backward * n_features))\r\n col_ind.extend([window_backward * n_features + i * n_features for i in range(window_forward)]) # sample + label\r\n train_val_data_scaled_sample = train_val_data_scaled_sample.iloc[:, col_ind]\r\n train_X, train_y, val_X, val_y, train_val_X, train_val_y = preprocess.data_split(\r\n train_val_data_scaled_sample, window_backward, window_forward, n_features)\r\n\r\n\r\n train = Data.TensorDataset(train_X, train_y)\r\n train_loader = Data.DataLoader(dataset=train, batch_size=batch_size, shuffle=False)\r\n val = Data.TensorDataset(val_X, val_y)\r\n val_loader = 
Data.DataLoader(dataset=val, batch_size=1, shuffle=False)\r\n train_val = Data.TensorDataset(train_val_X, train_val_y)\r\n train_val_loader = Data.DataLoader(dataset=train_val, batch_size=1, shuffle=False)\r\n\r\n\r\n LSTM = lstm_model(input_size=n_features, hidden_size=16, num_layers=1, output_size=window_forward, batch_size=batch_size)\r\n LSTM = model_train(LSTM)\r\n train_val_pred = model_eval(LSTM)\r\n train_val_pred = [j for i in train_val_pred for j in i]\r\n train_val_pred = rescaled(np.array(train_val_pred).reshape(-1, 1), minval, maxval)\r\n\r\n\r\n test_pred_set, test_y_set= model_pred(LSTM)\r\n test_pred_0 = rescaled([i for pred in test_pred_set for i in pred], minval, maxval)\r\n # test_real = rescaled([i for real in test_y_set for i in real], minval, maxval)\r\n test_pred = [rescaled([i for i in pred], minval, maxval) for pred in test_pred_set]\r\n test_real = [rescaled([i for i in pred], minval, maxval) for pred in test_y_set]\r\n\r\n pred_rmse = np.mean([rmse(test_pred[i], test_real[i]) for i in range(6)])\r\n pred_mape = np.mean([mape(test_pred[i], test_real[i]) for i in range(6)])\r\n pred_mae = np.mean([mae(test_pred[i], test_real[i]) for i in range(6)])\r\n # LSTM_test_rmse = rmse(test_real, test_pred)\r\n # LSTM_test_mape = mape(test_real, test_pred)\r\n # LSTM_test_mae = mae(test_real, test_pred)\r\n\r\n\r\n model_res(test_pred_0, ind, learning_rate, LSTM.hidden_size)\r\n with open('../result/figure/India/5/LSTM/' + country + '_model_res.txt', 'a') as file:\r\n file.write('hidden number {}, learning_rate {}, epochs {}, window_backward {}, batch size {}, '\r\n 'test_rmse: {:d}, test_mae: {:d}, test_mape: {:.6f}'.format(\r\n LSTM.hidden_size, learning_rate, epochs, window_backward, batch_size,\r\n round(pred_rmse), round(pred_mae), pred_mape))\r\n 
file.write('\\n')\r\n","sub_path":"India/5/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":10407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"124692818","text":"import os, sys\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\n\n# General Purpose open/read/write routines for \n# a number of common filetypes I use in speech processing\n#\n \ndef read(fname,filetype='RAW',datatype='LIST',encoding='latin1',dtype=[],args={},header=False):\n ''' reads data till the end of file for a number of different file and datatypes\n input arguments:\n fname file name (string)\n encoding latin1 (default), utf8, .. \n filetype RAW (default), SPRAAK, MATLAB\n datatype LIST (default), DICT, WAV, FRAME, SPRSEG, SEG\n dtype string, float32, int32, segdf, ... (numpy dtype)\n args dictionary for passing on optional arguments\n header TRUE, FALSE (returns data+header in tuple when true)\n return values:\n data (,hdr) the data and optionally the hdr (when header=TRUE is specified)\n '''\n \n if not os.path.isfile(fname):\n print(\"File path {} does not exist. 
Exiting...\".format(fname))\n sys.exit()\n \n \n hdr = {}\n if( filetype == 'MATLAB' ):\n data = sio.loadmat(fname,squeeze_me=True)\n return data\n elif( filetype == 'SPRAAK'):\n fp = open(fname,'r',encoding=encoding) \n fp, hdr = read_spr_hdr(fp)\n else:\n fp = open(fname,'r',encoding=encoding)\n \n if( 'datatype' in hdr.keys() ): datatype = hdr['datatype'] \n if( 'dtype' in hdr.keys() ): dtype = hdr['dtype'] \n\n if( datatype =='FRAME' ):\n data = np.fromfile(fp,dtype=dtype,count=-1)\n if( 'DIM1' in hdr.keys() and 'DIM2' in hdr.keys() ):\n nfr = int(hdr['DIM1'])\n nparam = int(hdr['DIM2']) \n data = np.reshape(data,(nfr,nparam))\n elif( datatype == 'SPRSEG'):\n data = read_spr_segdata(fp,hdr,args)\n elif( datatype == 'SEG'):\n data = read_segdata(fp,args)\n elif( datatype == 'DICT'):\n data = read_dict(fp,args) \n else:\n data = fp.read()\n \n \n if header:\n return data, hdr\n else:\n return data\n\ndef decode_args(args,defaults):\n '''\n decoding of optional / default arguments\n defaults: is a dict containing the values of parameter defaults \n args: is a dict containing the optional arguments (possibly more than applicable here)\n returns\n params: is a dict with locally relevant parameters set to the correct default / optional values\n '''\n \n params = defaults\n for key in args.keys():\n if key in params.keys(): params[key] = args[key]\n return(params)\n\ndef read_dict(fp,args={}):\n '''\n Reads a datafile directly into a dictionary structure, with params:\n ckey = column containing the keys (default = 0)\n cvalue = column containing the values (default = 1)\n maxsplit = max number of splits (default = -1)\n \n '''\n params = decode_args(args,{'ckey':0,'cvalue':1,'maxsplit':-1})\n\n dic = {}\n for line in fp:\n w = line.strip().split(maxsplit=params['maxsplit'])\n dic[w[params['ckey']]] = w[params['cvalue']]\n return(dic)\n\ndef read_spr_hdr(fp):\n '''\n Reads the header of a SPRAAK file with .spr, .key or ASCII header\n i.e. 
explicitly assumes that the file has header data till a line starting with \"#\" \n and that data after that\n \n The header consists with multiple lines consisting of KEY VALUE pairs where\n the first word is the KEY and the REMAINDER the VALUE\n\n returns\n fp: file pointer at the beginning of the data section\n hdr: header as a Python dictionary\n '''\n hdr = {}\n first_time = True\n while(1):\n line = fp.readline()\n # print(\"reading: \",line)\n line = line.strip()\n # determine the header type of the file\n if ( first_time ):\n first_time = False\n if( line != \".key\" and line != \".spr\"):\n # assuming ascii header if neither .key or .spr found\n hdr['.ascii'] = None\n # continue reading header KEY VALUE pairs till EOH is detected\n if len(line) == 0: \n continue\n elif line[0]==\"#\": \n break\n else:\n w = line.split(None,1)\n if len(w) == 1: hdr[w[0]] = None\n else: hdr[w[0]] = w[1] \n # print(\"last line in loop: \",line) \n # convert and overwrite certain header keys\n if 'DATA' in hdr.keys():\n if hdr['DATA'] == 'TRACK': hdr['datatype'] = 'FRAME'\n if hdr['DATA'] == 'SEG': hdr['datatype'] = 'SPRSEG'\n if 'TYPE' in hdr.keys():\n if hdr['TYPE'] == 'F32': hdr['dtype'] = 'float32'\n if hdr['TYPE'] == 'INT': hdr['dtype'] = 'int32'\n \n return fp, hdr\n\ndef read_segdata(fp,args={}):\n \"\"\"\n input: \n segmentation file consisting of lines\n first_frame last_frame+1 seg_name\n params (dict):\n segtype = CONTINOUS, DISCRETE (default)\n frameshift = 1 (i.e. 
frame based segmentation, can be used for downsampling from sample segs )\n col_t0 = column containing t0 (default=0)\n col_t1 = column containing t1 (default=1)\n col_seg = column containing seg (default=2)\n output:\n panda's datastructure with columns [seg,t0,t1]\n seg = segment name\n t0 = starting frame (counting starting at 0)\n t1 = last frame +1 (python style ranges)\n \"\"\"\n\n params = decode_args(args,{'frameshift':1,'col_t0':0,'col_t1':1,'col_seg':2})\n \n ww = []\n t0 = []\n t1 = []\n # read input file without doing conversions\n cnt = 0\n for line in fp:\n w = line.strip().split()\n t0.append(round(int(w[params['col_t0']])/params['frameshift']))\n t1.append(round(int(w[params['col_t1']])/params['frameshift']))\n ww.append(w[params['col_seg']])\n cnt += 1\n #cnt,len(t0),len(t1),len(ww)\n fp.close()\n df = pd.DataFrame({'t0':t0,'t1':t1,'seg':ww})\n return(df)\n\n\ndef read_spr_segdata(fp,hdr,params={}):\n \"\"\"\n input: \n fp points to SPRAAK style segmentation file consisting of lines\n entry_name seg_name begin end/nfr\n entry_name is a file reference or a '-' as continuation sign from previous line\n seg_name is a word/phone/state reference\n begin begin_time for CONTINOUS, first_frame for DISCRETE (counting from 0)\n end/nfr end_time for CONTINOUS, n_frames for DISCRETE\n params:\n frameshift continous time frameshift to be applied for converting continous to discrete (default=0.01)\n output:\n a dictionary with keys=entry_names values=segmenations as panda datastructure\n panda datastructures have columns [seg,t0,t1]\n seg = segment name\n t0 = starting frame (counting starting at 0)\n t1 = last frame +1 (python style ranges)\n \"\"\"\n\n C2D = False\n if( 'TIMEBASE' in hdr.keys() ):\n if ( hdr['TIMEBASE'] == \"CONTINUOUS\" ):\n if ('frameshift' in params.keys() ): frameshift = params['frameshift']\n else: frameshift = 0.01\n C2D = True\n \n First_time = True\n \n segdata = {}\n segname = \"\"\n ww = []\n t0 = []\n t1 = []\n cnt = 0\n for line in 
fp:\n w = line.strip().split()\n if w[0] != \"-\": \n if First_time: First_time = False\n else: segdata[segname] = pd.DataFrame({'t0':t0,'t1':t1,'seg':ww})\n ww = []\n t0 = []\n t1 = []\n cnt = 0\n segname=w[0]\n\n # process segmentation\n if C2D:\n i0 = round(float(w[2])/frameshift)\n i1 = round(float(w[3])/frameshift)\n t0.append(i0)\n t1.append(i1)\n else:\n i0 = int(w[2])\n i1 = int(w[3])\n t0.append(i0)\n t1.append(i0+i1)\n ww.append(w[1])\n cnt+=1\n \n # still need to end last entry to output \n segdata[segname] = pd.DataFrame({'t0':t0,'t1':t1,'seg':ww}) \n \n return(segdata)\n \ndef write_timit_seg_file(fname,segdf):\n \"\"\"\n write a TIMIT style segmentation to file\n \"\"\"\n nseg = len(segdf)\n fp = open(fname,\"w\")\n for i in range(nseg):\n fp.write('{:6d} {:6d} {:10s} \\n'.format(segdf['t0'][i],segdf['t1'][i],segdf['seg'][i]) )\n fp.close()\n \ndef xlat_seg(isegdf,xlat_dic,MERGE_CLOSURES=False):\n \"\"\"\n convert alphabets between input segmentation and output segmentations\n optionally merge identical labels\n this means that glottal closures are mapped to intra-word silence segments, this is OK for frame based analysis\n for segmental analysis it may be better to group the glottal closures with their respective plosive part\n inputs:\n isegdf: input segmentation in panda dataframe format\n xlat_dic: phone translation dictionary\n MERGE_CLOSURES: flag \n False: convert segments 1-on-1 and merge segments with identical labels\n True: merge glottal closures with adjoining plosive first \n \"\"\"\n if MERGE_CLOSURES:\n print(\"ERROR(xlat_seg): Closure Merging not supported Yet\")\n exit(-1)\n \n oseg = 0\n iseg = 0\n ww=isegdf.seg\n t0=isegdf.t0\n t1=isegdf.t1\n cnt = len(t0)\n xww = []\n xt0 = []\n xt1 = []\n while iseg < cnt:\n xww.append(xlat_dic[ww[iseg]])\n xt0.append(t0[iseg])\n Merge = False\n if iseg != cnt-1:\n if(xlat_dic[ww[iseg+1]] == xlat_dic[ww[iseg]]):\n Merge = True\n if Merge:\n xt1.append(t1[iseg+1])\n iseg = iseg+2\n else:\n 
xt1.append(t1[iseg])\n iseg += 1\n oseg +=1\n return(pd.DataFrame({'t0':xt0,'t1':xt1,'seg':xww})) ","sub_path":"skspeech/.ipynb_checkpoints/io-checkpoint.py","file_name":"io-checkpoint.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"28141843","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import RrcItem\nimport datetime\nimport zmail\nimport logging\n\n# 爬取汽车数据\n\n\nclass RrcSpider(scrapy.Spider):\n name = 'rrc'\n allowed_domains = ['www.renrenche.com']\n # start_urls = ['http://www.renrenche.com/']\n car_list = ['dazhong', 'fute', 'bieke', 'xiandai']\n city_list = ['bj', 'sh', 'zz', 'gz']\n time = datetime.datetime.now()\n custom_settings = {\n 'ITEM_PIPELINES': {'RRC.pipelines.RrcPipeline': 300},\n # 生成日志文件\n 'LOGIN_ENABLE': True,\n 'LOG_ENCODING': 'UTF8',\n\n 'LOG_FILE': '{}爬虫_{}年{}月{}日{}时{}分{}秒.log'.format(name, time.year, time.month,\n time.day, time.hour,\n time.minute, time.second),\n 'LOG_LEVEL': 'INFO',\n }\n\n # 发送邮箱 ----------- 不想发送邮箱注释即可\n def __init__(self, send_user, root_code, receiver_user, log_file):\n super(RrcSpider, self).__init__()\n self.send_user = send_user\n self.root_code = root_code\n self.receiver_user = receiver_user\n self.log_file = self.name + log_file\n self.time = datetime.datetime.now()\n self.time_time = '{}年-{}月-{}日-{}时-{}分-{}秒'.format(self.time.year, self.time.month,\n self.time.day, self.time.hour,self.time.minute, self.time.second)\n self.server = zmail.server(self.send_user, self.root_code)\n self.mail_content = {\n 'subject': '{}已开启了'.format(self.name),\n 'content': '{}开始时间为:{}'.format(self.name, self.time_time)\n }\n self.server.send_mail(self.receiver_user, self.mail_content)\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(send_user=crawler.settings.get('SEND_USER'),\n root_code=crawler.settings.get('ROOT_CODE'),\n receiver_user=crawler.settings.get('RECEIVER_USER'),\n 
log_file=crawler.settings.get('LOG_FILE'))\n spider.set_crawler(crawler)\n return spider\n # -----------\n\n def start_requests(self):\n for city in self.city_list:\n for car in self.car_list:\n url = 'https://www.renrenche.com/{}/{}/p1/'.format(city, car)\n yield scrapy.Request(url=url, dont_filter=True, callback=self.parse, meta={'car': car})\n\n def parse(self, response):\n li_list = response.xpath('//ul[@class=\"row-fluid list-row js-car-list\"]/li')\n for li in li_list:\n href = li.xpath('a[@class=\"thumbnail\"]/@href').extract_first('')\n if href.startswith('/car'):\n del href\n elif href == \"\":\n del href\n else:\n info_url = 'https://www.renrenche.com' + href\n yield scrapy.Request(url=info_url, dont_filter=True, meta=response.meta, callback=self.get_data)\n\n def get_data(self, response):\n logging.info('汽车地址为:{}'.format(response.url))\n print(response.url)\n car = response.meta['car']\n title = response.xpath('//p[@class=\"detail-breadcrumb-tagP\"]/a[last()]/text()').extract_first('')\n print(title)\n purchase_time = response.xpath('//li[@class=\"span7\"]/div/p[2]/text()').extract_first('')\n mileage = response.xpath('//li[@class=\"kilometre\"][1]/div/p[1]/strong/text()').extract_first('')\n money1 = response.xpath('//div[@class=\"list price-list\"][1]/p/text()').extract_first('')\n money2 = response.xpath('//div[@class=\"list price-list\"][1]/p/span/text()').extract_first('')\n money = money1 + money2\n down_payment = response.xpath('//div[@class=\"list payment-list\"]/p[2]/text()').extract_first('')\n number_data = {\n '0': '0',\n '1': '1',\n '2': '2',\n '4': '3',\n '3': '4',\n '5': '5',\n '8': '6',\n '6': '7',\n '9': '8',\n '7': '9',\n '上': '上',\n '牌': '牌',\n '.': '.',\n '万': '万',\n '公': '公',\n '里': '里',\n '-': '-',\n }\n update_purchase_time = ''\n update_mileage = ''\n for x in purchase_time:\n update_purchase_time += number_data[x]\n for x in mileage:\n update_mileage += number_data[x]\n item = RrcItem()\n item['name'] = self.name\n item['title'] 
= title\n item['car'] = car\n item['update_purchase_time'] = purchase_time\n item['update_mileage'] = mileage\n item['money'] = money\n item['down_payment'] = down_payment\n yield item\n\n @staticmethod\n def close(spider, reason):\n mail_content = {\n 'subject': '{}爬虫已关闭'.format(spider.name),\n 'content': '{}爬虫关闭时间为:{}'.format(spider.name, spider.time_time),\n 'attachments': spider.log_file\n\n }\n spider.sever_send_mail(spider.receiver_user, mail_content)\n","sub_path":"RRC/RRC/spiders/rrc.py","file_name":"rrc.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"597530816","text":"## I modify Add_Two_Numbers_1\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\nclass Solution(object):\r\n def addTwoNumbers(self, l1, l2):\r\n carry = 0 ## carry will take the \"进位\", i.e. 4+6=10, '1' will be added to 'carry'\r\n ## new_head is the dummy head. Initialize the current node to dummy head\r\n ## of the returning list by claiming it is ListNode(0).\r\n new_head = ListNode(0)\r\n ## 'present' represents the present/current node (only one node), i.e.\r\n ## the first node here\r\n present = new_head\r\n\r\n while l1 or l2 or carry:\r\n v1 = v2 = 0\r\n if l1:\r\n v1 = l1.val ## set v1 to be the value of the first node of l1\r\n l1 = l1.next ## advance one step further to next node\r\n if l2:\r\n v2 = l2.val\r\n l2 = l2.next\r\n\r\n ## divmod(): Take two (non complex) numbers as arguments and return a pair of\r\n ## numbers consisting of their quotient and remainder when using long\r\n ## division. With mixed operand types, the rules for binary arithmetic\r\n ## operators apply. For plain and long integers, the result is the\r\n ## same as (a // b, a % b). For floating point numbers the result is\r\n ## (q, a % b), where q is usually math.floor(a / b) but may be 1 less\r\n ## than that. 
In any case q * b + a % b is very close to a, if a % b\r\n ## is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b).\r\n ## e.g. v1=4, v2=6, carry=0, then carry, val = divmod(4+6+0, 10) = 1, 0\r\n carry, val = divmod(v1+v2+carry, 10)\r\n\r\n ## Create a new node with the digit value of 'val', and set it to\r\n ## current node's next, then advance current node to next.\r\n new_node = ListNode(val) ## Claim a new node.\r\n ## set 'new_node' to the present node's next node\r\n present.next = new_node\r\n ## advance the present node to the new node 'new_node'\r\n present = present.next\r\n\r\n ## Return dummy head's next node.Note that we use a dummy head to simplify\r\n ## the code. Without a dummy head, you would have to write extra\r\n ## conditional statements to initialize the head's value.\r\n return new_head.next\r\n","sub_path":"Add_Two_Numbers_1.py","file_name":"Add_Two_Numbers_1.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"283068788","text":"#encoding:UTF-8\r\n# 获取验证码与解析验证码\r\nfrom PIL import Image\r\nfrom util.ShowapiRequest import ShowapiRequest\r\nimport time\r\nclass GetCode:\r\n def __init__(self, driver):\r\n self.driver = driver\r\n # 获取图片\r\n def get_code_image(self,file_name):\r\n self.driver.save_screenshot(file_name)\r\n code_element = self.driver.find_element_by_id(\"getcode_num\") # 元素定位验证码图片\r\n print(code_element.location) # 取坐标值{\"x\":123,\"y\":456}\r\n left = code_element.location['x'] # 定位图片\r\n top = code_element.location['y']\r\n right = code_element.size['width']+left\r\n height = code_element.size['height']+top\r\n im = Image.open(file_name) #打开图片\r\n img = im.crop((left,top,right,height)) #将坐标传给图片,进行裁剪\r\n img.save(file_name) #保存裁剪后的图片\r\n time.sleep(2)\r\n\r\n # 解析图片获取验证码\r\n def code_online(self, file_name):\r\n self.get_code_image(file_name) # 需要先获取保存的file_name\r\n r = 
ShowapiRequest(\"http://route.showapi.com/184-4\",\"62626\",\"d61950be50dc4dbd9969f741b8e730f5\" )\r\n r.addBodyPara(\"typeId\", \"35\") # 3代表英文数字混合,5代表5位英文;3换为1,代表纯数字;3换为2,代表纯英文\r\n r.addBodyPara(\"convert_to_jpg\", \"0\")\r\n # r.addBodyPara(\"needMorePrecise\", \"0\")\r\n r.addFilePara(\"image\", file_name) # 文件上传时设置\r\n res = r.post()\r\n text = res.json()['showapi_res_body']['Result']\r\n time.sleep(3)\r\n return text","sub_path":"selenium/util/get_code.py","file_name":"get_code.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"545731306","text":"\"\"\"\nhttps://leetcode.com/submissions/detail/147334825/\n\nGiven a non-negative integer represented as a non-empty array of digits, plus one to the integer.\r\n\r\nYou may assume the integer do not contain any leading zero, except the number 0 itself.\r\n\r\nThe digits are stored such that the most significant digit is at the head of the list.\"\"\"\n\n\nclass Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n \n x = [str(k) for k in digits]\n digits_str = ''.join(x)\n digits_int = int(digits_str) + 1\n digits_str = str(digits_int)\n \n k = list(digits_str)\n return [int(m) for m in k]\n \n ","sub_path":"plus-one.py","file_name":"plus-one.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"525213768","text":"'''\nrandom.sample\nlearned number_source\nLearned about eval function\n'''\n\nimport random\n\nnumber_source = range(-40,40)\nsign_source = ['+','-','*'] * 4\nfilename = 'problems.txt'\n\n# numbers = random.sample(number_source,4)\n# signs = random.sample(sign_source,3)\n# print(numbers)\n# print(signs)\n\nwith open(filename,'w') as f:\n for i in range(1,101):\n numbers = random.sample(number_source, 4)\n signs = random.sample(sign_source, 3)\n 
f.write(f\"[{i:3}] {numbers[0]:4} {signs[0]} ({numbers[1]:4}) {signs[1]} ({numbers[2]:4}) {signs[2]} ({numbers[3]:4}) = _______\\n\")\n\nfor one_line in open(filename):\n problem = one_line[5:38]\n print(f\"{one_line[:38]} = {eval(problem):4}\")","sub_path":"Ex_9_MathProblem.py","file_name":"Ex_9_MathProblem.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"318812301","text":"import random\nimport time\nfrom appium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\nclass Yyzs:\n CONFIG = {\n \"platformName\": \"Android\",\n \"platformVersion\": \"7.1.2\",\n # \"platformVersion\": \"6.0.1\",\n \"deviceName\": \"127.0.0.1:62026\",\n \"udid\": \"127.0.0.1:62026\",\n # \"deviceName\": \"192.168.100.71:4444\",\n \"appPackage\": \"com.ll.fishreader\",\n \"appActivity\": \"com.ll.fishreader.ui.activity.SplashActivity\",\n \"noReset\": True,\n \"unicodekeyboard\": True,\n \"resetkeyboard\": True,\n \"normalizeTagNames\": True,\n }\n\n RAND_SLEEP = (15, 25)\n\n def get_driver(self):\n repet_nums = 5\n while repet_nums > 0:\n try:\n return webdriver.Remote('http://127.0.0.1:4725/wd/hub', self.CONFIG)\n except Exception as e:\n repet_nums -= 1\n print('尝试重连')\n return False\n\n def __init__(self, times=1800):\n self.driver = self.get_driver()\n if not self.driver:\n print('连接失败')\n raise ValueError('连接失败')\n self.times = times\n\n def get_size(self):\n size = self.driver.get_window_size()\n return size['width'], size['height']\n\n def home_to_look(self):\n ''' 首页点击 “继续阅读” '''\n try:\n if WebDriverWait(self.driver, 3).until(\n lambda x: x.find_element_by_id(\"com.ll.fishreader:id/recent_book_record_continue_tv\")\n ):\n self.driver.find_element_by_id(\"com.ll.fishreader:id/recent_book_record_continue_tv\").click()\n except Exception as e:\n print('无继续阅读')\n\n while self.times > 0:\n looktime = random.randint(*self.RAND_SLEEP)\n 
print('时长:{}s'.format(looktime), end='\\t')\n time.sleep(looktime)\n\n width, height = self.get_size()\n x1 = int(width * random.randint(85, 90) / 100)\n x2 = int(width * random.randint(10, 20) / 100)\n y1 = int(height * random.randint(70, 85) / 100)\n y2 = int(height * random.randint(50, 65) / 100)\n print(\"坐标:x1-{},y1-{},x2-{},y2-{}\".format(x1, y1, x2, y2))\n try:\n self.driver.swipe(x1, y1, x2, y2)\n except Exception as e:\n print('swipe - exception', e)\n # print(\"坐标:x1-{},y1-{},x2-{},y2-{}\".format(x1, y1, x2, y2))\n self.times -= looktime\n\n # def weixin_share(self):\n # try:\n # if WebDriverWait(self.driver, 5).until(\n # lambda x: x.find_element_by_xpath(\"//android.widget.TextView[@resource-id='com.tencent.mm:id/b2b']\")\n # ):\n # self.driver.press_keycode(4) # 返回\n # except Exception as e:\n # self.driver.press_keycode(4) # 返回\n # print('wx_share - exception', e)\n #\n\n\n # def top_coin(self):\n # ''' 顶部金币 '''\n # try:\n # self.driver.find_element_by_id(\n # \"com.ll.fishreader:id/tt_video_ad_close\").click()\n # time.sleep(3)\n # try:\n # # 等待\n # if WebDriverWait(self.driver, 35).until(lambda x: x.find_element_by_id( \"com.ll.fishreader:id/tt_video_ad_close\")):\n # # 点击\n # self.driver.find_element_by_id(\"com.ll.fishreader:id/tt_video_ad_close\").click()\n # self.pop_close()\n # except Exception as e:\n # self.driver.press_keycode(4) # 返回\n #\n # print('顶部金币 - 可领取')\n # except Exception as e:\n # print('顶部金币 - 不可领取')\n\n def resetSys(self):\n print('=======开始重启: 长按电源=>点击重启 ========')\n try:\n self.driver.implicitly_wait(30)\n self.driver.long_press_keycode(26)\n time.sleep(3)\n self.driver.find_element_by_xpath(\n \"//android.widget.TextView[@resource-id='android:id/message' and @text='重新启动']\").click()\n except Exception as e:\n print('重启', e)\n print('=======开始重启: 长按电源=>点击重启 end========')\n\n def check1(self):\n print('检查start广告')\n try:\n if WebDriverWait(self.driver, 5).until(\n lambda x: 
x.find_element_by_id('com.ll.fishreader:id/jump_widget_1')):\n self.driver.find_element_by_id('com.ll.fishreader:id/jump_widget_1').click()\n print('跳过ad')\n except Exception as e:\n pass\n\n def check2(self):\n print('检查首页 - 弹窗')\n try:\n if WebDriverWait(self.driver, 5).until(\n lambda x: x.find_element_by_id('com.ll.fishreader:id/jump_widget_1')):\n self.driver.find_element_by_id('com.ll.fishreader:id/widget_image_floatwindow_close').click()\n print('关闭')\n except Exception as e:\n pass\n\n def check3(self):\n print('检查首页 - 是否加载完')\n try:\n if WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_id(\n \"com.ll.fishreader:id/recent_book_record_continue_tv\")):\n print('加载完成')\n else:\n print('加载失败')\n except Exception as e:\n print('加载异常', e)\n\n def pop_close(self):\n print('检查 - 收入囊中 - 关闭')\n try:\n if WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_xpath(\"//android.widget.Button[@text='收入囊中']\")):\n self.driver.find_element_by_xpath(\"//android.widget.Button[@text='收入囊中']\").click()\n else:\n print('加载失败')\n except Exception as e:\n print('加载异常', e)\n\n def run(self):\n self.check1()\n self.check2()\n self.check3()\n\n self.home_to_look()\n\n # self.top_coin()\n # self.home_to_look()\n # self.resetSys()\n\n\nwhile True:\n time.sleep(5)\n njxs = Yyzs(3600)\n njxs.run()\n print('结束')\n break\n","sub_path":"yyzs.py","file_name":"yyzs.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"76817668","text":"import pytest\nimport torch\n\nfrom torch_geometric.utils import assortativity\n\n\ndef test_assortativity():\n # completely assortative graph\n edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],\n [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])\n out = assortativity(edge_index)\n assert pytest.approx(out, abs=1e-5) == 1.0\n\n # completely disassortative graph\n edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 5, 5, 5, 5],\n 
[5, 5, 5, 5, 5, 0, 1, 2, 3, 4]])\n out = assortativity(edge_index)\n assert pytest.approx(out, abs=1e-5) == -1.0\n","sub_path":"test/utils/test_assortativity.py","file_name":"test_assortativity.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"496684210","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/extenteten/cnn/lenet.py\n# Compiled at: 2017-01-19 20:08:06\n# Size of source mod 2**32: 434 bytes\nimport tensorflow as tf\nfrom ..layer import linear\nfrom ..util import func_scope\n\n@func_scope()\ndef lenet(images, output_size):\n h = tf.contrib.slim.conv2d(images, 32, 5, scope='conv0')\n h = tf.contrib.slim.max_pool2d(h, 2, 2, scope='pool0')\n h = tf.contrib.slim.conv2d(h, 64, 5, scope='conv1')\n h = tf.contrib.slim.max_pool2d(h, 2, 2, scope='pool1')\n h = tf.contrib.slim.flatten(h)\n return linear(h, output_size)","sub_path":"pycfiles/tensorflow_extenteten-0.0.22-py3.6/lenet.cpython-36.py","file_name":"lenet.cpython-36.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"286346468","text":"# -*- coding: utf-8 -*-\n# Date: 2021/06/12\n\nfrom tkinter import *\n\n\ndef callback(*args):\n print(\"data changed:\", xE.get())\n\n\nroot = Tk()\nroot.title(\"test\")\n\nxE = StringVar()\nentry = Entry(root, textvariable=xE)\nentry.pack(pady=5, padx=10)\nxE.trace(\"w\", callback)\n\nroot.mainloop()\n","sub_path":"ch5_Variable/03_trace方法.py","file_name":"03_trace方法.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"503632713","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimage = 
cv2.imread('../images/messi5.jpg', 0) # 0 - read image in grayscale\n# image = cv2.imread('../images/sudoku.png', 0) # 0 - read image in grayscale\n\n# Gradient methods\n# Laplacian gradient\nlap = cv2.Laplacian(image, cv2.CV_64F, ksize=3) # CV_64F - 64bit float, supports the negative numbers\nlap = np.uint8(np.absolute(lap)) # convert into an unsigned int\n\n# Sobel Gradient\nsobelX = cv2.Sobel(image, cv2.CV_64F, 1, 0)\nsobelY = cv2.Sobel(image, cv2.CV_64F, 0, 1)\ncanny = cv2.Canny(image, 100, 200)\n\n# convert to an unsigned int\nsobelX = np.uint8(np.absolute(np.absolute(sobelX)))\nsobelY = np.uint8(np.absolute(np.absolute(sobelY)))\n\n# combine X and Y\nsobelCombined = cv2.bitwise_or(sobelX, sobelY)\n\ntitles = ['Image', 'Laplacian', 'SobelX', 'SobelY', 'Sobel Combined', 'Canny']\nimages = [image, lap, sobelX, sobelY, sobelCombined, canny]\n\nfor i in range(6):\n plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')\n plt.title(titles[i])\n # plt.xticks([]), plt.yticks([])\n\nplt.show()\n","sub_path":"Image Gradients And Canny Edge Detection/opencv_image_gradients_and_edge_detection.py","file_name":"opencv_image_gradients_and_edge_detection.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"503363821","text":"import sublime\nimport sublime_plugin\nimport re\n\n\nclass ExpandAbbreviationCommand(sublime_plugin.TextCommand):\n\n def run(self, edit):\n view = sublime.active_window().active_view()\n r = self.view.sel()[0]\n empty_region = r.empty()\n line = view.substr(sublime.Region(r.b, r.b - 2))\n if empty_region and (line == \"++\" or\n line == \"--\" or\n line == \"+=\" or\n line == \"-=\" or\n line == \"*=\" or\n line == \"/=\" or\n line == \"%=\"):\n begin_line = self.view.full_line(r).a\n region_text = sublime.Region(begin_line, self.view.sel()[0].b - 2)\n prec_text = view.substr(region_text)\n if re.search(r'([\\w]+\\s?)$', prec_text):\n word = 
re.search(r'([\\w]+\\s?)$', prec_text).group(1)\n postfix = \"\"\n if line == \"++\":\n postfix = \" + 1;\"\n elif line == \"--\":\n postfix = \" - 1;\"\n elif line == \"+=\":\n postfix = \" + \"\n elif line == \"-=\":\n postfix = \" - \"\n elif line == \"*=\":\n postfix = \" * \"\n elif line == \"/=\":\n postfix = \" / \"\n elif line == \"%=\":\n postfix = \" % \"\n snippet = word + \" = \" + word + postfix\n begin_word = self.view.sel()[0].b - len(word) - 2\n region = sublime.Region(begin_word, self.view.sel()[0].b)\n self.view.replace(edit, region, snippet)\n return\n elif not empty_region:\n self.view.run_command('indent')\n return\n\n self.view.insert(edit, self.view.sel()[0].b, '\\t')\n","sub_path":"expand_abbreviation.py","file_name":"expand_abbreviation.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"315492537","text":"\"\"\"Finding duplicate letters in a word.\"\"\"\n\n__author__ = \"730396516\"\n\nword: str = input(\"Enter a word: \")\ni: int = 0\nlength: int = len(word)\nother = \"False\"\n\n\nwhile i < length:\n j: int = length - 1\n while j > i:\n if word[j] == word[i]:\n other = \"True\"\n j = j - 1\n i += 1\n\nprint(\"Found duplicate: \" + other)","sub_path":"exercises/ex03/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"94627709","text":"## import libraries\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n## get the data\ndf = pd.read_csv('cannabis.csv')\ndf.head()\n\n## functions\n\n## remove duplicates\ndef remove_duplicates(df):\n df = df.drop_duplicates(keep='first')\n return df\n\n## convert all strain names to all lowercase \ndef lower_case_column(column_name):\n return 
df[column_name].str.lower()\n\n## search by strain keyword\ndef search_strains(keyword):\n return df[df['Strain'].str.contains(keyword)]\n\n## get strain name from index\ndef get_strain_from_index(index):\n strain_name = df.iloc[index][0]\n return strain_name\n\n## get index number from strain\ndef get_index_from_strain(strain):\n strain_index = df[df['Strain'] == strain].index[0]\n return strain_index \n\n## combine features into one column\ndef combine_features(row):\n return row['Strain'] +\" \"+ row['Type'] + \" \"+ row['Rating'] + \" \" + row['Effects'] + \" \" + row['Flavor'] + \" \" + row['Description']\n\n## these are the columns we are going to use\nfeatures = ['Strain', 'Type', 'Rating', 'Effects', 'Flavor', 'Description']\n\n## drop duplicates\ndf = remove_duplicates(df)\n\n## convert all columns to string for nlp\ndf = df.astype(str)\n\n## make a combined features column\ndf['combined_features'] = df.apply(combine_features, axis=1)\n\n## fill nans with emtpy string\nfor feature in features:\n df[feature] = df[feature].fillna('')\n\n## make strain titles lowercase for easier search\ndf['Strain'] = lower_case_column('Strain')\n\n## the function\ndef recommend_strains(strain, num_of_strains=5):\n cv = CountVectorizer()\n count_matrix = cv.fit_transform(df['combined_features'])\n cosine_sim = cosine_similarity(count_matrix)\n strain_index = get_index_from_strain(strain)\n similar_strains = list(enumerate(cosine_sim[strain_index]))\n sorted_similar_strains = sorted(similar_strains, key= lambda x:x[1], reverse=True)\n recommended_strains = []\n i = 0\n for strain in sorted_similar_strains:\n recommended_strains.append(get_strain_from_index(strain[0]))\n i = i + 1\n if i > num_of_strains:\n break\n return recommended_strains\n","sub_path":"mc_app/mc_utils.py","file_name":"mc_utils.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"314795561","text":"# python 3.6 
com postgresql\r\n\r\n# conecta banco: conn = psycopg2.connect(\"dbname='yourDB' user='yourUser' password='yourPass'\")\r\n# pega cursor: cur = conn.cursor()\r\n# executa comando: cur.execute(\"\")\r\n# salva os dados: conn.commit()\r\nimport psycopg2\r\nimport os\r\nimport time\r\nimport sys\r\n\r\ndef opcaoUsuario():\r\n os.system(\"cls\");\r\n print(\".........AGENDA.........\")\r\n opcao = input(\"Escolha a opcao desejada:\\n\\n1 - Cadastrar\\n2 - Consultar\\n3 - Alterar\\n4 - Excluir\\n5 - Mostrar Todos\\n6 - Sair\\n\")\r\n try:\r\n opcao = int(opcao)\r\n if opcao < 1 or opcao > 6:\r\n os.system(\"cls\");\r\n print(\"OPCAO INVALIDA: Verifique o valor digitado\")\r\n time.sleep(2)\r\n opcaoUsuario()\r\n except:\r\n os.system(\"cls\");\r\n print(\"OPCAO INVALIDA: Verifique o valor digitado\")\r\n time.sleep(2)\r\n opcaoUsuario()\r\n\r\n if opcao == 1:\r\n conn = conectaBanco()\r\n funcCadastrar(conn)\r\n\r\n elif opcao == 2:\r\n conn = conectaBanco()\r\n funcConsultar(conn)\r\n\r\n elif opcao == 3:\r\n conn = conectaBanco()\r\n funcAlterar(conn)\r\n\r\n elif opcao == 4:\r\n conn = conectaBanco()\r\n funcExcluir(conn)\r\n\r\n elif opcao == 5:\r\n conn = conectaBanco()\r\n funcMostrarTodos(conn)\r\n\r\n elif opcao == 6:\r\n sys.exit()\r\n\r\n\r\ndef conectaBanco():\r\n USER = 'yourUser'\r\n PASSWD = 'yourPass'\r\n DB = 'yourDB'\r\n\r\n try:\r\n conn = psycopg2.connect(dbname=DB, user=USER, password=PASSWD)\r\n\r\n except:\r\n print(\"O banco não foi encontrado...\"),\r\n a = input()\r\n os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\n return conn\r\n\r\ndef funcCadastrar(conn):\r\n print(\"\\n\\nDigite o nome:\\n\")\r\n nome = str(input(\"Nome: \"))\r\n nome = (nome.capitalize())\r\n cursor = conn.cursor()\r\n\r\n comando = \"INSERT INTO usuario(nome) VALUES ('\"+nome+\"')\"\r\n\r\n try:\r\n cursor.execute(comando)\r\n conn.commit()\r\n\r\n except:\r\n print(\"Erro\")\r\n\r\n print(\"Dados gravados com sucesso.\")\r\n conn.close()\r\n a = input()\r\n 
os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\ndef funcConsultar(conn):\r\n nome = str(input(\"Digite o nome a pesquisar: \"))\r\n nome = (nome.capitalize())\r\n cursor = conn.cursor()\r\n comando = \"SELECT * FROM usuario WHERE nome='\"+nome+\"'\"\r\n resultados = 0\r\n\r\n try:\r\n cursor.execute(comando)\r\n resultado = cursor.fetchall()\r\n for dados in resultado:\r\n ide = dados[0]\r\n nome = dados[1]\r\n resultados = int(resultados)\r\n resultados = resultados + 1\r\n print(\"\\n----------------------------\\n\")\r\n print(\" ID: %s\\n Nome: %s\" % (ide, nome))\r\n conn.commit()\r\n print(\"\\n\\nForam encontrados %d resultados\" % resultados)\r\n\r\n except:\r\n print(\"Erro\")\r\n\r\n conn.close()\r\n a = input()\r\n os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\n\r\ndef funcAlterar(conn):\r\n print(\"\\n\\nDigite os dados:\\n\")\r\n ide = input(\"ID do contato a alterar: \")\r\n novo_nome = input(\"Novo nome: \")\r\n novo_nome = (novo_nome.capitalize())\r\n cursor = conn.cursor()\r\n comando = \"UPDATE usuario SET nome='\"+novo_nome+\"' WHERE id='\"+ide+\"'\"\r\n\r\n try:\r\n cursor.execute(comando)\r\n conn.commit()\r\n\r\n except:\r\n print(\"Erro\")\r\n\r\n\r\n print(\"Alteração feita com sucesso.\")\r\n conn.close()\r\n a = input()\r\n os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\n\r\ndef funcExcluir(conn):\r\n print(\"\\n\\nDigite os dados:\\n\")\r\n ide_excluir = input(\"Digite o id do contato a excluir: \")\r\n cursor = conn.cursor()\r\n comando = \"DELETE FROM usuario WHERE id='\"+ide_excluir+\"'\"\r\n\r\n try:\r\n cursor.execute(comando)\r\n conn.commit()\r\n\r\n except:\r\n print(\"Erro\")\r\n\r\n print(\"Exclusão feita com sucesso.\")\r\n conn.close()\r\n a = input()\r\n os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\ndef funcMostrarTodos(conn):\r\n resultados = 0\r\n cursor = conn.cursor()\r\n comando = (\"SELECT * FROM usuario;\")\r\n\r\n try:\r\n cursor.execute(comando)\r\n resultado = cursor.fetchall()\r\n\r\n for dados in resultado:\r\n ide = 
dados[0]\r\n nome = dados[1]\r\n\r\n resultados = int(resultados)\r\n resultados = resultados + 1\r\n print(\"----------------------------------\")\r\n print(\"ID: %s\\n Nome: %s\" % (ide, nome))\r\n conn.commit()\r\n\r\n except:\r\n print(\"Erro\")\r\n\r\n print(\"\\n\\nForam encontrados %s resultados\" %resultados)\r\n conn.close()\r\n a = input()\r\n os.system(\"cls\")\r\n opcaoUsuario()\r\n\r\n\r\nif __name__ == '__main__':\r\n opcaoUsuario()\r\n","sub_path":"catalog_py_post.py","file_name":"catalog_py_post.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"224253870","text":"#Convert a list of characters into a string\n#Input : ['p', 'r', 'o', 'g', 'r', 'a', 'm',\n #'m', 'i', 'n', 'g']\n#Output : programming\n\ndef convert(char):\n list = \"\"\n for i in char:\n list += i\n return list\n\nchar = list(input(\"enter the list : \"))\nprint(char)\nprint(convert(char))","sub_path":"Personel/Nilesh/python/Practice/12March/program6.py","file_name":"program6.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"50122836","text":"__author__ = 'Sergey Tomin'\nfrom matplotlib.path import Path\nfrom ellipse_transform import *\nfrom pylab import *\nfrom ocelot.cpbd.tracking import *\n\n\ndef sort_phase_ellipse(X,Y):\n\n def sort_ind(ind):\n X_p = X[ind].flatten()\n Y_p = Y[ind].flatten()\n ind_sort = argsort(X_p)\n X_p, Y_p = X_p[ind_sort], Y_p[ind_sort]\n return X_p, Y_p\n\n ind_p = argwhere(Y>=0)\n X_p, Y_p = sort_ind(ind_p)\n ind_n = argwhere(Y<0)\n X_n, Y_n = sort_ind(ind_n)\n X = append(X_p,X_n[::-1])\n Y = append(Y_p,Y_n[::-1])\n return X,Y\n\ndef polygon_points(x_polg, y_polg, x, y):\n xycrop = np.vstack((x_polg, y_polg)).T\n xypix = np.vstack((x, y)).T\n pth = Path(xycrop, closed=False)\n mask = pth.contains_points(xypix)\n return array(mask)\n\n\ndef divide_beam(x_polg, 
y_polg, x,y):\n Xi_fl = array(x).flatten()\n Yi_fl = array(y).flatten()\n mask = polygon_points(x_polg = x_polg, y_polg = y_polg, x = Xi_fl, y = Yi_fl)\n mask_out = bitwise_not(mask)\n X_out = Xi_fl[mask_out]\n Y_out = Yi_fl[mask_out]\n X_in = Xi_fl[mask]\n Y_in = Yi_fl[mask]\n return X_in, Y_in, X_out, Y_out\n\n\n\n\ndef equal_tracks(track):\n for pxy in track:\n p = Particle(x = pxy.p_list[-1][0], px=pxy.p_list[-1][1],\n y=pxy.p_list[-1][2], py = pxy.p_list[-1][3],\n tau = pxy.p_list[-1][4], p = pxy.p_list[-1][5],\n s = 0)\n pxy.p = p\n pxy.x = p.x\n pxy.y = p.px\n pxy.turn = 0\n pxy.p_list = [[p.x,p.px,p.y,p.py,p.tau,p.p]]\n return track\n\ndef divide_tracks(track, acceptance):\n '''\n input: tracklist and acceptance = (x_acccep, y_accep)\n '''\n x_acccep, y_accep = acceptance\n track = equal_tracks(track)\n x, y = map(lambda p: p.x, track), map(lambda p: p.y, track)\n mask = polygon_points(x_acccep, y_accep, x, y)\n track_in = array(track)[mask]\n\n track_out = array(track)[~mask]\n return track_in, track_out\n\n\n","sub_path":"siberia2_inj/transform_track.py","file_name":"transform_track.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"283881580","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport mock\nimport factory\nfrom factory import fuzzy\nimport random\nfrom datetime import date\nfrom django.test import TestCase\nfrom db.models import Event, Deck, Card, Part, Archetype\nfrom project.models import Season\n\n\nclass CardFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = Card\n\n id = factory.Sequence(lambda n: n)\n multiverseid = factory.Sequence(lambda n: n)\n cmc = random.choice([0, 1, 2, 2, 3, 3, 4, 5, 6])\n\n\nclass EventFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = Event\n\n held_date = fuzzy.FuzzyDate(date(2014, 1, 1))\n\n\nclass DeckFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = 
Deck\n\n event = factory.SubFactory(EventFactory)\n player = fuzzy.FuzzyText()\n version = 1\n\n\nclass PartFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = Part\n\n num = random.choice([1, 2, 3, 4])\n is_sideboard = random.choice([False, False, False, False, True])\n card = factory.SubFactory(CardFactory)\n deck = factory.SubFactory(DeckFactory)\n\n\n### Factory end ###\n\n\nclass TestEvent(TestCase):\n\n def test_override_save(self):\n ev = Event.objects.create(\n name=\"test\",\n held_date=date(2015, 9, 1),\n regulation=\"standard\"\n )\n self.assertEqual(ev.id, \"test20150901\")\n self.assertEqual(ev, Event.objects.get(id=\"test20150901\"))\n\n def test_ids_by_offset_days(self):\n qs = Event.objects.ids_by_offset_days()\n expected = 'SELECT \"db_event\".\"id\" FROM \"db_event\" WHERE \"db_event\".\"held_date\" >='\n self.assertTrue(expected in str(qs.query))\n\n\nclass TestDeck(TestCase):\n\n def test_override_save(self):\n ev = EventFactory.create(name=\"test\", held_date=date(2015, 9, 1), regulation=\"standard\")\n deck = Deck.objects.create(\n event=ev,\n version=1,\n rank=1,\n player=\"sakamoto\",\n info=\"(4-0)\"\n )\n self.assertEqual(deck.id, \"test20150901-1-sakamoto\")\n self.assertEqual(deck, Deck.objects.get(id=\"test20150901-1-sakamoto\"))\n\n def test_all_in_season(self):\n self.assertEquals(Deck.objects.count(), 0, 'before confirm no record')\n old_event = EventFactory.create(season_id='_20160123_OLD')\n DeckFactory.create_batch(5, event=old_event)\n current_event = EventFactory.create(season_id='_20170123_CUR')\n DeckFactory.create_batch(7, event=current_event)\n\n self.assertEquals(Deck.objects.count(), 12,\n 'refs current_event and old event, 12 record created')\n\n with mock.patch('project.models.Season.get_current_id') as m:\n m.return_value = '_20170123_CUR'\n self.assertEquals(Deck.objects.all_in_season().count(), 7,\n 'mocked season calcs cur-seasons-deck')\n\n\nclass TestDeckManager(TestCase):\n\n fixtures = [\n 
\"20160430_archetype.json\",\n \"20160430_archetypecard.json\",\n \"20160430_card.json\",\n \"20160430_card_attrs.json\",\n \"20160430_deck.json\",\n \"20160430_event.json\",\n \"20160430_part.json\",\n ]\n\n def test_archetype_ranking(self):\n result = Deck.objects.archetype_ranking()\n archetype, count = result[0]\n self.assertEquals(count, 5)\n self.assertEquals(archetype.id, 'g-w_tokens_20160411_SOI')\n self.assertEquals(len(result), 5)\n\n\nclass TestArchetype(TestCase):\n\n fixtures = [\n \"20160430_archetype.json\",\n \"20160430_archetypecard.json\",\n \"20160430_card.json\",\n \"20160430_card_attrs.json\",\n \"20160430_deck.json\",\n \"20160430_event.json\",\n \"20160430_part.json\",\n ]\n\n def test_color(self):\n obj = Archetype.objects.get(id='mono_white_aggro_20160411_SOI')\n assert obj.color == 'White'\n\n def test_ja_name(self):\n obj = Archetype.objects.get(id='freaky')\n assert obj.ja_name == 'メタ外'\n\n obj = Archetype.objects.get(id='mono_white_aggro_20160411_SOI')\n assert obj.ja_name == 'Mono White Aggro'\n\n\nclass TestCard(TestCase):\n\n fixtures = [\n 'card.json',\n 'card_attrs.json',\n 'event.json',\n 'deck.json',\n 'part.json',\n ]\n\n def test_used_card_ranking(self):\n self.assertEqual(len(Card.objects.used_card_ranking()), 54)\n\n\nclass TestSeason(TestCase):\n\n def test_get_season_by_id(self):\n self.assertEqual(Season.get_season_by_date(date(2015, 10, 1), False).id, \"_20150717_ORI\")\n self.assertEqual(Season.get_season_by_date(date(2015, 10, 2), False).id, \"_20151002_BFZ\")\n self.assertRaises(ValueError, Season.get_season_by_date, date(2015, 7, 20), True)","sub_path":"db/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"607741354","text":"import random\n\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nimport numpy as np\nCXPB = 0.5\nseed = 
np.random.randint(10000,size=25)\nprint(\"seed:\",seed)\ngl = []\nG = []\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\ntoolbox.register(\"attr_bool\", random.randint, 0, 1)\n\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual,\n toolbox.attr_bool, 100)\n\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ndef evalOneMax(individual):\n return sum(individual),\n\ntoolbox.register(\"evaluate\", evalOneMax)\n\ntoolbox.register(\"ichi_mate\", tools.cxOnePoint)\ntoolbox.register(\"ni_mate\", tools.cxTwoPoint)\ntoolbox.register(\"ichiyou_mate\",tools.cxUniform)\n\ntoolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.05)\n\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n\ndef main(mp):\n for s in seed:\n random.seed(s)\n \n pop = toolbox.population(n=300)\n \n # CXPB is the probability with which two individuals\n # are crossed\n #\n # MUTPB is the probability for mutating an individual\n \n \n print(\"Start of evolution\")\n \n \n fitnesses = list(map(toolbox.evaluate, pop))\n \n \n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n \n print(\" Evaluated %i individuals\" % len(pop))\n \n fits = [ind.fitness.values[0] for ind in pop]\n \n # Variable keeping track of the number of generations\n g = 0\n \n # Begin the evolution\n while max(fits) < 100 and g < 1000:\n # A new generation\n g = g + 1\n gl.append(g)\n #print(\"-- Generation %i --\" % g)\n \n # Select the next generation individuals\n \n offspring = toolbox.select(pop, len(pop))\n offspring = list(map(toolbox.clone, offspring))\n \n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < CXPB:\n toolbox.ichi_mate(child1, child2)\n #toolbox.ni_mate(child1, child2)\n #toolbox.ichiyou_mate(child1, child2, 0.5)\n del child1.fitness.values\n del child2.fitness.values\n \n for mutant in 
offspring:\n if random.random() < mp:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n \n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n \n #print(\" Evaluated %i individuals\" % len(invalid_ind))\n \n # The population is entirely replaced by the offspring\n pop[:] = offspring\n \n # Gather all the fitnesses in one list and print the stats\n fits = [ind.fitness.values[0] for ind in pop]\n \n length = len(pop)\n mean = sum(fits) / length\n sum2 = sum([x*x for x in fits])\n std = abs(sum2 / length - mean**2)**0.5\n \n #print(\" Min %s\" % min(fits))\n #print(\" Max %s\" % max(fits))\n #print(\" Avg %s\" % mean)\n #print(\" Std %s\" % std)\n \n #print(\"-- End of (successful) evolution --\",\"MUTPB = \", MUTPB,\"CXPB = \",CXPB)\n \n best_ind = tools.selBest(pop, 1)[0]\n #print(\"Best individual is %s, %s\" % (best_ind, best_ind.fitness.values))\n gmax = max(gl)\n G.append(gmax)\n gl.clear()\nif __name__ == \"__main__\":\n pl = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]\n for p in pl: \n main(p)\n data=open(\"1.txt\",'a') \n print(\"avg:\",sum(G)/len(G),\"std:\",np.std(G,ddof=1),\"p=\",p,\"ichi\",file=data)\n data.close()\n\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"362748321","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\nfrom collections import OrderedDict\n\nfrom cloudshell.networking.cisco.command_actions.system_actions import SystemActions\nfrom cloudshell.networking.cisco.flows.cisco_configuration_flow import (\n CiscoConfigurationFlow,\n)\n\n\nclass CiscoNXOSConfigurationFlow(CiscoConfigurationFlow):\n STARTUP_LOCATION = \"startup-config\"\n RUNNING_LOCATION = \"running-config\"\n BACKUP_STARTUP_LOCATION 
= \"bootflash:backup-sc\"\n TEMP_STARTUP_LOCATION = \"bootflash:local-copy\"\n\n def _restore_flow(\n self, path, configuration_type, restore_method, vrf_management_name\n ):\n \"\"\"Execute flow which save selected file to the provided destination.\n\n :param path: the path to the configuration file, including the\n configuration file name\n :param restore_method: the restore method to use when restoring the\n configuration file. Possible Values are append\n and override\n :param configuration_type: the configuration type to restore. Possible\n values are startup and running\n :param vrf_management_name: Virtual Routing and Forwarding Name\n \"\"\"\n if \"-config\" not in configuration_type:\n configuration_type += \"-config\"\n\n with self._cli_handler.get_cli_service(\n self._cli_handler.enable_mode\n ) as enable_session:\n restore_action = SystemActions(enable_session, self._logger)\n reload_action_map = self._prepare_reload_act_map()\n\n if restore_method == \"override\":\n if self._cli_handler.cli_type.lower() != \"console\":\n raise Exception(\n self.__class__.__name__,\n \"Unsupported cli session type: {0}. 
\"\n \"Only Console allowed for restore override\".format(\n self._cli_handler.cli_type.lower()\n ),\n )\n\n restore_action.copy(\n source=path,\n destination=self.TEMP_STARTUP_LOCATION,\n vrf=vrf_management_name,\n action_map=restore_action.prepare_action_map(\n path, self.TEMP_STARTUP_LOCATION\n ),\n )\n\n restore_action.write_erase()\n restore_action.reload_device_via_console(action_map=reload_action_map)\n\n restore_action.copy(\n source=self.TEMP_STARTUP_LOCATION,\n destination=self.RUNNING_LOCATION,\n action_map=restore_action.prepare_action_map(\n self.TEMP_STARTUP_LOCATION, self.RUNNING_LOCATION\n ),\n )\n\n time.sleep(5)\n restore_action.copy(\n source=self.RUNNING_LOCATION,\n destination=self.STARTUP_LOCATION,\n action_map=restore_action.prepare_action_map(\n self.RUNNING_LOCATION, self.STARTUP_LOCATION\n ),\n timeout=200,\n )\n\n elif \"startup\" in configuration_type:\n raise Exception(\n self.__class__.__name__,\n \"Restore of startup config in append mode is not supported\",\n )\n else:\n restore_action.copy(\n source=path,\n destination=configuration_type,\n vrf=vrf_management_name,\n action_map=restore_action.prepare_action_map(\n path, configuration_type\n ),\n )\n\n def _prepare_reload_act_map(self):\n action_map = OrderedDict()\n # Proceed with reload? 
[confirm]\n action_map[\n (\n r\"[Aa]bort\\s+[Pp]ower\\s+[Oo]n\\s+[Aa]uto\\s+\"\n r\"[Pp]rovisioning.*[\\(\\[].*[Nn]o[\\]\\)]\"\n )\n ] = lambda session, logger: session.send_line(\"yes\", logger)\n action_map[\n r\"[Ee]nter\\s+system\\s+maintenance\\s+mode.*[\\[\\(][Yy](es)?\\/[Nn](o)?[\\)\\]] \"\n ] = lambda session, logger: session.send_line(\"n\", logger)\n action_map[\n r\"[Ss]tandby card not present or not [Rr]eady for failover]\"\n ] = lambda session, logger: session.send_line(\"y\", logger)\n action_map[\n r\"[Pp]roceed with reload\"\n ] = lambda session, logger: session.send_line(\" \", logger)\n action_map[r\"reboot.*system\"] = lambda session, logger: session.send_line(\n \"y\", logger\n )\n action_map[\n r\"[Ww]ould you like to enter the basic configuration dialog\"\n ] = lambda session, logger: session.send_line(\"n\", logger)\n action_map[\n r\"[Dd]o you want to enforce secure password standard\"\n ] = lambda session, logger: session.send_line(\"n\", logger)\n # Since as a part of restore override we are doing complete configuration erase,\n # switching Login action map to use default NXOS username - admin\n action_map[\n \"[Ll]ogin:|[Uu]ser:|[Uu]sername:\"\n ] = lambda session, logger: session.send_line(\"admin\", logger)\n action_map[\"[Pp]assword.*:\"] = lambda session, logger: session.send_line(\n self._cli_handler.password, logger\n )\n action_map[r\"\\[confirm\\]\"] = lambda session, logger: session.send_line(\n \"y\", logger\n )\n action_map[r\"continue\"] = lambda session, logger: session.send_line(\"y\", logger)\n action_map[r\"\\(y\\/n\\)\"] = lambda session, logger: session.send_line(\"n\", logger)\n action_map[\n r\"[\\[\\(][Yy]es/[Nn]o[\\)\\]]\"\n ] = lambda session, logger: session.send_line(\"n\", logger)\n action_map[r\"[\\[\\(][Nn]o[\\)\\]]\"] = lambda session, logger: session.send_line(\n \"n\", logger\n )\n action_map[r\"[\\[\\(][Yy]es[\\)\\]]\"] = lambda session, logger: session.send_line(\n \"n\", logger\n )\n action_map[\n 
r\"[\\[\\(][Yy]/[Nn][\\)\\]]\"\n ] = lambda session, logger: session.send_line(\"n\", logger)\n\n return action_map\n","sub_path":"cloudshell/networking/cisco/nxos/flows/cisco_nxos_configuration_flow.py","file_name":"cisco_nxos_configuration_flow.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"630227642","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START gae_python37_app]\nimport os\nimport requests\nimport urllib.parse\n\nfrom flask import Flask, request, send_from_directory, url_for\n\n# Project imports\nimport iex\n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask('finny')\n\n# Load app configuration\napp.config.from_pyfile('settings.cfg')\n\n# Register\napp.register_blueprint(iex.bp)\n\n# Favicon\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n# Say hello at root\n@app.route('/')\ndef hello():\n \"\"\"Say hello!\"\"\"\n return \"tranquiloooo\"\n \n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. 
This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"337516420","text":"# Inspired by http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/\nimport datetime, time\nimport imutils, cv2\n\n\nMIN_WIDTH = 150\nMIN_HEIGHT = 150\n\nPIXEL_SIZE = 10\n\n\n_referenceFrame = None\n\n\ndef process(frame):\n global _referenceFrame\n \n frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frameGray = cv2.GaussianBlur(frameGray, (21, 21), 0)\n\n if _referenceFrame is None:\n _referenceFrame = frameGray\n return frame\n\n frameDelta = cv2.absdiff(_referenceFrame, frameGray)\n frameThresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n frameThresh = cv2.dilate(frameThresh, None, iterations = 2)\n\n contours, hierarchy = cv2.findContours(\n frameThresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n text = 'Free'\n if contours:\n for idx in range(len(hierarchy[0])):\n if hierarchy[0][idx][3] >= 0:\n continue\n\n contour = contours[idx]\n\n #area = cv2.contourArea(contour)\n #if area < 500:\n # continue\n \n x1, y1, w, h = cv2.boundingRect(contour)\n if w < MIN_WIDTH or h < MIN_HEIGHT:\n continue\n x2, y2 = x1 + w, y1 + h\n x1, y1, x2, y2 = x1/PIXEL_SIZE*PIXEL_SIZE, y1/PIXEL_SIZE*PIXEL_SIZE, x2/PIXEL_SIZE*PIXEL_SIZE, y2/PIXEL_SIZE*PIXEL_SIZE \n\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\n text = 'Occupied'\n\n cv2.putText(frame, '{}'.format(text), (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\n cv2.putText(frame, datetime.datetime.now().strftime('%c'),\n (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n\n return frame\n 
\n","sub_path":"opencv/motion.py","file_name":"motion.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"238425921","text":"from demo import single_pic_proc\nimport os\nimport time\nimport shutil\nimport numpy as np\nimport json\nfrom glob import glob\nfrom PIL import Image\n\n\n\n\nif __name__ == '__main__':\n image_files = glob('./test_images/*.*')\n result_dir = './test_result'\n if os.path.exists(result_dir):\n shutil.rmtree(result_dir)\n os.mkdir(result_dir)\n\n for image_file in sorted(image_files):\n t = time.time()\n result, image_framed = single_pic_proc(image_file)\n output_file = os.path.join(result_dir, image_file.split('/')[-1])\n txt_file = os.path.join(result_dir, image_file.split('/')[-1].split('.')[0]+'.txt')\n print(txt_file)\n txt_f = open(txt_file, 'w')\n Image.fromarray(image_framed).save(output_file)\n print(\"Mission complete, it took {:.3f}s\".format(time.time() - t))\n print(\"\\nRecognition Result:\\n\")\n for key in result:\n print(result[key][1])\n txt_f.write(result[key][1]+'\\n')\n txt_f.close()\n\n # \n text_files = glob('./test_result/*.txt')\n for text_file in sorted(text_files):\n print(text_file)\n \n \n text=open(text_file,\"r\")\n res=[]\n print(text_files)\n for line in text:\n res.append(line[:-1]) \n print(line)\n res[4]=res[4][6:]\n res[5]=res[5][1:]\n\n final={\"Name\":res[3],\n \"DOB\":res[4],\n \"Gender\":res[5],\n \"AADHAR Number\":res[6]}\n\n with open (\"result.json\",\"w\") as output:\n json.dump(final,output)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"584046683","text":"from newshub.data.settings import *\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport datetime\nimport pytz\nfrom flask import jsonify\nfrom pymongo import MongoClient\nimport pymongo\n\n\ndef 
scrape():\n\terrors = {}\n\terrors[\"link_errors\"] = {}\n\tlink_errors = errors[\"link_errors\"]\n\tlink_errors[\"link_error\"] = []\n\tlink_error = link_errors[\"link_error\"]\n\terrors[\"page_errors\"] = {}\n\tpage_errors = errors[\"page_errors\"]\n\tpage_errors[\"page_error\"] = []\n\tpage_error = page_errors[\"page_error\"]\n\tlink_data = {}\n\tpage_d = {}\n\tdate = datetime.datetime.now(pytz.timezone('Asia/Calcutta')).strftime('%x')\n\ttry:\n\t\tclient1 = MongoClient(DB_CON)\n\t\tdb1 = client1.newshub\n\t\tdoc1 = db1[\"links\"]\n\texcept Exception as e:\n\t\tlink_error.append({'error': e, 'time': datetime.datetime.now(pytz.timezone('Asia/Calcutta'))})\n\n\ttry:\n\t\tclient2 = MongoClient(DB_CON)\n\t\tdb2 = client2.newshub\n\t\tdoc2 = db2[\"page_data\"]\n\texcept Exception as e:\n\t\tpage_error.append({'error': e, 'time': datetime.datetime.now(pytz.timezone('Asia/Calcutta'))})\n\n\trequest_page = requests.get(\n\t\turl=SCRAPE)\n\traw_content = request_page.content\n\thtml_page = BeautifulSoup(raw_content, \"html.parser\")\n\tlatest_news_ul = html_page.find_all(\"ul\", {\"class\": \"latest-news\"})\n\tlatest_news_ul = latest_news_ul[0]\n\tli = latest_news_ul.find_all(\"li\")\n\tli = li[::-1]\n\tlinks = []\n\tfor all in li:\n\t\tlink = all.find(\"a\")['href']\n\t\tif doc1.find_one(link.split('/')[-1][:-4]):\n\t\t\tprint('skip')\n\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tlink_data = {\n\t\t\t\t\t\"_id\": link.split('/')[-1][:-4],\n\t\t\t\t\t\"link\": link,\n\t\t\t\t\t\"title\": all.find(\"a\").text,\n\t\t\t\t\t\"time\": all.find(\"span\", {\"class\": \"l-datetime\"}).text,\n\t\t\t\t\t\"type\": all.find(\"span\", {\"class\": \"homeSection-name\"}).text,\n\t\t\t\t\t\"date\": datetime.datetime.now(pytz.timezone('Asia/Calcutta')).strftime('%x')\n\t\t\t\t}\n\t\t\t\tres = doc1.insert_one(link_data)\n\t\t\t\tprint(res.acknowledged)\n\t\t\t\tif (res.acknowledged):\n\t\t\t\t\tsource = {'name': 'thehindu.com'}\n\t\t\t\t\tdescription = ''\n\t\t\t\t\tsection_name = 
''\n\t\t\t\t\ttitle = ''\n\t\t\t\t\tauthor_name = ''\n\t\t\t\t\tplace = ''\n\t\t\t\t\tpost_time = ''\n\t\t\t\t\tupdated_time = ''\n\t\t\t\t\timg_name = ''\n\t\t\t\t\timg = ''\n\t\t\t\t\tcontent = ''\n\t\t\t\t\turl = ''\n\t\t\t\t\ttry:\n\t\t\t\t\t\t_id = link.split('/')[-1][:-4]\n\t\t\t\t\t\tpage = requests.get(url=link)\n\t\t\t\t\t\tcontents = page.content\n\t\t\t\t\t\tsoup = BeautifulSoup(contents, \"html.parser\")\n\t\t\t\t\t\tarticle_full = soup.find_all(\n\t\t\t\t\t\t\t\"div\", {\"class\": \"article\", \"role\": \"main\"})\n\t\t\t\t\t\tsection_name = article_full[0].find(\n\t\t\t\t\t\t\t\"a\", {\"class\": 'section-name'}).text\n\t\t\t\t\t\tif (article_full[0].find(\"h1\").text):\n\t\t\t\t\t\t\ttitle = article_full[0].find(\"h1\").text\n\t\t\t\t\t\tif (article_full[0].find(\"a\", {\"class\": 'auth-nm'})):\n\t\t\t\t\t\t\tauthor_name = article_full[0].find(\n\t\t\t\t\t\t\t\t\"a\", {\"class\": 'auth-nm'}).text\n\t\t\t\t\t\tplace_time_uptime = article_full[0].find(\n\t\t\t\t\t\t\t\"div\", {\"class\": 'ut-container'})\n\t\t\t\t\t\tplace = place_time_uptime.find_all(\n\t\t\t\t\t\t\t\"span\")[0].text.replace(\"\\n\", \"\")[:-2]\n\t\t\t\t\t\tpost_time = place_time_uptime.find_all(\n\t\t\t\t\t\t\t\"span\")[1].text.replace(\"\\n\", \"\")\n\t\t\t\t\t\t# if (place_time_uptime.find_all(\"span\")[2] is not None):\n\t\t\t\t\t\t# updated_time=place_time_uptime.find_all(\"span\")[2].text.replace(\"\\n\",\"\")\n\t\t\t\t\t\tif (article_full[0].find_all(\"img\", {\"class\": 'lead-img'})):\n\t\t\t\t\t\t\timg = article_full[0].find_all(\n\t\t\t\t\t\t\t\t\"img\", {\"class\": 'lead-img'})\n\t\t\t\t\t\t\tif (article_full[0].find_all(\"picture\")):\n\t\t\t\t\t\t\t\timg = article_full[0].find_all(\"picture\")\n\t\t\t\t\t\t\t\tif (img[0].find_all(\"source\")[0]['srcset']):\n\t\t\t\t\t\t\t\t\timg = img[0].find_all(\"source\")[\n\t\t\t\t\t\t\t\t\t\t0]['srcset']\n\t\t\t\t\t\t\t\t\timg_name = img.split('/')[-1]+\".jpg\"\n\t\t\t\t\t\t\t\t\t# if ('newshub/img/'+img_name):\n\t\t\t\t\t\t\t\t\t# 
\tpass\n\t\t\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t\t\t# \twith open('newshub/img/'+img_name ,'wb') as w:\n\t\t\t\t\t\t\t\t\t# \t\timg_res = requests.get(img,stream=True)\n\t\t\t\t\t\t\t\t\t# \t\tif not img_res.ok:\n\t\t\t\t\t\t\t\t\t# \t\t\tprint(img_res)\n\t\t\t\t\t\t\t\t\t# \t\tfor b in img_res.iter_content(1024):\n\t\t\t\t\t\t\t\t\t# \t\t\tif not b:\n\t\t\t\t\t\t\t\t\t# \t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t# \t\t\tw.write(b)\n\t\t\t\t\t\tif (article_full[0].find(\"h2\", {\"class\": 'intro'})):\n\t\t\t\t\t\t\tdescription = article_full[0].find(\n\t\t\t\t\t\t\t\t\"h2\", {\"class\": 'intro'}).text\n\t\t\t\t\t\tid_ = re.compile('^content-body-')\n\t\t\t\t\t\tcontent = article_full[0].find_all(\n\t\t\t\t\t\t\t\"div\", {\"id\": id_})[0].text\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tpage_error.append({'error': e, 'time': datetime.datetime.now(pytz.timezone('Asia/Calcutta'))})\n\t\t\t\t\ttry:\n\t\t\t\t\t\tpage_d = {\n\t\t\t\t\t\t\t'article_id': _id,\n\t\t\t\t\t\t\t'source': source,\n\t\t\t\t\t\t\t'section_name': section_name,\n\t\t\t\t\t\t\t'title': title,\n\t\t\t\t\t\t\t'author_name': author_name,\n\t\t\t\t\t\t\t'place': place,\n\t\t\t\t\t\t\t'post_time': post_time,\n\t\t\t\t\t\t\t# 'Updated':updated_time,\n\t\t\t\t\t\t\t'img_name': img_name,\n\t\t\t\t\t\t\t'img_url': img,\n\t\t\t\t\t\t\t'description': description,\n\t\t\t\t\t\t\t'content': content,\n\t\t\t\t\t\t\t'url': link,\n\t\t\t\t\t\t\t'Comments': {}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdoc2.insert_one(page_d)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpage_error.append({'error': e, 'time': datetime.datetime.now(pytz.timezone('Asia/Calcutta'))})\n\t\t\t\t\t\t# time.sleep(34)\n\n\n\t\t\texcept pymongo.errors.DuplicateKeyError:\n\t\t\t\tpass\n\t\t\texcept Exception as er:\n\t\t\t\tlink_error.append({'error': er, 'time': datetime.datetime.now(pytz.timezone('Asia/Calcutta'))})\n\t\t\tlinks.append(link)\n\n\t# last = doc.find({'date': 
datetime.datetime.now().strftime(\"%x\")}).sort(\n\t\t# \"time\", pymongo.DESCENDING)[0]\n\tfinal = {'data': {'link_data': link_data, 'page_data': page_d}}\n\tall_data = {'errors': errors, 'final': final}\n\t# print(all_data)\n\tprint(all_data)\n\treturn all_data\n\n\n# print(scrape())\n","sub_path":"newshub/data/scrape_data.py","file_name":"scrape_data.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"538186065","text":"import util\n\ndef topic(bot, args):\n if len(args) >= 2:\n topic = ' '.join(args[1:])\n if bot.remote['sendee'] == \"#ualug\":\n if len(topic) <= 250:\n bot._sendq((\"TOPIC\", bot.remote['sendee']), '%s: %s [/%s] | %s' % (bot.config.get('module: topic', 'title'), topic, bot.remote['nick'], bot.config.get('module: topic', 'tagline')))\n else:\n util.answer(bot, \"Sorry %s that topic is too long.\" % Bot.remote['nick'])\n else:\n bot._sendq((\"TOPIC\", bot.remote['sendee']), ' '.join(args[1:]))\n else:\n util.give_help(bot, args[0], \"\")\n\nutil.register(topic, \"common\", \"topic\")\n","sub_path":"xbot/modules/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"385005242","text":"\"\"\"\r\nwav.py\r\n\r\nModule to read / write .wav files from numerical data.\r\n\r\n@author: Chan Wai Lou / Vincent Lou\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.io import wavfile\r\nimport os\r\nfrom datetime import datetime\r\n\r\nimport aumix.signal.simple_signal as ss\r\n\r\n\r\ndef write(filename, signal, samp_rate=None, amp_perc=1.0, dtype=np.int32, auto_timestamp=False):\r\n \"\"\"\r\n Convert from numerical data to .wav.\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n Filename of the output WITHOUT \".wav\" appended.\r\n\r\n signal : np.ndarray, or aumix.signal.simple_signal.Signal\r\n An array containing the numerical 
data, or a Signal class with the numerical data\r\n encapsulated inside.\r\n\r\n samp_rate : int, optional\r\n Sampling rate.\r\n If `signal` is an array, this needs to be specified.\r\n If `signal` is a Signal class, its samp_rate field should be specified.\r\n\r\n amp_perc : float, optional\r\n Amplitude percentage. Should range from 0.0 to 1.0.\r\n\r\n dtype\r\n Data type of the output .wav file. 4 resolution are supported as follows:\r\n\r\n np.uint8 : 8-bit PCM\r\n np.int16 : 16-bit PCM\r\n np.int32 : 32-bit PCM\r\n np.float32 : 32-bit floating point\r\n\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n\r\n data = signal\r\n\r\n # If signal is a Signal class, then retrieve data and sampling rate from it\r\n if isinstance(signal, ss.Signal):\r\n data = signal.data\r\n samp_rate = signal.samp_rate\r\n\r\n # If sampling rate is undefined, we don't have enough info to output a .wav file\r\n if samp_rate is None:\r\n raise ValueError(\"Sampling rate is undefined.\")\r\n\r\n # Find out the maximum size of the specified type\r\n amplitude = amp_perc * (np.iinfo(dtype).max if dtype != np.float32 else 1)\r\n\r\n # Treat 8-bit PCM as a special case, since the numbers are unsigned.\r\n if dtype == np.uint8:\r\n unsigned_data = data - np.min(data)\r\n out_data = unsigned_data * amplitude / np.max(unsigned_data)\r\n else:\r\n out_data = data * amplitude / np.max(data)\r\n\r\n # Create folder if it doesn't exist\r\n raw_filename = filename.split(\"/\")[-1]\r\n folder = \"/\".join(filename.split(\"/\")[:-1])\r\n if folder != \"\" and not os.path.exists(folder):\r\n os.makedirs(folder)\r\n\r\n timestamp = datetime.now().strftime(\"%y%m%d-%H%M%S\") + \"-\" if auto_timestamp else \"\"\r\n\r\n # Output file\r\n wavfile.write(f\"{folder}/{timestamp}{raw_filename}.wav\", samp_rate, 
out_data.astype(dtype))\r\n","sub_path":"aumix/io/wav.py","file_name":"wav.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"482752028","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.views.decorators.http import require_http_methods\nimport redis\n\n__cache = redis.StrictRedis(host='127.0.0.1', port=6379)\n\n\n@login_required\n@require_http_methods('GET')\ndef check_login(request):\n return render(request, template_name='index.html')\n\n\n@login_required\n@require_http_methods('GET')\ndef bus_stations(request):\n from_redis = __cache.get('BUS_STATIC')\n str_ = from_redis.decode('utf-8') if from_redis else ''\n if not str_:\n return HttpResponseBadRequest()\n return HttpResponse(str_, content_type='application/json')\n\n\n@login_required\n@require_http_methods('GET')\ndef bus_realtime(request):\n from_redis = __cache.get('BUS_REALTIME')\n str_ = from_redis.decode('utf-8') if from_redis else ''\n if not str_:\n return HttpResponseBadRequest()\n return HttpResponse(str_, content_type='application/json')\n\n\n@login_required\n@require_http_methods('GET')\ndef bike_realtime(request):\n from_redis = __cache.get('BIKE_REALTIME')\n str_ = from_redis.decode('utf-8') if from_redis else ''\n if not str_:\n return HttpResponseBadRequest()\n return HttpResponse(str_, content_type='application/json')","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"353048346","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------------\n# :author: Pete R. 
Jemian\n# :email: prjemian@gmail.com\n# :copyright: (c) 2018, Pete R. Jemian\n#\n# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n'''\nPython Utilities for NeXus HDF5 files\n\nmain user interface file\n\n.. rubric:: Usage\n\n::\n\n console> punx -h\n usage: punx [-h] [-v]\n {configuration,demonstrate,structure,tree,update,validate} ...\n \n Python Utilities for NeXus HDF5 files version: 0.2.0+9.g31fd4b4.dirty URL:\n http://punx.readthedocs.io\n \n optional arguments:\n -h, --help show this help message and exit\n -v, --version show program's version number and exit\n \n subcommand:\n valid subcommands\n \n {configuration,demonstrate,structure,tree,update,validate}\n configuration show configuration details of punx\n demonstrate demonstrate HDF5 file validation\n structure structure command deprecated. Use ``tree`` instead\n tree show tree structure of HDF5 or NXDL file\n update update the local cache of NeXus definitions\n validate validate a NeXus file\n \n Note: It is only necessary to use the first two (or more) characters of any\n subcommand, enough that the abbreviation is unique. Such as: ``demonstrate``\n can be abbreviated to ``demo`` or even ``de``.\n\n.. autosummary::\n \n ~main\n ~MyArgumentParser\n ~parse_command_line_arguments\n ~func_demo\n ~func_validate\n ~func_hierarchy\n ~func_configuration\n ~func_tree\n ~func_update\n\n'''\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nlogging.basicConfig(\n level=logging.INFO, \n # level=logging.DEBUG, \n format='[%(levelname)s %(asctime)s.%(msecs)03d %(name)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S') \n\n\nfrom .__init__ import __version__, __package_name__, __url__\nfrom .__init__ import FileNotFound, HDF5_Open_Error, SchemaNotFound\nfrom . import finding\nfrom . 
import utils\n\nERROR = 40\nlogger = utils.setup_logger(__name__, logging.INFO)\n\n# :see: https://docs.python.org/2/library/argparse.html#sub-commands\n# obvious 1st implementations are h5structure and update\n\n\ndef exit_message(msg, status=None, exit_code=1):\n '''\n exit this code with a message and a status\n \n :param str msg: text to be reported\n :param int status: 0 - 50 (default: ERROR = 40)\n :param int exit_code: 0: no error, 1: error (default)\n '''\n if status is None:\n status = ERROR\n logging.info(\"{} -- {}\".format(msg, status))\n exit(exit_code)\n\n\ndef func_configuration(args):\n \"\"\"show internal configuration of punx\"\"\"\n from . import cache_manager\n from . import github_handler\n\n cm = cache_manager.CacheManager()\n print(\"Locally-available versions of NeXus definitions (NXDL files)\")\n print(cm.table_of_caches())\n print(\"default NXDL file set: \", cm.default_file_set.ref)\n\n # nothing to show from here\n # grr = github_handler.GitHub_Repository_Reference()\n # perhaps does local creds file exist? Show where it is? or TMI?\n\n\ndef func_demo(args):\n '''\n show what **punx** can do\n \n .. index:: demo\n\n Internally, runs these commands::\n\n punx validate /data/writer_1_3.hdf5\n punx tree /data/writer_1_3.hdf5\n\n .. index:: cache update\n\n If you get an error message that looks like this one \n (line breaks added here for clarity)::\n\n punx.cache.FileNotFound: file does not exist:\n /Users//.config/punx/definitions-master/nxdl.xsd\n AND not found in source cache either! 
Report this problem to the developer.\n\n then you will need to update your local cache of the NeXus definitions.\n Use this command to update the local cache::\n\n punx update\n\n '''\n path = os.path.dirname(__file__)\n args.infile = os.path.abspath(os.path.join(path, 'data', 'writer_1_3.hdf5'))\n\n print(\"\")\n print('console> punx validate ' + args.infile)\n args.report = ','.join(sorted(finding.VALID_STATUS_DICT.keys()))\n func_validate(args)\n del args.report\n\n print(\"\")\n print('console> punx tree ' + args.infile)\n from . import h5tree\n mc = h5tree.Hdf5TreeView(args.infile)\n # :param bool show_attributes: display attributes in output\n show_attributes=True\n mc.array_items_shown = 5\n print('\\n'.join(mc.report(show_attributes)))\n\n\ndef func_hierarchy(args):\n \"not implemented yet\"\n url = 'http://punx.readthedocs.io/en/latest/analyze.html'\n print('A chart of the NeXus hierarchy is in the **punx** documentation.')\n print('see: ' + url)\n # TODO: issue #1 & #10 show NeXus base class hierarchy from a given base class\n\n\ndef func_structure(args):\n \"deprecated subcommand\"\n msg = 'structure command deprecated. Use ``tree`` instead'\n print(ValueError(msg))\n sys.exit(1)\n\n\ndef func_tree(args):\n \"\"\"print the tree structure of a NeXus HDF5 data file of NXDL XML file\"\"\"\n if args.infile.endswith('.nxdl.xml'):\n from . import nxdltree\n\n try:\n mc = nxdltree.NxdlTreeView(os.path.abspath(args.infile))\n except FileNotFound:\n exit_message('File not found: ' + args.infile)\n except Exception as exc:\n exit_message(str(exc))\n report = mc.report(args.show_attributes)\n print('\\n'.join(report or ''))\n\n else:\n from . 
import h5tree\n\n try:\n mc = h5tree.Hdf5TreeView(os.path.abspath(args.infile))\n except FileNotFound:\n exit_message('File not found: ' + args.infile)\n mc.array_items_shown = args.max_array_items\n try:\n report = mc.report(args.show_attributes)\n except HDF5_Open_Error:\n exit_message('Could not open as HDF5: ' + args.infile)\n print('\\n'.join(report or ''))\n\n\ndef func_validate(args):\n \"\"\"\n validate the content of a NeXus HDF5 data file of NXDL XML file\n \"\"\"\n from . import validate\n\n if args.infile.endswith('.nxdl.xml'):\n result = validate.validate_xml(args.infile)\n if result is None:\n print(args.infile, ' validates')\n return\n\n validator = validate.Data_File_Validator()\n\n # determine which findings are to be reported\n report_choices, trouble = [], []\n for c in args.report.upper().split(','):\n if c in finding.VALID_STATUS_DICT:\n report_choices.append(finding.VALID_STATUS_DICT[c])\n else:\n trouble.append(c)\n if len(trouble) > 0:\n msg = 'invalid choice(s) for *--report* option: '\n msg += ','.join(trouble)\n msg += '\\n'\n msg += '\\t' + 'available choices: '\n msg += ','.join(sorted(finding.VALID_STATUS_DICT.keys()))\n exit_message(msg)\n\n try:\n # run the validation\n validator.validate(args.infile)\n except FileNotFound:\n exit_message('File not found: ' + args.infile)\n except HDF5_Open_Error:\n exit_message('Could not open as HDF5: ' + args.infile)\n except SchemaNotFound as _exc:\n exit_message(str(_exc))\n\n # report the findings from the validation\n validator.print_report()\n\n\ndef _install(cm, grr, ref, use_user_cache = True, force = False):\n \"\"\"\n Install or update the named NXDL file reference\n \"\"\"\n force = force or ref == \"master\" # always update from the master branch\n\n msg = \"install_NXDL_file_set(ref={}, force={}, user_cache={})\".format(\n ref, force, use_user_cache)\n logger.info(msg)\n\n m = cm.install_NXDL_file_set(\n grr, \n user_cache=use_user_cache, \n ref=ref,\n force = force)\n if isinstance(m, 
list):\n print(str(m[-1]))\n\n\ndef func_update(args):\n \"\"\"update or install versions of the NeXus definitions\"\"\"\n from . import cache_manager\n from . import github_handler\n\n cm = cache_manager.CacheManager()\n print(cm.table_of_caches())\n\n if args.try_to_install_or_update:\n grr = github_handler.GitHub_Repository_Reference()\n grr.connect_repo()\n cm.find_all_file_sets()\n \n for ref in args.file_set_list:\n _install(cm, grr, ref, force=args.force)\n \n print(cm.table_of_caches())\n\n\nclass MyArgumentParser(argparse.ArgumentParser):\n '''\n override standard ArgumentParser to enable shortcut feature\n \n stretch goal: permit the first two char (or more) of each subcommand to be accepted\n # ?? http://stackoverflow.com/questions/4114996/python-argparse-nargs-or-depending-on-prior-argument?rq=1\n '''\n \n def parse_args(self, args=None, namespace=None):\n '''\n permit the first two char (or more) of each subcommand to be accepted\n '''\n if args is None and len(sys.argv) > 1 and not sys.argv[1].startswith('-'):\n # TODO: issue #8: make more robust for variations in optional commands\n sub_cmd = sys.argv[1]\n # make a list of the available subcommand names\n choices = []\n for g in self._subparsers._group_actions:\n if isinstance(g, argparse._SubParsersAction):\n #choices = g._name_parser_map.keys()\n choices = g.choices.keys()\n break\n if len(choices) > 0 and sub_cmd not in choices:\n if len(sub_cmd) < 2:\n msg = 'subcommand too short, must match first 2 or more characters, given: %s'\n self.error(msg % ' '.join(sys.argv[1:]))\n # look for any matches\n matches = [c for c in choices if c.startswith(sub_cmd)]\n # validate the match is unique\n if len(matches) == 0:\n msg = 'subcommand unrecognized, given: %s'\n self.error(msg % ' '.join(sys.argv[1:]))\n elif len(matches) > 1:\n msg = 'subcommand ambiguous (matches: %s)' % ' | '.join(matches)\n msg += ', given: %s'\n self.error(msg % ' '.join(sys.argv[1:]))\n else:\n sub_cmd = matches[0]\n # re-assign 
the subcommand\n sys.argv[1] = sub_cmd\n return argparse.ArgumentParser.parse_args(self, args, namespace)\n\n\ndef parse_command_line_arguments():\n '''process command line'''\n doc = __doc__.strip().splitlines()[0]\n doc += '\\n version: ' + __version__\n doc += '\\n URL: ' + __url__\n epilog = 'Note: It is only necessary to use the first two (or'\n epilog += ' more) characters of any subcommand, enough that the'\n epilog += ' abbreviation is unique. '\n epilog += ' Such as: ``demonstrate`` can be abbreviated to'\n epilog += ' ``demo`` or even ``de``.'\n p = MyArgumentParser(\n prog=__package_name__, \n description=doc,\n epilog=epilog)\n\n p.add_argument(\n '-v', \n '--version', \n action='version', \n version=__version__)\n\n # TODO: issue #9, stretch goal: GUI for any of this\n # p.add_argument(\n # '-g', \n # '--gui', \n # help='graphical user interface (TBA)')\n\n subcommand = p.add_subparsers(title='subcommand', description='valid subcommands',)\n\n\n ## subcommand: configuration\n # TODO: issue #11\n help_text = 'show configuration details of punx'\n p_sub = subcommand.add_parser('configuration', help=help_text)\n p_sub.set_defaults(func=func_configuration)\n \n \n ### subcommand: demo\n p_sub = subcommand.add_parser('demonstrate', help='demonstrate HDF5 file validation')\n # TODO: add_logging_argument(p_sub)\n p_sub.set_defaults(func=func_demo)\n\n\n# ### subcommand: hierarchy\n# # TODO: issue #1 & #10\n# help_text = 'show NeXus base class hierarchy from a given base class'\n# p_sub = subcommand.add_parser('hierarchy', help=help_text)\n# p_sub.set_defaults(func=func_hierarchy)\n# #p_sub.add_argument('something', type=bool, help='something help_text')\n\n\n ### subcommand: structure\n help_text = 'structure command deprecated. 
Use ``tree`` instead'\n p_sub = subcommand.add_parser('structure', help=help_text)\n p_sub.set_defaults(func=func_structure)\n p_sub.add_argument('infile', help=\"HDF5 or NXDL file name\")\n\n\n ### subcommand: tree\n help_text = 'show tree structure of HDF5 or NXDL file'\n p_sub = subcommand.add_parser('tree', help=help_text)\n p_sub.set_defaults(func=func_tree)\n p_sub.add_argument('infile', help=\"HDF5 or NXDL file name\")\n p_sub.add_argument(\n '-a', \n action='store_false', \n default=True,\n dest='show_attributes',\n help='Do not print attributes of HDF5 file structure')\n help_text = 'maximum number of array items to be shown'\n p_sub.add_argument(\n '-m', '--max_array_items',\n default=5,\n type=int,\n #choices=range(1,51),\n help=help_text)\n # TODO: add_logging_argument(p_sub)\n\n\n ### subcommand: update\n help_text = 'update the local cache of NeXus definitions'\n p_sub = subcommand.add_parser('update', help=help_text)\n p_sub.set_defaults(func=func_update)\n\n help_text = \"name(s) of reference NeXus NXDL file set\"\n help_text += \" (GitHub tag, hash, version, or 'master')\"\n help_text += \" -- default master\"\n p_sub.add_argument(\n '-r', '--file_set_list',\n default=[\"master\", ],\n nargs='*',\n help=help_text)\n\n p_sub.add_argument(\"-i\", \"--install\",\n action='store_false', \n default=True,\n dest='try_to_install_or_update',\n help='Do not install (or update) -- default True')\n\n p_sub.add_argument(\n '-f', '--force', \n action='store_true', \n default=False, \n help='force update (if GitHub available)')\n # TODO: add_logging_argument(p_sub)\n\n\n ### subcommand: validate\n p_sub = subcommand.add_parser('validate', help='validate a NeXus file')\n p_sub.add_argument('infile', help=\"HDF5 or NXDL file name\")\n p_sub.set_defaults(func=func_validate)\n reporting_choices = ','.join(sorted(finding.VALID_STATUS_DICT.keys()))\n help_text = 'select which validation findings to report, choices: '\n help_text += reporting_choices\n 
p_sub.add_argument('--report', default=reporting_choices, help=help_text)\n # TODO: add_logging_argument(p_sub)\n\n return p.parse_args()\n\n\ndef main():\n print(\"\\n!!! WARNING: this program is not ready for distribution.\\n\")\n args = parse_command_line_arguments()\n if not hasattr(args, \"func\"):\n print(\"ERROR: must specify a subcommand -- for help, type:\")\n print(\"%s -h\" % sys.argv[0])\n sys.exit(1)\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/punx/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"529618047","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 18 14:20:14 2021\r\n\r\n@author: scpgo\r\n\"\"\"\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport plotly.graph_objects as go # or \r\nimport plotly.express as px\r\n\r\nfrom pygraphtec import lee_fichero_sesion\r\n\r\ndf = lee_fichero_sesion(\"201112-165432.csv\", path_sesiones='dataLogger')\r\n\r\napp = dash.Dash()\r\n\r\n#fig_names = ['fig1', 'fig2']\r\nfig_names = df.columns #asigno fig_names a las columnas del Dataframe\r\n\r\ncols_dropdown = html.Div([ #Div del filtro de variables\r\n dcc.Dropdown(\r\n id='cols_dropdown',\r\n options=[{'label': x, 'value': x} for x in fig_names], #creo el filtro de variables\r\n value=None, #ninguna opcion inicial preseleccionada \r\n multi=True #permite selección de varias opciones\r\n )])\r\n \r\nfig_plot = html.Div(id='fig_plot') #Div de la gráfica\r\n\r\napp.layout = html.Div([cols_dropdown, fig_plot]) #permite construir la estructura el filtro\r\n #de variables y la gráfica\r\n\r\n@app.callback( #permite devolver la gráfica como Dash Core Component dcc.Graph (línea 44)\r\ndash.dependencies.Output('fig_plot', 'children'),\r\n[dash.dependencies.Input('cols_dropdown', 'value')])\r\ndef name_to_figure(value):\r\n if value is None:\r\n 
figure = {}\r\n else:\r\n figure = px.line(df[value]) #se crea figura que representa todas las variables\r\n #fig_names corresponde a la variable global (línea 25)\r\n return dcc.Graph(figure=figure)\r\n\r\napp.run_server(debug=True, use_reloader=False) #arranca la aplicacion","sub_path":"dash_filtro_variables.py","file_name":"dash_filtro_variables.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"346302781","text":"# multiAgents.py\n# --------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom util import manhattanDistance\nfrom game import Directions\nimport random, util\n\nfrom game import Agent\n\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n\n The code below is provided as a guide. 
You are welcome to change\n it in any way you see fit, so long as you don't touch our method\n headers.\n \"\"\"\n\n\n def getAction(self, gameState):\n \"\"\"\n You do not need to change this method, but you're welcome to.\n\n getAction chooses among the best options according to the evaluation function.\n\n Just like in the previous project, getAction takes a GameState and returns\n some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP}\n \"\"\"\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n \"Add more of your code here if you want to\"\n\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n Design a better evaluation function here.\n\n The evaluation function takes in the current and proposed successor\n GameStates (pacman.py) and returns a number, where higher numbers are better.\n\n The code below extracts some useful information from the state, like the\n remaining food (newFood) and Pacman position after moving (newPos).\n newScaredTimes holds the number of moves that each ghost will remain\n scared because of Pacman having eaten a power pellet.\n\n Print out these variables to see what you're getting, then combine them\n to create a masterful evaluation function.\n \"\"\"\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE 
HERE ***\"\n\n closestGhostDist = min([manhattanDistance(ghost.configuration.getPosition(),newPos) for ghost in newGhostStates])\n foods = newFood.asList()\n score = 0\n if len(foods)==0:\n return float('inf')\n closestfooddist = min([manhattanDistance(food, newPos) for food in foods])\n score += 10000/(len(foods)+1)\n\n\n if closestGhostDist <= 2:\n score /= 10\n score += 1/closestfooddist\n \n \n return score\ndef scoreEvaluationFunction(currentGameState):\n \"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n\n This evaluation function is meant for use with adversarial search agents\n (not reflex agents).\n \"\"\"\n return currentGameState.getScore()\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. 
Agent (game.py)\n is another abstract class.\n \"\"\"\n\n def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent (question 2)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action from the current gameState using self.depth\n and self.evaluationFunction.\n\n Here are some method calls that might be useful when implementing minimax.\n\n gameState.getLegalActions(agentIndex):\n Returns a list of legal actions for an agent\n agentIndex=0 means Pacman, ghosts are >= 1\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n\n gameState.isWin():\n Returns whether or not the game state is a winning state\n\n gameState.isLose():\n Returns whether or not the game state is a losing state\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \n self.wanted_action = gameState.getLegalActions(0)[0]\n\n\n def maxhelper(depthnow,gameState, agentIndex):\n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n #print(depthnow,self.depth,gameState.state,self.evaluationFunction(gameState))\n\n return self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n max_results = -1* float('inf')\n max_action = None\n \n for action in actionable:\n #print(depthnow, gameState.state)\n successorstate = gameState.generateSuccessor(agentIndex,action)\n mh = minhelper(depthnow,successorstate,1)\n max_results = max(max_results,mh)\n if mh == max_results:\n max_action = action \n self.wanted_action = max_action\n return max_results\n def minhelper(depthnow, gameState, agentIndex):\n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n 
#print(depthnow)\n return self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n succ = [gameState.generateSuccessor(agentIndex,action) for action in actionable]\n result = []\n for successorstate in succ:\n #print(depthnow, gameState.state)\n if agentIndex< gameState.getNumAgents()-1:\n result.append(minhelper(depthnow,successorstate,agentIndex+1))\n else:\n result.append(maxhelper(depthnow+1,successorstate,0))\n \n return min(result)\n \n maxhelper(0,gameState,0)\n return self.wanted_action\n\n\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent with alpha-beta pruning (question 3)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action using self.depth and self.evaluationFunction\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \n \n self.action_wanted = None\n def maxipad(depthnow,gameState,agentIndex,alpha,beta):\n #print(alpha,beta,gameState.state)\n \n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n #print(depthnow,self.depth,gameState.state,self.evaluationFunction(gameState))\n\n return self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n max_results = -1* float('inf')\n max_action = None\n \n for action in actionable:\n \n #print(depthnow, gameState.state)\n successorstate = gameState.generateSuccessor(agentIndex,action)\n mh = minipad(depthnow,successorstate,1,alpha,beta)\n \n if mh > beta:\n self.action_wanted = action\n return mh\n else:\n alpha = max(alpha, mh)\n #print()\n #print(max_results,mh)\n max_results = max(max_results,mh)\n #print(max_results,mh)\n if mh == max_results:\n #print(2)\n max_action = action \n #print(max_action)\n \n #print(max_results) \n self.action_wanted = max_action\n return max_results\n\n def minipad(depthnow,gameState,agentIndex,alpha,beta):\n #print(alpha,beta,gameState.state)\n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n 
#print(depthnow)\n return self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n result = []\n for action in actionable:\n successorstate = gameState.generateSuccessor(agentIndex, action)\n #print(result)\n #print(depthnow, gameState.state)\n if agentIndex< gameState.getNumAgents()-1:\n mh = minipad(depthnow,successorstate,agentIndex+1,alpha,beta)\n if mh < alpha:\n return mh\n else:\n beta = min(mh,beta)\n result.append(mh)\n else:\n mh = maxipad(depthnow+1,successorstate,0,alpha,beta)\n if mh < alpha:\n return mh\n else:\n beta = min(mh,beta)\n result.append(mh)\n #print(result) \n return min(result)\n maxipad(0,gameState,0,-1*float('inf'),float('inf'))\n return self.action_wanted\n \n\nclass ExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent (question 4)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n\n All ghosts should be modeled as choosing uniformly at random from their\n legal moves.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n self.wanted_action = gameState.getLegalActions(0)[0]\n\n\n def maxhelper(depthnow,gameState, agentIndex):\n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n #print(depthnow,self.depth,gameState.state,self.evaluationFunction(gameState))\n\n return self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n max_results = -1* float('inf')\n max_action = None\n \n for action in actionable:\n #print(depthnow, gameState.state)\n successorstate = gameState.generateSuccessor(agentIndex,action)\n mh = expectopatronum(depthnow,successorstate,1)\n max_results = max(max_results,mh)\n if mh == max_results:\n max_action = action \n self.wanted_action = max_action\n return max_results\n def expectopatronum(depthnow, gameState, agentIndex):\n if (depthnow == self.depth) or gameState.isWin() or gameState.isLose():\n #print(depthnow)\n return 
self.evaluationFunction(gameState)\n else:\n actionable = gameState.getLegalActions(agentIndex)\n succ = [gameState.generateSuccessor(agentIndex,action) for action in actionable]\n result = []\n for successorstate in succ:\n #print(depthnow, gameState.state)\n if agentIndex< gameState.getNumAgents()-1:\n result.append(expectopatronum(depthnow,successorstate,agentIndex+1))\n else:\n result.append(maxhelper(depthnow+1,successorstate,0))\n \n return sum(result)/len(succ)\n \n maxhelper(0,gameState,0)\n return self.wanted_action\n\ndef betterEvaluationFunction(currentGameState):\n \"\"\"\n Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable\n evaluation function (question 5).\n\n DESCRIPTION: \n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n foods = currentGameState.getFood().asList()\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n\n\n closestGhostDist = min([manhattanDistance(ghost.configuration.getPosition(),newPos) for ghost in newGhostStates])\n i= 0\n j= 0\n for ghost in newGhostStates:\n if manhattanDistance(ghost.configuration.getPosition(),newPos) == closestGhostDist:\n j = i\n i+=1\n\n score = 0\n if len(foods)==0:\n return float('inf')\n closestfooddist = min([manhattanDistance(food, newPos) for food in foods])\n score += 10000/(len(foods)+1)\n\n if closestGhostDist <= newScaredTimes[j]:\n score += closestGhostDist*100\n if closestGhostDist <= 2:\n score /= 10\n score += 0.5 *1/closestfooddist\n \n \n return score\n \n\n# Abbreviation\nbetter = betterEvaluationFunction\n","sub_path":"CS 188/multiagent copy/multiAgents.py","file_name":"multiAgents.py","file_ext":"py","file_size_in_byte":14623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"85"} +{"seq_id":"422842571","text":"#!/usr/bin/python3\n\"\"\"\nGet the titles of the hot posts for a given subreddit\n\"\"\"\nimport 
requests\n\nURL = 'https://www.reddit.com/r/{}/hot.json'\nUSER_AGENT = 'Mozilla/5.0 (Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'\n\n\ndef recurse(subreddit, titles=[], **kwargs):\n \"\"\"\n Query reddit for all hot posts of a subreddit\n \"\"\"\n params = {\n 'after': kwargs.setdefault('after'),\n 'count': kwargs.setdefault('count', 0),\n 'limit': kwargs.setdefault('limit', 100)\n }\n r = requests.get(\n URL.format(subreddit),\n headers={'User-Agent': USER_AGENT},\n params=params,\n allow_redirects=False,\n timeout=30,\n )\n if r.status_code == 200:\n results = r.json()['data']\n titles.extend(post['data']['title'] for post in results['children'])\n if results['after'] is not None:\n kwargs['after'] = results['after']\n kwargs['count'] += kwargs['limit']\n return recurse(subreddit, titles, **kwargs)\n return titles\n return None\n","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"48719826","text":"import os\nimport sys\nimport atexit\nimport socket\nimport sqlite3\n\n\nclass Database:\n def __init__(self):\n self.name = \"uct.db\"\n self.table = \"cursos\"\n self.cursos = []\n self.create()\n\n def create(self):\n if os.path.isfile(self.name):\n print (\"*** Using existing Database\")\n return\n print (\"*** Creating Database\")\n try:\n conn = sqlite3.connect(self.name)\n cur = conn.cursor()\n\n cur.execute(\"CREATE TABLE %s (Name TEXT)\" % self.table)\n\n conn.commit()\n\n except sqlite3.Error as e:\n print (\"Error %s\" % e.args[0])\n sys.exit(-1)\n finally:\n cur.close()\n conn.close()\n print (\"*** Done\")\n\n def insert(self, name):\n if name not in self.cursos:\n print (\"*** Inserting new class\")\n self.cursos.append(name)\n try:\n conn = sqlite3.connect(self.name)\n cur = conn.cursor()\n\n insert = \"INSERT INTO %s VALUES('%s')\"\n cur.execute(insert % (self.table, name))\n\n 
conn.commit()\n except sqlite3.Error as e:\n print (\"Error %s\" % e.args[0])\n sys.exit(-1)\n finally:\n cur.close()\n conn.close()\n print (\"*** Done\")\n else:\n print (\"*** Class already exists\")\n\n\nclass Server:\n def __init__(self, host=\"127.0.0.1\", port=50111):\n self.address = (host, port)\n self.sock = socket.socket()\n self.sock.bind(self.address)\n self.sock.listen(1)\n self.clientConnected = False\n self.running = True\n self.database = Database()\n\n atexit.register(self.sock.close)\n\n self.mainLoop()\n\n def mainLoop(self):\n while self.running:\n try:\n if self.clientConnected is not True:\n print (\"*** Esperando conexion\")\n clientSock, clientAddress = self.sock.accept()\n print (\"** Cliente Conectado\")\n self.clientConnected = True\n if self.clientConnected:\n print (\"*** Esperando mensaje\")\n message = clientSock.recv(1024).decode(\"ascii\")\n if message != \"\":\n print (\"*** Recibido\")\n print (message)\n self.database.insert(message)\n except socket.error:\n print (\"*** Cliente Desconectado\")\n self.clientConnected = False\n\nserver = Server()\n","sub_path":"Client Server/Labs/Lab10/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"491771177","text":"#!/usr/bin/python3\n\n\"\"\"\n1. Мэдэрлийн сүлжээндээ logsoftmax ашигласан. logsoftmax-ийг ашигалахад learning_rate-ийг 1e-2-оос 1e-3 болгон багасгаж байж мэдэрлийн сүлжээ маань сайн ажиллаж байгаа нь ажиглагдлаа.\n logsoftmax-ийн хэрэглэснээр learning_rate багассан боловч, нийт episode-ийн тоо нэмэгдэх шаардлагагүй сайн сурч байна.\n2. 
\n\"\"\"\nimport sys\nimport torch\nimport random\nimport numpy as np \nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport pickle\n# import matplotlib.pyplot as plt\nfrom itertools import count\n\n# Constants\nGAMMA = 0.9\neps = np.finfo(np.float32).eps.item()\nDEPTH = 15\nalpha = 0.05\naction_names = [\"right\",\"left\",\"down\",\"up\"]\nclass Env:\n def __init__(self,n, m, threshold, bounty = 10):\n random.seed()\n self.done = False\n self.bounty = bounty\n self.threshold = threshold\n self.action = -1\n self.row_size = n\n self.col_size = m\n\n self.move = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n\n self.grid = np.ones((n, m))\n \n self.sx = 0\n self.sy = 0\n self.dx = 0\n self.dy = 0\n \n def reset(self):\n self.done = False\n for i in range(self.row_size):\n for j in range(self.col_size):\n self.grid[i][j] = 1\n \n t = random.randint(0, self.row_size * self.col_size - 1)\n self.sx = t // self.col_size;\n self.sy = t % self.col_size;\n t = random.randint(0, self.row_size * self.col_size - 1)\n self.dx = t // self.col_size;\n self.dy = t % self.col_size;\n\n if self.sx == self.dx and self.sy == self.dy:\n self.reset()\n\n self.grid[self.sx][self.sy] = 2\n self.grid[self.dx][self.dy] = 3\n return self.grid\n\n def render(self):\n print(\"===========================================\")\n print('Applied action: {}\\n'.format(action_names[self.action]))\n for i in range(0, self.row_size):\n for j in range(0, self.col_size):\n print('{:2d} '.format(int(self.grid[i][j])), end = '')\n print('');\n print(\"===========================================\")\n input()\n\n def step(self,n):\n self.action = n\n x, y = self.sx, self.sy\n x += self.move[n][0]\n y += self.move[n][1]\n\n reward = -1\n\n if x < 0 or y < 0 or x >= self.row_size or y >= self.col_size:\n reward = -2\n return self.grid, reward, False\n \n self.grid[self.sx][self.sy] = 1\n\n self.grid[x][y] = 2\n \n if x == self.dx and y == self.dy:\n self.done = True\n\n self.sx, 
self.sy = x, y\n \n if self.done == True:\n return self.grid, self.bounty, True\n\n return self.grid, reward, False\n\n\nclass PolicyNetwork(nn.Module):\n def __init__(self, in_features, num_actions, hidden_size, learning_rate=1e-2):\n super(PolicyNetwork, self).__init__()\n\n self.num_actions = num_actions\n self.linear1 = nn.Linear(in_features, hidden_size)\n self.linear2 = nn.Linear(hidden_size, num_actions)\n self.logsoftmax = nn.LogSoftmax(dim=0)\n self.rewards = []\n self.log_probs = []\n \n def forward(self, state):\n x = F.relu(self.linear1(state.view(-1)))\n x = self.linear2(x)\n x = self.logsoftmax(x)\n #print(x)\n return x \n\n\n# policy = PolicyNetwork(5*5, 4, 128)\nenv = Env(5, 5, 10)\n# optimizer = optim.SGD(policy.parameters(), lr=1e-3)\npickle_in = open(\"policy_goal2.pickle\",\"rb\")\npolicy = pickle.load(pickle_in)\nfor param in policy.parameters():\n print(type(param.data), param.size())\noptimizer = optim.SGD(policy.parameters(), lr=1e-3)\n\n\ndef select_action(state):\n state = torch.from_numpy(state).float()\n probs = policy(state)\n #print(probs)\n # exp_probs = torch.exp(probs)\n # print(exp_probs)\n action_n = np.random.choice(4, p=np.squeeze(torch.exp(probs.detach()).numpy()))\n policy.log_probs.append(probs.squeeze()[action_n])\n return action_n\n\ndef finish_episode(policy):\n R = 0\n policy_loss = []\n Gt = []\n for r in policy.rewards[::-1]:\n R = r + GAMMA * R\n Gt.insert(0, R)\n Gt = torch.tensor(Gt)\n\n for log_prob, R in zip(policy.log_probs, Gt):\n policy_loss.append(-log_prob * R)\n\n optimizer.zero_grad()\n policy_loss = torch.stack(policy_loss).sum()\n policy_loss.backward()\n optimizer.step()\n \n del policy.rewards[:]\n del policy.log_probs[:]\n\ndef debug():\n while True:\n state,reward,done = env.reset(),0,False\n while done == False:\n action = select_action(state)\n state, reward, done = env.step(action)\n env.render()\n\ndef main():\n \n running_reward = 10\n created_dump = False\n for i_episode in count(1):\n state, 
ep_reward = env.reset(), 0\n for t in range(1, DEPTH): \n action = select_action(state)\n state, reward, done = env.step(action)\n\n if i_episode >= 100000:\n env.render()\n if created_dump == False:\n with open(\"policy_goal2.pickle\",\"wb\") as file:\n pickle.dump(policy,file)\n print(\"---------------created dump---------------\")\n created_dump = True\n policy.rewards.append(reward)\n ep_reward += reward\n if done:\n break\n print('run: {:d} / 100000 ({:d}%)\\r'.format(i_episode, int(i_episode / 100000 * 100)), end = '')\n # print('run: ', i_episode,)\n # running_reward = alpha * ep_reward + (1 - alpha) * running_reward\n finish_episode(policy)\n # if running_reward > env.threshold:\n # print(\"Solved! Running reward is now {} and \"\n # \"the last episode runs to {} time steps!\".format(running_reward, t))\n # break\n\nif __name__ == '__main__':\n # main()\n debug()\n\n","sub_path":"goal2.py","file_name":"goal2.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"5764469","text":"class Solution:\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n return self.plusrec(digits, [], 0)\n\n def plusrec(self, digits, newval, c):\n if not digits and c==1 : return [1]+newval\n if digits[-1]!=9:\n val=digits[-1]+1 if c==0 and not newval else c+digits[-1]\n return digits[:-1]+[val]+newval\n else:\n while digits:\n digits.pop()\n newval.append(0)\n return self.plusrec(digits, newval, 1)\n\nobj=Solution()\nprint(obj.plusOne([1,9,9]))\n","sub_path":"array/Plusone.py","file_name":"Plusone.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"526795798","text":"import numpy as np\nimport astropy.io.fits as fits\nfrom verification_tools import calc_limits\nfrom verification_tools import fudge_throughput as ft\n\nconfigs = 
[{'aperture':'lw','filter':'f250m','disperser':'grismr','bounds':(2.421,2.581)},\n {'aperture':'lw','filter':'f277w','disperser':'grismr','bounds':(2.421,3.09)},\n {'aperture':'lw','filter':'f300m','disperser':'grismr','bounds':(2.848,3.137)},\n {'aperture':'lw','filter':'f322w2','disperser':'grismr','bounds':(2.451,3.958)},\n {'aperture':'lw','filter':'f335m','disperser':'grismr','bounds':(3.207,3.502)},\n {'aperture':'lw','filter':'f356w','disperser':'grismr','bounds':(3.152,3.942)},\n {'aperture':'lw','filter':'f360m','disperser':'grismr','bounds':(3.442,3.777)},\n {'aperture':'lw','filter':'f410m','disperser':'grismr','bounds':(3.914,4.257)},\n {'aperture':'lw','filter':'f430m','disperser':'grismr','bounds':(4.195,4.367)},\n {'aperture':'lw','filter':'f444w','disperser':'grismr','bounds':(3.929,4.949)},\n {'aperture':'lw','filter':'f460m','disperser':'grismr','bounds':(4.543,4.713)},\n {'aperture':'lw','filter':'f480m','disperser':'grismr','bounds':(4.693,4.921)}]\n \napertures = np.array([2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5])*0.0648\nidt_fluxes = np.array([1e-2, 1e-2,1e-2,1e-2,1e-2,1e-2,1e-2,1e-2,1e-2,1e-2,1e-2,1e-2])\nskyfacs = [2,2,2,2,2,2,2,2,2,2,2,2]\nobsmode = {\n 'instrument': 'nircam',\n 'mode': 'wfgrism',\n 'filter': 'f090w',\n 'aperture': 'lw',\n 'disperser': 'grismr'\n }\nexp_config = {\n 'subarray': 'full',\n 'readmode': 'deep8',\n 'ngroup': 5,\n 'nint': 1,\n 'nexp': 10\n }\nstrategy = {\n 'method': 'specapphot',\n 'aperture_size': 0.15,\n 'sky_annulus': [0.16,0.5],\n 'background_subtraction': False\n }\n \noutput = calc_limits.calc_limits(configs,apertures,idt_fluxes,obsmode=obsmode,scanfac=5,skyfacs=skyfacs,\n exp_config=exp_config,strategy=strategy,background='minzodi12')\n\nnp.savez('../../outputs/nircam_wfgrism_sensitivity.npz',\n wavelengths=output['wavelengths'], sns=output['sns'], lim_fluxes=output['lim_fluxes'], sat_limits=output['sat_limits'], 
configs=output['configs'])\n","sub_path":"tests/nircam/nircam_sensitivity_wfgrism.py","file_name":"nircam_sensitivity_wfgrism.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"126842840","text":"from PIL import ImageDraw\nfrom summary import writer\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR\nimport argparse\nimport torch\nimport os\nfrom utils import progress_bar\nfrom torchvision import transforms as tfs\nfrom Model import CnnAlign\nfrom Helen import HellenDataset, DataPrefetcher, draw_ann\nimport PIL.ImageFont as ImageFont\nimport numpy as np\nimport time\n\n\nMODEL_SAVE_PATH = \"./output/face_align.pt\"\n\nfont_size = 4\nfont1 = ImageFont.truetype(r'./Ubuntu-B.ttf', font_size)\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gama', \"-g\", type=float, default=0.9, help='train gama')\n parser.add_argument('--step', \"-s\", type=int, default=20, help='train step')\n parser.add_argument('--batch', \"-b\", type=int, default=1, help='train batch')\n parser.add_argument('--epoes', \"-e\", type=int, default=30, help='train epoes')\n parser.add_argument('--lr', \"-l\", type=float, default=0.001, help='learn rate')\n parser.add_argument('--pretrained', \"-p\", type=bool, default=False, help='prepare trained')\n parser.add_argument('--mini_batch', \"-m\", type=int, default=1, help='mini batch')\n return parser.parse_args()\n\ndef train(args):\n start_epoch = 0\n data_loader = DataLoader(dataset=HellenDataset(True, 224), batch_size=args.batch, shuffle=True, num_workers=16)\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n model = CnnAlign()\n print(\"add graph\")\n writer.add_graph(model, torch.zeros((1, 3, 224, 224)))\n print(\"add graph over\")\n if args.pretrained and os.path.exists(MODEL_SAVE_PATH):\n 
print(\"loading ...\")\n state = torch.load(MODEL_SAVE_PATH)\n model.load_state_dict(state['net'])\n start_epoch = state['epoch']\n print(\"loading over\")\n model = torch.nn.DataParallel(model, device_ids=[0, 1]) # multi-GPU\n model.to(device)\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)\n scheduler = StepLR(optimizer, step_size=args.step, gamma=args.gama)\n train_loss = 0\n to_pil_img = tfs.ToPILImage()\n to_tensor = tfs.ToTensor()\n\n for epoch in range(start_epoch, start_epoch+args.epoes):\n model.train()\n prefetcher = DataPrefetcher(data_loader)\n img_tensor, label_tensor = prefetcher.next()\n last_img_tensor = img_tensor\n last_label_tensor = label_tensor\n optimizer.zero_grad()\n i_batch = 0\n while img_tensor is not None:\n last_img_tensor = img_tensor\n last_label_tensor = label_tensor\n output = model(img_tensor)\n loss = torch.nn.functional.smooth_l1_loss(output, label_tensor.view(-1, output.size(1)))\n if loss is None:\n img_tensor, label_tensor = prefetcher.next()\n continue\n loss.backward()\n if i_batch % args.mini_batch == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n train_loss = loss.item()\n global_step = epoch*len(data_loader)+i_batch\n progress_bar(i_batch, len(data_loader), 'loss: %f, epeche: %d'%(train_loss, epoch))\n writer.add_scalar(\"loss\", train_loss, global_step=global_step)\n img_tensor, label_tensor = prefetcher.next()\n i_batch += 1\n\n\n #save one pic and output\n pil_img = to_pil_img(last_img_tensor[0].cpu())\n ann = output[0].cpu().detach().numpy()\n ann = np.resize(ann, (194, 2))\n draw_ann(pil_img, ann.tolist(), font1, font_size)\n writer.add_image(\"img: \"+str(epoch), to_tensor(pil_img))\n scheduler.step()\n\n if epoch % 10 == 0:\n print('Saving..')\n state = {\n 'net': model.module.state_dict(),\n 'epoch': epoch,\n }\n torch.save(state, \"./output/face_align\"+str(epoch)+\".pt\")\n\n if not os.path.isdir('data'):\n os.mkdir('data')\n print('Saving..')\n state = {\n 'net': 
model.module.state_dict(),\n 'epoch': epoch,\n }\n torch.save(state, MODEL_SAVE_PATH)\n writer.close()\n\nif __name__=='__main__':\n train(parse_args())\n\n","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"192563196","text":"from flask import Flask\nimport os\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef displayHostname():\n command = os.popen('hostname')\n hostName = command.read()\n return \"Hostname : %s \" % hostName\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","sub_path":"demoapp/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"518108216","text":"\r\nimport asyncio\r\nimport time\r\nimport numpy as np\r\nfrom math import sqrt\r\n\r\nasync def calculate_number_observation(one_dimensional_array): \r\n print('start calculate_number_observation() procedure') \r\n await asyncio.sleep(0)\r\n number_observation = one_dimensional_array.size\r\n print(\"Number of Observation: {} \".format(number_observation)) \r\n await calcuate_arithmetic_mean(one_dimensional_array, number_observation)\r\n# await calculate_median(one_dimensional_array, number_observation)\r\n print(\"finished calculate_number_observation() procedure\") \r\n \r\nasync def calcuate_arithmetic_mean(one_dimensional_array, number_observation): \r\n print('start calcuate_arithmetic_mean() procedure') \r\n await calculate_median(one_dimensional_array, number_observation)\r\n sum_result = 0.0\r\n await asyncio.sleep(0)\r\n for i in range(number_observation): \r\n sum_result += one_dimensional_array[i] \r\n arithmetic_mean = sum_result / number_observation\r\n print(\"Arithmetic Mean: {} \".format(arithmetic_mean)) \r\n await calculate_sample_standard_deviation(one_dimensional_array, number_observation, arithmetic_mean)\r\n 
print(\"finished calcuate_arithmetic_mean() procedure\") \r\n \r\nasync def calculate_median(one_dimensional_array, number_observation): \r\n print('starting calculate_median()') \r\n await asyncio.sleep(0)\r\n one_dimensional_array.sort() \r\n half_position = number_observation // 2\r\n if not number_observation % 2:\r\n median = (one_dimensional_array[half_position - 1] + one_dimensional_array[half_position]) / 2.0\r\n else:\r\n median = one_dimensional_array[half_position] \r\n print(\"Median: {} \".format(median))\r\n print(\"finished calculate_median() procedure\") \r\n\r\nasync def calculate_sample_standard_deviation(one_dimensional_array, number_observation, arithmetic_mean): \r\n print('start calculate_sample_standard_deviation() procedure') \r\n await asyncio.sleep(0)\r\n sum_result = 0.0\r\n for i in range(number_observation): \r\n sum_result += pow((one_dimensional_array[i] - arithmetic_mean), 2) \r\n sample_variance = sum_result / (number_observation - 1) \r\n sample_standard_deviation = sqrt(sample_variance) \r\n print(\"Sample Standard Deviation: {} \".format(sample_standard_deviation))\r\n print(\"finished calculate_sample_standard_deviation() procedure\") \r\n \r\ndef main(one_dimensional_array): \r\n ioloop = asyncio.get_event_loop()\r\n tasks = [ioloop.create_task(calculate_number_observation(one_dimensional_array))]\r\n wait_tasks = asyncio.wait(tasks)\r\n ioloop.run_until_complete(wait_tasks)\r\n ioloop.close()\r\n \r\nif __name__ == '__main__':\r\n start_time = time.clock() \r\n one_dimensional_array = np.arange(1000000, dtype=np.float64) \r\n main(one_dimensional_array)\r\n end_time = time.clock()\r\n print(\"Program Runtime: {} seconds\".format(round(end_time - start_time, 1)))\r\n\r\n 
","sub_path":"Week1/Code/intel_python_class/src/module_2/summary_statistics_asyncio.py","file_name":"summary_statistics_asyncio.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"111775484","text":"from requests import get\nfrom bs4 import BeautifulSoup\nfrom spotdl.search.spotifyClient import SpotifyClient\nfrom typing import List\n\n\ndef from_url(spotifyURL: str):\n if not (\"open.spotify.com\" in spotifyURL and \"track\" in spotifyURL):\n raise Exception(\"passed URL is not that of a track: %s\" % spotifyURL)\n\n # query spotify for song, artist, album details\n spotifyClient = SpotifyClient()\n\n rawTrackMeta = spotifyClient.track(spotifyURL)\n\n primaryArtistId = rawTrackMeta[\"artists\"][0][\"id\"]\n rawArtistMeta = spotifyClient.artist(primaryArtistId)\n\n albumId = rawTrackMeta[\"album\"][\"id\"]\n rawAlbumMeta = spotifyClient.album(albumId)\n\n return rawTrackMeta, rawArtistMeta, rawAlbumMeta\n\n\ndef get_song_lyrics(song_name: str, song_artists: List[str]) -> str:\n \"\"\"\n `str` `song_name` : name of song\n\n `list` `song_artists` : list containing name of contributing artists\n\n RETURNS `str`: Lyrics of the song.\n\n Gets the metadata of the song.\n \"\"\"\n\n headers = {\n \"Authorization\": \"Bearer alXXDbPZtK1m2RrZ8I4k2Hn8Ahsd0Gh_o076HYvcdlBvmc0ULL1H8Z8xRlew5qaG\",\n }\n api_search_url = \"https://api.genius.com/search\"\n search_query = f'{song_name} {\", \".join(song_artists)}'\n\n api_response = get(\n api_search_url, params={\"q\": search_query}, headers=headers\n ).json()\n\n song_id = api_response[\"response\"][\"hits\"][0][\"result\"][\"id\"]\n song_api_url = f\"https://api.genius.com/songs/{song_id}\"\n\n api_response = get(song_api_url, headers=headers).json()\n\n song_url = api_response[\"response\"][\"song\"][\"url\"]\n\n genius_page = get(song_url)\n soup = BeautifulSoup(genius_page.text, \"html.parser\")\n lyrics = 
soup.select_one(\"div.lyrics\").get_text()\n\n return lyrics.strip()\n","sub_path":"spotdl/search/metadataProvider.py","file_name":"metadataProvider.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"359459690","text":"import tkinter as tk\nfrom PIL import ImageTk, Image, ImageDraw\nimport os\nimport csv\n\ngrid_size = 10\ngrid_lines = int(150 / grid_size)\n\nunlabeled_path = \"./unlabeled/\"\n\n\ndef save_map_grid():\n root.labels = [str(l[0] * 1000 + l[1]) for l in root.labels]\n root.labels = list(set(root.labels))\n root.labels.sort()\n root.labels = \" \".join(root.labels)\n data = [root.image_list[root.img_index], root.labels]\n root.writer.writerow(data)\n root.labels = []\n os.rename(unlabeled_path + root.image_list[root.img_index],\n \"./coordinate_images/\" + root.image_list[root.img_index])\n\n\ndef change_pic():\n save_map_grid()\n root.img_index += 1\n draw_map()\n map_name_label.configure(text=root.image_list[root.img_index])\n\n\ndef undo():\n root.labels.pop()\n draw_map()\n\n\ndef draw_grid(draw):\n fill = (0, 0, 255, 96)\n for x in range(grid_lines):\n z = grid_size * x\n draw.line([z, 0, z, 150], fill=fill)\n draw.line([0, z, 150, z], fill=fill)\n\n fill = (0, 255, 255, 96)\n for l in root.labels:\n x = l[0] * grid_size\n y = l[1] * grid_size\n draw.rectangle((x, y, x + grid_size, y + grid_size), fill=fill)\n\n\ndef print_coord(event):\n x = event.x - grid_size / 2\n if x < 0:\n x = 0\n x = int(x // grid_size)\n y = event.y - grid_size / 2\n if y < 0:\n y = 0\n y = int(y // grid_size)\n root.labels.append([x, y])\n draw_map()\n print(root.labels)\n\n\ndef draw_map():\n img = Image.open(unlabeled_path + root.image_list[root.img_index]).convert('RGBA')\n overlay = Image.new('RGBA', img.size, (255, 255, 255, 0))\n draw = ImageDraw.Draw(overlay)\n draw_grid(draw)\n out = Image.alpha_composite(img, overlay)\n root.photo = ImageTk.PhotoImage(out)\n 
map_label.configure(image=root.photo)\n\n\nroot = tk.Tk()\nimages = os.listdir(unlabeled_path)\nimages.sort()\nroot.image_list = images\nroot.img_index = 0\nroot.labels = []\n\nmap_label = tk.Label(root)\nmap_label.bind(\"