diff --git "a/280.jsonl" "b/280.jsonl" new file mode 100644--- /dev/null +++ "b/280.jsonl" @@ -0,0 +1,784 @@ +{"seq_id":"265452538","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\nimport json\nimport os\nfrom pathlib import Path\n\nclass Configuration_manager(object):\n\t\"\"\"Configuration loader\n\t\n\tThis script loads the configuration from a file 'config.json'.\n\t\n\t\"\"\"\n\n\t@staticmethod\n\tdef load_config():\n\t\tfile_dir = os.path.dirname(os.path.abspath(__file__))\n\t\tpath = Path(file_dir)\n\t\tfile_route = '/repositorio/conf/config.json'\n\t\tfile_option = 'r'\n\t\twith open(str(path.parent) + file_route, file_option) as f:\n\t\t\tconfig = json.load(f)\n\n\t\treturn config\n\n\t@staticmethod\n\tdef save_values(config_data):\n\n\t\tfile_dir = os.path.dirname(os.path.abspath(__file__))\n\t\tpath = Path(file_dir)\n\t\tfile_route = '/repositorio/conf/config.json'\n\t\tfile_option = 'r+'\n\n\t\twith open(str(path.parent) + file_route, file_option) as f:\n\t\t\tf.seek(0) # <--- should reset file position to the beginning.\n\t\t\tjson.dump(config_data, f, indent=4)\n\t\t\tf.truncate() \n","sub_path":"mlflow-github/app/conf/Configuration_manager.py","file_name":"Configuration_manager.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"495129857","text":"# Goal: To plot together results from different runs\n\nimport numpy as np\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nimport cPickle\nfrom time import sleep\n\na = '/Volumes/group_dv/personal/DValenzano/month-by-month/Jun2015/simul/asex/17-Jun/plot_values_run1.txt'\na_out = '/Volumes/group_dv/personal/DValenzano/month-by-month/Jun2015/simul/asex/17-Jun/'\n\ns = '/Volumes/group_dv/personal/DValenzano/month-by-month/Jun2015/simul/sex/17-Jun-2015/plot_values_run1.txt'\ns_out = '/Volumes/group_dv/personal/DValenzano/month-by-month/Jun2015/simul/sex/17-Jun/'\n\nclass cp(object):\n def __init__(self, inp):\n self.inp = inp\n self.o = open(self.inp, 'rb')\n self.pop_in = cPickle.load(self.o)\n self.n_stage = len(self.pop_in)-1\n self.res_in = cPickle.load(self.o)\n self.age_distr_in = cPickle.load(self.o)\n #print np.shape(age_distr_in[0]) \n self.repr_rate_in = cPickle.load(self.o)\n self.repr_rate_sd_in = cPickle.load(self.o)\n for i in range(len(self.repr_rate_sd_in)):\n self.repr_rate_sd_in[i] = np.array(self.repr_rate_sd_in[i])\n self.repr_rate_sd_in[i].shape = (71,1)\n self.repr_rate_junk_in = cPickle.load(self.o)\n self.surv_rate_in = cPickle.load(self.o)\n self.surv_rate_sd_in = cPickle.load(self.o)\n for i in range(len(self.surv_rate_sd_in)):\n self.surv_rate_sd_in[i] = np.array(self.surv_rate_sd_in[i])\n self.surv_rate_sd_in[i].shape = (71,1)\n self.surv_rate_junk_in = cPickle.load(self.o)\n self.repr_fit_in = cPickle.load(self.o)\n self.repr_fit_junk_in = cPickle.load(self.o)\n self.surv_fit_in = cPickle.load(self.o)\n self.surv_fit_junk_in = cPickle.load(self.o)\n self.fit_in = np.array(self.repr_fit_in)*np.array(self.surv_fit_in)\n self.fit_junk_in = np.array(self.repr_fit_junk_in)*np.array(self.surv_fit_junk_in)\n self.dens_surv_in = cPickle.load(self.o)\n self.dens_repr_in = cPickle.load(self.o)\n self.hetrz_mea = cPickle.load(self.o)\n self.hetrz_mea_sd = cPickle.load(self.o) # when simul version > 0.6 \n self.males_females_ages = cPickle.load(self.o)\n self.o.close()\n \n # actual survival series \n def compute_actual_surv_rate(self, p):\n #self.p = p\n \"\"\" \n Takes age distribution of two 
consecutive stages and computes the \n fractions of those survived from age x to age x+1. The cumulative product \n of those values builds the final result. \n Returns a numpy array. \n \"\"\"\n div = self.age_distr_in[p]*self.pop_in[p]\n div[div == 0] = 1\n stage2 = np.array(list((self.age_distr_in[p+1]*self.pop_in[p+1]))[1:]+[0])\n\n res = stage2 / div\n for i in range(1,len(res)):\n res[i] = res[i-1] * res[i]\n return res \n\n def avr_actual_surv_rate(self, m):\n \"\"\"Averages actual survival rate over 100 stages.\"\"\"\n if m <= 50:\n res2 = self.compute_actual_surv_rate(m+100)\n for i in range(m,m+100):\n res2 += self.compute_actual_surv_rate(i)\n return res2/100\n if m >= self.n_stage-50:\n res2 = self.compute_actual_surv_rate(self.n_stage-101)\n for i in range(self.n_stage-100,self.n_stage-1):\n res2 += self.compute_actual_surv_rate(i)\n return res2/100\n else:\n res2 = self.compute_actual_surv_rate(m+50)\n for i in range(m-50,m+50):\n res2 += self.compute_actual_surv_rate(i)\n return res2/100\n \n\nca = cp(a)\ncs = cp(s)\n\nay = ca.avr_actual_surv_rate(15000)[:-1]\nax = np.array(range(70))\n\nsy = cs.avr_actual_surv_rate(15000)[:-1]\nsx = np.array(range(70))\n\naz = np.polyfit(ax,ay,3)\naq = np.poly1d(az)\nax_new = np.linspace(ax[0], ax[-1], 50)\nay_new = aq(ax_new)\nplt.plot(ax,ay,'o', ax_new, ay_new)\nplt.plot(ax,ay)\nplt.xlim([ax[0], ax[-1] + 1])\nplt.ylim([0, 1])\nplt.show()\n\nsz = np.polyfit(sx,sy,3)\nsq = np.poly1d(sz)\nsx_new = np.linspace(sx[0], sx[-1], 50)\nsy_new = sq(sx_new)\nplt.plot(sx,sy,'o', sx_new, sy_new)\nplt.xlim([sx[0], sx[-1] + 1])\nplt.ylim([0, 1])\nplt.show()\n\nplt.plot(ax,ay,'ro', sx, sy, 'g^')\nplt.xlim([ax[0], ax[-1] + 1])\nplt.ylim([0, 1])\nplt.show()\n","sub_path":"01-Jul-2015.py","file_name":"01-Jul-2015.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"426343666","text":"#!/usr/bin/env python\n\nimport ROOT as R\n\nimport os\nimport sys\nimport fnmatch\n\ndef find_files(rootdir='.', pattern='*'):\n\treturn [os.path.join(rootdir, filename)\n\t\t\tfor rootdir, dirnames, filenames in os.walk(rootdir)\n\t\t\tfor filename in filenames\n\t\t\tif fnmatch.fnmatch(filename, pattern)]\n\ndef make_draw_files(dname='.'):\n\tl = find_files(dname, '*.root')\n\tfor fn in l:\n\t\tf = R.TFile(fn)\n\t\ttd = f.GetListOfKeys()\n\t\tfdraw = fn+'.draw'\n\t\twith open(fdraw, 'w') as fout:\n\t\t\tprint >> fout,'#figure'\n\t\t\tprint >> fout,'#title: {}'.format(fdraw)\n\t\t\tfor k in td:\n\t\t\t\tprint >> fout,os.path.abspath(fn),'\t\t:'+k.GetName(),':hist',':','title='+k.GetTitle()\n\ndef main():\n\tmake_draw_files('./')\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Draw/macro/published/LK0jet-master_From_XiaomingZhang/utils/make_draw_files.py","file_name":"make_draw_files.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"577382843","text":"import robel\nimport gym\nimport torch\nimport torch.nn as nn\nimport gym\nimport numpy as np\nimport os\nimport sys\nimport copy\nimport datetime\nimport argparse\nimport collections\n\n\nfrom modules import *\nimport utils\n\nfrom tensorboardX import SummaryWriter\n\n\nimport pdb\n\ndef eval_policy(policy, env_name, eval_episodes=1, broken_info=False, real_robot = False, seed = 0):\n env_seed = 2 ** 32 - 1 - seed\n if real_robot:\n eval_env = gym.make(env_name, device_path='/dev/tty.usbserial-FT3WI485')\n else:\n eval_env = 
gym.make(env_name)\n eval_env.seed(env_seed)\n\n avg_reward = 0.\n for _ in range(eval_episodes):\n state, done = eval_env.reset(), False\n if broken_info:\n state = np.concatenate((state, np.ones(9)))\n while not done:\n action = policy.select_action(np.array(state), 'test')\n state, reward, done, _ = eval_env.step(action)\n if broken_info:\n state = np.concatenate((state, np.ones(9)))\n avg_reward += reward\n \n avg_reward /= eval_episodes\n\n print(\"---------------------------------------\")\n print(\"Evaluation over {} episodes: {:.3f}\".format(eval_episodes, avg_reward))\n print(\"---------------------------------------\")\n return avg_reward\n\nbase_env = gym.make('DClawTurnFixed-v0')\n# env = gym.make('DClawTurnFixed-v0', device_path='/dev/tty.usbserial-FT3WI485')\n\nclass AdversarialEnv(object):\n def __init__(self,\n ddpg_action_dim,\n ddpg_state_dim,\n ddpg_buffer_max_size,\n writer,\n ddpg_gamma,\n ddpg_hidden_size,\n ddpg_save_freq,\n ddpg_record_freq,\n ddpg_batch_size,\n ddpg_max_action,\n ddpg_tau,\n ddpg_variance,\n device,\n broken_timesteps,\n broken_info = False,\n env_name='DClawTurnFixed-v0',\n real_robot=False,\n outdir = None,\n broken_info_recap = False):\n self.ddpg_action_dim = ddpg_action_dim\n self.ddpg_state_dim = ddpg_state_dim\n self.ddpg_buffer_max_size = ddpg_buffer_max_size\n self.writer = writer\n self.ddpg_gamma = ddpg_gamma\n self.ddpg_hidden_size = ddpg_hidden_size\n self.ddpg_save_freq = ddpg_save_freq\n self.device = device\n self.env_name = env_name \n self.real_robot = real_robot\n self.ddpg = DDPG(state_dim=ddpg_state_dim,\n action_dim=ddpg_action_dim,\n device=device,\n writer=writer,\n buffer_max_size=ddpg_buffer_max_size,\n gamma=ddpg_gamma,\n save_freq=ddpg_save_freq,\n record_freq=ddpg_record_freq,\n outdir = outdir,\n hidden_size=ddpg_hidden_size,\n broken_info_recap=broken_info_recap)\n self.broken_timesteps = broken_timesteps\n if real_robot:\n self.base_env = gym.make(env_name, device_path='/dev/tty.usbserial-FT3WI485')\n else:\n self.base_env = gym.make(env_name)\n self.action_space = self.base_env.action_space\n self.outdir = outdir\n self.broken_info = broken_info\n def step(self, adversarial_action: int, ddpg_obs):\n current_state = ddpg_obs\n original_state_dim = current_state.shape[0]\n if self.broken_info:\n current_state = np.concatenate((current_state, np.ones(9)))\n current_state[original_state_dim + adversarial_action] = 0\n total_done = False\n reward_list = []\n for i in range(self.broken_timesteps):\n ddpg_action = self.ddpg.select_action(current_state, 'test')\n ddpg_action[adversarial_action] = -0.6\n next_state, reward, done, info = self.base_env.step(ddpg_action)\n original_next_state = next_state\n if self.broken_info:\n joint_info = np.ones(9)\n joint_info[adversarial_action] = 0\n next_state = np.concatenate((next_state, joint_info))\n reward_list.append(reward)\n if done:\n total_done = done\n break\n current_state = next_state\n avg_reward = np.array(reward_list).mean()\n return original_next_state, avg_reward, total_done, info\n # ddpg_action = self.ddpg.select_action(ddpg_obs, 'test')\n # ddpg_action[advesarial_action] = 0.\n # next_state, reward, done, info = self.base_env.step(ddpg_action)\n # return next_state, reward, done, info\n def seed(self, input_seed):\n self.base_env.seed(input_seed)\n def reset(self):\n return self.base_env.reset()\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--start-timesteps\", type=int, default=int(1e4))\n 
parser.add_argument(\"--max-timesteps\", type=int, default=int(1e7))\n parser.add_argument(\"--eval-freq\", type=int, default=5000)\n parser.add_argument(\"--save-freq\", type=int, default=5000)\n parser.add_argument(\"--record-freq\", type=int, default=5000)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--buffer-max-size\", type=int, default=int(1e6))\n parser.add_argument(\"--ddpg-training-steps\", type=int, default=int(5000))\n parser.add_argument(\"--restore-step\", type=int, default=0)\n parser.add_argument(\"--ddpg-hidden-size\", type=int, default=512)\n parser.add_argument(\"--broken-info\", action='store_true', default=True,\n\t help=\"whether use broken joints indice as a part of state\")\n parser.add_argument(\"--broken-info-recap\", action='store_true', default=False,\n\t\t\t\t\t\thelp='whether to use broken info again in actor module to reinforce the learning')\n args = parser.parse_args()\n if args.broken_info_recap:\n assert args.broken_info\n base_env.seed(args.seed)\n if not os.path.exists('./logs'):\n os.system('mkdir logs')\n if not os.path.exists('./saved_models'):\n os.system('mkdir saved_models')\n outdir = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n outdir = os.path.join('./saved_models', outdir)\n os.system('mkdir ' + outdir)\n with open(outdir+'/setting.txt','w') as f:\n f.writelines(\"for each training ddpg episode, only a specifc joint can be disabled, it cannot be changed to other ones in this episode\")\n for each_arg, value in args.__dict__.items():\n f.writelines(each_arg + \" : \" + str(value)+\"\\n\")\n writer = SummaryWriter(logdir=('logs/{}').format(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n state_dim = base_env.reset().shape[0]\n original_state_dim = state_dim\n if args.broken_info:\n state_dim += 9\n action_dim = base_env.action_space.sample().shape[0]\n max_action = base_env.action_space.high[0]\n\n\n \"advesarial agent code\"\n adversarial_env = AdversarialEnv(ddpg_action_dim = action_dim,\n ddpg_state_dim=state_dim,\n ddpg_buffer_max_size=args.buffer_max_size,\n writer=writer,\n device=device,\n ddpg_gamma=0.9,\n ddpg_hidden_size=args.ddpg_hidden_size,\n ddpg_save_freq=args.save_freq,\n ddpg_record_freq=args.record_freq,\n ddpg_batch_size=64,\n ddpg_max_action=max_action,\n ddpg_tau=5e-3,\n ddpg_variance=0.1,\n broken_timesteps=1,\n broken_info=args.broken_info,\n outdir = outdir,\n broken_info_recap=args.broken_info_recap)\n if args.restore_step:\n print(\"restoring the model {}\".format(args.restore_step))\n adversarial_env.ddpg.restore_model_for_train(args.restore_step)\n # adversarial_env.ddpg.index = 0\n current_state = adversarial_env.reset()\n if args.broken_info:\n current_state = np.concatenate((current_state, np.ones(9)))\n episode = 0\n t = 0\n ddpg_t = 0\n adversary_t = 0\n minimal_index = 0\n while True:\n if t > args.max_timesteps:\n break\n \n \" the ddpg training loop\"\n broken_joints = collections.deque(maxlen=1)\n for i in range(args.ddpg_training_steps):\n t += 1\n ddpg_t += 1 \n if t % args.eval_freq == 0:\n print(\"-------------------------------------------\")\n print(\"steps:{:07d}\".format(t))\n print(\"episode:{:07d}\".format(episode))\n avg_reward = eval_policy(adversarial_env.ddpg, 'DClawTurnFixed-v0', broken_info=args.broken_info)\n writer.add_scalar('/eval/avg_reward',avg_reward, t)\n \n if ddpg_t == args.start_timesteps:\n print(\"start ddpg learning\")\n if ddpg_t < 
args.start_timesteps:\n original_action = adversarial_env.action_space.sample()\n else:\n original_action = adversarial_env.ddpg.select_action(current_state, 'train')\n action = copy.deepcopy(original_action)\n action[minimal_index] = - 0.6\n # action[adversary_action[0]] = 0\n next_state, reward, done, info = adversarial_env.base_env.step(action)\n if args.broken_info:\n next_state = np.concatenate((next_state, np.ones(9)))\n next_state[original_state_dim + minimal_index] = 0\n current_state[original_state_dim + minimal_index] = 0\n suc = info['score/success']\n adversarial_env.ddpg.add_buffer(current_state, original_action, next_state,reward,done)\n if ddpg_t > args.start_timesteps:\n adversarial_env.ddpg.train()\n current_state = next_state\n if done:\n broken_joints = collections.deque(maxlen=1)\n current_state = adversarial_env.reset()\n if args.broken_info:\n current_state = np.concatenate((current_state, np.ones(9)))\n episode += 1\n \"the adversary q training loop\"\n performance_list = []\n sum_reward = 0\n current_state = adversarial_env.reset()\n for i in range(action_dim):\n while True:\n next_state, reward, done, info = adversarial_env.step(i, current_state)\n sum_reward += reward\n current_state = next_state\n if done:\n current_state = adversarial_env.reset()\n performance_list.append(sum_reward)\n sum_reward = 0\n break\n performance_list = np.array(performance_list)\n minimal_index = np.where(performance_list == performance_list.min())\n minimal_index = minimal_index[0][0]\n current_state = adversarial_env.reset()\n if args.broken_info:\n current_state = np.concatenate((current_state, np.ones(9)))\n\n\n","sub_path":"train_kitty/train_trival_adversary.py","file_name":"train_trival_adversary.py","file_ext":"py","file_size_in_byte":11554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"321379004","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\nfrom __future__ import print_function,division\n\nimport rospy\nfrom larvae_behavior_classifier.msg import Features\n# from larvae_behavior_classifier.msg import Behavior\nfrom mightex_controller.msg import CmdCurrent\n\nfrom std_msgs.msg import Empty\n\nfrom lavis_stimuli_controller.msg import HeadCastStimuli\nfrom lavis_stimuli_controller.msg import StochasticStimuli\nfrom lavis_stimuli_controller.msg import GaussianStimuli\n\nfrom datetime import date\nimport time\nimport csv\nimport os\nimport copy\n\nFOURIER_N = 7\nCOEFF_NAMES = ['ax','bx','ay','by']\n\n\nclass DataWriter(object):\n def __init__(self,*args,**kwargs):\n rospy.loginfo('Initializing larve_data_writer node...')\n self._initialized = False\n rospy.Subscriber(\"features\",Features,self._features_callback)\n rospy.Subscriber('~start',Empty,self._start_callback)\n rospy.Subscriber('~stop',Empty,self._stop_callback)\n rospy.Subscriber(\"/mightex_controller_node/cmd_current\",CmdCurrent,self._cmdcurrentvars_callback)\n # rospy.Subscriber(\"/lavis/lavis_stimuli_controller/SpatialStimuli\",spatialstimuli,self._spatialstimulivars_callback)\n rospy.Subscriber(\"/camera/head_cast_node/start\", HeadCastStimuli,self._headcast_start_callback)\n rospy.Subscriber(\"/camera/stochastic_node/start\", StochasticStimuli,self._stochastic_start_callback)\n rospy.Subscriber(\"/camera/gaussian_node/start\", GaussianStimuli,self._gaussian_start_callback)\n\n self.csv_file = None\n self.csv_writer = None\n self.ledVals = 0\n self.L_current = 0\n self.R_current = 0\n self.header = [\n 'index',\n 'time',\n 'frame_interval_ms',\n 's',\n 's_filtered',\n 's_convolved_squared',\n 'eig_reduced',\n 'eig_reduced_filtered',\n 'eig_reduced_convolved_squared',\n 'angle_upper_lower_filtered',\n 'angle_upper_lower_convolved_squared',\n 'v_norm_absolute_5_filtered',\n 'v_norm_absolute_5_convolved_squared',\n 'damped_distance_filtered',\n 'damped_distance_convolved_squared',\n 'crab_speed_filtered',\n 'crab_speed_convolved_squared',\n 'skeleton_length_filtered',\n 'skeleton_length_convolved_squared',\n 'perimeter_filtered',\n 'perimeter_convolved_squared',\n 'asymmetry_filtered',\n 'asymmetry_convolved_squared',\n 'speed_reduced_filtered',\n 'speed_reduced_convolved_squared',\n 'x_head',\n 'y_head',\n 'x_neck',\n 'y_neck',\n 'x_tail',\n 'y_tail',\n 'x_center',\n 'y_center',\n 'x_stage',\n 'y_stage',\n 'x_head_pixels',\n 'y_head_pixels',\n 'x_neck_pixels',\n 'y_neck_pixels',\n 'x_tail_pixels',\n 'y_tail_pixels',\n 'x_center_pixels',\n 'y_center_pixels',\n 'b_fast',\n 'b_slow',\n 'b_stop',\n 'b_cast',\n 'b_run',\n 'b_curl',\n 'b_roll',\n 'b_left',\n 'b_right',\n 'b_back',\n 'b_hunch',\n 'b_straight',\n 'b_bend',\n 'b_ball',\n 'b_weird'\n ]\n self.features = copy.copy(self.header)\n self.features.remove('index')\n self.features.remove('time')\n self.features.remove('frame_interval_ms')\n self.now = rospy.get_rostime().to_sec()\n\n for i in range(FOURIER_N):\n for j in range(len(COEFF_NAMES)):\n s = 'fourier_{0}[{1}]'.format(COEFF_NAMES[j],i)\n self.header.append(s)\n\n self.header.append('channel')\n self.header.append('current')\n self._initialized = True\n\n def _features_callback(self,data):\n if self._initialized and self.csv_writer is not None:\n row = []\n row.append(data.index)\n frame_time = data.frame_time.to_sec()\n row.append(frame_time)\n frame_interval_ms = data.frame_interval_s*1000\n row.append(frame_interval_ms)\n for feature in self.features:\n row.append(getattr(data,feature))\n k = 0\n for i in 
range(FOURIER_N):\n for j in range(len(COEFF_NAMES)):\n row.append(data.fourier_coeffs[k])\n k += 1\n\n try:\n row.append(self.ledVals.channel)\n row.append(self.ledVals.current)\n except AttributeError:\n row.append(0)\n row.append(0)\n \n # rospy.loginfo(\"we write now\")\n self.writerow(row)\n\n def _cmdcurrentvars_callback(self,data):\n self.ledVals = data\n # def _spatialstimulivars_callback(self,data):\n # self.spatialstimulivars = data\n\n def _headcast_start_callback(self,stimuli):\n self.L_str = stimuli.cast_left_current\n self.R_str = stimuli.cast_right_current\n\n def _stochastic_start_callback(self,stimuli):\n self.L_str = stimuli.stochastic_left_probability\n self.R_str = stimuli.stochastic_right_probability\n\n def _gaussian_start_callback(self,stimuli):\n self.L_str = stimuli.gaussian_left_mean\n self.R_str = stimuli.gaussian_right_mean\n\n def _start_callback(self,req):\n rospy.loginfo('Starting larve_data_writer.')\n if self.csv_writer is None:\n user_home_dir = os.path.expanduser('~')\n date_str = self._get_date_str()\n output_dir = os.path.join(user_home_dir,'data',date_str)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n date_time_str = self._get_date_time_str()\n\n output_path = os.path.join(output_dir,'Lch_' + str(self.L_str) + '_Rch_' + str(self.R_str) + '_' + date_time_str + '-data.txt')\n self.csv_file = open(output_path, 'w')\n\n # Create a new csv writer object to use as the output formatter\n self.csv_writer = csv.writer(self.csv_file,quotechar='\\\"',quoting=csv.QUOTE_MINIMAL)\n self.writerow(self.header)\n\n def _stop_callback(self,req):\n rospy.loginfo('Stopping larve_data_writer.')\n self.stop()\n\n def stop(self):\n if self.csv_writer is not None:\n self.csv_file.close()\n self.csv_file = None\n self.csv_writer = None\n\n # override csv.writer's writerow() to support utf8 encoding:\n def writerow(self,columns):\n if self.csv_writer is not None:\n utf8row = []\n for col in columns:\n utf8row.append(str(col).encode('utf8'))\n self.csv_writer.writerow(utf8row)\n\n def _get_date_str(self):\n today = date.today()\n date_str = \"{year}-{month}-{day}\".format(year=today.year,\n month=today.month,\n day=today.day)\n return date_str\n\n def _get_time_str(self):\n localtime = time.localtime()\n time_str = \"{hour}-{min}-{sec}\".format(hour=localtime.tm_hour,\n min=localtime.tm_min,\n sec=localtime.tm_sec)\n return time_str\n\n def _get_date_time_str(self):\n date_str = self._get_date_str()\n time_str = self._get_time_str()\n return date_str + '-' + time_str\n\n\nif __name__ == '__main__':\n rospy.init_node('larvae_data_writer')\n data_writer = DataWriter()\n try:\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n finally:\n data_writer.stop()\n","sub_path":"nodes/data_writer.py","file_name":"data_writer.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"508149844","text":"#/bin/python\n\nimport sys\nimport boto3\nRegion = str(sys.argv[1])\n\n#This will create a client of aws ec2 for the specified region\nec2 = boto3.client('ec2', region_name=Region)\nInstanceList = ['etcd-0', 'controller-0', 'worker-0', 'worker-1', 'worker-2']\n\n#Above created client will interect with ec2 instances using AWS SDKs based upon filtered instances\nresponse = ec2.describe_instances()\n\n\n#This is the sample attribute that you can fetch, I have fetched \"PublicIp\" for worker nodes, etcd, controller and created host file for ansible\n\n\nfor reservation in 
response[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n for tags in instance['Tags']:\n if tags['Key'] == 'Name' and tags['Value'] in InstanceList and instance['State']['Name'] == \"running\":\n if tags['Value'].startswith('etcd'):\n print (\"[INFO]: Etcd component created successfully\")\n elif tags['Value'].startswith('cont'):\n print (\"[INFO]: Controller component created successfully\")\n elif tags['Value'].startswith('worker'):\n print (\"[INFO]: \" + tags['Value'] + \" component created successfully\")\n","sub_path":"dev/0.0.1/infra_provisioning/TestInfraProvision.py","file_name":"TestInfraProvision.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"540375467","text":"import scrapy\nfrom newsbot.items import Document\nfrom newsbot.spiders.news import NewsSpider, NewsSpiderConfig\nfrom scrapy.linkextractors import LinkExtractor\nfrom urllib.parse import urlsplit\nfrom datetime import datetime\n\n\nclass RiaSpider(NewsSpider):\n name = 'ria'\n start_urls = ['https://www.ria.ru']\n config = NewsSpiderConfig(\n title_path='//h1[contains(@class, \"article__title\")]/text()',\n date_path='//div[contains(@class, \"endless__item\")]/@data-published',\n date_format='%Y-%m-%dT%H:%MZ',\n text_path='//div[contains(@class, \"article__block\") and @data-type = \"text\"]//text()',\n topics_path='//a[contains(@class, \"article__tags-item\")]/text()'\n )\n news_le = LinkExtractor(restrict_css='div.lenta__item')\n max_page_depth = 4\n\n def parse(self, response):\n article_links = self.news_le.extract_links(response)\n\n if response.meta.get('page_depth', 1) < self.max_page_depth:\n # Getting and forming the next page link\n next_page_link = response.xpath('//div[contains(@class, \"lenta__item\")]/@data-next').extract()[0]\n link_url = '{}{}'.format(self.start_urls[0], next_page_link)\n\n yield scrapy.Request(url=link_url,\n priority=100,\n callback=self.parse,\n meta={'page_depth': response.meta.get('page_depth', 1) + 1}\n )\n\n for link in article_links:\n yield scrapy.Request(url=link.url, callback=self.parse_document)\n\n def parse_document(self, response):\n for res in super().parse_document(response):\n # Leave only the last tag\n # (the last tag is always a global website tag)\n res['topics'] = [res['topics'][-1]]\n\n yield res\n","sub_path":"scrapping/newsbot/newsbot/spiders/ria.py","file_name":"ria.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"510934306","text":"from tkinter import *\nfrom PIL import ImageTk\nfrom tkinter import messagebox\nimport pymysql\nimport sys\nfrom math import sqrt\nimport tkinter as tk\n\nnum = 0.0\nnewNum = 0.0\nsumIt = 0.0\nsumAll = 0.0\noperator = \"\"\n\nopVar = False\n\nclass Login:\n def __init__(self,root):\n self.root = root\n self.root.title(\"Tugas Pemrograman Visual Pahrul\")\n self.root.geometry(\"1350x700+0+0\")\n self.logo_icon=PhotoImage(file=\"logoUnhas.png\")\n self.kal_icon=PhotoImage(file=\"pahrul.png\")\n \n self.root.resizable(False,False)\n self.loginform()\n \n def loginform(self):\n Frame_login=Frame(self.root,bg=\"green\")\n Frame_login.place(x=0,y=0,height=700,width=1366)\n\n\n frame_input1=Frame(self.root,bg='green')\n frame_input1.place(x=200,y=130,height=450,width=430)\n\n\n logolbl=Label(frame_input1, image=self.logo_icon, bg='green')\n logolbl.place(x=50,y=-50)\n\n \n frame_input=Frame(self.root,bg='green')\n 
frame_input.place(x=620,y=130,height=450,width=330)\n\n label1=Label(frame_input,text=\"MASUK\",font=('poppins',32),\n fg=\"black\",bg='green')\n label1.place(x=95,y=30)\n\n label1=Label(frame_input,text=\"Masukkan Nama dan Password yang Benar\",font=('poppins',10, \"bold\"),\n fg=\"black\",bg='green')\n label1.place(x=30,y=100)\n \n label2=Label(frame_input,text=\"Nama\",font=(\"poppins\",13,\"bold\"),\n fg='black',bg='green')\n label2.place(x=30,y=153)\n\n self.nama_txt=Entry(frame_input,font=(\"poppins\",13),\n bg='#e2e0e1')\n self.nama_txt.place(x=30,y=185,width=270,height=35)\n\n label3=Label(frame_input,text=\"Password\",font=(\"poppins\",13,\"bold\"),\n fg='black',bg='green')\n label3.place(x=30,y=223)\n\n self.password=Entry(frame_input,font=(\"poppins\",13),\n bg='#e2e0e1')\n self.password.place(x=30,y=255,width=270,height=35)\n\n btn=Button(frame_input,text=\"Daftar\",command=self.Register,cursor=\"hand2\",\n font=(\"poppins\",15),fg=\"white\",bg=\"black\",\n bd=0,width=15,height=0)\n btn.place(x=70,y=400)\n\n\n btn2=Button(frame_input,text=\"Masuk\",command=self.login,cursor=\"hand2\",\n font=(\"poppins\",15),fg=\"white\",bg=\"black\",\n bd=0,width=15,height=1)\n btn2.place(x=70,y=340)\n\n def login(self):\n if self.nama_txt.get()==\"\" or self.password.get()==\"\":\n messagebox.showerror(\"Error\",\"All fields are required\",parent=self.root)\n else:\n try:\n con=pymysql.connect(host='localhost',user='root',password='',\n database='tugasPV', port=3306)\n cur=con.cursor()\n cur.execute('select * from register where nama=%s and password=%s'\n ,(self.nama_txt.get(),self.password.get()))\n row=cur.fetchone()\n if row==None:\n messagebox.showerror('Error','Invalid Username And Password'\n ,parent=self.root)\n self.loginclear()\n self.nama_txt.focus()\n else:\n self.appscreen()\n con.close()\n except Exception as es:\n messagebox.showerror('Error',f'Error Due to : {str(es)}'\n ,parent=self.root)\n\n def Register(self):\n Frame_login1=Frame(self.root,bg=\"green\")\n Frame_login1.place(x=0,y=0,height=700,width=1366)\n\n frame_input2=Frame(self.root,bg='green')\n frame_input2.place(x=340,y=130,height=480,width=630)\n\n label1=Label(frame_input2,text=\"Daftar\",font=('poppins',22,'bold'),\n fg=\"black\",bg='green')\n label1.place(x=30,y=20)\n\n #Nama\n label2=Label(frame_input2,text=\"Nama\",font=(\"poppins\",12,\"bold\"),\n fg='black',bg='green')\n label2.place(x=30,y=95)\n\n self.entry=Entry(frame_input2,font=(\"poppins\",11),\n bg='lightgray')\n self.entry.place(x=30,y=125,width=270,height=35)\n\n #NIM\n label4=Label(frame_input2,text=\"NIM\",font=(\"poppins\",12,\"bold\"),\n fg='black',bg='green')\n label4.place(x=330,y=95)\n\n self.entry3=Entry(frame_input2,font=(\"poppins\",11),\n bg='lightgray')\n self.entry3.place(x=330,y=125,width=270,height=35)\n\n \n #Tanggal Lahir\n label7=Label(frame_input2,text=\"Tanggal Lahir\",\n font=(\"poppins\",12,\"bold\"),fg='black',bg='green')\n label7.place(x=30,y=175)\n\n self.entry5=Entry(frame_input2,font=(\"poppins\",11),\n bg='lightgray')\n self.entry5.place(x=30,y=205,width=270,height=35)\n\n #Gender\n label6 = Label(frame_input2, text=\"Jenis Kelamin\",\n font=(\"poppins\",12,\"bold\"),fg='black',bg='green')\n label6.place(x=330,y=175)\n\n var = IntVar()\n Radiobutton(frame_input2, text=\"Laki-laki\", font=(\"poppins\",12),fg='black',bg='green', \n padx = 5, variable=var, value=1).place(x=330,y=203)\n Radiobutton(frame_input2, text=\"Perempuan\", font=(\"poppins\",12),fg='black',bg='green',\n padx = 20, variable=var, 
value=2).place(x=430,y=203)\n\n #Email\n label3=Label(frame_input2,text=\"Email\",font=(\"poppins\",12,\"bold\"),\n fg='black',bg='green')\n label3.place(x=30,y=255)\n\n self.entry2=Entry(frame_input2,font=(\"poppins\",11),\n bg='lightgray')\n self.entry2.place(x=30,y=285,width=270,height=35)\n\n #Password\n label5=Label(frame_input2,text=\"Password\",\n font=(\"poppins\",12,\"bold\"),fg='black',bg='green')\n label5.place(x=330,y=255)\n\n self.entry4=Entry(frame_input2,font=(\"poppins\",11),\n bg='lightgray')\n self.entry4.place(x=330,y=285,width=270,height=35)\n\n def register():\n gender = var.get()\n\n if self.entry.get()==\"\"or self.entry2.get()==\"\"or self.entry3.get()==\"\"or self.entry4.get()==\"\":\n messagebox.showerror(\"Error\",\"All Fields Are Required\",parent=self.root)\n else:\n try:\n con=pymysql.connect(host='localhost',user='root',password='',\n database='tugasPV', port=3306)\n cur=con.cursor()\n cur.execute(\"select * from register where nama=%s\"\n ,self.entry3.get())\n row=cur.fetchone()\n if row!=None:\n messagebox.showerror(\"Error\"\n ,\"User already Exist,Please try with another Email\"\n ,parent=self.root)\n # self.regclear()\n self.entry.focus()\n else:\n cur.execute(\"insert into register values(%s,%s,%s,%s,%s,%s)\"\n ,(self.entry.get(),self.entry3.get(),\n self.entry2.get(), self.entry4.get(),\n gender, self.entry5.get()))\n con.commit()\n con.close()\n messagebox.showinfo(\"Success\",\"Register Succesfull\"\n ,parent=self.root)\n # self.regclear()\n except Exception as es:\n messagebox.showerror(\"Error\",f\"Error due to:{str(es)}\"\n ,parent=self.root)\n\n \n #Button Register\n btn2=Button(frame_input2,command=register,text=\"Daftar\"\n ,cursor=\"hand2\",font=(\"poppins\",15),fg=\"white\",\n bg=\"black\",bd=0,width=47,height=0)\n btn2.place(x=30,y=360)\n\n btn3=Button(frame_input2,command=self.loginform,\n text=\"Sudah Punya Akun, Masuk\",cursor=\"hand2\",\n font=(\"poppins\",10),bg='green',fg=\"black\",bd=0)\n btn3.place(x=225,y=425)\n\n def appscreen(self):\n Frame_menu=Frame(self.root,bg=\"green\")\n Frame_menu.place(x=0,y=0,height=700,width=1366)\n \n label1=Label(Frame_menu,text=\"M E N U\"\n ,font=('poppins',32,'bold'),\n fg=\"white\",bg='blue')\n label1.place(x=575,y=100)\n\n logolbl=Label(Frame_menu, image=self.kal_icon, bg='white')\n logolbl.place(x=500,y=240)\n\n btn = Button(Frame_menu, text=\"Kalkulator Sederhana\",command=self.kalkulator, cursor=\"hand2\",\n font=(\"poppins\", 13, \"bold\"), fg=\"white\", bg=\"blue\",\n bd=0,width=22,height=1)\n btn.place(x=350,y=500)\n\n\n \n\n btn2=Button(Frame_menu,text=\"<<< Kembali\",command=self.loginform,cursor=\"hand2\",\n font=(\"poppins\",13),fg=\"white\",bg=\"black\",\n bd=0,width=10,height=1)\n btn2.place(x=30,y=600)\n\n def kalkulator(self):\n Frame_menu1=Frame(self.root,bg=\"green\")\n Frame_menu1.place(x=0,y=0,height=700,width=1366)\n \n label1=Label(Frame_menu1,text=\"KALKULATOR SEDERHANA\"\n ,font=('poppins',32,'bold'),\n fg=\"white\",bg='blue')\n label1.place(x=405,y=80)\n\n btn2=Button(Frame_menu1,text=\"<<< Kembali\",command=self.appscreen,cursor=\"hand2\",\n font=(\"poppins\",13),fg=\"white\",bg=\"black\",\n bd=0,width=10,height=1)\n btn2.place(x=30,y=600)\n \n frame_input=Frame(self.root,bg='blue')\n frame_input.place(x=450,y=150,height=380,width=450)\n\n layar = Entry(frame_input, font=(\"poppins\",13),\n bg='white')\n layar.place(x=30,y=20,width=390,height=45)\n\n def tambahAngka(angka):\n angkaPer = layar.get()\n layar.delete(0, END)\n layar.insert(0, str(angkaPer) + str(angka))\n\n def 
hapus():\n layar.delete(0, END)\n\n def samaDengan():\n angka_kedua = layar.get()\n layar.delete(0, END)\n\n if math == \"tambah\":\n layar.insert(0, angkaFirst + int(angka_kedua))\n\n if math == \"kurang\":\n layar.insert(0, angkaFirst - int(angka_kedua))\n\n if math == \"kali\":\n layar.insert(0, angkaFirst * int(angka_kedua))\n\n if math == \"bagi\":\n layar.insert(0, angkaFirst / int(angka_kedua))\n\n\n def tambah():\n angka_pertama = layar.get()\n layar.delete(0, END)\n global angkaFirst\n global math\n angkaFirst = int(angka_pertama)\n math = \"tambah\"\n\n\n def kurang():\n angka_pertama = layar.get()\n layar.delete(0, END)\n global angkaFirst\n global math\n angkaFirst = int(angka_pertama)\n math = \"kurang\"\n\n def kali():\n angka_pertama = layar.get()\n layar.delete(0, END)\n global angkaFirst\n global math\n angkaFirst = int(angka_pertama)\n math = \"kali\"\n\n def bagi():\n angka_pertama = layar.get()\n layar.delete(0, END)\n global angkaFirst\n global math\n angkaFirst = int(angka_pertama)\n math = \"bagi\"\n\n angka1 = Button(frame_input, text=1, bd=0, font=(\"poppins\",10), \n width=10,height=2, command=lambda: tambahAngka(1))\n angka1.place(x=30,y=80)\n\n angka2 = Button(frame_input, text=2, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(2))\n angka2.place(x=130,y=80)\n\n angka3 = Button(frame_input, text=3, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(3))\n angka3.place(x=230,y=80)\n\n angka4 = Button(frame_input, text=4, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(4))\n angka4.place(x=30,y=150)\n \n angka5 = Button(frame_input, text=5, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(5))\n angka5.place(x=130,y=150)\n \n angka6 = Button(frame_input, text=6, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(6))\n angka6.place(x=230,y=150)\n \n angka7 = Button(frame_input, text=7, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(7))\n angka7.place(x=30,y=220)\n \n angka8 = Button(frame_input, text=8, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(8))\n angka8.place(x=130,y=220)\n\n angka9 = Button(frame_input, text=9, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(9))\n angka9.place(x=230,y=220)\n \n angka0 = Button(frame_input, text=0, bd=0, font=(\"poppins\",10),\n width=10,height=2, command=lambda: tambahAngka(0))\n angka0.place(x=130,y=290)\n\n hapus = Button(frame_input, text=\"hapus\", bd=0, \n width=10,height=2, font=(\"poppins\",10), command=hapus)\n hapus.place(x=30,y=290)\n\n samaDengan = Button(frame_input, text=\"=\", bd=0, \n width=10,height=2, font=(\"poppins\",10), command=samaDengan)\n samaDengan.place(x=230,y=290)\n\n tambah = Button(frame_input, text=\"+\", bd=0, width=10,height=2, \n font=(\"poppins\",10), command=tambah)\n tambah.place(x=330,y=290)\n \n kurang = Button(frame_input, text=\"-\", bd=0, width=10,height=2, \n font=(\"poppins\",10), command=kurang)\n kurang.place(x=330,y=220)\n \n bagi = Button(frame_input, text=\"/\", bd=0, width=10,height=2, \n font=(\"poppins\",10), command=bagi)\n bagi.place(x=330,y=150)\n\n kali = Button(frame_input, text=\"*\", bd=0, width=10,height=2, \n font=(\"poppins\",10), command=kali)\n 
kali.place(x=330,y=80)\n\n\nroot=Tk()\nob=Login(root)\nroot.mainloop()","sub_path":"Pahrul_Tugas5.py","file_name":"Pahrul_Tugas5.py","file_ext":"py","file_size_in_byte":14586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"578856461","text":"import pygame.font\nfrom ship import Ship\nfrom pygame.sprite import Group\n\n\nclass Scoreboard():\n def __init__(self,aliens_settings,screen,stats,ship):\n self.screen=screen\n self.screen_rect=screen.get_rect()\n self.aliens_settings=aliens_settings\n self.stats=stats\n self.text_color=(30,30,30)\n self.font=pygame.font.SysFont(None,48)\n self.ship=ship\n self.update_score()\n def update_score(self):\n self.perp_level()\n self.perp_score()\n self.perp_ships()\n self.perp_highest_score()\n def perp_score(self):\n score_str='{:,}'.format(int(round(self.stats.score,-1)))\n self.score_image=self.font.render(score_str,True,self.text_color,self.aliens_settings.bg_color)\n self.score_rect=self.score_image.get_rect()\n self.score_rect.right=self.screen_rect.right-20\n self.score_rect.top=20\n\n def perp_highest_score(self):\n score_str='{:,}'.format(int(round(self.stats.highest_score,-1)))\n self.highest_score_image=self.font.render(score_str,True,self.text_color,self.aliens_settings.bg_color)\n self.highest_score_rect=self.highest_score_image.get_rect()\n self.highest_score_rect.centerx=self.screen_rect.centerx\n self.highest_score_rect.top=20\n\n def perp_level(self):\n level_str=str(self.stats.level)\n self.level_image=self.font.render(level_str,True,self.text_color,self.aliens_settings.bg_color)\n self.level_rect=self.level_image.get_rect()\n self.level_rect.right=self.screen_rect.right-10\n self.level_rect.bottom=self.ship.rect.top\n\n def perp_ships(self):\n self.ships=Group()\n for i in range(self.stats.ships_left):\n ship=Ship(self.screen,self.aliens_settings)\n ship.rect.left=10+i*(10+ship.rect.width)\n ship.rect.top=self.screen_rect.top\n self.ships.add(ship)\n\n def scoreboard_show(self):\n self.screen.blit(self.score_image,self.score_rect)\n self.screen.blit(self.highest_score_image,self.highest_score_rect)\n self.screen.blit(self.level_image,self.level_rect)\n self.ships.draw(self.screen)\n","sub_path":"Alien_Invasion/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"545047643","text":"from flask import Flask, jsonify, request\nfrom models import Restaurant, Inspection, Session, serialize\n\napp = Flask(__name__)\n\n@app.route('/restaurant', defaults={'borough': 0})\n@app.route('/restaurant/')\ndef get_restaurants(borough):\n s = Session()\n q = s.query(Restaurant)\n if borough != 0:\n q = q.filter(Restaurant.borough==borough)\n ret = serialize(q.all())\n resp = jsonify(ret)\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n@app.route('/inspection')\ndef get_inspection():\n ids = request.args.get('ids').split(',')\n s = Session()\n ret = serialize(s.query(Inspection).filter(Inspection.restaurant.in_(ids)).order_by(Inspection.restaurant.asc()))\n resp = jsonify(ret)\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"50002505","text":"#打印日期格式为’2017-06-30 11:34:18 
星期五‘\nimport time,datetime\n\ndef get_week_day(date):\n week_day_dict = {\n 0 : '星期一',\n 1 : '星期二',\n 2 : '星期三',\n 3 : '星期四',\n 4 : '星期五',\n 5 : '星期六',\n 6 : '星期天',\n }\n day = date.weekday()\n print(day)\n return week_day_dict[day]\n\nweek = get_week_day(datetime.datetime.now())\nprint(week)\n#打印当前时间,时间格式为年-月-日 小时-分钟-秒 星期\nprint('今天是:',time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),week)\n\n#打印当前时间戳\nprint('当前时间戳:',time.mktime(time.localtime()))\n\n","sub_path":"20170630/20170630001.py","file_name":"20170630001.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"13894924","text":"from django.db import models\nfrom django.utils.timezone import now\n\n\nclass CarMake(models.Model):\n\n name = models.CharField(max_length=30)\n description = models.CharField(max_length=1000)\n\n def __str__(self):\n return f\"Name: {self.name}. Description: {self.description}\"\n\n\nclass CarModel(models.Model):\n\n TYPE_CHOICES = [\n (\"sedan\", 'Sedan'),\n (\"suv\", 'SUV'),\n (\"wagon\", 'Wagon')\n ]\n carmake = models.ForeignKey(CarMake, on_delete=models.CASCADE)\n name = models.CharField(max_length=30)\n description = models.CharField(max_length=1000)\n dealer_id = models.IntegerField()\n type_of_car = models.CharField(\n max_length=20,\n choices = TYPE_CHOICES,\n default = \"sedan\"\n )\n year = models.DateField(default=now)\n\n def __str__(self):\n return f\"Model: {self.name}, Description: {self.description}, Type: {self.type_of_car}, Year: {self.year}\"\n\n\n\nclass CarDealer:\n\n def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):\n\n self.address = address\n self.city = city\n self.full_name = full_name\n self.id = id\n self.lat = lat\n self.long = long\n self.short_name = short_name\n self.st = st\n self.zip = zip\n\n def __str__(self):\n return \"Dealer name: \" + self.full_name\n\n\nclass DealerReview:\n\n def __init__(self, car_model, car_year, car_make, dealership, id, name, purchase, purchase_date, review):\n \n self.car_model = car_model\n self.car_year = car_year\n self.dealer_id = dealership\n self.id = id\n self.name = name\n self.purchase = purchase\n self.purchase_date = purchase_date\n self.review = review\n self.car_make = car_make\n self.sentiment = \"\"\n\n def __str__(self):\n return \"Dealer name: \" + self.name\n","sub_path":"server/djangoapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"209736025","text":"import requests\nimport telebot\nimport os.path\nfrom telebot import types\nimport zbar\nfrom matplotlib.image import imread as read_image\nimport zbar.misc\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom core.models import Client, Code\n\n\nscanner = zbar.Scanner()\n\n\ndef imread(image_filename):\n image = read_image(image_filename)\n if len(image.shape) == 3:\n image = zbar.misc.rgb2gray(image)\n return image\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n bot = telebot.TeleBot(settings.TOKEN, parse_mode=None)\n\n @bot.message_handler(commands=['start'])\n def ask_user_name(message):\n bot.send_message(message.chat.id, 'Здравствуйте, как вас зовут?')\n bot.register_next_step_handler(message, get_user_name)\n\n def get_user_name(message):\n Client.objects.update_or_create(\n user_id=message.from_user.id,\n defaults={\n 'name': message.text,\n 
}\n )\n markup = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=10)\n send_button = types.KeyboardButton('Отправить номер телефона', request_contact=True)\n markup.add(send_button)\n\n bot.send_message(message.chat.id, 'Пришлите, пожалуйста, Ваш номер телефона.', reply_markup=markup)\n bot.register_next_step_handler(message, get_phone)\n\n def get_phone(message):\n if message.contact:\n try:\n client = Client.objects.get(user_id=message.from_user.id)\n client.phone = message.contact.phone_number\n client.save()\n except AttributeError:\n bot.send_message(message.chat.id, 'Не удалось получить номер телефона. Попробуйте ещё раз.')\n bot.register_next_step_handler(message, get_phone)\n else:\n markup = types.ReplyKeyboardRemove()\n bot.send_message(message.chat.id, 'Пришлите фотографию с штрихкодом.', reply_markup=markup)\n bot.register_next_step_handler(message, get_code)\n else:\n bot.send_message(message.chat.id, 'Нажмите на кнопку ниже.')\n bot.register_next_step_handler(message, get_phone)\n\n @bot.message_handler(content_types=['photo'])\n def get_code(message):\n if message.photo:\n file_id = message.photo[0].file_id\n file_info = bot.get_file(file_id)\n photo = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(settings.TOKEN, file_info.file_path))\n\n with open(os.path.join(settings.BASE_DIR, file_info.file_path), 'wb') as f:\n f.write(photo.content)\n \n image_as_numpy_array = imread(file_info.file_path)\n results = scanner.scan(image_as_numpy_array)\n if not results:\n bot.send_message(message.chat.id, 'Штрихкод не распознан.')\n \n client = Client.objects.get(user_id=message.from_user.id)\n for result in results:\n code = Code.objects.filter(code=result.data.decode('utf8'))\n if not code:\n bot.send_message(message.chat.id, 'Штрихкод не найден.')\n code = code.filter(client=None).first()\n if code:\n code.client = client\n code.save()\n\n client.score += code.points\n client.save()\n else:\n bot.send_message(message.chat.id, 'Штрихкод уже вводился.')\n\n markup = types.InlineKeyboardMarkup(row_width=2)\n score_button = types.InlineKeyboardButton('Мои баллы', callback_data='score')\n codes_button = types.InlineKeyboardButton('Мои коды', callback_data='codes')\n markup.add(score_button, codes_button)\n bot.send_message(message.chat.id, 'Штрихкод обработан.', reply_markup=markup)\n\n @bot.callback_query_handler(lambda query: query.data == 'score')\n def get_user_score(query):\n client = Client.objects.get(user_id=query.from_user.id)\n bot.send_message(query.message.chat.id, f'Ваши баллы: {client.score}')\n\n @bot.callback_query_handler(lambda query: query.data == 'codes')\n def get_user_score(query):\n codes = Code.objects.filter(client__user_id=query.from_user.id)\n text = '\\n'.join(f'{i}. 
{code.code}' for i, code in enumerate(codes, 1))\n            bot.send_message(query.message.chat.id, text)\n\n        bot.polling()\n","sub_path":"core/management/commands/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"551145900","text":"import matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\nimport libem\n\nimport os\nfrom shutil import rmtree\n\n\"\"\"\nThe Electrodynamics Simulation Library libvis.py\nAuthor: Adam Furman\nBrown University\n\nProvides methods of visualizing electrodynamics simulations.\n\"\"\"\n\nV_COLORS = \"RdBu\"#\"inferno\"\nT_COLORS = \"Greens\"\n\nclass Visualizations: \n    @staticmethod\n    def get_3d_vis_data(V, scale, top_left, resolution=1):\n        \"\"\"\n        Pull a subset of points from the array V to use in a 3D visualization.\n        \"\"\"\n        x = []\n        y = []\n        z = []\n        values = []\n        \n        for pt, value in np.ndenumerate(V[::resolution,::resolution,::resolution]):\n            loc = ((np.array(pt) * resolution) / scale) + top_left\n            x.append(loc[0])\n            y.append(loc[1])\n            z.append(loc[2])\n            values.append(value)\n        \n        return np.array(x), np.array(y), np.array(z), np.array(values)\n    \n    @staticmethod\n    def get_2d_vis_data(V, scale, top_left, resolution=1):\n        \"\"\"\n        Pull a subset of points from the array V to use in a 2D visualization.\n        \"\"\"\n        x = []\n        y = []\n        values = []\n        \n        for pt, value in np.ndenumerate(V[::resolution,::resolution]):\n            loc = ((np.array(pt) * resolution) / scale) + top_left\n            x.append(loc[0])\n            y.append(loc[1])\n            values.append(value)\n        \n        return np.array(x), np.array(y), np.array(values)\n    \n    @staticmethod\n    def colormesh_3d(sim, size=(10, 10), color_norm=\"auto\", resolution=\"auto\", graph_ax=None):\n        \"\"\"\n        Show the potential sim.V as a shaded color-coded map, where negative potentials are red\n        and positive potentials are blue.\n        Parameters:\n        - sim: EMSimulationSpace3D instance.\n        - size: the figure size of the plot.\n        - color_norm: the voltage to use as the maximum for the color display, defaults to largest abs(V[i,j,k]).\n        - resolution: how many points to skip when sampling in each direction, defaults to 1.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        ax = None\n        if graph_ax is None:\n            plt.figure(figsize=size)\n            ax = plt.axes(projection='3d')\n        else:\n            ax = graph_ax\n\n        if resolution == \"auto\":\n            resolution = 1\n        x, y, z, values = Visualizations.get_3d_vis_data(sim.V, sim.scale, sim.top_left, resolution)\n\n        cmap = plt.cm.RdBu if V_COLORS == \"RdBu\" else plt.cm.inferno\n        custom_cmap = cmap(np.arange(cmap.N))\n        custom_cmap[:,-1] = np.concatenate((np.linspace(1, 0, cmap.N // 2), np.linspace(0, 1, cmap.N // 2)))\n        custom_cmap = ListedColormap(custom_cmap)\n\n        if color_norm != None:\n            if color_norm == \"auto\":\n                flat_V = sim.V.flatten()\n                color_norm = abs(max(abs(max(flat_V)), abs(min(flat_V))))\n            ax.scatter(x, y, z, c=values, marker=\"p\", cmap=custom_cmap, vmin=-color_norm, vmax=color_norm)\n        else:\n            ax.scatter(x, y, z, c=values, marker=\"p\", cmap=custom_cmap)\n        \n        if graph_ax is None:\n            ax.set_xlabel(sim.axis_names[0])\n            ax.set_ylabel(sim.axis_names[1])\n            ax.set_zlabel(sim.axis_names[2])\n            plt.show()\n    \n    @staticmethod\n    def color_xsections_3d(sim3d, ax_loc, size=(10, 10), color_norm=\"auto\", resolution=\"auto\", graph_ax=None):\n        \"\"\"\n        Show two-dimensional cross-sections of the potential sim.V as a shaded color-coded map, \n        where 
negative potentials are red and positive potentials are blue.\n        Parameters:\n        - sim: EMSimulationSpace3D instance.\n        - ax_loc: tuples of (axis_id, location) to take cross sections at. See EMSimulationSpace2D.from_3d\n        - size: the figure size of the plot.\n        - color_norm: the voltage to use as the maximum for the color display, defaults to largest abs(V[i,j,k]).\n        - resolution: how many points to skip when sampling in each direction, defaults to 1.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        graph_V = np.zeros(sim3d.V.shape, float)\n        for axis, location in ax_loc:\n            sim2d = libem.EMSimulationSpace2D.from_3d(sim3d, axis, location)\n            if axis == 0:\n                loc = sim3d.global_unit_to_point((location, 0, 0))\n                graph_V[loc[0],:,:] = sim2d.V\n            elif axis == 1:\n                loc = sim3d.global_unit_to_point((0, location, 0))\n                graph_V[:,loc[1],:] = sim2d.V\n            elif axis == 2:\n                loc = sim3d.global_unit_to_point((0, 0, location))\n                graph_V[:,:,loc[2]] = sim2d.V\n        \n        dummy_sim = libem.EMSimulationSpace3D(sim3d.space_size, sim3d.scale, sim3d.top_left, sim3d.axis_names)\n        dummy_sim.V = graph_V\n        Visualizations.colormesh_3d(dummy_sim, size, color_norm, resolution, graph_ax)\n    \n    @staticmethod\n    def colormesh_2d(sim, size=(10, 10), color_norm=\"auto\", graph_ax=None):\n        \"\"\"\n        Show the potential sim.V as a shaded color-coded map, where negative potentials are red\n        and positive potentials are blue.\n        Parameters:\n        - sim: EMSimulationSpace2D instance.\n        - size: the figure size of the plot.\n        - color_norm: the voltage to use as the maximum for the color display, defaults to largest abs(V[i,j]).\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        fig = None\n        ax = None\n        if graph_ax is None:\n            fig = plt.figure(figsize=size)\n            ax = fig.gca()\n        else:\n            ax = graph_ax\n        \n        x, y = sim.get_meshgrid()\n        \n        if color_norm != None:\n            if color_norm == \"auto\":\n                flat_V = sim.V.flatten()\n                color_norm = abs(max(abs(max(flat_V)), abs(min(flat_V))))\n            ax.pcolormesh(x, y, sim.V.T, cmap=V_COLORS, shading=\"auto\", vmin=-color_norm, vmax=color_norm)\n        else:\n            ax.pcolormesh(x, y, sim.V.T, cmap=V_COLORS, shading=\"auto\")\n        \n        if graph_ax is None:\n            ax.set_xlabel(sim.axis_names[0])\n            ax.set_ylabel(sim.axis_names[1])\n            plt.show()\n    \n    @staticmethod\n    def contour_2d(sim, size=(10, 10), graph_ax=None):\n        \"\"\"\n        Show contour lines of the potential of a two-dimensional simulation.\n        Parameters:\n        - sim: EMSimulationSpace2D instance.\n        - size: the figure size of the plot.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        fig = None\n        ax = None\n        if graph_ax is None:\n            fig = plt.figure(figsize=size)\n            ax = fig.gca()\n        else:\n            ax = graph_ax\n        \n        x, y = sim.get_meshgrid()\n        \n        cnt_levels = []\n        sampled_V = sim.V.flatten()\n        min_sV = min(sampled_V)\n        max_sV = max(sampled_V)\n        std_sV = np.std(sampled_V)\n        steps = max(int(abs(max_sV - min_sV) / std_sV), 8)\n        prev_lvl = 0\n        for i in range(steps + 1):\n            lvl = min_sV + ((abs(max_sV - min_sV) / steps) * i)\n            if prev_lvl < 0 and lvl > 0:\n                cnt_levels.append(0)\n            cnt_levels.append(lvl)\n            prev_lvl = lvl\n        \n        contours = ax.contour(x, y, sim.V.T, levels=cnt_levels)\n        ax.clabel(contours)\n        \n        if graph_ax is None:\n            ax.set_xlabel(sim.axis_names[0])\n            ax.set_ylabel(sim.axis_names[1])\n            plt.show()\n\n    @staticmethod\n    def efield_3d(sim3d, 
size=(10, 10), resolution=\"auto\", graph_ax=None):\n        \"\"\"\n        Show the electric field E as three-dimensional arrows in space.\n        Parameters:\n        - sim3d: EMSimulationSpace3D instance.\n        - size: the figure size of the plot.\n        - resolution: how many points to skip when sampling in each direction, defaults to 1.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        ax = None\n        if graph_ax is None:\n            plt.figure(figsize=size)\n            ax = plt.axes(projection='3d')\n        else:\n            ax = graph_ax\n        \n        if resolution == \"auto\":\n            resolution = 1\n        \n        E_x, E_y, E_z = sim3d.get_efield()\n        \n        x, y, z, E_x = Visualizations.get_3d_vis_data(E_x, sim3d.scale, sim3d.top_left, resolution)\n        _, _, _, E_y = Visualizations.get_3d_vis_data(E_y, sim3d.scale, sim3d.top_left, resolution)\n        _, _, _, E_z = Visualizations.get_3d_vis_data(E_z, sim3d.scale, sim3d.top_left, resolution) \n        \n        ax.quiver3D(x, y, z, E_x, E_y, E_z, label=\"E\")\n        \n        if graph_ax is None:\n            ax.set_xlabel(sim3d.axis_names[0])\n            ax.set_ylabel(sim3d.axis_names[1])\n            ax.set_zlabel(sim3d.axis_names[2])\n            plt.show()\n    \n    @staticmethod\n    def efield_2d(sim2d, size=(10, 10), graph_ax=None):\n        \"\"\"\n        Show the electric field E as two-dimensional arrows in space.\n        Parameters:\n        - sim2d: EMSimulationSpace2D instance.\n        - size: the figure size of the plot.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        fig = None\n        ax = None\n        if graph_ax is None:\n            fig = plt.figure(figsize=size)\n            ax = fig.gca()\n        else:\n            ax = graph_ax\n        \n        E_x, E_y = sim2d.get_efield()\n        \n        x, y, E_x = Visualizations.get_2d_vis_data(E_x, sim2d.scale, sim2d.top_left, 1)\n        _, _, E_y = Visualizations.get_2d_vis_data(E_y, sim2d.scale, sim2d.top_left, 1)\n        \n        ax.quiver(x, y, E_x, E_y, label=\"E\")\n        \n        if graph_ax is None:\n            ax.set_xlabel(sim2d.axis_names[0])\n            ax.set_ylabel(sim2d.axis_names[1])\n            plt.show()\n    \n    @staticmethod\n    def trajectory_3d(time, x, size=(10, 10), graph_ax=None):\n        \"\"\"\n        Show the trajectory of a particle in three-dimensional space.\n        Parameters:\n        - time: array representing N time indices.\n        - x: 3xN array of particle X, Y, Z position\n        - size: the figure size of the plot.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        ax = None\n        if graph_ax is None:\n            plt.figure(figsize=size)\n            ax = plt.axes(projection='3d')\n        else:\n            ax = graph_ax\n        \n        ax.scatter(x[0], x[1], x[2], c=time, cmap=T_COLORS)\n        \n        if graph_ax is None:\n            plt.show()\n    \n    @staticmethod\n    def trajectory_2d(time, x3d, axis=0, size=(10, 10), graph_ax=None):\n        \"\"\"\n        Show the trajectory of a particle in two-dimensional space.\n        Parameters:\n        - time: array representing N time indices.\n        - x: 3xN array of particle X, Y, Z position\n        - axis: which component of the motion to not display (slice across).\n        - size: the figure size of the plot.\n        - graph_ax: existing graph axes to draw on.\n        Produces a plot, and shows it if no axes are provided.\n        \"\"\"\n        ax = None\n        if graph_ax is None:\n            plt.figure(figsize=size)\n            ax = plt.gca()\n        else:\n            ax = graph_ax\n        \n        x = np.delete(x3d, axis, axis=0)\n        \n        ax.scatter(x[0], x[1], c=time, cmap=T_COLORS)\n        \n        if graph_ax is None:\n            plt.show()\n    \nclass VideoMaker(object):\n    def __init__(self, figure, axes, videoDir=None, framerate=1):\n        \"\"\"\n        Utility to create a video from successive Matplotlib figures.\n        
Parameters:\n        - figure: the matplotlib Figure object which is updated.\n        - axes: the collection of matplotlib Axes objects that are drawn to.\n        - videoDir: the temporary directory to store video frames in and export to.\n        - framerate: how many frames per second.\n        \"\"\"\n        self.fig = figure\n        self.axes = np.array(axes)\n        self.framerate = framerate\n        \n        self.curr_frame = -1\n        \n        self.videoDir = \"video_tmp\" if videoDir is None else videoDir\n        if os.path.exists(self.videoDir):\n            rmtree(self.videoDir)\n        os.mkdir(self.videoDir)\n    \n    def new_frame(self):\n        \"\"\"\n        Called before the plot is updated. Clears the axes.\n        \"\"\"\n        self.curr_frame += 1\n        for axis in self.axes.flatten():\n            axis.clear()\n    \n    def draw_frame(self, save=True):\n        \"\"\"\n        Called after the plot is updated. Draws the canvas and saves the frame to a file.\n        Parameter:\n        - save: whether to save the file.\n        \"\"\"\n        self.fig.canvas.draw()\n        if save:\n            plt.savefig(os.path.join(self.videoDir, \"frame{:03d}.png\".format(self.curr_frame)))\n    \n    def make_movie(self, name=\"movie.mp4\"):\n        \"\"\"\n        Invokes FFMPEG to generate an mp4 file from the frames.\n        \"\"\"\n        cwd = os.getcwd()\n        os.chdir(self.videoDir)\n        os.system(\"ffmpeg -framerate {} -i frame%03d.png -r 24 -pix_fmt yuv420p {}\".format(self.framerate, name))\n        os.chdir(cwd)\n    ","sub_path":"libvis.py","file_name":"libvis.py","file_ext":"py","file_size_in_byte":13903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"31413060","text":"from copy import deepcopy\n\ndef dict_merge(target, *all_dicts):\n    \"\"\"Merge multiple dictionaries (list in all_dicts) together into target and returns it.\n    To merge into new one just call it: dict_merge({}, dict1, dict2, ...)\"\"\"\n    for some_dict in all_dicts:\n        if not isinstance(some_dict, dict):\n            return some_dict\n\n        for k, v in some_dict.iteritems():\n            if k in target and isinstance(target[k], dict):\n                dict_merge(target[k], v)\n            else:\n                target[k] = deepcopy(v)\n\n    return target\n\ndef rename_keys(target, rename_dict):\n    \"\"\"Rename the keys of a dictionary with those from another one\"\"\"\n    if not isinstance(target, dict):\n        return target\n    \n    for k,v in [(k,v) for k,v in target.iteritems() if k in rename_dict]:\n        if k in rename_dict:\n            if isinstance(rename_dict[k], dict):\n                #recurse\n                rename_keys(target[k], rename_dict[k])\n            else:\n                #rename\n                if rename_dict[k] in target:\n                    raise Exception(\"Cannot rename property %s to %s as the latter already exists\" % (k, rename_dict[k])) \n                target[rename_dict[k]] = target[k]\n                del target[k]\n    return target \n    \n","sub_path":"publisher/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"161527929","text":"import os\nimport sys\n\nfrom getch.key import Key\n\n\nclass _Getch(object):  # pragma: no cover\n    \"\"\" Gets a single character from standard input. Does not echo to the screen. 
\"\"\"\n\n class _GetchTimeOutException(Exception):\n pass\n\n def __init__(self):\n import sys\n if not os.isatty(sys.stdin.fileno()):\n self.impl = _Getch.modo_debug\n else:\n try:\n # noinspection PyUnresolvedReferences\n import msvcrt\n self.impl = msvcrt.getch()\n\n except ImportError:\n def _getch():\n import tty\n import termios\n old_settings = termios.tcgetattr(sys.stdin)\n try:\n tty.setcbreak(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)\n return ch[0]\n\n self.impl = _getch\n\n def __call__(self, block=True, translate_to_key=False, echo=False):\n if not block:\n if not os.isatty(sys.stdin.fileno()):\n return -1\n # return _Getch.modo_debug()\n else:\n import signal\n signal.signal(signal.SIGALRM, _Getch.alarm_handler)\n signal.setitimer(signal.ITIMER_REAL, 0.01)\n try:\n resp = self.impl()\n signal.alarm(0)\n except _Getch._GetchTimeOutException:\n signal.signal(signal.SIGALRM, signal.SIG_IGN)\n resp = -1\n elif translate_to_key:\n commands = []\n resp = self.impl()\n while resp != -1:\n commands.append(resp)\n resp = self(False)\n key = Key.get_key(commands)\n if key:\n resp = key\n elif len(commands) == 1:\n resp = commands[0]\n elif commands:\n resp = commands\n else:\n resp = self.impl()\n\n if echo:\n if resp not in Key:\n print(resp, end='', flush=True)\n return resp\n\n @staticmethod\n def modo_debug() -> str:\n \"\"\" Uses input to simulate getch \"\"\"\n code = [] # type list[str]\n resp = input()\n if resp:\n if len(resp) > 1 and resp.startswith('n') and (resp[1:].isdigit() or resp[1] == '-' and resp[2:].isdigit()):\n code = [chr(int(resp[1:]))]\n elif resp.startswith('k'):\n if resp in ['kAD']: # Atalhos\n return Key.ARROW_DOWN\n elif resp in ['kAU']: # Atalhos\n return Key.ARROW_UP\n elif resp in ['kAL']: # Atalhos\n return Key.ARROW_LEFT\n elif resp in ['kAR']: # Atalhos\n return Key.ARROW_RIGHT\n elif resp in ['kE']: # Atalhos\n return Key.ENTER\n elif resp in ['kES']: # Atalhos\n return Key.ESC\n else:\n for t in Key:\n if t.name == resp[1:]:\n code = [chr(_c) for _c in t.code]\n break\n else:\n code = [resp]\n else:\n code = ['']\n\n return code[0]\n\n @staticmethod\n def alarm_handler(signum, frame):\n raise _Getch._GetchTimeOutException\n\n\ndef getch(block: bool = True, translate_to_key: bool = False): # pragma: no cover\n \"\"\"\n Gets a single character from standard input.\n Does not echo to the screen.\n\n :param block: If wait for the input, if false return -1 if nothing is pressed\n :param translate_to_key: Return the Key, if any special key is pressed.\n :rtype: str | Key\n \"\"\"\n # noinspection PyProtectedMember\n return _Getch()(block=block, translate_to_key=translate_to_key)\n\n\ndef getche(block: bool = True, translate_to_key=False): # pragma: no cover\n \"\"\"\n Gets a single character from standard input.\n Echoing to the screen.\n\n :param block: If wait for the input, if false return -1 if nothing is pressed\n :param translate_to_key: Return the Key, if any special key is pressed.\n :rtype: str | Key\n \"\"\"\n # noinspection PyProtectedMember\n return _Getch()(block=block, translate_to_key=translate_to_key, echo=True)\n","sub_path":"getch/getch.py","file_name":"getch.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"428478965","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/5/29 上午7:51\n# @Author : yangsen\n# @Email : 0@keepangry.com\n# @File : 238. 
除自身以外数组的乘积.py\n# @Software: PyCharm\nclass Solution(object):\n    def productExceptSelf(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n\n        # multiply the running product from the left by the running product from the right\n        length = len(nums)\n        # the dicts could also be replaced with arrays\n        left_dict, right_dict = {}, {}\n\n        product_left, product_right = 1, 1\n        for i in range(length-1):\n            product_left = nums[i] * product_left\n            product_right = nums[length-i-1] * product_right\n            left_dict[i+1] = product_left\n            right_dict[i+1] = product_right\n\n        result = []\n        for i in range(length):\n            result.append(left_dict.get(i,1) * right_dict.get(length-i-1,1))\n        return result\n\n\nif __name__ == \"__main__\":\n    assert Solution().productExceptSelf([1]) == [1]\n    assert Solution().productExceptSelf([1,2,3,4]) == [24,12,8,6]\n","sub_path":"238. 除自身以外数组的乘积.py","file_name":"238. 除自身以外数组的乘积.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"183042856","text":"import streamlit as st\nimport exercises.about_course as about_course\nimport exercises.capping_ex as capping_ex\nimport exercises.variograms as variograms\nimport exercises.cut_off as cut_off\nimport exercises.block_modelling as block_modelling\nimport exercises.geo_interp as geo_interp\nimport exercises.reporting as reporting\nimport exercises.interp as interp\n\nmax_width = 1200\n\npadding_top = 0\npadding_right = 0\npadding_left = 0\npadding_bottom = 0\nCOLOR = '#180c52'\nBACKGROUND_COLOR = 'white'\n\nst.markdown(\n    f\"\"\"\n\n\"\"\",\n    unsafe_allow_html=True,\n    )\n\n\nst.sidebar.image(\"..//pdac2021_res_est_course_link3//055CF2A4-98DC-488C-B5A6-15CC02C9974E.png\", width=100)\n\n# Capping Exercise - interactive\n# Compositing - Q&A\n# Variograms - interactive\n# Interpolation - interactive\n# Compositing - Q&A\n\nradio_options = [\"01 About the Course\", \n                 \"02 Geological Interp\", \n                 \"03 Capping\", \n                 \"04 Variograms\", \n                 \"05 Interpolation\", \n                 \"06 Cut-Off Grade\", \n                 \"07 Reporting/Classification\"]\n\nexercise=st.sidebar.radio(\"\", \n                          options=radio_options, \n                          index=0, \n                          key=None)\n\nif exercise == radio_options[0]:\n    about_course.about_course()\nif exercise == radio_options[1]:\n    geo_interp.geo_interp()\nif exercise == radio_options[2]:\n    capping_ex.capping_ex()\nif exercise == radio_options[3]:\n    variograms.variograms()\nif exercise == radio_options[4]:\n    block_modelling.block_modelling()\n# interp.block_modelling()\nif exercise == radio_options[5]:\n    cut_off.cut_off()\nif exercise == radio_options[6]:\n    reporting.reporting()\n","sub_path":"pdac2021.py","file_name":"pdac2021.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"388616757","text":"from boardState import *\nimport copy \n\n'''\nPublic Method\nFind all legal moves, including 1 roll and repetitive hops\n'''\t\ndef computeLegalMove(board, player):\n\tpossibleMoveBoard = []\n\tif player == 1:\n\t\tmyPosition = board.PositionOne\n\telif player == 2:\n\t\tmyPosition = board.PositionTwo\n\tfor (i, j) in myPosition:\n\t\t## current piece x and y position\n\t\trollMoves = findLegalRoll(board, i,j)\n\t\tfor (nexti, nextj) in rollMoves:\n\t\t\tpossibleMoveBoard.append(((i,j),(nexti, nextj)))\n\t\tpossibleMoveBoard += computeRepetitiveHop(board, i,j)\n\n\tallPossibleMove = []\n\tfor ((oldi, oldj),(newi, newj)) in possibleMoveBoard:\n\t\tmove = (oldi, oldj, newi, newj)\n\t\tallPossibleMove.append(move)\n\treturn allPossibleMove\n\n'''\nPrivate Method\nFind legal roll 
moves, given the coordinate of a piece\n'''\t\ndef findLegalRoll(board, i, j):\n\trollMoves = []\n\tif i + 1 < board.height:\n\t\tif j + 1 < board.mid_width_max:\n\t\t## can go down right? \n\t\t\tif board.board[i+1][j+1] == 0:\n\t\t\t\trollMoves.append((i+1, j+1))\n\n\t\tif j - 1 >= 0:\n\t\t\tif board.board[i+1][j-1] == 0:\n\t\t## can go down left?\n\t\t\t\trollMoves.append((i+1, j-1))\n\n\tif i - 1 >= 0:\n\t\tif j + 1 < board.mid_width_max:\n\t\t## can go up right? \n\t\t\tif board.board[i-1][j+1] == 0:\n\t\t\t\trollMoves.append((i-1, j+1))\n\t\tif j - 1 >= 0:\n\t\t\tif board.board[i-1][j-1] == 0:\n\t\t## can go up left?\n\t\t\t\trollMoves.append((i-1, j-1))\n\n\treturn rollMoves\n\t\n'''\nPublic Method computeRepetitiveHop\nCompute repetitive hops for a piece, given the coordinates hopi and hopj\nCalls computeRepetitiveHopRecursion\n'''\t\ndef computeRepetitiveHop(board, hopi, hopj):\n\tpossibleMoveBoard = []\n\tpastPosition = {}\n\tpastPosition[(hopi, hopj)] = 1\n\tfor (basei, basej) in board.allPosition:\n\t\thopMove = findLegalHop(board, hopi, hopj, basei, basej)\n\t\tif hopMove is not None:\n\t\t\t(nexti, nextj) = hopMove\n\t\t\tif (nexti, nextj) not in pastPosition:\n\t\t\t\tpastPosition[(nexti, nextj)] = 1\n\t\t\t\tfutureBoard = copy.deepcopy(board.board)\n\t\t\t\tfutureBoard[hopi][hopj] = 0\n\t\t\t\tfutureBoard[nexti][nextj] = 1\t\n\t\t\t\tpossibleMoveBoard.append(((hopi,hopj),(nexti, nextj)))\n\t\t\t\tif board.fullGame == 0:\n\t\t\t\t\tfutureboard = boardState(options = 'smallGame', inputBoard = futureBoard)\n\t\t\t\telse:\n\t\t\t\t\tfutureboard = boardState(options = 'fullGame', inputBoard = futureBoard)\n\t\t\t\t## recurse on the simulated board so later hops see the moved piece\n\t\t\t\tcomputeRepetitiveHopRecursion(futureboard, hopi, hopj, nexti, nextj, pastPosition, possibleMoveBoard)\n\t\n\treturn possibleMoveBoard\n\n\n\n'''\nPrivate\nCompute repetitive hops for a piece, given the coordinates hopi and hopj\nFind legal hop moves, given the coordinate of a piece and the base\n'''\t\ndef computeRepetitiveHopRecursion(board, origini, originj, hopi, hopj, pastPosition, possibleMoveBoard):\n\tfor (basei, basej) in board.allPosition:\n\t\thopMove = findLegalHop(board, hopi, hopj, basei, basej)\n\t\tif hopMove is not None:\n\t\t\t(nexti, nextj) = hopMove\n\t\t\tif (nexti, nextj) not in pastPosition:\n\t\t\t\t#print \"piece \" + str(hopi) + \" \" + str(hopj) + \" going \" + str(nexti) + \" \" + str(nextj)\n\t\t\t\tpastPosition[(nexti, nextj)] = 1\n\t\t\t\tfutureBoard = copy.deepcopy(board.board)\n\t\t\t\tfutureBoard[hopi][hopj] = 0\n\t\t\t\tfutureBoard[nexti][nextj] = 1\n\t\t\t\tpossibleMoveBoard.append(((origini, originj),(nexti, nextj)))\t\n\t\t\t\tif board.fullGame == 0:\n\t\t\t\t\tfutureboard = boardState(options = 'smallGame', inputBoard = futureBoard)\n\t\t\t\telse:\n\t\t\t\t\tfutureboard = boardState(options = 'fullGame', inputBoard = futureBoard)\n\t\t\t\t#futureboard.printBoard()\n\t\t\t\tcomputeRepetitiveHopRecursion(futureboard, origini, originj, nexti, nextj, pastPosition, possibleMoveBoard)\n\n'''\nPrivate\nFind a legal hop move, given the coordinate of a piece and the base\n'''\t\ndef findLegalHop(board, hopi, hopj, basei, basej):\n\thopMove = None\n\tif isAdjacent(hopi, hopj, basei, basej):\n\t\tdiffi = basei - hopi\n\t\tdiffj = basej - hopj\n\t\tnexti = basei + diffi\n\t\tnextj = basej + diffj\n\t\tif 0 <= nexti < board.height and 0 <= nextj < board.mid_width_max:\n\t\t\tif board.board[nexti][nextj] == 0:\n\t\t\t\thopMove = (nexti, nextj)\n\t\t\t\n\treturn hopMove\n\n'''\nPrivate\nReturn whether two pieces are next to each other\n'''\t\ndef isAdjacent(Ai, Aj, Bi, 
Bj):\n\tif abs(Ai - Bi) == 1 and abs(Aj - Bj) == 1:\n\t\treturn True\n\tif abs(Ai - Bi) == 0 and abs(Aj - Bj) == 2:\n\t\treturn True\n\treturn False","sub_path":"old_code/v3/computeLegalMove.py","file_name":"computeLegalMove.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"595992172","text":"# %%\n\nlist_of_squares = [x**2 for x in range(1, 20)]\nprint(list_of_squares)\n\n# %%\n\n\ndef for_loop_matrix_multiplication(A, B):\n    new_matrix = []\n\n    for row in A:\n        new_row = []\n        for col in zip(*B):\n            new_row.append(sum([x*y for (x, y) in zip(row, col)]))\n\n        new_matrix.append(new_row)\n\n    return new_matrix\n\n\nA = [[1, 2, 3], [4, 5, 6]]\nB = [[7, 8], [9, 10], [11, 12]]\nprint(for_loop_matrix_multiplication(A, B))\n\n\n# %%\n# using list comprehension\n# to initialize a matrix\nrows = 5\ncolumns = 4\nres = [[0 for i in range(rows)] for j in range(columns)]\nres[0][0] = 1\nprint(\"matrix is\", res)\n\n\n# %%\nA = [1, 2, 3, 4, 5, 6]\nfor i in range(0, len(A), 1):\n    print(A[i])\n\n# %%\n","sub_path":"vectors_learning.py","file_name":"vectors_learning.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"352123990","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import Response, request, jsonify, json, redirect, url_for\napp = Flask(__name__)\n\nmaps = []\ncur_map = {}\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef main():\n    print(maps)\n    return render_template('index.html', main_maps = maps) \n\n\n@app.route('/my-maps/<mapname>', methods=['GET', 'POST'])\ndef mymaps(mapname):\n\n    global cur_map\n    return render_template('final.html', mapname = cur_map)\n\n@app.route('/home', methods = ['GET', 'POST'])\ndef go_home():\n    return jsonify(url=url_for('main'))\n\ndef redirect_home():\n    return jsonify(url=url_for('main'))\n\n@app.route('/save-map', methods=['GET', 'POST'])\ndef the_map():\n    \n    global cur_map\n    if request.method == \"POST\":\n        map_info = request.data\n        cur_map = json.loads(map_info)\n        for m in maps:\n            if (m['info']['map_name'] == cur_map['info']['map_name']): \n                maps.remove(m)\n\n        maps.append(cur_map)\n        cur_map = {}\n\n    return redirect_home()\n\n\n@app.route('/new-map', methods=['GET', 'POST'])\ndef newMap():\n\n    global cur_map\n    if request.method == \"POST\":\n        map_info = request.data\n        cur_map = json.loads(map_info)\n        name = cur_map['info']['map_name']\n    return jsonify(url = url_for('mymaps', mapname = name))\n\n@app.route('/load-map', methods = ['GET', 'POST'])\ndef loadmap():\n    global cur_map\n    if request.method == 'POST':\n        mpn = request.data\n        map_name = json.loads(mpn)\n        for m in maps:\n            if (m['info']['map_name'] == map_name):\n                cur_map = m\n    print(cur_map)\n    return jsonify(url = url_for('mymaps', mapname = map_name))\n\n@app.route('/delete-map', methods = ['GET', 'POST'])\ndef deletemap():\n    global cur_map\n    if request.method == 'POST':\n        mpn = request.data\n        map_name = json.loads(mpn)\n        print(maps)\n        print(map_name)\n        for m in range(len(maps)): \n            if (maps[m]['info']['map_name'] == map_name):\n                del maps[m]\n                break\n    return go_home()\n\n@app.route('/dist-Up', methods = ['GET', 'POST'])\ndef distUp():\n    global cur_map\n    if request.method == 'POST':\n        newNum = request.data\n        cur_map['info']['map_maxDist'] = newNum\n    return jsonify(newNum)\n\n@app.route('/pin-Up', methods = ['GET', 'POST'])\ndef pinUp():\n    global cur_map\n    if request.method == 
'POST':\n newNum = request.data\n cur_map['info']['map_num'] = newNum\n return jsonify(newNum)\n\n@app.route('/delete-All', methods = ['GET', 'POST'])\ndef delAll():\n global cur_map\n if request.method == 'POST':\n cur_map = {}\n return jsonify(cur_map)\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n\n\n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"236939595","text":"#!/usr/bin/env python3\n\nimport os, sys\nfrom pathlib import Path\n\n# TO DO: this could be moved out of the scripts (somehow..)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nparent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))\nsys.path.insert(0, parent_dir_path)\n\nfrom e2e_scenarios.constants import USER1_ADDRESS, USER1_SKEY_FILE_PATH\nfrom e2e_scenarios.utils import create_payment_key_pair_and_address, calculate_tx_fee, calculate_tx_ttl, send_funds, \\\n get_address_balance, wait_for_new_tip, assert_address_balance, create_stake_key_pair_and_address, \\\n create_stake_addr_registration_cert, get_key_deposit, get_pool_deposit, get_registered_stake_pools_ledger_state, \\\n create_and_register_stake_pool, write_to_file, gen_pool_metadata_hash, create_stake_addr_delegation_cert, \\\n register_stake_pool, wait_for_new_epoch\n\n# Scenario\n# 1. Step1: create 1 new payment key pair and addresses (addr0.addr)\n# 2. Step2: create 1 new stake key pair and addresses (addr0_stake.addr)\n# 3. Step3: create and submit the stake addresses registration certificate\n# 4. Step4: send some funds from user1 (the faucet) to addr0.addr\n# 5. Step5: create and register 1 stake pool with 1 owner\n# 6. Step6: create 1 stake addresses registration certificate\n# 7. Step7: create 1 stake addresses delegation certificate in order to meet the pledge requirements\n# 8. Step8: submit the 2 certificates through a tx - stake address registration, stake address delegation\n# 9. Step9: check that the pool was registered on chain\n# 10. Step10: update the pool parameters by resubmitting the pool registration certificate\n# TO DO: updating the pool parameters might cost less than pool registration (to validate this)\n# 11. 
Step11: wait_for_new_epoch and check that the pool parameters were correctly updated on chain\n\naddr_name = \"owner\"\nnode_name = \"poolA\"\npool_pledge = 4567\npool_cost = 3\npool_margin = 0.01\npool_pledge_updated = 1\npool_cost_updated = 1000000\npool_margin_updated = 0.9\n\npool_metadata = {\n \"name\": \"QA E2E test\",\n \"description\": \"Shelley QA E2E test Test\",\n \"ticker\": \"QA1\",\n \"homepage\": \"www.test1.com\"\n}\npool_metadata_url = \"https://www.where_metadata_file_is_located.com\"\n\nprint(\"Creating a new folder for the files created by the current test...\")\ntmp_directory_for_script_files = \"tmp_\" + sys.argv[0].split(\".\")[0]\nPath(tmp_directory_for_script_files).mkdir(parents=True, exist_ok=True)\n\nprint(\"Add the pool metadata into a different file\")\npool_metadata_file = write_to_file(tmp_directory_for_script_files, pool_metadata, \"pool_metadata.json\")\n\nprint(f\"====== Step1: create 1 new payment key pair and addresses ({addr_name}.addr)\")\naddr, addr_vkey_file, addr_skey_file = create_payment_key_pair_and_address(tmp_directory_for_script_files, addr_name)\nprint(f\"Address successfully created - {addr}; {addr_vkey_file}; {addr_skey_file}\")\n\nprint(f\"====== Step2: create 1 new stake key pair and addresses ({addr_name}_stake.addr)\")\ncreated_stake_addresses_dict = {}\nstake_addr, stake_addr_vkey_file, stake_addr_skey_file = create_stake_key_pair_and_address(tmp_directory_for_script_files, addr_name)\nprint(f\"Stake address successfully created - {stake_addr}; {stake_addr_vkey_file}; {stake_addr_skey_file}\")\n\nprint(f\"====== Step3: create 1 stake addresses registration cert\")\nstake_addr_reg_cert_file = create_stake_addr_registration_cert(tmp_directory_for_script_files, stake_addr_vkey_file, addr_name)\nprint(f\"Stake address registration certificate created - {stake_addr_reg_cert_file}\")\n\nprint(f\"====== Step4: send some funds from user1 (the faucet) to {addr_name}.addr\")\nkey_deposit = get_key_deposit()\npool_deposit = get_pool_deposit()\ntx_ttl = calculate_tx_ttl()\nsrc_address = USER1_ADDRESS\ndst_addresses_list = [addr]\ntx_fee = calculate_tx_fee(src_address, dst_addresses_list, tx_ttl)\n\ntransferred_amounts_list = [int(4 * tx_fee + key_deposit + pool_deposit + pool_pledge)]\nsigning_keys_list = [USER1_SKEY_FILE_PATH]\n\nsrc_add_balance_init = get_address_balance(src_address)\ndst_init_balance = get_address_balance(dst_addresses_list[0])\n\nprint(f\"Send {transferred_amounts_list} Lovelace from {src_address} to {dst_addresses_list}\")\nsend_funds(src_address, tx_fee, tx_ttl,\n destinations_list=dst_addresses_list,\n transferred_amounts=transferred_amounts_list,\n signing_keys=signing_keys_list)\n\nwait_for_new_tip()\nwait_for_new_tip()\n\nprint(f\"Check that the balance for source address was correctly updated\")\nassert_address_balance(src_address, src_add_balance_init - tx_fee - transferred_amounts_list[0])\n\nprint(f\"Check that the balance for destination address was correctly updated\")\nassert_address_balance(dst_addresses_list[0], dst_init_balance + transferred_amounts_list[0])\n\nprint(f\"====== Step5: create and register 1 stake pool with 1 owner\")\npool_owner = [addr, addr_vkey_file, addr_skey_file, stake_addr, stake_addr_vkey_file, stake_addr_skey_file]\npool_metadata_hash = gen_pool_metadata_hash(pool_metadata_file)\nstake_pool_id, node_cold_vkey_file, node_cold_skey_file, node_vrf_vkey_file = \\\n create_and_register_stake_pool(tmp_directory_for_script_files, node_name, pool_pledge, pool_cost, pool_margin,\n pool_owner, 
pool_metadata=[pool_metadata_url, pool_metadata_hash])\n\nprint(f\"====== Step6: create 1 stake addresses registration certificate\")\nstake_addr_reg_cert_file = create_stake_addr_registration_cert(tmp_directory_for_script_files, stake_addr_vkey_file, addr_name)\nprint(f\"Stake address registration certificate created - {stake_addr_reg_cert_file}\")\n\nprint(f\"====== Step7: create 1 stake addresses delegation certificate in order to meet the pledge requirements\")\nstake_addr_delegation_cert_file = create_stake_addr_delegation_cert(tmp_directory_for_script_files, stake_addr_vkey_file,\n node_cold_vkey_file, addr_name)\nprint(f\"Stake pool owner-delegation certificate created - {stake_addr_delegation_cert_file}\")\n\nprint(f\"====== Step8: submit the 2 certificates through a tx - stake address registration, stake address delegation\")\nsrc_address = addr\ncertificates_list = [stake_addr_reg_cert_file, stake_addr_delegation_cert_file]\nsigning_keys_list = [addr_skey_file, stake_addr_skey_file, node_cold_skey_file]\ntx_ttl = calculate_tx_ttl()\ntx_fee = calculate_tx_fee(src_address, [src_address], tx_ttl)\nsrc_add_balance_init = get_address_balance(src_address)\n\nsend_funds(src_address, tx_fee + key_deposit, tx_ttl,\n certificates=certificates_list,\n signing_keys=signing_keys_list)\n\nwait_for_new_tip()\nwait_for_new_tip()\n\nprint(f\"Check that the balance for source address was correctly updated\")\nassert_address_balance(src_address, src_add_balance_init - tx_fee)\n\nprint(f\"====== Step9: check that the pool was registered on chain\")\nif stake_pool_id not in list(get_registered_stake_pools_ledger_state().keys()):\n print(f\"ERROR: newly created stake pool id is not shown inside the available stake pools; \"\n f\"\\n\\t- Pool ID: {stake_pool_id} vs Existing IDs: {list(get_registered_stake_pools_ledger_state().keys())}\")\n exit(2)\nelse:\n print(f\"{stake_pool_id} is included into the output of ledger_state() command\")\n\non_chain_stake_pool_details = get_registered_stake_pools_ledger_state().get(stake_pool_id)\non_chain_pool_details_errors_list = []\nif on_chain_stake_pool_details['owners'][0] not in stake_addr:\n on_chain_pool_details_errors_list.append(f\"'owner' value is different than expected; \"\n f\"Expected: {stake_addr} vs Returned: {on_chain_stake_pool_details['owners'][0]}\")\n\nif on_chain_stake_pool_details['cost'] != pool_cost:\n on_chain_pool_details_errors_list.append(f\"'cost' value is different than expected; \"\n f\"Expected: {pool_cost} vs Returned: {on_chain_stake_pool_details['cost']}\")\n\nif on_chain_stake_pool_details['margin'] != pool_margin:\n on_chain_pool_details_errors_list.append(f\"'margin' value is different than expected; \"\n f\"Expected: {pool_margin} vs Returned: {on_chain_stake_pool_details['margin']}\")\n\nif on_chain_stake_pool_details['pledge'] != pool_pledge:\n on_chain_pool_details_errors_list.append(f\"'pledge' value is different than expected; \"\n f\"Expected: {pool_pledge} vs Returned: {on_chain_stake_pool_details['pledge']}\")\n\nif on_chain_stake_pool_details['metadata'] is None:\n on_chain_pool_details_errors_list.append(f\"'metadata' value is different than expected; \"\n f\"Expected: None vs Returned: {on_chain_stake_pool_details['metadata']}\")\n\nif on_chain_stake_pool_details['relays'] != []:\n on_chain_pool_details_errors_list.append(f\"'relays' value is different than expected; \"\n f\"Expected: [] vs Returned: {on_chain_stake_pool_details['relays']}\")\n\nif len(on_chain_pool_details_errors_list) > 0:\n 
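# list every mismatched pool field so the failure is easy to diagnose\n    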
print(f\"{len(on_chain_pool_details_errors_list)} pool parameter(s) have different values on chain than expected:\")\n for er in on_chain_pool_details_errors_list:\n print(f\"\\tERROR: {er}\")\nelse:\n print(f\"All pool details were correctly registered on chain for {stake_pool_id} - {on_chain_stake_pool_details}\")\n\nprint(f\"====== Step10: update the pool parameters by resubmitting the pool registration certificate\")\npool_reg_cert_file = register_stake_pool(pool_owner, pool_pledge_updated, pool_cost_updated, pool_margin_updated,\n node_vrf_vkey_file, node_cold_vkey_file, node_cold_skey_file,\n tmp_directory_for_script_files, node_name,\n pool_metadata=[pool_metadata_url, pool_metadata_hash])\n\nprint(f\"====== Step11: wait_for_new_epoch and check that the pool parameters were correctly updated on chain for pool id: {stake_pool_id}\")\nwait_for_new_epoch()\non_chain_stake_pool_details = get_registered_stake_pools_ledger_state().get(stake_pool_id)\non_chain_pool_details_errors_list = []\nif on_chain_stake_pool_details['owners'][0] not in stake_addr:\n on_chain_pool_details_errors_list.append(f\"'owner' value is different than expected; \"\n f\"Expected: {stake_addr} vs Returned: {on_chain_stake_pool_details['owners'][0]}\")\n\nif on_chain_stake_pool_details['cost'] != pool_cost_updated:\n on_chain_pool_details_errors_list.append(f\"'cost' value is different than expected; \"\n f\"Expected: {pool_cost_updated} vs Returned: {on_chain_stake_pool_details['cost']}\")\n\nif on_chain_stake_pool_details['margin'] != pool_margin_updated:\n on_chain_pool_details_errors_list.append(f\"'margin' value is different than expected; \"\n f\"Expected: {pool_margin_updated} vs Returned: {on_chain_stake_pool_details['margin']}\")\n\nif on_chain_stake_pool_details['pledge'] != pool_pledge_updated:\n on_chain_pool_details_errors_list.append(f\"'pledge' value is different than expected; \"\n f\"Expected: {pool_pledge_updated} vs Returned: {on_chain_stake_pool_details['pledge']}\")\n\nif on_chain_stake_pool_details['metadata'] is None:\n on_chain_pool_details_errors_list.append(f\"'metadata' value is different than expected; \"\n f\"Expected: None vs Returned: {on_chain_stake_pool_details['metadata']}\")\n\nif on_chain_stake_pool_details['relays'] != []:\n on_chain_pool_details_errors_list.append(f\"'relays' value is different than expected; \"\n f\"Expected: [] vs Returned: {on_chain_stake_pool_details['relays']}\")\n\nif len(on_chain_pool_details_errors_list) > 0:\n print(f\"{len(on_chain_pool_details_errors_list)} pool parameter(s) have different values on chain than expected:\")\n for er in on_chain_pool_details_errors_list:\n print(f\"\\tERROR: {er}\")\nelse:\n print(f\"All pool details were correctly registered on chain for {stake_pool_id} - {on_chain_stake_pool_details}\")\n","sub_path":"python_scripts/e2e_scenarios/update_stake_pool_parameteres_test.py","file_name":"update_stake_pool_parameteres_test.py","file_ext":"py","file_size_in_byte":12196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"605135733","text":"Door_O = \"Door:OPEN\"\nDoor_C = \"Door:CLOSED\"\nMOTION = \"Motion:YES\"\nNO_MOTION = \"Motion:NO\"\nHAS_KEY = \"Door:KEY\"\n\n# since cahnges are consecutive, use a tuple to represent a event which occurs when all element is true\nowner_returning = (False,False,False) #motion:no, door:open, door:key\nowner_leaving = (False,False,False) #motion:yes, door:open, door:key\n\ndef is_event(e):\n rs = e[0]\n for i in e:\n rs = rs and i\n return 
rs\n# reset event list to False everywhere\ndef reset_event(e):\n    for i in range(len(e)):\n        e[i] = False\n\ndef set_event(e,index):\n    e[index] = True\n\ndb = open(\"../db.txt\", \"r\")\nfor line in db.readlines():\n    if MOTION in line:\n        set_event(owner_leaving,0)\n    if NO_MOTION in line:\n        set_event(owner_returning,0)\n    if Door_O in line:\n        set_event(owner_leaving,1)\n        set_event(owner_returning,1)\n    if HAS_KEY in line:\n        set_event(owner_leaving,2)\n        set_event(owner_returning,2)\n\n    if is_event(owner_leaving):\n        print(\"Owner leaves\")\n        reset_event(owner_leaving)\n\n    if is_event(owner_returning):\n        print(\"Owner returns\")\n        reset_event(owner_returning)","sub_path":"test/read_db.py","file_name":"read_db.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"392100236","text":"import numpy as np\nfrom ..common.checkers import check_box, check_col, check_row\n\n\ndef create_grid(max_clues):\n    grid = np.zeros((9, 9), dtype=int)\n    mem = []\n\n    while max_clues != 0:\n        new_i = np.random.randint(0, 9)\n        new_j = np.random.randint(0, 9)\n\n        i_j = [new_i, new_j]\n\n        if i_j in mem:\n            continue\n\n        mem.append(i_j)\n\n        new_clue = np.random.randint(1, 10)\n\n        if grid[new_i][new_j] == 0:\n            if not((check_row(new_clue, grid[new_i])) or\n                   (check_col(new_clue, grid, new_j)) or\n                   (check_box(new_clue, grid, new_i, new_j))):\n                grid[new_i][new_j] = new_clue\n                max_clues -= 1\n\n    return grid\n\n\ndef print_grid(grid):\n    for row in range(len(grid)):\n        for j in range(len(grid[row])):\n            if (((j + 1) % 3) == 0) and ((j + 1) != len(grid[row])):\n                print(f' {grid[row][j]} |', end='')\n            else:\n                print(f' {grid[row][j]}', end='')\n        if (((row + 1) % 3) == 0) and ((row + 1) != len(grid)):\n            print('\\n-----------------------')\n        else:\n            print('')\n","sub_path":"pydoku/grid/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"427434287","text":"# -*- coding=utf-8 -*-\nfrom flask import Flask, render_template, session,\\\n    redirect, url_for, flash\n# from flask.ext.sqlalchemy import SQLAlchemy\nimport os,sys\nfrom flask.ext.login import LoginManager\nfrom config import basedir\nimport MySQLdb\nimport traceback\n\nfrom urllib import quote_plus, unquote_plus\n\napp = Flask(__name__)\napp.config.from_object('config')\napp.config[\"APPLICATION_ROOT\"] = 'apps'\n\n# for OperationalError: (2006, 'MySQL server has gone away')\nclass DB:\n    conn = None\n    cursor = None\n    def __init__(self):\n        self.connect()\n\n    def connect(self):\n        self.conn = MySQLdb.connect(host = \"localhost\",\n                                    user = \"root\",\n                                    passwd = \"youareBT\",\n                                    db = \"leezing_com\",\n                                    charset=\"utf8\")\n\n    def execute(self, sql):\n        try:\n            cursor = self.conn.cursor()\n            cursor.execute(sql)\n        except (AttributeError, MySQLdb.OperationalError):\n            print(\"Error in SQL\")\n            traceback.print_exc()\n            self.connect()\n            cursor = self.conn.cursor()\n            cursor.execute(sql)\n        return cursor\n\n    def close(self): \n        if(self.cursor):\n            self.cursor.close()\n        self.conn.commit()\n        self.conn.close()\n\n    def commit(self):\n        self.conn.commit()\n\n    def rollback(self):\n        self.conn.rollback()\n\ndb = DB()\n# cursor = db.cursor()\n\nlm = LoginManager()\nlm.init_app(app)\nlm.login_view = 'login'\n\n\nfrom functools import wraps\ndef myLoginRequired(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if session.get(\"user\") is None:\n            return redirect(url_for('login'))\n        return f(*args, **kwargs)\n    
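# return the wrapped view so @myLoginRequired can guard Flask routes\n    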
return decorated_function\n\n\ndef zyLoginRequired(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if session.get(\"zyUser\") is None:\n            return redirect(url_for('zy.login'))\n        return f(*args, **kwargs)\n    return decorated_function\n\n\n\ndef myEncode(oldStr):\n    if not oldStr:\n        return oldStr\n    oldStr = str(oldStr)\n    string = oldStr.encode(\"utf-8\")\n    string = quote_plus(string)\n    string = string.decode(\"utf-8\")\n    return string\n\n\ndef myDecode(oldStr):\n    if not oldStr:\n        return oldStr\n    oldStr = str(oldStr)\n    string = oldStr.encode(\"utf-8\")\n    string = unquote_plus(string)\n    string = string.decode(\"utf-8\")\n    return string\n\n\nfrom apps import baseview\nreload(sys)\nsys.setdefaultencoding('utf8')\ntext_factory = str\n\nfrom apps.blog import blog_views\napp.register_blueprint(blog_views.mod)\n\nfrom apps.zy import zy_views\napp.register_blueprint(zy_views.mod)\n","sub_path":"apps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"156527788","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 10 10:36:37 2014\nNAME: TreeAnalysis.py\nAnalyzing the relationship between the number of training samples and the \nclassifying accuracy\nThe file iterates through the number of samples (2 step each time) and calculates\nthe accuracy. Then displays the graph which indicates the relationship -> determine\nthe optimal number of samples \n@author: joseph\n\"\"\"\n\nimport numpy as np\nfrom numpy import *\nfrom DataGenerating import *\nfrom DecisionTree import *\n\n# Initializing\nfnf = 10\nAnumSample = arange(75-10 + 1)\nAnumSample = AnumSample + 10\nAnumSample = AnumSample*2\n\nAcc = zeros(AnumSample.size)\n\nfor i in range(AnumSample.size):\n    # ============ LOADING DATA (TRAINING AND TEST)=========\n    numSample = AnumSample[i]\n    filename = '/home/jo/Dropbox/DecisionTree/iris.csv' \n    LoadData(filename, numSample) # Training.csv, Test.csv \n    DataGen(fnf, numSample) # TrainingSet.csv, TrainingLabels.csv\n                            # TestSet.csv, TestLabels.csv\n    # ============ TRAINING TREE ===========================\n    # LOAD TRAINING DATA\n    data = np.genfromtxt('/home/jo/Dropbox/DecisionTree/TrainingSet.csv', dtype = float, delimiter = ',')\n    labels = np.genfromtxt('/home/jo/Dropbox/DecisionTree/TrainingLabels.csv', dtype = float, delimiter = ',')\n    \n    # Type changing for memory saving\n    data = array(data, dtype = int8)\n    labels = array(labels, dtype = int8)\n    \n    # Training Tree\n    tree = 
DecisionTree(data, labels)\n    \n    # ============= TEST TREE ==================================\n    # LOADING TESTING FILES\n    testTemp = np.genfromtxt('/home/jo/Dropbox/DecisionTree/TestSet.csv', dtype = float, delimiter = ',')\n    testLabels = np.genfromtxt('/home/jo/Dropbox/DecisionTree/TestLabels.csv', dtype = float, delimiter = ',')\n    \n    # CHANGE TYPE OF DATA (FLOAT TO INT)\n    testTemp = array(testTemp, dtype = int8)\n    testLabels = array(testLabels, dtype = int8)\n    \n    # MERGING\n    test = column_stack((testTemp, testLabels))\n    \n    # Calculate Accuracy \n    Acc[i] = 100*Accuracy(tree, test) # Each tree and each test set gives out an accuracy\n\n\n# Plot\nimport pylab as pl\npl.plot(AnumSample, Acc)\npl.show()\n","sub_path":"DecisionTree/TreeAnalysis.py","file_name":"TreeAnalysis.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"265301064","text":"APP_EXE_STATUS_SUCCESS = '20200'\nAPP_EXE_STATUS_FAIL = '0'\nACTIVATION_SUCCESS = '10200'\nACTIVATION_FAIL = '0'\nRELEASE_SUCCESS = '30200'\nRELEASE_FAIL = '0'\nSTR_BLANK = ''\nINVALID_FORMAT_CODE='invalid_format'\nINVALID_FORMAT_MESSAGE = 'Invalid format'\nPATTERN_WINDOWS_PRODUCT_ID = '([A-Z0-9-]{6})([A-Z0-9-]{6})([A-Z0-9-]{6})([A-Z0-9-]{5})'\nPATTERN_MAC_ADDRESS = '([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})'\nPATTERN_DRIVE_SERIAL_NUMBER = '[a-zA-Z0-9-]'\nPATTERN_PATTERN_ALPHANUMERIC = '[a-zA-Z0-9]'","sub_path":"gtb/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"304072225","text":"import requests, time, json, logging, urllib, random, traceback\r\n\r\n\r\ndef is_a5_ok(proxies, timeout_seconds=2):\r\n    try:\r\n        res = requests.get('https://www.hop.com/', proxies=proxies, timeout=timeout_seconds)\r\n        if res.status_code != 200 and res.status_code != 403:\r\n            return False\r\n        return True\r\n    except Exception as e:\r\n        print(e)\r\n        return False\r\n\r\n\r\ndef _get_proxy():\r\n    proxy = ''\r\n    try:\r\n        li = json.loads(requests.get('http://dx.proxy.jiaoan100.com/proxy/getproxy?carrier=be').text)\r\n        logging.info('Proxy Num: ' + str(len(li)))\r\n        logging.info(str(li))\r\n        proxy = random.choice(li) or ''\r\n        print(proxy)\r\n    except Exception as e:\r\n        print(e)\r\n        traceback.print_exc()\r\n        logging.info('get proxy error....')\r\n    finally:\r\n        return proxy or ''\r\n\r\n\r\nif __name__ == '__main__':\r\n    while True:\r\n        ip_port = _get_proxy()\r\n        proxies = {\r\n            'http': 'http://%s' % ip_port,\r\n            'https': 'http://%s' % ip_port\r\n        }\r\n        print(is_a5_ok(proxies))\r\n        time.sleep(1)\r\n","sub_path":"proxy/carrier/a5.py","file_name":"a5.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"317274825","text":"# This script gathers the climate data from daymet for many pixels (km^2) and writes it to\n# a correctly-formatted csv for the MoLS\n\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport ssl\nimport math\nimport datetime\n\nglobal ctx\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\n\n# LAT & LON BOUNDS (16 km x 9 km)\n# 31.391, -110.985 (TOP LEFT)\n# 31.391, -110.895 (TOP RIGHT)\n# 31.231, -110.985 (BOTTOM LEFT)\n# 31.231, -110.895 (BOTTOM RIGHT)\n\n# YEARS\n# 1980-2015\n\ndef drange(start, stop, step):\r\n    r = start\r\n    while r < stop:\r\n        yield r\r\n        r += step\r\n\ndef build_url(lat, lon, yr):\n    return \"https://daymet.ornl.gov/single-pixel/send/query?daacid=38319&lat=\" + str(lat) + \"&lon=\" + str(\n        lon) + 
\"&measuredParams=tmax%2Ctmin%2Cprcp%2Cvp&year=\" + str(yr)\n\n\ndef fetch_data(url):\n # url = build_url(lat, lon, yr)\n page = urllib2.urlopen(url, context=ctx)\n soup = BeautifulSoup(page, \"html.parser\")\n\n data = []\n\n table_body = soup.find_all('table')[1]\n\n rows = table_body.find_all('tr')\n\n for row in rows:\n cols = [element.text.strip() for element in (row.find_all('td'))]\n data.append([float(element) for element in cols if element])\n return data\n\n\ndef get_month(day):\n if day < 32:\n return 1, day\n if day < 60:\n return 2, day - 31\n if day < 91:\n return 3, day - 59\n if day < 121:\n return 4, day - 90\n if day < 152:\n return 5, day - 120\n if day < 182:\n return 6, day - 151\n if day < 213:\n return 7, day - 181\n if day < 244:\n return 8, day - 212\n if day < 274:\n return 9, day - 243\n if day < 305:\n return 10, day - 273\n if day < 335:\n return 11, day - 304\n else:\n return 12, day - 334\n\n\ndef clean_data(data,csv):\n for row in data:\n if row == []:\n continue\n year, dayOfYear, PrecipMM, T_max, T_min, vp = row\n year = int(year)\n vp = float(vp)/1000.0\n month, day = get_month(dayOfYear)\n T_ave = (T_max + T_min) / 2\n PrecipCM = PrecipMM / 10\n svp = .611 * math.e ** (5321 * ((1 / 273.0) - (1 / (T_ave + 273.15))))\n rh_ave = round((vp / svp) * 100, 2)\n\n csv.write(str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(T_max) + \",\" + str(T_min) + \",\" +\n str(PrecipMM) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n\n\nwith open(\"data.csv\", \"a\") as csv:\n csv.write(\"year,month,day,T_max,T_min,Precip(mm),T_ave,Precip(cm),rh_ave\\n\")\n with open(\"logFile.txt\", \"a\") as log:\n print (\"process started at: \" + str(datetime.datetime.now()) + \"\\n\")\n log.write(\"process started at: \" + str(datetime.datetime.now()) + \"\\n\")\n for yr in range(1980, 2016):\n for lat in drange(31.231, 31.401, .01):\n for lon in drange(-110.985, -110.905, .01):\n clean_data(fetch_data(build_url(lat , lon, yr)), csv)\n\n log.write(\"completed data for \" + str(yr) + \" at:\" + str(datetime.datetime.now()) + \"\\n\")\n print (\"completed data for \" + str(yr) + \" at:\" + str(datetime.datetime.now()) + \"\\n\")\n","sub_path":"ZIKA/ZIKA.py","file_name":"ZIKA.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"203607521","text":"# !usr/bin/python\n# classify recommendation, for cold starting\n# author:zhangyx\n# since 2016-09-15\n\n# cmd.sh\n# $SPARK_HOME/bin/spark-submit --master spark://10.105.247.189:7077 classifyRecommendation.py > log.txt\n#\n\n\"\"\"\nGlobal Classify Recommendation.\nSolving User Code Start.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport MySQLdb\nimport redis\nimport json\nimport itertools\nfrom operator import itemgetter\nfrom pyspark import SparkContext\n\nfrom config import *\nfrom utils import *\nfrom recommendProducts import *\n\nCHOOSE = 5.0\nBETA = 5.0\n\n\ndef Normalize(data_dic):\n\n if len(data_dic) == 0:\n return data_dic\n\n if len(data_dic) == 1:\n for key, value in data_dic.iteritems():\n data_dic[key] = 0.5\n return data_dic\n\n min = 0.0\n max = 0.0\n for key, value in data_dic.iteritems():\n #if value < min:\n # min = value\n if value > max:\n max = value\n\n #print(\"min \", min, \" max \", max)\n for key, value in data_dic.iteritems():\n data_dic[key] = (data_dic[key] - min) / (max - min)\n\n return data_dic\n\ndef UserClassifyRecommendation(sc):\n\n 
users_list_fromDB = LoadDataFromSQL(\"charid, sex, age\", \"dt_user_charbase\", \"\")\n    usersRDD = sc.parallelize(users_list_fromDB).map(lambda l: (int(l[0]), int(l[1]), int(l[2])))\n    users_list_fromDB = []\n\n    items_list_fromDB = LoadDataFromSQL(\"id\", \"dt_product_clothing\", \"\")\n    itemsRDD = sc.parallelize(items_list_fromDB).map(lambda l: int(l[0]))\n    items_list_fromDB = []\n    items_list = itemsRDD.collect()\n\n    #\n    # generate user classifications\n    #\n    gender_list = [1, 2] # M:1, F:2\n    age_list = []\n    for i in range(0, 60):\n        age_list.append(i)\n    age_list.append(60)\n    user_classification = itertools.product(gender_list, age_list)\n\n    #\n    # Load ratings\n    #\n    data = sc.textFile(RATINGS_BASE_PATH)\n    ratings = data.map(lambda l: l.split(','))\\\n        .map(lambda l: (int(l[0]), int(l[1]), float(l[2])))\n\n    ratings = ratings.filter(lambda l: l[2] >= CHOOSE)\n    ratings_list = ratings.collect()\n\n    #\n    # Calculate Ni for each item\n    #\n    Ni_dic = {}\n    for k in range(len(items_list)):\n        itemID = items_list[k]\n        Ni_dic[itemID] = []\n\n    for rating in ratings_list:\n        itemID = rating[1]\n        contain = Ni_dic.get(itemID)\n        if contain is None:\n            print(\"cannot find %d\\n\"%itemID)\n        else:\n            contain.append(rating[0])\n\n    age_score_dic_group = {}\n    gender_score_dic_group = {}\n    #occupation_score_dic_group = {}\n\n    #\n    # Now I will calculate p(f, i) = |N(i) and U(f)| / (N(i) + BETA)\n    #\n\n    # Calculate scores for each age range\n    for age in age_list:\n        Uf = usersRDD.filter(lambda l: int(l[2]) == age).map(lambda l: int(l[0])).collect()\n        score_dic = {}\n        for itemID in items_list:\n            JoinNiUf = list(set(Ni_dic[itemID]) & set(Uf))\n            if len(JoinNiUf) > 0:\n                score = len(JoinNiUf) / (len(Ni_dic[itemID]) + BETA)\n                score_dic[itemID] = score\n        Normalize(score_dic)\n        age_score_dic_group[age] = score_dic\n\n        #sorted_score_list = sorted(score_dic.iteritems(), key=itemgetter(1), reverse=True)\n        #print(\"Sorted score list: age \", age)\n        #for i in range(len(sorted_score_list)):\n        #    itemID = sorted_score_list[i][0]\n        #    print(sorted_score_list[i])\n        #    #print(itemtitles_list[itemID-1])\n\n    # Calculate scores for each gender\n    for gender in gender_list:\n        Uf = usersRDD.filter(lambda l: int(l[1]) == gender).map(lambda l: int(l[0])).collect()\n        score_dic = {}\n        for itemID in items_list:\n            JoinNiUf = list(set(Ni_dic[itemID]) & set(Uf))\n            if len(JoinNiUf) > 0:\n                score = len(JoinNiUf) / (len(Ni_dic[itemID]) + BETA)\n                score_dic[itemID] = score\n        Normalize(score_dic)\n        gender_score_dic_group[gender] = score_dic\n\n        #sorted_score_list = sorted(score_dic.iteritems(), key=itemgetter(1), reverse=True)\n        #print(\"Sorted score list: gender \", gender)\n        #for i in range(len(sorted_score_list)):\n        #    itemID = sorted_score_list[i][0]\n        #    print(sorted_score_list[i])\n        #    #print(itemtitles_list[itemID-1])\n\n    #\n    # Merge \n    #\n    classify_result_dic = {}\n\n    for classification in user_classification:\n        gender = classification[0]\n        age = classification[1]\n\n        merged_score_dic = {}\n        for (itemID, score) in age_score_dic_group[age].items():\n            if merged_score_dic.has_key(itemID):\n                merged_score_dic[itemID] += score\n            else:\n                merged_score_dic[itemID] = score\n\n        for (itemID, score) in gender_score_dic_group[gender].items():\n            if merged_score_dic.has_key(itemID):\n                merged_score_dic[itemID] += score\n            else:\n                merged_score_dic[itemID] = score\n\n        merged_score_list = sorted(merged_score_dic.iteritems(), key=itemgetter(1), reverse=True) # (itemID, score)\n        merged_list = map(lambda l: l[0], merged_score_list) # itemID\n\n        classify_result_dic[\"%s_%s\"%(gender, 
age)] = merged_list\n\n        #print(\"\\nMerged score list:\")\n        #print(\"%s_%s\\n\"%(gender, age))\n        #for i in range(len(merged_score_list)):\n        #    itemID = merged_score_list[i][0]\n        #    print(merged_score_list[i])\n        #    #print(item_titles_list[itemID-1])\n\n    return classify_result_dic\n\n#\n# MAIN\n#\nif __name__ == \"__main__\": \n\n    # new products\n    all_new_products = []\n    now_time = datetime.datetime.now()\n    start_time = now_time + datetime.timedelta(days=(-1)*NEW_START_DAY)\n    #new products\n    calculateNewProducts(all_new_products,start_time)\n    #all_new_products = [str(item) for item in all_new_products]\n    print(\"new products:\")\n    print(all_new_products)\n\n    # select the products that are available to try\n    all_prefer_products = set()\n    all_prefer_list=[]\n    calculateNewProducts(all_prefer_list,\"0000-00-00 00:00:00\")\n    for product in all_prefer_list:\n        all_prefer_products.add(product)\n\n    #hot products\n    all_hot_products = []\n    calculateHotProducts(all_hot_products)\n    \n    all_hot_products = filter(lambda x: True == (x in all_prefer_products), all_hot_products)\n    print(\"hot products:\")\n    print(all_hot_products)\n\n    # Init REDIS\n    pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD,db=0)\n    redisConn = redis.Redis(connection_pool=pool)\n\n    # Recommend products for each user-classification\n    sc = SparkContext(appName=\"ClassifyRecommend\")\n    classify_result_dic = UserClassifyRecommendation(sc)\n\n    file_result = open(CLASSIFY_RESULT_PATH, 'w')\n\n    for classification, classify_products in classify_result_dic.iteritems():\n\n        if len(classify_products) > CLASSIFY_RECOMMEND_K:\n            classify_products = classify_products[0: CLASSIFY_RECOMMEND_K]\n        #classify_products = [str(item) for item in classify_products]\n        #print(\"\\nMerged score list:\")\n        #print(\"%s\\n\"%(classification))\n        #for i in range(len(score_list)):\n        #    itemID = score_list[i]\n        #    print(score_list[i])\n\n        hot_products = all_hot_products\n        new_products = all_new_products\n        \n        # TODO: filter out the products whose prefer flag is not 1\n        classify_products = filter(lambda x: True == (x in all_prefer_products), classify_products)\n\n        #print(\"\\n\\nfilter results:\")\n        #print(new_products)\n        #print(hot_products)\n        #print(classify_products)\n\n        MergeRecommendResults(classification, hot_products, new_products, classify_products, \"global_classify:\", redisConn, file_result)\n\n\n    # END\n    # VERIFY DATA IN REDIS\n    #print(\"\\n\\nVERIFY DATA IN REDIS\\n\\n\")\n    #for classification, score_list in classify_result_dic.iteritems():\n    #    print(\"global_classify:\" + classification)\n    #    print(redisConn.get(\"global_classify:\" + classification))\n\n    #keys = redisConn.keys(\"global_classify:*\")\n\n    #for key in keys:\n    #    print(key)\n    #print(redisConn.get(key))\n\n    file_result.close()\n    print(\"Recommend Finished!\\n\")\n","sub_path":"src/recommendation/classifyRecommendation.py","file_name":"classifyRecommendation.py","file_ext":"py","file_size_in_byte":8090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"212438736","text":"import argparse\nimport io\nimport os\nimport re\nimport subprocess\n\n# Directory names to ignore when looking for JavaScript files.\nignore_dirs = [\n    \".git\",\n    \"publish\",\n    \"ThirdParty\",\n    \"node_modules\",\n]\n\n# All valid two letter locale names.\nall_locales = set([\"en\", \"cn\", \"de\", \"fr\", \"ja\", \"ko\"])\n\n# Locales that are in zoneRegex object blocks.\nzoneregex_locales = set([\"en\", \"cn\", \"ko\"])\n\n# Locales that are not in zoneRegex object 
blocks.\nnon_zoneregex_locales = all_locales - zoneregex_locales\n\n# Where to start looking for files.\ndef base_path():\n    return os.path.relpath(os.path.join(os.path.dirname(__file__), \"..\\\\\"))\n\n\n# Return a list of all javascript filenames found under base_path()\ndef find_all_javascript_files(filter):\n    javascript_files = []\n    for root, dirs, files in os.walk(base_path()):\n        dirs[:] = [d for d in dirs if d not in ignore_dirs]\n\n        for file in files:\n            if not file.endswith(\".js\"):\n                continue\n            full_path = os.path.join(root, file)\n            if filter not in full_path:\n                continue\n            javascript_files.append(full_path)\n    return javascript_files\n\n\n# Print missing translations in |file| for |locales|\n# TODO: should this just be in javascript with the rest of the tests?\ndef parse_javascript_file(file, locales):\n    locales = set(locales)\n\n    with open(file, encoding=\"utf-8\") as fp:\n        keys = []\n        open_match = None\n\n        open_obj_re = re.compile(r\"(\\s*)(.*{)\\s*\")\n        key_re = re.compile(r\"\\s*(\\w\\w):\")\n\n        for idx, line in enumerate(fp):\n            # Any time we encounter what looks like a new object, start over.\n            # FIXME: this deliberately simplifies and will ignore nested objects.\n            # That's what we get for parsing javascript with regex.\n            m = open_obj_re.fullmatch(line)\n            if m:\n                open_match = m\n                # idx is zero-based, but line numbers are not.\n                line_number = idx + 1\n                keys = []\n                continue\n\n            # If we're not inside an object, keep looking for the start of one.\n            if not open_match:\n                continue\n\n            # If this object is ended with the same indentation,\n            # then we've probably maybe found the end of this object.\n            if re.match(open_match.group(1) + \"}\", line):\n                # Check if these keys look like a translation block.\n                if \"en\" in keys:\n                    missing_keys = locales - set(keys)\n\n                    open_str = open_match.group(2)\n                    # Only some locales care about zoneRegex, so special case.\n                    if open_str == \"zoneRegex: {\":\n                        missing_keys -= non_zoneregex_locales\n\n                    if missing_keys:\n                        err = file + \":\" + str(line_number)\n                        err += ' \"' + open_str + '\"'\n                        if len(locales) > 1:\n                            err += \" \" + str(list(missing_keys))\n                        print(err)\n                open_match = None\n                continue\n\n            # If we're inside an object, find anything that looks like a key.\n            key_match = key_re.match(line)\n            if key_match:\n                keys.append(key_match.group(1))\n\n\ndef parse_trigger_file_for_timelines(file, locale):\n    find_missing_timeline_js = os.path.join(\n        os.path.dirname(__file__), \"find_missing_timeline_translations.js\"\n    )\n\n    # Process stdout ourselves so that it doesn't interleave incorrectly.\n    cmd = [\"node\", find_missing_timeline_js, str(file), locale]\n    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n    for line in io.TextIOWrapper(proc.stdout, encoding=\"utf-8\"):\n        print(line.rstrip().encode(\"ascii\", \"backslashreplace\").decode())\n\n\nif __name__ == \"__main__\":\n    example_usage = \"\"\n\n    parser = argparse.ArgumentParser(\n        description=\"Prints out a list of missing translations\",\n        epilog=example_usage,\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n    )\n\n    parser.add_argument(\n        \"-l\", \"--locale\", help=\"The locale to find missing translations for, e.g. 
de\"\n )\n parser.add_argument(\n \"-f\", \"--filter\", help=\"Limits the results to only match specific files/path\"\n )\n args = parser.parse_args()\n\n if not args.locale:\n raise parser.error(\"Missing required locale.\")\n if not args.locale in all_locales:\n raise parser.error(\"Invalid locale: \" + args.locale)\n locales = [args.locale]\n if not args.filter:\n args.filter = \"\"\n\n for file in find_all_javascript_files(args.filter):\n parse_trigger_file_for_timelines(file, args.locale)\n parse_javascript_file(file, locales)\n","sub_path":"util/find_missing_translations.py","file_name":"find_missing_translations.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"374179643","text":"num_class = int(input('總班級數:'))\n\nall_score_list = []\nfor i in range(0, num_class):\n print('第', i+1, '班 ------------')\n num_student = int(input('學生人數:'))\n score_list = []\n for j in range(0, num_student):\n score = int(input('請輸入學生成績'))\n score_list += [score]\n \n all_score_list += [score_list]\n \ngrade_A_count = []\ngrade_B_count = []\ngrade_C_count = []\ngrade_D_count = []\ngrade_E_count = []\n\nfor score_list in all_score_list:\n \n A_count = 0\n B_count = 0\n C_count = 0\n D_count = 0\n E_count = 0\n \n for score in score_list:\n if 100>=score>=90:\n A_count += 1\n if 90>score>=80:\n B_count += 1\n if 80>score>=70:\n C_count += 1\n if 70>score>=60:\n D_count += 1\n if score<60:\n E_count += 1\n \n grade_A_count += [A_count]\n grade_B_count += [B_count]\n grade_C_count += [C_count]\n grade_D_count += [D_count]\n grade_E_count += [E_count]\n\nfor i in range(0, len(grade_A_count)):\n print('第', i+1, '班------------',)\n print('等級A人數:', grade_A_count[i])\n print('等級B人數:', grade_B_count[i])\n print('等級C人數:', grade_C_count[i])\n print('等級D人數:', grade_D_count[i])\n print('等級E人數:', grade_E_count[i])","sub_path":"Chapter13/13-4.py","file_name":"13-4.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"496014032","text":"#!/usr/bin/env python\n\nimport smtplib\nfrom email.mime.text import MIMEText\n\nmsg = MIMEText(\"Test message\")\nmsg['Subject'] = 'Test header'\nmsg['From'] = 'mailtest-noreply@sandtrout.local'\nmsg['To'] = 'brock@brocktice.com'\n\ns = smtplib.SMTP('localhost')\ns.sendmail(msg['From'], [msg['To']], msg.as_string())\ns.quit()\n\n\n","sub_path":"mailtest.py","file_name":"mailtest.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"42781697","text":"#Logistic Regression\n#importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#importing the dataset\ndataset=pd.read_csv('Social_Network_Ads.csv')\nX=dataset.iloc[:,[2,3]].values.T\nY=dataset.iloc[:,-1].values.reshape(-1,1).T\nX=(X-np.mean(X))/np.std(X)\n#Splitting the dataset into training set and test set\n\n\n#Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X=StandardScaler()\nX=sc_X.fit_transform(X)\n\n\ndef initialize_weights(n_x):\n W=np.zeros((n_x,1))\n b=np.zeros((1,1))\n return W,b\n\ndef sigmoid(Z):\n A=1/(1+np.exp(-Z))\n return A\n\ndef forward_prop(X,W,b):\n Z=np.dot(W.T,X)+b\n A=sigmoid(Z)\n return A\n\ndef compute_cost(A,Y):\n m=Y.shape[1]\n cost=(-1/m)*(np.sum(np.multiply(np.log(A),Y)+np.multiply(np.log(1-A),1-Y)))\n return cost\n\ndef backward_prop(X,Y,A):\n m=Y.shape[1]\n 
dZ=A-Y\n    dW=np.dot(X,dZ.T)/m\n    db=np.sum(dZ)/m\n    return dW,db\n\ndef Grad_Desc(W,b,dW,db,learning_rate):\n    W=W-learning_rate*dW\n    b=b-learning_rate*db\n    return W,b\n\ndef logistic_model(X,Y,learning_rate,num_iter):\n    W,b=initialize_weights(X.shape[0])\n    for i in range(num_iter):\n        \n        A=forward_prop(X,W,b)\n        dW,db=backward_prop(X,Y,A)\n        W,b=Grad_Desc(W,b,dW,db,learning_rate)\n        if i%100==0:\n            print(compute_cost(A,Y))\n    return W,b,dW,db\ndef predict(X,W,b):\n    A=forward_prop(X,W,b)\n    prediction=(A>0.5)\n    return prediction\nW,b,dW,db=logistic_model(X,Y,0.0025,15000) \nprediction=predict(X,W,b)\ncount=0\nfor i in range(Y.shape[1]):\n    if prediction[0][i]==Y[0][i]:\n        count+=1\naccuracy=count/float(Y.shape[1])","sub_path":"LR_scratch.py","file_name":"LR_scratch.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"563842318","text":"import random\n\nfrom core.attacks import listing\n\n\ndef get_possible_attacks(attacker, defender):\n    possible_attacks = []\n    for attack_set in attacker.combat.attack_sets:\n        attack = listing.get_attack(attack_set.attack)\n        if attack.can_execute(attacker, defender):\n            possible_attacks.append(attack_set)\n\n    return possible_attacks\n\n\ndef auto_attack(attacker, defender):\n    possible_attacks = get_possible_attacks(attacker, defender)\n    if not possible_attacks:\n        return False\n\n    attack_set_or_chain = random.choice(possible_attacks)\n    if hasattr(attack_set_or_chain, 'attack_sets'):\n        attack_sets = attack_set_or_chain.attack_sets\n    else:\n        attack_sets = attack_set_or_chain,\n\n    for attack_set in attack_sets:\n        attack = listing.get_attack(attack_set.attack)\n        if attack is None:\n            raise Exception(\"Attack {} is not implemented.\".format(attack_set.attack.name))\n        attack.execute(attacker, defender, attack_set)\n\n    return True\n","sub_path":"core/attacks/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"283903402","text":"import logging\nfrom typing import List, Optional\n\nfrom attrs import define\nfrom sklearn.base import BaseEstimator\n\nimport skrough.typing as rght\nfrom skrough.algorithms.exceptions import LoopBreak\nfrom skrough.algorithms.meta.aggregates import (\n    ChainProcessElementsHooksAggregate,\n    InnerStopHooksAggregate,\n    ProcessElementsHooksAggregate,\n    ProduceElementsHooksAggregate,\n    StopHooksAggregate,\n    UpdateStateHooksAggregate,\n)\nfrom skrough.algorithms.meta.describe import (\n    autogenerate_description_node,\n    describe,\n    inspect_config_keys,\n    inspect_input_data_keys,\n    inspect_values_keys,\n)\nfrom skrough.algorithms.meta.visual_block import sk_visual_block\nfrom skrough.logs import log_start_end\nfrom skrough.structs.description_node import NODE_META_OPTIONAL_KEY, DescriptionNode\nfrom skrough.structs.state import ProcessingState\n\nlogger = logging.getLogger(__name__)\n\n\n@define\nclass Stage(rght.Describable):\n    stop_agg: StopHooksAggregate\n    init_agg: UpdateStateHooksAggregate\n    pre_candidates_agg: ProduceElementsHooksAggregate\n    candidates_agg: ProcessElementsHooksAggregate\n    select_agg: ProcessElementsHooksAggregate\n    filter_agg: ChainProcessElementsHooksAggregate\n    inner_init_agg: ChainProcessElementsHooksAggregate\n    inner_stop_agg: InnerStopHooksAggregate\n    inner_process_agg: ChainProcessElementsHooksAggregate\n    finalize_agg: UpdateStateHooksAggregate\n\n    # pylint: disable-next=protected-access\n    
_repr_mimebundle_ = BaseEstimator._repr_mimebundle_\n _sk_visual_block_ = sk_visual_block\n\n @classmethod\n @log_start_end(logger)\n def from_hooks(\n cls,\n stop_hooks: rght.OneOrSequence[rght.StopHook],\n init_hooks: Optional[rght.OneOrSequence[rght.UpdateStateHook]],\n pre_candidates_hooks: Optional[rght.OneOrSequence[rght.ProduceElementsHook]],\n candidates_hooks: Optional[rght.OneOrSequence[rght.ProcessElementsHook]],\n select_hooks: Optional[rght.OneOrSequence[rght.ProcessElementsHook]],\n filter_hooks: Optional[rght.OneOrSequence[rght.ProcessElementsHook]],\n inner_init_hooks: Optional[rght.OneOrSequence[rght.ProcessElementsHook]],\n inner_stop_hooks: rght.OneOrSequence[rght.InnerStopHook],\n inner_process_hooks: rght.OneOrSequence[rght.ProcessElementsHook],\n finalize_hooks: Optional[rght.OneOrSequence[rght.UpdateStateHook]],\n ):\n return cls(\n stop_agg=StopHooksAggregate.from_hooks(stop_hooks),\n init_agg=UpdateStateHooksAggregate.from_hooks(init_hooks),\n pre_candidates_agg=ProduceElementsHooksAggregate.from_hooks(\n pre_candidates_hooks\n ),\n candidates_agg=ProcessElementsHooksAggregate.from_hooks(candidates_hooks),\n select_agg=ProcessElementsHooksAggregate.from_hooks(select_hooks),\n filter_agg=ChainProcessElementsHooksAggregate.from_hooks(filter_hooks),\n inner_init_agg=ChainProcessElementsHooksAggregate.from_hooks(\n inner_init_hooks\n ),\n inner_stop_agg=InnerStopHooksAggregate.from_hooks(inner_stop_hooks),\n inner_process_agg=ChainProcessElementsHooksAggregate.from_hooks(\n inner_process_hooks\n ),\n finalize_agg=UpdateStateHooksAggregate.from_hooks(finalize_hooks),\n )\n\n @log_start_end(logger)\n def __call__(self, state: ProcessingState) -> None:\n logger.debug(\"Run init hooks\")\n self.init_agg(state)\n\n try:\n\n logger.debug(\"Check stop_hooks on start\")\n self.stop_agg(state, raise_loop_break=True)\n\n while True:\n\n logger.debug(\"Run pre_candidates_hooks\")\n pre_candidates = self.pre_candidates_agg(state)\n\n logger.debug(\"Run candidates_hooks\")\n candidates = self.candidates_agg(state, pre_candidates)\n\n logger.debug(\"Run select_hooks\")\n selected = self.select_agg(state, candidates)\n\n logger.debug(\"Run verify_hooks\")\n filtered = self.filter_agg(state, selected)\n\n logger.debug(\"Run inner_init_hooks\")\n elements = self.inner_init_agg(state, filtered)\n\n should_check_stop_after = True\n\n while True:\n\n logger.debug(\"Check inner_stop_hooks\")\n if self.inner_stop_agg(state, elements, raise_loop_break=False):\n logger.debug(\"Break inner loop\")\n break\n\n logger.debug(\"Run inner_process_hooks\")\n elements = self.inner_process_agg(state, elements)\n\n logger.debug(\"Check stop_hooks in inner loop\")\n self.stop_agg(state, raise_loop_break=True)\n should_check_stop_after = False\n\n if should_check_stop_after:\n logger.debug(\"Check stop_hooks on inner loop exit\")\n self.stop_agg(state, raise_loop_break=True)\n\n except LoopBreak:\n logger.debug(\"Break outer loop\")\n\n logger.debug(\"Run finalize_hooks\")\n self.finalize_agg(state)\n\n def get_description_graph(self):\n result = autogenerate_description_node(\n processing_element=self, process_docstring=True\n )\n result.children = [\n describe(\n self.init_agg,\n override_node_name=\"init\",\n ),\n describe(\n self.stop_agg,\n override_node_name=\"check_stop\",\n ),\n DescriptionNode(\n node_name=\"outer_loop\",\n children=[\n describe(\n self.pre_candidates_agg,\n override_node_name=\"pre_candidates\",\n ),\n describe(\n self.candidates_agg,\n override_node_name=\"candidates\",\n ),\n 
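# The children listed here mirror the hook execution order in __call__ above,\n                    # so the generated description graph stays in sync with the control flow.\n                    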
describe(\n self.select_agg,\n override_node_name=\"select\",\n ),\n describe(\n self.filter_agg,\n override_node_name=\"filter\",\n ),\n describe(\n self.inner_init_agg,\n override_node_name=\"inner_init\",\n ),\n DescriptionNode(\n node_name=\"inner_loop\",\n children=[\n describe(\n self.inner_stop_agg,\n override_node_name=\"inner_check_stop\",\n ),\n describe(\n self.inner_process_agg,\n override_node_name=\"inner_process\",\n ),\n describe(\n self.stop_agg,\n override_node_name=\"check_stop\",\n ),\n ],\n ),\n describe(\n self.stop_agg,\n override_node_name=\"check_stop\",\n override_node_meta={NODE_META_OPTIONAL_KEY: True},\n ),\n ],\n ),\n describe(\n self.finalize_agg,\n override_node_name=\"finalize\",\n ),\n ]\n return result\n\n def _get_children_processing_elements(self):\n return [\n self.stop_agg,\n self.init_agg,\n self.pre_candidates_agg,\n self.candidates_agg,\n self.select_agg,\n self.filter_agg,\n self.inner_init_agg,\n self.inner_stop_agg,\n self.inner_process_agg,\n self.finalize_agg,\n ]\n\n def get_config_keys(self) -> List[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_config_keys,\n )\n\n def get_input_data_keys(self) -> List[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_input_data_keys,\n )\n\n def get_values_keys(self) -> List[str]:\n return self._get_keys_from_elements(\n children=self._get_children_processing_elements(),\n inspect_keys_function=inspect_values_keys,\n )\n","sub_path":"src/skrough/algorithms/meta/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"621850663","text":"# -*- coding: utf-8 -*-\n\"\"\"\n flask_security.unified_signin\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Flask-Security Unified signin module\n\n :copyright: (c) 2019-2020 by J. Christopher Wagner (jwag).\n :license: MIT, see LICENSE for more details.\n\n This implements a unified sign in endpoint - allowing\n authentication via identity and passcode - where identity is configured\n via SECURITY_USER_IDENTITY_ATTRIBUTES, and allowable passcodes are either a\n password or one of US_ENABLED_METHODS.\n\n Finish up:\n - setup should probably require a 'fresh' authentication - as 2FA does.\n - should we support a way that /logout redirects to us-signin rather than /login?\n - we should be able to add a phone number as part of setup even w/o any METHODS -\n i.e. to allow login with any identity (phone) and a password.\n - openapi.yaml\n - add new example?\n - add username as last IDENTITY_MAPPING and allow anything...?? or just in example?\n\n Consider/Questions:\n - Allow registering/confirming with just a phone number - this likely would require\n a new register/confirm endpoint in order to implement verification.\n - Right now ChangePassword won't work - it requires an existing password - so\n if the user doesn't have one - can't change it. However ForgotPassword will in\n fact allow the user to add a password. Is that sufficient?\n - Any reason to support 'next' in form? xx?next=yyy works fine.\n - separate code validation times for SMS, email, authenticator?\n - token versus code versus passcode? 
Confusing terminology.\n\n\"\"\"\n\nimport sys\n\nfrom flask import current_app as app\nfrom flask import abort, after_this_request, redirect, request\nfrom flask_login import current_user\nfrom werkzeug.datastructures import MultiDict\nfrom werkzeug.local import LocalProxy\nfrom wtforms import BooleanField, RadioField, StringField, SubmitField, validators\n\nfrom .confirmable import requires_confirmation\nfrom .decorators import anonymous_user_required, auth_required, unauth_csrf\nfrom .forms import Form, Required, get_form_field_label\nfrom .quart_compat import get_quart_status\nfrom .signals import us_profile_changed, us_security_token_sent\nfrom .twofactor import is_tf_setup, tf_login\nfrom .utils import (\n _,\n SmsSenderFactory,\n base_render_json,\n check_and_get_token_status,\n config_value,\n do_flash,\n get_post_login_redirect,\n get_message,\n get_url,\n get_within_delta,\n json_error_response,\n login_user,\n suppress_form_csrf,\n url_for_security,\n)\n\n# Convenient references\n_security = LocalProxy(lambda: app.extensions[\"security\"])\n_datastore = LocalProxy(lambda: _security.datastore)\n\n\nPY3 = sys.version_info[0] == 3\nif PY3 and get_quart_status(): # pragma: no cover\n from .async_compat import _commit # noqa: F401\nelse:\n\n def _commit(response=None):\n _datastore.commit()\n return response\n\n\ndef _us_common_validate(form):\n # Be aware - this has side effect on the form - it will fill in\n # the form.user\n\n # Validate identity - we go in order to figure out which user attribute the\n # request gave us. Note that we give up on the first 'match' even if that\n # doesn't yield a user. Why?\n for mapping in config_value(\"USER_IDENTITY_MAPPINGS\"):\n for ua, mapper in mapping.items():\n # Make sure we don't validate on a column that application\n # hasn't specifically configured as a unique/identity column\n # In other words - might have a phone number for 2FA or unified\n # but don't want the user to be able to use that as primary identity\n if ua in config_value(\"USER_IDENTITY_ATTRIBUTES\"):\n # Allow mapper to alter (coerce) to type DB requires\n idata = mapper(form.identity.data)\n if idata is not None:\n form.user = _datastore.find_user(**{ua: idata})\n\n if not form.user:\n form.identity.errors.append(get_message(\"US_SPECIFY_IDENTITY\")[0])\n return False\n if not form.user.is_active:\n form.identity.errors.append(get_message(\"DISABLED_ACCOUNT\")[0])\n return False\n return True\n\n\nclass UnifiedSigninForm(Form):\n \"\"\" A unified login form\n For either identity/password or request and enter code.\n \"\"\"\n\n user = None\n authn_via = None\n\n identity = StringField(\n get_form_field_label(\"identity\"),\n validators=[Required()],\n render_kw={\"placeholder\": _(\"email, phone, username\")},\n )\n\n passcode = StringField(\n get_form_field_label(\"passcode\"),\n render_kw={\"placeholder\": _(\"Code or Password\")},\n )\n remember = BooleanField(get_form_field_label(\"remember_me\"))\n submit = SubmitField(get_form_field_label(\"signin\"))\n\n chosen_method = RadioField(\n _(\"Available Methods\"),\n choices=[(\"email\", _(\"Via email\")), (\"sms\", _(\"Via SMS\"))],\n validators=[validators.Optional()],\n )\n submit_send_code = SubmitField(get_form_field_label(\"sendcode\"))\n\n def __init__(self, *args, **kwargs):\n super(UnifiedSigninForm, self).__init__(*args, **kwargs)\n self.remember.default = config_value(\"DEFAULT_REMEMBER_ME\")\n\n def validate(self):\n if not super(UnifiedSigninForm, self).validate():\n return False\n\n # For either - require 
a valid identity\n if not _us_common_validate(self):\n return False\n\n if self.submit.data:\n # This is login - verify passcode/password\n # Since we have a unique totp_secret for each method - we\n # can figure out which mechanism was used.\n ok = False\n totp_secrets = self.user.us_get_totp_secrets()\n for method in config_value(\"US_ENABLED_METHODS\"):\n if method == \"password\":\n if self.user.verify_and_update_password(self.passcode.data):\n ok = True\n break\n else:\n if method in totp_secrets and _security._totp_factory.verify_totp(\n token=self.passcode.data,\n totp_secret=totp_secrets[method],\n user=self.user,\n window=config_value(\"US_TOKEN_VALIDITY\"),\n ):\n ok = True\n break\n if not ok:\n self.passcode.errors.append(get_message(\"INVALID_PASSWORD\")[0])\n return False\n\n self.authn_via = method\n\n # Only check this once authenticated to not give away info\n if requires_confirmation(self.user):\n self.identity.errors.append(get_message(\"CONFIRMATION_REQUIRED\")[0])\n return False\n return True\n elif self.submit_send_code.data:\n # Send a code - identity and chosen_method must be valid\n # Note: we don't check for NOT CONFIRMED account here and go\n # ahead and send a code - but above we check and won't let them sign in.\n # The idea is to not expose info if not authenticated\n if self.chosen_method.data not in config_value(\"US_ENABLED_METHODS\"):\n self.chosen_method.errors.append(\n get_message(\"US_METHOD_NOT_AVAILABLE\")[0]\n )\n return False\n if self.chosen_method.data == \"sms\" and not self.user.us_phone_number:\n # They need to us-setup!\n self.chosen_method.errors.append(get_message(\"PHONE_INVALID\")[0])\n return False\n return True\n return False # pragma: no cover\n\n\nclass UnifiedSigninSetupForm(Form):\n \"\"\" Setup form \"\"\"\n\n chosen_method = RadioField(\n _(\"Available Methods\"),\n choices=[\n (\"email\", _(\"Set up using email\")),\n (\n \"authenticator\",\n _(\"Set up using an authenticator app (e.g. google, lastpass, authy)\"),\n ),\n (\"sms\", _(\"Set up using SMS\")),\n ],\n )\n phone = StringField(get_form_field_label(\"phone\"))\n\n # By default we don't create a new totp_secret since that would invalidate\n # any authenticator setup. 
Allow user to request a reset.\n new_totp_secret = BooleanField(get_form_field_label(\"new_totp_secret\"))\n submit = SubmitField(get_form_field_label(\"submit\"))\n\n def __init__(self, *args, **kwargs):\n super(UnifiedSigninSetupForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n if not super(UnifiedSigninSetupForm, self).validate():\n return False\n if self.chosen_method.data not in config_value(\"US_ENABLED_METHODS\"):\n self.chosen_method.errors.append(get_message(\"US_METHOD_NOT_AVAILABLE\")[0])\n return False\n\n if self.chosen_method.data == \"sms\":\n msg = _security._phone_util.validate_phone_number(self.phone.data)\n if msg:\n self.phone.errors.append(msg)\n return False\n\n return True\n\n\nclass UnifiedSigninSetupVerifyForm(Form):\n \"\"\"The unified sign in setup validation form \"\"\"\n\n # These 2 filled in by view\n user = None\n totp_secret = None\n\n code = StringField(get_form_field_label(\"code\"), validators=[Required()])\n submit = SubmitField(get_form_field_label(\"submitcode\"))\n\n def __init__(self, *args, **kwargs):\n super(UnifiedSigninSetupVerifyForm, self).__init__(*args, **kwargs)\n\n def validate(self):\n if not super(UnifiedSigninSetupVerifyForm, self).validate():\n return False\n\n if not _security._totp_factory.verify_totp(\n token=self.code.data,\n totp_secret=self.totp_secret,\n user=self.user,\n window=config_value(\"US_TOKEN_VALIDITY\"),\n ):\n self.code.errors.append(get_message(\"INVALID_CODE\")[0])\n return False\n\n return True\n\n\n@anonymous_user_required\n@unauth_csrf(fall_through=True)\ndef us_send_code():\n \"\"\"\n Send code view.\n This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES)\n and a method request to send a code.\n \"\"\"\n form_class = _security.us_signin_form\n\n if request.is_json:\n if request.content_length:\n form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())\n else:\n form = form_class(formdata=None, meta=suppress_form_csrf())\n else:\n form = form_class(meta=suppress_form_csrf())\n form.submit_send_code.data = True\n\n if form.validate_on_submit():\n # send code\n user = form.user\n method = form.chosen_method.data\n totp_secrets = user.us_get_totp_secrets()\n if method not in totp_secrets:\n after_this_request(_commit)\n totp_secrets[method] = _security._totp_factory.generate_totp_secret()\n user.us_put_totp_secrets(totp_secrets)\n\n msg = user.us_send_security_token(\n method,\n totp_secret=totp_secrets[method],\n phone_number=user.us_phone_number,\n send_magic_link=True,\n )\n code_sent = True\n if msg:\n # send code didn't work\n code_sent = False\n form.chosen_method.errors.append(msg)\n\n if _security._want_json(request):\n # Not authenticated yet - so don't send any user info.\n return base_render_json(\n form, include_user=False, error_status_code=500 if msg else 400\n )\n\n return _security.render_template(\n config_value(\"US_SIGNIN_TEMPLATE\"),\n us_signin_form=form,\n methods=config_value(\"US_ENABLED_METHODS\"),\n chosen_method=form.chosen_method.data,\n code_sent=code_sent,\n skip_loginmenu=True,\n **_security._run_ctx_processor(\"us_signin\")\n )\n\n # Here on GET or failed validation\n if _security._want_json(request):\n payload = {\"methods\": config_value(\"US_ENABLED_METHODS\")}\n return base_render_json(form, include_user=False, additional=payload)\n\n return _security.render_template(\n config_value(\"US_SIGNIN_TEMPLATE\"),\n us_signin_form=form,\n methods=config_value(\"US_ENABLED_METHODS\"),\n skip_loginmenu=True,\n 
**_security._run_ctx_processor(\"us_signin\")\n )\n\n\n@anonymous_user_required\n@unauth_csrf(fall_through=True)\ndef us_signin():\n \"\"\"\n Unified sign in view.\n This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES)\n and a passcode (password or OTP).\n \"\"\"\n form_class = _security.us_signin_form\n\n if request.is_json:\n if request.content_length:\n form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())\n else:\n form = form_class(formdata=None, meta=suppress_form_csrf())\n else:\n form = form_class(meta=suppress_form_csrf())\n form.submit.data = True\n\n if form.validate_on_submit():\n # Require multi-factor is it is enabled, and the method\n # we authenticated with requires it and either user has requested MFA or it is\n # required.\n remember_me = form.remember.data if \"remember\" in form else None\n if (\n config_value(\"TWO_FACTOR\")\n and form.authn_via in config_value(\"US_MFA_REQUIRED\")\n and (config_value(\"TWO_FACTOR_REQUIRED\") or is_tf_setup(form.user))\n ):\n return tf_login(\n form.user, remember=remember_me, primary_authn_via=form.authn_via\n )\n\n after_this_request(_commit)\n login_user(form.user, remember=remember_me, authn_via=[form.authn_via])\n\n if _security._want_json(request):\n return base_render_json(form, include_auth_token=True)\n\n return redirect(get_post_login_redirect())\n\n # Here on GET or failed POST validate\n if _security._want_json(request):\n payload = {\n \"methods\": config_value(\"US_ENABLED_METHODS\"),\n \"identity_attributes\": config_value(\"USER_IDENTITY_ATTRIBUTES\"),\n }\n return base_render_json(form, include_user=False, additional=payload)\n\n # On error - wipe code\n form.passcode.data = None\n return _security.render_template(\n config_value(\"US_SIGNIN_TEMPLATE\"),\n us_signin_form=form,\n methods=config_value(\"US_ENABLED_METHODS\"),\n skip_login_menu=True,\n **_security._run_ctx_processor(\"us_signin\")\n )\n\n\n@anonymous_user_required\ndef us_verify_link():\n \"\"\"\n Used to verify a magic email link. 
GET only\n \"\"\"\n if not all(v in request.args for v in [\"email\", \"code\"]):\n m, c = get_message(\"API_ERROR\")\n if _security.redirect_behavior == \"spa\":\n return redirect(get_url(_security.login_error_view, qparams={c: m}))\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n user = _datastore.find_user(email=request.args.get(\"email\"))\n if not user or not user.active:\n if not user:\n m, c = get_message(\"USER_DOES_NOT_EXIST\")\n else:\n m, c = get_message(\"DISABLED_ACCOUNT\")\n if _security.redirect_behavior == \"spa\":\n return redirect(get_url(_security.login_error_view, qparams={c: m}))\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n totp_secrets = user.us_get_totp_secrets()\n if \"email\" not in totp_secrets or not _security._totp_factory.verify_totp(\n token=request.args.get(\"code\"),\n totp_secret=totp_secrets[\"email\"],\n user=user,\n window=config_value(\"US_TOKEN_VALIDITY\"),\n ):\n m, c = get_message(\"INVALID_CODE\")\n if _security.redirect_behavior == \"spa\":\n return redirect(\n get_url(\n _security.login_error_view,\n qparams=user.get_redirect_qparams({c: m}),\n )\n )\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n if (\n config_value(\"TWO_FACTOR\")\n and \"email\" in config_value(\"US_MFA_REQUIRED\")\n and (config_value(\"TWO_FACTOR_REQUIRED\") or is_tf_setup(user))\n ):\n return tf_login(user, primary_authn_via=\"email\")\n\n login_user(user, authn_via=[\"email\"])\n after_this_request(_commit)\n if _security.redirect_behavior == \"spa\":\n # We do NOT send the authentication token here since the only way to\n # send it would be via a query param and that isn't secure. (logging and\n # possibly HTTP Referer header).\n # This means that this can only work if sessions are active which sort of\n # makes sense - otherwise you need to use /us-signin with a code.\n return redirect(\n get_url(_security.post_login_view, qparams=user.get_redirect_qparams())\n )\n\n do_flash(*get_message(\"PASSWORDLESS_LOGIN_SUCCESSFUL\"))\n return redirect(get_post_login_redirect())\n\n\n@auth_required()\ndef us_setup():\n \"\"\"\n Change unified sign in methods.\n We want to verify the new method - so don't store anything yet in DB\n use a timed signed token to pass along state.\n GET - retrieve current info (json) or form.\n \"\"\"\n form_class = _security.us_setup_form\n\n if request.is_json:\n if request.content_length:\n form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())\n else:\n form = form_class(formdata=None, meta=suppress_form_csrf())\n else:\n form = form_class(meta=suppress_form_csrf())\n\n if form.validate_on_submit():\n method = form.chosen_method.data\n totp_secrets = current_user.us_get_totp_secrets()\n if method not in totp_secrets or form.new_totp_secret.data:\n totp = _security._totp_factory.generate_totp_secret()\n else:\n totp = totp_secrets[method]\n # N.B. 
totp (totp_secret) is actually encrypted - so it seems safe enough\n # to send it to the user.\n state = {\n \"totp_secret\": totp,\n \"chosen_method\": method,\n \"phone_number\": _security._phone_util.get_canonical_form(form.phone.data),\n }\n msg = current_user.us_send_security_token(\n method=method,\n totp_secret=state[\"totp_secret\"],\n phone_number=state[\"phone_number\"],\n )\n if msg:\n # sending didn't work.\n form.chosen_method.errors.append(msg)\n if _security._want_json(request):\n # Not authenticated yet - so don't send any user info.\n return base_render_json(\n form, include_user=False, error_status_code=500 if msg else 400\n )\n return _security.render_template(\n config_value(\"US_SETUP_TEMPLATE\"),\n methods=config_value(\"US_ENABLED_METHODS\"),\n us_setup_form=form,\n **_security._run_ctx_processor(\"us_setup\")\n )\n\n state_token = _security.us_setup_serializer.dumps(state)\n\n if _security._want_json(request):\n payload = {\"state\": state_token, \"chosen_method\": form.chosen_method.data}\n return base_render_json(form, include_user=False, additional=payload)\n return _security.render_template(\n config_value(\"US_SETUP_TEMPLATE\"),\n methods=config_value(\"US_ENABLED_METHODS\"),\n chosen_method=form.chosen_method.data,\n us_setup_form=form,\n us_setup_verify_form=_security.us_setup_verify_form(),\n state=state_token,\n **_security._run_ctx_processor(\"us_setup\")\n )\n\n # Get here on initial new setup (GET)\n # Or failure of POST\n if _security._want_json(request):\n payload = {\n \"identity_attributes\": config_value(\"USER_IDENTITY_ATTRIBUTES\"),\n \"methods\": config_value(\"US_ENABLED_METHODS\"),\n \"phone\": current_user.us_phone_number,\n }\n return base_render_json(form, include_user=False, additional=payload)\n\n # Show user existing phone number\n form.phone.data = current_user.us_phone_number\n return _security.render_template(\n config_value(\"US_SETUP_TEMPLATE\"),\n methods=config_value(\"US_ENABLED_METHODS\"),\n us_setup_form=form,\n **_security._run_ctx_processor(\"us_setup\")\n )\n\n\n@auth_required()\ndef us_setup_verify(token):\n \"\"\"\n Verify new setup.\n The token is the state variable which is signed and timed\n and contains all the state that once confirmed will be stored in the user record.\n \"\"\"\n\n form_class = _security.us_setup_verify_form\n\n if request.is_json:\n form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())\n else:\n form = form_class(meta=suppress_form_csrf())\n\n expired, invalid, state = check_and_get_token_status(\n token, \"us_setup\", get_within_delta(\"US_SETUP_WITHIN\")\n )\n if invalid:\n m, c = get_message(\"API_ERROR\")\n if expired:\n m, c = get_message(\"US_SETUP_EXPIRED\", within=config_value(\"US_SETUP_WITHIN\"))\n if invalid or expired:\n if _security._want_json(request):\n payload = json_error_response(errors=m)\n return _security._render_json(payload, 400, None, None)\n do_flash(m, c)\n return redirect(url_for_security(\"us_setup\"))\n\n form.totp_secret = state[\"totp_secret\"]\n form.user = current_user\n\n if form.validate_on_submit():\n after_this_request(_commit)\n method = state[\"chosen_method\"]\n totp_secrets = current_user.us_get_totp_secrets()\n\n totp_secrets[method] = state[\"totp_secret\"]\n if method == \"sms\":\n current_user.us_phone_number = state[\"phone_number\"]\n current_user.us_put_totp_secrets(totp_secrets)\n\n us_profile_changed.send(\n app._get_current_object(), user=current_user, method=method\n )\n if _security._want_json(request):\n return 
base_render_json(\n form,\n include_user=False,\n additional=dict(\n chosen_method=method, phone=current_user.us_phone_number\n ),\n )\n else:\n do_flash(*get_message(\"US_SETUP_SUCCESSFUL\"))\n return redirect(\n get_url(_security.us_post_setup_view)\n or get_url(_security.post_login_view)\n )\n\n # Code not correct/outdated.\n if _security._want_json(request):\n return base_render_json(form, include_user=False)\n m, c = get_message(\"INVALID_CODE\")\n do_flash(m, c)\n return redirect(url_for_security(\"us_setup\"))\n\n\n@auth_required()\ndef us_qrcode(token):\n\n if \"authenticator\" not in config_value(\"US_ENABLED_METHODS\"):\n return abort(404)\n expired, invalid, state = check_and_get_token_status(\n token, \"us_setup\", get_within_delta(\"US_SETUP_WITHIN\")\n )\n if expired or invalid:\n return abort(400)\n\n try:\n import pyqrcode\n\n # By convention, the URI should have the username that the user\n # logs in with.\n username = current_user.calc_username()\n url = pyqrcode.create(\n _security._totp_factory.get_totp_uri(\n username if username else \"Unknown\", state[\"totp_secret\"]\n )\n )\n except ImportError: # pragma: no cover\n raise\n from io import BytesIO\n\n stream = BytesIO()\n url.svg(stream, scale=3)\n return (\n stream.getvalue(),\n 200,\n {\n \"Content-Type\": \"image/svg+xml\",\n \"Cache-Control\": \"no-cache, no-store, must-revalidate\",\n \"Pragma\": \"no-cache\",\n \"Expires\": \"0\",\n },\n )\n\n\ndef us_send_security_token(\n user, method, totp_secret, phone_number, send_magic_link=False\n):\n \"\"\" Generate and send the security code.\n\n :param user: The user to send the code to\n :param method: The method in which the code will be sent\n :param totp_secret: the unique shared secret of the user\n :param phone_number: If 'sms' phone number to send to\n :param send_magic_link: If true a magic link that can be clicked on will be sent.\n\n This shouldn't be sent during a setup.\n\n There is no return value - it is assumed that exceptions are thrown by underlying\n methods that callers can catch.\n\n Flask-Security code should NOT call this directly -\n call :meth:`.UserMixin.us_send_security_token`\n\n .. 
versionadded:: 3.4.0\n    \"\"\"\n    token = _security._totp_factory.generate_totp_password(totp_secret)\n\n    if method == \"email\":\n        login_link = None\n        if send_magic_link:\n            login_link = url_for_security(\n                \"us_verify_link\", email=user.email, code=token, _external=True\n            )\n        _security._send_mail(\n            config_value(\"US_EMAIL_SUBJECT\"),\n            user.email,\n            \"us_instructions\",\n            user=user,\n            username=user.calc_username(),\n            token=token,\n            login_link=login_link,\n        )\n    elif method == \"sms\":\n        m, c = get_message(\"USE_CODE\", code=token)\n        from_number = config_value(\"SMS_SERVICE_CONFIG\")[\"PHONE_NUMBER\"]\n        to_number = phone_number\n        sms_sender = SmsSenderFactory.createSender(config_value(\"SMS_SERVICE\"))\n        sms_sender.send_sms(from_number=from_number, to_number=to_number, msg=m)\n\n    elif method == \"authenticator\" or method == \"password\":\n        # tokens are generated automatically with authenticator apps\n        # and passwords are, well, passwords\n        # Still go ahead and notify signal receivers that they requested it.\n        token = None\n    us_security_token_sent.send(\n        app._get_current_object(),\n        user=user,\n        method=method,\n        token=token,\n        phone_number=phone_number,\n        send_magic_link=send_magic_link,\n    )\n","sub_path":"flask_security/unified_signin.py","file_name":"unified_signin.py","file_ext":"py","file_size_in_byte":25890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"551876841","text":"import threading\nfrom socket import *\nfrom queue import Queue\nimport pickle\n\nq=Queue()\n\n\nHOST=\"localhost\"\nPORT=8888\nBUFSIZ=1024\nADDR=(HOST,PORT)\n\ntcpSerSock=socket(AF_INET,SOCK_STREAM)\ntcpSerSock.bind(ADDR)\ntcpSerSock.listen(20)\n\n\ndef code_execute(number,amount,tcpCliSock):\n    result={}\n    result[\"1:\"]=\"a\"\n    result[\"2:\"]=\"b\"\n\n    tcpCliSock.send(pickle.dumps(result))\n\n\ndef code_write_to_file(tcpCliSock):\n    # print(\"writing the code to a file\")\n    code_str = \"\"\n    while True:\n        # accumulate the received content into code_str\n        line = tcpCliSock.recv(BUFSIZ).decode(\"utf-8\")\n        if not line:\n            break\n        if line==\"###\":\n            # break out of the inner loop\n            break\n        code_str += line\n\n    print(code_str)\n    # write the received content to a file\n    with open(\"compute.py\", \"w\") as f:\n        f.write(code_str)\n\ndef rece_code(tcpCliSock,addr):\n    print(\"connection from\",addr)\n    tcpCliSock.send(\"control connect success\".encode(\"utf-8\"))\n    number=tcpCliSock.recv(BUFSIZ).decode(\"utf-8\")\n    # print(\"number:\",number)\n    amount = tcpCliSock.recv(BUFSIZ).decode(\"utf-8\")\n    # print(\"amount:\", amount)\n    ip1= tcpCliSock.recv(BUFSIZ).decode(\"utf-8\")\n\n\n    # write the code to a file\n    code_write_to_file(tcpCliSock)\n\n    code_execute(number,amount,tcpCliSock)\n    # tcpCliSock.send(\"222222222\".encode(\"utf-8\"))\n    # print(\"about to write\")\n\n\n\n\n\ndef main():\n    while True:\n        print(\"waiting for connection...\")\n        tcpCliSock, addr = tcpSerSock.accept()\n        # receive data\n        data = tcpCliSock.recv(BUFSIZ).decode(\"utf-8\")\n        if data==\"control\":\n            t=threading.Thread(target=rece_code,args=(tcpCliSock,addr))\n            t.start()\n            t.join()\n\n\n\nmain()","sub_path":"Test/homework/server_node.py","file_name":"server_node.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"38264611","text":"#!/usr/bin/env python3.6\n\n# Author: Eric Turgeon\n# License: BSD\n\nimport unittest\nimport sys\nimport os\napifolder = os.getcwd()\nsys.path.append(apifolder)\nfrom auto_config import interface\nfrom functions import POST, PUT\nfrom config import BRIDGEDOMAIN, BRIDGEHOST, BRIDGEDNS, BRIDGEGW\n\n\nclass network(unittest.TestCase):\n\n    
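# unittest runs test methods in alphabetical order, so the numbered names below\n    # ensure DHCP is configured on the interface before the default route and DNS.\n    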
def test_01_configure_interface_dhcp(self):\n        payload = {\"int_dhcp\": \"true\",\n                   \"int_name\": \"ext\",\n                   \"int_interface\": interface}\n        assert POST(\"/network/interface/\", payload) == 201\n\n    def test_02_Setting_default_route_and_DNS(self):\n        payload = {\"gc_domain\": BRIDGEDOMAIN,\n                   \"gc_hostname\": BRIDGEHOST,\n                   \"gc_ipv4gateway\": BRIDGEGW,\n                   \"gc_nameserver1\": BRIDGEDNS}\n        assert PUT(\"/network/globalconfiguration/\", payload) == 200\n\n\nif __name__ == \"__main__\":\n    unittest.main(verbosity=2)\n","sub_path":"tests/create/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"24710117","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def levelOrder(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[List[int]]\n        \"\"\"\n        visited = []\n        def dfs(root,level):\n            if root is None:\n                return []\n            if len(visited) <= level:\n                visited.append([])\n            visited[level].append(root.val)\n            dfs(root.left, level+1)\n            dfs(root.right, level+1)\n        dfs(root, 0)\n        return visited\n\n    class SaveID(Element):\n        def configure(self):\n            self.inp = Input(Int)\n\n        def impl(self):\n            self.run_c(r'''\n            state->core = inp();\n            state->pkt = NULL;\n            ''')\n\n    class SaveState(Element):\n        def configure(self):\n            self.inp = Input(SizeT, \"void *\", \"void *\")\n            self.out = Output()\n\n        def impl(self):\n            self.run_c(r'''\n            (size_t size, void* pkt, void* buff) = inp();\n            iokvs_message* m = (iokvs_message*) pkt;\n            state->pkt = m;\n            state->pkt_buff = buff;\n            output { out(); }\n            ''')\n\n    class GetPktBuff(Element):\n        def configure(self):\n            self.inp = Input()\n            self.out = Output(\"void*\", \"void*\")\n\n        def impl(self):\n            self.run_c(r'''\n            void* pkt = state->pkt;\n            void* pkt_buff = state->pkt_buff;\n            output { out(pkt, pkt_buff); }\n            ''')\n\n    class CheckPacket(Element):\n        def configure(self):\n            self.inp = Input(SizeT, 'void*', 'void*')\n            self.out = Output(SizeT, 'void*', 'void*')\n            self.slowpath = Output( 'void*', 'void*')\n            self.drop = Output('void*', 'void*')\n\n        def impl(self):\n            self.run_c(r'''\n(size_t msglen, void* pkt, void* buff) = inp();\niokvs_message* m = (iokvs_message*) pkt;\n\nint type; // 0 = normal, 1 = slow, 2 = drop\n\nif (m->ether.ether_type == htons(ETHER_TYPE_IPv4) &&\n    m->ipv4.next_proto_id == 17 &&\n    m->ipv4.dst_addr == settings.localip &&\n    m->udp.dst_port == htons(11211) &&\n    msglen >= sizeof(iokvs_message))\n{\n    uint32_t blen = m->mcr.request.bodylen;\n    uint32_t keylen = m->mcr.request.keylen;\n\n    /* Ensure request is complete */\n    if (blen < keylen + m->mcr.request.extlen ||\n        msglen < sizeof(iokvs_message) + blen) {\n        type = 2;\n    }\n    else if (m->mcudp.n_data != htons(1)) {\n        type = 2;\n    }\n    else if (m->mcr.request.opcode != PROTOCOL_BINARY_CMD_GET &&\n             m->mcr.request.opcode != PROTOCOL_BINARY_CMD_SET) {\n        type = 2;\n    }\n    else {\n        type = 0;\n    }\n} else {\n    type = 1;\n}\n\noutput switch {\n    case type==0: out(msglen, m, buff);\n    case type==1: slowpath(m, buff);\n    else: drop(m, buff);\n}\n    ''')\n\n\n    class Classifer(Element):\n        def configure(self):\n            self.inp = Input()\n            self.out_get = Output()\n            self.out_set = Output()\n\n        def impl(self):\n            self.run_c(r'''\nuint8_t cmd = state->pkt->mcr.request.opcode;\n//printf(\"receive: %d\\n\", cmd);\n\noutput switch{\n    case (cmd == PROTOCOL_BINARY_CMD_GET): out_get();\n    case (cmd == PROTOCOL_BINARY_CMD_SET): out_set();\n    // else drop\n}\n    ''')\n\n    class GetKey(ElementOneInOut):\n        def impl(self):\n            self.run_c(r'''\nstate->key = state->pkt->payload + state->pkt->mcr.request.extlen;\noutput { out(); }''')\n\n    class GetCore(ElementOneInOut):\n        def impl(self):\n            
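# Shard requests across worker cores: a simple modulo of the key hash picks the core.\n            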
self.run_c(r'''\nint core = state->hash %s %d;;\nstate->core = core;\n//printf(\"hash = %s, core = %s\\n\", state->hash, core);\n output { out(); }''' % ('%', n_cores, '%d', '%d'))\n\n ######################## hash ########################\n\n class JenkinsHash(ElementOneInOut):\n def impl(self):\n self.run_c(r'''\nstate->hash = jenkins_hash(state->key, state->pkt->mcr.request.keylen);\n//printf(\"hash = %d\\n\", hash);\noutput { out(); }\n ''')\n\n class HashGet(Element):\n def configure(self):\n self.inp = Input()\n self.out = Output()\n self.null = Output()\n\n def impl(self):\n self.run_c(r'''\nitem* it = hasht_get(state->key, state->pkt->mcr.request.keylen, state->hash);\n//printf(\"hash get\\n\");\nstate->it = it;\n\noutput switch { case it: out(); else: null(); }\n ''')\n\n class HashPut(ElementOneInOut):\n def impl(self):\n self.run_c(r'''\n//printf(\"hash put\\n\");\nhasht_put(state->it, NULL);\noutput { out(); }\n ''')\n\n\n ######################## responses ########################\n\n class Scheduler(Element):\n this = Persistent(Schedule)\n\n def configure(self):\n self.out = Output(Int)\n self.this = Schedule()\n\n def impl(self):\n self.run_c(r'''\nthis->core = (this->core + 1) %s %s;\noutput { out(this->core); }''' % ('%', n_cores))\n\n class SizeGetResp(Element):\n def configure(self):\n self.inp = Input()\n self.out = Output(SizeT)\n\n def impl(self):\n self.run_c(r'''\n//printf(\"size get\\n\");\n size_t msglen = sizeof(iokvs_message) + 4 + state->it->vallen;\n state->vallen = state->it->vallen;\n output { out(msglen); }\n ''')\n\n def impl_cavium(self):\n self.run_c(r'''\n uint32_t* vallen\n dma_read(&state->it->vallen, sizeof(uint32_t), (void**) &vallen);\n size_t msglen = sizeof(iokvs_message) + 4 + *vallen;\n state->vallen = *vallen;\n dma_free(vallen);\n output { out(msglen); }\n ''')\n\n class PrepareGetResp(Element):\n def configure(self):\n self.inp = Input(SizeT, 'void*', 'void*')\n self.out = Output(SizeT, Pointer(iokvs_message), 'void*')\n\n def impl(self):\n self.run_c(r'''\n (size_t msglen, void* pkt, void* pkt_buff) = inp();\n\n iokvs_message *m = pkt;\n //memcpy(m, &iokvs_template, sizeof(iokvs_message));\n item* it = state->it;\n\n m->mcr.request.magic = PROTOCOL_BINARY_RES;\n m->mcr.request.opcode = PROTOCOL_BINARY_CMD_GET;\n m->mcr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;\n m->mcr.request.status = PROTOCOL_BINARY_RESPONSE_SUCCESS;\n\nm->mcr.request.keylen = 0;\nm->mcr.request.extlen = 4;\nm->mcr.request.bodylen = 4;\n*((uint32_t *)m->payload) = 0;\nm->mcr.request.bodylen = 4 + state->vallen;\nrte_memcpy(m->payload + 4, item_value(it), state->vallen);\n\noutput { out(msglen, m, pkt_buff); }\n ''')\n\n def impl_cavium(self):\n self.run_c(r'''\n (size_t msglen, void* pkt, void* pkt_buff) = inp();\n iokvs_message *m = pkt;\n //memcpy(m, &iokvs_template, sizeof(iokvs_message));\n int msglen = sizeof(iokvs_message) + 4;\n item* it = state->it;\n\n m->mcr.request.magic = PROTOCOL_BINARY_RES;\n m->mcr.request.opcode = PROTOCOL_BINARY_CMD_GET;\n m->mcr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;\n m->mcr.request.status = PROTOCOL_BINARY_RESPONSE_SUCCESS;\n\nm->mcr.request.keylen = 0;\nm->mcr.request.extlen = 4;\nm->mcr.request.bodylen = 4;\n*((uint32_t *)m->payload) = 0;\nm->mcr.request.bodylen = 4 + state->vallen;\n\nvoid* value;\ndma_read(item_value(it), state->vallen, (void**) &value);\nrte_memcpy(m->payload + 4, value, state->vallen);\ndma_free(value);\n\noutput { out(msglen, m, pkt_buff); }\n ''')\n\n class SizeGetNullResp(Element):\n def 
configure(self):\n self.inp = Input()\n self.out = Output(SizeT)\n\n def impl(self):\n self.run_c(r'''\n//printf(\"size get null\\n\");\n size_t msglen = sizeof(iokvs_message) + 4;\n output { out(msglen); }\n ''')\n\n class PrepareGetNullResp(Element):\n def configure(self):\n self.inp = Input(SizeT, 'void*', 'void*')\n self.out = Output(SizeT, Pointer(iokvs_message), 'void*')\n\n def impl(self):\n self.run_c(r'''\n (size_t msglen, void* pkt, void* pkt_buff) = inp();\n iokvs_message *m = pkt;\n //memcpy(m, &iokvs_template, sizeof(iokvs_message));\n\n m->mcr.request.magic = PROTOCOL_BINARY_RES;\n m->mcr.request.opcode = PROTOCOL_BINARY_CMD_GET;\n m->mcr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;\n m->mcr.request.status = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;\n\n m->mcr.request.keylen = 0;\n m->mcr.request.extlen = 4;\n m->mcr.request.bodylen = 4;\n *((uint32_t *)m->payload) = 0;\n\n output { out(msglen, m, pkt_buff); }\n ''')\n\n class SizeSetResp(Element):\n def configure(self):\n self.inp = Input()\n self.out = Output(SizeT)\n\n def impl(self):\n self.run_c(r'''\n//printf(\"size set\\n\");\n size_t msglen = sizeof(iokvs_message) + 4;\n output { out(msglen); }\n ''')\n\n class SizePktBuffSetResp(Element):\n def configure(self):\n self.inp = Input()\n self.out = Output(SizeT, 'void*', 'void*')\n\n def impl(self):\n self.run_c(r'''\n size_t msglen = sizeof(iokvs_message) + 4;\n void* pkt = state->pkt;\n void* pkt_buff = state->pkt_buff;\n output { out(msglen, pkt, pkt_buff); }\n ''')\n\n class PrepareSetResp(Element):\n def configure(self, status):\n self.inp = Input(SizeT, 'void*', 'void*')\n self.out = Output(SizeT, Pointer(iokvs_message), 'void*')\n self.status = status\n # PROTOCOL_BINARY_RESPONSE_SUCCESS\n # PROTOCOL_BINARY_RESPONSE_ENOMEM\n\n def impl(self):\n self.run_c(r'''\n(size_t msglen, void* pkt, void* pkt_buff) = inp();\niokvs_message *m = pkt;\n//memcpy(m, &iokvs_template, sizeof(iokvs_message));\n\nm->mcr.request.magic = PROTOCOL_BINARY_RES;\nm->mcr.request.opcode = PROTOCOL_BINARY_CMD_SET;\nm->mcr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;\nm->mcr.request.status = %s;\n\nm->mcr.request.keylen = 0;\nm->mcr.request.extlen = 0;\nm->mcr.request.bodylen = 0;\n\noutput { out(msglen, m, pkt_buff); }\n ''' % self.status)\n\n class SizePktBuff(Element):\n def configure(self):\n self.inp = Input(SizeT)\n self.out = Output(SizeT, 'void*', 'void*')\n\n def impl(self):\n self.run_c(r'''\n size_t msglen = inp();\n void* pkt = state->pkt;\n void* pkt_buff = state->pkt_buff;\n output { out(msglen, pkt, pkt_buff); }\n ''')\n\n class PrepareHeader(Element):\n def configure(self):\n self.inp = Input(SizeT, Pointer(iokvs_message), \"void *\")\n self.out = Output(SizeT, \"void *\", \"void *\")\n\n def impl(self):\n self.run_c(r'''\n (size_t msglen, iokvs_message* m, void* buff) = inp();\n\n struct ether_addr mymac = m->ether.d_addr;\n m->ether.d_addr = m->ether.s_addr;\n m->ether.s_addr = mymac; //settings.localmac;\n m->ipv4.dst_addr = m->ipv4.src_addr;\n m->ipv4.src_addr = settings.localip;\n m->ipv4.total_length = htons(msglen - offsetof(iokvs_message, ipv4));\n m->ipv4.time_to_live = 64;\n m->ipv4.hdr_checksum = 0;\n //m->ipv4.hdr_checksum = rte_ipv4_cksum(&m->ipv4);\n\n m->udp.dst_port = m->udp.src_port;\n m->udp.src_port = htons(11211);\n m->udp.dgram_len = htons(msglen - offsetof(iokvs_message, udp));\n m->udp.dgram_cksum = 0;\n\n output { out(msglen, (void*) m, buff); }\n ''')\n\n class HandleArp(Element):\n def configure(self):\n self.inp = Input(\"void *\", \"void *\")\n self.out 
= Output(SizeT, \"void *\", \"void *\")\n self.drop = Output(\"void *\", \"void *\")\n\n def impl(self):\n self.run_c(r'''\n (void* pkt, void* buff) = inp();\n iokvs_message* msg = (iokvs_message*) pkt;\n struct arp_hdr *arp = (struct arp_hdr *) (&msg->ether + 1);\n int resp = 0;\n\n /* Currently we're only handling ARP here */\n if (msg->ether.ether_type == htons(ETHER_TYPE_ARP) &&\n arp->arp_hrd == htons(ARP_HRD_ETHER) && arp->arp_pln == 4 &&\n arp->arp_op == htons(ARP_OP_REQUEST) && arp->arp_hln == 6 &&\n arp->arp_data.arp_tip == settings.localip)\n {\n printf(\"Responding to ARP\\n\");\n resp = 1;\n struct ether_addr mymac = msg->ether.d_addr;\n msg->ether.d_addr = msg->ether.s_addr;\n msg->ether.s_addr = mymac;\n arp->arp_op = htons(ARP_OP_REPLY);\n arp->arp_data.arp_tha = arp->arp_data.arp_sha;\n arp->arp_data.arp_sha = mymac;\n arp->arp_data.arp_tip = arp->arp_data.arp_sip;\n arp->arp_data.arp_sip = settings.localip;\n\n //rte_mbuf_refcnt_update(mbuf, 1); // TODO\n\n/*\n mbuf->ol_flags = PKT_TX_L4_NO_CKSUM;\n mbuf->tx_offload = 0;\n*/\n }\n\n output switch { \n case resp: out(sizeof(struct ether_hdr) + sizeof(struct arp_hdr), pkt, buff); \n else: drop(pkt, buff);\n }\n ''')\n\n\n class PrintMsg(Element):\n def configure(self):\n self.inp = Input(SizeT, \"void *\", \"void *\")\n self.out = Output(SizeT, \"void *\", \"void *\")\n\n def impl(self):\n self.run_c(r'''\n(size_t msglen, void* pkt, void* buff) = inp();\niokvs_message* m = (iokvs_message*) pkt;\nuint8_t *val = m->payload + 4;\nuint8_t opcode = m->mcr.request.opcode;\n\n/*\nif(opcode == PROTOCOL_BINARY_CMD_GET)\n printf(\"GET -- status: %d, len: %d, val:%d\\n\", m->mcr.request.status, m->mcr.request.bodylen, val[0]);\nelse if (opcode == PROTOCOL_BINARY_CMD_SET)\n printf(\"SET -- status: %d, len: %d\\n\", m->mcr.request.status, m->mcr.request.bodylen);\n*/\n\noutput { out(msglen, (void*) m, buff); }\n ''')\n\n\n\n ######################## item ########################\n class GetItemSpec(Element):\n this = Persistent(ItemAllocators)\n def states(self):\n self.this = item_allocators\n\n def configure(self):\n self.inp = Input()\n self.out = Output()\n self.nothing = Output()\n\n def impl(self):\n self.run_c(r'''\n size_t totlen = state->pkt->mcr.request.bodylen - state->pkt->mcr.request.extlen;\n item *it = ialloc_alloc(&this->ia[state->core], sizeof(item) + totlen, false); // TODO\n if(it) {\n it->refcount = 1;\n uint16_t keylen = state->pkt->mcr.request.keylen;\n\n // printf(\"get_item id: %d, keylen: %ld, totlen: %ld, item: %ld\\n\",\n //state->pkt->mcr.request.opaque, state->pkt->mcr.request.keylen, totlen, it);\n it->hv = state->hash;\n it->vallen = totlen - keylen;\n it->keylen = keylen;\n memcpy(item_key(it), state->key, totlen);\n state->it = it;\n }\n\n output switch { case it: out(); else: nothing(); }\n ''')\n\n\n class Unref(ElementOneInOut):\n def impl(self):\n self.run_c(r'''\n item_unref(state->it);\n output { out(); }\n ''')\n\n class Clean(Element):\n def configure(self, val):\n self.inp = Input()\n self.out = Output(Bool)\n self.val = val\n\n def impl(self):\n self.run_c(r'''output { out(%s); }''' % self.val)\n\n class Drop(Element):\n def configure(self):\n self.inp = Input()\n\n def impl(self):\n self.run_c(\"\")\n\n class ForwardBool(Element):\n def configure(self):\n self.inp = Input(Bool)\n self.out = Output(Bool)\n\n def impl(self):\n self.run_c(r'''\n (bool x) = inp();\n output { out(x); }\n ''')\n\n class CleanLog(Element):\n this = Persistent(ItemAllocators)\n\n def states(self):\n self.this = 
item_allocators\n\n        def impl(self):\n            self.run_c(r'''\n            static __thread int count = 0;\n            count++;\n            if(count == 32) {\n              count = 0;\n              clean_log(&this->ia[state->core], state->pkt == NULL);\n            }\n            ''')\n\n    def impl(self):\n        MemoryRegion('data_region', 2 * 1024 * 1024 * 512, init='ialloc_init(data_region);') #4 * 1024 * 512)\n\n\n    ######################## NIC Rx #######################\n    class process_one_pkt(Segment):\n        def impl(self):\n            from_net = FromNet('from_net',configure=[32])\n            from_net_free = FromNetFree('from_net_free')\n            to_net = ToNet('to_net', configure=['from_net',32])\n            classifier = main.Classifer()\n            check_packet = main.CheckPacket()\n            hton1 = HTON(configure=['iokvs_message'])\n            hton2 = HTON(configure=['iokvs_message'])\n\n            prepare_header = main.PrepareHeader()\n            display = main.PrintMsg()\n            drop = main.Drop()\n            save_id = main.SaveID()\n\n            self.core_id >> save_id\n\n            # from_net\n            from_net >> hton1 >> check_packet >> main.SaveState() \\\n            >> main.GetKey() >> main.JenkinsHash() >> classifier\n            from_net.nothing >> drop\n\n            # get\n            hash_get = main.HashGet()\n            get_response = main.PrepareGetResp()\n            classifier.out_get >> hash_get >> main.SizeGetResp() >> main.SizePktBuff() >> get_response >> prepare_header\n            get_response >> main.Unref() >> main.Drop()\n\n            # get (null)\n            hash_get.null >> main.SizeGetNullResp() >> main.SizePktBuff() >> main.PrepareGetNullResp() >> prepare_header\n\n            # set\n            get_item = main.GetItemSpec()\n            set_response = main.PrepareSetResp(configure=['PROTOCOL_BINARY_RESPONSE_SUCCESS'])\n            classifier.out_set >> get_item >> main.HashPut() >> main.Unref() >> main.SizeSetResp() \\\n            >> main.SizePktBuff() >> set_response >> prepare_header\n\n            # set (unsuccessful)\n            set_response_fail = main.PrepareSetResp(configure=['PROTOCOL_BINARY_RESPONSE_ENOMEM'])\n            get_item.nothing >> main.SizeSetResp() >> main.SizePktBuff() >> set_response_fail >> prepare_header\n\n            # exception\n            arp = main.HandleArp()\n            check_packet.slowpath >> arp >> to_net\n            arp.drop >> from_net_free\n            check_packet.drop >> from_net_free\n\n            # send\n            prepare_header >> display >> hton2 >> to_net\n\n            # clean log\n            clean_log = main.CleanLog()\n\n            run_order(save_id, from_net)\n            run_order([to_net, from_net_free, drop], clean_log)\n\n    process_one_pkt('process_one_pkt', process='dpdk', cores=range(n_cores))\n\nmaster_process('dpdk')\n\n\n######################## Run test #######################\nc = Compiler(main)\nc.include = r'''\n#include \"nicif.h\"\n#include \"iokvs.h\"\n#include \"protocol_binary.h\"\n'''\nc.generate_code_as_header()\nc.depend = ['jenkins_hash', 'hashtable', 'ialloc', 'settings', 'dpdk']\nc.compile_and_run('test_no_steer')\n","sub_path":"apps/memcached_cpu_only/main_no_steering.py","file_name":"main_no_steering.py","file_ext":"py","file_size_in_byte":20565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"373297568","text":"class Solution(object):\n    def isValidSudoku(self, board):\n        \"\"\"\n        :type board: List[str]\n        :rtype: bool\n        \"\"\"\n        row = [set() for _ in range(10)]\n        col = [set() for _ in range(10)]\n        block = [set() for _ in range(10)]\n\n        for i, r in enumerate(board):\n            for j, c in enumerate(r):\n                if c != '.':\n                    v = ord(c) - ord('0')\n                    k = i // 3 * 3 + j // 3\n                    if v in row[i] or v in col[j] or v in block[k]:\n                        return False\n\n                    row[i].add(v)\n                    col[j].add(v)\n                    block[k].add(v)\n        return 
True\n","sub_path":"solutions/valid_sudoku_2.py","file_name":"valid_sudoku_2.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"339586174","text":"\"\"\"\nImplementation of Binary Search\nTime Complexity = O(log n)\n\"\"\"\n# Iterative Solution\ndef binary_srch(arr, t):\n i = 0 # first position\n l = len(arr)-1 # last position\n found = False\n\n while i <= l and not found:\n mid = (i + l) // 2\n\n\n if arr[mid] == t:\n found = True\n return found\n else:\n if arr[mid] > t:\n l = mid - 1\n else:\n i = mid + 1\n\n return found\n\nmy_list = [1, 2, 3, 4, 5, 6, 8]\nprint(binary_srch(my_list, 5))\nprint(binary_srch(my_list, 2))\nprint(binary_srch(my_list, 7))\n\n# Recursive Solution\ndef binary_srch_rec(arr, t):\n if len(arr) == 0:\n return False\n\n mid = len(arr) // 2\n\n if arr[mid] == t:\n return True\n else:\n if arr[mid] > t:\n return binary_srch_rec(arr[:mid], t)\n else:\n return binary_srch_rec(arr[mid+1:], t)\n\nprint(\"#\")\nmy_list = [1, 2, 3, 4, 5, 6, 8]\nprint(binary_srch_rec(my_list, 5))\nprint(binary_srch_rec(my_list, 2))\nprint(binary_srch_rec(my_list, 7))\n","sub_path":"Searching Algos/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"462822097","text":"import plugins\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef meh(bot, event):\n s = requests.Session()\n r = s.get(\"http://meh.com\")\n soup = BeautifulSoup(r.content)\n sout = soup.h2\n for meh_string in sout.stripped_strings:\n (repr(meh_string))\n output = \"Meh {} https://meh.com\".format(meh_string)\n yield from bot.coro_send_message(event.conv, output)\n\ndef _initialise(bot):\n plugins.register_user_command([\"meh\"])\n","sub_path":"meh.py","file_name":"meh.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"611330209","text":"T = int(raw_input())\n\nfor case in xrange(1, T+1):\n [K, C, S] = map(int, raw_input().split())\n if C == 1:\n if S < K:\n print(\"Case #%d: IMPOSSIBLE\" % (case))\n else:\n print(\"Case #%d: %s\" % (case, \" \".join(map(str,range(1,K+1)))))\n continue\n if 2*S < K:\n print(\"Case #%d: IMPOSSIBLE\" % (case))\n continue\n res = []\n x = 0\n pot = K ** C\n while x < K/2:\n res.append(2*x*(K+1) + 2)\n x+=1\n if K%2:\n res.append(K)\n\n print(\"Case #%d: %s\" % (case, \" \".join(map(str, res))))\n","sub_path":"solutions_5636311922769920_1/Python/Malkava/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"268392771","text":"import os;\nimport os.path;\n\nsrc_path = \".\"\ndst_path = \"../minimal_bs_shared_ptr\"\n\nfrom_namespace = \"boost\"\nto_namespace = \"bs\"\n\nfrom_shared_ptr = \"shared_ptr\"\nto_shared_ptr = \"ptr\"\n\nfrom_make_shared = \"make_shared\"\nto_make_shared = \"make\"\n\nfrom_enable_shared_from_this = \"enable_shared_from_this\"\nto_enable_shared_from_this = \"enable_ptr_from_this\"\n\nfrom_shared_from_this = \"shared_from_this\"\nto_shared_from_this = \"ptr_from_this\"\n\nfrom_static_pointer_cast = \"static_pointer_cast\"\nto_static_pointer_cast = \"static_ptr_cast\"\n\nfrom_const_pointer_cast = \"const_pointer_cast\"\nto_const_pointer_cast = \"const_ptr_cast\"\n\nfrom_dynamic_pointer_cast = 
\"dynamic_pointer_cast\"\nto_dynamic_pointer_cast = \"dynamic_ptr_cast\"\n\nfrom_reinterpret_pointer_cast = \"reinterpret_pointer_cast\"\nto_reinterpret_pointer_cast = \"reinterpret_ptr_cast\"\n\n\ndef rename(src):\n dst = os.path.join(dst_path, src[len(src_path) + 1:])\n dst = dst.replace(\"/{0}/\".format(from_namespace), \"/{0}/\".format(to_namespace))\n\n dstdir = os.path.dirname(dst)\n if not os.path.isdir(dstdir):\n os.makedirs(dstdir)\n\n fin = open(src)\n lines = fin.readlines()\n fin.close()\n\n fout = open(dst, \"w\")\n for line in lines:\n # namespace\n line = line.replace(\"<{0}/\".format(from_namespace), \"<{0}/\".format(to_namespace))\n line = line.replace(\"namespace {0}\".format(from_namespace), \"namespace {0}\".format(to_namespace))\n line = line.replace(\"{0}::\".format(from_namespace), \"{0}::\".format(to_namespace))\n line = line.replace(\"{0}_\".format(from_namespace.upper()), \"{0}_\".format(to_namespace.upper()))\n # shared_ptr\n line = line.replace(\"::{0}\".format(from_shared_ptr), \"::{0}\".format(to_shared_ptr))\n line = line.replace(\" {0}\".format(from_shared_ptr), \" {0}\".format(to_shared_ptr))\n line = line.replace(\"({0}\".format(from_shared_ptr), \"({0}\".format(to_shared_ptr))\n line = line.replace(\"<{0}\".format(from_shared_ptr), \"<{0}\".format(to_shared_ptr))\n # make_shared\n line = line.replace(\"::{0}\".format(from_make_shared), \"::{0}\".format(to_make_shared))\n line = line.replace(\" {0}\".format(from_make_shared), \" {0}\".format(to_make_shared))\n # enable_shared_from_this\n line = line.replace(\"::{0}\".format(from_enable_shared_from_this), \"::{0}\".format(to_enable_shared_from_this))\n line = line.replace(\" {0}\".format(from_enable_shared_from_this), \" {0}\".format(to_enable_shared_from_this))\n line = line.replace(\"~{0}\".format(from_enable_shared_from_this), \"~{0}\".format(to_enable_shared_from_this))\n line = line.replace(\"({0}\".format(from_enable_shared_from_this), \"({0}\".format(to_enable_shared_from_this))\n # shared_from_this\n line = line.replace(\" {0}\".format(from_shared_from_this), \" {0}\".format(to_shared_from_this))\n line = line.replace(\".{0}\".format(from_shared_from_this), \".{0}\".format(to_shared_from_this))\n line = line.replace(\"->{0}\".format(from_shared_from_this), \"->{0}\".format(to_shared_from_this))\n # static_pointer_cast\n line = line.replace(\"{0}\".format(from_static_pointer_cast), \"{0}\".format(to_static_pointer_cast))\n # const_pointer_cast\n line = line.replace(\"{0}\".format(from_const_pointer_cast), \"{0}\".format(to_const_pointer_cast))\n # dynamic_pointer_cast\n line = line.replace(\"{0}\".format(from_dynamic_pointer_cast), \"{0}\".format(to_dynamic_pointer_cast))\n # reinterpret_pointer_cast\n line = line.replace(\"{0}\".format(from_reinterpret_pointer_cast), \"{0}\".format(to_reinterpret_pointer_cast))\n\n fout.write(line)\n fout.close()\n\n\ndef walkdir(path):\n for item in os.listdir(path):\n fullpath = os.path.join(path, item)\n if os.path.isdir(fullpath):\n if item.startswith(\".\"):\n continue\n else:\n walkdir(os.path.join(path, item))\n else:\n if item.startswith(\".\"):\n continue\n rename(os.path.join(path, item))\n\nwalkdir(src_path)\n","sub_path":"rename_tool.py","file_name":"rename_tool.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"8855735","text":"from sklearn.svm import SVC\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\niris 
= load_iris()\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state = 0)\n\nprint(\"size of training set: {}\".format(len(X_train)))\nprint(\"size of test set: {}\".format(len(X_test)))\n\nbestScore = 0\nbestParameters = None\n\nfor gamma in [0.001, 0.01, 0.1, 1, 10, 100]:\n\tfor C in [0.001, 0.01, 0.1, 1, 10, 100]:\n\t\tsvm = SVC(gamma = gamma, C = C).fit(X_train, y_train)\n\t\tscore = svm.score(X_test, y_test)\n\t\tif score > bestScore:\n\t\t\tbestScore = score\n\t\t\tbestParameters = {'C': C, 'gamma': gamma}\n\nprint(\"best score: {:.3f}\".format(bestScore))\nprint(\"best parameters: {}\".format(bestParameters))\n","sub_path":"Chapter5/tuanhtran/simpleGridSearch.py","file_name":"simpleGridSearch.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"232972669","text":"# coding: utf-8\n\"\"\"\npython preprocess.py --num_workers 10 --name son --in_dir D:\\hccho\\multi-speaker-tacotron-tensorflow-master\\datasets\\son --out_dir .\\data\\son\npython preprocess.py --num_workers 10 --name moon --in_dir D:\\hccho\\multi-speaker-tacotron-tensorflow-master\\datasets\\moon --out_dir .\\data\\moon\n ==> out_dir에 'audio', 'mel', 'linear', 'time_steps', 'mel_frames', 'text', 'tokens', 'loss_coeff'를 묶은 npz파일이 생성된다.\n \n \n \n\"\"\"\nimport argparse\nimport os\nimport json\nfrom multiprocessing import cpu_count\nfrom tqdm import tqdm\nfrom hparams import hparams, hparams_debug_string\nimport warnings\nimport nltk\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nimport numpy as np\nfrom utils import audio\nfrom text import text_to_sequence\n\nnltk.download('punkt')\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\ndef _process_utterance(out_dir, wav_path, text, hparams):\n \"\"\"\n Preprocesses a single utterance wav/text pair\n\n this writes the mel scale spectogram to disk and return a tuple to write\n to the train.txt file\n\n Args:\n - mel_dir: the directory to write the mel spectograms into\n - linear_dir: the directory to write the linear spectrograms into\n - wav_dir: the directory to write the preprocessed wav into\n - index: the numeric index to use in the spectogram filename\n - wav_path: path to the audio file containing the speech input\n - text: text spoken in the input audio file\n - hparams: hyper parameters\n\n Returns:\n - A tuple: (audio_filename, mel_filename, linear_filename, time_steps, mel_frames, linear_frames, text)\n \"\"\"\n try:\n # Load the audio as numpy array\n wav = audio.load_wav(wav_path, sr=hparams.sample_rate)\n except FileNotFoundError: # catch missing wav exception\n print('file {} present in csv metadata is not present in wav folder. 
skipping!'.format(\n wav_path))\n return None\n\n # rescale wav\n if hparams.rescaling: # hparams.rescale = True\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n\n # M-AILABS extra silence specific\n if hparams.trim_silence: # hparams.trim_silence = True\n wav = audio.trim_silence(wav, hparams) # Trim leading and trailing silence\n\n # Mu-law quantize, default 값은 'raw'\n if hparams.input_type == 'mulaw-quantize':\n # [0, quantize_channels)\n out = audio.mulaw_quantize(wav, hparams.quantize_channels)\n\n # Trim silences\n start, end = audio.start_and_end_indices(out, hparams.silence_threshold)\n wav = wav[start: end]\n out = out[start: end]\n\n constant_values = audio.mulaw_quantize(0, hparams.quantize_channels)\n out_dtype = np.int16\n\n elif hparams.input_type == 'mulaw':\n # [-1, 1]\n out = audio.mulaw(wav, hparams.quantize_channels)\n constant_values = audio.mulaw(0., hparams.quantize_channels)\n out_dtype = np.float32\n\n else: # raw\n # [-1, 1]\n out = wav\n constant_values = 0.\n out_dtype = np.float32\n\n # Compute the mel scale spectrogram from the wav\n mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)\n mel_frames = mel_spectrogram.shape[1]\n\n if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: # hparams.max_mel_frames = 1000, hparams.clip_mels_length = True\n return None\n\n # Compute the linear scale spectrogram from the wav\n linear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32)\n linear_frames = linear_spectrogram.shape[1]\n\n # sanity check\n assert linear_frames == mel_frames\n\n if hparams.use_lws: # hparams.use_lws = False\n # Ensure time resolution adjustement between audio and mel-spectrogram\n fft_size = hparams.fft_size if hparams.win_size is None else hparams.win_size\n l, r = audio.pad_lr(wav, fft_size, audio.get_hop_size(hparams))\n\n # Zero pad audio signal\n out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)\n else:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n pad = audio.librosa_pad_lr(wav, hparams.fft_size, audio.get_hop_size(hparams))\n\n # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)\n out = np.pad(out, pad, mode='reflect')\n\n assert len(out) >= mel_frames * audio.get_hop_size(hparams)\n\n # time resolution adjustement\n # ensure length of raw audio is multiple of hop size so that we can use\n # transposed convolution to upsample\n out = out[:mel_frames * audio.get_hop_size(hparams)]\n assert len(out) % audio.get_hop_size(hparams) == 0\n time_steps = len(out)\n\n # Write the spectrogram and audio to disk\n wav_id = os.path.splitext(os.path.basename(wav_path))[0]\n\n # Write the spectrograms to disk:\n audio_filename = '{}-audio.npy'.format(wav_id)\n mel_filename = '{}-mel.npy'.format(wav_id)\n linear_filename = '{}-linear.npy'.format(wav_id)\n npz_filename = '{}.npz'.format(wav_id)\n npz_flag = True\n if npz_flag:\n # Tacotron 코드와 맞추기 위해, 같은 key를 사용한다.\n data = {\n 'audio': out.astype(out_dtype),\n 'mel': mel_spectrogram.T,\n 'linear': linear_spectrogram.T,\n 'time_steps': time_steps,\n 'mel_frames': mel_frames,\n 'text': text,\n 'tokens': text_to_sequence(text), # eos(~)에 해당하는 \"1\"이 끝에 붙는다.\n 'loss_coeff': 1 # For Tacotron\n }\n\n np.savez(os.path.join(out_dir, npz_filename), **data, allow_pickle=False)\n else:\n np.save(os.path.join(out_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)\n np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)\n 
np.save(os.path.join(out_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)\n\n # Return a tuple describing this training example\n return (audio_filename, mel_filename, linear_filename, time_steps, mel_frames, text, npz_filename)\n\n\ndef build_from_path(hparams, in_dir, out_dir, num_workers=1, tqdm=lambda x: x):\n \"\"\"\n Preprocesses the speech dataset from a given input path to given output directories\n\n Args:\n - hparams: hyper parameters\n - input_dir: input directory that contains the files to preprocess\n - out_dir: output directory of npz files\n - n_jobs: Optional, number of worker process to parallelize across\n - tqdm: Optional, provides a nice progress bar\n\n Returns:\n - A list of tuples describing the train examples. This should be written to train.txt\n \"\"\"\n\n executor = ProcessPoolExecutor(max_workers=num_workers)\n futures = []\n index = 1\n\n path = os.path.join(in_dir, 'alignment.json')\n\n with open(path, encoding='utf-8') as f:\n content = f.read()\n data = json.loads(content)\n for key, text in data.items():\n wav_path = key.strip().split('/')\n wav_path = os.path.join(in_dir, 'audio', '%s' % wav_path[-1])\n # In case of test file\n if not os.path.exists(wav_path):\n continue\n futures.append(executor.submit(partial(_process_utterance, out_dir, wav_path, text, hparams)))\n index += 1\n\n return [future.result() for future in tqdm(futures) if future.result() is not None]\n\n\ndef preprocess(in_dir, out_dir, num_workers):\n os.makedirs(out_dir, exist_ok=True)\n metadata = build_from_path(hparams, in_dir, out_dir, num_workers=num_workers, tqdm=tqdm)\n write_metadata(metadata, out_dir)\n\n\ndef write_metadata(metadata, out_dir):\n with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:\n for m in metadata:\n f.write('|'.join([str(x) for x in m]) + '\\n')\n mel_frames = sum([int(m[4]) for m in metadata])\n timesteps = sum([int(m[3]) for m in metadata])\n sr = hparams.sample_rate\n hours = timesteps / sr / 3600\n print('Wrote {} utterances, {} mel frames, {} audio timesteps, ({:.2f} hours)'.format(len(metadata), mel_frames,\n timesteps, hours))\n print('Max input length (text chars): {}'.format(max(len(m[5]) for m in metadata)))\n print('Max mel frames length: {}'.format(max(int(m[4]) for m in metadata)))\n print('Max audio timesteps length: {}'.format(max(m[3] for m in metadata)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', type=str, default=None)\n parser.add_argument('--in_dir', type=str, default=None)\n parser.add_argument('--out_dir', type=str, default=None)\n parser.add_argument('--num_workers', type=str, default=None)\n parser.add_argument('--hparams', type=str, default=None)\n args = parser.parse_args()\n\n if args.hparams is not None:\n hparams.parse(args.hparams)\n print(hparams_debug_string())\n\n name = args.name\n in_dir = args.in_dir\n out_dir = args.out_dir\n num_workers = args.num_workers\n num_workers = cpu_count() if num_workers is None else int(num_workers) # cpu_count() = process 갯수\n print(\"Sampling frequency: {}\".format(hparams.sample_rate))\n preprocess(in_dir, out_dir, num_workers)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":9375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"34091213","text":"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n# class Solution:\n# def 
visit(self,ls:'list',root:'Node'):\n# if root == None:\n# return\n# ls.append(root.val)\n# for i in root.children:\n# self.visit(ls,i) # 递归\n#\n# def preorder(self, root: 'Node'):\n# ls = []\n# self.visit(ls,root)\n# return ls\n\nclass Solution:\n def visit(self,root:'Node'):\n ls = []\n if root == None:\n return []\n stack = []\n stack.append(root)\n while stack:\n node = stack.pop(-1)\n ls.append(node.val)\n for i in range(len(node.children)-1,-1,-1):\n stack.append(node.children[i])\n return ls\n\nfor i in range(5,1, -1):\n print(i)","sub_path":"src/N叉树的前序遍历.py","file_name":"N叉树的前序遍历.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"48827242","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sbn\nimport pandas as pd\n\nfrom brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable, MultivariateNormalVariable\nfrom brancher import inference\nimport brancher.functions as BF\n\n# N repetitions\nN_rep = 15 #10\n\n# Data list\ncondition_list = [lambda t: (t < 10 or t > 20), lambda t: (t < 0 or t > 20), lambda t: True]\ncondition_label = [\"Bridge\", \"Past\", \"Full\"]\n\nN_itr = 200\nN_smpl = 20\noptimizer = \"Adam\"\nlr = 0.05 #0.0002\nN_ELBO_smpl = 1000\n\n\nfor cond, label in zip(condition_list, condition_label):\n ELBO1 = []\n ELBO2 = []\n ELBO3 = []\n ELBO4 = []\n for rep in range(N_rep):\n print(\"Repetition: {}\".format(rep))\n # Probabilistic model #\n T = 30\n dt = 0.02\n driving_noise = 0.1 #0.1\n measure_noise = 1.\n s = 10.\n r = 28.\n b = 8 / 3.\n x0 = NormalVariable(0., driving_noise, 'x0')\n y0 = NormalVariable(x0, measure_noise, 'y0')\n h0 = NormalVariable(0., driving_noise, 'h0')\n z0 = NormalVariable(0., driving_noise, 'z0')\n\n x = [x0]\n h = [h0]\n z = [z0]\n y = [y0]\n x_names = [\"x0\"]\n h_names = [\"h0\"]\n z_names = [\"z0\"]\n y_names = [\"y0\"]\n y_range = [t for t in range(T) if cond(t)]\n for t in range(1, T):\n x_names.append(\"x{}\".format(t))\n h_names.append(\"h{}\".format(t))\n z_names.append(\"z{}\".format(t))\n new_x = x[t - 1] + dt * s * (h[t - 1] - x[t - 1])\n new_h = h[t - 1] + dt * (x[t - 1] * (r - z[t - 1]) - h[t - 1])\n new_z = z[t - 1] + dt * (x[t - 1] * h[t - 1] - b * z[t - 1])\n x.append(NormalVariable(new_x, np.sqrt(dt) * driving_noise, x_names[t]))\n h.append(NormalVariable(new_h, np.sqrt(dt) * driving_noise, h_names[t]))\n z.append(NormalVariable(new_z, np.sqrt(dt) * driving_noise, z_names[t]))\n if t in y_range:\n y_name = \"y{}\".format(t)\n y_names.append(y_name)\n y.append(NormalVariable(x[t], measure_noise, y_name))\n AR_model = ProbabilisticModel(x + y)\n\n # Generate data #\n data = AR_model._get_sample(number_samples=1)\n time_series = [float(data[yt].data) for yt in y]\n ground_truth = [float(data[xt].data) for xt in x]\n\n # Observe data #\n [yt.observe(data[yt][:, 0, :]) for yt in y]\n\n # Structured variational distribution #\n Qx = [NormalVariable(0., 1., 'x0', learnable=True)]\n Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]\n Qxlambda = [RootVariable(0.5, 'x0_lambda', learnable=True)]\n\n Qh = [NormalVariable(0., 1., 'h0', learnable=True)]\n Qh_mean = [RootVariable(0., 'h0_mean', learnable=True)]\n Qhlambda = [RootVariable(0.5, 'h0_lambda', learnable=True)]\n\n Qz = [NormalVariable(0., 1., 'z0', learnable=True)]\n Qz_mean = [RootVariable(0., 'z0_mean', learnable=True)]\n Qzlambda = 
[RootVariable(0.5, 'z0_lambda', learnable=True)]\n\n for t in range(1, T):\n if t in y_range:\n l = 1. # 2\n else:\n l = 1. # 2\n Qx_mean.append(RootVariable(0, x_names[t] + \"_mean\", learnable=True))\n Qxlambda.append(RootVariable(l, x_names[t] + \"_lambda\", learnable=True))\n\n Qh_mean.append(RootVariable(0, h_names[t] + \"_mean\", learnable=True))\n Qhlambda.append(RootVariable(l, h_names[t] + \"_lambda\", learnable=True))\n\n Qz_mean.append(RootVariable(0, z_names[t] + \"_mean\", learnable=True))\n Qzlambda.append(RootVariable(l, z_names[t] + \"_lambda\", learnable=True))\n\n new_x = Qx[t - 1] + dt * s * (Qh[t - 1] - Qx[t - 1])\n new_h = Qh[t - 1] + dt * (Qx[t - 1] * (r - Qz[t - 1]) - Qh[t - 1])\n new_z = Qz[t - 1] + dt * (Qx[t - 1] * Qh[t - 1] - b * Qz[t - 1])\n\n Qx.append(NormalVariable(BF.sigmoid(Qxlambda[t]) * new_x + (1 - BF.sigmoid(Qxlambda[t])) * Qx_mean[t],\n np.sqrt(dt) * driving_noise, x_names[t], learnable=True))\n\n Qh.append(NormalVariable(BF.sigmoid(Qhlambda[t]) * new_h + (1 - BF.sigmoid(Qhlambda[t])) * Qh_mean[t],\n np.sqrt(dt) * driving_noise, h_names[t], learnable=True))\n\n Qz.append(NormalVariable(BF.sigmoid(Qzlambda[t]) * new_z + (1 - BF.sigmoid(Qzlambda[t])) * Qz_mean[t],\n np.sqrt(dt) * driving_noise, z_names[t], learnable=True))\n\n variational_posterior = ProbabilisticModel(Qx + Qh + Qz)\n AR_model.set_posterior_model(variational_posterior)\n\n # Inference #\n inference.perform_inference(AR_model,\n number_iterations=N_itr,\n number_samples=N_smpl,\n optimizer=optimizer,\n lr=lr)\n\n loss_list1 = AR_model.diagnostics[\"loss curve\"]\n\n\n # ELBO\n ELBO1.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))\n print(\"PE {}\".format(ELBO1[-1]))\n\n # Mean field\n Qx = [NormalVariable(0., 1., 'x0', learnable=True)]\n Qh = [NormalVariable(0., 1., 'h0', learnable=True)]\n Qz = [NormalVariable(0., 1., 'z0', learnable=True)]\n\n for t in range(1, T):\n Qx.append(NormalVariable(0., driving_noise, x_names[t], learnable=True))\n Qh.append(NormalVariable(0., driving_noise, h_names[t], learnable=True))\n Qz.append(NormalVariable(0., driving_noise, z_names[t], learnable=True))\n\n variational_posterior = ProbabilisticModel(Qx + Qh + Qz)\n AR_model.set_posterior_model(variational_posterior)\n\n # Inference #\n inference.perform_inference(AR_model,\n number_iterations=N_itr,\n number_samples=N_smpl,\n optimizer=optimizer,\n lr=lr)\n\n loss_list2 = AR_model.diagnostics[\"loss curve\"]\n\n # ELBO\n ELBO2.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))\n print(\"MF {}\".format(ELBO2[-1]))\n\n # Multivariate normal variational distribution #\n\n QV = MultivariateNormalVariable(loc=np.zeros((3*T,)),\n scale_tril=np.identity(3*T),\n name=\"V\",\n learnable=True)\n Qx = [NormalVariable(QV[0], 0.1, 'x0', learnable=True)]\n Qh = [NormalVariable(QV[0], 0.1, 'h0', learnable=True)]\n Qz = [NormalVariable(QV[0], 0.1, 'z0', learnable=True)]\n\n for t in range(1, T):\n Qx.append(NormalVariable(QV[t], driving_noise, x_names[t], learnable=True))\n Qh.append(NormalVariable(QV[T + t], driving_noise, h_names[t], learnable=True))\n Qz.append(NormalVariable(QV[2*T + t], driving_noise, z_names[t], learnable=True))\n variational_posterior = ProbabilisticModel(Qx + Qh + Qz)\n AR_model.set_posterior_model(variational_posterior)\n\n # Inference #\n inference.perform_inference(AR_model,\n number_iterations=N_itr,\n number_samples=N_smpl,\n optimizer=optimizer,\n lr=lr)\n\n loss_list3 = AR_model.diagnostics[\"loss curve\"]\n\n # ELBO\n 
ELBO3.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))\n print(\"MN {}\".format(ELBO3[-1]))\n\n # Structured NN distribution #\n hidden_size = 3*10\n latent_size = 3*10\n Qepsilon = NormalVariable(np.zeros((hidden_size, 1)), np.ones((hidden_size,)), 'epsilon', learnable=True)\n W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)), \"W1\", learnable=True)\n W2 = RootVariable(np.random.normal(0, 0.1, (3*T, hidden_size)), \"W2\", learnable=True)\n pre_x = BF.matmul(W2, BF.sigmoid(BF.matmul(W1, Qepsilon)))\n Qx = []\n Qh = []\n Qz = []\n for t in range(0, T):\n Qx.append(NormalVariable(pre_x[t], driving_noise, x_names[t], learnable=True))\n Qh.append(NormalVariable(pre_x[T + t], driving_noise, h_names[t], learnable=True))\n Qz.append(NormalVariable(pre_x[2*T + t], driving_noise, z_names[t], learnable=True))\n variational_posterior = ProbabilisticModel(Qx + Qh + Qz)\n AR_model.set_posterior_model(variational_posterior)\n\n # Inference #\n inference.perform_inference(AR_model,\n number_iterations=N_itr,\n number_samples=N_smpl,\n optimizer=optimizer,\n lr=lr)\n\n loss_list4 = AR_model.diagnostics[\"loss curve\"]\n\n # ELBO\n ELBO4.append(float(AR_model.estimate_log_model_evidence(N_ELBO_smpl).detach().numpy()))\n print(\"NN {}\".format(ELBO4[-1]))\n\n # plt.plot(loss_list1)\n # plt.plot(loss_list2)\n # plt.plot(loss_list3)\n # plt.plot(loss_list4)\n # plt.show()\n\n d = {'PE': ELBO1, 'MF': ELBO2, \"MN\": ELBO3, \"NN\": ELBO4}\n\n import pickle\n with open('{}_lorentz_results.pickle'.format(label), 'wb') as f:\n pickle.dump(d, f)\n\n df = pd.DataFrame(data=d)\n df.boxplot()\n plt.title(label)\n plt.ylabel(\"ELBO\")\n plt.savefig(\"Lorentz \" +label+\".pdf\")\n plt.clf()\n #plt.show()\n\n\n","sub_path":"development_playgrounds/StructuredInferencePlaygroundLorentzExperiment.py","file_name":"StructuredInferencePlaygroundLorentzExperiment.py","file_ext":"py","file_size_in_byte":9761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"571638682","text":"import kaa\nfrom kaa.filetype.default import modebase\nfrom kaa.theme import Theme, Style\n\nclass StatusInfo:\n updated = False\n def __init__(self):\n self.infos = {\n 'filename':'',\n 'modified_mark':'',\n }\n\n def set_info(self, **values):\n ret = False\n for name, value in values.items():\n updated = True\n if name in self.infos:\n if value == self.infos[name]:\n updated = False\n\n self.infos[name] = value\n\n self.updated = self.updated or updated\n ret = ret or updated\n return ret\n\n def get_info(self, name, default=None):\n return self.infos.get(name, default)\n\n\n\nStatusBarTheme = Theme('default', [\n Style('default', 'red', 'cyan', False, False),\n Style('filename', 'magenta', 'cyan'),\n Style('msg', 'default', 'default'),\n])\n\n\nclass StatusBarMode(modebase.ModeBase):\n STATUSBAR_MESSAGE = '{filename}[{modified_mark}]'\n\n def __init__(self):\n super().__init__()\n self.statusinfo = StatusInfo()\n\n def init_theme(self):\n self.theme = StatusBarTheme\n\n def on_set_document(self, doc):\n super().on_set_document(doc)\n doc.undo = None\n\n def on_add_window(self, wnd):\n super().on_add_window(wnd)\n\n def build_status(self, statusbar):\n d = statusbar.infos.copy()\n s = self.STATUSBAR_MESSAGE.format(**d)\n\n self.document.replace(0, self.document.endpos(), s)\n self.document.styles.setints(0, self.document.endpos(),\n self.get_styleid('filename'))\n\n def set_info(self, **values):\n ret = self.statusinfo.set_info(**values)\n return 
ret\n\n def on_idle(self):\n if not self.closed:\n ret = super().on_idle()\n if not ret:\n if self.statusinfo.updated:\n self.build_status(self.statusinfo)\n self.statusinfo.updated = False\n ret = True\n return ret\n","sub_path":"kaa/ui/statusbar/statusbarmode.py","file_name":"statusbarmode.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"253628120","text":"from django.shortcuts import render_to_response\nfrom django.shortcuts import get_object_or_404\nfrom django.template import RequestContext\n\nfrom csnews.models import Article\nfrom csnews.diggpaginator import DiggPaginator\n\nimport time\nfrom datetime import datetime\n\nARTICLE_NUMBER_PER_PAGE = 20\n\ndef _get_page(list,page):\n \"\"\" \"\"\"\n paginator = DiggPaginator(list, ARTICLE_NUMBER_PER_PAGE, body=5, padding=2)\n try:\n page = int(page)\n except ValueError:\n page = 1\n\n try:\n tor = paginator.page(page)\n except:\n tor = paginator.page(paginator.num_pages)\n return tor\n\ndef index(request):\n \"\"\" \"\"\"\n h = {}\n \n h['articles'] = Article.objects.filter(is_public=True)\n h['page'] = _get_page(h['articles'],request.GET.get('orria', '1')) \n return render_to_response('news/articles.html',h,context_instance=RequestContext(request))\n\ndef article_index(request,article_slug):\n \"\"\" \"\"\"\n h = {}\n h['obj'] = get_object_or_404(Article,slug=article_slug)\n return render_to_response('news/article.html',h,context_instance=RequestContext(request))\n\ndef hemeroteka(request):\n \"\"\" \"\"\"\n h = {}\n return render_to_response('news/hemeroteka.html',h,context_instance=RequestContext(request))\n","sub_path":"csnews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"538950594","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nfrom six import string_types\n\n'''\nParameters\nhessianThreshold\n Threshold for hessian keypoint detector used in SURF.\nnOctaves\n Number of pyramid octaves the keypoint detector will use.\nnOctaveLayers\n Number of octave layers within each octave.\nextended\n Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).\nupright\n Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation). 
\n'''\n\nclass detectionSURF:\n def __init__(self):\n #model parameters\n self.hessianThreshold = 100\n self.nOctaves = 4\n self.nOctaveLayers = 3\n self.extended = 0\n self.upright = 0\n \n #model flags\n self.color = [255,0,0]\n self.flagConsole = 0\n self.console = ''\n self.drawFlag = cv2.DRAW_MATCHES_FLAGS_DEFAULT\n self.ready = 0\n \n def setInput(self, inpImg):\n if isinstance(inpImg, string_types):\n if os.path.exists(inpImg):\n self.img = cv2.imread(inpImg)\n self.ready = 1\n else:\n self.console = 'file not found'\n if self.flagConsole != 0:\n print(self.console)\n else:\n self.img = inpImg\n self.ready = 1\n \n def setParameter(self, para, value):\n if para == 'hessianThreshold':\n self.hessianThreshold = value\n elif para == 'nOctaves':\n self.nOctaves = value\n elif para == 'nOctaveLayers':\n self.nOctaveLayers = value\n elif para == 'extended':\n if value > 0:\n self.extended = 1\n else:\n self.extended = 0\n elif para == 'upright':\n if value > 0:\n self.upright = 1\n else:\n self.upright = 0\n else:\n self.console = 'There is no parameter named ' + para\n if self.flagConsole != 0:\n print(self.console)\n \n def setDrawFlag(self, flag):\n if flag == 'default':\n self.drawFlag = cv2.DRAW_MATCHES_FLAGS_DEFAULT\n elif flag == 'draw_over_outimg':\n self.drawFlag = cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG\n elif flag == 'not_draw_single_points':\n self.drawFlag = cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS\n elif flag == 'draw_rich_keypoints':\n self.drawFlag = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS\n else:\n self.console = 'There is no flag named ' + flag\n if self.flagConsole != 0:\n print(self.console)\n \n def setColor(self, color):\n if len(color) == 3:\n colorTemp = [0,0,0]\n for i in range(0, 3):\n if color[i] >= 0 and color[i] <= 255:\n colorTemp[i] = color[i]\n elif color[i] < 0:\n colorTemp[i] = 0\n self.console = 'color input error'\n if self.flagConsole != 0:\n print(self.console)\n else:\n colorTemp[i] = 255\n self.console = 'color input error'\n if self.flagConsole != 0:\n print(self.console)\n self.color = colorTemp\n else:\n self.console = 'color input error'\n if self.flagConsole != 0:\n print(self.console)\n \n def genOutput(self):\n if self.ready >= 1:\n gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n surf = cv2.xfeatures2d.SURF_create(\n self.hessianThreshold, self.nOctaves, self.nOctaveLayers, self.extended, self.upright)\n self.kp, self.des = surf.detectAndCompute(gray, None)\n self.ready = 2\n else:\n self.console = 'Please set an input file.'\n if self.flagConsole != 0:\n print(self.console)\n \n def genPaint(self):\n if self.ready == 2:\n dummy = np.zeros((1,1))\n self.output = cv2.drawKeypoints(self.img, self.kp, dummy, color=(self.color[0],self.color[1],self.color[2]), flags=self.drawFlag)\n elif self.ready == 1:\n self.console = 'Generate output first.'\n if self.flagConsole != 0:\n print(self.console)\n else:\n self.console = 'Please set an input file.'\n if self.flagConsole != 0:\n print(self.console)\n\nif __name__ == '__main__':\n inpImg = '../../img\\\\lena.jpg'\n img = cv2.imread(inpImg)\n \n app = detectionSURF()\n app.setInput(inpImg)\n app.setParameter('hessianThreshold', 500)\n app.genOutput()\n app.genPaint()\n\n cv2.imshow('img', img)\n cv2.imshow('out', app.output)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","sub_path":"src/detection/detectionSURF.py","file_name":"detectionSURF.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"303956786","text":"# the traditional way of creating the roles\n# create hero 1\nname = 'Elsa'\nhealth = 50\nmagicPoints = 80\ninventory = {'gold': 40, 'healing potion': 2, 'key': 1}\n\nprint('The hero %s has %s health.' % (name, health))\n\n# method1 of creating monster 1\nmonsterName = 'Goblin'\nmonsterHealth = 20\nmonsterMagicPoints = 0\nmonsterInventory = {'gold': 12, 'dagger': 1}\n\n# method2 of creating monster 2 and 3 with previous 1 in a way of list\nmonsterName = ['Goblin', 'Dragon', 'Goblin']\nmonsterHealth = [20, 300, 18]\nmonsterMagicPoints = [0, 200, 0]\nmonsterInventory = [{'gold': 12, 'dagger': 1}, {'gold': 890, 'magic amulet': 1}, {'gold': 15, 'dagger': 1}]\n\n# method3 of creating monster 1,2,3 by dictionary\nmonsters = [{'name': 'Goblin', 'health': 20, 'magic points': 0, 'inventory': {'gold': 12, 'dagger': 1}},\n {'name': 'Dragon', 'health': 300, 'magic points': 200, 'inventory': {'gold': 890, 'magic amulet': 1}},\n {'name': 'Goblin', 'health': 18, 'magic points': 0, 'inventory': {'gold': 15, 'dagger': 1}}]\n\n# delete a monster\n\ndef vanquishMonster(monsterIndex):\n del monsterName[monsterIndex]\n del monsterHealth[monsterIndex]\n del monsterMagicPoints[monsterIndex]\n\nvanquishMonster(0)\n\n","sub_path":"homework/role-playing_game_procedure.py","file_name":"role-playing_game_procedure.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"157247059","text":"from django.urls import path\r\n\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n\t#Leave as empty string for base url\r\n\tpath('', views.home, name=\"home\"),\r\n\tpath('login/', views.login, name=\"login\"),\r\n\tpath('product/', views.product, name=\"product\"),\r\n\tpath('productdetail', views.productdetail, name=\"productdetail\"),\r\n\tpath('located/', views.located, name=\"located\"),\r\n\tpath('cart/', views.cart, name=\"cart\"),\r\n\tpath('submitorder/', views.submitorder, name=\"submitorder\"),\r\n\tpath('checkout/', views.checkout, name=\"checkout\"),\r\n\r\n\tpath('update_item/', views.updateItem, name=\"update_item\"),\r\n\tpath('process_order/', views.processOrder, name=\"process_order\"),\r\n\r\n]","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"97267441","text":"\"\"\"Communicating with the flashfocus server via unix socket.\"\"\"\nimport logging\n\nfrom flashfocus.sockets import init_client_socket\n\n\ndef client_request_flash():\n \"\"\"Request that the server flashes the current window.\"\"\"\n logging.info(\"Connecting to the flashfocus daemon...\")\n sock = init_client_socket()\n logging.info(\"Connection established, sending flash request...\")\n # Just send a single byte to the server. 
Contents are unimportant.\n sock.sendall(bytearray(\"1\", encoding=\"UTF-8\"))\n","sub_path":"flashfocus/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"230439496","text":"import os\nimport shutil\nimport time\n#entries = os.listdir('noscale.25g.500b.Output/')\n\n#print(entries)\n\n\n\nfor i in range(20):\n\t##Change the name of the folder here\n\tname = 'noscale.200g.true/R' + str(i+1)\n\tshutil.copy('run_paup_consensus.pl',name)\n\tshutil.copy('paup',name)\n\n\tos.chdir(name)\n\tmyCmd = 'perl run_paup_consensus.pl -i BS.1 -o output'\n\tstart = time.time()\n\tos.system(myCmd)\n\tprint(time.time()-start)\n\n\tos.remove('output.greedy')\n\tos.remove('output.paup')\n\tos.remove('output.nexus')\n\tos.remove('output.majority')\n\t#os.remove('output.majority.tree')\n\tos.remove('output.strict')\n\t#os.remove('output.strict.tree')\n\tos.remove('paup')\n\tos.remove('run_paup_consensus.pl')\n\t#copying files to initial Folder\n\tshutil.copy('output.majority.tree','../..')\n\tshutil.copy('output.greedy.tree','../..')\n\tshutil.copy('output.strict.tree','../..')\n\t#changing Directory to initial Folder\n\tos.chdir('../..')\n\t#Get the Fp and Fn Rates\n\t##Change the name of the OutputFiles\n\tmyCmd3= 'getFpFn.py -t true.tree -e output.majority.tree>>FpFnMajority.result'\n\tmyCmd4= 'getFpFn.py -t true.tree -e output.strict.tree>>FpFnStrict.result'\n\tmyCmd5= 'getFpFn.py -t true.tree -e output.greedy.tree>>FpFnGreedy.result'\n\tos.system(myCmd3)\n\tos.system(myCmd4)\n\tos.system(myCmd5)\n\tos.remove('output.greedy.tree')\n\tos.remove('output.strict.tree')\n\tos.remove('output.majority.tree')\n\t#for changing to initial directory\n\t#os.chdir('..')\n\n\n\n\n\t\n\t\n","sub_path":"Greedy_SuperFine_Ratetrue.py","file_name":"Greedy_SuperFine_Ratetrue.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"320857382","text":"class AdventOfCode:\n def __init__(self, filename):\n with open(filename) as f:\n self.shifts = list(map(int, f.read().splitlines()))\n\n def part1(self):\n return sum(self.shifts)\n\n def part2(self):\n freq = 0\n seen = set()\n\n while True:\n for shift in self.shifts:\n freq = freq + shift\n\n if freq in seen:\n return freq\n else:\n seen.add(freq)\n","sub_path":"py/2018/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"193666314","text":"def chunker(seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\nasdf = {}\nwith open(\"data.txt\", \"r\") as f:\n for line in f:\n tmp = []\n line = line.replace('[', '').replace(']', '').strip().split(',')\n key = line[0]\n line.pop(0)\n for group in chunker(line, 4):\n tmp.append(list(map(float, group)))\n print(key)\n # print(tmp)\n if key in asdf:\n asdf[key].append(tmp)\n else:\n asdf[key] = [tmp]\nprint(asdf)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"63736178","text":"from sys import stdin\n\nfor line in stdin:\n line = line[:len(line)-1]\n\n line = [x.lower() for x in line.split()]\n\n cnt = 0\n prev = False\n for i in range(len(line)):\n if i != len(line)-1:\n if line[i][0] == line[i+1][0]:\n if prev == False:\n cnt+=1\n prev = 
True\n else:\n prev = False\n print(cnt)","sub_path":"URI/1263 Alliteration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"351131219","text":"import bentoml\nimport tensorflow as tf\n\nfrom bentoml.artifact import TensorflowSavedModelArtifact\nfrom bentoml.adapters import TfTensorInput\n\n\nFASHION_MNIST_CLASSES = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n\n@bentoml.env(pip_dependencies=['tensorflow', 'numpy', 'pillow'])\n@bentoml.artifacts([TensorflowSavedModelArtifact('model')])\nclass FashionMnistTensorflow(bentoml.BentoService):\n\n @bentoml.api(input=TfTensorInput(), batch=True)\n def predict(self, inputs):\n outputs = self.artifacts.model.predict_image(inputs)\n output_classes = tf.math.argmax(outputs, axis=1)\n return [FASHION_MNIST_CLASSES[c] for c in output_classes]","sub_path":"7_Model Serving - bentoML/lesson3_tf2_image/tensorflow_fashion_mnist.py","file_name":"tensorflow_fashion_mnist.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"358545872","text":"from math import gcd\n\n\ndef best_location(matrix):\n row_num, col_num = len(matrix), len(matrix[0])\n asteroids = []\n for i in range(row_num):\n for j in range(col_num):\n if matrix[i][j] == \"#\":\n asteroids.append((i, j))\n\n for ast in asteroids:\n mark_matrix(asteroids, ast, matrix)\n print(matrix)\n return get_largest(asteroids, matrix)\n\n\ndef mark_matrix(asteroids, ast, matrix):\n for a in asteroids:\n if a == ast:\n continue\n else:\n i, j = a[0], a[1]\n delta_x, delta_y = ast[0] - i, ast[1] - j\n if delta_x == 0:\n delta_y = int(abs(delta_y) / delta_y)\n elif delta_y == 0:\n delta_x = int(abs(delta_x) / delta_x)\n else:\n divs = gcd(delta_x, delta_y)\n delta_x = int(delta_x / divs)\n delta_y = int(delta_y / divs)\n has_other_ast = False\n i += delta_x\n j += delta_y\n while (i, j) != ast:\n if matrix[i][j] != \".\":\n has_other_ast = True\n break\n i += delta_x\n j += delta_y\n if not has_other_ast:\n if matrix[a[0]][a[1]] == \"#\":\n matrix[a[0]][a[1]] = 0\n matrix[a[0]][a[1]] += 1\n\n\ndef get_largest(asteroids, matrix):\n res = -1\n row, col = -1, -1\n for ast in asteroids:\n i, j = ast[0], ast[1]\n if res < matrix[i][j]:\n res = matrix[i][j]\n row, col = i, j\n print(res)\n return row, col\n\n\nif __name__ == \"__main__\":\n matrix = []\n with open(\"../input/day10input\") as f:\n for line in f:\n matrix.append([pos for pos in str(line).rstrip()])\n\n row, col = best_location(matrix)\n print(col, row)\n","sub_path":"src/day10p1.py","file_name":"day10p1.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"279442453","text":"\nfrom fake_useragent import UserAgent\nfrom scrapy.selector import Selector\nimport requests\nfrom scrapy import Selector\nfrom urllib import parse\nfrom urllib.parse import urljoin\n\n\nbase_url = 'http://maoyan.com'\nfile = 'D:\\\\maoyan.html'\nwith open(file,'r',encoding='utf-8') as object:\n body = object.read()\n #print(body)\n\nselector = Selector(text=body)\ncontent = selector.xpath('//div[@class=\"movie-item-info\"]').getall()\nfor item in content[:10]:\n film_url = Selector(text=item).xpath('./descendant::a/@href').getall()\n url = base_url +film_url[0]\n print(url)\n\nfile_detail = 
'D:\\\\maoyan_detail.html'\nwith open(file_detail,'r',encoding='utf-8') as object_detail:\n body = object_detail.read()\n#selector = Selector(text=body)\nfilm_name = Selector(text=body).xpath('//h1[@class=\"name\"]/text()').getall()\nfilm_gener = Selector(text=body).xpath('//ul/li[@class=\"ellipsis\"]/a/text()').getall()\nplan_time = Selector(text=body).xpath('//ul/li[3][@class=\"ellipsis\"]/text()').getall()\nprint(film_name)\nprint(film_gener)\nprint(plan_time)\n#film_name = Selector(text=tage).xpath('//h1[@class=\"name\"]/text()').extract_first\n#print(film_name)\n\nimport pymysql\ndb = pymysql.connect()\n\n\n\n\n\n","sub_path":"week01/scrapy_xpath/tutorial/tutorial/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"51165740","text":"############################################################################################################\r\n#\r\n# Save Programme for TS_READER - Custom script called from myTV 'Save Programme' menu option.\r\n#\r\n#\r\n# NOTES:\r\n# The class must be called 'SaveProgramme' that accepts Channel and Programme classes.\r\n# Must have a run() function that returns success as True or False.\r\n#\r\n#\r\n# CHANGLOG:\r\n# 14/02/07 - Created.\r\n#\r\n############################################################################################################\r\n\r\nimport xbmcgui,xbmc,time, re, telnetlib, time\r\nfrom string import replace, split\r\nfrom bbbGUILib import *\r\nimport mytvGlobals\r\nfrom mytvLib import *\r\n\r\n__language__ = sys.modules[\"__main__\"].__language__\r\n\r\nDISEQC_91 = '91.0W'\r\nDISEQC_82 = '82.0W'\r\nTSREADER_DISEQC = { DISEQC_91 : '2',\r\n\t\t\t\t\tDISEQC_82 : '1' }\r\nTSREADER_DP = { DISEQC_82 : '11250',\r\n\t\t\t\tDISEQC_91 : '14350' }\r\n\r\nclass SaveProgramme:\r\n\tdef __init__(self, cachePath=\"\"):\r\n\t\tdebug(\"> SaveProgramme().__init__\")\r\n\r\n\t\tself.name = os.path.splitext(os.path.basename( __file__))[0]\t# get filename without path & ext\r\n\t\tself.configSaveProgramme = ConfigSaveProgramme()\r\n\r\n\t\tdebug(\"< SaveProgramme().__init__\")\r\n\t\t\r\n\tdef getName(self):\r\n\t\treturn self.name\r\n\r\n\tdef saveMethod(self):\r\n\t\treturn SAVE_METHOD_CUSTOM\r\n\r\n\tdef isConfigured(self):\r\n\t\treturn self.configSaveProgramme.checkValues()\r\n\t\t\r\n\t############################################################################################################\r\n\tdef config(self, reset=True):\r\n\t\tdebug(\"> config() reset=%s\" % reset)\r\n\t\ttry:\r\n\t\t\tif reset:\r\n\t\t\t\tsuccess = self.configSaveProgramme.reset()\r\n\t\t\tsuccess = self.isConfigured()\r\n\t\t\tif success:\r\n\t\t\t\tself.tsrControlIP = self.configSaveProgramme.getIP()\r\n\t\t\t\tself.tsrControlPort = self.configSaveProgramme.getPort()\r\n\t\t\t\tself.is22khz = self.configSaveProgramme.is22khz()\r\n\t\t\t\tself.playbackOutput = self.configSaveProgramme.getPlaybackOutput()\r\n\t\t\t\tself.streamFile = self.configSaveProgramme.getStreamFile()\r\n\t\texcept:\r\n\t\t\thandleException()\r\n\r\n\t\tdebug(\"< config() success=%s\" % success)\r\n\t\treturn success\r\n\t\t\r\n\t############################################################################################################\r\n\tdef run(self, channelInfo, programme, confirmRequired=True):\r\n\t\tdebug(\"> SaveProgramme.run()\")\r\n\t\tsuccess = False\r\n\t\t\r\n\t\tchid = channelInfo[TVChannels.CHAN_ID]\r\n\t\tchName = 
channelInfo[TVChannels.CHAN_NAME]\r\n\t\ttitle = programme[TVData.PROG_TITLE]\r\n\t\tdesc = programme[TVData.PROG_DESC]\r\n\r\n\t\t# parse desc to extract tune options DIRECTION, DP, TPS, unknown value?\r\n\t\tmatches = searchRegEx(desc, '.+?\\|(.+?) (.+?) (.+?) ')\r\n\t\tif not matches:\r\n\t\t\tmessageOK(\"TS READER Tune Error\",\"Unable to parse tuning from description\")\r\n\t\t\tdebug(\"< SaveProgramme.run() failed\")\r\n\t\t\treturn False\r\n\r\n\t\tdirection = matches.group(1)\r\n\t\tdp = matches.group(2).replace('.','')\r\n\t\ttps = matches.group(3)\r\n\t\tsomeValue = matches.group(4)\r\n\r\n\t\tdiseqcPort = TSREADER_DISEQC[direction]\r\n\t\tif tps == 'L':\r\n\t\t\tlnbf = TSREADER_DP[direction]\r\n\t\telse:\r\n\t\t\tlnbf = TSREADER_DP[DISEQC_82]\r\n\r\n\t\ttuneCommand = \"TUNE %s %s %s %s %s %s\\r\" % (dp, tps, someValue, lnbf, self.is22khz, diseqcPort)\r\n\r\n\t\t# send cmds to server via telnet\r\n\t\tserverResp = '200 TSReader version 2.7.44 Control Server'\r\n\t\trestartedResp = '308 Source restarted'\r\n\t\tdecodeResp = '311 Table decoding complete'\r\n\t\tselectedResp = '300 Program selected'\r\n\t\tplaybackResp = '302 Playback starting'\r\n\t\t\r\n\t\tprogramNumber = split(chid, '-')[0]\r\n\t\tprogramCommand = 'PROGRAM ' + str(programNumber) + '\\r'\r\n\t\tplayCommand = 'PLAY ' + self.playbackOutput + '\\r'\r\n\t\tquitCommand = 'QUIT\\r'\r\n\t\tstallCommand = 'STALL 300\\r'\r\n\r\n\t\ttn = telnetlib.Telnet(self.tsrControlIP, self.tsrControlPort)\r\n\t\tif not tn:\r\n\t\t\tmessageOK(\"Telnet Negotiation Failed\",\"Failed to connect.\",\"Check IP & Port\")\r\n\t\t\tdebug(\"< SaveProgramme.run() connection failed\")\r\n\t\t\treturn False\r\n\r\n\t\tdialogProgress.update(0, \"Telnet Negotiation\", __language__(500))\t# wait...\r\n\t\tMAX_OPS = 6\r\n\t\topCount = 1\r\n\r\n\t\ttry:\r\n\t\t\ttn.read_until(serverResp, timeout=5)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Send Tune command ...\")\r\n\t\t\topCount += 1\r\n\r\n\t\t\ttn.write(tuneCommand)\r\n\t\t\ttn.read_until(restartedResp, timeout=5)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Send Stall command ...\")\r\n\t\t\topCount += 1\r\n\r\n\t\t\ttn.write(stallCommand)\r\n\t\t\ttn.read_until(decodeResp, timeout=60)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Send Program command ...\")\r\n\t\t\topCount += 1\r\n\r\n\t\t\ttn.write(programCommand)\r\n\t\t\ttn.read_until(selectedResp, timeout=5)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Send Play command ...\")\r\n\t\t\topCount += 1\r\n\r\n\t\t\ttn.write(playCommand)\r\n\t\t\ttn.read_until(playbackResp, timeout=5)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Send Quit command ...\")\r\n\t\t\topCount += 1\r\n\r\n\t\t\ttn.write(quitCommand)\r\n\t\t\ttn.close()\r\n\t\t\ttime.sleep(2)\r\n\t\t\tdialogProgress.update(int(opCount*100.0/MAX_OPS), \"Telnet Complete\")\r\n\t\t\tsuccess = True\r\n\t\texcept:\r\n\t\t\tmessageOK(\"Telnet Exception\",\"Unexpected end of connection.\")\r\n\t\telse:\r\n\t\t\tdialogProgress.close()\r\n\t\t\tif fileExist(self.streamFile):\r\n\t\t\t\tif playMedia(self.streamFile):\r\n\t\t\t\t\txbmc.executebuiltin('XBMC.ActivateWindow(2005)')\t# WINDOW_FULLSCREEN_VIDEO, 12005\r\n\t\t\telse:\r\n\t\t\t\tmessageOK(self.name,\"Playback stream file is missing.\", self.streamFile)\r\n\r\n\t\tdebug(\"< run() success=%s\" % success)\r\n\t\treturn success\r\n\t\t\r\n\r\n############################################################################################################\r\n# load, if 
not exist ask, then save\r\n############################################################################################################\r\nclass ConfigSaveProgramme:\r\n\tdef __init__(self, reset=False):\r\n\t\tdebug(\"> ConfigSaveProgramme().init() reset=%s\" % reset)\r\n\t\tself.CONFIG_SECTION = 'SAVEPROGRAMME_TSREADER'\r\n\r\n\t\t# CONFIG KEYS\r\n\t\tself.KEY_IP = 'ip'\r\n\t\tself.KEY_PORT = 'port'\r\n\t\tself.KEY_IS22KHZ = 'is22khz'\r\n\t\tself.KEY_PLAYBACK_OUTPUT = 'playback_output'\r\n\t\tself.KEY_STREAM_FILE = 'stream_file'\r\n\r\n\t\tself.configData = [\r\n\t\t\t[self.KEY_IP,__language__(812),'192.168.0.2',KBTYPE_IP],\r\n\t\t\t[self.KEY_PORT,__language__(813),'8429',KBTYPE_NUMERIC],\r\n\t\t\t[self.KEY_IS22KHZ,\"22Khz?\",False, KBTYPE_YESNO],\r\n\t\t\t[self.KEY_PLAYBACK_OUTPUT, \"Playback Output Type:\",'VLC2',KBTYPE_ALPHA],\r\n\t\t\t[self.KEY_STREAM_FILE, __language__(833), 'g:\\\\VideoLAN.strm', KBTYPE_ALPHA]\r\n\t\t\t]\r\n\r\n\t\tdebug(\"< ConfigSaveProgramme().init()\")\r\n\r\n\tdef reset(self):\r\n\t\tdebug(\"ConfigSaveProgramme.reset()\")\r\n\t\tconfigOptionsMenu(self.CONFIG_SECTION, self.configData, __language__(534))\r\n\r\n\t# check we have all required config options\r\n\tdef checkValues(self):\r\n\t\tdebug(\"> ConfigSaveProgramme.checkValues()\")\r\n\r\n\t\tsuccess = True\r\n\t\t# check mandatory keys have values\r\n\t\tfor data in self.configData:\r\n\t\t\tkey = data[0]\r\n\t\t\tvalue = self.getValue(key)\t\t# key\r\n\t\t\tif value in (None,\"\"):\r\n\t\t\t\tdebug(\"missing value for mandatory key=%s\" % key)\r\n\t\t\t\tsuccess = False\r\n\r\n\t\tdebug(\"< ConfigSaveProgramme.checkValues() success=%s\" % success)\r\n\t\treturn success\r\n\r\n\tdef getIP(self):\r\n\t\treturn self.getValue(self.KEY_IP)\r\n\tdef getPort(self):\r\n\t\treturn self.getValue(self.KEY_PORT)\r\n\tdef is22khz(self):\r\n\t\treturn self.getValue(self.KEY_IS22KHZ)\r\n\tdef getPlaybackOutput(self):\r\n\t\treturn self.getValue(self.KEY_PLAYBACK_OUTPUT)\r\n\tdef getStreamFile(self):\r\n\t\treturn self.getValue(self.KEY_STREAM_FILE)\r\n\r\n\tdef getValue(self, key):\r\n\t\treturn mytvGlobals.config.action(self.CONFIG_SECTION, key)\r\n","sub_path":"myTV/resources/saveprogramme/TS_Reader.py","file_name":"TS_Reader.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"471876381","text":"def agrupa_por_idade (dic):\n nv={}\n for k,v in dic. items():\n if v <= 11:\n nv[k]=\"crianca\"\n if v >=12 and v <=17:\n nv[k]=\"adolecente\"\n if v>=18 and v <59:\n nv[k]=\"adulto\"\n if v>=60:\n nv[k]=\"idoso\"\n return nv \n ","sub_path":"backup/user_167/ch153_2020_04_22_20_31_34_111172.py","file_name":"ch153_2020_04_22_20_31_34_111172.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"430282517","text":"##############################################################################\n#\n# Copyright (c) 2004, 2005 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Five-specific directive handlers\n\nThese directives are specific to Five and have no equivalents in Zope 3.\n\n$Id: fiveconfigure.py 18581 2005-10-14 16:54:25Z regebro $\n\"\"\"\nfrom zope.interface import classImplements, classImplementsOnly, implementedBy\nfrom zope.interface.interface import InterfaceClass\nfrom zope.configuration.exceptions import ConfigurationError\nfrom zope.app.component.metaconfigure import adapter\nfrom zope.app.site.interfaces import IPossibleSite\n\nfrom Products.Five.site.localsite import FiveSite\n\n_localsite_monkies = []\ndef classSiteHook(class_, site_class):\n setattr(class_, 'getSiteManager',\n site_class.getSiteManager.im_func)\n setattr(class_, 'setSiteManager',\n site_class.setSiteManager.im_func)\n _localsite_monkies.append(class_)\n\ndef installSiteHook(_context, class_, site_class=None):\n if site_class is None:\n if not IPossibleSite.implementedBy(class_):\n # This is not a possible site, we need to monkey-patch it so that\n # it is.\n site_class = FiveSite\n else:\n if not IPossibleSite.implementedBy(site_class):\n raise ConfigurationError('Site class does not implement '\n 'IPossibleClass: %s' % site_class)\n if site_class is not None:\n _context.action(\n discriminator = (class_,),\n callable = classSiteHook,\n args=(class_, site_class)\n )\n _context.action(\n discriminator = (class_, IPossibleSite),\n callable = classImplements,\n args=(class_, IPossibleSite)\n )\n\n# clean up code\n\ndef uninstallSiteHooks():\n for class_ in _localsite_monkies:\n delattr(class_, 'getSiteManager')\n delattr(class_, 'setSiteManager')\n classImplementsOnly(class_, implementedBy(class_)-IPossibleSite)\n _localsite_monkies.remove(class_)\n\nfrom zope.testing.cleanup import addCleanUp\naddCleanUp(uninstallSiteHooks)\ndel addCleanUp\n","sub_path":"Five/site/metaconfigure.py","file_name":"metaconfigure.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"606949089","text":"import socket\n\nobject_ =socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nprint('Cliente criado com sucesso!')\n\nhost = 'localhost'\ndoor = 5432\n\nobject_.bind((host, door))\nmsg = \"ola cliente\"\n\n\nwhile 1:\n date, end = object_.recvfrom(4096)\n\n if date:\n print('servidor enviando msg!')\n object_.sendto(date + (msg.encode()), end)","sub_path":"Seguranca-Python-main/ClientTcp_Udp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"53758003","text":"\n\nfrom xai.brain.wordbase.nouns._raccoon import _RACCOON\n\n#calss header\nclass _RACCOONS(_RACCOON, ):\n\tdef __init__(self,): \n\t\t_RACCOON.__init__(self)\n\t\tself.name = \"RACCOONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"raccoon\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_raccoons.py","file_name":"_raccoons.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} 
+{"seq_id":"98255062","text":"#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the plusMinus function below.\r\ndef plusMinus(arr):\r\n pos = 0\r\n neg = 0\r\n zero = 0\r\n l = len(arr)\r\n\r\n for i in arr:\r\n if i > 0:\r\n pos += 1\r\n elif i < 0:\r\n neg += 1\r\n elif i == 0:\r\n zero += 1\r\n pos = pos/l\r\n neg = neg/l\r\n zero = zero/l\r\n\r\n print(f'{pos:.6}\\n{neg:.6}\\n{zero:.6}')\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n plusMinus(arr)\r\n","sub_path":"Problem Solving/Plus Minus.py","file_name":"Plus Minus.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"409696222","text":"import argparse\nimport pandas as pd\nimport numpy as np\nimport re\nimport pickle\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import load_model\n\nparser = argparse.ArgumentParser(description=\"\"\"Takes two arguments:\n the path of the excel file containing the column 'Narration', \n the path to the directory where the resulting file 'result.xlsx'\n with 'Enitity' column is to be saved \"\"\")\n\nparser.add_argument('-i','--inp', help='Path to the input excel file', required=True)\nparser.add_argument('-o','--out', help='Path to save the output result.xlsx file', required=True)\nargs = vars(parser.parse_args())\n\ndef tokenise(narration):\n \n \"\"\"\n Takes a narration string, and returns a list of tokens,\n obtained by splitting the string with the special characters inside the parenthesis:\n (,-_!/\\)\n In the absence of all the mentioned characters, the string is split with simple whaitespace character.\n \"\"\"\n \n narration = str(narration)\n tokens = re.split(', |_|-|!|/|\\+', narration)\n if len(tokens) == 1:\n tokens = ''.join(tokens).split()\n return [token.lower() for token in tokens]\n\ndef filter_numbers(narration):\n \n \"\"\"\n Takes a list of tokens, and checks if the token is purely numerical, in which case it is replaced \n with a simple 'num' string formatted with the index of that token in the list.\n \n The numbers at the start and end of strings are completely removed from the string.\n \"\"\"\n filtered_narration = []\n for token in narration:\n if token.isdigit():\n token = f'num{narration.index(token)}'\n else:\n token = re.sub(\"^\\d+|\\d+$\", \"\", token)\n filtered_narration.append(token)\n return filtered_narration\n\ndf = pd.read_excel(args['inp'])\nnarrations = df['Narration'].apply(lambda x:str(x))\n\ntokenised_X = []\nfor narration in narrations:\n tokenised_X.append(tokenise(narration))\n \nprint('Preprocessing data')\n \nX_data = []\nfor narration in tokenised_X:\n X_data.append(filter_numbers(narration))\n \nwith open('tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\n \nX_encoded = tokenizer.texts_to_sequences(X_data)\n \nX = pad_sequences(X_encoded, padding='post')\n\nmodel = load_model('model.hdf5')\n\nprint('Making Predictions')\n\ny = np.argmax(model.predict(X), axis=-1)\n\npredicted_entities = []\nfor i in range(len(X)):\n narration = tokenised_X[i]\n if y[i] >= len(narration):\n entity = 'nan'\n else:\n entity = narration[y[i]]\n predicted_entities.append(entity)\n \nresults = pd.DataFrame(df['Narration'])\n \nresults['Entity'] = pd.Series(predicted_entities)\n \nprint('saving results')\n\nresults.to_excel(args['out']+'/results.xlsx')\n 
\nprint('Done.')\n","sub_path":"entityRecognizer.py","file_name":"entityRecognizer.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"256271787","text":"# -*- coding:utf-8 -*-\n# @Time: 2020/7/2 19:43\n# @Author: duiya duiyady@163.com\n\n\ndef myPow(x, n):\n now = x\n result = 1.0\n n_tmp = abs(n)\n if n == 0:\n return 1\n while n_tmp > 1:\n t = n_tmp%2\n if t == 1:\n result = result*now\n n_tmp = n_tmp//2\n now = now*now\n result = result*now\n if n < 0:\n result = 1/result\n return result\n\n\nif __name__ == '__main__':\n print(myPow(2, 11))","sub_path":"src/main/num001_100/50_pow(x,n).py","file_name":"50_pow(x,n).py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"199454285","text":"\"\"\"\nSet of utils for parsing data out of free text.\n\"\"\"\n\n\n\"\"\"\nCleaning free text\n\"\"\"\n\nimport pandas, numpy as np,re\nimport collections, unicodedata\nimport xml, itertools, xml.etree.ElementTree as ET\nfrom html.parser import HTMLParser\nimport dateparser\nfrom parsingData.procedures import classificationProcedures\n\nfloatParse = '[0-9]*[\\.,]?[0-9]+'\npars =HTMLParser()\ncleanWhites = re.compile(\"[^\\S\\n]+\")\nfullCleanTxt = lambda s: cleanString(remove_diacritics(str(s))).lower() \n\ndef findInXML(s, et): \n if isinstance(et, str):\n et = ET.fromstring(et)\n r =et.find('.//row[@NombreCampo=\"%s\"]' % s)\n if r is not None:\n return r.get('ValorCampo')\n return None\n\ndef prettyPrintXML(s):\n r = xml.dom.minidom.parseString(s) #r.RegistroXML)\n print(pars.unescape(r.toprettyxml()))\n \n# Maybe I should start tokenizing\ndef remove_diacritics(text):\n \"\"\"\n Returns a string with all diacritics (aka non-spacing marks) removed.\n For example \"Héllô\" will become \"Hello\".\n Useful for comparing strings in an accent-insensitive fashion.\n \"\"\"\n text = pars.unescape(str(text))\n normalized = unicodedata.normalize(\"NFKD\", str(text))\n return \"\".join(c for c in normalized if unicodedata.category(c) != \"Mn\")\n\ndef cleanString(text, removeChars = '-:,;\\.', removeWords = []):\n text = pars.unescape(str(text))\n text = cleanWhites.sub(' ', text)\n\n #clean html tags, when they are with with < >\n #text = re.sub('\\<\\;.*?\\>\\;', ' ', text)\n text = re.sub('\\<[^<]*?\\>', ' ', text)\n\n for c in removeChars:\n text = re.sub('(?%s)' % floatParse + ' ' + semanas + '[,]?)'\\\n + '( ' + paraHoy + ' ' + floatParse + ' ' + '(:?%s)?' % semanas + ')?' + '[^\\n]*'\n\n queryEchos = '(eco[a-z]*' + sep + '(' + echoLine + '\\s*' ')+)'\n m = re.findall(echoLine, t, re.MULTILINE)\n return m\n\nnoRecuerda = ['no', '\\?']\nsearchFUM = re.compile('fum'+ sep + '(?::|.)?'+ sep +'(:?' 
+ date + '|%s)' % ('|'.join(noRecuerda)), flags = re.IGNORECASE)\ndef parseGPCA_and_fum(text):\n \"\"\"\n Gets the GPCA and FUM from the Triage or epicrisis.\n \n NOTE: some of the cases are incorrect, double check\n TODO: actually, registro del recien nacido has it as a field.\n \"\"\" \n allowedStarts = ['7->', '-', '- antecedentes', 'antecedentes']\n line = re.findall('^%s(?:%s)?%s' % (sep, '|'.join(allowedStarts), sep) + 'G' + sep + '[0-9]+.*$', text, re.M)\n if line:\n f = line[0]\n G = f(re.findall('g' + sep + '([0-9]+)', line[0]))\n A = f(re.findall('a' + sep + '([0-9]+)', line[0]))\n C = f(re.findall('c' + sep + '([0-9]+)', line[0]))\n P = f(re.findall('p' + sep + '([0-9]+)', line[0])) \n parsedGPCA = [G, P, A, C]\n GPCA_OK = True\n else:\n parsedGPCA = []\n GPCA_OK = False\n #Prob athere is a better way...\n parsedFUM = searchFUM.findall(text)\n #print(parsedFUM[0])\n return {'fum' : parsedFUM[0][0] if parsedFUM else '',\n 'fum_OK' : len(parsedFUM) > 0 ,\n 'GPCA_OK' : GPCA_OK,\n 'fum_Data' : parsedFUM,\n 'parsedGPCA' : parsedGPCA}\ndef getAlta(txt, newborn = False ):\n if txt is None:\n txt = ''\n if 'alta voluntaria' in txt:\n return 'altaVoluntaria'\n elif 'cuidados intermedios' in txt:\n return 'cuidadosIntermedios'\n elif 'cuidados basicos' in txt:\n return 'cuidadosBasicos'\n elif 'alojamiento conjunto'in txt:\n return 'alojamientoConjunto'\n elif 'alta medica' in txt or 'alta hospitalaria' in txt or ('alta' in txt and newborn):\n return 'altaMedica'\n elif ' uci' in txt or 'cuidados intensivo' in txt:\n return 'uci'\n else:\n return 'unknown'\n\ndef getMotherData(data):\n \"\"\"\n Parse the data relative to the mother and general pregnancy (from patient info, epicrisis and admision to the emergency room)\n \"\"\" \n res = {}\n res['VAR_0006'] = data.motherData.FechaNac\n #Etnia\n if data.motherData.Etnia == 1:\n res['VAR_0011'] = 'B'\n #Raizal, palenquero, negros/mulatos\n elif data.motherData.Etnia in [3,4,5]: \n res['VAR_0011'] = 'D'\n #Otras etnias?\n elif data.motherData.Etnia in [2]:\n res['VAR_0011'] = 'E'\n\n #Estudios y alfabetiacion\n if data.motherData.Escolaridad in [2, 3, 4,5,6]:\n res['VAR_0012'] = 'B'\n elif data.motherData.Escolaridad in [1, 8]:\n res['VAR_0012'] = 'A'\n\n #TODO: Que hacer con pre-escolar? 
\n if data.motherData.Escolaridad in [8, 1]:\n res['VAR_0013'] = 'A'\n elif data.motherData.Escolaridad in [3]:\n res['VAR_0013'] = 'B'\n elif data.motherData.Escolaridad in [4, 5]:\n res['VAR_0013'] = 'C'\n elif data.motherData.Escolaridad in [6]:\n res['VAR_0013'] = 'D'\n\n if data.motherData.EstadoCivil in ['Casado']:\n res['VAR_0015'] = 'A'\n elif data.motherData.EstadoCivil in ['Soltero']:\n res['VAR_0015'] = 'C'\n elif remove_diacritics(\n data.motherData.EstadoCivil) in ['Union Libre']:\n res['VAR_0015'] = 'B'\n\n res['VAR_0018'] = '806001061-8'\n res['VAR_0019'] = data.motherData.Identificacion \n\n # Medical history (antecedentes)\n if data.ingreso is not None:\n etIngreso = ET.fromstring(data.ingreso.RegistroXML)\n\n #Family history\n aFamiliares = findInXML(\"AntecedentesFamiliares\", etIngreso)\n antecedentes = parseAntecedentes(aFamiliares)\n if ('None' in antecedentes and len(antecedentes) > 1) or len(antecedentes) == 0:\n \"\"\"\n Something weird happened\n \"\"\"\n pass\n else:\n res['VAR_0020'] = 'B' if 'TBC' in antecedentes else 'A'\n res['VAR_0022'] = 'B' if 'Diabetes' in antecedentes else 'A'\n res['VAR_0024'] = 'B' if 'HTA' in antecedentes else 'A'\n res['VAR_0026'] = 'B' if 'Preeclampsia' in antecedentes else 'A'\n res['VAR_0028'] = 'B' if 'Eclampsia' in antecedentes else 'A'\n res['VAR_0030'] = 'B' if 'Otros' in antecedentes else 'A'\n\n #Personal history: only when there is nothing recorded\n #TODO: a bit of parsing could be done, but I do not have time\n aFarm = findInXML('aFarmacologicos', etIngreso) == \"true\"\n aGineco = findInXML('aGinecoObstetrico', etIngreso) == \"true\"\n aHosp = findInXML('aHospitalarios', etIngreso) == \"true\"\n aTraum = findInXML('aTraumaticos', etIngreso) == \"true\"\n aPathol = findInXML('aPatologicos', etIngreso) == \"true\"\n if findInXML('aQuirurgicos', etIngreso) == \"false\":\n res['VAR_0032'] = 'A'\n aToxic = findInXML('aToxico', etIngreso) == \"true\"\n aTransf = findInXML('aTranfusionales', etIngreso) == \"true\"\n #Height and weight\n try:\n res['VAR_0055'] = float(findInXML(\"Peso\", etIngreso))\n res['VAR_0056'] = float(findInXML(\"Talla\", etIngreso)) * 100 - 100\n except (TypeError, ValueError):\n pass\n if data.epicrisis is not None: \n et = ET.fromstring(data.epicrisis.RegistroXML)\n antecedentesText = findInXML('AntecedentesHTML', et)\n antecedentesText = cleanString(antecedentesText).lower()\n antecedentesText = removeWords(antecedentesText, ['a', 'de', 'el', 'que', 'para', 'y'])\n\n # G P C A : Double check, sometimes it is wrong and FUM\n gpca_fum = parseGPCA_and_fum(antecedentesText)\n if gpca_fum['GPCA_OK']:\n G, P, A, C = gpca_fum['parsedGPCA']\n if G is not None:\n res['VAR_0040'] = int(G)\n if P is not None:\n res['VAR_0042'] = int(P)\n if C is not None:\n res['VAR_0047'] = int(C)\n if A is not None:\n res['VAR_0041'] = int(A)\n\n if gpca_fum['fum_OK']:\n if gpca_fum['fum'] in ['?', 'no']:\n res['VAR_0059'] = 'A'\n res['VAR_0057'] = '07/06/1954'\n else:\n res['VAR_0059'] = 'B'\n res['VAR_0057'] = '/'.join(parseDate(gpca_fum['fum']))\n\n #Echos (ultrasound scans)\n m = parseEchographies(antecedentesText)\n if m is False:\n res['no_echo'] = 'no_echo_confirmed'\n res['VAR_0060'] = 'A'\n elif isinstance(m, list):\n res['no_echo'] = 'echo_confirmed'\n #TODO: parse date\n else:\n res['no_echo'] = 'no_information'\n\n # MORBIDITY: (see analysis of hospital discharge)\n\n #Used medication\n # MD0430 -> oxytocin to induce labour / reduce haemorrhage\n medication = findInXML('MedicamentosAdministrado', et)\n #MedicationByDate\n medicationByDate = medication.split('Fecha:')\n medicationByDate = map(lambda s: s.strip(), medicationByDate)\n medicationByDate 
= {m.split()[0] : m for m in medicationByDate if m}\n \n res['oxitocina'] = 'MD0430' in medication\n res['penilicilinaSifilis'] = 'MD0441' in medication\n res['sulfatoFerroso'] = 'MD0284' in medication\n res['magnesio'] = any( [m in medication for m in ['IM5038', 'IM5392', 'MD0028', 'MD0351', 'MD70149']])\n res['VAR_0443'] = 'B' if res['magnesio'] else 'A'\n res['VAR_0444'] = 'B' if res['magnesio'] else 'A'\n\n #Antibiotics\n #cefradina\n #cefalozina\n\n res['cefradina'] = any( [m in medication for m in ['MD0097', 'MD0098', 'MD0879']])\n res['ampicilina'] = any( [m in medication for m in ['IM5018', 'IM5235','MD0046','MD0047',\n 'MD0048','MD0049','MD0050','MD0051']])\n res['cefalopina'] = any( [m in medication for m in ['IM5338', 'MD0095']])\n res['cefalozina'] = any( [m in medication for m in ['MD0096']])\n\n if res['cefradina'] or res['ampicilina'] or res['cefalopina'] or res['cefalozina']:\n res['VAR_0301'] = 'B'\n else:\n res['VAR_0301'] = 'A'\n #Transfusion\n res['plasma'] = any( [m in medication for m in ['MD0460']])\n\n #local anaesthesia\n res['lidocaina'] = any( [m in medication for m in ['IM5072','IM5109','IM5365','IM5418','MD0332','MD0333','MD0334','MD0335','MD0336','MD0337','MD0338','MD0679']])\n res['roxitaina'] = any( [m in medication for m in ['MD0677', 'MD0678', 'MD0680']])\n if res['lidocaina'] or res['roxitaina']:\n res['VAR_0303'] = 'B'\n else:\n res['VAR_0303'] = 'A'\n\n #regional anaesthesia\n res['bupinet'] = any( [m in medication for m in ['139555', '218170-2', 'MD0078']])\n if res['bupinet']:\n res['VAR_0404'] = 'B'\n else:\n res['VAR_0404'] = 'A'\n #General anaesthesia\n # sintosinal\n # pitusina\n # misoprostol, prostaglandins\n\n\n #Admission\n res['VAR_0183'] = data.casoDesc.FechaHora.split('.')[0]\n\n #Discharge date / reason\n lastRegister = data.getMotherLastState()\n alta = getAlta(lastRegister)\n if alta != 'unknown':\n if alta == 'altaMedica':\n res['VAR_0379'] = lastRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0382'] = 'A'\n elif alta == 'altaVoluntaria':\n res['VAR_0379'] = lastRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0382'] = 'C'\n elif alta == 'cuidadosBasicos':\n res['VAR_0379'] = lastRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0381'] = 'Cuidados basicos'\n res['VAR_0382'] = 'C'\n\n elif alta == 'cuidadosIntermedios':\n res['VAR_0379'] = lastRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0381'] = 'Cuidados intermedios'\n res['VAR_0382'] = 'C'\n #Maternal age\n res['VAR_0009'] = dateparser.parse(data.epicrisis.FechaAsignacionRegistro) - dateparser.parse(res['VAR_0006'])\n res['VAR_0009'] = int(res['VAR_0009'].days/365.25)\n res['VAR_0010'] = 'A' if 15 <= res['VAR_0009'] <= 35 else 'B' \n #Delivery vs abortion\n if classificationProcedures[data.procTypeId] == 'p':\n res['VAR_0182'] = 'A'\n elif classificationProcedures[data.procTypeId] == 'a':\n res['VAR_0182'] = 'B'\n else:\n res['VAR_0182'] = ''\n\n return res\n\n\ndef getDateFromQuirurgicDescription(txt):\n \"\"\"\n Extracts (hour, minute) pairs following 'fecha ... a las HH:MM' in a surgery note.\n \"\"\"\n\n pattern = 'fecha %s a las ([0-9]+):([0-9]+)' % date\n res = re.findall(pattern, txt)\n return res\n\ndef getBloodLoss(text):\n text = text.replace(' de ', ' ').replace(' se ', ' ')\n \n removeWords = ['lateral', 'izquierda', 'derecha', 'superior', 'inferior', 'medial']\n for w in removeWords:\n text = text.replace(' %s ' % w, ' ')\n\n #Estimated blood loss (perdida de sangre)\n bloodLost = re.findall('perdida estimada sangre(?::)? 
([0-9]+) (?:cc|ml)', text)\n try:\n return bloodLost[0]\n except:\n return None\n\ndef findDesgarros(text):\n text = text.replace(' de ', ' ').replace(' se ', ' ')\n \n removeWords = ['lateral', 'izquierda', 'derecha', 'superior', 'inferior', 'medial']\n for w in removeWords:\n text = text.replace(' %s ' % w, ' ')\n\n #Perdida de sangre\n ver = '(?:eviden[a-z]*|observ[a-z]*|vis[a-z]*|encont[a-z]*|presen[a-z]*)' #diferentes manaeras de escribir ver\n negative = ['(?:sin|no) (?:%s )?desgar' % ver]\n positive = 'desgar[a-z]* (?:[a-z]* |(:?pared )?vag[a-z]* )?(?:sangr[a-z]* |no sangrant[a-z]* )?grado (i|ii|iii|1|2|3)'\n positiveUnidentified = '(?:%s )?desgar' % ver\n\n if re.findall('(:?%s)' % '|'.join(negative), text) or ('desgarro' not in text): #and 'sin complicaciones' in text):\n desgarro = 'no'\n elif re.findall(positive, text):\n desgarro = re.findall(positive, text)[0][1]\n if desgarro == 'i':\n desgarro = '1'\n elif desgarro == 'ii':\n desgarro = '2'\n elif desgarro == 'iii':\n desgarro = '3'\n\n elif re.findall(positiveUnidentified, text):\n desgarro = 'yes-NoGrade'\n else:\n desgarro = 'unknown'\n return desgarro\n\n#####\n# Info from newborn\n####\n\ndef getInformationFromProcedureDescription(data):\n \"\"\"\n Get information from the procedure\n \"\"\"\n etDescripcion = ET.fromstring(data.procedure.XmlDescripcion)\n txtDescription = remove_diacritics(cleanString(\n etDescripcion.find('detalle/procedimientos/procedimiento/descripcion').text.lower()))\n res = {}\n \n #Fecha parto\n # TODO: beware of laboors that are near 12 am\n try:\n fechaParto = dateparser.parse(findInXML('fechaCirugia', etDescripcion))\n horaFinCirugia = dateparser.parse(findInXML('horaFin', etDescripcion)) #If nothing else is found, a candidate for birth\n res['VAR_0284'] = str(fechaParto.year) + '/' + str(fechaParto.month) + '/' + str(fechaParto.day)\n res['VAR_0285'] = str(horaFinCirugia.hour) + ':' + str(horaFinCirugia.minute) \n except TypeError:\n pass\n # Presentacion\n if 'cefalic' in txtDescription:\n res['VAR_202'] = 'A'\n # Posicion parto\n if 'en posicion de litotomia' in txtDescription:\n res['VAR_029'] = 'C'\n elif 'decubito dorsal' in txtDescription:\n res['VAR_029'] = 'C'\n\n #Cordon umbilical\n if 'se pinza y corta cordon umbilical' in txtDescription:\n res['VAR_0299'] = 'A'\n #Episotomia\n if 'episiotomia' in txtDescription:\n res['VAR_0292'] = 'B'\n #Reanimacion TODO\n\n #Ocitodicos TODO\n\n # Desgarros\n gradoDesgarros = findDesgarros(txtDescription)\n if gradoDesgarros == 'no':\n res['VAR_0293'] = 'X'\n elif gradoDesgarros == 'yes-NoGrade':\n res['VAR_0294'] = '0'\n elif gradoDesgarros != 'unknown':\n res['VAR_0294'] = gradoDesgarros\n \n #Sangre\n res['perdidaEstimadaSangre'] = getBloodLoss(txtDescription)\n # Nacimiento vivo / muerto\n newbornPattern = '(rec[a-z]+ na[a-z]+|feto|producto)'\n if re.findall('%s (unico )?vivo' % newbornPattern, txtDescription):\n res['VAR_0282'] = 'A'\n elif re.findall('%s (muerto|sin signos vitales)' % newbornPattern, txtDescription):\n res['VAR_0282'] = 'D'\n elif re.findall('%s (obitado)' % newbornPattern, txtDescription) or 'obito' in txtDescription:\n res['VAR_0282'] = 'B'\n elif 'mortinato' in txtDescription:\n res['VAR_0282'] = 'C'\n\n # OxitocinaTDP\n if 'oxitocina' in txtDescription:\n res['VAR_0300'] = 'B'\n else:\n res['VAR_0300'] = 'A'\n\n # C-section / vaginal\n if data.procTypeId == 'H3089' or data.procTypeId == 'H3092':\n res['VAR_0287'] = 'A'\n elif data.procTypeId == 'H3094':\n res['VAR_0287'] = 'B'\n elif data.procTypeId == 'H3085':\n 
res['VAR_0287'] = 'C'\n\n # Placenta completa/ retenida\n if re.findall('(extrae|obtiene) placenta (tip[a-z]+ [a-z]+ )?completa', txtDescription):\n res['VAR_0297'] = 'B'\n res['VAR_0298'] = 'A'\n elif re.findall('(extrae|obtiene) placenta (tip[a-z]+ [a-z]+ )?incompleta', txtDescription):\n res['VAR_0297'] = 'A'\n res['VAR_0298'] = 'A'\n\n # Peso / medidas \n # Remove the points cause they create problems (they use points every 3 digits sometimes)\n if re.findall('peso (%s)' % floatParse, txtDescription):\n res['VAR_0311'] = re.findall('peso (%s)' % floatParse, txtDescription)[0].replace('.', '').replace(',', '')\n if re.findall('talla (%s)' % floatParse, txtDescription):\n res['VAR_0314'] = re.findall('talla (%s)' % floatParse, txtDescription)[0].replace(',', '.')\n\n #APGAR: TODO, easier to get from newborn registration, otherwise is dead.\n \n # TODO: defectos\n if 'sin malformaciones evidentes' in txtDescription:\n res['VAR_0335'] = 'A'\n return res\n\ndef parseAPGAR(s):\n if s != s:\n return False\n s = re.sub('(?<=[0-9])/10', '', s)\n s = re.sub('[^0-9]', \" \", s)\n r = s.split()\n if len(r) == 1:\n return [r[0]]\n if len(r) == 2:\n return r\n elif len(r) == 3:\n return [r[0], r[1] if r[1] != '5' else r[2]]\n elif len(r) == 4 and r[1] == '1' and r[3] == '5':\n return [r[0], r[2]]\n elif len(r) == 4 and r[0] == '1' and r[2] == '5':\n return [r[1], r[3]]\n else:\n return False\n\ndef getNewbornData(data, idNewBornRegister, debug = False):\n register = data.registrosRecienNacido[idNewBornRegister][idNewBornRegister]\n\n etRegistro = ET.fromstring(register.RegistroXML)\n res = {}\n #prettyPrintXML(register.RegistroXML)\n res['VAR_0284'] = findInXML('InputText_FechaHoraNacimiento', etRegistro)\n res['VAR_0283'] = findInXML('ASPxTimeEdit_HoraNacimiento', etRegistro)\n res['VAR_0198'] = findInXML('InputText_EdadGestac', etRegistro)\n EG2 = findInXML('InputText_EdadGestacDubowitzModificado', etRegistro)\n res['partoVag'] = findInXML('TexTarea_PartoVaginal', etRegistro) == 'SI'\n partoC = findInXML('TexTarea_PartoCesaria', etRegistro) == 'SI'\n res['VAR_0190'] = 'A' if res['partoVag'] else 'B'\n apgar = parseAPGAR(findInXML('InputText_APGAR', etRegistro))\n try:\n res['VAR_0321'] = apgar[0]\n except:\n pass\n try:\n res['VAR_0322'] = apgar[1]\n except:\n pass\n\n if findInXML('ASPxComboBox_Sexo', etRegistro) == 'Masculino':\n res['VAR_0310'] = 'B'\n elif findInXML('ASPxComboBox_Sexo', etRegistro) == 'Femenino':\n res['VAR_0310'] = 'A'\n elif findInXML('ASPxComboBox_Sexo', etRegistro):\n res['VAR_0310'] = 'C'\n vivo = findInXML('InputRadio_VM', etRegistro) == 'Vivo'\n \n #Antrhopometrics\n res['VAR_0311'] = findInXML('InputText_Peso', etRegistro).replace('.', '').replace(',', '')\n res['VAR_0314'] = findInXML('InputText_Talla', etRegistro).replace(',', '.')\n res['VAR_0313'] = findInXML('InputText_CC', etRegistro).replace(',', '.')\n \n #As a double check of GAPC\n res['VAR_0040'] = findInXML('InputText_ObstetricosGestaciones', etRegistro)\n res['VAR_0041'] = findInXML('InputText_ObstetricosAbortos', etRegistro)\n res['VAR_0046'] = findInXML('InputText_ObstetricosPartos', etRegistro)\n res['VAR_0047'] = findInXML('InputText_ObstetricosCesareas', etRegistro)\n \n res['sufrimientoFetal'] = findInXML('TexTarea_SufrimientoFetal', etRegistro)\n\n #Paraclinic check\n for r in data.registrosRecienNacido[idNewBornRegister].values():\n et = ET.fromstring(r.RegistroXML)\n #prettyPrintXML(r.RegistroXML)\n pos = ['react', '\\+', 'pos']\n neg = ['no', '-', 'neg']\n try:\n #if debug:\n # print(findInXML( 
'DescripcionNota', et))\n txtNotas = cleanString(remove_diacritics(findInXML( 'DescripcionNota', et))).lower()\n if debug:\n print(txtNotas)\n r = re.findall('vdrl\\s+(%s)' % '|'.join(pos + neg), txtNotas) \n if r:\n res['VAR_0343'] = 'B' if r[0] in pos else 'A'\n break\n except Exception as e:\n pass\n #Hospital discharge, and reason\n dischargeRegister = data.getNewbornLastState(idNewBornRegister)\n if dischargeRegister is not None:\n et = ET.fromstring(dischargeRegister.RegistroXML)\n txt = cleanString(remove_diacritics(findInXML( 'DescripcionNota', et))).lower()\n txt = removeWords(txt, ['a', 'de', 'el', 'que', 'para'])\n else:\n dischargeRegister = data.registrosRecienNacido[idNewBornRegister][idNewBornRegister]\n et = ET.fromstring(dischargeRegister.RegistroXML)\n txt = cleanString(remove_diacritics(findInXML( 'TexTarea_PlanTratamiento', et))).lower()\n txt = removeWords(txt, ['a', 'de', 'el', 'que', 'para'])\n\n if txt is None:\n txt = ''\n alta = getAlta(txt, newborn = True)\n if alta != 'unknown':\n if alta == 'altaMedica':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0372'] = 'alta'\n elif alta == 'altaVoluntaria':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0372'] = 'altaVol'\n\n elif alta == 'cuidadosBasicos':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0372'] = 'cuidadosBasicos'\n\n elif alta == 'cuidadosIntermedios':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0372'] = 'cuidadosIntermedios'\n\n elif alta == 'uci':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0372'] = 'UCI'\n\n elif alta == 'alojamientoConjunto':\n res['VAR_0425'] =dischargeRegister.FechaAsignacionRegistro.split()[0]\n res['VAR_0381'] = 'Cuidados intermedios'\n res['VAR_0330'] = 'A'\n return res","sub_path":".ipynb_checkpoints/parsingDatabaseUtils-checkpoint.py","file_name":"parsingDatabaseUtils-checkpoint.py","file_ext":"py","file_size_in_byte":26269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"379471825","text":"\"\"\"\n\nA JSON viewer using pure python\n\nauthor: Atsushi Sakai (@Atsushi_twi)\n\n\"\"\"\n\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter import filedialog\nimport json\nimport os\nimport argparse\nfrom tkinter import messagebox\nfrom tkinter import font\nfrom urllib.parse import urlparse\nimport webbrowser\n\nVERSION = \"1.2.0\"\n\n# === Config ===\nMAX_N_SHOW_ITEM = 300\nHISTORY_FILE_PATH = os.path.expanduser('~') + \"/.pyjsonviewer_history\"\nMAX_HISTORY = 10\n\n\nclass JSONTreeFrame(ttk.Frame):\n\n def __init__(self, master, jsonpath=None, initialdir=\"~/\"):\n super().__init__(master)\n self.create_widgets()\n self.initialdir = initialdir\n\n if jsonpath:\n self.importjson(jsonpath)\n\n def create_widgets(self):\n self.tree = ttk.Treeview(self)\n self.tree.bind('', self.follow_link)\n\n ysb = ttk.Scrollbar(\n self, orient=tk.VERTICAL, command=self.tree.yview)\n self.tree.configure(yscroll=ysb.set)\n\n self.tree.grid(row=0, column=0, sticky=(tk.N, tk.S, tk.E, tk.W))\n ysb.grid(row=0, column=1, sticky=(tk.N, tk.S))\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n def insert_node(self, parent, key, value):\n node = self.tree.insert(parent, 'end', text=key, open=False)\n\n if value is None:\n return\n\n if type(value) is not dict:\n if type(value) is list:\n value = value[0:MAX_N_SHOW_ITEM]\n node = 
self.tree.insert(node, 'end', text=value, open=False)\n else:\n for (key, value) in value.items():\n self.insert_node(node, key, value)\n\n def follow_link(self, event):\n item_id = self.tree.selection()\n item_text = self.tree.item(item_id, 'text')\n\n if self.is_url(item_text):\n webbrowser.open(item_text)\n\n def is_url(self, text):\n parsed = urlparse(text)\n return all([parsed.scheme, parsed.netloc, parsed.path])\n\n def select_json_file(self):\n file_path = filedialog.askopenfilename(\n initialdir=self.initialdir, filetypes=[(\"JSON files\", \"*.json\")])\n self.importjson(file_path)\n\n def get_unique_list(self, seq):\n seen = []\n return [x for x in seq if x not in seen and not seen.append(x)]\n\n def select_listbox_item(self, evt):\n w = evt.widget\n index = int(w.curselection()[0])\n value = w.get(index)\n self.importjson(value)\n self.sub_win.destroy() # close window\n\n def select_json_file_from_history(self):\n self.sub_win = tk.Toplevel()\n lb = Listbox(self.sub_win)\n with open(HISTORY_FILE_PATH) as f:\n lines = self.get_unique_list(reversed(f.readlines()))\n for ln, line in enumerate(lines):\n lb.insert(ln, line.replace(\"\\n\", \"\"))\n lb.bind('', self.select_listbox_item)\n maximum_width = 250\n lb.autowidth(maximum_width)\n lb.pack()\n\n def save_json_history(self, file_path):\n lines = []\n try:\n with open(HISTORY_FILE_PATH, \"r\") as f:\n lines = self.get_unique_list(f.readlines())\n except FileNotFoundError:\n print(\"created:\" + HISTORY_FILE_PATH)\n\n lines.append(file_path)\n\n with open(HISTORY_FILE_PATH, \"w\") as f:\n lines = lines[max(0, len(lines) - MAX_HISTORY):]\n for line in lines:\n f.write(line.replace(\"\\n\", \"\") + \"\\n\")\n\n def load_json_data(self, file_path):\n with open(file_path) as f:\n return json.load(f)\n\n def importjson(self, file_path):\n data = self.load_json_data(file_path)\n self.save_json_history(file_path)\n self.delete_all_nodes()\n self.insert_nodes(data)\n\n def delete_all_nodes(self):\n for i in self.tree.get_children():\n self.tree.delete(i)\n\n def insert_nodes(self, data):\n parent = \"\"\n\n for (key, value) in data.items():\n self.insert_node(parent, key, value)\n\n def showinfo(self):\n msg = \"\"\"\n PyJSONViewer\n by Atsushi Sakai(@Atsushi_twi)\n Ver.\"\"\" + VERSION + \"\"\"\\n\n GitHub:https://github.com/AtsushiSakai/PyJSONViewer\n \"\"\"\n messagebox.showinfo(\"About\", msg)\n\n\nclass Listbox(tk.Listbox):\n \"\"\"\n auto width list box container\n \"\"\"\n\n def autowidth(self, maxwidth):\n f = font.Font(font=self.cget(\"font\"))\n pixels = 0\n for item in self.get(0, \"end\"):\n pixels = max(pixels, f.measure(item))\n # bump listbox size until all entries fit\n pixels = pixels + 10\n width = int(self.cget(\"width\"))\n for w in range(0, maxwidth + 1, 5):\n if self.winfo_reqwidth() >= pixels:\n break\n self.config(width=width + w)\n\n\ndef main():\n print(__file__ + \" start!!\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file', type=str, help='JSON file path')\n parser.add_argument('-d', '--dir', type=str,\n help='JSON file directory')\n parser.add_argument('-o', '--open', action='store_true',\n default=False, help='Open with finder')\n args = parser.parse_args()\n\n root = tk.Tk()\n root.title('PyJSONViewer')\n root.geometry(\"500x500\")\n menubar = tk.Menu(root)\n\n if args.open:\n args.file = filedialog.askopenfilename(\n initialdir=args.dir,\n filetypes=[(\"JSON files\", \"*.json\")])\n\n app = JSONTreeFrame(root, jsonpath=args.file, initialdir=args.dir)\n\n filemenu = 
tk.Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Open\", command=app.select_json_file)\n filemenu.add_command(label=\"Open from History\",\n command=app.select_json_file_from_history)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n helpmenu = tk.Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=app.showinfo)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n app.grid(column=0, row=0, sticky=(tk.N, tk.S, tk.E, tk.W))\n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n\n root.config(menu=menubar)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pyapp/PyJSONViewer-1.2.0/pyjsonviewer/pyjsonviewer.py","file_name":"pyjsonviewer.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"374677753","text":"\"\"\"Chatter communication protocol\n\nThis module implements basic communication infrastructure used for Hat\ncommunication. Hat communication is based on multiple loosely coupled services.\nTo implement communication with other Hat components, user should always\nimplement independent communication service (or use one of predefined\nservices).\n\nAttributes:\n mlog (logging.Logger): module logger\n sbs_repo (sbs.Repository): chatter message definition SBS repository\n\n\"\"\"\n\nfrom pathlib import Path\nimport asyncio\nimport contextlib\nimport logging\nimport math\nimport ssl\nimport urllib.parse\n\nfrom hat import sbs\nfrom hat import util\nfrom hat.util import aio\n\n\nmlog = logging.getLogger(__name__)\n\n\nsbs_repo = sbs.Repository.from_json(Path(__file__).parent /\n 'chatter_sbs_repo.json')\n\n\nMsg = util.namedtuple(['Msg', \"Received message\"],\n ['conn', \"Connection: connection\"],\n ['data', \"Data: data\"],\n ['conv', \"Conversation: conversation\"],\n ['first', \"bool: first flag\"],\n ['last', \"bool: last flag\"],\n ['token', \"bool: token flag\"])\n\n\nData = util.namedtuple(['Data', \"Message data\"],\n ['module', \"Optional[str]: SBS module name\"],\n ['type', \"str: SBS type name\"],\n ['data', \"sbs.Data: data\"])\n\n\nConversation = util.namedtuple(['Conversation', \"Conversation\"],\n ['conn', \"Connection: connection\"],\n ['owner', \"bool: owner flag\"],\n ['first_id', \"int: first message id\"])\n\n\nclass ConnectionClosedError(Exception):\n \"\"\"Error signaling closed connection\"\"\"\n\n\nasync def connect(sbs_repo, address, *, pem_file=None, ping_timeout=20,\n connect_timeout=5, queue_maxsize=0):\n \"\"\"Connect to remote server\n\n `sbs_repo` should include `hat.chatter.sbs_repo` and aditional message data\n definitions.\n\n Address is string formatted as `://:` where\n\n * `` - one of `tcp+sbs`, `ssl+sbs`\n * `` - remote host's name\n * `` - remote tcp port.\n\n PEM file is used only for ssl connection. 
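For 'ssl+sbs', a call might look like (sketch only; host, port and file name are placeholders): conn = await connect(sbs_repo, 'ssl+sbs://127.0.0.1:23456', pem_file='server.pem'). 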
If PEM file is not defined,\n certificate's authenticity is not established.\n\n If `ping_timeout` is ``None`` or 0, ping service is not registered.\n\n Args:\n sbs_repo (hat.sbs.Repository): chatter SBS repository\n address (str): address\n pem_file (Optional[str]): path to pem file\n ping_timeout (float): ping timeout in seconds\n connect_timeout (float): connect timeout in seconds\n queue_maxsize (int): receive message queue maximum size (0 - unlimited)\n\n Returns:\n Connection: newly created connection\n\n Raises:\n OSError: could not connect to specified address\n ValueError: wrong address format\n socket.gaierror: unknown host name\n asyncio.TimeoutError: connect timeout\n\n \"\"\"\n url = urllib.parse.urlparse(address)\n if not url.port:\n raise ValueError(\"Undefined port\")\n if url.scheme == 'tcp+sbs':\n ssl_ctx = None\n elif url.scheme == 'ssl+sbs':\n ssl_ctx = _create_ssl_context(pem_file)\n else:\n raise ValueError(\"Undefined protocol\")\n\n reader, writer = await asyncio.wait_for(asyncio.open_connection(\n url.hostname, url.port, ssl=ssl_ctx), connect_timeout)\n transport = _TcpTransport(sbs_repo, reader, writer)\n\n mlog.debug('client connection established')\n conn = _create_connection(sbs_repo, transport, ping_timeout, queue_maxsize)\n return conn\n\n\nasync def listen(sbs_repo, address, on_connection_cb, *, pem_file=None,\n ping_timeout=20, queue_maxsize=0):\n \"\"\"Create listening server.\n\n `sbs_repo` is same as for :meth:`connect`.\n\n Address is same as for :meth:`connect`.\n\n If ssl connection is used, pem_file is required.\n\n If `ping_timeout` is ``None`` or 0, ping service is not registered.\n\n Args:\n sbs_repo (hat.sbs.Repository): chatter SBS repository\n address (str): address\n on_connection_cb (Callable[[Connection], None]): on connection callback\n pem_file (Optional[str]): path to pem file\n ping_timeout (float): ping timeout in seconds\n queue_maxsize (int): receive message queue maximum size (0 - unlimited)\n\n Returns:\n Server\n\n Raises:\n OSError: could not listen on specified address\n ValueError: wrong address format\n socket.gaierror: unknown host name\n\n \"\"\"\n url = urllib.parse.urlparse(address)\n if url.port is None:\n raise ValueError(\"Undefined port\")\n if url.scheme == 'tcp+sbs':\n ssl_ctx = None\n elif url.scheme == 'ssl+sbs':\n ssl_ctx = _create_ssl_context(pem_file)\n else:\n raise ValueError(\"Undefined protocol\")\n\n def connected_cb(reader, writer):\n mlog.debug(\"server accepted new connection\")\n transport = _TcpTransport(sbs_repo, reader, writer)\n conn = _create_connection(sbs_repo, transport, ping_timeout,\n queue_maxsize)\n srv._conns.add(conn)\n conn.closed.add_done_callback(lambda _: srv._conns.remove(conn))\n on_connection_cb(conn)\n\n srv = Server()\n srv._closed = asyncio.Future()\n srv._conns = set()\n srv._srv = await asyncio.start_server(\n connected_cb, url.hostname, url.port, ssl=ssl_ctx)\n srv._addresses = [\n _convert_sock_info_to_address(socket.getsockname(), url.scheme)\n for socket in srv._srv.sockets]\n mlog.debug(\"listening socket created\")\n\n return srv\n\n\nclass Server:\n \"\"\"Server\n\n For creating new server see :func:`listen`.\n\n \"\"\"\n\n @property\n def closed(self):\n \"\"\"asyncio.Future: closed future\"\"\"\n return asyncio.shield(self._closed)\n\n @property\n def addresses(self):\n \"\"\"List[str]: listening addresses\"\"\"\n return self._addresses\n\n async def async_close(self):\n \"\"\"Close server and all associated connections\"\"\"\n if self._srv:\n self._srv.close()\n await 
self._srv.wait_closed()\n self._srv = None\n if self._conns:\n await asyncio.wait([conn.async_close() for conn in self._conns])\n if not self._closed.done():\n self._closed.set_result(True)\n\n\ndef _create_connection(sbs_repo, transport, ping_timeout, queue_maxsize):\n conn = Connection()\n conn._sbs_repo = sbs_repo\n conn._transport = transport\n conn._last_id = 0\n conn._conv_timeouts = {}\n conn._msg_queue = aio.Queue(maxsize=queue_maxsize)\n conn._async_group = aio.Group(\n lambda e: mlog.error('connection async group exception: %s', e,\n exc_info=e))\n conn._async_group.spawn(conn._read_loop)\n conn._async_group.spawn(conn._ping_loop, ping_timeout)\n return conn\n\n\nclass Connection:\n \"\"\"Single connection\n\n For creating new connection see :func:`connect`.\n\n \"\"\"\n\n @property\n def local_address(self):\n \"\"\"str: Local address\"\"\"\n return self._transport.local_address\n\n @property\n def remote_address(self):\n \"\"\"str: Remote address\"\"\"\n return self._transport.remote_address\n\n @property\n def closed(self):\n \"\"\"asyncio.Future: closed future\"\"\"\n return self._async_group.closed\n\n async def async_close(self):\n \"\"\"Async close\"\"\"\n await self._async_group.async_close()\n\n async def receive(self):\n \"\"\"Receive incomming message\n\n Returns:\n Msg\n\n Raises:\n ConnectionClosedError\n\n \"\"\"\n try:\n return await self._msg_queue.get()\n except aio.QueueClosedError:\n raise ConnectionClosedError()\n\n def send(self, msg_data, *, conv=None, last=True, token=True,\n timeout=None, timeout_cb=None):\n \"\"\"Send message\n\n Conversation timeout callbacks are triggered only for opened\n connection. Once connection is closed, all active conversations are\n closed without triggering timeout callbacks.\n\n Sending message on closed connection will silently discard message.\n\n Args:\n msg_data (Data): message data\n conv (Optional[Conversation]): existing conversation\n or None for new conversation\n last (bool): conversation's last flag\n token (bool): conversation's token flag\n timeout (Optional[float]): conversation timeout in seconds or None\n for unlimited timeout\n conv_timeout_cb (Optional[Callable[[Conversation],None]]):\n conversation timeout callback\n\n Returns:\n Conversation\n\n Raises:\n ConnectionClosedError\n Exception\n\n \"\"\"\n mlog.debug(\"sending message\")\n if self.closed.done():\n raise ConnectionClosedError()\n\n mlog.debug(\"setting message parameters\")\n send_msg = {\n 'id': self._last_id + 1,\n 'first': conv.first_id if conv else self._last_id + 1,\n 'owner': conv.owner if conv else True,\n 'token': token,\n 'last': last,\n 'data': {\n 'module': _value_to_sbs_maybe(msg_data.module),\n 'type': msg_data.type,\n 'data': self._sbs_repo.encode(msg_data.module, msg_data.type,\n msg_data.data)\n }\n }\n self._transport.write(send_msg)\n self._last_id += 1\n mlog.debug(\"message sent (id: %s)\", send_msg['id'])\n\n if not conv:\n mlog.debug(\"creating new conversation\")\n conv = Conversation(self, True, self._last_id)\n conv_timeout = self._conv_timeouts.pop(conv, None)\n if conv_timeout:\n mlog.debug(\"canceling existing conversation timeout\")\n conv_timeout.cancel()\n if not last and timeout and timeout_cb:\n mlog.debug(\"registering conversation timeout\")\n\n def on_conv_timeout():\n mlog.debug(\"conversation's timeout triggered\")\n if self._conv_timeouts.pop(conv, None):\n timeout_cb(conv)\n\n self._conv_timeouts[conv] = asyncio.get_event_loop().call_later(\n timeout, on_conv_timeout)\n\n return conv\n\n def 
_close(self):\n self._async_group.close()\n\n async def _read_loop(self):\n mlog.debug(\"connection's read loop started\")\n\n try:\n while True:\n mlog.debug(\"waiting for incoming message\")\n try:\n transport_msg = await self._transport.read()\n msg = Msg(\n conn=self,\n data=Data(module=transport_msg['data']['module'][1],\n type=transport_msg['data']['type'],\n data=self._sbs_repo.decode(\n transport_msg['data']['module'][1],\n transport_msg['data']['type'],\n transport_msg['data']['data'])),\n conv=Conversation(conn=self,\n owner=not transport_msg['owner'],\n first_id=transport_msg['first']),\n first=transport_msg['first'],\n last=transport_msg['last'],\n token=transport_msg['token'])\n except asyncio.CancelledError:\n raise\n except asyncio.IncompleteReadError:\n mlog.debug(\"closed connection detected while reading\")\n break\n except Exception as e:\n mlog.error(\"error while reading message: %s\",\n e, exc_info=e)\n break\n\n conv_timeout = self._conv_timeouts.pop(msg.conv, None)\n if conv_timeout:\n mlog.debug(\"canceling existing conversation timeout\")\n conv_timeout.cancel()\n\n if msg.data.module == 'HatPing':\n if msg.data.type == 'MsgPing':\n mlog.debug(\n \"received ping request - sending ping response\")\n self.send(Data('HatPing', 'MsgPong', None),\n conv=msg.conv)\n elif msg.data.type == 'MsgPong':\n mlog.debug(\"received ping response\")\n else:\n await self._msg_queue.put(msg)\n except asyncio.CancelledError:\n mlog.debug(\"read loop canceled - closing connection\")\n raise\n finally:\n mlog.debug(\"connection's read loop stopping\")\n await aio.uncancellable(self._transport.async_close(),\n raise_cancel=False)\n for conv_timeout in self._conv_timeouts.values():\n conv_timeout.cancel()\n self._msg_queue.close()\n self._conv_timeouts = {}\n self._close()\n\n async def _ping_loop(self, timeout):\n if not timeout:\n return\n\n def on_conv_timeout(conv):\n mlog.debug(\"ping response timeout - closing connection\")\n self._close()\n\n mlog.debug(\"ping loop started\")\n with contextlib.suppress(asyncio.CancelledError):\n while not self.closed.done():\n mlog.debug(\"waiting for %ss\", timeout)\n await asyncio.sleep(timeout)\n mlog.debug(\"sending ping request\")\n if self.closed.done():\n break\n self.send(Data('HatPing', 'MsgPing', None),\n last=False, timeout=timeout,\n timeout_cb=on_conv_timeout)\n\n mlog.debug(\"ping loop stopping\")\n self._close()\n\n\nclass _TcpTransport:\n\n def __init__(self, sbs_repo, reader, writer):\n self._sbs_repo = sbs_repo\n self._reader = reader\n self._writer = writer\n scheme = ('tcp+sbs' if writer.get_extra_info('sslcontext') is None\n else 'ssl+sbs')\n self._local_address = _convert_sock_info_to_address(\n writer.get_extra_info('sockname'), scheme)\n self._remote_address = _convert_sock_info_to_address(\n writer.get_extra_info('peername'), scheme)\n\n @property\n def local_address(self):\n return self._local_address\n\n @property\n def remote_address(self):\n return self._remote_address\n\n async def read(self):\n msg_len_len = (await self._reader.readexactly(1))[0]\n msg_len_bytes = await self._reader.readexactly(msg_len_len)\n msg_len = _bebytes_to_uint(msg_len_bytes)\n msg_bytes = await self._reader.readexactly(msg_len)\n msg = self._sbs_repo.decode('Hat', 'Msg', msg_bytes)\n return msg\n\n def write(self, msg):\n msg_bytes = self._sbs_repo.encode('Hat', 'Msg', msg)\n msg_len = len(msg_bytes)\n msg_len_bytes = _uint_to_bebytes(msg_len)\n msg_len_len_bytes = bytes([len(msg_len_bytes)])\n self._writer.write(msg_len_len_bytes + 
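\n # frame layout: [1 byte: size of the length field][big-endian message length][SBS-encoded message bytes]; read() parses the same framing\n 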
msg_len_bytes + msg_bytes)\n\n async def async_close(self):\n self._writer.close()\n await self._writer.wait_closed()\n\n\ndef _convert_sock_info_to_address(sock_info, scheme):\n host, port = sock_info[0], sock_info[1]\n if ':' in host:\n host = '[' + host + ']'\n return '{}://{}:{}'.format(scheme, host, port)\n\n\ndef _create_ssl_context(pem_file):\n ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)\n ssl_ctx.verify_mode = ssl.VerifyMode.CERT_NONE\n if pem_file:\n ssl_ctx.load_cert_chain(pem_file)\n return ssl_ctx\n\n\ndef _value_to_sbs_maybe(value):\n return ('Just', value) if value is not None else ('Nothing', None)\n\n\ndef _uint_to_bebytes(x):\n bytes_len = max(math.ceil(x.bit_length() / 8), 1)\n return x.to_bytes(bytes_len, 'big')\n\n\ndef _bebytes_to_uint(b):\n return int.from_bytes(b, 'big')\n","sub_path":"src_py/hat/chatter.py","file_name":"chatter.py","file_ext":"py","file_size_in_byte":16214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"584015638","text":"from functools import wraps\n\nfrom selenium.common.exceptions import TimeoutException\nfrom cached_property import cached_property\n\nfrom src.pages import LoginPage, GroupPage, ItemPage\n\n\nclass PosterDecorators:\n \"\"\"A set of decorators for class Poster's methods\"\"\"\n\n @staticmethod\n def logged_before(decorated):\n \"\"\"Makes sure user is logged in before performing decorated\"\"\"\n\n @wraps(decorated)\n def inner(self, *args, **kwargs):\n self._login.log_in()\n result = decorated(self, *args, **kwargs)\n return result\n\n return inner\n\n\nclass Poster:\n \"\"\"Class which allows execution of CRUD actions on Facebook\"\"\"\n\n def __init__(self, driver_obj, groups_list):\n \"\"\"Initializes class with driver object and groups to insert\n :param driver_obj: WebDriver: a driver to perform actions with\n :param groups_list: list of all groups influenced by the poster\n \"\"\"\n self._driver_obj = driver_obj\n self._groups_list = groups_list\n\n # HACK: Private properties to make them cached\n @cached_property\n def _login(self):\n return LoginPage(self._driver_obj.driver)\n\n @cached_property\n def _groups(self):\n return [GroupPage(self._driver_obj.driver, group_link) for group_link in\n self._groups_list]\n\n @PosterDecorators.logged_before\n def delete_item(self, post_url):\n \"\"\"Deletes item with given url from a Facebook group\"\"\"\n item_page = ItemPage(self._driver_obj.driver, post_url)\n item_page.delete()\n\n @PosterDecorators.logged_before\n def add_item_to_all_groups(self, item_data, images_paths_list):\n \"\"\"Adds item to all groups influenced by the poster\n\n :param item_data: dict: Dictionary that should provide\n following fields:\n \"title\": str: Title of advertisement\n \"price\": str: Price of item in a relevant currency\n \"location\": str: Location of item to be sold\n \"desc\": str: Description of item to be sold,\n newline '\\n' characters are respected.\n :param images_paths_list: List of absolute paths of images\n to be inserted into announcement of the item.\n :return: list: of successfully inserted links in all\n poster's groups\n \"\"\"\n inserted_links = []\n for current_group in self._groups:\n try:\n to_insert = current_group.add_sales_post(item_data,\n images_paths_list)\n except TimeoutException:\n self._login.log_in()\n to_insert = current_group.add_sales_post(item_data,\n images_paths_list)\n\n item_data = ItemPage(self._driver_obj.driver, to_insert)\n if item_data.exists():\n inserted_links.append(to_insert)\n\n return 
inserted_links\n\n @PosterDecorators.logged_before\n def post_exists(self, url):\n \"\"\"Checks if sales post at given url exists\"\"\"\n return ItemPage(self._driver_obj.driver, url).exists()\n","sub_path":"src/poster.py","file_name":"poster.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"14904930","text":"#!/usr/bin/env python2\n\nimport socket\nimport sys\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM )\nif len(sys.argv) < 3:\n print( \"Give target host as first argument\")\n print( \"number of 'A's to send as second argument\")\n sys.exit(1)\n\nhost = sys.argv[1]\nport = 2048\n\nip = socket.gethostbyname( host )\n\n\nm = \"A\"*int(sys.argv[2])\n\ns.connect( (ip,port) )\n\ns.sendall( m )\n\n\n\n","sub_path":"break.py","file_name":"break.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"238860859","text":"import pytest\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom faker import Faker\nfrom pytest_django.migrations import DisableMigrations\nfrom thenewboston.accounts.manage import create_account\nfrom thenewboston.blocks.block import generate_block\nfrom thenewboston.constants.network import BANK, PRIMARY_VALIDATOR\nfrom thenewboston.third_party.pytest.client import UserWrapper\nfrom thenewboston.verify_keys.verify_key import encode_verify_key\n\nfrom v1.accounts.factories.account import AccountFactory\nfrom v1.self_configurations.helpers.self_configuration import get_self_configuration\nfrom v1.utils.blocks import create_block_and_related_objects\nfrom v1.validators.factories.validator import ValidatorFactory\n\n\n@pytest.fixture\ndef account(encoded_account_number):\n yield AccountFactory(account_number=encoded_account_number)\n\n\n@pytest.fixture\ndef account_data():\n yield create_account()\n\n\n@pytest.fixture\ndef account_number(account_data):\n signing_key, account_number = account_data\n yield account_number\n\n\n@pytest.fixture\ndef block(block_data):\n yield create_block_and_related_objects(block_data)\n\n\n@pytest.fixture\ndef block_data(account_data, account, encoded_account_number, random_encoded_account_number, self_configuration):\n signing_key, account_number = account_data\n primary_validator = self_configuration.primary_validator\n\n yield generate_block(\n account_number=account_number,\n balance_lock=encoded_account_number,\n signing_key=signing_key,\n transactions=[\n {\n 'amount': self_configuration.default_transaction_fee,\n 'fee': BANK,\n 'recipient': self_configuration.account_number\n },\n {\n 'amount': primary_validator.default_transaction_fee,\n 'fee': PRIMARY_VALIDATOR,\n 'recipient': primary_validator.account_number\n },\n {\n 'amount': Faker().pyint(min_value=1),\n 'recipient': random_encoded_account_number\n }\n ]\n )\n\n\n@pytest.fixture\ndef block_data_unique_recipients(\n account_data, account, encoded_account_number, random_encoded_account_number, self_configuration\n):\n signing_key, account_number = account_data\n primary_validator = self_configuration.primary_validator\n\n yield generate_block(\n account_number=account_number,\n balance_lock=encoded_account_number,\n signing_key=signing_key,\n transactions=[\n {\n 'amount': self_configuration.default_transaction_fee,\n 'fee': BANK,\n 'recipient': self_configuration.account_number\n },\n {\n 'amount': primary_validator.default_transaction_fee,\n 'fee': 
PRIMARY_VALIDATOR,\n 'recipient': primary_validator.account_number\n },\n {\n 'amount': Faker().pyint(min_value=1),\n 'recipient': self_configuration.account_number\n }\n ]\n )\n\n\n@pytest.fixture\ndef client():\n yield UserWrapper(None)\n\n\n@pytest.fixture(autouse=True)\ndef enable_db_access_for_all_tests(request, django_db_setup, django_db_blocker):\n from pytest_django.fixtures import _django_db_fixture_helper\n django_db_blocker.unblock()\n _django_db_fixture_helper(request, django_db_blocker, transactional=True)\n\n\n@pytest.fixture\ndef encoded_account_number(account_number):\n yield encode_verify_key(verify_key=account_number)\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef migrations_disabled():\n settings.MIGRATION_MODULES = DisableMigrations()\n yield None\n\n\n@pytest.fixture\ndef no_requests(monkeypatch):\n monkeypatch.delattr('requests.sessions.Session.request')\n\n\n@pytest.fixture\ndef random_encoded_account_number():\n _, account_number = create_account()\n yield encode_verify_key(verify_key=account_number)\n\n\n@pytest.fixture\ndef self_configuration(monkeypatch):\n call_command('loaddata', 'validator', 'self_configuration', 'user')\n monkeypatch.setenv('NETWORK_SIGNING_KEY', 'e5e5fec0dcbbd8b0a76c67204823678d3f243de7a0a1042bb3ecf66285cd9fd4')\n yield get_self_configuration(exception_class=RuntimeError)\n\n\n@pytest.fixture\ndef signing_key(account_data):\n key, account_number = account_data\n yield key\n\n\n@pytest.fixture(autouse=True)\ndef use_fake_redis(settings):\n \"\"\"Using fake Redis for running tests in parallel.\"\"\"\n settings.DJANGO_REDIS_CONNECTION_FACTORY = 'thenewboston.third_party.django_redis.pool.FakeConnectionFactory'\n settings.CACHES['default']['OPTIONS']['REDIS_CLIENT_CLASS'] = 'fakeredis.FakeStrictRedis'\n\n\n@pytest.fixture\ndef validator(encoded_account_number):\n yield ValidatorFactory(node_identifier=encoded_account_number)\n","sub_path":"v1/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"259690336","text":"#Daniel Duong and Matthew Machado.\r\n\r\n#Program will accept all 4 formats.\r\n#Interstate XYZ\r\n#I-XYZ\r\n#IXYZ\r\n#XYZ\r\n\r\ndef get_interstate_number():\r\n highway = input('Please enter a US Interstate Highway route number: ')\r\n\r\n if highway[0:11] == 'Interstate ':\r\n highway_number = highway[11:]\r\n \r\n\r\n elif highway.startswith('I-'):\r\n highway_number = highway[2:]\r\n \r\n \r\n\r\n elif highway[0] == 'I':\r\n highway_number = highway[1:]\r\n \r\n\r\n else:\r\n highway_number = highway\r\n \r\n\r\n return int(highway_number)\r\n\r\n \r\ndef main():\r\n highway = get_interstate_number()\r\n\r\n direction = highway % 2\r\n\r\n size = highway % 5\r\n\r\n last_two_digits = highway % 100\r\n\r\n first_digit = highway // 100\r\n\r\n if 0 < highway <= 99:\r\n if (highway % 2 == 0):\r\n print('Interstate', highway, 'is a long-distance arterial highway oriented east-west.')\r\n\r\n else:\r\n print('Interstate', highway, 'is a long-distance arterial highway oriented north-south.')\r\n\r\n if 999 >= highway >= 100:\r\n if (first_digit % 2 == 0):\r\n print('Interstate', highway, 'is a circumfirential highway', 'of Interstate', last_two_digits)\r\n\r\n else:\r\n print('Interstate', highway, 'is a spur highway', 'of Interstate', 
last_two_digits)\r\n\r\nmain()\r\n\r\n","sub_path":"Lab_8.py","file_name":"Lab_8.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"138813098","text":"# Based on Fairseq's Transformer. By Jordi Armengol Estapé.\n\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import OrderedDict\n\nfrom fairseq import utils\nfrom fairseq.tasks.factored_translation import FactoredTranslationTask\n\nfrom . import FairseqFactoredMultiSumModel, register_model, register_model_architecture\n\nfrom .transformer import (\n base_architecture,\n Embedding,\n TransformerModel,\n TransformerEncoder,\n TransformerDecoder,\n)\n\nfrom copy import deepcopy\n\n@register_model('factored_transformer_sum')\nclass FactoredTransformerSumModel(FairseqFactoredMultiSumModel):\n \"\"\"Train a factored Transformer model.\n\n Requires `--task factored_translation`.\n \"\"\"\n\n def __init__(self, encoders, decoders):\n super().__init__(encoders, decoders)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n pass\n TransformerModel.add_args(parser)\n parser.add_argument('--share-encoder-embeddings', action='store_true',\n help='share encoder embeddings across languages')\n parser.add_argument('--share-decoder-embeddings', action='store_true',\n help='share decoder embeddings across languages')\n parser.add_argument('--share-encoders', action='store_true',\n help='share encoders across languages')\n parser.add_argument('--share-decoders', action='store_true',\n help='share decoders across languages')\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n assert isinstance(task, FactoredTranslationTask)\n\n # make sure all arguments are present in older models\n base_factored_architecture(args)\n\n if not hasattr(args, 'max_source_positions'):\n args.max_source_positions = 1024\n if not hasattr(args, 'max_target_positions'):\n args.max_target_positions = 1024\n\n src_langs = [lang_pair.split('-')[0] for lang_pair in args.lang_pairs]\n tgt_langs = [lang_pair.split('-')[1] for lang_pair in args.lang_pairs]\n\n if args.share_encoders:\n args.share_encoder_embeddings = True\n if args.share_decoders:\n args.share_decoder_embeddings = True\n\n def build_embedding(dictionary, embed_dim, path=None):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n # if provided, load from preloaded dictionaries\n if path:\n embed_dict = utils.parse_embedding(path)\n utils.load_embedding(embed_dict, dictionary, emb)\n return emb\n\n # build shared embeddings (if applicable)\n shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None\n if args.share_all_embeddings:\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path):\n raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')\n shared_encoder_embed_tokens = FairseqFactoredMultiSumModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=task.langs,\n 
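# a single embedding matrix built over all languages' dictionaries; the decoder reuses it below\n 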
embed_dim=args.encoder_embed_dim,\n build_embedding=build_embedding,\n pretrained_embed_path=args.encoder_embed_path,\n )\n shared_decoder_embed_tokens = shared_encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n if args.share_encoder_embeddings:\n shared_encoder_embed_tokens = (\n FairseqFactoredMultiSumModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=src_langs,\n embed_dim=args.encoder_embed_dim,\n build_embedding=build_embedding,\n pretrained_embed_path=args.encoder_embed_path,\n )\n )\n if args.share_decoder_embeddings:\n shared_decoder_embed_tokens = (\n FairseqFactoredMultiSumModel.build_shared_embeddings(\n dicts=task.dicts,\n langs=tgt_langs,\n embed_dim=args.decoder_embed_dim,\n build_embedding=build_embedding,\n pretrained_embed_path=args.decoder_embed_path,\n )\n )\n\n # encoders/decoders for each language\n lang_encoders, lang_decoders = {}, {}\n\n def get_encoder(lang):\n if lang not in lang_encoders:\n if shared_encoder_embed_tokens is not None:\n encoder_embed_tokens = shared_encoder_embed_tokens\n else:\n if lang == args.factor:\n args2 = deepcopy(args)\n args2.encoder_embed_dim = args.factor_encoder_embed_dim\n args2.encoder_ffn_embed_dim = args.factor_encoder_embed_dim * 2\n encoder_embed_tokens = build_embedding(\n task.dicts[lang], args2.encoder_embed_dim, args.encoder_embed_path\n )\n lang_encoders[lang] = TransformerEncoder(args2, task.dicts[lang], encoder_embed_tokens)\n else:\n encoder_embed_tokens = build_embedding(\n task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path\n )\n lang_encoders[lang] = TransformerEncoder(args, task.dicts[lang], encoder_embed_tokens)\n return lang_encoders[lang]\n\n def get_decoder(lang):\n if lang not in lang_decoders:\n if shared_decoder_embed_tokens is not None:\n decoder_embed_tokens = shared_decoder_embed_tokens\n else:\n decoder_embed_tokens = build_embedding(\n task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path\n )\n lang_decoders[lang] = TransformerDecoder(args, task.dicts[lang], decoder_embed_tokens)\n return lang_decoders[lang]\n\n # shared encoders/decoders (if applicable)\n shared_encoder, shared_decoder = None, None\n if args.share_encoders:\n shared_encoder = get_encoder(src_langs[0])\n if args.share_decoders:\n shared_decoder = get_decoder(tgt_langs[0])\n\n encoders, decoders = OrderedDict(), OrderedDict()\n for lang_pair, src, tgt in zip(args.lang_pairs, src_langs, tgt_langs):\n encoders[lang_pair] = shared_encoder if shared_encoder is not None else get_encoder(src)\n decoders[lang_pair] = shared_decoder if shared_decoder is not None else get_decoder(tgt)\n #return FactoredTransformerModel(encoders, decoders)\n return FactoredTransformerSumModel(encoders, shared_decoder)\n\n\n@register_model_architecture('factored_transformer_sum', 'factored_transformer_sum')\ndef base_factored_architecture(args):\n base_architecture(args)\n '''\n args.share_encoder_embeddings = False\n args.share_decoder_embeddings = True\n args.share_encoders = False\n args.share_decoders = True\n '''\n args.share_encoder_embeddings = getattr(args, 'share_encoder_embeddings', False)\n args.share_decoder_embeddings = getattr(args, 'share_decoder_embeddings', True)\n args.share_encoders = getattr(args, 'share_encoders', False)\n args.share_decoders = getattr(args, 'share_decoders', True)\n\n\n@register_model_architecture('factored_transformer_sum', 'factored_transformer_sum_iwslt_de_en_babelnet')\ndef factored_transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 
'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.factor_encoder_embed_dim = 512\n args.factor = 'de_synset_at'\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_factored_architecture(args)\n\n\n@register_model_architecture('factored_transformer_sum', 'factored_transformer_sum_iwslt_de_en_lemmas_stanfordS')\ndef factored_transformer_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.factor_encoder_embed_dim = 512\n args.factor = 'de_tokensS_lemmas'\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)\n args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)\n args.decoder_layers = getattr(args, 'decoder_layers', 6)\n base_factored_architecture(args)\n\n","sub_path":"fairseq/models/factored_transformer_sum.py","file_name":"factored_transformer_sum.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"183622269","text":"# oauth PRAW template by /u/The1RGood #\r\n#==================================================Config stuff====================================================\r\nimport time, praw\r\nimport webbrowser\r\nfrom flask import Flask, request\r\nfrom threading import Thread\r\n\r\naccess_information = ''\r\n#==================================================End Config======================================================\r\n#==================================================OAUTH APPROVAL==================================================\r\napp = Flask(__name__)\r\n\r\nCLIENT_ID = 'CLIENT_ID'\r\nCLIENT_SECRET = 'CLIENT_SECRET'\r\nREDIRECT_URI = 'http://127.0.0.1:65010/authorize_callback'\r\n\r\ndef kill():\r\n\tfunc = request.environ.get('werkzeug.server.shutdown')\r\n\tif func is None:\r\n\t\traise RuntimeError('Not running with the Werkzeug Server')\r\n\tfunc()\r\n\treturn \"Shutting down...\"\r\n\r\n@app.route('/authorize_callback')\r\ndef authorized():\r\n\tglobal access_information\r\n\tstate = request.args.get('state', '')\r\n\tcode = request.args.get('code', '')\r\n\taccess_information = r.get_access_information(code)\r\n\tuser = r.get_me()\r\n\ttext = 'Sub Notifications Bot has been successfully started.'\r\n\tkill()\r\n\treturn text\r\n\t\r\ndef refresh_access():\r\n\twhile(True):\r\n\t\ttime.sleep(1800)\r\n\t\tr.refresh_access_information(access_information['refresh_token'])\r\n\t\r\nr = praw.Reddit('OAuth FLASK Template Script'\r\n 'https://praw.readthedocs.org/en/latest/'\r\n 'pages/oauth.html for more info.')\r\nr.set_oauth_app_info(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI)\r\nwebbrowser.open(r.get_authorize_url('DifferentUniqueKey',refreshable=True))\r\napp.run(debug=False, port=65010)\r\namt = 
Thread(target=refresh_access,args=())\r\namt.daemon=True\r\namt.start()\r\n#==================================================END OAUTH APPROVAL-=============================================","sub_path":"oauth_template.py","file_name":"oauth_template.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"522998077","text":"class Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n sets = []\n \n \n def findSets(self,candidates, target, set):\n for c in candidates:\n newtarget = target-c\n if newtarget ==0:\n self.sets.append(set+[c])\n elif newtarget<0:\n continue\n else:\n newcandidates =[]\n for c2 in candidates:\n if c2>=c:\n newcandidates.append(c2)\n self.findSets(newcandidates, newtarget, set+[c])\n\n def combinationSum(self, candidates, target):\n self.sets = [] # reset per call so results from earlier calls do not accumulate\n self.findSets(candidates, target,[])\n return self.sets\n\nS=Solution()\nprint(S.combinationSum([2,3,6,7,8],8))","sub_path":"programming/python/combinationSum.py","file_name":"combinationSum.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"488259658","text":"\"\"\"\n\nFeature extraction:\n From the list of unprocessed ingredient strings:\n *) Tokenizes individual ingredient strings.\n *) Gets n=1,2 n-grams.\n *) Terms in n-grams are joined by ':'\n *) All n-grams for a given recipe are then joined into a single string separated by spaces \n e.g. ['garlic powder', 'onions'] --> 'garlic powder garlic:powder onions'\n *) These strings are treated as a corpus of documents to which a bag of words model is applied\n *) Classification performed with a Bayes classifier with a L1-based feature selector pre-processing step\n\n\"\"\"\n\nimport json\nimport pandas as pd\nimport numpy as np\nimport unicodedata\nimport re\nfrom collections import Counter\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import MultinomialNB\nimport sklearn.cross_validation as CV\nfrom sklearn.grid_search import GridSearchCV\nfrom nltk.stem import WordNetLemmatizer\n\n# Seed for randomization. Set to some definite integer for debugging and set to None for production\nseed = None\n\n### Text processing functions ###\n\ndef normalize(string):#Remove diacritics and whatevs\n return \"\".join(ch.lower() for ch in unicodedata.normalize('NFD', string) if not unicodedata.combining(ch))\n\nwnl = WordNetLemmatizer()\ndef tokenize(string):#Ignores special characters and punct\n return [wnl.lemmatize(token) for token in re.compile('\\\\w\\\\w+').findall(string)]\n\ndef ngrammer(tokens):#Gets all grams (n=1,2) in each ingredient\n max_n = 2\n return [\":\".join(tokens[idx:idx+n]) for n in np.arange(1,1 + min(max_n,len(tokens))) for idx in range(len(tokens) + 1 - n)]\n\nprint(\"Importing training data...\")\nwith open('/Users/josh/dev/kaggle/whats-cooking/data/train.json','rt') as file:\n recipes_train_json = json.load(file)\n\n#Small subset for Debugging purposes\n#recipes_train_json = recipes_train_json[:100]\n \n# Build the grams for the training data\nprint('\\nBuilding n-grams from input data...')\nfor recipe in recipes_train_json:\n recipe['grams'] = [term for ingredient in recipe['ingredients'] for term in ngrammer(tokenize(normalize(ingredient)))]\n\n# Build vocabulary from training data grams. 
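(Intended filtering, though the cut-offs are commented out below:) 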
We'll remove grams that appear too much (stop words) or too little. The cut-offs are pretty arbitrary, TBH\ningredient_counts = Counter()\nfor recipe in recipes_train_json:\n for i in recipe['grams']: ingredient_counts[i]+=1\nvocabulary = [gram for gram in ingredient_counts.keys()]# if 2 <= ingredient_counts[gram]]# <= 20000]\n\n# Stuff everything into a dataframe. \nids_index = pd.Index([recipe['id'] for recipe in recipes_train_json],name='id')\nrecipes_train = pd.DataFrame([{'cuisine': recipe['cuisine'], 'ingredients': \" \".join(recipe['grams'])} for recipe in recipes_train_json],columns=['cuisine','ingredients'], index=ids_index)\n\n\n# Extract data for fitting\n#fit_data, val_data, fit_target, val_target = CV.train_test_split( recipes_train['ingredients'].values, recipes_train['cuisine'].values, test_size=0.05, random_state=seed)\nfit_data = recipes_train['ingredients'].values\nfit_target = recipes_train['cuisine'].values\n\n\n# Build SGD Classifier pipeline\ntext_clf = Pipeline([('vect', CountVectorizer(vocabulary=vocabulary)),\n ('feature_selection', LinearSVC(C=1e-5,tol=.001,penalty='l1',dual=False)),\n ('clf', MultinomialNB(alpha=.5)),\n])\n# Grid search over svm classifiers. \nparameters = {\n# 'feature_selection__loss': ('hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', 'squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'),\n 'feature_selection__C': np.linspace(500,5000,10),\n}\n\n# Init GridSearchCV with k-fold CV object\ncv = CV.KFold(len(fit_data), n_folds=3, shuffle=True, random_state=seed)\ngs_clf = GridSearchCV(\n estimator=text_clf,\n param_grid=parameters,\n n_jobs=-1,\n cv=cv,\n scoring='accuracy',\n verbose=2 \n)\n\n# Fit on data subset\nprint(\"\\nPerforming grid search over hyperparameters...\")\ngs_clf.fit(fit_data, fit_target)\n\nprint(\"\\nTop scoring models under cross-validation:\\n\")\ntop_grid_scores = sorted(gs_clf.grid_scores_, key=lambda x: x[1], reverse=True)[:min(25,len(gs_clf.grid_scores_))]\nfor x in top_grid_scores:\n print(x)\n\n\n\n# Import competition test data\nprint(\"\\nImporting competition test data...\")\nwith open('/Users/josh/dev/kaggle/whats-cooking/data/test.json','rt') as file:\n recipes_test_json = json.load(file)\n\n# Build the grams for the test data\nprint('\\nBuilding n-grams from test data...')\nfor recipe in recipes_test_json:\n recipe['grams'] = [term for ingredient in recipe['ingredients'] for term in ngrammer(tokenize(normalize(ingredient)))]\n\n# Test data dataframe. 
\ntest_ids_index = pd.Index([recipe['id'] for recipe in recipes_test_json],name='id')\nrecipes_test = pd.DataFrame([{'ingredients': \" \".join(recipe['grams'])} for recipe in recipes_test_json],columns=['ingredients'], index=test_ids_index)\n\n# Test data predictions & evaluation\ntest_data = recipes_test['ingredients'].values\nprint(\"Computing test data predictions...\")\npredicted = gs_clf.predict(test_data)\n\n# Out to file\nrecipes_test.drop('ingredients',axis=1,inplace=True)\nrecipes_test.insert(0,'cuisine',predicted)\nprint(\"Saving predictions to file...\\n\")\nrecipes_test.to_csv('/Users/josh/dev/kaggle/whats-cooking/sub/gaussian_nb_bag.csv',index=True)\n","sub_path":"multinomial_nb_bag.py","file_name":"multinomial_nb_bag.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"588483543","text":"# _________________________________________________________________________\n#\n# Gcovr: A parsing and reporting tool for gcov\n# Copyright (c) 2013 Sandia Corporation.\n# This software is distributed under the BSD License.\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n# For more information, see the README.md file.\n# _________________________________________________________________________\n\n\nimport sys\n\n\n#\n# Produce the classic gcovr text report\n#\ndef print_text_report(covdata, options):\n def _num_uncovered(key):\n (total, covered, percent) = covdata[key].coverage(options.show_branch)\n return total - covered\n\n def _percent_uncovered(key):\n (total, covered, percent) = covdata[key].coverage(options.show_branch)\n if covered:\n return -1.0*covered/total\n else:\n return total or 1e6\n\n def _alpha(key):\n return key\n\n if options.output:\n OUTPUT = open(options.output, 'w')\n else:\n OUTPUT = sys.stdout\n total_lines = 0\n total_covered = 0\n # Header\n OUTPUT.write(\"-\"*78 + '\\n')\n a = options.show_branch and \"Branches\" or \"Lines\"\n b = options.show_branch and \"Taken\" or \"Exec\"\n c = \"Missing\"\n OUTPUT.write(\"File\".ljust(40) + a.rjust(8) + b.rjust(8) + \" Cover \" + c\n + \"\\n\")\n OUTPUT.write(\"-\" * 78 + '\\n')\n\n # Data\n keys = list(covdata.keys())\n sort_uncovered = options.sort_uncovered and _num_uncovered\n sort_percent = options.sort_percent and _percent_uncovered\n keys.sort(key=sort_uncovered or sort_percent or _alpha)\n for key in keys:\n (t, n, txt) = covdata[key].summary(options)\n total_lines += t\n total_covered += n\n OUTPUT.write(txt + '\\n')\n\n # Footer & summary\n OUTPUT.write(\"-\"*78 + '\\n')\n percent = total_lines and str(int(100.0*total_covered/total_lines)) or \"--\"\n OUTPUT.write((\"TOTAL\".ljust(40) + str(total_lines).rjust(8) +\n str(total_covered).rjust(8) + str(percent).rjust(6) + \"%\" +\n '\\n'))\n OUTPUT.write(\"-\"*78 + '\\n')\n\n # Close logfile\n if options.output:\n OUTPUT.close()\n","sub_path":"gcovr/text_report.py","file_name":"text_report.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"404390479","text":"from selenium.webdriver import Chrome\nfrom selenium.webdriver.common.keys import Keys\nimport time\nbrowser = Chrome()\nbrowser.get(\"https://www.momoshop.com.tw/main/Main.jsp/\")\nelement1 = browser.find_element_by_id('keyword')\n# 找出網頁serch的text id 
\nelement1.clear()\nelement1.send_keys(\"apple\")\nelement1.send_keys(Keys.RETURN)\ntime.sleep(20)\nbrowser.close()","sub_path":"demo12.py","file_name":"demo12.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"367483232","text":"from builtins import print\n\n__author__ = 'Victor'\n\nimport nltk\n\n\nnltk.download('movie_reviews')  # fetch just the corpus this script needs; a bare nltk.download() opens the interactive downloader\n\nfrom nltk.corpus import movie_reviews\n\n# Inspect the corpus\nprint(movie_reviews.fileids())\nprint(len(movie_reviews.fileids()))\nprint(movie_reviews.words('neg/cv000_29416.txt'))\n\n# Split into training/testing sets\ntrain_fileids = movie_reviews.fileids()[:500] + movie_reviews.fileids()[1000:1500]\ntest_fileids = movie_reviews.fileids()[500:1000] + movie_reviews.fileids()[1500:2000]\n\n# Collect all the words from the training examples\nvocabulary = set()\n# For each training file and for each word in the file\nfor fileid in train_fileids:\n    for word in movie_reviews.words(fileid):\n        vocabulary.add(word)\n\n\ndef format_dataset(fileids, featureSet):\n    dataset = list()\n    for fileid in fileids:\n        file_words = set(movie_reviews.words(fileid))  # set gives O(1) membership tests instead of scanning the corpus per word\n        features = dict()\n        for word in featureSet:\n            features[word] = word in file_words\n        pos_or_neg = fileid[:3]\n        example = (features, pos_or_neg)\n        dataset.append(example)\n    return dataset\n\n# Get the datasets ready\ntrain_set = format_dataset(train_fileids, vocabulary)\ntest_set = format_dataset(test_fileids, vocabulary)\n\n# Create some classifiers\nfrom nltk.classify.decisiontree import DecisionTreeClassifier\ntree = DecisionTreeClassifier.train(train_set)\n\nfrom nltk.classify.naivebayes import NaiveBayesClassifier\nbayes = NaiveBayesClassifier.train(train_set)\n\n# Test the classifiers\nfrom nltk.classify import accuracy\nprint(\"Decision Tree accuracy: \", accuracy(tree, test_set))\nprint(\"Naive Bayes accuracy: \", accuracy(bayes, test_set))\n","sub_path":"MovieReview.py","file_name":"MovieReview.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"2822291","text":"\"\"\"\n196ms\n\nGives the correct answer in terms of rankings, but the actual percentages don't match the test case\n\nMy suspicion is it has to do with rolling doubles - I'm unclear what happens if you roll *4* doubles, do both 3 and 4 bring you to jail?\nAnd I think there was a rule about rolling again if you roll a double? I'm almost positive that wasn't in the problem description though\n    oh! maybe the phrasing means after each roll, so if you roll a double it's like a second turn i.e. just the same?\nI don't suspect the cards not being shuffled matters\n\nbut the Markov math works. For some reason I had to figure this out myself, it wasn't explicitly in any of my books; it's this:\ntake the transition matrix and subtract the identity (since 1 is an eigenvalue) TMx = x => (TM-1)x == Ax = 0\nthis set of equations is now degenerate however, and if solved as is will give x=0, A*0 = 0,\nso drop the last row and replace it with all ones, then solve Ax=b where b = [0,0,...1] using simple row elimination\nthat is to say, the last row enforces the normalization condition\n\n(you should also be able to solve for the eigenvectors, and keep the one with the greatest (=1) eigenvalue\n(but a: you don't need to do that, the above is simpler; and 2: when I did it it wasn't working, don't know why)\n\nUh, what I'm describing is literally solving for the eigenvector, or the null-space, nothing special to Markov\nthere must be a reason this isn't the way...\n\"\"\"\n\n\n\"\"\"\nMarkov chain!!!!\n\"\"\"\n\nfrom fractions import Fraction\nimport numpy as np\nfrom scipy.linalg import solve\n\nnstates = 40\ndie = 4\n\ntransition_matrix = [[0]*nstates for _ in range(nstates)]\n# actually I think this might be transposed from the way I want to think of it\n# or rather, the way I have it below is transposed\n# ah, no, rather you multiply the state vector on the left? okay\n\nfor i in range(nstates):\n    # roll dice\n    for j in range(1,die+1):\n        for k in range(1,die+1):\n            if j != k:\n                transition_matrix[i][(i+j+k)%nstates] += Fraction(1,die*die)\n            else:\n                # 1 in die**2 chance the last two throws were doubles\n                transition_matrix[i][(i+j+k)%nstates] += Fraction(1,die*die)*Fraction(die*die - 1,die*die)\n                transition_matrix[i][10] += Fraction(1,die*die)*Fraction(1,die*die)\n    \n    \n    # go to jail square\n    transition_matrix[i][10] += transition_matrix[i][30]\n    transition_matrix[i][30] = 0\n    \n    # community chest, at 2, 17, and 33\n    for l in [2,17,33]:\n        t = transition_matrix[i][l]\n        transition_matrix[i][l] = t * Fraction(14,16)\n        transition_matrix[i][0] += t * Fraction(1,16)\n        transition_matrix[i][10] += t * Fraction(1,16)\n    \n    # chance, at 7, 22, and 36\n    aDict = {7:15, 22:25, 36:5}\n    for l in [7,22,36]:\n        t = transition_matrix[i][l]\n        transition_matrix[i][l] = t * Fraction(6,16)\n        transition_matrix[i][0] += t * Fraction(1,16) # Go\n        transition_matrix[i][10] += t * Fraction(1,16) # Jail\n        transition_matrix[i][11] += t * Fraction(1,16) # C1\n        transition_matrix[i][24] += t * Fraction(1,16) # E3\n        transition_matrix[i][39] += t * Fraction(1,16) # H2\n        transition_matrix[i][5] += t * Fraction(1,16) # R1\n        #x = ((l//10)*10 + 15)%40\n        #fuckit\n        x = aDict[l]\n        transition_matrix[i][x] += t * Fraction(2,16) # Next railroad (two cards)\n        if l == 22:\n            transition_matrix[i][28] += t * Fraction(1,16) # utility company\n        else:\n            transition_matrix[i][12] += t * Fraction(1,16)\n        if l != 36:\n            transition_matrix[i][l-3] += t * Fraction(1,16) # go back 3\n        else:\n            # oh boy\n            transition_matrix[i][33] += t * Fraction(1,16)*Fraction(14,16)\n            transition_matrix[i][0] += t * Fraction(1,16)*Fraction(1,16)\n            transition_matrix[i][10] += t * Fraction(1,16)*Fraction(1,16)\n    \n\nfor i in range(nstates):\n    sum(transition_matrix[i])  # sanity check only; each row should sum to 1 (result unused)\n\n# scipy.linalg.solve?\n# transpose\ntmT = [[0]*(nstates) for _ in range(nstates)]\nfor i in range(nstates):\n    for j in range(nstates):\n        tmT[i][j] = transition_matrix[j][i]*1.0\n        if i == j: tmT[i][j] -= 1\n\ntmT[-1] = [1]*(nstates)\ntmT = np.array(tmT)\nb = np.zeros(nstates)\nb[-1] = 1\n\n\nx = solve(tmT, b)\n# appears to work, but answer does not agree with provided solution.
mrr.\n# typos....and rules...\n# just too much jail now...\n# chance can land on commity chest!!!!\n# still not it\n\ny = [ n for n in x]\none = y.pop(y.index(max(y)))\ntwo = y.pop(y.index(max(y)))\nthree = y.pop(y.index(max(y)))\n\ny = [ n for n in x]\nprint( y.index(one), y.index(two), y.index(three))\n# hmm, get's the right sequence...., just try it?\n# Yes! But I'm not happy.\n","sub_path":"problem84.py","file_name":"problem84.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"308863815","text":"from Instruccion import InstruccionTresOperandos\nfrom EnumRegistrosNucleo import EnumRegistrosNucleo\n\nclass InstruccionDiv(InstruccionTresOperandos):\n \n def __init__(self):\n InstruccionTresOperandos.__init__(self)\n \n def imprimir(self):\n print(\"Tipo: DIV\")\n InstruccionTresOperandos.imprimir(self)\n\n def ejecutar(self, nucleo):\n operando1 = nucleo.obtenerContenidoRegistro(self.registroFuente1)\n operando2 = nucleo.obtenerContenidoRegistro(self.registroFuente2)\n if operando2 == 0 : \n return False\n resultado = operando1 // operando2\n nucleo.guardarContenidoRegistro(self.registroDestino, resultado)\n return True","sub_path":"src/InstruccionDiv.py","file_name":"InstruccionDiv.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"385793615","text":"\nimport requests\nfrom bs4 import BeautifulSoup,Tag,CData;\nimport pymysql\nimport re\n\n\n\n# 打开数据库连接-- 求出处数据库链接\ndb = pymysql.connect(\"localhost\", \"root\", \"123456\", \"ask_source\")\n# 使用 cursor() 方法创建一个游标对象 cursor\ncursor = db.cursor()\n\nfor i in range(2): # 遍历多少页\n r = requests.get('https://www.gifjia5.com/tag/%E8%80%81%E5%8F%B8%E6%9C%BA/page' + '/%d/' % (i + 1))\n r.encoding = 'utf-8'\n # 像目标url地址发送get请求,返回一个response对象\n # print(r.text) #r.text是http response的网页HTML\n # 获取所有字段\n html_doc = r.text;\n # 设置解析对象\n soup = BeautifulSoup(html_doc, 'html.parser')\n # 页内列表格式为article\n articles = soup.find_all('article')\n # 遍历\n for article in articles:\n\n # 获取a单元的标签\n focusArray = article.find_all('a', class_='focus')\n focus = focusArray[0]\n # 获取img单元的标签\n imgArray = focus.find_all('img')\n img = imgArray[0]\n # 获取header单元的标签\n headerArray = article.find_all('header')\n header = headerArray[0]\n #########################################\n # 获取点击链接\n hrefUrl_str = focus['href']\n # 获取图片\n img_str = img['data-src']\n # 获取tag标签\n tag_a = header.find_all('a', class_='cat')\n tag_str = tag_a[0].get_text()\n # 获取标题\n title_a = header.find_all('h2')[0].find_all('a')[0]\n title_str = title_a.get_text()\n # 图片个数\n small_i = article.find_all('small', class_='text-muted')[0]\n small_str = small_i.get_text()\n # 日期\n date = article.find_all('time')[0]\n date_str = date.get_text()\n # 用户\n user = article.find_all('span', class_='author')[0]\n user_str = user.get_text()\n # 阅读数\n read = article.find_all('span', class_='pv')[0]\n read_str = read.get_text()\n # 评论\n comments_a = article.find_all('a', class_='pc')[0]\n comments_str = comments_a.get_text()\n # 评论链接\n comments_url = comments_a['href']\n # 赞\n like = article.find_all('span', class_='pv')[1]\n like_str = like.get_text()\n # 备注\n note = article.find_all('p', class_='note')[0]\n note_str = note.get_text()\n\n # print('标题:',title_str,'-- 图片个数:',small_str)\n # print('归类:',tag_str)\n # print('图片:',img_str)\n # print('链接:',hrefUrl_str)\n # print('日期:',date_str,'上传者:',user_str,read_str,like_str)\n # 
print('内容备注:',note_str)\n # print('==========分割==========')\n\n sql = \"INSERT INTO source_list_copy1\"\n list_pram = \"(title, date, cover,tag,href_url,small_count,upload_user,read_str,comments,comments_url,like_str,note)\"\n sql_values = \"VALUES('%s', '%s', '%s','%s', '%s', '%s','%s', '%s', '%s','%s', '%s', '%s')\" % \\\n (title_str, date_str, img_str,tag_str,hrefUrl_str,small_str,user_str,read_str,comments_str,comments_url,like_str,note_str)\n sql = sql + list_pram + sql_values\n\n print(sql_values)\n\n try:\n # 执行sql语句\n cursor.execute(sql)\n # 执行sql语句\n db.commit()\n except:\n # 发生错误时回滚\n db.rollback()\n print(\"失败\")\n break\n print('当前第%d页'%(i+1))\n\nprint('结束')\n\n# 关闭数据库连接\ndb.close()\n","sub_path":"Python3Test/SRC/FirstP.py","file_name":"FirstP.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"338431617","text":"from typing import Callable, Dict\n\nimport os\nfrom collections import defaultdict\n\nimport ray\nimport torch\nfrom pytorch_lightning.accelerators import DDPSpawnAccelerator\nfrom pytorch_lightning import _logger as log, LightningModule, Trainer\nfrom ray.util.sgd.torch.utils import setup_address\n\nfrom ray_lightning.session import init_session\nfrom ray_lightning.util import process_results, Queue\nfrom ray_lightning.tune import TUNE_INSTALLED, is_session_enabled\n\n\n@ray.remote\nclass RayExecutor:\n \"\"\"A class to execute any arbitrary function remotely.\"\"\"\n\n def set_env_var(self, key: str, value: str):\n \"\"\"Set an environment variable with the provided values.\"\"\"\n os.environ[key] = value\n\n def get_node_ip(self):\n \"\"\"Returns the IP address of the node that this Ray actor is on.\"\"\"\n return ray.services.get_node_ip_address()\n\n def execute(self, fn: Callable, *args, **kwargs):\n \"\"\"Execute the provided function and return the result.\"\"\"\n return fn(*args, **kwargs)\n\n\nclass RayAccelerator(DDPSpawnAccelerator):\n \"\"\"Pytorch Lightning accelerator for DDP training on a Ray cluster.\n\n This accelerator is used to manage distributed training using DDP and\n Ray for process launching. Internally, the specified number of\n Ray actors are launched in the cluster and are registered as part of a\n Pytorch DDP process group. The Pytorch Lightning trainer is instantiated\n on the driver and sent to each of these training workers where training is\n executed. The distributed training protocol is handled by Pytorch DDP.\n\n Each training worker is configured to reserve ``num_cpus_per_worker``\n CPUS and 1 GPU if ``use_gpu`` is set to ``True``.\n\n If using this accelerator, you should run your code like a normal Python\n script: ``python train.py``, and only on the head node if running in a\n distributed Ray cluster. There is no need to run this script on every\n single node.\n\n Args:\n num_workers (int): Number of training workers to use.\n num_cpus_per_worker (int): Number of CPUs per worker.\n use_gpu (bool): Whether to use GPU for allocation. For GPU to be\n used, you must also set the ``gpus`` arg in your Pytorch Lightning\n Trainer to a value > 0.\n\n Example:\n\n .. 
code_block:: python\n\n import pytorch_lightning as ptl\n from ray_lightning import RayAccelerator\n\n ptl_model = MNISTClassifier(...)\n accelerator = RayAccelerator(num_workers=4, cpus_per_worker=1,\n use_gpu=True)\n\n # If using GPUs, set the ``gpus`` arg to a value > 0.\n # The actual number of GPUs is determined by ``num_workers``.\n trainer = pl.Trainer(..., gpus=1, accelerator=accelerator)\n trainer.fit(ptl_model)\n\n \"\"\"\n\n def __init__(self,\n num_workers: int = 1,\n num_cpus_per_worker: int = 1,\n use_gpu: bool = False):\n super().__init__(trainer=None, nprocs=0)\n self.nickname = \"ddp_ray\"\n self.num_workers = num_workers\n self.num_cpus_per_worker = num_cpus_per_worker\n self.use_gpu = use_gpu\n self.workers = []\n\n def _create_worker(self):\n \"\"\"Creates Ray actor.\"\"\"\n return RayExecutor.options(\n num_cpus=self.num_cpus_per_worker,\n num_gpus=int(self.use_gpu)).remote()\n\n def setup(self, model: LightningModule):\n \"\"\"Sets up PTL Trainer and creates the Ray actors.\"\"\"\n # Check that trainer attribute has been set when this method is called.\n assert hasattr(self, \"trainer\") and self.trainer is not None\n self.trainer.use_ddp = True\n self.trainer.model = model\n self.workers = [self._create_worker() for _ in range(self.num_workers)]\n\n def teardown(self):\n \"\"\"Shutdown the DDP process group and all the Ray actors. \"\"\"\n\n def shutdown_remote():\n torch.distributed.destroy_process_group()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n ray.get([w.execute.remote(shutdown_remote) for w in self.workers])\n for w in self.workers:\n ray.kill(w, no_restart=True)\n del w\n self.workers = []\n\n def __getstate__(self):\n d = self.__dict__.copy()\n del d[\"workers\"]\n return self.__dict__\n\n def __setstate__(self, d):\n d[\"workers\"] = []\n self.__dict__.update(d)\n\n def get_local_ranks(self) -> Dict[int, int]:\n \"\"\"Creates a mapping of global ranks to local ranks.\"\"\"\n # Get the local ranks for all the workers and store as a dict.\n # First get the IP address of each remote worker.\n node_ips = ray.get([w.get_node_ip.remote() for w in self.workers])\n rank_counter_dict = defaultdict(int)\n global_to_local = [None] * self.num_workers\n for global_rank in range(self.num_workers):\n ip = node_ips[global_rank]\n global_to_local[global_rank] = rank_counter_dict[ip]\n rank_counter_dict[ip] += 1\n return global_to_local\n\n def train(self):\n \"\"\"Main training loop.\n\n Sets up the torch.distributed process group for each training\n worker. Then trigger remote training via ``train_remote`` on each\n worker. If using with Ray Tune, create a communication queue to\n revieve intermediate results, and process those results. 
Finally\n retrieve the training results from the rank 0 worker and return.\"\"\"\n\n if \"PL_GLOBAL_SEED\" in os.environ:\n seed = os.environ[\"PL_GLOBAL_SEED\"]\n ray.get([\n w.set_env_var.remote(\"PL_GLOBAL_SEED\", seed)\n for w in self.workers\n ])\n\n # Get the rank 0 address for DDP connection.\n self.ddp_address = ray.get(\n self.workers[0].execute.remote(setup_address))\n\n self.global_to_local = self.get_local_ranks()\n\n trainer = self.trainer\n assert trainer is not None\n trainer_ref = ray.put(trainer)\n # Don't pickle self.trainer when training remotely.\n self.trainer = None\n\n queue = None\n if TUNE_INSTALLED and is_session_enabled():\n # Create communication queue and send to all the workers.\n queue = Queue(actor_options={\"num_cpus\": 0})\n\n futures = [\n self.workers[i].execute.remote(self.train_remote, trainer_ref, i,\n queue)\n for i in range(self.num_workers)\n ]\n\n results = process_results(futures, queue)\n results, best_path, state_dict = results[0]\n self.trainer = trainer\n self.trainer.model.load_state_dict(state_dict)\n if self.trainer.checkpoint_callback:\n self.trainer.checkpoint_callback.best_model_path = best_path\n\n if queue:\n # Shutdown the queue.\n queue.shutdown()\n\n return results\n\n # All methods below are only executed in remote Ray workers.\n\n def train_remote(self,\n trainer: Trainer,\n global_rank: int,\n queue: Queue = None):\n \"\"\"Training function to be executed on each remote worker.\"\"\"\n assert isinstance(self, RayAccelerator)\n # This method should be executed remotely in each worker.\n self.trainer = trainer\n self.trainer.accelerator_backend = self\n self.global_rank = global_rank\n model = self.trainer.model\n\n if queue is not None:\n # Initialize session.\n init_session(rank=global_rank, queue=queue)\n\n # Calling ddp_train will call transfer_distrib_spawn_state_on_fit_end.\n # We override that method and have it just set attributes.\n # Then we can just return those attributes here.\n super(RayAccelerator, self).ddp_train(\n process_idx=global_rank, mp_queue=None, model=model)\n return self.results, self.best_model_path, self.model_state_dict\n\n def init_ddp_connection(self,\n global_rank: int,\n world_size: int,\n is_slurm_managing_tasks: bool = True) -> None:\n \"\"\"Process group creation to be executed on each remote worker.\"\"\"\n torch_backend = \"nccl\" if self.use_gpu else \"gloo\"\n\n if not torch.distributed.is_initialized():\n log.info(f\"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER:\"\n f\" {global_rank + 1}/{world_size}\")\n torch.distributed.init_process_group(\n backend=torch_backend,\n init_method=self.ddp_address,\n rank=global_rank,\n world_size=world_size,\n )\n\n def set_world_ranks(self, process_idx: int):\n \"\"\"Set the appropriate rank attribues for the trainer.\"\"\"\n self.trainer.local_rank = self.global_to_local[self.global_rank]\n self.trainer.global_rank = self.global_rank\n self.trainer.world_size = self.num_workers\n\n def init_device(self, process_idx: int, is_master: bool):\n \"\"\"Sets the correct GPU device for the trainer and torch.\"\"\"\n if self.use_gpu:\n # Ray sets CUDA_VISIBLE_DEVICES already.\n gpu_idx = 0\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n else:\n pass\n\n def get_device_ids(self):\n \"\"\"Get the GPU device id of this worker, or None if on CPU only.\"\"\"\n if self.use_gpu:\n return super(RayAccelerator, self).get_device_ids()\n else:\n return None\n\n def model_to_device(self, model: LightningModule):\n \"\"\"Moves the 
model to the appropriate device.\"\"\"\n if self.use_gpu:\n model.cuda(self.trainer.root_gpu)\n else:\n model.cpu()\n\n def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue,\n results):\n \"\"\"Sets the training output as attributes so it can be retrieved.\"\"\"\n # Save training results as attributes.\n self.results = results\n self.model_state_dict = model.state_dict()\n best_model_path = None\n if self.trainer.checkpoint_callback is not None:\n best_model_path = self.trainer.checkpoint_callback.best_model_path\n self.best_model_path = best_model_path\n\n @property\n def distributed_sampler_kwargs(self):\n \"\"\"Returns the args to use for torch.data.DistributedSampler.\"\"\"\n distributed_sampler_kwargs = dict(\n num_replicas=self.num_workers, rank=self.global_rank)\n if self.ddp_plugin is not None:\n distributed_sampler_kwargs = \\\n self.ddp_plugin.distributed_sampler_kwargs(\n distributed_sampler_kwargs\n )\n return distributed_sampler_kwargs\n\n @property\n def require_distributed_sampler(self):\n \"\"\"This accelerator requires a distributed sampler.\"\"\"\n return True\n","sub_path":"ray_lightning/ray_ddp.py","file_name":"ray_ddp.py","file_ext":"py","file_size_in_byte":11082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"249398502","text":"\"\"\"Pg_database is responsible for postges implmentation of db_interface.\"\"\"\nfrom collections import OrderedDict\n\nimport psycopg2\n\nfrom lib.db_interface import Db_handler\nfrom utils.help import get_json_config, print_err, print_inf, print_warn\n\n\nclass Pg_handler(Db_handler):\n def __init__(self, state_column=\"is_faked\"):\n \"\"\"Initiating DB connection.\"\"\"\n self.state_column = state_column\n self.conn = self.connect(get_json_config('connection.json'))\n self.cursor = self.conn.cursor()\n self.__data_inst, self.schemas_map, self.__schemas_config = {}, {}, {}\n\n def connect(self, db_params):\n \"\"\"\n Establishe db connection according given parameters.\n\n Args:\n db_params (dict): Connection parameters\n Returns:\n cursor (object): Connection cursor\n \"\"\"\n try:\n with psycopg2.connect(**db_params) as conn:\n return conn\n except Exception as e:\n print_err('Connection cannot be established by given parameters.')\n raise Exception(e.message)\n\n def __conf_schemas(self):\n \"\"\"\n Get schemas list from config dict. flatten.\n\n Returns:\n schemas (list): Schema names\n \"\"\"\n try:\n inheritants = self.schemas_config['inheritants']\n schemas_list = []\n for schm_def, schemas in inheritants.items():\n self.schemas_map.update({schema: schm_def for schema\n in schemas})\n schemas_list.extend(schemas)\n return schemas_list\n except Exception as e:\n print_err('Json structure definition is invalid.[1]')\n raise Exception(e.message)\n\n def __conf_schema_tables(self, schema):\n \"\"\"\n Get tables list from config dict. flatten for given schema.\n\n Args:\n schema (str): Schema name\n Returns:\n tables (list): Schema names\n \"\"\"\n schema = self.__get_inheritee(schema)\n try:\n return self.schemas_config['schemas'][schema]['tables'].keys()\n except Exception:\n raise Exception(\"\"\"Schema \"{}\" wrong name. Take a look at the\n definition.\"\"\".format(schema))\n\n def __conf_table_columns(self, schema, table):\n \"\"\"\n Get coulmns list from config dict. 
flatten for given table.\n\n Args:\n schema (str): Schema name\n table (str): Table name\n Returns:\n columns (list): Schema names\n \"\"\"\n try:\n schema = self.__get_inheritee(schema)\n return self.schemas_config['schemas'][schema]['tables'][\n table]['columns'].keys()\n except Exception as e:\n print_err(e.message)\n raise Exception('Json structure definition is invalid.[2]')\n\n def __conf_table_keys(self, schema, table):\n \"\"\"\n Get keys list from config dict. flatten for given table.\n\n !Note this is mandatory to have.\n\n Args:\n schema (str): Schema name\n table (str): Table name\n Returns:\n keys (list): Schema keys\n \"\"\"\n try:\n schema = self.__get_inheritee(schema)\n return self.schemas_config['schemas'][schema]['tables'][\n table]['keys']\n except Exception as e:\n print_err(e.message)\n raise Exception('Json structure definition is invalid.[3]')\n\n def __schema_exists(self, schema):\n \"\"\"\n Check if schema exists in information schema.\n\n Args:\n schema (str): Schema name\n Returns:\n Bool: True if exists False otherwise\n \"\"\"\n select_schema_sql = \"\"\"SELECT *\n FROM information_schema.schemata\n WHERE schema_name = '{}';\"\"\".format(schema)\n self.cursor.execute(select_schema_sql)\n return True if self.cursor.fetchone() else False\n\n def __table_exists(self, schema, table):\n \"\"\"\n Check if table exists for a particular schema.\n\n Args:\n schema (str): Schema name\n table (str): Table name\n Returns:\n Bool: True if exists False otherwise\n \"\"\"\n select_schema_sql = \"SELECT to_regclass('{}.{}')\".format(schema, table)\n self.cursor.execute(select_schema_sql)\n return True if self.cursor.fetchone()[0] else False\n\n def __column_exists(self, schema, table, column):\n \"\"\"\n Check if column exists for a particular table.\n\n Args:\n schema (str): Schema name\n table (str): Table name\n column (str): Column name\n Returns:\n Bool: True if exists False otherwise\n \"\"\"\n select_column_sql = \"\"\"SELECT * FROM information_schema.columns\n WHERE table_schema='{}' AND table_name='{}' AND\n column_name='{}'\"\"\".format(schema, table,\n column)\n self.cursor.execute(select_column_sql)\n return True if self.cursor.fetchone() else False\n\n def __conf_coulmn_rules(self, schema, table, column):\n \"\"\"\n Get coulmns rules which will be used for data faking.\n\n Args:\n schema (str): Schema name\n table (str): Table name\n Returns:\n rules (list): User defined rulles\n \"\"\"\n schema = self.__get_inheritee(schema)\n return self.schemas_config['schemas'][schema]['tables'][\n table]['columns'][column]\n\n def __get_inheritee(self, schema):\n \"\"\"\n Get schema name which inherits its schema definition.\n\n Args:\n schema (str): Inheritant schema name\n Returns:\n schema (str): Inheritee schema name\n \"\"\"\n try:\n return self.schemas_map[schema]\n except Exception:\n print_err(\"There isn't defined inheritee schema for {}\".format(\n schema))\n\n def fake_table_data(self, schema, table, conf_table_keys):\n print_inf('Processing \"{}.{}\" table.'.format(schema, table), 1)\n counter, err_counter, row_exist = 0, 0, True\n while row_exist:\n failed_update = True\n while failed_update:\n update_query = self.__generate_update_query(schema, table,\n conf_table_keys)\n try:\n self.cursor.execute(update_query['sql'],\n update_query['values'])\n row_exist = self.cursor.fetchone()\n self.conn.commit()\n if row_exist:\n counter += 1\n failed_update = False\n break\n # in case there is a unique constrain violation just pass\n except psycopg2.IntegrityError:\n 
failed_update = True\n err_counter += 1\n self.conn.rollback()\n\n if counter and counter % 1000 == 0:\n print_inf('Updated {} rows in {}.{}.'.format(counter, schema,\n table), 1)\n print_warn(\"Script passed by {} duplicate rows\".format(\n err_counter))\n if counter:\n print_inf('Updated {} rows in {}.{}.'.format(counter, schema,\n table), 1)\n return True\n\n def fake_tables_data(self):\n schemas = self.__conf_schemas()\n length = len(schemas)\n current_pos = 0\n for schema in schemas:\n current_pos += 1\n print_warn(\"\"\"Current position is {} from {}\"\"\".format(\n current_pos, length), 1)\n for table in self.__conf_schema_tables(schema):\n self.add_status_to_table(schema, table)\n conf_table_keys = self.__conf_table_keys(schema, table)\n self.fake_table_data(schema, table, conf_table_keys)\n return True\n\n def __generate_update_query(self, schema, table, conf_table_keys):\n \"\"\"\n Generate update sql based on condition.\n\n Returns:\n {sql, values}(dict): Query, values.\n \"\"\"\n keys = \", \".join(conf_table_keys)\n try:\n select_sql = \"\"\"SELECT {} FROM {}.{} WHERE {} IS FALSE LIMIT 1\n FOR UPDATE\"\"\".format(keys, schema, table, self.state_column)\n except Exception as e:\n print_err(e.message)\n raise Exception('Select sql generating problem. {}'.format(\n select_sql))\n\n try:\n fake_data = OrderedDict()\n data_types = self.data_inst.get_types()\n for column in self.__conf_table_columns(schema, table):\n rule = self.__conf_coulmn_rules(schema, table, column)\n if rule['type'] in data_types:\n fake_data[column] = data_types[rule['type']]\n else:\n print_err(\"\"\"Fake data type \"{}\" is not implemented\n yet.\"\"\".format(rule['type']))\n return False\n fake_data[self.state_column] = True\n set_sql = \"=%s, \".join(fake_data.keys())\n set_sql += \"=%s\"\n update_sql = \"\"\"UPDATE {}.{} SET {} WHERE {} IN ({}) RETURNING {}\"\"\"\n update_sql = update_sql.format(schema, table, set_sql, keys,\n select_sql, keys,)\n return {'sql': update_sql, 'values':\n [val for _, val in fake_data.items()]}\n except Exception as e:\n print_err(e.message)\n raise Exception('Update query generating problem.')\n\n def conf_is_correct(self):\n \"\"\"\n Check wheter defined config is correct.\n\n Means the defined schemas, tables, columns exist.\n\n Args:\n config (dict): Database tree started from schemas to its tcolumns\n that need to be shuffled\n Returns:\n Bool: True if defined structure exist in connected DB\n False otherwise\n \"\"\"\n print_inf('Checking schemas correctness according to config.json')\n try:\n error = False\n for schema in self.__conf_schemas():\n if not self.__schema_exists(schema):\n print_err(\"\"\"Schema \"{}\" doesn't exist\"\"\".format(schema))\n error = True\n\n for table in self.__conf_schema_tables(schema):\n if not self.__table_exists(schema, table):\n print_err(\"\"\"Table \"{}\" for schema \"{}\" doesn't\n exist\"\"\".format(table, schema))\n error = True\n for column in self.__conf_table_columns(schema, table):\n if not self.__column_exists(schema, table, column):\n print_err(\"\"\"Column \"{}\" in table \"{}\" for schema\n \"{}\" doesn't exist\"\"\".format(column, table,\n schema))\n error = True\n print_inf('{} success'.format(schema))\n return False if error else True\n except Exception as e:\n print_err(e.message)\n\n def alter_column(self, schema, table, action='ADD'):\n \"\"\"Add or remove state checking field.\"\"\"\n alter_column_sql = \"\"\"ALTER TABLE {}.{} {} COLUMN \"{}\" {};\"\"\"\n add_case = 'BOOLEAN DEFAULT FALSE'\n alter_column_sql = 
alter_column_sql.format(schema, table, action,\n self.state_column,\n add_case if action == 'ADD'\n else '')\n try:\n self.cursor.execute(alter_column_sql)\n return self.conn.commit()\n except Exception:\n return self.conn.rollback()\n\n def add_status_to_table(self, schema, table):\n \"\"\"Add state_column if it is missing in the table.\"\"\"\n if not self.__column_exists(schema, table, self.state_column):\n print_inf(\"\"\"New \"{}\" field is being added to {}.\n Please wait....\"\"\".format(self.state_column, table))\n self.alter_column(schema, table)\n return True\n\n def clean_up(self):\n \"\"\"Add state_column if it is missing in the table.\"\"\"\n for schema in self.__conf_schemas():\n for table in self.__conf_schema_tables(schema):\n if self.__column_exists(schema, table, self.state_column):\n print_inf(\"\"\"The \"{}\" field is being removed. Please\n wait...\"\"\".format(self.state_column))\n self.alter_column(schema, table, 'DROP')\n return True\n\n @property\n def data_inst(self):\n \"\"\"User defined data types getter.\"\"\"\n return self.__data_inst\n\n @data_inst.setter\n def data_inst(self, inst):\n \"\"\"User defined data types setter.\"\"\"\n self.__data_inst = inst\n\n @property\n def schemas_config(self):\n \"\"\"User defined configs setter.\"\"\"\n return self.__schemas_config\n\n @schemas_config.setter\n def schemas_config(self, configs):\n \"\"\"User defined configs setter.\"\"\"\n self.__schemas_config = configs\n","sub_path":"lib/pg_database.py","file_name":"pg_database.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"504592459","text":"# Calculate IDF\ntot = len(df['docIdx'].unique())\npb_ij = df.groupby(['wordIdx'])\nIDF = np.log(tot / pb_ij['docIdx'].count())\nIDF_dict = IDF.to_dict()\n\n\ndef MNB(df, smooth=False, IDF=False):\n '''\n Multinomial Naive Bayes classifier\n :param df [Pandas Dataframe]: Dataframe of data\n :param smooth [bool]: Apply Smoothing if True\n :param IDF [bool]: Apply Inverse Document Frequency if True\n :return predict [list]: Predicted class ID\n '''\n # Using dictionaries for greater speed\n df_dict = df.to_dict()\n new_dict = {}\n prediction = []\n\n # new_dict = {docIdx : {wordIdx: count},....}\n for idx in range(len(df_dict['docIdx'])):\n docIdx = df_dict['docIdx'][idx]\n wordIdx = df_dict['wordIdx'][idx]\n count = df_dict['count'][idx]\n try:\n new_dict[docIdx][wordIdx] = count\n except:\n new_dict[df_dict['docIdx'][idx]] = {}\n new_dict[docIdx][wordIdx] = count\n\n # Calculating the scores for each doc\n for docIdx in range(1, len(new_dict) + 1):\n score_dict = {}\n # Creating a probability row for each class\n for classIdx in range(1, 21):\n score_dict[classIdx] = 1\n # For each word:\n for wordIdx in new_dict[docIdx]:\n # Check for frequency smoothing\n # log(1+f)*log(Pr(i|j))\n if smooth:\n try:\n probability = Pr_dict[wordIdx][classIdx]\n power = np.log(1 + new_dict[docIdx][wordIdx])\n # Check for IDF\n if IDF:\n score_dict[classIdx] += (\n power * np.log(\n probability * IDF_dict[wordIdx]))\n else:\n score_dict[classIdx] += power * np.log(\n probability)\n except:\n # Missing V will have log(1+0)*log(a/16689)=0\n score_dict[classIdx] += 0\n # f*log(Pr(i|j))\n else:\n try:\n probability = Pr_dict[wordIdx][classIdx]\n power = new_dict[docIdx][wordIdx]\n score_dict[classIdx] += power * np.log(\n probability)\n # Check for IDF\n if IDF:\n score_dict[classIdx] += power * np.log(\n probability * IDF_dict[wordIdx])\n except:\n # 
Missing V will have 0*log(a/16689) = 0\n score_dict[classIdx] += 0\n # Multiply final with pi\n score_dict[classIdx] += np.log(pi[classIdx])\n\n # Get class with max probabilty for the given docIdx\n max_score = max(score_dict, key=score_dict.get)\n prediction.append(max_score)\n\n return prediction","sub_path":"models/MNB.py","file_name":"MNB.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"152645750","text":"from app import create_app, db\nfrom app.models import User, Article, Comment\n\napp = create_app()\ndb.create_all(app=app)\n\n\ndef read_csv():\n import csv\n with open('test.csv') as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n for row in f_csv:\n yield row\n\nwith app.app_context():\n articles = Article.query.filter_by().all()\n for article in articles:\n url = article.img_url\n base_url = 'https://fangkeng-1253385403.cos.ap-beijing.myqcloud.com/%E6%96%B0%E5%BB%BA%E6%96%87%E4%BB%B6%E5%A4%B9/%E4%BB%81%E6%9A%AE%E6%9A%AE%E6%9A%AE%20(12323423)%20-%20%E8%B7%AF%E8%BF%87%E5%9B%BE%E5%BA%8A_files/'\n last_url = url.split('/')[-1]\n article.img_url = base_url + last_url\n db.session.commit()\n # print(new_url)\n# with app.app_context():\n# for row in read_csv():\n# article = Article(id=row[0], name=row[1], ticket=row[3], text=row[4], address=row[5], site=row[6], area=row[7], img_url=row[8])\n#\n# for comment in row[2].split(';'):\n# new_comment = Comment(comment, row[0])\n# db.session.add(new_comment)\n#\n# db.session.add(article)\n# db.session.commit()\n # article = Article.query.get(1)\n # print(article.comment.all())\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"518539259","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 25 09:56:37 2016\n\n@author: Bonan\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as pl\npl.rcParams['image.cmap'] = 'viridis'\nwlRange = np.linspace(400,800,100)\n#%% Calculate the average\ndef avgSpec(wlRange,spec):\n \"\"\"Calculate the average of the spectrum stack. 
Averaging is along the 1st axis\n    \"\"\"\n    avgData = np.average(spec, axis = 0)\n    rng = np.linspace(wlRange[0], wlRange[-1], len(avgData))\n    pl.plot(rng, avgData)\n#%%\n \nif __name__ == \"__main__\":\n    path = \"C:/Users/Bonan/OneDrive/Documents/PartIII/Project/linearDefectResults/\"\n    filename = \"20160405-103908AligenTrue.npy\"\n    data = np.load(path + filename)\n    pl.imshow(data)\n    pl.figure()\n    avgSpec(wlRange, data)\n    #%% Compare with single spectrum\n    from preset import s\n    s.setThickness([2000])\n    s.setPitch([180])\n    res0 = s.scanSpectrum(wlRange)\n    pl.plot(wlRange,res0[1], label = \"180 helix\")\n    s.setPitch([150])\n    s.setThickness([3000])\n    res1 = s.scanSpectrum(wlRange)\n    pl.plot(wlRange,res1[1],label = \"150 helix\")\n    pl.legend()\n    pl.xlabel(\"Wavelength /nm\")\n    pl.ylabel(\"Reflectivity(L-L)\")\n    pl.title(\"3 Layer defect averaged vs Single helix\")\n    #%%","sub_path":"Simulation/BonanProject-aae99f9/linearDefectDataLoad.py","file_name":"linearDefectDataLoad.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"363372917","text":"# exception classes\nclass PathAlreadySetError(Exception):\n    \"\"\"\n    This Exception is thrown if the Config is already initialized\n    and your code tries to set the path.\n\n    Attributes\n    ----------\n    path : str\n        The path your code was trying to set.\n    current_path : str\n        The actual path the configuration was initialized with.\n    \"\"\"\n\n    def __init__(self, path):\n        \"\"\"\n        Parameters\n        ----------\n        path : str\n            The path your code was trying to set.\n        \"\"\"\n\n        self.path = path\n        self.current_path = Config.get_config().get_path()\n        super().__init__(\"\"\"Path of config file already set to \\\"{}\\\".\n                         Setting it to \\\"{}\\\" failed.\"\"\".format(self.current_path, path))","sub_path":"src/config/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"215604672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar  2 14:18:06 2019\n\n@author: eatdacarrot\n\"\"\"\nimport os\npath = \"/Users/eatdacarrot/ComputerScience/PythonSpyder/GeorgeFoxPractice/2019\"\n# join path and file name properly; the original passed path+\"Drawinput.txt\" as a single argument,\n# which produced \".../2019Drawinput.txt\"\nwith open(os.path.join(path, \"Drawinput.txt\"), \"r\") as file:\n    lines = file.read().splitlines()\nline1=lines[0].split(\" \")\ncirclex,circley,circleradius=int(line1[0]),int(line1[1]),int(line1[2])/2\ndel lines[0]\nline1=lines[0].split(\" \")\nrectx,recty,rectw,recth=int(line1[0]),int(line1[1]),int(line1[2]),int(line1[3])\ndel lines[0]\nfor x in lines:\n    y=x.split(\" \")\n    y[0],y[1]=int(y[0]),int(y[1])\n    if((((y[0]-(circlex+circleradius))**2)+((y[1]-(circley+circleradius))**2))**.5<=circleradius):\n        print(\"circle\")\n    elif(y[0]>=rectx and y[0]<=rectx+rectw and y[1]>=recty and y[1]<=recty+recth):\n        print(\"rectangle\")\n    else:\n        print(\"neither\")","sub_path":"GeorgeFoxPractice/2019/Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"547349300","text":"import csv\nimport pandas as pd\n\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import RFE\nfrom sklearn.metrics import accuracy_score\n\nimport warnings\n\nwarnings.simplefilter(\"ignore\")\n\ndirectory = '/ds_data_big/ds_data/'\n\ntraining_file_path
= directory + 'data_train.csv'\ntest_file_path = directory + 'data_test.csv'\n\n\nclass Model:\n def __init__(self):\n numeric_features = ['num1', 'num2', 'num3', 'num4', 'num5', 'num6', 'num7', 'num8',\n 'num9', 'num10', 'num11', 'num12', 'num13', 'num14', 'num15', 'num16',\n 'num17', 'num18', 'num19', 'num20', 'num21', 'num22', 'num23']\n derived_features = ['der1', 'der2', 'der3', 'der4', 'der5', 'der6', 'der7', 'der8', 'der9', 'der10',\n 'der11', 'der12', 'der13', 'der14', 'der15', 'der16', 'der17', 'der18', 'der19']\n categorical_features = ['cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8',\n 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14']\n\n self.features = numeric_features + derived_features + categorical_features\n\n self.model = RandomForestClassifier()\n self.no_of_features = 10\n\n def fetch_training_data(self, training_file):\n print(\"fetch_training_data\")\n\n data = pd.read_csv(training_file)\n data = data.dropna(axis=0)\n\n y = data.target\n X = data[self.features]\n self.train, self.val, self.train_labels, self.val_labels = train_test_split(X, y, test_size=0.10)\n\n sm = SMOTE(random_state=2)\n X_train_res, y_train_res = sm.fit_sample(self.train, self.train_labels.ravel())\n\n self.train = X_train_res\n self.train_labels = y_train_res.ravel()\n\n def build_model(self):\n print(\"build_model\")\n self.model = RFE(self.model, 15)\n self.model.fit(self.train, self.train_labels)\n\n def evaluate_model(self):\n print(\"evaluate_model\")\n preds = self.model.predict(self.val)\n print(accuracy_score(self.val_labels, preds))\n\n def predict_using_model(self, testing_file):\n print(\"predict_using_model\")\n test_data = pd.read_csv(testing_file)\n test_data = test_data.dropna(axis=0)\n\n X_test = test_data[self.features]\n predictions = self.model.predict(X_test)\n\n result_csv = testing_file.replace(\".csv\", \"_output.csv\")\n with open(result_csv, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"target\"])\n for i in range(len(test_data)):\n writer.writerow([int(test_data.iloc[i]['id']), predictions[i]])\n\n\nprocess = Model()\nprocess.fetch_training_data(training_file_path)\nprocess.build_model()\nprocess.evaluate_model()\nprocess.predict_using_model(test_file_path)\n","sub_path":"my_ml_code.py","file_name":"my_ml_code.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"490671750","text":"import json\nfrom django.http import JsonResponse\nfrom django.contrib.auth.models import Group\nfrom utils.user.require import require_login\nfrom cmdb.models import CMDB\n\n# Create your views here.\n\n@require_login\ndef cmdb_list(request):\n data=json.loads(request.body)\n group = Group.objects.get(id=data.get('group')) \n cmdb_set = CMDB.objects.select_related('creator').filter(group=group)\n # print(cmdb_set[0].creator.username)\n # print(CMDB.objects.prefetch_related('creator__username').filter(group=group).values('id','device_type','port','hostname','addtime','creator_username'))\n cmdbs=[]\n for cmdb in cmdb_set:\n cmdbs.append({\n 'id':cmdb.id,\n 'device_type':cmdb.device_type,\n 'port':cmdb.port,\n 'hostname':cmdb.hostname,\n 'addtime':cmdb.addtime.strftime('%Y-%m-%d %H:%M:%S'),\n 'user':cmdb.creator.username,\n 'ip':cmdb.ip\n })\n return 
JsonResponse(cmdbs,safe=False)","sub_path":"SafeGuard/guard/cmdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"374462874","text":"from ...containers import RPCStorageContainer\nfrom ...descriptors import DescriptorFactory\n\nfrom functools import partial\n\n__all__ = ['RPCMeta']\n\n\nclass RPCMeta(type):\n \"\"\"Creates RPC storage interface for each class type\"\"\"\n\n def __new__(mcs, name, bases, cls_dict):\n cls = super().__new__(mcs, name, bases, cls_dict)\n\n members = RPCStorageContainer.get_member_instances(cls)\n ordered_members = RPCStorageContainer.get_ordered_members(members)\n factory_callback = partial(RPCStorageContainer, mapping=members, ordered_mapping=ordered_members)\n\n cls._rpc_container = DescriptorFactory(factory_callback)\n return cls\n","sub_path":"network/metaclasses/mapping/rpc_mapping.py","file_name":"rpc_mapping.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"277022338","text":"#coding:utf-8\nimport turtle\n\n\n# 基础课 5 的资料\n# list(列表) 常用操作\n# 创建一个空列表\na = []\n\n# 使用 len() 函数可以求出列表的长度(列表中元素的个数)\nlength = len(a)\nprint(length)\n\n# 可以用列表的 append 函数往列表末尾插入一个元素\na.append(1)\nprint(a)\n# [1]\n\na.append(3)\nprint(a)\n# [1, 3]\n\n# 多添加几个元素\na.append(2)\na.append(3)\na.append(4)\na.append(5)\na.append(3)\nprint(a)\n# [1, 3, 2, 3, 4, 5, 3]\n\n# 删除一个特定的元素, 该元素必须出现在列表中\na.remove(4)\nprint(a)\n# [1, 3, 2, 3, 5, 3]\n\n# 可以发现, 列表中有多个相同的元素 3\n# 试试删除它会怎样\na.remove(3)\nprint(a)\n# [1, 2, 3, 4, 3]\n\na.remove(3)\nprint(a)\n# [1, 2, 4, 3]\n\n# 结果是 remove 函数只会删除从头到尾出现的第一个该元素\n\n\n\n\n# 基础课5 的作业, 复习一下 list 的使用\n# 中国本周的每日平均气温, 从周一到周日\ntemps = [22, 19, 22, 21, 25, 27, 30]\n\n# 周五的天气是(下标从 0 开始)\nprint(temps[4])\n\n\n# 题 1\n# 周一的天气是?\nprint(temps[0])\n# 周日的天气是?\nprint(temps[6])\n# 周二的天气是?\nprint(temps[1])\n\n\n# 求本周最高气温的函数\ndef max(array):\n # array 是一个存了很多数据的数组\n # 让 a 等于第一个元素\n a = array[0]\n for i in array:\n # 遍历整个 array, 如果发现比a更大的数字, 就将 a 设置为新的最大值\n if a < i:\n a = i\n # 循环结束, 此时 a 是数组中最大的元素了\n return a\n\nprint(max(temps))\n\n\n# 题 2\n# 写一个函数求本周最低气温, 函数声明如下\ndef min(array):\n a = array[0]\n for i in array:\n if i < a:\n a = i\n\n return a\n\n\n# 题 3\n# 使用上面的函数和描述,删除本周最高气温和最低气温,然后求出平均气温\ndef avg(array):\n temp_sum = 0\n for i in array:\n temp_sum += i\n return temp_sum / len(array)\n\n\ndef delete_max_temp(array):\n array.remove(max(array))\n\n\ndef delete_min_temp(array):\n array.remove(min(array))\n\n\ndef delete_max_min_for_avg(array):\n delete_max_temp(array)\n delete_min_temp(array)\n print(\"average of temperature:\", avg(array))\n\n\ndelete_max_min_for_avg(temps)\n\n\n# 以下 2 题要用循环\n# while 和 for 都可以,建议用自己熟悉的那种\n# 附件中有两个参考图片\n# 下面这个网页可以上传图片 对图片取颜色代码\n# 如果你不会英语, 点击页面 choose file\n# 然后 show image\n# 在图片上点一下,颜色代码就出现在右下角了\n# http://html-color-codes.info/colors-from-image/\n#\n# 题 4\n# 根据本周天气,绘制折线图\n# 折线图的颜色 方案任选\n# 中国本周的每日平均气温, 从周一到周日\ntemps = [22, 19, 22, 21, 25, 27, 30]\nweeks = ['Mon', 'Tue', 'Wen', 'Thu', 'Fri', 'Sat', 'Sun']\nline_color = ['#FF0000', '#FF4000', '#FF8000', '#FFBF00', '#FFFF00', '#BFFF00', '#80FF00']\nt = turtle.Turtle()\nt.speed(0)\npencolor = \"#8CEBFF\"\nrect_coor_color = '#B6B4B6'\nline_size = 2\n\n\ndef temppic_line(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey, array):\n rect_coordinate(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey)\n array = createpoints(x, y, lengthx, 
lengthy, measure_rangex, measure_rangey, array)\n drawpoints(array)\n\n\ndef drawpoints(array):\n t.up()\n t.goto(array[0])\n t.pencolor(pencolor)\n t.pensize(line_size)\n t.down()\n for i in range(len(array)):\n t.pencolor(line_color[i])\n t.goto(array[i])\n\n\ndef createpoints(x, y, lengthx, lengthy, measure_rangex, measure_rangey, array):\n points = []\n offectx = lengthx / measure_rangex\n offecty = lengthy / measure_rangey\n for i in range(len(array)):\n points.append((x + offectx + i * offectx, y + array[i] * offecty))\n return points\n\n\ndef rect_coordinate(x, y, lengthx, lengthy, countx, county, measure_rangex,\n measure_rangey):\n y_line(x, y, lengthy, county, measure_rangey)\n x_line(x, y, lengthx, countx, measure_rangex)\n\n\ndef y_line(x, y, lengthy, county, measure_rangey):\n \"\"\"x,y 是原点坐标 ,lengthy 是y轴长度, county是y轴刻度\"\"\"\n t.up()\n t.goto(x, y)\n t.setheading(90)\n t.down()\n t.forward(lengthy + 50)\n scaley(x, y, lengthy, county, measure_rangey)\n t.up()\n t.goto(x, y + lengthy + 50)\n t.down()\n t.write('Temperature', align='center')\n\n\ndef x_line(x, y, lengthx, countx, measure_rangex):\n t.up()\n t.goto(x, y)\n t.setheading(0)\n t.down()\n t.forward(lengthx + 50)\n scalex(x, y, lengthx, countx, measure_rangex)\n t.up()\n t.goto(x + lengthx + 50, y)\n t.down()\n t.write('WeekDay', align='center')\n\n\ndef scalex(x, y, lengthx, countx, measure_rangex):\n per_length = lengthx / countx\n x = x + per_length\n for i in range(countx):\n t.up()\n t.goto(x + i * per_length, y)\n t.down()\n t.setheading(90)\n t.forward(5)\n t.up()\n t.goto(x + i * per_length, y - 20)\n t.down()\n t.write(weeks[int(measure_rangex / countx * i)], align='center')\n\n\ndef scaley(x, y, lengthy, county, measure_rangey):\n\n per_length = lengthy / county\n y = y + per_length\n for i in range(county):\n t.up()\n t.goto(x, y + i * per_length)\n t.down()\n t.setheading(0)\n t.forward(5)\n t.up()\n t.goto(x-25, y + i * per_length)\n t.down()\n t.write(measure_rangey / county * (i + 1), align='center')\n\n\n\ndef runtemppic():\n x = 50\n y = 0\n lengthx = 300\n lengthy = 300\n countx = 7\n county = 10\n measure_rangex = 7\n measure_rangey = 40\n t.pencolor(rect_coor_color)\n t.pensize(1)\n temppic_line(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey, temps)\nruntemppic()\n\n\n# 题 5\n# 根据本周天气,绘制柱状图(提示,不就是个矩形。。。)\n# 折线图的颜色 方案任选\n# 中国本周的每日平均气温, 从周一到周日\ntemps = [22, 19, 22, 21, 25, 27, 30]\nweeks = ['Mon', 'Tue', 'Wen', 'Thu', 'Fri', 'Sat', 'Sun']\nfillcolor = ['#8CEBFF', '#FFF78B', '#C6FF8D', '#FF8E9C', '#FFD38D', '#C5FF8C', '#C5FF8C']\n\n\ndef temppic_rect(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey, array):\n rect_coordinate(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey)\n array = createpoints(x, y, lengthx, lengthy, measure_rangex, measure_rangey, array)\n drawrect(x, y, 10, array)\n\n\ndef drawrect(x, y, weight, array):\n for i in range(len(array)):\n p = array[i]\n ax = p[0]\n ay = p[1]\n t.up()\n t.goto(ax, ay)\n t.down()\n t.fillcolor(fillcolor[i])\n t.pencolor(fillcolor[i])\n t.begin_fill()\n t.goto(ax+weight, ay)\n t.goto(ax+weight, y)\n t.goto(ax-weight, y)\n t.goto(ax-weight, ay)\n t.goto(ax, ay)\n t.end_fill()\n turtle.update()\n\n\ndef runtemppic_rect():\n x = -350\n y = 0\n lengthx = 300\n lengthy = 300\n countx = 7\n county = 10\n measure_rangex = 7\n measure_rangey = 35\n t.pencolor(rect_coor_color)\n t.pensize(1)\n temppic_rect(x, y, lengthx, lengthy, countx, county, measure_rangex, measure_rangey, 
temps)\n\nruntemppic_rect()\n\n\nturtle.done()\n","sub_path":"base/class05/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"142191830","text":"\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport ast\r\nimport os\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Map, Page, Pie, WordCloud\r\nfrom pyecharts.globals import SymbolType\r\nimport time\r\nt = str(time.time())\r\nURL = \"https://3g.dxy.cn/newh5/view/pneumonia\" # data source\r\ndata_route = \".\\\\map\" + t+\".html\" # the html save route\r\nsave_route = \".\\\\data\" + t + \".txt\" # the back-up data save route\r\nfp = open(save_route, \"w\")\r\nfp.close()\r\n\r\ndef check_empty(file_name):\r\n \"\"\"for back up data, check if the data file is empty, if empty, delete it.\"\"\"\r\n if os.stat(file_name).st_size == 0:\r\n pass\r\n else:\r\n os.remove(file_name)\r\n\r\n\r\ndef get_data(url):\r\n \"\"\" get data from 丁香园 and process then\"\"\"\r\n result_list = []\r\n try:\r\n r = requests.get(url)\r\n rt = r.text.encode(\"ISO-8859-1\").decode(\"utf-8\")\r\n soup = BeautifulSoup(rt, \"html.parser\")\r\n print(\"get data from : \"+ url)\r\n except:\r\n print(\"wrong\")\r\n\r\n result = str(soup.select('#getAreaStat'))[:-25]\r\n\r\n result = result.replace('[