diff --git "a/6330.jsonl" "b/6330.jsonl" new file mode 100644--- /dev/null +++ "b/6330.jsonl" @@ -0,0 +1,1695 @@ +{"seq_id":"34475446035","text":"from ruamel.yaml import YAML, dump, RoundTripDumper\nfrom raisimGymTorch.env.bin import train\nfrom raisimGymTorch.env.RaisimGymVecEnv import RaisimGymVecEnv as VecEnv\nfrom raisimGymTorch.helper.raisim_gym_helper import ConfigurationSaver\nfrom raisimGymTorch.helper.utils_plot import plot_trajectory_prediction_result\nimport os\nimport math\nimport time\nimport raisimGymTorch.algo.ppo.module as ppo_module\nimport torch.nn as nn\nimport numpy as np\nimport torch\nfrom collections import Counter\nimport argparse\nimport pdb\nimport wandb\nfrom raisimGymTorch.env.envs.train.model import Forward_Dynamics_Model\nfrom raisimGymTorch.env.envs.train.trainer import FDM_trainer\nfrom raisimGymTorch.env.envs.train.action import UserCommand, Constant_command_sampler, Linear_time_correlated_command_sampler, Normal_time_correlated_command_sampler\nfrom raisimGymTorch.env.envs.train.storage import Buffer\nimport random\n\n\"\"\"\nTrain Forward Dynamics Model (FDM)\n\nInput:\n - Current lidar observation\n - Selected generalized coordinates and velocities history\n - Future command trajectories\n \nOutput:\n - Future base coordinates (x, y)\n - Future probabilities of collision \n\n\"\"\"\n\nrandom.seed(0)\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n# task specification\ntask_name = \"FDM_train\"\n\n# configuration\nparser = argparse.ArgumentParser()\nparser.add_argument('-tw', '--tracking_weight', help='velocity command tracking policy weight path', type=str, required=True)\nargs = parser.parse_args()\ncommand_tracking_weight_path = args.tracking_weight\n\n# check if gpu is available\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# directories\ntask_path = os.path.dirname(os.path.realpath(__file__))\nhome_path = task_path + \"/../../../..\"\n\n# config\ncfg = YAML().load(open(task_path + \"/cfg.yaml\", 'r'))\ncfg[\"environment\"][\"determine_env\"] = 0\ncfg[\"environment\"][\"evaluate\"] = False\ncfg[\"environment\"][\"random_initialize\"] = True\ncfg[\"environment\"][\"point_goal_initialize\"] = False\ncfg[\"environment\"][\"CVAE_data_collection_initialize\"] = False\ncfg[\"environment\"][\"safe_control_initialize\"] = False\ncfg[\"environment\"][\"CVAE_environment_initialize\"] = False\n\n# user command sampling\nuser_command = UserCommand(cfg, cfg['environment']['num_envs'])\ncommand_sampler_constant = Constant_command_sampler(user_command)\ncommand_sampler_linear_correlated = Linear_time_correlated_command_sampler(user_command,\n beta=cfg[\"data_collection\"][\"linear_time_correlated_command_sampler_beta\"])\ncommand_sampler_normal_correlated = Normal_time_correlated_command_sampler(user_command, cfg[\"environment\"][\"command\"],\n sigma=cfg[\"data_collection\"][\"normal_time_correlated_command_sampler_sigma\"],\n std_scale_fixed=False)\n\n# create environment from the configuration file\nenv = VecEnv(train.RaisimGymEnv(home_path + \"/rsc\", dump(cfg['environment'], Dumper=RoundTripDumper)), cfg['environment'], normalize_ob=False)\n\n# shortcuts\nuser_command_dim = 3\nproprioceptive_sensor_dim = 81\nlidar_dim = 360\nassert env.num_obs == proprioceptive_sensor_dim + lidar_dim, \"Check configured sensor dimension\"\n\n# training rollout config\nn_steps = math.floor(cfg['environment']['max_time'] / cfg['environment']['control_dt'])\ncommand_period_steps = math.floor(cfg['data_collection']['command_period'] / 
cfg['environment']['control_dt'])\ntotal_steps = n_steps * env.num_envs\nassert n_steps % command_period_steps == 0, \"Total steps in training should be divided by command period steps.\"\n\nstate_dim = cfg[\"architecture\"][\"state_encoder\"][\"input\"]\ncommand_dim = cfg[\"architecture\"][\"command_encoder\"][\"input\"]\nP_col_dim = cfg[\"architecture\"][\"traj_predictor\"][\"collision\"][\"output\"]\ncoordinate_dim = cfg[\"architecture\"][\"traj_predictor\"][\"coordinate\"][\"output\"] # Just predict x, y coordinate (not yaw)\n\n# use naive concatenation for encoding COM vel history\nCOM_feature_dim = cfg[\"architecture\"][\"COM_encoder\"][\"naive\"][\"input\"]\nCOM_history_time_step = cfg[\"architecture\"][\"COM_encoder\"][\"naive\"][\"time_step\"]\nCOM_history_update_period = int(cfg[\"architecture\"][\"COM_encoder\"][\"naive\"][\"update_period\"] / cfg[\"environment\"][\"control_dt\"])\nassert state_dim - lidar_dim == COM_feature_dim * COM_history_time_step, \"Check COM_encoder output and state_encoder input in the cfg.yaml\"\n\ncommand_tracking_ob_dim = user_command_dim + proprioceptive_sensor_dim\ncommand_tracking_act_dim = env.num_acts\n\nCOM_buffer = Buffer(env.num_envs, COM_history_time_step, COM_feature_dim)\n\nenvironment_model = Forward_Dynamics_Model(state_encoding_config=cfg[\"architecture\"][\"state_encoder\"],\n command_encoding_config=cfg[\"architecture\"][\"command_encoder\"],\n recurrence_config=cfg[\"architecture\"][\"recurrence\"],\n prediction_config=cfg[\"architecture\"][\"traj_predictor\"],\n device=device)\n\n# Log the training and evaluating process or not\nlogging = cfg[\"logging\"]\n\ntrainer = FDM_trainer(environment_model=environment_model,\n state_dim=state_dim,\n command_dim=command_dim,\n P_col_dim=P_col_dim,\n coordinate_dim=coordinate_dim,\n prediction_period=cfg[\"data_collection\"][\"prediction_period\"],\n delta_prediction_time=cfg[\"data_collection\"][\"command_period\"],\n loss_weight=cfg[\"training\"][\"loss_weight\"],\n max_storage_size=cfg[\"training\"][\"storage_size\"],\n num_learning_epochs=cfg[\"training\"][\"num_epochs\"],\n mini_batch_size=cfg[\"training\"][\"batch_size\"],\n shuffle_batch=cfg[\"training\"][\"shuffle_batch\"],\n clip_grad=cfg[\"training\"][\"clip_gradient\"],\n learning_rate=cfg[\"training\"][\"learning_rate\"],\n max_grad_norm=cfg[\"training\"][\"max_gradient_norm\"],\n device=device,\n logging=logging,\n P_col_interpolate=cfg[\"training\"][\"interpolate_probability\"],\n prioritized_data_update=cfg[\"data_collection\"][\"prioritized_data_update\"],\n prioritized_data_update_magnitude=cfg[\"data_collection\"][\"prioritized_data_update_magnitude\"],\n weight_decay=cfg[\"training\"][\"weight_decay\"],\n weight_decay_lamda=cfg[\"training\"][\"weight_decay_lamda\"])\n\nsaver = ConfigurationSaver(log_dir=home_path + \"/data/\"+task_name,\n save_items=[task_path + \"/cfg.yaml\", task_path + \"/Environment.hpp\"])\n\n# wandb initialize\nif logging:\n wandb.init(name=task_name, project=\"Quadruped_navigation\")\n # wandb.watch(environment_model, log='all', log_freq=300) # for checking gradients and parameters\n\n# load pre-trained command tracking policy weight\nassert command_tracking_weight_path != '', \"Velocity command tracking policy weight path should be determined.\"\ncommand_tracking_policy = ppo_module.MLP(cfg['architecture']['command_tracking_policy_net'], nn.LeakyReLU,\n command_tracking_ob_dim, command_tracking_act_dim)\ncommand_tracking_policy.load_state_dict(torch.load(command_tracking_weight_path, 
map_location=device)['actor_architecture_state_dict'])\ncommand_tracking_policy.to(device)\ncommand_tracking_weight_dir = command_tracking_weight_path.rsplit('/', 1)[0] + '/'\niteration_number = command_tracking_weight_path.rsplit('/', 1)[1].split('_', 1)[1].rsplit('.', 1)[0]\nenv.load_scaling(command_tracking_weight_dir, int(iteration_number))\n\nprint(\"Ready to start training.\")\npdb.set_trace()\n\nfor update in range(cfg[\"environment\"][\"max_n_update\"]):\n start = time.time()\n\n if update % cfg[\"environment\"][\"eval_every_n\"] == 0:\n # evaluate\n print(\"Evaluating the current environment model\")\n torch.save({\n 'model_architecture_state_dict': environment_model.state_dict(),\n 'optimizer_state_dict': trainer.optimizer.state_dict(),\n }, saver.data_dir+\"/full_\"+str(update)+'.pt')\n\n # we create another graph just to demonstrate the save/load method\n loaded_environment_model = Forward_Dynamics_Model(state_encoding_config=cfg[\"architecture\"][\"state_encoder\"],\n command_encoding_config=cfg[\"architecture\"][\"command_encoder\"],\n recurrence_config=cfg[\"architecture\"][\"recurrence\"],\n prediction_config=cfg[\"architecture\"][\"traj_predictor\"],\n device=device)\n loaded_environment_model.load_state_dict(torch.load(saver.data_dir+\"/full_\"+str(update)+'.pt', map_location=device)['model_architecture_state_dict'])\n loaded_environment_model.eval()\n loaded_environment_model.to(device)\n\n env.initialize_n_step()\n env.reset()\n command_sampler_constant.reset()\n command_sampler_linear_correlated.reset()\n command_sampler_normal_correlated.reset()\n COM_buffer.reset()\n\n # sample command sampler type for each environment\n env_command_sampler_idx = np.random.choice(3, cfg[\"environment\"][\"num_envs\"])\n command_sampler_constant_idx = np.where(env_command_sampler_idx == 0)[0]\n command_sampler_linear_correlated_idx = np.where(env_command_sampler_idx == 1)[0]\n command_sampler_normal_correlated_idx = np.where(env_command_sampler_idx == 2)[0]\n sample_user_command = np.zeros((cfg[\"environment\"][\"num_envs\"], 3)).astype(np.float32)\n\n COM_history_traj = []\n lidar_traj = []\n state_traj = []\n command_traj = []\n P_col_traj = []\n coordinate_traj = []\n init_coordinate_traj = []\n done_envs = set()\n\n for step in range(n_steps):\n frame_start = time.time()\n new_command_time = step % command_period_steps == 0\n traj_update_time = (step + 1) % command_period_steps == 0\n\n if new_command_time:\n # reset only terminated environment\n env.initialize_n_step() # to reset in new position\n env.partial_reset(list(done_envs))\n\n # save coordinate before taking step to modify the labeled data\n coordinate_obs = env.coordinate_observe()\n init_coordinate_traj.append(coordinate_obs)\n\n obs, _ = env.observe(False) # observation before taking step\n if step % COM_history_update_period == 0:\n # update COM features\n COM_feature = np.concatenate((obs[:, :3], obs[:, 15:21]), axis=1) # body orientation, linear velocity, angular velocity\n COM_buffer.update(COM_feature)\n\n if new_command_time:\n # sample new command\n done_envs = set()\n previous_done_envs = np.array([])\n temp_state = np.zeros((cfg['environment']['num_envs'], state_dim))\n temp_lidar = np.zeros((cfg['environment']['num_envs'], lidar_dim))\n temp_command = np.zeros((cfg['environment']['num_envs'], command_dim))\n temp_P_col = np.zeros(cfg['environment']['num_envs'])\n temp_coordinate = np.zeros((cfg['environment']['num_envs'], coordinate_dim))\n\n lidar_data = obs[:, proprioceptive_sensor_dim:]\n temp_COM_history 
= COM_buffer.return_data(flatten=True)\n temp_state = np.concatenate((lidar_data, temp_COM_history), axis=1)\n\n sample_user_command_constant = command_sampler_constant.sample()\n sample_user_command_correlated = command_sampler_linear_correlated.sample()\n sample_user_command_normal_correlated = command_sampler_normal_correlated.sample()\n sample_user_command[command_sampler_constant_idx, :] = sample_user_command_constant[command_sampler_constant_idx, :]\n sample_user_command[command_sampler_linear_correlated_idx, :] = sample_user_command_correlated[command_sampler_linear_correlated_idx, :]\n sample_user_command[command_sampler_normal_correlated_idx, :] = sample_user_command_normal_correlated[command_sampler_normal_correlated_idx, :]\n temp_command = sample_user_command.copy()\n\n # track the given command\n tracking_obs = np.concatenate((sample_user_command, obs[:, :proprioceptive_sensor_dim]), axis=1)\n tracking_obs = env.force_normalize_observation(tracking_obs, type=1)\n with torch.no_grad():\n tracking_action = command_tracking_policy.architecture(torch.from_numpy(tracking_obs).to(device))\n _, dones = env.partial_step(tracking_action.cpu().detach().numpy())\n\n coordinate_obs = env.coordinate_observe() # coordinate after taking step\n\n # update P_col and coordinate for terminated environment\n current_done_envs = np.where(dones == 1)[0]\n counter_current_done_envs = Counter(current_done_envs)\n counter_previous_done_envs = Counter(previous_done_envs)\n new_done_envs = np.array(sorted((counter_current_done_envs - counter_previous_done_envs).elements())).astype(int)\n done_envs.update(new_done_envs)\n previous_done_envs = current_done_envs.copy()\n temp_P_col[new_done_envs] = dones[new_done_envs].astype(int)\n temp_coordinate[new_done_envs, :] = coordinate_obs[new_done_envs, :-1]\n\n # reset COM buffer for terminated environment\n COM_buffer.partial_reset(current_done_envs)\n\n frame_end = time.time()\n wait_time = cfg['environment']['control_dt'] - (frame_end-frame_start)\n\n # # Just for realistic visualization\n # if wait_time > 0.:\n # time.sleep(wait_time)\n\n if traj_update_time:\n # update P_col and coordinate for not terminated environment\n counter_current_done_envs = Counter(list(done_envs))\n counter_default_envs = Counter(np.arange(cfg['environment']['num_envs']))\n not_done_envs = np.array(sorted((counter_default_envs - counter_current_done_envs).elements())).astype(int)\n temp_P_col[not_done_envs] = 0\n temp_coordinate[not_done_envs, :] = coordinate_obs[not_done_envs, :-1]\n\n state_traj.append(temp_state)\n command_traj.append(temp_command)\n P_col_traj.append(temp_P_col)\n coordinate_traj.append(temp_coordinate)\n\n state_traj = np.array(state_traj)\n command_traj = np.array(command_traj)\n P_col_traj = np.array(P_col_traj)\n coordinate_traj = np.array(coordinate_traj)\n init_coordinate_traj = np.array(init_coordinate_traj)\n\n (real_P_cols, real_coordinates), (predicted_P_cols, predicted_coordinates), (total_col_prediction_accuracy, col_prediction_accuracy, not_col_prediction_accuracy, mean_coordinate_error) \\\n = trainer.evaluate(environment_model=loaded_environment_model,\n state_traj=state_traj,\n command_traj=command_traj,\n dones_traj=P_col_traj,\n coordinate_traj=coordinate_traj,\n init_coordinate_traj=init_coordinate_traj,\n collision_threshold=0.3)\n print('====================================================')\n print('{:>6}th evaluation'.format(update))\n print('{:<40} {:>6}'.format(\"total collision accuracy: \", 
'{:0.6f}'.format(total_col_prediction_accuracy)))\n print('{:<40} {:>6}'.format(\"collision accuracy: \", '{:0.6f}'.format(col_prediction_accuracy)))\n print('{:<40} {:>6}'.format(\"no collision accuracy: \", '{:0.6f}'.format(not_col_prediction_accuracy)))\n print('{:<40} {:>6}'.format(\"coordinate error: \", '{:0.6f}'.format(mean_coordinate_error)))\n\n print('====================================================\\n')\n\n # plot FDM prediction results\n n_output_samples = real_P_cols.shape[1]\n plot_samples_idx = np.random.choice(n_output_samples, 7, replace=False)\n\n plot_trajectory_prediction_result(P_col_traj=np.swapaxes(real_P_cols[:, plot_samples_idx, :], 0, 1),\n coordinate_traj=np.swapaxes(real_coordinates[:, plot_samples_idx, :], 0, 1),\n predicted_P_col_traj=np.swapaxes(predicted_P_cols[:, plot_samples_idx, :], 0, 1),\n predicted_coordinate_traj=np.swapaxes(predicted_coordinates[:, plot_samples_idx, :], 0, 1),\n task_name=saver.data_dir.split('/')[-2],\n run_name=saver.data_dir.split('/')[-1],\n n_update=update,\n prediction_time=cfg[\"data_collection\"][\"prediction_period\"])\n\n\n # generate new environment\n if update % cfg[\"environment\"][\"new_environment_every_n\"] == 0:\n print(\"Sample new environment\")\n # create environment from the configuration file\n cfg[\"environment\"][\"seed\"][\"train\"] = update + 2000\n env = VecEnv(train.RaisimGymEnv(home_path + \"/rsc\", dump(cfg['environment'], Dumper=RoundTripDumper)), cfg['environment'], normalize_ob=False)\n env.load_scaling(command_tracking_weight_dir, int(iteration_number))\n \n # prepare for training\n env.initialize_n_step()\n env.reset()\n command_sampler_constant.reset()\n command_sampler_linear_correlated.reset()\n command_sampler_normal_correlated.reset()\n COM_buffer.reset()\n\n # sample command sampler type for each environment\n env_command_sampler_idx = np.random.choice(3, cfg[\"environment\"][\"num_envs\"])\n command_sampler_constant_idx = np.where(env_command_sampler_idx == 0)[0]\n command_sampler_linear_correlated_idx = np.where(env_command_sampler_idx == 1)[0]\n command_sampler_normal_correlated_idx = np.where(env_command_sampler_idx == 2)[0]\n sample_user_command = np.zeros((cfg[\"environment\"][\"num_envs\"], 3)).astype(np.float32)\n\n COM_history_traj = []\n lidar_traj = []\n state_traj = []\n command_traj = []\n P_col_traj = []\n coordinate_traj = []\n init_coordinate_traj = []\n done_envs = set()\n\n # train\n for step in range(n_steps):\n frame_start = time.time()\n new_command_time = step % command_period_steps == 0\n traj_update_time = (step + 1) % command_period_steps == 0\n\n if new_command_time:\n # reset only terminated environment\n env.initialize_n_step() # to reset in new position\n env.partial_reset(list(done_envs))\n\n # save coordinate before taking step to modify the labeled data\n coordinate_obs = env.coordinate_observe()\n init_coordinate_traj.append(coordinate_obs)\n\n obs, _ = env.observe(False) # observation before taking step\n if step % COM_history_update_period == 0:\n # update COM features\n COM_feature = np.concatenate((obs[:, :3], obs[:, 15:21]), axis=1)\n COM_buffer.update(COM_feature)\n\n if new_command_time:\n # sample new command\n done_envs = set()\n previous_done_envs = np.array([])\n temp_state = np.zeros((cfg['environment']['num_envs'], state_dim))\n temp_lidar = np.zeros((cfg['environment']['num_envs'], lidar_dim))\n temp_command = np.zeros((cfg['environment']['num_envs'], command_dim))\n temp_P_col = np.zeros(cfg['environment']['num_envs'])\n temp_coordinate = 
np.zeros((cfg['environment']['num_envs'], coordinate_dim))\n\n lidar_data = obs[:, proprioceptive_sensor_dim:]\n temp_COM_history = COM_buffer.return_data(flatten=True)\n temp_state = np.concatenate((lidar_data, temp_COM_history), axis=1)\n \n sample_user_command_constant = command_sampler_constant.sample()\n sample_user_command_correlated = command_sampler_linear_correlated.sample()\n sample_user_command_normal_correlated = command_sampler_normal_correlated.sample()\n sample_user_command[command_sampler_constant_idx, :] = sample_user_command_constant[command_sampler_constant_idx, :]\n sample_user_command[command_sampler_linear_correlated_idx, :] = sample_user_command_correlated[command_sampler_linear_correlated_idx, :]\n sample_user_command[command_sampler_normal_correlated_idx, :] = sample_user_command_normal_correlated[command_sampler_normal_correlated_idx, :]\n \n temp_command = sample_user_command.copy()\n\n # track the given command\n tracking_obs = np.concatenate((sample_user_command, obs[:, :proprioceptive_sensor_dim]), axis=1)\n tracking_obs = env.force_normalize_observation(tracking_obs, type=1)\n with torch.no_grad():\n tracking_action = command_tracking_policy.architecture(torch.from_numpy(tracking_obs).to(device))\n _, dones = env.partial_step(tracking_action.cpu().detach().numpy())\n\n coordinate_obs = env.coordinate_observe() # coordinate after taking step\n\n # update P_col and coordinate for terminated environment\n current_done_envs = np.where(dones == 1)[0]\n counter_current_done_envs = Counter(current_done_envs)\n counter_previous_done_envs = Counter(previous_done_envs)\n new_done_envs = np.array(sorted((counter_current_done_envs - counter_previous_done_envs).elements())).astype(int)\n done_envs.update(new_done_envs)\n previous_done_envs = current_done_envs.copy()\n temp_P_col[new_done_envs] = dones[new_done_envs].astype(int)\n temp_coordinate[new_done_envs, :] = coordinate_obs[new_done_envs, :-1]\n\n # reset COM buffer for terminated environment\n COM_buffer.partial_reset(current_done_envs)\n\n frame_end = time.time()\n wait_time = cfg['environment']['control_dt'] - (frame_end-frame_start)\n\n # # Just for realistic visualization\n # if wait_time > 0.:\n # time.sleep(wait_time)\n\n if traj_update_time:\n # update P_col and coordinate for not terminated environment\n counter_current_done_envs = Counter(list(done_envs))\n counter_default_envs = Counter(np.arange(cfg['environment']['num_envs']))\n not_done_envs = np.array(sorted((counter_default_envs - counter_current_done_envs).elements())).astype(int)\n temp_P_col[not_done_envs] = 0\n temp_coordinate[not_done_envs, :] = coordinate_obs[not_done_envs, :-1]\n\n state_traj.append(temp_state)\n command_traj.append(temp_command)\n P_col_traj.append(temp_P_col)\n coordinate_traj.append(temp_coordinate)\n\n # update training data buffer\n state_traj = np.array(state_traj)\n command_traj = np.array(command_traj)\n P_col_traj = np.array(P_col_traj)\n coordinate_traj = np.array(coordinate_traj)\n init_coordinate_traj = np.array(init_coordinate_traj)\n\n trainer.update_data(state_traj=state_traj,\n command_traj=command_traj,\n dones_traj=P_col_traj,\n coordinate_traj=coordinate_traj,\n init_coordinate_traj=init_coordinate_traj)\n\n mean_loss, mean_P_col_loss, mean_coordinate_loss = 0.0, 0.0, 0.0\n if trainer.is_buffer_full():\n mean_loss, mean_P_col_loss, mean_coordinate_loss, mean_col_prediction_accuracy, mean_not_col_prediction_accuracy = trainer.train() # collision probability threshold is set to 0.99\n end = time.time()\n\n 
print('----------------------------------------------------')\n print('{:>6}th iteration'.format(update))\n print('{:<40} {:>6}'.format(\"loss: \", '{:0.6f}'.format(mean_loss)))\n print('{:<40} {:>6}'.format(\"collision loss: \", '{:0.6f}'.format(mean_P_col_loss)))\n print('{:<40} {:>6}'.format(\"coordinate loss: \", '{:0.6f}'.format(mean_coordinate_loss)))\n print('{:<40} {:>6}'.format(\"collision accuracy: \", '{:0.6f}'.format(mean_col_prediction_accuracy)))\n print('{:<40} {:>6}'.format(\"no collision accuracy: \", '{:0.6f}'.format(mean_not_col_prediction_accuracy)))\n print('{:<40} {:>6}'.format(\"time elapsed in this iteration: \", '{:6.4f}'.format(end - start)))\n print('{:<40} {:>6}'.format(\"fps: \", '{:6.0f}'.format(total_steps / (end - start))))\n print('{:<40} {:>6}'.format(\"real time factor: \", '{:6.0f}'.format(total_steps / (end - start)\n * cfg['environment']['control_dt'])))\n print('----------------------------------------------------\\n')\n\n\n","repo_name":"awesomericky/complex-env-navigation","sub_path":"raisimGymTorch/env/envs/train/FDM_train.py","file_name":"FDM_train.py","file_ext":"py","file_size_in_byte":25247,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"91"} +{"seq_id":"15729016185","text":"# https://www.acmicpc.net/problem/2857\n\nlst = []\n\nfor _ in range(1, 6):\n x = input()\n if x.find('FBI') >= 0:\n lst.append(_)\n\nif len(lst) == 0:\n print('HE GOT AWAY!')\nelse:\n for i in lst:\n print(i, end = ' ')","repo_name":"Yoonsik-Shin/TIL","sub_path":"Algorism/BAEKJOON/Theme/recommand/2857.py","file_name":"2857.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"9678958623","text":"from matplotlib import pyplot as plt\nimport matplotlib.ticker as plticker\nimport seaborn as sns\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport numpy as np\n\n\ndef rebin(series_, thresh, side='underflow',percentile=False):\n '''\n Function that fills underflow/overflow bins for visualization\n \n args:\n * series_ : data structure to be rebinned : type = pandas series \n * thresh : threshold : type = int or float\n * side : which bin (underflow or overflow) to put values : type = string\n * percentile: determines whether thresh is interpreted as percentile or in units of series : type bool\n returns:\n * : type = pandas series\n '''\n #correct a bug in python where chained indexing warning gets confused so turn it off\n pd.options.mode.chained_assignment = None # default='warn'\n \n #make a local copy\n series = series_.copy(deep=True)\n \n #redefine threshold based on percentile\n if percentile:\n thresh = np.nanpercentile(series,thresh)\n #z-score filtering could be useful later\n #z_score = (thresh - series.mean()) / series.std(ddof=0)\n \n if side is 'underflow':\n series.loc[series <= thresh] = thresh\n elif side is 'overflow':\n series.loc[series >= thresh] = thresh\n else:\n print('need to specify underflow or overflow')\n return series\n\n\n\ndef hist_labeler(series_in,label_in=None):\n '''\n Formats histograms labels\n inputs: pd series\n returns: label string\n '''\n null_cnt = series_in.isna().sum()\n entries = series_in.shape[0]\n label = 'Entries {}\\n$\\mu$={:.4f}'.format(entries,series_in.mean())\n if null_cnt\t> 1:\n label = '{} entries, {} ({:.1f}%) null\\n$\\mu$={:.4f}'.format(entries,null_cnt,null_cnt*100./entries,series_in.mean())\n \n if label_in:\n label = '{} 
{}'.format(label_in,label)\n return label\n\ndef single_hist(df_in_,col,title=None,xlabel=None,ylabel=None,xrng=None,**kwargs):\n '''\n Plot Single histogram\n Usage example: single_hist(df,'support_lag1_ratio',bins=60,kde=False,xrng=(0.75,1.5))\n '''\n #make a local copy to avoid altering the original\n df_in = df_in_.copy(deep=True)\n\n #treatment of infs\n df_in[col] = df_in[col].replace([np.inf, -np.inf], np.nan)\n \n if xrng:\n df_in[col] = rebin(df_in[col],xrng[0],'underflow')\n df_in[col] = rebin(df_in[col],xrng[1],'overflow')\n else:\n df_in[col] = rebin(df_in[col],99,'overflow',percentile=True)\n df_in[col] = rebin(df_in[col],1,'underflow',percentile=True)\n\n hist_label = hist_labeler(df_in[col])\n \n plt.rc('font', family='serif')\n f,ax = plt.subplots(figsize=(10,5))\n ax.grid()\n\n sns.distplot(df_in[col],label=hist_label,**kwargs)\n plt.title(title)\n plt.legend(loc='best',fontsize=10)\n if xlabel:\n ax.set_xlabel(xlabel)\n\n\n\n \ndef plot_2dists(df1_,df2_,col1,col2,label1=None, label2=None,xrng=None,**kwargs):\n '''\n inputs: df1 type=pd\n df2 type=p2\n col1 type=string desc: column to be plotted in df1\n col2 type=string desc: column to be plotted in df2\n label1 type=string desc: label for df1\n label2 type=string desc: label for df2\n n_bins type=int desc: number of bins in output hist\n outputs: none, produces normalized histogram of var_in of df1 and df2 on same axis\n\n '''\n\n #change local copies only\n df1 = df1_.copy(deep=True)\n df2 = df2_.copy(deep=True)\n \n #treatment of infs\n df1[col1] = df1[col1].replace([np.inf, -np.inf], np.nan)\n df2[col2] = df2[col2].replace([np.inf, -np.inf], np.nan)\n \n plt.rc('font', family='serif')\n f,ax = plt.subplots(figsize=(10,5))\n #ax.grid()\n\n \n #plotting\n if not label1:\n label1=col1\n if not label2:\n label2=col2\n\n hist_label1 = hist_labeler(df1[col1],label1)\n hist_label2 = hist_labeler(df2[col2],label2)\n \n if xrng is None:\n df1[col1] = rebin(df1[col1],99,'overflow',percentile=True)\n df1[col1] = rebin(df1[col1],1,'underflow',percentile=True)\n df2[col2] = rebin(df2[col2],99,'overflow',percentile=True)\n df2[col2] = rebin(df2[col2],1,'underflow',percentile=True) \n \n #set axis limits\n xmin = df2[col2].min()\n xmax = df1[col1].max()\n if df1[col1].min() < df2[col2].min():\n xmin = df1[col1].min()\n \n if df1[col1].max() < df2[col2].max():\n xmax = df2[col2].max()\n\n xrnge = (xmin, xmax*1.01)\n else:\n xrnge = xrng\n\n df1[col1] = rebin(df1[col1],xrnge[0],'underflow')\n df1[col1] = rebin(df1[col1],xrnge[1],'overflow')\n df2[col2] = rebin(df2[col2],xrnge[0],'underflow')\n df2[col2] = rebin(df2[col2],xrnge[1],'overflow') \n \n sns.distplot(df1[col1],label=hist_label1,hist_kws={\"range\":(xrnge[0],xrnge[1])},**kwargs)\n sns.distplot(df2[col2],label=hist_label2,hist_kws={\"range\":(xrnge[0],xrnge[1])},**kwargs)\n\n plt.grid()\n plt.legend(loc='best',fontsize=10)\n ax.set_xlim(xrnge[0],xrnge[1])\n ax.set(xlabel=col1, ylabel='counts')\n plt.show()\n\n\ndef correlation_plot(X_in,features,title_txt = ''):\n scaler = MinMaxScaler()\n X_normd = scaler.fit_transform(X_in)\n \n features_corr = pd.DataFrame(X_normd,columns=features).corr()\n features_corr = features_corr.round(2)\n\n fig, ax = plt.subplots(figsize=(14, 12))\n sns.heatmap(features_corr, \n xticklabels=features_corr.columns,\n yticklabels=features_corr.columns, \n annot=True,\n ax=ax)\n ax.set_title('Features Correlation {}'.format(title_txt), size=15)\n fig.show()\n\ndef plt_timeseries(var,timevar,df,title):\n #f,ax = 
plt.subplots(2,1,sharex=True,figsize=(15,7))\n f,ax = plt.subplots(figsize=(15,7))\n\n ax.scatter(df[timevar],df[var],color='green')\n # format the ticks\n f.autofmt_xdate()\n\n #Spacing between each line\n intervals = float(1)\n\n loc = plticker.MultipleLocator(base=intervals)\n ax.xaxis.set_major_locator(loc)\n #ax[0].yaxis.set_major_locator(loc)\n\n # Add the grid\n plt.grid()\n plt.xlabel(timevar)\n ax.set_ylabel(var)\n ax.grid(which='major', axis='both', linestyle='-')\n plt.xticks(fontsize=10)\n plt.title(title)\n plt.show()\n\n\n################\n##\n# Plots categorical values directly from pandas\n# inputs: str_list: list of strings - pandas columns to plot\n# df_in: pandas dataframe\n# top_n: integer, plots top_n values with highest counts\n# output: None\n##\n################\ndef plot_strings(str_list,df_in,top_n=20):\n plt.rc('font', family='serif')\n for plot in str_list:\n try:\n f,ax = plt.subplots(figsize=(10,5))\n topn_lst = list(df_in[plot].value_counts()[:top_n].index)\n df_plot = df_in[df_in[plot].isin(topn_lst)]\n df_plot[plot].value_counts(sort=True).plot.bar(title=plot,rot=90)\n plt.ylabel('counts')\n plt.grid()\n plt.show()\n except Exception as e:\n print('ERROR plotting {}, exception={}'.format(plot,e))\n\n\n \ndef plot_importance(importances,feats):\n '''\n Plot feature importances\n inputs:\n * importances : feature importances : type=array\n * features : features list : type=list\n returns: None\n '''\n sorted_idx = importances.argsort()\n sorted_feats = np.asarray(feats)\n\n y_ticks = np.arange(0, len(feats))\n fig, ax = plt.subplots()\n ax.barh(y_ticks, importances[sorted_idx])\n\n ax.set_yticklabels(sorted_feats[sorted_idx])\n ax.set_yticks(y_ticks)\n ax.set_title(\"Feature Importances\")\n fig.tight_layout()\n ax.grid()\n plt.show()\n \ndef dedupe(df_in):\n '''\n De-duplicate Pandas datafarme and print info\n input: pandas dataframe\n returns: de-duped pandas dataframe \n '''\n print('shape before de-dupe:{}'.format(df_in.shape))\n df_in_dedupe = df_in.drop_duplicates()\n print('shape after de-dupe:{}'.format(df_in_dedupe.shape))\n print('{} duplicate rows found'.format(df_in.shape[0]-df_in_dedupe.shape[0]))\n return df_in_dedupe\n\n\ndef check_quality(df_in_):\n '''\n Data Quality checker for Pandas, prints duplicates, null content of cols\n inputs: df_in_: pandas dataframe\n returns: None\n '''\n df_in = dedupe(df_in_)\n colz = df_in.columns\n\n nulls_counts = []\n for col in colz:\n pct_null = df_in[col].isna().sum()*100./df_in.shape[0]\n if pct_null > 0:\n nulls_counts.append(('{} is {:.1f}% NULL'.format(col, pct_null),pct_null))\n\n sorted_counts = sorted(nulls_counts,key=lambda x:x[1],reverse=True)\n print('\\n---- \\n')\n print('columns with null rows:')\n for j in sorted_counts:\n print(j[0])\n \n","repo_name":"char8060/ds_toolkit","sub_path":"ds_toolkit/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74479340782","text":"import json\nimport os\n\n\nclass Preprocess:\n def __init__(self):\n self.base_path = 'data/暴雨洪涝/'\n title_list = os.listdir(self.base_path + '未标记暴雨洪涝')\n self.title_set = set([title[:-4] for title in title_list])\n self.label_dict = {'LOC': '受灾地点', 'DB': '承载体', 'AIAC': '受灾面积', 'AIAC2': '受灾面积2',\n 'AInP': '受灾人数', 'AHC': '损毁房屋',\n 'AMP': '失踪人数', 'ATP': '转移安置人口', 'AWP': '最大水深', 'AE': '经济损失',\n 'ATAC': '绝收面积', 'ASAC': '承灾面积',\n 'ADP': '死亡人数', 'AImP': '受灾群众', 'AWD': '积水深度', 'DS': '开始日期', 'DO': 
'结束日期',\n 'TS': '开始时间', 'TO': '结束时间'}\n self.result_list = [] # 抽取标签后的list\n\n def get_entity(self, save_path):\n \"\"\"\n 获取每个新闻的实体标注信息\n :param save_path: 结果保存的路径\n \"\"\"\n time_title_set = set([title[:-4] for title in os.listdir(self.base_path + '时间标签')])\n # 遍历每个新闻\n for title in self.title_set:\n entity_dict = {'title': title[5:]}\n # 遍历每个子文件夹\n for sub_dir in ['暴雨洪涝位置', '承载体标签', '人口面积等标签暴雨洪涝_改', '时间标签']:\n path = self.base_path + sub_dir + '/' + title + '.txt'\n # 处理不存在文件情况\n if sub_dir == '时间标签' and title not in time_title_set:\n continue\n with open(path, 'r', encoding='UTF-8') as f:\n line_list = f.readlines()\n for line in line_list:\n split_result = line.split(' ')\n for s in split_result:\n find_label_result = self.find_label(s)\n if not find_label_result[0]:\n continue\n # 加入集合中\n if find_label_result[2] not in entity_dict:\n entity_dict[find_label_result[2]] = set()\n entity_dict[find_label_result[2]].add(find_label_result[1])\n f.close()\n\n # 将set转换为list\n for key in entity_dict.keys():\n if key != 'title':\n entity_dict[key] = list(entity_dict[key])\n\n # 将entity_dict加入到result_list中\n self.result_list.append(entity_dict)\n with open(save_path, 'w', encoding='UTF-8') as f:\n json.dump(self.result_list, f, ensure_ascii=False)\n\n def find_label(self, word):\n if len(word) <= 3:\n return [False]\n i = len(word) - 3\n while i > 0:\n if word[i] == '/':\n if word[i + 1:] in self.label_dict:\n return [True, word[:i], self.label_dict[word[i + 1:]]]\n else:\n break\n i -= 1\n return [False]\n\n\nif __name__ == '__main__':\n a = Preprocess()\n a.get_entity(save_path='data/entity_result.json')\n","repo_name":"Mr-xiu/BUPT-KG-lab3","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33021673031","text":"import spacy\nnlp = spacy.load(\"en_core_web_md\")\n\nlast_desc = nlp (\"Will he save their world or destroy it? When the Hulk becomes too dangerous for the Earth, the Illuminati trick Hulk into a shuttle and launch him into space to a planet where the Hulk can live in peace. 
Unfortunately, Hulk lands on the planet Sakaar where he is sold into slavery and trained as a gladiator.\")\n\ndef watch_next (last_desc):\n movie_name = []\n movie_desc = []\n max_similarity = 0\n best_match = \"\"\n\n with open(\"movies.txt\", \"r\") as m:\n for line in m:\n movie = line.split(\":\") #easier to store both parts together\n name = movie[0].strip()\n desc = nlp(movie[1].strip(\"\\n\"))\n #can be done using loop, more efficient to assign vars\n movie_name.append(name)\n movie_desc.append(desc)\n similarity = last_desc.similarity(desc)\n\n #check if similarity > current most similar desc\n #if not, become max_similar and best_match\n if similarity > max_similarity:\n max_similarity = similarity\n best_match = name\n\n print(f\"The best match for the last movie is: {best_match}\")\n\nwatch_next(last_desc)\n#expected output: movie c\n","repo_name":"Yyyyzryrd/Software-Engineering","sub_path":"programs/t38/watch_next.py","file_name":"watch_next.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71536140144","text":"from functools import partial\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.admin import AdminSite\nfrom django.urls import NoReverseMatch, reverse\nfrom django.utils.text import capfirst\n\n\nclass AdminSiteRegroup(AdminSite):\n def _build_app_dict(self, request, label=None):\n \"\"\"\n Build the app dictionary. The optional `label` parameter filters models\n of a specific app.\n \"\"\"\n app_dict = {}\n\n if label:\n models = {\n m: m_a\n for m, m_a in self._registry.items()\n if m._meta.app_label == label\n }\n else:\n models = self._registry\n\n for app_name, my_models in settings.ADMIN_APPS.items():\n for my_model in my_models:\n for model, model_admin in models.items():\n if my_model in (\n capfirst(model._meta.verbose_name_plural),\n model._meta.object_name,\n ):\n app_label = model._meta.app_label\n\n has_module_perms = model_admin.has_module_permission(\n request\n )\n if not has_module_perms:\n continue\n\n perms = model_admin.get_model_perms(request)\n\n # Check whether user has any perm for this module.\n # If so, add the module to the model_list.\n if True not in perms.values():\n continue\n\n info = (app_label, model._meta.model_name)\n model_dict = {\n \"name\": capfirst(model._meta.verbose_name_plural),\n \"object_name\": model._meta.object_name,\n \"perms\": perms,\n \"admin_url\": None,\n \"add_url\": None,\n }\n if perms.get(\"change\") or perms.get(\"view\"):\n model_dict[\"view_only\"] = not perms.get(\"change\")\n try:\n model_dict[\"admin_url\"] = reverse(\n \"admin:%s_%s_changelist\" % info,\n current_app=self.name,\n )\n except NoReverseMatch:\n pass\n if perms.get(\"add\"):\n try:\n model_dict[\"add_url\"] = reverse(\n \"admin:%s_%s_add\" % info,\n current_app=self.name,\n )\n except NoReverseMatch:\n pass\n\n if app_name in app_dict:\n app_dict[app_name][\"models\"].append(model_dict)\n else:\n app_dict[app_name] = {\n \"name\": app_name,\n \"app_label\": app_name,\n \"app_url\": \"\",\n \"has_module_perms\": has_module_perms,\n \"models\": [model_dict],\n }\n\n if label:\n return app_dict.get(label)\n return app_dict\n\n def get_app_list(self, request):\n \"\"\"\n Return a sorted list of all the installed apps that have been\n registered in this site.\n \"\"\"\n app_dict = self._build_app_dict(request)\n # Sort the apps alphabetically.\n app_list = app_dict.values()\n return 
app_list\n\n\nadmin_site = AdminSiteRegroup()\nadmin_site_register = partial(admin.register, site=admin_site)\n","repo_name":"dima-dmytruk23/django-admin-regroup","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"28849197972","text":"#!/usr/local/bin/python36\nimport sys\nimport os\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtWidgets\nimport pandas as pd\nfrom grassui import MainGui\n\n# to run grass\nGISDBASE = \"/Users/epi/grassdata/\"\nMAPSET = \"copy\"\nLOCATION_NAME = \"project\"\n\n\nif sys.platform=='darwin':\n GISBASE =\"/usr/local/grass-7.3.svn\"\nelse:\n GISBASE =\"/usr/local/grass-7.3.svn\"\n\n\nos.environ[\"GISBASE\"] = GISBASE\nsys.path.append(os.path.join(GISBASE, 'etc/python'))\nos.environ[\"GISDBASE\"] = GISDBASE\nos.environ[\"MAPSET\"] = MAPSET\nos.environ[\"LOCATION_NAME\"] = LOCATION_NAME\n\ngisrc = 'MAPSET: %s\\n' % os.environ[\"MAPSET\"]\ngisrc += 'GISDBASE: %s\\n' % os.environ[\"GISDBASE\"]\ngisrc += 'LOCATION_NAME: %s\\n' % os.environ[\"LOCATION_NAME\"]\ngisrc += 'GUI: text'\n\ngrass_gisrc = open('/tmp/gisrc', 'w')\ngrass_gisrc.write(gisrc)\ngrass_gisrc.close()\nos.environ['GISRC'] = '/tmp/gisrc'\n\nos.environ['PATH'] = '/usr/sbin:/bin/:/usr/bin:%s/bin:%s/scripts:/home/epi/.grass7/addons/bin:/home/epi/.grass7/addons/scripts:/usr/local/opt/gdal2/bin/:/Users/epi/.grass7/addons/bin:$PATH' % (GISBASE, GISBASE)\n\nfrom grass.script import task as gtask\n\nfrom parameters import GisOptionPrompt, GisOptionFlag, GisOptionString, GisOptionNum, GisOptionText, GisOptionMultiString, GisOptionFilePrompt\n\ndef gcommand(command):\n pd.options.mode.chained_assignment = None\n commandspecs = gtask.command_info(command)\n flags = []\n for i, v in enumerate(commandspecs['flags']):\n flags.append(pd.DataFrame.from_dict({0: v}, orient='index'))\n flags = pd.concat(flags).reset_index()\n flags.drop('index', 1, inplace=True)\n flags['guisection'].loc[(flags['guisection'] == '')] = 'Optional'\n\n params = []\n for i, v in enumerate(commandspecs['params']):\n params.append(pd.DataFrame.from_dict({0: v}, orient='index'))\n params = pd.concat(params).reset_index()\n params.drop('index', 1, inplace=True)\n\n params['guisection'].loc[\n (params['guisection'] == '') & (params['required'] == False)\n ] = 'Optional'\n params['guisection'].loc[\n (params['guisection'] == '') | (params['required'] == True)\n ] = 'Required'\n\n guisection = list(params['guisection'].unique()) + \\\n list(flags['guisection'].unique())\n guisection = set(guisection)\n command_description = {}\n pr = {}\n fl = {}\n for i in guisection:\n pr[i] = params.loc[(params['guisection'] == i)].reset_index()\n fl[i] = flags.loc[(flags['guisection'] == i)].reset_index()\n del pr[i]['index']\n del fl[i]['index']\n command_description['description'] = commandspecs['description']\n command_description['keywords'] = commandspecs['keywords']\n command_description['usage'] = commandspecs['usage']\n command_description['parameters'] = pr\n command_description['flags'] = fl\n return command_description\n\nclass GrassCommand(QObject):\n def init(self):\n self.w = MainGui()\n #\n self.prompts = []\n self.datasurcesprompts = []\n self.filesprompts = []\n self.gflags = []\n self.goptionsstring = []\n self.goptionsmultistring = []\n self.goptionsnum = []\n self.goptionstext = []\n #\n self.parameters=[]\n self.datasurceprompt = []\n self.fileprompt = []\n 
self.flags=[]\n self.stringoption = []\n self.multistringoption = []\n self.numoption = []\n self.textoption = []\n #\n self.commandname = 'r.in.gdal'\n #\n self.message = ''\n gsec = gcommand(self.commandname)\n commandspecs = gtask.command_info(self.commandname)\n self.w.commanddescription.setText(commandspecs['description'])\n self.w.commanddescription.setWordWrap(True)\n keywords = ', '.join(gsec['keywords'])\n title = str(self.commandname) + \" [\" + keywords + ']'\n #title = str(self.commandname)+\" \"+str(gsec['keywords'])\n self.w.setWindowTitle(title)\n for i in gsec['parameters'].keys():\n tab = QtWidgets.QScrollArea()\n tab.setWidget(QtWidgets.QWidget())\n fl = QtWidgets.QVBoxLayout(tab.widget())\n tab.setWidgetResizable(True)\n tab.setObjectName(i)\n self.w.commandtab.addTab(tab, i)\n for j in gsec['flags'][i].index.values:\n flag = gsec['flags'][i].iloc[j]\n gflag = GisOptionFlag(fl, self.flags, flag)\n self.gflags.append(gflag)\n for j in gsec['parameters'][i].index.values:\n command = gsec['parameters'][i].iloc[j]\n if command['gisprompt']: # and command['age'] == 'old':\n if command['prompt'] in ['raster', 'raster_3d', 'vector', 'label', 'region', 'group', 'all']:\n print(command['prompt'])\n fileopen = GisOptionPrompt(fl, self.parameters, command)\n fileopen.setObjectName(\"fileopen_%s\" % i)\n self.prompts.append(fileopen)\n\n # TODO: handle here the datasource prompt\n #\n # if command['gisprompt'] and command['prompt'] == 'datasource':\n # fileopen = GisOptionDataSourcePrompt(fl, self.datasurceprompt, command)\n # fileopen.setObjectName(\"fileopen_%s\" % i)\n # self.datasurcesprompts.append(fileopen)\n\n\n # TODO: handle here the file prompt\n #\n if command['gisprompt'] and command['prompt'] == 'file':\n fileopen = GisOptionFilePrompt(fl, self.fileprompt, command)\n fileopen.setObjectName(\"fileopen_%s\" % i)\n self.filesprompts.append(fileopen)\n\n #for j in gsec['parameters'][i].index.values:\n # command = gsec['parameters'][i].iloc[j]\n if command['type'] == 'string' and command['values'] != [] and not command['multiple']:\n opt = GisOptionString(fl, self.stringoption, command)\n opt.setObjectName(\"opt_%s\" % i)\n self.goptionsstring.append(opt)\n # TODO: handle multicheckbox here\n if command['type'] == 'string' and command['values'] != [] and command['multiple']:\n opt = GisOptionMultiString(fl, self.multistringoption, command)\n opt.setObjectName(\"opt_%s\" % i)\n self.goptionsmultistring.append(opt)\n\n #for j in gsec['parameters'][i].index.values:\n # command = gsec['parameters'][i].iloc[j]\n if command['type'] == 'integer' or command['type'] == 'float' and not command['multiple']:\n opt = GisOptionNum(fl, self.numoption, command)\n opt.setObjectName(\"opt_%s\" % i)\n self.goptionsnum.append(opt)\n if command['type'] == 'integer' or command['type'] == 'float' and command['multiple']:\n opt = GisOptionText(fl, self.textoption, command)\n opt.setObjectName(\"opt_%s\" % i)\n self.goptionstext.append(opt)\n spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n fl.addItem(spacerItem4)\n doclink = os.path.join(GISBASE, 'docs/html', self.commandname)\n self.w.manualpage.load(QUrl('file://%s.html' % doclink))\n #self.w.runcommand.clicked.connect(self.getParam)\n self.w.copycommand.clicked.connect(self.messagecopy)\n self.w.closecommand.clicked.connect(self.w.close)\n\n\n for i in self.gflags:\n i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.goptionsstring:\n 
i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.goptionsmultistring:\n i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.goptionstext:\n i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.goptionsnum:\n i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.prompts:\n i.valueUpdated.connect(self.handleValueUpdated)\n for i in self.filesprompts:\n i.valueUpdated.connect(self.handleValueUpdated)\n\n self.status = {}\n self.status['flags'] = []\n for i in range(self.w.commandtab.count()):\n #print(i, self.w.commandtab.widget(i).objectName())\n if self.w.commandtab.widget(i).objectName() == 'Required':\n self.w.commandtab.tabBar().moveTab(i, 0)\n if self.w.commandtab.widget(i).objectName() == 'CommandOutput':\n self.w.commandtab.tabBar().moveTab(i, self.w.commandtab.count() - 2)\n if self.w.commandtab.widget(i).objectName() == 'Manual':\n self.w.commandtab.tabBar().moveTab(i, self.w.commandtab.count() - 1)\n print(self.w.commandtab.count())\n self.w.commandtab.setCurrentIndex(0)\n self.w.show()\n\n\n def handleValueUpdated(self, value):\n if len(value.split('=')) > 1:\n self.status[value.split('=')[0]] = value.split('=')[1]\n if len(value.split(':')) > 1:\n if value.split(':')[1] != 'None':\n self.status['flags'].append(value.split(':')[1])\n else:\n self.status['flags'] = [i for i in self.status['flags'] if value.split(':')[0] not in i]\n paramstatus = ' '.join(['{}={}'.format(k, v) for k, v in self.status.items() if k != 'flags'])\n flagstatus = ' '.join(self.status['flags'])\n self.message = self.commandname+' '+paramstatus+' '+flagstatus\n self.w.statusbar.showMessage(self.message)\n QtWidgets.QApplication.processEvents()\n\n def messagecopy(self):\n cb = QtWidgets.QApplication.clipboard()\n cb.clear(mode=cb.Clipboard)\n cb.setText(self.message, mode=cb.Clipboard)\n #print(self.w.commandtab.widget(0).objectName())\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n ss = None\n arg1 = ''\n if len(sys.argv) > 1:\n arg1 = sys.argv[1]\n app.processEvents()\n p = GrassCommand()\n p.init()\n sys.exit(app.exec_())\n","repo_name":"epifanio/imagedisplay","sub_path":"grass_modules/QT/grasscommand.py","file_name":"grasscommand.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28887202215","text":"\nfrom tsfresh import extract_features\nfrom tsfresh import extract_relevant_features\n# from tsfresh.utilities.dataframe_functions import impute\n\nlos = ends_with('_lo')(xdf)\nhis = ends_with('_hi')(xdf)\n\nxdfstd = (xdf.set_index('Dt')\n .drop(['Event', 'Cnt', 'Day', 'M', 'Yr', 'Doy'] + los + his, axis=1).iloc[:, :].copy()\n .dropna(axis=0, subset=['Logcnt'])\n .copy()\n )\n# print(xdfstd.isnull().sum())\nxdfstd = impute(xdfstd).assign(\n Id=1,\n Index=lambda x: np.arange(len(x)).astype(int)\n).reset_index(drop=1)\n# xdfstd = xdfstd.reset_index(drop=0)\n# print(xdfstd.isnull().sum())\ny = xdfstd.Logcnt\nx = xdfstd.drop('Logcnt', axis=1)\n\n# fts = extract_relevant_features(xdfstd, 'Logcnt', column_id='Id', column_sort='Index')\nfts = extract_features(x, column_id='Id', column_sort='Index')\nfeatures_filtered = select_features(fts, y)\n\n\ntimeseries_, y_ = load_robot_execution_failures()\nefs_ = extract_features(timeseries_, column_id=\"id\", column_sort=\"time\")\n\n\n##########################################################################\n# 
Batches\n##########################################################################\ndef get_batch(x, y, i, evaluation=False, bptt=np.inf):\n seq_len = min(bptt, len(x) - i)\n data = Variable(x[i:i + seq_len], volatile=evaluation)\n target = Variable(y[i:i + seq_len].view(-1))\n return data, target\n\n\ndef batch_getter(x, y):\n @wraps(get_batch)\n def f(i, evaluation=False, bptt=np.inf):\n return get_batch(x, y, i, evaluation, bptt)\n return f\n\n\n##########################################################################\n# Batches 2\n##########################################################################\ndef get_batch(source, i, bptt=20, evaluation=False):\n seq_len = min(bptt, len(source) - i)\n data = Variable(source[i:i+seq_len], volatile=evaluation)\n return data\n\ndef seq_batch_iter(x, y=None, bptt=20, evaluation=False):\n \"\"\"Iterates according to schema in\n http://stackoverflow.com/a/37009670/386279\n Every transition seen.\n \"\"\"\n for i in range(len(x) - 1):\n xb = get_batch(x, i, bptt=bptt, evaluation=evaluation)\n if y is None:\n yield xb\n else:\n yield xb, get_batch(y, i, bptt=bptt, evaluation=evaluation)\n\ndef batch_iter(x, y=None, bptt=20, evaluation=False):\n for i in range(0, len(x) - 1, bptt):\n xb = get_batch(x, i, bptt=bptt, evaluation=evaluation)\n if y is None:\n yield xb\n else:\n yield xb, get_batch(y, i, bptt=bptt, evaluation=evaluation)\n\n\n##########################################################################\n# Training\n##########################################################################\ndef train(model=None, hidden=None, brange=None, batch_getter=None, optimizer=None, eval=False):\n if hidden is None:\n hidden = model.init_hidden(batch_getter.batch_size)\n tot_loss = 0\n res = []\n# maxnorm = 0\n for br in brange:\n x, y = batch_getter(br)\n optimizer.zero_grad()\n # output, hidden = model(x, hidden)\n output = model(x, hidden)\n# hidden = repackage_hidden(hidden)\n\n res.append(output.data.squeeze())\n if eval:\n continue\n\n loss = criterion(output, y.view(-1, 1))\n loss.backward()\n\n T.nn.utils.clip_grad_norm(model.parameters(), 3)\n\n norms = [T.norm(p.grad.data) for p in m.parameters()]\n maxnorm = max(norms)\n if maxnorm > train.mnorm:\n train.mnorm = maxnorm\n print('max(grad) = {:.3f}'.format(maxnorm))\n\n optimizer.step()\n tot_loss += loss\n res = T.stack(res).view(-1).numpy()\n if eval:\n return res\n return tot_loss, res\n\ntrain.mnorm = 0\n# tot_loss, hidden, res = train(model=model, hidden=None, brange=brange, batch_getter=batch_getter, optimizer=optimizer)\n# print(tofloat(tot_loss))\n\n\n# With batch_iter\ndef train_epoch(xt, yt, model=None, bptt=20, hidden=None, optimizer=None, eval=False):\n if hidden is None:\n hidden = model.init_hidden(batch_getter.batch_size)\n tot_loss = 0\n res = []\n\n for x, y in batch_iter(xt, y=yt, bptt=bptt, evaluation=eval):\n optimizer.zero_grad()\n # output, hidden = model(x, hidden)\n output = model(x, hidden)\n # hidden = repackage_hidden(hidden)\n\n res.append(output.data.squeeze())\n if eval:\n continue\n\n loss = criterion(output, y.view(-1, 1))\n loss.backward()\n\n T.nn.utils.clip_grad_norm(model.parameters(), 3)\n\n norms = [T.norm(p.grad.data) for p in m.parameters()]\n maxnorm = max(norms)\n if maxnorm > train.mnorm:\n train.mnorm = maxnorm\n print('max(grad) = {:.3f}'.format(maxnorm))\n\n optimizer.step()\n tot_loss += loss\n res = T.stack(res).view(-1).numpy()\n if eval:\n return res\n return tot_loss, res\n\ntrain_epoch.mnorm = 0\n# tot_loss, hidden, res = 
train(model=model, hidden=None, brange=brange, batch_getter=batch_getter, optimizer=optimizer)\n# print(tofloat(tot_loss))\n\n\n################################################################################\n# Train/test\n# Cloud_cover\n################################################################################\n\ndef split(X, y, ratio=.9):\n null = X.isnull().any(axis=1) | y.isnull()\n print(X.shape)\n X = X[~null]\n print(X.shape)\n y = y[~null]\n\n N = int(len(X) * ratio)\n print(len(X))\n print(N)\n\n Xr = X[:N]\n yr = y[:N]\n\n Xs = X[N:]\n ys = y[N:]\n return Xr, yr, Xs, ys\n\n# X = ddf[fts + ['N']] #.dropna(axis=0)\nX = ddf[fts + [null_col]] #.dropna(axis=0)\nXr, yr, Xs, ys = split(X.drop(null_col, axis=1), X.Cloud_cover, ratio=.5)\n\nrf = RandomForestRegressor(n_estimators=30, oob_score=True).fit(Xr, yr, )\nypred = rf.predict(Xs)\n\ndef show_preds(ys, pred):\n plt.plot(ys, pred, '.')\n plt.xlabel('Actual')\n plt.ylabel('Pred')\n plt.savefig('cloud_cover_model_perf.png', bbox_inches='tight')\n\nshow_preds(ys, pred)\n","repo_name":"wcbeard/pollen","sub_path":"util/junk.py","file_name":"junk.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"} +{"seq_id":"26734591585","text":"\"\"\"Calculate phase shift from Luscher zeta for given inputs.\"\"\"\nimport sys\nimport subprocess\nimport inspect\nimport os\nimport math\nfrom math import sqrt\nimport numpy as np\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\n#from accupy import kdot\nfrom latfit.config import PION_MASS, L_BOX, CALC_PHASE_SHIFT\nfrom latfit.config import AINVERSE, ISOSPIN, MOMSTR, FIT_SPACING_CORRECTION\nfrom latfit.config import IRREP\nfrom latfit.utilities import read_file as rf\nfrom latfit.analysis.errorcodes import ZetaError, RelGammaError\nimport latfit.utilities.zeta.i1zeta as i1z\n\ntry:\n PROFILE = profile # throws an exception when PROFILE isn't defined\nexcept NameError:\n def profile(arg2):\n \"\"\"Line profiler default.\"\"\"\n return arg2\n PROFILE = profile\n\nif CALC_PHASE_SHIFT:\n @PROFILE\n def remove_epipi_indexing(epipi):\n \"\"\"Remove the indexining on epipi\"\"\"\n try:\n epipi = epipi[1]\n except (IndexError, TypeError):\n try:\n epipi = epipi[0]\n except (IndexError, TypeError):\n pass\n return epipi\n\n @PROFILE\n def getgamma(epipi, comp):\n \"\"\"get relativistic gamma\"\"\"\n if getgamma.psq is None:\n comp = np.asarray(comp, dtype=np.float64)\n assert len(comp) == 3, str(comp)\n getgamma.psq = np.dot(comp, comp)\n psq = getgamma.psq\n if epipi:\n try:\n if FIT_SPACING_CORRECTION:\n arg = epipi**2-(2*np.pi/L_BOX)**2*psq\n gamma = epipi/sqrt(arg)\n else:\n arg = epipi**2-4*np.sin(\n np.pi/L_BOX)**2*psq\n gamma = epipi/sqrt(arg)\n except (ValueError, FloatingPointError):\n print(\"zeta.py, bad gamma value for epipi=\", epipi)\n print(\"arg=\", arg)\n print(\"center of mass momentum=\", comp)\n print(\"Length of box=\", L_BOX)\n raise ZetaError(\"bad gamma, epipi = \"+str(epipi))\n if gamma < 1:\n raise RelGammaError(gamma=gamma, epipi=epipi)\n return gamma\n getgamma.psq = None\n\n def zeta(epipi):\n \"\"\"Calculate the I=0 scattering phase shift given the pipi energy\n for that channel.\n \"\"\"\n arglist, comp = args(epipi)\n out = phase_shift(epipi, arglist, comp)\n out = tocomplex(epipi, out)\n return out\n\n @PROFILE\n def args(epipi):\n \"\"\"Get arg list for C code to calculate phase shift\"\"\"\n epipi = remove_epipi_indexing(epipi)\n comp = 
np.array(rf.procmom(MOMSTR))\n gamma = getgamma(epipi, comp)\n epipi = epipi*AINVERSE/gamma\n lbox = L_BOX/AINVERSE\n setup_i1_zeta(lbox)\n #epipi = math.sqrt(epipi**2-(2*np.pi/lbox)**2*PTOTSQ) //not correct\n\n # set up the normal call to w00 phase shift method\n binpath = args.binpath\n arglist = [binpath, str(epipi), str(PION_MASS), str(lbox),\n str(comp[0]), str(comp[1]), str(comp[2]), str(gamma),\n str(int(not FIT_SPACING_CORRECTION))]\n return arglist, comp\n args.binpath = os.path.dirname(inspect.getfile(zeta))+'/main.o'\n\n @PROFILE\n def setup_i1_zeta(lbox):\n \"\"\"set up the I=1 moving frame version\"\"\"\n i1z.COMP = MOMSTR\n i1z.L_BOX = np.float(lbox)\n i1z.IRREP = str(IRREP)\n i1z.MPION = np.float(PION_MASS)\n\nelse:\n def zeta(_):\n \"\"\"Blank function; do not calculate phase shift\"\"\"\n return\n\n@PROFILE\ndef phase_shift(epipi, arglist, comp):\n \"\"\"Calculate phase shift\"\"\"\n if not np.isnan(epipi):\n try:\n if ISOSPIN != 1 or not np.any(comp):\n out = subprocess.check_output(arglist)\n else:\n out = i1z.phase(epipi)\n except FileNotFoundError:\n print(\"Error in zeta: main.C not compiled yet.\")\n print(subprocess.check_output(['pwd']))\n print(inspect.getfile(zeta))\n sys.exit(1)\n except subprocess.CalledProcessError:\n print(\"Error in zeta: calc of phase shift error:\")\n print(epipi)\n raise ZetaError(subprocess.Popen(\n arglist, stdout=subprocess.PIPE).stdout.read())\n else:\n out = np.nan\n return out\n\n@PROFILE\ndef tocomplex(epipi, out):\n \"\"\"if the phase shift should be complex,\n make the output (from the .C file) to a complex python type\"\"\"\n try:\n test = epipi*epipi/4-PION_MASS**2 < 0\n except FloatingPointError:\n print(\"floating point error\")\n print(\"epipi=\", epipi)\n sys.exit(1)\n if test:\n out = float(out)*1j\n else:\n try:\n out = complex(float(out))\n except ValueError:\n print(\"unable to convert phase shift to number:\", out)\n print(\"check to make sure there does not exist\"+\\\n \" debugging which needs to be turned off.\")\n print(out)\n raise ZetaError(\"bad number conversion\")\n if ISOSPIN == 0:\n if out.real < 0 and abs(out.real) > 90:\n out = np.complex(out.real+\n math.ceil(-1*out.real/180)*180, out.imag)\n if out.real > 180:\n out = np.complex(out.real-\n math.floor(out.real/180)*180, out.imag)\n return out\n\nif CALC_PHASE_SHIFT:\n def pheno(epipi):\n \"\"\"Calc pheno phase shift\"\"\"\n try:\n epipi = epipi[1]\n except (IndexError, TypeError):\n try:\n epipi = epipi[0]\n except (IndexError, TypeError):\n pass\n epipi = epipi*AINVERSE\n binpath = os.path.dirname(inspect.getfile(zeta))+'/pheno.o'\n arglist = [binpath, str(epipi), str(PION_MASS), str(ISOSPIN)]\n try:\n out = subprocess.check_output(arglist)\n except FileNotFoundError:\n print(\"Error in pheno: pheno.C not compiled yet.\")\n print(subprocess.check_output(['pwd']))\n print(inspect.getfile(zeta))\n sys.exit(1)\n except subprocess.CalledProcessError:\n print(\"Error in pheno: calc of phase shift error:\")\n print(epipi)\n raise ZetaError(\n subprocess.Popen(arglist, stdout=subprocess.PIPE).stdout.read())\n return float(out)\n\n\nelse:\n def pheno(_):\n \"\"\"Blank function; do not calculate phase shift\"\"\"\n return\n\ndef zeta_real(epipi):\n \"\"\"Gives nan's if zeta(E_pipi) is not real\"\"\"\n test = zeta(epipi)\n if test.imag != 0:\n retval = math.nan\n else:\n retval = test.real\n return retval\n\n\ndef plotcrosscurves(plot_both=False):\n \"\"\"Plots the cross curves of Luscher and Schenk (pheno)\n the intersection points are predictions for lattice 
energies\n \"\"\"\n points = 1e3 # Number of points\n xmin, xmax = 2*float(PION_MASS)/AINVERSE, 1.1\n xlist = list(map(lambda x: float(xmax - xmin)*1.0*x/(\n points*1.0)+xmin, list(np.arange(points+1))))\n xlist = [0+0j if np.isnan(i) else i for i in xlist]\n #ylist_pheno_minus = list(map(lambda y: -pheno(y), xlist))\n #plt.plot(xlist, ylist_pheno_minus, label='pheno-')\n #plt.plot(xlist, ylist_pheno_plus, label='pheno+')\n hfontt = {'fontname': 'FreeSans', 'size': 12}\n hfontl = {'fontname': 'FreeSans', 'size': 14}\n print('Isospin=', ISOSPIN)\n with PdfPages('SchenkVLuscherI'+str(ISOSPIN)+'.pdf') as pdf:\n if plot_both:\n ylist_pheno_plus = list(map(pheno, xlist))\n ylist_zeta = list(map(zeta, xlist))\n plt.plot(xlist, ylist_pheno_plus, label='Schenk')\n plt.plot(xlist, ylist_zeta, label='Luscher')\n else:\n ylist_dif = list(map(lambda y: zeta_real(y)-pheno(y), xlist))\n\n plt.plot(xlist, np.zeros((len(xlist))))\n plt.plot(xlist, ylist_dif, label='Luscher-Schenk')\n plt.title('Luscher Method, Schenk Phenomenology, Isospin='+\n str(ISOSPIN), **hfontt)\n plt.xlabel('Ea (Lattice Units, a^(-1)='+\n str(AINVERSE)+' GeV)', **hfontl)\n plt.ylabel(r'$\\delta$ (degrees)', **hfontl)\n plt.legend(loc='best')\n pdf.savefig()\n #plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), shadow=True, ncol=nplots)\n plt.show()\n","repo_name":"goracle/lattice-fitter","sub_path":"latfit/utilities/zeta/zeta.py","file_name":"zeta.py","file_ext":"py","file_size_in_byte":8421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"8492383566","text":"from enum import Enum, unique\nfrom typing import (\n Any,\n Mapping\n)\n\n\nVALUE_ERR_MSG = '{} has invalid value {}'\n\n\n@unique\nclass NoteType(Enum):\n personal = 1\n work = 2\n\n\nclass Note:\n def __init__(\n self,\n id: str,\n title: str,\n body: str,\n note_type: NoteType,\n updated_on: int\n ):\n if id is None:\n raise ValueError(VALUE_ERR_MSG.format(\"id\", id))\n if title is None:\n raise ValueError(VALUE_ERR_MSG.format(\"title\", title))\n if body is None:\n raise ValueError(VALUE_ERR_MSG.format(\"body\", body))\n if note_type is None:\n raise ValueError(VALUE_ERR_MSG.format(\"note_type\", note_type))\n if updated_on is None:\n raise ValueError(VALUE_ERR_MSG.format(\"updated_on\", updated_on))\n\n self._id = id\n self._title = title\n self._body = body\n self._note_type = note_type\n self._updated_on = updated_on\n\n @property\n def id(self) -> str:\n return self._id\n\n @id.setter\n def id(self, value: str) -> None:\n if not value:\n raise ValueError(VALUE_ERR_MSG.format('value', value))\n\n self._id = value\n\n @property\n def title(self) -> str:\n return self._title\n\n @title.setter\n def title(self, value: str) -> None:\n if not value:\n raise ValueError(VALUE_ERR_MSG.format('value', value))\n\n self._title = value\n\n @property\n def body(self) -> str:\n return self._body\n\n @body.setter\n def body(self, value: str) -> None:\n if not value:\n raise ValueError(VALUE_ERR_MSG.format('value', value))\n\n self._body = value\n\n @property\n def note_type(self) -> NoteType:\n return self._note_type\n\n @note_type.setter\n def note_type(self, value: NoteType) -> None:\n if not value:\n raise ValueError(VALUE_ERR_MSG.format('value', value))\n\n self._note_type = value\n\n @property\n def updated_on(self) -> int:\n return self._updated_on\n\n @updated_on.setter\n def updated_on(self, value: int) -> None:\n if not value:\n raise ValueError(VALUE_ERR_MSG.format('value', value))\n\n self._updated_on 
= value\n\n @classmethod\n def from_api_dm(cls, vars: Mapping[str, Any]) -> 'Note':\n return Note(\n id=vars[\"id\"],\n title=vars[\"title\"],\n body=vars[\"body\"],\n note_type=NoteType[vars[\"note_type\"]],\n updated_on=vars[\"updated_on\"]\n )\n\n def to_api_dm(self) -> Mapping[str, Any]:\n d = {\n \"id\": self.id,\n \"title\": self.title,\n \"body\": self.body,\n \"note_type\": self.note_type.name,\n \"updated_on\": self.updated_on\n }\n\n return {k: v for k, v in d.items() if v is not None}\n","repo_name":"btofficiel/notes-ms","sub_path":"notesservice/datamodel.py","file_name":"datamodel.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7049507996","text":"import collections\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shapely\n\nimport data\nfrom world_generation.terrains import TerrainTypes\n\n\ndef plot_map(world, to_file=False):\n # plot_polygons(world)\n plot_clusters(world)\n plot_mountain_chains(world)\n plot_rivers(world)\n # plot_downslopes(world)\n # plot_moisture(world)\n\n _display(\"map.png\", to_file)\n\n\ndef plot_polygons(world: data.World):\n HYPSOMETRIC_COLORS = collections.OrderedDict([\n (0.9, \"#e42423\"),\n (0.8, \"#e5452b\"),\n (0.6, \"#ee7d3b\"),\n (0.5, \"#f5b26b\"),\n (0.4, \"#ffe64c\"),\n (0.3, \"#f6ee81\"),\n (0.0, \"#76b648\"),\n (-0.2, \"#bee3eb\"),\n (-1, \"#71c7d6\"),\n ])\n\n def color_for_height(height):\n for min_height, hex_color in HYPSOMETRIC_COLORS.items():\n if height >= min_height:\n return hex_color\n\n fig = plt.figure()\n ax = fig.gca()\n for region_id, vertices in world.vertices_by_region.items():\n height = world.height_by_region[region_id]\n hex_color = color_for_height(height)\n pos_of_vertices = np.array([world.pos_by_vertex[vertex] for vertex in vertices])\n ax.fill(pos_of_vertices[:, 0], pos_of_vertices[:, 1], hex_color)\n\n\ndef plot_resources(resources, background_image: str, to_file=False):\n fig = plt.figure(figsize=(1, 1))\n ax = fig.gca()\n img_shape = plot_background_image(background_image)\n plot_resource_areas(resources, img_shape, ax)\n\n _display(\"resources.png\", to_file)\n\n\ndef plot_background_image(background_image):\n img = mpimg.imread(background_image)\n plt.imshow(img)\n return img.shape\n\n\ndef plot_resource_areas(resources, img_shape, ax):\n for resource_area in resources:\n scaled_area = shapely.affinity.scale(resource_area, xfact=img_shape[1], yfact=img_shape[0], origin=(0, 0))\n print(img_shape)\n print(scaled_area)\n pos_of_vertices = np.array(scaled_area.exterior.coords)\n\n ax.fill(pos_of_vertices[:, 0], pos_of_vertices[:, 1], \"#ff000080\")\n\n\ndef plot_mountain_chains(world: data.World):\n for chain in world.mountain_chains:\n xes = [p[0] for p in chain.line.coords]\n yes = [p[1] for p in chain.line.coords]\n plt.plot(xes, yes, color=\"purple\", linewidth=0.3)\n\n\ndef plot_rivers(world: data.World):\n for river in world.rivers:\n river_pos = [world.pos_by_vertex[vertex] for vertex in river]\n xes = [p[0] for p in river_pos]\n yes = [p[1] for p in river_pos]\n plt.plot(xes, yes, color=\"blue\", linewidth=0.15)\n\n\ndef plot_moisture(world: data.World):\n for vertex_id, moisture in world.moisture_by_vertex.items():\n river_pos = world.pos_by_vertex[vertex_id]\n if moisture > 0:\n plt.plot(river_pos[0], river_pos[1], marker='o', markersize=0.25, color=(moisture, moisture, moisture))\n\n\nCOLOR_BY_TERRAIN = {\n TerrainTypes.MOUNTAIN: \"#B0B0B0\",\n 
TerrainTypes.CONIFEROUS_FOREST: \"#1E3C00\",\n TerrainTypes.DECIDUOUS_FOREST: \"#006600\",\n TerrainTypes.GRASSLAND: \"#A7E541\",\n TerrainTypes.PLAINS: \"#DCFF5E\",\n TerrainTypes.DEEP_WATER: \"#007ad0\",\n TerrainTypes.SHALLOW_WATER: \"#0091cf\",\n TerrainTypes.LAKE: \"#95daf0\",\n}\n\n\ndef plot_terrain(world: data.World):\n fig = plt.figure()\n ax = fig.gca()\n for region_id, vertices in world.vertices_by_region.items():\n terrain = world.terrain_by_region[region_id]\n hex_color = COLOR_BY_TERRAIN[terrain]\n pos_of_vertices = np.array([world.pos_by_vertex[vertex] for vertex in vertices])\n ax.fill(pos_of_vertices[:, 0], pos_of_vertices[:, 1], hex_color)\n\n\nsort_order = {\n TerrainTypes.MOUNTAIN: 6,\n TerrainTypes.CONIFEROUS_FOREST: 4,\n TerrainTypes.DECIDUOUS_FOREST: 3,\n TerrainTypes.GRASSLAND: 2,\n TerrainTypes.PLAINS: 2,\n TerrainTypes.LAKE: 5,\n TerrainTypes.SHALLOW_WATER: 1,\n TerrainTypes.DEEP_WATER: 0,\n}\n\n\ndef plot_clusters(world):\n fig = plt.figure(figsize=(1, 1))\n ax = fig.gca()\n for cluster in sorted(world.clusters, key=lambda x: sort_order[x.terrain_type]):\n hex_color = COLOR_BY_TERRAIN[cluster.terrain_type]\n pos_of_vertices = np.array(cluster.polygon.exterior.coords)\n ax.fill(pos_of_vertices[:, 0], pos_of_vertices[:, 1], hex_color)\n\n\ndef plot_downslopes(world: data.World):\n for vertex, pos in world.pos_by_vertex.items():\n downslopes = world.downslopes[vertex]\n for downslope in downslopes:\n downslope_pos = world.pos_by_vertex[downslope]\n plt.arrow(pos[0], pos[1], (downslope_pos[0] - pos[0]) / 2, (downslope_pos[1] - pos[1]) / 2,\n head_width=0.0005, head_length=0.001)\n\n\ndef _display(file_name, only_to_file):\n plt.axis('off')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0,\n hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.savefig(file_name, dpi=2000, bbox_inches=0, pad_inches=0)\n\n if not only_to_file:\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n","repo_name":"alchrabas/creo-exeris","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29638607986","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Windows EventLog (EVT) parser.\"\"\"\n\nimport unittest\n\nfrom plaso.formatters import winevt as _ # pylint: disable=unused-import\nfrom plaso.lib import eventdata\nfrom plaso.lib import timelib\nfrom plaso.parsers import winevt\n\nfrom tests.parsers import test_lib\n\n\nclass WinEvtParserTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Windows EventLog (EVT) parser.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an individual test.\"\"\"\n self._parser = winevt.WinEvtParser()\n\n def testParse(self):\n \"\"\"Tests the Parse function.\"\"\"\n test_file = self._GetTestFilePath([u'SysEvent.Evt'])\n event_queue_consumer = self._ParseFile(self._parser, test_file)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n # Windows Event Log (EVT) information:\n #\tVersion : 1.1\n #\tNumber of records : 6063\n #\tNumber of recovered records : 437\n #\tLog type : System\n\n self.assertEqual(len(event_objects), (6063 + 437) * 2)\n\n # Event number : 1392\n # Creation time : Jul 27, 2011 06:41:47 UTC\n # Written time : Jul 27, 2011 06:41:47 UTC\n # Event type : Warning event (2)\n # Computer name : WKS-WINXP32BIT\n # Source name : LSASRV\n # Event category : 3\n # Event identifier : 0x8000a001 (2147524609)\n # Number of strings : 2\n # 
String: 1 : cifs/CONTROLLER\n # String: 2 : \"The system detected a possible attempt to compromise\n # security. Please ensure that you can contact the\n # server that authenticated you.\\r\\n (0xc0000388)\"\n event_object = event_objects[1]\n self.assertEqual(event_object.record_number, 1392)\n self.assertEqual(event_object.event_type, 2)\n self.assertEqual(event_object.computer_name, u'WKS-WINXP32BIT')\n self.assertEqual(event_object.source_name, u'LSASRV')\n self.assertEqual(event_object.event_category, 3)\n self.assertEqual(event_object.event_identifier, 40961)\n self.assertEqual(event_object.strings[0], u'cifs/CONTROLLER')\n\n expected_string = (\n u'\"The system detected a possible attempt to compromise security. '\n u'Please ensure that you can contact the server that authenticated you.'\n u'\\r\\n (0xc0000388)\"')\n\n self.assertEqual(event_object.strings[1], expected_string)\n\n event_object = event_objects[0]\n\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2011-07-27 06:41:47')\n self.assertEqual(event_object.timestamp, expected_timestamp)\n self.assertEqual(\n event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)\n\n event_object = event_objects[1]\n\n expected_timestamp = timelib.Timestamp.CopyFromString(\n u'2011-07-27 06:41:47')\n self.assertEqual(event_object.timestamp, expected_timestamp)\n\n self.assertEqual(\n event_object.timestamp_desc, eventdata.EventTimestamp.WRITTEN_TIME)\n\n expected_msg = (\n u'[40961 / 0xa001] '\n u'Severity: Warning '\n u'Record Number: 1392 '\n u'Event Type: Information event '\n u'Event Category: 3 '\n u'Source Name: LSASRV '\n u'Computer Name: WKS-WINXP32BIT '\n u'Strings: [\\'cifs/CONTROLLER\\', '\n u'\\'\"The system detected a possible attempt to '\n u'compromise security. Please ensure that you can '\n u'contact the server that authenticated you. 
(0xc0000388)\"\\']')\n\n expected_msg_short = (\n u'[40961 / 0xa001] '\n u'Strings: [\\'cifs/CONTROLLER\\', '\n u'\\'\"The system detected a possibl...')\n\n self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"CNR-ITTIG/plasodfaxp","sub_path":"tests/parsers/winevt.py","file_name":"winevt.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"14492853588","text":"#balance - the outstanding balance on the credit card\r\n#annualInterestRate - annual interest rate as a decimal\r\n#monthlyPaymentRate - minimum monthly payment rate as a decimal\r\n\r\n#Month: 1\r\n#Minimum monthly payment: 96.0\r\n#Remaining balance: 4784.0\r\n\r\n#Monthly interest rate= (Annual interest rate) / 12.0\r\n#Minimum monthly payment = (Minimum monthly payment rate) x (Previous balance)\r\n#Monthly unpaid balance = (Previous balance) - (Minimum monthly payment)\r\n#Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)\r\n\r\n#Test Case 1:\r\n#\t balance = 4213\r\n#\t annualInterestRate = 0.2\r\n#\t monthlyPaymentRate = 0.04\r\n#\t \r\n#\t Result Your Code Should Generate:\r\n#\t -------------------\r\n#\t Month: 1\r\n#\t Minimum monthly payment: 168.52\r\n#\t Remaining balance: 4111.89\r\n#\t Month: 2\r\n#\t Minimum monthly payment: 164.48\r\n#\t Remaining balance: 4013.2\r\n#\t Month: 3\r\n#\t Minimum monthly payment: 160.53\r\n#\t Remaining balance: 3916.89\r\n\r\n#balance = 4213\r\ntotal_paid = 0\r\n#annualInterestRate = 0.2\r\n#monthlyPaymentRate = 0.04\r\nmon_rate = annualInterestRate/12.0\r\n\r\nfor i in range(1,13):\r\n min_mon_pay = round(monthlyPaymentRate * balance,2)\r\n unpaid_balance = balance - min_mon_pay\r\n balance = round(unpaid_balance + mon_rate*unpaid_balance, 2)\r\n total_paid += min_mon_pay\r\n #print (\"Month: \" + str(i))\r\n #print (\"Minimum monthly payment: \" + str(min_mon_pay))\r\n #print (\"Remaining balance: \" + str(balance))\r\n\r\n#print (\"Total paid: \" + str(total_paid))\r\nprint (\"Remaining balance: \" + str(balance))\r\n","repo_name":"allen791210/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python","sub_path":"hw2_1.py","file_name":"hw2_1.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"31613269812","text":"import os\nimport numpy as np\nimport pymap3d\nfrom scipy.spatial.transform import Rotation\nfrom itertools import combinations\n\n\ndef geo_conversion(pos, ell, ell_out):\n return pos\n\n\ndef restore_rotation(v1, v2):\n cos_a = np.dot(v1, v2) / np.linalg.norm(v1)\n angle = 0 if cos_a > 1.0 else np.arccos(cos_a)\n cross = np.cross(v1, v2)\n w = angle * cross / np.linalg.norm(cross)\n rot = Rotation.from_rotvec(w)\n return rot\n\n\ndef restore_position(u, pos0):\n q = restore_quat(u)\n return q.apply(pos0)\n\n\ndef restore_quat(u):\n qp = Rotation.from_rotvec(u[1] * np.array([0, 1, 0]))\n qy = Rotation.from_rotvec(u[2] * np.array([0, 0, 1]))\n qyp = qy * qp\n rx = qyp.apply(np.array([1, 0, 0]))\n rx = rx / np.linalg.norm(rx)\n\n k, l, m = rx\n z = np.sin(u[0])\n a = l ** 2 / k ** 2 + 1\n b = 2 * l * m * z / k ** 2\n c = z ** 2 * ((m / k) ** 2 + 1) - 1\n d = b ** 2 - 4 * a * c\n y = np.array([(-b - d ** 0.5) / a / 2, (-b + d ** 0.5) / a / 2])\n x = -(l * y + m * z) / k\n ry = np.array([x[0], y[0], z])\n rz = 
np.cross(rx, ry)\n if rz[2] < 0:\n ry = np.array([x[1], y[1], z])\n ry = ry / np.linalg.norm(ry)\n\n ry_h = qy.apply(np.array([0, 1, 0]))\n qr = restore_rotation(ry_h, ry)\n q_total = qr * qyp\n return q_total\n\n\ndef restore_rotation_speed(rx_1, rx, rz_1, rz):\n qz = restore_rotation(rz_1, rz)\n rxr = qz.apply(rx_1)\n qx = restore_rotation(rxr, rx)\n return qx * qz\n\n\ndef wwxr_correction(a, w, pos, z0=True):\n wxr = np.cross(w, pos, axisb=1)\n wwxr = np.cross(w, wxr, axisb=1)\n a -= wwxr\n if z0:\n a[:, 2] = 0\n return a\n\n\ndef get_middle(srns_points, get_mean=False):\n if srns_points.shape[0] < 3 or get_mean:\n return np.mean(srns_points, axis=0)\n\n pts = srns_points[:, :2]\n comb_indices = list(combinations(range(pts.shape[0]), 2))\n mid_pts, distances = [], []\n for ids in comb_indices:\n mid_pts.append(np.mean([pts[ids[0]], pts[ids[1]]], axis=0))\n distances.append(np.linalg.norm(pts[ids[0]] - pts[ids[1]]))\n\n mid_pts = np.multiply(np.array(mid_pts), np.tile(np.array(distances) / np.sum(distances), (2, 1)).T)\n middle = np.append(np.sum(mid_pts, axis=0), np.mean(srns_points[:, 2]))\n return middle\n\n\ndef join(*args):\n return os.path.join(*args).replace('\\\\', '/')\n","repo_name":"DmitryYenkov/various_code_samples","sub_path":"data_fusion_system/fusion_filter/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33010848476","text":"from pygame import *\n\n\nclass Barrier(sprite.Sprite):\n def __init__(self, size, color, row, column):\n sprite.Sprite.__init__(self)\n self.height = size\n self.width = size\n self.color = color\n self.image.fill(self.color)\n self.rect = self.image.get_rect()\n self.row = row\n self.column = column\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)","repo_name":"ThomasB15/Alien_Invaders_cs386_project3","sub_path":"Barrier.py","file_name":"Barrier.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19129197783","text":"import psycopg2\n\ntry:\n connection = psycopg2.connect(\n user=\"USERNAME_HERE\", # user_name\n password=\"PASSWORD_HERE\", # password\n host=\"HOST_HERE\", # localhost\n port=\"PORT_HERE\", # 5432\n database=\"DATABASE_HERE\", # test_database\n )\n cursor = connection.cursor()\n\n table_create_query = \"\"\"\n CREATE TABLE IF NOT EXISTS test(\n id serial PRIMARY KEY,\n first_name VARCHAR(30) NOT NULL,\n last_name VARCHAR(30) NOT NULL,\n dob DATE,\n location VARCHAR(50),\n );\"\"\"\n\n cursor.execute(table_create_query)\n connection.commit()\n\nexcept Exception as e:\n print(\"[-] Exception Occurred:\", e)\n\nfinally:\n print(\"[+] Executed !\")\n cursor.close()\n connection.close()\n print(\"[+] Connection Closed !\")\n","repo_name":"Saphall/BackEnd","sub_path":"Database/2-PostgreSQL/Practical/Python_Postgres/psycopg_connection.py","file_name":"psycopg_connection.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"29474068836","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Module for linkifying urls in messages\n\nLicense: See LICENSE file\n\n\"\"\"\n\nimport logging\n\nfrom linkify_it import LinkifyIt\nfrom linkify_it.tlds import TLDS\n\nlogger = logging.getLogger(__name__)\n\n\ndef linkify(message: str) -> str:\n \"\"\"Replace text URLs in message with HTML links\"\"\"\n linkifier = 
LinkifyIt().tlds(TLDS)\n\n # Pre-test first for efficiency\n if not linkifier.pretest(message):\n return message\n\n # Test if a link is present\n if not linkifier.test(message):\n return message\n\n # Find links in message\n matches = linkifier.match(message)\n if not matches:\n return message\n\n logger.debug(f\"Replacing urls in message:\\n{message}\")\n\n # Construct new message\n new_message = \"\"\n idx = 0\n for match in matches:\n new_message += message[idx : match.index]\n new_message += f'{match.raw}'\n idx = match.last_index\n new_message += message[idx:]\n\n logger.debug(f\"Replaced urls in message:\\n{new_message}\")\n return new_message\n","repo_name":"GjjvdBurg/signal2html","sub_path":"signal2html/linkify.py","file_name":"linkify.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"91"} +{"seq_id":"24705411767","text":"from datetime import datetime\r\nimport pandas as pd\r\nimport csv\r\nimport arrow\r\nimport re\r\nfrom datetime import date\r\nimport dateutil.parser\r\nnew_NH_dates = list()\r\nnew_NH_dates2 = list()\r\nnew_NH_dates3 = list()\r\nnew_NH_dates4 = list()\r\nnew_NH_dates5 = list()\r\ndef clean_NH_data(NH_data):\r\n\tNH_data = NH_data\r\n\tfor x in NH_data['Dates of Breach']:\r\n\t\tx = str(x)\r\n\t\ttry: \r\n\t\t\td = datetime.strptime(x, \"%d-%b-%y\")\r\n\t\t\tnew_NH_dates.append(d)\r\n\t\texcept:\t\r\n\t\t\tnew_NH_dates.append(x)\r\n\t\r\n\tNH_data['Start Date of Breach'] = new_NH_dates\r\n\t\r\n\tfor x in NH_data['Start Date of Breach']:\r\n\t\ttry: \r\n\t\t\td = datetime.strptime(x, \"%B %d, %Y\")\r\n\t\t\tnew_NH_dates2.append(d)\r\n\t\texcept:\t\r\n\t\t\tnew_NH_dates2.append(x)\r\n\r\n\tNH_data['Start Date of Breach'] = new_NH_dates2\r\n\r\n\tfor x in NH_data['Start Date of Breach']:\r\n\t\ttry: \r\n\t\t\td = datetime.strptime(x, \"%B %d. 
%Y\")\r\n\t\t\tnew_NH_dates3.append(d)\r\n\t\texcept:\t\r\n\t\t\tnew_NH_dates3.append(x)\r\n\r\n\tNH_data['Start Date of Breach'] = new_NH_dates3\r\n\tfor x in NH_data['Start Date of Breach']:\r\n\t\ttry: \r\n\t\t\td = datetime.strptime(x, \"%B %d %Y\")\r\n\t\t\tnew_NH_dates4.append(d)\r\n\t\texcept:\t\r\n\t\t\tnew_NH_dates4.append(x)\r\n\r\n\tNH_data['Start Date of Breach'] = new_NH_dates4\r\n\tfor x in NH_data['Start Date of Breach']:\r\n\t\ttry: \r\n\t\t\td = datetime.strptime(x, \"%B %d,%Y\")\r\n\t\t\tnew_NH_dates5.append(d)\r\n\t\texcept:\t\r\n\t\t\tnew_NH_dates5.append(x)\r\n\tNH_data['Start Date of Breach'] = new_NH_dates5\r\n\tNH_data['End Date of Breach'] = ''\r\n\tNH_data.to_csv(\"testNH.csv\")\r\n\treturn NH_data\r\n\r\n# replace with NH data\r\ny = pd.read_csv('C:/Users/atrm1/Downloads/NH.csv')\r\nclean_NH_data(y)\r\n","repo_name":"rachrich765/SI_485_PRC_class","sub_path":"dates/NH_dates.py","file_name":"NH_dates.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19790886512","text":"class Apartment():\n def __init__(self,flat_number, owner_name, electicitybillamount):\n self.flat_number=flat_number\n self.owner_name=owner_name\n self.electicitybillamount=electicitybillamount\n \nclass apartment_demo():\n def __init__(self):\n #p=0\n pass\n \n def getSecondMiniBill(self,apartment_list):\n self.apartment_list=apartment_list\n apartment_list.sort(key=lambda x:x.electicitybillamount,reverse=True)\n return apartment_list[1].electicitybillamount\n \nif __name__=='__main__':\n apartment_list=[]\n count=int(input())\n for c in range(count):\n flat_number=int(input())\n owner_name=input()\n electicitybillamount=int(input())\n Apartment_obj=Apartment(flat_number, owner_name, electicitybillamount)\n apartment_list.append(Apartment_obj)\n \n demo_obj=apartment_demo()\n result=demo_obj.getSecondMiniBill(apartment_list)\n print(result)\n \n","repo_name":"Alan-Codes-hub/TCS-Learning","sub_path":"Xplore & Xperience/Assessments/PA & OPA (python)/Dec 28 (apartment).py","file_name":"Dec 28 (apartment).py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36319677754","text":"startdate = \"2017-01-01\"\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib.dates import YearLocator, MonthLocator, DateFormatter\nimport pandas as pd\nimport numpy as np\nimport os\nfrom monthdelta import monthdelta\nimport math\nimport os\nimport sys\nsys.path.append('/nesi/project/niwa00004/rampaln/CAOA2101/seven-station-series-python/lib')\nfrom src import *\ninput_dirs = r'/scale_wlg_persistent/filesets/project/niwa00004/rampaln/CAOA2101/seven-station-series-python/output/time_series'\nos.chdir(input_dirs)\nbrk_dirs = r'/scale_wlg_persistent/filesets/project/niwa00004/rampaln/CAOA2101/seven-station-series-python/output'\nmain_dirs =r'/scale_wlg_persistent/filesets/project/niwa00004/rampaln/CAOA2101/seven-station-series-python' \noutput_dirs = f'{input_dirs}'\nts_all = pd.read_csv(f'{input_dirs}/AllStationMonthly_Anomalies.csv',\n index_col =0, parse_dates =True)\nts = ts_all.T.mean().apply(lambda a: float(\"%.2f\"%a))\nts.columns = [['Temperature anomaly']]\n\n# Pressure\nts_all['NZT7_Anomally'] = ts\nts_all.to_excel(f'{input_dirs}/SevenStationSeries.xlsx')\ncovert_series_to_brick(ts_all,brk_dirs +'/bricks', output_name = 'NZ_T7Anomalies')\nts = ts.truncate(before=startdate)\nanoms = 
ts.values.flatten()\n\n# %%\ndates = np.array(ts.index.to_pydatetime())\nwidths = np.array([(dates[j + 1] - dates[j]).days for j in range(len(dates) - 1)] + [30])\nyears = YearLocator()\n# months = MonthLocator(bymonth=[1,3,5,7,9,11])\nmonths = MonthLocator()\nmFMT = DateFormatter('%b')\nyFMT = DateFormatter('\\n\\n%Y')\nmpl.rcParams['xtick.labelsize'] = 12\nmpl.rcParams['ytick.labelsize'] = 12\nmpl.rcParams['axes.titlesize'] = 14\nmpl.rcParams['xtick.direction'] = 'out'\nmpl.rcParams['ytick.direction'] = 'out'\nmpl.rcParams['xtick.major.size'] = 5\nmpl.rcParams['ytick.major.size'] = 5\nmpl.rcParams['xtick.minor.size'] = 2\n\ny_axis_max = math.ceil(max(anoms))\ny_axis_min = math.floor(min(anoms))\nranges = {'well above average': [anoms > 1.20, 'firebrick'],\n 'above average': [(anoms >= 0.51) & (anoms <= 1.20), 'darksalmon'],\n 'near average': [(anoms >= -0.50) & (anoms <= 0.50), 'lightgrey'],\n 'below average': [(anoms >= -1.20) & (anoms <= -0.50), 'steelblue'],\n 'well below average': [anoms < -1.20, 'darkblue']}\n\n# %%\n# NIWA logo settings\nfrom PIL import Image\n\nim = Image.open(f'{main_dirs}/NIWA_CMYK_Hor.png')\nrsize = im.resize((np.array(im.size) / 10).astype(int)) # Use PIL to resize\nposition = (1120, 540)\n\n# %%\nfig, ax = plt.subplots(figsize=(14, 7))\nfig.subplots_adjust(bottom=0.15)\n\nfor label in ranges:\n rng = ranges[label][0]\n clr = ranges[label][1]\n\n if True in rng: # excludes from legend if no data\n ax.bar(dates[rng], anoms[rng], width=widths[rng], label=label, align='center',\n facecolor=clr, alpha=.8, edgecolor='k', lw=1.5)\n\nax.xaxis.set_minor_locator(months)\nax.xaxis.set_major_locator(years)\n\n# ax.xaxis.set_major_locator(years)\nax.xaxis.set_minor_formatter(mFMT)\nax.xaxis.set_major_formatter(yFMT)\nax.axhline(0, color='k')\n\nlabels = ax.get_xminorticklabels()\nfor label in labels:\n label.set_fontsize(14)\n label.set_rotation(90)\nlabels = ax.get_xmajorticklabels()\nfor label in labels:\n label.set_fontsize(18)\nlabels = ax.get_yticklabels()\nfor label in labels:\n label.set_fontsize(18)\n\nax.grid(linestyle=':')\nax.xaxis.grid(True, which='both', linestyle=':')\nax.set_ylim(y_axis_min, y_axis_max)\nax.set_ylabel(u'Temperature anomaly (\\N{DEGREE SIGN}C)', fontsize=14, backgroundcolor=\"w\")\nax.set_xlim(dates[0] - monthdelta(1), dates[-1] + monthdelta(1))\n\nax.text(0.01,1.02,\"NZ 7-station monthly temperature anomalies since {}\".format(dates[0].strftime(\"%b %Y\")), fontsize=16, fontweight = 'bold', transform=ax.transAxes)\n\nax.figure.figimage(rsize,position[0],position[1], alpha=.6, zorder=1)\n\n#ax.figure.figimage(rsize, loc ='upper left', alpha=.6, zorder=1)\n\n######## Label for latest anomaly (top right) ########\nu'\\N{DEGREE SIGN}'\nif round(anoms[-1],1) > 0.0:\n anom_text = u'+{}\\N{DEGREE SIGN}C'.format(round(anoms[-1],1)) # adds plus sign to positive anoms\nelse:\n anom_text = u'{}\\N{DEGREE SIGN}C'.format(round(anoms[-1],1))\n\n# writes full month if 5 characters or less. 
Otherwise abriviate to 3 letters\nif len(dates[-1].strftime(\"%B\")) > 5:\n mon_text = \"{} value = {}\".format(dates[-1].strftime(\"%b %Y\"), anom_text)\nelse:\n mon_text = \"{} value = {}\".format(dates[-1].strftime(\"%B %Y\"), anom_text)\n\nax.text(0.75,1.02,mon_text, fontsize=14, transform=ax.transAxes)\n####################################################\n\nleg = ax.legend(loc='upper left', frameon=False)\n\noutput_fname = f'NZT7_{dates[-1].strftime(\"%b%Y\")}.png'\nfig.savefig(output_fname)\nfig.show()\nemail_title = \"7 Station Series Data\"\ntext = f'Here is the Seven Station Series Data for {dates[-1].strftime(\"%b %Y\")}'\nprint(\"Emailing the file\")\n#rcode = os.popen(f'export LC_CTYPE=\"en_US.UTF-8\" && mail -a {input_dirs}/{output_fname} -a {input_dirs}/AllStationMonthly_Anomalies.xlsx -s \"##{email_title}\" Neelesh.Rampal@niwa.co.nz,Seth.Carrier@niwa.co.nz,John-Mark.Woolley@niwa.co.nz,Ben.Noll@niwa.co.nz', 'w').write(text)\n\nrcode = os.popen(f'export LC_CTYPE=\"en_US.UTF-8\" && mail -a {input_dirs}/{output_fname} -a {input_dirs}/SevenStationSeries.xlsx -s \"{email_title}\" Neelesh.Rampal@niwa.co.nz,Andrew.Lorrey@niwa.co.nz,Stephen.Stuart@niwa.co.nz,Seth.Carrier@niwa.co.nz,John-Mark.Woolley@niwa.co.nz,Ben.Noll@niwa.co.nz', 'w').write(text)\n\n\n#rcode = os.popen(f'export LC_CTYPE=\"en_US.UTF-8\" && mail -a {input_dirs}/{output_fname} -a {input_dirs}/SevenStationSeries.xlsx -s \"{email_title}\" Neelesh.Rampal@niwa.co.nz', 'w').write(text)\n","repo_name":"nram812/cpp","sub_path":"ops/send_series.py","file_name":"send_series.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"13006808897","text":"import math\r\n\r\n\r\nclass CustomSort:\r\n @staticmethod\r\n def by_insertion(items:list,key=lambda t: t, reverse=False, cmp = None):\r\n \"\"\"\r\n sorts list by insertion\r\n if cmp is defined, it will be used as sort condition.\r\n Otherwise, key is the default comparison term\r\n :param items: items list\r\n :type: list\r\n :param key: function sort key f:element->key, e1bool, f(x,y)=true if xkey(item2)\r\n return not cmp(item1,item2)\r\n\r\n result = [item for item in items]\r\n for i in range(1,len(result)):\r\n j = i\r\n while j>0 and compare(result[j-1],result[j]): #key(result[j-1])>key(result[j]):\r\n result[j - 1], result[j] = result[j], result[j-1]\r\n j -= 1\r\n i += 1\r\n if reverse:\r\n result.reverse()\r\n return result\r\n\r\n @staticmethod\r\n def by_combo(items:list,key=lambda t: t, reverse=False, cmp = None):\r\n \"\"\"\r\n sorts list by combo sort\r\n if cmp is defined, it will be used as sort condition.\r\n Otherwise, key is the default comparison term\r\n :param items: items list\r\n :type: list\r\n :param key: function sort key f:element->key, e1key(item2)\r\n return not cmp(item1,item2)\r\n\r\n result = [item for item in items]\r\n gap = len(result)\r\n shrink = 1.3\r\n sorted = False\r\n while not sorted:\r\n gap = math.floor(gap/shrink)\r\n if gap <= 1:\r\n gap = 1\r\n sorted = True\r\n i = 0\r\n while i+gapkey(result[i+gap]):\r\n result[i], result[i+gap] = result[i+gap], result[i]\r\n sorted = False\r\n i += 1\r\n if reverse:\r\n result.reverse()\r\n return result\r\n\r\n @staticmethod\r\n def perform(items:list, key=lambda t: t, reverse=False, cmp=None, method=\"combo\"):\r\n \"\"\"\r\n sorts list by a chosen method\r\n :param items: items list\r\n :type: list\r\n :param key: function sort key f:element->key, e1 length measure, 1 -> add, 2 -> erase, 3 
-> move\n\n self.points = []\n self.session_points = {} # store points for all images opened\n # format: { \"imgname.ext\" : [QPoint(x1,y1), ...] }\n self.session_points_actual = {} # store original resolution session data for exporting\n\n self.wing_length = 0\n self.length_coords = []\n self.session_lengths = {} # same idea but we store length/end data\n self.session_lengths_actual = {}\n\n self.drag_index = -1\n self.length_drag_index = -1\n\n self.pointer = QtCore.QPoint()\n self.end = QtCore.QPoint()\n\n self.pen_size = 15\n\n self.image_name = \"\"\n self.image_path = \"\"\n self.img_width = 0\n self.img_height = 0\n self.pixmap = QtGui.QPixmap()\n\n\n def paintEvent(self, event):\n if self.image_name != \"\":\n qp = QtGui.QPainter(self)\n # for now, we just keep the size the image was opened at\n qp.drawPixmap(self.pixmap.rect(), self.pixmap)\n\n if self.mode == 0:\n pen = QtGui.QPen(QtGui.QColor(10, 255, 10, 200), self.pen_size)\n pen.setCapStyle(QtCore.Qt.RoundCap)\n qp.setPen(pen)\n for pt in self.length_coords:\n qp.drawPoint(pt)\n pen = QtGui.QPen(QtGui.QColor(10, 190, 10, 200), 3)\n pen.setCapStyle(QtCore.Qt.RoundCap)\n qp.setPen(pen)\n qp.drawLine(self.length_coords[0], self.length_coords[1])\n\n pen = QtGui.QPen(QtGui.QColor(255, 10, 10, 200), self.pen_size)\n pen.setCapStyle(QtCore.Qt.RoundCap)\n qp.setPen(pen)\n for pt in self.points:\n qp.drawPoint(pt)\n\n def mousePressEvent(self, event):\n self.pointer = event.pos()\n self.end = event.pos()\n if self.mode == 0:\n for i, pt in enumerate(self.length_coords):\n if abs((pt - self.pointer).manhattanLength()) < 20:\n self.length_drag_index = i\n elif self.mode == 1:\n # only allow adding points within the bounds of the image\n if (self.pointer.x() - self.pixmap.width() < 0 and\n self.pointer.y() - self.pixmap.height() < 0):\n self.points.append(self.pointer)\n elif self.mode == 2:\n for pt in self.points:\n if abs((pt - self.pointer).manhattanLength()) < 20:\n self.points.remove(pt)\n elif self.mode == 3:\n # select the index of the point the user is clicking on\n for i, pt in enumerate(self.points):\n if abs((pt - self.pointer).manhattanLength()) < 20:\n self.drag_index = i\n\n self.session_points[self.image_name] = self.points # update session\n if self.points: # if we actually have points update _actual session\n self.session_points_actual[self.image_name],\\\n self.session_lengths_actual[self.image_name] = self.convertRelativePoints()\n self.session_lengths[self.image_name] = self.length_coords\n self.update()\n\n def setImage(self, image_path):\n # add if we already know about the image load it into current points, else create it\n image_name = os.path.basename(image_path)\n if image_name in self.session_points:\n self.points = self.session_points[image_name]\n else:\n self.points = []\n self.session_points[image_name] = []\n self.session_points_actual[image_name] = []\n\n self.image_name = image_name\n self.image_path = image_path\n unscaled_pix = QtGui.QPixmap(image_path)\n self.img_width = unscaled_pix.width()\n self.img_height = unscaled_pix.height()\n self.pixmap = unscaled_pix.scaled(self.width(), self.height(),\n QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation)\n\n # add old or new wing length data\n if image_path in self.session_lengths:\n self.length_coords = self.session_lengths[image_path]\n self.wing_length = int(math.hypot(\n self.length_coords[0].x() - self.length_coords[1].x(),\n self.length_coords[0].y() - self.length_coords[1].y()))\n else:\n self.new_length_points(self.pixmap.width(), 
self.pixmap.height())\n self.session_lengths[image_path] = self.length_coords\n\n self.update()\n\n def new_length_points(self, width, height):\n y = int(height / 2)\n x1 = int(width * (1/3))\n x2 = int(width * (2/3))\n self.length_coords = []\n self.length_coords.append(QtCore.QPoint(x1,y))\n self.length_coords.append(QtCore.QPoint(x2,y))\n self.wing_length = int(math.hypot(x2 - x1, y - y))\n\n def analyzeLength(self):\n self.length_coords = []\n start, end = analyzer.analyze(self.image_path)\n x_factor = self.img_width / self.pixmap.width()\n y_factor = self.img_height / self.pixmap.height()\n self.length_coords.append(QtCore.QPoint(start[0] / x_factor , start[1] / y_factor))\n self.length_coords.append(QtCore.QPoint(end[0] / x_factor, end[1] / y_factor))\n self.session_lengths[self.image_name] = self.length_coords\n _,self.wing_length = self.convertRelativePoints()\n self.session_lengths_actual[self.image_name] = self.wing_length\n self.update()\n\n def setLength(self):\n self.mode = 0\n self.update()\n\n def setNormal(self):\n self.mode = 3\n self.update()\n\n def setAdd(self):\n self.mode = 1\n self.update()\n\n def setErase(self):\n self.mode = 2\n self.update()\n\n def loadSwapFile(self, dirpath):\n try:\n filepath = os.path.join(dirpath, '.session.swp')\n with open(filepath, mode='rb') as swapfile:\n self.session_points = pickle.loads(swapfile.read())\n except FileNotFoundError:\n pass\n\n def exportSwapFile(self, dirpath):\n filepath = os.path.join(dirpath, '.session.swp')\n with open(filepath, mode='wb+') as swapfile:\n pickle.dump(self.session_points, swapfile)\n\n def exportLandmarks(self, filepath):\n if filepath is not None and filepath != '':\n with open(filepath, mode='w+', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n points_og,_ = self.convertRelativePoints()\n length_og = self.session_lengths_actual[self.image_name]\n csv_file.write(\"wing length: \" + str(length_og) + \"\\n\")\n for point in points_og:\n writer.writerow([point.x(), point.y()])\n\n def exportAll(self, dirpath):\n for entry in self.session_points_actual:\n filename, _ = os.path.splitext(entry)\n filename = filename + \".csv\"\n filepath = os.path.join(dirpath, filename)\n with open(filepath, mode='w+', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n points_og = self.session_points_actual[entry]\n try:\n length_og = self.session_lengths_actual[entry]\n except:\n length_og = \"NULL\"\n csv_file.write(\"wing length: \" + str(length_og) + \"\\n\")\n for point in points_og:\n writer.writerow([point.x(), point.y()])\n\n\n # def saveMarkedImage(self):\n # \"\"\"repaints the image with the landmark/length points in its\n # original resolution and saves as a png image\"\"\"\n # points_og = self.convertRelativePoints()\n # tmp_painter = QtGui.QPainter()\n # tmp_painter.drawPixmap(self.pixmap.rect(), self.pixmap)\n # pen_scale = self.img_width / self.pixmap.width()\n # pen = QtGui.QPen(QtGui.QColor(255, 10, 10, 200), self.pen_size * pen_scale)\n # pen.setCapStyle(QtCore.Qt.RoundCap)\n # tmp_painter.setPen(pen)\n # for pt in self.points:\n # tmp_painter.drawPoint(pt)\n # save_image = QtGui.QImage(self.img_width, self.img_height, QtGui.QImage.Format_RGB32)\n # src_rect = QtCore.QRect()\n # tmp_painter.drawImage(rect, save_image, )\n\n def convertRelativePoints(self):\n points_og = []\n length_og = 0\n x_factor = self.img_width / self.pixmap.width()\n y_factor = self.img_height / self.pixmap.height()\n for point in self.points:\n x_og = point.x() * x_factor\n y_og = 
point.y() * y_factor\n points_og.append(QtCore.QPoint(x_og, y_og))\n self.wing_length = int(math.hypot(\n (self.length_coords[0].x() - self.length_coords[1].x()) * x_factor,\n (self.length_coords[0].y() - self.length_coords[1].y()) * y_factor))\n length_og = self.wing_length\n return points_og, length_og\n\n\n def mouseMoveEvent(self, event):\n if self.mode == 3 and self.drag_index >= 0:\n self.end = event.pos()\n self.points[self.drag_index] = event.pos()\n self.update()\n elif self.mode == 0 and self.length_drag_index >= 0:\n self.end = event.pos()\n self.length_coords[self.length_drag_index] = event.pos()\n self.update()\n\n def mouseReleaseEvent(self, event):\n if self.mode == 3 and self.drag_index >= 0:\n self.end = event.pos()\n self.points[self.drag_index] = event.pos()\n self.drag_index = -1 # indicate we arent dragging anymore\n self.session_points[self.image_name] = self.points # update session\n self.session_points_actual[self.image_name],_ = self.convertRelativePoints()\n self.update()\n if self.mode == 0 and self.length_drag_index >= 0:\n self.end = event.pos()\n self.length_coords[self.length_drag_index] = event.pos()\n self.length_drag_index = -1\n self.session_lengths[self.image_name] = self.length_coords\n _,self.wing_length = self.convertRelativePoints()\n self.session_lengths_actual[self.image_name] = self.wing_length\n self.update()\n","repo_name":"wing-net/wingqt","sub_path":"canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":10618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"39569359157","text":"from iaso.test import TestCase\nfrom django.test import tag\nfrom iaso import models as m\n\n\nclass OrgUnitModelTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.sector = m.OrgUnitType.objects.create(name=\"Sector\", short_name=\"Sec\")\n cls.system = m.OrgUnitType.objects.create(name=\"System\", short_name=\"Sys\")\n cls.jedi_council = m.OrgUnitType.objects.create(name=\"Jedi Council\", short_name=\"Cnc\")\n cls.jedi_task_force = m.OrgUnitType.objects.create(name=\"Jedi Task Force\", short_name=\"Jtf\")\n\n @tag(\"iaso_only\")\n def test_org_unit_creation_no_parent_or_parent_has_path(self):\n \"\"\"Newly created org unit without parents should have a path, and so do new org units\n attached to a parent that has a path.\"\"\"\n\n corrusca = m.OrgUnit.objects.create(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corruscant = m.OrgUnit.objects.create(parent=corrusca, org_unit_type=self.sector, name=\"Corruscant System\")\n self.assertEqual(str(corrusca.path), str(corrusca.pk))\n self.assertEqual(str(corruscant.path), f\"{corrusca.pk}.{corruscant.pk}\")\n\n @tag(\"iaso_only\")\n def test_org_unit_creation_or_update_parent_without_path(self):\n \"\"\"Created or updated a org unit linked to a pathless parent should not have a path.\"\"\"\n\n corrusca = m.OrgUnit(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corrusca.save(skip_calculate_path=True)\n corruscant = m.OrgUnit.objects.create(parent=corrusca, org_unit_type=self.sector, name=\"Corruscant System\")\n self.assertIsNone(corruscant.path)\n corruscant.save()\n self.assertIsNone(corruscant.path)\n\n @tag(\"iaso_only\")\n def test_org_unit_update_path_with_children(self):\n \"\"\"Path should be set for the whole hierarchy\"\"\"\n\n corrusca = m.OrgUnit(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corrusca.save(skip_calculate_path=True)\n corruscant = m.OrgUnit(org_unit_type=self.system, 
parent=corrusca, name=\"Coruscant System\")\n corruscant.save(skip_calculate_path=True)\n jedi_council_corruscant = m.OrgUnit(\n org_unit_type=self.jedi_council, parent=corruscant, name=\"Corruscant Jedi Council\"\n )\n jedi_council_corruscant.save(skip_calculate_path=True)\n\n self.assertIsNone(corrusca.path)\n self.assertIsNone(corruscant.path)\n self.assertIsNone(jedi_council_corruscant.path)\n\n # 2 savepoints, 1 regular update, 3 \"get children\" queries, 1 bulk update\n with self.assertNumQueries(7):\n corrusca.save(update_fields=[\"path\"])\n\n corruscant.refresh_from_db()\n jedi_council_corruscant.refresh_from_db()\n\n self.assertEqual(str(corrusca.path), str(corrusca.pk))\n self.assertEqual(str(corruscant.path), f\"{corrusca.pk}.{corruscant.pk}\")\n self.assertEqual(\n str(jedi_council_corruscant.path), f\"{corrusca.pk}.{corruscant.pk}.{jedi_council_corruscant.pk}\"\n )\n\n @tag(\"iaso_only\")\n def test_org_unit_path_does_not_change(self):\n \"\"\"Updating the \"name\" property should not result in path change queries \"\"\"\n\n corrusca = m.OrgUnit.objects.create(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corruscant = m.OrgUnit.objects.create(org_unit_type=self.system, parent=corrusca, name=\"Coruscant System\")\n m.OrgUnit.objects.create(org_unit_type=self.jedi_council, parent=corruscant, name=\"Corruscant Jedi Council\")\n\n corrusca.name = \"Corrusca Sector FTW\"\n # 2 savepoints, 1 regular update\n with self.assertNumQueries(3):\n corrusca.save()\n\n @tag(\"iaso_only\")\n def test_org_unit_save_skip_calculate_path(self):\n \"\"\"If skip_calculate_path is set to True, path should be None, and no transaction should be created\"\"\"\n\n # create\n corrusca = m.OrgUnit(org_unit_type=self.sector, name=\"Corrusca Sector\")\n with self.assertNumQueries(1):\n corrusca.save(skip_calculate_path=True)\n\n # update\n corrusca.name = \"Corrusca Sector FTW\"\n with self.assertNumQueries(1):\n corrusca.save(skip_calculate_path=True)\n\n @tag(\"iaso_only\")\n def test_org_unit_path_does_change(self):\n \"\"\"Changing the parent should trigger a path update\"\"\"\n\n alderaan = m.OrgUnit.objects.create(org_unit_type=self.sector, name=\"Alderaan Sector\")\n corrusca = m.OrgUnit.objects.create(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corruscant = m.OrgUnit.objects.create(org_unit_type=self.system, parent=alderaan, name=\"Coruscant System\")\n m.OrgUnit.objects.create(org_unit_type=self.jedi_council, parent=corruscant, name=\"Corruscant Jedi Council\")\n\n corruscant.name = \"The awesome Coruscant System\"\n corruscant.parent = corrusca\n\n # 2 savepoints, 1 regular update, 2 children, 1 bulk update\n with self.assertNumQueries(6):\n corruscant.save()\n\n @tag(\"iaso_only\")\n def test_org_unit_hierarchy_children_descendants(self):\n \"\"\"Test manager methods: hierarchy(), children() and descendants().\"\"\"\n\n (corrusca, corruscant, first_council, second_council, task_force) = self.create_simple_hierarchy()\n\n self.assertEqual(5, m.OrgUnit.objects.hierarchy(corrusca).count())\n self.assertEqual(5, m.OrgUnit.objects.hierarchy(m.OrgUnit.objects.filter(name__icontains=\"corrus\")).count())\n self.assertEqual(1, m.OrgUnit.objects.children(corrusca).count())\n self.assertEqual(4, m.OrgUnit.objects.descendants(corrusca).count())\n\n self.assertEqual(4, m.OrgUnit.objects.hierarchy(corruscant).count())\n self.assertEqual(2, m.OrgUnit.objects.children(corruscant).count())\n self.assertEqual(3, m.OrgUnit.objects.descendants(corruscant).count())\n\n self.assertEqual(2, 
m.OrgUnit.objects.hierarchy(first_council).count())\n self.assertEqual(3, m.OrgUnit.objects.hierarchy([first_council, second_council]).count())\n self.assertEqual(1, m.OrgUnit.objects.children(first_council).count())\n self.assertEqual(1, m.OrgUnit.objects.descendants(first_council).count())\n\n self.assertEqual(1, m.OrgUnit.objects.hierarchy(task_force).count())\n self.assertEqual(0, m.OrgUnit.objects.children(task_force).count())\n self.assertEqual(0, m.OrgUnit.objects.descendants(task_force).count())\n\n # membership sanity checks\n self.assertIn(first_council, m.OrgUnit.objects.hierarchy(corrusca))\n self.assertNotIn(first_council, m.OrgUnit.objects.children(corrusca))\n self.assertIn(first_council, m.OrgUnit.objects.descendants(corrusca))\n self.assertIn(first_council, m.OrgUnit.objects.hierarchy(corruscant))\n self.assertIn(first_council, m.OrgUnit.objects.children(corruscant))\n self.assertIn(first_council, m.OrgUnit.objects.descendants(corruscant))\n self.assertIn(first_council, m.OrgUnit.objects.hierarchy(first_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.children(first_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.descendants(first_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.hierarchy(second_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.children(second_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.descendants(second_council))\n self.assertNotIn(first_council, m.OrgUnit.objects.hierarchy(task_force))\n self.assertNotIn(first_council, m.OrgUnit.objects.children(task_force))\n self.assertNotIn(first_council, m.OrgUnit.objects.descendants(task_force))\n\n def create_simple_hierarchy(self):\n corrusca = m.OrgUnit.objects.create(org_unit_type=self.sector, name=\"Corrusca Sector\")\n corruscant = m.OrgUnit.objects.create(org_unit_type=self.system, parent=corrusca, name=\"Coruscant System\")\n first_council = m.OrgUnit.objects.create(\n org_unit_type=self.jedi_council, parent=corruscant, name=\"First Corruscant Jedi Council\"\n )\n second_council = m.OrgUnit.objects.create(\n org_unit_type=self.jedi_council, parent=corruscant, name=\"Second Corruscant Jedi Council\"\n )\n task_force = m.OrgUnit.objects.create(\n org_unit_type=self.jedi_task_force, parent=first_council, name=\"Jedi Ethics Task Force\"\n )\n corrusca.refresh_from_db()\n corruscant.refresh_from_db()\n first_council.refresh_from_db()\n second_council.refresh_from_db()\n task_force.refresh_from_db()\n\n return corrusca, corruscant, first_council, second_council, task_force\n","repo_name":"vundalon/iaso","sub_path":"iaso/tests/models/test_org_unit.py","file_name":"test_org_unit.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73661668142","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom csv import writer\r\nimport pandas as pd \r\n\r\nempty = []\r\nempty_2 = []\r\nscientific = []\r\nURL = \"https://plants.ces.ncsu.edu/find_a_plant/\"\r\npage = requests.get(URL)\r\n\r\n# bs4 reads the HTML for us\r\nsoup = BeautifulSoup(page.content, \"html.parser\")\r\n\r\n# here i am getting bs4 to find all divs with a certain class in the html\r\nsci = soup.find_all(\"div\", class_=\"plant_1_1\")\r\n# this loops finds all h2's inside of the class specified\r\nfor plant in sci:\r\n name = soup.find_all(\"h2\")\r\n \r\n for name in name:\r\n name.find_all(\"em\")\r\n empty.append(name.text)\r\n\r\n\r\ncommon = soup.find_all(\"span\", 
class_=\"common_names\")\r\nfor plant in common:\r\n name = soup.find_all(\"span\", class_=\"list_common_names\")\r\n for name in name:\r\n name.find_all(\"a\")\r\n empty_2.append(name.text)\r\n \r\n# removing duplicates \r\nres2 = []\r\nfor i in empty_2:\r\n if i not in res2:\r\n res2.append(i)\r\n\r\n# removing duplicates\r\nres = []\r\nfor i in empty:\r\n if i not in res:\r\n res.append(i)\r\n\r\nres.remove(' Find a PlantShow Menu')\r\n\r\nfor element in res:\r\n scientific.append(element.strip())\r\n\r\n\r\nscientific.remove('Where Next?')\r\nprint(scientific)\r\n\r\n#pandas puts it all in a nice little csv\r\ndict = {'Scientific Name': scientific, 'Common Name': res2}\r\ndf = pd.DataFrame(dict)\r\ndf.to_csv('plants.csv')\r\n","repo_name":"kayleegstroud/plantwebscrape","sub_path":"plantwebscrape/webscrape/webscrape.py","file_name":"webscrape.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"69886817904","text":"from web3.auto import w3\r\nimport json\r\nimport sqlite3 as sql\r\nuniswap_address = w3.toChecksumAddress('0x7a250d5630b4cf539739df2c5dacb4c659f2488d')\r\nuniswap_contract = w3.eth.contract(uniswap_address, abi=json.load(open('UniswapABI.json', 'r'))['abi'])\r\n\r\ncompetition = [w3.toChecksumAddress('0x000000000000084e91743124a982076c59f10084'),\r\n w3.toChecksumAddress('0x00000000000064c443ef440577C26525A3C34A30'),\r\n w3.toChecksumAddress('0x00000000e8080dB3Ed60313725643D38beC42071'),\r\n w3.toChecksumAddress('0xC6E6dd6A0C61651d1bC055Dc1c22418a729d41Bb'),\r\n w3.toChecksumAddress('0xb5917A48a99C1f8C76119a5133fdE1169ec11170'),\r\n w3.toChecksumAddress('0x0d43d01c2262e1935c200ccac989af024269c07e'),\r\n w3.toChecksumAddress('0x0555bdebc3429585b4594285a76f853725a49532'),\r\n w3.toChecksumAddress('0xe34820500dcd2a2c3c4ce2c1cac561e30ede0dc7'),\r\n w3.toChecksumAddress('0x51acf0af77adcb20de482e1cd678f620baf07e0c'),\r\n w3.toChecksumAddress('0x0000000033431f236e97fa549b367827360effd7'),\r\n w3.toChecksumAddress('0x5b9cda9a39dccb8d5def007b72c6a969fd5bb267')]\r\n\r\naddresses = [w3.toChecksumAddress('0xb6b7cc8c20a25d886f3feff988d15d267f71ac7c'),\r\n w3.toChecksumAddress('0x5b9cda9a39dccb8d5def007b72c6a969fd5bb267')]\r\n\r\ndef expected_return_fees(token_pool, weth_pool, value, fee=997, pct=1):\r\n return (value * fee * token_pool / (weth_pool * 1000 + value * fee))*pct\r\n\r\ndef get_acceptable_tokens(sql,\r\n to='0xda1faeb056a2f568b138ca0ad9ad8a51915ba336',\r\n from_='0xda1faeb056a2f568b138ca0ad9ad8a51915ba336',\r\n fun=lambda x: '0x' + x[34:74]):\r\n import pandas as pd\r\n conn = sql.connect('etherscan_analysis/main_db.db')\r\n data = pd.read_sql('select * from transactions', conn)\r\n d = data[(data.to == to) | (data['from'] == from_)].copy()\r\n t = d.groupby('blockNumber').apply(lambda x: x.iloc[-1].value - x.iloc[0].value)\r\n t = t[t > 0]\r\n # return d.input.apply(fun).unique().tolist()\r\n return d[d.blockNumber.isin(t.index)].contractAddress.unique().tolist()\r\n\r\n\r\n\r\ndef get_acceptable_tokens_tx(sql,\r\n to='0xda1faeb056a2f568b138ca0ad9ad8a51915ba336',\r\n from_='0xda1faeb056a2f568b138ca0ad9ad8a51915ba336',\r\n currency='WETH',\r\n fun=lambda x: '0x' + x[34:74]):\r\n import pandas as pd\r\n conn = sql.connect('etherscan_analysis/main_db.db')\r\n data = pd.read_sql('select * from transactions', conn)\r\n print(len(data))\r\n d = data[(data.to == to) | (data['from'] == from_)].copy()\r\n t = d.groupby('blockNumber').apply(lambda x: pd.Series([x.iloc[-1].value - 
x.iloc[0].value, len(x), x.iloc[0].tokenSymbol]))\r\n t = t[(t[1] > 0) & (t[1] == 4) & (t[2] == currency)]\r\n # return d.input.apply(fun).unique().tolist()\r\n return d[d.blockNumber.isin(t.index)]\r\n\r\ndef get_blocks(from_, block_number, n):\r\n # importing the requests library\r\n import requests\r\n\r\n # api-endpoint\r\n URL = \"https://blocks.flashbots.net/v1/blocks\"\r\n # defining a params dict for the parameters to be sent to the API\r\n i = block_number\r\n while 'd' not in locals() and i < block_number+n:\r\n try:\r\n PARAMS = {'from': from_,\r\n 'block_number': i}\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.get(url=URL, params=PARAMS)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n index = list(filter(lambda x: x['eoa_address'] == from_,\r\n data['blocks'][0]['transactions']))[0]['bundle_index']\r\n # dict(map(lambda x: (x['total_miner_reward'], x['gas_used']),\r\n # list(filter(lambda x: x['bundle_index'] == index, data['blocks'][0]['transactions']))))\r\n d = dict(map(lambda x: [int(x['total_miner_reward']), x['gas_used']],\r\n list(filter(lambda x: x['bundle_index'] == index,\r\n data['blocks'][0]['transactions']))))\r\n print(i)\r\n except:\r\n i += 1\r\n if 'd' not in locals():\r\n return (False, False, False, False)\r\n print('Bundle len: ', len(d))\r\n return sum(d.keys()),\\\r\n sum(d.values()),\\\r\n data['blocks'][0]['miner'],\\\r\n sum(d.keys())/sum(d.values())\r\n\r\n\r\ndef check_pct_token(contract_router=\"0x78d1866129ee81b8bc0a95bcbb2c633a6b80ebee\",\r\n start_block=None,\r\n end_block=None):\r\n from etherscan_analysis.EtherScan import EtherScan\r\n from tests.ABIs import uniswap_pair_abi\r\n\r\n etherScan = EtherScan(w3.toChecksumAddress(contract_router),\r\n 'BEKN8SC2I4BRHXR6TGMATP7PRMVIBC4XYU',\r\n db='main_db.db')\r\n if start_block == None or end_block == None:\r\n etherScan.get_transactions(endblock=str(w3.eth.blockNumber),\r\n startblock=str(w3.eth.blockNumber-10000),\r\n db_insert=False)\r\n else:\r\n etherScan.get_transactions(endblock=str(end_block),\r\n startblock=str(start_block),\r\n db_insert=False)\r\n\r\n contract = w3.eth.contract(w3.toChecksumAddress(contract_router), abi=uniswap_pair_abi)\r\n data = etherScan.pd_data.copy()\r\n data.value = data[['value', 'tokenDecimal']].apply(lambda x: x.value*10**x.tokenDecimal, 1)\r\n data2 = data[data.to == contract_router.lower()][\r\n ['blockNumber', 'hash', 'transactionIndex', 'contractAddress', 'value']]. 
\\\r\n merge(data[data.to != contract_router.lower()][['hash', 'contractAddress', 'value']], on='hash')\r\n r = []\r\n s = 0\r\n for ind, i in enumerate(data2.blockNumber):\r\n if ind/len(data2) >= s:\r\n print(ind/len(data2))\r\n s += .1\r\n r.append(contract.functions.getReserves().call(block_identifier=i - 1))\r\n data2['reserves'] = r\r\n data2.contractAddress_x = data2.contractAddress_x.apply(w3.toChecksumAddress)\r\n data2.contractAddress_y = data2.contractAddress_y.apply(w3.toChecksumAddress)\r\n return data2.groupby('blockNumber').apply(token_pct_helper, contract=contract, first=contract.functions.token0().call())\r\n\r\n\r\ndef token_pct_helper(df, contract, first):\r\n # reserve0, reserve1, t = contract.functions.getReserves().call(block_identifier=df.blockNumber.iloc[0]-1)\r\n if df.contractAddress_x.iloc[0] == first:\r\n return df.value_y.iloc[0], expected_return_fees(df.reserves.iloc[0][1], df.reserves.iloc[0][0], df.value_x.iloc[0])\r\n else:\r\n return df.value_y.iloc[0], expected_return_fees(df.reserves.iloc[0][0], df.reserves.iloc[0][1], df.value_x.iloc[0])\r\n\r\ndef frontrun2(data, transaction_cache):\r\n tr_cache_temp = [] # Added temp cache to delete confirmed transactions from trans_cache\r\n for fr, tx in data['pending'].items():\r\n for i, t in tx.items():\r\n if not tx:\r\n continue\r\n print('Invalid tx')\r\n elif t[u'hash'] in transaction_cache:\r\n tr_cache_temp.append(t[u'hash'])\r\n else:\r\n if t[u'to'] == '0x00000000000064c443ef440577C26525A3C34A30':\r\n print('Cunt front running')\r\n print(t)\r\n handle_transaction(t)\r\n tr_cache_temp.append(t[u'hash'])\r\n transaction_cache = tr_cache_temp\r\n return len(data['pending']), transaction_cache\r\n\r\n\r\ndef handle_transaction(tx):\r\n if tx[u'blockHash'] != None:\r\n return\r\n\r\n gas_price = int(tx[u'gasPrice'], 16)\r\n one_gwei = int(1e9)\r\n my_gas_price = gas_price + one_gwei\r\n if triggers_buy(tx):\r\n # print('Front-running!!!')\r\n print()\r\n print(tx[u'hash'], int(tx[u'value'], 16) / 10 ** 18)\r\n method, params = uniswap_contract.decode_function_input(tx[u'input'])\r\n l = list(map(lambda x: contract_df[contract_df.token == x].name.iloc[0],\r\n filter(lambda x: w3.toChecksumAddress(x) in contract_df.token.values, params['path'])))\r\n print(method.fn_name, l)\r\n if 'swapExact' in method.fn_name:\r\n print('AmountOutMin: ', params[u'amountOutMin'] / 10 ** 18)\r\n if method.fn_name in ['swapExactETHForTokens', 'swapExactTokensForETH', 'swapETHForExactTokens',\r\n 'swapTokensForExactETH']:\r\n print('Ratio: ', contracts[l[0]][1].call() / contracts[l[0]][0].call())\r\n print('Gas price: ', gas_price / 10 ** 9)\r\n\r\n\r\ndef triggers_buy(tx):\r\n if tx[u'to'] != uniswap_address:\r\n return False\r\n\r\n if tx['input'] == '0x':\r\n return False\r\n try:\r\n method, params = uniswap_contract.decode_function_input(tx[u'input'])\r\n except ValueError as e:\r\n print(e)\r\n pass\r\n except:\r\n pass\r\n return method.fn_name in ['swapExactETHForTokens',\r\n 'swapExactTokensForETH',\r\n 'swapETHForExactTokens',\r\n 'swapTokensForExactETH',\r\n 'swapExactTokensForTokens',\r\n 'swapTokensForExactTokens'\r\n ] and any(map(lambda x: x in contract_df.token.values, params['path'])) \\\r\n and int(tx[u'value'], 16) >= BUY_THRESHOLD\r\n\r\ndef from_private_key(private_key_bytes):\r\n import codecs\r\n import ecdsa\r\n from Crypto.Hash import keccak\r\n import os\r\n\r\n key = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key\r\n\r\n key_bytes = key.to_string()\r\n private_key = 
codecs.encode(private_key_bytes, 'hex')\r\n public_key = codecs.encode(key_bytes, 'hex')\r\n\r\n print(\"Private key: \", private_key)\r\n print(\"Public key: \", public_key)\r\n\r\n public_key_bytes = codecs.decode(public_key, 'hex')\r\n\r\n hash = keccak.new(digest_bits=256)\r\n hash.update(public_key_bytes)\r\n keccak_digest = hash.hexdigest()\r\n\r\n address = '0x' + keccak_digest[-40:]\r\n return address\r\n\r\ndef sha3(seed):\r\n from Crypto.Hash import keccak\r\n sha3_256 = lambda x: keccak.new(digest_bits=256, data=x).digest()\r\n return sha3_256(str(seed))\r\n\r\ndef normalize_address(x, allow_blank=False):\r\n from rlp.utils import decode_hex\r\n if allow_blank and x == '':\r\n return ''\r\n if len(x) in (42, 50) and x[:2] == '0x':\r\n x = x[2:]\r\n if len(x) in (40, 48):\r\n x = decode_hex(x)\r\n if len(x) == 24:\r\n assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:]\r\n x = x[:20]\r\n if len(x) != 20:\r\n raise Exception(\"Invalid address format: %r\" % x)\r\n return x\r\n\r\ndef mk_contract_address(sender, nonce):\r\n import rlp\r\n return sha3(rlp.encode([normalize_address(sender), nonce]))[12:]\r\n\r\ndef check_effective_gas():\r\n import pandas as pd\r\n import sqlite3 as sql\r\n conn = sql.connect('etherscan_analysis/main_db.db')\r\n data = pd.read_sql('select * from BundleData', conn)\r\n data['EffPriceIncluded'] = data[['TargetFromAddress', 'BlockTarget', 'NBlocks']].apply(lambda x: get_blocks(x[0], x[1], x[2]), 1)\r\n # data.EffPriceIncluded = data.EffPriceIncluded.apply(lambda x: (False, False, False) if x==False else x)\r\n data[['minerPayment', 'totalGas', 'minerAddress', 'EffPriceIncluded']] = pd.DataFrame(data.EffPriceIncluded.tolist(), index=data.index)\r\n # data.EffPriceIncluded = data.minerPayment.where(~data.minerPayment, data.minerPayment.div(data.totalGas))\r\n return data\r\n\r\n# import time\r\n#\r\n# l = w3.eth.filter('latest')\r\n#\r\n# while True:\r\n# ts = time.time()\r\n# ll = l.get_new_entries()\r\n# for i in ll:\r\n# # print('===== Block hash: ', i.hex())\r\n# block_hash = i.hex()\r\n# block = w3.eth.getBlock(block_hash, full_transactions=True)\r\n#\r\n# for tx in transactions:\r\n# if tx[u'to'] in [w3.toChecksumAddress('0x00000000acd5ca17eee6d92d9ca121543126cce1'),\r\n# w3.toChecksumAddress('0x8a69b34968aab824295e10b1c1fa49b453c0fada'),\r\n# w3.toChecksumAddress('0xc6e6dd6a0c61651d1bc055dc1c22418a729d41bb'),\r\n# w3.toChecksumAddress('0xb5917a48a99c1f8c76119a5133fde1169ec11170'),\r\n# w3.toChecksumAddress('0x51acf0af77adcb20de482e1cd678f620baf07e0c')]:\r\n# transactions = block['transactions']\r\n# print('===== Block Number: ', block['number'])\r\n# print(ts, ' From wallet: ', tx['from'])\r\n# print(ts, ' Value ETH: ', tx['value'])\r\n\r\nif __name__ == '__main__':\r\n import numpy as np\r\n # tokens = get_acceptable_tokens(sql,\r\n # '0x000000005736775feb0c8568e7dee77222a26880',\r\n # '0x000000005736775feb0c8568e7dee77222a26880')\r\n # tokens2 = get_acceptable_tokens(sql,\r\n # '0xda1faeb056a2f568b138ca0ad9ad8a51915ba336',\r\n # '0xda1faeb056a2f568b138ca0ad9ad8a51915ba336')\r\n # data = get_acceptable_tokens_tx(sql,\r\n # '0x00000000b7ca7e12dcc72290d1fe47b2ef14c607',\r\n # '0x00000000b7ca7e12dcc72290d1fe47b2ef14c607')\r\n # from tests.ABIs import uniswap_pair_abi\r\n # import pandas as pd\r\n # data = check_pct_token('0xd01a189b95d2b07600de7003d6122e843e27447b')\r\n\r\n # data = get_blocks(w3.toChecksumAddress('0x60fa342f253addbc138c899afd0957e2c2d4da3d'), 12866864)\r\n data = check_effective_gas()\r\n data['ratio'] = 
data.MyEfPrice.div(data.EffPriceIncluded.where(data.EffPriceIncluded != False, np.inf))","repo_name":"ZigaMr/uniswap_sandwich_bot","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":13784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72880335662","text":"from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom .forms import SignUpForm\nfrom .forms import ContactForm\n# Create your views here.\n\ndef home(request):\n\ttitle = \"Welcome\"\n\t#if request.user.is_authenticated():\n\t#\ttitle = \"My Title %s\" %(request.user)\n\t\n\tform = SignUpForm(request.POST or None)\n\tcontext = {\n\t\"title\": title,\n\t\"form\": form,\n\t}\n\tif form.is_valid():\n\t\t# form.save()\n\n\t\tinstance = form.save(commit = False)\n\t\tfull_name = form.cleaned_data.get(\"full_name\")\n\t\tif not full_name:\n\t\t\tfull_name = \"New full name\"\n\t\tinstance.full_name = full_name\n\t\tinstance.save()\n\t\t# print instance\n\t\tcontext = {\n\t\t\"title\": \"Thank you\",\n\t\t}\n\treturn render(request, \"home.html\", context)\n\ndef contact(request):\n\tform = ContactForm(request.POST or None)\n\tform_email = form.cleaned_data.get(\"email\")\n\tform_message = form.cleaned_data.get(\"message\")\n\tform_name = form.cleaned_data.get(\"full_name\")\n\tcontext = {\n\t\t\"form\": form,\n\t}\n\tsubject = 'contact form' \n\tfrom_email = settings.EMAIL_HOST_USER\n\tto_email = from_email\n\t# send_mail(\n # subject,\n # 'Here is the message.',\n # from_email,\n # [to_email],\n # fail_silently=False,\n\t# )\n\treturn render(request, \"forms.html\", context)","repo_name":"yamen225/tryDjango18","sub_path":"myenv/src/newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17673189478","text":"from freelancersdk.resources.messages.messages import post_message\nfrom freelancersdk.session import Session\nfrom freelancersdk.resources.messages.exceptions import \\\n MessageNotCreatedException\nimport os\n\n\n# https://developers.freelancer.com/docs/use-cases/messaging#header-sending-a-message\ndef sample_post_message():\n url = os.environ.get('FLN_URL')\n oauth_token = os.environ.get('FLN_OAUTH_TOKEN')\n session = Session(oauth_token=oauth_token, url=url)\n\n thread_data = {\n 'thread_id': 401,\n 'message': \"Let's talk\",\n }\n\n try:\n t = post_message(session, **thread_data)\n except MessageNotCreatedException as e:\n print('Error message: {}'.format(e.message))\n print('Server response: {}'.format(e.error_code))\n return None\n else:\n return t\n\n\nt = sample_post_message()\nif t:\n print('Message created: {} (message_id={})'.format(t, t.id))\n","repo_name":"freelancer/freelancer-sdk-python","sub_path":"examples/create_message.py","file_name":"create_message.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"91"} +{"seq_id":"19013398666","text":"# coding:utf-8\r\n# 选择排序:首先在未排序序列中找到最小元素,存放到排序序列的起始位置,然后,再从剩余未排序元素中继续寻找最小元素,然后放到已排序序列的末尾。以此类推,直到所有元素均排序完毕\r\n\"\"\"\r\n时间复杂度:O(N^2)\r\n空间复杂度:O(1)\r\n不稳定排序:比如说 5,8,5,2,9 这样一组数据,使用选择排序算法来排序的话,第一次找到最小元素2,与第一个5 交换位置,那第一个5 和中间的5 顺序就变了\r\n原地排序\r\n\"\"\"\r\n\r\n\r\n# 以下标序号为标准\r\ndef Selectionsort1(arr):\r\n len_arr = len(arr)\r\n\r\n for i in range(len_arr):\r\n min = i # 假定最小值的下标为第一个\r\n\r\n for 
j in range(i + 1, len_arr): # 上一个值右边的数组\r\n if arr[min] > arr[j]: # 使min为最小值,遇到比min小的值就把下标进行替换\r\n min = j\r\n\r\n arr[i], arr[min] = arr[min], arr[i] # 交换最小值到左边\r\n return arr\r\n\r\n","repo_name":"adapt-to/WriteCode","sub_path":"排序算法/03 选择排序.py","file_name":"03 选择排序.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19460855796","text":"import inspect\nimport os\nimport re\nimport sys\nfrom functools import cache\nfrom unittest import TestCase\n\nfrom . import compiler\nfrom . import exceptions as cex\nfrom . import module_helper as mh\nfrom .context import CasioType\nfrom .bytecode import Bytecode\n\n\nclass TestCompiler(TestCase):\n pass\n\n\nFIND_TEST_PATTERN = re.compile(r\"^[ \\t]*#.*@test[ \\t]+([a-z0-9_-]+)[ \\t]?(.*)\", re.IGNORECASE)\nFIND_BYTE_REPL = re.compile(r\"\\{([A-Z_]+)}\") # TODO: allow {,} escape\n\n\nclass TestLoader:\n def __init__(self, path: str):\n self.path = path\n self.filename = os.path.basename(path)\n with open(path) as f:\n self.source = f.read()\n self.tester: TestCase|None = None\n self.line_no = 0\n self.lines = self.source.splitlines()\n\n def generate_tests(self, cls: type):\n test_map = get_test_map()\n\n test_count = 0\n for line_num, line in enumerate(self.lines, start=1):\n match = FIND_TEST_PATTERN.match(line)\n if not match:\n continue\n\n method_name = match.group(1)\n # TODO: allow spaces in arguments when enclosed in \"\n # like ``@test symbol var \"hello world\"`` would not work properly\n args = match.group(2).strip().split()\n method_name = method_name.replace(\"-\", \"_\")\n if method_name not in test_map:\n print(f\"[Test Loader] Unknown test '{method_name}' in {self.filename}\", file=sys.stderr)\n continue\n\n test_method = test_map[method_name]\n\n # intermediate values for the function capture\n this_line_num = line_num\n this_line = line\n\n def some_func(tester_self):\n # called as if this function is a member in the tester class (TestCompiler->self == tester_self)\n self.tester = tester_self\n self.line_no = this_line_num\n vargs = [parse_value(v) for v in args]\n try:\n test_method(self, *vargs) # some method in TestLoader beginning with \"test_\"\n except TypeError as e:\n print(f\"[Test Loader] TESTER ERROR:{self.msg()}\", file=sys.stderr)\n print(this_line, file=sys.stderr)\n print(f\"Method name: test_{method_name}, Args parsed: {vargs}\", file=sys.stderr)\n raise e\n\n # this is basically taken from https://stackoverflow.com/a/2799009\n # update class of interest with new test method\n some_func.__name__ = f\"test_{self.filename}\"\n setattr(cls, some_func.__name__, some_func)\n test_count += 1\n return test_count\n\n def msg(self):\n return f\"\\nTest File \\\"{self.path}\\\", line {self.line_no}\"\n\n def compile(self):\n return compiler.compile_source(self.filename, self.source)\n\n def test_compiles(self):\n \"\"\" test that the file compiles and return the context if it does \"\"\"\n try:\n return self.compile()\n except cex.CasioException:\n self.tester.fail(f\"Test expected file to compile: {self.msg()}\")\n except Exception as e:\n self.tester.fail(f\"Test expected file to compile: {self.msg()}\")\n raise e\n\n def test_err(self, ename):\n \"\"\" test that a certain error occured while compiling the file \"\"\"\n if ename[0].islower():\n ename = ename[0].upper() + ename[1:]\n etype = f\"Casio{ename}Error\"\n if not hasattr(cex, etype):\n self.tester.fail(f\"{self.msg()}\\n{etype} does not exist\")\n return\n ex = 
getattr(cex, etype)\n if not issubclass(ex, cex.CasioException):\n self.tester.fail(f\"{self.msg()}\\n{etype} is not a casio exception\")\n return\n with self.tester.assertRaises(ex, msg=self.msg()):\n self.compile()\n\n def test_import(self, name, module_path):\n \"\"\" test that a certain symbol contains the module path \"\"\"\n context = self.test_compiles()\n self.tester.assertIn(name, context.symbols, self.msg())\n import_sym = context.symbols[name].value\n self.tester.assertIsInstance(import_sym, mh.ModulePath, self.msg())\n self.tester.assertEqual(import_sym, module_path, self.msg())\n\n def _test_symbol(self, type_: CasioType, bytes_: bytes, name):\n context = self.test_compiles()\n self.tester.assertIn(name, context.symbols, self.msg())\n sym = context.symbols[name]\n self.tester.assertEqual(type_, sym.type, self.msg())\n self.tester.assertEqual(bytes_, sym.value, self.msg())\n\n def test_symbol_str(self, name, value):\n \"\"\" test that a symbol contains a string value \"\"\"\n self._test_symbol(CasioType.STRING, b'\"' + str(value).encode() + b'\"', name)\n\n def test_symbol_num(self, name, value):\n \"\"\" test that a symbol contains a number value \"\"\"\n self._test_symbol(CasioType.NUMBER, str(value).encode(), name)\n\n def test_symbol_expr(self, name, value):\n \"\"\" test that a symbol contains a number with specific bytecode \"\"\"\n new_value = str(value).encode()\n for byte_repl in FIND_BYTE_REPL.finditer(value):\n match = byte_repl.group(0)\n text = byte_repl.group(1)\n if not hasattr(Bytecode, text):\n self.tester.fail(f\"{self.msg()}\\n{text} bytecode does not exist\")\n return\n casio_bytes = getattr(Bytecode, text)\n if not isinstance(casio_bytes, bytes):\n self.tester.fail(f\"{self.msg()}\\n{text} is not valid bytecode\")\n return\n new_value = new_value.replace(match.encode(), casio_bytes, 1)\n\n self._test_symbol(CasioType.NUMBER, new_value, name)\n\n@cache\ndef get_test_map() -> dict[str, callable]:\n d = {}\n for name, member in inspect.getmembers(TestLoader):\n if inspect.isfunction(member) and name.startswith(\"test_\"):\n d[name[5:]] = member # remove test_\n return d\n\n\ndef parse_value(v: str):\n try:\n return int(v)\n except ValueError:\n pass\n try:\n return float(v)\n except ValueError:\n pass\n # else str\n # always remove quotes\n if v[0] == '\"' and v[-1] == '\"':\n return v[1:-1]\n return v\n\n\ndef load_compiler_tests():\n file_count = 0\n test_count = 0\n parent_dir = os.path.dirname(__file__)\n test_dir = os.path.join(parent_dir, \"test_data\")\n for dir_path, dir_names, file_names in os.walk(test_dir):\n for test_py in file_names:\n if not test_py.endswith(\".py\"):\n print(f\"[Test Loader] Skipping non-py file '{test_py}'\")\n continue\n\n test_file = os.path.join(dir_path, test_py)\n tester = TestLoader(test_file) # cut off .py\n tests_generated = tester.generate_tests(TestCompiler)\n if tests_generated == 0:\n print(f\"[Test Loader] No tests found in file {test_py}\", file=sys.stderr) # TODO: use logging\n else:\n file_count += 1\n test_count += tests_generated\n\n print(f\"[Test Loader] Loaded {test_count} tests from {file_count} files\")\n\n\nload_compiler_tests()\n","repo_name":"electrohedric/pycasio","sub_path":"pycasio/test_compiler.py","file_name":"test_compiler.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6518652559","text":"import time\nimport pymysql\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom 
django.http import JsonResponse\nfrom django.forms.models import model_to_dict\nfrom django.views.decorators.csrf import csrf_exempt\nfrom one.models import Binance, Spider, OKX, Huobi, Chain, Analysis\nimport json\n\n\n# redirect 重定向\n# Create your views here.\n# 第一个页面\ndef login(request):\n if request.method == \"GET\":\n return render(request, \"login.html\")\n else:\n response = request.POST\n user = response[\"user\"]\n password = response[\"password\"]\n if user == 'crypto' and password == 'root':\n return redirect(\"https://www.runker54.top/search\")\n else:\n return render(request, \"login.html\", {\"error_msg\": \"用户名或密码错误!\"})\n\n\ndef data_search(request):\n if request.method == 'GET':\n return render(request, \"datasearch.html\")\n\n\n@csrf_exempt\ndef data_post(request):\n if request.method == 'POST':\n parms = request.POST\n coin_name = str(parms.get('coin')).upper()\n change1m = parms.get('_1m')\n change5m = parms.get('_5m')\n change15m = parms.get('_15m')\n change30m = parms.get('_30m')\n data_dict = {}\n # 默认排序方法\n sortx = 'change_1m'\n if parms:\n data_dict[\"coin_pairs__contains\"] = coin_name\n if change1m:\n data_dict[\"change_1m__gte\"] = float(change1m)\n sortx = 'change_1m'\n if change5m:\n data_dict[\"change_5m__gte\"] = float(change5m)\n sortx = 'change_5m'\n if change15m:\n data_dict[\"change_15m__gte\"] = float(change15m)\n sortx = 'change_15m'\n if change30m:\n data_dict[\"change_30m__gte\"] = float(change30m)\n sortx = 'change_30m'\n data_list = list(Binance.objects.all().order_by(f'-{sortx}').values().filter(**data_dict))\n ret = {\"data\": data_list}\n return JsonResponse(ret)\n\n\n@csrf_exempt\ndef analysis_data(request):\n # 获取每个cex的coin_pairs,获得交集coin_pairs\n binance_pairs = set(Binance.objects.values_list('coin_pairs'))\n okx_pairs = set(OKX.objects.values_list('coin_pairs_temp'))\n huobi_pairs = set(Huobi.objects.values_list('coin_pairs_temp'))\n array = [_[0] for _ in list(binance_pairs & huobi_pairs & okx_pairs)]\n if request.method == 'GET':\n return render(request, 'analysis.html', {\"array\": array})\n if request.method == 'POST':\n # 获取Chain的coin_pairs信息\n chain_data = {_x: _y if _y else 0.00001 for _x, _y in list(Chain.objects.filter(coin_pairs__in=array).values_list('coin_pairs', 'coin_price'))}\n # 获取各个cex的交集coin_pairs信息\n binance_data = {_x: _y if _y else 0.00001 for _x, _y in list(Binance.objects.filter(coin_pairs__in=array).values_list('coin_pairs', 'coin_price'))}\n okx_data = {_x: _y if _y else 0.00001 for _x, _y in list(OKX.objects.filter(coin_pairs_temp__in=array).values_list('coin_pairs_temp', 'coin_price'))}\n huobi_data = {_x: _y if _y else 0.00001 for _x, _y in list(Huobi.objects.filter(coin_pairs_temp__in=array).values_list('coin_pairs_temp', 'coin_price'))}\n # 构建规范的字典数据\n result = {_: {\"Binance\": binance_data[_], \"OKX\": okx_data[_], \"Huobi\": huobi_data[_], \"Chain\": chain_data[_],\n \"price_diff\": max([binance_data[_], okx_data[_], huobi_data[_], chain_data[_]]) - min([binance_data[_], okx_data[_], huobi_data[_], chain_data[_]]),\n \"per\": (abs(binance_data[_] - chain_data[_]) / binance_data[_]) * 100} for _ in array}\n result = dict(sorted(result.items(), key=lambda item: (item[1]['per'], item[1]['price_diff']), reverse=False))\n # 将数据写入analysis\n for one_el in result:\n Analysis.objects.filter(coin_pairs=one_el).update(binance=result[one_el]['Binance'],\n okx=result[one_el]['OKX'], huobi=result[one_el]['Huobi'],\n chain=result[one_el]['Chain'],\n pricediff=result[one_el]['price_diff'])\n ret = {'coin_list': array, 
'data': result}\n return JsonResponse(ret)\n\n\n@csrf_exempt\ndef spider_page(request):\n if request.method == 'GET':\n return render(request, 'spiderpage.html')\n if request.method == 'POST':\n # Binance Noce top 10\n binace_noce = list(Spider.objects.all().order_by('created').values().filter(source='Binance'))[-20:]\n # OKX Noce top 10\n okx_noce = list(Spider.objects.all().order_by('created').values().filter(source='OKX'))[-20:]\n ret = {'Binance': binace_noce, 'OKX': okx_noce}\n return JsonResponse(ret)\n\n\n@csrf_exempt\ndef navigation(request):\n return render(request, 'navigation.html')\n","repo_name":"runker54/mysite","sub_path":"one/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"27250923799","text":"\"\"\"The user interface to the class tableToShape.\"\"\"\r\n__author__ = \"Martin Lacayo-Emery \"\r\n\r\nimport sys\r\nimport lib.shp.tableToShape\r\n\r\nif __name__ == \"__main__\":\r\n inName=sys.argv[1]\r\n xField=sys.argv[2]\r\n yField=sys.argv[3]\r\n outName=sys.argv[4]\r\n quadrant=sys.argv[5]\r\n shape=lib.shp.tableToShape.shapefile.shapeTypes[sys.argv[6]]\r\n if sys.argv[7]==\"true\":\r\n changeTypes=True\r\n else:\r\n changeTypes=False\r\n \r\n lib.shp.tableToShape.TableToShape(inName,xField,yField,outName,quadrant,shape,changeTypes)","repo_name":"mlacayoemery/pointpatternanalyst","sub_path":"uiDBFshape.py","file_name":"uiDBFshape.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4684107445","text":"import unittest\nimport numpy as np\nfrom sparseklearn import Sparsifier, KMeans\n\nfrom tests import DataGenerator\n\nclass TestSparsifier(unittest.TestCase):\n\n def assertArrayEqual(self, x, y):\n self.assertTrue(np.allclose(x, y, rtol=1e-6))\n\n def setUp(self):\n self.td = DataGenerator()\n kmeans = KMeans(n_components = 2,\n num_feat_full = 5, num_feat_comp = 3, num_feat_shared = 1,\n num_samp = 4, transform = 'dct',\n D_indices = self.td.D_indices, mask = self.td.mask)\n self.kmeans = kmeans\n\n def test_fit_sparsifier(self):\n kmeans = KMeans(n_components = 2,\n num_feat_full = 5, num_feat_comp = 3, num_feat_shared = 1,\n num_samp = 4, transform = 'dct',\n D_indices = self.td.D_indices, mask = self.td.mask)\n kmeans.fit_sparsifier(X = self.td.X)\n self.assertArrayEqual(self.td.RHDX, kmeans.RHDX)\n self.assertArrayEqual(self.td.mask, kmeans.mask)\n self.assertEqual(self.td.N, kmeans.num_samp)\n self.assertEqual(self.td.Q, kmeans.num_feat_comp)\n self.assertEqual(self.td.P, kmeans.num_feat_full)\n\n def test_fit(self):\n self.kmeans.fit(X = self.td.X)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"EricKightley/sparseklearn","sub_path":"tests/test_kmeans.py","file_name":"test_kmeans.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"19700628944","text":"from subprocess import Popen\nfrom pywinauto import Desktop\nfrom pywinauto import Application\nimport pyautogui\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom pywinauto.application import Application\nimport time\nimport csv\nimport os\nimport sys\nimport pywinauto\nfrom datetime import datetime\n\n\ndef clearTextBySelectAll():\n pyautogui.moveRel(30, 0)\n pyautogui.dragRel(-500, 0, 1, button='left')\n 
pyautogui.press('del')\n\nfrom functions.functions_utils import tm_init\n\n## get the appliation handler from the init function\ntempla = tm_init()[0]\napp = tm_init()[1]\n\n## start \nmainSitesTab = templa.child_window(title='Sites', control_type='TabItem')\nmainSitesTab.click_input()\nmainSitesWindow = templa.child_window(title='Sites', control_type='Window')\n\n########################\n#\n# Setup Excel Sheet\n#\n########################\nsite_reallocate_sheet = 'Sites Re-Allocate' \ndf = pd.read_excel('test.xlsx', sheet_name=site_reallocate_sheet)\nprint(\"starting...\")\n\nfor i in df.index:\n siteName = df['SITE']\n csm = df['CSM']\n tablet = df['TABLET']\n status = df['STATUS']\n\n #print(\"Site Name:\" + siteName[i])\n #print(\"CSM: \" + csm[i])\n #print(\"iPad: \" + ipad[i])\n if status[i] == \"Done\" or status[i] == \"Skip\":\n print(str(siteName[i]) + \" is Done\")\n continue\n\n\n if status[i] == \"Stop\":\n print(\"Stop here\")\n break\n\n # click on the Code Edit Box\n mainSitesWindow.window(title='Name', control_type='ComboBox').click_input()\n pyautogui.typewrite(str(siteName[i]))\n\n #####################################\n # before open the site, \n # check if the site already set up correctly\n # check the CSM Name\n #####################################\n\n # mainSitesWindow.child_window(title=\"CSM\", control_type=\"ComboBox\").click_input()\n # pyautogui.typewrite(csm[i])\n\n # check if the CSM already assigned to this site\n #\n # MUST make the CSM on the first Column\n #\n csmExists = mainSitesWindow.child_window(title=str(csm[i]), control_type=\"DataItem\")\n \n if csmExists.exists(): \n print(\"site Code: \" + siteName[i])\n print(\"site Name: \" + siteName[i])\n print(\"Already assigned to \" + csm[i])\n print(\"#################################\")\n print(\" \")\n pyautogui.moveRel(-25, 25) \n pyautogui.click() # reset the select status\n\n else:\n print(\"CSM Different, need to change\")\n pyautogui.moveRel(-25, 25) \n pyautogui.doubleClick() # open the site by double click\n\n\n # # open analysis details dialouge window\n # #siteDetailWindow = app.window(title_re='Site Detail - *')\n siteDetailWindow = app.window(title_re='Site Detail - *')\n siteDetailWindow.wait('exists', timeout=15)\n siteDetailWindow.window(title='Analysis versions', control_type='TabItem').click_input()\n print(\"site name: \" + str(siteName[i]))\n\n\n ########################\n #\n # Need to check if the month is current month\n # if True: double click on itself\n # if False: click Add button\n #\n #######################\n #siteDetailWindow.print_control_identifiers()\n currentYearFull = datetime.now().strftime('%Y') # 2018\n currentMonth = datetime.now().strftime('%m') # month in number with 0 padding\n\n itemExist = False\n for j in range(1,32): # loop from 1 to 31\n titleDate= \"%s/%s/%s\" %(j,currentMonth,currentYearFull)\n lastAnalysisItem = siteDetailWindow.window(title=str(titleDate))\n if lastAnalysisItem.exists():\n itemExist = True\n break\n\n if itemExist: # if the current month entry exists\n lastAnalysisItem.click_input(double=True)\n print (\"open the last item\")\n else:\n siteDetailWindow['Add'].click_input()\n print (\"add new entry\")\n\n\n ## operate the site details analysis window\n siteAnalysisWindow = siteDetailWindow.child_window(title_re='Site Analysis Detail - *')\n siteAnalysisWindow.wait('exists', timeout=15)\n siteAnalysisWindow.window(title='Business analysis', control_type='TabItem').click_input()\n\n # change CSM and Tablet Number\n # Edit 39 = CSM, Edit 
43 = Tablet\n siteAnalysisWindow.Edit39.click_input()\n # pyautogui.moveRel(60,0)\n pyautogui.PAUSE = 1.5\n pyautogui.dragRel(-500,0)\n pyautogui.typewrite(str(csm[i]))\n pyautogui.press(\"tab\")\n pyautogui.PAUSE = 1.5\n # print(\"Located now to: \" + str(csm[i]))\n pyautogui.moveRel(500,15)\n pyautogui.click()\n # siteAnalysisWindow.Edit43.click_input()\n pyautogui.dragRel(-500,0)\n pyautogui.PAUSE = 1.5\n pyautogui.typewrite(tablet[i])\n pyautogui.press(\"tab\")\n pyautogui.PAUSE = 1.5\n pyautogui.moveRel(300,20)\n pyautogui.click()\n pyautogui.press(\"tab\")\n # press Accept button\n # Save\n siteAnalysisWindow.Accept.click_input()\n siteDetailWindow.Save.click_input()\n pyautogui.PAUSE = 1.5\n print(str(siteName[i]) + \": is Done now\")\n print(\"###############################\")\n print(\" \")\n\n","repo_name":"allan-2stars/Templa-Auto","sub_path":"SiteReassign-Name.py","file_name":"SiteReassign-Name.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28261930624","text":"import os\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nPROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Henrique Chehad', 'henrique.chehad@ipsafe.com.br'),\n ('Guilherme Bessa Rezende', 'guilherme.bessa@ipsafe.com.br'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'ipbx',\n 'USER': 'ipbx-gui',\n 'PASSWORD': 'ipbx-gu',\n 'HOST': '127.0.0.1',\n 'PORT': '',\n }\n}\n\nALLOWED_HOSTS = ['127.0.0.1']\nTIME_ZONE = 'America/Sao_Paulo'\nLANGUAGE_CODE = 'pt-br'\n\nSITE_ID = 1\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\nMEDIA_ROOT = ''\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip(\"/\"))\n\n\nMEDIA_URL = STATIC_URL + \"media/\"\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip(\"/\").split(\"/\"))\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, 'ipbx/static'),\n\n)\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nSECRET_KEY = 'j0q*n(kyumrkxns8iv!7c5o71&7f&8_g=p5!z+8ilgf(#9tagv'\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Required for Django Toolbar\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n \n)\n\n\nROOT_URLCONF = 'ipbx.urls'\n\nWSGI_APPLICATION = 'ipbx.wsgi.application'\n\nTEMPLATE_DIRS = (\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'ipbx.core',\n\n # Uncomment the next line to enable the admin:\n 'grappelli',\n\n #'yawdadmin',\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n 
'django.contrib.admindocs',\n 'south',\n 'sysmon',\n 'debug_toolbar',\n 'gunicorn',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nADMIN_SITE_NAME = 'iPBX'\nADMIN_SITE_DESCRIPTION = 'iPBX'\n\nLOGIN_REDIRECT_URL = \"/\"\n\n\nINTERNAL_IPS = ('127.0.0.1',)\nDEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n)\ndef custom_show_toolbar(request):\n return True # Always show toolbar, for example purposes only.\n\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,\n #'EXTRA_SIGNALS': ['myproject.signals.MySignal'],\n 'HIDE_DJANGO_SQL': False,\n 'TAG': 'div',\n 'ENABLE_STACKTRACES' : True,\n}\n","repo_name":"IPsafe/ipbx-gui","sub_path":"ipbx/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"8414042693","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom app.models import VisitorDeatils\nfrom app.models import UserDetail\n\ndef Details(request):\n try:\n userName = request.POST.get(\"uname\")\n Email = request.POST.get(\"email\")\n pw = request.POST.get(\"PW\")\n cpw = request.POST.get(\"CPW\")\n if userName!='' and Email!='' and pw!='' and cpw!='' and pw==cpw:\n UserDetail(userName=userName, Email=Email, pw=pw).save()\n return render(request, \"admin.html\")\n else:\n return render(request, \"UserSignup.html\")\n except ValueError:\n return render(request,\"UserSignup.html\")\n\ndef Login(request):\n if request.method == \"POST\":\n try:\n userName = request.POST.get(\"name\")\n pw = request.POST.get(\"pw\")\n try:\n res = UserDetail.objects.get(Email=userName)\n print(res.Email,res.pw)\n if userName == str(res.Email) and pw == res.pw:\n return render(request,\"user.html\")\n else:\n return render(request, \"UserLogin.html\", {\"msg\": \"Invalid Customer ID and Password\"})\n except UserDetail.DoesNotExist:\n return render(request, \"UserLogin.html\")\n except ValueError:\n return render(request,\"UserLogin.html\")\n\ndef VisitorData(request):\n if request.method == \"POST\":\n try:\n username = request.POST.get(\"name\")\n Cname = request.POST.get(\"Cname\")\n Email = request.POST.get(\"email\")\n ContactNo = request.POST.get(\"ContactNo\")\n Date = request.POST.get(\"date\")\n Add = request.POST.get(\"add\")\n city = request.POST.get(\"City\")\n state = request.POST.get(\"state\")\n note = request.POST.get(\"note\")\n VisitorDeatils(UserName=username, Cname=Cname, Email=Email, ContactNo=ContactNo, Date=Date, Address=Add,\n City=city, State=state, note=note).save()\n return render(request,\"user.html\")\n except ValueError:\n return render(request, 
\"visitorEntryForm.html\")\n\n\n\ndef checkAdmin(request):\n try:\n name = request.POST.get(\"name\")\n pw = request.POST.get(\"pw\")\n if name == \"vijay chawada\" and pw == \"vijay chawada\":\n return render(request,\"admin.html\")\n else:\n return render(request,\"adminLogin.html\")\n except ValueError:\n return render(request,\"adminLogin.html\")\n\n\ndef viewVisitor(request):\n try:\n res = VisitorDeatils.objects.all()\n return render(request,\"viewVisitor.html\",{\"Data\":res})\n except:\n return render(request,\"user.html\")","repo_name":"salman-pydev/VistorEntryBook","sub_path":"Project1/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4699028877","text":"from pathlib import Path\nfrom pyshd import pushd\nimport os\n\n\ndef cwd():\n return Path(os.getcwd())\n\n\ndef test_pushd(tmpdir):\n prev = Path(os.getcwd())\n root = Path(str(tmpdir)) # Pytest provides a pathlib2 instance\n path = root / 'some' / 'dir'\n path.mkdir(parents=True)\n\n assert cwd() == prev\n with pushd(root):\n assert cwd() == root\n\n with pushd(path):\n assert cwd() == path\n\n assert cwd() == root\n assert cwd() == prev\n","repo_name":"equinor/pyshd","sub_path":"pyshd/tests/test_pyshd.py","file_name":"test_pyshd.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"29534757840","text":"import re\nimport numpy as np\n\nfrom pymatgen.io.vasp.outputs import *\n\nfrom core.dao.vasp import VaspReader\n\nfrom matplotlib import rc, patches\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\n\nrc('text', usetex=True)\nparams = {'legend.fontsize': '12',\n 'figure.figsize': (6, 5),\n 'axes.labelsize': 20,\n 'axes.titlesize': 16,\n 'xtick.labelsize': 16,\n 'ytick.labelsize': 16}\npylab.rcParams.update(params)\n\n\nclass PhononMode(object):\n\n def __init__(self, index=0, freq=None, eigenvec=None):\n self.index = index\n self.freq = freq # in THz\n self.eigenvec = eigenvec\n\n @property\n def is_imaginary(self):\n return np.imag(self.freq) != 0.0\n\n\nclass PhononSepctrum(object):\n\n def __init__(self):\n self.modes = []\n\n def get_mode_by_freq(self, freq):\n for m in self.modes:\n if m.freq == freq * TETRA * 2 * math.pi:\n return m\n\n\ndef get_phonon_spectrum():\n global spectrum, freq, mode, read_eigenvectors, mode\n f = open('OUTCAR', 'r')\n all_lines = f.readlines()\n spectrum = PhononSepctrum()\n for line in all_lines:\n ls = line.split()\n # Read in mode eigenfrequencies\n if re.match(r\"(\\s+)(\\d+)(\\s+)f(\\D+)=(\\s+)(\\d+).(\\d+)(\\s+)THz\", line):\n if 'f/i' not in line:\n freq = float(ls[3])\n else:\n freq = float(ls[2]) * 1j\n\n freq = freq * TETRA * 2 * math.pi\n mode = PhononMode(index=int(ls[0]) - 1, freq=freq)\n\n read_eigenvectors = True\n this_eigenvector = []\n\n # Read in mode eigenvectors\n if read_eigenvectors:\n if re.match(r\"(\\s+)(\\S+).(\\d+)\" * 6, line):\n this_atom_mode = np.array([float(k) for k in line.strip().split()[-3:]])\n this_eigenvector.append(this_atom_mode)\n\n elif ('X ' not in line) and ('THz' not in line):\n read_eigenvectors = False\n mode.eigenvec = np.array(this_eigenvector)\n spectrum.modes.append(mode)\n return spectrum\n\n\nTETRA = 1e12\nELECTRON_CHARGE = 1.602176487e-19 # Coulomb\nA_IN_METRE = 1e-10 # coverts angstorm to meter\nAMU = 1.66e-27 # kg\nEPSILON_0 = 8.854E-12 # C2N−1m−2\n\napply_2D_correction = 
False\nread_eigenvectors = False\n\nspectrum = get_phonon_spectrum()\n\noutcar = Outcar('./OUTCAR')\nborn_charges = outcar.born\n\ncrystal = VaspReader(input_location='./POSCAR').read_POSCAR()\nprint(crystal.lattice.volume)\n\nif apply_2D_correction:\n all_z_positions = np.array([a.scaled_position.z for a in crystal.asymmetric_unit[0].atoms])\n all_z_positions = all_z_positions - np.round(all_z_positions)\n all_z_positions = [z * crystal.lattice.c for z in all_z_positions]\n slab_thick = max(all_z_positions) - min(all_z_positions)\n\nfrom core.models.element import atomic_mass_dict\n\nmass_list = [math.sqrt(1.0 / atomic_mass_dict[a.label.upper()]) for a in\n crystal.all_atoms(sort=False)] # Here mass is given in the atomic mass unit (a.m.u.)\n\nZ_tensor = [[0.0 for _ in [0, 1, 2]] for m in range(len(spectrum.modes))]\nfor m in range(len(spectrum.modes)): # loop around the mode index\n for alpha in [0, 1, 2]: # loop around the x,y,z direction\n Z_m_alpha = 0\n\n for i in range(len(mass_list)): # loop around the number of atoms in the system\n for gamma in [0, 1, 2]: # loop around the x,y,z direction\n # print(m,i,gamma,np.shape(spectrum.modes[m].eigenvec))\n Z_m_alpha += born_charges[i][alpha][gamma] * mass_list[i] * spectrum.modes[m].eigenvec[i][gamma]\n\n Z_tensor[m][alpha] = Z_m_alpha\n print('Mode','\\t',m,'\\t','spatial averaged Born charge ','\\t',sum(Z_tensor[m][:]))\n\nprint(\"\"\"======Mode-Dependent Dielectric Constants======\"\"\")\n\nvolume = crystal.lattice.volume * A_IN_METRE ** 3\nsum_e_xy = 0\nsum_e_xy_raw = 0\n\nN = len([m for m in spectrum.modes if np.imag(m.freq) == 0.0])\n\nfrequencies = []\nmode_dielectrics = []\n\nfor m in range(len(spectrum.modes)):\n\n if np.imag(spectrum.modes[m].freq) == 0:\n e_xx = (Z_tensor[m][0] * ELECTRON_CHARGE) ** 2 / ((spectrum.modes[m].freq) ** 2 * volume) / EPSILON_0 / AMU\n e_yy = (Z_tensor[m][1] * ELECTRON_CHARGE) ** 2 / ((spectrum.modes[m].freq) ** 2 * volume) / EPSILON_0 / AMU\n av_raw = (e_xx + e_yy) / 2.0\n\n if apply_2D_correction:\n av = (crystal.lattice.c / slab_thick) * (av_raw + (1 / N) * (1 - slab_thick / crystal.lattice.c))\n sum_e_xy += av\n print('mode', '\\t', m, '\\t', '{:.5f}'.format(spectrum.modes[m].freq / TETRA / (2 * math.pi)), '\\t', 'THz',\n '\\t', '\\t', '{:.5f}'.format(av_raw), '\\t', '{:.5f}'.format(av))\n mode_dielectrics.append(av)\n else:\n print('mode', '\\t', m, '\\t', '{:.5f}'.format(spectrum.modes[m].freq / TETRA / (2 * math.pi)), '\\t', 'THz',\n '\\t', '\\t', '{:.5f}'.format(av_raw))\n mode_dielectrics.append(av_raw)\n\n frequencies.append(spectrum.modes[m].freq / TETRA / (2 * math.pi))\n\n sum_e_xy_raw += av_raw\n\nprint(\"Summed across modes raw:\", sum_e_xy_raw)\nprint(\"Summed across modes corrected:\", sum_e_xy)\n\nplt.bar(frequencies, mode_dielectrics, align='center', width=0.2, color='#FEE715FF')\n#plt.yscale('log')\nplt.xlabel('Phonon mode frequency ($\\\\omega_{m}$, THz)')\nplt.ylabel('$\\\\varepsilon_{2D}^{\\\\parallel}(\\\\omega_{m})$')\nplt.tight_layout()\nplt.savefig('mode_dielectrics.pdf')\n","repo_name":"yangjackie/futuremat_public","sub_path":"twodPV/mode_dielectric_constants.py","file_name":"mode_dielectric_constants.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37897922200","text":"from kiwipiepy import Kiwi\nfrom collections import Counter\n\nkiwi = Kiwi()\n\n\ndef lambda_handler(event, context):\n input_text = \"안녕하세요. 
저는 박찬호입니다.\"\n\n word_Cloud = [token.form for token in kiwi.tokenize(input_text) if token.tag[0] == \"N\"]\n resp = dict(Counter(word_Cloud))\n\n return resp\n\nprint(lambda_handler(1, 2))","repo_name":"JMC-Soft/automated-scoring","sub_path":"python/essay/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38472172636","text":"from django_filters import rest_framework as filters\n# from django_filters import FilterSet, DateRangeFilter, DateFilter, CharFilter\nimport logging\nfrom operations_log.models import Log\nfrom datetime import datetime\nfrom django.db.models import Q\n\nlogger = logging.getLogger('backend')\n\n\nclass LogFilter(filters.FilterSet):\n s = filters.DateFilter(method=\"filter_start\")\n f = filters.DateFilter(method='filter_end')\n query = filters.CharFilter(method='filter_query')\n\n def filter_start(self, queryset, name, value):\n fdate = datetime.combine(value, datetime.min.time())\n return queryset.filter(report_date__gte=fdate)\n\n def filter_end(self, queryset, name, value):\n fdate = datetime.combine(value, datetime.max.time())\n return queryset.filter(report_date__lte=fdate)\n\n def filter_query(self, queryset, name, value):\n return queryset.filter(\n Q(description__contains=value)\n | Q(type__contains=value)\n | Q(logged_by_id__first_name__contains=value)\n | Q(logged_by_id__last_name__contains=value)\n | Q(subtype__contains=value)\n | Q(report_date__contains=value)\n # | Q(type__name__contains=value)\n # | Q(subtype__name__contains=value)\n )\n\n class Meta:\n model = Log\n fields = ('s', 'f')\n","repo_name":"fengo4142/aero-django-backend","sub_path":"operations_log/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32443366071","text":"# -*- coding: utf-8 -*- \n'''\nTranslating SEDAC GPW v3 Population Count Grid ascii files into netCDF format\n(http://sedac.ciesin.columbia.edu/data/set/gpw-v3-population-count, http://dx.doi.org/10.7927/H4639MPP)\n\nCode is written and tested under Python 2.7 \n\n@license: This work is licensed under a Creative Commons Attribution 4.0 International License (http://creativecommons.org/licenses/by/4.0/)\n@author: Mate Rozsai\n'''\nimport sys\nimport datetime\nimport csv\nimport gzip\nimport numpy\nimport netCDF4\n \ndef convertSEDACpcountAscii2nc(asciiGzFile, ncFile):\n def log(message):\n sys.stdout.write(message)\n\n def createAllMaskedArray(dimA, dimB, defValue):\n res = numpy.ma.array(numpy.zeros([dimA, dimB])) \n res[:,:] = defValue\n return res\n\n def saveSEDACncFile(lats, lons, variable, ncFile, NODATA_value):\n #write result to netCDF file\n nc = netCDF4.Dataset(ncFile, 'w', format='NETCDF4')\n \n nc.createDimension('lat', lats.size)\n nc.createDimension('lon', lons.size)\n \n rvLat = nc.createVariable('lat','f8',('lat',))\n rvLat.setncattr('standard_name', 'latitude')\n rvLat.setncattr('long_name', 'latitude')\n rvLat.setncattr('axis', 'Y')\n rvLat.units = 'degrees_north'\n rvLat[:] = lats\n \n rvLon = nc.createVariable('lon','f8',('lon',))\n rvLon.setncattr('standard_name', 'longitude')\n rvLon.setncattr('long_name', 'longitude')\n rvLon.setncattr('axis', 'X')\n rvLon.units = 'degrees_east'\n rvLon[:] = lons\n \n rvVar = nc.createVariable('pcount','f4',('lat','lon',), fill_value=1e+20)\n rvVar.setncattr('long_name', 'Population counts in 2000 adjusted to 
match UN totals (SEDAC GPWv3)')\n rvVar.units = 'persons'\n rvVar.setncattr('standard_name', 'population')\n rvVar[:,:] = variable[:,:]\n nc.close()\n \n def loadAsciiFile(asciiGzFile, dtype):\n # ****** loading the ascii file \n log('['+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'] reading sedac ascii file (can take a while).')\n # checking header rows\n with gzip.open(asciiGzFile, 'rb') as fcsv:\n reader = csv.reader(fcsv, delimiter=' ', quoting=csv.QUOTE_NONE)\n row = next(reader)\n if 'ncols'!=row[0]: raise Exception('IO error', 'Invalid 1st line: does not start with \"ncols\".')\n else: ncols = int(row[-1])\n row = next(reader)\n if 'nrows'!=row[0]: raise Exception('IO error', 'Invalid 2nd line: does not start with \"nrows\".')\n else: nrows = int(row[-1])\n row = next(reader)\n if 'xllcorner'!=row[0]: raise Exception('IO error', 'Invalid 3rd line: does not start with \"xllcorner\".')\n else: xllcorner = float(row[-1])\n if -180!=xllcorner: raise Exception('IO error', 'Invalid 3rd line: \"xllcorner\" must be -180 (correct the script otherwise).')\n row = next(reader)\n if 'yllcorner'!=row[0]: raise Exception('IO error', 'Invalid 4th line: does not start with \"yllcorner\".')\n else: yllcorner = float(row[-1])\n if 0 0, 0)\n\n # Put the deltas back on. We'll need them to aggregate multiple locatios together,\n # becuase the cs_* field are only correct for this location.\n t['cs_norm'] = t.cs_norm.round(0).astype(int)\n\n t['delta_norm'] = t.cs_norm.diff().fillna(0).astype(int)\n\n return t\n\n\ndef plot_loc_norming(t):\n ax = t.plot(y='cs', figsize=(15, 7))\n t.plot(ax=ax, y='cs_mean', color='red')\n t.plot(ax=ax, y='cs_norm', color='green')\n\n\ndef clean_events(s, use_tqdm=True, locations=None):\n \"\"\"Given an event scraper that has has \"\"\"\n\n frames = []\n for e in s.iterate_splits(use_tqdm=use_tqdm, locations=locations):\n locationUid, file_list = e\n\n df = pd.concat([pd.read_csv(e) for e in file_list], ignore_index=True)\n\n t = augment_df(df).pipe(dedup_1s).pipe(dedup_rolling).pipe(re_norm_location)\n t['locationUid'] = locationUid\n\n t = t[['locationUid', 'in', 'out', 'delta', 'cs', 'delta_norm', 'cs_norm']]\n\n frames.append(t)\n\n return pd.concat(frames)\n","repo_name":"sandiegodata/cityiq","sub_path":"src/cityiq/clean_events.py","file_name":"clean_events.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"16290069568","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport sys\n\nfrom workflow import Workflow3, web, ICON_WARNING\n\n\ndef main(wf):\n args = wf.args\n\n def cacheSearch():\n return getSimilar(args[0], args[1])\n\n words = wf.cached_data(args[0] + \"_\" + args[1], cacheSearch, max_age=60)\n\n if not words:\n wf.add_item('No words found', icon=ICON_WARNING)\n wf.send_feedback()\n return 0\n\n for word in words:\n wf.add_item(title=word['word'], subtitle=word['def'], arg=word['word'], valid=True)\n\n wf.send_feedback()\n\ndef getSimilar(mode, query):\n url = 'https://api.datamuse.com/words'\n params = dict(max=20, md='d')\n params[mode] = query\n \n r = web.get(url, params)\n\n # throw an error if request failed\n r.raise_for_status()\n\n words = r.json()\n \n for i, word in enumerate(words):\n definition = \"\"\n if 'defs' in word and len(word['defs']) != 0:\n definition = word['defs'][0].split(\"\\t\")[1]\n\n words[i]['def'] = definition\n \n return words\n\n \n\nif __name__ == '__main__':\n wf = Workflow3(update_settings={\n \"github_slug\": 
\"isaacpz/Alfred-WordSearch\"\n })\n if wf.update_available:\n wf.start_update()\n \n sys.exit(wf.run(main))","repo_name":"isaacpz/Alfred-WordSearch","sub_path":"similar.py","file_name":"similar.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"91"} +{"seq_id":"26605630373","text":"#! /usr/bin/env python3\n\ncookbook = {\n \"sandwich\": {\n \"ingredients\": (\"ham\", \"bread\", \"cheese\", \"tomatoes\"),\n \"meal\": \"lunch\",\n \"prep_time\": 10\n },\n \"cake\": {\n \"ingredients\": (\"floor\", \"sugar\", \"eggs\"),\n \"meal\": \"dessert\",\n \"prep_time\": 60\n },\n \"salad\": {\n \"ingredients\": (\"avocado\", \"arugula\", \"tomatoes\", \"spinach\"),\n \"meal\": \"lunch\",\n \"prep_time\": 15\n }\n}\n\n\ndef print_recipe(name):\n recipe = cookbook[name]\n ilist = \", \".join(recipe[\"ingredients\"])\n meal = recipe[\"meal\"]\n prep_time = recipe[\"prep_time\"]\n print(f\"{name.capitalize()} recipe:\\n\"\n f\"Ingredients: {ilist}\\n\"\n f\"Perfect as a {meal}\\n\"\n f\"Done in {prep_time} minutes only!\\n\")\n\n\ndef print_recipe_menu():\n name = input(\"Please enter the recipe's name: \")\n if name not in cookbook:\n print(\"Invalid recipe name, try to print the cookbook\")\n else:\n print_recipe(name)\n\n\ndef print_cookbook():\n print(\"This is all we got!\\n\\n\")\n for name in cookbook:\n print_recipe(name)\n\n\ndef add_recipe():\n name = \"\"\n while name == \"\":\n name = input(\"Please enter a name for your new recipe\\n\")\n if name.lower() in cookbook:\n print(\"We already have this one, but we apreciate your effort!\")\n return\n meal = input(f\"What meal is the best to eat {name}?\\n\")\n ingredients = input(\n \"What is in your delicious dish ? \"\n \"(comma separated ingredients)\").split(\",\")\n print(ingredients)\n if ingredients[0] == \"\":\n print(\"A recipe needs ingredients, unless you can make \"\n \"food appear from thin air? Nah, you would be long gone.\")\n return\n ingredients = (item.strip() for item in ingredients)\n prep_time = input(\"How long does it takes to cook? 
(number, minutes)\\n\")\n try:\n prep_time = int(prep_time)\n if prep_time <= 0:\n raise Exception()\n except Exception:\n print(\"Should be a positive number, \"\n \"I do think you got nothing of how this works!\")\n return\n cookbook[name.lower()] = {\n \"ingredients\": ingredients,\n \"meal\": meal,\n \"prep_time\": prep_time\n }\n print(f\"We now have {name} in our cookbook, thanks!\")\n\n\ndef del_recipe():\n name = input().lower()\n if name not in cookbook:\n print(f\"We couldn't find '{name}' in the cookbook...\")\n else:\n del cookbook[name]\n print(f\"We deleted {name} from the cookbook\")\n\n\nMENU = (\"1: Add a recipe\\n\"\n \"2: Delete a recipe\\n\"\n \"3: Print a recipe\\n\"\n \"4: Print the cookbook\\n\"\n \"5: Quit\\n\")\n\nutils = (add_recipe, del_recipe, print_recipe_menu, print_cookbook)\n\n\ndef prog():\n while True:\n print()\n tmp = input(MENU)\n try:\n choice = int(tmp)\n except ValueError:\n print(\"ERROR: invalid choice\")\n continue\n if choice == 5:\n print(\"See you soon ;)\")\n return\n if choice == 0 or choice > 4:\n print(\"ERROR: invalid choice\")\n continue\n try:\n utils[choice - 1]()\n except Exception as e:\n print(e)\n continue\n\n\nif __name__ == \"__main__\":\n prog()\n","repo_name":"Karocyt/PythonBootcamp","sub_path":"D00/ex06/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37447301545","text":"#!/usr/bin/env python3\nfrom sys import *\nfrom heapq import *\n\nn = int(input())\na = list(map(lambda ai: int(ai) - 1, input().split()))\n\nforced = set()\nins = [[] for _ in range(n)]\nfor i in range(n):\n ins[a[i]].append(i)\n\nfor i in range(n):\n ins[i] = sorted(ins[i])\n\n# In every cycle, at least one mentor should change\ndef cycle(i, a, ins, forced):\n curr = i\n force = -1\n seen = set()\n\n while curr not in seen:\n seen.add(curr)\n ai = a[curr]\n # When mentors are shared, usually the lowest-numbered mentee stays\n if curr != min(ins[ai]):\n return\n if len(ins[ai]) > 1:\n force = max(force, curr)\n curr = ai\n\n # When there are no in-edges, anyone can switch\n if force == -1:\n force = max(seen)\n # In a cycle with in-edges, someone at an in-edge should switch\n forced.add(a[force])\n\ndef pick(i, a, b, head, tail, nexts, prevs, seen):\n b[i] = a[i]\n if b[i] in used:\n b[i] = nexts[0]\n if head[b[i]] == i:\n b[i] = nexts[b[i] + 1]\n \n used.add(b[i])\n head[tail[i]] = head[b[i]]\n tail[head[b[i]]] = tail[i]\n\n nexts[prevs[b[i]]] = nexts[b[i] + 1]\n prevs[nexts[b[i] + 1]] = prevs[b[i]]\n\nseen = set()\n\n# Mark forced nodes\nfor i in range(n):\n this_seen = set()\n curr = i\n while curr not in seen and curr not in this_seen:\n this_seen.add(curr)\n curr = a[curr]\n\n if curr in this_seen:\n # No change for a single giant cycle\n if len(this_seen) == n:\n print(*list(map(lambda x: x + 1, a)), sep=' ')\n exit()\n cycle(curr, a, ins, forced)\n seen |= this_seen\n\n# Doubly linked list of paths\nhead = list(range(n))\ntail = list(range(n))\n# Doubly linked list of free spots\n# Next unused spot (self included)\nnexts = list(range(n + 1))\n# Beginning of line of used spots (self included)\nprevs = list(range(n + 1))\n\nb = list(range(n))\n\n# Link things so the forced nodes are forced\ndone = set()\nused = set()\nfor i in range(n):\n if (i == ins[a[i]][0] and a[i] not in forced) \\\n or (a[i] in forced and len(ins[a[i]]) > 1 and i == ins[a[i]][1]):\n pick(i, a, b, head, tail, nexts, prevs, used)\n 
done.add(i)\n\nfor i in range(n):\n if i not in done:\n if len(done) < n - 1:\n pick(i, a, b, head, tail, nexts, prevs, used)\n done.add(i)\n else:\n b[i] = nexts[0]\n\nprint(*list(map(lambda x: x + 1, b)), sep=' ')\n","repo_name":"Kodsport/kth-challenge-2020","sub_path":"gaggle/submissions/wrong_answer/joseph_incorrect_minimum.py","file_name":"joseph_incorrect_minimum.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29930879169","text":"__author__ = \"Phelipe Muller e Sabrina Machado\"\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport time\r\n\r\n# link https://youtu.be/WR6updj8A4c\r\n\r\n# If you want to open a video, just change this path\r\n#cap = cv2.VideoCapture('hall_box_battery.mp4')\r\n\r\n# Parameters to use when opening the webcam.\r\ncap = cv2.VideoCapture(0)\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\r\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\r\n\r\nlower = 0\r\nupper = 1\r\n\r\n# Returns an image containing the borders of the image\r\n# sigma is how far from the median we are setting the thresholds\r\ndef auto_canny(image, sigma=0.33):\r\n # compute the median of the single channel pixel intensities\r\n v = np.median(image)\r\n\r\n # apply automatic Canny edge detection using the computed median\r\n lower = int(max(0, (1.0 - sigma) * v))\r\n upper = int(min(255, (1.0 + sigma) * v))\r\n edged = cv2.Canny(image, lower, upper)\r\n\r\n # return the edged image\r\n return edged\r\n\r\n# Initiate SIFT detector\r\nsift = cv2.xfeatures2d.SIFT_create()\r\nimg1 = cv2.imread('powerpuff-girls.png',0) # Imagem a procurar\r\nkp1, des1 = sift.detectAndCompute(img1,None)\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n\r\n # Convert the frame to grayscale\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n # A gaussian blur to get rid of the noise in the image\r\n blur = cv2.GaussianBlur(gray,(5,5),0)\r\n #blur = gray\r\n # Detect the edges present in the image\r\n bordas = auto_canny(blur)\r\n\r\n MIN_MATCH_COUNT = 81.5\r\n\r\n\r\n img2 = frame # Imagem do cenario\r\n\r\n # find the keypoints and descriptors with SIFT in each image\r\n kp2, des2 = sift.detectAndCompute(img2,None)\r\n\r\n FLANN_INDEX_KDTREE = 0\r\n index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\r\n search_params = dict(checks = 5)\r\n\r\n # Configura o algoritmo de casamento de features\r\n flann = cv2.FlannBasedMatcher(index_params, search_params)\r\n\r\n # Tenta fazer a melhor comparacao usando o algoritmo\r\n matches = flann.knnMatch(des1,des2,k=2)\r\n\r\n good = []\r\n for m,n in matches:\r\n if m.distance < 0.8*n.distance:\r\n good.append(m)\r\n\r\n circles = []\r\n\r\n\r\n # Obtains a version of the edges image where we can draw in color\r\n bordas_color = cv2.cvtColor(bordas, cv2.COLOR_GRAY2BGR)\r\n\r\n # Raposa = None\r\n # Raposa = cv2.Canny(bordas,cv2.HOUGH_GRADIENT)\r\n # HoughCircles - detects circles using the Hough Method. 
For an explanation of\r\n # param1 and param2 please see an explanation here http://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/\r\n\r\n if len(good) > MIN_MATCH_COUNT:\r\n print(\"Powerpuff Girls!!!\")\r\n\r\n else:\r\n print(\"Nao encontrei nada...\")\r\n\r\n if circles is not None:\r\n circles = np.uint16(np.around(circles))\r\n\r\n\r\n\r\n # linha azul diagonal\r\n # cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]])\r\n # cv2.line(bordas_color,(0,0),(511,511),(255,0,0),5)\r\n\r\n # Quadrado Verde x\r\n #cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])\r\n # cv2.rectangle(bordas_color,(384,0),(510,128),(0,255,0),3)\r\n\r\n # cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]])\r\n # font = cv2.FONT_HERSHEY_SIMPLEX\r\n # cv2.putText(bordas_color,'Ninjutsu ;)',(0,50), font, 2,(255,255,255),2,cv2.LINE_AA)\r\n\r\n #More drawing functions @ http://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html\r\n\r\n # Display the resulting frame\r\n cv2.imshow('Detector de circulos',bordas_color)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"SabrinaMB/Rob-tica-projeto1","sub_path":"draw_circles_video.py","file_name":"draw_circles_video.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"40477761398","text":"from functools import lru_cache\n\n@lru_cache(None)\ndef decode(b):\n # it barely makes a difference\n # but it is interesting to note\n # that the last bit of the unknown\n # byte is the same as the last bit\n # in the encoded byte\n start = int((b & 1) != 0)\n for x in range(start, 256, 2):\n if ((x ^ (x << 1)) & 255) == b:\n return x \n\nfor i in range(256):\n decode(i)\n\n_ = int(input())\nbs = print(\" \".join(str(decode(int(s))) for s in input().split(\" \")))\n","repo_name":"Ikerlb/kattis","sub_path":"communication/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"69840949743","text":"from django.shortcuts import get_object_or_404\nfrom server.models import Project\n\n\ndef common_context(request):\n kwargs = request.resolver_match.kwargs\n key = \"project_id\"\n if key in kwargs:\n project = get_object_or_404(Project, pk=kwargs[\"project_id\"])\n return {\n \"project_id\": project.id,\n \"project_name\": project.name,\n \"project_type\": project.project_type,\n \"project_description\": project.description,\n \"al_mode\": project.al_mode,\n \"project_multilabel\": project.multilabel,\n \"hierarchy\": project.hierarchy,\n \"managers\": project.managers.all(),\n \"annotators\": sorted(\n project.annotators.all(), key=lambda a: a.username.lower()\n ),\n \"main_annotator\": project.main_annotator\n }\n else:\n return {}\n","repo_name":"josipjukic/alanno","sub_path":"app/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"91"} +{"seq_id":"35759415155","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom datetime import date\n\nfrom django.test import TestCase\n\nfrom .models import Architect\nfrom professionals.tests import ProfessionalTest\n\n\nclass 
ArchitectTests(ProfessionalTest, TestCase):\n\n def set_professional_params(self):\n model_params = {\n 'lic_num': 1,\n 'lic_prefix': 'aaa',\n 'lic_name': 'bbb',\n 'lic_type': 'ccc',\n 'lic_status': 'ddd',\n 'lic_issue_date': date(2000, 1, 1),\n 'lic_expire_date': date(2222, 2, 2),\n 'street_address': 'eee',\n 'city': 'fff',\n 'county': 'ggg',\n 'state': 'hhh',\n 'country': 'iii',\n 'pos_code': 'jjj',\n 'uuid': 'kkk',\n }\n return model_params\n\n def set_model(self):\n self.model = Architect\n\n def set_path(self):\n self.path = '/architect/1/'\n self.path_404 = '/architect/2/'\n\n def test_get_lic_num(self):\n lic_num = self.soup.find(name='td', class_='rname', text='License #:').find_next_sibling().get_text()\n self.assertEqual(self.professional.lic_num, int(lic_num))\n\n def test_get_lic_type(self):\n lic_type = self.soup.find(name='td', class_='rname', text='License Type:').find_next_sibling().get_text()\n self.assertEqual(self.professional.lic_type + ' ', lic_type)\n\n def test_get_lic_address(self):\n address = self.soup.find(name='td', class_='rname', text='Address:').find_next_sibling().get_text()\n address = address.replace('\\n', ',', 1).replace(' ', '').replace('\\n', '')\n address = filter(lambda x: x != '', address)\n street_address, city, state = address.split(',')[0:3]\n self.assertEqual(self.professional.street_address, street_address)\n self.assertEqual(self.professional.city, city)\n self.assertEqual(self.professional.state, state)\n\n def test_get_lic_issue_date(self):\n issued_date = self.soup.find(name='td', class_='rname', text='Issued Date:').find_next_sibling().get_text()\n lic_issued_date = '{month}. {date}, {year}'.format(\n month=self.professional.lic_issue_date.strftime('%b'),\n date=self.professional.lic_issue_date.day,\n year=self.professional.lic_issue_date.year,\n )\n self.assertEqual(lic_issued_date, issued_date)\n\n def test_get_lic_status(self):\n lic_status = self.soup.find(name='td', class_='rname', text='License Status:').find_next_sibling().get_text()\n self.assertEqual(self.professional.lic_status, lic_status)\n","repo_name":"HackerWithData/BackEnd","sub_path":"backend_core/architects/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"11104464946","text":"from Bio.PDB import *\n\nfrom utility.load_ppi_templates import load_ppi_templates\nfrom utility.execute_with_elapsed_time import execute_with_elapsed_time\nfrom constant.directory_constant import dbd5_path, templates_path\nfrom constant.last_heavy_atom import lha_dict\nfrom constant.dbd3_dataset import dbd3_all_list\nfrom constant.dbd5_dataset import dbd5_all_list\nfrom benchmark.dbd_sanity_check import dbd_sanity_check\n\npdb_parser = PDBParser()\n\n# Docking Benchmark 5: Load PPI Templates (5A)\n# Load DBD5 PPI template dictionary from .json files (to be used in the main program)\ndbd5_a_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_a_templates_5a.json\")\ndbd5_ei_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_ei_templates_5a.json\")\ndbd5_er_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_er_templates_5a.json\")\ndbd5_es_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_es_templates_5a.json\")\ndbd5_ab_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_ab_templates_5a.json\")\ndbd5_og_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_og_templates_5a.json\")\ndbd5_ox_templates_5a_dict 
= load_ppi_templates(templates_path, \"dbd5_ox_templates_5a.json\")\ndbd5_or_templates_5a_dict = load_ppi_templates(templates_path, \"dbd5_or_templates_5a.json\")\n\ndbd5_all_templates_5a_dict = {**dbd5_a_templates_5a_dict, **dbd5_ei_templates_5a_dict, **dbd5_er_templates_5a_dict,\n **dbd5_es_templates_5a_dict, **dbd5_ab_templates_5a_dict, **dbd5_og_templates_5a_dict,\n **dbd5_ox_templates_5a_dict, **dbd5_or_templates_5a_dict}\n\n# Docking Benchmark 3: Load PPI Templates (5A)\n# Load DBD3 PPI template dictionary from .json files (to be used in the main program)\ndbd3_e_templates_5a_dict = load_ppi_templates(templates_path, \"dbd3_e_templates_5a.json\")\ndbd3_a_templates_5a_dict = load_ppi_templates(templates_path, \"dbd3_a_templates_5a.json\")\ndbd3_ab_templates_5a_dict = load_ppi_templates(templates_path, \"dbd3_ab_templates_5a.json\")\ndbd3_o_templates_5a_dict = load_ppi_templates(templates_path, \"dbd3_o_templates_5a.json\")\n\ndbd3_all_templates_5a_dict = {**dbd3_e_templates_5a_dict, **dbd3_a_templates_5a_dict, **dbd3_ab_templates_5a_dict,\n **dbd3_o_templates_5a_dict}\n\n\nprint(\"DBD3: All Complexes\")\nexecute_with_elapsed_time(dbd_sanity_check,\n pdb_id_list=dbd3_all_list,\n templates_dict=dbd3_all_templates_5a_dict,\n dbd_path=dbd5_path,\n pdb_parser=pdb_parser,\n lha_dict=lha_dict,\n ranking_size=100,\n verbose=True,\n iteration_per_protein=1,\n population_size=300,\n number_of_generations=300,\n crossover_probability=0.5,\n mutation_probability=0.7,\n tournament_size=3,\n number_of_tournament=50)\n\nprint(\"DBD5: All Complexes\")\nexecute_with_elapsed_time(dbd_sanity_check,\n pdb_id_list=dbd5_all_list,\n templates_dict=dbd5_all_templates_5a_dict,\n dbd_path=dbd5_path,\n pdb_parser=pdb_parser,\n lha_dict=lha_dict,\n ranking_size=100,\n verbose=True,\n iteration_per_protein=1,\n population_size=300,\n number_of_generations=300,\n crossover_probability=0.5,\n mutation_probability=0.7,\n tournament_size=3,\n number_of_tournament=50)\n","repo_name":"darmawanalbert/gassppi","sub_path":"run_dbd_all_templates.py","file_name":"run_dbd_all_templates.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20574849356","text":"from django.urls import path\nfrom .views import BotInfoHandler, ChatsViewSet, MessageViewSet, WebHooks\n\nurlpatterns = [\n path('', WebHooks.as_view()),\n path('bot/', BotInfoHandler.as_view()),\n path('list/', ChatsViewSet.as_view({'get': 'list'})),\n path('update/', ChatsViewSet.as_view({'patch': 'update'})),\n path(\"create/\", ChatsViewSet.as_view({'post': 'create'})),\n path('messages//', MessageViewSet.as_view({'get': 'list'})),\n path(\"messages/create/\", MessageViewSet.as_view({'post': 'create'})),\n path(\"messages/update/\", MessageViewSet.as_view({'patch': 'update'}))\n]\n","repo_name":"JustGimli/BPDocker001","sub_path":"backends/app/api/v1/chats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24051825152","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTests and Confidence Intervals for Binomial Proportions\n\nCreated on Fri Mar 01 00:23:07 2013\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\"\"\"\n\nfrom statsmodels.compat.python import lzip\nfrom typing import Callable, Tuple\nimport numpy as np\nimport pandas as pd\nfrom scipy import optimize, stats\n\nfrom statsmodels.stats.base import AllPairsResults, HolderTuple\nfrom 
statsmodels.stats.weightstats import _zstat_generic2\nfrom statsmodels.tools.sm_exceptions import HypothesisTestWarning\nfrom statsmodels.tools.testing import Holder\nfrom statsmodels.tools.validation import array_like\n\nFLOAT_INFO = np.finfo(float)\n\n\ndef _bound_proportion_confint(\n func: Callable[[float], float], qi: float, lower: bool = True\n) -> float:\n \"\"\"\n Try hard to find a bound different from eps/1 - eps in proportion_confint\n\n Parameters\n ----------\n func : callable\n Callable function to use as the objective of the search\n qi : float\n The empirical success rate\n lower : bool\n Whether to fund a lower bound for the left side of the CI\n\n Returns\n -------\n float\n The coarse bound\n \"\"\"\n default = FLOAT_INFO.eps if lower else 1.0 - FLOAT_INFO.eps\n\n def step(v):\n return v / 8 if lower else v + (1.0 - v) / 8\n\n x = step(qi)\n w = func(x)\n cnt = 1\n while w > 0 and cnt < 10:\n x = step(x)\n w = func(x)\n cnt += 1\n return x if cnt < 10 else default\n\n\ndef _bisection_search_conservative(\n func: Callable[[float], float], lb: float, ub: float, steps: int = 27\n) -> Tuple[float, float]:\n \"\"\"\n Private function used as a fallback by proportion_confint\n\n Used when brentq returns a non-conservative bound for the CI\n\n Parameters\n ----------\n func : callable\n Callable function to use as the objective of the search\n lb : float\n Lower bound\n ub : float\n Upper bound\n steps : int\n Number of steps to use in the bisection\n\n Returns\n -------\n est : float\n The estimated value. Will always produce a negative value of func\n func_val : float\n The value of the function at the estimate\n \"\"\"\n upper = func(ub)\n lower = func(lb)\n best = upper if upper < 0 else lower\n best_pt = ub if upper < 0 else lb\n if np.sign(lower) == np.sign(upper):\n raise ValueError(\"problem with signs\")\n mp = (ub + lb) / 2\n mid = func(mp)\n if (mid < 0) and (mid > best):\n best = mid\n best_pt = mp\n for _ in range(steps):\n if np.sign(mid) == np.sign(upper):\n ub = mp\n upper = mid\n else:\n lb = mp\n mp = (ub + lb) / 2\n mid = func(mp)\n if (mid < 0) and (mid > best):\n best = mid\n best_pt = mp\n return best_pt, best\n\n\ndef proportion_confint(count, nobs, alpha:float=0.05, method=\"normal\"):\n \"\"\"\n Confidence interval for a binomial proportion\n\n Parameters\n ----------\n count : {int or float, array_like}\n number of successes, can be pandas Series or DataFrame. Arrays\n must contain integer values if method is \"binom_test\".\n nobs : {int or float, array_like}\n total number of trials. Arrays must contain integer values if method\n is \"binom_test\".\n alpha : float\n Significance level, default 0.05. Must be in (0, 1)\n method : {\"normal\", \"agresti_coull\", \"beta\", \"wilson\", \"binom_test\"}\n default: \"normal\"\n method to use for confidence interval. Supported methods:\n\n - `normal` : asymptotic normal approximation\n - `agresti_coull` : Agresti-Coull interval\n - `beta` : Clopper-Pearson interval based on Beta distribution\n - `wilson` : Wilson Score interval\n - `jeffreys` : Jeffreys Bayesian Interval\n - `binom_test` : Numerical inversion of binom_test\n\n Returns\n -------\n ci_low, ci_upp : {float, ndarray, Series DataFrame}\n lower and upper confidence level with coverage (approximately) 1-alpha.\n When a pandas object is returned, then the index is taken from `count`.\n\n Notes\n -----\n Beta, the Clopper-Pearson exact interval has coverage at least 1-alpha,\n but is in general conservative. 
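For example, a call like proportion_confint(5, 50, method=\"beta\") will typically give a somewhat wider (ci_low, ci_upp) pair than the default \"normal\" approximation for the same data. 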
Most of the other methods have average\n coverage equal to 1-alpha, but will have smaller coverage in some cases.\n\n The \"beta\" and \"jeffreys\" interval are central, they use alpha/2 in each\n tail, and alpha is not adjusted at the boundaries. In the extreme case\n when `count` is zero or equal to `nobs`, then the coverage will be only\n 1 - alpha/2 in the case of \"beta\".\n\n The confidence intervals are clipped to be in the [0, 1] interval in the\n case of \"normal\" and \"agresti_coull\".\n\n Method \"binom_test\" directly inverts the binomial test in scipy.stats.\n which has discrete steps.\n\n TODO: binom_test intervals raise an exception in small samples if one\n interval bound is close to zero or one.\n\n References\n ----------\n .. [*] https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval\n\n .. [*] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).\n \"Interval Estimation for a Binomial Proportion\", Statistical\n Science 16 (2): 101–133. doi:10.1214/ss/1009213286.\n \"\"\"\n is_scalar = np.isscalar(count) and np.isscalar(nobs)\n is_pandas = isinstance(count, (pd.Series, pd.DataFrame))\n count_a = array_like(count, \"count\", optional=False, ndim=None)\n nobs_a = array_like(nobs, \"nobs\", optional=False, ndim=None)\n\n def _check(x: np.ndarray, name: str) -> np.ndarray:\n if np.issubdtype(x.dtype, np.integer):\n return x\n y = x.astype(np.int64, casting=\"unsafe\")\n if np.any(y != x):\n raise ValueError(\n f\"{name} must have an integral dtype. Found data with \"\n f\"dtype {x.dtype}\"\n )\n return y\n\n if method == \"binom_test\":\n count_a = _check(np.asarray(count_a), \"count\")\n nobs_a = _check(np.asarray(nobs_a), \"count\")\n\n q_ = count_a / nobs_a\n alpha_2 = 0.5 * alpha\n\n if method == \"normal\":\n std_ = np.sqrt(q_ * (1 - q_) / nobs_a)\n dist = stats.norm.isf(alpha / 2.0) * std_\n ci_low = q_ - dist\n ci_upp = q_ + dist\n elif method == \"binom_test\":\n # inverting the binomial test\n def func_factory(count: int, nobs: int) -> Callable[[float], float]:\n if hasattr(stats, \"binomtest\"):\n\n def func(qi):\n return stats.binomtest(count, nobs, p=qi).pvalue - alpha\n\n else:\n # Remove after min SciPy >= 1.7\n def func(qi):\n return stats.binom_test(count, nobs, p=qi) - alpha\n\n return func\n\n bcast = np.broadcast(count_a, nobs_a)\n ci_low = np.zeros(bcast.shape)\n ci_upp = np.zeros(bcast.shape)\n index = bcast.index\n for c, n in bcast:\n # Enforce symmetry\n reverse = False\n _q = q_.flat[index]\n if c > n // 2:\n c = n - c\n reverse = True\n _q = 1 - _q\n func = func_factory(c, n)\n if c == 0:\n ci_low.flat[index] = 0.0\n else:\n lower_bnd = _bound_proportion_confint(func, _q, lower=True)\n val, _z = optimize.brentq(\n func, lower_bnd, _q, full_output=True\n )\n if func(val) > 0:\n power = 10\n new_lb = val - (val - lower_bnd) / 2**power\n while func(new_lb) > 0 and power >= 0:\n power -= 1\n new_lb = val - (val - lower_bnd) / 2**power\n val, _ = _bisection_search_conservative(func, new_lb, _q)\n ci_low.flat[index] = val\n if c == n:\n ci_upp.flat[index] = 1.0\n else:\n upper_bnd = _bound_proportion_confint(func, _q, lower=False)\n val, _z = optimize.brentq(\n func, _q, upper_bnd, full_output=True\n )\n if func(val) > 0:\n power = 10\n new_ub = val + (upper_bnd - val) / 2**power\n while func(new_ub) > 0 and power >= 0:\n power -= 1\n new_ub = val - (upper_bnd - val) / 2**power\n val, _ = _bisection_search_conservative(func, _q, new_ub)\n ci_upp.flat[index] = val\n if reverse:\n temp = ci_upp.flat[index]\n ci_upp.flat[index] = 1 - 
ci_low.flat[index]\n ci_low.flat[index] = 1 - temp\n index = bcast.index\n elif method == \"beta\":\n ci_low = stats.beta.ppf(alpha_2, count_a, nobs_a - count_a + 1)\n ci_upp = stats.beta.isf(alpha_2, count_a + 1, nobs_a - count_a)\n\n if np.ndim(ci_low) > 0:\n ci_low.flat[q_.flat == 0] = 0\n ci_upp.flat[q_.flat == 1] = 1\n else:\n ci_low = 0 if q_ == 0 else ci_low\n ci_upp = 1 if q_ == 1 else ci_upp\n elif method == \"agresti_coull\":\n crit = stats.norm.isf(alpha / 2.0)\n nobs_c = nobs_a + crit**2\n q_c = (count_a + crit**2 / 2.0) / nobs_c\n std_c = np.sqrt(q_c * (1.0 - q_c) / nobs_c)\n dist = crit * std_c\n ci_low = q_c - dist\n ci_upp = q_c + dist\n elif method == \"wilson\":\n crit = stats.norm.isf(alpha / 2.0)\n crit2 = crit**2\n denom = 1 + crit2 / nobs_a\n center = (q_ + crit2 / (2 * nobs_a)) / denom\n dist = crit * np.sqrt(\n q_ * (1.0 - q_) / nobs_a + crit2 / (4.0 * nobs_a**2)\n )\n dist /= denom\n ci_low = center - dist\n ci_upp = center + dist\n # method adjusted to be more forgiving of misspellings or incorrect option name\n elif method[:4] == \"jeff\":\n ci_low, ci_upp = stats.beta.interval(\n 1 - alpha, count_a + 0.5, nobs_a - count_a + 0.5\n )\n else:\n raise NotImplementedError(f\"method {method} is not available\")\n if method in [\"normal\", \"agresti_coull\"]:\n ci_low = np.clip(ci_low, 0, 1)\n ci_upp = np.clip(ci_upp, 0, 1)\n if is_pandas:\n container = pd.Series if isinstance(count, pd.Series) else pd.DataFrame\n ci_low = container(ci_low, index=count.index)\n ci_upp = container(ci_upp, index=count.index)\n if is_scalar:\n return float(ci_low), float(ci_upp)\n return ci_low, ci_upp\n\n\ndef multinomial_proportions_confint(counts, alpha=0.05, method='goodman'):\n \"\"\"\n Confidence intervals for multinomial proportions.\n\n Parameters\n ----------\n counts : array_like of int, 1-D\n Number of observations in each category.\n alpha : float in (0, 1), optional\n Significance level, defaults to 0.05.\n method : {'goodman', 'sison-glaz'}, optional\n Method to use to compute the confidence intervals; available methods\n are:\n\n - `goodman`: based on a chi-squared approximation, valid if all\n values in `counts` are greater or equal to 5 [2]_\n - `sison-glaz`: less conservative than `goodman`, but only valid if\n `counts` has 7 or more categories (``len(counts) >= 7``) [3]_\n\n Returns\n -------\n confint : ndarray, 2-D\n Array of [lower, upper] confidence levels for each category, such that\n overall coverage is (approximately) `1-alpha`.\n\n Raises\n ------\n ValueError\n If `alpha` is not in `(0, 1)` (bounds excluded), or if the values in\n `counts` are not all positive or null.\n NotImplementedError\n If `method` is not kown.\n Exception\n When ``method == 'sison-glaz'``, if for some reason `c` cannot be\n computed; this signals a bug and should be reported.\n\n Notes\n -----\n The `goodman` method [2]_ is based on approximating a statistic based on\n the multinomial as a chi-squared random variable. The usual recommendation\n is that this is valid if all the values in `counts` are greater than or\n equal to 5. There is no condition on the number of categories for this\n method.\n\n The `sison-glaz` method [3]_ approximates the multinomial probabilities,\n and evaluates that with a maximum-likelihood estimator. The first\n approximation is an Edgeworth expansion that converges when the number of\n categories goes to infinity, and the maximum-likelihood estimator converges\n when the number of observations (``sum(counts)``) goes to infinity. 
In\n their paper, Sison & Glaz demo their method with at least 7 categories, so\n ``len(counts) >= 7`` with all values in `counts` at or above 5 can be used\n as a rule of thumb for the validity of this method. This method is less\n conservative than the `goodman` method (i.e. it will yield confidence\n intervals closer to the desired significance level), but produces\n confidence intervals of uniform width over all categories (except when the\n intervals reach 0 or 1, in which case they are truncated), which makes it\n most useful when proportions are of similar magnitude.\n\n Aside from the original sources ([1]_, [2]_, and [3]_), the implementation\n uses the formulas (though not the code) presented in [4]_ and [5]_.\n\n References\n ----------\n .. [1] Levin, Bruce, \"A representation for multinomial cumulative\n distribution functions,\" The Annals of Statistics, Vol. 9, No. 5,\n 1981, pp. 1123-1126.\n\n .. [2] Goodman, L.A., \"On simultaneous confidence intervals for multinomial\n proportions,\" Technometrics, Vol. 7, No. 2, 1965, pp. 247-254.\n\n .. [3] Sison, Cristina P., and Joseph Glaz, \"Simultaneous Confidence\n Intervals and Sample Size Determination for Multinomial\n Proportions,\" Journal of the American Statistical Association,\n Vol. 90, No. 429, 1995, pp. 366-369.\n\n .. [4] May, Warren L., and William D. Johnson, \"A SAS® macro for\n constructing simultaneous confidence intervals for multinomial\n proportions,\" Computer methods and programs in Biomedicine, Vol. 53,\n No. 3, 1997, pp. 153-162.\n\n .. [5] May, Warren L., and William D. Johnson, \"Constructing two-sided\n simultaneous confidence intervals for multinomial proportions for\n small counts in a large number of cells,\" Journal of Statistical\n Software, Vol. 5, No. 6, 2000, pp. 
1-24.\n \"\"\"\n if alpha <= 0 or alpha >= 1:\n raise ValueError('alpha must be in (0, 1), bounds excluded')\n counts = np.array(counts, dtype=float)\n if (counts < 0).any():\n raise ValueError('counts must be >= 0')\n\n n = counts.sum()\n k = len(counts)\n proportions = counts / n\n if method == 'goodman':\n chi2 = stats.chi2.ppf(1 - alpha / k, 1)\n delta = chi2 ** 2 + (4 * n * proportions * chi2 * (1 - proportions))\n region = ((2 * n * proportions + chi2 +\n np.array([- np.sqrt(delta), np.sqrt(delta)])) /\n (2 * (chi2 + n))).T\n elif method[:5] == 'sison': # We accept any name starting with 'sison'\n # Define a few functions we'll use a lot.\n def poisson_interval(interval, p):\n \"\"\"\n Compute P(b <= Z <= a) where Z ~ Poisson(p) and\n `interval = (b, a)`.\n \"\"\"\n b, a = interval\n prob = stats.poisson.cdf(a, p) - stats.poisson.cdf(b - 1, p)\n return prob\n\n def truncated_poisson_factorial_moment(interval, r, p):\n \"\"\"\n Compute mu_r, the r-th factorial moment of a poisson random\n variable of parameter `p` truncated to `interval = (b, a)`.\n \"\"\"\n b, a = interval\n return p ** r * (1 - ((poisson_interval((a - r + 1, a), p) -\n poisson_interval((b - r, b - 1), p)) /\n poisson_interval((b, a), p)))\n\n def edgeworth(intervals):\n \"\"\"\n Compute the Edgeworth expansion term of Sison & Glaz's formula\n (1) (approximated probability for multinomial proportions in a\n given box).\n \"\"\"\n # Compute means and central moments of the truncated poisson\n # variables.\n mu_r1, mu_r2, mu_r3, mu_r4 = [\n np.array([truncated_poisson_factorial_moment(interval, r, p)\n for (interval, p) in zip(intervals, counts)])\n for r in range(1, 5)\n ]\n mu = mu_r1\n mu2 = mu_r2 + mu - mu ** 2\n mu3 = mu_r3 + mu_r2 * (3 - 3 * mu) + mu - 3 * mu ** 2 + 2 * mu ** 3\n mu4 = (mu_r4 + mu_r3 * (6 - 4 * mu) +\n mu_r2 * (7 - 12 * mu + 6 * mu ** 2) +\n mu - 4 * mu ** 2 + 6 * mu ** 3 - 3 * mu ** 4)\n\n # Compute expansion factors, gamma_1 and gamma_2.\n g1 = mu3.sum() / mu2.sum() ** 1.5\n g2 = (mu4.sum() - 3 * (mu2 ** 2).sum()) / mu2.sum() ** 2\n\n # Compute the expansion itself.\n x = (n - mu.sum()) / np.sqrt(mu2.sum())\n phi = np.exp(- x ** 2 / 2) / np.sqrt(2 * np.pi)\n H3 = x ** 3 - 3 * x\n H4 = x ** 4 - 6 * x ** 2 + 3\n H6 = x ** 6 - 15 * x ** 4 + 45 * x ** 2 - 15\n f = phi * (1 + g1 * H3 / 6 + g2 * H4 / 24 + g1 ** 2 * H6 / 72)\n return f / np.sqrt(mu2.sum())\n\n\n def approximated_multinomial_interval(intervals):\n \"\"\"\n Compute approximated probability for Multinomial(n, proportions)\n to be in `intervals` (Sison & Glaz's formula (1)).\n \"\"\"\n return np.exp(\n np.sum(np.log([poisson_interval(interval, p)\n for (interval, p) in zip(intervals, counts)])) +\n np.log(edgeworth(intervals)) -\n np.log(stats.poisson._pmf(n, n))\n )\n\n def nu(c):\n \"\"\"\n Compute interval coverage for a given `c` (Sison & Glaz's\n formula (7)).\n \"\"\"\n return approximated_multinomial_interval(\n [(np.maximum(count - c, 0), np.minimum(count + c, n))\n for count in counts])\n\n # Find the value of `c` that will give us the confidence intervals\n # (solving nu(c) <= 1 - alpha < nu(c + 1).\n c = 1.0\n nuc = nu(c)\n nucp1 = nu(c + 1)\n while not (nuc <= (1 - alpha) < nucp1):\n if c > n:\n raise Exception(\"Couldn't find a value for `c` that \"\n \"solves nu(c) <= 1 - alpha < nu(c + 1)\")\n c += 1\n nuc = nucp1\n nucp1 = nu(c + 1)\n\n # Compute gamma and the corresponding confidence intervals.\n g = (1 - alpha - nuc) / (nucp1 - nuc)\n ci_lower = np.maximum(proportions - c / n, 0)\n ci_upper = np.minimum(proportions + (c 
+ 2 * g) / n, 1)\n region = np.array([ci_lower, ci_upper]).T\n else:\n raise NotImplementedError('method \"%s\" is not available' % method)\n return region\n\n\ndef samplesize_confint_proportion(proportion, half_length, alpha=0.05,\n method='normal'):\n \"\"\"\n Find sample size to get desired confidence interval length\n\n Parameters\n ----------\n proportion : float in (0, 1)\n proportion or quantile\n half_length : float in (0, 1)\n desired half length of the confidence interval\n alpha : float in (0, 1)\n significance level, default 0.05,\n coverage of the two-sided interval is (approximately) ``1 - alpha``\n method : str in ['normal']\n method to use for confidence interval,\n currently only normal approximation\n\n Returns\n -------\n n : float\n sample size to get the desired half length of the confidence interval\n\n Notes\n -----\n this is mainly to store the formula.\n possible application: number of replications in bootstrap samples\n\n \"\"\"\n q_ = proportion\n if method == 'normal':\n n = q_ * (1 - q_) / (half_length / stats.norm.isf(alpha / 2.))**2\n else:\n raise NotImplementedError('only \"normal\" is available')\n\n return n\n\n\ndef proportion_effectsize(prop1, prop2, method='normal'):\n \"\"\"\n Effect size for a test comparing two proportions\n\n for use in power function\n\n Parameters\n ----------\n prop1, prop2 : float or array_like\n The proportion value(s).\n\n Returns\n -------\n es : float or ndarray\n effect size for (transformed) prop1 - prop2\n\n Notes\n -----\n only method='normal' is implemented to match pwr.p2.test\n see http://www.statmethods.net/stats/power.html\n\n Effect size for `normal` is defined as ::\n\n 2 * (arcsin(sqrt(prop1)) - arcsin(sqrt(prop2)))\n\n I think other conversions to normality can be used, but I need to check.\n\n Examples\n --------\n >>> import statsmodels.api as sm\n >>> sm.stats.proportion_effectsize(0.5, 0.4)\n 0.20135792079033088\n >>> sm.stats.proportion_effectsize([0.3, 0.4, 0.5], 0.4)\n array([-0.21015893, 0. , 0.20135792])\n\n \"\"\"\n if method != 'normal':\n raise ValueError('only \"normal\" is implemented')\n\n es = 2 * (np.arcsin(np.sqrt(prop1)) - np.arcsin(np.sqrt(prop2)))\n return es\n\n\ndef std_prop(prop, nobs):\n \"\"\"\n Standard error for the estimate of a proportion\n\n This is just ``np.sqrt(p * (1. - p) / nobs)``\n\n Parameters\n ----------\n prop : array_like\n proportion\n nobs : int, array_like\n number of observations\n\n Returns\n -------\n std : array_like\n standard error for a proportion of nobs independent observations\n \"\"\"\n return np.sqrt(prop * (1. 
- prop) / nobs)\n\n\ndef _std_diff_prop(p1, p2, ratio=1):\n return np.sqrt(p1 * (1 - p1) + p2 * (1 - p2) / ratio)\n\n\ndef _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,\n alpha=0.05, discrete=True, dist='norm', nobs=None,\n continuity=0, critval_continuity=0):\n \"\"\"\n Generic statistical power function for normal based equivalence test\n\n This includes options to adjust the normal approximation and can use\n the binomial to evaluate the probability of the rejection region\n\n see power_ztost_prob for a description of the options\n \"\"\"\n # TODO: refactor structure, separate norm and binom better\n if not isinstance(continuity, tuple):\n continuity = (continuity, continuity)\n crit = stats.norm.isf(alpha)\n k_low = mean_low + np.sqrt(var_low) * crit\n k_upp = mean_upp - np.sqrt(var_upp) * crit\n if discrete or dist == 'binom':\n k_low = np.ceil(k_low * nobs + 0.5 * critval_continuity)\n k_upp = np.trunc(k_upp * nobs - 0.5 * critval_continuity)\n if dist == 'norm':\n #need proportion\n k_low = (k_low) * 1. / nobs #-1 to match PASS\n k_upp = k_upp * 1. / nobs\n# else:\n# if dist == 'binom':\n# #need counts\n# k_low *= nobs\n# k_upp *= nobs\n #print mean_low, np.sqrt(var_low), crit, var_low\n #print mean_upp, np.sqrt(var_upp), crit, var_upp\n if np.any(k_low > k_upp): #vectorize\n import warnings\n warnings.warn(\"no overlap, power is zero\", HypothesisTestWarning)\n std_alt = np.sqrt(var_alt)\n z_low = (k_low - mean_alt - continuity[0] * 0.5 / nobs) / std_alt\n z_upp = (k_upp - mean_alt + continuity[1] * 0.5 / nobs) / std_alt\n if dist == 'norm':\n power = stats.norm.cdf(z_upp) - stats.norm.cdf(z_low)\n elif dist == 'binom':\n power = (stats.binom.cdf(k_upp, nobs, mean_alt) -\n stats.binom.cdf(k_low-1, nobs, mean_alt))\n return power, (k_low, k_upp, z_low, z_upp)\n\n\ndef binom_tost(count, nobs, low, upp):\n \"\"\"\n Exact TOST test for one proportion using binomial distribution\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials.\n nobs : int\n the number of trials or observations.\n low, upp : floats\n lower and upper limit of equivalence region\n\n Returns\n -------\n pvalue : float\n p-value of equivalence test\n pval_low, pval_upp : floats\n p-values of lower and upper one-sided tests\n\n \"\"\"\n # binom_test_stat only returns pval\n tt1 = binom_test(count, nobs, alternative='larger', prop=low)\n tt2 = binom_test(count, nobs, alternative='smaller', prop=upp)\n return np.maximum(tt1, tt2), tt1, tt2,\n\n\ndef binom_tost_reject_interval(low, upp, nobs, alpha=0.05):\n \"\"\"\n Rejection region for binomial TOST\n\n The interval includes the end points,\n `reject` if and only if `r_low <= x <= r_upp`.\n\n The interval might be empty with `r_upp < r_low`.\n\n Parameters\n ----------\n low, upp : floats\n lower and upper limit of equivalence region\n nobs : int\n the number of trials or observations.\n\n Returns\n -------\n x_low, x_upp : float\n lower and upper bound of rejection region\n\n \"\"\"\n x_low = stats.binom.isf(alpha, nobs, low) + 1\n x_upp = stats.binom.ppf(alpha, nobs, upp) - 1\n return x_low, x_upp\n\n\ndef binom_test_reject_interval(value, nobs, alpha=0.05, alternative='two-sided'):\n \"\"\"\n Rejection region for binomial test for one sample proportion\n\n The interval includes the end points of the rejection region.\n\n Parameters\n ----------\n value : float\n proportion under the Null hypothesis\n nobs : int\n the number of trials or observations.\n\n Returns\n -------\n x_low, x_upp : int\n lower 
and upper bound of rejection region\n \"\"\"\n if alternative in ['2s', 'two-sided']:\n alternative = '2s' # normalize alternative name\n alpha = alpha / 2\n\n if alternative in ['2s', 'smaller']:\n x_low = stats.binom.ppf(alpha, nobs, value) - 1\n else:\n x_low = 0\n if alternative in ['2s', 'larger']:\n x_upp = stats.binom.isf(alpha, nobs, value) + 1\n else :\n x_upp = nobs\n\n return int(x_low), int(x_upp)\n\n\ndef binom_test(count, nobs, prop=0.5, alternative='two-sided'):\n \"\"\"\n Perform a test that the probability of success is p.\n\n This is an exact, two-sided test of the null hypothesis\n that the probability of success in a Bernoulli experiment\n is `p`.\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials.\n nobs : int\n the number of trials or observations.\n prop : float, optional\n The probability of success under the null hypothesis,\n `0 <= prop <= 1`. The default value is `prop = 0.5`\n alternative : str in ['two-sided', 'smaller', 'larger']\n alternative hypothesis, which can be two-sided or either one of the\n one-sided tests.\n\n Returns\n -------\n p-value : float\n The p-value of the hypothesis test\n\n Notes\n -----\n This uses scipy.stats.binom_test for the two-sided alternative.\n \"\"\"\n\n if np.any(prop > 1.0) or np.any(prop < 0.0):\n raise ValueError(\"p must be in range [0,1]\")\n if alternative in ['2s', 'two-sided']:\n try:\n pval = stats.binomtest(count, n=nobs, p=prop).pvalue\n except AttributeError:\n # Remove after min SciPy >= 1.7\n pval = stats.binom_test(count, n=nobs, p=prop)\n elif alternative in ['l', 'larger']:\n pval = stats.binom.sf(count-1, nobs, prop)\n elif alternative in ['s', 'smaller']:\n pval = stats.binom.cdf(count, nobs, prop)\n else:\n raise ValueError('alternative not recognized\\n'\n 'should be two-sided, larger or smaller')\n return pval\n\n\ndef power_binom_tost(low, upp, nobs, p_alt=None, alpha=0.05):\n if p_alt is None:\n p_alt = 0.5 * (low + upp)\n x_low, x_upp = binom_tost_reject_interval(low, upp, nobs, alpha=alpha)\n power = (stats.binom.cdf(x_upp, nobs, p_alt) -\n stats.binom.cdf(x_low-1, nobs, p_alt))\n return power\n\n\ndef power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm',\n variance_prop=None, discrete=True, continuity=0,\n critval_continuity=0):\n \"\"\"\n Power of proportions equivalence test based on normal distribution\n\n Parameters\n ----------\n low, upp : floats\n lower and upper limit of equivalence region\n nobs : int\n number of observations\n p_alt : float in (0,1)\n proportion under the alternative\n alpha : float in (0,1)\n significance level of the test\n dist : str in ['norm', 'binom']\n This defines the distribution to evaluate the power of the test. The\n critical values of the TOST test are always based on the normal\n approximation, but the distribution for the power can be either the\n normal (default) or the binomial (exact) distribution.\n variance_prop : None or float in (0,1)\n If this is None, then the variances for the two one sided tests are\n based on the proportions equal to the equivalence limits.\n If variance_prop is given, then it is used to calculate the variance\n for the TOST statistics. If this is based on an sample, then the\n estimated proportion can be used.\n discrete : bool\n If true, then the critical values of the rejection region are converted\n to integers. 
If dist is \"binom\", this is automatically assumed.\n If discrete is false, then the TOST critical values are used as\n floating point numbers, and the power is calculated based on the\n rejection region that is not discretized.\n continuity : bool or float\n adjust the rejection region for the normal power probability. This has\n and effect only if ``dist='norm'``\n critval_continuity : bool or float\n If this is non-zero, then the critical values of the tost rejection\n region are adjusted before converting to integers. This affects both\n distributions, ``dist='norm'`` and ``dist='binom'``.\n\n Returns\n -------\n power : float\n statistical power of the equivalence test.\n (k_low, k_upp, z_low, z_upp) : tuple of floats\n critical limits in intermediate steps\n temporary return, will be changed\n\n Notes\n -----\n In small samples the power for the ``discrete`` version, has a sawtooth\n pattern as a function of the number of observations. As a consequence,\n small changes in the number of observations or in the normal approximation\n can have a large effect on the power.\n\n ``continuity`` and ``critval_continuity`` are added to match some results\n of PASS, and are mainly to investigate the sensitivity of the ztost power\n to small changes in the rejection region. From my interpretation of the\n equations in the SAS manual, both are zero in SAS.\n\n works vectorized\n\n **verification:**\n\n The ``dist='binom'`` results match PASS,\n The ``dist='norm'`` results look reasonable, but no benchmark is available.\n\n References\n ----------\n SAS Manual: Chapter 68: The Power Procedure, Computational Resources\n PASS Chapter 110: Equivalence Tests for One Proportion.\n\n \"\"\"\n mean_low = low\n var_low = std_prop(low, nobs)**2\n mean_upp = upp\n var_upp = std_prop(upp, nobs)**2\n mean_alt = p_alt\n var_alt = std_prop(p_alt, nobs)**2\n if variance_prop is not None:\n var_low = var_upp = std_prop(variance_prop, nobs)**2\n power = _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,\n alpha=alpha, discrete=discrete, dist=dist, nobs=nobs,\n continuity=continuity, critval_continuity=critval_continuity)\n return np.maximum(power[0], 0), power[1:]\n\n\ndef _table_proportion(count, nobs):\n \"\"\"\n Create a k by 2 contingency table for proportion\n\n helper function for proportions_chisquare\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials.\n nobs : int\n the number of trials or observations.\n\n Returns\n -------\n table : ndarray\n (k, 2) contingency table\n\n Notes\n -----\n recent scipy has more elaborate contingency table functions\n\n \"\"\"\n count = np.asarray(count)\n dt = np.promote_types(count.dtype, np.float64)\n count = np.asarray(count, dtype=dt)\n table = np.column_stack((count, nobs - count))\n expected = table.sum(0) * table.sum(1)[:, None] * 1. / table.sum()\n n_rows = table.shape[0]\n return table, expected, n_rows\n\n\ndef proportions_ztest(count, nobs, value=None, alternative='two-sided',\n prop_var=False):\n \"\"\"\n Test for proportions based on normal (z) test\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials. 
If this is array_like, then\n the assumption is that this represents the number of successes for\n each independent sample\n nobs : {int, array_like}\n the number of trials or observations, with the same length as\n count.\n value : float, array_like or None, optional\n This is the value of the null hypothesis equal to the proportion in the\n case of a one sample test. In the case of a two-sample test, the\n null hypothesis is that prop[0] - prop[1] = value, where prop is the\n proportion in the two samples. If not provided value = 0 and the null\n is prop[0] = prop[1]\n alternative : str in ['two-sided', 'smaller', 'larger']\n The alternative hypothesis can be either two-sided or one of the one-\n sided tests, smaller means that the alternative hypothesis is\n ``prop < value`` and larger means ``prop > value``. In the two sample\n test, smaller means that the alternative hypothesis is ``p1 < p2`` and\n larger means ``p1 > p2`` where ``p1`` is the proportion of the first\n sample and ``p2`` of the second one.\n prop_var : False or float in (0, 1)\n If prop_var is false, then the variance of the proportion estimate is\n calculated based on the sample proportion. Alternatively, a proportion\n can be specified to calculate this variance. Common use case is to\n use the proportion under the Null hypothesis to specify the variance\n of the proportion estimate.\n\n Returns\n -------\n zstat : float\n test statistic for the z-test\n p-value : float\n p-value for the z-test\n\n Examples\n --------\n >>> count = 5\n >>> nobs = 83\n >>> value = .05\n >>> stat, pval = proportions_ztest(count, nobs, value)\n >>> print('{0:0.3f}'.format(pval))\n 0.695\n\n >>> import numpy as np\n >>> from statsmodels.stats.proportion import proportions_ztest\n >>> count = np.array([5, 12])\n >>> nobs = np.array([83, 99])\n >>> stat, pval = proportions_ztest(count, nobs)\n >>> print('{0:0.3f}'.format(pval))\n 0.159\n\n Notes\n -----\n This uses a simple normal test for proportions. It should be the same as\n running the mean z-test on the data encoded 1 for event and 0 for no event\n so that the sum corresponds to the count.\n\n In the one and two sample cases with two-sided alternative, this test\n produces the same p-value as ``proportions_chisquare``, since the\n chisquare is the distribution of the square of a standard normal\n distribution.\n \"\"\"\n # TODO: verify that this really holds\n # TODO: add continuity correction or other improvements for small samples\n # TODO: change options similar to propotion_ztost ?\n\n count = np.asarray(count)\n nobs = np.asarray(nobs)\n\n if nobs.size == 1:\n nobs = nobs * np.ones_like(count)\n\n prop = count * 1. / nobs\n k_sample = np.size(prop)\n if value is None:\n if k_sample == 1:\n raise ValueError('value must be provided for a 1-sample test')\n value = 0\n if k_sample == 1:\n diff = prop - value\n elif k_sample == 2:\n diff = prop[0] - prop[1] - value\n else:\n msg = 'more than two samples are not implemented yet'\n raise NotImplementedError(msg)\n\n p_pooled = np.sum(count) * 1. / np.sum(nobs)\n\n nobs_fact = np.sum(1. / nobs)\n if prop_var:\n p_pooled = prop_var\n var_ = p_pooled * (1 - p_pooled) * nobs_fact\n std_diff = np.sqrt(var_)\n from statsmodels.stats.weightstats import _zstat_generic2\n return _zstat_generic2(diff, std_diff, alternative)\n\n\ndef proportions_ztost(count, nobs, low, upp, prop_var='sample'):\n \"\"\"\n Equivalence test based on normal distribution\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials. 
If this is array_like, then\n the assumption is that this represents the number of successes for\n each independent sample\n nobs : int\n the number of trials or observations, with the same length as\n count.\n low, upp : float\n equivalence interval low < prop1 - prop2 < upp\n prop_var : str or float in (0, 1)\n prop_var determines which proportion is used for the calculation\n of the standard deviation of the proportion estimate\n The available options for string are 'sample' (default), 'null' and\n 'limits'. If prop_var is a float, then it is used directly.\n\n Returns\n -------\n pvalue : float\n pvalue of the non-equivalence test\n t1, pv1 : tuple of floats\n test statistic and pvalue for lower threshold test\n t2, pv2 : tuple of floats\n test statistic and pvalue for upper threshold test\n\n Notes\n -----\n checked only for 1 sample case\n\n \"\"\"\n if prop_var == 'limits':\n prop_var_low = low\n prop_var_upp = upp\n elif prop_var == 'sample':\n prop_var_low = prop_var_upp = False #ztest uses sample\n elif prop_var == 'null':\n prop_var_low = prop_var_upp = 0.5 * (low + upp)\n elif np.isreal(prop_var):\n prop_var_low = prop_var_upp = prop_var\n\n tt1 = proportions_ztest(count, nobs, alternative='larger',\n prop_var=prop_var_low, value=low)\n tt2 = proportions_ztest(count, nobs, alternative='smaller',\n prop_var=prop_var_upp, value=upp)\n return np.maximum(tt1[1], tt2[1]), tt1, tt2,\n\n\ndef proportions_chisquare(count, nobs, value=None):\n \"\"\"\n Test for proportions based on chisquare test\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials. If this is array_like, then\n the assumption is that this represents the number of successes for\n each independent sample\n nobs : int\n the number of trials or observations, with the same length as\n count.\n value : None or float or array_like\n\n Returns\n -------\n chi2stat : float\n test statistic for the chisquare test\n p-value : float\n p-value for the chisquare test\n (table, expected)\n table is a (k, 2) contingency table, ``expected`` is the corresponding\n table of counts that are expected under independence with given\n margins\n\n Notes\n -----\n Recent version of scipy.stats have a chisquare test for independence in\n contingency tables.\n\n This function provides a similar interface to chisquare tests as\n ``prop.test`` in R, however without the option for Yates continuity\n correction.\n\n count can be the count for the number of events for a single proportion,\n or the counts for several independent proportions. If value is given, then\n all proportions are jointly tested against this value. 
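(As a purely illustrative example, a two-sample call such as proportions_chisquare(np.array([20, 30]), np.array([100, 100])) returns the chi-square statistic, its p-value and the (observed, expected) tables for a test of equal proportions.) 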
If value is not\n given and count and nobs are not scalar, then the null hypothesis is\n that all samples have the same proportion.\n\n \"\"\"\n nobs = np.atleast_1d(nobs)\n table, expected, n_rows = _table_proportion(count, nobs)\n if value is not None:\n expected = np.column_stack((nobs * value, nobs * (1 - value)))\n ddof = n_rows - 1\n else:\n ddof = n_rows\n\n #print table, expected\n chi2stat, pval = stats.chisquare(table.ravel(), expected.ravel(),\n ddof=ddof)\n return chi2stat, pval, (table, expected)\n\n\ndef proportions_chisquare_allpairs(count, nobs, multitest_method='hs'):\n \"\"\"\n Chisquare test of proportions for all pairs of k samples\n\n Performs a chisquare test for proportions for all pairwise comparisons.\n The alternative is two-sided\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials.\n nobs : int\n the number of trials or observations.\n multitest_method : str\n This chooses the method for the multiple testing p-value correction,\n that is used as default in the results.\n It can be any method that is available in ``multipletesting``.\n The default is Holm-Sidak 'hs'.\n\n Returns\n -------\n result : AllPairsResults instance\n The returned results instance has several statistics, such as p-values,\n attached, and additional methods for using a non-default\n ``multitest_method``.\n\n Notes\n -----\n Yates continuity correction is not available.\n \"\"\"\n #all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))\n all_pairs = lzip(*np.triu_indices(len(count), 1))\n pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)])[1]\n for pair in all_pairs]\n return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)\n\n\ndef proportions_chisquare_pairscontrol(count, nobs, value=None,\n multitest_method='hs', alternative='two-sided'):\n \"\"\"\n Chisquare test of proportions for pairs of k samples compared to control\n\n Performs a chisquare test for proportions for pairwise comparisons with a\n control (Dunnet's test). The control is assumed to be the first element\n of ``count`` and ``nobs``. 
The alternative is two-sided, larger or\n smaller.\n\n Parameters\n ----------\n count : {int, array_like}\n the number of successes in nobs trials.\n nobs : int\n the number of trials or observations.\n multitest_method : str\n This chooses the method for the multiple testing p-value correction,\n that is used as default in the results.\n It can be any method that is available in ``multipletesting``.\n The default is Holm-Sidak 'hs'.\n alternative : str in ['two-sided', 'smaller', 'larger']\n alternative hypothesis, which can be two-sided or either one of the\n one-sided tests.\n\n Returns\n -------\n result : AllPairsResults instance\n The returned results instance has several statistics, such as p-values,\n attached, and additional methods for using a non-default\n ``multitest_method``.\n\n\n Notes\n -----\n Yates continuity correction is not available.\n\n ``value`` and ``alternative`` options are not yet implemented.\n\n \"\"\"\n if (value is not None) or (alternative not in ['two-sided', '2s']):\n raise NotImplementedError\n #all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))\n all_pairs = [(0, k) for k in range(1, len(count))]\n pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)],\n #alternative=alternative)[1]\n )[1]\n for pair in all_pairs]\n return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)\n\n\ndef confint_proportions_2indep(count1, nobs1, count2, nobs2, method=None,\n compare='diff', alpha=0.05, correction=True):\n \"\"\"\n Confidence intervals for comparing two independent proportions.\n\n This assumes that we have two independent binomial samples.\n\n Parameters\n ----------\n count1, nobs1 : float\n Count and sample size for first sample.\n count2, nobs2 : float\n Count and sample size for the second sample.\n method : str\n Method for computing confidence interval. If method is None, then a\n default method is used. The default might change as more methods are\n added.\n\n diff:\n - 'wald',\n - 'agresti-caffo'\n - 'newcomb' (default)\n - 'score'\n\n ratio:\n - 'log'\n - 'log-adjusted' (default)\n - 'score'\n\n odds-ratio:\n - 'logit'\n - 'logit-adjusted' (default)\n - 'score'\n\n compare : string in ['diff', 'ratio' 'odds-ratio']\n If compare is diff, then the confidence interval is for diff = p1 - p2.\n If compare is ratio, then the confidence interval is for the risk ratio\n defined by ratio = p1 / p2.\n If compare is odds-ratio, then the confidence interval is for the\n odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2).\n alpha : float\n Significance level for the confidence interval, default is 0.05.\n The nominal coverage probability is 1 - alpha.\n\n Returns\n -------\n low, upp\n\n See Also\n --------\n test_proportions_2indep\n tost_proportions_2indep\n\n Notes\n -----\n Status: experimental, API and defaults might still change.\n more ``methods`` will be added.\n\n References\n ----------\n .. [1] Fagerland, Morten W., Stian Lydersen, and Petter Laake. 2015.\n “Recommended Confidence Intervals for Two Independent Binomial\n Proportions.” Statistical Methods in Medical Research 24 (2): 224–54.\n https://doi.org/10.1177/0962280211415469.\n .. [2] Koopman, P. A. R. 1984. “Confidence Intervals for the Ratio of Two\n Binomial Proportions.” Biometrics 40 (2): 513–17.\n https://doi.org/10.2307/2531405.\n .. [3] Miettinen, Olli, and Markku Nurminen. \"Comparative analysis of two\n rates.\" Statistics in medicine 4, no. 2 (1985): 213-226.\n .. [4] Newcombe, Robert G. 1998. 
“Interval Estimation for the Difference\n between Independent Proportions: Comparison of Eleven Methods.”\n Statistics in Medicine 17 (8): 873–90.\n https://doi.org/10.1002/(SICI)1097-0258(19980430)17:8<873::AID-\n SIM779>3.0.CO;2-I.\n .. [5] Newcombe, Robert G., and Markku M. Nurminen. 2011. “In Defence of\n Score Intervals for Proportions and Their Differences.” Communications\n in Statistics - Theory and Methods 40 (7): 1271–82.\n https://doi.org/10.1080/03610920903576580.\n \"\"\"\n method_default = {'diff': 'newcomb',\n 'ratio': 'log-adjusted',\n 'odds-ratio': 'logit-adjusted'}\n # normalize compare name\n if compare.lower() == 'or':\n compare = 'odds-ratio'\n if method is None:\n method = method_default[compare]\n\n method = method.lower()\n if method.startswith('agr'):\n method = 'agresti-caffo'\n\n p1 = count1 / nobs1\n p2 = count2 / nobs2\n diff = p1 - p2\n addone = 1 if method == 'agresti-caffo' else 0\n\n if compare == 'diff':\n if method in ['wald', 'agresti-caffo']:\n count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone\n count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n diff_ = p1_ - p2_\n var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_\n z = stats.norm.isf(alpha / 2)\n d_wald = z * np.sqrt(var)\n low = diff_ - d_wald\n upp = diff_ + d_wald\n\n elif method.startswith('newcomb'):\n low1, upp1 = proportion_confint(count1, nobs1,\n method='wilson', alpha=alpha)\n low2, upp2 = proportion_confint(count2, nobs2,\n method='wilson', alpha=alpha)\n d_low = np.sqrt((p1 - low1)**2 + (upp2 - p2)**2)\n d_upp = np.sqrt((p2 - low2)**2 + (upp1 - p1)**2)\n low = diff - d_low\n upp = diff + d_upp\n\n elif method == \"score\":\n low, upp = _score_confint_inversion(count1, nobs1, count2, nobs2,\n compare=compare, alpha=alpha,\n correction=correction)\n\n else:\n raise ValueError('method not recognized')\n\n elif compare == 'ratio':\n # ratio = p1 / p2\n if method in ['log', 'log-adjusted']:\n addhalf = 0.5 if method == 'log-adjusted' else 0\n count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf\n count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n ratio_ = p1_ / p2_\n var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_\n z = stats.norm.isf(alpha / 2)\n d_log = z * np.sqrt(var)\n low = np.exp(np.log(ratio_) - d_log)\n upp = np.exp(np.log(ratio_) + d_log)\n\n elif method == 'score':\n res = _confint_riskratio_koopman(count1, nobs1, count2, nobs2,\n alpha=alpha,\n correction=correction)\n low, upp = res.confint\n\n else:\n raise ValueError('method not recognized')\n\n elif compare == 'odds-ratio':\n # odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)\n if method in ['logit', 'logit-adjusted', 'logit-smoothed']:\n if method in ['logit-smoothed']:\n adjusted = _shrink_prob(count1, nobs1, count2, nobs2,\n shrink_factor=2, return_corr=False)[0]\n count1_, nobs1_, count2_, nobs2_ = adjusted\n\n else:\n addhalf = 0.5 if method == 'logit-adjusted' else 0\n count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf\n count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)\n var = (1 / count1_ + 1 / (nobs1_ - count1_) +\n 1 / count2_ + 1 / (nobs2_ - count2_))\n z = stats.norm.isf(alpha / 2)\n d_log = z * np.sqrt(var)\n low = np.exp(np.log(odds_ratio_) - d_log)\n upp = np.exp(np.log(odds_ratio_) + d_log)\n\n elif method == \"score\":\n low, upp = _score_confint_inversion(count1, nobs1, count2, 
nobs2,\n compare=compare, alpha=alpha,\n correction=correction)\n\n else:\n raise ValueError('method not recognized')\n\n else:\n raise ValueError('compare not recognized')\n\n return low, upp\n\n\ndef _shrink_prob(count1, nobs1, count2, nobs2, shrink_factor=2,\n return_corr=True):\n \"\"\"\n Shrink observed counts towards independence\n\n Helper function for 'logit-smoothed' inference for the odds-ratio of two\n independent proportions.\n\n Parameters\n ----------\n count1, nobs1 : float or int\n count and sample size for first sample\n count2, nobs2 : float or int\n count and sample size for the second sample\n shrink_factor : float\n This corresponds to the number of observations that are added in total\n proportional to the probabilities under independence.\n return_corr : bool\n If true, then only the correction term is returned\n If false, then the corrected counts, i.e. original counts plus\n correction term, are returned.\n\n Returns\n -------\n count1_corr, nobs1_corr, count2_corr, nobs2_corr : float\n correction or corrected counts\n prob_indep :\n TODO/Warning : this will change most likely\n probabilities under independence, only returned if return_corr is\n false.\n\n \"\"\"\n vectorized = any(np.size(i) > 1 for i in [count1, nobs1, count2, nobs2])\n if vectorized:\n raise ValueError(\"function is not vectorized\")\n nobs_col = np.array([count1 + count2, nobs1 - count1 + nobs2 - count2])\n nobs_row = np.array([nobs1, nobs2])\n nobs = nobs1 + nobs2\n prob_indep = (nobs_col * nobs_row[:, None]) / nobs**2\n corr = shrink_factor * prob_indep\n if return_corr:\n return (corr[0, 0], corr[0].sum(), corr[1, 0], corr[1].sum())\n else:\n return (count1 + corr[0, 0], nobs1 + corr[0].sum(),\n count2 + corr[1, 0], nobs2 + corr[1].sum()), prob_indep\n\n\ndef score_test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,\n compare='diff', alternative='two-sided',\n correction=True, return_results=True):\n \"\"\"\n Score test for two independent proportions\n\n This uses the constrained estimate of the proportions to compute\n the variance under the Null hypothesis.\n\n Parameters\n ----------\n count1, nobs1 :\n count and sample size for first sample\n count2, nobs2 :\n count and sample size for the second sample\n value : float\n diff, ratio or odds-ratio under the null hypothesis. If value is None,\n then equality of proportions under the Null is assumed,\n i.e. 
value=0 for 'diff' or value=1 for either rate or odds-ratio.\n compare : string in ['diff', 'ratio' 'odds-ratio']\n If compare is diff, then the confidence interval is for diff = p1 - p2.\n If compare is ratio, then the confidence interval is for the risk ratio\n defined by ratio = p1 / p2.\n If compare is odds-ratio, then the confidence interval is for the\n odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)\n return_results : bool\n If true, then a results instance with extra information is returned,\n otherwise a tuple with statistic and pvalue is returned.\n\n Returns\n -------\n results : results instance or tuple\n If return_results is True, then a results instance with the\n information in attributes is returned.\n If return_results is False, then only ``statistic`` and ``pvalue``\n are returned.\n\n statistic : float\n test statistic asymptotically normal distributed N(0, 1)\n pvalue : float\n p-value based on normal distribution\n other attributes :\n additional information about the hypothesis test\n\n Notes\n -----\n Status: experimental, the type or extra information in the return might\n change.\n\n \"\"\"\n\n value_default = 0 if compare == 'diff' else 1\n if value is None:\n # TODO: odds ratio does not work if value=1\n value = value_default\n\n nobs = nobs1 + nobs2\n count = count1 + count2\n p1 = count1 / nobs1\n p2 = count2 / nobs2\n if value == value_default:\n # use pooled estimator if equality test\n # shortcut, but required for odds ratio\n prop0 = prop1 = count / nobs\n # this uses index 0 from Miettinen Nurminned 1985\n count0, nobs0 = count2, nobs2\n p0 = p2\n\n if compare == 'diff':\n diff = value # hypothesis value\n\n if diff != 0:\n tmp3 = nobs\n tmp2 = (nobs1 + 2 * nobs0) * diff - nobs - count\n tmp1 = (count0 * diff - nobs - 2 * count0) * diff + count\n tmp0 = count0 * diff * (1 - diff)\n q = ((tmp2 / (3 * tmp3))**3 - tmp1 * tmp2 / (6 * tmp3**2) +\n tmp0 / (2 * tmp3))\n p = np.sign(q) * np.sqrt((tmp2 / (3 * tmp3))**2 -\n tmp1 / (3 * tmp3))\n a = (np.pi + np.arccos(q / p**3)) / 3\n\n prop0 = 2 * p * np.cos(a) - tmp2 / (3 * tmp3)\n prop1 = prop0 + diff\n\n var = prop1 * (1 - prop1) / nobs1 + prop0 * (1 - prop0) / nobs0\n if correction:\n var *= nobs / (nobs - 1)\n\n diff_stat = (p1 - p0 - diff)\n\n elif compare == 'ratio':\n # risk ratio\n ratio = value\n\n if ratio != 1:\n a = nobs * ratio\n b = -(nobs1 * ratio + count1 + nobs2 + count0 * ratio)\n c = count\n prop0 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)\n prop1 = prop0 * ratio\n\n var = (prop1 * (1 - prop1) / nobs1 +\n ratio**2 * prop0 * (1 - prop0) / nobs0)\n if correction:\n var *= nobs / (nobs - 1)\n\n # NCSS looks incorrect for var, but it is what should be reported\n # diff_stat = (p1 / p0 - ratio) # NCSS/PASS\n diff_stat = (p1 - ratio * p0) # Miettinen Nurminen\n\n elif compare in ['or', 'odds-ratio']:\n # odds ratio\n oratio = value\n\n if oratio != 1:\n # Note the constraint estimator does not handle odds-ratio = 1\n a = nobs0 * (oratio - 1)\n b = nobs1 * oratio + nobs0 - count * (oratio - 1)\n c = -count\n prop0 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)\n prop1 = prop0 * oratio / (1 + prop0 * (oratio - 1))\n\n # try to avoid 0 and 1 proportions,\n # those raise Zero Division Runtime Warnings\n eps = 1e-10\n prop0 = np.clip(prop0, eps, 1 - eps)\n prop1 = np.clip(prop1, eps, 1 - eps)\n\n var = (1 / (prop1 * (1 - prop1) * nobs1) +\n 1 / (prop0 * (1 - prop0) * nobs0))\n if correction:\n var *= nobs / (nobs - 1)\n\n diff_stat = ((p1 - prop1) / (prop1 * (1 - prop1)) -\n (p0 - prop0) / (prop0 
* (1 - prop0)))\n\n statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),\n alternative=alternative)\n\n if return_results:\n res = HolderTuple(statistic=statistic,\n pvalue=pvalue,\n compare=compare,\n method='score',\n variance=var,\n alternative=alternative,\n prop1_null=prop1,\n prop2_null=prop0,\n )\n return res\n else:\n return statistic, pvalue\n\n\ndef test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,\n method=None, compare='diff',\n alternative='two-sided', correction=True,\n return_results=True):\n \"\"\"\n Hypothesis test for comparing two independent proportions\n\n This assumes that we have two independent binomial samples.\n\n The Null and alternative hypothesis are\n\n for compare = 'diff'\n\n - H0: prop1 - prop2 - value = 0\n - H1: prop1 - prop2 - value != 0 if alternative = 'two-sided'\n - H1: prop1 - prop2 - value > 0 if alternative = 'larger'\n - H1: prop1 - prop2 - value < 0 if alternative = 'smaller'\n\n for compare = 'ratio'\n\n - H0: prop1 / prop2 - value = 0\n - H1: prop1 / prop2 - value != 0 if alternative = 'two-sided'\n - H1: prop1 / prop2 - value > 0 if alternative = 'larger'\n - H1: prop1 / prop2 - value < 0 if alternative = 'smaller'\n\n for compare = 'odds-ratio'\n\n - H0: or - value = 0\n - H1: or - value != 0 if alternative = 'two-sided'\n - H1: or - value > 0 if alternative = 'larger'\n - H1: or - value < 0 if alternative = 'smaller'\n\n where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))\n\n Parameters\n ----------\n count1 : int\n Count for first sample.\n nobs1 : int\n Sample size for first sample.\n count2 : int\n Count for the second sample.\n nobs2 : int\n Sample size for the second sample.\n value : float\n Value of the difference, risk ratio or odds ratio of 2 independent\n proportions under the null hypothesis.\n Default is equal proportions, 0 for diff and 1 for risk-ratio and for\n odds-ratio.\n method : string\n Method for computing the hypothesis test. If method is None, then a\n default method is used. 
The default might change as more methods are\n added.\n\n diff:\n\n - 'wald',\n - 'agresti-caffo'\n - 'score' if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985\n\n ratio:\n\n - 'log': wald test using log transformation\n - 'log-adjusted': wald test using log transformation,\n adds 0.5 to counts\n - 'score': if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985\n\n odds-ratio:\n\n - 'logit': wald test using logit transformation\n - 'logit-adjusted': wald test using logit transformation,\n adds 0.5 to counts\n - 'logit-smoothed': wald test using logit transformation, biases\n cell counts towards independence by adding two observations in\n total.\n - 'score' if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985\n\n compare : {'diff', 'ratio' 'odds-ratio'}\n If compare is `diff`, then the hypothesis test is for the risk\n difference diff = p1 - p2.\n If compare is `ratio`, then the hypothesis test is for the\n risk ratio defined by ratio = p1 / p2.\n If compare is `odds-ratio`, then the hypothesis test is for the\n odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)\n alternative : {'two-sided', 'smaller', 'larger'}\n alternative hypothesis, which can be two-sided or either one of the\n one-sided tests.\n correction : bool\n If correction is True (default), then the Miettinen and Nurminen\n small sample correction to the variance nobs / (nobs - 1) is used.\n Applies only if method='score'.\n return_results : bool\n If true, then a results instance with extra information is returned,\n otherwise a tuple with statistic and pvalue is returned.\n\n Returns\n -------\n results : results instance or tuple\n If return_results is True, then a results instance with the\n information in attributes is returned.\n If return_results is False, then only ``statistic`` and ``pvalue``\n are returned.\n\n statistic : float\n test statistic asymptotically normal distributed N(0, 1)\n pvalue : float\n p-value based on normal distribution\n other attributes :\n additional information about the hypothesis test\n\n See Also\n --------\n tost_proportions_2indep\n confint_proportions_2indep\n\n Notes\n -----\n Status: experimental, API and defaults might still change.\n More ``methods`` will be added.\n\n The current default methods are\n\n - 'diff': 'agresti-caffo',\n - 'ratio': 'log-adjusted',\n - 'odds-ratio': 'logit-adjusted'\n\n \"\"\"\n method_default = {'diff': 'agresti-caffo',\n 'ratio': 'log-adjusted',\n 'odds-ratio': 'logit-adjusted'}\n # normalize compare name\n if compare.lower() == 'or':\n compare = 'odds-ratio'\n if method is None:\n method = method_default[compare]\n\n method = method.lower()\n if method.startswith('agr'):\n method = 'agresti-caffo'\n\n if value is None:\n # TODO: odds ratio does not work if value=1 for score test\n value = 0 if compare == 'diff' else 1\n\n count1, nobs1, count2, nobs2 = map(np.asarray,\n [count1, nobs1, count2, nobs2])\n\n p1 = count1 / nobs1\n p2 = count2 / nobs2\n diff = p1 - p2\n ratio = p1 / p2\n odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)\n res = None\n\n if compare == 'diff':\n if method in ['wald', 'agresti-caffo']:\n addone = 1 if method == 'agresti-caffo' else 0\n count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone\n count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n diff_stat = p1_ 
- p2_ - value\n var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_\n statistic = diff_stat / np.sqrt(var)\n distr = 'normal'\n\n elif method.startswith('newcomb'):\n msg = 'newcomb not available for hypothesis test'\n raise NotImplementedError(msg)\n\n elif method == 'score':\n # Note score part is the same call for all compare\n res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n value=value, compare=compare,\n alternative=alternative,\n correction=correction,\n return_results=return_results)\n if return_results is False:\n statistic, pvalue = res[:2]\n distr = 'normal'\n # TODO/Note score_test_proportion_2samp returns statistic and\n # not diff_stat\n diff_stat = None\n else:\n raise ValueError('method not recognized')\n\n elif compare == 'ratio':\n if method in ['log', 'log-adjusted']:\n addhalf = 0.5 if method == 'log-adjusted' else 0\n count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf\n count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n ratio_ = p1_ / p2_\n var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_\n diff_stat = np.log(ratio_) - np.log(value)\n statistic = diff_stat / np.sqrt(var)\n distr = 'normal'\n\n elif method == 'score':\n res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n value=value, compare=compare,\n alternative=alternative,\n correction=correction,\n return_results=return_results)\n if return_results is False:\n statistic, pvalue = res[:2]\n distr = 'normal'\n diff_stat = None\n\n else:\n raise ValueError('method not recognized')\n\n elif compare == \"odds-ratio\":\n\n if method in ['logit', 'logit-adjusted', 'logit-smoothed']:\n if method in ['logit-smoothed']:\n adjusted = _shrink_prob(count1, nobs1, count2, nobs2,\n shrink_factor=2, return_corr=False)[0]\n count1_, nobs1_, count2_, nobs2_ = adjusted\n\n else:\n addhalf = 0.5 if method == 'logit-adjusted' else 0\n count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf\n count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf\n p1_ = count1_ / nobs1_\n p2_ = count2_ / nobs2_\n odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)\n var = (1 / count1_ + 1 / (nobs1_ - count1_) +\n 1 / count2_ + 1 / (nobs2_ - count2_))\n\n diff_stat = np.log(odds_ratio_) - np.log(value)\n statistic = diff_stat / np.sqrt(var)\n distr = 'normal'\n\n elif method == 'score':\n res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,\n value=value, compare=compare,\n alternative=alternative,\n correction=correction,\n return_results=return_results)\n if return_results is False:\n statistic, pvalue = res[:2]\n distr = 'normal'\n diff_stat = None\n else:\n raise ValueError('method \"%s\" not recognized' % method)\n\n else:\n raise ValueError('compare \"%s\" not recognized' % compare)\n\n if distr == 'normal' and diff_stat is not None:\n statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),\n alternative=alternative)\n\n if return_results:\n if res is None:\n res = HolderTuple(statistic=statistic,\n pvalue=pvalue,\n compare=compare,\n method=method,\n diff=diff,\n ratio=ratio,\n odds_ratio=odds_ratio,\n variance=var,\n alternative=alternative,\n value=value,\n )\n else:\n # we already have a return result from score test\n # add missing attributes\n res.diff = diff\n res.ratio = ratio\n res.odds_ratio = odds_ratio\n res.value = value\n return res\n else:\n return statistic, pvalue\n\n\ndef tost_proportions_2indep(count1, nobs1, count2, nobs2, low, upp,\n method=None, compare='diff', correction=True):\n \"\"\"\n Equivalence test 
based on two one-sided `test_proportions_2indep`\n\n This assumes that we have two independent binomial samples.\n\n The Null and alternative hypothesis for equivalence testing are\n\n for compare = 'diff'\n\n - H0: prop1 - prop2 <= low or upp <= prop1 - prop2\n - H1: low < prop1 - prop2 < upp\n\n for compare = 'ratio'\n\n - H0: prop1 / prop2 <= low or upp <= prop1 / prop2\n - H1: low < prop1 / prop2 < upp\n\n\n for compare = 'odds-ratio'\n\n - H0: or <= low or upp <= or\n - H1: low < or < upp\n\n where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))\n\n Parameters\n ----------\n count1, nobs1 :\n count and sample size for first sample\n count2, nobs2 :\n count and sample size for the second sample\n low, upp :\n equivalence margin for diff, risk ratio or odds ratio\n method : string\n method for computing the hypothesis test. If method is None, then a\n default method is used. The default might change as more methods are\n added.\n\n diff:\n - 'wald',\n - 'agresti-caffo'\n - 'score' if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.\n\n ratio:\n - 'log': wald test using log transformation\n - 'log-adjusted': wald test using log transformation,\n adds 0.5 to counts\n - 'score' if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.\n\n odds-ratio:\n - 'logit': wald test using logit transformation\n - 'logit-adjusted': : wald test using logit transformation,\n adds 0.5 to counts\n - 'logit-smoothed': : wald test using logit transformation, biases\n cell counts towards independence by adding two observations in\n total.\n - 'score' if correction is True, then this uses the degrees of freedom\n correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985\n\n compare : string in ['diff', 'ratio' 'odds-ratio']\n If compare is `diff`, then the hypothesis test is for\n diff = p1 - p2.\n If compare is `ratio`, then the hypothesis test is for the\n risk ratio defined by ratio = p1 / p2.\n If compare is `odds-ratio`, then the hypothesis test is for the\n odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2).\n correction : bool\n If correction is True (default), then the Miettinen and Nurminen\n small sample correction to the variance nobs / (nobs - 1) is used.\n Applies only if method='score'.\n\n Returns\n -------\n pvalue : float\n p-value is the max of the pvalues of the two one-sided tests\n t1 : test results\n results instance for one-sided hypothesis at the lower margin\n t1 : test results\n results instance for one-sided hypothesis at the upper margin\n\n See Also\n --------\n test_proportions_2indep\n confint_proportions_2indep\n\n Notes\n -----\n Status: experimental, API and defaults might still change.\n\n The TOST equivalence test delegates to `test_proportions_2indep` and has\n the same method and comparison options.\n\n \"\"\"\n\n tt1 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=low,\n method=method, compare=compare,\n alternative='larger',\n correction=correction,\n return_results=True)\n tt2 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=upp,\n method=method, compare=compare,\n alternative='smaller',\n correction=correction,\n return_results=True)\n\n # idx_max = 1 if t1.pvalue < t2.pvalue else 0\n idx_max = np.asarray(tt1.pvalue < tt2.pvalue, int)\n statistic = np.choose(idx_max, [tt1.statistic, tt2.statistic])\n pvalue = np.choose(idx_max, [tt1.pvalue, tt2.pvalue])\n\n res = 
HolderTuple(statistic=statistic,\n pvalue=pvalue,\n compare=compare,\n method=method,\n results_larger=tt1,\n results_smaller=tt2,\n title=\"Equivalence test for 2 independent proportions\"\n )\n\n return res\n\n\ndef _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0):\n \"\"\"\n Compute standard error under null and alternative for 2 proportions\n\n helper function for power and sample size computation\n\n \"\"\"\n if value != 0:\n msg = 'non-zero diff under null, value, is not yet implemented'\n raise NotImplementedError(msg)\n\n nobs_ratio = ratio\n p1 = p2 + diff\n # The following contains currently redundant variables that will\n # be useful for different options for the null variance\n p_pooled = (p1 + p2 * ratio) / (1 + ratio)\n # probabilities for the variance for the null statistic\n p1_vnull, p2_vnull = p_pooled, p_pooled\n p2_alt = p2\n p1_alt = p2_alt + diff\n\n std_null = _std_diff_prop(p1_vnull, p2_vnull, ratio=nobs_ratio)\n std_alt = _std_diff_prop(p1_alt, p2_alt, ratio=nobs_ratio)\n return p_pooled, std_null, std_alt\n\n\ndef power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05,\n value=0, alternative='two-sided',\n return_results=True):\n \"\"\"\n Power for ztest that two independent proportions are equal\n\n This assumes that the variance is based on the pooled proportion\n under the null and the non-pooled variance under the alternative\n\n Parameters\n ----------\n diff : float\n difference between proportion 1 and 2 under the alternative\n prop2 : float\n proportion for the reference case, prop2, proportions for the\n first case will be computed using p2 and diff\n p1 = p2 + diff\n nobs1 : float or int\n number of observations in sample 1\n ratio : float\n sample size ratio, nobs2 = ratio * nobs1\n alpha : float in interval (0,1)\n Significance level, e.g. 0.05, is the probability of a type I\n error, that is wrong rejections if the Null Hypothesis is true.\n value : float\n currently only `value=0`, i.e. equality testing, is supported\n alternative : string, 'two-sided' (default), 'larger', 'smaller'\n Alternative hypothesis whether the power is calculated for a\n two-sided (default) or one sided test. The one-sided test can be\n either 'larger', 'smaller'.\n return_results : bool\n If true, then a results instance with extra information is returned,\n otherwise only the computed power is returned.\n\n Returns\n -------\n results : results instance or float\n If return_results is True, then a results instance with the\n information in attributes is returned.\n If return_results is False, then only the power is returned.\n\n power : float\n Power of the test, e.g. 0.8, is one minus the probability of a\n type II error. 
Power is the probability that the test correctly\n rejects the Null Hypothesis if the Alternative Hypothesis is true.\n\n Other attributes in results instance include :\n\n p_pooled\n pooled proportion, used for std_null\n std_null\n standard error of difference under the null hypothesis (without\n sqrt(nobs1))\n std_alt\n standard error of difference under the alternative hypothesis\n (without sqrt(nobs1))\n \"\"\"\n # TODO: avoid possible circular import, check if needed\n from statsmodels.stats.power import normal_power_het\n\n p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,\n alpha=alpha, value=value)\n\n pow_ = normal_power_het(diff, nobs1, alpha, std_null=std_null,\n std_alternative=std_alt,\n alternative=alternative)\n\n if return_results:\n res = Holder(power=pow_,\n p_pooled=p_pooled,\n std_null=std_null,\n std_alt=std_alt,\n nobs1=nobs1,\n nobs2=ratio * nobs1,\n nobs_ratio=ratio,\n alpha=alpha,\n )\n return res\n else:\n return pow_\n\n\ndef samplesize_proportions_2indep_onetail(diff, prop2, power, ratio=1,\n alpha=0.05, value=0,\n alternative='two-sided'):\n \"\"\"\n Required sample size assuming normal distribution based on one tail\n\n This uses an explicit computation for the sample size that is required\n to achieve a given power corresponding to the appropriate tails of the\n normal distribution. This ignores the far tail in a two-sided test\n which is negligible in the common case when alternative and null are\n far apart.\n\n Parameters\n ----------\n diff : float\n Difference between proportion 1 and 2 under the alternative\n prop2 : float\n proportion for the reference case, prop2, proportions for the\n first case will be computing using p2 and diff\n p1 = p2 + diff\n power : float\n Power for which sample size is computed.\n ratio : float\n Sample size ratio, nobs2 = ratio * nobs1\n alpha : float in interval (0,1)\n Significance level, e.g. 0.05, is the probability of a type I\n error, that is wrong rejections if the Null Hypothesis is true.\n value : float\n Currently only `value=0`, i.e. equality testing, is supported\n alternative : string, 'two-sided' (default), 'larger', 'smaller'\n Alternative hypothesis whether the power is calculated for a\n two-sided (default) or one sided test. 
In the case of a one-sided\n alternative, it is assumed that the test is in the appropriate tail.\n\n Returns\n -------\n nobs1 : float\n Number of observations in sample 1.\n \"\"\"\n # TODO: avoid possible circular import, check if needed\n from statsmodels.stats.power import normal_sample_size_one_tail\n\n if alternative in ['two-sided', '2s']:\n alpha = alpha / 2\n\n _, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,\n alpha=alpha, value=value)\n\n nobs = normal_sample_size_one_tail(diff, power, alpha, std_null=std_null,\n std_alternative=std_alt)\n return nobs\n\n\ndef _score_confint_inversion(count1, nobs1, count2, nobs2, compare='diff',\n alpha=0.05, correction=True):\n \"\"\"\n Compute score confidence interval by inverting score test\n\n Parameters\n ----------\n count1, nobs1 :\n Count and sample size for first sample.\n count2, nobs2 :\n Count and sample size for the second sample.\n compare : string in ['diff', 'ratio' 'odds-ratio']\n If compare is `diff`, then the confidence interval is for\n diff = p1 - p2.\n If compare is `ratio`, then the confidence interval is for the\n risk ratio defined by ratio = p1 / p2.\n If compare is `odds-ratio`, then the confidence interval is for the\n odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2).\n alpha : float in interval (0,1)\n Significance level, e.g. 0.05, is the probability of a type I\n error, that is wrong rejections if the Null Hypothesis is true.\n correction : bool\n If correction is True (default), then the Miettinen and Nurminen\n small sample correction to the variance nobs / (nobs - 1) is used.\n Applies only if method='score'.\n\n Returns\n -------\n low : float\n Lower confidence bound.\n upp : float\n Upper confidence bound.\n \"\"\"\n\n def func(v):\n r = test_proportions_2indep(count1, nobs1, count2, nobs2,\n value=v, compare=compare, method='score',\n correction=correction,\n alternative=\"two-sided\")\n return r.pvalue - alpha\n\n rt0 = test_proportions_2indep(count1, nobs1, count2, nobs2,\n value=0, compare=compare, method='score',\n correction=correction,\n alternative=\"two-sided\")\n\n # use default method to get starting values\n # this will not work if score confint becomes default\n # maybe use \"wald\" as alias that works for all compare statistics\n use_method = {\"diff\": \"wald\", \"ratio\": \"log\", \"odds-ratio\": \"logit\"}\n rci0 = confint_proportions_2indep(count1, nobs1, count2, nobs2,\n method=use_method[compare],\n compare=compare, alpha=alpha)\n\n # Note diff might be negative\n ub = rci0[1] + np.abs(rci0[1]) * 0.5\n lb = rci0[0] - np.abs(rci0[0]) * 0.25\n if compare == 'diff':\n param = rt0.diff\n # 1 might not be the correct upper bound because\n # rootfinding is for the `diff` and not for a probability.\n ub = min(ub, 0.99999)\n elif compare == 'ratio':\n param = rt0.ratio\n ub *= 2 # add more buffer\n if compare == 'odds-ratio':\n param = rt0.odds_ratio\n\n # root finding for confint bounds\n upp = optimize.brentq(func, param, ub)\n low = optimize.brentq(func, lb, param)\n return low, upp\n\n\ndef _confint_riskratio_koopman(count1, nobs1, count2, nobs2, alpha=0.05,\n correction=True):\n \"\"\"\n Score confidence interval for ratio or proportions, Koopman/Nam\n\n signature not consistent with other functions\n\n When correction is True, then the small sample correction nobs / (nobs - 1)\n by Miettinen/Nurminen is used.\n \"\"\"\n # The names below follow Nam\n x0, x1, n0, n1 = count2, count1, nobs2, nobs1\n x = x0 + x1\n n = n0 + n1\n z = stats.norm.isf(alpha / 2)**2\n if 
correction:\n # Mietinnen/Nurminen small sample correction\n z *= n / (n - 1)\n # z = stats.chi2.isf(alpha, 1)\n # equ 6 in Nam 1995\n a1 = n0 * (n0 * n * x1 + n1 * (n0 + x1) * z)\n a2 = - n0 * (n0 * n1 * x + 2 * n * x0 * x1 + n1 * (n0 + x0 + 2 * x1) * z)\n a3 = 2 * n0 * n1 * x0 * x + n * x0 * x0 * x1 + n0 * n1 * x * z\n a4 = - n1 * x0 * x0 * x\n\n p_roots_ = np.sort(np.roots([a1, a2, a3, a4]))\n p_roots = p_roots_[:2][::-1]\n\n # equ 5\n ci = (1 - (n1 - x1) * (1 - p_roots) / (x0 + n1 - n * p_roots)) / p_roots\n\n res = Holder()\n res.confint = ci\n res._p_roots = p_roots_ # for unit tests, can be dropped\n return res\n\n\ndef _confint_riskratio_paired_nam(table, alpha=0.05):\n \"\"\"\n Confidence interval for marginal risk ratio for matched pairs\n\n need full table\n\n success fail marginal\n success x11 x10 x1.\n fail x01 x00 x0.\n marginal x.1 x.0 n\n\n The confidence interval is for the ratio p1 / p0 where\n p1 = x1. / n and\n p0 - x.1 / n\n Todo: rename p1 to pa and p2 to pb, so we have a, b for treatment and\n 0, 1 for success/failure\n\n current namings follow Nam 2009\n\n status\n testing:\n compared to example in Nam 2009\n internal polynomial coefficients in calculation correspond at around\n 4 decimals\n confidence interval agrees only at 2 decimals\n\n \"\"\"\n x11, x10, x01, x00 = np.ravel(table)\n n = np.sum(table) # nobs\n p10, p01 = x10 / n, x01 / n\n p1 = (x11 + x10) / n\n p0 = (x11 + x01) / n\n q00 = 1 - x00 / n\n\n z2 = stats.norm.isf(alpha / 2)**2\n # z = stats.chi2.isf(alpha, 1)\n # before equ 3 in Nam 2009\n\n g1 = (n * p0 + z2 / 2) * p0\n g2 = - (2 * n * p1 * p0 + z2 * q00)\n g3 = (n * p1 + z2 / 2) * p1\n\n a0 = g1**2 - (z2 * p0 / 2)**2\n a1 = 2 * g1 * g2\n a2 = g2**2 + 2 * g1 * g3 + z2**2 * (p1 * p0 - 2 * p10 * p01) / 2\n a3 = 2 * g2 * g3\n a4 = g3**2 - (z2 * p1 / 2)**2\n\n p_roots = np.sort(np.roots([a0, a1, a2, a3, a4]))\n # p_roots = np.sort(np.roots([1, a1 / a0, a2 / a0, a3 / a0, a4 / a0]))\n\n ci = [p_roots.min(), p_roots.max()]\n res = Holder()\n res.confint = ci\n res.p = p1, p0\n res._p_roots = p_roots # for unit tests, can be dropped\n return res\n","repo_name":"statsmodels/statsmodels","sub_path":"statsmodels/stats/proportion.py","file_name":"proportion.py","file_ext":"py","file_size_in_byte":85651,"program_lang":"python","lang":"en","doc_type":"code","stars":9039,"dataset":"github-code","pt":"91"} +{"seq_id":"8030548140","text":"import unittest\n\nimport torch\n\nfrom python_util.torch_utils.running_average import running_average\n\n\nclass TestRollingAvg(unittest.TestCase):\n def test_pt_rolling_avg(self):\n out = running_average(10)\n out(torch.ones(100))\n next = out(torch.full([100], 2.0))\n assert all(next == 1.5 for next in next)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"haydenrear/python_util","sub_path":"test/torch_utils_tests/test_rolling_avg.py","file_name":"test_rolling_avg.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9719088046","text":"import random\nimport time\nfrom datetime import datetime\n\nimport vertica_python\nfrom tqdm.contrib.concurrent import process_map\n\ndns = {\n 'host': '127.0.0.1',\n 'port': 5433,\n 'user': 'dbadmin',\n 'password': '',\n 'database': 'docker',\n 'use_prepared_statements': True,\n}\n\nuser_ids = [str(x) for x in range(10000)]\nmovie_ids = [str(x) for x in range(10000)]\n\n\ndef database(dns):\n with vertica_python.connect(**dns) as connect:\n cursor = connect.cursor()\n 
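# create the 'views' table used by the insert benchmark\n        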
cursor.execute(\"\"\"\n CREATE TABLE views (\n id IDENTITY,\n user_id VARCHAR(36) NOT NULL,\n movie_id VARCHAR(36) NOT NULL,\n viewed_frame INTEGER NOT NULL,\n event_time DATETIME NOT NULL\n );\n \"\"\")\n\n\ndef generate_line() -> tuple:\n return (\n random.choice(user_ids),\n random.choice(movie_ids),\n random.randint(1, 180),\n datetime.now(),\n )\n\n\ndef insert_line(x):\n connect = vertica_python.connect(**dns)\n cursor = connect.cursor()\n values = [generate_line() for _ in range(1000)]\n start = time.time()\n try:\n cursor.executemany(\n 'INSERT INTO views('\n 'user_id, movie_id, viewed_frame, event_time) VALUES (?,?,?,?)', values)\n except Exception as e:\n raise e\n cursor.close()\n connect.commit()\n connect.close()\n end = time.time()\n print(end - start)\n\n\ndef generate():\n _max = 10000\n process_map(insert_line, range(0, _max), max_workers=4, chunksize=1)\n\n\nif __name__ == '__main__':\n generate()\n","repo_name":"Elizaveta0309/online_sinema","sub_path":"ugc/benchmarks/vertica/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"4656424650","text":"import discord,os\nfrom core.any import Cog_Extension,get_valid_ips,update_proxy_ips\nimport asyncio,random,requests\nimport core.get_gpu_data as get_gpu_data\n\nclass Task(Cog_Extension):\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n async def interval():\n await self.bot.wait_until_ready()\n subs_id_url = os.environ['subs_id_url']\n id_r = requests.get(subs_id_url)\n ids = id_r.json()\n self.channel = self.bot.get_channel(971127322235260978)\n Agent = [\"Mozilla/5.0\",\n \"AppleWebKit/537.36\",\n \"Safari/537.36\",\n \"Gecko/20130326\"]\n url = \"https://tw.evga.com/products/productlist.aspx?type=0\"\n valid_ips = get_valid_ips()\n while not self.bot.is_closed():\n headers = {\"User-Agent\":random.choice(Agent)}\n delay = 10\n if len(valid_ips) == 0:\n update_proxy_ips(3)\n valid_ips = get_valid_ips()\n proxy_ip = random.choice(valid_ips)\n # proxy_ip = \"\"\n # random_get_ip = True\n # while random_get_ip:\n # if len(valid_ips) == 0:\n # update_proxy_ips(3)\n # valid_ips = get_valid_ips()\n # try:\n # proxy_ip = random.choice(valid_ips)\n # requests.get('https://ip.seeip.org/jsonip?',\n # proxies={'http':proxy_ip,'https':proxy_ip},\n # timeout=10)\n # random_get_ip = False\n # except:\n # valid_ips.remove(proxy_ip)\n try:\n print(\"proxy ip list: \",valid_ips)\n print(\"What proxy ip I use: \",proxy_ip)\n #respose = requests.get(url=url,headers=headers)\n respose = requests.get(url=url,headers=headers,proxies={'http':proxy_ip,'https':proxy_ip},timeout=30)\n print(respose)\n r,r_gpus,gpus,time_str = get_gpu_data.check(respose)\n delay = 110 + random.randint(-10,10)\n if r == \"on\":\n embed = discord.Embed(title = f\"上架了!!!\", color = discord.Color.green())\n for g in r_gpus:\n url = g[\"url\"]\n g_url = f\"{g['name']}\\n{url}\"\n embed.add_field(name=g_url,value=g[\"price\"],inline=False)\n embed.set_footer(text=time_str)\n await self.channel.send(embed=embed)\n for id in ids:\n user = await self.bot.fetch_user(id)\n await user.send(embed=embed)\n elif r == \"down\":\n embed = discord.Embed(title = f\"下架了QQ\", color = discord.Color.red())\n for g in r_gpus:\n url = g[\"url\"]\n g_url = f\"{g['name']}\\n{url}\"\n embed.add_field(name=g_url,value=g[\"price\"],inline=False)\n embed.set_footer(text=time_str)\n await self.channel.send(embed=embed)\n for id in ids:\n user = 
await self.bot.fetch_user(id)\n await user.send(embed=embed)\n elif not r:\n valid_ips.remove(proxy_ip)\n delay = 5\n except:\n delay = 5\n valid_ips.remove(proxy_ip)\n print(\"task error\")\n await asyncio.sleep(delay)\n self.bg_task = self.bot.loop.create_task(interval())\ndef setup(bot):\n bot.add_cog(Task(bot))","repo_name":"C107152349/GPU_robot","sub_path":"cogs/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13756293922","text":"import math\nimport os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.wsgi import get_wsgi_application\nfrom django.http import HttpResponse\nfrom django.urls import path\nfrom django.utils.crypto import get_random_string\nfrom django.views.decorators.http import require_GET\n\n# Configure Django\n\nsettings.configure(\n # Use Django's debug mode when the environment asks for it:\n DEBUG=(os.environ.get(\"DEBUG\", \"\") == \"1\"),\n # Tell Django to read URL's from this module:\n ROOT_URLCONF=__name__,\n # We aren't using any security features but Django requires this setting:\n SECRET_KEY=get_random_string(50),\n # Add some useful defaults:\n MIDDLEWARE=[\"django.middleware.common.CommonMiddleware\"],\n # Django REST Framework settings:\n REST_FRAMEWORK={\n # Disable default authentication\n \"DEFAULT_AUTHENTICATION_CLASSES\": [],\n \"UNAUTHENTICATED_USER\": None,\n # Disable default browsable API, since we haven't set TEMPLATES\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n },\n)\n\n# Django REST Framework requires Django settings to be configured before\n# importing it\n\nfrom rest_framework import serializers # noqa: E402\nfrom rest_framework.decorators import api_view # noqa: E402\nfrom rest_framework.response import Response # noqa: E402\n\n# Our view functions\n\n\n@require_GET\ndef index_view(request):\n return HttpResponse(\n \"
Welcome to Example Corp\"\n        + 'See /api/ for the API.
'\n )\n\n\n@api_view()\ndef api_index_view(request):\n return Response(\n data={\n \"endpoints\": [\n \"/api/circle-area/\",\n ],\n },\n )\n\n\nclass CircleInputSerializer(serializers.Serializer):\n radius = serializers.FloatField(\n min_value=1e-9,\n max_value=1e9,\n )\n\n\n@api_view()\ndef circle_area_view(request):\n serializer = CircleInputSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n radius = serializer.validated_data[\"radius\"]\n\n area = calculate_circle_area(radius)\n\n return Response(data={\"area\": area})\n\n\ndef calculate_circle_area(radius):\n return math.pi * (radius ** 2)\n\n\n# Our URL configuration\n\n\nurlpatterns = [\n path(\"\", index_view),\n path(\"api/\", api_index_view),\n path(\"api/circle-area/\", circle_area_view),\n]\n\n# Create a WSGI application so a web server could run this for us:\napp = get_wsgi_application()\n\n# Allow running Django's commands through this file:\nif __name__ == \"__main__\":\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n","repo_name":"adamchainz/workshop-rest-api-django","sub_path":"ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"26554493555","text":"import os.path as osp\n\nfrom ..build import DATASET_REGISTRY\nfrom .digits_dg import DigitsDG\nfrom ..base_dataset import DatasetBase\n\n\n@DATASET_REGISTRY.register()\nclass OfficeHomeDG(DatasetBase):\n \"\"\"Office-Home.\n\n Statistics:\n - Around 15,500 images.\n - 65 classes related to office and home objects.\n - 4 domains: Art, Clipart, Product, Real World.\n - URL: http://hemanthdv.org/OfficeHome-Dataset/.\n\n Reference:\n - Venkateswara et al. Deep Hashing Network for Unsupervised\n Domain Adaptation. 
CVPR 2017.\n \"\"\"\n\n dataset_dir = \"office_home_dg\"\n domains = [\"art\", \"clipart\", \"product\", \"real_world\"]\n data_url = \"https://drive.google.com/uc?id=1gkbf_KaxoBws-GWT3XIPZ7BnkqbAxIFa\"\n\n def __init__(self, cfg):\n root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))\n self.dataset_dir = osp.join(root, self.dataset_dir)\n\n if not osp.exists(self.dataset_dir):\n dst = osp.join(root, \"office_home_dg.zip\")\n self.download_data(self.data_url, dst, from_gdrive=True)\n\n self.check_input_domains(\n cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS\n )\n\n train = DigitsDG.read_data(\n self.dataset_dir, cfg.DATASET.SOURCE_DOMAINS, \"train\"\n )\n val = DigitsDG.read_data(\n self.dataset_dir, cfg.DATASET.SOURCE_DOMAINS, \"val\"\n )\n test = DigitsDG.read_data(\n self.dataset_dir, cfg.DATASET.TARGET_DOMAINS, \"all\"\n )\n\n super().__init__(train_x=train, val=val, test=test)\n","repo_name":"KaiyangZhou/Dassl.pytorch","sub_path":"dassl/data/datasets/dg/office_home_dg.py","file_name":"office_home_dg.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":979,"dataset":"github-code","pt":"91"} +{"seq_id":"30677233126","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 7 09:38:29 2023\r\n\r\n@author: thepe\r\n\"\"\"\r\n\r\nfrom dash import Dash, html, dcc, Input, Output, callback\r\n\r\nimport dash_bootstrap_components as dbc\r\n\r\nimport pandas as pd\r\nimport plotly.express as px\r\n\r\n\r\nurl = 'https://raw.githubusercontent.com/chriszapp/datasets/main/books.csv'\r\ndf = pd.read_csv(url, nrows = 3000)\r\n\r\ndf2 = df.groupby(by = ['authors', 'title']).agg({'language_code' : 'first', ' num_pages' : 'mean'}).reset_index()\r\nfig = px.scatter(df2, x=\"authors\", y=\" num_pages\")\r\nfig.update_layout(\r\n title=dict(text=\"Nombre moyen de pages par auteur\", font=dict(size=30), automargin=True, yref='paper'),\r\n font_color='#119DFF',\r\n title_font_color='#F71016',\r\n xaxis_title=\"Auteur\",\r\n yaxis_title=\"Nombre de pages\")\r\n\r\n\r\n\r\n\r\napp = Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\r\nserver = app.server\r\n\r\napp.layout = dbc.Container([\r\n html.H4(children='Books'),\r\n dcc.Graph(\r\n figure=fig\r\n ,id ='graph'),\r\n \r\n dbc.Row([\r\n dbc.Col(\r\n \r\n dcc.Markdown(''' \r\n \r\n \r\n \r\n Choix de l'auteur :\r\n\r\n '''),\r\n width=6),\r\n dbc.Col(\r\n dcc.Markdown(''' \r\n \r\n \r\n \r\n Choix de la langue :\r\n\r\n '''),\r\n width = 6)\r\n ]),\r\n \r\n dbc.Row([\r\n dbc.Col(\r\n # choix auteur\r\n \r\n dcc.Dropdown(options = [{'label':nom, 'value': nom} for nom in list(df['authors'].unique())],\r\n value =list(df['authors'].unique())[0],\r\n id =\"drop-authors\" \r\n ),\r\n width = 6),\r\n dbc.Col(\r\n \r\n # choix langue\r\n dcc.RadioItems(options = [{'label':nom, 'value': nom} for nom in list(df['language_code'].unique())],\r\n value =list(df['language_code'].unique())[0], \r\n id = 'radio-lang'),\r\n width = 6)\r\n ])\r\n ])\r\n \r\n \r\n@callback(\r\n Output('graph', 'figure'),\r\n [Input('drop-authors', 'value'),\r\n Input('radio-lang', 'value')]\r\n)\r\n\r\ndef update_graph(auteur, langue):\r\n \r\n df3 = df2[(df2['authors'] == auteur) & (df2['language_code'] == langue)]\r\n fig = px.scatter(df3, x=\"title\", y=\" num_pages\")\r\n return fig\r\n\r\n\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n 
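# run the Dash development server in debug mode\r\n    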
app.run(debug=True)\r\n","repo_name":"theperk08/dash_layout","sub_path":"Dash_Layouts.py","file_name":"Dash_Layouts.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24168113689","text":"from django.utils.crypto import get_random_string\n\nfrom rest_framework import serializers\n\nfrom .models import Projectile\n\nclass CreateProjectileSerializer(serializers.ModelSerializer):\n\n identity = serializers.CharField(max_length=10, read_only=True)\n\n def create(self, validated_data):\n id = get_random_string(10)\n count = 0\n try:\n while count <= 20:\n Projectile.objects.get(identity=id)\n id = get_random_string(10)\n count += 1\n except:\n acceleration = validated_data.get(\"acceleration\")\n hang_time = validated_data.get(\"hang_time\")\n bounce_height = validated_data.get(\"bounce_height\")\n projectile = Projectile(\n identity=id,\n acceleration=acceleration,\n hang_time=hang_time,\n bounce_height=bounce_height,\n score=acceleration/2 + hang_time/2 + bounce_height,\n )\n projectile.save()\n return projectile\n\n class Meta:\n model = Projectile\n fields = (\"identity\", \"acceleration\", \"hang_time\", \"bounce_height\")\n","repo_name":"Deerjason/CSE-453_Scoreboard","sub_path":"backend/projectile/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"2714930691","text":"import os\nimport logging\nimport glob\n\nimport yaml\nimport joblib\nimport numpy as np\nimport pandas as pd\n\nfrom mathtools import utils # , pose\nfrom kinemparse import assembly as lib_assembly\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef relativePose(poses_seq, lower_tri_only=True, magnitude_only=False):\n def relPose(lhs, rhs, magnitude_only=False):\n if np.isnan(lhs).any() or np.isnan(rhs).any():\n either_row_has_nan = np.isnan(lhs).any(axis=1) + np.isnan(rhs).any(axis=1)\n non_nan_rel_pose = relPose(\n lhs[~either_row_has_nan, :], rhs[~either_row_has_nan, :],\n magnitude_only=magnitude_only\n )\n rel_pose = np.full(lhs.shape[0:1] + non_nan_rel_pose.shape[1:], np.nan)\n rel_pose[~either_row_has_nan, ...] 
= non_nan_rel_pose\n return rel_pose\n\n if not magnitude_only:\n raise NotImplementedError()\n\n position_lhs = lhs[:, :3]\n position_rhs = rhs[:, :3]\n rel_pose = np.linalg.norm(position_lhs - position_rhs, axis=1, keepdims=True)\n return rel_pose\n\n num_poses = poses_seq.shape[-1]\n rel_poses = np.stack(\n tuple(\n np.stack(\n tuple(\n # pose.relPose(\n relPose(\n poses_seq[..., i], poses_seq[..., j],\n magnitude_only=magnitude_only\n )\n for j in range(num_poses)\n ),\n axis=1\n )\n for i in range(num_poses)\n ), axis=1\n )\n\n if lower_tri_only:\n rows, cols = np.tril_indices(rel_poses.shape[1], k=-1)\n rel_poses = rel_poses[:, rows, cols]\n\n return rel_poses\n\n\ndef actionLabels(\n labels_arr, num_samples, action_name_to_index, part_name_to_index,\n lower_tri_only=True):\n num_parts = len(part_name_to_index)\n label_seq = np.zeros((num_samples, num_parts, num_parts), dtype=int)\n\n for i, (start_idx, end_idx, action, part1, part2) in labels_arr.iterrows():\n action_idx = action_name_to_index[action]\n part1_idx = part_name_to_index[part1]\n part2_idx = part_name_to_index[part2]\n\n label_seq[start_idx:end_idx, part1_idx, part2_idx] = action_idx + 1\n label_seq[start_idx:end_idx, part2_idx, part1_idx] = action_idx + 1\n\n if lower_tri_only:\n rows, cols = np.tril_indices(label_seq.shape[1], k=-1)\n label_seq = label_seq[:, rows, cols]\n\n return label_seq\n\n\ndef iterateSymmetries(action, part1, part2, cur_assembly, symmetries=[]):\n def is_possible(action, part1, part2, cur_assembly):\n if action == 'connect':\n for joint in cur_assembly.joints.values():\n for part in (part1, part2):\n if part == joint.parent_name or part == joint.child_name:\n return False\n return True\n if action == 'disconnect':\n for joint in cur_assembly.joints.values():\n for part in (part1, part2):\n if part == joint.parent_name or part == joint.child_name:\n return True\n return False\n\n part1_sym = symmetries.get(part1, [part1])\n part2_sym = symmetries.get(part2, [part2])\n\n for p1 in part1_sym:\n for p2 in part2_sym:\n if is_possible(action, p1, p2, cur_assembly):\n if action == 'connect':\n assembly = cur_assembly.add_joint(p1, p2, in_place=False, directed=False)\n elif action == 'disconnect':\n assembly = cur_assembly.remove_joint(p1, p2, in_place=False, directed=False)\n yield assembly\n\n\ndef _assemblyLabels(labels_arr, num_samples, assembly_vocab=[], symmetries=[]):\n def getIndex(assembly):\n for i, a in enumerate(assembly_vocab):\n if a == assembly:\n return i\n else:\n assembly_vocab.append(assembly)\n return len(assembly_vocab) - 1\n\n label_seq = np.zeros(num_samples, dtype=int)\n\n assembly = lib_assembly.Assembly()\n getIndex(assembly)\n paths = [[assembly]]\n for i, (_, _, action, part1, part2) in labels_arr.iterrows():\n new_paths = []\n for i, path in enumerate(paths):\n states = list(\n iterateSymmetries(action, part1, part2, path[-1], symmetries=symmetries)\n )\n for assembly in states:\n getIndex(assembly)\n new_path = path + [assembly]\n new_paths.append(new_path)\n paths = new_paths\n\n return label_seq\n\n\ndef assemblyLabels(labels_arr, num_samples, assembly_vocab=[], symmetries=[]):\n def getIndex(assembly):\n for i, a in enumerate(assembly_vocab):\n if a == assembly:\n return i\n else:\n assembly_vocab.append(assembly)\n return len(assembly_vocab) - 1\n\n label_seq = np.zeros(num_samples, dtype=int)\n\n assembly = lib_assembly.Assembly()\n assembly_index = getIndex(assembly)\n prev_end_idx = 0\n prev_start_idx = -1\n for i, (start_idx, end_idx, action, part1, part2) in 
labels_arr.iterrows():\n if start_idx != prev_start_idx or end_idx != prev_end_idx:\n assembly_index = getIndex(assembly)\n label_seq[prev_end_idx:end_idx] = assembly_index\n if action == 'connect':\n assembly = assembly.add_joint(part1, part2, in_place=False, directed=False)\n elif action == 'disconnect':\n assembly = assembly.remove_joint(part1, part2, in_place=False, directed=False)\n prev_start_idx = start_idx\n prev_end_idx = end_idx\n assembly_index = getIndex(assembly)\n label_seq[end_idx:] = assembly_index\n\n return label_seq\n\n\ndef makePairs(seq, lower_tri_only=True):\n pairs = tuple(tuple((token1, token2) for token2 in seq) for token1 in seq)\n\n if lower_tri_only:\n rows, cols = np.tril_indices(len(seq), k=-1)\n pairs = tuple(pairs[r][c] for r, c in zip(rows, cols))\n\n return pairs\n\n\ndef possibleConnections(part_pair_names):\n def holeInfo(part_name):\n part_name, hole_idx = part_name.split('_hole_')\n return part_name, hole_idx\n\n def connectionPossible(name_A, name_B):\n name_A, hole_A = holeInfo(name_A)\n name_B, hole_B = holeInfo(name_B)\n if name_A == name_B:\n return False\n return True\n\n return np.array([connectionPossible(a, b) for a, b in part_pair_names])\n\n\ndef main(\n out_dir=None, data_dir=None, part_symmetries=None,\n plot_output=None, results_file=None, sweep_param_name=None, start_from=None):\n\n if part_symmetries is None:\n part_symmetries = [\n ['frontbeam_hole_1', 'frontbeam_hole_2', 'backbeam_hole_1', 'backbeam_hole_2'],\n ['cushion_hole_1', 'cushion_hole_2']\n ]\n part_symmetries = {\n symms[i]: symms\n for symms in part_symmetries\n for i in range(len(symms))\n }\n\n data_dir = os.path.expanduser(data_dir)\n\n out_dir = os.path.expanduser(out_dir)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n out_data_dir = os.path.join(out_dir, 'data')\n if not os.path.exists(out_data_dir):\n os.makedirs(out_data_dir)\n\n fig_dir = os.path.join(out_dir, 'figures')\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir)\n\n debug_dir = os.path.join(out_dir, 'debug')\n if not os.path.exists(debug_dir):\n os.makedirs(debug_dir)\n\n def saveVariable(var, var_name):\n joblib.dump(var, os.path.join(out_data_dir, f'{var_name}.pkl'))\n\n logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))\n\n logger.info(f\"Reading from: {data_dir}\")\n logger.info(f\"Writing to: {out_dir}\")\n\n if results_file is None:\n results_file = os.path.join(out_dir, 'results.csv')\n # write_mode = 'w'\n else:\n results_file = os.path.expanduser(results_file)\n # write_mode = 'a'\n\n fig_dir = os.path.join(out_dir, 'figures')\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir)\n\n out_data_dir = os.path.join(out_dir, 'data')\n if not os.path.exists(out_data_dir):\n os.makedirs(out_data_dir)\n\n labels_dir = os.path.join(data_dir, 'labels')\n\n with open(os.path.join(labels_dir, 'action_and_part_names.yaml'), 'rt') as f:\n names = yaml.safe_load(f)\n action_names = names['action_names']\n action_name_to_index = {name: i for i, name in enumerate(action_names)}\n part_names = names['part_names']\n part_name_to_index = {name: i for i, name in enumerate(part_names)}\n\n video_ids = []\n all_label_arrs = []\n for label_fn in glob.glob(os.path.join(labels_dir, \"*.csv\")):\n video_id = utils.stripExtension(label_fn)\n labels_arr = pd.read_csv(label_fn)\n all_label_arrs.append(labels_arr)\n video_ids.append(video_id)\n\n pose_dir = os.path.join(data_dir, 'poses')\n pose_ids = tuple(\n video_id\n for video_id in video_ids\n if 
os.path.exists(os.path.join(pose_dir, video_id))\n )\n keep_ids = tuple(v_id in pose_ids for v_id in video_ids)\n logger.info(\n f\"Ignoring {len(keep_ids) - sum(keep_ids)} video(s) with missing data: \"\n f\"{', '.join([v_id for v_id, keep in zip(video_ids, keep_ids) if not keep])}\"\n )\n\n def filterSeq(seq):\n return tuple(x for x, keep_id in zip(seq, keep_ids) if keep_id)\n all_label_arrs = filterSeq(all_label_arrs)\n video_ids = filterSeq(video_ids)\n\n assembly_vocab = []\n label_seqs = []\n for i, video_id in enumerate(video_ids):\n if start_from is not None and i < start_from:\n continue\n\n logger.info(\"PROCESSING VIDEO {0}: {1}\".format(i, video_id))\n\n labels_arr = all_label_arrs[i]\n\n video_dir = os.path.join(pose_dir, video_id)\n\n def loadFile(part_name):\n path = os.path.join(video_dir, f'{part_name}.csv')\n arr = pd.read_csv(path)\n return arr\n\n part_data = tuple(loadFile(part_name) for part_name in part_names)\n\n poses_seq = np.stack(tuple(arr.values for arr in part_data), axis=-1)\n\n feature_seq = relativePose(poses_seq, lower_tri_only=True, magnitude_only=True)\n label_seq = actionLabels(\n labels_arr, feature_seq.shape[0],\n action_name_to_index, part_name_to_index\n )\n\n part_pair_names = makePairs(part_names, lower_tri_only=True)\n is_possible = possibleConnections(part_pair_names)\n feature_seq = feature_seq[:, is_possible, :]\n label_seq = label_seq[:, is_possible]\n part_pair_names = tuple(n for (b, n) in zip(is_possible, part_pair_names) if b)\n\n utils.plot_multi(\n np.moveaxis(feature_seq, (0, 1, 2), (-1, 0, 1)), label_seq.T,\n axis_names=part_pair_names, label_name='action',\n feature_names=('translation_dist', 'rotation_dist'),\n tick_names=[''] + action_names,\n fn=os.path.join(fig_dir, f\"{video_id}_actions.png\")\n )\n\n label_seq = assemblyLabels(\n labels_arr, feature_seq.shape[0],\n assembly_vocab=assembly_vocab, symmetries=part_symmetries\n )\n # expanded_label_seq = _assemblyLabels(\n # labels_arr, feature_seq.shape[0],\n # assembly_vocab=assembly_vocab, symmetries=part_symmetries\n # )\n utils.plot_array(\n feature_seq.sum(axis=-1).T, (label_seq,), ('assembly',),\n fn=os.path.join(fig_dir, f\"{video_id}_assemblies.png\")\n )\n label_seqs.append(label_seq)\n\n label_segments, __ = utils.computeSegments(label_seq)\n assembly_segments = [assembly_vocab[i] for i in label_segments]\n lib_assembly.writeAssemblies(\n os.path.join(debug_dir, f'trial={video_id}_assembly-seq.txt'),\n assembly_segments\n )\n\n video_id = video_id.replace('_', '-')\n saveVariable(feature_seq, f'trial={video_id}_feature-seq')\n saveVariable(label_seq, f'trial={video_id}_label-seq')\n # saveVariable(expanded_label_seq, f'trial={video_id}_expanded-label-seq')\n\n if False:\n from seqtools import utils as su\n transition_probs, start_probs, end_probs = su.smoothCounts(\n *su.countSeqs(label_seqs)\n )\n # import pdb; pdb.set_trace()\n\n lib_assembly.writeAssemblies(\n os.path.join(debug_dir, 'assembly-vocab.txt'),\n assembly_vocab\n )\n\n saveVariable(assembly_vocab, 'assembly-vocab')\n with open(os.path.join(out_data_dir, 'action-vocab.yaml'), 'wt') as f:\n yaml.dump(action_names, f)\n with open(os.path.join(out_data_dir, 'part-vocab.yaml'), 'wt') as f:\n yaml.dump(part_names, f)\n\n\nif __name__ == \"__main__\":\n # Parse command-line args and config file\n cl_args = utils.parse_args(main)\n config, config_fn = utils.parse_config(cl_args, script_name=__file__)\n\n # Create output directory, instantiate log file and write config options\n out_dir = 
os.path.expanduser(config['out_dir'])\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, config_fn), 'w') as outfile:\n yaml.dump(config, outfile)\n utils.copyFile(__file__, out_dir)\n\n main(**config)\n","repo_name":"jd-jones/kinemparse","sub_path":"egs/ikea_jhu/scripts/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40525726708","text":"# 精通Counter方法的基础上再熟练应用reduce和map方法\nfrom collections import Counter\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n return reduce(Counter.__and__, map(Counter, A)).elements()\n\n# 灵活使用Counter方法\nfrom collections import Counter\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n dic = Counter(A[0])\n for w in A:\n dic &= Counter(w)\n return dic.elements()\n\n# 最low的代码\nfrom collections import Counter\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n if not A: return []\n dic = Counter(A[0])\n for i, w in enumerate(A):\n if 0 == i:\n continue\n temp = Counter(w)\n inter_key = set(tuple(dic.keys())) & set(tuple(temp.keys()))\n new_dic = {}\n for k in inter_key:\n new_dic[k] = min(dic[k], temp[k])\n dic = new_dic\n ret = []\n for i in dic:\n for j in range(dic[i]):\n ret.append(i)\n return ret","repo_name":"Bieneath/LeetCode_training","sub_path":"Week_10/1002.py","file_name":"1002.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38349640444","text":"import pandas as pd\nimport numpy as np\n# Create features\n\n# pp.preprocess_candles('2h')\ndata = pd.read_csv(\"2h_BTC_candle_shape.csv\").values\nN = data.shape[0]\nF = data.shape[1]\n\n\ndef labels(c1, c2):\n \"\"\"\n Creates a label (1 or 0) from 2 candles\n :param c1: candle 1\n :param c2: candle 2\n :return: 1 or 0\n \"\"\"\n if (c2[1] - c1[0]) >= 0:\n return 1 # green bar\n else:\n return 0\n\n\nX = []\nY = []\n\nfor i in range(N-6):\n x = [data[i, :], data[i+1, :], data[i+2, :], data[i+3, :]]\n X.append(x)\n y = labels(data[i+4], data[i+5])\n Y.append(y)\n\nY = np.array(Y)\nprint(len(X))\n# Deep-Q Learning NN\n\n# TODO - Q-Learning NN for candle shapes -> Library for this?\n\n\ndef sigmoid(x, deriv=False):\n # if deriv:\n # return x*(1-x)\n # return 1/(1+np.exp(-x))\n if deriv:\n a = x*(1-x)\n return np.array(a)\n else:\n X = []\n for j in range(len(x)):\n X.append(1/(1+np.exp(-x[j])))\n u = x[j]\n print(u)\n return np.array(X)\n\n\n# X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]]) 4x3\n# y = np.array([[0,0,1,1]]).T 4x1\n\ntraining_iterations = 100\n\nnp.random.seed(1)\n\n# Synapses\nsyn0 = np.random.random((4, 1)) - 1 # 3x1\n# syn1 = 2*np.random.random((9, 12)) - 1\n# syn2 = 2*np.random.random((12, 12)) - 1\n# syn3 = 2*np.random.random((12, 9)) - 1\n# syn4 = 2*np.random.random((9, 4)) - 1\n# syn5 = 2*np.random.random((4, 1)) - 1\n\nl6_error = 0\nl6 = 0\nfor i in range(training_iterations):\n l0 = X\n l6 = sigmoid(np.dot(l0, syn0))\n # l2 = sigmoid(np.dot(l1, syn1))\n # l3 = sigmoid(np.dot(l2, syn2))\n # l4 = sigmoid(np.dot(l3, syn3))\n # l5 = sigmoid(np.dot(l4, syn4))\n # l6 = sigmoid(np.dot(l5, syn5))\n\n l6_error = Y-l6 # Should get smaller and smaller as we train\n print(Y.shape)\n print(l6.shape)\n print(X)\n\n l6_delta = l6_error * sigmoid(l6, True)\n\n # Updates weights in our network\n # syn0 += np.dot(l0,l6_delta)\n\ntotalError = 
np.sum(l6_error)\n\nprint(len(Y))\nprint(Y[0])\nprint(len(l6))\nprint(np.column_stack(Y, l6))\n\n\n\n\n","repo_name":"MarcGoulding/binance-data-processing","sub_path":"Q-Learning candle-shape.py","file_name":"Q-Learning candle-shape.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"16132190776","text":"from block_ai.lib.myblokus import point\nfrom block_ai.lib.myblokus.point import X, Y\n\nimport unittest\n\nfrom hypothesis import given\nimport hypothesis.strategies as st\n\nclass PointTests(unittest.TestCase):\n\n def test_get_adjacent(self):\n # Given \n p = (1, 1)\n\n # When\n adjacent = point.get_adjacent(p)\n\n # Then\n expected = [(0, 1),\n (1, 0),\n (1, 2),\n (2, 1)]\n self.assertEqual(adjacent, expected)\n\n def test_get_corners(self):\n # Given\n p = (1, 1)\n\n # When\n corners = point.get_corners(p)\n\n # Then\n expected = [(0, 0),\n (0, 2),\n (2, 0),\n (2, 2)]\n self.assertTrue(corners, expected)\n\n def test_str(self):\n # Given\n p = (1, 1)\n\n # When\n p_str = str(p)\n\n # Then\n self.assertEqual(p_str, \"(1, 1)\")\n\n def test_repr(self):\n\n # Given\n p = (1, 1)\n\n # When\n p_repr = repr(p)\n\n # Then\n self.assertEqual(p_repr, \"(1, 1)\")\n\n @given(x=st.integers(), y=st.integers())\n def test_equal(self, x, y):\n # Given\n p1 = (x, y)\n p2 = (x, y)\n\n # Then\n self.assertTrue(p1 == p2)\n\n\n @given(x=st.integers(), y=st.integers())\n def test_not_equal(self, x, y):\n # Given\n p1 = (x, y)\n p2 = (x + 1, y)\n\n # Then\n self.assertNotEqual(p1, p2)\n self.assertTrue(p1 != p2)\n \n @given(x=st.integers(), y=st.integers())\n def test_hash(self, x, y):\n # Given\n p1 = (x, y)\n p2 = (x, y)\n\n # Then\n self.assertEqual(hash(p1), hash(p2))\n\n @given(x=st.integers(), y=st.integers())\n def test_less_than(self, x, y):\n # Given\n p1 = (x - 1, y)\n p2 = (x, y - 1)\n p3 = (x, y)\n p4 = (x, y + 1)\n p5 = (x + 1, y + 1)\n\n # Then\n self.assertFalse(p1 < p1)\n self.assertTrue(p1 < p2)\n self.assertTrue(p2 < p3)\n self.assertTrue(p3 < p4)\n self.assertTrue(p4 < p5)\n\n \n \n @given(x=st.integers(), y=st.integers())\n def test_greater_than(self, x, y):\n # Given\n p1 = (x - 1, y)\n p2 = (x, y - 1)\n p3 = (x, y)\n p4 = (x, y + 1)\n p5 = (x + 1, y + 1)\n\n # Then\n self.assertFalse(p1 > p2)\n self.assertFalse(p2 > p3)\n self.assertFalse(p3 > p4)\n self.assertFalse(p4 > p5)\n\n @given(x=st.integers(), y=st.integers(), a=st.integers(), b=st.integers())\n def test_add(self, x, y, a, b):\n # Given\n p1 = (x, y)\n p2 = (a, b)\n\n # When\n p_sum = point.add(p1, p2)\n\n # Then\n expected = (x + a, y + b)\n self.assertEqual(p_sum, expected)\n\n @given(x=st.integers(), y=st.integers(), a=st.integers(), b=st.integers())\n def test_sub(self, x, y, a, b):\n # Given\n p1 = (x, y)\n p2 = (a, b)\n \n # When\n p_diff = point.subtract(p1, p2)\n\n # Then\n expected = (x - a, y - b)\n self.assertEqual(p_diff, expected)\n \n @given(x=st.integers(), y=st.integers())\n def test_from_string(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.from_string(str(p1))\n \n # Then\n self.assertEqual(p1, p2)\n\n @given(x=st.integers(), y=st.integers())\n def test_ident_transform(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.ident(p1)\n\n # Then\n self.assertEqual(p1, p2)\n\n\n @given(x=st.integers(), y=st.integers())\n def test_flip_transform(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.flip(p1)\n p3 = point.flip(p2)\n\n # Then\n self.assertEqual(p1, p3)\n 
self.assertEqual(p1[X], p2[X])\n self.assertEqual(p1[Y], -p2[Y])\n\n \n @given(x=st.integers(), y=st.integers())\n def test_rot90_transform(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.rot90(p1)\n \n p = p1\n for i in range(4):\n p = point.rot90(p)\n\n # Then\n self.assertEqual(p1[X], -p2[Y])\n self.assertEqual(p1[Y], p2[X])\n self.assertEqual(p1, p)\n\n\n @given(x=st.integers(), y=st.integers())\n def test_rot180_transform(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.rot180(p1)\n \n p = p1\n for i in range(2):\n p = point.rot180(p)\n\n # Then\n self.assertEqual(p1[X], -p2[X])\n self.assertEqual(p1[Y], -p2[Y])\n self.assertEqual(p1, p)\n\n @given(x=st.integers(), y=st.integers())\n def test_rot270_transform(self, x, y):\n # Given\n p1 = (x, y)\n\n # When\n p2 = point.rot270(p1)\n \n p = p1\n for i in range(4):\n p = point.rot90(p)\n\n # Then\n self.assertEqual(p1[X], p2[Y])\n self.assertEqual(p1[Y], -p2[X])\n self.assertEqual(p1, p)\n\n","repo_name":"BenjaminNMitchell/blockAI","sub_path":"test/lib/myblokus/test_point.py","file_name":"test_point.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"75273150062","text":"import sys\nimport pyodbc\n\n# Check the pyodbc version\ntry:\n major, minor, patch = map(int, pyodbc.version.split('.'))\n if major != 4 or (minor, patch) < (0, 27):\n raise ImportError('pyodbc version >= 4.0.27 required. Try running `pip3 install --user --force pyodbc`.')\nexcept ValueError: # The version number is not a number...\n raise ImportError('pyodbc version >= 4.0.27 required. Try running `pip3 install --user --force pyodbc`.')\n\n\nclass ClientError(Exception):\n pass\n\n\ndef getScriptLanguagesFromArgs():\n for i, arg in enumerate(sys.argv):\n if arg == '--script-languages':\n if len(sys.argv) == i + 1:\n raise ClientError('Value for --script-languages missing')\n return sys.argv[i + 1]\n\n\nclass ODBCClient(object):\n def __init__(self, dsn, user=\"sys\", password=\"exasol\"):\n self.cursor = None\n self.params = {'dsn': dsn, 'uid': user, 'pwd': password}\n\n def connect(self, **kwargs):\n params = self.params.copy()\n params.update(kwargs)\n self.conn = pyodbc.connect(**params, ansi=True)\n self.conn.setencoding(encoding='utf-8')\n self.cursor = self.conn.cursor()\n self._setScriptLanguagesFromArgs()\n\n def _setScriptLanguagesFromArgs(self):\n langs = getScriptLanguagesFromArgs()\n if langs is not None:\n self.query(\"ALTER SESSION SET SCRIPT_LANGUAGES='%s'\" % langs)\n\n def query(self, qtext, *args):\n if self.cursor is None:\n raise ClientError('query() requires connect() first')\n q = self.cursor.execute(qtext, *args)\n try:\n return q.fetchall()\n except pyodbc.ProgrammingError as e:\n if 'No results. Previous SQL was not a query.' 
in str(e):\n return None\n else:\n raise\n\n def executeStatement(self, qtext, *args):\n if self.cursor is None:\n raise ClientError('executeStatement() requires connect() first')\n self.cursor.execute(qtext, *args)\n return self.cursor.rowcount\n\n def columns(self, table=None, catalog=None, schema=None, column=None):\n args = {}\n if table:\n args['table'] = table\n if catalog:\n args['catalog'] = catalog\n if schema:\n args['schema'] = schema\n if column:\n args['column'] = column\n return self.cursor.columns(**args).fetchall()\n\n def rowcount(self):\n return self.cursor.rowcount\n\n def cursorDescription(self):\n return self.cursor.description\n\n def commit(self):\n self.conn.commit()\n\n def rollback(self):\n self.conn.rollback()\n\n def close(self):\n self.cursor.close()\n self.conn.close()\n\n# vim: ts=4:sts=4:sw=4:et:fdm=indent\n","repo_name":"exasol/exasol-python-test-framework","sub_path":"exasol_python_test_framework/exatest/clients/odbc.py","file_name":"odbc.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"26465322668","text":"import scrapy\nfrom ..items import BasicSpiderItem\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import MapCompose\n\nclass BookSpider(scrapy.Spider):\n name = 'book_spider'\n\n start_urls = [\n 'https://books.toscrape.com/'\n ]\n\n def parse(self, response):\n categories = response.css('.nav-list ul a::attr(href)').getall()\n \n yield from response.follow_all(categories, self.parse_categories)\n\n def parse_categories(self, response):\n\n for book in response.css('.col-lg-3'):\n loader = ItemLoader(item=BasicSpiderItem(), selector=book)\n loader.add_css('book_title', '.product_pod a::attr(title)')\n loader.add_css('book_price', '.price_color::text')\n loader.add_css('book_img_url', '.thumbnail::attr(src)', MapCompose(response.urljoin))\n loader.add_value('book_details_url', response.urljoin(book.css('.product_pod a::attr(href)').get()))\n\n yield loader.load_item()\n\n next_page = response.css('.next a::attr(href)').get()\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse_categories)","repo_name":"Augustogp/Scrapy_coding_challenge","sub_path":"venv/basic_spider/build/lib/basic_spider/spiders/basic_spider.py","file_name":"basic_spider.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5454496622","text":"# основной алгоритм получения и обработки данных\n\nfrom base.mongo import *\nfrom base.logic import xy2s, span\n\nfrom data.person import Person\nfrom data.position import Position\nfrom data.track import outlier, smooth, prune\n\nfrom calc.moment import moment\nfrom calc.velocity import velocity\nfrom calc.vehicle import vehicle\n\n\n# ----------------------------------\n\nclass Process:\n p = [] # люди\n k = 0 # номер трека\n\n t = 60 # максимальное время и расстояние между двумя соседними точками\n d = 1000\n\n n = 100000 # по сколько точек кладем в базу\n\n # ----------------------------------\n\n def __init__(self, k):\n self.p = [Person() for i in range(k)]\n temp_collection.drop()\n\n # ----------------------------------\n # точки притяжения человека\n\n def make(self, q):\n person = self.p[q.i]\n\n if person.p:\n\n if span(q, person.p) > self.t: # трек оборвался\n self.temp2data(q.k) # перекалдываем его\n\n q.k = self.k # начинаем новый трек\n self.k += 1\n\n else:\n q.k = 
person.p.k\n\n else:\n q.k = self.k # начинаем новый трек\n self.k += 1\n\n person.p = q\n self.temp(q) # сохраняем позицию\n\n # ----------------------------------\n # обработка трека\n\n def track(self, p):\n for q in p: xy2s(q) # находим квадрат\n\n outlier.make(p) # удаляем выбросы\n prune.make(p) # прореживаем\n smooth.make(p) # сглаживаем\n\n moment.get(p) # определяем момент\n velocity.find(p) # вычисляем скорости\n\n vehicle.find(p) # вычисляем способ перемещения\n\n # ----------------------------------\n\n def temp(self, q):\n d = {\n 'i': q.i,\n 'x': q.x,\n 'y': q.y,\n 't': q.t,\n 'e': q.e,\n 'k': q.k,\n }\n temp_collection.insert_one(d)\n\n # ----------------------------------\n # перекладываем трек\n\n def temp2data(self, i):\n p = []\n\n for d in temp_collection.find({'i': i}):\n q = Position(d['i'], d['x'], d['y'], d['t'], d['e'])\n p.append(q)\n\n self.track(p)\n data = []\n\n for q in p:\n d = {\n 'i': q.i,\n 'x': q.x,\n 'y': q.y,\n 't': q.t,\n 'r': q.r,\n 'k': q.k,\n 'v': q.v,\n 'm': q.m,\n }\n data.append(d)\n\n position_collection.insert_many(data)\n temp_collection.remove({'i': i}) # удаляем\n","repo_name":"yugle7/geo","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33115352362","text":"import os\nfrom os import path\nfrom threading import Thread\nfrom netaddr import IPNetwork\nimport json\n\ncidr = os.environ['CIDR']\nclient_dir = os.environ['CLIENT_DIR']\nnumber_of_clients = os.environ['NUMBER_OF_CLIENTS']\nip_prefix = os.environ['IP_PREFIX']\nclient_port = os.environ['CLIENT_PORT']\nexternal_dns = os.environ['EXTERNAL_DNS']\nserver_pubkey = os.environ['SERVER_PUBLIC_KEY']\nallow_ip = os.environ['ALLOW_IP']\nendpoint = os.environ['ENDPOINT']\nserver_port = os.environ['SERVER_PORT']\nserverId = os.environ['SEVER_ID']\n\nnetwork = IPNetwork(cidr)\nrange_ip = list(network)[2:int(number_of_clients)+2]\n\n\ndef gen_client(client_ip, config_path, json_path, private_key_path, public_key_path):\n privatekey = open(private_key_path, 'r').read().split('\\n') # array\n publickey = open(public_key_path, 'r').read().split('\\n') # array\n\n # Generate GW config file\n wg_conf_template = \"\"\"\\\n[Interface]\nAddress = {client_ip}/{ip_prefix}\nListenPort = {client_port}\nPrivateKey = {privatekey}\nDNS = {external_dns}\n\n[Peer]\nPublicKey = {server_pubkey}\nAllowedIPs = {allow_ip}\nEndpoint = {endpoint}:{server_port}\n\"\"\".format( client_ip=client_ip, \n ip_prefix=ip_prefix, \n client_port=client_port,\n privatekey=privatekey[0], \n external_dns=external_dns,\n server_pubkey=server_pubkey, \n allow_ip=allow_ip, \n endpoint=endpoint, \n server_port=server_port )\n\n # Generate GW json config file\n interface_addr = \"{client_ip}/{ip_prefix}\".format(client_ip=client_ip, ip_prefix=ip_prefix)\n init_endpoint = \"{endpoint}:{server_port}\".format(endpoint=endpoint, server_port=server_port)\n\n wg_json_template = {\n \"serverId\": int(serverId), \n \"wgConfig\": { \n \"Interface\": {\n \"Address\": interface_addr,\n \"ListenPort\": int(client_port),\n \"PrivateKey\": privatekey[0],\n \"DNS\": external_dns\n },\n \"Peer\": {\n \"PublicKey\": server_pubkey,\n \"AllowedIPs\": allow_ip,\n \"Endpoint\": init_endpoint\n }\n }\n }\n\n with open(config_path, 'w') as f:\n f.write(wg_conf_template)\n\n with open(json_path, 'w') as f:\n f.write(json.dumps(wg_json_template))\n\ndef gen_key_without_thread():\n\n for ip in range_ip:\n 
private_key_path = \"{client_dir}/{ip}/privatekey\".format(client_dir=client_dir,ip=ip)\n public_key_path = \"{client_dir}/{ip}/publickey\".format(client_dir=client_dir,ip=ip)\n config_path = \"{client_dir}/{ip}/wg.conf\".format(client_dir=client_dir,ip=ip)\n json_path = \"{client_dir}/{ip}/wg.json\".format(client_dir=client_dir,ip=ip)\n gen_client(ip, config_path, json_path, private_key_path, public_key_path)\n\nif __name__ == '__main__':\n gen_key_without_thread()\n","repo_name":"minhleaws/VPN","sub_path":"ansible-wireguard/roles/wg-config-api/files/gen_client_conf.py","file_name":"gen_client_conf.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"31524378769","text":"import tensorflow as tf\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nBUFFER_SIZE = 20000\nBATCH_SIZE = 64\n\n\"\"\"## Positional encoding\nSince this model doesn't contain any recurrence or convolution, \npositional encoding is added to give the model some information about the relative position of the words in the sentence. \n\nThe positional encoding vector is added to the embedding vector. \nEmbeddings represent a token in a d-dimensional space where tokens with similar meaning will be closer to each other. \nBut the embeddings do not encode the relative position of words in a sentence. So after adding the positional encoding, \nwords will be closer to each other based on the *similarity of their meaning and their position in the sentence*, in the d-dimensional space.\n\nSee the notebook on [positional encoding](https://github.com/tensorflow/examples/blob/master/community/en/position_encoding.ipynb) \nto learn more about it. The formula for calculating the positional encoding is as follows:\n\n$$\\Large{PE_{(pos, 2i)} = sin(pos / 10000^{2i / d_{model}})} $$\n$$\\Large{PE_{(pos, 2i+1)} = cos(pos / 10000^{2i / d_{model}})} $$\n\"\"\"\n\ndef get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n return pos * angle_rates\n\ndef positional_encoding(position, d_model):\n angle_rads = get_angles(np.arange(position)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :],\n d_model)\n \n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n \n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n \n pos_encoding = angle_rads[np.newaxis, ...]\n \n return tf.cast(pos_encoding, dtype=tf.float32)\n\n\"\"\"## Masking\n\nMask all the pad tokens in the batch of sequence. It ensures that the model does not treat padding as the input. The mask indicates where pad value `0` is present: it outputs a `1` at those locations, and a `0` otherwise.\n\"\"\"\n\ndef create_padding_mask(seq):\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n \n # add extra dimensions to add the padding\n # to the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\n\n\"\"\"The look-ahead mask is used to mask the future tokens in a sequence. In other words, the mask indicates which entries should not be used.\n\nThis means that to predict the third word, only the first and second word will be used. 
Similarly to predict the fourth word, only the first, second and the third word will be used and so on.\n\"\"\"\n\ndef create_look_ahead_mask(size):\n mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n return mask # (seq_len, seq_len)\n\n\"\"\"## Scaled dot product attention\"\"\"\n\n\ndef scaled_dot_product_attention(q, k, v, mask):\n \"\"\"Calculate the attention weights.\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead) \n but it must be broadcastable for addition.\n \n Args:\n q: query shape == (..., seq_len_q, depth)\n k: key shape == (..., seq_len_k, depth)\n v: value shape == (..., seq_len_v, depth_v)\n mask: Float tensor with shape broadcastable \n to (..., seq_len_q, seq_len_k). Defaults to None.\n \n Returns:\n output, attention_weights\n \"\"\"\n\n matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)\n \n # scale matmul_qk\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n # add the mask to the scaled tensor.\n if mask is not None:\n scaled_attention_logits += (mask * -1e9) \n\n # softmax is normalized on the last axis (seq_len_k) so that the scores\n # add up to 1.\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)\n\n output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)\n\n return output, attention_weights\n\n\"\"\"As the softmax normalization is done on K, its values decide the amount of importance given to Q.\n\nThe output represents the multiplication of the attention weights and the V (value) vector. \nThis ensures that the words you want to focus on are kept as-is and the irrelevant words are flushed out.\n\"\"\"\n\n\"\"\"## Multi-head attention\n\n\"\"\"\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n \n assert d_model % self.num_heads == 0\n \n self.depth = d_model // self.num_heads\n \n self.wq = tf.keras.layers.Dense(d_model)\n self.wk = tf.keras.layers.Dense(d_model)\n self.wv = tf.keras.layers.Dense(d_model)\n \n self.dense = tf.keras.layers.Dense(d_model)\n \n def split_heads(self, x, batch_size):\n \"\"\"Split the last dimension into (num_heads, depth).\n Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)\n \"\"\"\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n \n def call(self, v, k, q, mask):\n batch_size = tf.shape(q)[0]\n \n q = self.wq(q) # (batch_size, seq_len, d_model)\n k = self.wk(k) # (batch_size, seq_len, d_model)\n v = self.wv(v) # (batch_size, seq_len, d_model)\n \n q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)\n k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)\n v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)\n \n # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n scaled_attention, attention_weights = scaled_dot_product_attention(\n q, k, v, mask)\n \n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)\n\n concat_attention = tf.reshape(scaled_attention, \n 
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)\n\n output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)\n \n return output, attention_weights\n\n\"\"\"## Point wise feed forward network\n\nPoint wise feed forward network consists of two fully-connected layers with a ReLU activation in between.\n\"\"\"\n\ndef point_wise_feed_forward_network(d_model, dff):\n return tf.keras.Sequential([\n tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)\n tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)\n ])\n\n\"\"\"## Encoder and decoder\"\"\"\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(EncoderLayer, self).__init__()\n\n self.mha = MultiHeadAttention(d_model, num_heads)\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n \n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n \n def call(self, x, training, mask):\n\n attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)\n \n ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)\n \n return out2\n\nsample_encoder_layer = EncoderLayer(512, 8, 2048)\n\nsample_encoder_layer_output = sample_encoder_layer(\n tf.random.uniform((64, 43, 512)), False, None)\n\nsample_encoder_layer_output.shape # (batch_size, input_seq_len, d_model)\n\n\"\"\"### Decoder layer\n\nEach decoder layer consists of sublayers:\n\n1. Masked multi-head attention (with look ahead mask and padding mask)\n2. Multi-head attention (with padding mask). V (value) and K (key) receive the *encoder output* as inputs. Q (query) receives the *output from the masked multi-head attention sublayer.*\n3. Point wise feed forward networks\n\nEach of these sublayers has a residual connection around it followed by a layer normalization. The output of each sublayer is `LayerNorm(x + Sublayer(x))`. The normalization is done on the `d_model` (last) axis.\n\nThere are N decoder layers in the transformer.\n\nAs Q receives the output from decoder's first attention block, and K receives the encoder output, the attention weights represent the importance given to the decoder's input based on the encoder's output. In other words, the decoder predicts the next word by looking at the encoder output and self-attending to its own output. 
See the demonstration above in the scaled dot product attention section.\n\"\"\"\n\nclass DecoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(DecoderLayer, self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads)\n self.mha2 = MultiHeadAttention(d_model, num_heads)\n\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n \n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n \n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n \n \n def call(self, x, enc_output, training, \n look_ahead_mask, padding_mask):\n # enc_output.shape == (batch_size, input_seq_len, d_model)\n\n attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n \n attn2, attn_weights_block2 = self.mha2(\n enc_output, enc_output, out1, padding_mask) # (batch_size, target_seq_len, d_model)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)\n \n ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)\n ffn_output = self.dropout3(ffn_output, training=training)\n out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)\n \n return out3, attn_weights_block1, attn_weights_block2\n\n\"\"\"### Encoder\n\nThe `Encoder` consists of:\n1. Input Embedding\n2. Positional Encoding\n3. N encoder layers\n\nThe input is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the encoder layers. The output of the encoder is the input to the decoder.\n\"\"\"\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,\n maximum_position_encoding, rate=0.1):\n super(Encoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n \n self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)\n self.pos_encoding = positional_encoding(maximum_position_encoding, \n self.d_model)\n \n \n self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) \n for _ in range(num_layers)]\n \n self.dropout = tf.keras.layers.Dropout(rate)\n \n def call(self, x, training, mask):\n\n seq_len = tf.shape(x)[1]\n \n # adding embedding and position encoding.\n x = self.embedding(x) # (batch_size, input_seq_len, d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n \n for i in range(self.num_layers):\n x = self.enc_layers[i](x, training, mask)\n \n return x # (batch_size, input_seq_len, d_model)\n\n\"\"\"### Decoder\n\nThe `Decoder` consists of:\n1. Output Embedding\n2. Positional Encoding\n3. N decoder layers\n\nThe target is put through an embedding which is summed with the positional encoding. The output of this summation is the input to the decoder layers. 
The output of the decoder is the input to the final linear layer.\n\"\"\"\n\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,\n maximum_position_encoding, rate=0.1):\n super(Decoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n \n self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)\n self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)\n \n self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate) \n for _ in range(num_layers)]\n self.dropout = tf.keras.layers.Dropout(rate)\n \n def call(self, x, enc_output, training, \n look_ahead_mask, padding_mask):\n\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n \n x = self.embedding(x) # (batch_size, target_seq_len, d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n print('x', x.shape)\n print('pos encoding', self.pos_encoding[:, :seq_len, :].shape)\n x += self.pos_encoding[:, :seq_len, :]\n \n x = self.dropout(x, training=training)\n\n for i in range(self.num_layers):\n x, block1, block2 = self.dec_layers[i](x, enc_output, training,\n look_ahead_mask, padding_mask)\n \n attention_weights['decoder_layer{}_block1'.format(i+1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i+1)] = block2\n \n # x.shape == (batch_size, target_seq_len, d_model)\n return x, attention_weights\n\n\"\"\"## Create the Transformer\n\nTransformer consists of the encoder, decoder and a final linear layer. The output of the decoder is the input to the linear layer and its output is returned.\n\"\"\"\n\nclass Transformer(tf.keras.Model):\n def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, \n target_vocab_size, pe_input, pe_target, rate=0.1):\n super(Transformer, self).__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, dff, \n input_vocab_size, pe_input, rate)\n\n self.decoder = Decoder(num_layers, d_model, num_heads, dff, \n target_vocab_size, pe_target, rate)\n\n self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n \n def call(self, inp, tar, training, enc_padding_mask, \n look_ahead_mask, dec_padding_mask):\n\n enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model)\n \n # dec_output.shape == (batch_size, tar_seq_len, d_model)\n dec_output, attention_weights = self.decoder(\n tar, enc_output, training, look_ahead_mask, dec_padding_mask)\n \n final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)\n \n return final_output, attention_weights\n\n\"\"\"## Set hyperparameters\n\n\"\"\"\n\nnum_layers = 1\nd_model = 128\ndff = 512\nnum_heads = 8\n\ninput_vocab_size = 14 #TODO change this in function of the number of features considered.\ntarget_vocab_size = 1\ndropout_rate = 0.1\nEPOCHS = 20\n\n\"\"\"## Optimizer\n\nUse the Adam optimizer with a custom learning rate scheduler according to the formula in the [paper](https://arxiv.org/abs/1706.03762).\n\n$$\\Large{lrate = d_{model}^{-0.5} * min(step{\\_}num^{-0.5}, step{\\_}num * warmup{\\_}steps^{-1.5})}$$\n\"\"\"\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n \n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n \n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps ** -1.5)\n \n return tf.math.rsqrt(self.d_model) 
* tf.math.minimum(arg1, arg2)\n\nlearning_rate = CustomSchedule(d_model)\n\noptimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, \n epsilon=1e-9)\n\ntemp_learning_rate_schedule = CustomSchedule(d_model)\n\n\"\"\"## Loss and metrics\n\nSince the target sequences are padded, it is important to apply a padding mask when calculating the loss.\n\"\"\"\n\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')\n\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n \n return tf.reduce_mean(loss_)\n\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='train_accuracy')\n\n\"\"\"## Training and checkpointing\"\"\"\n\ntransformer = Transformer(num_layers, d_model, num_heads, dff,\n input_vocab_size, target_vocab_size, \n pe_input=input_vocab_size, \n pe_target=target_vocab_size,\n rate=dropout_rate)\n\ndef create_masks(inp, tar):\n # Encoder padding mask\n enc_padding_mask = create_padding_mask(inp)\n \n # Used in the 2nd attention block in the decoder.\n # This padding mask is used to mask the encoder outputs.\n dec_padding_mask = create_padding_mask(inp)\n \n # Used in the 1st attention block in the decoder.\n # It is used to pad and mask future tokens in the input received by \n # the decoder.\n look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])\n dec_target_padding_mask = create_padding_mask(tar)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n \n return enc_padding_mask, combined_mask, dec_padding_mask\n\n\"\"\"Create the checkpoint path and the checkpoint manager. 
This will be used to save checkpoints every `n` epochs.\"\"\"\n\ncheckpoint_path = \"./checkpoints/train\"\n\nckpt = tf.train.Checkpoint(transformer=transformer,\n optimizer=optimizer)\n\nckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n\n# if a checkpoint exists, restore the latest checkpoint.\nif ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!')\n\ntrain_step_signature = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n]\n\n@tf.function(input_signature=train_step_signature)\ndef train_step(inp, tar):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n \n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)\n \n with tf.GradientTape() as tape:\n predictions, _ = transformer(inp, tar_inp, \n True, \n enc_padding_mask, \n combined_mask, \n dec_padding_mask)\n loss = loss_function(tar_real, predictions)\n\n gradients = tape.gradient(loss, transformer.trainable_variables) \n optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))\n \n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\"\"\"Portuguese is used as the input language and English is the target language.\"\"\"\n\nfor epoch in range(EPOCHS):\n start = time.time()\n \n train_loss.reset_states()\n train_accuracy.reset_states()\n \n # inp -> portuguese, tar -> english\n for (batch, (inp, tar)) in enumerate(train_dataset):\n train_step(inp, tar)\n \n if batch % 50 == 0:\n print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(\n epoch + 1, batch, train_loss.result(), train_accuracy.result()))\n \n if (epoch + 1) % 5 == 0:\n ckpt_save_path = ckpt_manager.save()\n print ('Saving checkpoint for epoch {} at {}'.format(epoch+1,\n ckpt_save_path))\n \n print ('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1, \n train_loss.result(), \n train_accuracy.result()))\n\n print ('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start))\n\n\n\n\"\"\"## Evaluate\n\nThe following steps are used for evaluation:\n\n* Encode the input sentence using the Portuguese tokenizer (`tokenizer_pt`). Moreover, add the start and end token so the input is equivalent to what the model is trained with. This is the encoder input.\n* The decoder input is the `start token == tokenizer_en.vocab_size`.\n* Calculate the padding masks and the look ahead masks.\n* The `decoder` then outputs the predictions by looking at the `encoder output` and its own output (self-attention).\n* Select the last word and calculate the argmax of that.\n* Concatentate the predicted word to the decoder input as pass it to the decoder.\n* In this approach, the decoder predicts the next word based on the previous words it predicted.\n\nNote: The model used here has less capacity to keep the example relatively faster so the predictions maybe less right. 
To reproduce the results in the paper, use the entire dataset and base transformer model or transformer XL, by changing the hyperparameters above.\n\"\"\"\n\ndef evaluate(inp_sentence):\n start_token = [tokenizer_pt.vocab_size]\n end_token = [tokenizer_pt.vocab_size + 1]\n \n # inp sentence is portuguese, hence adding the start and end token\n inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token\n encoder_input = tf.expand_dims(inp_sentence, 0)\n \n # as the target is english, the first word to the transformer should be the\n # english start token.\n decoder_input = [tokenizer_en.vocab_size]\n output = tf.expand_dims(decoder_input, 0)\n \n for i in range(MAX_LENGTH):\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(\n encoder_input, output)\n \n # predictions.shape == (batch_size, seq_len, vocab_size)\n predictions, attention_weights = transformer(encoder_input, \n output,\n False,\n enc_padding_mask,\n combined_mask,\n dec_padding_mask)\n \n # select the last word from the seq_len dimension\n predictions = predictions[: ,-1:, :] # (batch_size, 1, vocab_size)\n\n predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\n \n # return the result if the predicted_id is equal to the end token\n if predicted_id == tokenizer_en.vocab_size+1:\n return tf.squeeze(output, axis=0), attention_weights\n \n # concatentate the predicted_id to the output which is given to the decoder\n # as its input.\n output = tf.concat([output, predicted_id], axis=-1)\n\n return tf.squeeze(output, axis=0), attention_weights\n\ndef plot_attention_weights(attention, sentence, result, layer):\n fig = plt.figure(figsize=(16, 8))\n \n sentence = tokenizer_pt.encode(sentence)\n \n attention = tf.squeeze(attention[layer], axis=0)\n \n for head in range(attention.shape[0]):\n ax = fig.add_subplot(2, 4, head+1)\n \n # plot the attention weights\n ax.matshow(attention[head][:-1, :], cmap='viridis')\n\n fontdict = {'fontsize': 10}\n \n ax.set_xticks(range(len(sentence)+2))\n ax.set_yticks(range(len(result)))\n \n ax.set_ylim(len(result)-1.5, -0.5)\n \n ax.set_xticklabels(\n ['']+[tokenizer_pt.decode([i]) for i in sentence]+[''], \n fontdict=fontdict, rotation=90)\n \n ax.set_yticklabels([tokenizer_en.decode([i]) for i in result \n if i < tokenizer_en.vocab_size], \n fontdict=fontdict)\n \n ax.set_xlabel('Head {}'.format(head+1))\n \n plt.tight_layout()\n plt.show()\n\n","repo_name":"AMDonati/SMC-T","sub_path":"src/models/Baselines/original_transformer_tf_ts.py","file_name":"original_transformer_tf_ts.py","file_ext":"py","file_size_in_byte":24022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"30988829439","text":"import pdfkit\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.views.generic import ListView,TemplateView,CreateView,UpdateView,DeleteView\nfrom apps.employees.models import Employee\nfrom apps.assignments.models import Delivered, DeliveredDetail\nfrom apps.equipments.models import Supply\nfrom apps.employees.models import AssignedSize\nfrom apps.requests.models import Status\nfrom .forms import Assigned_Size_Form\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render, redirect\nfrom ..sizes.models import Area,Size\nfrom django.db.models import Q\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom ippe.util.render_pdf import render_pdf\n# Create your views here.\nimport json\nfrom django.http import HttpResponseRedirect, JsonResponse, 
HttpResponse\nfrom ippe.util.select_data_util import *\nfrom django.utils.translation import ugettext as _\n\nclass MyDataList(LoginRequiredMixin,ListView):\n login_url ='authenticate:login'\n template_name = 'Profile.html'\n def get_queryset(self,**kwargs):\n return Employee.objects.get(id='1')\n\n def get_context_data(self , **kwargs):\n context = super(MyDataList,self).get_context_data(**kwargs)\n context['employee'] = self.request.user.employee\n deliv = Delivered.objects.filter(employee = self.request.user.employee.id).values('id')\n # deliver_detail = DeliveredDetail.objects.filter(delivered_id__in = deliv).distinct(\"supply__name\")\n deliver_detail = DeliveredDetail.objects.filter(delivered_id__in = deliv)\n context['deliver_detail'] = deliver_detail\n return context\n\n\nclass RateElement(LoginRequiredMixin,TemplateView):\n login_url ='authenticate:login'\n template_name = 'rate.html'\n def get_context_data(self , **kwargs):\n context = super(RateElement,self).get_context_data(**kwargs)\n context['employee'] = self.request.user.employee\n deliv = Delivered.objects.filter(employee = self.request.user.employee.id)\n deliver_detail = DeliveredDetail.objects.filter(delivered_id__in = deliv)\n id_deliver_item = self.kwargs.get('pk')\n deliver = deliver_detail[int(id_deliver_item)-1]\n context['deliver'] = deliver\n return context\n\n\nclass SizeEmployeeList(LoginRequiredMixin,ListView):\n login_url ='authenticate:login'\n template_name = 'size_list.html'\n model = AssignedSize\n def get_context_data(self , **kwargs):\n context = super(SizeEmployeeList,self).get_context_data(**kwargs)\n # print(self.request.user.employee)\n data = self.request.user.employee.assignedsize_set.all()\n elements = []\n actions = []\n for d in data:\n elements.append([\n {\n 'type': 'text',\n 'content': d.id\n },\n {\n 'type': 'code_name',\n 'content': { 'code':d.size.body_area.code , 'name': d.size.body_area.name } \n },\n {\n 'type': 'text',\n 'content': d.size.body_area.description \n },\n {\n 'type': 'code_name',\n 'content': { 'code':d.size.code ,'name':d.size.name } \n },\n {\n 'type': 'text',\n 'content': d.size.description \n },\n\n {\n 'type': 'action',\n 'actions': [\n {\n 'content': ' '+_('Edit')+'',\n 'href': reverse_lazy(\"employees:configure_sizes_update\", kwargs = {'pk': d.id})\n },\n {\n 'content': ' '+_('Delete')+'',\n 'href': '#',\n 'pk' : d.id,\n }\n ]\n }\n ])\n dataTable = {\n 'card_tittle' : _('Sizes'),\n 'headers': ['#',_('Body Area'),_('Description'),_('Size'),_('Size Dimensions'),_('Actions')],\n 'data': elements,\n 'create_button_url' : reverse_lazy(\"employees:configure_sizes_create\"),\n 'delete_button_url' : reverse_lazy(\"employees:configure_sizes_delete\")\n }\n context['dataTable'] = dataTable\n return context\n\nclass SizeEmployeeCreate(LoginRequiredMixin,CreateView):\n login_url ='authenticate:login'\n model=AssignedSize\n form_class= Assigned_Size_Form\n template_name= 'size_employee_create.html'\n success_url=reverse_lazy('employees:configure_sizes_list') \n def get_context_data(self , **kwargs):\n context = super(SizeEmployeeCreate, self).get_context_data(**kwargs)\n context['areaList'] = Area.objects.all()\n context['employee'] = self.request.user.employee\n context['actualSize'] = \"\"\n return context\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n area_id_request = Size.objects.get(id=request.POST.get('size')).body_area_id\n Asigned_sizes_per_employee = 
AssignedSize.objects.filter(employee_id=request.POST.get('employee'))\n areas_id_per_employee=[]\n\n for Assigned in Asigned_sizes_per_employee: \n areas_id_per_employee.append(Assigned.size.body_area_id)\n\n if area_id_request in areas_id_per_employee: \n position= areas_id_per_employee.index(area_id_request)\n asigned_id = Asigned_sizes_per_employee[position].id\n AssignedSize.objects.filter(id = asigned_id).update(size_id=request.POST.get('size')) \n return redirect('employees:configure_sizes_list')\n\n post.save()\n return redirect('employees:configure_sizes_list')\n # return render(request, \"size_employee_create.html\", {'form': form})\n return redirect('employees:configure_sizes_create')\n \nclass SizeEmployeeUpdate(LoginRequiredMixin,UpdateView):\n login_url ='authenticate:login'\n model=AssignedSize\n form_class= Assigned_Size_Form\n template_name='size_employee_update.html'\n success_url=reverse_lazy('employees:configure_sizes_list')\n def get_context_data(self , **kwargs):\n context= super(SizeEmployeeUpdate, self).get_context_data(**kwargs)\n context['areaList'] = Area.objects.all()\n context['sizeid'] = AssignedSize.objects.get(id=self.kwargs.get('pk')).size.id\n context['areaid'] = AssignedSize.objects.get(id=self.kwargs.get('pk')).size.body_area_id\n context['employee'] = self.request.user.employee\n context['actualSize'] = AssignedSize.objects.get(id=self.kwargs.get('pk'))\n return context\n\nclass EquipmentDelete( LoginRequiredMixin,DeleteView): \n login_url ='authenticate:login' \n def post(self, request, *args, **kwargs):\n model = AssignedSize\n pk = request.POST['delete_id']\n data = AssignedSize.objects.get(id = pk )\n data.delete()\n return redirect(reverse_lazy('employees:configure_sizes_list'))\n\nclass SizeAjax(LoginRequiredMixin,View):\n login_url ='authenticate:login'\n def get(self, request, *args, **kwargs):\n area_id =request.GET.get('id')\n if area_id == '':\n resp = {\"response\" : \".\"}\n info = json.dumps(resp)\n else:\n sizelist =Size.objects.filter(body_area_id=area_id)\n jsonList= []\n for size in sizelist:\n jsonList.append(\n {\n 'id' : size.id,\n 'code': size.code,\n 'gender': size.gender_text(),\n 'description' : size.description,\n 'name' : size.name,\n })\n info = json.dumps(jsonList)\n return HttpResponse(info, content_type='application/json')\n\nclass DeliveredList(LoginRequiredMixin,View):\n login_url ='authenticate:login'\n def get(self, request, *args, **kwargs):\n context = {}\n if 'q' in self.request.GET.keys():\n code = int(self.request.GET.get('q'))\n data =Delivered.objects.filter(employee_id=request.user.employee.id,checkout_id__isnull=False,status=code)\n else:\n data = Delivered.objects.filter(employee_id=request.user.employee.id,checkout_id__isnull=False)\n\n # data = Delivered.objects.filter(employee_id=request.user.employee.id,checkout_id__isnull=False)\n elements = []\n actions = []\n for d in data:\n count = \"%s / %s\" % (d.delivereddetail_set.filter(~Q(is_accept = Delivered.CODE_PENDING)).count() , d.delivereddetail_set.count())\n elements.append([\n {\n 'type': 'text',\n 'content': d.id\n },\n {\n 'type': 'label',\n 'content': [{ 'code': d.checkout.request.code}]\n },\n {\n 'type': 'code_name',\n 'content': { 'code': d.checkout.request.request_type.code, 'name': d.checkout.request.request_type.name} \n },\n {\n 'type': 'text',\n 'content': d.checkout.request.request_date\n },\n {\n 'type': 'text',\n 'content': d.checkout.date_out\n },\n {\n 'type': 'label_statu',\n 'content':{ 'badge':d.checkout.request.get_level_badge, 
'status':d.checkout.request.status_text}\n },\n {\n 'type': 'label_statu',\n 'content':{ 'badge':d.checkout.status_badge, 'status':d.checkout.status_text}\n },\n {\n 'type': 'label_statu',\n 'content':{ 'badge':'badge-info', 'status':count}\n }, \n {\n 'type': 'label_statu',\n 'content':{ 'badge':d.get_level_badge, 'status':d.status_text}\n },\n {\n 'type': 'text',\n 'content': d.checkout.observation\n },\n {\n 'type': 'action',\n 'actions': [\n {\n 'content': ' '+_('Show and Accept')+'',\n 'href': reverse_lazy(\"employees:delivered_detail\", kwargs = {'pk': d.id})\n },\n {\n 'content': ' '+_('Print Delivered Order')+'',\n 'href': reverse_lazy(\"employees:delivered_pdf\", kwargs = {'pk': d.id})\n },\n ]\n }\n ])\n dataTable = {\n 'card_tittle' : _('Dispatch Supplies'),\n 'headers': ['#',_('Request Code'),_('Request Type'),_('Request Date'),_('Managed Date'),_('Request Status'),_('Dispatch Status'),_('Accepted Items'),_('Accepted Status'), _('Dispatch Observations'),_( 'Actions')],\n 'data': elements,\n }\n context['dataTable'] = dataTable\n return render(request, 'delivered_list.html', context)\n\n def post(self, request, *args, **kwargs):\n print(request.POST)\n delivered = Delivered.objects.get(pk=request.POST['delivered_id'])\n delivered.observation = request.POST['observation']\n delivered.save()\n if delivered.status == 1:\n status = Status.objects.create(\n observation = request.POST['observation'],\n status = 11,\n employee_request = delivered.checkout.request,\n employee = request.user.employee,\n )\n elif delivered.status == 2:\n status = Status.objects.create(\n observation = request.POST['observation'],\n status = 8,\n employee_request = delivered.checkout.request,\n employee = request.user.employee,\n )\n elif delivered.status == 3:\n status = Status.objects.create(\n observation = request.POST['observation'],\n status = 11,\n employee_request = delivered.checkout.request,\n employee = request.user.employee,\n )\n status.save()\n return redirect(reverse_lazy('employees:delivered_list'))\n\nclass DeliveredDetailView(LoginRequiredMixin,View):\n login_url ='authenticate:login'\n def get(self, request, *args, **kwargs):\n context = {}\n requ = Delivered.objects.get(pk=self.kwargs['pk'])\n context['request'] = requ\n return render(request, 'delivered_detail.html', context)\n\n def post(self, request, *args, **kwargs):\n detail_id = request.POST['detail_id']\n redirect_id = request.POST['delivered_id'] \n obj = DeliveredDetail.objects.get(pk=detail_id)\n if request.POST['send'] == 'accept':\n obj.is_accept = 2\n obj.observation = request.POST['observation'] \n elif request.POST['send'] == 'refuse':\n obj.is_accept = 3\n obj.observation = request.POST['observation'] \n obj.save()\n delivered = obj.delivered\n delivered_id = delivered.id\n detail_status = DeliveredDetail.objects.filter(delivered=delivered_id)\n count_all = detail_status.count()\n count = 0\n for status in detail_status:\n if status.is_accept != 1:\n count = count + 1\n if count == count_all:\n delivered.status = 2\n else:\n delivered.status = 3\n delivered.save()\n return redirect(reverse_lazy('employees:delivered_detail', kwargs = {'pk': redirect_id}))\n\nclass DeliveredPDF(LoginRequiredMixin,View):\n login_url ='authenticate:login'\n \n def get(self, request, *args, **kwargs):\n delivered_id = self.kwargs['pk']\n obj = Delivered.objects.get(pk = delivered_id)\n data = {\n 'employee_name' : obj.employee.get_short_name(),\n 'position' : obj.employee.get_postion(),\n 'area' : 
obj.employee.get_structure_last().departament.name,\n }\n detail = obj.delivereddetail_set.filter(is_accept=2)\n pdf = render_pdf(\"detail_pdf.html\",{\"data\": data,'detail':detail})\n return HttpResponse(pdf, content_type=\"application/pdf\")","repo_name":"jalvarezms/Ixprotec","sub_path":"apps/employees/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17769679856","text":"import rospy\nimport tf\nimport numpy as np\n\nfrom envs.policy.orca import ORCA\nfrom envs.human import Human\n\nfrom geometry_msgs.msg import Twist, Pose\nfrom nav_msgs.msg import Odometry\nfrom rosgraph_msgs.msg import Clock\nfrom std_msgs.msg import Int8\n\n\nclass Env:\n def __init__(self, configs):\n self.configs = configs\n self.human_list = []\n self.human_index_list = []\n self.rvo_agent_list = []\n\n self.sim = ORCA(configs)\n\n map_img_path = configs['Env']['map']['map_img_path']\n self.sim.add_static_obstacle(map_img_path)\n\n # pub list\n self.cmd_vel_list = []\n self.cmd_pose_list = []\n\n # sub list\n self.object_state_sub_list = []\n self.odom_sub_list = []\n self.check_crash_list = []\n \n self.sim_clock = rospy.Subscriber('clock', Clock, self.sim_clock_callback)\n\n def generate_human(self, index, pos):\n #human = Human(pos)\n human = Human()\n self.human_list.append(human)\n self.human_index_list.append(index)\n agent = self.sim.addAgent(pos[0], pos[1])\n self.rvo_agent_list.append(agent)\n\n def init_pub(self, index):\n cmd_vel_topic = 'human_' + str(index) + '/cmd_vel'\n cmd_vel = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\n self.cmd_vel_list.append(cmd_vel) \n \n cmd_pose_topic = 'human_' + str(index) + '/cmd_pose'\n cmd_pose = rospy.Publisher(cmd_pose_topic, Pose, queue_size=2)\n self.cmd_vel_list.append(cmd_pose) \n\n\n def init_sub(self, index):\n object_state_topic = 'human_' + str(index) + '/base_pose_ground_truth'\n object_state_sub = rospy.Subscriber(object_state_topic, Odometry, self.ground_truth_callback)\n self.object_state_sub_list.append(object_state_sub)\n \n odom_topic = 'human_' + str(index) + '/odom'\n odom_sub = rospy.Subscriber(odom_topic, Odometry, self.odometry_callback)\n self.odom_sub_list.append(odom_sub)\n \n crash_topic = 'human_' + str(index) + '/is_crashed'\n check_crash = rospy.Subscriber(crash_topic, Int8, self.crash_callback)\n self.check_crash_list.append(check_crash)\n\n def step(self):\n self.sim.doStep()\n for index in self.human_index_list:\n human = self.human_list[index]\n cur_pos = human.get_pos()\n self.sim.setAgentPosition(cur_pos)\n goal_pos = human.get_local_goal()\n dt_vel = (goal_pos[0] - cur_pos[0], goal_pos[1] - cur_pos[1])\n dt_angle = self.compute_rel_angle(goal_pos, cur_pos)\n agent = self.rvo_agent_list[index]\n self.sim.setAgentPrefVelocity(agent, dt_vel)\n pref_linear_vel, pref_angular_vel = human.get_linear_vel[1], human.get_angular_vel[1]\n self.control_vel([pref_linear_vel, pref_angular_vel * dt_angle])\n\n def set_goal(self, index, goal_list):\n human = self.human_list[index]\n human.set_goal_list(goal_list)\n\n def compute_rel_angle(self, start_point, end_point):\n rel_angle = np.arctan2(end_point[1] - start_point[1], end_point[0] - start_point[0])\n return rel_angle\n\n def get_crash_state(self):\n return self.is_crashed\n\n def get_sim_time(self):\n return self.sim_time\n\n def reset_pose(self):\n random_pose = self.generate_random_pose() # return [x, y, theta] [-9~9,-9~9], dist>9\n 
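        # publish the sampled pose, then poll the ground-truth state and keep
        # re-sending the pose until the simulator reports the robot within 0.2 of
        # the requested spawn point (see the while loop below).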
rospy.sleep(0.01)\n self.control_pose(random_pose) # create pose(Euler or quartanion) for ROS\n [x_robot, y_robot, theta] = self.get_self_stateGT() # Ground Truth Pose\n\n # start_time = time.time()\n while np.abs(random_pose[0] - x_robot) > 0.2 or np.abs(random_pose[1] - y_robot) > 0.2: # np.bas: absolute, compare # generated random pose with topic pose\n [x_robot, y_robot, theta] = self.get_self_stateGT() # same\n self.control_pose(random_pose)\n rospy.sleep(0.01)\n\n\n def control_vel(self, action, index):\n move_cmd = Twist()\n move_cmd.linear.x = action[0]\n move_cmd.linear.y = 0.\n move_cmd.linear.z = 0.\n move_cmd.angular.x = 0.\n move_cmd.angular.y = 0.\n move_cmd.angular.z = action[1]\n self.cmd_vel_list[index].publish(move_cmd)\n\n\n def control_pose(self, pose, index): # pose = [x, y, theta]\n pose_cmd = Pose()\n assert len(pose)==3\n pose_cmd.position.x = pose[0] # x\n pose_cmd.position.y = pose[1] # y\n pose_cmd.position.z = 0 # 0(don't care rot cause pose?)\n\n qtn = tf.transformations.quaternion_from_euler(0, 0, pose[2], 'rxyz')\n pose_cmd.orientation.x = qtn[0]\n pose_cmd.orientation.y = qtn[1]\n pose_cmd.orientation.z = qtn[2]\n pose_cmd.orientation.w = qtn[3]\n self.cmd_pose_list[index].publish(pose_cmd)\n\n#############################################################################################\n# Callback function\n#############################################################################################\n def ground_truth_callback(self, GT_odometry):\n Quaternious = GT_odometry.pose.pose.orientation\n Euler = tf.transformations.euler_from_quaternion([Quaternious.x, Quaternious.y, Quaternious.z, Quaternious.w])\n self.state_GT = [GT_odometry.pose.pose.position.x, GT_odometry.pose.pose.position.y, Euler[2]]\n v_x = GT_odometry.twist.twist.linear.x\n v_y = GT_odometry.twist.twist.linear.y\n v = np.sqrt(v_x**2 + v_y**2)\n self.speed_GT = [v, GT_odometry.twist.twist.angular.z]\n\n def odometry_callback(self, odometry):\n Quaternions = odometry.pose.pose.orientation\n Euler = tf.transformations.euler_from_quaternion([Quaternions.x, Quaternions.y, Quaternions.z, Quaternions.w])\n self.state = [odometry.pose.pose.position.x, odometry.pose.pose.position.y, Euler[2]]\n self.speed = [odometry.twist.twist.linear.x, odometry.twist.twist.angular.z]\n\n def sim_clock_callback(self, clock):\n self.sim_time = clock.clock.secs + clock.clock.nsecs / 1000000000.\n\n def crash_callback(self, flag):\n self.is_crashed = flag.data\n\n\n\n\n","repo_name":"mincheulkim/rl_collision_avoidance_flowMap","sub_path":"envs/env_crowd.py","file_name":"env_crowd.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"9529160757","text":"from classes.staff import Staff\nfrom classes.student import Student\n\nclass School:\n def __init__(self, name):\n self.name = name\n self.staff = Staff.objects()\n self.students = Student.objects()\n\n def list_students(self):\n student_count = 1\n print(\"\\n\")\n for each_student in self.students:\n print(f\"{student_count}. 
{each_student.name} {each_student.school_id}\")\n student_count += 1\n \n def find_student_by_id(self, student_id):\n for each_student in self.students:\n if each_student.school_id == student_id:\n print(each_student)","repo_name":"jjpeterson90/oop-school-interface-ii","sub_path":"classes/school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72151465902","text":"from lib import load_input\n\n\ndef solve(data):\n return int_to_snafu(sum(snafu_to_int(line) for line in data.splitlines()))\n\n\ndef snafu_to_int(snafu):\n return sum(pow(5, i) * ('=-012'.index(c) - 2) for i, c in enumerate(reversed(snafu)))\n\n\ndef int_to_snafu(num):\n res = ''\n carry = 0\n while num or carry:\n cur = num % 5 + carry\n carry = 0\n if cur > 2:\n carry = 1\n cur %= 5\n res = (str(cur) if cur < 3 else '=' if cur == 3 else '-') + res\n num //= 5\n return res\n\n\nif __name__ == '__main__':\n print(solve(load_input('small')))\n print(solve(load_input()))\n","repo_name":"yoshivda/aoc2022","sub_path":"days/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15243742793","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework import generics, status\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom csv_wizard import CSVWizard, CURRENT_PARENT_DIR\nimport os\nfrom . import serializers\nfrom .models import File\n\n\navailable_operations = [\n \"slice\",\n \"divide\",\n \"delete_blanks\",\n \"find_common_rows\",\n \"find_different_rows\"\n]\n\nclass FilesView(generics.GenericAPIView):\n \n serializer_class = serializers.FileSerializer\n\n def get_queryset(self):\n return self.request\n \n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid():\n # after DRF default validation, apply other custom validations\n if request.data.get(\"operation_name\") not in available_operations:\n raise ValidationError({\"operation_error\":\"operation does not exist\"}, status.HTTP_400_BAD_REQUEST)\n if request.data.get(\"operation_name\") == \"divide\" and not request.data.get(\"number_of_parts\"):\n raise ValidationError({\"number_of_parts_error\":\"operation 'divide' needs a specific number of parts\"}, status.HTTP_400_BAD_REQUEST)\n if request.data.get(\"operation_name\") == \"find_common_rows\" or request.data.get(\"operation_name\") == \"find_different_rows\":\n if not request.data.get(\"file2_contents\"):\n raise ValidationError({\"amount_of_files_error\":\"Operations 'find_common_rows' and 'find_different_rows' require TWO files\"}, status.HTTP_400_BAD_REQUEST)\n if request.data.get(\"file1_contents\").content_type != \"text/csv\":\n raise ValidationError({\"file_type_error\":\"File 1 is not a CSV\"}, status.HTTP_400_BAD_REQUEST)\n if request.data.get(\"file2_contents\"):\n if request.data.get(\"file2_contents\").content_type != \"text/csv\":\n raise ValidationError({\"file_type_error\":\"File 2 is not a CSV\"}, status.HTTP_400_BAD_REQUEST)\n\n file_entry = File.objects.create(\n user=request.user,\n size=request.data.get(\"file1_contents\").size,\n name=request.data.get(\"file1_contents\").name,\n operation_name=request.data.get(\"operation_name\"),\n is_deleted=False,\n )\n file_entry.save()\n try:\n 
os.mkdir(f\"./results_files/{request.user.username}/\")\n except FileExistsError:\n pass\n \n if request.data.get('file2_contents'):\n file_entry2 = File.objects.create(\n user=request.user,\n size=request.data.get(\"file2_contents\").size,\n name=request.data.get(\"file2_contents\").name,\n is_deleted=False,\n )\n file_entry2.save()\n \n with open(f\"./files/{request.user.username}/{request.data.get('file1_contents').name[:-4]}-{request.user.username}.csv\", \"wb\") as file:\n for chunk in request.data.get(\"file1_contents\").chunks():\n file.write(chunk)\n if request.data.get('file2_contents'):\n with open(f\"./files/{request.user.username}/{request.data.get('file2_contents').name[:-4]}-{request.user.username}.csv\", \"wb\") as file:\n for chunk in request.data.get(\"file2_contents\").chunks():\n file.write(chunk)\n\n operation = request.data.get(\"operation_name\")\n user_results_files_folder_path = f\"./results_files/{request.user.username}/\"\n file1_filesystem_name = f\"{request.data.get('file1_contents').name[:-4]}-{request.user.username}\"\n if request.data.get('file2_contents'):\n file2_filesystem_name = f\"{request.data.get('file2_contents').name[:-4]}-{request.user.username}\"\n\n is_file1_only_request = request.data.get(\"file1_contents\") and not request.data.get(\"file2_contents\")\n is_two_files_request = request.data.get(\"file1_contents\") and request.data.get(\"file2_contents\")\n\n # process CSV files\n if is_file1_only_request:\n\n file1 = CSVWizard(file1_filesystem_name, f\"./files/{request.user.username}\")\n file1_encoding = file1.get_encoding()\n file1_headers = file1.get_headers()\n \n if operation == \"slice\":\n try:\n os.mkdir(f\"{user_results_files_folder_path}/slice/\")\n except FileExistsError:\n pass\n \n try:\n result = file1.slice()\n \n first_half = CSVWizard(f\"{file1_filesystem_name}_FIRST_HALF\", f\"{user_results_files_folder_path}/slice\")\n second_half = CSVWizard(f\"{file1_filesystem_name}_SECOND_HALF\", f\"{user_results_files_folder_path}/slice\")\n \n first_half.overwrite(result['First_Half'], file1_encoding)\n first_half.write_headers(file1_headers)\n second_half.overwrite(result['Second_Half'], file1_encoding)\n second_half.write_headers(file1_headers)\n\n return Response(data={\n \"operation\": \"slice\", \n \"filename\": {\n \"first_half\": f\"{file1_filesystem_name}_FIRST_HALF\",\n \"second_half\": f\"{file1_filesystem_name}_SECOND_HALF\"\n }}, \n status=status.HTTP_200_OK\n )\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n elif operation == \"divide\":\n\n number_of_parts = int(request.data.get('number_of_parts'))\n all_resulting_filenames = []\n \n try:\n os.mkdir(f\"{user_results_files_folder_path}/divide/\")\n except FileExistsError:\n pass\n\n try:\n result = file1.divide(number_of_parts)\n\n for i in range(number_of_parts):\n part = CSVWizard(f\"{file1_filesystem_name}-divided_part_{i + 1}\", f\"{user_results_files_folder_path}/divide/\")\n part.overwrite(result[i], file1_encoding)\n part.write_headers(file1_headers)\n all_resulting_filenames.append(f\"{file1_filesystem_name}-divided_part_{i + 1}\")\n\n return Response(data={\n \"operation\":\"divide\",\n \"number_of_parts\": number_of_parts,\n \"filenames\": all_resulting_filenames\n }, \n status=status.HTTP_200_OK\n )\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n elif operation == \"delete_blanks\":\n\n try:\n 
os.mkdir(f\"{user_results_files_folder_path}/delete_blanks/\")\n except FileExistsError:\n pass\n \n try:\n result = file1.delete_blanks()\n\n no_blanks_file = CSVWizard(f\"{file1_filesystem_name}_no_blanks\", f\"{user_results_files_folder_path}/delete_blanks/\")\n no_blanks_file.overwrite(result, file1_encoding)\n no_blanks_file.write_headers(file1_headers)\n\n return Response(\n data={\"operation\":\"delete_blanks\", \"filename\":f\"{file1_filesystem_name}_no_blanks\"}, \n status=status.HTTP_200_OK\n )\n \n except Exception as e:\n return Response(data={\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n elif is_two_files_request:\n\n file1 = CSVWizard(file1_filesystem_name, f\"./files/{request.user.username}\")\n file1_encoding = file1.get_encoding()\n file1_headers = file1.get_headers()\n file2 = CSVWizard(file2_filesystem_name, f\"./files/{request.user.username}\")\n\n if operation == \"find_common_rows\":\n \n try:\n os.mkdir(f\"{user_results_files_folder_path}/find_common_rows/\")\n except FileExistsError:\n pass\n\n try:\n result = file1.find_common_rows(file2)\n\n common_rows_file = CSVWizard(f\"{file1_filesystem_name}-{file2_filesystem_name}_common_rows\", f\"{user_results_files_folder_path}/find_common_rows/\")\n common_rows_file.overwrite(result, file1_encoding)\n common_rows_file.write_headers(file1_headers)\n\n return Response(\n data={\n \"operation\":\"find_common_rows\",\n \"filename\":f\"{file1_filesystem_name}-{file2_filesystem_name}_common_rows\"\n }, \n status=status.HTTP_200_OK\n )\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n elif operation == \"find_different_rows\":\n try:\n os.mkdir(f\"{user_results_files_folder_path}/find_different_rows/\")\n except FileExistsError:\n pass\n \n try:\n result = file1.find_different_rows(file2)\n\n different_rows_file = CSVWizard(f\"{file1_filesystem_name}-{file2_filesystem_name}_different_rows\", f\"{user_results_files_folder_path}/find_different_rows/\")\n different_rows_file.overwrite(result, file1_encoding)\n different_rows_file.write_headers(file1_headers)\n \n return Response(\n data={\n \"operation\":\"find_different_rows\",\n \"filename\": f\"{file1_filesystem_name}-{file2_filesystem_name}_different_rows\"\n }, \n status=status.HTTP_200_OK\n )\n except Exception as e:\n return Response(data={\"error\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\n else:\n return Response({\"error\": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass FileDownloadView(generics.GenericAPIView):\n\n def get(self, request, operation, filename):\n try:\n with open(f\"results_files/{request.user.username}/{operation}/{filename}.csv\", \"rb\") as file:\n response = HttpResponse(file.read(), content_type=\"text/csv\")\n response['Content-Disposition'] = f\"attachment; filename={filename}.csv\"\n return response\n except FileNotFoundError:\n return Response(status=status.HTTP_404_NOT_FOUND)\n","repo_name":"CarlosZBent/csv_wizard_web","sub_path":"file_handling/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17534420361","text":"import time\r\nimport csv\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import ChromeOptions\r\nfrom selenium.webdriver.common.by import By\r\n\r\ndef scrape(channel_id='statquest'):\r\n options = ChromeOptions()\r\n\r\n options.add_argument(\"--start-maximized\")\r\n 
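    # the two experimental options below disable Chrome's automation extension and
    # the "enable-automation" switch, which hides the automated-test infobar and
    # makes the session look less like a bot (presumably to reduce blocking).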
options.add_experimental_option(\"useAutomationExtension\", False)\r\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\r\n\r\n driver = webdriver.Chrome(options=options)\r\n\r\n\r\n driver.get(f\"https://www.youtube.com/@{channel_id}/videos\")\r\n\r\n SCROLL_PAUSE_TIME = 2\r\n\r\n # Get scroll height\r\n last_height = driver.execute_script(\"return document.documentElement.scrollHeight\")\r\n\r\n while True:\r\n # Scroll down to bottom\r\n driver.execute_script(\"window.scrollTo(0, document.documentElement.scrollHeight);\")\r\n\r\n # Wait to load page\r\n time.sleep(SCROLL_PAUSE_TIME)\r\n\r\n # Calculate new scroll height and compare with last scroll height\r\n new_height = driver.execute_script(\"return document.documentElement.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n\r\n container = driver.find_element(By.ID, 'contents')\r\n videos_blocks = container.find_elements(By.TAG_NAME, 'ytd-rich-grid-row')\r\n\r\n # Open a CSV file in write mode\r\n with open('video_titles.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n # Write the header\r\n writer.writerow([\"Title\"])\r\n\r\n for block in videos_blocks:\r\n videos = block.find_elements(By.TAG_NAME, 'ytd-rich-item-renderer')\r\n for video in videos:\r\n title = video.find_element(By.TAG_NAME, 'h3').text\r\n # Write the title to the CSV file\r\n writer.writerow([title])\r\n\r\n driver.quit()\r\n","repo_name":"kokukai/bright-to-right-statquest","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12201455178","text":"import datetime\nimport os\nimport glob\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom torch.utils.data import Dataset, DataLoader, DistributedSampler\nfrom interpolation_ways import *\nimport torch.nn.functional as F\nimport torch.distributed as dist\n\n\ndef create_dataloader(configs, train, root_dir=None):\n def train_collate_fn(batch):\n train_seq_date_list = list()\n train_seq_flux_list = list()\n train_pred_date_list = list()\n train_pred_flux_list = list()\n\n # for train_seq, train_pred in batch:\n for (date_seq, date_pred), (train_seq, train_pred) in batch:\n train_seq_date_list.append(torch.from_numpy(date_seq).float())\n train_seq_flux_list.append(torch.from_numpy(train_seq).float())\n train_pred_date_list.append(torch.from_numpy(date_pred).float())\n train_pred_flux_list.append(torch.from_numpy(train_pred).float())\n # print(train_seq_date_list[0].shape)\n\n train_seq_date_list = torch.stack(train_seq_date_list, dim=0)\n train_seq_flux_list = torch.stack(train_seq_flux_list, dim=0)\n train_pred_date_list = torch.stack(train_pred_date_list, dim=0)\n train_pred_flux_list = torch.stack(train_pred_flux_list, dim=0)\n return train_seq_date_list, train_seq_flux_list, train_pred_date_list, train_pred_flux_list\n # return train_seq_flux_list, train_pred_flux_list\n\n # def validation_collate_fn(batch):\n # return train_collate_fn(batch)\n\n if train:\n dataset = JeonpaDataset(configs, True, root_dir=root_dir)\n # train_sampler = DistributedSampler(dataset)\n\n return DataLoader(dataset=dataset,\n batch_size=configs.train.batch_size,\n # shuffle=True,\n num_workers=configs.train.num_workers,\n collate_fn=train_collate_fn,\n # sampler=train_sampler,\n # pin_memory=True,\n # drop_last=True,\n # sampler=None\n )\n else:\n dataset = 
JeonpaDataset(configs, False, root_dir=root_dir)\n # vali_sampler = DistributedSampler(dataset)\n\n return DataLoader(dataset=dataset,\n collate_fn=train_collate_fn,\n\n batch_size=64,\n shuffle=False,\n num_workers=configs.train.num_workers,\n # sampler=vali_sampler\n )\n\n\ndef create_testloader(configs, root_dir=None):\n def test_collate_fn(batch):\n train_seq_date_list = list()\n train_seq_flux_list = list()\n date_list = list()\n\n for train_date_seq, train_flux_seq, date in batch:\n train_seq_date_list.append(torch.from_numpy(train_date_seq).float())\n train_seq_flux_list.append(torch.from_numpy(train_flux_seq).float())\n date_list.append(torch.from_numpy(date).float())\n # print(train_seq_date_list[0].shape)\n\n train_seq_date_list = torch.stack(train_seq_date_list, dim=0)\n train_seq_flux_list = torch.stack(train_seq_flux_list, dim=0)\n date_list = torch.stack(date_list, dim=0)\n\n return train_seq_date_list, train_seq_flux_list, date_list\n\n return DataLoader(dataset=JeonpaTestDataset(configs, root_dir=root_dir),\n collate_fn=test_collate_fn,\n batch_size=1, shuffle=False, num_workers=0)\n\n\ndef get_data_from_path(configs, file_path, test=False, root_dir=None):\n if root_dir is None:\n root_dir = Path(os.getcwd()).parent.absolute()\n # print(f\"root directory: {root_dir}\")\n\n dataset = pd.read_csv(f'{root_dir}/{file_path}')\n # print(dataset)\n\n flux = np.array(dataset['flux'])\n print('len before interpolation:', len(flux))\n\n def to_datetime(str):\n if type(str) is int:\n # 그냥 2023년 10/11월이라 하자.\n # print(str)\n if str <= 31:\n res = datetime.datetime(2023, 10, str)\n else:\n res = datetime.datetime(2023, 11, str - 31)\n else:\n split = str.split('/')\n res = datetime.datetime(2000 + int(split[2]), int(split[0]), int(split[1]))\n return res\n\n # print(dataset)\n df_stamp = dataset['date']\n # print(df_stamp)\n\n df_stamp = df_stamp.apply(to_datetime)\n date = np.concatenate((np.array(df_stamp.apply(lambda row: row.month, 1))[:, np.newaxis],\n np.array(df_stamp.apply(lambda row: row.day, 1))[:, np.newaxis],\n np.array(df_stamp.apply(lambda row: row.weekday(), 1))[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.hour, 1))[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.minute, 1))[:, np.newaxis]\n ),\n axis=1) #.astype(dtype=np.int64)\n # print(date)\n\n interpolation_model = InterpolationRemoveLongMissingValue(configs)\n # interpolation_model = InterpolationPoly(configs)\n # interpolation_model = InterpolationKNN(configs)\n\n date, flux = interpolation_model.get_dataset(date, flux, test)\n print('len after interpolation:', len(flux))\n\n\n # print(df_stamp)\n # print(date[0])\n\n return date, flux\n\n\nclass JeonpaDataset(Dataset):\n def __init__(self, configs, train, root_dir=None):\n self.configs = configs\n self.train = train\n self.train_file = configs.data.trainset\n self.test_file = configs.data.trainset\n\n self.split_rate = configs.data.split_rate\n self.seq_len = configs.model.seq_len\n self.pred_len = configs.model.pred_len\n\n # 학습데이터와 테스트데이터를 나눔.\n date, flux = get_data_from_path(configs, self.configs.data.trainset, root_dir=root_dir)\n split_index = int(len(flux) * self.split_rate)\n # print(len(flux))\n self.train_date = date[:split_index]\n self.train_flux = flux[:split_index]\n self.test_date = date[split_index:]\n self.test_flux = flux[split_index:]\n\n def __len__(self):\n if self.train:\n return len(self.train_flux)\n else:\n return len(self.test_flux)\n\n def __getitem__(self, idx):\n if self.train:\n # dim = 1\n # train_seq = 
self.train_flux[idx:idx + self.seq_len][:, np.newaxis] # 10~70\n # train_pred = self.train_flux[idx + self.seq_len:idx + self.seq_len + self.pred_len][:, np.newaxis] # 70~100\n\n # shape = (3, 2)\n # shape[:, np.newaxis, :] # 3, 1, 2\n # shape.unsqueeze(1) (3, 1, 2)\n # flatten -> 차원 밀어서 -> 1차원\n # squeeze((3, 1, 1, 1, 1, 2)) -> (3, 2)\n # sequeeze((3, 1, 2, 1), dim=1) -> (3, 2, 1)\n return self.train_date[idx], self.train_flux[idx]\n else:\n # validation_seq = self.test_flux[idx:idx + self.seq_len][:, np.newaxis]\n # validation_pred = self.test_flux[idx + self.seq_len:idx + self.seq_len + self.pred_len][:, np.newaxis]\n # return validation_seq, validation_pred\n return self.test_date[idx], self.test_flux[idx]\n\n\nclass JeonpaTestDataset(Dataset):\n def __init__(self, configs, root_dir=None):\n self.configs = configs\n self.seq_len = configs.model.seq_len\n self.pred_len = configs.model.pred_len\n self.label_len = configs.model.label_len\n self.date, self.flux = get_data_from_path(configs, self.configs.data.testset, test=True, root_dir=root_dir)\n # print(self.date[0].shxape)\n # print(self.flux)\n\n def __len__(self):\n # 입력, 출력 길이에 따라 사용할 수 있는 데이터의 양이 달라진다.\n # raw 데이터 길이가 100이고, seq_len = 60, pred_len = 30인 경우 -> 90개\n # 11개의 학습 데이터를 뽑을 수 있음\n # 0-89, 1-90, 2-91, 3-92, 4-93,\n # 5-94, 6-95, 7-96, 8-97, 9-98, 10-99\n # total_train_len = dataset_len - self.seq_len - self.pred_len + 1\n # minus = self.seq_len - 1\n # return len(self.date) - minus # or test_flux\n\n return len(self.flux)\n\n def __getitem__(self, idx):\n # train_seq = self.flux[idx:idx + self.seq_len][:, np.newaxis]\n\n # date = np.concatenate((np.array(df_stamp.apply(lambda row: row.month, 1))[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.day, 1))[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.weekday(), 1))[:, np.newaxis],\n # # np.array(df_stamp.apply(lambda row: row.hour, 1))[:, np.newaxis],\n # # np.array(df_stamp.apply(lambda row: row.minute, 1))[:, np.newaxis]\n # ),\n months = []\n days = []\n weekdays = []\n for i in range(self.pred_len + self.label_len): # 예측 30개\n if i <= 29:\n res = datetime.datetime(2023, 11, i + 1)\n else:\n # print(i)\n res = datetime.datetime(2023, 12, i - 29)\n\n months.append(res.month)\n days.append(res.day)\n weekdays.append(res.weekday())\n\n date = np.concatenate((np.array(months)[:, np.newaxis],\n np.array(days)[:, np.newaxis],\n np.array(weekdays)[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.hour, 1))[:, np.newaxis],\n # np.array(df_stamp.apply(lambda row: row.minute, 1))[:, np.newaxis]\n ),\n axis=1)\n # print(date)\n\n return self.date[idx], self.flux[idx], date\n","repo_name":"GoldenMine0502/JeonpaAI","sub_path":"src/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"876668077","text":"#!/bin/python3\nimport random\ndef checkCorrect(int):\n if int == 5:\n print('Congratulations! You won $100,000')\n elif int == 4:\n print('Congratulations! You won $4,000')\n elif int == 3: \n print('Congratulations! You won $5')\n else:\n print('Play Again')\n print('Welcome to the Lotto Game')\n\n#Do I need to make this a main function? All the github labs said to do it this way. What is the purpose? 
Is it so I can just call main() in a try/except to restart the program?\ndef main():\n numString = input('Enter 5 Numbers (1-60) seperated by a comma: ')\n numList = numString.split(\",\")\n\n try:\n #The stupid list was returning empty for the second condition it took me a long time to figure out I had to add list() when converting the list to ints. Not 100% sure why\n numbers = list(int(i) for i in numList)\n if (max(numbers) < 61 and min(numbers) > 0):\n print('Your Lotto Numbers:')\n print(numbers)\n \n #PLAN: Create a function that checks for dupe entries and add the if == True condition to the above if statement\n winningNum = random.sample(range(1, 60), 5)\n print('Winning Numbers:')\n print(winningNum)\n\n numCorrect = sum(x == y for x, y in zip(numList, winningNum))\n print('You got', numCorrect, 'correct')\n checkCorrect(numCorrect)\n\n else:\n #If I run in python3 CL this errors out with command not found\n print('nope')\n except:\n print('Bad Value - Enter numbers only')\n\nif __name__== \"__main__\":\n main()\n#Is this just saying this is the end of the function or something else?\n","repo_name":"halesyeah3/lotto","sub_path":"lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34256657575","text":"from transform.transformers.survey_transformer import SurveyTransformer\n\n\nclass LCTransformer(SurveyTransformer):\n \"\"\"Performs the transforms and formatting for the low carbon survey.\n\n low carbon is unusual in that it does not create a pck file\n \"\"\"\n\n def create_pck(self):\n pck_name = \"\"\n pck = None\n return pck_name, pck\n","repo_name":"ONSdigital/sdx-transform-cs","sub_path":"transform/transformers/common_software/low_carbon_transformer.py","file_name":"low_carbon_transformer.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"684524071","text":"#Default background stars without growth\n\nclass Star:\n \n def __init__(self,a,b,c,d):\n self.x = a\n self.y = b\n self.sz = c\n self.focal_x = width/2\n self.focal_y = height/2\n self.col = d\n self.warp = 0\n self.away = dist(self.x,self.y,self.focal_x,self.focal_y)\n \n #Code used if there's only one star class\n # self.growth = 0\n # if dist(self.focal_x,self.focal_y,self.x,self.y) <= 50:\n # self.growth = 0.2\n # elif dist(self.focal_x,self.focal_y,self.x,self.y) <= 75:\n # self.growth = 0.1\n \n # Cartesian movement (has problems at high slopes)\n # def move(self):\n # self.speed = (self.y-self.focal_y)/(self.x-self.focal_x)\n # self.n = self.y-self.speed*self.x\n # if self.x < self.focal_x:\n # self.x -= self.warp + self.sz#*sqrt(1/(20*(pow(self.speed,2)+1))) \n # self.y = self.speed*self.x + self.n\n # elif self.x >= self.focal_x:\n # self.x += self.warp + self.sz#*sqrt(1/(20*(pow(self.speed,2)+1))) '''\n # self.y = self.speed * self.x + self.n\n \n \n #Polar movement (rotation is broken)\n def move(self):\n self.cang = (self.x-self.focal_x)/self.away\n self.sang = (self.y-self.focal_y)/self.away\n self.away += self.warp + self.sz\n self.x = self.away*self.cang + self.focal_x\n self.y = self.away*self.sang + self.focal_y\n \n def display(self):\n fill(self.col)\n ellipse(self.x,self.y,self.sz,self.sz)\n \n # Code used if there's only one star class\n # self.sz += self.growth\n \n def edges(self):\n if self.x >= width or self.x <= 0 or self.y >= height or self.y <= 0:\n self.x = 
random(self.focal_x-width/2,self.focal_x+width/2)\n self.y = random(self.focal_y-height/2, self.focal_y+height/2)\n self.away = dist(self.x,self.y,self.focal_x,self.focal_y)\n \n #Code used if there's only one star class\n # if self.sz <= 3:\n # self.x = random(self.focal_x-width/2,self.focal_x+width/2)\n # self.y = random(self.focal_y-height/2, self.focal_y+height/2)\n # self.sz = random(1,2)\n # else:\n # self.x = random(self.focal_x-width/6,self.focal_x+width/6)\n # self.y = random(self.focal_y-height/6,self.focal_y+height/6)\n # self.sz = random(2,8)\n \n def pilot(self):\n if keyPressed: \n noCursor()\n if key == \"a\":\n #self.x += 1 \n self.focal_x = width/4\n elif key == \"w\":\n #self.y += 1\n self.focal_y = height/4\n elif key == \"s\":\n #self.y -= 1\n self.focal_y = 3*height/4\n elif key == \"d\":\n #self.x -= 1\n self.focal_x = 3*width/4\n\n # Still not working properly \n # elif key == \"e\":\n # self.sang = sin(asin(self.sang)+1)\n # self.cang = cos(acos(self.cang)+1)\n \n # self.rot = (self.x-width/2) + (self.y-height/2j)\n # print(self.rot)\n # self.new = self.rot * 1j\n # print(self.new)\n # self.x = self.new.real + width/2\n # self.y = self.new.imag + height/2\n \n elif key == \" \":\n self.warp = 10\n else :\n self.focal_x = width/2\n self.focal_y = height/2\n self.warp = 0\n\n \n","repo_name":"jiug/processing","sub_path":"hyperspace_py/Star.py","file_name":"Star.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73886288944","text":"from time import sleep\r\nfrom prettytable import PrettyTable\r\nimport os\r\n\r\ndef menu_cabecalho(texto, tamanho):\r\n print(tamanho*'-')\r\n print(texto.center(tamanho))\r\n print(tamanho*'-')\r\n \r\ndef menu_listar(lista_menu):\r\n for i in range(len(lista_menu)):\r\n print(f'[{i+1}] {lista_menu[i]}')\r\n \r\ndef castrar_produto(produtos,categoria,categoria_nomes):\r\n novo_produto = {}\r\n categoria_produtos = {}\r\n mensagem = ''\r\n while True:\r\n codigo = str(input('Digite o CÓDIGO do produto:\\t'))\r\n codigo = codigo.upper()\r\n if codigo not in produtos.keys():\r\n for i in range(len(categoria)):\r\n nome = str(input(f'Digite {categoria_nomes[i]} do produto:\\t'))\r\n nome = nome.upper()\r\n categoria_produtos[categoria[i]] = nome\r\n \r\n novo_produto[codigo] = categoria_produtos\r\n produtos.update(novo_produto)\r\n mensagem = 'PRODUTO CADASTRADO COM SUCESSO!'\r\n break\r\n else:\r\n mensagem = 'ERRO! ESSE CÓDIGO JÁ ESTÁ CADASTRADO!'\r\n print(mensagem)\r\n mensagem = 'ERRO! 
PRODUTO NÃO CADASTRADO!'\r\n return produtos, print(mensagem)\r\n\r\ndef localizar_produto(produtos):\r\n produto_obtido = {}\r\n codigo = str(input('Digite o CÓDIGO do produto:\\t'))\r\n codigo = codigo.upper()\r\n if codigo in produtos.keys():\r\n produto_obtido = {codigo:produtos[codigo]}\r\n return produto_obtido\r\n\r\ndef remover_produto(produtos):\r\n codigo = str(input('Digite o CÓDIGO do produto:\\t'))\r\n codigo = codigo.upper()\r\n if codigo in produtos.keys():\r\n del produtos[codigo]\r\n return produtos, print('PRODUTO REMOVIDO COM SUCESSO!')\r\n else:\r\n return print('PRODUTO NÃO ENCONTRADO')\r\n\r\ndef alterar_produto(produtos,categoria,categoria_nomes):\r\n codigo = str(input('Digite o CÓDIGO do produto:\\t'))\r\n codigo = codigo.upper()\r\n if codigo in produtos.keys():\r\n for i in range(len(categoria)):\r\n nome = str(input(f'Digite {categoria_nomes[i]} do produto:\\t'))\r\n nome = nome.upper()\r\n produtos[codigo][categoria[i]] = nome \r\n else:\r\n return print('PRODUTO NÃO ENCONTRADO') \r\n return produtos\r\n\r\ndef perguntar_novamente(texto):\r\n while True:\r\n escolha = str(input(f'\\nDeseja {texto} (S/N): '))\r\n escolha = escolha.upper()\r\n if escolha == 'S':\r\n return escolha\r\n elif escolha == '' or escolha == 'N':\r\n print('OKAY, VOLTANDO PARA O MENU...')\r\n sleep(2)\r\n return 'N'\r\n\r\ndef mostrar_na_tela(produtos):\r\n categoria = ['CÓDIGO','NOME','MARCA','CATEGORIA','VALOR (R$)']\r\n x = PrettyTable()\r\n produtos_lista = []\r\n chaves_lista = []\r\n x.field_names = categoria\r\n\r\n chaves_lista = list(produtos.keys())\r\n contador = 0\r\n\r\n if produtos == {}:\r\n return print(x)\r\n else:\r\n for i in produtos.keys():\r\n for j in produtos[i]:\r\n produtos_lista.append(produtos[i][j])\r\n produtos_lista.insert(0,chaves_lista[contador])\r\n x.add_row(produtos_lista)\r\n produtos_lista = []\r\n contador += 1\r\n return print(x)\r\n\r\ndef limpar_tela():\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\ndef soma_media(produtos):\r\n soma = 0\r\n print(f'O NÚMERO TOTAL DE PRODUTOS É: {len(produtos.keys())}')\r\n for i in produtos.keys():\r\n valor = (produtos[i]['valor']).replace(',', '.')\r\n soma = soma + float(valor)\r\n print(f'A SOMA DOS VALORES É : R${str(soma).replace(\".\", \",\")}')\r\n if not produtos == {}:\r\n print(f'A MÉDIA DOS VALORES É: R${str(round(soma/len(produtos.keys()),2)).replace(\".\", \",\")}')\r\n \r\n \r\n","repo_name":"miqueiasrodrigues/virtual-store-python","sub_path":"Funcoes.py","file_name":"Funcoes.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"70041423985","text":"# ler a idade de um jovem e determinar ;\n# falta x anos para ele atingir a idade de se alistar;\n# ele esta na idade de se alistar;\n# já passaram x anos do alistamento;\nimport datetime\nanoatual = datetime.date.today().year\n\nanonasc = int(input(\"Digite o ano do seu nascimento (formato AAAA), ex.: 1986: \"))\n\nif anoatual - anonasc == 18:\n print(\"Você deve se alistar\")\nelif anoatual - anonasc > 18:\n atraso = (anoatual - anonasc) - 18\n print(f\"É, tá barril, vc tá atrasado em {atraso} ano(s). 
Veja aí o que vc vai fazer, maluco\")\nelse:\n falta = abs((anoatual - anonasc) - 18)\n print(f\"Aproveite, vc ainda eh criança e falta(m) {falta} ano(s) pra vc.\")\nprint(\"Valeu!\")","repo_name":"us19861229c/Meu-aprendizado-Python","sub_path":"CeV - Gustavo Guanabara/exerc039.py","file_name":"exerc039.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"37684182779","text":"from transformers import AutoTokenizer, AutoModel\nimport torch\n\n\ndef get_last_hidden_state():\n # First we initialize our model and tokenizer:\n\n tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')\n model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')\n\n # Then we tokenize the sentences just as before:\n\n sentences = [\n \"Three years later, the coffin was still full of Jello.\",\n \"The fish dreamed of escaping the fishbowl and into the toilet where he saw his friend go.\",\n \"The person box was packed with jelly many dozens of months later.\",\n \"He found a leprechaun in his walnut shell.\"\n ]\n\n # 初始化字典来存储\n tokens = {'input_ids': [], 'attention_mask': []}\n\n for sentence in sentences:\n # 编码每个句子并添加到字典\n new_tokens = tokenizer.encode_plus(sentence, max_length=128,\n truncation=True, padding='max_length',\n return_tensors='pt')\n tokens['input_ids'].append(new_tokens['input_ids'][0])\n tokens['attention_mask'].append(new_tokens['attention_mask'][0])\n\n # 将张量列表重新格式化为一个张量\n tokens['input_ids'] = torch.stack(tokens['input_ids'])\n tokens['attention_mask'] = torch.stack(tokens['attention_mask'])\n\n # We process these tokens through our model:\n\n outputs = model(**tokens)\n # outputs.keys()\n\n #odict_keys(['last_hidden_state', 'pooler_output'])\n\n #The dense vector representations of our text are contained within the outputs 'last_hidden_state' tensor, which we access like so:\n\n # embeddings = outputs.last_hidden_state\n embeddings = outputs[0]\n return embeddings, tokens\n\n\ndef compute_mean_pooled(tokens, embeddings):\n\n # To perform this operation, we first resize our attention_mask tensor:\n attention_mask = tokens['attention_mask']\n\n # attention_mask.shape\n # torch.Size([4, 128])\n\n mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()\n\n # mask.shape\n # torch.Size([4, 128, 768])\n\n # 上面的每个向量表示一个单独token的掩码——现在每个token都有一个大小为768的向量,表示它的attention_mask状态。然后将两个张量相乘:\n\n masked_embeddings = embeddings * mask\n\n # masked_embeddings.shape\n # torch.Size([4, 128, 768])\n # 然后我们沿着轴1将剩余的嵌入项求和:\n\n summed = torch.sum(masked_embeddings, 1)\n # summed.shape\n # torch.Size([4, 768])\n\n # 然后将张量的每个位置上的值相加:\n\n summed_mask = torch.clamp(mask.sum(1), min=1e-9)\n # summed_mask.shape\n # torch.Size([4, 768])\n # 最后,我们计算平均值:\n\n mean_pooled = summed / summed_mask\n return mean_pooled\n\n\ndef compute_similarity(mean_pooled):\n \"\"\"\n 一旦我们有了密集向量,我们就可以计算每个向量之间的余弦相似性:\n \"\"\"\n from sklearn.metrics.pairwise import cosine_similarity\n\n #让我们计算第0句的余弦相似度:\n\n # 将PyTorch张量转换为numpy数组\n mean_pooled = mean_pooled.detach().numpy()\n\n # 计算\n simi_list = cosine_similarity(\n [mean_pooled[0]],\n mean_pooled[1:]\n )\n # array([[0.33088905, 0.7219259, 0.55483633]], dtype=float32)\n\n # These similarities translate to:\n return simi_list\n\n\nif __name__ == '__main__':\n embeddings, tokens = get_last_hidden_state()\n mean_pooled = compute_mean_pooled(tokens, embeddings)\n print(compute_similarity(mean_pooled))\n\n # 1. 
DPR 训练代码里计算相似度;看来不用:(相关工作指出,相比BERT直接的输出计算loss,后面再加一些基本MLP将会大有帮助,比如原Bert输出句子嵌入、直接计算cosine的结果会比GloVe还差,但是Bert输出后面简单再加一层再进行计算,结果会远远超过原来的)\n # 2. cat(还是stack?) query和answer时 也要有对应的attention_mask\n\n","repo_name":"mug2mag/DPR","sub_path":"biencoder_diy.py","file_name":"biencoder_diy.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"14838298088","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport grp\nimport asyncio\nimport requests\nimport subprocess\nimport logging\nfrom urllib.parse import urlparse\nfrom packaging.version import parse as parse_version\nfrom aiohttp import web\nfrom aiohttp.web_runner import GracefulExit\n\nfrom .global_settings import GlobalSettings\nfrom .osquery import Osquery\nfrom .flock_logs import FlockLog, FlockLogTypes\nfrom .api_client import (\n FlockApiClient,\n PermissionDenied,\n BadStatusCode,\n ResponseIsNotJson,\n RespondedWithError,\n InvalidResponse,\n ConnectionError,\n)\nfrom ..common import Platform\nfrom ..health import health_items\n\n\nclass Daemon:\n def __init__(self, common):\n logger = logging.getLogger(f\"Daemon.__init__\")\n self.c = common\n\n # Daemon's log\n if Platform.current() == Platform.MACOS:\n log_dir = \"/usr/local/var/log/flock-agent\"\n else:\n log_dir = \"/var/log/flock-agent\"\n os.makedirs(log_dir, exist_ok=True)\n\n # Set up logging to a file.\n base_logger = logging.getLogger(\"\")\n ch = logging.FileHandler(os.path.join(log_dir, \"log\"))\n basic_formatter = logging.Formatter(\n \"%(asctime)s: %(levelname)s - %(name)s: %(message)s\"\n )\n ch.setFormatter(basic_formatter)\n base_logger.addHandler(ch)\n\n logger.info(f\"version {self.c.version}\")\n\n self.osquery = Osquery(common)\n self.c.osquery = self.osquery\n\n hostname = self.osquery.exec(\"SELECT uuid AS host_uuid FROM system_info;\")\n if hostname:\n hostname = hostname[0][\"host_uuid\"]\n\n self.global_settings = GlobalSettings(common, hostname)\n self.c.global_settings = self.global_settings\n\n self.api_client = FlockApiClient(self.c)\n\n # Flock Agent lib directory\n if Platform.current() == Platform.MACOS:\n self.lib_dir = \"/usr/local/var/lib/flock-agent\"\n else:\n self.lib_dir = \"/var/lib/flock-agent\"\n os.makedirs(self.lib_dir, exist_ok=True)\n\n # Flock Agent keeps its own submission queue separate from osqueryd, for when users\n # enable/disable the server, or enable/disable twigs\n self.flock_log = FlockLog(self.c, self.lib_dir)\n\n # Prepare the unix socket path\n self.unix_socket_path = os.path.join(self.lib_dir, \"socket\")\n if os.path.exists(self.unix_socket_path):\n os.remove(self.unix_socket_path)\n\n # The socket uid will be 0 (root), and the group will be the administrator group.\n # In macOS, this group is \"admin\". In debian-like distros it's \"sudo\", and in fedora-like\n # distros it's the \"wheel\" group. Other distros? 
PRs are welcome :).\n if Platform.current() == Platform.MACOS:\n groupinfo = grp.getgrnam(\"admin\")\n self.gid = groupinfo.gr_gid\n else:\n if os.path.isfile(\"/usr/bin/apt\"):\n groupinfo = grp.getgrnam(\"sudo\")\n self.gid = groupinfo.gr_gid\n elif os.path.isfile(\"/usr/bin/dnf\") or os.path.isfile(\"/usr/bin/yum\"):\n groupinfo = grp.getgrnam(\"wheel\")\n self.gid = groupinfo.gr_gid\n else:\n # Unknown, so make the group root\n self.gid = 0\n\n # Start with refreshing osqueryd\n self.osquery.refresh_osqueryd()\n\n async def start(self):\n await asyncio.gather(\n self.submit_loop(), self.http_server(), self.autoupdate_loop()\n )\n\n async def submit_loop(self):\n while True:\n if self.global_settings.get(\"use_server\") and self.global_settings.get(\n \"gateway_token\"\n ):\n await self.submit_logs_osquery()\n await self.submit_logs_flock()\n\n # Wait a minute\n await asyncio.sleep(60)\n\n async def submit_logs_osquery(self):\n # Submit osquery logs\n try:\n self.osquery.submit_logs()\n except Exception as e:\n exception_type = type(e).__name__\n logger = logging.getLogger(\"Daemon.submit_loop\")\n logger.debug(f\"Exception submitting logs: {exception_type}\")\n\n async def submit_logs_flock(self):\n logger = logging.getLogger(\"Daemon.submit_logs_flock\")\n # Submit Flock Agent logs\n try:\n self.flock_log.submit_logs()\n except Exception as e:\n exception_type = type(e).__name__\n logger.warning(f\"Exception submitting flock logs: {exception_type}\")\n\n async def http_server(self):\n logger = logging.getLogger(\"Daemon.http_server\")\n logger.info(\"Starting http server\")\n\n def response_object(data=None, error=False):\n obj = {\"data\": data, \"error\": error}\n return web.json_response(obj)\n\n # Routes\n async def ping(request):\n return response_object()\n\n async def shutdown(request):\n logger = logging.getLogger(\"Daemon.http_server\")\n logger.info(\"GET /shutdown, shutting down daemon\")\n raise GracefulExit()\n\n async def get_setting(request):\n key = request.match_info.get(\"key\", None)\n try:\n return response_object(self.global_settings.get(key))\n except:\n return response_object(error=\"invalid key\")\n\n async def set_setting(request):\n logger = logging.getLogger(\"Daemon.http_server.set_settings\")\n\n key = request.match_info.get(\"key\", None)\n val = await request.json()\n\n # Only change the setting if it's actually changing\n old_val = self.global_settings.get(key)\n if old_val == val:\n logger.debug(f\"skipping {key}={val}, because it's already set\",)\n else:\n logger.debug(f\"setting {key}={val}\")\n self.global_settings.set(key, val)\n self.global_settings.save()\n\n if key == \"use_server\":\n if val:\n self.flock_log.log(FlockLogTypes.SERVER_ENABLED)\n else:\n self.flock_log.log(FlockLogTypes.SERVER_DISABLED)\n # Submit flock logs right away\n await self.submit_logs_flock()\n\n return response_object()\n\n async def exec_twig(request):\n twig_id = request.match_info.get(\"twig_id\", None)\n try:\n twig = self.global_settings.get_twig(twig_id)\n data = self.osquery.exec(twig[\"query\"])\n return response_object(data)\n except:\n return response_object(error=\"invalid twig_id\")\n\n async def enable_undecided_twigs(request):\n logger = logging.getLogger(\"Daemon.http_server.enable_undecided_twigs\")\n # If the user choose to automatically opt-in to new twigs, this enables them all\n enabled_twig_ids = []\n twig_ids = self.global_settings.get_undecided_twig_ids()\n for twig_id in twig_ids:\n if not self.global_settings.is_twig_enabled(twig_id):\n 
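# Any twig the user has not explicitly decided on is enabled here; the ids are collected so settings can be saved, osqueryd refreshed, and the change logged in one batch below.\n                    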
self.global_settings.enable_twig(twig_id)\n enabled_twig_ids.append(twig_id)\n\n if enabled_twig_ids:\n logger.debug(f\"enabled twigs: {enabled_twig_ids}\")\n self.global_settings.save()\n self.osquery.refresh_osqueryd()\n self.flock_log.log(FlockLogTypes.TWIGS_ENABLED, enabled_twig_ids)\n\n return response_object()\n\n async def get_decided_twig_ids(request):\n return response_object(self.global_settings.get_decided_twig_ids())\n\n async def get_undecided_twig_ids(request):\n return response_object(self.global_settings.get_undecided_twig_ids())\n\n async def get_enabled_twig_ids(request):\n return response_object(self.global_settings.get_enabled_twig_ids())\n\n async def get_twig_enabled_statuses(request):\n return response_object(self.global_settings.get_twig_enabled_statuses())\n\n async def update_twig_status(request):\n logger = logging.getLogger(\"Daemon.update_twig_status\")\n twig_status = await request.json()\n\n # Validate twig_status\n if type(twig_status) != dict:\n return response_object(error=\"twig_status must be a dict\")\n for twig_id in twig_status:\n if twig_id not in self.global_settings.settings[\"twigs\"]:\n return response_object(\n error=\"twig_status contains invalid twig_ids\"\n )\n if type(twig_status[twig_id]) != bool:\n return response_object(error=\"twig_status is in an invalid format\")\n\n enabled_twig_ids = []\n disabled_twig_ids = []\n\n for twig_id in twig_status:\n if twig_status[twig_id] and not self.global_settings.is_twig_enabled(\n twig_id\n ):\n self.global_settings.enable_twig(twig_id)\n enabled_twig_ids.append(twig_id)\n if not twig_status[twig_id] and (\n self.global_settings.is_twig_enabled(twig_id)\n or self.global_settings.is_twig_undecided(twig_id)\n ):\n self.global_settings.disable_twig(twig_id)\n disabled_twig_ids.append(twig_id)\n\n if enabled_twig_ids or disabled_twig_ids:\n self.global_settings.save()\n self.osquery.refresh_osqueryd()\n\n if enabled_twig_ids:\n logger.info(f\"enabled twigs: {enabled_twig_ids}\")\n self.flock_log.log(FlockLogTypes.TWIGS_ENABLED, enabled_twig_ids)\n if disabled_twig_ids:\n logger.info(f\"disabled twigs: {disabled_twig_ids}\")\n self.flock_log.log(FlockLogTypes.TWIGS_DISABLED, disabled_twig_ids)\n\n return response_object()\n\n async def exec_health(request):\n health_item_name = request.match_info.get(\"health_item_name\", None)\n query = None\n for health_item in health_items[Platform.current()]:\n if health_item[\"name\"] == health_item_name:\n query = health_item[\"query\"]\n break\n if query:\n try:\n data = self.osquery.exec(query)\n return response_object(data)\n except:\n return response_object(error=\"error executing health item query\")\n else:\n return response_object(error=\"invalid health_item_name\")\n\n async def register_server(request):\n data = await request.json()\n try:\n name = data[\"name\"]\n server_url = data[\"server_url\"]\n except:\n return response_object(error=\"Invalid request to daemon\")\n\n # Validate server URL\n o = urlparse(server_url)\n if (\n (o.scheme != \"http\" and o.scheme != \"https\")\n or (o.path != \"\" and o.path != \"/\")\n or o.params != \"\"\n or o.query != \"\"\n or o.fragment != \"\"\n ):\n return response_object(error=\"Invalid server URL\")\n\n # Save the server URL in settings\n self.global_settings.set(\"gateway_url\", server_url)\n self.global_settings.save()\n\n # Try to register\n try:\n self.api_client.register(name)\n self.api_client.ping()\n return response_object()\n except PermissionDenied:\n return response_object(error=\"Permission 
denied\")\n except BadStatusCode as e:\n return response_object(error=f\"Bad status code: {e}\")\n except ResponseIsNotJson:\n return response_object(error=\"Server response is not JSON\")\n except RespondedWithError as e:\n return response_object(error=f\"Server error: {e}\")\n except InvalidResponse:\n return response_object(error=\"Server returned an invalid response\")\n except ConnectionError:\n return response_object(error=\"Error connecting to server\")\n\n # Anything else was an unknown failure\n return response_object(error=\"Unknown error\")\n\n app = web.Application()\n app.router.add_get(\"/ping\", ping)\n app.router.add_post(\"/shutdown\", shutdown)\n app.router.add_get(\"/setting/{key}\", get_setting)\n app.router.add_post(\"/setting/{key}\", set_setting)\n app.router.add_get(\"/exec_twig/{twig_id}\", exec_twig)\n app.router.add_post(\"/enable_undecided_twigs\", enable_undecided_twigs)\n app.router.add_get(\"/decided_twig_ids\", get_decided_twig_ids)\n app.router.add_get(\"/undecided_twig_ids\", get_undecided_twig_ids)\n app.router.add_get(\"/enabled_twig_ids\", get_enabled_twig_ids)\n app.router.add_get(\"/twig_enabled_statuses\", get_twig_enabled_statuses)\n app.router.add_post(\"/update_twig_status\", update_twig_status)\n app.router.add_get(\"/exec_health/{health_item_name}\", exec_health)\n app.router.add_post(\"/register_server\", register_server)\n\n loop = asyncio.get_event_loop()\n await loop.create_unix_server(app.make_handler(), self.unix_socket_path)\n\n # Change permissions of unix socket\n os.chmod(self.unix_socket_path, 0o660)\n os.chown(self.unix_socket_path, 0, self.gid)\n\n logger.info(\"Started http server\")\n\n async def autoupdate_loop(self):\n # Autoupdate is only available for macOS right now; Linux uses package managers\n if Platform.current() != Platform.MACOS:\n return\n\n # Run requests.get in asyncio\n async def load_url(url):\n loop = asyncio.get_event_loop()\n future = loop.run_in_executor(None, requests.get, url)\n return await future\n\n update_check_delay = 43200 # 12 hours\n\n while True:\n logger = logging.getLogger(\"Daemon.autoupdate_loop\")\n logger.info(\"Checking for updates\")\n\n try:\n # Query github for the latest version of Flock Agent\n r = await load_url(\n \"https://api.github.com/repos/firstlookmedia/flock-agent/releases/latest\"\n )\n release = r.json()\n latest_version = release[\"tag_name\"].lstrip(\"v\")\n logger.info(\n f\"installed version: {self.c.version}, latest version: {latest_version}\",\n )\n if parse_version(latest_version) <= parse_version(self.c.version):\n await asyncio.sleep(update_check_delay)\n continue\n\n # Find the pkg asset\n url = None\n filename = None\n for asset in release[\"assets\"]:\n if asset[\"name\"].endswith(\".pkg\"):\n url = asset[\"browser_download_url\"]\n filename = asset[\"name\"]\n break\n\n if not url:\n logger.warning(\"could not find .pkg file\")\n await asyncio.sleep(update_check_delay)\n continue\n\n # Download the update\n logger.info(f\"downloading {url}\")\n r = requests.get(url)\n\n os.makedirs(os.path.join(self.lib_dir, \"updates\"), exist_ok=True)\n download_filename = os.path.join(self.lib_dir, \"updates\", filename)\n with open(download_filename, \"wb\") as f:\n f.write(r.content)\n\n logger.info(f\"download complete: {download_filename}\")\n\n # Verify that it's codesigned\n p = subprocess.run(\n [\"/usr/sbin/pkgutil\", \"--check-signature\", download_filename],\n stdout=subprocess.PIPE,\n )\n if (\n p.returncode != 0\n or (\n # macOS 10.15\n \"Status: signed by a 
developer certificate issued by Apple for distribution\"\n not in p.stdout.decode()\n # macOS 10.14\n and \"Status: signed by a certificate trusted by Mac OS X\"\n not in p.stdout.decode()\n )\n or \"Developer ID Installer: FIRST LOOK PRODUCTIONS, INC. (\"\n not in p.stdout.decode()\n ):\n logger.warning(f\"codesign verification failed: {p.stdout.decode()}\")\n await asyncio.sleep(update_check_delay)\n continue\n\n # Install the update\n logger.info(\n \"launching installer background process and quitting daemon\",\n )\n subprocess.Popen(\n [\"/usr/sbin/installer\", \"-pkg\", download_filename, \"-target\", \"/\"]\n )\n sys.exit(0)\n\n except Exception as e:\n logger.warning(f\"Exception while checking for updates: {e}\")\n await asyncio.sleep(30)\n\n def cleanup(self):\n if os.path.exists(self.unix_socket_path):\n os.remove(self.unix_socket_path)\n","repo_name":"firstlookmedia/flock-agent","sub_path":"flock_agent/daemon/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":17575,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"91"} +{"seq_id":"5881744027","text":"# -*- coding: utf-8 -*-\nfrom miraFramework.PipelineBaseUI import PipelineBaseUI\nfrom miraLibs.dccLibs import get_parent_win\nfrom miraLibs.qtLibs.splash import splash\n\n\ndef normal_render(widget_class):\n parent_win = get_parent_win.get_parent_win()\n if parent_win:\n pb_ui = PipelineBaseUI(widget_class, parent_win)\n pb_ui.show()\n else:\n pb_ui = PipelineBaseUI(widget_class)\n pb_ui.show()\n return pb_ui\n\n\n@splash\ndef render(widget_class):\n ui_object = normal_render(widget_class)\n return ui_object\n","repo_name":"jasonbrackman/mira","sub_path":"miraLibs/qtLibs/render_ui.py","file_name":"render_ui.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"11545651927","text":"\n\"\"\"\nTime Complexity: O(n2) as there are two nested loops.\nAuxiliary Space: O(1)\nThe good thing about selection sort is it never makes more than O(n) swaps and can be useful when\nmemory write is a costly operation.\nStability : The default implementation is not stable. 
However it can be made stable.\nIn Place : Yes, it does not require extra space.\n\"\"\"\n\ndef selection_sort(source):\n for i in range(len(source)):\n min_index = i\n for j in range(i + 1, len(source)):\n if source[j] < source[min_index]:\n min_index = j\n source[i], source[min_index] = source[min_index], source[i]\n return source\n\nif __name__ == \"__main__\":\n print(selection_sort([4,6,1,3,9,2,6,7]))\n","repo_name":"dpattayath/algo","sub_path":"sort/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8924408241","text":"### Link:\n# https://github.com/doocs/leetcode/blob/main/basic/searching/BinarySearch/README.md\n\n\n\n### 题目\n# 给定一个按照升序排列的长度为 n 的整数数组,以及 q 个查询\n# 对于每个查询,返回一个元素 k 的起始位置和终止位置(位置从 0 开始计数)\n# 如果数组中不存在该元素,则返回 -1 -1\n\n### 输入格式\n# 第一行包含整数 n 和 q,表示数组长度和询问个数\n# 第二行包含 n 个整数(均在 1∼10000 范围内),表示完整数组\n# 接下来 q 行,每行包含一个整数 k,表示一个询问元素\n\n### 输出格式\n# 共 q 行,每行包含两个整数,表示所求元素的起始位置和终止位置\n# 如果数组中不存在该元素,则返回 -1 -1\n\n\n\n### Example\n# input:\n# 6 3\n# 1 2 2 3 3 4\n# 3\n# 4\n# 5\n\n# output:\n# 3 4\n# 5 5\n# -1 -1\n\nn, q = map(int, input().split())\nnums = list(map(int, input().split()))\n\nfor _ in range(q):\n x = int(input())\n left, right = 0, n - 1\n while left < right:\n mid = (left + right) // 2\n if nums[mid] >= x:\n right = mid\n else:\n left = mid + 1\n if nums[left] != x:\n print('-1 -1')\n else:\n t = left\n left, right = 0, n - 1\n while left < right:\n mid = (left + right + 1) // 2\n if nums[mid] <= x:\n left = mid\n else:\n right = mid - 1\n print(f'{t} {left}')\n","repo_name":"wendyZhang98/Leetcode-Solutions","sub_path":"Search/Binary Search.py","file_name":"Binary Search.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38390915723","text":"\"\"\"\n Entrada de datos\n *** Implementación de Thompson\n\n Construcción de NFA --> Simulación (si o no de una cadena w)\n \n Contrucción de subconjuntos\n Tabla con: NFA | DFA | Variables....\n Construcción de AFD dada r\n Graficar el AFD\n\n Simulación de AFD --> (si o no de una cadena w)\n \n Extra: Minimización de los AF\n\n Imprimir para cada AF generado a partir de r, \n -un SÍ o NO según si la cadena pertenece al lenguaje\n Tiempo que tarda cada AF en realizar la validacion de una cadena\n -Generar archivo por cada AF con \n -Estados, simbolos, inicio, acepación, transcisión\n\"\"\"\n\nimport os\nimport numpy as np\nfrom copy import deepcopy\nimport time\nfrom graphviz import Digraph\nfrom Thompson import Thompson\nfrom DFA import createDFA\nfrom graph import graficar \nfrom D_AFD import D_DFA\nimport copy\n\n\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items)-1]\n \ndef infixTopostfix(exp, characters):\n\n mod = exp[0]\n for i in range(1,len(exp)):\n if( (((exp[i] in characters) and exp[i-1] != '(') or exp[i] == '(') and (exp[i-1] != '|' or exp[i-1] == ')') ):\n mod += '.'+exp[i]\n else:\n mod += exp[i]\n\n regex = mod\n prec = {}\n prec[\"?\"] = 4\n prec[\"*\"] = 4\n prec[\"+\"] = 4\n prec[\".\"] = 3\n prec[\"|\"] = 2\n prec[\"(\"] = 1\n tokens = list(regex)\n output = []\n stack = Stack()\n for token in tokens:\n if (token.isalpha() or token.isdigit() or token == ' '):\n 
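# Shunting-yard style conversion: operands (letters, digits, spaces) are emitted straight to the postfix output, while the branches below handle parentheses and operator precedence via the stack.\n            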
output.append(token)\n elif (token == '('):\n stack.push(token)\n elif (token == ')'):\n top = stack.pop()\n while(top != '('):\n output.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (prec[stack.peek()] >= prec[token]):\n output.append(stack.pop())\n stack.push(token)\n while(not stack.isEmpty()):\n output.append(stack.pop())\n \n return ''.join(output)\n\ndef proyecto(exp, characters):\n regex = \"\"\n for v in exp:\n if v == \"+\" or v == \"?\":\n if(exp[exp.index(v)-1] == \")\"):\n inicio = exp.index(v)-1\n pos = exp[exp.index(v)-1]\n while (pos != \"(\"):\n pos = exp[inicio]\n inicio -= 1\n values = exp[inicio+1:exp.index(v)]\n if (v == \"+\"):\n new_regex = exp[:exp.index(v)] + values + \"*\"+ exp[exp.index(v)+1:] \n elif (v == \"?\"):\n new_regex = exp[:exp.index(v)-(len(values)-1)] + values + \"|3)\"+ exp[exp.index(v)+1:] \n exp = new_regex\n else:\n val = exp[exp.index(v)-1]\n if (v == \"+\"):\n new_regex = exp[:exp.index(v)] + val + \"*\"+ exp[exp.index(v)+1:] \n elif(v == \"?\"):\n new_regex = exp[:exp.index(v)-1] +\"(\" + exp[exp.index(v)-1] + \"|3)\"+ exp[exp.index(v)+1:]\n exp = new_regex\n \n if((\"+\" not in exp) and (\"?\" not in exp)):\n regex = exp\n\n postfix = infixTopostfix(regex, characters)\n print(\"La postfix regex: \", postfix)\n \n #print(postfix)\n #cadena de tokens\n tokens = []\n while len(postfix) != 0:\n if (postfix[0] == \"|\" or postfix[0] == \"*\" or postfix[0] == \".\"):\n tokens.append(postfix[0])\n postfix = postfix[slice(1,len(postfix))]\n else:\n for a in characters:\n if (postfix.find(a) != -1):\n postfix = postfix[slice(len(a), len(postfix))]\n tokens.append(str(a))\n break\n print(\"tokens: \", tokens)\n \n \n #Contrucción del NFA \n countState = 0\n if(len(tokens) == 1):\n tokens, countState = Thompson(tokens, countState)\n while(len(tokens) > 1):\n tokens, countState = Thompson(tokens, countState)\n\n # Automata generado\n Aut = tokens[0]\n # Del AFN a AFD\n AFD = createDFA(Aut)\n \n return AFD\n \"\"\"\n w = input(\"Ingrese cadena que desea validar: \")\n print(\"Para NFA:\", simulacionAutomataNFA(Aut, w))\n print(\"Para DFA:\", simulacionAutomataDFA(AFD, w))\n #print(\"Para DFA Directo:\",simulacionAutomata(D_DFA_Aut, w))\n \"\"\"","repo_name":"1Yasmin/compis3","sub_path":"Proyecto1.py","file_name":"Proyecto1.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"382321312","text":"from kube.async_loop import get_loop\nfrom kube.cluster_facade import SyncClusterFacade\nfrom kube.config import Context, KubeConfigCollection\nfrom kubefs.fs_kubecluster import (\n KubeClusterGenericResourceDir,\n KubeClusterNamespacesDir,\n name_api_resources,\n)\nfrom kubefs.fs_model import ONE_DAY, Directory, File, Payload\n\n\nclass KubeConfigClusterDir(Directory):\n @classmethod\n def create(cls, *, payload: Payload, context: Context):\n self = cls(payload=payload)\n self.context = context\n self.facade = SyncClusterFacade(async_loop=get_loop(), context=self.context)\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n # special handling for namespaces\n payload = Payload(name=\"namespaces\")\n dir = KubeClusterNamespacesDir.create(\n payload=payload,\n context=self.context,\n )\n\n dirs = [dir]\n\n api_resources = self.facade.list_api_resources()\n pairs = name_api_resources(api_resources)\n\n for name, api_resource in pairs:\n payload = Payload(name=name)\n dir = KubeClusterGenericResourceDir.create(\n 
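# Every other API resource reported by the cluster becomes its own generic directory entry, alongside the special namespaces directory created above.\n                    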
payload=payload,\n context=self.context,\n api_resource=api_resource,\n )\n dirs.append(dir)\n\n # api resources almost never change\n self.set_lazy_entries(dirs, lifetime=ONE_DAY)\n\n return self.lazy_entries\n\n\nclass KubeConfigClustersDir(Directory):\n @classmethod\n def create(cls, *, payload: Payload, config: KubeConfigCollection):\n self = cls(payload=payload)\n self.config = config\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n dirs = []\n for context in self.config.contexts.values():\n payload = Payload(\n name=context.cluster.short_name,\n ctime=context.file.ctime,\n mtime=context.file.mtime,\n atime=context.file.atime,\n )\n dir = KubeConfigClusterDir.create(payload=payload, context=context)\n dirs.append(dir)\n\n self.set_lazy_entries(dirs, lifetime=ONE_DAY)\n\n return self.lazy_entries\n\n\nclass KubeConfigContextDir(Directory):\n @classmethod\n def create(cls, *, payload: Payload, context: Context):\n self = cls(payload=payload)\n self.context = context\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n dirs = []\n\n cluster = self.context.cluster\n if cluster:\n payload = Payload(name=\"cluster\")\n dir = KubeConfigClusterDir.create(payload=payload, context=self.context)\n dirs.append(dir)\n\n user = self.context.user\n if user:\n payload = Payload(name=\"user\")\n dir = KubeConfigUserDir.create(payload=payload, context=self.context)\n dirs.append(dir)\n\n self.set_lazy_entries(dirs, lifetime=ONE_DAY)\n\n return self.lazy_entries\n\n\nclass KubeConfigContextsDir(Directory):\n @classmethod\n def create(cls, *, payload: Payload, config: KubeConfigCollection):\n self = cls(payload=payload)\n self.config = config\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n dirs = []\n for context in self.config.contexts.values():\n payload = Payload(\n name=context.short_name,\n ctime=context.file.ctime,\n mtime=context.file.mtime,\n atime=context.file.atime,\n )\n dir = KubeConfigContextDir.create(payload=payload, context=context)\n dirs.append(dir)\n\n self.set_lazy_entries(dirs, lifetime=ONE_DAY)\n\n return self.lazy_entries\n\n\nclass KubeConfigUserDir(Directory):\n \"\"\"Represents a directory that contains files that belong to a single user.\n The files and their contents are the key/values of the user object in the\n kube config.\"\"\"\n\n @classmethod\n def create(cls, *, payload: Payload, context: Context):\n self = cls(payload=payload)\n self.context = context\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n files = []\n\n for attname in self.context.user.get_attribute_names():\n value = getattr(self.context.user, attname)\n assert type(value) is str # we need to call .encode on this\n\n payload = Payload(\n name=attname,\n data=value.encode(),\n ctime=self.context.file.ctime,\n mtime=self.context.file.mtime,\n atime=self.context.file.atime,\n )\n file = File(payload=payload)\n files.append(file)\n\n self.set_lazy_entries(files, lifetime=ONE_DAY)\n\n return self.lazy_entries\n\n\nclass KubeConfigUsersDir(Directory):\n \"\"\"Represents a directory containing all the user names defined in the kube\n config. 
Each entry is itself a directory containing files.\"\"\"\n\n @classmethod\n def create(cls, *, payload: Payload, config: KubeConfigCollection):\n self = cls(payload=payload)\n self.config = config\n return self\n\n def get_entries(self):\n if not self.lazy_entries:\n dirs = []\n for context in self.config.contexts.values():\n payload = Payload(\n name=context.user.short_name,\n ctime=context.file.ctime,\n mtime=context.file.mtime,\n atime=context.file.atime,\n )\n dir = KubeConfigUserDir.create(payload=payload, context=context)\n dirs.append(dir)\n\n self.set_lazy_entries(dirs, lifetime=ONE_DAY)\n\n return self.lazy_entries\n","repo_name":"nearmap/kubefs","sub_path":"kubefs/fs_kubeconfig.py","file_name":"fs_kubeconfig.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"13211305305","text":"#!/usr/bin/python3\n\nfrom collections import namedtuple\nimport argparse\nimport socket\nimport sys\nfrom environment import Environment\n\n\ndef str2bool(v):\n # https://stackoverflow.com/a/43357954/2570622\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\nparser = argparse.ArgumentParser(description=\"Implements the Environment.\")\nparser.add_argument('-ip', '--ip', dest='ip', type=str, default='localhost', help='IP of server')\nparser.add_argument('-port', '--port', dest='port', type=int, default=5000, help='Port for connection')\nparser.add_argument('-side', '--side', dest='side', type=int, default=32, help='Side length of the square grid')\nparser.add_argument('-i', '--instance', dest='instance', type=int, default=0, help='Instance number of the gridworld.')\nparser.add_argument('-slip', '--slip', dest='slip', type=float, default=0.02, help='How likely is it for the agent to slip')\nparser.add_argument('-ml', '--maxlength', dest='maxLength', type=int, default=1000, help='Maximum number of timesteps in an episode')\nparser.add_argument('-rs', '--randomseed', dest='randomseed', type=int, default=0, help='Seed for RNG.')\nparser.add_argument('-nobf', '--noobfuscate', dest='obfuscate', type=str2bool, nargs='?', const=False, default=True, help='Whether to obfuscate the states or not')\nparser.add_argument('-ne', '--numepisodes', dest='numEpisodes', type=int, default=1600, help='Number of episodes to run')\nparser.add_argument('-q', '--quiet', dest='quiet', type=str2bool, nargs='?', const=True, default=False, help='Surpresses detailed output. 
(Will make the code run a little faster)')\nargs = parser.parse_args()\nprint(args, file=sys.stderr)\nverbose = not args.quiet\n\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nAddress = namedtuple('Address', ['ip', 'port'])\nserver_address = Address(ip=args.ip, port=args.port)\nprint('Server started.', server_address, file=sys.stderr)\nsock.bind(server_address)\nsock.listen(1)\n\n\nenv = Environment(args.side, args.instance, args.slip, args.obfuscate, args.randomseed, args.maxLength)\n\n\nprint('Waiting for client', file=sys.stderr)\nconnection, client_address = sock.accept()\ntry:\n client_address = Address(*client_address)\n print('Connected.', client_address, file=sys.stderr)\n\n episodeNum = 0\n totReward = 0\n episodeRewards = []\n if not verbose: print('Progress: ', end='', file=sys.stderr)\n while episodeNum < args.numEpisodes:\n action = connection.recv(1024).decode('utf-8')\n if verbose: print('----------\\nRecieved:', action, file=sys.stderr)\n\n if action == 'info':\n numStates = env.getnumStates()\n state = env.getState()\n print('Number of states: {}, Current State: {}'.format(numStates, state), file=sys.stderr)\n if verbose: env.printWorld()\n connection.sendall('{} {}\\n'.format(numStates, state).encode('utf-8'))\n\n elif action in 'up down left right'.split():\n if verbose: print('Taking action', action, file=sys.stderr)\n state, reward, event = env.takeAction(action)\n if verbose: print('New state: {}, Reward: {}, event: {}'.format(state,reward, event), file=sys.stderr)\n\n totReward += reward\n if event in ['goal', 'terminated']:\n episodeNum += 1\n if not verbose and episodeNum % (args.numEpisodes//50) == 0:\n print('#', end='', flush=True, file=sys.stderr)\n episodeRewards.append(totReward)\n totReward = 0\n\n event = ['continue', 'terminated', 'goal'].index(event)\n connection.sendall('{} {} {}\\n'.format(state, reward, event).encode('utf-8'))\n\n else:\n print('\\n\\nInvalid action! 
Terminating', file=sys.stderr)\n break\n connection.sendall('TERMINATE\\n'.encode('utf-8'))\n\n print('Reward for each episode:')\n print(episodeRewards)\n print('Completed {} episodes.'.format(episodeNum))\n\nfinally:\n connection.close()\n","repo_name":"akshaykhadse/reinforcement-learning","sub_path":"Gridworld/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"91"} +{"seq_id":"8827684079","text":"from coffee import MENU, resources\r\n\r\n# menu shows ingredients on how to make each\r\n# report shows how much of ingredients is remaining\r\nmenu = MENU\r\nreport = resources\r\nreport[\"money\"] = 0\r\ni = \"ingredients\"\r\nw = \"water\"\r\nm = \"milk\"\r\nc = \"coffee\"\r\nt = \"cost\"\r\nn = \"money\"\r\n# drink choice == 'report' -> shows the amounts left in machine\r\n# drink choice == 'off' -> stops the program\r\n\r\n\r\ndef machine():\r\n if drink != \"report\":\r\n def refill():\r\n report[w] = 300\r\n report[m] = 200\r\n report[c] = 100\r\n return report\r\n refill()\r\n\r\n def cash():\r\n print(\"Please insert coins.\")\r\n qtr = int(input(\"How many quarters?: \")) * 0.25\r\n dm = int(input(\"How many dimes?: \")) * 0.10\r\n nkl = int(input(\"How many nickles?: \")) * 0.05\r\n pny = int(input(\"How many pennies?: \")) * 0.01\r\n total = qtr + dm + nkl + pny\r\n return total\r\n paid = cash()\r\n\r\n def change():\r\n cost = menu[drink][t]\r\n remain = paid - cost\r\n if remain < 0:\r\n cost = 0\r\n report[n] += round(cost, 2)\r\n return report, remain\r\n back = round(change()[1], 2)\r\n\r\n if back > 0:\r\n def water():\r\n cost = menu[drink][i][w]\r\n water_left = report[w]\r\n remain = water_left - cost\r\n if remain < 0:\r\n print(\"Sorry there's not enough water.\")\r\n remain = water_left\r\n report[w] = remain\r\n return report[w]\r\n report[w] = water()\r\n\r\n def coffee():\r\n cost = menu[drink][i][c]\r\n coff_left = report[c]\r\n remain = coff_left - cost\r\n if remain < 0:\r\n print(\"Sorry there's not enough coffee.\")\r\n remain = coff_left\r\n report[c] = remain\r\n return report[c]\r\n report[c] = coffee()\r\n\r\n def milk():\r\n milk_left = report[m]\r\n if drink == \"espresso\":\r\n cost = 0\r\n else:\r\n cost = menu[drink][i][m]\r\n remain = milk_left - cost\r\n if remain < 0:\r\n print(\"Sorry there's not enough milk.\")\r\n remain = milk_left\r\n report[m] = remain\r\n return report[m]\r\n report[m] = milk()\r\n\r\n return back, report\r\n\r\n\r\nmore = True\r\nwhile more:\r\n drink = input(\"What would you like? (espresso/latte/cappuccino): \")\r\n if drink == 'report':\r\n for val in report:\r\n ing = val.title()\r\n amt = report[val]\r\n if val == 'coffee':\r\n print(f\"{ing}: {amt}g\")\r\n elif val == 'money':\r\n print(f\"{ing}: ${amt}\")\r\n else:\r\n print(f\"{ing}: {amt}ml\")\r\n elif drink == 'off':\r\n more = False\r\n elif drink in (\"espresso\", \"latte\", \"cappuccino\"):\r\n returned = machine()[0]\r\n if returned < 0:\r\n print(\"Sorry that's not enough money. Money refunded.\")\r\n else:\r\n print(f\"Your change back is ${returned}.\")\r\n else:\r\n print(\"Sorry, please choose one of the listed options\")\r\n if drink != 'off':\r\n thirsty = input(\"Do you want more? 
Type 'y' or 'n': \")\r\n print(\"\")\r\n if thirsty == 'n':\r\n more = False\r\n","repo_name":"mahamilton1/Coffee-Machine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10538895329","text":"from django.shortcuts import render\nfrom django.db.models import F\nfrom comment.models import Comment\nfrom movie.models import MovieInfomation\nfrom music.models import MusicInformation\nfrom tools.logincheck import login_check\nfrom django.http import JsonResponse\nimport json\n\n# Create your views here.\nfrom user.models import UserProfile\n\n\n@login_check('POST')\ndef get_comment(request):\n # 提交评论\n if request.method == 'POST':\n # 在token中获取phonenumber\n user = request.user\n if not user:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n json_str = request.body\n if not json_str:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n print(json_str)\n\n json_obj = json.loads(json_str)\n\n url = json_obj.get('url', '')\n if not url:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n content = json_obj.get('content', '')\n if not content:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n type = json_obj.get('type', '')\n if not type:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n # 获取评论星级 如果低于某值 则删除资源, 高于则继续插入\n star_val = json_obj.get('num', '')\n star_val = int(star_val)\n if not star_val:\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n if type == \"music\":\n try:\n music = MusicInformation.objects.filter(url=url)\n print(music)\n if star_val == 1:\n music.update(star_one=F('star_one') + 1)\n elif star_val == 2:\n music.update(star_two=F('star_two') + 1)\n elif star_val == 3:\n music.update(star_three=F('star_three') + 1)\n elif star_val == 4:\n music.update(star_four=F('star_four') + 1)\n elif star_val == 5:\n music.update(star_five=F('star_five') + 1)\n\n music = music[0]\n all_star = music.star_one * 1 + \\\n music.star_two * 2 + \\\n music.star_three * 3 + \\\n music.star_four * 4 + \\\n music.star_five * 5 + \\\n music.download_count * 3.5\n all_count = music.star_one + \\\n music.star_two + \\\n music.star_three + \\\n music.star_four + \\\n music.star_five + \\\n music.download_count\n avg_star = all_star / all_count\n\n # 判断平均星数\n if avg_star < 1.5:\n # 删除表记录\n try:\n music.delete()\n except Exception as e:\n print(e)\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n # 修改平均星值\n music.avg_star = avg_star\n music.save()\n except Exception as e:\n print('没找到字段')\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n # 插入评论表\n try:\n\n Comment.objects.create(url=url, content=content, phonenumber=user.phonenumber)\n # 返回content和create\n except Exception as e:\n print(\"评论表插入失败\")\n pass\n try:\n l_com = Comment.objects.filter(url=url)\n lis = []\n for com in l_com:\n print('com对象是', com)\n dic = {}\n phonenumber = com.phonenumber\n phone = UserProfile.objects.get(phonenumber=phonenumber)\n dic['nickname'] = phone.nickname\n dic['avatar'] = \"\" if not phone.avatar.name else \"/media/\" + phone.avatar.name\n dic['content'] = com.content\n dic['createtime'] = com.createtime.strftime(\"%Y-%m-%d %H:%M\")\n lis.append(dic)\n lis.reverse()\n # TODO 查询信息\n if type == \"music\":\n try:\n info = MusicInformation.objects.filter(url=url)[0]\n 
print(info)\n except Exception as e:\n return JsonResponse({\"code\": 20000})\n\n data = {\n 'code': 200,\n \"messages_count\": len(lis),\n \"star_one\": info.star_one,\n \"star_two\": info.star_two,\n \"star_three\": info.star_three,\n \"star_four\": info.star_four,\n \"star_five\": info.star_five,\n \"star_avg\":info.star_avg,\n \"data\": lis,\n }\n return JsonResponse(data)\n elif type == \"movie\":\n pass\n elif type == \"picture\":\n pass\n except Exception as e:\n print(e)\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n\n\n # 获取所有评论内容\n elif request.method == 'GET':\n url = request.GET.get('url')\n type = request.GET.get('type')\n try:\n l_com = Comment.objects.filter(url=url)\n lis = []\n for com in l_com:\n print('com对象是', com)\n dic = {}\n phonenumber = com.phonenumber\n phone = UserProfile.objects.get(phonenumber=phonenumber)\n dic['nickname'] = phone.nickname\n dic['avatar'] = \"\" if not phone.avatar.name else \"/media/\" + phone.avatar.name\n dic['content'] = com.content\n dic['createtime'] = com.createtime.strftime(\"%Y-%m-%d %H:%M\")\n lis.append(dic)\n lis.reverse()\n # TODO 查询信息\n if type == \"music\":\n try:\n info = MusicInformation.objects.filter(url=url)[0]\n print(info)\n except Exception as e:\n return JsonResponse({\"code\": 20000})\n data = {\n 'code': 200,\n \"messages_count\": len(lis),\n \"star_one\": info.star_one,\n \"star_two\": info.star_two,\n \"star_three\": info.star_three,\n \"star_four\": info.star_four,\n \"star_five\": info.star_five,\n \"star_avg\": info.star_avg,\n \"data\": lis,\n }\n return JsonResponse(data)\n elif type == \"movie\":\n pass\n elif type == \"picture\":\n pass\n except Exception as e:\n print(e)\n result = {'code': 20000, 'data': 'error'}\n return JsonResponse(result)\n","repo_name":"wujiaze/SuperUrl","sub_path":"superUrl/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33668618150","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\n\"\"\"\n__doc__:类的描述信息\n__class__:显示所属类\n__module__:显示所属模块\n__new__:开辟空间\n__init__:初始化\n__del__:析构函数\n__call__:对象后面加小括号()自动调用\n__dict__:检查类或实例对象所有属性\n__str__:当定义__str__,在为得到对象描述时,默认输出该函数返回值(int,str,bool都可以)\n__getitem__/__setitem__/__delitem__:用于索引操作,如字典,对象加中括号[]。获取/设置/删除数据\n__getslice__/__setslice__/__delslice__:用于切片操作,如列表\n____:\n\"\"\"\n\nclass Dog(object):\n \"\"\"这是狗狗哦\"\"\"\n def __init__(self,name):\n self.name = name\n\n\nd = Dog(\"hsq\")\n\nprint(d.__doc__)","repo_name":"shulip/python_learn","sub_path":"04_Python高级语法/08_类的魔法属性.py","file_name":"08_类的魔法属性.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74416453752","text":"import tweepy\r\nimport csv\r\nfrom textblob import TextBlob\r\n\r\nconsumer_key = 'BDMHBweQzZwimb1mN1C05gYsp'\r\nconsumer_secret = '4A3ryMWFIKvC2TYomOUppNNvAyZPrJAApLcLdQhAietfPwEEYE'\r\n\r\naccess_token = '863173256-8oym2WsCQb8V2XWJZp3CPBNfpiVxLrmfjNAjuBBE'\r\naccess_token_secret = 'X3fYTmoRevFv6R6x8OcN7NdNwhpWZ3cEK49doIHIdhVC7'\r\n\r\n\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n\r\nauth.set_access_token(access_token, access_token_secret)\r\n\r\n\r\napi = tweepy.API(auth)\r\n\r\npublic_tweets = api.search('trump')\r\nwith open('csvexample.csv', 'w', newline='') as f:\r\n\tfieldnames = ['positive polarity', 'negative polarity']\r\n\twriter = csv.DictWriter(f, fieldnames = 
fieldnames)\r\n\twriter.writeheader()\r\n\r\n\tfor tweet in public_tweets: \r\n\t\tprint(tweet.text)\r\n\t\tanalysis = TextBlob(tweet.text)\r\n\t\tprint(analysis.sentiment)\r\n\r\n\t\tif analysis.sentiment.polarity > 0.5:\t\t\r\n\t\t\t# Write a row to the CSV file. I use encode UTF-8\r\n\t\t\twriter.writerow({'positive polarity': tweet.text.encode('utf-8')})\r\n\t\telse:\r\n\t\t\twriter.writerow({'negative polarity': tweet.text.encode('utf-8')})\r\n\t\t","repo_name":"usmanmukhtar/Twitter-Sentiment-Analysis","sub_path":"twitter_sentiment_analysis.py","file_name":"twitter_sentiment_analysis.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29437692134","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('csanciones', '0001_initial'),\n ('sanciones', '0002_auto_20150115_0241'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='sancion',\n name='sancion',\n ),\n migrations.AddField(\n model_name='sancion',\n name='concepto',\n field=models.OneToOneField(default=2, to='csanciones.ConceptoSancion'),\n preserve_default=False,\n ),\n ]\n","repo_name":"omartoledodev/SistemaEspejo","sub_path":"sies/sanciones/migrations/0003_auto_20150115_0349.py","file_name":"0003_auto_20150115_0349.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38758477518","text":"import os\n\nfrom dotenv import (load_dotenv, find_dotenv)\n\nload_dotenv(find_dotenv())\n\nvariables = {\n 'CONSUMER_KEY': os.getenv('CONSUMER_KEY'),\n 'CONSUMER_SECRET': os.getenv('CONSUMER_SECRET'),\n 'ACCESS_TOKEN_KEY': os.getenv('ACCESS_TOKEN_KEY'),\n 'ACCESS_TOKEN_SECRET': os.getenv('ACCESS_TOKEN_SECRET'),\n 'FOX_NEWS_API_KEY': os.getenv('FOX_NEWS_API_KEY')\n}\n\nTwitterEnv = variables\n","repo_name":"Gaccobee/filternews","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11108233751","text":"from bengali_asr.models import Whisper,ModelDimensions\nfrom bengali_asr.dataset.tokenizer import BengaliTokenizer\nfrom bengali_asr.dataset.encoder_decoder_dataset import SpeechRecognitionDataset\nfrom bengali_asr.models.loss import MaskedCrossEntropyLoss\nfrom bengali_asr.dataset.wav2vec_dataset import SpeechRecognitionLMCollate,SpeechRecognitionCTCDataset\nimport pandas as pd\nimport torch\nfrom .base import Base\nclass Configs(Base):\n OUTPUTDIR=\"../workdir/wav2vec2_characterlevel_pretrained_ctcloss_lm\"\n TRAIN_DATA_PATH=\"/app/dataset/train_data_with_openasr.csv\"\n VALID_DATA_PATH=\"/app/dataset/valid_data_subset.csv\"\n DATA_ROOT=\"/app/dataset/train_numpy_16k\"\n WHISPER_PATH=\"/app/bengali-speech-recognition/workdir/whisper_checkpoints/small.pkl\"\n \n USE_DATASET_LEN=None #Set to small number while debugging\n SAMPLES_PER_GPU=12\n N_GPU=4\n ENCODER_UNFREEZE_EPOCH=10\n\n VALIDATION_BS=16\n VALIDATION_FREQUENCY=5000\n PIN_MEMORY=True\n NUM_WORKERS=4\n NUM_WORKERS_VAL=4\n DISTRIBUTED=True\n FREEZE_ENCODER=True\n LR=0.000025\n WD=1e-5\n EPOCHS=10\n \n MAX_TOKEN_LENGTH=256\n MAX_PREDICTION_LENGTH=MAX_TOKEN_LENGTH\n MAX_AUDIO_LENGTH=163840\n AUDIO_PADDING=0.0\n TRAIN_TYPE=\"wav2vec_lm\"\n AUTOCAST=False\n augoregressive_inference=False\n \n 
AUDIO_SCALE=320\n \n VOCAB = all_combinations= ['ব', 'া', 'ং', 'ল', 'দ', 'ে', 'শ', ' ', 'য', '়', 'ি', 'ত', '্', 'ন', 'এ', 'ধ', 'র', 'ণ', 'ক', 'ড', 'হ', 'উ', 'প', 'জ', 'অ', 'থ', 'স', 'ষ', 'ই', 'আ', 'ছ', 'গ', 'ু', 'ো', 'ও', 'ভ', 'ী', 'ট', 'ূ', 'ম', 'ৈ', 'ৃ', 'ঙ', 'খ', 'ঃ', '১', '৯', '৬', '০', '২', 'চ', 'ঘ', 'ৎ', '৫', '৪', '-', '‘', '’', 'ফ', ',', 'ৌ', '৮', 'ঁ', 'য়', '৩', 'ঢ', 'ঠ', '৭', ':', '।', '.', 'ড়', 'ঝ', '/', 'ঞ', '\"', \"'\", 'ঔ', 'ঈ', 'ঐ','!', 'ঋ', 'ঊ', '?', '–', ';', 'ঢ়', '—']\n START_TOKEN=len(VOCAB)\n END_TOKEN=len(VOCAB)+1\n PAD_TOKEN=-1\n\n\n def __init__(self,inference_files=None,inference_text=None,use_numpy=False):\n self.device = \"cuda\"\n self.dataloder_collate = SpeechRecognitionLMCollate(self.MAX_TOKEN_LENGTH,\n self.MAX_AUDIO_LENGTH,\n self.AUDIO_PADDING,\n self.PAD_TOKEN,\n self.END_TOKEN,\n self.AUDIO_SCALE)\n self.model_dims = ModelDimensions(n_mels=self.N_MELS, \n n_audio_ctx=self.N_FRAMES//2, \n n_audio_state=768,\n n_audio_head=12,\n n_audio_layer=12,\n n_vocab=len(self.VOCAB)+2, \n n_text_ctx=448, \n n_text_state=768, \n n_text_head=12, \n n_text_layer=12)\n self.model = Whisper(self.model_dims)\n\n self.tokenizer_train = BengaliTokenizer(self.VOCAB,self.START_TOKEN,self.END_TOKEN)\n self.tokenizer = BengaliTokenizer(self.VOCAB,self.START_TOKEN,self.END_TOKEN)\n if inference_files is not None:\n print(\"inference mode is on\")\n self.inference_dataset = SpeechRecognitionCTCDataset(inference_files,\n inference_text,\n self.tokenizer,\n self.DATA_ROOT,\n sampling_rate=self.SAMPLE_RATE,\n train=False,\n usenumpy=use_numpy) \n return\n self.audio_transform_train = None\n \n self.training_data = pd.read_csv(self.TRAIN_DATA_PATH)[:self.USE_DATASET_LEN]\n self.valid_data = pd.read_csv(self.VALID_DATA_PATH)[:self.USE_DATASET_LEN]\n print(f\"length of train: {len(self.training_data)}, length of valid: {len(self.valid_data)}\")\n\n self.train_dataset = SpeechRecognitionCTCDataset(self.training_data.id.apply(lambda x: x.replace(\".mp3\",\".npy\")),\n self.training_data.sentence,\n self.tokenizer_train,\n self.DATA_ROOT,\n raw_transform=self.audio_transform_train,\n sampling_rate=self.SAMPLE_RATE)\n \n self.valid_dataset = SpeechRecognitionCTCDataset(self.valid_data.id.apply(lambda x: x.replace(\".mp3\",\".npy\")),\n self.valid_data.sentence,\n self.tokenizer,\n self.DATA_ROOT,\n sampling_rate=self.SAMPLE_RATE,\n train=False)\n\n self.optimizer = torch.optim.Adam(self.model.parameters(),lr=self.LR,weight_decay=self.WD)\n self.steps_per_epoch = len(self.train_dataset)//(self.SAMPLES_PER_GPU*self.N_GPU)+1\n self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,max_lr=self.LR,steps_per_epoch=self.steps_per_epoch,epochs=self.EPOCHS,pct_start=0.1)\n self.criterion = MaskedCrossEntropyLoss(self.PAD_TOKEN)\n\n def load_state_dict(self,path):\n statedict = torch.load(path)\n print(\"loading model checkpoint from epoch: \",statedict[\"current_step\"])\n self.model.load_state_dict(statedict[\"model_state_dict\"])","repo_name":"SamratThapa120/bengali-speech-recognition","sub_path":"configs/whisper_small_characterwise_pretrained_lm_openasr.py","file_name":"whisper_small_characterwise_pretrained_lm_openasr.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42227929232","text":"with open(\"10.input.txt\", \"r\") as f:\n lines = [\n line.strip()\n for line in f.readlines()\n ]\n\nmatching_pairs = {\n '[': ']',\n '{': '}',\n '(': ')',\n '<': '>',\n}\nscore_table = {\n 
')': 1,\n ']': 2,\n '}': 3,\n '>': 4,\n}\n\nscore = []\nfor linenum, line in enumerate(lines):\n state = ''\n try:\n for charnum, char in enumerate(line):\n if char in matching_pairs.keys():\n state += char\n elif char in matching_pairs.values():\n expected_close = matching_pairs[state[-1]]\n if char == expected_close:\n state = state[:-1]\n else:\n raise SyntaxError(f\"Line {linenum}: syntax error at position {charnum}: \"\n f\"expected {expected_close} but got {char}\")\n\n if state != '': # incomplete line\n completion_string = ''.join([\n matching_pairs[open_char]\n for open_char in reversed(state)\n ])\n line_score = 0\n for char in completion_string:\n line_score = line_score * 5 + score_table[char]\n print(f\"Line {linenum}: score {line_score}\")\n score.append(line_score)\n\n except SyntaxError:\n pass # ignore corrupt lines\n\nprint(f\"Autocomplete score: {sorted(score)[len(score)//2]}\")\n","repo_name":"nielslaukens/advent-of-code","sub_path":"2021/10_2.py","file_name":"10_2.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18003169205","text":"import bybit\nimport cryptowatch as cw\nimport csv\n\ncw.api_key = ''\ncsv_name = 'btc_books.csv'\n\ndef get_db_list(file_name):\n markets = []\n with open(file_name, 'r') as file:\n reader = csv.reader(file)\n next(reader)\n for row in reader:\n markets.extend([x.replace(':', '__').replace('-','').upper() for x in row[1:] if x])\n return markets\n\ndef get_cw_list(file_name):\n markets = []\n with open(file_name, 'r') as file:\n reader = csv.reader(file)\n next(reader)\n for row in reader:\n markets.extend([x for x in row[1:] if x])\n return markets\ndef avg_strat(all_dicts):\n depth_level = 41\n blend_level = 1\n delta_flip = .26\n delta_close_include = True\n delta_close = 0\n include = ['BINANCE__BTCUSDT', 'BINANCE__BTCUSDTPERPETUALFUTURES', 'BINANCE__BTCUSDPERPETUALFUTUREINVERSE', 'BINANCEUS__BTCUSDT', 'BITFINEX__BTCUSD', 'BITFINEX__BTCUSDT', 'HUOBI__BTCUSDT', 'DERIBIT__BTCUSDPERPETUALFUTUREINVERSE', 'COINBASEPRO__BTCUSD', 'KRAKEN__BTCUSD', 'KRAKEN__BTCUSDT', 'OKEX__BTCUSDT', 'BITSTAMP__BTCUSD', 'POLONIEX__BTCUSDT', 'GEMINI__BTCUSD', 'BITTREX__BTCUSD', 'BITTREX__BTCUSDT']\n for key in list(all_dicts.keys()):\n if key not in include:\n del all_dicts[key]\n\n\n\n\n\n\ncw_markets = get_cw_list(csv_name)\ndb_tables = get_db_list(csv_name)\nall_dicts = {}\nfor i in range(len(cw_markets)):\n cw_string = cw_markets[i]\n db_table = db_tables[i]\n data_dict = {3:[3,5]}#cw.markets.get(cw_string, orderbook=True).__dict__\n # data_dict = {db_table:data_dict}\n all_dicts[db_table] = data_dict\n\navg_strat(all_dicts)\n\n\n","repo_name":"terrencstaciralphlindgr/algotrading","sub_path":"Backup Live Scripts/avg_book_strat.py","file_name":"avg_book_strat.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"29961999900","text":"#!/usr/bin/python3\n\"\"\"\nThis module contains the function model_state_update_id_2()\n\"\"\"\nfrom model_state import Base, State\nimport sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef model_state_update_id_2():\n \"\"\"changes the name of a State object from the database hbtn_0e_6_usa\"\"\"\n\n url = 'mysql+mysqldb://{}:{}@localhost/{}'.format(sys.argv[1],\n sys.argv[2],\n sys.argv[3])\n\n engine = create_engine(url, pool_pre_ping=True)\n Base.metadata.create_all(engine)\n 
State.metadata.create_all(engine)\n\n    Session = sessionmaker(bind=engine)\n\n    conn = engine.connect()\n    session = Session(bind=conn)\n\n    obj = session.query(State).filter(State.id == 2).all()\n\n    for elem in obj:\n        elem.name = 'New Mexico'\n\n    session.commit()\n\n\nif __name__ == \"__main__\":\n    model_state_update_id_2()","repo_name":"otalorajuand/holbertonschool-higher_level_programming","sub_path":"python-object_relational_mapping/12-model_state_update_id_2.py","file_name":"12-model_state_update_id_2.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"13429808794","text":"# Created in 20190517\n\nimport json\n\n# numbers = [2, 3, 4, 6, 7, 0, 11]\n# file_numbers = 'numbers.json'\n# with open(file_numbers, 'w') as f_num:\n#     json.dump(numbers, f_num)\n\n\nimport os\n\n\ndef greet_user():\n    \"\"\"Greet the user and address them by name\"\"\"\n    filename = 'username.json'\n\n    # If the username.json file already exists\n    if os.path.exists(filename):\n        with open(filename) as f_obj:\n            username = json.load(f_obj)\n            print(\"Welcome back, \" + username + \"!\")\n    else:\n        username = input(\"What is your name? \")\n        # If the file does not exist, create it and write the username to it\n        with open(filename, \"w\") as f_obj:\n            json.dump(username, f_obj)\n            print(\"We'll remember you when you come back, \" + username + \"!\")\n\n\ngreet_user()\n","repo_name":"W1033/Python-learning","sub_path":"《Python 编程从入门到实践》/Chapter 10 文件与异常/10.4.3 重构/remember_me.py","file_name":"remember_me.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"9238449859","text":"import logging\nimport threading\nimport time\nimport os\nimport sys\nfrom serial import Serial\nfrom datetime import datetime, timedelta\nfrom mcp.plugins import plugins\n\nlogger = None\nthread = None\nserial = None\nobs = None\n\nCMD_START = '\\xFE'\nCMD_END = '\\xFF'\n\nHEARTBEAT_STATUS_DELTA = timedelta(seconds=15) # How often to poll for status\nHEARTBEAT_FAILURE_DELTA = timedelta(seconds=120) # When a device is considered broken\n\ncallables = []\n\n# TODO: Configurable? 
(should pull from DB)\nclass Host:\n address = 17\n is_active = False # assume not active to avoid health checks\n last_status_request = datetime.min\n last_status = datetime.min\n\n def __init__(self, serial):\n self._serial = serial\n\n def admit_access(self, doorNum):\n self._serial.write([CMD_START, ord('A'), doorNum, ord('A')^doorNum, CMD_END])\n\nhosts = []\n\ndef init(config, obsMain):\n global thread\n global logger\n global serial\n global obs\n logger = logging.getLogger(__name__)\n obs = obsMain\n\n portName = config.get('serial', 'port')\n baudRate = config.get('serial', 'baud')\n\n logger.info(\"Initializing serial monitor (%s @ %s baud)\" % (portName, baudRate))\n serial = Serial(portName, baudRate, timeout=config.getint('serial', 'timeout'))\n\n # TODO: Configurable?\n hosts.append(Host(serial))\n load_commands(config, obsMain)\n\n thread = threading.Thread(target=watch_serial, args=[])\n thread.daemon = True\n thread.start()\n\ndef load_commands(config, obsMain):\n logger.info(\"Loading serial commands\")\n\n dev_plugins = plugins.get_plugins(os.path.join(os.path.dirname(__file__), '..', '..', 'plugins', 'devices'), 'plugins.devices.')\n for plugin in dev_plugins:\n if (hasattr(plugin, 'configure')):\n plugin.configure(config, obsMain)\n for func in plugin.__dict__.values():\n if (hasattr(func, 'command') and hasattr(func, '__call__')):\n callables.append(func)\n\ndef watch_serial():\n logger.info(\"Starting serial monitor thread\")\n while True:\n for host in hosts:\n try:\n # Delay at start so each attempt begins with waiting to ensure devices\n # have a delay between attempted reads\n time.sleep(0.1)\n\n # If status has not been recently received, request status\n if (datetime.now() - host.last_status > HEARTBEAT_STATUS_DELTA):\n # If we've request status recently, wait before requesting against\n if(datetime.now() - host.last_status_request > HEARTBEAT_STATUS_DELTA):\n host.last_status_request = datetime.now()\n logger.debug(\"Sending heartbeat to address: %s\" % host.address)\n serial.write([CMD_START, ord('S'), ord('S'), CMD_END])\n\n cmd = serial.readline()\n try:\n cmdStart = cmd.index(CMD_START)\n\n # First byte should be a command start: Assume no command if not\n if (cmdStart != 0): continue\n\n cmdLine = bytearray(cmd[cmdStart + 1 : cmd.index(CMD_END)]).decode('utf-8')\n\n logger.debug(\"Read command: %s\" % cmdLine)\n\n cmdArgs = cmdLine.split(',')\n\n cmdArg = cmdArgs[0]\n cmdProcessed = False\n\n for func in callables:\n try:\n if (cmdArg == func.command):\n func(host, cmdLine, cmdArgs)\n cmdProcessed = True\n except Exception as e:\n logger.error(\"Error calling plugin for command %s\" % cmdLine, exc_info=True)\n\n if not cmdProcessed:\n logger.error(\"Invalid command (%s): unknown command type\" % cmdLine)\n except ValueError as e:\n pass # no command found\n except Exception as e:\n logger.error(\"Error processing command (%s)\" % str(bytearray(cmd)), exc_info=True)\n\n\n # If device status has not been received by failure threshold, log and notify.\n # Process this after running commands so we don't assume a device is broken when it is\n # responding slowly to ping requests.\n if (datetime.now() - host.last_status > HEARTBEAT_FAILURE_DELTA):\n logger.error(\"MasterControl RFID communications down! 
Device: %s\" % host.address)\n obs.trigger('device_down', host)\n host.is_active = False\n host.last_status = datetime.now() - HEARTBEAT_STATUS_DELTA # prevents notifification spam\n except IOError as e:\n pass\n except Exception as e:\n logger.error(\"Error reading command\", exc_info=True)\n time.sleep(5)\n","repo_name":"ENTS-Source/MasterControl","sub_path":"mcp/devices/serial_monitor.py","file_name":"serial_monitor.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"74686460472","text":"import numpy as np\nfrom PIL import Image\nimport torch\nfrom skimage.transform import resize\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef train_loader(handgun, shuriken, usb, phone, knife, hard_disk, battery, handgun_labels, shuriken_labels, usb_labels, phone_labels, knife_labels, hard_disk_labels, battery_labels, batch_size, num_classes):\n\n batch_size = batch_size\n img_size = 416\n max_objects = 1\n i = 0\n\n while i < (len(handgun)/(batch_size/num_classes)-1):\n \n img_paths = []\n label_paths = []\n\n img_paths.extend(handgun[(i)%len(handgun):((i+1))%len(handgun)])\n img_paths.extend(shuriken[(i)%len(shuriken):((i+1))%len(shuriken)])\n img_paths.extend(usb[(i)%len(usb):((i+1))%len(usb)])\n img_paths.extend(phone[(i)%len(phone):((i+1))%len(phone)])\n img_paths.extend(knife[(i)%len(knife):((i+1))%len(knife)])\n img_paths.extend(hard_disk[(i)%len(hard_disk):((i+1))%len(hard_disk)])\n img_paths.extend(battery[(i)%len(battery):((i+1))%len(battery)])\n\n\n label_paths.extend(handgun_labels[(i)%len(handgun_labels):((i+1))%len(handgun_labels)])\n label_paths.extend(shuriken_labels[(i)%len(shuriken_labels):((i+1))%len(shuriken_labels)])\n label_paths.extend(usb_labels[(i)%len(usb_labels):((i+1))%len(usb_labels)])\n label_paths.extend(phone_labels[(i)%len(phone_labels):((i+1))%len(phone_labels)])\n label_paths.extend(knife_labels[(i)%len(knife_labels):((i+1))%len(knife_labels)])\n label_paths.extend(hard_disk_labels[(i)%len(hard_disk_labels):((i+1))%len(hard_disk_labels)])\n label_paths.extend(battery_labels[(i)%len(battery_labels):((i+1))%len(battery_labels)])\n\n batch_img = None\n img_shape = (img_size, img_size)\n\n # Image\n\n for img_path in img_paths:\n\n img_path = img_path.rstrip()\n img = np.array(Image.open(img_path))\n \n\n # Handles images with less than three channels\n while len(img.shape) != 3:\n img = np.expand_dims(img, axis = 2)\n img = np.concatenate((img, img, img), 2)\n\n\n h, w, _ = img.shape\n dim_diff = np.abs(h - w)\n # Upper (left) and lower (right) padding\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n # Determine padding\n pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))\n # Add padding\n input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.\n padded_h, padded_w, _ = input_img.shape\n # Resize and normalize\n input_img = resize(input_img, (*img_shape, 3), mode='reflect')\n # Channels-first\n input_img = np.transpose(input_img, (2, 0, 1))\n # As pytorch tensor\n input_img = torch.from_numpy(input_img).float().unsqueeze(0)\n\n if batch_img == None:\n batch_img = input_img\n else:\n batch_img = torch.cat((batch_img, input_img), dim = 0)\n \n \n # #---------\n # # Label\n # #---------\n batch_label = None\n for label_path in label_paths:\n \n\n label_path = label_path.rstrip()\n labels = None\n if os.path.exists(label_path):\n labels = np.loadtxt(label_path).reshape(-1, 5)\n # Extract coordinates for unpadded + 
unscaled image\n x1 = w * (labels[:, 1] - labels[:, 3]/2)\n y1 = h * (labels[:, 2] - labels[:, 4]/2)\n x2 = w * (labels[:, 1] + labels[:, 3]/2)\n y2 = h * (labels[:, 2] + labels[:, 4]/2)\n # Adjust for added padding\n x1 += pad[1][0]\n y1 += pad[0][0]\n x2 += pad[1][0]\n y2 += pad[0][0]\n # Calculate ratios from coordinates\n labels[:, 1] = ((x1 + x2) / 2) / padded_w\n labels[:, 2] = ((y1 + y2) / 2) / padded_h\n labels[:, 3] *= w / padded_w\n labels[:, 4] *= h / padded_h\n # Fill matrix\n filled_labels = np.zeros((max_objects, 5))\n if labels is not None:\n filled_labels[range(len(labels))[:max_objects]] = labels[:max_objects]\n filled_labels = torch.from_numpy(filled_labels).unsqueeze(0)\n\n if batch_label == None:\n batch_label = filled_labels\n else:\n batch_label = torch.cat((batch_label, filled_labels), dim = 0)\n\n i += 1\n \n yield batch_img, batch_label\n","repo_name":"garganm1/YOLO-ADDA-Object-Detection","sub_path":"ADDA & YOLO/code_original_ADDA/custom_dataloader_class_7.py","file_name":"custom_dataloader_class_7.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"5053775510","text":"def is_anagram(first_string, second_string):\n string1 = list(first_string.lower())\n string2 = second_string.lower()\n try:\n for letter in string2:\n string1.remove(letter)\n except ValueError:\n return False\n if len(string1) == 0:\n return True\n else:\n return False\n","repo_name":"Vitor-742/algorithms","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38105595746","text":"from flask import Flask, request, render_template\n\nimport hackbright\n\napp = Flask(__name__)\n\n\n@app.route(\"/student\")\ndef get_student():\n \"\"\"Show information about a student.\"\"\"\n\n # github = \"jhacks\"\n github = request.args.get('github','jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n return render_template(\"student_info.html\" , first=first, gorilla=last, giraffe=github)\n # return \"%s is the GitHub account for %s %s\" % (github, first, last)\n\n@app.route(\"/student-search\")\ndef get_student_form():\n \"\"\"Show form for searching for a student.\"\"\"\n\n return render_template(\"student_search.html\")\n\n\n@app.route(\"/show-student-add-form\")\ndef student_add():\n \"\"\"Add a student.\"\"\"\n hackbright.make_new_student(first_name, last_name, github)\n \n return render_template(\"student-add.html\")\n\n\n@app.route(\"/add-student\")\ndef student_add():\n \"\"\"Add a student.\"\"\"\n\n first_name = request.form[\"firstname\"] # passing on name value from form to get the data\n last_name = request.form[\"lastname\"]\n GitHub = request.form[\"new_github\"]\n\n hackbright.make_new_student(first_name, last_name, Github) # from hackbright.py file\n \n html = render_template('student_info_display.html',\n first_name=first_name,\n last_name=last_name,\n Github=github)\n return html\n\n\nif __name__ == \"__main__\":\n hackbright.connect_to_db(app)\n app.run(debug=True)\n","repo_name":"rmmistry/project-tracker-flask","sub_path":"hackbright-web.py","file_name":"hackbright-web.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19524108577","text":"from sknet.dataset import 
mnist,svhn,cifar10,fashionmnist,cifar100, stl10\nfrom sknet.dataset import preprocess\nfrom sknet.utils import plotting\n\nimport pylab as pl\nimport os\n# Make Tensorflow quiet.\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n\n\n# Put the preprocessing function into a list\npreprocessing_list = [preprocess.Standardize,\n preprocess.ZCAWhitening]\n\n# Save number of preprocessing\nn_preprocessing = len(preprocessing_list)\n\n# Put the dataset functions into a list s.t. dataset_list[0].load() \n# loads the dataset 0\ndataset_list = [mnist(),fashionmnist(),svhn(),cifar10(),cifar100(),stl10()]\ndataset_name = ['mnist','fashionmnist','svhn','cifar10','cifar100','stl10']\n\n# Save number of dataset\nn_dataset = len(dataset_list)\n\nfor dataset,dataset_n in zip(dataset_list,dataset_name):\n pl.figure(figsize = (20,n_preprocessing*2))\n dataset.load()\n # Initialize the counter for subplot\n cpt = 1\n for i,im in enumerate(dataset[\"train_set\"][0][:10]):\n pl.subplot(n_preprocessing+1,10,cpt)\n plotting.imshow(im)\n cpt+=1\n if(i==4):\n pl.title('Original Data')\n for preprocessing in preprocessing_list:\n dataset.preprocess(preprocessing,fit=\"train_set\",\n transform=\"train_set\",inplace=False,name=\"\\t\")\n images = dataset[\"train_set\"][0][:10]\n for i,im in enumerate(images):\n pl.subplot(n_preprocessing+1,10,cpt)\n plotting.imshow(im)\n cpt+=1\n if(i==4):\n pl.title(dataset.preprocessing.name)\n pl.tight_layout()\n pl.savefig('test_preprocessing_'+dataset_n+'.png')\n\n","repo_name":"RandallBalestriero/Sknet","sub_path":"examples/quickstart_preprocess.py","file_name":"quickstart_preprocess.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"28497206268","text":"import requests\nimport json\nfrom urllib import parse\nif __name__ == '__main__':\n\tbase_url = \"https://fanyi.baidu.com/sug\"\n\t#wd = input(\"input your keyword:\")\n\twd = 'girl'\n\tdata = {'kw':wd}\n\tprint(type(data))\n\t#因为使用post,至少因该包含Content-Length字段\n\theaders = {'Content-Length':str(len(data))}\n\n\trsp = requests.post(url=base_url,data=data,headers=headers)\n\tprint(rsp.text)\n\tprint(rsp.json())","repo_name":"KenZP/tulingxueyuan","sub_path":"requests03.py","file_name":"requests03.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34778154386","text":"from queue import PriorityQueue\n\nimport numpy as np\n\n# from blitz_protocol import BlitzProtocol\n# from htlc_protocol import HTLCProtocol\n# from run_simulator import get_random_element\nfrom constants import FAILED, SUCCESS, GO_IDLE, RELEASING, TX_ER_PUBLISHED, RELEASE_ALL, REVOKING, TX_ER_CHECKING, \\\n REVOKE_ALL\n\nimport copy\nimport random\nimport array\n\n\n# from sim.transactions import Transaction\n\n\nclass Simulator():\n failed_Blitz = []\n failed_HTLC = []\n array_rounds = []\n\n # stateSet = True --unnecessary\n\n def __init__(self, protocol, epoch_size):\n self.protocol = protocol\n self.epoch_size = epoch_size\n self.round_counter = 0\n\n\n '''\n Preforms the simulation of a payment for specific transactions\n Params:\n @transactions -- [Transaction] array of txs to be simulated\n '''\n\n def simulate_transactions(self, transactions):\n np.random.seed(0)\n self.transactions = transactions\n\n # Generate random transactions\n self.txs = []\n self.txs_cleaning = copy.copy(transactions)\n\n # First get rid of all the transactions which lead to canonical 
error\n for tx in self.txs_cleaning:\n if tx.find_path():\n is_processable = True\n for dchannel in tx.dchannels_path:\n if (dchannel.balance < tx.payment_amount or # according to topology the balance is not enough\n dchannel.min_htlc > tx.payment_amount\n # according to topology the payment amount is below the minimum\n ):\n is_processable = False\n break\n if is_processable:\n self.txs.append(tx)\n # hits=0\n # for tx in self.txs:\n # for tx2 in self.txs:\n # if tx.id != tx2.id:\n # for dchannel in tx.dchannels_path:\n # for dchannel2 in tx2.dchannels_path:\n # if dchannel.src.pk== dchannel2.src.pk and dchannel.trg.pk== dchannel2.trg.pk:\n # hits+=1\n #\n # print(\"HITS\")\n # print(hits)\n self.array_rounds=[]\n for i in range(0, 1000000):\n self.array_rounds.append([])\n\n\n for tx in self.txs:\n position = np.random.randint(0, 5000)\n self.array_rounds[position].append(tx)\n\n for i, round in enumerate(self.array_rounds):\n for j in range(len(round)):\n tx = round[j]\n # process next tx in the row\n if tx.status != FAILED and tx.status != SUCCESS:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n self.round_counter += 1\n\n if tx.status == REVOKE_ALL:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n\n # if the TX_ER_CHECKING is set process it right away\n if tx.status == TX_ER_CHECKING:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n\n if tx.status == GO_IDLE:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n # release all channels immediately in this round\n if tx.status == RELEASE_ALL or tx.status == TX_ER_PUBLISHED:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n # put tx further away and then revoke all channels\n # if tx.status == TX_ER_PUBLISHED:\n # self.array_rounds[i + self.delay_param].append(tx)\n # distribute tx over delay_in_operations in an array\n if tx.status == REVOKING:\n for step in range(len(tx.dchannels_path)):\n self.array_rounds[int(i + self.epoch_size * (step + 1))].append(tx)\n\n if tx.status != FAILED and tx.status != SUCCESS and tx.status != REVOKING and tx.status != TX_ER_PUBLISHED:\n self.array_rounds[i + 1].append(tx)\n\n old = False\n if (old):\n # Process all the txs that according to the topology could be processed\n while (True):\n\n # check if all txs are processed until the end\n all_done = True\n done_counter = 0\n idle_counter = 0\n for tx in self.txs:\n if tx.status != SUCCESS and tx.status != FAILED:\n if tx.status == GO_IDLE:\n idle_counter += 1\n all_done = False\n else:\n done_counter += 1\n\n if all_done:\n print(done_counter)\n break\n\n if self.round_counter % 1000 == 0:\n print(done_counter)\n print(self.round_counter)\n\n percentage_of_done = done_counter / len(self.txs)\n if percentage_of_done > 0.9:\n print(done_counter)\n break\n # for tx in type(self.protocol).successfully_reached_receiver_txs:\n # if tx.status!= FAILED and tx.status!=SUCCESS:\n # self.protocol.continue_tx(tx, self.round_counter, self.delay_param )\n\n # check if it is time for the next epoch\n # if len(type(self.protocol).successfully_reached_receiver_txs) == self.delay_param:\n # # next epoch\n # self.go_to_next_epoch()\n\n # if Simulator.round_counter%self.delay_param==0:\n # self.go_to_next_epoch()\n # print(done_counter)\n # if there is no more txs to process do the last epoch and release all locked channels------------------------\n # if (len(self.txs) - done_counter-len(Simulator.failed_HTLC)-len(Simulator.failed_Blitz)) == idle_counter:\n # 
self.go_to_next_epoch()\n # while len(Simulator.failed_HTLC) != 0 or len(Simulator.failed_Blitz) != 0:\n # self.process_failed_tx_form_the_last_epoch()\n\n # ------------------------------------------------------------------------------------------------------------\n\n # Pick a random tx\n r = np.random.randint(0, len(self.txs))\n tx = self.txs[r]\n\n # tx, index = self.get_random_element(self.txs)\n\n # if(Simulator.counter_of_operations==1000):\n # Simulator.counter_of_operations=0\n # self.go_to_next_epoch()\n #\n # Simulator.counter_of_operations+=1\n # Perform one execution step of the tx\n\n if tx.status != FAILED and tx.status != SUCCESS and tx.failed_purposely == False:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n self.round_counter += 1\n\n # if the TX_ER_CHECKING is set process it right away\n if tx.status == TX_ER_CHECKING:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n\n if tx.status == GO_IDLE:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n if tx.status == TX_ER_PUBLISHED or tx.status == RELEASE_ALL or tx.status == REVOKING:\n self.protocol.continue_tx(tx, self.round_counter, self.epoch_size)\n\n # if tx.status == FAILED or tx.status == SUCCESS or tx.status == GO_IDLE:\n # # Tx finished or waiting the end of the epoch\n # self.txs.pop(index)\n\n '''\n After 1 epoch all the txs that reached the receiver are either all released in HTLC/Blitz case\n or revoked(only the last channel)/tx_er_published(all revoked) in HTLC/Blitz\n '''\n\n # def go_to_next_epoch(self):\n # self.process_failed_tx_form_the_last_epoch()\n #\n # # failedTxs = np.random.choice(type(self.protocol).succesfullTxs, size=int(len(type(self.protocol).succesfullTxs) * 0.2), replace=False)\n # failedTxs = random.sample(list(type(self.protocol).successfully_reached_receiver_txs),\n # int(len(type(\n # self.protocol).successfully_reached_receiver_txs) * self.percentage_of_failed))\n #\n # # add all t.ids in an array\n # for t in failedTxs:\n # type(self.protocol).all_failedTxs.append(t.id)\n # type(self.protocol).failed_purposely.append(t)\n #\n # for t in type(self.protocol).successfully_reached_receiver_txs:\n # if isinstance(self.protocol, BlitzProtocol):\n # if t in failedTxs:\n # t.status = TX_ER_PUBLISHED\n # t.failed_purposely = True\n # self.protocol.continue_tx(t)\n # # imulator.failed_Blitz.append(t)\n # else:\n # t.status = RELEASE_ALL\n # self.protocol.continue_tx(t)\n # elif isinstance(self.protocol, HTLCProtocol):\n # if t in failedTxs:\n # t.status = REVOKING\n # t.failed_purposely = True\n # Simulator.failed_HTLC.append(t)\n # else:\n # t.status = RELEASE_ALL # this has to be done immediately\n # self.protocol.continue_tx(t)\n #\n # type(self.protocol).successfully_reached_receiver_txs = []\n #\n # '''\n # All txs that are purposely failed in all epochs until now are processed.\n # '''\n #\n # def process_failed_tx_form_the_last_epoch(self):\n # temp_failed = []\n # if isinstance(self.protocol, BlitzProtocol):\n # for t in Simulator.failed_Blitz:\n # self.protocol.continue_tx(t)\n # assert t.status == FAILED\n # if t.status != FAILED:\n # temp_failed.append(t)\n #\n # Simulator.failed_Blitz = []\n # Simulator.failed_Blitz = copy.copy(temp_failed)\n #\n # temp_failed = []\n #\n # if isinstance(self.protocol, HTLCProtocol):\n # for t in Simulator.failed_HTLC:\n # self.protocol.continue_tx(t)\n # if t.status != FAILED:\n # temp_failed.append(t)\n #\n # Simulator.failed_HTLC = []\n # Simulator.failed_HTLC = copy.copy(temp_failed)\n 
#\n # temp_failed = []\n\n '''\n Gets a random element from the elem_list\n '''\n\n def get_random_element(self, elem_list):\n if Simulator.stateSet == False:\n random.setstate(self.state)\n Simulator.stateSet = True\n r = random.randint(0, len(elem_list) - 1)\n print(r)\n return elem_list[r], r\n","repo_name":"anamarijaeres/AME-simulator","sub_path":"sim/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"37766292461","text":"# -*- coding: utf-8 -*-\r\n#using python 3\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport math\r\nimport re\r\nfrom sqlalchemy import create_engine\r\nimport re\r\nimport pymysql\r\npymysql.install_as_MySQLdb()\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nscore_table = pd.read_csv('E:/scoretable.csv')\r\n\r\nn_users=np.max(score_table['new_id'])\r\nn_movices=int(np.max(score_table['fid']))\r\nindexmax = int(score_table.shape[0])\r\nscore_table_len = len(score_table)\r\nscorearray = np.zeros((n_users,n_movices))\r\n\r\nprint('ok')\r\nprint('ok')\r\nfor index in range(0,indexmax):\r\n row = int(score_table.loc[index]['new_id']-1)\r\n column = int( score_table.loc[index]['fid']-1)\r\n scorearray[row][column] = score_table.loc[index]['score']\r\ndf_s = pd.DataFrame(scorearray)\r\n#df_s.to_csv('E:/scorearrayspyder2.csv',index)\r\nprint('ok')\r\nprint('ok')\r\nl = [ (indexs,i) for indexs in df_s.index for i in\r\n range(len(df_s.loc[indexs].values)) if(df_s.loc[indexs].values[i] ==0)]\r\ndf_location = pd.DataFrame(l)+1\r\ndf_location.to_csv('E:/nanlocation.csv',index =None)\r\nprint('ok')\r\nprint('ok')\r\n\r\nfrom keras.models import model_from_json\r\njson_file = open('model629.json', 'r')\r\nloaded_model_json = json_file.read()\r\njson_file.close()\r\nmodel = model_from_json(loaded_model_json)\r\nmodel.load_weights(\"model629.h5\")\r\npred_array = np.zeros((n_users,n_movices))\r\n\r\n# for s in range(0,len(df_location)):\r\n# # i = df_location.loc[s][0]\r\n# # j = df_location.loc[s][1]\r\n# # a = i-1\r\n# # b = j-1\r\n# # pred_array[a][b] = model.predict([np.array([i]),np.array([j])])\r\n# #\r\n# # df_pred = pd.DataFrame(pred_array)\r\n# # df_pred.to_csv('E:/pred.csv',index = None)\r\n# #\r\n# # print('ok')\r\n# # print('ok')\r\n# # df_preddata = df_s+df_pred\r\n# # df_preddata.to_csv('E:/preddata.csv',index = None)\r\n# # print('ok')\r\n\r\nusers=score_table['new_id'].values\r\nmovices=score_table['fid'].values\r\ny=score_table['score'].values\r\npred1 = model.predict([np.array([11]),np.array([1])])\r\npred2 = model.predict([np.array([users[11]]), np.array([movices[1]])])\r\n\r\npred3 = model.predict([np.array([40]),np.array([1])])\r\npred4 = model.predict([np.array([users[40]]), np.array([movices[1]])])\r\n\r\n\r\nprint('pred1:',pred1)\r\nprint('pred2:',pred2)\r\nprint('pred3:',pred3)\r\nprint('pred4:',pred4)","repo_name":"zhuanglichun/LIULIUNAO","sub_path":"temp6294.py","file_name":"temp6294.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"5301190954","text":"import numpy as np\nimport torch\nimport tqdm.auto as tqdm\nfrom .utils import getattr_recursive, glorot_uniform, LayerHooks, merge_maps, relu_arr\n\n\nclass AttributionModel:\n def __init__(self, model, layers, use_cuda=True, max_batch_size=16, verbose=1, output_activation=None, use_gradients=True, **kwargs):\n self.model = model.eval()\n 
self.use_cuda = use_cuda\n self.layers = []\n self.acts_and_grads = []\n self.max_batch_size = max_batch_size\n self.verbose = verbose\n if isinstance(layers, str) or not hasattr(layers, '__iter__'):\n layers = [layers]\n for layer in layers:\n if isinstance(layer, str):\n # This is used for named tensors such as 'decoder1.decoder2'\n layer = getattr_recursive(self.model, layer)\n self.layers.append(layer)\n self.acts_and_grads.append(LayerHooks(self.model, layer, use_gradients=use_gradients))\n self.use_gradients = use_gradients\n self.output_activation = output_activation\n\n def forward(self, image, **kwargs):\n if self.use_cuda:\n image = image.cuda()\n self.model = self.model.cuda()\n self.reset()\n output = self(image)\n if self.use_gradients:\n self.model.zero_grad()\n output.backward(torch.ones_like(output), retain_graph=False)\n att_maps = []\n self.stop()\n iterator = zip(self.layers, self.acts_and_grads)\n if self.verbose == 1:\n iterator = tqdm.tqdm(iterator, total=len(self.layers))\n for layer, acts_and_grads in iterator:\n acts, grads = acts_and_grads.get()\n if self.output_activation is not None:\n acts = [self.output_activation(act) for act in acts]\n weights = self.get_layer_weights(input_image=image, activations=acts, gradients=grads, predictions=output, layer=layer, **kwargs)\n # sum over 0-images, 1,2-channels\n att_map = np.sum(weights[:, np.newaxis] * acts, axis=(0, 1, 2))\n att_map = np.maximum(att_map, 0)\n att_maps.append(att_map)\n return att_maps\n\n def get_layer_weights(self, input_image=None, activations=None, gradients=None, predictions=None, **kwargs):\n raise NotImplementedError('get_layer_weights is only implemented in subclasses of InterpModel')\n\n def start(self):\n for item in self.acts_and_grads:\n item.start()\n\n def stop(self):\n for item in self.acts_and_grads:\n item.stop()\n\n def clear(self):\n for item in self.acts_and_grads:\n item.clear()\n\n def reset(self, restart=True):\n for item in self.acts_and_grads:\n item.reset(restart=restart)\n\n def __call__(self, *args, **kwargs):\n output = self.model(*args, **kwargs)\n if self.output_activation:\n output = self.output_activation(output)\n return output\n\n\nclass GradCAM(AttributionModel):\n def get_layer_weights(self, gradients=None, **kwargs):\n weights = np.mean(gradients[-1], axis=(2, 3), keepdims=True)\n return weights\n\n @property\n def name(self):\n return 'GradCAM'\n\n\nclass GradCAMPlus(AttributionModel):\n \"\"\"adapted from https://github.com/jacobgil/pytorch-grad-cam/blob/master/pytorch_grad_cam/grad_cam_plusplus.py\"\"\"\n def get_layer_weights(self, activations=None, gradients=None, **kwargs):\n gradients = gradients[-1]\n activations = activations[-1]\n grads_2 = gradients ** 2\n grads_3 = grads_2 * gradients\n sum_acts = np.sum(activations, axis=(2, 3), keepdims=True)\n eps = 1e-6\n aij = grads_2 / (2 * grads_2 + sum_acts * grads_3 + eps)\n weights = np.maximum(gradients, 0) * aij\n weights = np.sum(weights, axis=(2, 3), keepdims=True)\n return weights\n\n @property\n def name(self):\n return 'GradCAM++'\n\n\nclass ScoreCAM(AttributionModel):\n def __init__(self, model, layers, use_gradients=False, verbose=1, **kwargs):\n super().__init__(model, layers, use_gradients=False, verbose=verbose, **kwargs)\n\n def get_layer_weights(self,\n input_image=None,\n activations=None,\n predictions=None,\n zero_baseline=True,\n low_memory=False,\n **kwargs):\n activations = activations[-1]\n with torch.no_grad():\n upsample = torch.nn.UpsamplingBilinear2d(size=input_image.shape[-2:])\n acts 
= torch.from_numpy(activations)\n if self.use_cuda:\n self.model = self.model.cuda()\n if not low_memory:\n acts = acts.cuda()\n if low_memory:\n input_image = input_image.cpu()\n\n max_val = acts.view(*acts.shape[:2], -1).max(dim=-1).values[:, :, None, None]\n min_val = acts.view(*acts.shape[:2], -1).min(dim=-1).values[:, :, None, None]\n acts = (acts - min_val) / (max_val - min_val)\n upsampled = upsample(acts)\n if not low_memory:\n masked_images = input_image[:, None] * upsampled[:, :, None]\n scores = []\n model_output = predictions.cpu().numpy()\n if self.verbose == 2:\n pbar = tqdm.tqdm(total=input_image.shape[0] * upsampled.shape[1])\n for image_idx in range(input_image.shape[0]):\n for idx in range(0, upsampled.shape[1], self.max_batch_size):\n if low_memory and self.use_cuda:\n masked_batch = input_image[image_idx, None].cuda() * upsampled[image_idx, idx:idx + self.max_batch_size, None].cuda()\n else:\n masked_batch = masked_images[image_idx, idx:idx + self.max_batch_size]\n masked_prediction = self.__call__(masked_batch).cpu().numpy()\n if zero_baseline:\n batch_scores = np.sum(masked_prediction, axis=(1, 2, 3))\n else:\n batch_scores = np.sum(masked_prediction - model_output, axis=(1, 2, 3))\n scores.extend(batch_scores)\n if self.verbose == 2:\n pbar.update(masked_batch.shape[0])\n if self.verbose == 2:\n pbar.close()\n scores = torch.Tensor(scores)\n scores = scores.view(activations.shape[:2])\n weights = torch.nn.Softmax(dim=-1)(scores).numpy()\n return weights[:, :, None, None]\n\n @property\n def name(self):\n return 'ScoreCAM'\n\n\nclass KernelWeighted(AttributionModel):\n def __init__(self, model, layers, merge_layers=True, use_gradients=False, output_activation=relu_arr, verbose=1, **kwargs):\n super().__init__(model, layers, use_gradients=False, output_activation=output_activation, verbose=verbose, **kwargs)\n self.merge_layers = merge_layers\n\n def forward(self, image, merge_layers=True, **kwargs):\n layer_maps = super().forward(image, **kwargs)\n if merge_layers:\n return merge_maps(layer_maps, rescale=False)\n else:\n return layer_maps\n\n def get_layer_weights(self, input_image=None, activations=None, predictions=None, layer=None, **other):\n baseline_weight = 1. 
/ torch.sum(predictions, (1, 2, 3))\n if isinstance(layer, torch.nn.Conv2d):\n output_axis = 0\n elif isinstance(layer, torch.nn.ConvTranspose2d):\n output_axis = 1\n else:\n raise ValueError('KernelWeighted has only been implemented for conv2d and convtranspose2d layers right now')\n\n kernel = torch.clone(layer.weight)\n empty_kernel = torch.from_numpy(glorot_uniform(kernel.shape)).float()\n if self.use_cuda:\n empty_kernel = empty_kernel.cuda()\n\n weights = torch.zeros([input_image.shape[0], kernel.shape[output_axis]])\n idx_slice = [slice(None) for _ in range(kernel.ndim)]\n iterator = tqdm.trange(kernel.shape[output_axis]) if self.verbose == 2 else range(kernel.shape[output_axis])\n with torch.no_grad():\n for idx in iterator:\n temp_kernel = torch.clone(kernel).detach()\n idx_slice[output_axis] = idx\n temp_kernel[idx_slice] = empty_kernel[idx_slice]\n layer.weight = torch.nn.Parameter(temp_kernel, requires_grad=False)\n dependent_pred = self(input_image)\n dependent_contribution = torch.sum(predictions - torch.minimum(dependent_pred, predictions),\n dim=(1, 2, 3), keepdim=True)\n\n temp_kernel = torch.clone(empty_kernel)\n temp_kernel[idx_slice] = kernel[idx_slice]\n layer.weight = torch.nn.Parameter(temp_kernel, requires_grad=False)\n independent_pred = self(input_image)\n independent_contribution = torch.sum(torch.minimum(independent_pred, predictions),\n dim=(1, 2, 3), keepdim=True)\n\n weight = (dependent_contribution * independent_contribution) * baseline_weight\n weight = weight[:, 0, 0, 0] # weight will always have shape: [batch, 1, 1, 1]\n weights[:, idx] = weight\n layer.weight = torch.nn.Parameter(kernel)\n weights = (weights - torch.min(weights)) / (torch.max(weights) - torch.min(weights))\n return weights.cpu().numpy()[:, :, None, None]\n\n @property\n def name(self):\n return 'KernelWeighted'\n","repo_name":"Mullans/KernelWeighted","sub_path":"src/interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"12763250953","text":"#!/usr/bin/env python3\nfrom os import getenv\nusername = getenv(\"USER\") # Get the user's username and calculate the length\nusernamelen = len(username)\nans = 0\n\nwhile True:\n ans += 1 # Starting at zero, loop until the first number is greater than\n length = len(str(ans)) + usernamelen # the length of the number plus the username length\n if (ans // length // 12) > length:\n break\n\nprint(ans)\n","repo_name":"jabedude/x86-Assembly-Bomb","sub_path":"stage3.py","file_name":"stage3.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10386805345","text":"class Solution:\n def merge(self, arr, tempArr, left, middle, right):\n i = left\n j = middle + 1\n k = left\n inversions = 0\n while i <= middle and j <= right:\n if arr[i] <= arr[j]:\n tempArr[k] = arr[i]\n i += 1\n k += 1\n else:\n # Since left and right subarrays are sorted, so all the remaining elements in left-subarray (a[i+1], a[i+2] … a[mid]) will be greater than a[j].\n inversions += (middle - i + 1)\n tempArr[k] = arr[j]\n j += 1\n k += 1\n while i <= middle:\n tempArr[k] = arr[i]\n i += 1\n k += 1\n while j <= right:\n tempArr[k] = arr[j]\n j += 1\n k += 1\n for i in range(left, right + 1):\n arr[i] = tempArr[i]\n return inversions\n\n def mergeSort(self, arr, tempArr, left, right):\n inversions = 0\n if left < right:\n middle = left + ((right - left) 
// 2)\n inversions += self.mergeSort(arr, tempArr, left, middle)\n inversions += self.mergeSort(arr, tempArr, middle + 1, right)\n inversions += self.merge(arr, tempArr, left, middle, right)\n return inversions\n\n def inversionCount(self, arr):\n n = len(arr)\n if n > 1:\n return self.mergeSort(arr, [0 for _ in range(n)], 0, n - 1)\n else:\n return 0\n\n\nprint(Solution().inversionCount([1, 20, 6, 4, 5]))\n","repo_name":"akashanup/programming","sub_path":"CountInversionsInAnArray/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1527984437","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()\n\nreadme = open('README.rst').read()\nhistory = open('HISTORY.rst').read().replace('.. :changelog:', '')\n\nsetup(\n name='https://github.com/arnaldorusso/apissah',\n version='0.1.0',\n description='\"Apiçá\" is a Tupi-Guarani word meaning \"Atention\". Apiçá is a regressive clock.',\n long_description=readme + '\\n\\n' + history,\n author='Arnaldo Russo',\n author_email='arnaldorusso@gmail.com',\n url='https://github.com/arnaldorusso/apissah',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n install_requires=[\n ],\n license=\"BSD\",\n zip_safe=False,\n keywords='https://github.com/arnaldorusso/apissah',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ],\n test_suite='tests',\n)\n","repo_name":"arnaldorusso/apissah","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29652262541","text":"from node import Node\nfrom datatypes import Stack, Queue\n\n\ndef breadth_first(root):\n queue = Queue()\n queue.push(\"separator\")\n queue.push(root)\n\n height = 0\n while not queue.empty():\n node = queue.pop()\n if isinstance(node, Node):\n print(node)\n if not node.leaf():\n queue.push(\"separator\")\n\n # queue ignores pushes of None, these are potential None pushes\n queue.push(node.left)\n queue.push(node.right)\n\n if node == \"separator\":\n print(f\"height {height}\")\n height += 1\n\n\nA = Node(\"A\")\nB = Node(\"B\")\nC = Node(\"C\")\nD = Node(\"D\")\nE = Node(\"E\")\nF = Node(\"F\")\n\nA.left = B\nA.right = C\nB.left = D\nD.left = E\nD.right = F\n\nbreadth_first(A)\n","repo_name":"bensmus/trees","sub_path":"BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33759042502","text":"def softmax(x):\n return np.exp(x)/np.sum(np.exp(x))\n\n\ndata_path = 'lesion_data_multiclass/test'\n\n\ndata_transforms = transforms.Compose([\n transforms.CenterCrop(224),\n transforms.ToTensor()\n])\n\ntrain_dataset = datasets.ImageFolder(\n root=data_path,\n transform=data_transforms\n)\ntrain_loader = torch.utils.data.DataLoader(\n 
train_dataset\n)\n\n\ndf = pd.DataFrame()\n\nfor (images, labels) in train_loader:\n images = Variable(images)\n if torch.cuda.is_available():\n images = images.cuda()\n\n outputs2 = model_conv(images)\n # add probabilities to DataFrame\n df = df.append(pd.Series(np.apply_along_axis(softmax, 1, outputs2.detach().numpy())[0]),ignore_index=True)\n break\n \n","repo_name":"ProjectSeminarISM/part2","sub_path":"test_dummy.py","file_name":"test_dummy.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2588438982","text":"import requests\nimport time\n\ndate = str(int(time.time() * 1000)) \n\n\nprint(date)\n\n\nurl = \"https://bbin-tw.pragmaticplay.net/gs2c/html5Game.do?jackpotid=0&gname=888%20Dragons&extGame=1&ext=0&cb_target=exist_tab&symbol=vs1dragon8&jurisdictionID=99&mgckey=AUTHTOKEN@1743de549dec5c750b1fcca844eabdb10ec630b329b756f7e652a36c5817ebc3~stylename@bbin~SESSION@042b5b06-acdf-4ee9-90ba-0fb7ecacd137&tabName=\"\n\ntoken = \"7ba69023cb8f0a3ab7985cf88e348f05b3d23b70b601d83d28bc298b3c87de58\"\n\nheader = {\n \"Authorization\": token,\n \"content-length\": \"170\",\n \"content-type\" : \"text/html\",\n \"content-type\" : \"charset=UTF-8\",\n \"User-Agent\" : \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36\"\n}\n\n\n\ndata = {\n \"tw\" : \"0.00\",\n \"balance\" :\"94.00\",\n \"index\" : \"3\",\n \"balance_cash\" : \"94.00\",\n \"balance_bonus\" : \"0.00\",\n \"na\" : \"s\",\n \"stime\" : date,\n \"sa\" : \"6, 6, 5\",\n \"sb\" : \"6, 6, 5\",\n \"sh\" : \"3\",\n \"c\" : \"1.00\",\n \"sver\" : \"5\",\n \"counter\" : \"6\",\n \"l\" : \"1\",\n \"s\" : \"3, 3, 6, 6, 6, 5, 5, 5, 6\",\n \"w\" : \"0.00\"\n}\n\nres = requests.post(url, headers=header, data=data)\n# print(res.url)\nprint(res.headers)\nprint(res.status_code)\nprint(\"\\n\\n\\n\\n\")\nprint(res.text)\n# print(\"\\n\\n\\n\\n\")\n# print(res.content)\n","repo_name":"KongoHuster/Game888","sub_path":"888.py","file_name":"888.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"37708863651","text":"from .base_page import BasePage\nfrom .locators import ProductPageLocators\n\n\nclass ProductPage(BasePage):\n def add_item_to_basket(self):\n # добавить товар в корзину\n button = self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET_BUTTON)\n button.click()\n\n def check_alert_after_adding_item_in_basket(self):\n # проверка отображения сообщения об успешном добавлении товара в корзину\n assert self.is_element_present(*ProductPageLocators.ITEM_ADDED_MESSAGE), \\\n \"Test failed: The message about adding an item to basket is not displayed!\"\n\n def check_name_and_price_of_item_in_basket(self):\n # проверка соответствия наименования и цены товара до и после добавления в корзину\n try:\n item_name = self.browser.find_element(*ProductPageLocators.ITEM_NAME).text #\n item_price = self.browser.find_element(*ProductPageLocators.ITEM_PRICE).text #\n item_name_in_message = self.browser.find_element(*ProductPageLocators.ITEM_NAME_IN_MESSAGE).text\n basket_price = self.browser.find_element(*ProductPageLocators.BASKET_PRICE).text #\n finally:\n assert item_name == item_name_in_message,\"Test failed: Wrong item added to basket!\"\n assert item_price in basket_price, \\\n \"Test failed: The price of the item in the basket does not match the original price of the item!\"\n\n 
def should_not_be_success_message(self):\n # проверка отсутствия сообщения об успешном добавлении товара в корзину\n assert self.is_not_element_present(*ProductPageLocators.ITEM_ADDED_MESSAGE), \\\n \"The message is presented, but should not be!\"\n\n def should_disappear_from_the_page(self):\n # проверка: сообщение о добавлении товара исчезает со страницы\n assert self.is_disappeared(*ProductPageLocators.ITEM_ADDED_MESSAGE), \\\n \"The message did not disappear within the allotted time!\"","repo_name":"shmurge/SeleniumPythonFinalPetProject","sub_path":"pages/product_page.py","file_name":"product_page.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13748140574","text":"from django.urls import path\nfrom .views import PostView, RoomCreateView, RoomUpdateView,RoomDeleteView\nfrom main import views\n\nurlpatterns = [\n path('', views.index, name='main'),\n path('rules', views.rules, name='rules'),\n path('map', views.SearchView.as_view(), name='map'),\n path('/', PostView.as_view(), name='room_details'),\n path(\"room/new\", RoomCreateView.as_view(), name=\"room_create\"),\n path(\"room//edit\", RoomUpdateView.as_view(), name=\"room_edit\"),\n path(\"room//delete\", RoomDeleteView.as_view(), name=\"room_delete\"),\n path('MyRooms', views.MyRooms, name='MyRooms'),\n]","repo_name":"zharkovdmitrii9911/Server","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"6228786345","text":"from .widget import Widget\nfrom .widget_group import WidgetGroup\nfrom typing import Optional\n\nclass Entry(Widget):\n \"\"\"\n Classe para criar caixas de input.\n \"\"\"\n def __init__(self, screen, x: int, y: int, size: list[int], border: int = 0, default_text: str = \"\", widget_group: Optional[WidgetGroup] = None):\n super().__init__(screen, x, y, size, widget_group = widget_group)\n self.__border_size = border\n self.__default_text = default_text\n \n self.__selected = False\n self.__pipe_on = False\n \n self.__pipe_interval = int(screen.get_application().get_fps() * 0.8)\n self.__frames = 0\n \n self.__input_string = str()\n self.__build()\n \n self.update_text()\n \n def __build(self):\n \"\"\"\n Cria todas as imagens e objetos gráficos\n necessários para desenhar o widget.\n \"\"\"\n\n self.__border = self.screen.create_rectangle(\n self.x - self.__border_size, self.y - self.__border_size,\n self.width + self.__border_size * 2,\n self.height + self.__border_size * 2,\n color = (0, 0, 0)\n )\n\n self.__background = self.screen.create_rectangle(\n self.x, self.y, self.width, self.height,\n color = (255, 255, 255)\n )\n\n self.__text = self.screen.create_text(\n self.__input_string, self.x + 5, self.y + self.height // 2,\n anchor_x = \"left\", anchor_y = \"center\",\n font_size = self.height * 0.4,\n color = (0, 0, 0, 255)\n )\n\n def add_char(self, char: str):\n \"\"\"\n Adiciona um caractere ao final da caixa de texto.\n \"\"\"\n self.__input_string += char\n self.update_text()\n return True\n\n def check(self, *cursor_pos: int):\n \"\"\"\n Verifica se o cursor se encontra na posição da caixa de texto.\n \"\"\"\n in_x = self.x <= cursor_pos[0] <= (self.x + self.width)\n in_y = self.y <= cursor_pos[1] <= (self.y + self.height)\n \n if in_x and in_y:\n self.__background.color = (240, 240, 240)\n return True\n \n self.__background.color = (250, 250, 250)\n return 
False\n\n def clear(self):\n \"\"\"\n Limpa a caixa de texto.\n \"\"\"\n self.__input_string = \"\"\n self.update_text()\n\n def delete_char(self):\n \"\"\"\n Apaga o último caractere da caixa de texto.\n \"\"\"\n if not self.__input_string: return\n \n self.__input_string = self.__input_string[:-1]\n self.update_text()\n\n def draw(self):\n \"\"\"\n Desenha o widget na tela.\n \"\"\"\n self.__border.draw()\n self.__background.draw()\n self.__text.draw()\n\n def get_text(self) -> str:\n \"\"\"\n Retorna o texto da caixa de texto.\n \"\"\"\n return self.__input_string\n\n def next(self):\n \"\"\"\n Avança para o próximo estado da animação.\n \"\"\"\n self.__frames = (self.__frames + 1) % self.__pipe_interval\n if self.__frames == 0: self.__pipe_on = not self.__pipe_on\n \n self.update_text()\n\n def set_pipe(self, boolean: bool):\n \"\"\"\n Ativa ou desativa o pipe.\n \"\"\"\n self.__selected = boolean\n\n def update_text(self):\n \"\"\"\n Atualiza o objeto gráfico de texto.\n \"\"\"\n if not self.__input_string:\n self.__text.text = self.__default_text\n self.__text.color = (60, 60, 60, 255)\n \n else:\n self.__text.text = self.__input_string + (\"|\" if self.__pipe_on and self.__selected else \"\")\n self.__text.color = (0, 0, 0, 255)\n","repo_name":"JeanExtreme002/University-Projects--UFBA-","sub_path":"MATA55 - Programação Orientada a Objetos/Battle Chess Game/app/screens/util/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"pt","doc_type":"code","stars":9,"dataset":"github-code","pt":"95"} +{"seq_id":"18132119788","text":"import asyncio\nfrom json import JSONDecodeError\n\nfrom fastapi import WebSocket\nfrom pydantic import ValidationError\n\nfrom .schemas import ErrorResponse, HouseRequest, HouseBase, HouseAdjustments\nfrom .internal.errors import ParseError\nfrom .internal.house_processing import calculate_adjustments\nfrom .internal.auth import verify_user_token\nfrom .parsers.avito import AvitoParser\nfrom app import config\n\nasync def searcher_endpoint(ws: WebSocket):\n await ws.accept()\n\n if 'token' not in ws.query_params or \\\n not verify_user_token(ws.query_params['token']):\n await ws.send_json(\n ErrorResponse(error='Invalid token').dict(exclude_none=True)\n )\n await ws.close()\n return\n\n houses: list[tuple[HouseBase, HouseAdjustments]] = []\n\n try:\n req_json = await asyncio.wait_for(\n ws.receive_json(),\n config.SEARCHER_QUERY_TIMEOUT\n )\n except TimeoutError:\n await ws.send_json(\n ErrorResponse(error='Request wait timeout').dict(exclude_none=True)\n )\n await ws.close()\n return\n except JSONDecodeError:\n await ws.send_json(\n ErrorResponse(error='JSON parse error').dict(exclude_none=True)\n )\n await ws.close()\n return\n except Exception:\n await ws.send_json(\n ErrorResponse(error='Unresolved error').dict(exclude_none=True)\n )\n await ws.close()\n return\n \n try:\n req = HouseRequest.parse_obj(req_json)\n except ValidationError:\n await ws.send_json(\n ErrorResponse(error='Invalid request').dict(exclude_none=True)\n )\n await ws.close()\n return\n \n id_counter = 0\n async with AvitoParser(req.house.location) as parser:\n while len(houses) < req.max_house_count:\n try:\n house = await parser.parse_next()\n except ParseError:\n continue\n \n if house is None:\n break\n\n house.id = id_counter\n id_counter += 1\n\n houses.append((\n house,\n calculate_adjustments(req.house, house, req.adjustments)\n ))\n houses.sort(key=lambda t: t[1].calc_size(t[0]))\n\n await ws.send_json(list(map(lambda t: 
t[0].dict(), houses)))\n\n await ws.close()\n","repo_name":"arsuhinars/ADEDA","sub_path":"backend/app/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39404236234","text":"# https://practice.geeksforgeeks.org/problems/split-a-circular-linked-list-into-two-halves/1?page=1&difficulty[]=-2&difficulty[]=-1&difficulty[]=0&status[]=unsolved&curated[]=7&sortBy=submissions\r\n\r\n\r\n# https://www.youtube.com/watch?v=VdGIR91xlaM\r\ndef splitList(self, head, head1, head2):\r\n\r\n slow = head\r\n\r\n # so slow should alway point to second mid(in case odd value)\r\n fast = head.next\r\n\r\n while fast != head and fast.next != head:\r\n slow = slow.next\r\n fast = fast.next.next\r\n\r\n head2 = slow.next\r\n head1 = head\r\n slow.next = head\r\n\r\n cur = head2\r\n while cur.next != head:\r\n cur = cur.next\r\n\r\n cur.next = head2\r\n\r\n # this is to emulate pass by reference in python please don't delete below line.\r\n return head1, head2\r\n","repo_name":"nerds-coding/python_dsa","sub_path":"DS/6_LinkedList/13_split_circular_ll.py","file_name":"13_split_circular_ll.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32584335773","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef test01():\n browser = webdriver.Firefox()\n browser.get('https://duckduckgo.com')\n browser.save_screenshot('duckduckgo.png')\n time.sleep(5)\n browser.get('https://google.com')\n browser.refresh()\n browser.quit() \n \ndef test02(): \n browser = webdriver.Firefox()\n browser.get('https://youtube.com')\n # time.sleep(2)\n # xpath = '/html/body/ytd-app/div[1]/div/ytd-masthead/div[3]/div[3]/div[2]/ytd-button-renderer/a/tp-yt-paper-button/yt-formatted-string'\n # browser.find_element(By.XPATH, xpath).click()\n # time.sleep(2)\n # login_xpath = '/html/body/div[1]/div[1]/div[2]/div/c-wiz/div/div[2]/div/div[1]/div/form/span/section/div/div/div[1]/div/div[1]/div/div[1]/input'\n # browser.find_element(By.XPATH, login_xpath).send_keys('victorsmirnov67@gmail.com')\n # next_xpath = '/html/body/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div[2]/div/div[1]/div/div/button'\n # browser.find_element(By.XPATH, next_xpath).click()\n \n html = browser.find_element(By.TAG_NAME, 'html')\n for _ in range(10):\n html.send_keys(Keys.DOWN)\n \ndef main():\n # test01() \n test02()\n \n\nif __name__ == '__main__':\n main()","repo_name":"Victorvs1967/python-selenium","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16316711364","text":"import os\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n plt.rcParams.update({'font.size': 14})\n\n CASE_NUMBER = os.getenv('CASE')\n\n if CASE_NUMBER == '1' or CASE_NUMBER == '2':\n file_name = ''\n if CASE_NUMBER == '1':\n file_name = './first_data_set_measures.txt'\n elif CASE_NUMBER == '2':\n file_name = './second_data_set_measures.txt'\n\n f = open(file_name)\n\n x = []\n bin_search_on_rows = []\n ladder_solve = []\n ladder_exp_solve = []\n\n for line in f:\n m, bin_search_result, ladder_result, ladder_exp_result = map(float, line.split())\n x.append(m)\n bin_search_on_rows.append(bin_search_result)\n 
ladder_solve.append(ladder_result)\n ladder_exp_solve.append(ladder_exp_result)\n\n plt.plot(x, bin_search_on_rows, \"r-\", label=\"bin search on rows O(M*log(N))\")\n plt.plot(x, ladder_solve, \"g-\", label=\"ladder algorithm O(N + M)\")\n plt.plot(x, ladder_exp_solve, \"b-\", label=\"ladder with exp search algorithm O(M*(log(N) + log(M) + 1))\")\n plt.yscale('log')\n plt.xlabel(\"M\")\n plt.ylabel(\"time in milliseconds\")\n plt.legend(title=\"algorithm\")\n plt.title('Сравнение алгоритмов на первых данных' if CASE_NUMBER == '1' else 'Сравнение алгоритмов на вторых данных')\n\n plt.show()\n elif CASE_NUMBER == '3':\n first_case_results = open('./first_data_set_measures.txt')\n second_case_results = open('./second_data_set_measures.txt')\n\n x = []\n ladder_exp_first = []\n ladder_exp_second = []\n\n for line in first_case_results:\n m, _, _, ladder_exp_time = map(float, line.split())\n x.append(m)\n ladder_exp_first.append(ladder_exp_time)\n\n for line in second_case_results:\n m, _, _, ladder_exp_time = map(float, line.split())\n ladder_exp_second.append(ladder_exp_time)\n\n ladder_exp_ratio = [ ladder_exp_first[i]/ladder_exp_second[i] for i in range(len(ladder_exp_first)) ]\n plt.plot(x, ladder_exp_ratio , \"r-\", label=\"отношение времен на разных даннх\")\n plt.xlabel(\"M\")\n plt.ylabel(\"отношение времени\")\n plt.legend()\n plt.title(\"ladder_exp_gen1/ladder_exp_gen2\")\n plt.xscale('log')\n\n plt.show()\n else:\n raise RuntimeError('Required CASE environment variable. Try to use CASE=1 python plot.py')\n","repo_name":"sandwander26/algorithms-lab1","sub_path":"Лабораторная работа 1 Артемьев Б.М/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18777367337","text":"import re\nimport json\nimport yaml\nfrom enum import Enum\nfrom typing import Any, Union\nfrom pathlib import Path\n\nfrom .shot import Shot\nfrom .context import Context\nfrom .dialogue import Dialogue, Role\nfrom .model import Model, OpenAIModel\n\nclass PromptFormat(Enum):\n RAW_TEXT = \"raw_text\"\n JSON = \"json\"\n\nclass PromptMode(Enum):\n STANDARD = \"standard\"\n DRCoT = \"drcot\"\n\nclass Action(Enum):\n GREETING = \"greeting\"\n ASK_FINDING = \"ask_finding\"\n MAKE_DIAGNOSIS = \"make_diagnosis\"\n\nclass ReasoningStep(Enum):\n POS_FINDINGS = \"positive clinical findings\"\n NEG_FINDINGS = \"negative clinical findings\"\n RANKED_DDX = \"ranked differential diagnosis\"\n ASK_FINDING = \"the clinical finding to ask about\"\n QUESTION = \"question\"\n DX_RATIONALE = \"rationale\"\n FINAL_DIAGNOSIS = \"most likely diagnosis\"\n\nclass Bot(object):\n \"\"\"The chat bot playing a given role.\"\"\"\n history_taking_msg = \"\"\n NONE_RESPONSE = \"NONE\"\n\n def __init__(\n self,\n prefix_instruction: str,\n shots: list[Shot],\n context: Context,\n dialogue: Dialogue,\n suffix_instruction: str,\n model: Model\n ):\n self.prefix_instruction = prefix_instruction\n self.shots = shots\n self.context = context\n self.dialogue = dialogue\n self.suffix_instruction = suffix_instruction\n self.model = model\n self.role = None\n\n def get_completion_prompt(self) -> str:\n \"\"\"Get the completion prompt for the bot.\"\"\"\n raise NotImplementedError\n\n def get_chatcompletion_prompt(self) -> list[dict[str, str]]:\n \"\"\"Get the chatcompletion prompt for the bot.\"\"\"\n raise NotImplementedError\n \n def get_prompt(self) -> Any:\n if self.model.config[\"model\"] in 
self.model.chatcompletion_models:\n prompt = self.get_chatcompletion_prompt()\n else:\n prompt = self.get_completion_prompt()\n return prompt\n\n def get_role_string(self) -> str:\n return self.role.value[0].upper() + self.role.value[1:]\n\n def respond(self, utterance: str) -> str:\n \"\"\"Respond to the counterpart chatbot's utterance.\"\"\"\n self.dialogue.add_utterance(self.opposite_role, utterance)\n\n prompt = self.get_prompt()\n response = self.model.generate(prompt)\n if response is None:\n response = self.NONE_RESPONSE\n else:\n response = response.strip()\n\n self.dialogue.add_utterance(self.role, response)\n return response\n \n def reset(self, context: Context, dialogue: Dialogue = None) -> None:\n self.set_context(context)\n self.clear_dialogue()\n if dialogue is not None:\n self.dialogue = dialogue\n \n def set_context(self, context: Context) -> None:\n \"\"\"Set the context for the bot.\"\"\"\n self.context = context\n \n def clear_dialogue(self) -> None:\n \"\"\"Clear the dialogue for the bot.\"\"\"\n self.dialogue = Dialogue([]) # [] is necessary to create a new dialogue object\n\nclass PatientBot(Bot):\n \"\"\"The chat bot playing the patient role.\"\"\"\n\n def __init__(\n self,\n prefix_instruction: str,\n shots: list[Shot],\n context: Context,\n dialogue: Dialogue,\n suffix_instruction: str,\n model: Model\n ):\n super().__init__(\n prefix_instruction,\n shots,\n context,\n dialogue,\n suffix_instruction,\n model\n )\n self.role = Role.PATIENT\n self.opposite_role = Role.DOCTOR\n\n def get_completion_prompt(self) -> str:\n \"\"\"Get the completion prompt (a whole string) for the bot.\"\"\"\n sents = []\n for shot in self.shots:\n sents.append(self.prefix_instruction)\n sents.append(shot.context.text())\n sents.append('')\n sents.append(self.history_taking_msg)\n for d in shot.dialogue.data:\n role_str = d[\"role\"][0].upper() + d[\"role\"][1:]\n sent = f\"{role_str}: {d['utterance']}\"\n sents.append(sent)\n sents.append('')\n sents.append(self.prefix_instruction)\n sents.append(self.context.text())\n sents.append('')\n sents.append(self.history_taking_msg)\n for d in self.dialogue.data:\n role_str = d[\"role\"][0].upper() + d[\"role\"][1:]\n sent = f\"{role_str}: {d['utterance']}\"\n sents.append(sent)\n suffix = ''\n if self.suffix_instruction:\n suffix = self.suffix_instruction\n return '\\n'.join(sents) + f\"\\n{self.get_role_string()}: {suffix}\"\n\n def get_chatcompletion_prompt(self) -> list[dict[str, str]]:\n if self.role is None:\n raise ValueError(\"Bot role is None.\")\n msgs = []\n for shot in self.shots:\n # system message: prefix_instruction + shot_context_text\n msgs.append({\n \"role\": \"system\",\n \"content\": self.prefix_instruction + '\\n' + shot.context.text()\n })\n # dialogue\n for d in shot.dialogue.data:\n msgs.append({\n \"role\": \"system\",\n \"name\": \"example_\" + d[\"role\"], # help clarify that this is an example\n \"content\": d[\"utterance\"]\n })\n # current system message: prefix_instruction + context_text\n msgs.append({\n \"role\": \"system\",\n \"content\": self.prefix_instruction + '\\n' + self.context.text()\n })\n # current diaglogue\n for d in self.dialogue.data:\n msgs.append({\n \"role\": \"assistant\" if d[\"role\"] == self.role.value else \"user\",\n \"name\": d[\"role\"],\n \"content\": d[\"utterance\"]\n })\n # suffix_instruction if it exists\n if self.suffix_instruction:\n msgs.append({\n \"role\": \"system\",\n \"content\": self.suffix_instruction\n })\n return msgs\n\n def inform_initial_evidence(self, utterance: 
str) -> str:\n \"\"\"Inform the initial evidence to the doctor.\"\"\"\n self.dialogue.add_utterance(self.opposite_role, utterance)\n response = self.context.initial_evidence\n self.dialogue.add_utterance(self.role, response)\n return response\n \n @property\n def state(self) -> dict[str, Any]:\n \"\"\"Get the state of the bot.\"\"\"\n return {\n \"prefix_instruction\": self.prefix_instruction,\n \"shots\": [\n {\n \"context\": shot.context.text(),\n \"dialogue\": shot.dialogue.data\n } for shot in self.shots\n ],\n \"context\": self.context.text(),\n \"dialogue\": self.dialogue.data,\n \"suffix_instruction\": self.suffix_instruction,\n \"model\": self.model.config,\n \"prompt\": self.get_prompt()\n }\n\nclass DoctorBot(Bot):\n \"\"\"The chat bot playing the doctor role.\"\"\"\n greeting_msg = {\n \"action\": \"greeting\",\n \"question\": \"How may I help you today?\"\n }\n final_diagnosis_msg = \"Based on your description, the most likely diagnosis is\"\n ask_finding_prefix = \"[Ask finding]\"\n make_diagnosis_prefix = \"[Make diagnosis]\"\n\n def __init__(\n self,\n prefix_instruction: str,\n shots: list[Shot],\n context: Context,\n dialogue: Dialogue,\n suffix_instruction: str,\n suffix_instructions: dict[str, str], # the doctor has different suffix instructions fr \"ask_finding\" and \"make_diagnosis\"\n model: Model,\n max_ddx: int,\n prompt_mode: str,\n prompt_format: str,\n ):\n super().__init__(\n prefix_instruction,\n shots,\n context,\n dialogue,\n suffix_instruction,\n model\n )\n self.role = Role.DOCTOR\n self.opposite_role = Role.PATIENT\n self.suffix_instructions = suffix_instructions\n self.max_ddx = max_ddx\n self.set_max_ddx()\n if prompt_mode not in [p.value for p in PromptMode]:\n raise ValueError(f\"Invalid prompt mode: {prompt_mode}\")\n self.prompt_mode = prompt_mode\n if prompt_format not in [p.value for p in PromptFormat]:\n raise ValueError(f\"Invalid prompt format: {prompt_format}\")\n self.prompt_format = prompt_format\n\n def parse_utterance(self, utterance: Union[str, dict[str, Any]]) -> str:\n \"\"\"Parse the utterance according to prompt_mode and prompt_format.\"\"\"\n if isinstance(utterance, str):\n return utterance\n if self.prompt_mode == PromptMode.STANDARD.value:\n if self.prompt_format == PromptFormat.JSON.value:\n d = {\"action\": utterance[\"action\"]}\n if utterance[\"action\"] == Action.MAKE_DIAGNOSIS.value:\n d[ReasoningStep.FINAL_DIAGNOSIS.value] = utterance[ReasoningStep.FINAL_DIAGNOSIS.value]\n elif utterance[\"action\"] in [Action.ASK_FINDING.value, Action.GREETING.value]:\n d[ReasoningStep.QUESTION.value] = utterance[ReasoningStep.QUESTION.value]\n else:\n raise ValueError(f\"Invalid action: {utterance['action']}\")\n return json.dumps(d)\n elif self.prompt_format == PromptFormat.RAW_TEXT.value:\n prefix = ''\n if utterance[\"action\"] == Action.MAKE_DIAGNOSIS.value:\n prefix = self.make_diagnosis_prefix + ' '\n text = f\"{self.final_diagnosis_msg} {utterance[ReasoningStep.FINAL_DIAGNOSIS.value]}.\"\n elif utterance[\"action\"] in [Action.ASK_FINDING.value, Action.GREETING.value]:\n if utterance[\"action\"] == Action.ASK_FINDING.value:\n prefix = self.ask_finding_prefix + ' '\n text = utterance[ReasoningStep.QUESTION.value]\n else:\n raise ValueError(f\"Invalid action: {utterance['action']}\")\n return prefix + text\n else:\n raise ValueError(f\"Invalid prompt format: {self.prompt_format}\")\n elif self.prompt_mode == PromptMode.DRCoT.value:\n if self.prompt_format == PromptFormat.JSON.value:\n raise NotImplementedError\n elif 
self.prompt_format == PromptFormat.RAW_TEXT.value:\n sents = []\n if utterance[\"action\"] == Action.GREETING.value:\n sents.append(utterance[ReasoningStep.QUESTION.value])\n elif utterance[\"action\"] in [Action.ASK_FINDING.value, Action.MAKE_DIAGNOSIS.value]:\n if utterance[\"action\"] == Action.ASK_FINDING.value:\n prefix = self.ask_finding_prefix\n else: # Action.MAKE_DIAGNOSIS.value\n prefix = self.make_diagnosis_prefix\n symptom_review = f\"\"\"Based on the {ReasoningStep.POS_FINDINGS.value} '{\", \".join(utterance[ReasoningStep.POS_FINDINGS.value])}' and the {ReasoningStep.NEG_FINDINGS.value} '{\", \".join(utterance[ReasoningStep.NEG_FINDINGS.value])}',\"\"\"\n dd_formulation = f\"\"\"the {ReasoningStep.RANKED_DDX.value} is '{\", \".join(utterance[ReasoningStep.RANKED_DDX.value])}'.\"\"\"\n sents += [prefix, symptom_review, dd_formulation]\n if utterance[\"action\"] == Action.ASK_FINDING.value:\n next_inquiry = f\"\"\"To narrow down the {ReasoningStep.RANKED_DDX.value}, {ReasoningStep.ASK_FINDING.value} is '{utterance[ReasoningStep.ASK_FINDING.value]}'.\"\"\"\n question = f\"\"\"[{ReasoningStep.QUESTION.value}] {utterance[ReasoningStep.QUESTION.value]}\"\"\"\n sents += [next_inquiry, question]\n else: # Action.MAKE_DIAGNOSIS.value\n dx_rationale = utterance[ReasoningStep.DX_RATIONALE.value]\n final_dx = f\"\"\"[{ReasoningStep.FINAL_DIAGNOSIS.value}] {utterance[ReasoningStep.FINAL_DIAGNOSIS.value]}\"\"\"\n sents += [dx_rationale, final_dx]\n return ' '.join(sents)\n else:\n raise ValueError(f\"Invalid prompt format: {self.prompt_format}\")\n else:\n raise ValueError(f\"Invalid prompt mode: {self.prompt_mode}\")\n\n def get_completion_prompt(self) -> str:\n \"\"\"Get the completion prompt (a whole string) for the bot.\"\"\"\n instruction = self.prefix_instruction + '\\n' + self.context.text() + '\\n'\n sents = [instruction]\n for shot in self.shots:\n sents.append(self.history_taking_msg)\n for d in shot.dialogue.data:\n role_str = d[\"role\"][0].upper() + d[\"role\"][1:]\n sent = f\"{role_str}: {self.parse_utterance(d['utterance'])}\"\n sents.append(sent)\n sents.append('')\n sents.append(self.history_taking_msg)\n for d in self.dialogue.data:\n role_str = d[\"role\"][0].upper() + d[\"role\"][1:]\n sent = f\"{role_str}: {self.parse_utterance(d['utterance'])}\"\n sents.append(sent)\n suffix = ''\n if self.suffix_instruction:\n suffix = self.suffix_instruction\n return '\\n'.join(sents) + f\"\\n{self.get_role_string()}: {suffix}\"\n\n def get_chatcompletion_prompt(self) -> list[dict[str, str]]:\n if self.role is None:\n raise ValueError(\"Bot role is None.\")\n msgs = []\n for shot in self.shots:\n # system message: prefix_instruction + shot_context_text\n msgs.append({\n \"role\": \"system\",\n \"content\": self.prefix_instruction + '\\n' + shot.context.text()\n })\n # dialogue\n for d in shot.dialogue.data:\n msgs.append({\n \"role\": \"system\",\n \"name\": \"example_\" + d[\"role\"], # help clarify that this is an example\n \"content\": d[\"utterance\"] if d[\"role\"] == Role.PATIENT.value else self.parse_utterance(d[\"utterance\"])\n })\n # current system message: prefix_instruction + context_text\n msgs.append({\n \"role\": \"system\",\n \"content\": self.prefix_instruction + '\\n' + self.context.text()\n })\n # current diaglogue\n for d in self.dialogue.data:\n msgs.append({\n \"role\": \"assistant\" if d[\"role\"] == self.role.value else \"user\",\n \"name\": d[\"role\"],\n \"content\": d[\"utterance\"]\n })\n # suffix_instruction if it exists\n if self.suffix_instruction:\n 
msgs.append({\n \"role\": \"system\",\n \"content\": self.suffix_instruction\n })\n return msgs\n \n def set_max_ddx(self) -> None:\n \"\"\"Set the maximum number of differential diagnoses.\"\"\"\n for shot in self.shots:\n for turn in shot.dialogue.data:\n if turn[\"role\"] == self.role.value and isinstance(turn[\"utterance\"], dict) and \"ranked differential diagnosis\" in turn[\"utterance\"]:\n turn[\"utterance\"][\"ranked differential diagnosis\"] = turn[\"utterance\"][\"ranked differential diagnosis\"][:self.max_ddx]\n \n def set_suffix_instruction(self, suffix_instruction: str) -> None:\n \"\"\"Set the suffix instruction for the bot.\"\"\"\n self.suffix_instruction = suffix_instruction\n \n def greeting(self, utterance: str = \"\") -> str:\n if utterance:\n self.dialogue.add_utterance(self.opposite_role, utterance)\n if self.prompt_format == PromptFormat.JSON.value:\n greeting_msg = json.dumps(self.greeting_msg)\n elif self.prompt_format == PromptFormat.RAW_TEXT.value:\n greeting_msg = self.greeting_msg[\"question\"]\n else:\n raise ValueError(f\"Invalid prompt format: {self.prompt_format}\")\n self.dialogue.add_utterance(role=self.role, utterance=greeting_msg)\n return self.greeting_msg[\"question\"]\n \n def ask_basic_info(self, utterance: str = \"\") -> str:\n if utterance:\n self.dialogue.add_utterance(self.opposite_role, utterance)\n else:\n raise ValueError(\"Utterance is empty.\")\n self.dialogue.add_utterance(self.role, self.ask_basic_info_msg)\n d = json.loads(self.ask_basic_info_msg)\n return d[\"question\"]\n\n def parse_response(self, response: str, key: str) -> str:\n if self.prompt_format == PromptFormat.JSON.value:\n try:\n d = json.loads(response)\n except:\n print(f\"===== Error response =====\\n{response}\\n\")\n raise ValueError(\"Response is not a valid JSON string.\")\n return d[key]\n elif self.prompt_format == PromptFormat.RAW_TEXT.value:\n if self.prompt_mode == PromptMode.STANDARD.value:\n return response\n elif self.prompt_mode == PromptMode.DRCoT.value:\n found = re.findall(f\"\\\\[{key}\\\\] (.*)\", response)\n if len(found) > 0:\n response = found[0]\n elif key == ReasoningStep.QUESTION.value: # len(found) == 0\n dx = re.findall(f\"\\\\[{ReasoningStep.FINAL_DIAGNOSIS.value}\\\\] (.*)\", response) # early termination of the dialogue (the doctor bot make a diagnosis)\n if len(dx) > 0:\n response = dx[0]\n else:\n response = self.NONE_RESPONSE\n elif key == ReasoningStep.FINAL_DIAGNOSIS.value: # len(found) == 0\n response = self.NONE_RESPONSE\n else:\n raise ValueError(f\"Invalid key: {key}\")\n return response\n else:\n raise ValueError(f\"Invalid prompt format: {self.prompt_format}\")\n\n def respond(self, utterance: str) -> str:\n \"\"\"Respond to the PatientBot's utterance.\"\"\"\n self.dialogue.add_utterance(self.opposite_role, utterance)\n\n prompt = self.get_prompt()\n response = self.model.generate(prompt)\n if response is None:\n response = self.NONE_RESPONSE\n else:\n response = response.strip()\n\n self.dialogue.add_utterance(self.role, self.suffix_instruction + ' ' + response)\n return response\n\n def ask_finding(self, utterance: str) -> str:\n self.set_suffix_instruction(f\"{self.ask_finding_prefix}\")\n response = self.respond(utterance)\n return self.parse_response(response, key=ReasoningStep.QUESTION.value)\n\n def make_diagnosis(self, utterance: str) -> str:\n self.set_suffix_instruction(f\"{self.make_diagnosis_prefix}\")\n response = self.respond(utterance)\n return self.parse_response(response, key=ReasoningStep.FINAL_DIAGNOSIS.value)\n\n 
@property\n def state(self) -> dict[str, Any]:\n \"\"\"Get the state of the bot.\"\"\"\n return {\n \"prefix_instruction\": self.prefix_instruction,\n \"shots\": [\n {\n \"context\": shot.context.text(),\n \"dialogue\": shot.dialogue.data\n } for shot in self.shots\n ],\n \"context\": self.context.text(),\n \"dialogue\": self.dialogue.data,\n \"suffix_instructions\": self.suffix_instructions,\n \"model\": self.model.config,\n \"prompt\": self.get_prompt()\n }\n\n# manual unit tests\nif __name__ == \"__main__\":\n from data import DDxDataset\n # Load dataset\n csv_path = \"../../ddxplus/release_test_patients.csv\"\n pathology_info_path = \"../../ddxplus/release_conditions.json\"\n evidences_info_path = \"../../ddxplus/our_evidences_to_qa_v2.json\"\n\n dataset = DDxDataset(csv_path, pathology_info_path, evidences_info_path)\n indices = [98595] #, 123464, 86477, 9209, 98151]\n pats = dataset.df.iloc[indices]\n print(\"Dataset loaded.\")\n\n # test PatientBot.get_chatcompletion_prompt() and DoctorBot.get_chatcompletion_prompt()\n with open(\"../../experiments/configs/debug.yml\") as f:\n args = yaml.safe_load(f)\n \n patient_prompt = json.loads(Path(\"../../prompts/patient/standard.json\").read_bytes())\n \n patient_bot = PatientBot(\n prefix_instruction=patient_prompt[\"prefix_instruction\"],\n shots=[\n Shot(\n context=Context(raw_text=shot[\"context\"]),\n dialogue=Dialogue(data=shot[\"dialogue\"])\n ) for shot in patient_prompt[\"shots\"]\n ],\n context=Context(raw_text=patient_prompt[\"context\"]),\n dialogue=Dialogue(data=patient_prompt[\"dialogue\"]),\n suffix_instruction=patient_prompt[\"suffix_instruction\"],\n model=OpenAIModel(config=args[\"patient\"][\"model_config\"])\n )\n \n question = \"What's your sex and age?\"\n \n from context import PatientContext\n for _, pat in pats.iterrows():\n print(f\"Patient: {pat.AGE} yo {pat.SEX}\")\n patient_bot.reset(\n context=PatientContext(\n sex=pat.SEX,\n age=pat.AGE,\n initial_evidence=pat.INITIAL_EVIDENCE,\n evidences=pat.EVIDENCES\n )\n )\n response = patient_bot.respond(question)\n print(response)\n print(patient_bot.dialogue.data)\n print(patient_bot.get_chatcompletion_prompt())\n\n # doctor_prompt = json.loads(Path(\"../../prompts/doctor/debug.json\").read_bytes())\n # doctor_bot = DoctorBot(\n # prefix_instruction=doctor_prompt[\"prefix_instruction\"],\n # shots=[\n # Shot(\n # context=Context(raw_text=shot[\"context\"]),\n # dialogue=Dialogue(data=shot[\"dialogue\"])\n # ) for shot in doctor_prompt[\"shots\"]\n # ],\n # context=Context(raw_text=doctor_prompt[\"context\"]),\n # dialogue=Dialogue(data=doctor_prompt[\"dialogue\"]),\n # suffix_instruction=doctor_prompt[\"suffix_instruction\"],\n # model=OpenAIModel(config=args[\"doctor\"][\"model_config\"])\n # )\n \n # answer = \"No. 
I don't have a fever.\"\n # response = doctor_bot.respond(answer)\n # print(response)\n # print(doctor_bot.dialogue.data)\n","repo_name":"Brian-Ckwu/dr-cot","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":22318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34047652602","text":"from ctre import FeedbackDevice\nfrom ctre.talonsrx import TalonSRX\nfrom wpilib.command import Subsystem\n\nimport Logger\nfrom control.BlazeTalon import BlazeTalon\nfrom control.smartdrive import SmartRobotDrive\n\n\nclass Drivetrain(Subsystem):\n def __init__(self):\n super().__init__(\"Drivetrain\")\n\n self.talon_left_rear = BlazeTalon(0)\n self.talon_left_front = BlazeTalon(1)\n\n self.setup_talons(self.talon_left_rear, self.talon_left_front)\n\n self.talon_right_rear = BlazeTalon(2)\n self.talon_right_front = BlazeTalon(3)\n\n self.setup_talons(self.talon_right_rear, self.talon_right_front, invert=True)\n\n self.robotdrive = SmartRobotDrive(self.talon_left_rear, self.talon_right_rear)\n\n self.dt_logger = Logger.Logger(\"Drivetrain\")\n self.dt_logger.add(\"Left Speed\", self.robotdrive.get_left_speed)\n self.dt_logger.add(\"Right Speed\", self.robotdrive.get_right_speed)\n self.dt_logger.add(\"Right Voltage\", self.robotdrive.get_right_voltage)\n self.dt_logger.add(\"Left Voltage\", self.robotdrive.get_left_voltage)\n # self.dt_logger.start()\n\n def setup_talons(self, master: TalonSRX, slave: TalonSRX, invert=False,\n pidIdx=0, timeoutMs=0, brake=True):\n slave.follow(master)\n master.configSelectedFeedbackSensor(FeedbackDevice.CTRE_MagEncoder_Relative, pidIdx, timeoutMs)\n\n master.enableVoltageCompensation(True)\n master.configOpenLoopRamp(1/3, timeoutMs)\n master.configContinuousCurrentLimit(50, timeoutMs)\n master.configPeakCurrentLimit(80, timeoutMs)\n master.configPeakCurrentDuration(500, timeoutMs)\n\n neut_mode = TalonSRX.NeutralMode.Brake if brake else TalonSRX.NeutralMode.Coast\n master.setNeutralMode(neut_mode)\n slave.setNeutralMode(neut_mode)\n\n master.setSensorPhase(False)\n master.setInverted(invert)\n slave.setInverted(invert)\n\n def set_brake(self, brake=True):\n mode = TalonSRX.NeutralMode.Brake if brake else TalonSRX.NeutralMode.Coast\n self.talon_left_front.setNeutralMode(mode)\n self.talon_left_rear.setNeutralMode(mode)\n self.talon_right_front.setNeutralMode(mode)\n self.talon_right_rear.setNeutralMode(mode)\n\n def set_ramp(self, ramp=0):\n self.talon_left_rear.configOpenLoopRamp(ramp, 0)\n self.talon_right_rear.configOpenLoopRamp(ramp, 0)\n\n def arcade_drive(self, drive_power, turn_power):\n self.robotdrive.arcade_drive(drive_power, turn_power)\n\n def tank_drive(self, left_power, right_power):\n self.robotdrive.tank_drive(left_power, right_power)\n\n def curvature_drive(self, drive_power, turn_command):\n self.robotdrive.radius_drive(drive_power, turn_command)\n\n def arc(self, speed, radius):\n self.robotdrive.radius_turn(speed, radius)\n\n def straight(self, speed):\n self.robotdrive.drive_profile_open_loop(speed, speed, 0, 0)\n\n def turn(self, turn_pow):\n self.robotdrive.drive_profile_open_loop(-turn_pow, turn_pow, 0, 0)\n","repo_name":"FRC3184/frc2018","sub_path":"systems/drivetrain.py","file_name":"drivetrain.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"42591617230","text":"#!/usr/bin/env python3\n\nimport os\nimport platform\nimport argparse\nimport importlib\nimport 
threading\nimport subprocess\n\nfrom utils.scan import VTscan\nfrom utils.record import logger\nfrom utils.screen import printer\nfrom utils.yaraParser import parser\nfrom utils.download import doublecheck\n\nfrom typing import Dict, List\n\ndef takeAction(\n mName: str, iDic: Dict) -> None:\n if (mName == \"\" or mName is None):\n return False\n\n aModule = importlib.import_module('actions.{}'.format(mName))\n mTrigget = aModule.trigger(iDic)\n\n\ndef main() -> None:\n plat = platform.system()\n s_printer = printer(plat)\n\n argumentParser = argparse.ArgumentParser(\"VTI-Cosplay\")\n argumentParser.add_argument(\n '-y', '--yara-file',\n nargs=1,\n required=True,\n help='YARA file'\n )\n argumentParser.add_argument(\n '-k', '--api-key',\n nargs=1,\n required=False,\n help='Virustotal API key'\n )\n argumentParser.add_argument(\n '-l', '--limit',\n nargs=1,\n required=False,\n help='Limit total matched sample count')\n argumentParser.add_argument(\n '-a', '--action',\n nargs=1,\n required=False,\n help='Action module to trigger for matched samples')\n argumentParser.add_argument(\n '--livehunt',\n action='store_true',\n required=False,\n help='Create scheduled task for the YARA file provided.\\\n When a new sample is out there it prints and stores')\n argumentParser.add_argument(\n '-f', '--fast',\n action='store_true',\n required=False,\n help='Fast scan by reducing the data that is transferred')\n argumentParser.add_argument(\n '-v', '--verbose',\n action='store_true',\n required=False,\n help='Verbose output')\n argumentParser.add_argument(\n '-i', '--i-dont-trust-you',\n nargs=1,\n required=False,\n help='At the end, it downloads matched files\\\n and does YARA scan against them')\n args = argumentParser.parse_args()\n\n try:\n api_key = args.api_key[0]\n except TypeError as e:\n api_key = os.environ.get('VT_API_KEY')\n except Exception as e:\n print(e)\n exit()\n\n r_logger = logger(\n plat,\n s_printer,\n args.yara_file[0])\n livehunt = args.livehunt\n verbose = args.verbose\n fast = args.fast\n if (args.limit is not None):\n limit = args.limit[0]\n else:\n limit = 0\n\n if (args.i_dont_trust_you is not None):\n path = args.i_dont_trust_you[0]\n else:\n path = \"\"\n \n param = {\n 'file': args.yara_file[0],\n 'key': api_key,\n 'limit': limit,\n 'printer': s_printer,\n 'logger': r_logger,\n 'verbose': verbose,\n 'fast': fast,\n 'livehunt': livehunt}\n\n yara = parser(param)\n pDic = yara.parse()\n arr = pDic.get('arr', [])\n meta = pDic.get('meta', {})\n scan = VTscan(arr, param)\n res = scan.evaluate()\n\n if (args.i_dont_trust_you is not None):\n try:\n stdout, strerr = subprocess.Popen(\n ['yara', '-v'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n d_doublecheck = doublecheck(\n res, path,\n api_key, args.yara_file[0],\n s_printer)\n yRes = d_doublecheck.orchestrator()\n except FileNotFoundError as e:\n print(e)\n print(\"[-] YARA must be installed. 
\\\n I don't trust you scan is aborted.\\n\")\n d_doublecheck = None\n yRes = res\n finally:\n hList = yRes\n else:\n hList = res\n d_doublecheck = None\n\n if (args.action is not None and\n hList != [] and len(hList) > 0):\n try:\n thread1 = threading.Thread(\n target=s_printer.status,\n args=(\"Taking\", \"Took\", \"the actions\",),\n daemon=True)\n thread1.start()\n paramA = {\n 'res': hList,\n 'meta': meta,\n 'yara': args.yara_file[0],\n 'key': api_key,\n 'path': path\n }\n takeAction(args.action[0], paramA)\n s_printer.finished = True\n thread1.join()\n except Exception as e:\n print(e)\n\n if (hList != [] and len(hList) > 0):\n if (d_doublecheck is not None):\n r_logger.logResults(d_doublecheck.sArr)\n else:\n r_logger.logResults(hList)\n print()\n s_printer.prettyPrint(hList)\n\nif (__name__ == \"__main__\"):\n main()\n","repo_name":"r00tten/VTI-Cosplay","sub_path":"vti-cosplay.py","file_name":"vti-cosplay.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"95"} +{"seq_id":"38015588286","text":"from collections.abc import Iterable\n\nfrom django.db import transaction\nfrom fullctl.django.rest.decorators import serializer_registry\nfrom fullctl.django.rest.serializers import ModelSerializer\nfrom rest_framework import serializers\n\nimport django_devicectl.models.devicectl as models\n\nSerializers, register = serializer_registry()\n\n\n@register\nclass Facility(ModelSerializer):\n org_id = serializers.SerializerMethodField()\n\n class Meta:\n model = models.Facility\n fields = [\n \"id\",\n \"name\",\n \"slug\",\n \"reference\",\n \"reference_is_sot\",\n \"instance\",\n \"org_id\",\n ]\n\n def get_org_id(self, device):\n return device.instance.org.permission_id\n\n\n@register\nclass Device(ModelSerializer):\n org_id = serializers.SerializerMethodField()\n facility_slug = serializers.SerializerMethodField()\n\n class Meta:\n model = models.Device\n fields = [\n \"id\",\n \"name\",\n \"display_name\",\n \"reference\",\n \"reference_is_sot\",\n \"description\",\n \"status\",\n \"type\",\n \"instance\",\n \"org_id\",\n \"facility_id\",\n \"facility_slug\",\n ]\n\n def get_org_id(self, device):\n return device.instance.org.permission_id\n\n def get_facility_slug(self, device):\n try:\n return device.facility.slug\n except AttributeError:\n # not assigned to facility\n return None\n\n\n@register\nclass Port(ModelSerializer):\n org_id = serializers.SerializerMethodField()\n\n ip_address_4 = serializers.CharField(\n read_only=True, source=\"port_info.ip_address_4\"\n )\n ip_address_6 = serializers.CharField(\n read_only=True, source=\"port_info.ip_address_6\"\n )\n is_management = serializers.BooleanField(\n read_only=True, source=\"port_info.is_management\"\n )\n\n logical_port_name = serializers.SerializerMethodField()\n virtual_port_name = serializers.SerializerMethodField()\n device = serializers.SerializerMethodField()\n\n class Meta:\n model = models.Port\n fields = [\n \"id\",\n \"org_id\",\n \"virtual_port\",\n \"port_info\",\n \"display_name\",\n \"device_id\",\n \"device_name\",\n \"device\",\n \"name\",\n \"ip_address_4\",\n \"ip_address_6\",\n \"is_management\",\n \"logical_port_name\",\n \"virtual_port_name\",\n ]\n\n def get_org_id(self, port):\n return port.org.permission_id\n\n def get_logical_port_name(self, port):\n return port.virtual_port.logical_port.name\n\n def get_virtual_port_name(self, port):\n return port.virtual_port.name\n\n def get_device(self, port):\n if \"device\" in 
self.context.get(\"joins\", []):\n # device from preloaded cache\n device = self.devices.get(port.device_id)\n\n # device.facility from preloaded cache\n device.facility = self.facilities.get(device.facility_id)\n\n return Device(instance=device).data\n return None\n\n @property\n def devices(self):\n \"\"\"\n Preloads and caches all devices needed to render device relationships\n \"\"\"\n if not hasattr(self, \"_devices\"):\n ports = self.instance\n if not isinstance(ports, Iterable):\n ports = [ports]\n\n self._devices = {\n device.id: device\n for device in models.Device.objects.filter(\n id__in=[port.device_id for port in ports]\n )\n }\n return self._devices\n\n @property\n def facilities(self):\n \"\"\"\n Preloads and caches all facilities needed to render device relationships\n \"\"\"\n if not hasattr(self, \"_facilities\"):\n ports = self.instance\n if not isinstance(ports, Iterable):\n ports = [ports]\n\n self._facilities = {\n facility.id: facility\n for facility in models.Facility.objects.filter(\n id__in=[\n self.devices.get(port.device_id).facility_id for port in ports\n ]\n )\n }\n return self._facilities\n\n\n@register\nclass PortInfo(ModelSerializer):\n org_id = serializers.SerializerMethodField()\n\n class Meta:\n model = models.PortInfo\n fields = [\n \"id\",\n \"org_id\",\n \"port\",\n \"ip_address_4\",\n \"ip_address_6\",\n \"is_management\",\n \"is_routeserver_peer\",\n \"speed\",\n \"display_name\",\n ]\n\n def get_org_id(self, port_info):\n return port_info.org.permission_id\n\n\n@register\nclass IPAddress(ModelSerializer):\n class Meta:\n model = models.IPAddress\n fields = [\n \"id\",\n \"address\",\n \"instance\",\n \"reference\",\n \"reference_is_sot\",\n \"port_info\",\n ]\n\n\n@register\nclass VirtualPort(ModelSerializer):\n class Meta:\n model = models.VirtualPort\n fields = [\n \"id\",\n \"logical_port\",\n \"vlan_id\",\n \"port\",\n \"reference\",\n \"reference_is_sot\",\n \"name\",\n \"display_name\",\n ]\n read_only_fields = [\"port\"]\n\n\n@register\nclass RequestDummyPorts(serializers.Serializer):\n ref_tag = \"request_dummy_ports\"\n\n instance = serializers.IntegerField()\n ports = serializers.JSONField()\n name_prefix = serializers.CharField()\n device_type = serializers.CharField()\n\n class Meta:\n fields = [\"ports\", \"name_prefix\", \"instance\", \"device_type\"]\n\n @transaction.atomic\n def create(self, validated_data):\n ports = validated_data[\"ports\"]\n instance = models.Instance.objects.get(id=validated_data[\"instance\"])\n name_prefix = validated_data[\"name_prefix\"]\n device_type = validated_data[\"device_type\"]\n\n created_ports = []\n\n for device_id, port_data in ports.items():\n device, _ = models.Device.objects.get_or_create(\n name=f\"{name_prefix}:{device_id}\",\n instance=instance,\n )\n device.type = device_type\n device.save()\n device.setup()\n\n if not device.facility:\n facility = models.Facility.objects.filter(\n name=name_prefix, instance=instance\n ).first()\n if not facility:\n facility = models.Facility.objects.create(\n name=name_prefix, instance=instance, slug=name_prefix.lower()\n )\n device.facility = facility\n device.save()\n\n for _port in port_data:\n virtual_port, _ = models.VirtualPort.objects.get_or_create(\n name=f\"{name_prefix}:virt:{_port['id']}\",\n logical_port=device.physical_ports.first().logical_port,\n vlan_id=0,\n )\n\n port, port_created = models.Port.objects.get_or_create(\n virtual_port=virtual_port, name=f\"{name_prefix}:{_port['id']}\"\n )\n\n ip4_incoming = _port.get(\"ip_address_4\")\n 
ip6_incoming = _port.get(\"ip_address_6\")\n ip4 = None\n ip6 = None\n\n if ip4_incoming:\n ip4 = models.IPAddress.objects.filter(\n instance=instance, address=ip4_incoming\n ).first()\n if ip6_incoming:\n ip6 = models.IPAddress.objects.filter(\n instance=instance, address=ip6_incoming\n ).first()\n\n if ip4:\n created_ports.append(ip4.port_info.port)\n if ip6:\n created_ports.append(ip6.port_info.port)\n\n if port_created or not port.port_info_id:\n port.port_info = models.PortInfo.objects.create(\n instance=instance,\n )\n port.save()\n\n if not ip4:\n port.port_info.ip_address_4 = ip4_incoming\n\n if not ip6:\n port.port_info.ip_address_6 = ip6_incoming\n\n created_ports.append(port)\n\n return created_ports\n","repo_name":"fullctl/devicectl","sub_path":"src/django_devicectl/rest/serializers/service_bridge.py","file_name":"service_bridge.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"41851738078","text":"# Проводим Blueprint!\n\n# Переименовали файл из server.py в __init__.py\n# Запуск приложеня после упаковки в файл и нового имени\n# export FLASK_APP=webapp && export FLASK_ENV=development && flask run\n\nfrom flask import Flask, render_template, flash, redirect, url_for\n# Уберем возможность заходить на страницу /login для авторизованных\n# добавляем current_user\n# Создадим страницу, доступную только зарегистрированным\n# добавляем login_required (это дикоратов)\nfrom flask_login import LoginManager, current_user, login_required, login_user, logout_user\n\nfrom webapp.forms import LoginForm\nfrom webapp.model import db, News, User\nfrom webapp.weather import weather_by_city\n\n\ndef create_app():\n\n app = Flask(__name__)\n app.config.from_pyfile('config.py')\n # База, будь базой для приложения app, которое app = Flask(__name__)\n # База, работай вот с этим приложением. Но ты можешь быть базой\n # и для других приложений...\n # Аналогия от Михаила:\n # Если выходит дополнение к игре, то чтобы оно окзалось в игре\n # его надои инициализировать в игре\n db.init_app(app)\n\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = 'login'\n\n # НЕПОНЯТНАЯ ХРЕНЬ!!!\n # По id вытаскивае весь объект Пользователь\n # Потом будем часто пользоваться\n # Тащит из куки id пользователя\n # запрашивает его из базы данных\n # и по этому id вытаскивает конкретный объект User-а\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(user_id)\n\n @app.route(\"/\")\n # В какой-то нежожиданный момент мы переименовали first_foo в index\n def index():\n page_title = 'Новости Python'\n weather = weather_by_city(app.config['WEATHER_DEFAULT_CITY'])\n news_list = News.query.order_by(News.published.desc()).all()\n\n return render_template(\n 'index.html', weather=weather, page_title=page_title,\n news_list=news_list)\n\n @app.route('/login')\n def login():\n # В свое вермя мы унаследовали в Модель User из класса\n # фалска UserMixin, в котором есть переменная current_user и\n # его свойства в т.ч. 
is_authenticated = Правда, если пользователь\n # авторизирован\n # Если текущий пользователь атворизирован, то перенаправляем его\n # на главную страницу\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n title = \"Авторизация\"\n login_form = LoginForm()\n return render_template('login.html', page_title=title, form=login_form)\n\n # Реализуем обработку формы логина\n @app.route('/process-login', methods=['POST'])\n def process_login():\n # создаем экзмепляр (объект) формы\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n # Если user Истина (не 0, None) и Проверка пользователя прошла\n # user.check_password(form.password.data)\n # ВОЛШЕБНЫМ оразом переменная user стала обладать не только\n # именем пользователя, но и свойствами класса User,\n # откуда мы вызваем проверку пароля check_password(пароль из\n # только что заполненной формы)\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n # remember -атрибут login_user. Говрит Помнить или не Помнить\n # параметры ввода пользователя в КУКИ\n # form.remember_me - идем к remember_me атрибуту в forms\n # .data - дать результат выполнения\n # Получим Правда или Ложь\n # Т.е. Запомнить или Не запоминать\n flash('Вы вошли на сайт')\n return redirect(url_for('index'))\n flash('Неправильное имя пользователя или пароль')\n return redirect(url_for('login'))\n\n # Выход пользователя из сессии под своим логином\n @app.route('/logout')\n def logout():\n logout_user()\n flash('Вы разлогинились')\n return redirect(url_for('index'))\n\n @app.route('/admin')\n # Дикоратор @login_required проверит, что это авторизированный\n # пользователь и если нет, то вернет его на главную странцу\n @login_required\n # admin_index допольнительно проверяет, что текущий пользователь\n # не просто авторизирован, но и имеет роль админа, эту возможность\n # проверки мы учли в Моделе User, возвращает ЛОЖЬ или ПРАВДА\n def admin_index():\n if current_user.is_admin:\n return 'Привет админ'\n else:\n return 'Ты не админ!'\n\n return app\n","repo_name":"nyd7/learn_web","sub_path":"last_step/__init__step11.py","file_name":"__init__step11.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73145693422","text":"# omparador de Hashes\nimport hashlib\n\narquivo1 = 'a.txt'\narquivo2 = 'b.txt'\n\nhash1 = hashlib.new('ripemd160')\n\nhash1.update(open(arquivo1, 'rb').read()) # ira ler o arquivo e mandas para o hash - rb arbetura em modo binário\n\nhash2 = hashlib.new('ripemd160')\n\nhash2.update(open(arquivo2, 'rb').read()) #rb arbetura em modo binário\n\nif hash1.digest() != hash2.digest():\n print(f'o arquivo: {arquivo1} é diferente do arquivo: {arquivo2}')\n print('O hash do arquivo a.txt é: ', hash1.hexdigest())\n print('O hash do arquivo b.txt é: ', hash2.hexdigest())\n #hexdigest vai resumir o hash em exadecimal e via mostrar o hash\nelse:\n print(f'o arquivo: {arquivo1} é igual ao arquivo: {arquivo2}')\n print('O hash do arquivo a.txt é: ', hash1.hexdigest())\n print('O hash do arquivo b.txt é: ', hash2.hexdigest())\n","repo_name":"arnaldofilh0/comparador_hash","sub_path":"ch.py","file_name":"ch.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9316817535","text":"import os\nimport asyncio\nimport aiofiles\nimport datetime\nimport 
argparse\nfrom dotenv import load_dotenv\n\ndef get_args():\n load_dotenv()\n host = os.getenv(\"HOST\")\n outgoing_port = os.getenv(\"SEND_PORT\")\n\n parser = argparse.ArgumentParser(\n description='Chat untility')\n parser.add_argument('--host',\n default=host,\n type=str,\n help='Connection host')\n parser.add_argument('--outgoing_port',\n default=outgoing_port,\n type=int,\n help='Outgoing connections port')\n return parser.parse_args()\n\nasync def tcp_echo_client(host, outgoing_port):\n reader, writer = await asyncio.open_connection(\n host, outgoing_port)\n\n token = '80d5bd9e-f92a-11eb-8c47-0242ac110002\\n'.encode()\n message = 'Hello!\\n\\n'.encode()\n info_message = await reader.readline()\n print(info_message)\n writer.write(token)\n\n info_message = await reader.readline()\n print(info_message)\n writer.write(message)\n\n\n print('Close the connection')\n writer.close()\n\n\nif __name__ == '__main__':\n args = get_args()\n asyncio.run(tcp_echo_client(args.host, args.outgoing_port))\n","repo_name":"borovikova/underground-chat-cli","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6892968745","text":"import pandas as pd\nimport numpy as np\nimport tweepy \nfrom tweepy.auth import OAuthHandler\nimport json\nimport time\n\nwith open(\"twitter_credentials.json\", \"r\") as file:\n creds = json.load(file)\n\nCONSUMER_KEY = creds[\"CONSUMER_KEY\"]\nCONSUMER_SECRET = creds[\"CONSUMER_SECRET\"]\nACCESS_KEY = creds[\"ACCESS_TOKEN\"]\nACCESS_SECRET = creds[\"ACCESS_SECRET\"]\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth)\n\ndef tweet(df, date):\n speak_output = ''\n df = df[df['Date'] == \"2/{}/2021\".format(date)]\n\n i = 1\n \n announce = \"📣 PSA: THE NEXT {} TWEETS ARE FOR INCIDENTS ON 2/{}/2021\".format(len(df), date)\n announce_tweet = api.update_status(status = announce, tweet_mode='extended') \n\n for output in (df['Speech Output']):\n try:\n speak_output = str(i) + \". 
\" + output\n count = 220\n tweet = \" \"\n if (len(speak_output) > 220):\n while speak_output[count] != ' ':\n count = count + 1\n tweet = speak_output[:count]\n tweet += \" (1/2)\"\n else:\n tweet = speak_output\n \n print(speak_output, flush=True)\n og_tweet = api.update_status(status = tweet, tweet_mode='extended') \n if(len(speak_output) > count):\n api.update_status(status=speak_output[count:]+\" (2/2)\", \n in_reply_to_status_id=og_tweet.id, \n auto_populate_reply_metadata=True)\n time.sleep(5)\n i += 1\n except tweepy.TweepError as error:\n print(error)\n\ndf = pd.read_csv('./data/incidents_speech.csv')\ndate = 23\n\nwhile True:\n if date == 25:\n break\n tweet(df, date)\n time.sleep(60)\n\n date += 1\n ","repo_name":"arshreality/UNT-Police-Incidents","sub_path":"twitter_bot.py","file_name":"twitter_bot.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"37445741346","text":"\nimport os\nimport random\nimport time\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n# Relation-centric Neural Network\n# This NN takes all information about relations in the graph and outputs effects of all interactions between objects.\n\n\nclass RelationalModel(nn.Module):\n def __init__(self, input_size, output_size, hidden_size):\n super(RelationalModel, self).__init__()\n \n self.output_size = output_size\n \n self.layers = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, output_size),\n nn.ReLU()\n )\n \n def forward(self, x):\n '''\n Args:\n x: [batch_size, n_relations, input_size]\n Returns:\n [batch_size, n_relations, output_size]\n '''\n batch_size, n_relations, input_size = x.size()\n x = x.view(-1, input_size)\n x = self.layers(x)\n x = x.view(batch_size, n_relations, self.output_size)\n return x\n\n\n# Object-centric Neural Network\n# This NN takes information about all objects and effects on them, then outputs prediction of the next state of the graph.

\n\n\nclass ObjectModel(nn.Module):\n def __init__(self, input_size, hidden_size, object_dim):\n super(ObjectModel, self).__init__()\n \n self.layers = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.ReLU(),\n # nn.Linear(hidden_size, 2), #speedX and speedY\n nn.Linear(hidden_size, object_dim)\n )\n \n def forward(self, x):\n '''\n Args:\n x: [batch_size, n_objects, input_size]\n Returns:\n [batch_size * n_objects, object_dim] \n '''\n input_size = x.size(2)\n x = x.view(-1, input_size)\n return self.layers(x)\n\n\n# Interaction Network\n# IN involves only matrix operations that do not contain learnable parameters.\n\n\nclass InteractionNetwork(nn.Module):\n def __init__(self, n_objects, object_dim, n_relations, relation_dim, effect_dim):\n super(InteractionNetwork, self).__init__()\n\n self.relational_model = RelationalModel(2*object_dim + relation_dim, effect_dim, 150)\n self.object_model = ObjectModel(object_dim + effect_dim, 100, object_dim)\n self.n_objects = n_objects\n self.object_dim = object_dim\n \n def forward(self, objects, sender_relations, receiver_relations, relation_info):\n if len(sender_relations.shape) ==2 :\n sender_relations.unsqueeze_(0)\n if len(receiver_relations.shape)==2 :\n receiver_relations.unsqueeze_(0)\n if len(relation_info.shape) == 2:\n relation_info.unsqueeze_(0)\n senders = sender_relations.permute(0, 2, 1).bmm(objects) # bmm - batch matrix multiply\n receivers = receiver_relations.permute(0, 2, 1).bmm(objects)\n effects = self.relational_model(torch.cat([senders, receivers, relation_info], 2))\n effect_receivers = receiver_relations.bmm(effects)\n# predicted = self.object_model(torch.cat([objects, effect_receivers], 2))\n \n predicted = self.object_model(torch.cat([objects, effect_receivers], 2)).reshape(-1, self.n_objects, self.object_dim)\n val = [21,22,23,24]\n predicted[:,:,val[0]:val[-1]+1] = objects[:,:,val[0]:val[-1]+1]\n\n val = [0,1]\n predicted[:,:,val[0]:val[-1]+1] = objects[:,:,val[0]:val[-1]+1]\n\n return predicted\n\n","repo_name":"sanjass/IntNetworks6885","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10628832596","text":"import json\nimport logging\nimport configparser\n\n\ndef read_config_file(json_file_path):\n \"\"\"\n Reads a json_file and returns it as a python dict\n \n :param string json_file_path: path to a json file with configuration information\n :returns: dict\n \"\"\"\n \n json_file=check_file(json_file_path)\n with open(json_file) as json_data:\n return json.load(json_data)\n \n\ndef read_parameters(file_path):\n \"\"\"\n Read a configuration text file\n \n :param string file_path: path to configuration text file\n :returns: dict: dict of a parser object\n \"\"\"\n parameter_file=check_file(file_path)\n parser=configparser.ConfigParser()\n parser.read(parameter_file) \n return parser._sections\n\n\ndef check_file(file_path):\n '''\n Check if the file exists\n \n :param string file_path: path to file to check\n :return: file_path\n :raises Exception e: General exception if file doesn't exist. \n '''\n try:\n \n with open(file_path):\n return file_path\n\n except Exception as e:\n logging.error(\"Error in check_file(%s). Error: %s \" %(file_path,str(e)))\n raise Exception(\"Error in check_file(%s). 
Error: %s \" %(file_path,str(e)))\n\n","repo_name":"awacero/get_mseed_data","sub_path":"get_mseed_data/get_mseed_utils.py","file_name":"get_mseed_utils.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5342460731","text":"from priority_queue_folder.positional_list import PositionalList\nfrom priority_queue_folder.sorted_priority_queue import SortedPriorityQueue\n\n\ndef insertion_sort(L):\n \"\"\" implementation of selection sort\"\"\"\n C = PositionalList()\n for e in L:\n C.add_last(e)\n n = len(C)\n P = SortedPriorityQueue()\n for j in range(n):\n elem = C.delete(C.first())\n P.add(elem, elem)\n for j in range(n):\n (k, v) = P.remove_min()\n C.add_last(v)\n return C\n\n\nL = [6, 7, 4, 5, 2, 9]\nprint(L)\nC = insertion_sort(L)\nprint(C)\n","repo_name":"zzw-math/DataStructure","sub_path":"priority_queue_folder/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"34544511705","text":"n = int(input())\nstatus = \"\"\nif n % 2 == 1 or n % 2 == 0 and n in range(6, 21):\n status = \"Weird\"\nelif n in range(2,6) and n > 20 and n % 2 == 0:\n status = \"Not Weird\"\nelse:\n status = \"Not Weird\"\nprint(status)\n\n\n\n","repo_name":"hzaman193/Practice-Python","sub_path":"Conditional-Statement.py","file_name":"Conditional-Statement.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34934263036","text":"import os\nfrom imdb import Cinemagoer\nfrom itertools import groupby\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport logging\nimport logging.config\n\nia = Cinemagoer()\nshort_movie_genre = 'Short'\n\n\ndef get_movie_details(movie_id, contribution_type):\n movie = ia.get_movie(movie_id)\n short_movie_kind_prefix = ''\n try:\n short_movie_kind_prefix = 'Short ' if short_movie_genre in movie['genres'] else ''\n except:\n pass\n\n movie_details = {'title': movie.get('title'),\n 'year': movie.get('year'),\n 'kind': short_movie_kind_prefix + movie.get('kind'),\n 'contribution_type': contribution_type,\n 'rating': movie.get('rating')}\n if movie_details['rating'] is None:\n movie_details['rating'] = 0.0\n if movie_details['year'] is None:\n movie_details['year'] = 0\n if movie_details['kind'] is None:\n movie_details['kind'] = 'NA'\n return movie_details\n\n\ndef get_movies(filmography):\n movie_list = []\n for contribution_type in filmography.keys():\n for movie in filmography[contribution_type]:\n movie_list.append((movie.getID(), contribution_type))\n movie_list.sort(key=lambda m: m[0])\n return [(key, ', '.join(j for i, j in group)) for key, group in groupby(movie_list, key=lambda x: x[0])]\n\n\ndef search_movies(name):\n person = search_person(name)\n logging.info('Name: %s' % person.__str__())\n logging.info('Image: %s' % person.get_fullsizeURL())\n\n result = ia.get_person_filmography(person.personID)\n movies = get_movies(result['data']['filmography'])\n movie_list = []\n\n print('Getting results of %d projects' % len(movies))\n with ThreadPoolExecutor(max_workers=10) as executor:\n futures = {executor.submit(get_movie_details, movie[0], movie[1]) for movie in movies}\n\n for future in as_completed(futures):\n try:\n movie_list.append(future.result())\n except Exception as exc:\n logging.error('Could not 
process request due to: %s' % exc)\n\n logging.info('Got %d movies of %s' % (len(movie_list), name))\n sorted_movie_list = sorted(movie_list, key=lambda m: (m['kind'], m['rating'], m['year']), reverse=True)\n for movie in sorted_movie_list:\n logging.info('%s (%.1f) (%d) (%s) (%s)' % (\n movie['title'], movie['rating'], movie['year'], movie['kind'], movie['contribution_type']))\n\n\ndef search_person(name):\n people = ia.search_person(name=name)\n logging.info('Number of people with name \\'%s\\': %d' % (name, len(people)))\n if len(people) == 0:\n logging.info('Couldn\\'t find %s in imdb. Please check the spelling.' % name)\n sys.exit()\n return people[0]\n\n\nif __name__ == '__main__':\n logging.config.fileConfig(os.path.dirname(os.path.realpath(__file__)) + '/logging.conf')\n person_name = sys.argv[1]\n logging.info('Searching for movies \\'%s\\' has been part of...' % person_name)\n search_movies(person_name)\n","repo_name":"pkgajulapalli/imdb-search","sub_path":"movie_search.py","file_name":"movie_search.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70163836785","text":"from collections import Counter\ndef solution():\n start_word=words[0]\n count=0\n for word in words[1:]:\n diff_count=0\n for char in start_word:\n if char in word:\n word.remove(char)\n continue\n else:\n diff_count+=1\n \n if diff_count>1 or len(word)>=2:\n continue\n\n count+=1\n \n print(count)\n \n\n \nif __name__ == \"__main__\":\n with open(\"input2607.txt\",\"r\") as file:\n n=int(file.readline())\n words=[list(file.readline().strip()) for _ in range(n)]\n solution()","repo_name":"JehyunJung/Code-Test-Preparing","sub_path":"algorithm/Tasks_By_Algorithms/Implementation/2607.py","file_name":"2607.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"22630380734","text":"'''\naugment : .Mat\nreturn : tuple\n'''\n\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport math\nimport statistics\n\nclass Details:\n def __init__(self, file_name):\n self.pic_array = sio.loadmat(file_name)\n \n def findLocation(self):\n # x1, y1\n y1_list, x1_list = np.where(self.pic_array == 0)[0], np.where(self.pic_array == 0)[1]\n y1_list_median = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][x1_list[0]]!=255]\n x1 = x1_list[0]\n y1 = round((y1_list_median[0] + y1_list_median[-1])/2)\n \n # x2, y2\n flip_y = np.flip(self.pic_array, axis = 1)\n starting_point = round(np.shape(flip_y)[1]/6)\n x2_flip = 0\n \n for j in list(range(starting_point, flip_y.shape[1]-starting_point)):\n for i in range(flip_y.shape[0]):\n list_flip_y = list(flip_y[i][j-starting_point:j+starting_point])\n if flip_y[i][j]==0 and list_flip_y.count(0)==starting_point*2:\n x2_flip = j-starting_point\n break\n else : continue\n if x2_flip != 0 : break\n else : continue\n\n x2 = flip_y.shape[1] - x2_flip - 15\n y2_list = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][x2]!=255]\n y2 = int(statistics.median(y2_list))\n \n # x3, y3\n y3_list = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][0]!=255]\n if len(y3_list) == 0:\n j = -1\n while len(y3_list) == 0 :\n j+=1\n y3_list = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][j]!=255]\n y3 = int(statistics.median(y3_list))\n x3 = 0\n\n # x4, y4\n y4_list = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][-1]!=255]\n j = 0\n if 
len(y4_list) == 0:\n while len(y4_list) == 0:\n j = j-1\n y4_list = [i for i in range(self.pic_array.shape[0]) if self.pic_array[i][j]!=255]\n y4 = int(statistics.median(y4_list)) + 5\n x4 = self.pic_array.shape[1]\n \n# location_dic = {'x1y1': [x1,y1], 'x2y2': [x2,y2], 'x3y3': [x3,y3], 'x4y4':[x4,y4]}\n location_tuple = ((x1,y1),(x2,y2),(x3,y3),(x4,y4))\n return location_tuple\n \n def findLength_pixel(self, location_tuple):\n self.location_tuple = location_tuple\n # (x1, y1) - (x3, y3)\n a = math.dist(location_tuple[0], location_tuple[2])\n\n # (x1, y1) - (x2, y2)\n b = math.dist(location_tuple[0], location_tuple[1])\n \n # (x2, y2) - (x4, y4)\n c = math.dist(location_tuple[1], location_tuple[3])\n\n # extra (x3, y3) - (x2, y2) for left angle\n d = math.dist(location_tuple[2], location_tuple[1])\n\n # extra (x1, y1) - (x4, y4) for right angle\n e = math.dist(location_tuple[0], location_tuple[3])\n\n \n # total length (x3, y3) - (x4, y4)\n total = math.dist(location_tuple[2], location_tuple[3])\n\n \n length_pixel_tuple = (a,b,c,d,e,total)\n return length_pixel_tuple\n \n def findAngle(self, length_pixel_dic):\n #find triangle size with Heron Formula\n self.length_pixel_dic = length_pixel_dic\n a,b,c = self.length_pixel_tuple[0], self.length_pixel_tuple[1], self.length_pixel_tuple[2]\n d,e = self.length_pixel_tuple[3], self.length_pixel_tuple[4]\n \n # triangle size formula function\n def heron_formula(len_1, len_2, len_3):\n s = (len_1 + len_2 + len_3)/2\n triangle_size = math.sqrt(s*(s-len_1)*(s-len_2)*(s-len_3))\n return triangle_size\n\n # angle function\n def subtract_angle(triangle_size, len_1, len_2):\n sin_radian = (2*triangle_size)/(len_1*len_2) # angle > 90\n angle_360 = ((math.asin(sin_radian))/math.pi)*180\n if angle_360 < 90 : angle_final = 180-angle_360\n else : angle_final = angle_360\n return angle_final\n \n # Left angle\n left_triangle = heron_formula(a,b,d)\n left_angle = subtract_angle(left_triangle, a, b)\n \n # Right angle\n right_triangle = heron_formula(b,c,e)\n right_angle = subtract_angle(right_triangle, b, c)\n \n# angle_dic = {'left_angle' : left_angle, 'right_angle': right_angle}\n angle_tuple = (left_angle, right_angle) \n return angle_tuple","repo_name":"chloeroh/railway-image-detection-project","sub_path":"details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10211979519","text":"average = 0.0\nn = int(input())\n\nwhile n > 0:\n i1, i2, i3 = input().split()\n i1 = float(i1)\n i2 = float(i2)\n i3 = float(i3)\n average = ((i1 * 2) + (i2 * 3) + (i3 * 5)) / 10\n print('%.1f' %average)\n n = n - 1\n","repo_name":"Pedro-Neiva/URI","sub_path":"Extremely Basic/1079.py","file_name":"1079.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8649406932","text":"from django.urls import path\nfrom blog.views import PostListView,PostDetailView,PostCreateView,PostDraftsView,publish,PostUpdateView,PostDeleteView,disapprove\n\n\nurlpatterns=[\n\npath('',PostDetailView.as_view(),name='post-detail'),\npath('create/',PostCreateView.as_view(),name='post-create'),\npath('drafts/',PostDraftsView.as_view(),name='post-drafts'),\npath('publish//',publish, name='post-publish'),\npath('update//',PostUpdateView.as_view(), name='post-update'),\npath('delete//',PostDeleteView.as_view(), 
name='post-delete'),\npath('unpublish/',disapprove,name='post-disapprove')\n]","repo_name":"datobih/Datobi-Blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"34055754848","text":"casa = float(input('Quanto custa a casa que deseja comprar? R$'))\nsalario = float(input('Qual é o seu salário?'))\nmeses = float(input('Em quantos meses pretende pagar a casa?'))\nprestação = casa / meses\nmaximo = salario * 30 / 100\n\nif prestação > maximo:\n print('Você não teve seu emprestimo aprovado, pois a prestação excede 30% do seu salário.')\nelse:\n print('Seu crédito foi aprovado!')\n print('Lembrando que são {:.0f} prestações de R${:.2f}' .format(meses, prestação))\n","repo_name":"MarioViniciux/Python-backup","sub_path":"D036.py","file_name":"D036.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34505727206","text":"'''\n题目描述:\n给定一个整数数组nums和目标值target, 在数组中找出和为目标值的两个整数,并且返回它们的下标。假设每个目标值只对应一个答案。\n\n思路一:\n用一个与nums同等长度的数组rawIndex在对应位置上记录nums中每一个数字的索引,在对nums进行快速排序的同时,相应地移动每个数字对应的索引。\n排序后,用头指针和尾指针分别指向nums的头部和尾部,将两个指针对应的数字相加;若和大于target,则将指向尾指针向前移一位;若和小于target,则将头指针向前移一位;若等于,则返回两个指针对应的数字的索引。\n时间复杂���为O(nlogn),空间复杂度为O(n)\n\n思路二:\n利用哈希表快速定址的优势,将nums的每个元素与对应的索引存在哈希表中,元素为键,索引为值。遍历nums,检查target-element是否存在于字典中,从而实现O(n)的时间复杂度,空间复杂度为O(n)\n'''\n\n\n\ndef quickSort(nums, start, end, rawIndex):\n if end == start or end < start:\n return\n pivot = nums[start]\n forward, backawrd = start + 1, end\n while True:\n if forward == backawrd:\n if nums[forward] <= pivot:\n nums[start] = nums[forward]\n nums[forward] = pivot\n tmp = rawIndex[start]\n rawIndex[start] = rawIndex[forward]\n rawIndex[forward] = tmp\n else:\n backawrd -= 1\n break\n while nums[backawrd] > pivot and backawrd > forward:\n backawrd -= 1\n while nums[forward] <= pivot and forward < backawrd:\n forward += 1\n if forward == backawrd:\n continue\n tmp = nums[forward]\n nums[forward] = nums[backawrd]\n nums[backawrd] = tmp\n tmp = rawIndex[forward]\n rawIndex[forward] = rawIndex[backawrd]\n rawIndex[backawrd] = tmp\n quickSort(nums, start, forward - 1, rawIndex)\n quickSort(nums, backawrd + 1, end, rawIndex)\n\n#排序法\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n rawIndex = [i for i in range(len(nums))] #各个数字未排序前的索引\n forward, backawrd = 0, len(nums) - 1\n quickSort(nums, 0, backawrd, rawIndex)\n sumResult = nums[forward] + nums[backawrd]\n while sumResult != target:\n if sumResult > target:\n backawrd -= 1\n else:\n forward += 1\n sumResult = nums[forward] + nums[backawrd]\n return [rawIndex[forward], rawIndex[backawrd]]\n \n#字典法\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n hashmap = {}\n for index, element in enumerate(nums):\n left = target - element\n if left in hashmap:\n return [index, hashmap[left]]\n hashmap[element] = index\n","repo_name":"CVan19/data-structure-and-algorithm","sub_path":"排序/twoSums.py","file_name":"twoSums.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12644705308","text":"import pytest\n\nfrom cfripper.rules.cloudformation_authentication import 
CloudFormationAuthenticationRule\nfrom tests.utils import get_cfmodel_from\n\n\n@pytest.fixture()\ndef good_template():\n return get_cfmodel_from(\"rules/CloudFormationAuthenticationRule/cfn_authentication_good.json\").resolve()\n\n\n@pytest.fixture()\ndef neutral_template():\n return get_cfmodel_from(\"rules/CloudFormationAuthenticationRule/cfn_authentication_neutral.yml\").resolve()\n\n\n@pytest.fixture()\ndef bad_template():\n return get_cfmodel_from(\"rules/CloudFormationAuthenticationRule/cfn_authentication_bad.json\").resolve()\n\n\ndef test_no_failures_are_raised(good_template):\n rule = CloudFormationAuthenticationRule(None)\n result = rule.invoke(good_template)\n\n assert result.valid\n assert len(result.failed_rules) == 0\n assert len(result.failed_monitored_rules) == 0\n\n\ndef test_failures_are_raised(bad_template):\n rule = CloudFormationAuthenticationRule(None)\n result = rule.invoke(bad_template)\n\n assert not result.valid\n assert len(result.failed_rules) == 1\n assert len(result.failed_monitored_rules) == 0\n assert result.failed_rules[0].rule == \"CloudFormationAuthenticationRule\"\n assert result.failed_rules[0].reason == \"Hardcoded credentials in EC2I4LBA1\"\n\n\ndef test_rule_ignores_where_auth_not_mentioned(neutral_template):\n rule = CloudFormationAuthenticationRule(None)\n result = rule.invoke(neutral_template)\n\n assert result.valid\n assert len(result.failed_rules) == 0\n assert len(result.failed_monitored_rules) == 0\n","repo_name":"gustcol/Canivete","sub_path":"cfripper/tests/rules/test_CloudFormationAuthenticationRule.py","file_name":"test_CloudFormationAuthenticationRule.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"91"} +{"seq_id":"6189864693","text":"import tkinter as tk\r\nfrom tkinter import ttk, filedialog, messagebox\r\nimport эллочка as ell\r\nfrom threading import Thread\r\nfrom os import fsencode\r\n\r\nttkthemes_imported = True\r\n\r\ntry:\r\n from ttkthemes import ThemedTk\r\nexcept ModuleNotFoundError:\r\n ttkthemes_imported = False\r\n\r\n\r\nclass Debugger():\r\n def __init__(self):\r\n self.sel_acts = []\r\n self.send_to_run = {\r\n 'debug_func': self.act,\r\n 'init_vars_func': self.init_vars,\r\n 'file': None\r\n }\r\n \r\n self.mode_select()\r\n\r\n def file_select(self):\r\n file = filedialog.askopenfilename()\r\n try:\r\n open(file, 'r')\r\n except FileNotFoundError:\r\n return False\r\n self.send_to_run['file'] = file\r\n return True\r\n\r\n def mode_select(self, file=None):\r\n self.all_actions_descs = {\r\n 'curr_cell': 'Значение в текущей ячейке',\r\n 'memory_f': '20 ячеек вокруг текущей',\r\n 'calls_f': '10 команд вокруг той, которая сейчас выполняется',\r\n 'code_f': 'Исходный код'\r\n }\r\n self.all_actions_funcs = {\r\n 'curr_cell': self.curr_cell,\r\n 'memory_f': self.memory_f,\r\n 'calls_f': self.calls_f,\r\n 'code_f': self.code_f\r\n }\r\n\r\n if ttkthemes_imported:\r\n root = ThemedTk(theme='breeze')\r\n else:\r\n root = tk.Tk()\r\n messagebox.showwarning(message='Нужно установить ttkthemes\\nОткройте IDE, чтобы установить', title='Нет модуля')\r\n root.focus_force()\r\n root.title('[configure] DEBUG')\r\n \r\n frm_chb = ttk.Frame(root, padding=20, borderwidth=1, relief='sunken')\r\n frm_chb.grid(row=0, columnspan=2)\r\n\r\n frm_fsb = ttk.Frame(root, padding=20)\r\n frm_fsb.grid(row=1, column=0)\r\n\r\n frm_b = ttk.Frame(root, padding=20)\r\n frm_b.grid(row=1, column=1)\r\n\r\n self.file_selected = False\r\n def butt_pressed():\r\n if 
not len(self.sel_acts):\r\n return\r\n root.destroy()\r\n self.start_real_init()\r\n\r\n def fsbutton():\r\n self.file_selected = self.file_select()\r\n if self.file_selected:\r\n fsbutt['text'] = 'Файл выбран'\r\n if len(self.sel_acts) and self.file_selected:\r\n btn['state'] = ['normal']\r\n btn['text'] = 'Start'\r\n else:\r\n btn['state'] = ['disabled']\r\n if len(self.sel_acts):\r\n btn['text'] = 'Выберите файл'\r\n elif self.file_selected:\r\n btn['text'] = 'Выберите режим'\r\n else:\r\n btn['text'] = 'Выберите режим и файл'\r\n\r\n fsbutt = ttk.Button(frm_fsb, text='Выбрать файл', command=fsbutton)\r\n fsbutt.pack()\r\n\r\n btn = ttk.Button(frm_b, text='Выберите режим и файл', command=butt_pressed, state=['disabled'])\r\n btn.pack()\r\n \r\n self.sel_acts =[]\r\n\r\n def select():\r\n self.sel_acts = []\r\n #if curr_cell.get() == 1: result.append('curr_cell')\r\n if memory_f.get() == 1: self.sel_acts.append('memory_f')\r\n if calls_f.get() == 1: self.sel_acts.append('calls_f')\r\n if code_f.get() == 1: self.sel_acts.append('code_f')\r\n if len(self.sel_acts) and self.file_selected:\r\n btn['state'] = ['normal']\r\n btn['text'] = 'Start'\r\n else:\r\n btn['state'] = ['disabled']\r\n if len(self.sel_acts):\r\n btn['text'] = 'Выберите файл'\r\n elif self.file_selected:\r\n btn['text'] = 'Выберите режим'\r\n else:\r\n btn['text'] = 'Выберите режим и файл'\r\n \r\n position = {\"padx\":6, \"pady\":6, \"anchor\":tk.NW}\r\n \r\n #curr_cell = tk.IntVar()\r\n #ttk.Checkbutton(frm_chb, text='Значение в текущей ячейке', variable=curr_cell, command=select).pack(**position)\r\n \r\n memory_f = tk.IntVar()\r\n ttk.Checkbutton(frm_chb, text='20 ячеек вокруг текущей', variable=memory_f, command=select).pack(**position)\r\n \r\n calls_f = tk.IntVar()\r\n ttk.Checkbutton(frm_chb, text='10 команд вокруг той, которая сейчас выполняется', variable=calls_f, command=select).pack(**position)\r\n \r\n code_f = tk.IntVar()\r\n ttk.Checkbutton(frm_chb, text='Исходный код', variable=code_f, command=select).pack(**position)\r\n \r\n root.mainloop()\r\n\r\n def start_real_init(self):\r\n try:\r\n self.actions = [self.all_actions_funcs[action] for action in self.sel_acts]\r\n except KeyError as ex:\r\n raise ValueError('[DEBUGGER] Unknown action\\n' + ex)\r\n self.real_init()\r\n \r\n def real_init(self):\r\n self.code = []\r\n self.calls = []\r\n self.flag = True\r\n self.running = False\r\n\r\n if ttkthemes_imported:\r\n self.root = ThemedTk(theme='breeze')\r\n else:\r\n self.root = tk.Tk()\r\n self.root.focus_force()\r\n self.root.title('DEBUG')\r\n \r\n self.frm_b = ttk.Frame(self.root, padding=20)\r\n self.frm_b.grid(row=2, columnspan=2)\r\n self.frm_m = ttk.Frame(self.root, padding=20, borderwidth=1, relief='sunken')\r\n self.frm_m.grid(row=0, columnspan=2)\r\n self.frm_c = ttk.Frame(self.root, padding=20, borderwidth=1, relief='sunken')\r\n self.frm_c.grid(column=0, row=1)\r\n self.frm_code = ttk.Frame(self.root, padding=20)\r\n self.frm_code.grid(column=1, row=1)\r\n \r\n self.btn = ttk.Button(self.frm_b, text='Step', command=self.resume)\r\n self.btn.grid()\r\n\r\n '''if self.curr_cell in self.actions:\r\n self.lbl = ttk.Label(self.frm_m, text='__')\r\n self.lbl.grid()'''\r\n\r\n if self.memory_f in self.actions:\r\n self.mem_lbls = []\r\n for j in range(20):\r\n label = ttk.Label(self.frm_m, text = '_')\r\n label.grid(column=2 * j, row=0)\r\n space = ttk.Label(self.frm_m, text = ' ')\r\n space.grid(column=2 * j + 1, row=0)\r\n self.mem_lbls.append(label)\r\n\r\n if self.calls_f in self.actions:\r\n 
self.calls_lbls = []\r\n for j in range(10):\r\n label = ttk.Label(self.frm_c, text=' ')\r\n label.grid(column=0, row=j)\r\n self.calls_lbls.append(label)\r\n \r\n if self.code_f in self.actions:\r\n self.txt = tk.Text(self.frm_code)\r\n self.txt.grid()\r\n self.txt['height'] = 15\r\n self.txt['width'] = 25\r\n self.txt_inited = False\r\n\r\n self.thread = Thread(target=ell.run, kwargs=self.send_to_run)\r\n self.thread.start()\r\n self.running = True\r\n self.root.title('[running] DEBUG')\r\n \r\n self.root.mainloop()\r\n \r\n def init_vars(self, cd=[], cls=[]):\r\n self.code = list(map(lambda x: x.strip(), filter(lambda x: x.isprintable() and len(x), cd.split('\\n'))))\r\n self.calls = list(cls)\r\n\r\n def resume(self):\r\n self.running = self.thread.is_alive()\r\n\r\n if not self.running:\r\n self.root.title('[finished] DEBUG')\r\n self.term()\r\n return\r\n \r\n self.flag = False\r\n\r\n def curr_cell(self, kwargs):\r\n self.lbl['text'] = kwargs['curr_cell']\r\n\r\n def memory_f(self, kwargs):\r\n memory = kwargs['memory']\r\n i = kwargs['i']\r\n if i <= 10 or len(memory) - i <= 5:\r\n show_mem = memory[:20:]\r\n idx = i\r\n else:\r\n show_mem = memory[i - 10:i + 10:]\r\n idx = 10\r\n\r\n for i in range(20):\r\n self.mem_lbls[i]['text'] = show_mem[i]\r\n self.mem_lbls[i]['background'] = ''\r\n if i == idx:\r\n self.mem_lbls[i]['background'] = '#00F000'\r\n\r\n def calls_f(self, kwargs):\r\n calls_j = kwargs['j']\r\n if calls_j <= 5 or len(self.calls) - calls_j <= 5:\r\n show_calls = self.calls[:10:]\r\n calls_idx = calls_j\r\n else:\r\n show_calls = self.calls[calls_j - 5:calls_j + 5:]\r\n calls_idx = 5\r\n\r\n \r\n for j in range(10):\r\n self.calls_lbls[j]['text'] = (show_calls[j] if isinstance(show_calls[j], str) else show_calls[j].__name__)\r\n self.calls_lbls[j]['background'] = ''\r\n if j == calls_idx:\r\n self.calls_lbls[j]['background'] = '#00FF00'\r\n\r\n def code_f(self, kwargs):\r\n if not self.txt_inited:\r\n self.txt.insert('1.0', '\\n'.join(self.code))\r\n self.txt['state'] = ['disabled']\r\n self.txt_inited = True\r\n\r\n def act(self, **kwargs):\r\n self.flag = True\r\n\r\n for action in self.actions:\r\n action(kwargs)\r\n\r\n while self.flag:\r\n pass\r\n\r\n def term(self):\r\n if self.running:\r\n raise RuntimeError('Вызван Debugger.term, но Эллочка ещё работает')\r\n self.btn['text'] = 'Close'\r\n self.btn['command'] = self.root.destroy\r\n\r\ndef main():\r\n dbgr = Debugger()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","repo_name":"Petua41/ell","sub_path":"ell/debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":9694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71335905583","text":"from collections import Counter\nimport math\nfrom argparse import ArgumentParser\nimport os\nimport common\n\n\ndef load_bug_report(file_path: str):\n global corpus_dic\n with open(file_path, 'r') as file:\n lines = [line.strip() for line in file.readlines()]\n corpus_dic[file_path] = Counter(' '.join(lines).split())\n return\n\n\ndef load_code(proj_path: str, correspond_path: str):\n global corpus_dic\n for root, dirs, files in os.walk(proj_path):\n for file in files:\n if not file.endswith(common.java_file_postfix):\n continue\n\n file_path = os.path.join(root, file)\n correspond_file_path = file_path.replace(proj_path, correspond_path)\n rf, cf = open(file_path, 'r'), open(correspond_file_path, 'r')\n lines, clines = rf.readlines(), cf.readlines()\n rf.close()\n cf.close()\n if 
len(lines) != len(clines):\n print('wrong in correspond file. please check!')\n print('code path: %s' % file_path)\n print('correspond path: %s' % correspond_file_path)\n return\n for line, cline in zip(lines, clines):\n line = line.strip(common.afterPT_code_text_splitor).strip()\n cline_parts = cline.split(common.afterPT_code_correspond_splitor)\n if not line or not cline_parts:\n continue\n key = \"{}{}{}\".format(file_path, common.path_sig_splitor, cline_parts[0])\n corpus_dic[key] = Counter(line.split())\n return\n\n\n# count可以通过countlist得到, word可以通过count得到\n# count[word]可以得到每个单词的词频, sum(count.values())得到整个doc的单词总数\ndef tf(word, count):\n return math.log(count[word]) + 1\n\n\n# 统计的是含有该单词的句子数\ndef n_containing(word, count_list):\n return sum(1 for count in count_list if word in count)\n\n\n# len(count_list)是指句子的总数,n_containing(word, count_list)是指含有该单词的句子的总数,加1是为了防止分母为0\ndef idf(word, count_list):\n return math.log(len(count_list) / n_containing(word, count_list))\n\n\n# 将tf和idf相乘\ndef tfidf(word, count, count_list):\n return tf(word, count) * idf(word, count_list)\n\n\ncorpus_dic = {}\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-br\", \"--bug_report_path\", dest = \"bug_report_path\", required = True)\n parser.add_argument(\"-co\", \"--code_path\", dest = \"code_path\", required = True)\n parser.add_argument(\"-cr\", \"--correspond_path\", dest = \"correspond_path\", required = True)\n parser.add_argument(\"-bs\", \"--br_save_path\", dest = \"br_save_path\", required = True)\n parser.add_argument(\"-cs\", \"--code_save_path\", dest = \"code_save_path\", required = True)\n args = parser.parse_args() \n bug_report_path = args.bug_report_path\n code_path = args.code_path\n correspond_path = args.correspond_path\n code_save_path = args.code_save_path\n br_save_path = args.br_save_path\n\n load_bug_report(file_path = bug_report_path)\n load_code(proj_path = code_path, correspond_path = correspond_path)\n\n counter_list = list(corpus_dic.values())\n\n # save br\n if bug_report_path not in corpus_dic:\n print('bug report not found!')\n exit(1)\n counter = corpus_dic[bug_report_path]\n pair = ['{}{}{}'.format(word, common.tfidfvalue_internal_splitor, tfidf(word, counter, counter_list)) for word in counter]\n with open(br_save_path, 'w') as f:\n f.write(common.linesep.join(pair))\n print('finished calculate tf-idf for br.')\n del corpus_dic[bug_report_path]\n\n # save method\n for doc in corpus_dic:\n doc_parts = doc.split(common.path_sig_splitor)\n file_path = doc_parts[0]\n method = common.path_sig_splitor.join(doc_parts[1:]) # 因为都是'#'\n write_path = file_path.replace(code_path, code_save_path)\n\n # create dirs\n write_dir = os.path.split(write_path)[0]\n if not os.path.isdir(write_dir):\n os.makedirs(write_dir)\n\n counter = corpus_dic[doc]\n with open(write_path, 'a') as f:\n f.write(method + common.method_tfidfvalue_splitor)\n pair = ['{}{}{}'.format(word, common.tfidfvalue_internal_splitor, tfidf(word, counter, counter_list)) for word in counter]\n f.write(common.tfidfvalue_external_splitor.join(pair))\n f.write(os.linesep)\n\n print('finished calculate tf-idf for method.')\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jolemon/FineLocator","sub_path":"queryexpansion/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"43921687618","text":"#!/usr/bin/env python2.7\n\nimport argparse\nfrom dns import work_loop\nfrom options 
import Options, set_options, prepare_args\nimport os\nfrom sys import argv\n\n\ndef main():\n parser = argparse.ArgumentParser()\n prepare_args(parser)\n args = parser.parse_args()\n set_options(Options(args))\n print(args)\n\n if os.geteuid() != 0:\n print(\"Error: {0} requires root privileges.\".format(argv[0]))\n exit()\n\n print(\"Starting up\")\n work_loop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"greydot/pydnsperf","sub_path":"pydnsperf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37402074500","text":"samolot ={ 'nazwa': ' boing',\n 'przebieg': '200',\n 'typ': 'pasazerski'}\n#dla klucza zwraca wartosc\nprint(samolot['nazwa'])\nprint(samolot['typ'])\nprint('')\n# dla pythona 3\nfor key, value in samolot.iteritems():\n print('{0}:{1}'.format(key, value))\n# dla starszych pythonow\nfor key in samolot:\n print('{0}:{1}'.format(key, samolot[key]))\n\n# wyrzuci blad - print(samolot['aaa'])\nprint('')\nprint('')\n# lista ktorej elementami jest slownik\n\nkoszyk = [ {'nazwa': 'mleko','cena': 5.20},\n {'nazwa': 'bulka', 'cena': 0.20},\n {'nazwa': 'ziemniaki', 'cena': 1.60}]\nsuma=0\nfor a in range(len(koszyk)):\n print(koszyk[a]['cena'])\n suma = suma + koszyk[a]['cena']\n\nprint('suma powyzszych to ' + str(suma))\n# mleko i ser 10% znizkiz flagami\nstan_reguly = {'mleko': False, 'bulka': False}\nsuma=0\nfor a in koszyk:\n suma = suma + a['cena']\n nazwa_prod = a['nazwa']\n if nazwa_prod == 'mleko' or nazwa_prod == 'bulka':\n stan_reguly[nazwa_prod] = True\n\nif stan_reguly['mleko'] and stan_reguly['bulka']:\n print('10% znizki')\n suma = suma - (suma*0.10)\nprint(suma)\n\n# zad 14\nprodukty ={ 'S123444': 'sukienka trojkatna',\n 'P123': 'spodnei krata',\n 'X1221': 'kosola'}\nigla = 'X2X'\n\nif igla in produkty:\n print('Znalazlem {0}'.format(igla))\nelse:\n print('Brak w magazynie {0}'.format(igla))\n","repo_name":"losjimi/nauka_pythona","sub_path":"slownik.py","file_name":"slownik.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7913422941","text":"##\n# Read a sequence of items, add them in a list,\n# format the list, display the result as a string\n#\n\n## Format a list, separating each item with a comma and\n# including \"and\" before the last one\n# @param t a list \n# @return a string with all the items of the list formatted\n#\ndef listFormatter(t):\n # if t has one element, return it as string\n if len(t) == 0:\n s = \"\"\n # if t has one element, return it as string\n elif len(t) == 1:\n s = t[0]\n # if t has more than 1 element\n else:\n # Create a sublist with all elements but the last\n v = t[:-1]\n # Create a string so that each element is separated by commas\n v = \", \".join(v)\n # Create a second sublist with \"and\" and last element of t\n p = [\"and\", t[-1]]\n # Create a string so that the last element is preceded by \"and\"\n p = \" \".join(p)\n # Concatenate the 2 strings\n s = v + \" \" + p\n return s\n\n# Read a list of items from the user till a blank line is entered\n# Display the list formatted as a string using listFormatter function\ndef main():\n items = []\n item = input(\"Please, enter a string: \")\n while item != \"\":\n items.append(item)\n item = input(\"Please, enter another string: \")\n print(\"Here the list formatted: \")\n print(\" '{}'\".format(listFormatter(items)))\n\n# Call the main 
function\nif __name__ == \"__main__\":\n main()","repo_name":"Indiana3/python_exercises","sub_path":"wb_chapter5/exercise120.py","file_name":"exercise120.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40909183516","text":"from socket import *\nfrom method import *\n\nBUF_SIZE = 5000\n\nupLayer_address = \"127.0.0.1\"\nupLayer_port = 11200\nphyLayer_address = \"127.0.0.1\"\nphyLayer_port =11100\nADDRESS_UPLAYER = (upLayer_address, upLayer_port)\nADDRESS_PHYLAYER = (phyLayer_address, phyLayer_port)\n\nclient_socket = socket(AF_INET,SOCK_DGRAM)\nclient_socket.bind(ADDRESS_UPLAYER)\n\nwhile (1):\n s = input()\n a = sendMessage(client_socket, s, ADDRESS_PHYLAYER)\n # client_socket.sendto(s.encode(), ADDRESS_PHYLAYER)\n\nclient_socket.close()\n","repo_name":"guangangning/DCCProject1","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71642448303","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport sys\nsys.path.append(\"../scrap\")\nfrom scrap_condition import ScrapCondition\nfrom scrap_links import ScrapLinks\n\nclass QiitaLikeSpider(scrapy.Spider):\n name = 'qiita_like'\n def start_requests(self):\n urls = [\n \"http://qiita.com/tags/%E6%A9%9F%E6%A2%B0%E5%AD%A6%E7%BF%92/likes\"\n ]\n\n for url in urls:\n pages = 68\n for page in range(1, pages):\n new_url = url + \"?page=\" + str(page)\n scrap_links = ScrapCondition(new_url)\n for link in scrap_links.output():\n yield scrapy.Request(url=\"http://qiita.com\" + link, callback=self.parse)\n\n\n\n def parse(self, response):\n page = response.url.split(\"/\")[-1]\n filename = '../articles/qiita-%s.html' % page\n with open(filename, 'wb') as f:\n f.write(response.body)\n filename = \"../articles/urls.txt\"\n with open(filename, 'a') as f:\n f.write(response.url + \",\")\n # scrap_links = ScrapLinks(response.url)\n # for url in scrap_links.output():\n # yield scrapy.Request(url=url, callback=self.parse)\n","repo_name":"Kazuuuuuki/utech_crawler","sub_path":"utech_crawler/utech_crawler/spiders/qiita_like.py","file_name":"qiita_like.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"35930854396","text":"# --- Open cmt line bellow if run by cmd: python *.py\nimport sys # nopep8\nsys.path.append(\".\") # nopep8\nimport pickle\n\n\n# A way to run the code in the file.\nif __name__ == \"__main__\":\n\n model = pickle.load(open('src/ml_models/rf_valorent.model', 'rb'))\n # model = pickle.load(open('scripts/rfc_spark.model', 'rb'))\n result = model.predict([[0.7, 11.0, 12.9, 3.2, 57.7, 4.2, 170.0, 0.2, 20.784]])\n print(result[0])\n # rfcModel = RandomForestClassificationModel.load('scripts/rfc_spark.model')\n # data_list = Vectors.dense([0.7, 11.0, 12.9, 3.2, 57.7, 4.2, 170.0, 0.2, 20.784])\n\n # prediction = rfcModel.predictRaw(data_list)\n\n # result = rfcModel.transform([[0.7, 11.0, 12.9, 3.2, 57.7, 4.2, 170.0, 0.2, 20.784]])\n # print(prediction)\n","repo_name":"bezleen/bigdata-mmk-rt","sub_path":"scripts/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73104941104","text":"import cv2\nimport numpy as np\nfrom readImage import ReadImage\nfrom imageProces 
import ImageProces\nfrom algoritmoGenetico.Poblacion import Poblacion\nfrom algoritmoGenetico.AlgoritmoGenetico import AlgoritmoGenetico\nfrom algoritmoGenetico.Individuo import Individuo\nclass Main:\n def __init__(self):\n pass\n\n def initial(self):\n return str(\"comenzando\")\n\n def read(self):\n # return \"crack.jpg\"\n # return \"crack1.jpeg\"\n # return \"crack2.jpg\"\n return \"grieta1.jpg\"\n # return \"grietafalsa.jpg\"\n\nif __name__ == \"__main__\":\n main = Main()\n #Ruta de la imagen\n print(main.initial())\n route = main.read()\n readImage = ReadImage(route)\n # Lectura de la imagen\n print(\"Ruta de la imagen\", readImage.getRoute())\n imgs = readImage.read()\n widht = imgs[1]\n heihgt = imgs[2]\n cv2.imshow('leer Imagen', imgs[0])\n imageProces = ImageProces(imgs[0])\n # print(\"nuevo tamanio\", widht, heihgt)\n \"\"\"\n ||||||||||||||||||||||||\n | POBLACION INICIAL |\n ||||||||||||||||||||||||\n \"\"\"\n\n # | CREACION DEL ALGORITMO GENETICO\n\n poblacionInicial = Poblacion()\n print(str(poblacionInicial))\n \"\"\"\n |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n | IMAGENES BINARIAS TOMANDO EN CUENTA TECNICAS DE VISION POR COMPUTADORA |\n |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n \"\"\"\n\n # FUNCION LA PLACE\n laplace = imageProces.laplace()\n individuo = Individuo(laplace)\n # print(\"laplace tamanio\", individuo.shape)\n poblacionInicial.addIndividuo(individuo)\n #FUNCION SOBEL EN X\n sobelx = imageProces.sobelX()\n individuo = Individuo(sobelx)\n poblacionInicial.addIndividuo(individuo)\n # FUNCION SOBEL EN Y\n sobely = imageProces.sobelY()\n individuo = Individuo(sobely)\n poblacionInicial.addIndividuo(individuo)\n #FUNCION CANNY\n canny = imageProces.canny()\n individuo = Individuo(canny)\n poblacionInicial.addIndividuo(individuo)\n \"\"\"\n |||||||||||||||||||||||||||||||\n | MOSTRAR LA POBLACION INCIAL |\n |||||||||||||||||||||||||||||||\n \"\"\"\n # poblacionInicial.mostrarPoblacion()\n\n \"\"\"\n ||||||||||||||||||||||||||\n | ALGORITMO GENETICO |\n ||||||||||||||||||||||||||\n \"\"\"\n\n algoritmoGenetico = AlgoritmoGenetico(poblacionInicial)\n \"\"\"\n *******************************************\n | Generar una cantidad de generaciones |\n *******************************************\n \"\"\"\n generaciones = 5\n mejorIndividuo = algoritmoGenetico.generaciones(generaciones)\n # print(mejorIndividuo[0])\n cv2.imshow(\"mejor Individuo\", poblacionInicial.seleccion(mejorIndividuo).getIndividuo())\n poblacionInicial.mostrarPoblacion()\n print(\"elitista \", mejorIndividuo)\n # print(\"mejor hijo\", mejorHijo)\n # imgs = imageProces.canny()\n\n # sobel = cv2.addWeighted(sobelx, sobely)\n # print(\"poblacion\", poblacionInicial) # cantidad de poblacion inicial relacionada\n # cv2.imshow('sobelx', sobelx)\n # cv2.imshow('soberly', sobely)\n # cv2.imshow('laPlace', laplace)\n # cv2.imshow('canny', canny)\n # cv2.imshow('mejorHijo', mejorHijo)\n\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"carlitos4560/DetectionGriet","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24669123669","text":"import time\r\nfrom multiprocessing import Process, Lock, Value, Event\r\nimport random\r\nimport os\r\ndef sell(lock,event,Num,total,totalVendidas):\r\n proc_id = os.getpid()\r\n event.wait()\r\n VendedorNum=random.randint(1,200)\r\n x=0\r\n cont=0\r\n while 
True:\r\n time.sleep(0.01)\r\n if total.value==0:\r\n break\r\n if random.randint(0,1)==1:\r\n cont+=1\r\n if Num==1:\r\n CompNum=cont\r\n else:\r\n CompNum=cont+(Num.value*5)\r\n x=random.randint(1,200)\r\n lock.acquire()\r\n if total.value-x >=0:\r\n totalVendidas.value+=x\r\n total.value-=x\r\n print(\"Soy el vendedor #\"\r\n +str(Num.value)+\" y le vendí: \"\r\n +str(x)+\" al comprador #\"\r\n +str(CompNum)+\" y ahora solo quedan \"\r\n +str(total.value)+\" leches. Total de leches vendidas son \"\r\n +str(totalVendidas.value))\r\n lock.release() #Release\r\n else:\r\n cont-=1\r\n lock.release() #Release\r\n continue\r\n if cont==5: #Release\r\n break\r\n\r\nif __name__ == '__main__':\r\n e=Event()\r\n totalInventario = Value('i', 1000)\r\n totalVendidas = Value('i', 0)\r\n Num = Value('i',0)\r\n lock = Lock()\r\n Num=Value('i',Num.value+1)\r\n Seller1 = Process(target=sell, args=(lock,e,Num,totalInventario,totalVendidas))\r\n Num=Value('i',Num.value+1)\r\n Seller2 = Process(target=sell, args=(lock,e,Num,totalInventario,totalVendidas))\r\n Num=Value('i',Num.value+1)\r\n Seller3 = Process(target=sell, args=(lock,e,Num,totalInventario,totalVendidas))\r\n Num=Value('i',Num.value+1)\r\n Seller4 = Process(target=sell, args=(lock,e,Num,totalInventario,totalVendidas))\r\n Num=Value('i',Num.value+1)\r\n Seller5 = Process(target=sell, args=(lock,e,Num,totalInventario,totalVendidas))\r\n\r\n Seller1.start()\r\n Seller2.start()\r\n Seller3.start()\r\n Seller4.start()\r\n Seller5.start()\r\n\r\n e.set()\r\n\r\n Seller1.join()\r\n Seller2.join()\r\n Seller3.join()\r\n Seller4.join()\r\n Seller5.join()\r\n\r\n print( \"\\|/ (__) \\n\"+\r\n \" `\\------(oo) La vaquita ya no tiene leche :( \\n\"+\r\n \" || (__)\\n\"+\r\n \" ||w--|| \\|/\\n\"+\r\n \" \\|/\\n\")\r\n","repo_name":"Erickln/miscellaneousProjects","sub_path":"milk_seller.py","file_name":"milk_seller.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41510804785","text":"import os\nimport sys\nimport trimesh\nimport numpy as np\nimport pandas as pd\n\nfrom scripts.helper import write_to_json\n\n\ndef main(num_chunks, chunk_idx):\n visited = set(os.listdir(scene_dir))\n chunk_size = int(np.ceil(len(obj_file_names) / num_chunks))\n obj_file_names_chunk = obj_file_names[chunk_idx * chunk_size: (chunk_idx + 1) * chunk_size]\n for file_name in obj_file_names_chunk:\n scene_file_name = file_name.split('.')[0] + '.json'\n if scene_file_name in visited:\n continue\n cat = df_metadata.loc[df_metadata['objectId'] == file_name.split('.')[0] + '.obj', 'mpcat40'].values[0]\n pc = np.load(os.path.join(obj_dir, file_name))\n centroid = np.mean(pc, axis=0)\n centroid = [float(e) for e in centroid]\n pc = trimesh.points.PointCloud(pc)\n try:\n vertices = pc.bounding_box_oriented.vertices.tolist()\n except Exception:\n vertices = pc.bounding_box.vertices.tolist()\n obbox = [centroid] + vertices\n scene = {'0': {'category': [cat], 'obbox': obbox}}\n write_to_json(scene, os.path.join(scene_dir, scene_file_name))\n\n\nif __name__ == '__main__':\n # define the paths\n mode = 'train'\n data_dir = '../data/shapenetsem'\n obj_dir = '/media/reza/Large/shapenetsem/objects_pc/{}'.format(mode)#os.path.join(data_dir, 'objects_pc', mode)\n obj_file_names = os.listdir(obj_dir)\n metadata_path = os.path.join(data_dir, 'metadata.csv')\n df_metadata = pd.read_csv(metadata_path)\n scene_dir = os.path.join(data_dir, 'scenes', mode)\n if not os.path.exists(scene_dir):\n try:\n 
os.makedirs(scene_dir)\n except FileExistsError:\n pass\n\n if len(sys.argv) == 1:\n main(1, 0)\n else:\n # To run in parallel you can use the command:\n # parallel -j5 \"python3 -u build_shapenetsem_scenes.py {1} {2}\" ::: 5 ::: 0 1 2 3 4\n main(int(sys.argv[1]), int(sys.argv[2]))\n","repo_name":"reza-asad/3DSSR","sub_path":"scripts/build_shapenetsem_scenes.py","file_name":"build_shapenetsem_scenes.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"22017340420","text":"\"\"\"Persisters for the MIMIC-III database.\n\n\"\"\"\n__author__ = 'Paul Landes'\n\nfrom typing import Tuple, Iterable, Optional, List\nfrom dataclasses import dataclass, field\nimport logging\nimport sys\nfrom itertools import chain\nfrom zensols.config import Settings\nfrom zensols.persist import persisted, ReadOnlyStash\nfrom zensols.db import DbPersister, DataClassDbPersister\nfrom zensols.nlp import FeatureDocument, FeatureDocumentParser\nfrom . import (\n MimicError, RecordNotFoundError,\n Admission, Patient, Diagnosis, Procedure, NoteEvent\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass AdmissionPersister(DataClassDbPersister):\n \"\"\"Manages instances of :class:`.Admission`.\n\n \"\"\"\n def __post_init__(self):\n self.bean_class = Admission\n super().__post_init__()\n\n def get_by_hadm_id(self, hadm_id: int) -> Admission:\n \"\"\"Return the admission by it's hospital admission ID.\"\"\"\n adm = self.execute_by_name(\n 'select_admission_by_hadm_id', params=(hadm_id,))\n if len(adm) == 0:\n raise RecordNotFoundError(self, 'hadm', hadm_id)\n if len(adm) > 1:\n raise MimicError('Found {len(adm)}>1 record(s) for hadm {hadm_id}')\n return adm[0]\n\n def get_hadm_ids(self, subject_id: int) -> Iterable[int]:\n \"\"\"Get all hospital admission IDs (``hadm_id``) for a patient.\"\"\"\n ids = self.execute_by_name(\n 'select_hadm_for_subject_id', params=(subject_id,),\n row_factory='tuple')\n return map(lambda x: x[0], ids)\n\n def get_by_subject_id(self, subject_id: int) -> Tuple[Admission]:\n \"\"\"Get an admissions by patient ID.\"\"\"\n return self.execute_by_name(\n 'select_admission_by_subject_id', params=(subject_id,))\n\n def get_admission_counts(self, limit: int = sys.maxsize) -> \\\n Tuple[Tuple[int, int]]:\n \"\"\"Return the counts of subjects for each hospital admission.\n\n :param limit: the limit on the return admission counts\n\n :return: a list of tuples, each in the form (``subject_id``, ``count``)\n\n \"\"\"\n return self.execute_by_name(\n 'select_admission_counts', params=(limit,),\n row_factory='tuple')\n\n def uniform_sample_hadm_ids(self, limit: int) -> Iterable[int]:\n \"\"\"Return a sample from the uniform distribution of admission IDs.\n\n \"\"\"\n return self.execute_by_name(\n 'random_hadm', params=(limit,), row_factory=lambda x: x)\n\n\n@dataclass\nclass PatientPersister(DataClassDbPersister):\n \"\"\"Manages instances of :class:`.Patient`.\n\n \"\"\"\n def __post_init__(self):\n self.bean_class = Patient\n super().__post_init__()\n\n def get_by_subject_id(self, subject_id: int) -> Patient:\n pat = self.execute_by_name(\n 'select_patient_by_subject_id', params=(subject_id,))\n assert len(pat) == 1\n return pat[0]\n\n\n@dataclass\nclass DiagnosisPersister(DataClassDbPersister):\n \"\"\"Manages instances of :class:`.Diagnosis`.\n\n \"\"\"\n def __post_init__(self):\n self.bean_class = Diagnosis\n super().__post_init__()\n\n def get_by_hadm_id(self, hadm_id: int) -> Diagnosis:\n 
\"\"\"Get ICD-9 diagnoses codes by hospital admission IDs.\n\n \"\"\"\n return self.execute_by_name(\n 'select_diagnosis_by_hadm_id', params=(hadm_id,))\n\n def get_heart_failure_hadm_ids(self) -> Tuple[int]:\n \"\"\"Return hospital admission IDs that are heart failure related.\n\n \"\"\"\n return tuple(map(lambda r: r[0],\n self.execute_by_name('select_heart_failure_hadm_id',\n row_factory='tuple')))\n\n\n@dataclass\nclass ProcedurePersister(DataClassDbPersister):\n \"\"\"Manages instances of :class:`.Procedure`.\n\n \"\"\"\n def __post_init__(self):\n self.bean_class = Procedure\n super().__post_init__()\n\n def get_by_hadm_id(self, hadm_id: int) -> Procedure:\n return self.execute_by_name(\n 'select_procedure_by_hadm_id', params=(hadm_id,))\n\n\n@dataclass\nclass NoteDocumentStash(ReadOnlyStash):\n \"\"\"Reads ``noteevents`` from the database and returns parsed documents.\n\n \"\"\"\n doc_parser: FeatureDocumentParser = field(default=None)\n \"\"\"NER+L medical domain natural langauge parser.\"\"\"\n\n note_db_persister: DbPersister = field(default=None)\n \"\"\"Fetches the note text by key from the DB.\"\"\"\n\n def load(self, row_id: str) -> FeatureDocument:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'loading row ID {row_id}')\n text = self.note_db_persister.execute_by_name(\n 'select_note_text_by_id', params=(row_id,), row_factory='tuple')\n # many notes have trailing newlines, which may cause issues with spaCy\n # or downstream prediction tasks\n return self.doc_parser(text[0][0].strip())\n\n def keys(self) -> Iterable[str]:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('returning note all DB keys')\n return map(lambda x: str(x[0]),\n self.note_db_persister.execute_by_name(\n 'select_keys', row_factory='tuple'))\n\n def exists(self, name: str) -> bool:\n res = self.note_db_persister.execute_by_name(\n 'select_hadm_id_by_row_id', params=(name,), row_factory='tuple')\n return len(res) > 0\n\n\n@dataclass\nclass NoteEventPersister(DataClassDbPersister):\n \"\"\"Manages instances of :class:`.NoteEvent`.\n\n \"\"\"\n mimic_note_context: Settings = field(default=None)\n \"\"\"Contains resources needed by new and re-hydrated notes, such as the\n document stash.\n\n \"\"\"\n def __post_init__(self):\n self.bean_class = NoteEvent\n super().__post_init__()\n self.row_factory = self._create_bean\n\n def _create_bean(self, *args):\n return NoteEvent(*args, context=self.mimic_note_context)\n\n @property\n @persisted('_categories', cache_global=True)\n def categories(self) -> Tuple[str]:\n \"\"\"All unique categories.\"\"\"\n cats = self.execute_by_name('categories', row_factory='tuple')\n return tuple(map(lambda x: x[0], cats))\n\n def get_note_count(self, hadm_id: int) -> int:\n \"\"\"Return the count of notes for a hospital admission.\n\n :param hadm_id: the hospital admission ID\n\n \"\"\"\n return self.execute_by_name(\n 'select_note_count', params=(hadm_id,), row_factory='tuple')[0][0]\n\n def get_note_counts_by_subject_id(self, subject_id: int) -> \\\n Tuple[Tuple[int, int]]:\n \"\"\"Get counts of notes related to a subject.\n\n :param subject_id: the patient's ID\n\n :return: tuple of (``hadm_id``, ``count``) pairs for a subject\n\n \"\"\"\n return self.execute_by_name(\n 'select_note_count_by_subject_id', params=(subject_id,),\n row_factory='tuple')\n\n def get_row_ids_by_hadm_id(self, hadm_id: int) -> Tuple[int]:\n \"\"\"Return all note row IDs for a admission ID.\"\"\"\n return tuple(chain.from_iterable(\n self.execute_by_name(\n 'select_row_ids_by_hadm_id', 
params=(hadm_id,),\n row_factory='identity')))\n\n def get_notes_by_hadm_id(self, hadm_id: int) -> Tuple[NoteEvent]:\n \"\"\"Return notes by hospital admission ID.\n\n :param hadm_id: the hospital admission ID\n\n \"\"\"\n return self.execute_by_name(\n 'select_notes_by_hadm_id', params=(hadm_id,))\n\n def get_hadm_id(self, row_id: int) -> Optional[int]:\n \"\"\"Return the hospital admission for a note.\n\n :param row_id: the unique ID of the note event\n\n :return: the hospital admission unique ID ``hadm_id`` if ``row_id`` is\n in the database\n\n \"\"\"\n maybe_row: List[int] = self.execute_by_name(\n 'select_hadm_id_by_row_id', params=(row_id,),\n row_factory=lambda x: x)\n if len(maybe_row) > 0:\n return maybe_row[0]\n\n def get_hadm_ids(self, row_ids: Iterable[int]) -> Iterable[int]:\n \"\"\"Return the hospital admission for a set of note.\n\n :param row_id: the unique IDs of the note events\n\n :return: the hospital admission admissions unique ID ``hadm_id``\n\n \"\"\"\n return map(self.get_hadm_id, row_ids)\n\n def get_notes_by_category(self, category: str,\n limit: int = sys.maxsize) -> Tuple[NoteEvent]:\n \"\"\"Return notes by what the category to which they belong.\n\n :param category: the category of the note (i.e. ``Radiology``)\n\n :param limit: the limit of notes to return\n\n \"\"\"\n return self.execute_by_name(\n 'select_notes_by_category', params=(category, limit))\n\n def get_discharge_reports(self, limit: int = sys.maxsize) -> \\\n Tuple[NoteEvent]:\n \"\"\"Return discharge reports (as apposed to addendums).\n\n :param limit: the limit of notes to return\n\n \"\"\"\n return self.execute_by_name('select_discharge_reports', params=[limit])\n","repo_name":"plandes/mimic","sub_path":"src/python/zensols/mimic/persist.py","file_name":"persist.py","file_ext":"py","file_size_in_byte":9038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"5231896519","text":"def serialize(obj):\n \"\"\"\n Generalized function to turn cengine models into a serializable and\n representable format\n\n :param obj: The object to serialize\n :return result: The converted object\n \"\"\"\n # For a list, convert each element and return another list\n if isinstance(obj, list):\n result = []\n for x in obj:\n if hasattr(x, 'to_serial'):\n v = x.to_serial()\n else:\n v = serialize(x)\n if v is not None:\n result.append(v)\n\n return result\n\n # For a dict, convert each value and save another dict with the same keys\n elif isinstance(obj, dict):\n result = {}\n for x, y in obj.items():\n if hasattr(y, 'to_serial'):\n v = y.to_serial()\n else:\n v = serialize(y)\n\n if v is not None:\n result.update({x: v})\n return result\n\n # For cengine models, get the attributes and convert its values\n elif hasattr(obj, 'get_attributes'):\n result = {}\n for attr in obj.get_attributes():\n value = getattr(obj, attr)\n\n if hasattr(value, 'to_serial'):\n v = value.to_serial()\n else:\n v = serialize(value)\n\n if v is not None:\n result.update({attr: v})\n return result\n\n # The remaining data structures\n else:\n return obj\n","repo_name":"zenml-io/cengine","sub_path":"cengine/utils/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"13387602531","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n# -*- coding: utf-8 -*-\n\n# Run this app with `python app.py` and\n# 
visit http://127.0.0.1:8050/ in your web browser.\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport pandas as pd\n\nimport plotly.express as px\nfrom urllib.request import urlopen\nimport json\n\nimport plotly.graph_objects as go\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncolors = {\n 'background': '#F0F8FF',\n 'text': '#00008B'\n}\n\n# assume you have a \"long-form\" data frame\n# see https://plotly.com/python/px-arguments/ for more options\ndf = pd.read_csv(\"time_series_plotly.csv\")\n\nfig = px.scatter(df, x='Date', y='Case Count', color='County')\n\nfig.update_layout(\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text']\n)\n\ndf2 = pd.read_csv(\"geo_cases_plotly.csv\")\n\ndf3 = pd.read_csv(\"bubble_geo_cases_plotly.csv\")\n\ncities_for_map = pd.read_csv(\"cities_for_geo_map.csv\")\n\nmax_cases = df2[\"Cases per 100000 population\"].max()\n\nwith urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:\n counties = json.load(response)\n \nfig2 = px.choropleth(df2, geojson=counties, locations='FIPS #', color='Cases per 100000 population',\n color_continuous_scale=\"Reds\",\n hover_name = \"County Name\",\n range_color=(0, max_cases),\n scope = \"usa\",\n title = \"Cases per 100000 population over past 7 days\",\n labels={\"FIPS #\": \"FIPS\", \"Cases per 100000 population\": \"Cases per 100000 pop\"}\n )\n \nfig2.update_layout(title_text='Cases per 100000 population over past 7 days', title_x=0.5)\n\n# to show texas cities on map\nfig2.add_trace(go.Scattergeo(\n locationmode = 'USA-states',\n lon = cities_for_map['lng'],\n lat = cities_for_map['lat'],\n hoverinfo = 'text',\n text = cities_for_map['city'],\n mode = 'markers',\n marker = dict(\n size = 4,\n color = 'rgb(102,102,102)',\n line = dict(\n width = 3,\n color = 'rgba(68, 68, 68, 0)'\n )\n )))\n\nfig2.update_geos(fitbounds=\"locations\")\nfig2.update_layout(\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text']\n)\n\nfig3 = px.choropleth(df3, geojson=counties, locations='FIPS #',\n hover_name = \"County\",\n scope = \"usa\",\n title = \"Total Cases\",\n )\n\ncolors_fig3 = ['rgb(189,215,231)','rgb(107,174,214)','rgb(33,113,181)','rgb(239,243,255)']\nmonths = {5: 'May', 6:'June',7:'July',8:'Aug'}\n\n#plot each bubble month cases for each county\nfor i in range(5,9)[::-1]:\n mask = df3[\"month\"] == i\n df_month = df3[mask]\n #print(df_month)\n fig3.add_trace(go.Scattergeo(\n locationmode = 'USA-states',\n lon = df_month['X (Lat)'],\n lat = df_month['Y (Long)'],\n text = df_month[['County','Case Count']],\n name = months[i],\n mode = 'markers',\n marker = dict(\n size = df_month['Case Count'],\n color = colors_fig3[i-6],\n line_width = 0,\n sizeref = 9,\n sizemode = \"area\",\n reversescale = True\n )))\n \n# to show texas cities on map\nfig3.add_trace(go.Scattergeo(\n locationmode = 'USA-states',\n lon = cities_for_map['lng'],\n lat = cities_for_map['lat'],\n hoverinfo = 'text',\n text = cities_for_map['city'],\n name = \"Major Cities\",\n mode = 'markers',\n marker = dict(\n size = 4,\n color = 'rgb(102,102,102)',\n line = dict(\n width = 3,\n color = 'rgba(68, 68, 68, 0)'\n )\n )))\n\nfig3.update_geos(fitbounds=\"locations\")\nfig3.update_layout(title_text='Total Cases per month for last 4 months', 
title_x=0.5)\n\nfig3.update_layout(\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text']\n)\n\nmarkdown_text = '''\n### Texas COVID-19 Dashboard\n\nCreator: Truett Bloxsom, [LinkedIn](https://www.linkedin.com/in/truett-bloxsom/), [github](https://github.com/tsbloxsom) \n\nThis is my first interactive dashboard using Dash! Hope you like it!\n\nIf you would like to read about how I created this I wrote a medium [article] (https://towardsdatascience.com/creating-and-automating-an-interactive-dashboard-using-python-5d9dfa170206)\n\nThis first plot is Texas COVID-19 accumulated cases by county over time\n\nThis plot is interactive, so you can double click one county to look at it individually\n\nSource for data: [dshs.texas.gov](https://www.dshs.texas.gov/coronavirus/additionaldata/)\n\n'''\n\nmarkdown_text_geo = '''The plot below shows COVID-19 case counts per 100,000 residents for each county in the past 7 days\n\nThe major cities are plotted in grey and you must zoom in to interact with the counites in and around these cities\n\nPopulation of counties is based on 2010 Texas census data, and I will be writing a medium article about this plot soon!\n\n'''\n\napp.layout = html.Div([\n dcc.Markdown(children=markdown_text,\n style={\n 'backgroundColor': colors['background'],\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n \n dcc.Graph(\n id='example-graph',\n figure=fig\n ),\n \n dcc.Markdown(children=markdown_text_geo,\n style={\n 'backgroundColor': colors['background'],\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n\n dcc.Graph(\n id = \"graph2\",\n figure = fig2),\n\n dcc.Graph(\n id = \"graph3\",\n figure = fig3)\n \n])\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"tsbloxsom/Texas-census-county-data-project","sub_path":"Automate collecting of data notebooks/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"32664078045","text":"from django.db import transaction\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import RetrieveModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom cobra.project.api.exceptions import (\n InvitationHasExpired,\n InvitationIsNotPending,\n UserIsAlreadyMember,\n)\nfrom cobra.project.api.filters import IsInviterOrInvitedUserFilterBackend\nfrom cobra.project.api.permissions import IsInvitedUser\nfrom cobra.project.api.serializers.invitation import ProjectInvitationSerializer\nfrom cobra.project.models import ProjectInvitation, ProjectMembership\nfrom cobra.project.utils.models import ACCEPTED, REJECTED\n\n\nclass ProjectInvitationViewSet(RetrieveModelMixin, GenericViewSet):\n lookup_field = \"id\"\n queryset = ProjectInvitation.objects.all()\n serializer_class = ProjectInvitationSerializer\n permission_classes = [IsAuthenticated]\n filter_backends = [IsInviterOrInvitedUserFilterBackend]\n\n @action(detail=True, methods=[\"post\"], permission_classes=[IsInvitedUser])\n def accept(self, *args, **kwargs):\n invitation: ProjectInvitation = self.get_object()\n if not invitation.is_pending:\n raise InvitationIsNotPending()\n if invitation.is_expired:\n raise InvitationHasExpired()\n if ProjectMembership.objects.filter(\n user__pk=invitation.user.pk, 
project__pk=invitation.project.pk\n ).exists():\n raise UserIsAlreadyMember()\n with transaction.atomic():\n ProjectMembership.objects.create(\n user=invitation.user, project=invitation.project\n )\n invitation.status = ACCEPTED\n invitation.save(update_fields=[\"status\"])\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"post\"], permission_classes=[IsInvitedUser])\n def reject(self, *args, **kwargs):\n invitation: ProjectInvitation = self.get_object()\n if not invitation.is_pending:\n raise InvitationIsNotPending()\n if invitation.is_expired:\n raise InvitationHasExpired()\n if ProjectMembership.objects.filter(\n user__pk=invitation.user.pk, project__pk=invitation.project.pk\n ).exists():\n raise UserIsAlreadyMember()\n invitation.status = REJECTED\n invitation.save(update_fields=[\"status\"])\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"vladfedoriuk/cobra-backend","sub_path":"cobra/project/api/views/invitation.py","file_name":"invitation.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"69940845743","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nMIN_MATCH_COUNT = 10\n\nimg1 = cv2.imread('./normal.png',0) # queryImage\nimg2 = cv2.imread('./ir.png',0) # trainImage\n\nakaze = cv2.AKAZE_create()\n\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = akaze.detectAndCompute(img1,None)\nkp2, des2 = akaze.detectAndCompute(img2,None)\n\nbf = cv2.BFMatcher()\n# 特徴量ベクトル同士をBrute-Force&KNNでマッチング\nmatches = bf.knnMatch(des1, des2, k=2)\n\n# データを間引きする\nratio = 0.5\ngood = []\nfor m, n in matches:\n if m.distance < ratio * n.distance:\n good.append([m])\n\n# 特徴量をマッチング状況に応じてソートする\ngood = sorted(matches, key = lambda x : x[1].distance)\n\nsrc_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good ]).reshape(-1,1,2)\ndst_pts = np.float32([ kp2[m[0].trainIdx].pt for m in good ]).reshape(-1,1,2)\n\nM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\nE = np.eye(3)\ndst = cv2.warpPerspective(img1,M,(640,480))\n\n# 画像表示\ncv2.imshow('img', dst)\n\n# キー押下で終了\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"doukeshiDONALDO/hoge","sub_path":"imaging/find_homography.py","file_name":"find_homography.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"39770702802","text":"from Backend.DataTypes.Emails.Email import Email\nfrom Backend.DataTypes.Status import Status\nfrom Backend.GraphQL.shared import mutation, get_applicant_document, get_batch_document\nfrom graphql import GraphQLError\nfrom Backend.GraphQL.mutations.emailMutation import mutation_email\n\ndef convert_applicant_to_participant(applicant_id, batch_id, application_details):\n batch_doc = get_batch_document(batch_id)\n participants = batch_doc.collection('participants')\n participants_doc = participants.document(str(applicant_id))\n participants_doc.set(application_details)\n\n@mutation.field(\"saveForm\")\ndef save_form(_, info, applicant_id, batch_id, location, streetNumber, addressSuffix, postcode, city, country, accountHolder, bankName, iban, bic, shirtSize, shirtStyle, foodIntolerances):\n try:\n application, application_details = get_applicant_document(batch_id, applicant_id)\n except Exception as err:\n return GraphQLError(message=err.__str__())\n\n if(application):\n acceptanceFormData = {\n 
'location': location,\n 'streetNumber': streetNumber,\n 'addressSuffix': addressSuffix,\n 'postcode': postcode,\n 'city': city,\n 'country': country,\n 'accountHolder': accountHolder,\n 'bankName': bankName,\n 'iban': iban,\n 'bic': bic,\n 'shirtSize': shirtSize,\n 'shirtStyle': shirtStyle,\n 'foodIntolerances': foodIntolerances\n }\n application.set({\"acceptanceFormData\": acceptanceFormData}, merge=True)\n info.context[\"user\"] = True\n mutation_email(_, info, 'sendFormConfirmation', applicant_id, batch_id)\n mutation_email(_, info, 'sendDocuments', applicant_id, batch_id)\n mutation_email(_, info, 'sendAgreements', applicant_id, batch_id)\n convert_applicant_to_participant(applicant_id, batch_id, application_details)\n return Status(0, 'Form was succesfully saved & Documents were sent')\n","repo_name":"DigitalProductschool/rt-backend","sub_path":"Backend/GraphQL/mutations/formMutation.py","file_name":"formMutation.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"29303603612","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 1 13:47:17 2020\n\n@author: eoncis\n\"\"\"\nimport os\nimport numpy\nimport pdb\nfrom netCDF4 import Dataset\nimport pdb\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\nimport requests\n\nCHUNK = 1024 * 1024 * 8 # 1 MB\nclass RemoteFileException(Exception):\n pass\n\ndef open_netcdf4(filename):\n \"\"\"\n Open data file in netCDF4 format.\n\n Args:\n filename: file to open\n\n Returns:\n rootgrp: data reference to variables stored in data file.\n\n Raises:\n IOError: if file does not exist at the expected path\n \"\"\"\n if not os.path.isfile(filename):\n raise IOError('Data file not at path: {0}'.format(filename))\n\n rootgrp = Dataset(filename, \"r\", format=\"NETCDF4\")\n return rootgrp\n\ndef choose_points(lat, lon, buoy_lat, buoy_lon, flat=False):\n \"\"\"\n Choose the four closest NARR or MERRA points to a lat/lon position.\n\n Args:\n lat, lon: numpy arrays, 2d\n buoy_lat, buoy_lon: these is the point to get close to\n\n Returns:\n chosen indices, coordinates of the 4 closest points (euclidean)\n \"\"\"\n \n distances = (lat - buoy_lat)**2 + (lon - buoy_lon)**2\n dist_args_sorted = numpy.argsort(distances.flatten())\n\n chosen_idxs = dist_args_sorted[0:4]\n chosen_idxs = numpy.unravel_index(chosen_idxs, lat.shape)\n\n coordinates = list(zip(lat[chosen_idxs], lon[chosen_idxs]))\n\n return chosen_idxs, coordinates\n\ndef distance(p1, p2):\n\t\"\"\" Euclidean Distance of 2 iterables \"\"\"\n\tx1, y1 = p1\n\tx2, y2 = p2\n\treturn numpy.sqrt((x1-x2)**2+(y1-y2)**2)\n\ndef idw(samples, locations, point, power=2):\n\t\"\"\" Shepard's Method (inverse distance weighting interpolation)\n\t\n\tArgs::\n\t\tsamples: data to be interpolated, of length n\n\t\tlocations: locations of the data, shape 2, n\n\t\tpoint: point to interpolate too, shape 2\n\t\tpower: integer, arbitary\n\n\tReturns:\n\t\tweighted_samples: samples, weighted by the weights\n\n\tNotes::\n\t\tfrom A NOVEL CONFIDENCE METRIC APPROACH FOR A LANDSAT LAND SURFACE\n\t\tTEMPERATURE PRODUCT, Monica J. Cook and Dr. John R. 
Schott\n\t\"\"\"\n\tdistances = numpy.asarray([distance(i, point) for i in locations])\n\t\n\tweights = distances ** -power\n\tweights /= weights.sum() # normalize to 1\n\n\tweighted_samples = [weights[i] * samples[i] for i in range(len(locations))]\n\n\treturn sum(weighted_samples)\n\ndef download_ftp(url, filepath):\n \"\"\" download an FTP resource. \"\"\"\n \n total_size = 0\n \n try:\n request = urllib.request.urlopen(url)\n total_size = int(request.getheader('Content-Length').strip())\n except urllib.error.URLError as e:\n print(url)\n error_string = '\\n url: {0} does not exist, trying other sources\\n'.format(url)\n \n raise RemoteFileException(error_string)\n \n downloaded = 0\n filename = filepath[len(filepath) - filepath[::-1].index('/'):]\n\n with open(filepath, 'wb') as fileobj: \n while True:\n output_string = \" Downloading %s - %.1fMB of %.1fMB\\r\" % (filename, (downloaded / 1000000), (total_size / 1000000))\n \n print(output_string)\n \n chunk = request.read(CHUNK)\n if not chunk:\n break\n fileobj.write(chunk)\n downloaded += len(chunk)\n\n output_string = \"\\n Download completed...\"\n print(output_string)\n \n\n return filepath\n\ndef download_http(url, filepath, auth=None):\n \"\"\" download a http or https resource using requests. \"\"\"\n \n with requests.Session() as session:\n output_string = \"\\n Opening session to %s\" % (url[:url.find('/', 9, len(url))])\n print(output_string)\n \n req = session.request('get', url)\n\n if auth:\n resource = session.get(req.url, auth=auth)\n \n if resource.status_code != 200:\n error_string = '\\n url: {0} does not exist, trying other sources\\n'.format(url)\n \n raise RemoteFileException(error_string)\n \n else:\n output_string = \"\\n Session opened successfully\"\n print(output_string)\n \n else:\n resource = session.get(req.url)\n \n if resource.status_code != 200:\n error_string = '\\n url: {0} does not exist, trying other sources\\n'.format(url)\n \n raise RemoteFileException(error_string)\n \n else:\n output_string = \"\\n Session opened successfully\"\n print(output_string)\n \n with open(filepath, 'wb') as f:\n output_string = \"\\n Downloading %s \" % (filepath[filepath.rfind('/') + 1:]) \n print(output_string)\n \n f.write(resource.content)\n \n output_string = \"\\n Download completed...\"\n print(output_string)\n\n return filepath\n\ndef url_download(url, out_dir, _filename=None, auth=None):\n \"\"\" download a file (ftp or http), optional auth in (user, pass) format \"\"\"\n\n out_dir = 'output_data/' + out_dir + '/geos'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n filename = _filename if _filename else url.split('/')[-1]\n filepath = os.path.join(out_dir, filename)\n\n if os.path.isfile(filepath):\n return filepath\n \n if url[0:3] == 'ftp':\n download_ftp(url, filepath)\n else:\n download_http(url, filepath, auth)\n\n return filepath\n\ndef geos_closest_time(date):\n \n d_int = date.hour + date.minute/60\n d = numpy.asarray([0, 3, 6, 9, 12, 15, 18, 21]).astype(int)\n t1, t2 = sorted(abs(d - d_int).argsort()[:2])\n d1 = d[t1]\n d2 = d[t2]\n \n return d1,d2\n\ndef geos_interp_time(date, a1, a2, t1, t2):\n \"\"\" linear interp.\n Args:\n date: Python datetime object\n a1, a2: 2 numpy arrays, same dimensions as each other and output\n \"\"\"\n hour = date.hour\n minute = date.minute\n second = date.second\n\n # round to nearest minute\n if second > 30: minute = minute + 1\n\n # convert hour-min acquisition time to decimal time\n time = hour + minute / 60.0\n\n # interpolate in time\n a = a1 + (time 
- t1) * ((a2 - a1)/(t2 - t1))\n\n return a\n\ndef calculate_dew_point(RH,T):\n \"\"\"\n convert RH to Dew Point(Approx of Clausius - Clapeyron equation)\n :param RH: relative humidity\n :param T: air temperature\n :return: DP: the dew point\n \"\"\"\n B = (numpy.log(RH) + ((17.27 * (T - 273.15)) / (237.3 + T - 273.15))) / 17.27\n DP = (237.3 * B) / (1 - B) + 273.15\n return DP\n\ndef geos_download(date,d1,d2,out_dir):\n \"\"\"\n Download GEOS data via ftp.\n\n Args:\n cc: CalibrationController object\n\n Returns:\n None\n \"\"\"\n \n geos_url = 'https://portal.nccs.nasa.gov/datashare/gmao/geos-fp/das/Y%s/M%s/D%s/GEOS.fp.asm.inst3_3d_asm_Np.%s_%02d00.V01.nc4'\n \n url1 = geos_url % (date.strftime('%Y'),\n date.strftime('%m'),\n date.strftime('%d'),\n date.strftime('%Y%m%d'),\n d1)\n \n url2 = geos_url % (date.strftime('%Y'),\n date.strftime('%m'),\n date.strftime('%d'),\n date.strftime('%Y%m%d'),\n d2)\n \n filename1 = url_download(url1,out_dir)\n filename2 = url_download(url2,out_dir)\n \n return filename1,filename2\n\ndef geos_process(date, lat_oi, lon_oi, out_dir, grnd_alt = 0, max_alt = 100):\n \"\"\"\n process atmospheric data, yield an atmosphere\n \"\"\"\n \n d1,d2 = geos_closest_time(date)\n filename1,filename2 = geos_download(date,d1,d2,out_dir)\n\n atmo_data1 = open_netcdf4(filename1)\n atmo_data2 = open_netcdf4(filename2)\n press = numpy.array(atmo_data1.variables['lev'][:])\n \n # choose points\n lat = atmo_data1.variables['lat'][:]\n lon = atmo_data1.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n index1 = (0, slice(None), latidx, lonidx)\n \n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data1.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data2.variables['T'][index1], axis1=1, axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data1.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data2.variables['RH'][index1], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data1.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data2.variables['H'][index1], axis1=1, axis2=2).T / 1000.0\n \n\n # interpolate in time, now they are shape (4, N)\n t = geos_interp_time(date, temp1, temp2, d1,d2)\n h = geos_interp_time(date, height1, height2,d1,d2)\n rh = geos_interp_time(date, rhum1, rhum2,d1,d2)\n \n # interpolate in space, now they are shape (1, N)\n height = idw(h, data_coor, [lat_oi, lon_oi])\n temp = idw(t, data_coor, [lat_oi, lon_oi])\n relhum = idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2,])\n \n \n height = height[nr_of_nans:]\n mask = numpy.where((numpy.array(height) >= grnd_alt) & (numpy.array(height) <= max_alt))\n height = height[mask]\n temp = temp[nr_of_nans:]\n temp = temp[mask]\n relhum = relhum[nr_of_nans:]\n relhum = relhum[mask]\n press = press[nr_of_nans:]\n press = press[mask]\n dewtemp = calculate_dew_point(relhum,temp)\n \n profile = list([height,press,temp,dewtemp])\n \n return 
profile","repo_name":"garnyt/thermal","sub_path":"other/SC_transmission/get_geos.py","file_name":"get_geos.py","file_ext":"py","file_size_in_byte":10077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7571311504","text":"import json\n\nwith open('final_ingredients.json') as data_file:\n data = json.load(data_file)\n\nfiltered = [ing for ing in data if 'name' in ing]\n\n\ndef keyword_in_related(keyword, related_ingredients):\n for ingredient in related_ingredients:\n if keyword in ingredient['name'].lower():\n return True\n return False\n\n\ndef keyword_in_name(ingredient, keyword):\n if keyword in ingredient['name'].lower():\n return True\n return keyword_in_related(keyword, ingredient['relatedIngredients'])\n\n\ndef ingredients_with_keyword(ingredients, keyword):\n return [ingredient for ingredient in ingredients if keyword_in_name(ingredient, keyword)]\n\n\ndef get_all_with_keyword(keyword):\n return ingredients_with_keyword(filtered, keyword)\n\n\ndef get_with_keywords(words):\n all_ingredients = list()\n for word in words:\n all_ingredients = all_ingredients + get_all_with_keyword(word)\n return all_ingredients\n\n\ndef add_color(ingredients, color):\n for ing in ingredients:\n ing.update({'color': color})\n return ingredients\n\n\ndef add_category(ingredients, category):\n for ing in ingredients:\n ing.update({'category' : category})\n return ingredients\n\n\ndef remove_duplicates_by_id(ingredients):\n seen = set()\n return [x for x in ingredients if x['id'] not in seen and not seen.add(x['id'])]\n\n\ndef make_final_dict(baseid, category, color, ingredient):\n return {'Name' : ingredient['name'], 'ID' : ingredient['id'], 'Color' : color, 'Category' : category, 'BaseID' : baseid}\n\n\ndef convert_to_final(base_ingredient):\n if 'color' in base_ingredient:\n return [{'Name' : base_ingredient['name'], 'ID' : base_ingredient['id'], 'Color' : base_ingredient['color'], 'Category' : base_ingredient['category'], 'BaseID' : base_ingredient['id']}] +\\\n [make_final_dict(base_ingredient['id'], base_ingredient['category'], base_ingredient['color'], related) for related in base_ingredient['relatedIngredients']]\n return [{'Name' : base_ingredient['name'], 'ID' : base_ingredient['id'], 'Category' : base_ingredient['category'], 'BaseID' : base_ingredient['id']}] + [make_final_dict(base_ingredient['id'], base_ingredient['category'], \"\", related) for related in base_ingredient['relatedIngredients']]\n\n\nneutral_spirits = get_with_keywords(['gin', 'vodka', 'jenever'])\nneutral_spirits = add_color(neutral_spirits, 'light blue')\n\nliqueurs_and_schnapps = get_with_keywords(['liqueur', 'schnapps', 'amaro', 'picon', 'chartreuse', 'licor'])\nliqueurs_and_schnapps = add_color(liqueurs_and_schnapps, 'tan')\n\nwhiskies = get_with_keywords(['whiskey', 'scotch', 'bourbon'])\nwhiskies = add_color(whiskies, 'brown')\n\nrum = get_all_with_keyword('rum')\nrum = add_color(rum, 'brown')\n\nwine = get_with_keywords(['wine', 'sherry', 'lillet', 'mad dog'])\nwine = add_color(wine, 'purple')\n\nbrandy = get_all_with_keyword('brandy')\nbrandy = add_color(brandy, 'brown')\n\nbeer = [ing for ing in filtered if ('beer' in ing['name'].lower() and ('root' not in ing['name'].lower() and 'ginger' not in ing['name'].lower() and ing not in liqueurs_and_schnapps))]\nbeer = add_color(beer, 'brown')\n\ncachaca = get_all_with_keyword('cachaca')\ncachaca = add_color(cachaca, 'light blue')\n\npisco = get_all_with_keyword('pisco')\npisco = add_color(pisco, 'light 
blue')\n\nfernet = get_all_with_keyword('fernet')\nfernet = add_color(fernet, 'purple')\n\naquavit = get_all_with_keyword('aquavit')\naquavit = add_color(aquavit, 'tan')\n\nchampagne = [ing for ing in filtered if ('champagne' in ing['name'].lower() and 'soda' not in ing['name'].lower())]\nchampagne = add_color(champagne, 'yellow')\n\narrack = get_all_with_keyword('arrack')\narrack = add_color(arrack, 'brown')\n\njapanese_drinks = get_with_keywords(['shochu', 'sake'])\njapanese_drinks = add_color(japanese_drinks, 'light blue')\n\ntequila = get_with_keywords(['tequila', 'mezcal'])\ntequila = add_color(tequila, 'light blue')\n\nport = get_all_with_keyword('port')\nport = add_color(port, 'purple')\n\nirish_mist = get_all_with_keyword('mist')\nirish_mist = add_color(irish_mist, 'brown')\n\nfirewater = get_with_keywords(['firewater'])\nfirewater = add_color(firewater, 'brown')\n\nabsinthe = get_with_keywords(['absinthe'])\nabsinthe = add_color(absinthe, 'green')\n\nmandarin_napoleon = get_all_with_keyword('napoleon')\nmandarin_napoleon = add_color(mandarin_napoleon, 'brown')\n\nhard_lemonade = get_all_with_keyword('hard lemonade')\nhard_lemonade = add_color(hard_lemonade, 'yellow')\n\ncider = get_with_keywords(['hard cider', 'cherry cider', 'strongbow cider'])\ncider = add_color(cider, 'red')\n\nzima = get_all_with_keyword('zima')\nzima = add_color(zima, 'tan')\n\ntaboo = get_all_with_keyword('taboo')\ntaboo = add_color(taboo, 'brown')\n\nvermouth = get_all_with_keyword('vermouth')\nvermouth = add_color(vermouth, 'tan')\n\njager = get_all_with_keyword('jagermeister')\njager = add_color(jager, 'dark brown')\n\nalcohols = rum + whiskies + liqueurs_and_schnapps + neutral_spirits + wine + brandy + beer + cachaca + pisco + fernet + \\\n aquavit + champagne + arrack + japanese_drinks + tequila + port + firewater + absinthe + mandarin_napoleon + cider + irish_mist + hard_lemonade + zima + taboo + vermouth + jager\n\n\nsoda = get_with_keywords(['soda', 'coke', 'grenadine'])\nsoda = add_color(soda, 'dark red')\n\nwater = get_all_with_keyword('water')\nwater = add_color(water, 'light blue')\n\nkool_aide = get_with_keywords(['aide', 'aid'])\nkool_aide = add_color(kool_aide, 'red')\n\njuice = [ing for ing in get_with_keywords(['juice']) if ing not in alcohols]\njuice = add_color(juice, 'purple')\n\nmixes = get_all_with_keyword('mix')\nmixes = add_color(mixes, 'yellow')\n\nmilk = get_with_keywords(['milk', 'half and half'])\nmilk = add_color(milk, 'white')\n\nnectar = get_with_keywords(['nectar'])\nnectar = add_color(nectar, 'orange')\n\nhot_chocolate = get_all_with_keyword('hot chocolate')\nhot_chocolate = add_color(hot_chocolate, 'creamy brown')\n\ncream = get_with_keywords(['heavy cream', 'light cream'])\ncream = add_color(cream, 'white')\n\nginger_shit = get_with_keywords(['ginger ale', 'ginger beer'])\nginger_shit = add_color(ginger_shit, 'yellow')\n\ntea = [ing for ing in get_with_keywords(['tea', 'snapple']) if (ing not in alcohols and ing not in mixes)]\ntea = add_color(tea, 'brown')\n\nsoft_cider = [ing for ing in get_all_with_keyword('cider') if ing not in cider]\nsoft_cider = add_color(soft_cider, 'brown')\n\nlemonade = [ing for ing in get_with_keywords(['lemonade', 'limeade']) if ing not in alcohols]\nlemonade = add_color(lemonade, 'yellow')\n\nmello_yello = get_all_with_keyword('mello')\nmello_yello = add_color(mello_yello, 'yellow')\n\nsprite = get_all_with_keyword('sprite')\nsprite = add_color(sprite, 'light blue')\n\ntonic = get_all_with_keyword('tonic')\ntonic = add_color(tonic, 'light 
blue')\n\nmixers = soda + kool_aide + juice + mixes + milk + nectar + cream + hot_chocolate + ginger_shit + tea + soft_cider + lemonade + mello_yello + tonic + sprite + water\nmixers = [ing for ing in mixers if ing not in alcohols]\n\nothers = [ing for ing in filtered if (ing not in mixers and ing not in alcohols)]\n\nprint (alcohols)\nprint (len(alcohols))\nprint (len(mixers))\nprint(len(others))\nprint(len(filtered) - len(mixers + alcohols + others))\n\nmixers = remove_duplicates_by_id(mixers)\nalcohols = remove_duplicates_by_id(alcohols)\nothers = remove_duplicates_by_id(others)\n\nalcohols = add_category(alcohols, 0)\nmixers = add_category(mixers, 1)\nothers = add_category(others, 2)\n\nall = alcohols + mixers + others\nfor i in range(0, len(all)):\n all[i].update({'id': i})\nfinal_list = list()\nfor item in all:\n final_list = final_list + convert_to_final(item)\nfor i in range(0, len(final_list)):\n final_list[i].update({'ID' : i})\n\nbase_ingredients = [{'ID' : ing['id'], 'Name' : ing['name'], 'Category' : ing['category']} for ing in all]\n\nprint(all)\nprint(final_list)\nprint (len(all))\n\n\nprint(len(filtered) - len(mixers + alcohols + others))\n\nwith open('classified-ingredients.json', 'w') as outfile:\n json.dump(final_list, outfile)\n\nwith open('base-ingredients.json', 'w') as basefile:\n json.dump(base_ingredients, basefile)","repo_name":"bsomes/drink-roulette-ingredient-classifier","sub_path":"IngredientClassifier/ingredient_classification.py","file_name":"ingredient_classification.py","file_ext":"py","file_size_in_byte":8082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72582590383","text":"from fastapi import APIRouter, HTTPException, status, Depends\n\nimport models\nfrom dependencies.dependency import get_current_user\n\nrouter = APIRouter(\n prefix=\"/data/{data_id}/share\",\n tags=[\"data\"],\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\n# @router.get('/')\n# async def get_shared_users(\n# data_id: int,\n# user: models.user_pydantic = Depends(get_current_user)\n# ):\n# \"\"\" Get a list of shared user for a data by id\"\"\"\n# parent_data = await models.TimeseriesData.get(id=data_id)\n\n# if parent_data.username != user.username:\n# shared_user = await models.UserSharedTimeSeriesData.get(\n# timeseries_data=await models.TimeseriesData.get(id=shared_data.id),\n# user=await models.Users.get(username=shared_user.username)\n# )\n\n# shared_data = await models.UserSharedTimeSeriesData.all().filter(\n# timeseries_data=await models.TimeseriesData.get(id=data_id)\n# )\n# if shared_data:\n# shared_data_pydantic = [await models.user_shared_ts_data_pydantic.from_tortoise_orm(data) \\\n# for data in shared_data]\n# return [d.username for d in shared_data_pydantic] + [parent_data.username]\n\n\n@router.post(\"/{username}\")\nasync def share_data_with_user(\n data_id: int, username: str, user: models.user_pydantic = Depends(get_current_user)\n):\n \"\"\"Create shared relation for a data\"\"\"\n shared_user = await models.Users.get(username=username)\n shared_data = await models.TimeseriesData.get(id=data_id).prefetch_related(\n \"user__user\"\n )\n print(shared_data.user.username, user.username)\n\n if shared_data.user.username != user.username:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Unauthorized!\"\n )\n\n usr_shared = models.UserSharedTimeSeriesData(\n timeseriesdata=await models.TimeseriesData.get(id=shared_data.id),\n user=await 
models.Users.get(username=shared_user.username),\n )\n\n await usr_shared.save()\n return await models.user_shared_ts_data_pydantic.from_tortoise_orm(usr_shared)\n\n\n@router.delete(\"/{username}\")\nasync def delete_user_sharing(\n data_id: int, username: str, user: models.user_pydantic = Depends(get_current_user)\n):\n \"\"\"Delete user from sharing the data!\"\"\"\n ts_data = await models.TimeseriesData.get(id=data_id).prefetch_related(\"user__user\")\n shared_data = await models.UserSharedTimeSeriesData.get(\n timeseriesdata=ts_data, user=await models.Users.get(username=username)\n )\n if ts_data.user.username == user.username or user.username == username:\n await shared_data.delete()\n return\n\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Unauthorized!\"\n )\n","repo_name":"NREL/EVOLVE","sub_path":"api/routes/timeseries_data_sharing_routes.py","file_name":"timeseries_data_sharing_routes.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"32722518298","text":"# импортируем библиотеки\nfrom os import path\nfrom pygame import *\n\n# импортируем классы из других файлов\nfrom config import *\nfrom player import Player\nfrom camera import Camera, camera_configure\nfrom block import Block, BlockDie, BlockCarrot, Nora, BlockHeart\nfrom monster import Monster\nfrom button import Button\n\n\n# главный класс с самой игрой\nclass Game:\n # инициализация\n def __init__(self):\n\n self.screen = pygame.display.set_mode(WINDOW) # Создаем окошко\n pygame.display.set_caption(\"Crazy Rabbit\") # Пишем название в шапку\n self.backgrounds = [pygame.image.load('blocks/bg1.jpg'), pygame.image.load('blocks/bg2.jpg'),\n pygame.image.load('blocks/bg3.jpg')]\n pygame.mixer.music.load('Music/Jungle trail bpm90 C Example Background.wav') # мелодия игры\n pygame.mixer.music.set_volume(0.4) # громкость мелодии\n pygame.mixer.music.play(loops=-1) # зациклила мелодию\n\n # создаем уменьшенное изображение для отображения жизней\n player_img = pygame.image.load(path.join('img/r4-1.png')).convert()\n self.player_mini_img = pygame.transform.scale(player_img, (25, 19))\n self.player_mini_img.set_colorkey(Color(0, 0, 0)) # Black\n\n # координаты игрока\n self.playerX = 0\n self.playerY = 0\n\n self.all_objects = pygame.sprite.Group() # Все объекты\n self.blocks = [] # то, во что мы будем врезаться или опираться\n\n self.monsters = pygame.sprite.Group() # Все передвигающиеся объекты\n\n # текущий уровень\n self.current_level = 1\n self.level = []\n # загружаем уровень\n self.load_level(self.current_level)\n\n total_level_width = len(\n self.level[0]) * BLOCK_WIDTH # Создаем большой прямоугольник уровня. 
Это дляя камеры Высчитываем\n # фактическую ширину уровня\n total_level_height = len(self.level) * BLOCK_HEIGHT # высоту\n\n # инициализируем кролик��\n self.hero = Player(self.playerX, self.playerY) # начальная создаем героя по (x,y) координатам\n self.left = self.right = False # по умолчанию - стоим\n self.up = False\n\n # в список со всеми объектами добавляем кролика\n self.all_objects.add(self.hero)\n self.timer = pygame.time.Clock()\n\n # иниацилизируем камеру\n self.camera = Camera(camera_configure, total_level_width, total_level_height)\n self.running = True\n self.game_over = False\n self.waiting_win = False\n self.main_menu = True\n\n # метод загрузки уровня\n def load_level(self, num_level):\n # по переданому числу, загружаем нужный уровень\n # уровни хранятся в файлах\n level_file = open(f'levels/{num_level}.txt')\n line = \" \"\n self.level.clear()\n # работа с файлом уровня\n while line[0] != \"/\": # пока не нашли символ завершения файла\n line = level_file.readline() # считываем построчно\n if line[0] == \"[\": # если нашли символ начала уровня\n while line[0] != \"]\": # то, пока не нашли символ конца уровня\n line = level_file.readline() # считываем построчно уровень\n if line[0] != \"]\": # и если нет символа конца уровня\n endLine = line.find(\"|\") # то ищем символ конца строки\n self.level.append(line[0: endLine]) # и добавляем в уровень строку от начала до символа \"|\"\n\n if line[0] != \"\": # если строка не пустая\n commands = line.split() # разбиваем ее на отдельные команды\n if len(commands) > 1: # если количество команд > 1, то ищем эти команды\n\n if commands[0] == \"player\": # если первая команда - player\n self.playerX = int(commands[1]) # то записываем координаты героя\n self.playerY = int(commands[2])\n\n if commands[0] == \"monster\": # если первая команда monster, то создаем монстра\n mn = Monster(int(commands[1]), int(commands[2]), int(commands[3]), int(commands[4]),\n int(commands[5]), int(commands[6]))\n self.all_objects.add(mn)\n self.blocks.append(mn)\n self.monsters.add(mn)\n\n # метод отрисовки уровня\n def draw_level(self):\n x = y = 0 # координаты\n for row in self.level: # вся строка\n for col in row: # каждый символ\n # блоки\n if col == \"-\":\n pf = Block(x, y)\n self.all_objects.add(pf)\n self.blocks.append(pf)\n # шипы\n if col == \"d\":\n bd = BlockDie(x, y)\n self.all_objects.add(bd)\n self.blocks.append(bd)\n # нора\n if col == 'N':\n bn = Nora(x, y)\n self.all_objects.add(bn)\n self.blocks.append(bn)\n # морковка\n if col == \"c\":\n bc = BlockCarrot(x, y)\n self.all_objects.add(bc)\n self.blocks.append(bc)\n # сердце, прибавляющее дополнительную жизнь\n if col == \"h\":\n bc = BlockHeart(x, y)\n self.all_objects.add(bc)\n self.blocks.append(bc)\n\n x += BLOCK_WIDTH # блоки платформы ставятся на ширине блоков\n y += BLOCK_HEIGHT # то же самое и с высотой\n x = 0 # на каждой новой строчке начинаем с нуля\n\n # метод перезапуска уровня\n # (запуск при смерти)\n def restart_level(self):\n pygame.mixer.music.unpause()\n self.up = False\n self.left = self.right = False\n self.blocks.clear()\n self.all_objects.empty()\n self.all_objects.add(self.hero)\n self.load_level(self.current_level)\n self.draw_level()\n self.hero.died = False\n self.hero.teleporting(self.hero.startX, self.hero.startY)\n\n # метод переключения уровня\n def next_level(self):\n self.current_level += 1\n # если пройден 3 уровень, то возвращаем на 1\n if self.current_level > MAX_LEVEL:\n self.current_level = 1\n self.waiting_win = True\n self.show_winning()\n 
self.restart_level()\n\n # метод перезапуска игры\n # (запуск при потере всех жизней)\n def reset_game(self):\n self.game_over = True\n # возврат на 1 уровень\n self.current_level = 1\n self.restart_level()\n self.hero.restore_lives()\n\n # обработка событий\n def listen_event(self):\n for event in pygame.event.get(): # Обрабатываем события кнопок\n if event.type == QUIT:\n self.running = False\n # при проигранной игре (потере всех жизней)\n if event.type == GAME_OVER_EVENT:\n self.reset_game()\n # при смерти\n if event.type == DIE_EVENT:\n self.restart_level()\n # при победе\n if event.type == WIN_EVENT:\n self.next_level()\n # обработка кнопок\n if event.type == KEYDOWN and event.key == K_UP:\n self.up = True\n if event.type == KEYDOWN and event.key == K_LEFT:\n self.left = True\n if event.type == KEYDOWN and event.key == K_RIGHT:\n self.right = True\n\n if event.type == KEYUP and event.key == K_UP:\n self.up = False\n if event.type == KEYUP and event.key == K_RIGHT:\n self.right = False\n if event.type == KEYUP and event.key == K_LEFT:\n self.left = False\n\n # метод для обработки кадра\n def tick(self):\n self.timer.tick(60)\n self.screen.blit(self.backgrounds[self.current_level - 1], (0, 0)) # Каждую итерацию необходимо всё перерисовывать\n\n self.camera.update(self.hero) # центризируем камеру относительно персонажа\n self.monsters.update(self.blocks) # обновляем переносим блоки\n self.hero.update(self.left, self.right, self.up, self.blocks) # передвижение\n # all_objects.draw(screen) # отображение\n for event in self.all_objects: #меньший прямоугольник равны размеру окна цетрирующийся относительно главного героя и\n # где прорисовываются все объекты\n self.screen.blit(event.image, self.camera.apply(event)) # Каждую итерацию необходимо всё перерисовывать под положение\n # камеры относительно главного героя\n self.draw_lives(80, 10, self.hero.lives, self.player_mini_img) # прорисовка колличства жизни кроликами\n\n # счетчик\n def counter(self):\n # работа со счетчиком\n f1 = pygame.font.Font(None, 36)\n text1 = f1.render(str(self.hero.n_carrot), True, (255, 255, 255))\n self.screen.blit(text1, (10, 10))\n # картинка морковки рядом с счетчиком\n img = pygame.image.load(\"blocks/carrot.png\")\n self.screen.blit(img, (27, 0))\n\n # заставка при проигрыше\n def show_go_screen(self):\n # ставим музыку на паузу\n pygame.mixer.music.pause()\n # поле game over\n background = pygame.image.load('game over/game over3 500-800.jpg.png')\n self.screen.blit(background, (0, 0))\n pygame.display.flip()\n\n while self.game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_over = False\n self.running = False\n if event.type == pygame.KEYDOWN:\n self.game_over = False\n self.main_menu = True\n\n pygame.event.clear()\n self.restart_level()\n\n # заставка при выигрыше\n def show_winning(self):\n # ставим музыку на паузу\n pygame.mixer.music.pause()\n # win/continue\n bg = pygame.image.load('you win/заставка800-500.png')\n self.screen.blit(bg, (0, 0))\n pygame.display.flip()\n\n while self.waiting_win:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.waiting_win = False\n self.running = False\n if event.type == pygame.KEYDOWN:\n self.waiting_win = False\n self.main_menu = True\n\n pygame.event.clear()\n self.restart_level()\n self.hero.lives = 3\n\n # отрисовка жизней (мини-кроликов в углу экрана)\n def draw_lives(self, x, y, lives, img):\n for i in range(lives):\n img_rect = img.get_rect()\n img_rect.x = x + 30 * i\n img_rect.y = y\n 
self.screen.blit(img, img_rect)\n\n # заставка главного меню\n def show_main_menu(self):\n # ставим музыку на паузу\n pygame.mixer.music.pause()\n # start menu\n bg = pygame.image.load('menu/main_menu1.jpg')\n self.screen.blit(bg, (0, 0))\n\n mouse_handlers = []\n buttons = []\n # отрисовка кнопок\n for i, (text, click_handler) in enumerate((('PLAY', self.play), ('QUIT', self.exit))):\n b = Button(MENU_OFFSET_X,\n MENU_OFFSET_Y + (MENU_BUTTON_H + 15) * i,\n MENU_BUTTON_W,\n MENU_BUTTON_H,\n text,\n click_handler,\n padding=15)\n b.draw(self.screen)\n mouse_handlers.append(b.handle_mouse_event)\n buttons.append(b)\n\n pygame.display.flip()\n\n # отслеживание событий мыши\n while self.main_menu:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.main_menu = False\n self.running = False\n if event.type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION):\n for handler in mouse_handlers:\n handler(event.type, event.pos)\n\n for b in buttons:\n b.draw(self.screen)\n\n pygame.display.update()\n self.timer.tick(60)\n\n # выход из игры\n def exit(self, button):\n self.main_menu = False\n self.running = False\n\n # запуск игры\n def play(self, button):\n self.main_menu = False\n pygame.mixer.music.unpause()\n\n # Основной цикл программы\n def run(self):\n while self.running:\n # если мы в главном меню\n if self.main_menu:\n self.show_main_menu()\n\n # если игра закончилась\n if self.game_over:\n self.show_go_screen()\n\n # подключаем функцию ожидания для этого состояния или крестик и выход или перезапуск\n if self.waiting_win:\n self.show_winning()\n\n self.listen_event()\n self.tick()\n self.counter()\n pygame.display.update() # обновление и вывод всех изменений на экранн\n\n\ndef main():\n pygame.init() # Инициация PyGame, обязательная строчка\n # инициализация игры\n game = Game()\n # отрисовываем уровень\n game.draw_level()\n # запускаем игру\n game.run()\n\n\nif __name__ == '__main__':\n main()","repo_name":"makssimkk/The-Rabbit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15706,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"42100646468","text":"import json\n\nfrom actstream.feeds import UserActivityMixin, AbstractActivityStream\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.syndication.views import Feed\nfrom django.http import HttpResponse\nfrom django.utils.encoding import force_str\nfrom django.utils.feedgenerator import rfc3339_date\nfrom django.views.generic import View\n\n\nclass AbstractActivityStream_W3C2(AbstractActivityStream):\n\n def format(self, action):\n \"\"\"\n Returns a formatted dictionary for the given action.\n \"\"\"\n item = {\n '@context': 'https://www.w3.org/ns/activitystreams',\n 'summary': str(action),\n 'type': str(action.description),\n 'id': self.get_uri(action),\n 'url': self.get_url(action),\n 'verb': action.verb,\n 'published': rfc3339_date(action.timestamp),\n 'actor': self.format_actor(action),\n\n }\n if action.target:\n item['target'] = self.format_target(action)\n if action.action_object:\n item['object'] = self.format_action_object(action)\n return item\n\n def format_item(self, action, item_type='actor'):\n \"\"\"\n Returns a formatted dictionary for an individual item based on the action and item_type.\n \"\"\"\n obj = getattr(action, item_type)\n return {\n 'id': self.get_uri(action, obj),\n 'url': self.get_url(action, obj),\n 'type': ContentType.objects.get_for_model(obj).name,\n 
'name': str(obj)\n }\n\n\nclass ActivityStreamsBaseFeed_V2(AbstractActivityStream_W3C2, Feed):\n\n def feed_extra_kwargs(self, obj):\n \"\"\"\n Returns an extra keyword arguments dictionary that is used when\n initializing the feed generator.\n \"\"\"\n return {}\n\n def item_extra_kwargs(self, action):\n \"\"\"\n Returns an extra keyword arguments dictionary that is used with\n the `add_item` call of the feed generator.\n Add the 'content' field of the 'Entry' item, to be used by the custom\n feed generator.\n \"\"\"\n item = self.format(action)\n item.pop('title', None)\n item['uri'] = item.pop('url')\n item['activity:verb'] = item.pop('verb')\n return item\n\n def format_item(self, action, item_type='actor'):\n name = item_type == 'actor' and 'name' or 'title'\n item = super(ActivityStreamsBaseFeed, self).format_item(action, item_type)\n item[name] = item.pop('displayName')\n item['activity:object-type'] = item.pop('objectType')\n item.pop('url')\n return item\n\n def item_link(self, action):\n return self.get_url(action)\n\n def item_description(self, action):\n if action.description:\n return force_str(action.description)\n\n def items(self, obj):\n return self.get_stream()(obj)[:30]\n\n\n\nclass JSONActivityFeed_V2(AbstractActivityStream_W3C2, View):\n \"\"\"\n Feed that generates feeds compatible with the v1.0 JSON Activity Stream spec\n \"\"\"\n def dispatch(self, request, *args, **kwargs):\n return HttpResponse(self.serialize(request, *args, **kwargs),\n content_type='application/json')\n\n def serialize(self, request, *args, **kwargs):\n items = self.items(request, *args, **kwargs)\n return json.dumps({\n 'totalItems': len(items),\n 'items': [self.format(action) for action in items]\n }, indent=4 if 'pretty' in request.GET or 'pretty' in request.POST else None)\n\n\n\n# Main Class Where Activity Feed URL Use\nclass UserJSONActivityFeed_V2(UserActivityMixin, JSONActivityFeed_V2):\n \"\"\"\n JSON feed of Activity for a given user (where actions are those that the given user follows).\n \"\"\"\n pass\n\n","repo_name":"bengibaykal/swe574group1","sub_path":"cityapp/community/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"13318122435","text":"from django.db.models import Count, Sum, Avg\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.http import *\n# Create your views here.\n\n#this is index view, it just send the data from the db to the html template to show all the sites list\ndef index(request):\n try:\n pk = sitemodel.objects.all().annotate(a=Count('valuesmodel__a'), b=Count('valuesmodel__b'))\n return render(request,'index.html',{'data':list(pk),'allow':True})\n except Exception as e:\n raise HttpResponseServerError(e)\n\n# this view fucn is called when the data user click on any specific site to see the data\ndef sitepage(request, id=0):\n try:\n if id == 0:\n raise Http404(\"Not Found\")\n else:\n site = sitemodel.objects.filter(id=id)[:1]\n value = valuesmodel.objects.filter(site=site)\n return render(request, 'site.html', {'data':list(value),'sitename':site[0].name,'allow':True})\n except Exception as e:\n raise HttpResponseServerError(e)\n\ndef sumsummary(request):\n try:\n sites=sitemodel.objects.all()\n values=valuesmodel.objects.all()\n kk=list()\n for i in sites:\n a=0.0\n b=0.0\n for k in values:\n if k.site==i:\n a=a+float(k.a)\n b=b+float(k.b)\n\n kk.append({'name':i.name,'a':a,'b':b,'id':i.id})\n\n return 
render (request,'summary.html',{'data':kk,'pagename':'Sum','allow':False,'sum':True})\n except Exception as e:\n raise HttpResponseServerError(e)\n\ndef avgsummary(request):\n try:\n kk = list()\n pk = sitemodel.objects.all().annotate(a=Avg('valuesmodel__a'), b=Avg('valuesmodel__b'))\n\n for i in pk:\n kk.append({'name': i.name, 'a': i.a, 'b': i.b, 'id': i.id})\n\n return render(request, 'summary.html', {'data': kk, 'pagename': 'Average','allow':False,'sum':False})\n except Exception as e:\n raise HttpResponseServerError(e)\n","repo_name":"aakashq/3mwStack","sub_path":"mw3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8056837516","text":"\"\"\"\nGiven an integer n, return any array containing n unique integers such that they add up to 0.\n\nExample 1:\n\nInput: n = 5\nOutput: [-7,-1,1,3,4]\nExplanation: These arrays also are accepted [-5,-1,1,2,3] , [-3,-1,2,-2,4].\nExample 2:\n\nInput: n = 3\nOutput: [-1,0,1]\nExample 3:\n\nInput: n = 1\nOutput: [0]\n\"\"\"\nfrom typing import List\n\n\ndef sumZero(n: int) -> List[int]:\n result = []\n\n if n % 2 != 0:\n result.append(0)\n\n for i in range(1, n):\n if len(result) == n:\n break\n result.append(i)\n result.append(-i)\n\n return result\n\n\nif __name__ == \"__main__\":\n n = 5\n func_call = sumZero(n)\n expected_result = [0, 1, -1, 2, -2]\n assert expected_result == func_call, func_call\n print(func_call)\n\n n = 3\n func_call = sumZero(n)\n expected_result = [0, 1, -1]\n assert expected_result == func_call, func_call\n print(func_call)\n\n n = 1\n func_call = sumZero(n)\n expected_result = [0]\n assert expected_result == func_call, func_call\n print(func_call)\n\n\n","repo_name":"MD-ARMAN-Shanto/Problem-Solving","sub_path":"List/unique_integer_sum_upto_zero.py","file_name":"unique_integer_sum_upto_zero.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8180564060","text":"from template_test import template_auto\nfrom tools.handle_xls import handle_xls\nfrom configs.config import config\nfrom data.data_temp import list_data\n\n\"\"\"\n该方法用于生成测试脚本,自动读取/data/data_temp.py中的测试用例和数据,\n自动完成测试脚本生成;\n\"\"\"\n\n\nclass generate_cases:\n def __init__(self):\n self.temp = template_auto()\n\n def generate_class(self, title, class_name):\n self.temp.template_class(title=title, class_name=class_name)\n\n def generate_def(self, title, func_name):\n self.temp.template_def(story_title=title, function_name=func_name)\n\n def generate_click(self,title, index_case, index_step, method=''):\n self.temp.template_click(title=title,index_case=index_case,index_step=index_step,method=method)\n\n def generate_sendkey(self, title, index_case, index_step, method=''):\n self.temp.template_sendKey(title=title,index_case=index_case,index_step=index_step,method=method)\n\n def generate_panduan(self, title, index_case, index_step):\n self.temp.template_panduan(title=title,index_case=index_case,index_step=index_step)\n\n def generate_screen(self,title):\n self.temp.template_screen(title=title)\n\n def get_data(self, case_name, steps, methods, index_case, tem_keyword=config.tem_keyword):\n self.generate_class(title=case_name, class_name=case_name)\n self.generate_def(title=case_name, func_name=case_name)\n for i in range(len(steps)):\n # 输入\n if tem_keyword[0] in steps[i]:\n if methods[i] != 'null':\n 
self.generate_sendkey(title=steps[i],\n index_case=index_case,\n index_step=i,\n method=methods[i])\n else:\n self.generate_sendkey(title=steps[i],\n index_case=index_case,\n index_step=i)\n # 判断\n if tem_keyword[1] in steps[i]:\n self.generate_panduan(title=steps[i],\n index_case=index_case,\n index_step=i)\n #点击\n if tem_keyword[2] in steps[i]:\n if methods[i] != 'null':\n self.generate_click(title=steps[i],\n index_case=index_case,\n index_step=i,\n method=methods[i])\n else:\n self.generate_click(title=steps[i],\n index_case=index_case,\n index_step=i)\n # 截图\n if tem_keyword[3] in steps[i]:\n self.generate_screen(title=steps[i])\n return self.temp.import_code + self.temp.basic_code\n\n def generate_running(self):\n for i in range(len(list_data)):\n code = self.get_data(case_name=list_data[i][config.dict_key[1]],\n steps=list_data[i][config.dict_key[2]],\n methods=list_data[i][config.dict_key[3]],\n index_case=i)\n with open(config.generate_path + list_data[i][config.dict_key[0]] + '_' + list_data[i][config.dict_key[1]] + '.py', 'w', encoding='utf-8') as w:\n w.write(code)\n w.close()\n self.temp.template_basic_clear()\n\n\nif __name__ == '__main__':\n handle_xls().handle_xls()\n generate_cases().generate_running()\n\n","repo_name":"Justin-Hi/Autotest_Test","sub_path":"tools/generate_case.py","file_name":"generate_case.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15171841871","text":"import pprint\n\ndef countProfit(shippers):\n listBarang = [\n ['Sepatu Stacattu', 1500000, 10],\n ['Baju Zoro', 500000, 2],\n ['Sweater Uniklooh', 175000, 1]\n ]\n output = []\n\n a = {'product': 'Sepatu Stacattu'}\n asep = {'shoppers': []}\n leftsep = {'leftOver': 10}\n totalprofsep = {'totalProfit': 0}\n\n b={'product': 'Baju Zoro'}\n abaj = {'shoppers': []}\n leftbaj = {'leftOver': 2}\n totalprofbaj = {'totalProfit': 0}\n\n c={'product': 'Sweater Uniklooh'}\n aswe = {'shoppers': []}\n leftswe = {'leftOver': 1}\n totalprofswe = {'totalProfit': 0}\n\n for i in range(len(shippers)):\n if shippers[i]['product'] == 'Sepatu Stacattu':\n if shippers[i]['amount'] <= leftsep['leftOver'] :\n asep['shoppers'].append(shippers[i]['name'])\n leftsep['leftOver']=leftsep['leftOver']-shippers[i]['amount']\n totalprofsep['totalProfit']=totalprofsep['totalProfit']+(listBarang[0][1]*shippers[i]['amount'])\n elif shippers[i]['product'] == 'Baju Zoro':\n if shippers[i]['amount'] <= leftbaj['leftOver'] :\n abaj['shoppers'].append(shippers[i]['name'])\n leftbaj['leftOver']=leftbaj['leftOver']-shippers[i]['amount']\n totalprofbaj['totalProfit']=totalprofbaj['totalProfit']+(listBarang[1][1]*shippers[i]['amount'])\n elif shippers[i]['product'] == 'Sweater Uniklooh':\n if shippers[i]['amount'] <= leftswe['leftOver'] :\n aswe['shoppers'].append(shippers[i]['name'])\n leftswe['leftOver']=leftswe['leftOver']-shippers[i]['amount']\n totalprofswe['totalProfit']=totalprofswe['totalProfit']+(listBarang[2][1]*shippers[i]['amount'])\n else :\n continue\n a['shoppers'] = asep['shoppers']\n a['leftOver'] = leftsep['leftOver']\n a['totalProfit'] = totalprofsep['totalProfit']\n output.append(a)\n\n b['shoppers'] = abaj['shoppers']\n b['leftOver'] = leftbaj['leftOver']\n b['totalProfit'] = totalprofbaj['totalProfit']\n output.append(b)\n\n c['shoppers'] = aswe['shoppers']\n c['leftOver'] = leftswe['leftOver']\n c['totalProfit'] = totalprofswe['totalProfit']\n output.append(c)\n return output\n\npprint.pprint(countProfit([\n {'name': 
'Windi', 'product': 'Sepatu Stacattu', 'amount': 8},\n {'name': 'Vanessa', 'product': 'Sepatu Stacattu', 'amount': 10},\n {'name': 'Rani', 'product': 'Sweater Uniklooh', 'amount': 1},\n {'name': 'Devi', 'product': 'Baju Zoro', 'amount': 1},\n {'name': 'Lisa', 'product': 'Baju Zoro', 'amount': 1}\n]))\n\n# print(countProfit([{'name': 'Windi', 'product': 'Sepatu Naiki', 'amount': 5}]))\n","repo_name":"hamdiranu/cobarepo","sub_path":"Struktur Data/Problem 1/5 - Toko X.py","file_name":"5 - Toko X.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7331955222","text":"\"\"\"\nDatos de entrada\n\nDistancia recorrida en kilometros = x = int\n\n\nDatos de salida\n\nPago que debe realizar el cliente = pago_cliente = int\n\n\n\"\"\"\n# Entradas\nx=int(input( \"Inserte la distancia recorrida por usten en kilometros : \"))\n\n\n# Caja Negra \nd=(x-1000)# representa los kilometros adicionales que se cobraran despues de los 1000 km \n\nPrecio=''\nif (x<= 300):\n precio= 50_000\nelif(x>300 and x<1000):\n precio= (70_000+(30000*(x-300))) # se cobra 30000 demas por cada km superior a 300 km\nelif(x>1000):\n precio= ((150_000+(9000*(d)))+(((x-d)-300)*30000)) # Se cobra 9000 por cada km adicional despues de los 1000 km /se cobra 30000 demas por cada km superior a 300 km\n\n# Salidas \nprint(f\" El Pago que debe realizar el cliente es de ${precio}\")\n\n","repo_name":"Mariajosedibo19/Talleres_de_Algoritmos","sub_path":"Taller_Estructuras_de _Control_Selectivas/Ejercicio_7.py","file_name":"Ejercicio_7.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5448392936","text":"from urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\nfrom fpdf import FPDF\nimport re\n\n#for browser run\nimport subprocess \nimport webbrowser\nimport sys\nimport urllib # for url encoding\n\n# Needed:\n# BeautifulSoup\n# pip install urlopen\n# pip install lxml\n# pip install fpdf\n# DejaVuSansCondensed.ttf font file in program directory\n\n# Working 29.09.2019\n# The program may need modifications if the page changes\n\n#to do\n# fix bug if only one category page\n# maybe a table view?\n\n# add proxy\n# multithreading (not necessary becouse it can look like flood)\n\n\n#Settings\n# products per page 15/30/45/60 (recommended 60 for best performance)\nsett_productsPerPage = 60\n# start page number (recommended start from 0),\nsett_startPageNumber = 0\n# category eg. 
Muzyka = 33, Kolekcje własne = 46, Gry planszowe = 376301, 0 = no category\nsett_category = 376301\n# use to limit results\nlimitResults = 500\n\n\n#data containier\ndataList = []\n\n#prepare PDF\npdf = FPDF()\npdf.add_page()\n\n#DejaVu Unicode font (uses UTF-8)\npdf.add_font('DejaVu', '', 'DejaVuSansCondensed.ttf', uni=True)\npdf.set_font('DejaVu', '', 10)\n\n\n#open top products in browser function\ndef openBest(data, number):\n for i in range(0, number):\n url = \"https://www.empik.com/szukaj/produkt?q=\"\n url += data[i][0]\n url += \"&qtype=basicForm&ac=true\"\n print(url)\n \n if sys.platform == 'darwin': # in case of OS X\n subprocess.Popen(['open', url])\n else:\n webbrowser.open_new_tab(url)\n\n\n#search url constructor\ndef searchUrl(data):\n url = \"https://www.empik.com/szukaj/produkt?q=\"\n url += urllib.parse.quote_plus(data, safe='', encoding='utf-8', errors=None)\n url += \"&qtype=basicForm&ac=true\"\n return url\n \n \n#url constructor\ndef urlBuilder( productPerPage, pageNumber, Category ):\n url = \"https://www.empik.com/promocje?\"\n\n if Category != 0:\n url += \"searchCategory=\" + str(Category)\n\n url += \"&hideUnavailable=true\"\n url += \"&start=\" + str((pageNumber*productPerPage)+1)\n url += \"&resultsPP=\" + str(productPerPage)\n url += \"&qtype=facetForm\"\n return url\n\n# url\nsett_url = urlBuilder(sett_productsPerPage, sett_startPageNumber, sett_category)\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}\nreg_url = sett_url\nreq = Request(url=reg_url, headers=headers) \nhtml = urlopen(req)\nbsObj = BeautifulSoup(html.read(), \"lxml\");\n\n#counting pages\npageNumbers = bsObj.find(\"div\", \"pagination\").findAll(\"a\")\naTagNumber = len(pageNumbers)-2\n\npriceList = bsObj.findAll(\"div\", \"price ta-price-tile\")\n#titleList = bsObj.findAll(\"strong\", \"ta-product-title\")\n\nextractedPagesAmount = int(pageNumbers[aTagNumber].get_text())\nproductsOnPage = len(priceList)\nprint(\"First link: \", sett_url)\nprint(\"Category number: \", sett_category)\nprint(\"Number of pages: \", extractedPagesAmount)\nprint(\"Number of products of each page: \", productsOnPage)\nprint(\"Approximate number of expected products: ~\", extractedPagesAmount*productsOnPage)\n\n#---------------------\nprint(\"Fetching \", extractedPagesAmount, \" pages of data\")\n\ncount = 0\nfor j in range(0, extractedPagesAmount):\n \n reg_url = urlBuilder(sett_productsPerPage, j, sett_category)\n req = Request(url=reg_url, headers=headers) \n html = urlopen(req)\n bsObj = BeautifulSoup(html.read(), \"lxml\");\n\n priceList = bsObj.findAll(\"div\", \"price ta-price-tile\")\n titleList = bsObj.findAll(\"strong\", \"ta-product-title\")\n\n productsOnPage = len(priceList)\n \n # singe item in loop\n for i in range(0, productsOnPage):\n extractedPrice = priceList[i].get_text()\n extractedTitle = (titleList[i].get_text()).strip()\n count += 1\n\n prices = re.findall(\"\\d+\\,\\d+\", extractedPrice)\n \n prices[0] = float(prices[0].replace(',','.'))\n\n if len(prices) == 2:\n prices[1] = float(prices[1].replace(',','.'))\n percentdiff = round(((prices[1]*100)/prices[0])-100,2)\n data = [extractedTitle, percentdiff, prices[0], prices[1]]\n else:\n # when no second price\n percentdiff = 0\n data = [extractedTitle, percentdiff, prices[0], prices[0]]\n\n dataList.append(data)\n\n #to Debug\n #print(\"-\", percentdiff, end=\"% \")\n #print(prices, end=\" \")\n #print(extractedTitle)\n #---------------------\n\n 
print(\"[\",j+1,\"/\",extractedPagesAmount, \"] - page done\")\n\n\n#display collected data\n\n# function to return the second element \ndef returnSecondElement(val): \n return val[1] \n\nlimitCount = 0\ndataList.sort(key = returnSecondElement, reverse = True) #sort\n\nprint(\"Listing\")\nprint(\"Order: price diff [%], old price [PLN], new price [PLN], product\")\n\npdf.cell(0, 10, txt=\"Results\", ln=1, align=\"C\")\npdf.cell(0, 10, txt=\"Order: price diff [%], old price [PLN], new price [PLN], product\", ln=1, align=\"L\")\n\nfor i in dataList:\n limitCount += 1\n\n text = str(i[1]) + \"%\\t\" + str(i[2]) + \"\\t\" + str(i[3]) + \"\\t\" + str(i[0])\n print(text)\n \n # can't use \"\\t\" in PDF\n pdfText = str(i[1]) + \"% \" + str(i[2]) + \" \" + str(i[3]) + \" \" + i[0]\n\n \n pdf.cell(0, 10, txt=pdfText, ln=1, align=\"L\", fill=False, link=searchUrl(i[0]))\n\n \n if limitCount >= limitResults:\n break;\n\nprint(\"Total scanned products: \", count)\nprint(\"Listed only: \", limitResults, \" best of % difference.\")\nprint(\"Please generate PDF file to see whole list.\")\n\nprint(\"Do you want to generate PDF file? [yes/no]\")\nchoice = input()\n\nif choice == \"yes\":\n pdf.output(\"output.pdf\")\nelse:\n print(\"Thanks for using\")\n\nprint(\"Do you want to see best products in browser? [yes/no]\")\nchoice = input()\n\nif choice == \"yes\":\n howManyOpen = input(\"How many? (number): \")\n howManyOpen = int(howManyOpen)\n openBest(dataList, howManyOpen)\nelse:\n print(\"Thanks for using\")\n\n\n","repo_name":"0xuser/empikWebScraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28563127151","text":"import re\nimport os\nimport time\nimport tornado.gen\nimport gramex.data\nimport sqlalchemy as sa\nfrom string import ascii_lowercase, digits\nfrom random import choice\nfrom mimetypes import guess_type\nfrom tornado.web import HTTPError\nfrom gramex.config import objectpath\nfrom gramex.http import NOT_FOUND, REQUEST_ENTITY_TOO_LARGE, UNSUPPORTED_MEDIA_TYPE\nfrom .formhandler import FormHandler\n\n\nclass DriveHandler(FormHandler):\n '''\n Lets users manage files. Here's a typical configuration::\n\n path: $GRAMEXDATA/apps/appname/ # Save files here\n user_fields: [id, role, hd] # user attributes to store\n tags: [tag] # to store\n allow: [.doc, .docx] # Only allow these files\n ignore: [.pdf] # Don't allow these files\n max_file_size: 100000 # Files must be smaller than this\n redirect: # After uploading the file,\n query: next # ... redirect to ?next=\n url: /$YAMLURL/ # ... 
else to this directory\n\n File metadata is stored in /.meta.db as SQLite\n '''\n @classmethod\n def setup(cls, path, user_fields=None, tags=None, allow=None, ignore=None, max_file_size=None,\n **kwargs):\n cls.path = path\n cls.user_fields = cls._ensure_type('user_fields', user_fields)\n cls.tags = cls._ensure_type('tags', tags)\n cls.allow = allow or []\n cls.ignore = ignore or []\n cls.max_file_size = max_file_size or 0\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n\n # Set up the parent FormHandler with a single SQLite URL and table\n url, table = 'sqlite:///' + os.path.join(path, '.meta.db'), 'drive'\n kwargs.update(url=url, table=table, id='id')\n cls.special_keys += ['path', 'user_fields', 'tags', 'allow', 'ignore', 'max_file_size']\n super().setup(**kwargs)\n\n # Ensure all tags and user_fields are present in \"drive\" table\n engine = sa.create_engine(url)\n meta = sa.MetaData(bind=engine)\n meta.reflect()\n cls._db_cols = {\n 'id': sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),\n 'file': sa.Column('file', sa.Text), # Original file name\n 'ext': sa.Column('ext', sa.Text), # Original file extension\n 'path': sa.Column('path', sa.Text), # Saved file relative path\n 'size': sa.Column('size', sa.Integer), # File size\n 'mime': sa.Column('mime', sa.Text), # MIME type\n 'date': sa.Column('date', sa.Integer), # Uploaded date\n }\n for s in cls.user_fields:\n cls._db_cols['user_%s' % s] = sa.Column('user_%s' % s, sa.String)\n for s in cls.tags:\n cls._db_cols.setdefault(s, sa.Column(s, sa.String))\n if table in meta.tables:\n with engine.connect() as conn:\n with conn.begin():\n for col, coltype in cls._db_cols.items():\n if col not in meta.tables[table].columns:\n conn.execute('ALTER TABLE %s ADD COLUMN %s TEXT' % (table, col))\n else:\n sa.Table(table, meta, *cls._db_cols.values()).create(engine)\n\n # If ?_download=...&id=..., then download the file via modify:\n def download_plugin(data, key, handler):\n data = original_modify(data, key, handler)\n ids = handler.args.get('id', [])\n if len(ids) != 1 or '_download' not in handler.args:\n return data\n if len(data) == 0:\n raise HTTPError(NOT_FOUND, 'No file record with id=%s' % ids[0])\n path = os.path.join(handler.path, data['path'][0])\n if not os.path.exists(path):\n raise HTTPError(NOT_FOUND, 'Missing file for id=%s' % ids[0])\n handler.set_header('Content-Type', data['mime'][0])\n handler.set_header('Content-Length', os.stat(path).st_size)\n handler.set_header(\n 'Content-Disposition', 'attachment; filename=\"%s\"' % data['file'][0])\n with open(path, 'rb') as handle:\n return handle.read()\n\n original_modify = cls.datasets['data'].get('modify', lambda v, *args: v)\n cls.datasets['data']['modify'] = download_plugin\n\n def check_filelimits(self):\n allow = set(ext.lower() for ext in self.allow)\n ignore = set(ext.lower() for ext in self.ignore)\n for name, ext, size in zip(self.args['file'], self.args['ext'], self.args['size']):\n if self.max_file_size and size > self.max_file_size:\n raise HTTPError(REQUEST_ENTITY_TOO_LARGE, '%s: %d > %d' % (\n name, size, self.max_file_size))\n if ext in ignore or (allow and ext not in allow):\n raise HTTPError(UNSUPPORTED_MEDIA_TYPE, name)\n\n @tornado.gen.coroutine\n def post(self, *path_args, **path_kwargs):\n '''Saves uploaded files, then updates metadata DB'''\n user = self.current_user or {}\n uploads = self.request.files.get('file', [])\n n = len(uploads)\n # Initialize all DB columns (except ID) to have the same number of rows as uploads\n for 
key, col in list(self._db_cols.items())[1:]:\n self.args[key] = self.args.get(key, []) + [col.type.python_type()] * n\n for key in self.args:\n self.args[key] = self.args[key][:n]\n for i, upload in enumerate(uploads):\n file = os.path.basename(upload.get('filename', ''))\n ext = os.path.splitext(file)[1]\n path = re.sub(r'[^!#$%&()+,.0-9;<=>@A-Z\\[\\]^`a-z{}~]', '-', file)\n while os.path.exists(os.path.join(self.path, path)):\n path = os.path.splitext(path)[0] + choice(digits + ascii_lowercase) + ext\n self.args['file'][i] = file\n self.args['ext'][i] = ext.lower()\n self.args['path'][i] = path\n self.args['size'][i] = len(upload['body'])\n self.args['date'][i] = int(time.time())\n # Guess MIME type from filename if it's unknown\n self.args['mime'][i] = upload['content_type']\n if self.args['mime'][i] == 'application/unknown':\n self.args['mime'][i] = guess_type(file, strict=False)[0]\n # Append user attributes\n for s in self.user_fields:\n self.args['user_%s' % s.replace('.', '_')][i] = objectpath(user, s)\n self.check_filelimits()\n yield super().post(*path_args, **path_kwargs)\n for upload, path in zip(uploads, self.args['path']):\n with open(os.path.join(self.path, path), 'wb') as handle:\n handle.write(upload['body'])\n\n @tornado.gen.coroutine\n def delete(self, *path_args, **path_kwargs):\n '''Deletes files from metadata DB and from file system'''\n conf = self.datasets.data\n files = gramex.data.filter(conf.url, table=conf.table, args=self.args)\n result = yield super().delete(*path_args, **path_kwargs)\n for index, row in files.iterrows():\n path = os.path.join(self.path, row['path'])\n if os.path.exists(path):\n os.remove(path)\n return result\n\n @tornado.gen.coroutine\n def put(self, *path_args, **path_kwargs):\n '''Update attributes and files'''\n # PUT can update only 1 ID at a time. 
Use only the first upload, if any\n uploads = self.request.files.get('file', [])[:1]\n id = self.args.get('id', [-1])\n # User cannot change the path, size, date or user attributes\n for s in ('path', 'size', 'date'):\n self.args.pop(s, None)\n for s in self.user_fields:\n self.args.pop('user_%s' % s, None)\n # These are updated only when a file is uploaded\n if len(uploads):\n user = self.current_user or {}\n self.args.setdefault('size', []).append(len(uploads[0]['body']))\n self.args.setdefault('date', []).append(int(time.time()))\n for s in self.user_fields:\n self.args.setdefault('user_%s' % s.replace('.', '_'), []).append(\n objectpath(user, s))\n conf = self.datasets.data\n files = gramex.data.filter(conf.url, table=conf.table, args={'id': id})\n result = yield super().put(*path_args, **path_kwargs)\n if len(uploads) and len(files):\n path = os.path.join(self.path, files['path'].iloc[0])\n with open(path, 'wb') as handle:\n handle.write(uploads[0]['body'])\n return result\n\n @classmethod\n def _ensure_type(cls, field, values):\n if isinstance(values, dict):\n return values\n if isinstance(values, (list, tuple)):\n return {v: 'str' for v in values if v}\n if isinstance(values, str) and values:\n return {values: 'str'}\n if not values:\n return {}\n raise TypeError('%s: %s should be a dict, not %s' % (cls.name, field, values))\n","repo_name":"nehak0601/gramex","sub_path":"gramex/handlers/drivehandler.py","file_name":"drivehandler.py","file_ext":"py","file_size_in_byte":9196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"37889300749","text":"from django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .user import User\nfrom .validators import year_validator\n\n\nclass Category(models.Model):\n \"\"\"Модель категории произведения\"\"\"\n\n name = models.CharField(max_length=256)\n slug = models.SlugField(\n max_length=50,\n unique=True,\n db_index=True\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __str__(self):\n return f'{self.name[:15]}'\n\n\nclass Genre(models.Model):\n \"\"\"Модель жанра произведения\"\"\"\n\n name = models.CharField(max_length=256)\n slug = models.SlugField(\n max_length=50,\n unique=True,\n db_index=True\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Жанр'\n verbose_name_plural = 'Жанры'\n\n def __str__(self):\n return f'{self.name[:15]}'\n\n\nclass Title(models.Model):\n \"\"\"Модель произведений\"\"\"\n\n category = models.ForeignKey(\n Category,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name='titles',\n verbose_name='Категория'\n )\n description = models.TextField(\n 'Описание произведения',\n blank=True,\n null=True\n )\n genre = models.ManyToManyField(\n Genre,\n blank=True,\n related_name='titles',\n verbose_name='Жанр'\n )\n name = models.CharField(\n 'Название',\n max_length=256,\n db_index=True\n )\n year = models.IntegerField(\n 'Год',\n validators=(year_validator, )\n )\n\n class Meta:\n verbose_name = 'Произведение'\n verbose_name_plural = 'Произведения'\n ordering = ['name']\n\n def __str__(self):\n return self.name[:15]\n\n\nclass Review(models.Model):\n \"\"\"Модель для Отзывов\"\"\"\n\n text = models.TextField()\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='reviews',\n verbose_name='Автор'\n )\n title = models.ForeignKey(\n Title,\n on_delete=models.CASCADE,\n related_name='reviews',\n 
verbose_name='произведение'\n )\n score = models.IntegerField(\n 'Оценка',\n validators=[MinValueValidator(1), MaxValueValidator(10)]\n )\n pub_date = models.DateTimeField(\n 'Дата публикации отзыва',\n auto_now_add=True\n )\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Отзыв'\n verbose_name_plural = 'Отзывы'\n constraints = [\n models.UniqueConstraint(\n fields=['author', 'title'],\n name='unique_author_title'\n )\n ]\n\n\nclass Comment(models.Model):\n \"\"\"Модель для Комментариев\"\"\"\n\n review = models.ForeignKey(\n Review,\n on_delete=models.CASCADE,\n related_name='comments',\n verbose_name='Отзыв'\n )\n text = models.TextField()\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='author',\n verbose_name='Автор'\n )\n pub_date = models.DateTimeField(\n 'Дата публикации',\n auto_now_add=True\n )\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Комментарий'\n verbose_name_plural = 'Комментарии'\n\n def __str__(self):\n return self.text\n\n\nclass GenreTitle(models.Model):\n \"\"\"Модель ManyToMany для связи произведений и жанров\"\"\"\n\n genre = models.ForeignKey(\n Genre,\n on_delete=models.SET_NULL,\n null=True,\n related_name='genretitle',\n )\n title = models.ForeignKey(\n Title,\n on_delete=models.SET_NULL,\n null=True,\n related_name='titlegenre',\n )\n\n class Meta:\n models.UniqueConstraint(\n fields=['genre', 'title'],\n name='unique_follow'\n )\n","repo_name":"Promolife/api_yamdb","sub_path":"api_yamdb/reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25074479603","text":"# human_detection.py\n# ! please modify the path to your video.mp4\n# Import necessary modules\nimport sys\nimport cv2\nfrom numpy import ndarray, array\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QWidget, QLabel, QPushButton, QFrame,\n QHBoxLayout, QVBoxLayout)\nfrom PyQt5.QtGui import QPixmap, QImage\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\n\nstyle_sheet = \"\"\"\n QLabel#VideoLabel{\n color: darkgrey;\n border: 2px solid darkgrey;\n qproperty-alignment: AlignCenter;\n }\n\"\"\"\n\n\nclass VideoWorkerThread(QThread):\n \"\"\"Worker thread for capturing video.\n \"\"\"\n frame_data_updated = pyqtSignal(ndarray)\n\n def __init__(self, parent, video_file=None) -> None:\n print(f'\\n\\tVideoWorkerThread - init')\n super().__init__()\n self.parent = parent\n self.video_file = video_file\n\n def run(self):\n \"\"\"The code that we want to run in a separate thread,\n in this case capturing video using OpenCV, is placed in this function.\n run() is called automatically after start().\n \"\"\"\n print(f'\\n\\tVideoWorkerThread - run')\n self.capture = cv2.VideoCapture(self.video_file)\n\n while self.parent.thread_is_running:\n # Read frames from the camera\n ret_val, frame = self.capture.read()\n\n if not ret_val:\n print(f'\\n\\ttVideoWorkerThread - run: can not capture frame -> break')\n break\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # Resize an image for faster detection\n frame = cv2.resize(frame, (600, 400))\n rects = self.createHOGDescriptor(frame)\n\n # Draw the detections (rects) in the frame;\n # tr and br refer to the top-left and bottom-right corners of the detected rects,\n # respectively.\n for (x_tr, y_tr, x_br, y_br) in rects:\n frame = cv2.rectangle(\n frame, (x_tr, y_tr),\n (x_br, y_br),\n (0, 0, 255),\n 2)\n self.frame_data_updated.emit(frame)\n\n def 
createHOGDescriptor(self, frame):\n print(f'\\n\\tVideoWorkerThread - createHOGDescriptor')\n # Initialize OpenCV's HOG Descriptor and SVM classifier\n hog = cv2.HOGDescriptor()\n hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n\n # Detect people in the image and return the bounding rectangles.\n # Altering the parameters in detect MultiScale() can affect the accuracy of detections.\n # winStride refers to the number of steps the sliding window moves in the x and y directions;\n # the sliding window is padded to improve accuracy;\n # a smaller scale value will increase detection accuracy,\n # but also increase processing time\n rects, weights = hog.detectMultiScale(\n frame, winStride=(4, 4), padding=(8, 8), scale=1.1)\n\n # For each of the rects detected in an image,\n # add the values for the corners of the rect to an array\n rects = array([[x, y, x + width, y + height]\n for (x, y, width, height) in rects])\n return rects\n\n def stopThread(self):\n print(f'\\n\\tVideoWorkerThread - stopThread')\n self.wait()\n QApplication.processEvents()\n\n\nclass DisplayVideo(QMainWindow):\n def __init__(self) -> None:\n print(f'\\ninit DisplayVideo')\n super().__init__()\n self.initializeUI()\n\n def initializeUI(self):\n print(f'\\ninitializeUI')\n self.setMinimumSize(800, 500)\n self.setWindowTitle('5.2 - Human Detection GUI')\n\n self.thread_is_running = False\n\n self.setupWindow()\n self.show()\n\n def setupWindow(self):\n print(f'\\nsetupWindow')\n self.video_display_label = QLabel()\n self.video_display_label.setObjectName(\"VideoLabel\")\n\n self.start_button = QPushButton('Start Video')\n self.start_button.clicked.connect(self.startVideo)\n\n stop_button = QPushButton('Stop Video')\n stop_button.clicked.connect(self.stopCurrentVideo)\n\n # Create horizontal and vertical layouts\n side_panel_v_box = QVBoxLayout()\n side_panel_v_box.setAlignment(Qt.AlignTop)\n side_panel_v_box.addWidget(self.start_button)\n side_panel_v_box.addWidget(stop_button)\n\n side_panel_frame = QFrame()\n side_panel_frame.setMinimumWidth(200)\n side_panel_frame.setLayout(side_panel_v_box)\n\n main_h_box = QHBoxLayout()\n main_h_box.addWidget(self.video_display_label, 1)\n main_h_box.addWidget(side_panel_frame)\n\n # Create container widget and set main window's widget\n container = QWidget()\n container.setLayout(main_h_box)\n self.setCentralWidget(container)\n\n def setupMenu(self):\n print(f'\\nsetupMenu')\n pass\n\n def startVideo(self):\n print(f'\\nstartVideo')\n self.thread_is_running = True\n self.start_button.setEnabled(False)\n self.start_button.repaint()\n\n # Create an instance of the worker thread using a local video file\n video_file = \"prac/Chapter5/video.mp4\" # ! 
please modify this\n self.video_thread_worker = VideoWorkerThread(\n parent=self, video_file=video_file)\n\n # Connect to the thread's signal to update the frames in the video_display_label\n self.video_thread_worker.frame_data_updated.connect(\n self.updateVideoFrames)\n self.video_thread_worker.start()\n\n def stopCurrentVideo(self):\n print(f'\\nstopCurrentVideo')\n if self.thread_is_running:\n self.thread_is_running = False\n self.video_thread_worker.stopThread()\n\n self.video_display_label.clear()\n self.start_button.setEnabled(True)\n\n def updateVideoFrames(self, video_frame):\n print(f'\\nupdateVideoFrames')\n # Get the shape of the frame, height * width * channels\n height, width, channels = video_frame.shape\n\n # Number of bytes required by the image pixels in a row; dependency on the number of channels\n bytes_per_line = width * channels\n\n # Create instance of QImage using data from the video file\n converted_Qt_image = QImage(\n video_frame, width, height, bytes_per_line, QImage.Format_RGB888)\n\n # Set the video_display_label's pixmap\n self.video_display_label.setPixmap(\n QPixmap.fromImage(converted_Qt_image).scaled(\n self.video_display_label.width(),\n self.video_display_label.height(),\n Qt.KeepAspectRatioByExpanding))\n\n def closeEvent(self, event) -> None:\n print(f'\\ncloseEvent')\n if self.thread_is_running:\n self.video_thread_worker.quit()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n app.setStyleSheet(style_sheet)\n window = DisplayVideo()\n sys.exit(app.exec_())\n","repo_name":"AkiraGiShinichi/py_ModernPyQt","sub_path":"prac/Chapter5/human_detection.py","file_name":"human_detection.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"3607521105","text":"\"\"\"The Sophia-doc Command-line interface.\"\"\"\nfrom __future__ import annotations\n\nimport argparse\nimport sys\n\nfrom docstring_parser import DocstringStyle\n\nfrom sophia_doc import ModuleNode\nfrom sophia_doc.builders.markdown import MarkdownBuilder\nfrom sophia_doc.utils import import_module\n\nif sys.version_info >= (3, 9):\n from argparse import BooleanOptionalAction\nelse:\n from argparse import Action\n from typing import Any, Callable, Iterable, Sequence, TypeVar\n\n from typing_extensions import override\n\n _T = TypeVar(\"_T\")\n\n class BooleanOptionalAction(Action):\n def __init__(\n self,\n option_strings: Sequence[str],\n dest: str,\n default: _T | str | None = None,\n type: Callable[[str], _T] | Any | None = None,\n choices: Iterable[_T] | None = None,\n required: bool = False,\n help: str | None = None,\n metavar: str | tuple[str, ...] 
| None = None,\n ) -> None:\n _option_strings: list[str] = []\n for option_string in option_strings:\n _option_strings.append(option_string)\n\n if option_string.startswith(\"--\"):\n option_string = \"--no-\" + option_string[2:] # noqa: PLW2901\n _option_strings.append(option_string)\n\n if help is not None and default is not None:\n help += \" (default: %(default)s)\"\n\n super().__init__(\n option_strings=_option_strings,\n dest=dest,\n nargs=0,\n default=default,\n type=type,\n choices=choices,\n required=required,\n help=help,\n metavar=metavar,\n )\n\n @override\n def __call__(\n self,\n _parser: Any,\n namespace: Any,\n _values: str | Sequence[Any] | None,\n option_string: str | None = None,\n ) -> None:\n if option_string is not None and option_string in self.option_strings:\n setattr(\n namespace,\n self.dest,\n not option_string.startswith(\"--no-\"),\n )\n\n def format_usage(self) -> str:\n return \" | \".join(self.option_strings)\n\n\nparser = argparse.ArgumentParser(\n description=\"Sophia_doc is a python package to automatically \"\n \"generate API documents for Python modules\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n)\nparser.add_argument(\"module\", type=str, help=\"Python module names.\")\nparser.add_argument(\n \"-o\",\n \"--output-dir\",\n type=str,\n default=\"doc\",\n help=\"The directory to write document.\",\n)\nparser.add_argument(\n \"-f\",\n \"--format\",\n type=str,\n default=\"markdown\",\n help=\"File format of document.\",\n)\nparser.add_argument(\n \"--docstring-style\",\n type=str,\n default=\"auto\",\n help=\"Docstring style the python module used.\",\n)\nparser.add_argument(\n \"--ignore-data\",\n type=bool,\n action=BooleanOptionalAction,\n default=False,\n help=\"Ignore data in Markdown text.\",\n)\nparser.add_argument(\n \"--anchor-extend\",\n type=bool,\n action=BooleanOptionalAction,\n default=False,\n help=\"Add anchor to markdown title.\",\n)\nparser.add_argument(\n \"--overwrite\",\n type=bool,\n action=BooleanOptionalAction,\n default=False,\n help=\"Overwrite any file in output directory.\",\n)\nparser.add_argument(\n \"--exclude-module-name\",\n type=bool,\n action=BooleanOptionalAction,\n default=False,\n help=\"Write file to path which exclude module name.\",\n)\nparser.add_argument(\n \"--init-file-name\",\n type=str,\n default=\"index.md\",\n help=\"The name of Markdown file from __init__.py, index.md by default.\",\n)\n\n\ndef cli() -> None:\n \"\"\"The Sophia-doc Command-line interface.\"\"\"\n args = parser.parse_args()\n assert args.format == \"markdown\"\n MarkdownBuilder(\n ModuleNode(import_module(args.module)),\n docstring_style=getattr(DocstringStyle, args.docstring_style.upper()),\n anchor_extend=args.anchor_extend,\n ignore_data=args.ignore_data,\n ).write(\n args.output_dir,\n overwrite=args.overwrite,\n exclude_module_name=args.exclude_module_name,\n init_file_name=args.init_file_name,\n )\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"st1020/sophia-doc","sub_path":"sophia_doc/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"43582790569","text":"\"\"\"\nFood Oasis to ESRI \n\nConnect to Food Oasis API.\nExport as ESRI layer.\n\"\"\"\nimport requests\nimport pandas\nimport os\n\nfrom common_utils import utils\n\nlahub_user = os.environ[\"LAHUB_ACC_USERNAME\"]\nlahub_pass = os.environ[\"LAHUB_ACC_PASSWORD\"]\n\nURL = 
\"https://foodoasis.la/api/stakeholderbests?categoryIds[]=1&categoryIds[]=9&latitude=33.99157326008516&longitude=-118.25853610684041&distance=5&isInactive=either&verificationStatusId=0&maxLng=-117.83718436872704&maxLat=34.193301591847344&minLng=-118.67988784495431&minLat=33.78936487151597&tenantId=1\"\nOUTPUT_FILE = \"./Food Oasis LA.csv\"\nfla_layer = 'b3a61e62a98d46ecb078aca873fa1eae'\n\ncategory_type_dict = {\n \"FPF\": \"Food Pantry\",\n \"MPF\": \"Meal Program\",\n \"OTF\": \"Other\",\n \"SHF\": \"Shelter\",\n \"FBF\": \"Food Bank\",\n \"CCF\": \"Care Center\",\n \"UKF\": \"Unknown\",\n \"CGF\": \"Community Garden\",\n}\n\ndef foodoasisla(json, output):\n r = requests.get(json)\n j = r.json()\n df = pandas.DataFrame.from_dict(j)\n #fix column that looks like a dictionary\n split_df = (pandas.DataFrame.from_records(df.categories)\n .rename(columns = \n {0: \"one\", 1: \"two\", 2: \"three\", 3: \"four\"}\n )\n )\n # Need cleaning, since one row can have up to 4 different entires with the `categories` column\n category_df = pandas.DataFrame()\n for col in [\"one\", \"two\", \"three\", \"four\"]:\n # This apply function unpacks all the dictionary key/value pairs\n # However many items are in there, it'll create new columns for it\n this_col_df = split_df[col].apply(pandas.Series)\n print(\"Unpack our dictionary\")\n category_df = category_df.append(this_col_df, sort=False)\n \n \n # Clean up, drop NaN values\n category_df = (category_df[category_df.stakeholder_id.notna()]\n .reset_index(drop=True)\n .drop(columns = [\"id\", \"display_order\"])\n .rename(columns = {\"name\": \"category_name\"})\n .astype({\"stakeholder_id\": int})\n )\n df2 = pandas.merge(df, \n category_df, \n left_on = \"id\", \n right_on = \"stakeholder_id\",\n validate = \"1:m\"\n ).drop(columns = [\"categories\"])\n \n # Create a string that captures all the possible categories for each stakeholder ID\n df2['categories'] = (df2[['id','category_name']]\n .groupby(['id'])['category_name'].transform(lambda x: ' & '.join(x))\n )\n\n # Drop duplicate obs \n df3 = df2.drop_duplicates(subset = ['id'])\n \n # Create dummy variables flagging various categories\n for key, value in category_type_dict.items():\n df3 = df3.assign(\n new_col = df3.apply(lambda x: value in x.categories, axis=1).astype(int)\n ).rename(columns = {\"new_col\": f\"{value} Flag\"})\n\n keep_cols = [\n 'name','categories','address1','address2','city','state','zip','phone',\n 'latitude','longitude','website','notes',\n 'email','facebook','twitter','pinterest','linkedin',\n 'description','donationSchedule','donationDeliveryInstructions','donationNotes',\n 'covidNotes','categoryNotes','eligibilityNotes','isVerified',\n 'Food Pantry Flag', 'Meal Program Flag', 'Other Flag', 'Shelter Flag', \n 'Food Bank Flag', 'Care Center Flag', 'Unknown Flag', 'Community Garden Flag'\n ]\n\n fla = df3[keep_cols].copy()\n \n fla.index.name='UNIQID'\n fla.to_csv(output)\n print(\"Successfully exported as csv\")\n\n \nif __name__ == \"__main__\":\n foodoasisla(URL, OUTPUT_FILE)\n utils.update_geohub_layer('https://lahubcom.maps.arcgis.com', lahub_user, lahub_pass, fla_layer, OUTPUT_FILE)","repo_name":"CityOfLosAngeles/civis-gcp-transition","sub_path":"src/foodoasisla_geohub.py","file_name":"foodoasisla_geohub.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"35947378204","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 29 14:05:18 2016\n\n@author: 
Andy\n\"\"\"\nfrom sklearn.cluster import KMeans\nimport csv,cv2\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef most_powerful(kps,descs,size):\n responses = [i.response for i in kps]\n responses.sort(reverse = True)\n kept = responses[:size]\n resultkps = []\n resultdescs = []\n for i,j in zip(kps,descs):\n if i.response in kept:\n resultkps.append(i)\n resultdescs.append(j)\n return resultkps,resultdescs\n \n \ndef process_image(file):\n img = cv2.imread(file)\n gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n \n sift = cv2.xfeatures2d.SIFT_create()\n kp = sift.detect(gray,None)\n img=cv2.drawKeypoints(gray,kp,cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n (kps,descs) = sift.detectAndCompute(gray,None)\n print('keypoints: {number} and shape: {shape}'.format(number=len(kps),shape=descs.shape))\n return kps,descs\n\ndef get_imgdata(filelist,size):\n largerresult = []\n for i in filelist:\n kps,descs = process_image(i)\n adkps,addescs = most_powerful(kps,descs,size)\n for j in addescs:\n largerresult.append(j)\n return largerresult\n\ndef write_down(alldata,filelist,ncluster,desc):\n kmeans = KMeans(n_clusters=ncluster, random_state=0).fit(alldata)\n fDict = {}\n imgDict = {}\n for indexI in filelist:\n imgDict[indexI] = None\n for img in filelist:\n kps,descs = process_image(img)\n labels = kmeans.predict(descs)\n for i in range(0,ncluster,1):\n fDict[i] = 0\n for clus in labels:\n fDict[clus] += 1\n sums = sum(fDict.values())\n for i in fDict.keys():\n fDict[i] = fDict[i]/sums\n imgDict[img] = [ fDict[i] for i in range(0,ncluster,1) ]\n with open(desc,'w') as f:\n csvwriter = csv.writer(f)\n for i in filelist:\n csvwriter.writerow([i]+imgDict[i])\n f.close()\n \ndef main():\n where = str(input('The image location: like /users/andy/desktop/Project3_poodleKFC_train/images :\\n'))\n os.chdir(where)\n imgslist =os.listdir()\n imgslist.pop(0)\n size = int(input('How many to extract from each picture? i.e. 50 :\\n'))\n alldata = get_imgdata(imgslist,size)\n ncluster = int(input('What size for your codebook? i.e. 5000 : \\n'))\n desc = str(input('Where do you want to store csv? i.e. 
/users/andy/desktop/ss.csv :\\n'))\n write_down(alldata,imgslist,ncluster,desc)\n \nif __name__ == '__main__':\n main()\n \n \n\n\n \n","repo_name":"TZstatsADS/Fall2016-proj3-grp6","sub_path":"output/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"32565249203","text":"from django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.views.generic import TemplateView\n\nfrom app.api_data.financial import FinancialClient\n\n\nclass FinancialView(TemplateView):\n \"\"\"\n Class View para setor fianceiro.\n \"\"\"\n template_name = 'admin/index.html'\n\n def get_last_date(self):\n client = FinancialClient(**self.auth_data)\n try:\n status_code, data = client.get_financial_sales_data()\n periodos = data.get('periodos')\n if periodos:\n return periodos[0]\n\n except Exception as err:\n pass\n\n def dispatch(self, request, *args, **kwargs):\n self.auth_data = self.request.session['auth_data']\n return super(FinancialView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(FinancialView, self).get_context_data(**kwargs)\n context['header_name'] = self.auth_data['nome']\n\n if 'errors' in kwargs:\n return context\n\n client = FinancialClient(**self.auth_data)\n\n try:\n status_code, data = client.get_financial_data()\n except Exception as err:\n context['errors'] = 'Erro ao obter informações do servidor.'\n return context\n\n if status_code >= 400:\n context['errors'] = data\n elif status_code == 200:\n context['ultima_data'] = self.get_last_date()\n context['financeiro'] = data\n\n return context\n","repo_name":"Weslley/TheFarmaClient","sub_path":"app/views/admin/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"25590810884","text":"def begin(autor=None, title=None, language=\"english\", margin=None):\r\n \"\"\" ajoute les package dans le document\"\"\"\r\n text = [r\"\\documentclass{article}\", r\"\\usepackage[utf8]{inputenc}\", \"\",\r\n r\"\\usepackage[\"+language+\"]{babel}\", r\"\\usepackage{hyperref}\", \r\n r\"\\usepackage{graphicx}\", r\"\\usepackage{listings}\", \"\"]\r\n \r\n if margin != \"big\":\r\n set_margin = [r\"\\usepackage{geometry}\", r\"\\geometry{hmargin=2.5cm,vmargin=1.5cm}\", \"\"]\r\n text += set_margin\r\n\r\n text.append(r\"\\begin{document}\")\r\n text.append(\"\")\r\n\r\n if not autor is None:\r\n if type(autor) == str:\r\n text.append(autor)\r\n\r\n elif type(autor) == list:\r\n for x in autor:\r\n text.append(x+r'\\\\')\r\n \r\n text.append(\"\")\r\n \r\n if not title is None:\r\n make_title=[r\"\\begin{center}\", r\"\\Large\", title, \"\\end{center}\", r\"\\bigskip\"]\r\n text += make_title\r\n \r\n text.append(r'\\tableofcontents')\r\n text.append(r\"\\bigskip\")\r\n text.append(\"\") \r\n\r\n return text\r\n\r\n\r\ndef end():\r\n return [r\"\\end{document}\"]","repo_name":"Highdrien/Markdown_to_LaTeX_Converter","sub_path":"src/begin_end.py","file_name":"begin_end.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"15063923661","text":"from setuptools import setup, find_packages\n\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 
'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='SimString-cuda',\n version='0.1.0',\n url='https://github.com/fginter/simstring-cuda.git',\n author='Filip Ginter',\n author_email='filip.ginter@gmail.com',\n description=\"A poor-man's version of simistring-like lookup. Can hold its ground if the DB is few million strings, a GPU is present, and queries are batched by about a hundred strings.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=find_packages(), \n install_requires=['sklearn', 'torch'],\n scripts=['simscuda']\n)\n","repo_name":"fginter/simstring-cuda","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"41510959868","text":"import os,sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nfunc = os.path.basename(__file__).split('_test.py')[0]\nfrom common.gmpackage import *\n@ddt\nclass Report(unittest.TestCase):\n '''\n 举报帖子/回复/日记本\n '''\n\n @classmethod\n def setUpClass(cls):\n cls.host = g.host\n cls.api_name = g.api_name(func)\n cls.url = cls.host + cls.api_name\n cls.diary_id = diary_id_get()\n cls.reason_id = reason_id_get()\n\n\n @data(*(get_values(func, \"test_report\")))\n def test_report(self,value):\n self._testMethodDoc = \"--\"\n '''\n 举报帖子/回复/日记本\n '''\n data = {\n \"reason_id\":self.reason_id,\n 'diarybook_id':self.diary_id\n }\n r = gmhttp.post(self.url,data=data).json()\n self.assertIn(r.get(\"error\"),[0,1])\n if r.get('error') == 1:\n self.assertEqual(r.get('error_code'),13001)\n print('用例执行完毕!')\n\n\n def tearDown(self):\n #删除日记本\n pass\n\n\nif __name__ == \"__main__\":\n Report.run()","repo_name":"andy-29/AutomatedTest","sub_path":"testCase/report_test.py","file_name":"report_test.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17680126127","text":"import request\nimport log\n\n# 设置日志输出级别\nlog.basicConfig(level=log.INFO) \nhttp_log = log.getLogger(\"HTTP GET\")\n\nurl = \"http://httpbin.org/get\"\n\nresponse = request.get(url) # 发起http GET请求\nhttp_log.info(response.json()) # 以json方���读取返回\n","repo_name":"mfkiwl/EC100Y-SDK","sub_path":"Demo/http/example_request_get_file.py","file_name":"example_request_get_file.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"34985827012","text":"from abc import abstractmethod\n\nfrom condorpy import Node\nfrom django.db import models\nfrom model_utils.managers import InheritanceManager\n\nfrom tethys_compute.models.condor.condor_py_workflow import CondorPyWorkflow\n\n\nclass CondorWorkflowNode(models.Model):\n \"\"\"\n Base class for CondorWorkflow Nodes\n \"\"\"\n\n TYPES = (\n (\"JOB\", \"JOB\"),\n (\"DAT\", \"DATA\"),\n (\"SUB\", \"SUBDAG\"),\n (\"SPL\", \"SPLICE\"),\n (\"FIN\", \"FINAL\"),\n )\n\n TYPE_DICT = {k: v for v, k in TYPES}\n\n objects = InheritanceManager()\n\n name = models.CharField(max_length=1024)\n workflow = models.ForeignKey(\n CondorPyWorkflow, on_delete=models.CASCADE, related_name=\"node_set\"\n )\n parent_nodes = models.ManyToManyField(\n \"self\", related_name=\"children_nodes\", symmetrical=False\n )\n pre_script = models.CharField(max_length=1024, null=True, blank=True)\n pre_script_args = 
models.CharField(max_length=1024, null=True, blank=True)\n post_script = models.CharField(max_length=1024, null=True, blank=True)\n post_script_args = models.CharField(max_length=1024, null=True, blank=True)\n variables = models.JSONField(default=dict, null=True, blank=True)\n priority = models.IntegerField(null=True, blank=True)\n category = models.CharField(max_length=128, null=True, blank=True)\n retry = models.PositiveSmallIntegerField(null=True, blank=True)\n retry_unless_exit_value = models.IntegerField(null=True, blank=True)\n pre_skip = models.IntegerField(null=True, blank=True)\n abort_dag_on = models.IntegerField(null=True, blank=True)\n abort_dag_on_return_value = models.IntegerField(null=True, blank=True)\n dir = models.CharField(max_length=1024, null=True, blank=True)\n noop = models.BooleanField(default=False)\n done = models.BooleanField(default=False)\n\n @abstractmethod\n def type(self):\n pass\n\n @abstractmethod\n def job(self):\n pass\n\n @property\n def condorpy_node(self):\n if not hasattr(self, \"_condorpy_node\"):\n condorpy_node = Node(\n job=self.job,\n pre_script=self.pre_script,\n pre_script_args=self.pre_script_args,\n post_script=self.post_script,\n post_script_args=self.post_script_args,\n variables=self.variables,\n priority=self.priority,\n category=self.category,\n retry=self.retry,\n pre_skip=self.pre_skip,\n abort_dag_on=self.abort_dag_on,\n abort_dag_on_return_value=self.abort_dag_on_return_value,\n dir=self.dir,\n noop=self.noop,\n done=self.done,\n )\n self._condorpy_node = condorpy_node\n return self._condorpy_node\n\n @property\n def parents(self):\n return self.parent_nodes.select_subclasses()\n\n def add_parent(self, parent):\n self.parent_nodes.add(parent)\n\n def update_database_fields(self):\n pass\n","repo_name":"tethysplatform/tethys","sub_path":"tethys_compute/models/condor/condor_workflow_node.py","file_name":"condor_workflow_node.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"65"} +{"seq_id":"39982749353","text":"\"\"\"Test Ecole observation functions in Python.\n\nMost observation functions are written in Ecole C++ library.\nThis is where the logic should be tested.\nHere,\n - Some tests automatically run the same assertions on all functions;\n - Other tests that observation returned form observation functions are bound to the correct types.\n\"\"\"\n\nimport copy\nimport pickle\n\nimport numpy as np\nimport pytest\n\nimport ecole\n\n\n# TODO adapt for MilpBiparite that must not be in stage solving\ndef pytest_generate_tests(metafunc):\n \"\"\"Parametrize the `observation_function` fixture.\n\n Add observation functions here to have them automatically run all the tests that take\n `observation_function` as input.\n \"\"\"\n if \"observation_function\" in metafunc.fixturenames:\n all_observation_functions = (\n ecole.observation.Nothing(),\n ecole.observation.NodeBipartite(),\n ecole.observation.MilpBipartite(),\n ecole.observation.StrongBranchingScores(True),\n ecole.observation.StrongBranchingScores(False),\n ecole.observation.Pseudocosts(),\n ecole.observation.Khalil2016(),\n ecole.observation.Hutter2011(),\n )\n metafunc.parametrize(\"observation_function\", all_observation_functions)\n\n\ndef test_default_init(observation_function):\n \"\"\"Construct with default arguments.\"\"\"\n type(observation_function)()\n\n\ndef test_before_reset(observation_function, model):\n \"\"\"Successive calls to before_reset.\"\"\"\n 
observation_function.before_reset(model)\n observation_function.before_reset(model)\n\n\ndef test_extract(observation_function, model):\n \"\"\"Obtain observation.\"\"\"\n observation_function.before_reset(model)\n pytest.helpers.advance_to_stage(model, ecole.scip.Stage.Solving)\n observation_function.extract(model, False)\n\n\ndef make_obs(obs_func, model, stage=ecole.scip.Stage.Solving):\n \"\"\"Utility function to extract observation on root node.\"\"\"\n obs_func.before_reset(model)\n pytest.helpers.advance_to_stage(model, stage)\n return obs_func.extract(model, False)\n\n\ndef test_observation_deepcopy(observation_function, model):\n \"\"\"Deepcopy observation.\"\"\"\n obs = make_obs(observation_function, model)\n copy.deepcopy(obs)\n\n\ndef test_observation_pickle(observation_function, model):\n \"\"\"Pickle and unpickle observation.\"\"\"\n obs = make_obs(observation_function, model)\n blob = pickle.dumps(obs)\n obs_copy = pickle.loads(blob)\n\n\ndef assert_array(arr, ndim=1, non_empty=True, dtype=np.double):\n assert isinstance(arr, np.ndarray)\n assert arr.ndim == ndim\n assert (not non_empty) or (arr.size > 0)\n assert arr.dtype == dtype\n\n\ndef test_Nothing_observation(model):\n \"\"\"Observation of Nothing is None.\"\"\"\n assert make_obs(ecole.observation.Nothing(), model) is None\n\n\ndef test_NodeBipartite_observation(model):\n \"\"\"Observation of NodeBipartite is a type with array attributes.\"\"\"\n obs = make_obs(ecole.observation.NodeBipartite(), model)\n assert isinstance(obs, ecole.observation.NodeBipartiteObs)\n assert_array(obs.variable_features, ndim=2)\n assert_array(obs.row_features, ndim=2)\n assert_array(obs.edge_features.values)\n assert_array(obs.edge_features.indices, ndim=2, dtype=np.uint64)\n\n # Check that there are enums describing feeatures\n assert len(obs.VariableFeatures.__members__) == obs.variable_features.shape[1]\n assert len(obs.RowFeatures.__members__) == obs.row_features.shape[1]\n\n\ndef test_MilpBipartite_observation(model):\n \"\"\"Observation of MilpBipartite is a type with array attributes.\"\"\"\n obs = make_obs(ecole.observation.MilpBipartite(), model, stage=ecole.scip.Stage.Problem)\n assert isinstance(obs, ecole.observation.MilpBipartiteObs)\n assert_array(obs.variable_features, ndim=2)\n assert_array(obs.constraint_features, ndim=2)\n assert_array(obs.edge_features.values)\n assert_array(obs.edge_features.indices, ndim=2, dtype=np.uint64)\n\n # Check that there are enums describing feeatures\n assert len(obs.VariableFeatures.__members__) == obs.variable_features.shape[1]\n assert len(obs.ConstraintFeatures.__members__) == obs.constraint_features.shape[1]\n\n\ndef test_StrongBranchingScores_observation(model):\n \"\"\"Observation of StrongBranchingScores is a numpy array.\"\"\"\n obs = make_obs(ecole.observation.StrongBranchingScores(), model)\n assert_array(obs)\n\n\ndef test_Pseudocosts_observation(model):\n \"\"\"Observation of Pseudocosts is a numpy array.\"\"\"\n obs = make_obs(ecole.observation.Pseudocosts(), model)\n assert_array(obs)\n\n\ndef test_Khalil2016_observation(model):\n \"\"\"Observation of Khalil2016 is a numpy matrix.\"\"\"\n obs = make_obs(ecole.observation.Khalil2016(), model)\n assert_array(obs.features, ndim=2)\n\n # Check that there are enums describing feeatures\n assert len(obs.Features.__members__) == obs.features.shape[1]\n\n\ndef test_Hutter2011_observation(model):\n \"\"\"Observation of Hutter2011 is a numpy vector.\"\"\"\n obs = make_obs(ecole.observation.Hutter2011(), model, 
stage=ecole.scip.Stage.Problem)\n assert_array(obs.features, ndim=1)\n\n # Check that there are enums describing feeatures\n assert len(obs.Features.__members__) == obs.features.shape[0]\n","repo_name":"ds4dm/ecole","sub_path":"python/ecole/tests/test_observation.py","file_name":"test_observation.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"65"} +{"seq_id":"5609665727","text":"from typing import Sequence\nimport numpy as np\nimport open3d as o3d\nimport json\nimport bbox_filtering as bf\nimport time as tm\nimport cylinder_filtering as cf\nfrom open3d.visualization import Visualizer \n\n\n# Constants\n# Setup paths\ndata_path = 'dataset/'\n\n\ndef load_skeleton_points_as_nparray(seq_name, hd_idx):\n '''\n Function that loads the skeleton joints\n Input:\n - seq_name: It is the name of the sequence -> type: string\n - hd_idx : it is the index of the frame -> type int \n\n Output\n - Skeleton points: the joints of the skeleton and the hands -> type: list\n '''\n\n skel_points = []\n hands = []\n #Path of the folder that contains the json file with the points of the pose\n hd_skel_json_path = data_path+seq_name+'/hdPose3d_stage1_coco19/'\n #Path of the folder that contains the json file with the points of the hands \n hd_hand_json_path = data_path+seq_name+'/hdHand3d/'\n\n \n try:\n # Load the json file with this frame's skeletons\n skel_json_fname = hd_skel_json_path+'body3DScene_{0:08d}.json'.format(hd_idx)\n with open(skel_json_fname) as dfile:\n bframe = json.load(dfile)\n \n\n # Load hand json\n hand_json_fname = hd_hand_json_path+'handRecon3D_hd{0:08d}.json'.format(hd_idx)\n with open(hand_json_fname) as dfile:\n hframe = json.load(dfile)\n\n # Cycle Bodies\n for ids in range(len(bframe['bodies'])):\n body = bframe['bodies'][ids]\n\n # skeleton format: x,y,z,c where c is the confidence\n # keep 3d coordinates, remove confidence score\n body_points = np.delete(np.array(body['joints19']).reshape((-1,4)), [-1], axis=1)\n\n skel_points.insert(ids, body_points)\n \n # Cycle Hands\n for hand in hframe['people']:\n hand3d_r = np.array(hand['right_hand']['landmarks']).reshape((-1,3))\n hand3d_l = np.array(hand['left_hand']['landmarks']).reshape((-1,3))\n\n hands.append([hand3d_l, hand3d_r])\n\n except IOError as e:\n print('Error reading {0}\\n'.format(skel_json_fname)+e.strerror)\n \n \n skels = [[skel_points[i], hands[i][0], hands[i][1]] for i in range(len(skel_points))]\n\n return skels\n\n\ndef load_ptcloud(sequence, hd_idx, draw=False):\n '''\n Function that loads a point cloud\n Input:\n - sequence: it is the name of the sequence -> type: string \n - hd_idx: it is the index of the frame -> type int \n - draw : indicates if you want to display the point cloud or not -> type: bool\n Output:\n - pcd: it returns the point cloud and if the variable draw is true, it also draws the point cloud \n '''\n\n path = f\"kinoptic_ptclouds/{sequence}\" + \"/ptcloud_hd{0:08d}.ply\".format(hd_idx)\n\n pcd = o3d.io.read_point_cloud(path)\n #If draw = True, it draw the point cloud with a path like variable path\n if draw: \n o3d.visualization.draw_geometries([pcd])\n \n return pcd\n\n\nif __name__ == \"__main__\":\n #Global variables\n sequence = \"170407_haggling_a1\"\n #Run the algorithm for single frame\n '''\n pcd_idx = 1700\n\n #Load pcd\n pcd = load_ptcloud(sequence, pcd_idx, draw=True)\n\n #Load skeleton\n skels = load_skeleton_points_as_nparray(sequence, pcd_idx)\n\n t0 = tm.time()\n #Filter using the fast 
algorithm \n filtered = bf.filter(pcd, skels)\n #Filter with the slow algorithm\n #filtered = cf.filter(pcd,skels)\n t1 = tm.time()\n\n print(t1-t0)\n\n #Visualize the output of the algorithm\n o3d.visualization.draw_geometries([filtered])\n\n '''\n #Run the algorithm for the multiple frames\n range_indices = range(1651, 1700, 4)\n \n vis = Visualizer()\n vis.create_window()\n\n for pcd_idx in range_indices: \n try:\n #Load pcd\n pcd = load_ptcloud(sequence, pcd_idx, draw=False)\n\n #Load skeleton\n skels = load_skeleton_points_as_nparray(sequence, pcd_idx)\n\n t0 = tm.time()\n #Filter using the fast algorithm \n filtered = bf.filter(pcd, skels)\n #Filter with the slow algorithm\n #filtered = cf.filter(pcd,skels)\n #Display the unfiltered point cloud\n #filtered = pcd\n t1 = tm.time()\n\n print(t1-t0)\n\n #Visualize the output of the algorithm\n vis.add_geometry(filtered)\n vis.update_renderer()\n vis.poll_events()\n \n tm.sleep(2)\n vis.remove_geometry(filtered)\n \n except KeyError:\n print(\"The skeleton is probably missing a body part\")\n\n vis.close() \n\n ","repo_name":"andrearigo-dev/fast-point-cloud-multi-person-filtering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"75224925646","text":"from collections import deque\r\n\r\ndef solution(arrangement):\r\n arr=deque(arrangement)\r\n ans=0\r\n stack=deque()\r\n while arr:\r\n targ=arr.popleft()\r\n if targ=='(':\r\n stack.append(targ)\r\n laser=True\r\n elif laser==True:\r\n stack.pop()\r\n ans+=len(stack)\r\n laser=False\r\n else:\r\n stack.pop()\r\n ans+=1\r\n\r\n return ans\r\n\r\nprint(solution('()(((()())(())()))(())'))\r\n","repo_name":"mod96/hiddenlayer","sub_path":"CodingTestExamples/Basic_Algorithms/StackQueue/Stack&Queue 4.py","file_name":"Stack&Queue 4.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"3076194210","text":"def mod_10(x):\n\tx = x % 10\n\treturn x\n\nN = int(input())\ns = []\nfor i in range(N):\n\ts_i = int(input())\n\ts.append(s_i)\n\ns.sort()\n\nm_l = list(map(mod_10, s))\n\nans = sum(s)\nif ans % 10 ==0:\n\tfor i in range(N):\n\t\tif m_l[i] != 0:\n\t\t\tans = ans - s[i]\n\t\t\tbreak\nif sum(m_l) == 0:\n\tans = 0\n\nprint(ans)\n\n\n","repo_name":"knagakura/procon","sub_path":"atcoder/abc063/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"11203410509","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras import models\r\nimport os\r\n\r\ndim = (32, 32)\r\n\r\n# find all images in pictures folder and store their path\r\npath_images = []\r\npath = './pictures'\r\nvalid_images = ['.jpg', '.gif', '.png', '.tga']\r\nfor f in os.listdir(path):\r\n ext = os.path.splitext(f)[1]\r\n if ext.lower() not in valid_images:\r\n continue\r\n path_images.append(os.path.join(path, f))\r\n\r\n# define label names\r\nclass_names = ['Plane', 'Car', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']\r\n\r\n# load model\r\nmodel = models.load_model('image_classifier.model')\r\n\r\n# for all image path, predict the label of image\r\nfor i, path in enumerate(path_images):\r\n img = cv2.imread(path)\r\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\r\n img = cv2.cvtColor(img, 
cv2.COLOR_BGR2RGB)\r\n\r\n prediction = model.predict(np.array([img]) / 255)\r\n index = np.argmax(prediction)\r\n print(f'Prediction is {class_names[index]} with probability = {prediction[0][index]}')\r\n\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.imshow(img, cmap=plt.cm.binary)\r\n plt.title(f'Prediction: {class_names[index]}')\r\n plt.xlabel(f'probability: {prediction[0][index]}')\r\n plt.show()\r\n","repo_name":"xingyuqiu2/Simple-Image-Classifier","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"38696676493","text":"from aimcore.web.api.utils import APIRouter # wrapper for fastapi.APIRouter\nfrom aimcore.web.api.reports.serializers import report_response_serializer\nfrom aimcore.web.api.reports.models import Report\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends, HTTPException\n\n\nfrom aimcore.web.api.reports.pydantic_models import (\n ReportCreateIn, ReportUpdateIn, ReportOut, ReportListOut,\n)\nfrom aimcore.web.api.db import get_session\n\nreports_router = APIRouter()\n\n\n@reports_router.get('/', response_model=ReportListOut)\nasync def reports_list_api(session: Session = Depends(get_session)):\n reports_query = session.query(Report).order_by(Report.updated_at)\n result = [report_response_serializer(report) for report in reports_query]\n return result\n\n\n@reports_router.post('/', status_code=201, response_model=ReportOut)\nasync def reports_post_api(request_data: ReportCreateIn, session: Session = Depends(get_session)):\n report = Report(request_data.code, request_data.name,\n request_data.description)\n session.add(report)\n session.commit()\n return report_response_serializer(report)\n\n\n@reports_router.get('/{report_id}/', response_model=ReportOut)\nasync def reports_get_api(report_id: str, session: Session = Depends(get_session)):\n report = session.query(Report).filter(Report.uuid == report_id).first()\n if not report:\n raise HTTPException(status_code=404)\n return report_response_serializer(report)\n\n\n@reports_router.put('/{report_id}/', response_model=ReportOut)\nasync def reports_put_api(report_id: str, request_data: ReportUpdateIn,\n session: Session = Depends(get_session)):\n report = session.query(Report).filter(Report.uuid == report_id).first()\n if not report:\n raise HTTPException(status_code=404)\n if request_data.code is not None:\n report.code = request_data.code\n if request_data.name is not None:\n report.name = request_data.name\n if request_data.description is not None:\n report.description = request_data.description\n session.commit()\n return report_response_serializer(report)\n\n\n@reports_router.delete('/{report_id}/')\nasync def reports_delete_api(report_id: str, session: Session = Depends(get_session)):\n report = session.query(Report).filter(Report.uuid == report_id).first()\n if not report:\n raise HTTPException(status_code=404)\n session.delete(report)\n session.commit()\n","repo_name":"aimhubio/aimos","sub_path":"src/aimcore/web/api/reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"65"} +{"seq_id":"26025937212","text":"# coding=utf-8\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\n# Create your models here.\nclass portfolio(models.Model):\n project_kor_name = models.CharField( # 1. 
프로젝트 국문이름\n max_length=200,\n help_text='프로젝트의 한글 이름을 입력하세요',\n verbose_name='프로젝트 국문이름',\n )\n project_eng_name = models.CharField( # 2. 프로젝트 영문이름\n max_length=200,\n help_text='프로젝트의 영문 이름을 입력하세요',\n verbose_name='프로젝트 영문이름',\n )\n making_year = models.CharField( # 3. 제작년도\n max_length=4,\n help_text='ex) 2017',\n verbose_name='제작년도',\n )\n making_month = models.CharField( # 4. 제작월\n max_length=2,\n help_text='ex) 12',\n verbose_name='제작월'\n )\n\n WEB = 'W1'\n MOBILE = 'M1'\n APP = 'A1'\n VIDEO = 'V1'\n OTHER = 'O1'\n PROJECT_KIND_CHOICES = (\n (WEB, 'Web'),\n (MOBILE, 'Mobile'),\n (APP, 'App'),\n (VIDEO, 'Video'),\n (OTHER, 'Other'),\n )\n project_kind = models.CharField( # 5. 프로젝트 종류\n max_length=2,\n choices=PROJECT_KIND_CHOICES,\n default=WEB,\n help_text='프로젝트 종류를 선택하세요',\n verbose_name='프로젝트 종류',\n )\n\n project_summary = models.TextField( # 6. 프로젝트 요약\n max_length=1000,\n help_text='프로젝트 요약을 입력하세요',\n verbose_name='프로젝트 요약',\n )\n project_period = models.CharField( # 7. 프로젝트 기간\n max_length=30,\n help_text='ex) 2017.01 ~ 2017.12',\n verbose_name='프로젝트 기간',\n )\n project_range = models.CharField( # 8. 프로젝트 범위\n max_length=500,\n help_text='',\n verbose_name='프로젝트 범위',\n )\n client_name = models.CharField( # 9. 고객사 이름\n max_length=100,\n help_text='',\n verbose_name='고객사 이름',\n )\n producer = models.CharField( # 10. 제작사 이름\n max_length=100,\n help_text='',\n verbose_name='제작사 이름',\n )\n bg_image_horizontal = models.ImageField( # 11. 백그라운드 이미지 가로형\n upload_to='images/%Y%m%d',\n null=True,\n help_text='PC에서 사용 할 배경이미지 - width, height지정필요',\n verbose_name='백그라운드 이미지(가로형)',\n max_length=200,\n )\n bg_image_vertical = models.ImageField( # 11. 백그라운드 이미지 세로형\n upload_to='images/%Y%m%d',\n null=True,\n help_text='MOBILE에서 사용 할 배경 이미지 - width, height지정필요',\n verbose_name='백그라운드 이미지(세로형)',\n max_length=200,\n )\n thumb_image = models.ImageField( # 12. 썸네일이미지\n upload_to='images/%Y%m%d',\n null=True,\n help_text='width, height지정필요',\n verbose_name='썸네일 이미지',\n max_length=200,\n )\n main_image = models.ImageField( # 13. 메인이미지\n upload_to='images/%Y%m%d',\n null=True,\n help_text='width, height지정필요',\n verbose_name='메인 이미지',\n )\n sub_image01 = models.ImageField( # 14. 서브이미지01\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='서브 이미지01',\n )\n sub_image02 = models.ImageField( # 15. 서브이미지02\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='서브 이미지02',\n )\n sub_image03 = models.ImageField( # 16. 서브이미지03\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='서브 이미지03',\n )\n sub_image04 = models.ImageField( # 17. 서브이미지04\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='서브 이미지04',\n )\n icon_image = models.ImageField( # 18. 아이콘이미지\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='아이콘 아미지',\n )\n mobile_image = models.ImageField( # 19. 모바일이미지\n upload_to='images/%Y%m%d',\n null=True,\n help_text='--',\n verbose_name='모바일 이미지',\n )\n video_url = models.TextField( # 20. 영상URL\n null=True,\n help_text='vimeo영상 url소스를 입력하세요',\n verbose_name='영상 URL',\n )\n\n OPEN = 'Y'\n NOTOPEN = 'N'\n OPEN_YN_CHOICE = (\n (OPEN, '공개'),\n (NOTOPEN, '비공개'),\n )\n open_yn = models.CharField( # 21. 공개여부\n max_length=1,\n null=False,\n choices=OPEN_YN_CHOICE,\n default=OPEN,\n help_text='',\n verbose_name='공개여부',\n )\n\n show_order = models.IntegerField( # 22. 노��순서\n null=False,\n help_text='ex) 10',\n verbose_name='노출순서',\n )\n reg_date = models.DateField( # 23. 
등록일자\n auto_now_add=False,\n help_text='오늘일자 또는 원하는 일자를 선택하세요',\n verbose_name='등록일자',\n )\n\n class Meta:\n verbose_name = \"포토폴리오\"\n verbose_name_plural = \"포토폴리오\"\n\n def __unicode__(self):\n return self.project_kor_name\n","repo_name":"lohanyeon/graviti2017_admin","sub_path":"portfolios/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"4878117542","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom arch.layers import conv2d_bn_relu, dense, global_avg_pool2d\nfrom arch.initializers import He_normal, Kumar_normal\nfrom arch import mobilenet\n\n#MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications\n#https://arxiv.org/abs/1704.04861\n#https://github.com/kuangliu/pytorch-cifar/blob/master/models/mobilenet.py\n#https://github.com/Zehaos/MobileNet\ndef cifar10_mobilenet(x, seed = 42):\n layers = []\n variables = []\n\n training = tf.placeholder(tf.bool, name=\"training\")\n variables.append((\"training\", training))\n\n conv = conv2d_bn_relu(\n x, size = 3, n_filters = 32,\n kernel_init = He_normal(seed = seed+1),\n is_training = training,\n name = \"initial_conv\")\n layers.append((\"initial_conv\", conv))\n \n mblock1 = mobilenet.mobilenet_block(\n conv, n_filters = 64, stride = 1,\n kernel_init = He_normal(seed = seed+2),\n is_training = training,\n name = \"mobilenet_block_1\"\n ) \n layers.append((\"mobilenet_block_1\", mblock1))\n\n # 16x16\n mblock2 = mobilenet.mobilenet_block(\n mblock1, n_filters = 128, stride = 2,\n kernel_init = He_normal(seed = seed+3),\n is_training = training,\n name = \"mobilenet_block_2\"\n ) \n layers.append((\"mobilenet_block_2\", mblock2))\n\n mblock3 = mobilenet.mobilenet_block(\n mblock2, n_filters = 128, stride = 1,\n kernel_init = He_normal(seed = seed+4),\n is_training = training,\n name = \"mobilenet_block_3\"\n ) \n layers.append((\"mobilenet_block_3\", mblock3))\n\n # 8x8\n mblock4 = mobilenet.mobilenet_block(\n mblock3, n_filters = 256, stride = 2,\n kernel_init = He_normal(seed = seed+5),\n is_training = training,\n name = \"mobilenet_block_4\"\n ) \n layers.append((\"mobilenet_block_4\", mblock4))\n\n mblock5 = mobilenet.mobilenet_block(\n mblock4, n_filters = 256, stride = 1,\n kernel_init = He_normal(seed = seed+6),\n is_training = training,\n name = \"mobilenet_block_5\"\n ) \n layers.append((\"mobilenet_block_5\", mblock5))\n\n # 4x4\n mblock6 = mobilenet.mobilenet_block(\n mblock5, n_filters = 512, stride = 2,\n kernel_init = He_normal(seed = seed+7),\n is_training = training,\n name = \"mobilenet_block_6\"\n ) \n layers.append((\"mobilenet_block_6\", mblock6))\n\n mblock7 = mobilenet.mobilenet_block(\n mblock6, n_filters = 512, stride = 1,\n kernel_init = He_normal(seed = seed+8),\n is_training = training,\n name = \"mobilenet_block_7\"\n ) \n layers.append((\"mobilenet_block_7\", mblock7))\n\n mblock8 = mobilenet.mobilenet_block(\n mblock7, n_filters = 512, stride = 1,\n kernel_init = He_normal(seed = seed+9),\n is_training = training,\n name = \"mobilenet_block_8\"\n ) \n layers.append((\"mobilenet_block_8\", mblock8))\n \n mblock9 = mobilenet.mobilenet_block(\n mblock8, n_filters = 512, stride = 1,\n kernel_init = He_normal(seed = seed+10),\n is_training = training,\n name = \"mobilenet_block_9\"\n ) \n layers.append((\"mobilenet_block_9\", mblock9))\n \n mblock10 = mobilenet.mobilenet_block(\n mblock9, n_filters = 512, stride = 1,\n kernel_init = 
He_normal(seed = seed+11),\n is_training = training,\n name = \"mobilenet_block_10\"\n ) \n layers.append((\"mobilenet_block_10\", mblock10))\n \n mblock11 = mobilenet.mobilenet_block(\n mblock10, n_filters = 512, stride = 1,\n kernel_init = He_normal(seed = seed+12),\n is_training = training,\n name = \"mobilenet_block_11\"\n ) \n layers.append((\"mobilenet_block_11\", mblock11))\n\n # 2x2\n mblock12 = mobilenet.mobilenet_block(\n mblock11, n_filters = 1024, stride = 2,\n kernel_init = He_normal(seed = seed+13),\n is_training = training,\n name = \"mobilenet_block_12\"\n ) \n layers.append((\"mobilenet_block_12\", mblock12))\n\n mblock13 = mobilenet.mobilenet_block(\n mblock12, n_filters = 1024, stride = 1,\n kernel_init = He_normal(seed = seed+14),\n is_training = training,\n name = \"mobilenet_block_13\"\n ) \n layers.append((\"mobilenet_block_13\", mblock13))\n\n pool = global_avg_pool2d(mblock13)\n layers.append((\"pool\", pool))\n \n dense1 = dense(\n pool, n_units = 10,\n kernel_init = Kumar_normal(activation = None, mode = \"FAN_IN\", seed = seed+15),\n name = \"dense1\")\n layers.append((\"logit\", dense1))\n \n prob = tf.nn.softmax(dense1, name = \"prob\")\n layers.append((\"prob\", prob))\n\n \n return layers, variables\n","repo_name":"autasi/tensorflow_examples","sub_path":"arch/mobilenet_graph.py","file_name":"mobilenet_graph.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"29319858427","text":"from scapy.all import *\nfrom scapy.layers.dns import DNSRR, DNS, DNSQR\nfrom base64 import b64decode\n\npkts = rdpcap('./netlogs.pcap')\npkts = [p for p in pkts if p.haslayer(DNS)]\n\nb64 = \"\"\nfor i, p in enumerate(pkts):\n if p.qdcount == 1:\n # LS0tLS1CRUdJTiBQR1AgTUVTU0FHRS0tLS0tClZlcnNpb246IEdudVBHI-tamu.1e100.net.\n dns_req = p.qd.qname.decode().replace(\"-tamu.1e100.net.\", \"\")\n if b64.endswith(dns_req):\n print(f\"Skipping {i}\")\n continue\n b64 += dns_req\n\nout = b64decode(b64)\nprint(out.decode())\n","repo_name":"NicolaiSoeborg/ctf-writeups","sub_path":"2020/TAMUctf 2020/RUSSIAN_NESTING_DOLL/extract-dns.py","file_name":"extract-dns.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"65"} +{"seq_id":"74287473486","text":"def check(s1, s2):\n temp = len(s1) * len(s2)\n\n if len(s1) < temp:\n s1 *= temp // len(s1)\n if len(s2) < temp:\n s2 *= temp // len(s2)\n\n if s1 == s2:\n return True\n else:\n return False\n\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n str1, str2 = input().split()\n\n if check(str1, str2):\n print(f'#{tc} yes')\n else:\n print(f'#{tc} no')\n","repo_name":"Seonggyu-Bae/TIL_SSAFY","sub_path":"Practice/SWEA/SWEA_무한문자열.py","file_name":"SWEA_무한문자열.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33297165994","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def sortList(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: ListNode\r\n \"\"\"\r\n return self.mergeSort(head)\r\n \r\n \r\n \r\n def mergeSort(self, head):\r\n if head is None or head.next is None:\r\n return head\r\n prev = head\r\n fast = head\r\n slow = head\r\n \r\n while fast is not None and fast.next is not None:\r\n prev = slow\r\n slow = slow.next\r\n 
fast = fast.next.next\r\n \r\n prev.next = None\r\n \r\n l1 = self.mergeSort(head)\r\n l2 = self.mergeSort(slow)\r\n \r\n return self.mergeLists(l1, l2)\r\n \r\n \r\n def mergeLists(self, l1, l2):\r\n dummy = ListNode(0)\r\n l3 = dummy\r\n while l1 is not None and l2 is not None:\r\n if l1.val < l2.val:\r\n l3.next = l1\r\n l1 = l1.next\r\n else:\r\n l3.next = l2\r\n l2 = l2.next\r\n l3 = l3.next\r\n \r\n if l1 is not None:\r\n l3.next = l1\r\n elif l2 is not None:\r\n l3.next = l2\r\n return dummy.next\r\n \r\n \r\n def printList(self, head):\r\n strn = \"\"\r\n while head is not None:\r\n strn += str(head.val) + \", \"\r\n head = head.next\r\n print(strn)","repo_name":"hwillmott/csfundamentals","sub_path":"leetcode/148_sortlist.py","file_name":"148_sortlist.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"19604003444","text":"import pymysql.cursors\nimport pytest\n\nfrom pymysql.connections import Connection\n\n\n@pytest.fixture(scope=\"session\")\ndef db_conn():\n _conn = pymysql.connect(\n host='localhost',\n user='nate',\n password='nate',\n cursorclass=pymysql.cursors.DictCursor\n )\n with _conn as conn:\n yield conn\n\n\ndef test_db_connect(db_conn):\n ...\n\n\ndef test_db_cursor(db_conn):\n print(type(db_conn))\n with db_conn.cursor() as cursor:\n sql = 'SHOW DATABASES;'\n cursor.execute(sql)\n result = cursor.fetchall()\n print(result)\n","repo_name":"nathanielschutte/tosser","sub_path":"test/transactions/test_db_conn.py","file_name":"test_db_conn.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"4399827470","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 18:36:54 2021\n\n@author: apple\n\n\"\"\"\n#被围绕的区域\nclass Solution:\n def solve(self, board):\n if not board:\n return\n \n n, m = len(board), len(board[0])\n\n def dfs(x, y):\n if not 0 <= x < n or not 0 <= y < m or board[x][y] != 'O':\n return\n \n board[x][y] = \"A\"\n dfs(x + 1, y)\n dfs(x - 1, y)\n dfs(x, y + 1)\n dfs(x, y - 1)\n \n for i in range(n):\n dfs(i, 0)\n dfs(i, m - 1)\n \n for i in range(m - 1):\n dfs(0, i)\n dfs(n - 1, i)\n \n for i in range(n):\n for j in range(m):\n if board[i][j] == \"A\":\n board[i][j] = \"O\"\n elif board[i][j] == \"O\":\n board[i][j] = \"X\"\n\n#课程表二\nfrom collections import deque\nclass Solution2:\n def find_order(self, numCourses,prequisites):\n self.n = numCourses\n self.edge = [[] for each in range(numCourses)]\n self.inDeg = [0] * numCourses\n for pre in prequisites:\n ai,bi = pre[0],pre[1]\n self.addEdge(bi,ai)\n return self.topsort()\n def topsort(self):\n order = []\n q = deque()\n for i in range(self.n):\n if self.inDeg[i]==0:\n q.append(i)\n while len(q) > 0:\n x = q.popleft()\n order.append(x)\n for y in self.edge[x]:\n self.inDeg[y] -= 1\n if self.inDeg[y] == 0:\n q.append(y)\n if len(order) == self.n:\n return order\n return[]\n def addEdge(self,u,v):\n self.edge[u].append(v)\n self.inDeg[v] += 1","repo_name":"JICHENGPENG/algorithm_2021","sub_path":"week3/homework_week3.py","file_name":"homework_week3.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17705619330","text":"DATABASE = {\n \"metals\": [\n {\n \"id\": 1,\n \"metal\": \"Sterling Silver\",\n \"price\": 12.42\n },\n {\n \"id\": 2,\n \"metal\": \"14K Gold\",\n \"price\": 
736.4\n },\n {\n \"id\": 3,\n \"metal\": \"24K Gold\",\n \"price\": 1258.9\n },\n {\n \"id\": 4,\n \"metal\": \"Platinum\",\n \"price\": 795.45\n },\n {\n \"id\": 5,\n \"metal\": \"Palladium\",\n \"price\": 1241\n }\n ],\n \"orders\": [\n {\n \"id\": 1,\n \"metalId\": 3,\n \"sizeId\": 2,\n \"styleId\": 3,\n \"timestamp\": 1614659931693\n },\n {\n \"id\": 2,\n \"metalId\": 2,\n \"sizeId\": 2,\n \"styleId\": 2,\n \"timestamp\": 1614659931694\n }\n ],\n \"sizes\": [\n {\n \"id\": 1, \n \"carats\": 0.5, \n \"price\": 405 \n },\n {\n \"id\": 2, \n \"carats\": 0.75, \n \"price\": 782 \n },\n {\n \"id\": 3, \n \"carats\": 1, \n \"price\": 1470 \n },\n {\n \"id\": 4, \n \"carats\": 1.5, \n \"price\": 1997 \n },\n {\n \"id\": 5, \n \"carats\": 2, \n \"price\": 3638 \n }\n ],\n \"styles\": [\n {\n \"id\": 1,\n \"style\": \"Classic\",\n \"price\": 500 \n },\n {\n \"id\": 2,\n \"style\": \"Modern\",\n \"price\": 710 \n },\n {\n \"id\": 3,\n \"style\": \"Vintage\",\n \"price\": 965 \n }\n ]\n }\n\ndef expand(key, data):\n resource = key[:-2]+\"s\"\n matching_data = retrieve(resource, data[key], \"\")\n new_key = key[:-2]\n data[new_key] = matching_data\n del data[key]\n\ndef addPrice(data, foreign_keys):\n total = 0\n # for key in [\"metal\", \"size\", \"style\"]:\n # total += data[key][\"price\"]\n\n for key in foreign_keys:\n resource = key[:-2]+\"s\"\n matching_data = retrieve(resource, data[key], \"\")\n total += matching_data[\"price\"]\n \n data[\"price\"] = round(total, 2)\n\ndef all(resource):\n \"\"\"For GET requests to collection\"\"\"\n return DATABASE[resource]\n\ndef retrieve(resource, id, query_params):\n \"\"\"For GET requests to a single resource\"\"\"\n requested_data = None\n\n for data in DATABASE[resource]:\n if data[\"id\"] == id:\n requested_data = data\n\n if resource == \"orders\":\n foreign_keys = [\"metalId\", \"sizeId\", \"styleId\"]\n\n addPrice(requested_data, foreign_keys)\n\n if \"expand\" in query_params:\n for key in foreign_keys:\n expand(key, requested_data)\n\n return requested_data\n\ndef create(resource, new_data):\n \"\"\"For POST requests to a collection\"\"\"\n max_id = DATABASE[resource][-1][\"id\"]\n new_id = max_id + 1\n new_data[\"id\"] = new_id\n DATABASE[resource].append(new_data)\n return new_data\n\ndef update(resource, id, edited_data):\n \"\"\"For PUT requests to a single resource\"\"\"\n for index, data in enumerate(DATABASE[resource]):\n if data[\"id\"] == id:\n DATABASE[resource][index] = edited_data\n break\n\ndef delete(resource, id):\n \"\"\"For DELETE requests to a single resource\"\"\"\n data_index = -1\n\n for index, data in enumerate(DATABASE[resource]):\n if data[\"id\"] == id:\n data_index = index\n\n if data_index >= 0:\n DATABASE[resource].pop(data_index)","repo_name":"carlydopps/kneel-server","sub_path":"repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"2336734656","text":"#!/usr/bin/env python3\n\nimport sys\nfrom typing import Iterator, Tuple\n\n\ndef split2(infile) -> Iterator[Tuple[set, set]]:\n for line in infile:\n n = len(line)\n yield set(line[:n//2]), set(line[n//2:])\n\n\ndef split3(infile) -> Iterator[Tuple[set, set, set]]:\n infile = iter(infile)\n for line1 in infile:\n line2, line3 = next(infile), next(infile)\n yield set(line1), set(line2), set(line3)\n\n\ndef priority(items: set[str]) -> int:\n return sum('_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(item) for 
item in items)\n\n\ninput = list(line.strip() for line in sys.stdin)\n\ntotal1 = sum( priority(l & r) for (l, r) in split2(input) )\nprint(total1)\n\ntotal2 = sum( priority(a & b & c) for (a, b, c) in split3(input) )\nprint(total2)\n","repo_name":"riggsd/AoC","sub_path":"2022/03/aoc03.py","file_name":"aoc03.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"17311547711","text":"\"\"\"\nRoutes and views for the bottle application.\n\"\"\"\n\nfrom bottle import route, view, get, redirect, request\nfrom datetime import datetime\nimport json\nimport urllib2\n\n#QA LOGIC\n\nQAAPITOKEN = 'fcc1d505c1884c9a81953351449b4ebe'\n\nQAAPPID = '231414e493a34ddb96a96bde60d39f2b'\n\n\ndef getApiToken(companyId):\n return QAAPITOKEN\n\ndef getAppId(companyId):\n return QAAPPID\n\n#END\n\n\n#Release LOGIC\n\n# MARAPITOKEN = 'ea53c4d267fe45a2a5525cdc0781f550'\n# NCSAPITOKEN = 'eb31bf4e16804f768847185318d45c00'\n# PROAPITOKEN = 'ec51d0fba71a4cce83ae6744c5211ba7'\n#\n# MARAPPID = '3e8c3c11679d41158dc15d5088929eae'\n# NCSAPPID = '5678688052d344279b4f7dc00a203d3e'\n# PROAPPID = '1c7acaf01ddf4db3aa8fffa84464927d'\n#\n#\n# def getApiToken(companyId):\n# return {\n# 'MAR': MARAPITOKEN,\n# 'NCS': NCSAPITOKEN,\n# 'PRO': PROAPITOKEN\n# }[companyId]\n#\n#\n# def getAppId(companyId):\n# return {\n# 'MAR': MARAPPID,\n# 'NCS': NCSAPPID,\n# 'PRO': PROAPPID\n# }[companyId]\n\n#END\n\n\n@get('/getApk')\n@route('/getApk')\ndef getApp():\n parameters = request.query.getlist('company')\n companyId = 'NCS'\n if len(parameters) > 0:\n companyId = parameters[0]\n appId = getAppId(companyId)\n redirect('https://rink.hockeyapp.net/api/2/apps/{0}?format=apk'.format(appId), 302)\n\n\ndef getAppInfoJson():\n parameters = request.query.getlist('company')\n companyId = 'NCS'\n if len(parameters) > 0:\n companyId = parameters[0]\n newRequest = urllib2.Request('https://rink.hockeyapp.net/api/2/apps/{0}/app_versions?pages=1'.format(getAppId(companyId)), headers={ 'X-HockeyAppToken': getApiToken(companyId) })\n return json.loads(urllib2.urlopen(newRequest).read())\n\n\n@get('/checkForUpdate')\n@route('/checkForUpdate')\ndef checkForUpdate():\n try:\n appResultJson = getAppInfoJson()\n if appResultJson is None:\n return None\n\n latestVersionInfo = None\n for version in appResultJson['app_versions']:\n if version['status'] == 2:\n latestVersionInfo = version\n break\n\n if latestVersionInfo is None:\n return None\n\n resultJson = json.dumps({\n 'NewVersion' : latestVersionInfo['version'],\n 'UpdateMandatory' : 'false',\n 'ApkSizeInBytes' : str(latestVersionInfo['appsize'])\n })\n return str(resultJson)\n except:\n return None\n\n\n@route('/')\n@route('/home')\n@view('index')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n return dict(\n year=datetime.now().year\n )\n\n\n@route('/contact')\n@view('contact')\ndef contact():\n \"\"\"Renders the contact page.\"\"\"\n return dict(\n title='Contact',\n message='Your contact page.',\n year=datetime.now().year\n )\n\n\n@route('/about')\n@view('about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return dict(\n title='About',\n message='Your application description page.',\n year=datetime.now().year\n )","repo_name":"Saratsin/XUpdate","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22046262438","text":"# version 2 mejorada\nimport 
sys\nimport time\nimport numpy as np\n\nclass insertionSort2:\n def __init__(self,N):\n self.N=N\n def exch(self,arr,a,b):\n swap=arr[a]\n arr[a]=arr[b]\n arr[b]=swap\n def sort(self,arr):\\\n # arranca en 1 por el arr[j-1]\n i=1\n while(i0 and (arr[j]0\n j-=1\n i+=1\n return arr\n\n\nN=int(sys.argv[1])\nprg=insertionSort2(N)\ninput=np.random.randint(0,100,N)\nprint(input)\nstart_time=time.time()\nb=prg.sort(input)\nprint(\"insertionsort2 --- %s seconds ---\" % (time.time() - start_time))\nprint(b)\n# 16.5s N=10.000\n# 1705s N=100.000\n\n","repo_name":"maurosc3ner/algorithms_playground","sub_path":"ch2-Sorting/ch2.1/ECinsertionSort2.py","file_name":"ECinsertionSort2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"65"} +{"seq_id":"9215820551","text":"'''\n最长上升子序列\n300. Longest Increasing Subsequence:https://leetcode.com/problems/longest-increasing-subsequence/\nGiven an unsorted array of integers, find the length of longest increasing subsequence.\n\nExample:\n\nInput: [10,9,2,5,3,7,101,18]\nOutput: 4 \nExplanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4. \nNote:\n\nThere may be more than one LIS combination, it is only necessary for you to return the length.\nYour algorithm should run in O(n2) complexity.\n\n思路1:动态规划:O(n^2)\n 首先想到用动态规划解决该问题,维护数组 dp , dp[i] 表示以第i个元素为结尾的增长序列的长度,\n 则递归式为:dp[i]= max(dp[i], dp[j] + 1) 其中 j 0..i-1 && nums[i] > nums[j]\n\n思路2:Θ(nlgn)的方案,二分查找\n 建立一个辅助数组tails,依次读取数组元素 x 与数组末尾元素 top比较:\n 如果 x > top,将 x 放到数组末尾;\n 如果 x < top,则二分查找数组中第一个 大于等于x 的数,并用 x 替换它。\n'''\n\n\nclass Solution(object):\n def _lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n result = 1\n dp = [1] * len(nums)\n for i in range(len(nums)):\n for j in range(i):\n if nums[i] > nums[j]:\n dp[i] = max(dp[i], dp[j] + 1)\n result = max(result, dp[i])\n return result\n\n def lengthOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n tails = []\n for x in nums:\n if len(tails) == 0 or tails[-1] < x:\n tails.append(x)\n else:\n low, high = 0, len(tails) - 1\n while low <= high:\n mid = (low + high) // 2\n if tails[mid] >= x:\n high = mid - 1\n else:\n low = mid + 1\n tails[low] = x # 找的是第一个大于等于目标的数,又数组是升序的,即从小到大,故取low\n return len(tails)\n\n\ns = Solution()\nnums = [10, 9, 2, 5, 3, 7, 101, 18]\nlenLIS = s.lengthOfLIS(nums)\nprint(lenLIS)\n\nlenLIS = s._lengthOfLIS(nums)\nprint(lenLIS)\n","repo_name":"shouliang/Development","sub_path":"Python/LeetCode/300_length_of_LIS.py","file_name":"300_length_of_LIS.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"41891156403","text":"import iris\nimport sys\nimport yaml\n\nimport matplotlib.pyplot as plt\nimport iris.quickplot as qplt\n\ndef main(settings):\n with open(settings, \"r\") as f:\n settings_dict = yaml.full_load(f)\n\n metadata_paths = settings_dict[\"input_files\"]\n metadata = []\n for metadata_path in metadata_paths:\n with open(metadata_path) as f:\n metadata.append(yaml.full_load(f))\n\n # MultiModelMean campo de temperaturas\n for intermediary_files_meta in metadata:\n for file_meta in intermediary_files_meta.values():\n if file_meta[\"dataset\"] == \"MultiModelMean\" and file_meta[\"variable_group\"] == \"tas_clim_global\":\n cube = iris.load_cube(file_meta[\"filename\"])\n print(cube)\n plt.figure()\n qplt.pcolormesh(cube)\n plt.gca().coastlines()\n 
plt.savefig(f\"{settings_dict['plot_dir']}/global_map.png\")\n\nif __name__ == '__main__':\n settings = sys.argv[1]\n main(settings)","repo_name":"pepcos/taller_ESMValTool_CIMA-DCAO","sub_path":"como_no_escribir_un_diagnostico.py","file_name":"como_no_escribir_un_diagnostico.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"32060311987","text":"from turtle import Turtle\nFONT = (\"Courier\", 24, \"normal\")\n\n\nclass Scoreboard(Turtle):\n def __init__(self):\n super().__init__()\n self.penup()\n self.hideturtle()\n self.currentlev = 1\n self.updatescoreboard()\n\n def updatescoreboard(self):\n self.clear()\n self.goto(-210, 260)\n self.write(f\"Level: {self.currentlev}\", align=\"center\", font=(FONT))\n\n def gameover(self):\n self.goto(0, 0)\n self.write(f\"GAME OVER!\", align=\"center\", font=FONT)\n\n def increase_lev(self):\n self.currentlev += 1\n self.clear()\n self.updatescoreboard()\n","repo_name":"pratikbhadane24/GUI-PythonProjects","sub_path":"Turtle Projects/The Turtle Crossing Game/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33883975968","text":"from threading import Thread, Lock\nimport time\nimport logging\n\nlog = logging.getLogger(__name__)\n\nclass Accounting(Thread):\n def __init__(self, jobQueue, refresh = 5):\n super().__init__(daemon=True)\n self._jobdoing = 0\n self._jobdone = 0\n self._totalJob = 0\n self._joberror = 0\n self.jobQueue = jobQueue\n self.lock = Lock()\n self.refreshTime = refresh\n self.close = False\n self.start()\n\n def increaseJobdoing(self):\n with self.lock:\n self._jobdoing += 1\n\n def decreaseJobdoing(self):\n with self.lock:\n self._jobdoing -= 1\n\n def JobComplete(self):\n with self.lock:\n self._jobdone += 1\n\n def JobFailed(self):\n with self.lock:\n self._joberror += 1\n\n def TotalJob(self):\n log.debug(\"queue size %d, jobdoing %d, jobdone %d, joberror %d\" % (self.jobQueue.qsize(), self._jobdoing, self._jobdone, self._joberror))\n with self.lock:\n self._totalJob = self.jobQueue.qsize() + self._jobdoing + self._jobdone + self._joberror\n\n def update(self):\n self.TotalJob()\n s = \"\\n\\n* Complete: %d/%d, %0.2f%%\\n\" % (self._jobdone, self._totalJob, float(self._jobdone/self._totalJob*100.0))\n s += \"* Error: %d/%d\\n\" % (self._joberror, self._totalJob)\n log.info(s)\n\n def run(self):\n while self._jobdoing == 0 and not self.close:\n time.sleep(1)\n\n while not self.close:\n self.update()\n time.sleep(self.refreshTime)\n\n def shutdown(self):\n self.update()\n self.close = True\n","repo_name":"NguyenKhong/gdriveshare","sub_path":"gdriveshare/accounting.py","file_name":"accounting.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"43767786482","text":"import shutil\nimport tempfile\n\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom ..models import Group, Post, User\n\nTEMP_MEDIA = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA)\nclass TestPostImageFormTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='testuser')\n 
cls.group = cls.group = Group.objects.create(\n title='Заголовок тестовой группы', slug='slug')\n small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n cls.uploaded = SimpleUploadedFile(\n name='small.gif',\n content=small_gif,\n content_type='image/gif')\n cls.post = cls.post = Post.objects.create(text='Тестовый текст',\n author=cls.user,\n group=cls.group,\n image=cls.uploaded)\n cls.templates_pages_names = {\n 'index.html': reverse('index'),\n 'group.html': reverse('group_posts', kwargs={\n 'slug': cls.group.slug}),\n 'profile.html': reverse('profile', kwargs={\n 'username': cls.user.username}),\n 'post.html': reverse('post', kwargs={\n 'username': cls.user.username,\n 'post_id': cls.post.id,\n }),\n }\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)\n super().tearDownClass()\n\n def setUp(self):\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n\n def test_create_task(self):\n for template, reverse_name in self.templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.authorized_client.get(reverse_name)\n self.assertContains(response, ' None:\n\t\tself._lap = lap\n\t\tself.setPixmap(\n\t\t\tContext.pixmap_loader.get_pixmap(lap)\n\t\t)\n\n\tdef mousePressEvent(self, mouse_event: QtGui.QMouseEvent):\n\t\tprint('mouse press', self)\n\t\tsuper().mousePressEvent(mouse_event)\n\t\tself.clicked.emit(self)\n\n\nclass LapRater(QDialog):\n\n\tdef __init__(self, parent: QWidget):\n\t\tsuper().__init__(parent=parent)\n\n\t\tself._layout = QtWidgets.QHBoxLayout()\n\n\t\tself._left_label = LapLabel()\n\t\tself._right_label = LapLabel()\n\n\t\tself._left_label.clicked.connect(self._label_clicked)\n\t\tself._right_label.clicked.connect(self._label_clicked)\n\n\t\tself._layout.addWidget(self._left_label)\n\t\tself._layout.addWidget(self._right_label)\n\n\t\tself.setLayout(self._layout)\n\n\t\tself._lock = Lock()\n\t\tself._picked_lap = None # type: Lap\n\n\tdef _label_clicked(self, lap: Lap) -> None:\n\t\tself._picked_lap = lap\n\t\tself._lock.release()\n\n\tdef _pick_one(self, first: Lap, second: Lap) -> Lap:\n\t\tself._left_label.setPixmap(\n\t\t\tContext.pixmap_loader.get_pixmap(first).get()\n\t\t)\n\t\tself._right_label.setPixmap(\n\t\t\tContext.pixmap_loader.get_pixmap(second).get()\n\t\t)\n\n\t\tself._lock.acquire()\n\t\treturn self._picked_lap\n\n\tdef rate_laps(self, laps: t.Collection[Lap]) -> t.List[Lap]:\n\n\t\tlist_laps = list(laps)\n\n\t\tfirst_lap, second_lap = list_laps[0], list_laps[1]\n\n\t\tprint(self._pick_one(first_lap, second_lap))\n\n\n\n\t\treturn list(laps)\n","repo_name":"guldfisk/cubeeditor","sub_path":"cubeeditor/laprating/laprate.py","file_name":"laprate.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18571894329","text":"N, M = map(int, input().split())\n\ngraph = [[] for i in range(N)]\nleft = set()\nright = set()\nfor i in range(M):\n l, r, d = map(int, input().split())\n l -= 1; r -= 1\n graph[l].append((r, d))\n graph[r].append((l, -d))\n left.add(l); right.add(r)\n\nroot = left - right\nif M and len(root) == 0:\n print('No')\n quit()\n\nX = [None] * N\nwhile root:\n node = root.pop()\n X[node] = 0\n stack = [node]\n while 
stack:\n node = stack.pop()\n while graph[node]:\n child, d = graph[node].pop()\n if X[child] is None:\n X[child] = X[node] + d\n stack.append(child)\n elif X[child] != X[node] + d:\n print('No')\n quit()\n\nprint('Yes')","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03450/s077881479.py","file_name":"s077881479.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"21860467671","text":"import scipy.integrate as integrate\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\n\n\ndef posX(K, L, R, l):\n\treturn integrate.quad(lambda l_: math.cos(K(L, R, l_)), 0, l)[0]\n\t\ndef posY(K, L, R, l):\n\treturn integrate.quad(lambda l_: math.sin(K(L, R, l_)), 0, l)[0]\n\t\t\n\n# clothoid\n# k = l / A2\n# K = l^2 / (A2 * 2) \n\ndef K_Clothoid(L, R, l):\n\tA2 = R * L\n\treturn pow(l, 2) / (A2 * 2)\n\t\ndef k_Clothoid(L, R, l):\n\tA2 = R * L\n\treturn l / A2\n\n# calculated with matlab\n# needed approxCurve.md\n# k = l /(R*L)\n# [x, y, X, Y] = approxCurve(k, 4)\ndef X_Clothoid(L, R, l):\n\treturn l - l**5/(40*L**2*R**2) + l**9/(3456*L**4*R**4) - l**13/(599040*L**6*R**6)\n\t\ndef Y_Clothoid(L, R, l):\n\treturn l**3/(6*L*R) - l**7/(336*L**3*R**3) + l**11/(42240*L**5*R**5) - l**15/(9676800*L**7*R**7)\n\t\n# bloss \n# K = l^3 / (R * L^2) - l^4 * (2 * R * L^3) \n# k = l^2 * 3 / (R * L^2) - l^3 * 2 / (R * L^3)\ndef K_Bloss(L, R, l):\n\treturn pow(l, 3) / (R * pow(L, 2)) - pow(l,4) / (2 * R * pow(L, 3))\n\t\ndef k_Bloss(L, R, l):\n\treturn math.pow(l, 2) * 3 / (R * math.pow(L, 2)) - math.pow(l, 3) * 2 / (R * math.pow(L, 3))\n\n# calculated with matlab\n# needed approxCurve.md\n# k = 3 * l^2 /(R*L^2) - 2 * l^3 /(R*L^3)\n# [x, y, X, Y] = approxCurve(k, 4)\ndef X_Bloss(L, R, l):\n\treturn l - l**7/(14*L**4*R**2) + l**8/(16*L**5*R**2) - l**9/(72*L**6*R**2) + l**13/(312*L**8*R**4) - l**14/(168*L**9*R**4) + l**15/(240*L**10*R**4) - l**16/(768*L**11*R**4) + l**17/(6528*L**12*R**4) - l**19/(13680*L**12*R**6) + l**20/(4800*L**13*R**6) - l**21/(4032*L**14*R**6) + l**22/(6336*L**15*R**6) - l**23/(17664*L**16*R**6) + l**24/(92160*L**17*R**6) - l**25/(1152000*L**18*R**6)\n\t\ndef Y_Bloss(L, R, l):\n\treturn l**4/(4*L**2*R) - l**5/(10*L**3*R) - l**10/(60*L**6*R**3) + l**11/(44*L**7*R**3) - l**12/(96*L**8*R**3) + l**13/(624*L**9*R**3) + l**16/(1920*L**10*R**5) - l**17/(816*L**11*R**5) + l**18/(864*L**12*R**5) - l**19/(1824*L**13*R**5) + l**20/(7680*L**14*R**5) - l**21/(80640*L**15*R**5) - l**22/(110880*L**14*R**7) + l**23/(33120*L**15*R**7) - l**24/(23040*L**16*R**7) + l**25/(28800*L**17*R**7) - l**26/(59904*L**18*R**7) + l**27/(207360*L**19*R**7) - l**28/(1290240*L**20*R**7) + l**29/(18708480*L**21*R**7)\n\t\ndef getTransitionCurve(K, L, R, n):\n\txs = []\n\tys = []\n\t\n\tfor i in range(0, n):\n\t\tl = i * (L / (n-1)) + 0.001\n\t\tx = posX(K, L, R, l)\n\t\ty = posY(K, L, R, l)\n\t\t\n\t\txs.append(x)\n\t\tys.append(y)\n\t\t\n\treturn xs, ys\n\t\ndef calculateCurvature(xs, ys):\n\ta = np.array([xs, ys]).T\n\t\n\tdx = np.gradient(a[:, 0])\n\tdy = np.gradient(a[:, 1])\n\t\n\tds = np.sqrt(dx * dx + dy * dy)\n\t\n\td2x = np.gradient(dx)\n\td2y = np.gradient(dy)\n\t\n\tcurvature = np.abs(d2x * dy - dx * d2y) / (dx * dx + dy * dy)**1.5\n\t\n\treturn curvature\n\t\nn = 1500\nR = 4.0\nL = 10.0\n\nxsC, ysC = getTransitionCurve(K_Clothoid, L, R, n)\nxsB, ysB = getTransitionCurve(K_Bloss, L, R, n)\n\nxsC2 = []\nysC2 = []\n\nxsB2 = []\nysB2 = []\n\n# approximation of bloss curve\nfor i in range(0, n):\n\t# 
somewhere in here is an error\n\n\tl = i * (L / (n-1)) + 0.001 # length of subcurve\n\t\n\txC = X_Clothoid(L, R, l)\n\tyC = Y_Clothoid(L, R, l)\n\t\n\txB = X_Bloss(L, R, l)\n\tyB = Y_Bloss(L, R, l)\n\t\n\txsC2.append(xC)\n\tysC2.append(yC)\n\t\n\txsB2.append(xB)\n\tysB2.append(yB)\n\t\n\nif True: # plot transition curves\n\tplot1 = plt.plot(xsC, ysC, label=\"clothoid\")\n\tplot2 = plt.plot(xsB, ysB, label=\"bloss\")\n\tplot3 = plt.plot(xsC2, ysC2, label=\"clothoid approx\")\n\tplot4 = plt.plot(xsB2, ysB2, label=\"bloss approx\")\n\t\n\tfor i in range(0, 10):\t\n\t\tl = L * i / (9)\n\t\tangle = K_Bloss(L, R, l)\n\t\tx0 = posX(K_Bloss, L, R, l)\n\t\ty0 = posY(K_Bloss, L, R, l)\n\t\t\n\t\tx1 = math.cos(angle) * 0.1 + x0\n\t\ty1 = math.sin(angle) * 0.1 + y0\n\t\tx2 = math.cos(angle) * -0.1 + x0\n\t\ty2 = math.sin(angle) * -0.1 + y0\n\t\t\n\t\tplt.plot([x2, x1], [y2, y1], color='b')\n\t\n\tplt.legend(handles=[plot1[0], plot2[0], plot3[0], plot4[0]], loc=2)\n\tplt.axes().set_aspect('equal', 'datalim')\n\tplt.show()\n\t\nif False: # plot xs and ys clothoid separately\n\tplot1 = plt.plot(xsC, label=\"x clothoid\")\n\tplot2 = plt.plot(xsC2, label=\"x clothoid approx\")\n\t\n\tplot3 = plt.plot(ysC, label=\"y clothoid\")\n\tplot4 = plt.plot(ysC2, label=\"y clothoid approx\")\n\t\n\tplt.legend(handles=[plot1[0], plot2[0], plot3[0], plot4[0]], loc=2)\n\tplt.show()\n\t\nif False: # plot xs and ys bloss separately\n\tplot1 = plt.plot(xsB, label=\"x bloss\")\n\tplot2 = plt.plot(xsB2, label=\"x bloss approx\")\n\t\n\tplot3 = plt.plot(ysB, label=\"y bloss\")\n\tplot4 = plt.plot(ysB2, label=\"y bloss approx\")\n\t\n\tplt.legend(handles=[plot1[0], plot2[0], plot3[0], plot4[0]], loc=2)\n\tplt.show()\n\t\nif False: # plot clothoid curvature\n\tcC = calculateCurvature(xsC, ysC)\n\tcC2 = calculateCurvature(xsC2, ysC2)\n\t\n\tcC3 = [k_Clothoid(L, R, i * (L/(n-1)) + 0.001) for i in range(0, n)]\n\n\tplot1 = plt.plot(cC, label=\"k\")\n\tplot2 = plt.plot(cC2, label=\"k approx\")\n\tplot3 = plt.plot(cC3, label=\"k control\")\n\t\n\tplt.legend(handles=[plot1[0], plot2[0], plot3[0]], loc=2)\n\tplt.show()\n\nif False: # plot bloss curvature\n\tcB = calculateCurvature(xsB, ysB)\n\tcB2 = calculateCurvature(xsB2, ysB2)\n\t\n\tcB3 = [k_Bloss(L, R, i * (L/(n-1)) + 0.001) for i in range(0, n)]\n\n\tplot1 = plt.plot(cB, label=\"k\")\n\tplot2 = plt.plot(cB2, label=\"k approx\")\n\tplot3 = plt.plot(cB3, label=\"k control\")\n\t\n\tplt.legend(handles=[plot1[0], plot2[0], plot3[0]], loc=2)\n\tplt.show()\n\n\n","repo_name":"tumcms/Open-Infra-Platform","sub_path":"Documentation/retired/bloss.py","file_name":"bloss.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"90"} +{"seq_id":"8043661887","text":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import render, redirect\nfrom django.db import models\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\n# Create your views here.\n\nvulgar_words_french = [\n \"con\",\n \"connard\",\n \"conasse\",\n \"pute\",\n \"putain\",\n \"salope\",\n \"merde\",\n \"enculé\",\n \"enculée\",\n \"bordel\",\n \"couille\",\n \"bite\",\n \"chier\",\n \"nique\",\n]\n\nvulgar_words_english = [\n \"fuck\",\n \"shit\",\n \"asshole\",\n \"bitch\",\n \"cunt\",\n \"bastard\",\n \"dick\",\n \"cock\",\n \"pussy\",\n \"slut\",\n \"whore\",\n \"fag\",\n \"faggot\",\n \"damn\",\n]\n\n\nlist_jokes = []\nblack_list = ['']\n\ndef censor_vulgar_words(text):\n words = 
text.split()\n censored_words = []\n\n for word in words:\n if word.lower() in vulgar_words_french or word.lower() in vulgar_words_english:\n censored_word = '*' * len(word)\n else:\n censored_word = word\n censored_words.append(censored_word)\n\n censored_text = ' '.join(censored_words)\n return censored_text\n\n\n\nclass Joke(models.Model):\n i = 1\n def __init__(self, username, content):\n self.id = Joke.i\n self.username = username.capitalize()\n self.content = censor_vulgar_words(content)\n Joke.i = Joke.i + 1\n\n\n\ndef addView(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n story = request.POST.get('story')\n joke = Joke(username, story)\n list_jokes.append(joke)\n return redirect('joke:index')\n return render(request, 'joke/addView.html')\n\n\ndef index(request):\n return render(request,\"joke/index.html\", {'jokes':list_jokes})\n\ndef entry_view(request, id):\n if (id > len(list_jokes)):\n raise Http404(\"Page not found\")\n joke = list_jokes[id-1]\n data = {\n 'id': id,\n 'username': joke.username,\n 'content': joke.content,\n }\n return JsonResponse(data)\n","repo_name":"aminekebouche/ProgramationWeb_M1","sub_path":"problem_4/talk/joke/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18574847949","text":"N=int(input())\nct,cx,cy=0,0,0\nfor i in range(N):\n t,x,y=map(int,input().split())\n dt=t-ct\n dx=x-cx\n dy=y-cy\n if dt 0:\n cluster_name = args[0]\n if len(args) > 1:\n role = args[1]\n if len(args) > 2:\n env = args[2]\n if len(args) > 3:\n job = args[3]\n else:\n # TODO(ksweeney): Remove this after MESOS-2945 is completed.\n die('env scheduler pages are not yet implemented, please specify job')\n\n if not cluster_name:\n die('cluster is required')\n\n api = make_client(cluster_name)\n\n import webbrowser\n webbrowser.open_new_tab(synthesize_url(api.scheduler.scheduler().url, role, env, job))\n\n\n@app.command\n@app.command_option('--local', dest='local', default=False, action='store_true',\n help='Inspect the configuration as would be created by the \"spawn\" command.')\n@app.command_option('--raw', dest='raw', default=False, action='store_true',\n help='Show the raw configuration.')\n@app.command_option(ENVIRONMENT_BIND_OPTION)\n@app.command_option(CLUSTER_CONFIG_OPTION)\n@app.command_option(ENV_CONFIG_OPTION)\n@app.command_option(JSON_OPTION)\n@requires.exactly('cluster/role/env/job', 'config')\ndef inspect(job_spec, config_file):\n \"\"\"usage: inspect cluster/role/env/job config\n\n Verifies that a job can be parsed from a configuration file, and displays\n the parsed configuration.\n \"\"\"\n options = app.get_options()\n config = get_job_config(job_spec, config_file, options)\n if options.raw:\n print('Parsed job config: %s' % config.job())\n return\n\n job_thrift = config.job()\n job = config.raw()\n job_thrift = config.job()\n print('Job level information')\n print(' name: %s' % job.name())\n print(' role: %s' % job.role())\n print(' contact: %s' % job.contact())\n print(' cluster: %s' % job.cluster())\n print(' instances: %s' % job.instances())\n if job.has_cron_schedule():\n print(' cron:')\n print(' schedule: %s' % job.cron_schedule())\n print(' policy: %s' % job.cron_collision_policy())\n if job.has_constraints():\n print(' constraints:')\n for constraint, value in job.constraints().get().items():\n print(' %s: %s' % (constraint, value))\n print(' service: %s' % 
job_thrift.taskConfig.isService)\n print(' production: %s' % bool(job.production().get()))\n print()\n\n task = job.task()\n print('Task level information')\n print(' name: %s' % task.name())\n if len(task.constraints().get()) > 0:\n print(' constraints:')\n for constraint in task.constraints():\n print(' %s' % (' < '.join(st.get() for st in constraint.order())))\n print()\n\n processes = task.processes()\n for process in processes:\n print('Process %s:' % process.name())\n if process.daemon().get():\n print(' daemon')\n if process.ephemeral().get():\n print(' ephemeral')\n if process.final().get():\n print(' final')\n print(' cmdline:')\n for line in process.cmdline().get().splitlines():\n print(' ' + line)\n print()\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\n@app.command_option(OPEN_BROWSER_OPTION)\ndef start_cron(args, options):\n \"\"\"usage: start_cron cluster/role/env/job\n\n Invokes a cron job immediately, out of its normal cron cycle.\n This does not affect the cron cycle in any way.\n \"\"\"\n\n api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(\n args, options, make_client_factory())\n config = get_job_config(job_key.to_path(), config_file, options) if config_file else None\n resp = api.start_cronjob(job_key, config=config)\n check_and_log_response(resp)\n handle_open(api.scheduler.scheduler().url, job_key.role, job_key.env, job_key.name)\n\n\n@app.command\n@app.command_option(\n '--pretty',\n dest='pretty',\n default=False,\n action='store_true',\n help='Show job information in prettyprinted format')\n@app.command_option(\n '--show-cron',\n '-c',\n dest='show_cron_schedule',\n default=False,\n action='store_true',\n help='List jobs registered with the Aurora scheduler')\n@requires.exactly('cluster/role')\ndef list_jobs(cluster_and_role):\n \"\"\"usage: list_jobs [--show_cron_schedule] cluster/role/env/job\"\"\"\n def show_job_simple(job):\n if options.show_cron_schedule:\n print(('{0}/{1.key.role}/{1.key.environment}/{1.key.name}' +\n '\\t\\'{1.cronSchedule}\\'\\t{1.cronCollisionPolicy}').format(cluster, job))\n else:\n print('{0}/{1.key.role}/{1.key.environment}/{1.key.name}'.format(cluster, job))\n\n def show_job_pretty(job):\n print(\"Job %s/%s/%s/%s:\" %\n (cluster, job.key.role, job.key.environment, job.key.name))\n print('\\tcron schedule: %s' % job.cronSchedule)\n print('\\tcron policy: %s' % job.cronCollisionPolicy)\n\n options = app.get_options()\n if options.show_cron_schedule and options.pretty:\n print_fn = show_job_pretty\n else:\n print_fn = show_job_simple\n # Take the cluster_and_role parameter, and split it into its two components.\n if cluster_and_role.count('/') != 1:\n die('list_jobs parameter must be in cluster/role format')\n (cluster,role) = cluster_and_role.split('/')\n api = make_client(cluster)\n resp = api.get_jobs(role)\n check_and_log_response(resp)\n for job in resp.result.getJobsResult.configs:\n print_fn(job)\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\n@app.command_option(OPEN_BROWSER_OPTION)\n@app.command_option(SHARDS_OPTION)\ndef kill(args, options):\n \"\"\"usage: kill cluster/role/env/job\n\n Kills a running job, blocking until all tasks have terminated.\n\n Default behaviour is to kill all shards in the job, but the kill\n can be limited to specific shards with the --shards option\n \"\"\"\n api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(\n args, options, make_client_factory())\n options = app.get_options()\n config = get_job_config(job_key.to_path(), 
config_file, options) if config_file else None\n resp = api.kill_job(job_key, options.shards, config=config)\n check_and_log_response(resp)\n handle_open(api.scheduler.scheduler().url, job_key.role, job_key.env, job_key.name)\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\ndef status(args, options):\n \"\"\"usage: status cluster/role/env/job\n\n Fetches and prints information about the active tasks in a job.\n \"\"\"\n def is_active(task):\n return task.status in ACTIVE_STATES\n\n def print_task(scheduled_task):\n assigned_task = scheduled_task.assignedTask\n taskInfo = assigned_task.task\n taskString = ''\n if taskInfo:\n taskString += '''cpus: %s, ram: %s MB, disk: %s MB''' % (taskInfo.numCpus,\n taskInfo.ramMb,\n taskInfo.diskMb)\n if assigned_task.assignedPorts:\n taskString += '\\n\\tports: %s' % assigned_task.assignedPorts\n taskString += '\\n\\tfailure count: %s (max %s)' % (scheduled_task.failureCount,\n taskInfo.maxTaskFailures)\n taskString += '\\n\\tevents:'\n for event in scheduled_task.taskEvents:\n taskString += '\\n\\t\\t %s %s: %s' % (datetime.fromtimestamp(event.timestamp / 1000),\n ScheduleStatus._VALUES_TO_NAMES[event.status],\n event.message)\n taskString += '\\n\\tpackages:'\n for pkg in assigned_task.task.packages:\n taskString += ('\\n\\t\\trole: %s, package: %s, version: %s' % (pkg.role, pkg.name, pkg.version))\n\n return taskString\n\n def print_tasks(tasks):\n for task in tasks:\n taskString = print_task(task)\n\n log.info('role: %s, env: %s, name: %s, shard: %s, status: %s on %s\\n%s' %\n (task.assignedTask.task.owner.role,\n task.assignedTask.task.environment,\n task.assignedTask.task.jobName,\n task.assignedTask.instanceId,\n ScheduleStatus._VALUES_TO_NAMES[task.status],\n task.assignedTask.slaveHost,\n taskString))\n for pkg in task.assignedTask.task.packages:\n log.info('\\tpackage %s/%s/%s' % (pkg.role, pkg.name, pkg.version))\n\n api, job_key, _ = LiveJobDisambiguator.disambiguate_args_or_die(\n args, options, make_client_factory())\n resp = api.check_status(job_key)\n check_and_log_response(resp)\n\n tasks = resp.result.scheduleStatusResult.tasks\n if tasks:\n active_tasks = filter(is_active, tasks)\n log.info('Active Tasks (%s)' % len(active_tasks))\n print_tasks(active_tasks)\n inactive_tasks = filter(lambda x: not is_active(x), tasks)\n log.info('Inactive Tasks (%s)' % len(inactive_tasks))\n print_tasks(inactive_tasks)\n else:\n log.info('No tasks found.')\n\n\n@app.command\n@app.command_option(SHARDS_OPTION)\n@app.command_option(ENVIRONMENT_BIND_OPTION)\n@app.command_option(CLUSTER_CONFIG_OPTION)\n@app.command_option(ENV_CONFIG_OPTION)\n@app.command_option(JSON_OPTION)\n@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)\n@app.command_option(\n '--force',\n dest='force',\n default=True, # TODO(maximk): Temporary bandaid for MESOS-4310 until a better fix is available.\n action='store_true',\n help='Turn off warning message that the update looks large enough to be disruptive.')\n@requires.exactly('cluster/role/env/job', 'config')\ndef update(job_spec, config_file):\n \"\"\"usage: update cluster/role/env/job config\n\n Performs a rolling upgrade on a running job, using the update configuration\n within the config file as a control for update velocity and failure tolerance.\n\n Updates are fully controlled client-side, so aborting an update halts the\n update and leaves the job in a 'locked' state on the scheduler.\n Subsequent update attempts will fail until the update is 'unlocked' using the\n 'cancel_update' command.\n\n The updater 
only takes action on shards in a job that have changed, meaning\n that changing a single shard will only induce a restart on the changed shard.\n\n You may want to consider using the 'diff' subcommand before updating,\n to preview what changes will take effect.\n \"\"\"\n def warn_if_dangerous_change(api, job_spec, config):\n # Get the current job status, so that we can check if there's anything\n # dangerous about this update.\n job_key = AuroraJobKey(config.cluster(), config.role(), config.environment(), config.name())\n resp = api.query(api.build_query(config.role(), config.name(),\n statuses=ACTIVE_STATES, env=config.environment()))\n if resp.responseCode != ResponseCode.OK:\n die('Could not get job status from server for comparison: %s' % resp.message)\n remote_tasks = [t.assignedTask.task for t in resp.result.scheduleStatusResult.tasks]\n resp = api.populate_job_config(config)\n if resp.responseCode != ResponseCode.OK:\n die('Server could not populate job config for comparison: %s' % resp.message)\n local_task_count = len(resp.result.populateJobResult.populated)\n remote_task_count = len(remote_tasks)\n if (local_task_count >= 4 * remote_task_count or local_task_count <= 4 * remote_task_count\n or local_task_count == 0):\n print('Warning: this update is a large change. Press ^c within 5 seconds to abort')\n time.sleep(5)\n\n options = app.get_options()\n config = get_job_config(job_spec, config_file, options)\n api = make_client(config.cluster())\n if not options.force:\n warn_if_dangerous_change(api, job_spec, config)\n resp = api.update_job(config, options.health_check_interval_seconds, options.shards)\n check_and_log_response(resp)\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\n@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)\n@app.command_option(OPEN_BROWSER_OPTION)\n@app.command_option(SHARDS_OPTION)\n@app.command_option(\n '--batch_size',\n dest='batch_size',\n type=int,\n default=1,\n help='Number of shards to be restarted in one iteration.')\n@app.command_option(\n '--max_per_shard_failures',\n dest='max_per_shard_failures',\n type=int,\n default=0,\n help='Maximum number of restarts per shard during restart. 
Increments total failure count when '\n 'this limit is exceeded.')\n@app.command_option(\n '--max_total_failures',\n dest='max_total_failures',\n type=int,\n default=0,\n help='Maximum number of shard failures to be tolerated in total during restart.')\n@app.command_option(\n '--restart_threshold',\n dest='restart_threshold',\n type=int,\n default=60,\n help='Maximum number of seconds before a shard must move into the RUNNING state before '\n 'considered a failure.')\n@app.command_option(\n '--watch_secs',\n dest='watch_secs',\n type=int,\n default=30,\n help='Minimum number of seconds a shard must remain in RUNNING state before considered a '\n 'success.')\ndef restart(args, options):\n \"\"\"usage: restart cluster/role/env/job\n [--shards=SHARDS]\n [--batch_size=INT]\n [--updater_health_check_interval_seconds=SECONDS]\n [--max_per_shard_failures=INT]\n [--max_total_failures=INT]\n [--restart_threshold=INT]\n [--watch_secs=SECONDS]\n\n Performs a rolling restart of shards within a job.\n\n Restarts are fully controlled client-side, so aborting halts the restart.\n \"\"\"\n api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(\n args, options, make_client_factory())\n config = get_job_config(job_key.to_path(), config_file, options) if config_file else None\n updater_config = UpdaterConfig(\n options.batch_size,\n options.restart_threshold,\n options.watch_secs,\n options.max_per_shard_failures,\n options.max_total_failures)\n resp = api.restart(job_key, options.shards, updater_config,\n options.health_check_interval_seconds, config=config)\n check_and_log_response(resp)\n handle_open(api.scheduler.scheduler().url, job_key.role, job_key.env, job_key.name)\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\ndef cancel_update(args, options):\n \"\"\"usage: cancel_update cluster/role/env/job\n\n Unlocks a job for updates.\n A job may be locked if a client's update session terminated abnormally,\n or if another user is actively updating the job. This command should only\n be used when the user is confident that they are not conflicting with another user.\n \"\"\"\n api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(\n args, options, make_client_factory())\n config = get_job_config(job_key.to_path(), config_file, options) if config_file else None\n resp = api.cancel_update(job_key, config=config)\n check_and_log_response(resp)\n\n\n@app.command\n@app.command_option(CLUSTER_INVOKE_OPTION)\n@requires.exactly('role')\ndef get_quota(role):\n \"\"\"usage: get_quota --cluster=CLUSTER role\n\n Prints the production quota that has been allocated to a user.\n \"\"\"\n options = app.get_options()\n resp = make_client(options.cluster).get_quota(role)\n quota = resp.result.getQuotaResult.quota\n\n quota_fields = [\n ('CPU', quota.numCpus),\n ('RAM', '%f GB' % (float(quota.ramMb) / 1024)),\n ('Disk', '%f GB' % (float(quota.diskMb) / 1024))\n ]\n log.info('Quota for %s:\\n\\t%s' %\n (role, '\\n\\t'.join(['%s\\t%s' % (k, v) for (k, v) in quota_fields])))\n","repo_name":"calebTomlinson/aurora","sub_path":"src/main/python/twitter/aurora/client/commands/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":21675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"40580640426","text":"\"\"\"\n1258. 
Synonymous Sentences\nMedium\n\n289\n\n124\n\nAdd to List\n\nShare\nYou are given a list of equivalent string pairs synonyms where synonyms[i] = [si, ti] indicates that si and ti are equivalent strings. You are also given a sentence text.\n\nReturn all possible synonymous sentences sorted lexicographically.\n\n \n\nExample 1:\n\nInput: synonyms = [[\"happy\",\"joy\"],[\"sad\",\"sorrow\"],[\"joy\",\"cheerful\"]], text = \"I am happy today but was sad yesterday\"\nOutput: [\"I am cheerful today but was sad yesterday\",\"I am cheerful today but was sorrow yesterday\",\"I am happy today but was sad yesterday\",\"I am happy today but was sorrow yesterday\",\"I am joy today but was sad yesterday\",\"I am joy today but was sorrow yesterday\"]\nExample 2:\n\nInput: synonyms = [[\"happy\",\"joy\"],[\"cheerful\",\"glad\"]], text = \"I am happy today but was sad yesterday\"\nOutput: [\"I am happy today but was sad yesterday\",\"I am joy today but was sad yesterday\"]\n \n\nConstraints:\n\n0 <= synonyms.length <= 10\nsynonyms[i].length == 2\n1 <= si.length, ti.length <= 10\nsi != ti\ntext consists of at most 10 words.\nAll the pairs of synonyms are unique.\nThe words of text are separated by single spaces.\n\n\"\"\"\nclass Solution:\n def __init__(self):\n self.rank = dict()\n \n def generateSentences(self, synonyms: List[List[str]], text: str) -> List[str]:\n def find(word):\n if word not in self.rank:\n self.rank[word] = word\n return self.rank[word]\n if self.rank[word] == word:\n return word\n self.rank[word] = find(self.rank[word])\n return self.rank[word]\n \n def union(w1, w2):\n pw1 = find(w1)\n pw2 = find(w2)\n if pw1 == pw2:\n return\n self.rank[pw1] = pw2\n return\n \n def getWords(w1):\n w = []\n pw1 = find(w1)\n w.append(pw1)\n for word in self.rank:\n pw2 = find(word)\n if pw1 == pw2:\n w.append(word)\n return list(set(w))\n \n for syn in synonyms:\n union(syn[0], syn[1])\n \n res = []\n words = text.split(\" \")\n def rec(idx, history):\n if idx == len(words):\n cpy = history.copy()\n res.append(\" \".join(cpy))\n return\n \n if words[idx] not in self.rank:\n return rec(idx+1, history + [words[idx]])\n \n for w2 in getWords(words[idx]):\n history.append(w2)\n rec(idx+1, history)\n history.pop()\n return\n rec(0, [])\n return sorted(res)\n","repo_name":"venkatsvpr/Problems_Solved","sub_path":"1258_Synonymous_Sentences.py","file_name":"1258_Synonymous_Sentences.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"42931084420","text":"import numpy as np\n\n\ndef ulam_spiral_coords(N, origin=1):\n N = np.array(N)\n N -= origin - 1\n N = N[N >= 1]\n\n I, J = np.ndarray(N.shape), np.ndarray(N.shape)\n\n K = np.floor(N**.5)\n R = K % 2\n D = N - K**2\n\n crit1 = R == 0\n crit2 = D != 0\n crit3 = D <= K\n\n sel = crit1\n I[sel] = 1 - K[sel]/2\n J[sel] = K[sel]/2\n\n sel = ~crit1\n I[sel] = (K[sel] - 1)/2\n J[sel] = -(K[sel] - 1)/2\n\n sel = crit2 & crit3\n I[sel] -= (-1)**R[sel]\n J[sel] -= (D[sel] - 1)*(-1)**R[sel]\n\n sel = crit2 & ~crit3\n I[sel] -= (2 + K[sel] - D[sel])*(-1)**R[sel]\n J[sel] -= K[sel]*(-1)**R[sel]\n\n return I, J\n\n\ndef get_primes(n):\n numbers = set(range(n, 1, -1))\n primes = []\n while numbers:\n p = numbers.pop()\n primes.append(p)\n numbers.difference_update(set(range(p*2, n+1, p)))\n return sorted(primes)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n n = 200*200\n origin = 1\n\n p = get_primes(n + origin)\n\n fig, ax = plt.subplots()\n 
ax.plot(*ulam_spiral_coords(range(origin, n + origin), origin),\n 'r-', lw=.5)\n ax.plot(*ulam_spiral_coords(p, origin), 'k.', ms=1)\n ax.set_aspect('equal')\n\n nroot = n**.5/2 + 1\n ax.set_xlim(-nroot, nroot)\n ax.set_ylim(-nroot, nroot)\n ax.axis('off')\n fig.tight_layout()\n # fig.savefig('ulam_spiral.pdf')\n plt.show()\n","repo_name":"arthursn/misc","sub_path":"ulam_spiral.py","file_name":"ulam_spiral.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27880857220","text":"import biz.photo as biz\nfrom model.tracker import Tracker\n\ndef main2():\n\ttracker = Tracker()\n\tinput_page = input(\"input page num to start:\")\n\tpage = int(input_page)\n\twhile True:\t\n\t\tbiz.getPaging(page, biz.downloadPhoto, tracker)\n\t\tpage += 1\n\t\tinput(\"Continue to download page {0}?\".format(page))\t\t\n\ndef main3():\n\ttracker = Tracker()\n\t\n\tinput_page = input(\"input page num to start:\")\n\tpage = 1\n\ttry:\n\t\tpage = int(input_page)\n\texcept ValueError:\n\t\tpage = 1\n\t\n\tbiz.scanPageAndFindDedicatePhoto(page, tracker)\n\nif __name__ == \"__main__\":\n\tmain3()","repo_name":"xiaoooyu/unsplash_photo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"31278386241","text":"import unittest\n\nimport eop\nimport pandas as pd\nimport numpy as np\n\n \nclass A(eop.DataInstance): pass\n\nclass B(eop.DataInstance): pass\n\nclass Point3D(eop.DataInstance):\n DTypes = {\"x\": np.dtype(\"float64\"),\n \"y\": np.dtype(\"float64\"),\n \"z\": np.dtype(\"float64\")}\n def summary(self):\n return eop.DataInstance(pd.DataFrame({\"summary\": ([\"/\".join(str(item) for item in row) for idx, row in self.df.iterrows()])}))\n \n\nclass X(eop.DataInstance):\n DTypes = {\"doi\": np.dtype(\"int64\")}\n\n\nclass Y(eop.DataInstance):\n DTypes = {\"doi\": np.dtype(\"int64\"), \"sub\": Point3D}\n\n\ntest_data_a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6], \"z\": [7, 8, 9]})\ntest_data_b = pd.DataFrame({\"doi\": [77, 88, 99]})\ntest_data_c = pd.DataFrame({\"nana\": [2, 3, 5]})\n\nnrcols = 10\nnrrows = 10\ntest_data_d = pd.DataFrame({chr(97+x): np.linspace(0, 1, nrcols) * (10**x) for x in range(nrrows)})\ntest_data_d = test_data_d.set_index(\"a\")\n\nreplrows = (5,7)\nreplcols = (5,7)\ntest_data_e_columns = pd.Index([chr(97+x) for x in range(*replcols)])\ntest_data_e_index = pd.Index(np.linspace(0, 1, nrrows)[replrows[0]:replrows[1]])\ntest_data_e = pd.DataFrame(index=test_data_e_index, columns=test_data_e_columns)\nfor col in test_data_e_columns:\n test_data_e.loc[:,col] = [\"%s:%s\" % (col, row) for row in range(len(test_data_e_index))]\ntest_data_e.rename(columns={test_data_e.columns[-1]: \"newcol\"}, index={test_data_e.index[-1]: \"newrow\"}, inplace=True)\n\n\nclass TestDataInstance(unittest.TestCase):\n def test_square_assign(self):\n d = eop.DataInstance(test_data_d)\n e = eop.DataInstance(test_data_e)\n d[test_data_e_index, test_data_e_columns] = e\n self.assertEqual(d[0.5555555555555556, \"f\"], \"f:0\")\n self.assertEqual(d[\"newrow\", \"newcol\"], \"g:1\")\n\n def test_square_assign_rename(self):\n d = eop.DataInstance(test_data_d)\n e = eop.DataInstance(test_data_e)\n d[test_data_e_index, test_data_e_columns] <<= e\n self.assertEqual(d[0.5555555555555556, \"f\"], \"f:0\")\n self.assertEqual(d[0.6666666666666666, \"g\"], \"g:1\")\n \n def 
test_sub(self):\n sub = eop.DataInstance(test_data_a.copy())\n main = eop.DataInstance(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertIsInstance(main[\"sub\"], eop.DataInstance)\n \n def test_sub_loc(self):\n sub = eop.DataInstance(test_data_a.copy())\n main = eop.DataInstance(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertEqual(main.loc[[0, 2]][\"sub\"][\"x\"].iloc[1], 3)\n\n def test_sub_type(self):\n sub = B(test_data_a.copy())\n main = A(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertIsInstance(main, A)\n self.assertIsInstance(main[\"sub\"], B)\n \n def test_method_type(self):\n sub = B(test_data_a.copy())\n main = A(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertIsInstance(main.head(), A)\n self.assertIsInstance(main.head()[\"sub\"], B)\n \n def test_dtypes(self):\n sub = Point3D(test_data_a.copy())\n main = X(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertEqual(main.dtypes[\"doi\"], np.dtype(\"int64\"))\n self.assertEqual(main[\"sub\"].dtypes[\"x\"], np.dtype(\"float64\"))\n\n @unittest.skip(\"Not yet implemented\")\n def test_summary(self):\n sub = Point3D(test_data_a.copy())\n main = X(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertEqual(main.summary()[(\"sub\", \"[Point3D]\", \"summary\")].loc[0], \"1.0/4.0/7.0\")\n\n @unittest.skip(\"Not yet implemented\")\n def test_summary_single_col(self):\n main = eop.DataInstance(test_data_a.copy())\n sub = X(test_data_b.copy())\n main[\"sub\"] = sub\n self.assertEqual(main.summary()[(\"sub\", \"[X]\", \"doi\", \"[int64]\")].loc[0], 77)\n\n def test_sub_sub(self):\n main = eop.DataInstance(test_data_b.copy())\n sub = A(test_data_a.copy())\n sub2 = B(test_data_c.copy())\n main[\"sub\"] = sub\n main[\"sub\"][\"xxx\"] = sub2\n self.assertIsInstance(main[\"sub\"][\"xxx\"], B)\n\n def test_attributes(self):\n sub = eop.DataInstance(test_data_a.copy())\n main = eop.DataInstance(test_data_b.copy())\n sub.gazonk = 47\n main[\"sub\"] = sub\n self.assertEqual(main[\"sub\"].gazonk, 47)\n\n def test_flatten(self):\n sub = Point3D(test_data_a.copy())\n main = Y(test_data_b.copy())\n main[\"sub\"] = sub\n\n unflattened = Y(main.flatten())\n self.assertEqual(str(main), str(unflattened))\n \nclass TestDataSet(unittest.TestCase):\n def test_contains_tag(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n self.assertNotIn(\"c\", ds)\n self.assertIn(\"a\", ds)\n self.assertIn(\"b\", ds)\n\n def test_contains_instance(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n self.assertIn(\"Foo\", ds)\n\n def test_get_tags(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n self.assertIn(\"a\", ds[\"Foo\"])\n self.assertIn(\"b\", ds[\"Foo\"])\n \n def test_no_tag(self):\n ds = eop.DataSet()\n ds[:] = \"Notags\"\n self.assertIn(\"Notags\", ds)\n self.assertEqual(ds[\"Notags\"].tags, set())\n\n def test_intersection_query(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n ds[\"a\", \"c\"] = \"Bar\"\n ds[\"b\", \"c\"] = \"Fie\"\n ds[\"a\", \"d\"] = \"Hehe\"\n self.assertEqual(ds[\"b\", \"c\"], {\"Fie\"})\n self.assertIn((\"b\", \"c\"), ds)\n self.assertNotIn((\"b\", \"d\"), ds)\n\n def test_type_query(self):\n ds = eop.DataSet()\n ds[\"a\"] = a1 = eop.A({\"foo\": [1]})\n ds[\"a\"] = a2 = eop.B({\"foo\": [2]})\n ds[\"b\"] = a3 = eop.B({\"foo\": [3]})\n self.assertEqual(ds[eop.A, \"a\"], {a1})\n self.assertEqual(ds[eop.B, \"a\"], {a2})\n self.assertEqual(ds[eop.B, \"b\"], {a3})\n\n def test_complex_tag(self):\n ds = eop.DataSet()\n ds[{\"src\": \"nanana\"}] = \"lala\"\n 
self.assertEqual(len(ds[\"lala\"].tags), 1)\n self.assertEqual(list(ds[\"lala\"].tags)[0][\"src\"], \"nanana\")\n\n def test_more_complex_tag(self):\n ds = eop.DataSet()\n a = eop.A({\"foo\": [1]})\n ds[{\"src\": a}] = \"lala\"\n self.assertEqual(len(ds[\"lala\"]), 1)\n self.assertEqual(ds[{\"src\": a}], {\"lala\"})\n\n def test_complex_tag_slice_syntax(self):\n ds = eop.DataSet()\n ds[\"src\": \"nanana\"] = \"lala\"\n self.assertEqual(len(ds[\"lala\"]), 1)\n self.assertEqual(list(ds[\"lala\"].tags)[0][\"src\"], \"nanana\")\n\n def test_add_tag(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n ds[\"a\", \"c\"] = \"Bar\"\n ds[\"b\", \"c\"] = \"Fie\"\n ds[\"a\", \"b\"] += \"d\" \n self.assertEqual(ds[\"d\"], {\"Foo\"})\n\n def test_remove_tag(self):\n ds = eop.DataSet()\n ds[\"a\", \"b\"] = \"Foo\"\n ds[\"a\", \"c\"] = \"Bar\"\n ds[\"b\", \"c\"] = \"Fie\"\n ds[\"a\", \"b\"] -= \"b\" \n self.assertEqual(ds[\"b\"], {\"Fie\"})\n\n def test_triggers(self):\n ds = eop.DataSet()\n trigger_status = {}\n @eop.on(ds[\"a\"])\n def on_a(*tags, **kw):\n trigger_status[\"result\"] = (tags, kw)\n \n ds[\"a\", \"b\"] = \"Foo\"\n self.assertIn(\"result\", trigger_status)\n self.assertEqual(trigger_status[\"result\"][1][\"action\"], \"add\")\n self.assertEqual(trigger_status[\"result\"][1][\"instance\"], \"Foo\")\n del trigger_status[\"result\"]\n \n ds[\"b\", \"c\"] = \"Fie\"\n self.assertNotIn(\"result\", trigger_status)\n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"redhog/EmbarrassmentOfPandas","sub_path":"tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"24070213821","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 2018/5/14\n\nimport json\nimport time\nimport inspect\nimport logging\nimport requests\nimport threading\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nfrom utils.wx.client.api.base import BaseWeChatClientAPI\nfrom utils.wx.errcodes import WeChatErrorCode\nfrom utils.wx.client import api\nfrom utils.wx import BaseWeChat\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_api_endpoint(instance):\n return issubclass(instance.__class__, BaseWeChatClientAPI)\n\n\nclass WeChatClient(BaseWeChat):\n\n API_BASE_URL = \"https://api.weixin.qq.com/cgi-bin/\"\n\n # 消息\n message = api.WeChatMessage()\n # 用户\n user = api.WeChatUser()\n # 混合工具\n misc = api.WeChatMisc()\n\n def __new__(cls, *args, **kwargs):\n self = super(WeChatClient, cls).__new__(cls)\n api_endpoints = inspect.getmembers(self, _is_api_endpoint)\n for name, api_ins in api_endpoints:\n api_cls = type(api_ins)\n api_ins = api_cls(self)\n setattr(self, name, api_ins)\n return self\n\n def __init__(self, app_id, secret, timeout=None, session=None, auto_retry=True):\n super(WeChatClient, self).__init__(\n app_id, timeout, session, auto_retry\n )\n self.secret = secret\n self.expires_at = 0\n self.__access_token = None\n\n def _handle_result(self, res, method=None, url=None,\n result_processor=None, **kwargs):\n \"\"\"结果解析\n\n Parameters\n ----------\n res : request instance\n\n 响应对象 response\n\n method : string\n\n 请求方法\n\n url : string\n\n 请求的 `url`\n\n result_processor: func\n\n 结果处理器\n\n kwargs: dict\n\n 更多参数\n\n\n Returns\n -------\n dict\n \"\"\"\n if not isinstance(res, dict):\n result = res.json()\n else:\n result = res\n\n if not isinstance(result, dict):\n return result\n\n if \"base_resp\" in result:\n # Different response 
in device APIs. Fuck Tencent!\n result.update(errcode=result.pop(\"base_resp\"))\n\n if \"errcode\" in result:\n result[\"errcode\"] = int(result[\"errcode\"])\n\n if \"errcode\" in result and result[\"errcode\"] != 0:\n errcode = result[\"errcode\"]\n errmsg = result.get(\"errmsg\", errcode)\n\n # 启动重试发送\n if self.auto_retry and errcode in (\n WeChatErrorCode.INVALID_CREDENTIAL.value,\n WeChatErrorCode.INVALID_ACCESS_TOKEN.value,\n WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value,):\n logger.info(\"Access token expired, fetch a new one and retry request\")\n\n kwargs[\"params\"][\"access_token\"] = self._fetch_access_token()\n\n return self.request(\n method=method,\n url_or_endpoint=url,\n result_processor=result_processor,\n **kwargs\n )\n\n elif errcode == WeChatErrorCode.OUT_OF_API_FREQ_LIMIT.value:\n # api 使用频率超过限制\n logger.error(\"Beyond API limits wx request: {}\".format(errmsg))\n\n else:\n # api 其它异常\n logger.error(\"Invalid wx request: {} {}\".format(errcode, errmsg))\n\n logger.info(\"WxApi client res: {}\".format(result))\n\n return result if not result_processor else result_processor(result)\n\n def request(self, method, url_or_endpoint, **kwargs):\n\n if not url_or_endpoint.startswith((\"http://\", \"https://\")):\n api_base_url = kwargs.pop(\"api_base_url\", self.API_BASE_URL)\n url = \"{base}{endpoint}\".format(\n base=api_base_url,\n endpoint=url_or_endpoint\n )\n else:\n url = url_or_endpoint\n\n if \"params\" not in kwargs:\n kwargs[\"params\"] = {}\n\n if isinstance(kwargs[\"params\"], dict):\n kwargs[\"params\"][\"access_token\"] = self.access_token\n\n if isinstance(kwargs.get(\"data\", \"\"), dict):\n body = json.dumps(kwargs[\"data\"], ensure_ascii=False)\n body = body.encode('utf-8')\n kwargs['data'] = body\n\n kwargs[\"timeout\"] = kwargs.get(\"timeout\", self.timeout)\n result_processor = kwargs.pop(\"result_processor\", None)\n\n res = self._http.request(\n method=method,\n url=url,\n **kwargs\n )\n\n try:\n res.raise_for_status()\n except requests.RequestException as exc:\n logger.error(str(exc))\n # 发送错误消息至\n\n return self._handle_result(\n res, method, url, result_processor, **kwargs\n )\n\n def _fetch_access_token(self):\n\n logger.info(\"Fetching access token appid is {}, secret is {}\".format(\n self.app_id, self.secret\n ))\n\n url = \"https://api.weixin.qq.com/cgi-bin/token\"\n params = {\n \"grant_type\": \"client_credential\",\n \"appid\": self.app_id,\n \"secret\": self.secret\n }\n\n res = self._http.get(url=url, params=params)\n\n res.raise_for_status()\n\n result = res.json()\n\n # 如果获取不到凭证进行通知\n if \"errcode\" in result and result[\"errcode\"] != WeChatErrorCode.SUCCESS.value:\n task = threading.Thread(\n target=send_mail,\n args=(\n \"获取微信凭证失败通知\", str(result),\n settings.DEFAULT_FROM_EMAIL, settings.DEFAULT_TO_EMAILS\n )\n )\n task.start()\n\n self.expires_at = int(time.time()) + result.get(\"expires_in\", 0)\n\n self.__access_token = result.get(\"access_token\", )\n\n return result.get(\"access_token\", )\n\n @property\n def access_token(self):\n \"\"\"get access_token\n\n 公众平台全局通用凭证\n\n Parameters\n ----------\n\n Returns\n -------\n access_token: string\n \"\"\"\n\n if self.__access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return self.__access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 300:\n return self.__access_token\n\n self._fetch_access_token()\n\n return 
self.__access_token\n","repo_name":"wangyitao/awDemo","sub_path":"utils/wx/client/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"34683794585","text":"startindex=0\r\nendindex=11\r\n# read files to a nice format\r\n# f={}\r\n# f[0] = open(\"ref/chopin-mono.txt\",'r')\r\n# f[1] = open(\"2014ON/mazurka24-4-polyoutput.txt\",'r')\r\n# f[2] = open(\"2016MP/chopinOp24No4.txt\",'r')\r\n# f[3] = open(\"OL1/mazurkaoutput.txt\",'r')\r\n# f[4] = open(\"OL2/mazurkaoutput.txt\",'r')\r\n# f[5] = open(\"MeredithTLF1MIREX2016/mazurka24-4output.txt\",'r')\r\n# f[6] = open(\"MeredithTLPMIREX2016/mazurka24-4output.txt\",'r')\r\n# f[7] = open(\"MeredithTLRMIREX2016/mazurka24-4output.txt\",'r')\r\n# f[8] = open(\"VM/VM1/output/mazurka24-4.txt\",'r')\r\n# f[9] = open(\"VM/VM2/output/mazurka24-4input.txt\",'r')\r\n# f[10] = open(\"2016IR/chopin-mono.txt\",'r')\r\n\r\nf={}\r\nf[0] = open(\"ref/chopin-mono.txt\",'r')\r\nf[1] = open(\"2014ON/mazurka24-4-polyoutput.txt\",'r')\r\nf[2] = open(\"2016MP/chopinOp24No4.txt\",'r')\r\nf[8] = open(\"OL1/chopin1.txt\",'r')\r\nf[9] = open(\"OL2/chopin2.txt\",'r')\r\nf[3] = open(\"MeredithTLF1MIREX2016/mazurka24-4mono.tlf1\",'r')\r\nf[4] = open(\"MeredithTLPMIREX2016/mazurka24-4.tlf1\",'r')\r\nf[5] = open(\"MeredithTLRMIREX2016/mazurka24-4.tlf1\",'r')\r\nf[6] = open(\"VM/VM1/output/mazurka24-4.txt\",'r')\r\nf[7] = open(\"VM/VM2/output/mazurka24-4input.txt\",'r')\r\nf[10] = open(\"SIARCT-CFP/examples/exampleData/patterns_mazurka24-4.txt\")\r\n\r\n# f={}\r\n# f[0] = open(\"ref/bach-mono.txt\",'r')\r\n# f[1] = open(\"2014ON/bach.txt\",'r')\r\n# f[2] = open(\"2016MP/bachBWV889Fg.txt\",'r')\r\n# f[8] = open(\"OL1/wtc.txt\",'r')\r\n# f[9] = open(\"OL2/wtc.txt\",'r')\r\n# f[3] = open(\"MeredithTLF1MIREX2016/wtc2f20output.txt\",'r')\r\n# f[4] = open(\"MeredithTLPMIREX2016/wtc2f20.tlp\",'r')\r\n# f[5] = open(\"MeredithTLRMIREX2016/wtc2f20.tlr\",'r')\r\n# f[6] = open(\"VM/VM1/output/wtc2f20.txt\",'r')\r\n# f[7] = open(\"VM/VM2/output/wtc2f20.txt\",'r')\r\n# f[8] = open(\"2016IR/bach-mono.txt\",'r')\r\n\r\n# f={}\r\n# f[0] = open(\"ref/beethoven-mono.txt\",'r')\r\n# f[1] = open(\"2014ON/bee.txt\",'r')\r\n# f[2] = open(\"2016MP/beethovenOp2No1Mvt3.txt\",'r')\r\n# f[8] = open(\"OL1/sonata01-3.txt\",'r')\r\n# f[9] = open(\"OL2/sonata01-3.txt\",'r')\r\n# f[3] = open(\"MeredithTLF1MIREX2016/sonata01-3.tlf1\",'r')\r\n# f[4] = open(\"MeredithTLPMIREX2016/sonata01-3.tlp\",'r')\r\n# f[5] = open(\"MeredithTLRMIREX2016/sonata01-3.tlr\",'r')\r\n# f[6] = open(\"VM/VM1/output/sonata01-3.txt\",'r')\r\n# f[7] = open(\"VM/VM2/output/sonata01-3.txt\",'r')\r\n# # f[8] = open(\"2016IR/bach-mono.txt\",'r')\r\n\r\n# f={}\r\n# f[0] = open(\"ref/mozart-mono.txt\",'r')\r\n# f[1] = open(\"2014ON/mozart.txt\",'r')\r\n# f[2] = open(\"2016MP/mozartK282Mvt2.txt\",'r')\r\n# f[8] = open(\"OL1/sonata04-2.txt\",'r')\r\n# f[9] = open(\"OL2/sonata04-2.txt\",'r')\r\n# f[3] = open(\"MeredithTLF1MIREX2016/sonata04-2.tlf1\",'r')\r\n# f[4] = open(\"MeredithTLPMIREX2016/sonata04-2.tlp\",'r')\r\n# f[5] = open(\"MeredithTLRMIREX2016/sonata04-2.tlr\",'r')\r\n# f[6] = open(\"VM/VM1/output/sonata04-2.txt\",'r')\r\n# f[7] = open(\"VM/VM2/output/sonata04-2.txt\",'r')\r\n# # f[8] = open(\"2016IR/bach-mono.txt\",'r')\r\n\r\n\r\n\r\n\r\n# f={}\r\n# f[0] = open(\"ref/gibbons-mono.txt\",'r')\r\n# f[1] = open(\"2014ON/gibbons.txt\",'r')\r\n# f[2] = open(\"2016MP/gibbonsSilverSwan1612.txt\",'r')\r\n# f[8] = 
open(\"OL1/silverswan.txt\",'r')\r\n# f[9] = open(\"OL2/silverswan.txt\",'r')\r\n# f[3] = open(\"MeredithTLF1MIREX2016/silverswan.tlf1\",'r')\r\n# f[4] = open(\"MeredithTLPMIREX2016/silverswan.tlp\",'r')\r\n# f[5] = open(\"MeredithTLRMIREX2016/silverswan.tlr\",'r')\r\n# f[6] = open(\"VM/VM1/output/silverswan.txt\",'r')\r\n# f[7] = open(\"VM/VM2/output/silverswan.txt\",'r')\r\n# # f[8] = open(\"2016IR/out.txt\",'r')\r\n\r\nd={}\r\nfor i in range(startindex,endindex):\r\n d[i]=f[i].readlines()\r\n\r\ndef outputtimes(text):\r\n pitches=[]\r\n pairs=[]\r\n occurtimes=[]\r\n pattimes=[]\r\n times=[]\r\n total=[]\r\n for line in text:\r\n if \",\" in line:\r\n pairs.append([float(i) for i in line.split(',')])\r\n total.append([float(i) for i in line.split(',')])\r\n\r\n if \"o\" in line:\r\n total.append('o')\r\n if pairs != []:\r\n times=zip(*pairs)[0]\r\n occurtimes.append(times)\r\n pairs=[]\r\n\r\n if \"p\" in line:\r\n total.append('p')\r\n pattimes.append(occurtimes)\r\n # print(len(occurtimes))\r\n occurtimes=[]\r\n\r\n # print(total)\r\n\r\n olist=[]\r\n plist=[]\r\n for index in range(0,len(total)):\r\n item = total[index]\r\n if item == 'p':\r\n plist.append(index)\r\n if item =='o':\r\n olist.append(index)\r\n\r\n occurtimes=[]\r\n pattimes=[]\r\n record=0\r\n for pindex in range(1,len(plist)):\r\n for oindex in range(0,len(olist)-1):\r\n if plist[pindex]-olist[oindex+1]>1 and oindex>=record:\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[oindex+1]])[0])\r\n if plist[pindex]-olist[oindex+1]==-1:\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[oindex+1]-1])[0])\r\n record=oindex+1\r\n # print(record)\r\n sub=[]\r\n pattimes.append(occurtimes)\r\n occurtimes=[]\r\n\r\n # print(olist)\r\n # print(plist)\r\n\r\n pindex=plist[-1]\r\n occurtimes=[]\r\n for oindex in range(0,len(olist)-1):\r\n if olist[oindex]>pindex:\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[-1]])[0])\r\n\r\n\r\n oindex=olist[-1]\r\n occurtimes.append(zip(*total[oindex+1:])[0])\r\n pattimes.append(occurtimes)\r\n\r\n # print(pattimes[1])\r\n # taking the onset and offset\r\n startend=[]\r\n startendpat=[]\r\n for occtime in pattimes:\r\n for time in occtime:\r\n start=time[0]\r\n end=time[-1]\r\n startend.append([start,end])\r\n startendpat.append(startend)\r\n startend=[]\r\n # print(startendpat[-1])\r\n return startendpat\r\n\r\ndef outputtimeandpitch(text):\r\n pitches=[]\r\n pairs=[]\r\n occurtimes=[]\r\n occurpitches=[]\r\n patpitches=[]\r\n pattimes=[]\r\n times=[]\r\n total=[]\r\n index=0\r\n for line in text:\r\n if \",\" in line:\r\n pairs.append([float(i) for i in line.split(',')])\r\n total.append([float(i) for i in line.split(',')])\r\n\r\n if \"o\" in line:\r\n total.append('o')\r\n if pairs != []:\r\n times=zip(*pairs)[0]\r\n # print(times)\r\n pitches=zip(*pairs)[1]\r\n occurtimes.append(times)\r\n occurpitches.append(pitches)\r\n pairs=[]\r\n\r\n if \"p\" in line:\r\n total.append('p')\r\n pattimes.append(occurtimes)\r\n patpitches.append(occurpitches)\r\n # print(len(occurtimes))\r\n occurtimes=[]\r\n occurpitches=[]\r\n index=index+1\r\n\r\n # print(total)\r\n # print(pattimes)\r\n # print(patpitches)\r\n\r\n olist=[]\r\n plist=[]\r\n for index in range(0,len(total)):\r\n item = total[index]\r\n if item == 'p':\r\n plist.append(index)\r\n if item =='o':\r\n olist.append(index)\r\n\r\n occurtimes=[]\r\n occurpitches=[]\r\n pattimes=[]\r\n patpitches=[]\r\n record=0\r\n\r\n for pindex in range(1,len(plist)):\r\n for oindex in range(0,len(olist)-1):\r\n # 
print('pindex='+str(plist[pindex])+';oindex='+str(olist[oindex+1]))\r\n # print(zip(*total[olist[oindex]+1:olist[oindex+1]]))\r\n # print(zip(*total[olist[oindex]+1:olist[oindex+1]-1]))\r\n if plist[pindex]-olist[oindex+1]>1 and oindex>=record:\r\n # print('first'+str(oindex)+str(pindex))\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[oindex+1]])[0])\r\n occurpitches.append(zip(*total[olist[oindex]+1:olist[oindex+1]])[1])\r\n if plist[pindex]-olist[oindex+1]==-1:\r\n # print('second'+str(pindex)+str(pindex))\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[oindex+1]-1])[0])\r\n occurpitches.append(zip(*total[olist[oindex]+1:olist[oindex+1]-1])[1])\r\n record=oindex+1\r\n # print(record)\r\n sub=[]\r\n # print(occurtimes)\r\n # print(occurpitches)\r\n \r\n pattimes.append(occurtimes)\r\n patpitches.append(occurpitches)\r\n occurtimes=[]\r\n occurpitches=[]\r\n # print(olist)\r\n # print(plist)\r\n # print(patpitches)\r\n pindex=plist[-1]\r\n occurtimes=[]\r\n occurpitches=[]\r\n for oindex in range(0,len(olist)-1):\r\n if olist[oindex]>pindex:\r\n occurtimes.append(zip(*total[olist[oindex]+1:olist[oindex+1]-1])[0])\r\n occurpitches.append(zip(*total[olist[oindex]+1:olist[oindex+1]-1])[1])\r\n\r\n\r\n oindex=olist[-1]\r\n occurtimes.append(zip(*total[oindex+1:])[0])\r\n occurpitches.append(zip(*total[oindex+1:])[1])\r\n pattimes.append(occurtimes)\r\n patpitches.append(occurpitches)\r\n\r\n # print(patpitches)\r\n # print(pattimes)\r\n # taking the onset and offset\r\n startend=[]\r\n startendpitches=[]\r\n startendpat=[]\r\n startendtime=[]\r\n startendpatpitches=[]\r\n startendpattime=[]\r\n # print(pattimes)\r\n for occtimei in range(0,len(pattimes)):\r\n occtime = pattimes[occtimei]\r\n occpitch= patpitches[occtimei]\r\n # print(occtime)\r\n # print(occpitch)\r\n\r\n for timei in range(0,len(occtime)):\r\n start=occtime[timei][0]\r\n end=occtime[timei][-1]\r\n startend.append([start,end])\r\n startendtime.append(occtime[timei])\r\n startendpitches.append(occpitch[timei])\r\n\r\n startendpat.append(startend)\r\n startendpatpitches.append(startendpitches)\r\n startendpattime.append(startendtime)\r\n startend=[]\r\n startendtime=[]\r\n startendpitches=[]\r\n\r\n # print(startendpat[-1]) \r\n return startendpat, startendpatpitches,startendpattime\r\n\r\n# stats calculation\r\n# overlap percentage\r\ndef overlap(startflat, endflat, flattened_list):\r\n overlaptotal=[]\r\n for i in range(0, len(startflat)):\r\n overlappatlen=[]\r\n for pair in flattened_list:\r\n if startflat[i]>pair[0] and startflat[i]=pair[1]:\r\n overlaplen = pair[1]-startflat[i]\r\n overlappatlen.append(overlaplen)\r\n if endflat[i]>pair[0] and endflat[i]pair[0] and endflat[i]pair[1]:\r\n overlaplen = endflat[i] - startflat[i]\r\n overlappatlen.append(overlaplen)\r\n overlaptotal.append(overlappatlen)\r\n return overlaptotal\r\n\r\n# coverage\r\ndef merge_intervals(intervals):\r\n s = sorted(intervals, key=lambda t: t[0])\r\n m = 0\r\n for t in s:\r\n if t[0] > s[m][1]:\r\n m += 1\r\n s[m] = t\r\n else:\r\n s[m] = (s[m][0], t[1])\r\n return s[:m+1]\r\n\r\nstartendpat={}\r\nflattened_list={}\r\nstartflat={}\r\nendflat={}\r\ntotallist=[]\r\ntotalstartlist=[]\r\ntotalendlist=[]\r\noverlaptotal={}\r\ncoverinterval={}\r\nallpitches={}\r\nalltimes={}\r\nfor i in range(startindex,endindex):\r\n startendpat[i]=outputtimes(d[i])\r\n allpitches[i]=outputtimeandpitch(d[i])[1]\r\n alltimes[i]=outputtimeandpitch(d[i])[2]\r\n if startendpat[i]==[]:\r\n startendpat[i]=[[[0,1]]]\r\n flattened_list[i]=[y for x in 
startendpat[i] for y in x]\r\n startflat[i]=zip(*flattened_list[i])[0]\r\n endflat[i]=zip(*flattened_list[i])[1]\r\n totallist=totallist+flattened_list[i]\r\n totalstartlist=totalstartlist+list(startflat[i])\r\n totalendlist=totalendlist+list(endflat[i])\r\n overlaptotal[i]=overlap(startflat[i],endflat[i],flattened_list[i])\r\n coverinterval[i] = merge_intervals(flattened_list[i])\r\n print(len(startendpat[i]))\r\n\r\n\r\n\r\ntotaltime = max(totalendlist) - min(totalendlist)\r\n\r\noverlaptotalper={}\r\nfor i in range(startindex,endindex):\r\n overlaptotalper[i]= [x / totaltime for x in [y for x in overlaptotal[i] for y in x]]\r\n print(\"Total overlap length (unit of the piece len):\"+str(sum(overlaptotalper[i])))\r\n\r\nalloverlap = overlap(totalstartlist,totalendlist,totallist)\r\nalloverlapper = [x / totaltime for x in [y for x in alloverlap for y in x]]\r\nprint(\"all overlap\"+str(sum(alloverlapper)))\r\n\r\nfor i in range(startindex,endindex):\r\n sum=0\r\n for interval in coverinterval[i]:\r\n sum=sum+interval[1] - interval[0]\r\n print(\"coverage:\"+str(sum/totaltime))\r\n\r\n# Ploting\r\nimport matplotlib.pyplot as plt\r\nimport numpy\r\nheight = 0\r\nfrom operator import itemgetter\r\n\r\nc=numpy.random.rand(3,1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfig=plt.figure(figsize=(10,15))\r\nax=fig.add_subplot(1, 1, 1)\r\n\r\n\r\n\r\nc=numpy.random.rand(3,1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\n# for patterns in startendpat[1]:\r\n# # c=numpy.random.rand(3,1)\r\n# height = height + 1\r\n# for occur in patterns:\r\n# plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[1])):\r\n# patterns = startendpat[1][n]\r\n# t=alltimes[1][n]\r\n# p=allpitches[1][n]\r\n\r\n\r\n# plt.axhline(y=height+0.5)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# rankinglist=[]\r\n# indexlist=[]\r\n# pickinglist=[]\r\n# for p in range(0,len(startendpat[5])):\r\n# rankinglist.append(startendpat[5][p][0][0])\r\n# indexlist.append(p)\r\n# zippedlist=zip(rankinglist,indexlist)\r\n# for pairs in sorted(zippedlist):\r\n# pickinglist.append(pairs[1])\r\n\r\n# print(pickinglist)\r\n\r\n# for patterns in startendpat[5]:\r\n# # c=numpy.random.rand(3,1)\r\n# height = height + 1\r\n# for occur in patterns:\r\n# plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# plt.plot((0,0), (0,0), color=c, label=\"SIATECCompress-TLR\")\r\n\r\n# c=numpy.random.rand(3,1)\r\n# for color in c:\r\n# if color < 0.5:\r\n # color = color + 0.5\r\n# for patterns in startendpat[6]:\r\n# # c=numpy.random.rand(3,1)\r\n# height = height + 1\r\n# for occur in patterns:\r\n# plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0,0,0)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\n# for n in range(0,len(startendpat[0])):\r\n # patterns = startendpat[0][n]\r\n # t=alltimes[0][n]\r\n # p=allpitches[0][n]\r\nfor patterns in startendpat[0]:\r\n height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\n\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2)\r\nplt.plot((0,0), (0,0), color=c, label=\"Ground Truth\")\r\n\r\nc=numpy.random.rand(3,1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 
0.5\r\n\r\n\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.5,0.5,0.2)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[10]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[10])):\r\n# patterns = startendpat[10][n]\r\n# t=alltimes[10][n]\r\n# p=allpitches[10][n]\r\n\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"SIARCT-CFP(SIACFP)\")\r\n\r\nc=(0.1,0.1,0.1)\r\nfor patterns in startendpat[1]:\r\n height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\n\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\nplt.plot((0,0), (0,0), color=c, label=\"MotivesExtractor(ME)\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.5,0.5,0.2)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[2]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# plt.axhline(y=height+0.5)\r\n# for n in range(0,len(startendpat[2])):\r\n# patterns = startendpat[2][n]\r\n# t=alltimes[2][n]\r\n# p=allpitches[2][n]\r\n\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"SYMCHM(SC)\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.1,0.1,0.1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[9]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[9])):\r\n# patterns = startendpat[9][n]\r\n# t=alltimes[9][n]\r\n# p=allpitches[9][n]\r\n\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"OL2\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.5,0.5,0.2)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[8]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[8])):\r\n# patterns = startendpat[8][n]\r\n# t=alltimes[8][n]\r\n# p=allpitches[8][n]\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], 
color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"OL1\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.1,0.1,0.1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[7]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[7])):\r\n# patterns = startendpat[7][n]\r\n# t=alltimes[7][n]\r\n# p=allpitches[7][n]\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"VM2\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.5,0.5,0.2)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[6]:\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[6])):\r\n# patterns = startendpat[6][n]\r\n# t=alltimes[6][n]\r\n# p=allpitches[6][n]\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"VM1\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.1,0.1,0.1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[5]:\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[5])):\r\n# patterns = startendpat[5][n]\r\n# t=alltimes[5][n]\r\n# p=allpitches[5][n]\r\n\r\n# # for patterns in startendpat[0]:\r\n# height = height + 1\r\n# for j in range(0, len(patterns)):\r\n# occur = patterns[j]\r\n# tcur = t[j]\r\n# pcur = p[j]\r\n# plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 1)\r\n# plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"SIATECCompress-TLR(SIAR)\") \r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.5,0.5,0.2)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\nfor patterns in startendpat[4]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# for n in range(0,len(startendpat[4])):\r\n# patterns = startendpat[4][n]\r\n# t=alltimes[4][n]\r\n# p=allpitches[4][n]\r\n\r\n# # for patterns in startendpat[0]:\r\n# height = height + 1\r\n# for j in range(0, len(patterns)):\r\n# occur = patterns[j]\r\n# tcur = t[j]\r\n# pcur = p[j]\r\n# plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n# plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"SIATECCompress-TLP(SIAP)\")\r\n\r\nc=numpy.random.rand(3,1)\r\nc=(0.1,0.1,0.1)\r\nfor color in c:\r\n if color < 0.5:\r\n color = color + 0.5\r\n\r\nfor patterns in startendpat[3]:\r\n # c=numpy.random.rand(3,1)\r\n height = height + 1\r\n for occur in patterns:\r\n plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2, alpha=0.5)\r\n# plt.axhline(y=height+0.5)\r\n# for n in 
range(0,len(startendpat[3])):\r\n# patterns = startendpat[3][n]\r\n# t=alltimes[3][n]\r\n# p=allpitches[3][n]\r\n\r\n# for patterns in startendpat[0]:\r\n # height = height + 1\r\n # for j in range(0, len(patterns)):\r\n # occur = patterns[j]\r\n # tcur = t[j]\r\n # pcur = p[j]\r\n # plt.scatter(tcur, [pitch * 0.008 + height for pitch in pcur], color = c, alpha=0.5, s = 3)\r\n # plt.xlabel('Time')\r\nplt.plot((0,0), (0,0), color=c, label=\"SIATECCompress-TLF1(SIAF1)\")\r\n\r\n\r\n\r\n# c=numpy.random.rand(3,1)\r\n# for patterns in startendpat[10]:\r\n# # c=numpy.random.rand(3,1)\r\n# height = height + 1\r\n# for occur in patterns:\r\n# plt.plot((occur[0], occur[1]), (height, height), color = c, lw=2)\r\n# plt.plot((0,0), (0,0), color=c, label=\"IR\")\r\n\r\n\r\n\r\n\r\nplt.xlim([0, 600])\r\nplt.ylim([0,153])\r\n\r\nhandles, labels = ax.get_legend_handles_labels()\r\nplt.legend(handles[::-1], labels[::-1],loc='best',fontsize=15)\r\n# plt.title('Pattern Visualisation')\r\nplt.xlabel('Time')\r\nplt.ylabel('Patterns and their occurrences')\r\nplt.tight_layout()\r\nplt.tick_params(axis='both', left='off', top='off', right='off', labelleft='off', labeltop='off', labelright='off')\r\n# plt.savefig('chopinwithTom.png')\r\nplt.show()\r\n\r\n# print(overlaptotal)\r\n# plt.figure()\r\n# color=numpy.random.rand(3,1)\r\n# print(color)\r\n# color=numpy.array([0.5,0.5,1])\r\n# for x in range(0, len(overlaptotal)):\r\n# color[2] = color[2] - 0.01\r\n# print(color)\r\n# for p in range(0,len(overlaptotal[x])):\r\n# plt.scatter(x, overlaptotal[x][p],color=color)\r\n# plt.xlabel('Occurrencies ')\r\n# plt.ylabel('Overlapping time')\r\n# plt.show()\r\n","repo_name":"irisyupingren/2017Pattern","sub_path":"figure5.py","file_name":"figure5.py","file_ext":"py","file_size_in_byte":23859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71867219177","text":"Import('env', 'testPythonsGtk')\nenv = env.Clone(\n commondir=env.subst('$commondir'),\n wrapperdir=env.subst('$wrapperdir'),\n )\n\nDefault(env.Template('SamplerConfig.py.in', varlist=['VERSION']))\npythons = [\n 'AppConfig.py',\n 'Launcher.py',\n 'Main2.py',\n 'Outcome.py',\n 'ReportsReader.py',\n 'SampledLauncher.py',\n 'SamplerConfig.py',\n 'ServerMessage.py',\n 'UnsampledLauncher.py',\n 'Uploader.py',\n 'Upload.py',\n ]\n\ncompiled = env.PythonBytecodeOpt(pythons)\nDefault(compiled)\n\ntestPythonsGtk(env, pythons)\nAlias('test', env.TestXML('wrapper.ui', schema=File('../gtk-builder.xsd')))\n\nAlias('install', env.Install('$DESTDIR$wrapperdir', [compiled, 'wrapper.ui']))\n","repo_name":"liblit/sampler","sub_path":"launcher/wrapper/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"31892343777","text":"import bisect\nimport numpy as np\nfrom torch.utils.data import Dataset, ConcatDataset\nimport mediapy as media\nfrom skimage.color import gray2rgb\nfrom .augmentation import AllAugmentationTransform\n\n# import albumentations as A\n# from albumentations.pytorch import ToTensorV2\n# import random\n\nimport einops\n\n# for image dataset\n# import albumentations\n# from PIL import Image\nimport torch\n\nfrom data.h5 import HDF5Dataset\n\nclass ConcatDatasetWithIndex(ConcatDataset):\n \"\"\"Modified from original pytorch code to return dataset idx\"\"\"\n def __getitem__(self, idx):\n if idx < 0:\n if -idx > len(self):\n raise ValueError(\"absolute value of index should not 
exceed dataset length\")\n idx = len(self) + idx\n dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)\n if dataset_idx == 0:\n sample_idx = idx\n else:\n sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]\n return self.datasets[dataset_idx][sample_idx], dataset_idx\n\n\nclass VideoPaths(Dataset):\n def __init__(self, paths, start_idxs, end_idxs, trans=None, labels=None):\n self._length = len(paths)\n self._trans = trans\n\n if labels is None:\n self.labels = dict() \n else:\n self.labels = labels\n\n self.labels[\"file_path\"] = paths\n self.labels[\"start_idx\"] = start_idxs\n self.labels[\"end_idx\"] = end_idxs\n\n def __len__(self):\n return self._length\n\n def preprocess_video(self, video_path, start_idx, end_idx):\n video = media.read_video(video_path)[start_idx:end_idx]\n video = np.array(video).astype(np.uint8)\n tmp_video = []\n for i in range(len(video)):\n tmp_video.append(self._trans(image=video[i])[\"image\"])\n video = np.array(tmp_video)\n video = (video/127.5 - 1.0).astype(np.float32)\n # [0,255] -> [-1,1]\n return video\n\n def __getitem__(self, i):\n video = dict()\n video[\"video\"] = self.preprocess_video(self.labels[\"file_path\"][i], int(self.labels[\"start_idx\"][i]), int(self.labels[\"end_idx\"][i]))\n for k in self.labels:\n video[k] = self.labels[k][i]\n return video\n \n\nclass HDF5InterfaceDataset(Dataset):\n def __init__(self, data_dir, frames_per_sample, random_time=True, total_videos=-1, start_at=0, augmentation_params=None, labels=None):\n super().__init__()\n if labels is None:\n self.labels = dict() \n else:\n self.labels = labels\n self.data_dir = data_dir\n self.videos_ds = HDF5Dataset(data_dir)\n self.total_videos = total_videos\n self.start_at = start_at\n self.random_time = random_time\n self.frames_per_sample = frames_per_sample\n self.transform = AllAugmentationTransform(**augmentation_params)\n\n # The numpy HWC image is converted to pytorch CHW tensor. 
\n # If the image is in HW format (grayscale image), 、\n # it will be converted to pytorch HW tensor.\n\n # if flip:\n # flag = random.choice([0,1])\n # else:\n # flag = 0\n\n # self.trans = A.Compose([\n # A.HorizontalFlip(p=flag),\n # ToTensorV2()\n # ])\n\n def __len__(self):\n if self.total_videos > 0:\n return self.total_videos\n else:\n return len(self.videos_ds)\n \n def max_index(self):\n return len(self.videos_ds)\n\n def len_of_vid(self, index):\n video_index = index % self.__len__()\n shard_idx, idx_in_shard = self.videos_ds.get_indices(video_index)\n with self.videos_ds.opener(self.videos_ds.shard_paths[shard_idx]) as f:\n video_len = f['len'][str(idx_in_shard)][()]\n return video_len\n \n def __getitem__(self, index, time_idx=0):\n # Use `index` to select the video, and then\n # randomly choose a `frames_per_sample` window of frames in the video\n video = dict()\n\n video_index = round(index / (self.__len__() - 1) * (self.max_index() - 1))\n shard_idx, idx_in_shard = self.videos_ds.get_indices(video_index)\n final_clip = []\n with self.videos_ds.opener(self.videos_ds.shard_paths[shard_idx]) as f:\n video_len = f['len'][str(idx_in_shard)][()] - self.start_at\n if self.random_time and video_len > self.frames_per_sample:\n time_idx = np.random.choice(video_len - self.frames_per_sample)\n time_idx += self.start_at\n # print(self.start_at, time_idx, min(time_idx + self.frames_per_sample, video_len))\n for i in range(time_idx, min(time_idx + self.frames_per_sample, video_len)):\n frame = f[str(idx_in_shard)][str(i)][()]\n if len(frame.shape) == 2 or frame.shape[2] == 1:\n final_clip.append(gray2rgb(frame))\n else:\n final_clip.append(frame)\n # final_clip.append(self.trans(image=f[str(idx_in_shard)][str(i)][()])[\"image\"])\n \n # print(np.min(final_clip[0]), np.max(final_clip[0]))\n final_clip = self.transform(final_clip) # 0,255 -> 0,1\n # print(np.min(final_clip[0]), np.max(final_clip[0]))\n final_clip = np.stack(final_clip)\n # print(final_clip.shape)\n final_clip = torch.tensor(final_clip)\n # print(final_clip.shape)\n final_clip = (final_clip*2 - 1.0).type(torch.float32) # 0,1 -> -1,1\n # final_clip = (final_clip/127.5 - 1.0).type(torch.float32)\n final_clip = einops.rearrange(final_clip, \"t h w c -> t c h w \")\n video[\"video\"] = final_clip\n\n for k in self.labels:\n video[k] = self.labels[k][i]\n\n return video\n\n\n# class ImagePaths(Dataset):\n# def __init__(self, paths, size=None, random_crop=False, labels=None):\n# self.size = size\n# self.random_crop = random_crop\n\n# self.labels = dict() if labels is None else labels\n# self.labels[\"file_path\"] = paths\n# self._length = len(paths)\n\n# if self.size is not None and self.size > 0:\n# self.rescaler = albumentations.SmallestMaxSize(max_size = self.size)\n# if not self.random_crop:\n# self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)\n# else:\n# self.cropper = albumentations.RandomCrop(height=self.size,width=self.size)\n# self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])\n# else:\n# self.preprocessor = lambda **kwargs: kwargs\n\n# def __len__(self):\n# return self._length\n\n# def preprocess_image(self, image_path):\n# image = Image.open(image_path)\n# if not image.mode == \"RGB\":\n# image = image.convert(\"RGB\")\n# image = np.array(image).astype(np.uint8)\n# image = self.preprocessor(image=image)[\"image\"]\n# image = (image/127.5 - 1.0).astype(np.float32)\n# return image\n\n# def __getitem__(self, i):\n# example = dict()\n# example[\"image\"] = 
self.preprocess_image(self.labels[\"file_path_\"][i])\n# for k in self.labels:\n# example[k] = self.labels[k][i]\n# return example\n \n# class NumpyPaths(ImagePaths):\n# def preprocess_image(self, image_path):\n# image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024\n# image = np.transpose(image, (1,2,0))\n# image = Image.fromarray(image, mode=\"RGB\")\n# image = np.array(image).astype(np.uint8)\n# image = self.preprocessor(image=image)[\"image\"]\n# image = (image/127.5 - 1.0).astype(np.float32)\n# return image\n","repo_name":"JunyaoHu/pred-vdm","sub_path":"data/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"74008584618","text":"#!/usr/bin/env python\n'''\nOWASP ZSC\nhttps://www.owasp.org/index.php/OWASP_ZSC_Tool_Project\nhttps://github.com/zscproject/OWASP-ZSC\nhttp://api.z3r0d4y.com/\nhttps://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]\n'''\nimport sys\nimport os\nfrom core.compatible import *\nfrom core.alert import *\nfrom core.commands import *\nfrom core.update import _update\nfrom lib.shell_storm_api.grab import _search_shellcode\nfrom lib.shell_storm_api.grab import _download_shellcode\nfrom lib.shell_storm_api.grab import _grab_all\nfrom core.encode import encode_process\nfrom core.get_input import _input\nfrom core.opcoder import op\nfrom core.obfuscate import obf_code\nfrom core.file_out import file_output\nif 'linux' in sys.platform:\n\timport readline\nelif 'darwin' in sys.platform:\n\tsys.path.insert(0, 'module/readline_osx')\n\timport readline\nelif 'win32' == sys.platform or 'win64' == sys.platform:\n\tsys.path.insert(0, 'module/readline_windows')\n\timport readline\nexec (compile(\n\topen(\n\t\tstr(os.path.dirname(os.path.abspath(__file__)).replace('\\\\', '/')) +\n\t\t'/commands.py', \"rb\").read(), str(os.path.dirname(os.path.abspath(\n\t\t\t__file__)).replace('\\\\', '/')) + '/commands.py', 'exec'))\nexec (compile(\n\topen(\n\t\tstr(os.path.dirname(os.path.abspath(__file__)).replace('\\\\', '/')) +\n\t\t'/start.py', \"rb\").read(), str(os.path.dirname(os.path.abspath(\n\t\t\t__file__)).replace('\\\\', '/')) + '/start.py', 'exec'))\n\n\nclass autocomplete(object):\n\tdef __init__(self, options):\n\t\tself.options = sorted(options)\n\n\tdef complete(self, text, state):\n\t\tif state == 0:\n\t\t\tif text:\n\t\t\t\tself.matches = [s for s in self.options\n\t\t\t\t\t\t\t\tif s and s.startswith(text)]\n\t\t\telse:\n\t\t\t\tself.matches = self.options[:]\n\t\ttry:\n\t\t\treturn self.matches[state]\n\t\texcept IndexError:\n\t\t\treturn None\n\n\ndef getcommand(commands):\n\n\tbackup_commands = commands\n\tcrawler = 0\n\tcommand_path = ['zsc']\n\tcommand = ''\n\twhile True:\n\t\ttry:\n\t\t\tcommand = _input('/'.join(command_path), 'any', False)\n\t\t\tif command is None:\n\t\t\t\t_lets_error\n\t\texcept:\n\t\t\twarn('interrupted by user!\\nExit\\n')\n\t\t\tsys.exit(0)\n\t\tcheck = True\n\n\t\tif command.startswith('#'): # allows for comments\n\t\t\tcontinue\n\n\t\tinContext = ['clear', 'help', 'about', 'version', 'back']\n\t\tfor option in commands:\n\t\t\tif command == option and command not in inContext:\n\t\t\t\tcrawler += 1\n\t\t\t\tif crawler is 1:\n\t\t\t\t\tcommands = commands[option][1]\n\t\t\t\t\tcommand_path.append(option)\n\t\t\t\tif crawler is 2:\n\t\t\t\t\tif command == 'search':\n\t\t\t\t\t\t_search_shellcode(False,0)\n\t\t\t\t\t\tcommands = backup_commands\n\t\t\t\t\t\tcompleter = 
autocomplete(commands)\n\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\t\tcrawler = 0\n\t\t\t\t\t\tcommand_path = ['zsc']\n\t\t\t\t\telif command == 'download':\n\t\t\t\t\t\t_download_shellcode(False,0,'')\n\t\t\t\t\t\tcommands = backup_commands\n\t\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\t\tcrawler = 0\n\t\t\t\t\t\tcommand_path = ['zsc']\n\t\t\t\t\telif command == 'shell_storm_list':\n\t\t\t\t\t\t_grab_all()\n\t\t\t\t\t\tcommands = backup_commands\n\t\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\t\tcrawler = 0\n\t\t\t\t\t\tcommand_path = ['zsc']\n\t\t\t\t\telif command == 'generate':\n\t\t\t\t\t\tcommands = commands[option]\n\t\t\t\t\t\tcommand_path.append(option)\n\t\t\t\t\telse:\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\tf = []\n\t\t\t\t\t\t\timport os as OS\n\t\t\t\t\t\t\tfor (dirpath, dirnames, filenames) in OS.walk('.'):\n\t\t\t\t\t\t\t\tf.extend(filenames)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcompleter = autocomplete(f)\n\t\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\t\tfilename = _input('filename', 'any', True)\n\t\t\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tcontent = open(filename, 'rb').read()\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\twarn('sorry, cann\\'t find file\\n')\n\t\t\t\t\t\tcommands = commands[option]\n\t\t\t\t\t\tcommand_path.append(option)\n\t\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\t\tt = True\n\t\t\t\t\t\twhile t:\n\t\t\t\t\t\t\tencode = _input('encode', 'any', True)\n\t\t\t\t\t\t\tfor en in commands:\n\t\t\t\t\t\t\t\tif encode == en:\n\t\t\t\t\t\t\t\t\tt = False\n\t\t\t\t\t\t\tif t is True:\n\t\t\t\t\t\t\t\twarn('please enter a valid encode name\\n')\n\t\t\t\t\t\tobf_code(option, encode, filename, content,False)\n\t\t\t\t\t\tcommands = backup_commands\n\t\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\t\tcrawler = 0\n\t\t\t\t\t\tcommand_path = ['zsc']\n\t\t\t\tif crawler is 3:\n\t\t\t\t\tos = option\n\t\t\t\t\tcommands = commands[option]\n\t\t\t\t\tcommand_path.append(option)\n\t\t\t\tif crawler is 4:\n\t\t\t\t\tfunc = option\n\t\t\t\t\tcommands = commands[option]\n\t\t\t\t\tcommand_path.append(option)\n\t\t\t\tif crawler is 5:\n\t\t\t\t\tdata = []\n\t\t\t\t\tbackup_option = option\n\t\t\t\t\tif option != '':\n\t\t\t\t\t\toptions = option.rsplit('&&')\n\t\t\t\t\t\tfor o in options:\n\t\t\t\t\t\t\tdata.append(_input(o,'any',True))\n\t\t\t\t\t\tn = 0\n\t\t\t\t\t\twrite('\\n')\n\t\t\t\t\t\tfor o in options:\n\t\t\t\t\t\t\tinfo('%s set to \"%s\"\\n' % (o, data[n]))\n\t\t\t\t\t\t\tn += 1\n\t\t\t\t\trun = getattr(\n\t\t\t\t\t\t__import__('lib.generator.%s.%s' % (os, func),\n\t\t\t\t\t\t\t\t fromlist=['run']),\n\t\t\t\t\t\t'run')\n\t\t\t\t\tshellcode = run(data)\n\t\t\t\t\twrite('\\n')\n\t\t\t\t\tfor encode in backup_commands['shellcode'][1]['generate'][\n\t\t\t\t\t\t\tos][func][backup_option]:\n\t\t\t\t\t\tinfo(encode + '\\n')\n\t\t\t\t\twrite('\\n\\n')\n\t\t\t\t\tinfo('enter encode 
type\\n')\n\t\t\t\t\tcompleter = autocomplete(backup_commands['shellcode'][1][\n\t\t\t\t\t\t'generate'][os][func][backup_option])\n\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\ttry:\n\t\t\t\t\t\tencode = _input('/'.join(command_path) + \"/encode_type\", 'any', False)\n\t\t\t\t\t\tif encode is None:\n\t\t\t\t\t\t\t_lets_error\n\t\t\t\t\texcept:\n\t\t\t\t\t\tencode = 'none'\n\t\t\t\t\t\twarn(\n\t\t\t\t\t\t\t'\\n\"none\" encode selected\\n')\n\t\t\t\t\twrite('\\n')\n\t\t\t\t\tassembly_code_or_not = _input(\n\t\t\t\t\t\t'Output assembly code?(y or n)', 'any', True)\n\t\t\t\t\tif assembly_code_or_not == 'y':\n\t\t\t\t\t\tassembly_code = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tassembly_code = False\n\t\t\t\t\tif assembly_code is True:\n\t\t\t\t\t\twrite('\\n'+encode_process(encode, shellcode, os, func) + '\\n\\n')\n\t\t\t\t\toutput_shellcode = _input('Output shellcode to screen?(y or n)', 'any', True)\n\t\t\t\t\tshellcode_op = op( encode_process(encode, shellcode, os, func), os)\n\t\t\t\t\tif output_shellcode == 'y':\n\t\t\t\t\t\tinfo('Generated shellcode is:\\n' + shellcode_op +'\\n\\n')\n\t\t\t\t\tfile_or_not = _input('Shellcode output to a .c file?(y or n)', 'any', True)\n\t\t\t\t\tif file_or_not == 'y':\n\t\t\t\t\t\ttarget = _input('Target .c file?', 'any', True)\n\t\t\t\t\t\tfile_output(target, func, data, os, encode, shellcode, shellcode_op)\n\t\t\t\t\tcommands = backup_commands\n\t\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\t\tcrawler = 0\n\t\t\t\t\tcommand_path = ['zsc']\n\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\tcheck = False\n\t\tif command == 'exit' or command == 'quit':\n\t\t\twrite(color.color('reset'))\n\t\t\tsys.exit('Exit')\n\t\telif command == 'update':\n\t\t\t_update(__version__)\n\t\t\tcommands = backup_commands\n\t\t\tcompleter = autocomplete(commands)\n\t\t\treadline.set_completer(completer.complete)\n\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\tcrawler = 0\n\t\t\tcommand_path = ['zsc']\n\t\telif command == 'help':\n\t\t\t_help(help)\n\t\telif command == 'restart':\n\t\t\tcommands = backup_commands\n\t\t\tcompleter = autocomplete(commands)\n\t\t\treadline.set_completer(completer.complete)\n\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\tcrawler = 0\n\t\t\tcommand_path = ['zsc']\n\t\telif command == 'about':\n\t\t\tabout()\n\t\telif command == 'version':\n\t\t\t_version()\n\t\telif command == 'clear':\n\t\t\t_clear()\n\t\telif command == 'back':\n\t\t\tif len(command_path) > 1:\n\t\t\t\tcommand_path.pop()\n\t\t\t\tcommands = backup_commands\n\t\t\t\tfor option in command_path:\n\t\t\t\t\tif option == 'zsc':\n\t\t\t\t\t\tpass\n\t\t\t\t\telif option == command_path[1]:\n\t\t\t\t\t\tcommands = commands[option][1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommands = commands[option]\n\t\t\t\tcompleter = autocomplete(commands)\n\t\t\t\treadline.set_completer(completer.complete)\n\t\t\t\treadline.parse_and_bind('tab: complete')\n\t\t\t\tcrawler -= 1\n\t\t\telse:\n\t\t\t\tinfo('Can\\'t go back from here!\\n')\n\t\telse:\n\t\t\tif command != '' and check is True:\n\t\t\t\tinfo('Command not found!\\n')\n\n\ndef engine(commands):\n\t''' engine function'''\n\tcompleter = autocomplete(commands)\n\treadline.set_completer(completer.complete)\n\treadline.parse_and_bind('tab: 
complete')\n\tgetcommand(commands)\n","repo_name":"OWASP/ZSC","sub_path":"core/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","stars":619,"dataset":"github-code","pt":"90"} +{"seq_id":"34376763366","text":"import sys\nimport random\nimport os\n\nimport torch\nimport torchvision\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm.auto import tqdm\nfrom torchvision import transforms\n\nsys.path.append(\".\")\nfrom utils import get_gen_loss, get_disc_loss\n\n\ndef train(\n dataset,\n device,\n gen,\n gen_opt,\n disc,\n disc_opt,\n adv_criterion,\n lambda_recon,\n recon_criterion,\n n_epochs,\n display_step,\n batch_size,\n model_name,\n save_model=True,\n cur_step=0\n):\n writer_real = SummaryWriter(f'logs/logs_{model_name}/real')\n writer_fake = SummaryWriter(f'logs/logs_{model_name}/fake')\n writer_condition = SummaryWriter(f'logs/logs_{model_name}/condition')\n\n try:\n os.mkdir('saved_model_paths')\n except FileExistsError:\n pass\n\n train_dataset, val_dataset = random_split(\n dataset,\n [int(len(dataset) * 0.95), len(dataset) - int(len(dataset) * 0.95)]\n )\n\n dataloader = DataLoader(train_dataset, batch_size=batch_size)\n val_dataloader = DataLoader(val_dataset, batch_size=batch_size)\n\n # applies a horizontal flip on an image\n flip = torch.jit.script(torch.nn.Sequential(transforms.RandomHorizontalFlip(p=1)))\n\n mean_generator_loss = 0\n mean_discriminator_loss = 0\n\n for epoch in range(n_epochs):\n for image, _ in tqdm(dataloader, file=sys.stdout):\n # Input Setup\n condition = image[:, :, :, 256:].to(device)\n real = image[:, :, :, :256].to(device)\n\n # 50% chance of flipping the the images horizontally. 
Either both must be flipped or both must be normal.\n if random.random() > 0.5:\n real = flip(real)\n condition = flip(condition)\n \n # Update discriminator\n disc_opt.zero_grad()\n disc_loss = get_disc_loss(gen, disc, real, condition, adv_criterion)\n disc_loss.backward(retain_graph=True)\n disc_opt.step()\n\n # Update generator\n gen_opt.zero_grad()\n gen_loss = get_gen_loss(\n gen,\n disc,\n real,\n condition,\n adv_criterion,\n recon_criterion,\n lambda_recon,\n )\n gen_loss.backward()\n gen_opt.step()\n\n # Keep track of the average loss\n mean_discriminator_loss += disc_loss.item() / display_step\n mean_generator_loss += gen_loss.item() / display_step\n\n # Visualization code\n if cur_step % display_step == 0:\n print()\n mean_val_loss = 0\n val_condition = None\n val_real = None\n for val_image, _ in tqdm(val_dataloader, file=sys.stdout, position=0, leave=True):\n val_condition = val_image[:, :, :, 256:].to(device)\n val_real = val_image[:, :, :, :256].to(device)\n\n with torch.no_grad():\n gen_loss = get_gen_loss(\n gen,\n disc,\n val_real,\n val_condition,\n adv_criterion,\n recon_criterion,\n lambda_recon,\n )\n mean_val_loss += gen_loss.item() / len(val_dataloader)\n\n print(\n f\"Epoch {epoch}: Step {cur_step}: \"\n f\"Generator loss: {mean_generator_loss}, \"\n f\"Generator Val Loss: {mean_val_loss}, \"\n f\"Discriminator loss: {mean_discriminator_loss}, \"\n )\n\n # Log with tensorboard\n with torch.no_grad():\n fake = gen(val_condition)\n img_grid_real = torchvision.utils.make_grid(val_real, normalize=True)\n img_grid_condition = torchvision.utils.make_grid(val_condition, normalize=True)\n img_grid_fake = torchvision.utils.make_grid(fake, normalize=True)\n\n writer_real.add_image(\"Real\", img_grid_real, global_step=cur_step)\n writer_fake.add_image(\"Fake\", img_grid_fake, global_step=cur_step)\n writer_condition.add_image(\"Condition\", img_grid_condition, global_step=cur_step)\n\n mean_generator_loss = 0\n mean_discriminator_loss = 0\n\n if save_model and cur_step % 2000 == 0:\n torch.save(\n {'gen': gen.state_dict(),\n 'gen_opt': gen_opt.state_dict(),\n 'disc': disc.state_dict(),\n 'disc_opt': disc_opt.state_dict()\n },\n f\"saved_model_paths/{model_name}_{cur_step}.pth\"\n )\n\n cur_step += 1\n","repo_name":"aryanpanpalia/anime-pix2pix","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"90"} +{"seq_id":"22514780579","text":"\"\"\"\r\nMethod need \"self\" reference paramenet but function don't need self reference parameter\r\n\"\"\"\r\n\r\nclass LoteryPlayer:\r\n def __init__(self):\r\n self.name = \"Rasid\"\r\n self.number = (5,4,56,7,5,14)\r\n \r\n # inside a class by using method , we can access self variable\r\n def total(self):\r\n return sum(self.number)\r\n \r\n \r\nplayer = LoteryPlayer()\r\nprint(player.name)\r\nprint(player.number)\r\nprint(player.total())\r\n\r\n\r\n\r\n\r\n\r\ndef a(x,y):\r\n return x*y\r\n\r\nclass Point3:\r\n\r\n\r\n def assign(self,x ,y, z): #method\r\n self.a = x\r\n self.b = y\r\n self.c = z\r\n\r\n\r\n def printPoint(self):\r\n print(self.a, self.b,self.c) # method\r\n print(a(self.a,self.b)) #use a function on printPoint method\r\n\r\n\r\n\r\n# using method :\r\np1 = Point3()\r\np1.assign(2, 3, 5)\r\np1.printPoint()\r\n\r\n\r\n\r\n\r\n# without method or function:\r\np1 = Point3()\r\n\r\np1.a = 2\r\np1.b = 3\r\np1.c = 
4\r\n\r\nprint(p1.a,p1.b,p1.c)\r\n\r\n","repo_name":"MonadWizard/python-basic","sub_path":"basic/oopFundamental/ClassDemo/classWithMethodBasic.py","file_name":"classWithMethodBasic.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"73873930857","text":"# Standard Library\nimport logging\nimport sys\nimport re\nfrom json import loads\n\n# 3rd Party\nimport redis_sentinel_url\nimport structlog\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_sockets import Sockets\nfrom gevent import pywsgi\nfrom geventwebsocket.handler import WebSocketHandler\nfrom structlog.processors import (\n JSONRenderer,\n StackInfoRenderer,\n TimeStamper,\n format_exc_info,\n)\nfrom structlog.stdlib import add_log_level, add_logger_name, filter_by_level\n\n# Fastlane\nimport fastlane.api.gzipped as gzipped\nimport fastlane.api.metrics as metrics\nfrom fastlane.api.enqueue import bp as enqueue\nfrom fastlane.api.execution import bp as execution_api\nfrom fastlane.api.healthcheck import bp as healthcheck\nfrom fastlane.api.routes import bp as routes_api\nfrom fastlane.api.status import bp as status\nfrom fastlane.api.stream import bp as stream\nfrom fastlane.api.task import bp as task_api\nfrom fastlane.models import db\nfrom fastlane.models.categories import QueueNames\nfrom fastlane.queue import Queue, QueueGroup\nfrom flask_basicauth import BasicAuth\n\n\nclass Application:\n def __init__(self, config, log_level, testing=False):\n self.config = config\n self.logger = None\n self.log_level = log_level\n\n self.create_app(testing)\n\n def create_app(self, testing):\n self.app = Flask(\"fastlane\")\n\n self.testing = testing\n self.app.testing = testing\n self.app.error_handlers = []\n\n for key in self.config.items.keys():\n self.app.config[key] = self.config[key]\n\n self.app.config[\"ENV\"] = self.config.ENV\n self.app.config[\"DEBUG\"] = self.config.DEBUG\n self.app.original_config = self.config\n self.app.log_level = self.log_level\n self.configure_logging()\n self.connect_redis()\n self.configure_queue()\n # self.connect_queue()\n self.config_blacklist_words_fn()\n\n self.configure_basic_auth()\n self.connect_db()\n self.load_executor()\n self.load_error_handlers()\n\n enable_cors = self.app.config[\"ENABLE_CORS\"]\n\n if (\n isinstance(enable_cors, (str, bytes)) and enable_cors.lower() == \"true\"\n ) or (isinstance(enable_cors, (bool)) and enable_cors):\n origin = self.app.config[\"CORS_ORIGINS\"]\n self.app.logger.info(f\"Configured CORS to allow access from '{origin}'.\")\n CORS(self.app)\n\n metrics.init_app(self.app)\n self.app.register_blueprint(metrics.bp)\n self.app.register_blueprint(healthcheck)\n self.app.register_blueprint(enqueue)\n self.app.register_blueprint(task_api)\n self.app.register_blueprint(execution_api)\n self.app.register_blueprint(status)\n self.app.register_blueprint(routes_api)\n\n self.app.register_blueprint(gzipped.bp)\n gzipped.init_app(self.app)\n\n sockets = Sockets(self.app)\n sockets.register_blueprint(stream)\n\n def configure_basic_auth(self):\n self.basic_auth = None\n\n if (\n self.app.config[\"BASIC_AUTH_USERNAME\"] is not None\n and self.app.config[\"BASIC_AUTH_PASSWORD\"] is not None\n ):\n self.basic_auth = BasicAuth(self.app)\n self.app.config[\"BASIC_AUTH_FORCE\"] = True\n\n def configure_logging(self):\n if self.app.testing:\n structlog.reset_defaults()\n\n disabled = [\n \"docker.utils.config\",\n \"docker.auth\",\n \"docker.api.build\",\n 
\"docker.api.swarm\",\n \"docker.api.image\",\n \"werkzeug\",\n \"requests\",\n \"urllib3\",\n ]\n\n for logger in disabled:\n log = logging.getLogger(logger)\n log.setLevel(logging.ERROR)\n log.disabled = True\n self.app.logger.disabled = True\n\n logging.basicConfig(\n level=self.log_level, stream=sys.stdout, format=\"%(message)s\"\n )\n\n chain = [\n filter_by_level,\n add_log_level,\n add_logger_name,\n TimeStamper(fmt=\"iso\"),\n StackInfoRenderer(),\n format_exc_info,\n JSONRenderer(indent=1, sort_keys=True),\n ]\n\n logger = logging.getLogger(__name__)\n\n if self.testing:\n chain = []\n logger = structlog.ReturnLogger()\n\n log = structlog.wrap_logger(\n logger,\n processors=chain,\n context_class=dict,\n wrapper_class=structlog.stdlib.BoundLogger,\n # cache_logger_on_first_use=True,\n )\n self.logger = log\n self.app.logger = self.logger\n\n def connect_redis(self):\n self.logger.debug(\"Connecting to redis...\")\n\n redis_url = self.app.config[\"REDIS_URL\"]\n self.logger.info(\"Configuring Redis...\", redis_url=redis_url)\n sentinel, client = redis_sentinel_url.connect(redis_url)\n self.app.sentinel = sentinel\n self.app.redis = client\n self.logger.info(\"Connection to redis successful\")\n\n def configure_queue(self):\n self.logger.debug(\"Configuring queue...\")\n\n queues = []\n\n for queue_name in [\n QueueNames.Job,\n QueueNames.Monitor,\n QueueNames.Notify,\n QueueNames.Webhook,\n ]:\n queue = Queue(self.app.logger, self.app.redis, queue_name)\n setattr(self.app, f\"{queue_name}_queue\", queue)\n queues.append(queue)\n\n self.app.queue_group = QueueGroup(self.logger, self.app.redis, queues)\n\n def config_blacklist_words_fn(self):\n blacklist_words = map(str.strip, self.app.config[\"ENV_BLACKLISTED_WORDS\"].split(\",\"))\n blacklist_pattern = r\"(%s)\" % \"|\".join(blacklist_words)\n re_blacklist = re.compile(blacklist_pattern, re.RegexFlag.IGNORECASE)\n self.app.blacklist_words_fn = re_blacklist.search\n\n def connect_db(self):\n settings = self.app.config[\"MONGODB_CONFIG\"]\n\n if isinstance(settings, (dict,)):\n self.app.config[\"MONGODB_SETTINGS\"] = settings\n else:\n self.app.config[\"MONGODB_SETTINGS\"] = loads(\n self.app.config[\"MONGODB_CONFIG\"]\n )\n\n self.logger.info(\n \"Connecting to MongoDB...\", mongo=self.app.config[\"MONGODB_SETTINGS\"]\n )\n db.init_app(self.app)\n self.logger.info(\n \"Connected to MongoDB successfully.\",\n mongo=self.app.config[\"MONGODB_SETTINGS\"],\n )\n\n def load_executor(self):\n name = self.config.EXECUTOR\n parts = name.split(\".\")\n executor_module = __import__(\".\".join(parts), None, None, [parts[-1]], 0)\n\n self.app.executor_module = executor_module\n\n blueprint = getattr(executor_module, \"bp\", None)\n\n if blueprint is not None:\n self.app.register_blueprint(blueprint)\n\n self.app.executor = self.app.executor_module.Executor(self.app)\n\n def load_error_handlers(self):\n self.app.error_handlers = []\n\n for handler_name in self.app.config[\"ERROR_HANDLERS\"]:\n parts = handler_name.split(\".\")\n obj = __import__(\".\".join(parts[:-1]), None, None, [parts[-1]], 0)\n obj = getattr(obj, parts[-1])\n\n self.app.error_handlers.append(obj(self.app))\n\n self.app.report_error = self.report_error\n\n def report_error(self, err, metadata=None):\n for handler in self.app.error_handlers:\n handler.report(err, metadata)\n\n def run(self, host, port):\n server = pywsgi.WSGIServer(\n (host, port), self.app, handler_class=WebSocketHandler\n )\n server.serve_forever()\n\n def _mock_redis(self, connected):\n def handle():\n 
self.app.redis.connected = connected\n\n return handle\n","repo_name":"fastlane-queue/fastlane","sub_path":"fastlane/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"90"} +{"seq_id":"21180215166","text":"# The guess API is already defined for you.\n# @param num, your guess\n# @return -1 if num is higher than the picked number\n# 1 if num is lower than the picked number\n# otherwise return 0\n# def guess(num: int) -> int:\n \n\nclass Solution:\n def guessNumber(self, n: int) -> int:\n l = 1\n r = n\n while l<=r:\n pick = (l+r)//2\n ans = guess(pick)\n if ans<0:\n r=pick-1\n elif ans>0:\n l=pick+1\n else:\n return pick\n","repo_name":"Tettey1/A2SV","sub_path":"leetcode-solutions/guess-number-higher-or-lower.py","file_name":"guess-number-higher-or-lower.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"22765418719","text":"\"\"\"\r\ncortx_dicom\r\n\r\nCore components\r\n\"\"\"\r\n\r\n\r\n# pylint: disable=too-many-lines\r\n# I. Docstring and code together consumes a lot of lines of code.\r\n# II. We try to keep similar or related classes in the same file.\r\n# III. The working code may be under the 1000 lines limit, but we\r\n# think, docstring with notations is useful to anybody to\r\n# understand our code and the way of our thinking.\r\n\r\n\r\n# Imports form standard library\r\nfrom importlib.resources import read_binary\r\nimport json\r\nfrom os import devnull\r\nfrom os.path import isfile\r\nimport pickle\r\nfrom random import randint\r\n\r\n# Imports from additional dependencies\r\nfrom boto3 import client\r\nfrom elasticsearch import Elasticsearch\r\nimport pydicom\r\n\r\n# Import self\r\nfrom . import dicomfields\r\n\r\n\r\nclass Config:\r\n \"\"\"\r\n Static class to provide configuration for CortxDicom\r\n ====================================================\r\n \"\"\"\r\n\r\n\r\n # Constants to represent config file save and load methods\r\n FILE_JSON = 'json'\r\n FILE_PICKLE = 'pickle'\r\n\r\n\r\n # Privae variable to hold config data in a secure way\r\n __config = {}\r\n\r\n\r\n @classmethod\r\n def get(cls, key : str = '') -> any:\r\n \"\"\"\r\n Get configuration\r\n =================\r\n\r\n Parameters\r\n ----------\r\n key : str, optional (empty string if omitted)\r\n The key to get. If key is empty string, the whole configuration is\r\n returned.\r\n\r\n Returns\r\n -------\r\n str | dict\r\n If key is not an empty string, the concerning value is returned. 
If\r\n key is empty, a copy of the whole configuration dict is returned.\r\n\r\n Raises\r\n ------\r\n KeyError\r\n When key is given but it doesn't exist.\r\n \"\"\"\r\n\r\n if key == '':\r\n result = dict(cls.__config)\r\n elif key in cls.__config.keys():\r\n result = cls.__config[key]\r\n else:\r\n raise KeyError('Config.get_config(): non-existing key.')\r\n return result\r\n\r\n\r\n @classmethod\r\n def is_set(cls, key : str) -> bool:\r\n \"\"\"\r\n Get whether a key is set or not\r\n ===============================\r\n\r\n Returns\r\n -------\r\n bool\r\n True if the key is set, false if not.\r\n \"\"\"\r\n\r\n return key in cls.__config.keys()\r\n\r\n\r\n @classmethod\r\n def load(cls, filename : str, method : str = '', **kwargs):\r\n \"\"\"\r\n Load configuration from file\r\n ============================\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n Name of the file to load.\r\n method : str, optional (empty string if omitted)\r\n Method to handle file. It can be Config.FILE_JSON or\r\n Config.FILE_PICKLE. If an empty string is given, Config.FILE_PICKLE\r\n is considered to use as load method.\r\n keyword arguments\r\n Arguments to forward to json.load() or pickle.load() funtions.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When unsupported storage method is set.\r\n FileNotFoundError\r\n When the name if the file cannot be interpreted as existing file.\r\n TypeError\r\n When the loaded instance is not a dictionary.\r\n \"\"\"\r\n\r\n if method == '':\r\n _method = Config.FILE_PICKLE\r\n else:\r\n _method = method\r\n if _method not in [Config.FILE_JSON, Config.FILE_PICKLE]:\r\n raise ValueError('Config.load_config(): unsupported method.')\r\n if not isfile(filename):\r\n raise FileNotFoundError('Config.load_config(): non existing file.')\r\n if _method == Config.FILE_JSON:\r\n with open(filename, 'r') as instream:\r\n _data = json.load(instream, **kwargs)\r\n elif _method == Config.FILE_PICKLE:\r\n with open(filename, 'rb') as instream:\r\n _data = pickle.load(instream, **kwargs)\r\n if not isinstance(_data, dict):\r\n raise TypeError('Config.load_config(): config data must be ' +\r\n 'instance of dict.')\r\n cls.__config = _data\r\n\r\n\r\n @classmethod\r\n def save(cls, filename : str, method : str = '', **kwargs):\r\n \"\"\"\r\n Save configuration to file\r\n ==========================\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n Name of the file to save.\r\n method : str, optional (empty string if omitted)\r\n Method to handle file. It can be Config.FILE_JSON or\r\n Config.FILE_PICKLE. 
If an empty string is given, Config.FILE_PICKLE\r\n is considered to use as save method.\r\n keyword arguments\r\n Arguments to forward to json.dump() or pickle.dump() funtions.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When unsupported storage method is set.\r\n\r\n Notes\r\n -----\r\n If you want to use Config.FILE_JSON each value in the configuration\r\n dictionary must be serializable with JSON.\r\n \"\"\"\r\n\r\n if method == '':\r\n _method = Config.FILE_PICKLE\r\n else:\r\n _method = method\r\n if _method not in [Config.FILE_JSON, Config.FILE_PICKLE]:\r\n raise ValueError('Config.save_config(): unsupported method.')\r\n if _method == Config.FILE_JSON:\r\n with open(filename, 'w') as outstream:\r\n json.dump(cls.__config, outstream, **kwargs)\r\n elif _method == Config.FILE_PICKLE:\r\n with open(filename, 'wb') as outstream:\r\n pickle.dump(cls.__config, outstream, **kwargs)\r\n\r\n\r\n @classmethod\r\n def set(cls, key : str, value : any) -> dict:\r\n \"\"\"\r\n Set configuration\r\n =================\r\n\r\n Parameters\r\n ----------\r\n key : str\r\n The key to set.\r\n value : any\r\n The value to set.\r\n\r\n Notes\r\n -----\r\n If you would like to use Config.FILE_JSON as handle method for\r\n load() or save() functions, value must be serializable with JSON.\r\n \"\"\"\r\n\r\n cls.__config[key] = value\r\n\r\n\r\nclass CortxDicom:\r\n \"\"\"\r\n Provides easy-to-use way to store and retrieve DICOM files in cortx\r\n ===================================================================\r\n\r\n Attributes:\r\n es_engine : Elasticsearch (access-only)\r\n Provide direct access to the Elasticsearch engine.\r\n s3_engine : S3 (S3.botocore.client) (access-only)\r\n Provide direct access to the boto3 S3 engine.\r\n \"\"\"\r\n\r\n\r\n # Constant to provide the default content type\r\n CONTENT_TYPE = 'application/dicom'\r\n\r\n # Constant to provide the default index to store DICOM metadata\r\n ES_DEFAULT_INDEX = 'cortx_dicom'\r\n\r\n # Constants to be applied as switches at label filtering\r\n FILTER_NO_FILTER = 0\r\n FILTER_PRIVATE = 1\r\n FILTER_HIPAA = 2\r\n FILTER_GDPR = 4\r\n\r\n # Keys of labels\r\n KEY_SCHEME_HUMAN_READABLE = 1\r\n KEY_SCHEME_NUMERIC = 0\r\n KEY_SCHEME_SPACELESS = 2\r\n\r\n\r\n # Dictionary to hold labeling information\r\n __labels = {}\r\n\r\n\r\n def __init__(self, es_engine : any = None, s3_engine : any = None):\r\n \"\"\"\r\n Initialize an instance of the object\r\n ====================================\r\n\r\n Parameters\r\n ----------\r\n es_engine : Elasticsearch, optional (None if omitted)\r\n An existing Elasticsearch instance. If None is given, a new\r\n Elasticsearch instance is created.\r\n s3_engine : S3 (S3.botocore.clinet), optional (None if omitted)\r\n An existing S3 instance. 
If None is given, a new S3 instance is\r\n created.\r\n\r\n Raises\r\n ------\r\n RuntimeError\r\n When creating es_engine from Config is not possible due to lack of\r\n needed keys.\r\n TypeError\r\n When a not supported type of instance is given as es_engine.\r\n RuntimeError\r\n When creating s3_engine from Config is not possible due to lack of\r\n needed keys.\r\n TypeError\r\n When a not supported type of instance is given as s3_engine.\r\n \"\"\"\r\n\r\n if es_engine is None:\r\n if not Config.is_set('es.url'):\r\n raise RuntimeError('CortxDicom.init(): cennot create ' +\r\n 'elasticsearch engine from configuration.')\r\n _es_dict = {}\r\n if Config.is_set('es.http_compress'):\r\n _es_dict['http_compress'] = Config.get('es.http_compress')\r\n if Config.is_set('es.port'):\r\n _es_dict['port'] = Config.get('es.port')\r\n if Config.is_set('es.scheme'):\r\n _es_dict['scheme'] = Config.get('es.scheme')\r\n self.__es_engine = Elasticsearch(Config.get('es.url'), **_es_dict)\r\n elif isinstance(es_engine, Elasticsearch):\r\n self.__es_engine = es_engine\r\n else:\r\n raise TypeError('CortxDicom.init(): es_engine must be NoneType or' +\r\n ' Elasticsearch instance.')\r\n if s3_engine is None:\r\n if not all([Config.is_set('s3.acces_key_id'),\r\n Config.is_set('s3.sercret_access_key'),\r\n Config.is_set('s3.url')]):\r\n raise RuntimeError('CortxDicom.init(): cennot create S3 ' +\r\n 'engine from configuration.')\r\n self.__s3_engine = client('s3', endpoint_url=Config.get('s3.url'),\r\n aws_access_key_id=\r\n Config.get('s3.acces_key_id'),\r\n aws_secret_access_key=\r\n Config.get('s3.sercret_access_key'))\r\n elif getattr(getattr(s3_engine, '__class__'), '__name__') == 'S3' and \\\r\n getattr(getattr(s3_engine, '__class__'), '__module__') == \\\r\n 'botocore.client':\r\n self.__s3_engine = s3_engine\r\n else:\r\n raise TypeError('CortxDicom.init(): s3_engine must be NoneType or' +\r\n ' boto3 S3 client instance.')\r\n\r\n\r\n @classmethod\r\n def apply_filter(cls, dicom_object : any,\r\n filter_type : int = 0) -> pydicom.dataset.FileDataset:\r\n \"\"\"\r\n Filter a DICOM file\r\n ===================\r\n\r\n Parameters\r\n ----------\r\n dicom_object : str | FileDataset (pydicom.dataset)\r\n Path of a DICOM file, or the DICOM file's instance.\r\n filter_type : int, optional (0 if omitted)\r\n Filters to apply.\r\n\r\n Returns\r\n -------\r\n FileDataset\r\n The filtered instance.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n When object is neither a FileDataset nor a string instance.\r\n\r\n See Also\r\n --------\r\n Available filters : FILTER_* constants of class CortxDicom()\r\n\r\n Notes\r\n -----\r\n Deleting from a pydicom instance usually mean inplace operation.\r\n This means it is not necessarily to use the returned variable.\r\n \"\"\"\r\n\r\n if isinstance(dicom_object, pydicom.dataset.FileDataset):\r\n result = dicom_object\r\n elif isinstance(dicom_object, str):\r\n result = pydicom.dcmread(dicom_object)\r\n else:\r\n raise TypeError('CortxDicom.apply_filter(): object must be a ' +\r\n 'DICOM instance or string, that is path to a ' +\r\n 'local DICOM file.')\r\n if filter_type // CortxDicom.FILTER_PRIVATE % 2 == 1:\r\n result.remove_private_tags()\r\n if filter_type // CortxDicom.FILTER_HIPAA % 2 == 1:\r\n cls.__load_labels(CortxDicom.FILTER_HIPAA)\r\n result.walk(cls.__remove_hipaa)\r\n if filter_type // CortxDicom.FILTER_GDPR % 2 == 1:\r\n cls.__load_labels(CortxDicom.FILTER_GDPR)\r\n result.walk(cls.__remove_gdpr)\r\n return result\r\n\r\n\r\n @classmethod\r\n def describe(cls, 
dicom_object : any, key_scheme : int = -1) -> list:\r\n \"\"\"\r\n Describe DICOM file\r\n ===================\r\n\r\n Parameters\r\n ----------\r\n dicom_object : str | FileDataset (pydicom.dataset)\r\n Path of a DICOM file, or the DICOM file's instance.\r\n key_scheme : int, optional (-1 if omitted)\r\n Scheme of DICOM keys to add. It must be\r\n CortxDicom.KEY_SCHEME_HUMAN_READABLE, CortxDicom.KEY_SCHEME_NUMERIC\r\n or CortxDicom.KEY_SCHEME_SPACELESS. If -1 is given, default value\r\n is used. At the moment it is CortxDicom.KEY_SCHEME_HUMAN_READABLE.\r\n\r\n Returns\r\n -------\r\n list[dict('key' : key, 'value' : value)]\r\n List of key value pairs in dictionaries.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n When object is neither a FileDataset nor a string instance.\r\n ValueError\r\n When the given key scheme is not supported.\r\n\r\n See Also\r\n --------\r\n Available schemes : KEY_SCHEME_* constants of class CortxDicom()\r\n \"\"\"\r\n\r\n if isinstance(dicom_object, pydicom.dataset.FileDataset):\r\n _object = dicom_object\r\n elif isinstance(dicom_object, str):\r\n _object = pydicom.dcmread(dicom_object)\r\n else:\r\n raise TypeError('CortxDicom.describe(): object must be a DICOM ' +\r\n 'instance or string, that is path to a local ' +\r\n 'DICOM file.')\r\n if key_scheme == -1:\r\n _key_scheme = CortxDicom.KEY_SCHEME_HUMAN_READABLE\r\n else:\r\n _key_scheme = key_scheme\r\n if _key_scheme not in [CortxDicom.KEY_SCHEME_HUMAN_READABLE,\r\n CortxDicom.KEY_SCHEME_NUMERIC,\r\n CortxDicom.KEY_SCHEME_SPACELESS]:\r\n raise ValueError('CortxDicom.describe(): unsupported key scheme.')\r\n cls.__load_labels(0)\r\n # Solve dicom bug to avoid UnicodeDecodeError\r\n with open(devnull, 'w') as nullstream:\r\n print(_object, file=nullstream)\r\n result = []\r\n for key, value in _object.items():\r\n str_key = str(key)\r\n if str_key in cls.__labels[0].keys():\r\n if value.value is not None:\r\n _key = cls.__labels[0][str_key][_key_scheme]\r\n if isinstance(value.value, bytes):\r\n _value = value.value.decode('utf-8', 'strict')\r\n else:\r\n _value = str(value.value)\r\n result.append({'dicom.key' : _key, 'dicom.value' : _value})\r\n return result\r\n\r\n\r\n @property\r\n def es_engine(self) -> any:\r\n \"\"\"\r\n Provide direct access to the Elasticsearch engine\r\n =================================================\r\n\r\n Returns\r\n -------\r\n Elasticsearch\r\n The engine itself.\r\n \"\"\"\r\n\r\n return self.__es_engine\r\n\r\n\r\n def get(self, search_expression : any = None, object_key : str = '',\r\n bucket : str = '', index : str = '') -> any:\r\n \"\"\"\r\n Get a DICOM file\r\n ================\r\n\r\n Parameters\r\n ----------\r\n search_expression : str | tuple | dict, optional (None if omitted)\r\n If string is given, it is treated as an Id to get. If a tuple is\r\n given it is treated as a simple key = value search. If dict is\r\n given it is directly forwarded to elasticsearch as a query. If None\r\n is given S3 is used.\r\n index : str, optional (empty string if omitted)\r\n Name of the index to get. 
If empty string is given, index name is\r\n retrieved from the configuration or default index is used if there\r\n is no index name in the configuration.\r\n object_key : str, optional (empty string if omitted)\r\n The name (key) to get the object in s3 with.\r\n bucket : str, optional (empty string if omitted)\r\n Name of the bucket to get.\r\n\r\n Returns\r\n -------\r\n FileDataset | list[FileDataset] | None\r\n DICOM file, if bucket and object_key or a string as\r\n search_expression is given. List of FileDataset if tuple or dict\r\n as search_expression is given. None if no result presents.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When neither search_expression nor object_key is given.\r\n\r\n Notes\r\n -----\r\n I.\r\n Parameter precedence: search_results precedes bucket and object_key\r\n parameters.\r\n III.\r\n Bucket precedence: bucket name in parameter always precedes bucket\r\n name retrived from configuration.\r\n IV.\r\n Index precedence: index name in parameter always precedes index name\r\n retrieved from configuration. Index name from configuration always\r\n precedes the default index name.\r\n \"\"\"\r\n\r\n if search_expression is not None:\r\n _data = self.search(search_expression, index=index, just_find=False)\r\n if _data is None:\r\n result = None\r\n else:\r\n result = []\r\n for row in _data:\r\n _element = self.s3_get_instance__(row[1], row[0])\r\n if _element is not None:\r\n result.append(_element)\r\n if len(result) == 0:\r\n result = 0\r\n else:\r\n if object_key == '':\r\n raise ValueError('CortxDicom.get(): either search_expression ' +\r\n 'or object_key must be given.')\r\n _bucket = self.__validate_bucket(bucket)\r\n if _bucket is None:\r\n raise AssertionError('CortxDicom.get(): bucket must set as '+\r\n 'parameter or as s3.bucket key in Config.')\r\n result = self.s3_get_instance__(object_key, _bucket)\r\n return result\r\n\r\n\r\n @property\r\n def s3_engine(self) -> any:\r\n \"\"\"\r\n Provide direct access to the boto3 S3 engine\r\n ============================================\r\n\r\n Returns\r\n -------\r\n Elasticsearch\r\n The engine itself.\r\n \"\"\"\r\n\r\n return self.__s3_engine\r\n\r\n\r\n def search(self, search_expression : any, index : str = '',\r\n just_find : bool = True) -> any:\r\n \"\"\"\r\n Search for a DICOM file\r\n =======================\r\n\r\n Parameters\r\n ----------\r\n search_expression : str | tuple | dict\r\n If string is given, it is treated as an Id to get. If a tuple is\r\n given it is treated as a simple key = value search. If dict is\r\n given it is directly forwarded to elasticsearch as a query.\r\n index : str, optional (empty string if omitted)\r\n Name of the index to get. 
If empty string is given, index name is\r\n retrieved from the configuration or default index is used if there\r\n is no index name in the configuration.\r\n just_find : bool, optional (True if omitted)\r\n Whether to return a bool with the result of the search or return\r\n information about the found object(s).\r\n\r\n Returns\r\n -------\r\n bool | tuple(str, str) | list[tuple(str, str)] | None\r\n If just_find is set to True, it returns a boll with value True,\r\n if the search search_expression had good results, False if not.\r\n If just_find is set to False, it returns a tuple of bucket,\r\n object_key pair, if the search_expression was a string, it returns\r\n a list of tuples with bucket and object_key pairs if the\r\n search_expression is was a dict or tuple, it returns None, if\r\n nothing is found.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When the tuple type search_expression has more or less than 2\r\n elements.\r\n TypeError\r\n When search_expression has other type than str, tuple or dict.\r\n\r\n Notes\r\n -----\r\n Index precedence: index name in parameter always precedes index name\r\n retrieved from configuration. Index name from configuration always\r\n precedes the default index name.\r\n \"\"\"\r\n\r\n _index = self.__get_index(index)\r\n if isinstance(search_expression, str):\r\n _status = self.es_engine.get(index=_index, id=search_expression,\r\n ignore=[400, 404])\r\n if _status.get('_source') is not None:\r\n result = self.__get_s3_loaction(_status['_source'])\r\n else:\r\n result = None\r\n else:\r\n if isinstance(search_expression, tuple):\r\n if len(search_expression) != 2:\r\n raise ValueError('CortxDicom.search(): tuple must have ' +\r\n 'exactly 2 elements.')\r\n _query = {'query': {'simple_query_string' :\r\n {'query' : '\"{}\" + \"{}\"'.format(\r\n search_expression[0], search_expression[1]),\r\n 'fields' : 'info', 'default_operator' : 'and'}}}\r\n elif isinstance(search_expression, dict):\r\n _query = search_expression\r\n else:\r\n raise TypeError('CortxDicom.search(): supported ' +\r\n 'search_expression types ar str, tuple, dict.')\r\n _status = self.es_engine.search(index=_index, body=_query,\r\n ignore=[400, 404])\r\n result = self.__interpret_search_results(_status)\r\n if just_find:\r\n result = result is not None\r\n return result\r\n\r\n\r\n def store(self, dicom_object : any, object_key : str = '',\r\n bucket : str = '', index : str = '',\r\n allow_overwrite : bool = False) -> any:\r\n \"\"\"\r\n Store DICOM file\r\n ================\r\n\r\n Parameters\r\n ----------\r\n dicom_object : str | FileDataset (pydicom.dataset)\r\n Path of a DICOM file, or the DICOM file's instance.\r\n object_key : str, optional (empty string if omitted)\r\n The name (key) to store the object in s3 with. If empty string is\r\n given, a new random name is generated.\r\n bucket : str, optional (empty string if omitted)\r\n Name of the bucket to store. If empty string is given, bucket name\r\n is retrieved from the configuration.\r\n index : str, optional (empty string if omitted)\r\n Name of the index to store. 
If empty string is given, index name is\r\n retrieved from the configuration or default index is used if there\r\n is no index name in the configuration.\r\n allow_overwrite : bool, optional (False if omitted)\r\n Whether or not to allow to overwrite existing object.\r\n\r\n Returns\r\n -------\r\n str | None\r\n The key created in Elasticsearch engine in case of success, None\r\n in case of failure.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n When object is neither a FileDataset nor a string instance.\r\n AssertionError\r\n When neither bucket parameter nor s3.bucket key in Config is set.\r\n\r\n Notes\r\n -----\r\n I.\r\n Bucket precedence: bucket name in parameter always precedes bucket\r\n name retrived from configuration.\r\n II.\r\n Index precedence: index name in parameter always precedes index name\r\n retrieved from configuration. Index name from configuration always\r\n precedes the default index name.\r\n III.\r\n Content type precedence: content type from configuration always\r\n precedes the default content type.\r\n \"\"\"\r\n\r\n # pylint: disable=too-many-arguments\r\n # We consider a better practice having long list of named\r\n # arguments then having **kwargs only.\r\n # pylint: disable=too-many-locals\r\n # The amount of local variables is needed because of the\r\n # readability of the code.\r\n\r\n _object = self.__validate_object(dicom_object)\r\n if _object is None:\r\n raise TypeError('CortxDicom.store(): object must be a DICOM ' +\r\n 'instance or string, that is path to a local ' +\r\n 'DICOM file.')\r\n _bucket = self.__validate_bucket(bucket)\r\n if _bucket is None:\r\n raise AssertionError('CortxDicom.store(): bucket must set as '+\r\n 'parameter or as s3.bucket key in Config.')\r\n _index = self.__get_index(index)\r\n _content_tpye = self.__get_content_type()\r\n _keys = self.s3_objects__(_bucket)\r\n _need_overwrite = False\r\n if object_key != '':\r\n _object_key = object_key\r\n _need_overwrite = _object_key in _keys\r\n else:\r\n _object_key = ''.join([chr(randint(97, 122)) for i in range(64)])\r\n while _object_key in _keys:\r\n _object_key = ''.join([chr(randint(97, 122))\r\n for i in range(64)])\r\n if _need_overwrite and not allow_overwrite:\r\n result = None\r\n else:\r\n _status = self.s3_engine.put_object(Bucket=_bucket, Key=_object_key,\r\n Body=pickle.dumps(_object),\r\n ContentType=_content_tpye)\r\n result = s3_success(_status)\r\n if result is True:\r\n _searchable = {}\r\n _searchable['bucket'] = _bucket\r\n _searchable['object_key'] = _object_key\r\n _searchable['info'] = self.describe(_object)\r\n _status = self.es_engine.index(index=self.ES_DEFAULT_INDEX,\r\n body=_searchable)\r\n if es_success(_status):\r\n if '_id' in _status.keys():\r\n result = _status['_id']\r\n else:\r\n result = None\r\n else:\r\n result = None\r\n return result\r\n\r\n\r\n def s3_buckets__(self) -> list:\r\n \"\"\"\r\n Get buckets\r\n ===========\r\n\r\n Returns\r\n -------\r\n list[str]\r\n List of bucket names.\r\n \"\"\"\r\n\r\n result = []\r\n data = self.__s3_engine.list_buckets()\r\n if s3_success(data):\r\n if data.get('Buckets') is not None:\r\n result = [e['Name'] for e in data['Buckets']]\r\n return result\r\n\r\n\r\n def s3_get_instance__(self, object_key : str, bucket : str) -> any:\r\n \"\"\"\r\n Get object from S3 as a python instance\r\n =======================================\r\n\r\n Parameters\r\n ----------\r\n object_key : str\r\n Object key to get.\r\n bucket : str\r\n Bucket to get from.\r\n\r\n Returns\r\n -------\r\n any\r\n Python 
instance.\r\n\r\n Notes\r\n -----\r\n Caution! This function doesn't perform additional checks due to\r\n performance improvement. preferably use CortxDicom.get().\r\n \"\"\"\r\n\r\n _keys = self.s3_objects__(bucket)\r\n if object_key not in _keys:\r\n result = None\r\n else:\r\n _data = self.s3_engine.get_object(Bucket=bucket, Key=object_key)\r\n _bytes = _data.get('Body')\r\n if bytes is None:\r\n result = None\r\n else:\r\n result = pickle.loads(_bytes.read())\r\n return result\r\n\r\n\r\n def s3_objects__(self, bucket : str = '') -> list:\r\n \"\"\"\r\n Get objects in the given bucket\r\n ===============================\r\n\r\n Parameters\r\n ----------\r\n bucket : str, optional (empty string if omitted)\r\n Name of the bucket to store. If empty string is given, bucket name\r\n is retrieved from the configuration.\r\n\r\n Returns\r\n -------\r\n list[str]\r\n List of object names.\r\n\r\n Raises\r\n ------\r\n AssertionError\r\n When neither bucket parameter nor s3.bucket key in Config is set.\r\n\r\n Notes\r\n -----\r\n Bucket precedence: bucket name in parameter always precedes bucket\r\n name retrived from configuration.\r\n \"\"\"\r\n\r\n _bucket = self.__validate_bucket(bucket)\r\n if _bucket is None:\r\n raise AssertionError('CortxDicom.s3_objects__(): bucket must ' +\r\n 'set as parameter or as s3.bucket key in' +\r\n 'Config.')\r\n result = []\r\n data = self.__s3_engine.list_objects(Bucket=_bucket)\r\n if s3_success(data):\r\n if data.get('Contents') is not None:\r\n result = [e['Key'] for e in data['Contents']]\r\n return result\r\n\r\n\r\n def __get_content_type(self) -> str:\r\n \"\"\"\r\n Get valid content type\r\n ======================\r\n\r\n Returns\r\n -------\r\n str\r\n Content type that conforms the precedence.\r\n \"\"\"\r\n\r\n # pylint: disable=no-self-use\r\n # However self is not used, tha nature of the method deeply\r\n # belongs to the object.\r\n\r\n if Config.is_set('s3.content_type'):\r\n result = Config.get('s3.content_type')\r\n else:\r\n result = CortxDicom.CONTENT_TYPE\r\n return result\r\n\r\n\r\n def __get_index(self, index : str) -> str:\r\n \"\"\"\r\n Get valid index\r\n ===============\r\n\r\n Parameters\r\n ----------\r\n index : str\r\n Name of the index to store. If empty string is given, index name is\r\n retrieved from the configuration or default index is used if there\r\n is no index name in the configuration.\r\n\r\n Returns\r\n -------\r\n str\r\n Index that conforms the precedence.\r\n\r\n Notes\r\n -----\r\n Index precedence: index name in parameter always precedes index name\r\n retrieved from configuration. 
Index name from configuration always\r\n precedes the default index name.\r\n \"\"\"\r\n\r\n # pylint: disable=no-self-use\r\n # However self is not used, tha nature of the method deeply\r\n # belongs to the object.\r\n\r\n if index != '':\r\n result = index\r\n elif Config.is_set('s3.index'):\r\n result = Config.get('s3.index')\r\n else:\r\n result = CortxDicom.ES_DEFAULT_INDEX\r\n return result\r\n\r\n\r\n def __get_s3_loaction(self, source : dict) -> tuple:\r\n \"\"\"\r\n Get S3 location from elasticsearch source dict\r\n ==============================================\r\n\r\n Parameters\r\n ----------\r\n source : dict\r\n Dictionary from elasticsearch.\r\n\r\n Returns\r\n -------\r\n tuple(str, str) | None\r\n Tuple of bucket, object_key pair if source contains valid data,\r\n None if not.\r\n \"\"\"\r\n\r\n # pylint: disable=no-self-use\r\n # However self is not used, tha nature of the method deeply\r\n # belongs to the object.\r\n\r\n _bucket = source.get('bucket')\r\n _object_key = source.get('object_key')\r\n if _bucket is None or _object_key is None:\r\n result = None\r\n else:\r\n result = (_bucket, _object_key)\r\n return result\r\n\r\n\r\n def __interpret_search_results(self, search_results : dict) -> list:\r\n \"\"\"\r\n Interpreted elasticsearch search results\r\n ========================================\r\n\r\n Parameters\r\n ----------\r\n dict\r\n The result of the search.\r\n\r\n Returns\r\n -------\r\n list[tuple(str, str)] | None\r\n List of tuples with bucket and object_key pairs if search_results\r\n contains that kind of data, None, if nothing is found.\r\n \"\"\"\r\n\r\n if 'hits' not in search_results.keys():\r\n result = None\r\n else:\r\n if 'hits' not in search_results['hits'].keys():\r\n result = None\r\n else:\r\n result = []\r\n for element in search_results['hits']['hits']:\r\n _source = element.get('_source')\r\n if _source is None:\r\n continue\r\n _location = self.__get_s3_loaction(_source)\r\n if _location is None:\r\n continue\r\n result.append((_location))\r\n if len(result) == 0:\r\n result = None\r\n return result\r\n\r\n\r\n @classmethod\r\n def __load_labels(cls, label_id : int):\r\n \"\"\"\r\n Load labels if needed\r\n =====================\r\n\r\n Parameters\r\n ----------\r\n label_id : int\r\n Label storage to load.\r\n - 0 to load all supported DICOM tags.\r\n - CortxDicom.FILTER_HIPAA to load HIPAA protected tags\r\n - CortxDicom.FILTER_GDPR to load GDPR protected tags\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When a not supported label_id is given.\r\n \"\"\"\r\n\r\n if label_id == 0:\r\n if 0 not in cls.__labels.keys():\r\n cls.__labels[0] = pickle.loads(read_binary(dicomfields,\r\n 'dicom_tags.dict'))\r\n elif label_id == CortxDicom.FILTER_HIPAA:\r\n if CortxDicom.FILTER_HIPAA not in cls.__labels.keys():\r\n cls.__labels[CortxDicom.FILTER_HIPAA] = pickle.loads(\r\n read_binary(dicomfields, 'phi_hipaa.list'))\r\n elif label_id == CortxDicom.FILTER_GDPR:\r\n if CortxDicom.FILTER_GDPR not in cls.__labels.keys():\r\n cls.__labels[CortxDicom.FILTER_GDPR] = pickle.loads(\r\n read_binary(dicomfields, 'phi_gdpr.list'))\r\n else:\r\n raise ValueError('CortxDicom.__load_labels(): unsupported label ' +\r\n 'source.')\r\n\r\n\r\n @classmethod\r\n def __remove_gdpr(cls, dataset : pydicom.dataset.FileDataset,\r\n element : pydicom.dataelem.DataElement):\r\n \"\"\"\r\n Remove tag according to regulations of GDPR\r\n ===========================================\r\n\r\n Parameters\r\n ----------\r\n dataset : pydicom.dataset.FileDataset\r\n Dataset to 
perform removal.\r\n element : pydicom.dataelem.DataElement\r\n Data element to examine.\r\n\r\n Notes\r\n -----\r\n This function meets the specifications of pydicom.dataset.walk()\r\n function.\r\n \"\"\"\r\n\r\n if str(element.tag) in cls.__labels[CortxDicom.FILTER_GDPR]:\r\n del dataset[element.tag]\r\n\r\n\r\n @classmethod\r\n def __remove_hipaa(cls, dataset : pydicom.dataset.FileDataset,\r\n element : pydicom.dataelem.DataElement):\r\n \"\"\"\r\n Remove tag according to regulations of HIPAA\r\n ============================================\r\n\r\n Parameters\r\n ----------\r\n dataset : pydicom.dataset.FileDataset\r\n Dataset to perform removal.\r\n element : pydicom.dataelem.DataElement\r\n Data element to examine.\r\n\r\n Notes\r\n -----\r\n This function meets the specifications of pydicom.dataset.walk()\r\n function.\r\n \"\"\"\r\n\r\n if str(element.tag) in cls.__labels[CortxDicom.FILTER_HIPAA]:\r\n del dataset[element.tag]\r\n\r\n\r\n def __validate_bucket(self, bucket) -> str:\r\n \"\"\"\r\n Validate bucket\r\n ===============\r\n\r\n Parameters\r\n ----------\r\n bucket : str\r\n Name of the bucket to store. If empty string is given, bucket name\r\n is retrieved from the configuration.\r\n\r\n Returns\r\n -------\r\n str | None\r\n The name of the bucket if validation is successful, None if not.\r\n\r\n Notes\r\n -----\r\n Bucket precedence: bucket name in parameter always precedes bucket\r\n name retrived from configuration.\r\n \"\"\"\r\n\r\n # pylint: disable=no-self-use\r\n # However self is not used, tha nature of the method deeply\r\n # belongs to the object.\r\n\r\n result = None\r\n if bucket != '':\r\n result = bucket\r\n else:\r\n if Config.is_set('s3.bucket'):\r\n result = Config.get('s3.bucket')\r\n return result\r\n\r\n\r\n def __validate_object(self,\r\n dicom_object : any) -> pydicom.dataset.FileDataset:\r\n \"\"\"\r\n Validate object\r\n ===============\r\n\r\n Parameters\r\n ----------\r\n dicom_object : str | FileDataset (pydicom.dataset)\r\n Path of a DICOM file, or the DICOM file's instance.\r\n\r\n Returns\r\n -------\r\n FileDataset (pydicom.dataset) | None\r\n The DICOM object if validation is successful, None if not.\r\n \"\"\"\r\n\r\n # pylint: disable=no-self-use\r\n # However self is not used, tha nature of the method deeply\r\n # belongs to the object.\r\n\r\n if isinstance(dicom_object, pydicom.dataset.FileDataset):\r\n result = dicom_object\r\n elif isinstance(dicom_object, str):\r\n result = pydicom.dcmread(dicom_object)\r\n else:\r\n result = None\r\n return result\r\n\r\n\r\ndef es_success(query_result : dict) -> bool:\r\n \"\"\"\r\n Check whether elasticsearch query was successful or not\r\n =======================================================\r\n\r\n Parameters\r\n ----------\r\n query_result : dcit\r\n The result of the query.\r\n\r\n Returns\r\n -------\r\n bool\r\n True if the query was successful, False if not.\r\n \"\"\"\r\n\r\n result = False\r\n if query_result.get('_shards') is not None:\r\n if query_result['_shards'].get('failed') is not None:\r\n result = query_result['_shards']['failed'] == 0\r\n return result\r\n\r\n\r\ndef s3_success(query_result : dict) -> bool:\r\n \"\"\"\r\n Check whether S3 query was successful or not\r\n ============================================\r\n\r\n Parameters\r\n ----------\r\n query_result : dcit\r\n The result of the query.\r\n\r\n Returns\r\n -------\r\n bool\r\n True if the query was successful, False if not.\r\n \"\"\"\r\n\r\n result = False\r\n if query_result.get('ResponseMetadata') is not 
None:\r\n if query_result['ResponseMetadata'].get('HTTPStatusCode') is not None:\r\n result = query_result['ResponseMetadata']['HTTPStatusCode'] == 200\r\n return result\r\n","repo_name":"Seagate/cortx","sub_path":"doc/integrations/cortx_dicom/cortx_dicom/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":39384,"program_lang":"python","lang":"en","doc_type":"code","stars":631,"dataset":"github-code","pt":"90"} +{"seq_id":"36104329707","text":"import json\nimport shutil\nfrom uuid import uuid4\n\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\nfrom requests_html import HTMLSession\n\n\ndef send_discord_alert(user, user_alert, chain, price):\n\n webhook = DiscordWebhook(\n (\n \"WEBHOOK LINK\"\n ),\n rate_limit_retry=True,\n )\n msg = (\n f\"Your **{user_alert} gwei** alert on **{chain}** has been triggered. :dart:\"\n f\"\\n\\n**Current gasPrice: {price} gwei**. :dollar:\"\n )\n\n embed = DiscordEmbed(\n title=f\":rotating_light: :envelope_with_arrow: \",\n description=msg,\n color=\"2D36FF\",\n )\n\n embed.set_timestamp()\n\n webhook.add_embed(embed)\n\n webhook.execute()\n\n webhook = DiscordWebhook(\n (\n \"WEBHOOK LINK\"\n ),\n rate_limit_retry=True,\n content=user,\n )\n webhook.execute()\n\n print(f\"Alert sent to {user}. {user_alert} gwei on {chain}.\") \n\n\nclass Logger:\n def __init__(self):\n self.chains = [\n \"AVAX\",\n \"ETH\",\n \"ARB\",\n \"BSC\",\n \"HECO\",\n \"MATIC\",\n \"MOVR\",\n \"CRO\",\n \"CELO\",\n \"XDAI\",\n ]\n self.session = HTMLSession()\n\n with open(\"files/bdd/db.json\", \"r+\", encoding=\"utf8\") as file:\n self.db = json.load(file)\n\n def save(self):\n with open(\"files/bdd/db.json\", \"r+\", encoding=\"utf8\") as file:\n file.seek(0)\n json.dump(self.db, file, indent=4)\n file.truncate()\n\n def saveImage(self, url):\n try:\n if url[:26] == \"https://cdn.discordapp.com\" and url[-3:] in [\n \"jpg\",\n \"png\",\n \"jpeg\",\n ]:\n content = self.session.get(url, stream=True)\n title = f\"files/photos/{str(uuid4())[:5]}.jpg\"\n with open(f\"{title}\", \"wb\") as image:\n shutil.copyfileobj(content.raw, image)\n\n return \"Picture added ! :fire:\"\n elif url[-3:] not in [\"jpg\", \"png\", \"jpeg\"]:\n return \"Wrong picture format. Please Try again.\"\n\n except Exception as err:\n print(\"Error saving picture: {}\".format(err))\n return \"Could not save picture. Try again. :x:\"\n\n def add_count(self, user):\n user = f\"<@{user}>\"\n if user not in self.db:\n self.db[user] = {\n \"count\": 0,\n \"links\": [],\n \"gasAlerts\": {\n \"AVAX\": 0,\n \"ETH\": 0,\n \"ARB\": 0,\n \"BSC\": 0,\n \"HECO\": 0,\n \"MATIC\": 0,\n \"MOVR\": 0,\n \"CRO\": 0,\n \"CELO\": 0,\n \"XDAI\": 0,\n },\n }\n print(f\"{user} added to the database.\")\n\n self.db[user][\"count\"] += 1\n self.save()\n return user\n\n def add_link(self, user, url):\n user = self.add_count(user)\n\n if url not in self.db[user][\"links\"]:\n self.db[user][\"links\"].append(url)\n self.save()\n print(f\"{user} added an URL to his list.\")\n return f\"Your link has been saved. 
Use **!myLinks** to check all your saved links.\"\n else:\n return \"URL already in your list !\"\n\n def user_links(self, user):\n user = self.add_count(user)\n\n print(f\"{user} asked for his URLs.\")\n return self.db[user][\"links\"]\n\n def del_link(self, user, index):\n user = self.add_count(user)\n try:\n if index - 1 > 0:\n target = self.db[user][\"links\"][index - 1]\n del self.db[user][\"links\"][index - 1]\n self.save()\n\n print(f\"{user} has deleted a link.\")\n return \"This link was removed from your list. :white_check_mark:\"\n elif index - 1 <= 0:\n return \"Wrong index ! :x:\"\n\n except IndexError:\n return \"Could not find this URL in your list. :x:\"\n\n def add_gas_alert(self, user, chain, alert):\n\n try:\n alert = int(alert)\n if alert < 0:\n return \"Please choose a positive number. :x:\"\n except Exception:\n return \"Wrong format. Please try again. :x:\\n\\nCommand example: **!gasAlert ETH 50**\"\n\n chain = chain.upper()\n if chain in self.chains:\n user = self.add_count(user)\n\n self.db[user][\"gasAlerts\"][chain] = alert\n self.save()\n\n print(f\"{user} added a {alert} gwei alert on {chain}.\")\n return f\"Alert successfully added. :white_check_mark:\\n\\n:arrow_forward: **{alert} gwei on {chain}.**\"\n else:\n return \"Wrong chain ! Use **!gasList** to check available chains.\"\n\n def check_gas_alerts(self):\n \"\"\"Comparing user alert with last gasPrice and\n send him message on Discord with webhook.\n user = user to check alerts from.\"\"\"\n\n headers = {\n \"User-Agent\": (\n \"MY_USER_AGENT\"\n )\n }\n current_gasPrices = {\n \"AVAX\": 0,\n \"ETH\": 0,\n \"ARB\": 0,\n \"BSC\": 0,\n \"HECO\": 0,\n \"MATIC\": 0,\n \"MOVR\": 0,\n \"CRO\": 0,\n \"CELO\": 0,\n \"XDAI\": 0,\n }\n try:\n for chain in current_gasPrices.keys():\n url = (\n \"GAS API URL\"\n )\n r = self.session.get(url, headers=headers).json()\n gasPrice = round(r[\"data\"][\"normal\"][\"price\"] * 0.000000001, 2)\n\n current_gasPrices[chain] = gasPrice\n \n except KeyError:\n return None\n \n for user in self.db:\n user_alerts = self.db[user][\"gasAlerts\"]\n\n # COMPARING USER ALERT WITH CURRENT PRICE\n if current_gasPrices:\n for chain in current_gasPrices.keys() & user_alerts.keys():\n current_price = current_gasPrices[chain]\n alert = user_alerts[chain]\n if current_price <= alert:\n send_discord_alert(user, alert, chain, current_price)\n self.db[user][\"gasAlerts\"][chain] = 0\n self.save()\n print(f\"{user} alert on {chain} reset.\")\n \n \n def my_alerts(self, user):\n user = self.add_count(user)\n\n user_alerts = self.db[user][\"gasAlerts\"]\n alerts_list = []\n for entry in user_alerts:\n if user_alerts[entry] > 0:\n alerts_list.append(\n f\":arrow_forward: **{entry}: {user_alerts[entry]} gwei**\\n\\n\"\n )\n if alerts_list:\n print(f\"{user} asked for his gas alerts.\")\n return f\"Current alerts:\\n\\n{''.join(alerts_list)}\"\n elif not alerts_list:\n return \"No alert saved yet. Use **!gasAlert** command to save your first.\"\n\n def reset_gas_alerts(self, user):\n user = self.add_count(user)\n\n alerts = self.db[user][\"gasAlerts\"]\n for chain in alerts:\n alerts[chain] = 0\n self.save()\n print(f\"{user} has reset his gas alerts.\")\n return \"All your gas alerts were reset. 
:white_check_mark:\"\n\n def add_response(self, user, answer):\n user = self.add_count(user)\n\n with open(\"files/bdd/responses.json\", \"r+\", encoding=\"utf8\") as file:\n responses = json.load(file)\n if answer not in responses[\"list\"] and answer:\n responses[\"list\"].append(answer)\n file.seek(0)\n json.dump(responses, file, indent=4)\n file.truncate()\n print(f\"{user} added a response.\")\n return f'\"{answer}\" added to responses list. :pencil:'\n elif not answer:\n return \"Empty response. :x:\"\n\n elif answer in responses[\"list\"]:\n return f'\"{answer}\" is already in responses list. :x:'\n\n def del_response(self, answer):\n with open(\"files/bdd/responses.json\", \"r+\", encoding=\"utf8\") as file:\n responses = json.load(file)\n if answer not in responses[\"list\"]:\n return f'\"{answer}\" was not found in responses list. :x:'\n\n responses[\"list\"].remove(answer)\n file.seek(0)\n json.dump(responses, file, indent=4)\n file.truncate()\n return f'\"{answer}\" removed from responses list. :white_check_mark:'\n\n def delete_last_response(self):\n with open(\"files/bdd/responses.json\", \"r+\", encoding=\"utf8\") as file:\n responses = json.load(file)\n if responses[\"list\"]:\n last = responses[\"list\"][-1]\n responses[\"list\"].remove(last)\n file.seek(0)\n json.dump(responses, file, indent=4)\n file.truncate()\n return f'\"{last}\" removed from list. :white_check_mark:'\n elif not responses[\"list\"]:\n return \"List is empty ! :x:\"\n\n def allResponses(self):\n with open(\"files/bdd/responses.json\", \"r\", encoding=\"utf8\") as file:\n responses = json.load(file)\n return \"\\n\\n\".join(responses[\"list\"])\n","repo_name":"Rayanworkout/madison-discord-bot","sub_path":"files/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19125999280","text":"from django import forms\n\n\nclass FormLinkedList(forms.Form):\n \"\"\"Classe responsavel por cria o formulario html\"\"\"\n\n opcao = {\n (1, 'insert'),\n (2, 'append'),\n (3, 'removeFirst')\n }\n \n linked_list = forms.CharField(label='Adicionar elemento', required=False)\n escolhe_acao = forms.ChoiceField(\n label='Escolha a opcao',\n choices=opcao,\n required=False,\n )\n lista_atual = forms.CharField(\n label='Lista Atual',\n required=False,\n )","repo_name":"davibtonon/lista_encadeada_django","sub_path":"linked_list/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18208327379","text":"import itertools\nN = int(input())\nA = [int(_) for _ in input().split()]\nif A[0] != 0:\n if N == 0 and A[0] == 1:\n print(1)\n else:\n print(-1)\n exit()\nans = 1\nn = 1\nif A[0] > 1:\n print(-1)\n exit()\ncumr = list(itertools.accumulate(A[::-1]))[::-1]\nfor i in range(1, N + 1):\n minn = n - A[i - 1]\n n = min(2 * minn, cumr[i])\n if n>=minn and n>=A[i]:\n ans += n\n else:\n print(-1)\n exit()\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02665/s560827510.py","file_name":"s560827510.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18146597259","text":"def count(word, text):\n \"\"\"Count the word in text.\n Counting is done case-insensitively.\n\n >>> count('a', 'A b c a')\n 2\n >>> count('computer', 'Nurtures computer scientists' \\\n 
+ ' and highly-skilled computer engineers' \\\n + ' who will create and exploit \"knowledge\"' \\\n + ' for the new era. Provides an outstanding' \\\n + ' computer environment.')\n 3\n \"\"\"\n return (text\n .lower()\n .split()\n .count(word.lower()))\n\n\ndef run():\n word = input()\n text = \"\"\n\n line = input()\n while line != \"END_OF_TEXT\":\n text += line + \"\\n\"\n line = input()\n\n print(count(word, text))\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02419/s064076039.py","file_name":"s064076039.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15026170204","text":"import configparser\r\n\r\nfrom ftplib import FTP\r\nfrom Now_Time import Time\r\n\r\n\r\nconfig = configparser.ConfigParser()\r\n\r\ndef FTP_Post(FileName_PatchNote):\r\n try:\r\n ftp = FTP('nixserver.dothome.co.kr')\r\n ftp.login(\"nixserver\", \"dlswb4fkd!\")\r\n \r\n ftp.cwd('html/DATA') # 업로드할 FTP 폴더로 이동\r\n myfile = open(FileName_PatchNote,'rb') # 로컬 파일 열기\r\n print(f\"{Time()}) FTP 로컬 파일 열기 완료\")\r\n ftp.storbinary('STOR ' +FileName_PatchNote, myfile ) # 파일을 FTP로 업로드\r\n print(f\"{Time()}) FTP 업로드 완료\")\r\n myfile.close() # 파일 닫기\r\n print(f\"{Time()}) FTP 파일 닫기 완료\")\r\n ftp.quit()\r\n print(f\"{Time()}) FTP 모듈 종료\")\r\n try:\r\n config['Data'] = {}\r\n config['Data']['Detect'] = '1'\r\n with open(\"Detect.ini\", 'w', encoding='UTF-8-SIG') as configfile:\r\n config.write(configfile)\r\n print(f\"{Time()}) Detect.ini 완료\")\r\n except:\r\n print(f\"{Time()}) Detect.ini 파일 쓰기 실패\")\r\n return None\r\n return\r\n except:\r\n return \r\n","repo_name":"lsc58461/nixbot","sub_path":"FTP_TitleName_Post.py","file_name":"FTP_TitleName_Post.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15899228611","text":"#!/usr/bin/python3\n\ndef divisible_by_2(my_list=[]):\n\n if (not my_list):\n return (None)\n\n else:\n new_list = []\n list_size = len(my_list)\n for index in range(list_size):\n new_list.append(my_list[index] % 2 == 0)\n return (new_list)\n","repo_name":"lancedesk/alx-higher_level_programming","sub_path":"0x03-python-data_structures/10-divisible_by_2.py","file_name":"10-divisible_by_2.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"33656451307","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n# iteratively\n# 执行用时 :44 ms, 在所有 python3 提交中击败了95.40%的用户\n# 内存消耗 :13.8 MB, 在所有 python3 提交中击败了5.66%的用户\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n cur1,cur2 = l1,l2\n new = ListNode(0)\n cur = new\n while cur1 != None and cur2 != None:\n # print(cur1.val)\n # print(cur2.val)\n if cur1.val <= cur2.val:\n cur.next = cur1\n cur1 = cur1.next\n else:\n cur.next = cur2\n cur2 = cur2.next\n cur = cur.next\n if cur1 != None:\n cur.next = cur1\n\n if cur2 != None:\n cur.next = cur2\n\n return new.next\n\n# recursively\n# 执行用时 :36 ms, 在所有 python3 提交中击败了99.82%的用户\n# 内存消耗 :13.9 MB, 在所有 python3 提交中击败了5.66%的用户\n def MergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if not l1 or not l2:\n return l2 or l1\n if l1.val <= l2.val:\n l1.next = self.recursionMergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.recursionMergeTwoLists(l1, l2.next)\n return 
l2","repo_name":"algorithm004-04/algorithm004-04","sub_path":"Week 01/id_049/LeetCode_21_049.py","file_name":"LeetCode_21_049.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"90"} +{"seq_id":"29058918430","text":"#Gets one thousand movies\nimport random\nimport getMovies\n##d = {'Terminator': [], 'Matrix': [], 'Kappa': []}\n##print(d[random.choice(list(d.keys()))])\n\n\n#random.randint(0,999)\nc = {} #CRITICS\nmovies = getMovies.main()\n\n\nfor x in range(100):\n c['random_user ' + str(x)] = []\n#print(c)\n\nlistOfCritics = list(c.keys())\nprint(listOfCritics)\n\nfor y in movies:\n for i in range(10):\n temp = random.choice(listOfCritics)\n randomrating = random.randint(0,1)\n rating = True\n if (randomrating == 0):\n rating = False\n c[temp] += (y, rating)\n movies[y] += (temp, rating)\n\nprint()\nprint('This is C')\nprint(c)\n\nprint()\nprint('This is D')\nprint(movies)\n\n \n \n","repo_name":"chamathsd/mycritic-desktop","sub_path":"movieDB.py","file_name":"movieDB.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"74785444777","text":"class Node():\n def __init__(self,data):\n self.data=data\n self.left=None\n self.right=None\nclass Solution():\n def invertTree(self,root):\n if root==None:\n return\n root.left,root.right=root.right,root.left\n self.invertTree(root.left)\n self.invertTree(root.right)\n return root\n\ntree=Node(1)\ntree.right=Node(2)\ntree.left=Node(3)\ntree.right.right=Node(5)\ntree.left.right=Node(4)\n\nvar=Solution()\ntree=var.invertTree(tree)\n\nprint(tree.data)\nprint(tree.right.data)\nprint(tree.left.data)\nprint(tree.right.right.data)\nprint(tree.left.right.data)\n\n","repo_name":"codejigglers/leetcodes","sub_path":"preparation/Trees/Invert_a_binary_tree.py","file_name":"Invert_a_binary_tree.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"29664319151","text":"def twoNumberSum(array, targetSum):\n result_array = []\n for i, value_at_i in enumerate(array):\n print(f'i-index-> {i}: {value_at_i}')\n remaining_arr = array[i+1:]\n for j, value_at_j in enumerate(remaining_arr):\n if(addToGetTargetSum(value_at_i, value_at_j, targetSum)):\n result_array.append(value_at_i)\n result_array.append(value_at_j)\n return result_array\n\ndef addToGetTargetSum(num1, num2, targetSum):\n return num1 + num2 == targetSum\n\ndef main():\n print(twoNumberSum([10, 5, 20, 7], 12))\n\nif __name__ == \"__main__\":\n main()","repo_name":"abirAbuAsim/algo_expert_practice","sub_path":"two_number_sum.py","file_name":"two_number_sum.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15733677942","text":"#-*- coding: UTF-8 -*-\n#MACRO\nUSE_API=False\nDEVELOP=True\nEVERYDAY = False\n\nimport logging\nfrom datetime import date\nfrom datetime import time\nfrom datetime import timedelta\nlogger = logging.getLogger(\"stock_log\")\n#formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)\nfile_handler = logging.FileHandler(\"stock_log.txt\",encoding='utf-8')\n#file_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\nlogger.setLevel(logging.DEBUG)\nclass oneday_input:\n today_date = None\n def __init__(self):\n self.total_post = 0\n 
self.online_peple=[]\n self.raise_post = 0\n self.down_post = 0\n self.all_postheader = []#list,每一项都是字符串,当天的帖子标题\n def get_online_peple(self,online_peple):\n self.online_peple=online_peple\n def get_all_postheader(self,all_postheader):\n self.all_postheader.append(all_postheader)\n def get_today_date(self,today_date):\n self.today_date = today_date\n def compute(self):\n#计算total_post\n self.total_post = len(self.all_postheader)\n#计算raise_post,down_post看多看空的帖子数\n raise_words = ['涨','多','弹','底','抄','买','牛','加']\n down_words = ['跌','空','顶','卖','水','减','熊','逃','跑']\n self.raise_post=0\n for post_header in self.all_postheader:\n for one_word in raise_words:\n if(one_word in post_header):\n self.raise_post+=1\n for one_word in down_words:\n if(one_word in post_header):\n self.down_post+=1\n def __del__(self):\n pass\ndef date_to_str(date):\n year = str(date.year)\n month = str(date.month)\n day = str(date.day)\n if(len(month)==1):\n month=\"0\"+month\n if(len(day)==1):\n day = \"0\"+day\n return year+'-'+month+'-'+day\n\nclass oneday_output:\n def __init__(self):\n self.shangzheng=0#百分比\n self.hushen300=0#百分比\n self.chengjiao=0#绝对值\n self.zhongxiaoban=0#百分比\nclass oneday_result:\n def __init__(self,date):\n import tushare as ts\n self.sh = ts.get_hist_data('sh',start=date_to_str(date),end=(date_to_str(date)),ktype='D')#获取上证指数k线数据,其它参数与个股一致,下同\ndef get_text_from_url(session,url,host,encoding):\n '''\n '''\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, compress',\n 'Accept-Language': 'en-us;q=0.5,en;q=0.3',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Host': host,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36' }\n session.headers.update(headers)\n r = session.get(url)\n r.encoding = encoding\n return r.text\n\ndef str_to_date(date_str):\n '''\n 把字符串日期或int日期转变为python date\n '''\n #from datetime import date\n if(type(date_str)==str):\n if(date_str[0:4]==\"2015\" or date_str[0:4]=='2016'):\n return date(int(date_str[0:4]), month=int(date_str[5:7]), day=int(date_str[8:10]))\n else:\n return None\n else:\n return None\n\nif __name__ == \"__main__\" and DEVELOP:\n one_day_try = oneday_result(date(year=2016,month=2,day=4))\n print(one_day_try.sh.p_change)\n #url_top = \"http://www.newsmth.net/nForum/#!board/Stock\"\n#登录\n url_list = \"http://m.newsmth.net/board/Stock?p=\"\n host = 'm.newsmth.net'\n total_read_pages = 500\n days_input = [oneday_input()]\n import requests\n session = requests.Session()\n session.post('http://m.newsmth.net/user/login', data={'id':'yigo3000','passwd':'7227510', 'save':'on'})\n#只运行一次,用于开发该程序\n for i in range(31,32):#total_read_pages+1):\n temp_posts=[]\n temp_text = get_text_from_url(session,url_list+str(i),host,\"utf-8\")\n #logger.debug(temp_text)\n #得到标题和日期\n import re\n onepost_patten = re.compile(r'(?P/article/.*?(?=\">))\">(?P.*?(?=)).*?(?=
)
(?P.*?)(?= )')\n # .*? .匹配任意字符,?非贪婪。 正则表达式的关键在于:卡住开头和结尾(固定字符),中间任意(.*?)\n temp_posts += onepost_patten.findall(temp_text)\n if(len(temp_posts)<20):\n logger.debug('page=%s: cant find post!' %i)\n #得到这个页面的所有标题,分配到对应的days_input的item中\n for k in range(len(temp_posts)):#每一个标题计算一次\n temp_date = str_to_date(temp_posts[k][2])#日期取出来\n if(temp_date!=None):\n if(days_input[0].today_date==None):#还没确定今天的日期\n days_input[0].get_today_date(temp_date)\n days_input[0].get_all_postheader(temp_posts[0][1])\n else:\n j=0\n while(j!=len(days_input)):#看看该归到哪个日期下面\n if(temp_date==days_input[j].today_date):\n days_input[j].get_all_postheader(temp_posts[k][1])\n break\n else:\n j+=1\n if(j==len(days_input)):#如果没有这个日期\n days_input.append(oneday_input())#增加一个新对象,有新的日期\n days_input[-1].get_today_date(temp_date)#设置日期\n days_input[-1].get_all_postheader(temp_posts[k][1])#把帖子标题添加进来\n #time.sleep(2)#避免被封号\n pass\n import pickle\n with open('smth_stock.pkl','wb') as pk:\n pickle.dump(days_input,pk)\n\n\nif __name__ == \"__main__\" and EVERYDAY:\n import pickle\n with open('smth_stock.pkl','rb') as pk:\n days_input = pickle.load(pk)\n#得到昨天的日期\n yesterday_date = date.today()-timedelta(1)\n#登录\n url_list = \"http://m.newsmth.net/board/Stock?p=\"\n host = 'm.newsmth.net'\n total_read_pages = 500\n days_input = [oneday_input()]\n import requests\n session = requests.Session()\n session.post('http://m.newsmth.net/user/login', data={'id':'yigo3000','passwd':'7227510', 'save':'on'})\n not_the_day_wanted_count = 0#不属于我们想要的天\n page_num=1\n while(not_the_day_wanted_count>10):\n temp_posts=[]\n temp_text = get_text_from_url(session,url_list+str(page_num),host,\"utf-8\")\n #logger.debug(temp_text)\n #得到标题和日期\n import re\n onepost_patten = re.compile(r'(?P/article/.*?(?=\">))\">(?P.*?(?=)).*?(?=
)
(?P.*?)(?= )')\n # .*? .匹配任意字符,?非贪婪。 正则表达式的关键在于:卡住开头和结尾(固定字符),中间任意(.*?)\n temp_posts += onepost_patten.findall(temp_text)\n #得到这个页面的所有标题,分配到对应的days_input的item中\n\n for i in range(len(temp_posts)):#每一个标题计算一次\n temp_date = str_to_date(temp_posts[i][2])#日期取出来\n if(temp_date!=None):\n if(temp_date!=yesterday_date):#不想要\n not_the_day_wanted_count+=1\n pass\n else:\n j=0\n if(j==0):#如果没有这个日期\n days_input.append(oneday_input())#增加一个新对象,有新的日期\n days_input[-1].get_today_date(temp_date)#设置日期\n days_input[-1].get_all_postheader(temp_posts[i][1])#把帖子标题添加进来\n not_the_day_wanted_count=0\n j+=1\n else:\n days_input[-1].get_all_postheader(temp_posts[i][1])#把帖子标题添加进来\n\n page_num+=1\n time.sleep(3)#避免被封号\n#每天运行:\n '''\n import datetime\n while(1):\n now = datetime.datetime.now()\n if(now.minute ==25):#整点时读取主页的“在线人数”等\n temp_text = get_text_from_url(url_list,\"http://www.newsmth.net\",\"utf-8\")\n datetime.time.sleep(3590)\n else:\n datetime.time.sleep(50)\n'''\n\n\nif __name__ == \"__main__\" and USE_API:\n import byrapi\n token = 'xxxxx'\n API = byrapi.Byr(token)\n print(API.get_user_info(\"yigo3000\"))\n pass","repo_name":"yigo3000/stock","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"43632695516","text":"import xgboost as xgb\nimport re\nfrom sklearn.externals import joblib\noe = joblib.load(\"xgb_encoder.pkl\")\nfeature_list = oe.get_feature_names().tolist()\nfor ind in range(len(feature_list)):\n\tfeature_list[ind] = re.sub(\"[\\,\\<\\>\\[\\]]\",\" \",feature_list[ind])\nprint(len(feature_list))\ndtrain = xgb.DMatrix('xgb_matrix_train.txt')#,feature_names=[\"placeholder\"]+feature_list)\ndvalid = xgb.DMatrix(\"xgb_matrix_valid.txt\")#,feature_names=[\"placeholder\"]+feature_list)\nparam = {\"gamma\":0,'max_depth': 10, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic',\"lambda\":0.01}\nparam['nthread'] = 16\nparam['eval_metric'] = 'auc'\nevallist = [(dtrain, 'train'),(dvalid,\"valid\")]\nnum_round = 100\nbst = xgb.train(param, dtrain, num_round, evallist)\nbst.save_model('simple.model')\n","repo_name":"roddink/CAD2","sub_path":"xgb_model.py","file_name":"xgb_model.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"28897392000","text":"import cv2\nfrom os.path import join\nfrom time import sleep\n\nname = input(\"Name: \")\n\ncap = cv2.VideoCapture(0)\npath = \"images/train_data\"\niters = 100\n\nprint('Start capturing now...')\n\nfor i in range(1, iters+1):\n _, frame = cap.read()\n\n filename = \"{}_{}.jpg\".format(name, i)\n cv2.imwrite(join(path, filename), frame)\n\n print(\"Captured for image {}\".format(i))\n # sleep(0.08)\n\nprint(\"Completed\")\n","repo_name":"nghiavt2906/test","sub_path":"collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18721501366","text":"def day_03():\n with open(\"/Users/arun.yusuf/PycharmProjects/advent_of_code_2022/day_03_input.txt\") as d:\n d_list = d.read().split(\"\\n\")\n\n answer_1 = 0\n answer_2 = 0\n counter = 0\n\n for ruck in d_list:\n skip = \"\"\n counter += 1\n for char in ruck[len(ruck)//2:]:\n if char in ruck[:len(ruck)//2]:\n if char not in skip:\n if ord(char) > 96:\n answer_1 += ord(char) - 96\n skip += char\n else:\n answer_1 += ord(char) - 38\n skip += 
char\n if counter == 1:\n elf_one = ruck\n if counter == 2:\n elf_two = ruck\n if counter == 3:\n for char in ruck:\n if char in elf_one and char in elf_two:\n if ord(char) > 96:\n answer_2 += ord(char) - 96\n else:\n answer_2 += ord(char) - 38\n break\n counter = 0\n\n return answer_1, answer_2\n","repo_name":"arunyusuf/advent_of_code_2022","sub_path":"day_03.py","file_name":"day_03.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18015836359","text":"N = int(input())\nA = list(map(int, input().split()))\n\ncumulative = [0]\nA.sort()\nfor i in range(N):\n cumulative.append(cumulative[i] + A[i])\n# print(A)\n# print(cumulative)\n\nfor i in range(N - 1, 0, -1):\n # print(i)\n if A[i] > cumulative[i] * 2:\n print(N - i)\n break\nelse:\n print(N)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03786/s149696425.py","file_name":"s149696425.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70236372137","text":"import random\nimport os\nimport logging\nimport sys\n\n# Adjust the path to include helper classes and functions\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../helper_classes_and_functions'))\nfrom config import BLOOD_PRESSURE_SOURCE_NAME, DIASTOLIC_RANGE, PATIENT_INFO_NAME, SYSTOLIC_RANGE\nfrom source_data_sender import SourceDataSender\nfrom config_loader import ConfigLoader\n\n# Configure logging for the script\nlogging.basicConfig(level=logging.INFO)\n\n\ndef generate_random_blood_pressure(patient_id):\n \"\"\"\n Generates a random blood pressure value.\n \"\"\"\n \n systolic = random.randint(*SYSTOLIC_RANGE)\n diastolic = random.randint(*DIASTOLIC_RANGE)\n return {\n \"Patient_ID\": patient_id,\n \"systolic\": systolic,\n \"diastolic\": diastolic\n }\n\n\n\nif __name__ == \"__main__\":\n patient_info_path = os.path.join(os.path.dirname(__file__), '../../consume_patient_details/patient_info.json')\n patient_info_config_loader = ConfigLoader(patient_info_path)\n patient_details = patient_info_config_loader.load_config(PATIENT_INFO_NAME)\n patient_id = patient_details[\"Patient_ID\"]\n\n config_file_path = os.path.join(os.path.dirname(__file__), '../../config/config.json')\n \n \n try:\n # Load configurations and initialize sender\n config_loader = ConfigLoader(config_file_path)\n config = config_loader.load_config(BLOOD_PRESSURE_SOURCE_NAME)\n sender = SourceDataSender(config)\n \n # Send continuous data\n sender.send_continuous_data(BLOOD_PRESSURE_SOURCE_NAME, lambda: generate_random_blood_pressure(patient_id))\n \n except Exception as e:\n logging.error(f\"Error: {e}\")\n\n\n","repo_name":"Licht2050/OP-Daten-Generator","sub_path":"vitalparam/blood_pressure/blood_pressure.py","file_name":"blood_pressure.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17960683134","text":"boolean = {\n True: 'Sim',\n False: 'Nao',\n}\niboolean = {v: k for k, v in boolean.items()}\n\nconsultas = {\n 0: 'Ignorado',\n 1: 'Nenhuma',\n 2: 'de_1_a_3',\n 3: 'de_4_a_6',\n 4: '7_e_mais'\n}\niconsultas = {v: k for k, v in consultas.items()}\n\nescmae = {\n 0: 'Ignorado',\n 1: '0',\n 2: 'Nenhuma',\n 3: '1_a_3_anos',\n 4: '4_a_7_anos',\n 5: '8_a_11_anos',\n 6: '12_e_mais'\n}\niescmae = {v: k for k, v in escmae.items()}\n\nescmae2010 = {\n 0: 'Ignorado',\n 1: 
'Sem_escolaridade',\n 2: 'Fundamental_II_(5a_a_8a_serie)',\n 3: 'Fundamental_I_(1a_a_4a_serie)',\n 4: 'Medio_(antigo_2o_Grau)',\n 5: 'Superior_incompleto',\n 6: 'Superior_completo'\n}\niescmae2010 = {v: k for k, v in escmae2010.items()}\n\nescmaeagr1 = {\n 0: 'Ignorado',\n 1: 'Sem_Escolaridade',\n 2: 'Fundamental_II_Incompleto_ou_Inespecifico',\n 3: 'Fundamental_I_Incompleto',\n 4: 'Ensinomedio_Incompleto_ou_Inespecifico',\n 5: 'Fundamental_II_Completo',\n 6: 'Fundamental_I_Incompleto_ou_Inespecifico',\n 7: 'Ensino_Medio_Completo',\n 8: 'Fundamental_II_Incompleto',\n 9: 'Superior_Incompleto',\n 10: 'Fundamental_I_Completo',\n 11: 'Ensino_Medio_Incompleto',\n 12: 'Superior_Completo'\n}\niescmaeagr1 = {v: k for k, v in escmaeagr1.items()}\n\nestcivmae = {\n 0: 'Ignorado',\n 1: 'Casada',\n 2: 'Solteira',\n 3: 'Uniao_consensual_(versoes_anteriores)',\n 4: 'Separado_judicialmente/Divorciado',\n 5: 'Viuva'\n}\niestcivmae = {v: k for k, v in estcivmae.items()}\n\ngestacao = {\n 0: 'Ignorado',\n 1: 'Menos_de_22_semanas',\n 2: '22_a_27_semanas',\n 3: '28_a_31_semanas',\n 4: '32_a_36_semanas',\n 5: '37_a_41_semanas',\n 6: '42_semanas_e_mais'\n}\nigestacao = {v: k for k, v in gestacao.items()}\n\ngravidez = {\n 0: 'Ignorado',\n 1: 'Unica',\n 2: 'Dupla',\n 3: 'Tripla_e_mais'\n}\nigravidez = {v: k for k, v in gravidez.items()}\n\nidanomal = {\n 0: 'Ignorado',\n 1: 'Nao',\n 2: 'Sim'\n}\niidanomal = {v: k for k, v in idanomal.items()}\n\nlocnasc = {\n 0: 'Ignorado',\n 1: 'Outros',\n 2: 'Outro_Estab_Saude',\n 3: 'Hospital',\n 4: 'Domicilio'\n}\nilocnasc = {v: k for k, v in locnasc.items()}\n\nparto = {\n 0: 'Cesareo',\n 1: 'Vaginal'\n}\niparto = {v: k for k, v in parto.items()}\n\nsexo = {\n 0: 'Ignorado',\n 1: 'Feminino',\n 2: 'Masculino'\n}\nisexo = {v: k for k, v in sexo.items()}\n\nstcesparto = {\n 0: 'Ignorado',\n 1: 'Nao_se_aplica',\n 2: 'Nao',\n 3: 'Sim'\n}\nistcesparto = {v: k for k, v in stcesparto.items()}\n\nsttrabpart = {\n 0: 'Ignorado',\n 1: 'Nao',\n 2: 'Sim',\n}\nisttrabpart = {v: k for k, v in sttrabpart.items()}\n\ntpapresent = {\n 0: 'Ignorado',\n 1: 'Cefalico',\n 2: 'Pelvica_ou_podalica',\n 3: 'Transversa',\n}\nitpapresent = {v: k for k, v in tpapresent.items()}\n\ntpdocresp = {\n 0: 'Ignorado',\n 1: 'RG',\n 2: 'CPF',\n 3: 'COREN',\n 4: 'CNES',\n 5: 'CRM',\n}\nitpdocresp = {v: k for k, v in tpdocresp.items()}\n\ntpfuncresp = {\n 0: 'Medico',\n 1: 'Enfermeiro',\n 2: 'Funcionario_do_cartorio',\n 3: 'Parteira',\n 4: 'Outros',\n}\nitpfuncresp = {v: k for k, v in tpfuncresp.items()}\n\ntpmetestim = {\n 0: 'Ignorado',\n 1: 'Outro_metodo',\n 2: 'Exame_fisico',\n}\nitpmetestim = {v: k for k, v in tpmetestim.items()}\n\ntpnascassi = {\n 0: 'Ignorado',\n 1: 'Medico',\n 2: 'Parteira',\n 3: 'Enfermeira/obstetriz',\n 4: 'Outros',\n}\nitpnascassi = {v: k for k, v in tpnascassi.items()}\n","repo_name":"ascle/etl_bases_tcc","sub_path":"utils/dicionario_lkg.py","file_name":"dicionario_lkg.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"31754486726","text":"# coding=utf-8\n\n\ndef get_conditions(questions, n):\n questions.sort(key=lambda x: x[\"similarity\"], reverse=True)\n if questions[0][\"similarity\"] < 50:\n return {}\n num = n\n difficulty, point_id, black, teach_item_type = [], [], [], []\n subject = questions[0][\"subject\"]\n k = 0\n for question in questions:\n if \"knowledge_points\" in question and question[\"knowledge_points\"] and k <= 5:\n if question[\"similarity\"] >= 50:\n 
black.append(question['id'])\n d = question[\"knowledge_points\"][0][\"difficulty\"]\n p = question[\"knowledge_points\"][0][\"point_id\"]\n t = question[\"knowledge_points\"][0][\"teach_item_type\"]\n if d and d not in difficulty:\n difficulty.append(d)\n if p and p not in point_id:\n point_id.append(p)\n if t and t not in teach_item_type:\n teach_item_type.append(t)\n k += 1\n if subject != 0:\n continue\n elif question[\"subject\"] != 0:\n subject = question[\"subject\"]\n\n conditions = {\"difficulty\": difficulty, \"point_id\": point_id, \"teach_item_type\": teach_item_type, \\\n \"num\": num, \"black\": black, \"subject\": subject}\n\n return conditions\n\n\n'''\ndef get_conditions(questions, n):\n num = n\n difficulty, point_id, black, teach_item_type = [], [], [], []\n rank_high, rank_rec = rank_filter(questions, 'high'), rank_filter(questions, 'rec')\n if rank_high:\n subject = rank_high[-1][\"subject\"]\n elif not rank_high:\n subject = rank_rec[0][\"subject\"]\n for i in range(len(rank_high)):\n black.append(rank_high[i]['id'])\n for j in range(len(rank_rec)):\n d = rank_rec[j][\"knowledge_points\"][0][\"difficulty\"]\n p = rank_rec[j][\"knowledge_points\"][0][\"point_id\"]\n t = rank_rec[j][\"knowledge_points\"][0][\"teach_item_type\"]\n if d not in difficulty:\n difficulty.append(d)\n if p not in point_id:\n point_id.append(p)\n if t not in teach_item_type:\n teach_item_type.append(t)\n\n conditions = {\"difficulty\": difficulty, \"point_id\": point_id, \"teach_item_type\": teach_item_type,\\\n \"num\": num, \"black\": black, \"subject\": subject}\n\n return conditions\n\n\ndef rank_filter(questions, rank):\n questions_ranked = [] # similarity filter\n for i in range(len(questions)):\n if len(questions_ranked) == 5:\n break\n if rank == 'rec':\n if questions[i]['similarity'] < 50 and questions[i]['index_id'] == 0 and questions[i]['knowledge_points']:\n questions_ranked.append(questions[i])\n elif rank == 'high':\n if questions[i]['similarity'] >= 50 and questions[i]['index_id'] == 0:\n questions_ranked.append(questions[i])\n\n return questions_ranked\n'''","repo_name":"th0masli/question_recommendation","sub_path":"question/condition_rec.py","file_name":"condition_rec.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72880936618","text":"name = \"helloworld\"\n\nfor i in name:\n print(i)\nelse:\n print(name)\n\ni = 1\nwhile True:\n print(\"i am superMan\")\n i = i + 1\n if i == 5:\n break\n\nj = 0\nwhile j < 5:\n print(\"i am superWoman\")\n j += 1\nelse:\n print(\"退出循环\")\n","repo_name":"DreamerPXY/python-study-project","sub_path":"一、基础语法/03_流程流转.py","file_name":"03_流程流转.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15691525639","text":"### Brandon Python Slot Machine\r\nimport random\r\n#Variables\r\nwallet = 10\r\nspin_counter = 0\r\n\r\noutput_list = [\"@\", \"#\", \"7\"]\r\noutput_1 = output_list[random.randrange(len(output_list))]\r\noutput_2 = output_list[random.randrange(len(output_list))]\r\noutput_3 = output_list[random.randrange(len(output_list))]\r\n\r\nprint(\"Welcome To Brandon Python Slot Machine!\")\r\nprint(\"Here's 10 Coins On The House! 
\")\r\nprint(\"Cain Wallet = \", wallet)\r\n\r\ndef gamestart():\r\n global coin_input\r\n try:\r\n coin_input = int(input(\"Insert Coins (insert 0 to end game): \"))\r\n except ValueError:\r\n print(\"Give Me A Number!\")\r\n gamestart()\r\n\r\n if coin_input > wallet:\r\n print(coin_input, \"Is Not Enough!\")\r\n gamestart()\r\n elif coin_input == wallet:\r\n print(\"All Or Nothing Baby!\")\r\n slot_spin()\r\n elif coin_input == 0:\r\n print(\"You Came Out With: \", wallet, \"Coins!\")\r\n print(\"See You Next Time!\")\r\n quit()\r\n else:\r\n slot_spin()\r\n\r\ndef slot_spin():\r\n global coin_input\r\n global wallet\r\n global spin_counter\r\n wallet -= coin_input\r\n while coin_input > 0:\r\n global output_1\r\n output_1 = output_list[random.randrange(len(output_list))]\r\n global output_2\r\n output_2 = output_list[random.randrange(len(output_list))]\r\n global output_3\r\n output_3 = output_list[random.randrange(len(output_list))]\r\n\r\n coin_input = coin_input - 1\r\n spin_counter += 1\r\n\r\n print(f\" Spin:\", spin_counter)\r\n print(\" +=======+\")\r\n print(f\" [\", output_1, output_2, output_3, \"]\")\r\n print(\" +=======+\")\r\n\r\n if output_1 == output_2 and output_1 == output_3:\r\n wallet += 10\r\n print(\" JACKPOT!!!!\")\r\n else:\r\n print(\" Better Luck Next Time!\")\r\n print(\" \")\r\n print(\"Wallet = \", wallet)\r\n\r\n if wallet == 0:\r\n print(\" GAME OVER\")\r\n quit()\r\n\r\n gamestart()\r\n\r\ngamestart()\r\n\r\nprint(\"end\")","repo_name":"BrandonPham977/Brandon_Portfolio","sub_path":"Brandon's_Python_Slot_Mahine_Ver_1.py","file_name":"Brandon's_Python_Slot_Mahine_Ver_1.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70904587497","text":"import sys\nfrom collections import deque\n\nN = int(sys.stdin.readline())\ndeq = deque()\n\nfor _ in range(N):\n data = sys.stdin.readline().split()\n\n if data[0] == 'push_front':\n deq.appendleft(data[1])\n elif data[0] == 'push_back':\n deq.append(data[1])\n elif data[0] == 'pop_front':\n if len(deq) > 0:\n print(deq.popleft()) \n else:\n print(-1)\n elif data[0] == 'pop_back':\n if len(deq) > 0:\n print(deq.pop())\n else:\n print(-1)\n elif data[0] == 'size':\n print(len(data[0]))\n elif data[0] == 'empty':\n if len(data) == 0:\n print(1)\n else:\n print(0)\n elif data[0] == 'front':\n if len(data) > 0:\n print(deq[0])\n else:\n print(-1)\n elif data[0] == 'back':\n if len(data) > 0:\n print(deq[-1])\n else:\n print(-1)\n print(deq)\n","repo_name":"dohun31/algorithm","sub_path":"2021/week_01/큐/1966.py","file_name":"1966.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"20767345901","text":"# https://www.acmicpc.net/problem/2606\n\nimport sys\nimport collections\n\n\ninput = sys.stdin.readline\nn = int(input())\nm = int(input())\ngraph = collections.defaultdict(list)\n\nfor _ in range(m):\n N, M = map(int, input().split())\n graph[N].append(M)\n graph[M].append(N)\n\n\ndef bfs(current):\n route = [current]\n q = collections.deque([current])\n\n while q:\n pop = q.popleft()\n for node in graph[pop]:\n if node not in route:\n route.append(node)\n q.append(node)\n\n return 
route[1:]\n\n\nprint(len(bfs(1)))\n","repo_name":"feVeRin/Algorithm","sub_path":"problems/2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"20444696458","text":"from rest_framework import viewsets, status\nfrom .models import Conversation, Message, Notice\nfrom .serializers import ConversationSerializer, MessageSerializer\nfrom rest_framework.decorators import list_route\nfrom rest_framework.response import Response\nfrom redzza import utils\n\n\nclass ConversationViewSet(viewsets.ModelViewSet):\n queryset = Conversation.objects.all()\n serializer_class = ConversationSerializer\n http_method_names = ['get', 'head', 'delete']\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if request.user != instance.notice.profile.user:\n return Response({'success': False, 'err': 'user-unauthorized'}, status=status.HTTP_401_UNAUTHORIZED)\n self.perform_destroy(instance)\n return Response({'success': True})\n\n def list(self, request):\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def retrieve(self, request, pk=None):\n try:\n conversation = Conversation.getConversation(pk)\n if utils.getProfile(request.user) not in conversation.contestant.all():\n return Response({'success': False, 'err': 'user-unauthorized'}, status=status.HTTP_401_UNAUTHORIZED)\n conversations = [conversation]\n context = []\n for conversation in conversations:\n listContestants = utils.getProfileSimple(conversation.contestant.all())\n listReviews = utils.getProfileSimple(conversation.review.all())\n listNotices = utils.getDataNotice(conversation.notice.all(), fullData=False)\n listMessages = utils.getDataMessages(Message.search(conversation))\n context.append({'id': conversation.id, 'modified': conversation.modified, 'contestants': listContestants, 'notices': listNotices, 'reviews': listReviews, 'messages': listMessages})\n return Response({'success': True, 'data': context})\n except Exception as e:\n if hasattr(e, 'message'):\n err = e.message\n else:\n err = e\n return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n\nclass MessageViewSet(viewsets.ModelViewSet):\n queryset = Message.objects.all()\n serializer_class = MessageSerializer\n\n\nclass ApiServicesViewSet(viewsets.ViewSet):\n\n # Inicio de conversacion\n @list_route(methods=['post'])\n def startConversation(self, request):\n try:\n user = request.user\n profileSender = utils.getProfile(user)\n idNotice = request.data.get('notice', None)\n notice = None if idNotice is None else Notice.getNotice(idNotice)\n idUserReceiver = request.data.get('user', None)\n profileReceiver = utils.getProfile(utils.getUser(idUserReceiver)) if notice is None else notice.profile\n profiles = []\n profiles.append(profileReceiver)\n profiles.append(profileSender)\n text = request.data.get('text', None)\n image = request.data.get('image', None)\n if profiles[0] == profiles[1]:\n return Response({'success': False, 'err': 'Message to myself not allowed'}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n if (text or image) and len(profiles) > 0:\n conversation = Conversation.create(profiles, notice)[0][0]\n Message.create(text, image, profileSender, conversation)\n utils.sendEmail(profiles[0].user.email, 'notifications/new_message.html')\n return Response({'success': True, 'msg': 'conversation-created'}, status=status.HTTP_201_CREATED)\n else:\n return Response({'success': False, 
'err': 'Incomplete data'}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n if hasattr(e, 'message'):\n err = e.message\n else:\n err = e\n return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Agrega mensaje a una conversacion\n @list_route(methods=['post'])\n def addMessage(self, request):\n # try:\n user = request.user\n profileSender = utils.getProfile(user)\n idConversation = request.data.get('conversation', None)\n conversation = Conversation.getConversation(idConversation)\n text = request.data.get('text', None)\n image = request.data.get('image', None)\n\n if (text or image) and conversation and profileSender:\n Message.create(text, image, profileSender, conversation)\n contestants = conversation.contestant.all()\n for profile in contestants:\n if profile != profileSender:\n utils.sendEmail(profile.user.email, 'notifications/new_message.html')\n return Response({'success': True, 'msg': 'message-created'}, status=status.HTTP_201_CREATED)\n else:\n return Response({'success': False, 'err': 'Incomplete data'}, status=status.HTTP_400_BAD_REQUEST)\n # except Exception as e:\n # if hasattr(e, 'message'):\n # err = e.message\n # else:\n # err = e\n # return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Obtiene inbox de un usuario\n @list_route(methods=['get'])\n def getInbox(self, request):\n try:\n user = request.user\n profile = utils.getProfile(user)\n context = []\n conversations = Conversation.search(profile)\n for conversation in conversations:\n listContestants = utils.getProfileSimple(conversation.contestant.all())\n listReviews = utils.getProfileSimple(conversation.review.all())\n listNotices = utils.getDataNotice(conversation.notice.all(), fullData=False)\n listMessages = utils.getDataMessages(Message.search(conversation))\n context.append({'id': conversation.id, 'modified': conversation.modified, 'contestants': listContestants, 'notices': listNotices, 'reviews': listReviews, 'messages': listMessages})\n return Response({'success': True, 'data': context})\n except Exception as e:\n if hasattr(e, 'message'):\n err = e.message\n else:\n err = e\n return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Obtiene numero de notificaciones de un usuario\n @list_route(methods=['get'])\n def getCountNotifications(self, request):\n try:\n user = request.user\n profile = utils.getProfile(user)\n count = Conversation.countNotifications(profile)\n return Response({'success': True, 'count': count})\n except Exception as e:\n if hasattr(e, 'message'):\n err = e.message\n else:\n err = e\n return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Leido de conversacion\n @list_route(methods=['post'])\n def reviewConversation(self, request):\n try:\n user = request.user\n idProfile = utils.getProfile(user).id\n idConversation = request.data.get('conversation', None)\n Conversation.addReview(idProfile, idConversation)\n return Response({'success': True, 'msg': 'conversation-review'})\n except Exception as e:\n if hasattr(e, 'message'):\n err = e.message\n else:\n err = e\n return Response({'success': False, 'err': str(err)}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n","repo_name":"larry852/redzza","sub_path":"inbox/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} 
+{"seq_id":"7307142729","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom array import ArrayType\nimport sys\nimport threading\n\nsys.setrecursionlimit(10 ** 7) # max depth of recursion\nthreading.stack_size(2 ** 27) # new thread will get stack of such size\n\n\nclass TreeHeight:\n\n def read(self):\n self.n = int(sys.stdin.readline())\n self.parents = ArrayType(\"l\", (int(w) for w in\n sys.stdin.readline().split()))\n self.heights = ArrayType(\"l\", (0 for p in self.parents))\n\n def height(self, node):\n if not self.heights[node]:\n parent = self.parents[node]\n self.heights[node] = self.height(parent)\n\n if self.heights[node] == 1:\n return 1\n\n def compute_height(self):\n heights = ArrayType(\"h\", [0] * self.n)\n pending = [(node, parent) for node, parent in enumerate(self.parent)]\n while pending:\n postponed = []\n for node, parent in pending:\n if parent == -1:\n heights[node] = 1\n elif heights[parent]:\n heights[node] = heights[parent] + 1\n else:\n postponed.append((node, parent))\n pending = postponed\n return max(heights)\n\n\ndef main():\n tree = TreeHeight()\n tree.read()\n print(tree.compute_height())\n\n\nthreading.Thread(target=main).start()\n","repo_name":"pointtonull/algorithms","sub_path":"data_structures/P2 - Tree height/tree_height.py","file_name":"tree_height.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"5042837762","text":"def main():\n N = int(input())\n A = [list(map(int, input().split())) for _ in range(N)]\n INF = 10**18\n for i in range(N):\n A[i][i] = INF\n\n ans = 0\n for u in range(N):\n for v in range(u + 1, N):\n min_dist = INF\n for w in range(N):\n min_dist = min(min_dist, A[u][w] + A[w][v])\n if min_dist < A[u][v]:\n print(-1)\n exit()\n if A[u][v] < min_dist:\n ans += A[u][v]\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"valusun/Compe_Programming","sub_path":"AtCoder/ABC/ABC074/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"32936569492","text":"inFile = open('Homo_sapiens.GRCh37.75.lncrna.fa.fa.pep')\nouFile1 = open('Homo_sapiens.GRCh37.75.lncrna.fa.fa.pep-1', 'w')\nouFile2 = open('Homo_sapiens.GRCh37.75.lncrna.fa.fa.pep-2', 'w')\nD = {}\nwhile True:\n line1 = inFile.readline().strip()\n line2 = inFile.readline().strip()\n if line1:\n if line2.find('*') == -1:\n ouFile1.write(line1+ '\\n')\n ouFile1.write(line2+ '\\n')\n else:\n peps = line2.split('*')\n for i,x in enumerate(peps):\n if len(x) >= 95:\n D.setdefault(x, [])\n D[x].append(line1 + ':' + str(i))\n else:\n for k in D:\n ouFile2.write('\\t'.join(D[k])+'\\n')\n ouFile2.write(k + '\\n')\n break\ninFile.close()\nouFile1.close()\nouFile2.close()\n","repo_name":"wanghuanwei-gd/SIBS","sub_path":"lncRNA-MSMS/6-stop.py","file_name":"6-stop.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17358709554","text":"from common import *\r\ndata = json.load(open(\"orbiter.json\")) #copy from https://www.orbiter.finance/static/js/app.ce6f5863.js and using demjson.decode and json.dumps\r\n\r\ndef chainname(c):\r\n return {\"mainnet\":\"Ethereum\", \"arbitrum\":\"Arbitrum\", \"optimism\":\"Optimism\", \"polygon\":\"Polygon\", \"zksync\":\"zkSync\"}.get(c,c)\r\n\r\nres = [HEADER]\r\nfor i in data:\r\n print(chainname(i[\"c1Name\"]), 
chainname(i[\"c2Name\"]))\r\n #[\"bridge\",\"srcchain\",\"srctoken\",\"dstchain\",\"dsttoken\",\"srctoken_contract\",\"dsttoken_contract\",\"srcholder\",\"dstholder\",\"isopen\",\"fee_fixed\",\"fee_percent\",\"fee_minfee\",\"fee_maxfee\",\"minamount\", \"liquidity\"]\r\n res.append([\"Orbiter\", chainname(i[\"c1Name\"]), i[\"tName\"], chainname(i[\"c2Name\"]), i[\"tName\"],\r\n i[\"t1Address\"], i[\"t2Address\"], i[\"makerAddress\"], i[\"makerAddress\"],\r\n True, i[\"c1TradingFee\"], i[\"c1GasFee\"]/10, 0, 0, i[\"c1MinPrice\"], i[\"c1MaxPrice\"],\r\n \"\"\r\n ])\r\n res.append([\"Orbiter\", chainname(i[\"c2Name\"]), i[\"tName\"], chainname(i[\"c1Name\"]), i[\"tName\"],\r\n i[\"t2Address\"], i[\"t1Address\"], i[\"makerAddress\"], i[\"makerAddress\"],\r\n True, i[\"c2TradingFee\"], i[\"c2GasFee\"]/10, 0, 0, i[\"c2MinPrice\"], i[\"c2MaxPrice\"],\r\n \"\"\r\n ])\r\nwritecsv(\"orbiter.txt\", res)","repo_name":"DeFiEye/BridgeEye","sub_path":"crosschain/orbiter.py","file_name":"orbiter.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"90"} +{"seq_id":"33835669710","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n' description of this module '\n\n__author__ = 'Marvin Huang'\n\nclass Student(object):\n count = 0\n\n def __init__(self, name):\n self.name = name\n Student.count += 1\n # __init__本质是每次添加新的实例时就会调用\n # 所以一切与新加实例相关的都可以写在它的下面\n\n\n# 测试:\nif Student.count != 0:\n print('测试失败!')\nelse:\n bart = Student('Bart')\n if Student.count != 1:\n print('测试失败!')\n else:\n lisa = Student('Bart')\n if Student.count != 2:\n print('测试失败!')\n else:\n print('Students:', Student.count)\n print('测试通过!')\n\n\n","repo_name":"MarvinHuang92/Marvin_Haves_Fun_2309","sub_path":"40_Python/201712_Practice_Python/类属性和实例属性的测试.py","file_name":"类属性和实例属性的测试.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36575711780","text":"\"\"\"Feature Engineering functions for various activities like outlier treatment, encoding, missing value imputation.\r\n\r\nThe intention here is to have a Pipeline and not model. 
Our focus is to set it up in such a way that\r\nit can be saved/loaded, tweaked for different model choices and so on.\r\n\r\n\"\"\"\r\nimport os\r\nimport os.path as op\r\nimport panel as pn\r\nfrom category_encoders import OneHotEncoder\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.impute import SimpleImputer\r\nfrom sklearn.pipeline import Pipeline\r\n\r\nimport ta_lib.core.api as dataset\r\nimport ta_lib.eda.api as ta_analysis\r\nfrom scripts import CustomFeatureGeneration\r\nfrom ta_lib.core.api import (\r\n DEFAULT_ARTIFACTS_PATH,\r\n DEFAULT_DATA_BASE_PATH,\r\n get_dataframe,\r\n get_feature_names_from_column_transformer,\r\n get_package_path,\r\n load_dataframe,\r\n load_dataset,\r\n register_processor,\r\n save_pipeline,\r\n)\r\nfrom ta_lib.data_processing.api import Outlier, WoeBinningTransformer\r\n\r\npn.extension(\"bokeh\")\r\n\r\n\r\nos.environ[\"TA_DEBUG\"] = \"False\"\r\nos.environ[\"TA_ALLOW_EXCEPTIONS\"] = \"True\"\r\n\r\n\r\ndef _columns_to_drop(\r\n corr_table, features_to_be_dropped=[], corr_threshold=0.6\r\n):\r\n \"\"\"List features with low correlation.\"\"\"\r\n\r\n corr_table = corr_table.sort_values(\"Variable 1\")\r\n for index, row in corr_table.iterrows():\r\n if row[\"Abs Corr Coef\"] > corr_threshold:\r\n if row[\"Variable 1\"] not in features_to_be_dropped:\r\n features_to_be_dropped.append(row[\"Variable 2\"])\r\n return features_to_be_dropped\r\n\r\n\r\n@register_processor(\"feat-engg\", \"transform-features\")\r\ndef transform_features(context, params):\r\n \"\"\"Perform feature transformation and outlier treatment.\"\"\"\r\n artifacts_folder = DEFAULT_ARTIFACTS_PATH\r\n\r\n # load datasets\r\n train_X = dataset.load_dataset(context, \"train/attrition/features\")\r\n train_y = dataset.load_dataset(context, \"train/attrition/target\")\r\n test_X = dataset.load_dataset(context, \"test/attrition/features\")\r\n test_y = dataset.load_dataset(context, \"test/attrition/target\")\r\n\r\n ref_date_for_churn = params[\"ref_date\"]\r\n agg_methods = params[\"agg_methods\"]\r\n\r\n # NOTE: You can use ``Pipeline`` to compose a collection of transformers\r\n # into a single transformer. 
In this case, we are composing a\r\n # customerFeatureGeneration class for feature engineering\r\n\r\n feature_generation_ppln = Pipeline(\r\n [\r\n (\r\n \"custom_features_generation\",\r\n CustomFeatureGeneration(\r\n context=context,\r\n ref_date=ref_date_for_churn,\r\n agg_methods=agg_methods,\r\n ),\r\n ),\r\n ]\r\n )\r\n\r\n train_X = feature_generation_ppln.fit_transform(train_X, train_y)\r\n test_X = feature_generation_ppln.transform(test_X)\r\n train_X.set_index(\"customer_code\", inplace=True)\r\n test_X.set_index(\"customer_code\", inplace=True)\r\n train_y.set_index(\"customer_code\", inplace=True)\r\n test_y.set_index(\"customer_code\", inplace=True)\r\n\r\n bin_columns = [\r\n \"overall_avg_pri_sales_amount\",\r\n \"overall_avg_roi_without_udaan_with_sub\",\r\n ]\r\n binning_transformer = ColumnTransformer(\r\n [(\"binn\", WoeBinningTransformer(encode=\"onehot\"), bin_columns)],\r\n remainder=\"passthrough\",\r\n )\r\n\r\n train_X_binned = get_dataframe(\r\n binning_transformer.fit_transform(train_X, train_y[\"target\"]),\r\n get_feature_names_from_column_transformer(binning_transformer),\r\n )\r\n\r\n train_X_binned.index = train_X.index\r\n train_X = train_X_binned.infer_objects()\r\n\r\n outlier_transformer = Outlier(method=\"percentile\")\r\n train_X = outlier_transformer.fit_transform(train_X)\r\n train_y = train_y.loc[train_X.index]\r\n\r\n cat_columns = train_X.select_dtypes(\"object\").columns\r\n num_columns = train_X.select_dtypes(\"number\").columns\r\n\r\n feature_transformation_ppln = ColumnTransformer(\r\n [\r\n (\r\n \"onehot_encoding\",\r\n OneHotEncoder(use_cat_names=True),\r\n cat_columns,\r\n ),\r\n (\r\n \"simple_imputation_constant\",\r\n SimpleImputer(strategy=\"constant\", fill_value=0),\r\n list(set(num_columns) - set([\"days_in_business\"])),\r\n ),\r\n (\r\n \"simple_imputation_median\",\r\n SimpleImputer(strategy=\"median\"),\r\n [\"days_in_business\"],\r\n ),\r\n ]\r\n )\r\n\r\n train_X = get_dataframe(\r\n feature_transformation_ppln.fit_transform(train_X, train_y),\r\n get_feature_names_from_column_transformer(feature_transformation_ppln),\r\n )\r\n\r\n corr_df = ta_analysis.get_correlation_table(train_X[num_columns])\r\n corr_df_drop = corr_df[\r\n corr_df[\"Abs Corr Coef\"] > params[\"correlation_threshold\"]\r\n ]\r\n columns_to_be_dropped = [\"ref_date\"]\r\n columns_to_be_dropped = _columns_to_drop(\r\n corr_df,\r\n features_to_be_dropped=columns_to_be_dropped,\r\n corr_threshold=0.6,\r\n )\r\n curated_columns = list(set(train_X.columns) - set(columns_to_be_dropped))\r\n\r\n save_pipeline(\r\n curated_columns,\r\n op.abspath(op.join(artifacts_folder, \"curated_columns.joblib\")),\r\n )\r\n save_pipeline(\r\n feature_generation_ppln,\r\n op.abspath(\r\n op.join(artifacts_folder, \"feature_generation_ppln.joblib\")\r\n ),\r\n )\r\n save_pipeline(\r\n binning_transformer,\r\n op.abspath(op.join(artifacts_folder, \"binning_transformer.joblib\")),\r\n )\r\n save_pipeline(\r\n outlier_transformer,\r\n op.abspath(op.join(artifacts_folder, \"outlier_transformer.joblib\")),\r\n )\r\n save_pipeline(\r\n feature_transformation_ppln,\r\n op.abspath(op.join(artifacts_folder, \"features_transformer.joblib\")),\r\n )\r\n print(\"Finished Saving Pipelines\")\r\n","repo_name":"abhishekashokkumar/TA_Case_study_Team_4","sub_path":"production/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} 
+{"seq_id":"17426118422","text":"import pandas as pd\nfrom dateutil import parser\n\nNRs = [[100, 100], [100, 1000], [1000, 100], [1000, 1000], [1000, 10000], [10000, 1000]]\nsigmas = ['0.05', '0.10', '0.15']\n\ndef conv_times(frame):\n return [parser.parse(frame.loc[i]['time']).time().hour * 3600.0 +\n parser.parse(frame.loc[i]['time']).time().minute * 60.0 +\n parser.parse(frame.loc[i]['time']).time().second +\n parser.parse(frame.loc[i]['time']).time().microsecond / 1E6\n for i in range(len(frame))]\n\nif __name__ == '__main__':\n table = list()\n for N, R in NRs:\n df = pd.read_csv(f'data/data/rotation_alias_study/cifar10/{N}_{R}_p_10', delimiter='\\t')\n n = len(df)\n row = [f'$N={N},R={R}$']\n row.append(f'${df[\"maxl2sqr\"].mean():.3}$')\n row.append('$\\SI{' + f'{sum(conv_times(df))/n:.4}' + '}{s}$')\n for sigma in sigmas:\n df = pd.read_csv(f'data/data/rotation_alias_study_cifar10/noise_{sigma}_{N}_{R}_p_10', delimiter='\\t')\n robacc = (df[\"correct\"] & (df[\"radius\"] >= 0.0)).sum()/n\n row.append(f'${int(robacc*100)}\\%$')\n row.append('$\\SI{' + f'{sum(conv_times(df))/n:.3}' + '}{s}$')\n table.append(row)\n\n toprule = '\\\\toprule\\n'\n head = '\\multirow{2}{*}{Sampling Numbers} & Sampling & Computing & ' + ' & '.join(['\\multicolumn{2}{c}{$\\\\sigma=' + s + '$}' for s in sigmas]) + '\\\\\\\\\\n'\n head2 = '& Err. $M$ & Time & ' + ' & '.join(['Rob. Acc. & Certify Time' for _ in sigmas]) + '\\\\\\\\\\n'\n hline = '\\\\hline\\n'\n body = ''.join([(' & '.join(row)) + '\\\\\\\\\\n' for row in table])\n bottomline = '\\\\bottomrule\\n'\n\n table_str = toprule + head + head2 + hline + body + bottomline\n print(table_str)\n\n","repo_name":"AI-secure/semantic-randomized-smoothing","sub_path":"semantic/visualize/sampling_study_stats.py","file_name":"sampling_study_stats.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"90"} +{"seq_id":"29602371058","text":"### Created on: 10-2023\n# -*- coding: utf-8 -*-\n\"\"\"\nScript should control the pure photonics and the OSA20\nWe choose the range that we want to sweep our seed's (\"signal\") wavelength\nand record the spectrum of the \"idler\" recorded by the OSA over a fixed number of avareged scans\nWe save the data in diff files\n\"\"\"\n\nimport numpy as np\nimport os\nimport time\nimport socket\n\nfrom datetime import datetime\n\nimport OSA20\nfrom itla import ITLAConnect, ITLA\nimport motors_control\n\nBUFFER_SIZE = 1024\n\n###############################################################################\n# Setting the directory\nfilestamp=datetime.now().strftime('%Y%m%d%H%M%S')\nTYPE = \"\\JSI_\" + str(filestamp)\nDATADIR = r\"C:\\\\Users\\\\Experience\\Desktop\\\\Multipartite Entanglement Experiment\\\\Data\\\\JSI\"+TYPE\n\n# Creates a list of frequencies, f_list, we want to sweep depending on the range of wavelength [start,stop] [m] and increments of inc [m].\nc=299792458\nstart=1545*1e-9\nstop=1554*1e-9\ninc=20*1e-12\nno_points=(stop-start)/inc\nw_list=np.arange(start,stop,inc)\nf_list=c/w_list # In Hz\n\n#######################\n# OSA setting params\n## Number of scan is 11\n# :INIT:TMOD 0 -> scan mode manual and :INIT:SMOD 1 -> continuous scan mode (until we stop)\n# \":INIT:SMOD 0;:INIT:TMOD 0\\r\\n\"\nset_single = \":INIT:SMOD 1;:INIT:TMOD 0\\r\\n\"\n# :TRAC1:TYPE 2 -> continuous average\ntrace_params = \":TRAC1:STAT 1;:TRAC1:ACT;:TRAC1:TYPE 2\\r\\n\"\n# :SENS:SENS 6 -> max sensitivity\nsens_params = \":SENS:SENS 6;:SENS:WAV:STAR 
1545nm;:SENS:WAV:STOP 1554nm;:SENS:BAND NAT;:SENS:TIME:INT:ENAB 0\\r\\n\"\n\ntry:\n # OSA starts init\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n osa = OSA20.OSA(s, set_single, trace_params, sens_params)\n osa.zero_auto()\n PARAMS = PEAKS = True\n\n ###############################################################################\n # Connect to Pure photonics\n sercon = ITLAConnect(\"COM5\", baudrate=9600)\n print(\"sercon: \", sercon)\n ITLA(sercon, 0x31, 1800, 1) # sets the output power (Reange is [600,1800] [dBm]/100)\n\n for iter, freq in enumerate(f_list):\n ITLA(sercon, 0x35, int(freq * 1e-12), 1) # Setting the frequency (THz register)\n ITLA(sercon, 0x36, int((freq * 1e-12 - int(freq * 1e-12)) * 10000), 1) # Setting the frequency (GHz resgister)\n ITLA(sercon, 0x32, 0x08, 1) # Laser on\n\n time.sleep(25)\n print(f\"iter, freq, w_list[iter]: {iter}, {freq}, {w_list[iter]}\")\n\n osa.aquire_trace()\n time.sleep(130)\n osa.stop_acquire()\n osa.save_data(DATADIR, \"_Mira_on\", f\"signal_{w_list[iter]}\", 0, PARAMS, PEAKS)\n\n ITLA(sercon, 0x32, 0x00, 1) # turns the laser off # Included in the loop because I'm not sure I can change the frequency while the laser is on\n\n sercon.close()\n s.close()\n\nexcept:\n print(\"Exception raised\")\n s.close()\n sercon.close()","repo_name":"Multi-photon-lab-LIP6/Measurement_Scripts","sub_path":"pp_jsi_scan.py","file_name":"pp_jsi_scan.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8532937915","text":"import random\n\nclass animal:\n def __init__(self, is_alive=True, energy=10):\n self.is_alive = is_alive\n self.energy = energy\n\n def status(self):\n return {'is_alive' : self.is_alive, 'energy' : self.energy}\n\ndef live(method):\n def wrapper(self, *args, **kwargs):\n if self.is_alive:\n return method(self, *args, **kwargs)\n return wrapper\nanimal(False)\n\nclass Lion(animal):\n def __init__(self, is_alive=True, energy=10):\n super().__init__(is_alive,energy)\n super().status()\n self.is_hunting = False\n\n @live\n def hunt(self):\n if self.energy - 3 >= 0:\n self.energy -= 3\n self.is_hunting= True\n else:\n self.is_hunting = False\n\n @live\n def eat(self, animal):\n catch = random.randint(0, 1)\n if all((animal.is_alive, self.is_hunting, catch)):\n self.energy += animal.energy\n animal.energy = 0\n animal.is_alive = False\n\nclass Zebra(animal):\n def __init__(self, is_alive=True, energy=10):\n super().__init__(is_alive,energy)\n super().status()\n @live\n def eat(self):\n self.energy += 1\n\nlion = Lion()\nzebra = Zebra()\n\nprint('lion status :', lion.status())\nprint('zebra status :', zebra.status())\n\nprint('_'*40)\nlion.hunt()\nlion.eat(zebra)\n\nprint('lion status :', lion.status())\nprint('zebra status :', zebra.status())\n","repo_name":"mststar0510/Daily-Python","sub_path":"kadai15.py","file_name":"kadai15.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73616194858","text":"# coding=utf-8\r\n\"\"\"\r\nDesenvolvedor:\tasantos\r\nE-mail:\t\t\tArannã Sousa Santos\r\nMês:\t\t\t04\r\nAno:\t\t\t2015\r\nEmpresa:\t\tTINS - SOLUCOES CORPORATIVAS\r\n\"\"\"\r\n__author__ = u'asantos'\r\n\r\nfrom ....base import (TagCaracter, TagInteiro, TXT)\r\nfrom .... 
import TIPO_REGISTRO, NIVEIS, gera_text\r\n\r\nfrom .agencia import Agencia\r\nfrom .conta_corrente import ContaCorrente\r\n\r\nclass DadosDebito(TXT):\r\n\tdef __init__(self, lote=u'0001', num_seq=u'00001', segmento=u'R'):\r\n\t\tsuper(DadosDebito, self).__init__()\r\n\r\n\t\tself._lote = lote\r\n\t\tself._num_seq = num_seq\r\n\t\tself._segmento = segmento\r\n\r\n\t\t# -------------------------------------------------------------------------------------\r\n\t\t# \tcom essa informacao, eu consigo localizar os dados desse REGISTRO\r\n\t\t# -------------------------------------------------------------------------------------\r\n\t\tself._tipo_registro = TIPO_REGISTRO.SEGMENTO\r\n\r\n\t\t# tipo_registro, codigo, nome, de, ate, valor=None, descricao=None, alertas=[], comentario=None, segmento=None, operacao=None\r\n\r\n\t\tself.banco\t\t= TagInteiro(self._tipo_registro,\tu'22.3R', u'banco',\t208, 210, descricao=u'G001', comentario=u'Código do banco na conta do débito',\tsegmento=self._segmento, lote=self._lote, num_seq=num_seq)\r\n\t\tself.agencia\t= Agencia(lote=self._lote, segmento=self._segmento, num_seq=self._num_seq)\r\n\t\tself.conta\t\t= ContaCorrente(lote=self._lote, segmento=self._segmento, num_seq=self._num_seq)\r\n\t\tself.dv\t\t\t= TagCaracter(self._tipo_registro,\tu'27.3R', u'dv',\t230, 230, descricao=u'*G012', comentario=u'Dígito verificador agencia/conta',\tsegmento=self._segmento, lote=self._lote, num_seq=num_seq)\r\n\r\n\tdef get_txt(self):\r\n\t\ttxt = u''\r\n\t\ttxt += self.banco.txt\r\n\t\ttxt += self.agencia.txt\r\n\t\ttxt += self.conta.txt\r\n\t\ttxt += self.dv.txt\r\n\t\treturn txt\r\n\r\n\tdef set_txt(self, arquivo):\r\n\t\tif self._le_txt(arquivo):\r\n\t\t\tself.banco.txt = arquivo\r\n\t\t\tself.agencia.txt = arquivo\r\n\t\t\tself.conta.txt = arquivo\r\n\t\t\tself.dv.txt = arquivo\r\n\r\n\ttxt = property(get_txt, set_txt)\r\n\r\n\tdef get_alertas(self):\r\n\t\talertas = self._alertas or []\r\n\t\talertas.extend(self.banco.alertas)\r\n\t\talertas.extend(self.agencia.alertas)\r\n\t\talertas.extend(self.conta.alertas)\r\n\t\talertas.extend(self.dv.alertas)\r\n\t\treturn alertas\r\n\r\n\talertas = property(get_alertas)\r\n\r\n\tdef get_text(self):\r\n\t\ttxt = gera_text(NIVEIS.N3, unicode(__name__) + u'\\n')\r\n\t\ttxt += gera_text(NIVEIS.N4, self.banco.text)\r\n\t\ttxt += self.agencia.text\r\n\t\ttxt += self.conta.text\r\n\t\ttxt += gera_text(NIVEIS.N4, self.dv.text)\r\n\t\treturn txt\r\n\r\n\ttext = property(get_text)","repo_name":"arannasousa/pyfebraban","sub_path":"febraban/segmentos/segmentoR/dados_debito/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"21712445036","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\nheader = {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36\"}\n#for i in range(9, 4, -1):\nfor i in range(9, 4, -1):\n url = \"https://search.daum.net/search?w=tot&q=201{}%EB%85%84%EC%98%81%ED%99%94%EC%88%9C%EC%9C%84&DA=MOR&rtmaxcoll=MOR\".format(i)\n #print(url)\n\n res = requests.get(url, headers=header)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n images = soup.find_all(\"img\", attrs={\"class\":\"thumb_img\"})\n name = soup.find(\"a\", attrs={\"class\":\"tit_main\"}).get_text()\n\n for idx, image in enumerate(images):\n img_url = image[\"src\"]\n if img_url.startswith(\"//\"):\n 
img_url = \"https:\" + img_url\n print(img_url)\n img_res = requests.get(img_url)\n img_res.raise_for_status()\n\n with open(\"201{}_movie_images0{}_{}.jpg\".format(i, idx+1, name), \"wb\") as f:\n f.write(img_res.content)\n if idx >= 4:\n break\n else:\n continue\n","repo_name":"redpeng96/dev","sub_path":"WebScraper/11_daum_movies.py","file_name":"11_daum_movies.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"42075941407","text":"\"\"\"\n Класс локального хранилища\n\"\"\"\n\n\nimport typing\n\n\nclass NOKEY:\n pass\n\n\nKey = typing.NewType(\"Key\", str)\nValue = typing.Any\nNO_KEY = NOKEY()\nNoKeyOrValue = typing.Union[NOKEY, Value]\n\n\nclass Storage():\n def __init__(self):\n self.data: typing.Dict[Key, Value] = {}\n\n async def get(\n self,\n key: Key,\n default: NoKeyOrValue = NO_KEY\n ) -> typing.Union[typing.NoReturn, Value]:\n if await self.contains(key):\n return self.data[key]\n if default is NO_KEY:\n raise KeyError(\"There is no such key\")\n return default\n\n async def put(self, key: Key, value: Value) -> None:\n self.data[key] = value\n return None\n\n async def delete(self, key: Key) -> typing.Optional[typing.NoReturn]:\n if not await self.contains(key):\n raise KeyError(\"Storage doesn't contain this key.\")\n del self.data[key]\n return None\n\n async def contains(self, key: Key) -> bool:\n return key in self.data\n","repo_name":"mrgick/pskgu_bot","sub_path":"pskgu_bot/db/local_storage.py","file_name":"local_storage.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"90"} +{"seq_id":"23786270870","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom models.networks.sync_batchnorm import SynchronizedBatchNorm2d\nimport torch.nn.utils.spectral_norm as spectral_norm\n\n\n# Returns a function that creates a normalization function\n# that does not condition on semantic map\ndef get_nonspade_norm_layer(opt, norm_type='instance'):\n # helper function to get # output channels of the previous layer\n def get_out_channel(layer):\n if hasattr(layer, 'out_channels'):\n return getattr(layer, 'out_channels')\n return layer.weight.size(0)\n\n # this function will be returned\n def add_norm_layer(layer):\n nonlocal norm_type\n if norm_type.startswith('spectral'):\n layer = spectral_norm(layer)\n subnorm_type = norm_type[len('spectral'):]\n\n if subnorm_type == 'none' or len(subnorm_type) == 0:\n return layer\n\n # remove bias in the previous layer, which is meaningless\n # since it has no effect after normalization\n if getattr(layer, 'bias', None) is not None:\n delattr(layer, 'bias')\n layer.register_parameter('bias', None)\n\n if subnorm_type == 'batch':\n norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)\n elif subnorm_type == 'sync_batch':\n norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)\n elif subnorm_type == 'instance':\n norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)\n else:\n raise ValueError('normalization layer %s is not recognized' % subnorm_type)\n\n return nn.Sequential(layer, norm_layer)\n\n return add_norm_layer\n\n\nclass SPADE(nn.Module):\n def __init__(self, cin, seg_dim):\n super().__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(seg_dim, 128, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n )\n self.alpha = nn.Conv2d(128, cin,\n kernel_size=3, stride=1, padding=1)\n self.beta = 
nn.Conv2d(128, cin,\n kernel_size=3, stride=1, padding=1)\n \n @staticmethod\n def PN(x):\n '''\n positional normalization: normalize each positional vector along the channel dimension\n '''\n assert len(x.shape) == 4, 'Only works for 4D(image) tensor'\n x = x - x.mean(dim=1, keepdim=True)\n x_norm = x.norm(dim=1, keepdim=True) + 1e-6\n x = x / x_norm\n return x\n \n def DPN(self, x, s):\n h, w = x.shape[2], x.shape[3]\n s = F.interpolate(s, (h, w), mode='bilinear', align_corners = False)\n s = self.conv(s)\n a = self.alpha(s)\n b = self.beta(s)\n return x * (1 + a) + b\n\n def forward(self, x, s):\n x_out = self.DPN(self.PN(x), s)\n return x_out\n","repo_name":"AnonymScholar/SpMT","sub_path":"models/networks/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"90"} +{"seq_id":"71193871336","text":"from settings import get_mongo_client\n\nclient = get_mongo_client()\ndb = client['coinmarketrend']\n\ndef find_word_cloud(coin_code):\n collection = db[WordCloud.WORD_CLOUD_COLLECTION]\n word_clouds = list(collection.find({'coin_code': coin_code}).sort([(\"_id\", -1)]).limit(1))\n return word_clouds\n\nclass WordCloud(object):\n\n WORD_CLOUD_COLLECTION = \"word_cloud\"\n\n def __init__(self, coin_code, words_date, words):\n self.coin_code = coin_code\n self.words_date = words_date\n self.words = words\n\nif __name__ == \"__main__\":\n coin_code = \"ocn\"\n find_word_cloud(coin_code)","repo_name":"gabrieltifui1993/cmt","sub_path":"cmtrest/repository/wordcloud.py","file_name":"wordcloud.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"29966276876","text":"from typing import List\n\n\nclass Solution:\n def solution(self, nums: List[int]) -> int:\n if nums[0] < nums[-1]:\n # If the last element is greater than the first one,\n # the array was not rotated.\n # That means the first element is the minimum.\n return nums[0]\n\n gap_index = self.search_gap(nums)\n return nums[gap_index]\n\n def search_gap(self, nums: List[int]) -> int:\n left, rigth = 0, len(nums) - 1\n\n while left <= rigth:\n mid = (left + rigth) // 2\n\n if nums[mid] < nums[mid - 1] and mid - 1 >= 0:\n # If the current element is smaller than the previous one,\n # we have found the gap. This index is the answer.\n return mid\n\n if nums[mid] >= nums[0]:\n # If the current middle element is greater than or equal to the first one,\n # the gap is further to the right.\n left = mid + 1\n else:\n # If the current number is smaller than the first one,\n # it means it is located after the gap. 
So we need to search further to the left.\n rigth = mid - 1\n\n return -1\n","repo_name":"DushnoAndTochka/solutions_algorithmic_problems","sub_path":"solutions/find_minimum_in_rotated_sorted_array/solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"ru","doc_type":"code","stars":16,"dataset":"github-code","pt":"90"} +{"seq_id":"5796836849","text":"\"\"\"\nMake a wrapper of tree-sitter node\n\"\"\"\n\nfrom __future__ import annotations\nimport os\nfrom functools import cmp_to_key\nfrom typing import Dict, Optional, Iterable, TYPE_CHECKING\nif TYPE_CHECKING:\n from .basic_node import BasicNode\nfrom tree_sitter import Language, Parser\n\n\nclass Util():\n \"\"\"\n\n Methods:\n sort_nodes(nodes: Iterable, reverse: bool = False): sort the nodes by\n their position in internal_src.\n \"\"\"\n\n def get_parser(self):\n abs_path = os.path.abspath(__file__)\n dire = os.path.dirname(abs_path)\n C_LANGUAGE = Language(f'{dire}/../cinspector-tree-sitter.so', 'c')\n parser = Parser()\n parser.set_language(C_LANGUAGE)\n return parser\n\n def get_tree(self, src: str):\n parser = self.get_parser()\n tree = parser.parse(bytes(src, 'utf8'))\n return tree\n\n def get_cursor(self, src: str):\n parser = self.get_parser()\n tree = parser.parse(bytes(src, 'utf8'))\n return tree.walk()\n\n @staticmethod\n def sort_nodes(nodes: Iterable, reverse: bool = False) -> Iterable:\n \"\"\" Sort the instances of BasicNode by their position in source code\n\n Args:\n nodes (Iterable): nodes waiting for sorting\n reverse (bool=False): use descending instead of ascending\n\n Return:\n sorted Iterable object\n \"\"\"\n\n def cmp_position(node1: BasicNode, node2: BasicNode) -> int:\n if node1.start_point[0] < node2.start_point[0] or \\\n (node1.start_point[0] == node2.start_point[0] and node1.start_point[1] < node2.start_point[1]):\n return -1\n else:\n return 1\n\n sorted_nodes = sorted(nodes,\n key=cmp_to_key(cmp_position),\n reverse=reverse)\n return sorted_nodes\n\n def get_raw(self, s: str, start: tuple, end: tuple):\n lst = s.split('\\n')\n s_row, s_col = start\n e_row, e_col = end\n\n if s_row > e_row or (s_row == e_row and s_col >= e_col):\n return None\n\n # potential bug: corresponding line does not have enough character\n if s_row == e_row:\n return lst[s_row][s_col:e_col]\n elif s_row + 1 == e_row:\n return lst[s_row][s_col:] + '\\n' + lst[e_row][:e_col]\n else:\n return lst[s_row][s_col:] \\\n + '\\n'.join(lst[s_row+1:e_row]) \\\n + lst[e_row][:e_col]\n\n def get_node_raw(self, s: str, node):\n if not node:\n return None\n return self.get_raw(s, node.start_point, node.end_point)\n\n\nclass Query():\n \"\"\" Access the specific nodes in the source code\n\n Query is used to access the nodes with specific properties in the\n source code. For example, find the enumeration with the type identifier\n \"weekdays\". To implement this, we let EnumSpecifierNode inherit from\n Query and implement the __type_identifier_result method, i.e., returns the\n field . 
The class in interface such as CCode will gather\n all the EnumSpecifierNode and check the query method to find\n the ideal node.\n\n Attributes:\n mapping: a dictionary that maps the query key to the method\n\n Methods:\n query: query the node with the given query\n \"\"\"\n\n def __init__(self):\n pass\n\n def query(self, query: Dict[str, str]) -> bool:\n \"\"\" Query the node with the given query\n\n Args:\n query: the query to be executed\n\n Returns:\n True if the node satisfies the query, otherwise False\n \"\"\"\n mapping = {\n 'type_identifier': self._type_identifier_result,\n 'identifier': self._identifier_result,\n }\n\n for key, value in query.items():\n if not mapping[key]() == value:\n return False\n return True\n\n def _identifier_result(self) -> Optional[str]:\n raise NotImplementedError\n\n def _type_identifier_result(self) -> Optional[str]:\n raise NotImplementedError\n\n\nclass Node():\n \"\"\" The root calss of all nodes\n\n In general, there are three types of nodes. Node is\n the root class of all nodes while both AbstractNode\n and BasicNode are the direct children of Node.\n\n AbstractNode represents the logical node in the source code.\n It does not correspond to a exactly same element in the source code.\n We design AbstractNode mainly for the needs of program analysis.\n\n BasicNode is the base class of a series of nodes that\n correspond to the actually existing elements in the source\n code.\n \"\"\"\n pass\n","repo_name":"PeiweiHu/cinspector","sub_path":"cinspector/nodes/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"90"} +{"seq_id":"72290715818","text":"from cmdstanpy import CmdStanModel\nfrom tarpan.cmdstanpy.compare_parameters import save_compare_parameters\nfrom tarpan.shared.compare_parameters import CompareParametersType\n\n\ndef run_model():\n model = CmdStanModel(stan_file=\"eight_schools.stan\")\n\n data = {\n \"J\": 8,\n \"y\": [28, 8, -3, 7, -1, 1, 18, 12],\n \"sigma\": [15, 10, 16, 11, 9, 11, 10, 18]\n }\n\n fit1 = model.sample(data=data, chains=4, cores=4, seed=1,\n sampling_iters=1000, warmup_iters=1000)\n\n # Increase the uncertainties\n data[\"sigma\"] = [i * 2 for i in data[\"sigma\"]]\n\n fit2 = model.sample(data=data, chains=4, cores=4, seed=1,\n sampling_iters=1000, warmup_iters=1000)\n\n extra = [{\"mu\": 2.2, \"tau\": 1.3}] # Add extra values (optional)\n\n save_compare_parameters([fit1, fit2],\n labels=[\n 'Original',\n 'Larger uncertainties',\n 'Extra'],\n extra_values=extra,\n type=CompareParametersType.TEXT, # or GITLAB_LATEX\n param_names=['mu', 'tau'])\n\n\nif __name__ == '__main__':\n run_model()\n print('We are done')\n","repo_name":"evgenyneu/tarpan","sub_path":"docs/examples/save_compare_parameters/a01_save_compare_parameters/compare_parameters.py","file_name":"compare_parameters.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"18147059939","text":"wor = input().upper()\ncou = 0\n\nimport re\nwhile True :\n tes = input().split()\n if tes[0] == 'END_OF_TEXT': break\n tes = [temp.upper() for temp in tes]\n cou += tes.count(wor)\n\nprint(cou)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02419/s642538791.py","file_name":"s642538791.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} 
+{"seq_id":"75053675816","text":"\"\"\"empty message\n\nRevision ID: a600790df447\nRevises: 85bd7c34fbf3\nCreate Date: 2023-07-30 16:58:14.377621\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a600790df447'\ndown_revision = '85bd7c34fbf3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(op.f('uq_category_id'), 'category', ['id'])\n op.add_column('menu', sa.Column('created_at', sa.DateTime(), nullable=True))\n op.add_column('menu', sa.Column('updated_at', sa.DateTime(), nullable=True))\n op.create_index(op.f('ix_menu_created_at'), 'menu', ['created_at'], unique=False)\n op.create_index(op.f('ix_menu_updated_at'), 'menu', ['updated_at'], unique=False)\n op.create_unique_constraint(op.f('uq_menu_id'), 'menu', ['id'])\n op.create_unique_constraint(op.f('uq_order_id'), 'order', ['id'])\n op.create_unique_constraint(op.f('uq_payment_id'), 'payment', ['id'])\n op.create_unique_constraint(op.f('uq_product_id'), 'product', ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(op.f('uq_product_id'), 'product', type_='unique')\n op.drop_constraint(op.f('uq_payment_id'), 'payment', type_='unique')\n op.drop_constraint(op.f('uq_order_id'), 'order', type_='unique')\n op.drop_constraint(op.f('uq_menu_id'), 'menu', type_='unique')\n op.drop_index(op.f('ix_menu_updated_at'), table_name='menu')\n op.drop_index(op.f('ix_menu_created_at'), table_name='menu')\n op.drop_column('menu', 'updated_at')\n op.drop_column('menu', 'created_at')\n op.drop_constraint(op.f('uq_category_id'), 'category', type_='unique')\n # ### end Alembic commands ###\n","repo_name":"lucaspacifico/mashgin","sub_path":"api/migrations/versions/2023_07_30_1658-a600790df447_.py","file_name":"2023_07_30_1658-a600790df447_.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37767930543","text":"# Import the necessary packages\nimport consolemenu\nimport consolemenu.items as cm_items\n\n# Create the menu\nmenu = consolemenu.ConsoleMenu(\"Title\", \"Subtitle\")\n\n# Create some cm_items\n\n# MenuItem is the base class for all cm_items, it doesn't do anything when selected\nmenu_item = cm_items.MenuItem(\"Menu Item\")\nfunction_item = cm_items.FunctionItem(\"Call a Python function\", input, [\"Enter an input \"])\ncommand_item = cm_items.CommandItem(\"Run a console command\", \"touch hello.txt\")\n\n# A SelectionMenu constructs a menu from a list of strings\nselection_menu = consolemenu.SelectionMenu([\"item1\", \"item2\", \"item3\"], 'title', 'subtitle', \n False, prologue_text='asdf', epilogue_text='ariuh')\n\n# A SubmenuItem lets you add a menu (the selection_menu above, for example)\n# as a submenu of another menu\n\n\n\nsub_menu = consolemenu.ConsoleMenu(\"submenu\", \"Subtitle\")\n\n\nsub_add_item = cm_items.FunctionItem(\"add item\", lambda: sub_menu.append_item(menu_item))\nsub_rem_item = cm_items.FunctionItem(\"remove item\", lambda: sub_menu.remove_item(menu_item))\n\nsub_menu.append_item(sub_add_item)\nsub_menu.append_item(sub_rem_item)\n\n\nsubmenu_item = cm_items.SubmenuItem(\"Submenu item\", sub_menu, menu)\n\n# Once we're done creating them, we just add the cm_items to the 
menu\nmenu.append_item(menu_item)\nmenu.append_item(function_item)\nmenu.append_item(command_item)\nmenu.append_item(submenu_item)\n\n# Finally, we call show to show the menu and allow the user to interact\n\nmenu.show()","repo_name":"huy-hng/Sandbox","sub_path":"python/templates/console_menu.py","file_name":"console_menu.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"75198405382","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\n\n\nimport numpy as np\n\nimport pandas as pd\n\nimport os\n\nimport copy\n\nimport sys\n\nfrom PIL import Image\n\nimport time \n\nfrom tqdm.autonotebook import tqdm\n\nimport random\n\nimport gc\n\nimport cv2\n\nimport scipy\n\nimport math\n\nimport matplotlib.pyplot as plt\n\n\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.model_selection import KFold,StratifiedKFold\n\nfrom sklearn.metrics import fbeta_score\n\n\n\nimport torch\n\nfrom torch.utils.data import TensorDataset, DataLoader,Dataset\n\nimport torch.nn as nn\n\nimport torch.nn.functional as F\n\nimport torchvision\n\nimport torchvision.transforms as transforms\n\nimport torch.optim as optim\n\nfrom torch.optim import lr_scheduler\n\nfrom torch.optim.optimizer import Optimizer\n\nimport torch.backends.cudnn as cudnn\n\nfrom torch.autograd import Variable\n\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nfrom torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR, _LRScheduler\n\n\n\n# Any results you write to the current directory are saved as output.\nimport scipy.special\n\n\n\nSEED = 42\n\nbase_dir = '../input/'\n\ndef seed_everything(seed=SEED):\n\n random.seed(seed)\n\n os.environ['PYHTONHASHSEED'] = str(seed)\n\n np.random.seed(seed)\n\n torch.manual_seed(seed)\n\n torch.cuda.manual_seed(seed)\n\n torch.backends.cudnn.deterministic = True\n\nseed_everything(SEED)\ntrain_df = pd.read_csv('../input/train.csv')\n\nlabels_df = pd.read_csv('../input/labels.csv')\n\ntest_df = pd.read_csv('../input/sample_submission.csv')\n\n\n\ntr, val = train_test_split(train_df['id'], test_size=0.15, random_state=SEED)\n\n\n\nimg_class_dict = {k:v for k, v in zip(train_df.id, train_df.attribute_ids)}\n\n\n\ndef get_label(attribute_ids):\n\n attribute_ids = attribute_ids.split()\n\n one_hot = np.zeros(1103, dtype=np.int)\n\n for _,ids in enumerate(attribute_ids):\n\n one_hot[int(ids)] = 1\n\n return one_hot\nprint(train_df.columns)\n\nprint(labels_df.columns)\nclasses =train_df['attribute_ids'].value_counts().to_frame().reset_index()\n\nclasses.rename(columns={'index': 'classes', 'attribute_ids':'counts'}, inplace=True)\nprint(classes)\n#classes['classes'] = classes['classes'].apply(get_label)\nclasses['ratio'] = classes['counts']/train_df.shape[0]\nclasses.head(10)\ndef get_label_name(attribute_ids):\n\n attribute_ids = attribute_ids.split()\n\n attribute_name = []\n\n for _,ids in 
enumerate(attribute_ids):\n\n attribute_name.append(labels_df.loc[labels_df['attribute_id']==int(ids)])\n\n return attribute_name\n#train_df['attribute_name'] = train_df['attribute_ids'].apply(get_label_name)\n\n#too slow\ntrain_df['count'] = train_df.groupby(['attribute_ids'])['id'].transform('count')\ntrain_df = train_df.sort_values(by='attribute_ids')\n#train_df['attribute_ids'] = train_df['attribute_ids'].apply(get_label)\ntrain_df.head(30)\ngrouped_id = train_df.groupby('attribute_ids')['id']\ncollect_image_names = {}\n\n\n\nfor key in classes['classes']:\n\n name = grouped_id.get_group(key).values[0]\n\n count = grouped_id.get_group(key).values.shape[0]\n\n collect_image_names[name] = count\nimport operator\n\nsorted_collect_image_names = sorted(collect_image_names.items(), key=operator.itemgetter(1))\n\nsorted_collect_image_names.reverse()\n\nprint(len(sorted_collect_image_names))\nprint(sorted_collect_image_names[:10])\nimage_name = sorted_collect_image_names[0][0]\n\nattribute_ids = train_df.loc[train_df['id']==image_name]['attribute_ids'].values[0]\n\nprint(attribute_ids.split())\nc = 1\n\nplt.figure(figsize=[20, 20])\n\nfor idx in range(10):\n\n image_name = sorted_collect_image_names[idx][0]\n\n img = cv2.imread(\"../input/train/{}.png\".format(image_name))[...,[2,1,0]]\n\n plt.subplot(5,2,c)\n\n plt.imshow(img)\n\n \n\n attribute_ids = train_df.loc[train_df['id']==image_name]['attribute_ids'].values[0].split()\n\n attribute_name = []\n\n for _,ids in enumerate(attribute_ids):\n\n attribute_name.append(labels_df.loc[labels_df['attribute_id']==int(ids)]['attribute_name'].values[0])\n\n plt.title(\"train image {} count {}\".format(attribute_name, sorted_collect_image_names[idx][1]))\n\n c += 1\n\nplt.show()\nc = 1\n\nplt.figure(figsize=[20,20])\n\n\n\nsize = len(sorted_collect_image_names)\n\n\n\nfor idx in range(size-10, size):\n\n image_name = sorted_collect_image_names[idx][0]\n\n img = cv2.imread(\"../input/train/{}.png\".format(image_name))[...,[2,1,0]]\n\n plt.subplot(5,2,c)\n\n plt.imshow(img)\n\n \n\n attribute_ids = train_df.loc[train_df['id']==image_name]['attribute_ids'].values[0].split()\n\n attribute_name = []\n\n for _,ids in enumerate(attribute_ids):\n\n attribute_name.append(labels_df.loc[labels_df['attribute_id']==int(ids)]['attribute_name'].values[0])\n\n plt.title(\"train image {} count {}\".format(attribute_name, sorted_collect_image_names[idx][1]))\n\n c += 1\n\nplt.show()\nname = grouped_id.get_group(classes['classes'][0]).values[0]\n\ncount = grouped_id.get_group(classes['classes'][0]).values.shape[0]\nc = 1\n\nplt.figure(figsize=[20,20])\n\n\n\nmost_frequent_class_top_10 = {}\n\n\n\nfor i in range(10):\n\n name = grouped_id.get_group(classes['classes'][0]).values[i]\n\n count = grouped_id.get_group(classes['classes'][0]).values.shape[0]\n\n most_frequent_class_top_10[name] = count\n\n\n\nsize = len(most_frequent_class_top_10)\n\n\n\nfor element in most_frequent_class_top_10:\n\n image_name = element\n\n img = cv2.imread(\"../input/train/{}.png\".format(image_name))[...,[2,1,0]]\n\n plt.subplot(5,2,c)\n\n plt.imshow(img)\n\n \n\n attribute_ids = train_df.loc[train_df['id']==image_name]['attribute_ids'].values[0].split()\n\n attribute_name = []\n\n for _,ids in enumerate(attribute_ids):\n\n attribute_name.append(labels_df.loc[labels_df['attribute_id']==int(ids)]['attribute_name'].values[0])\n\n plt.title(\"train image {} count {}\".format(attribute_name, most_frequent_class_top_10[element]))\n\n c += 1\n\nplt.show()\ncategory_count = {}\n\n\n\nfor i in 
range(1103):\n\n category_count[i] = 0\nfor key in classes['classes']:\n\n category_name = key.split()\n\n count = grouped_id.get_group(key).values.shape[0]\n\n for element in category_name:\n\n category_count[int(element)] += count\nsorted_category_count = sorted(category_count.items(), key=operator.itemgetter(1))\n\nsorted_category_count.reverse()\nsorted_category_count_frame = pd.DataFrame.from_dict(sorted_category_count)\n\nsorted_category_count_frame.columns=['attribute_id', 'count']\n\nsorted_category_count_frame['ratio'] = sorted_category_count_frame['count']/train_df.shape[0]\nsorted_category_count_frame.head(30)\ncategory_name_count = {}\n\n\n\nfor element in sorted_category_count:\n\n key = element[0]\n\n name = labels_df[labels_df['attribute_id']==key]['attribute_name'].values[0]\n\n category_name_count[name] = element[1]\nsorted_category_name_count = sorted(category_name_count.items(), key=operator.itemgetter(1))\n\nsorted_category_name_count.reverse()\nsorted_sorted_category_name_count_frame = pd.DataFrame.from_dict(sorted_category_name_count)\n\nsorted_sorted_category_name_count_frame.columns=['attribute_name', 'count']\n\nsorted_sorted_category_name_count_frame['ratio'] = sorted_sorted_category_name_count_frame['count']/train_df.shape[0]\nsorted_sorted_category_name_count_frame.head(30)\nsorted_category_count_frame.to_csv('sorted_category_count_frame.csv', index=False)","repo_name":"aorursy/new-nb-4","sub_path":"jionie_eda-classes-most-frequent-and-least-frequent.py","file_name":"jionie_eda-classes-most-frequent-and-least-frequent.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"18934772180","text":"import multiprocessing\n\n#server process\n\ndef print_records(records):\n \"\"\"\n function to print record(tuples) in records(list)\n \"\"\"\n for record in records:\n print(\"Name: {0}\\nScore: {1}\\n\".format(record[0], record[1]))\n\n\ndef insert_record(record, records):\n \"\"\"\n function to add a new record to records(list)\n \"\"\"\n records.append(record)\n print(\"New record added!\\n\")\n\nif __name__ == '__main__':\n with multiprocessing.Manager() as manager:\n # create new list in server process memory\n records = manager.list([('sam',10),('cuong','14')])\n new_record = ('test',2)\n\n #create process\n p1 = multiprocessing.Process(target=insert_record,args=(new_record,records))\n p2 = multiprocessing.Process(target=print_records,args=(records,))\n\n p1.start()\n p2.start()\n\n p1.join()\n p2.join()\n","repo_name":"cuongpianna/python","sub_path":"python/app4.py","file_name":"app4.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"43554349054","text":"\"\"\"This component loads a seed dataset from the hub.\"\"\"\nimport logging\nimport typing as t\n\nimport dask\nimport dask.dataframe as dd\nimport pandas as pd\nfrom fondant.component import DaskLoadComponent\nfrom fondant.core.schema import Field\n\nlogger = logging.getLogger(__name__)\n\ndask.config.set({\"dataframe.convert-string\": False})\n\n\nclass LoadFromHubComponent(DaskLoadComponent):\n def __init__(\n self,\n *,\n produces: t.Dict[str, Field],\n dataset_name: str,\n column_name_mapping: t.Optional[dict],\n image_column_names: t.Optional[list],\n n_rows_to_load: t.Optional[int],\n index_column: t.Optional[str],\n **kwargs,\n ) -> None:\n \"\"\"\n Args:\n produces: The schema the component should 
produce\n dataset_name: name of the dataset to load.\n column_name_mapping: Mapping of the consumed hub dataset to fondant column names\n image_column_names: A list containing the original hub image column names. Used to\n format the image from HF hub format to a byte string\n n_rows_to_load: optional argument that defines the number of rows to load. Useful for\n testing pipeline runs on a small scale.\n index_column: Column to set index to in the load component, if not specified a default\n globally unique index will be set.\n kwargs: Unhandled keyword arguments passed in by Fondant.\n \"\"\"\n self.dataset_name = dataset_name\n self.column_name_mapping = column_name_mapping\n self.image_column_names = image_column_names\n self.n_rows_to_load = n_rows_to_load\n self.index_column = index_column\n self.produces = produces\n\n def get_columns_to_keep(self) -> t.List[str]:\n # Only read required columns\n columns = []\n\n if self.column_name_mapping:\n invert_column_name_mapping = {\n v: k for k, v in self.column_name_mapping.items()\n }\n else:\n invert_column_name_mapping = {}\n\n for field_name, field in self.produces.items():\n column_name = field_name\n if invert_column_name_mapping and column_name in invert_column_name_mapping:\n columns.append(invert_column_name_mapping[column_name])\n else:\n columns.append(column_name)\n\n if self.index_column is not None:\n columns.append(self.index_column)\n\n return columns\n\n def convert_images_to_bytes(self, dask_df) -> dd.DataFrame:\n if self.image_column_names:\n for image_column_name in self.image_column_names:\n dask_df[image_column_name] = dask_df[image_column_name].map(\n lambda x: x[\"bytes\"],\n meta=(\"bytes\", bytes),\n )\n\n return dask_df\n\n def set_df_index(self, dask_df: dd.DataFrame) -> dd.DataFrame:\n if self.index_column is None:\n logger.info(\n \"Index column not specified, setting a globally unique index\",\n )\n\n def _set_unique_index(dataframe: pd.DataFrame, partition_info=None):\n \"\"\"Function that sets a unique index based on the partition and row number.\"\"\"\n dataframe[\"id\"] = 1\n dataframe[\"id\"] = (\n str(partition_info[\"number\"])\n + \"_\"\n + (dataframe.id.cumsum()).astype(str)\n )\n dataframe.index = dataframe.pop(\"id\")\n return dataframe\n\n def _get_meta_df() -> pd.DataFrame:\n meta_dict = {\"id\": pd.Series(dtype=\"object\")}\n for field_name, field in self.produces.items():\n meta_dict[field_name] = pd.Series(\n dtype=pd.ArrowDtype(field.type.value),\n )\n return pd.DataFrame(meta_dict).set_index(\"id\")\n\n meta = _get_meta_df()\n dask_df = dask_df.map_partitions(_set_unique_index, meta=meta)\n else:\n logger.info(f\"Setting `{self.index_column}` as index\")\n dask_df = dask_df.set_index(self.index_column, drop=True)\n\n return dask_df\n\n def return_subset_of_df(self, dask_df: dd.DataFrame) -> dd.DataFrame:\n if self.n_rows_to_load is not None:\n partitions_length = 0\n npartitions = 1\n for npartitions, partition in enumerate(dask_df.partitions, start=1):\n if partitions_length >= self.n_rows_to_load:\n logger.info(\n f\"\"\"Required number of partitions to load\\n\n {self.n_rows_to_load} is {npartitions}\"\"\",\n )\n break\n partitions_length += len(partition)\n dask_df = dask_df.head(self.n_rows_to_load, npartitions=npartitions)\n dask_df = dd.from_pandas(dask_df, npartitions=npartitions)\n return dask_df\n\n def load(self) -> dd.DataFrame:\n # 1) Load data, read as Dask dataframe\n logger.info(\"Loading dataset from the hub...\")\n\n columns = self.get_columns_to_keep()\n\n 
logger.debug(f\"Columns to keep: {columns}\")\n dask_df = dd.read_parquet(f\"hf://datasets/{self.dataset_name}\", columns=columns)\n\n # 2) Make sure images are bytes instead of dicts\n dask_df = self.convert_images_to_bytes(dask_df)\n\n # 3) Rename columns\n logger.info(\"Renaming columns...\")\n dask_df = dask_df.rename(columns=self.column_name_mapping)\n\n # 4) Optional: only return specific amount of rows\n dask_df = self.return_subset_of_df(dask_df)\n\n # 5) Set the index\n dask_df = self.set_df_index(dask_df)\n\n return dask_df\n","repo_name":"ml6team/fondant","sub_path":"components/load_from_hf_hub/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"8"} +{"seq_id":"74197550981","text":"'''\r\nName: Lim Hur\r\nClass: DAAA/FT/2B/02\r\nAdmin: 2112589\r\n'''\r\n\r\n# Imports\r\nfrom utils import Utils\r\nfrom tools.thesaurus import Thesaurus\r\nfrom random import shuffle, choice\r\nfrom tools.summary import TextSummarization\r\nfrom tools.text_process import TextProcessor\r\n\r\nclass Program:\r\n def __init__(self, config):\r\n # Load config regarding assignment details\r\n # thesaurus is None when program starts\r\n self.__name, self.__admin, self.__class = config['author']['name'], config['author']['adminNo'], config['author']['class']\r\n self.__module = config['application_welcome']\r\n self.__module_code = config['module_code']\r\n self.__thesaurus = None\r\n\r\n # Run the main loop/menu of the program. Allows users to move between the options.\r\n def run(self):\r\n self.__print_start()\r\n while True:\r\n Utils.press_anywhere()\r\n starting_choices = ['New', 'Open', 'Sort', 'Process Text', 'Text Summarization', 'Text Searching','Print', 'Save', 'Save as', 'Exit']\r\n user_choice = Utils.get_number_choice(arr_choices=starting_choices)\r\n if user_choice == 1:\r\n self.__choice1()\r\n elif user_choice == 2:\r\n self.__choice2()\r\n elif user_choice == 3:\r\n self.__choice3()\r\n elif user_choice == 4:\r\n self.__choice4()\r\n elif user_choice ==5:\r\n self.__choice5()\r\n elif user_choice == 6:\r\n self.__choice6()\r\n elif user_choice == 7:\r\n self.__choice7()\r\n elif user_choice == 8:\r\n self.__choice8()\r\n elif user_choice == 9:\r\n self.__choice9()\r\n elif user_choice == 10:\r\n self.__choice10()\r\n return\r\n # Print initial banner and assignment details\r\n def __print_start(self):\r\n print('*' * 57)\r\n print(f'''* {self.__module}\\t*''')\r\n print('*' + '-' * 55 + '*')\r\n\r\n print('*',' '*53 ,'*')\r\n print(\"\"\"* -\"\"\", 'Done by:', f'{self.__name}({self.__admin})', ' '*24, '*')\r\n print(\"\"\"* -\"\"\", 'Class:', f'{self.__class}', ' '*32, '*')\r\n print('*' * 57)\r\n \r\n # Option 1: Allows user to set up NEW thesaurus \r\n def __choice1(self):\r\n print('\\nWe will be starting a new Thesaurus.\\nYou may now enter a series of keywords and their synonyms.\\n')\r\n self.__thesaurus = Thesaurus.new_thesaurus()\r\n # Check if thesaurus exists, else return to option menu\r\n if not self.__thesaurus:\r\n return \r\n print()\r\n print('Your new thesaurus is ready and printed here....')\r\n print(self.__thesaurus)\r\n \r\n # Option 2: Allow user to set up new thesaurus from .txt file\r\n def __choice2(self):\r\n thesaurus_file = Thesaurus.read_thesaurus_file()\r\n # Check if thesaurus file exists\r\n if thesaurus_file:\r\n self.__thesaurus = thesaurus_file\r\n print(self.__thesaurus)\r\n print()\r\n \r\n # Option 3: Different sorting conditions\r\n 
def __choice3(self):\r\n # Check if thesaurus exists\r\n if not self.__thesaurus:\r\n print('You dont have any thesaurus')\r\n return\r\n sort_ls = ['Alphabetically (Default)', 'Length/Alphabetically', 'Length/Random Alphabetically', 'Randomly', 'Back to main menu']\r\n sort_choice = Utils.get_number_choice(sort_ls)\r\n if sort_choice == 1:\r\n self.__thesaurus = self.__thesaurus.custom_sort()\r\n # Default sorting (Alphabetically)\r\n elif sort_choice == 2:\r\n self.__thesaurus = self.__thesaurus.custom_sort( [len, 'alphabet'] )\r\n # Sorting by length/alphabetically\r\n elif sort_choice == 3:\r\n self.__thesaurus = self.__thesaurus.custom_sort( choice )\r\n # Sort by length/random alphabetically\r\n elif sort_choice == 4:\r\n self.__thesaurus = self.__thesaurus.custom_sort( shuffle )\r\n # Sort randodmly\r\n else:\r\n return\r\n print('Sorting Synonyms: {}'.format(sort_ls[sort_choice-1]))\r\n print(self.__thesaurus)\r\n # Choice 4: Text processing, for simplified and elegant writing\r\n def __choice4(self):\r\n if not self.__thesaurus:\r\n print('You dont have any thesaurus')\r\n return \r\n data ,input_file = Utils.get_file_data(msg='Select the file you want to process')\r\n if input_file == '':\r\n return\r\n \r\n print('The text before processing:')\r\n print(data, '\\n')\r\n\r\n Utils.press_anywhere()\r\n print('Next choose a text processing option.\\n')\r\n writing_choice = Utils.get_number_choice(['Simplified Writing', 'Elegant writing', 'Back to Main Menu'])\r\n if writing_choice in {1,2}:\r\n thesaurus_variant = self.__thesaurus if writing_choice == 2 else {item: key for key ,ls in self.__thesaurus.items() for item in ls}\r\n self.__processor = TextProcessor(thesaurus= thesaurus_variant, word_ls= thesaurus_variant.keys() )\r\n print('Processing Text for: {}\\n'.format('Elegant writing' if writing_choice==2 else 'Simplified Writing'))\r\n try:\r\n final_text = self.__processor.replace(text=data)\r\n except:\r\n return\r\n else:\r\n return\r\n print('The text after processing:\\n{}\\n'.format(final_text))\r\n Utils.press_anywhere()\r\n\r\n save_to_file = ''\r\n while save_to_file not in ['y', 'n']:\r\n save_to_file = input('Do you want to save the text into a file? 
y/n: ').strip()\r\n if save_to_file == 'y':\r\n _ = Utils.save_as_file(final_text, medium = 'The text has been saved in \"{}\"')\r\n\r\n # Choice5 : Advance feature: Text Summarization\r\n def __choice5(self):\r\n data ,input_file = Utils.get_file_data(msg='Select the file you wish to summarize')\r\n if input_file == '':\r\n return\r\n print('Your file {} you wish to summarize is printed here:'.format(input_file))\r\n print(data)\r\n Utils.press_anywhere()\r\n \r\n summary = TextSummarization(data, top_n=5)\r\n summary.get_freq_list()\r\n print('Your file after summarization:\\n')\r\n print(summary.get_main_message())\r\n \r\n\r\n # Choice 6: Text Searching\r\n def __choice6(self):\r\n print('We will be performing text searching')\r\n data ,input_file = Utils.get_file_data(msg='Select the file you wish to perform text searching on')\r\n if input_file == '':\r\n return\r\n word_ls = Utils.get_word_ls()\r\n if len(word_ls) <1:\r\n return\r\n searcher = TextProcessor(thesaurus=None, word_ls= word_ls)\r\n final_text = searcher.search(text=data)\r\n if final_text is None:\r\n final_text = 'No search matches'\r\n print('\\nPrinting out search summary now (**Printing out sentences that contains searched (words searched printed in [ ])**):\\n')\r\n print(final_text)\r\n\r\n # Choice 7: printing of thesaurus (self.__thesaurus.tracker is used to track if user opened thesaurus from file or set up from user validation)\r\n def __choice7(self):\r\n if not self.__thesaurus:\r\n print('You dont have any thesaurus')\r\n return \r\n print('The thesaurus {} is printed here....'.format('that you created' if self.__thesaurus.tracker == 1 else self.__thesaurus.tracker))\r\n print(self.__thesaurus)\r\n\r\n # Option 8: Save option for existing thesaurus that has file\r\n def __choice8(self):\r\n try:\r\n if self.__thesaurus.tracker ==1:\r\n print('The thesaurus doesnt have an existing file. Please save as instead')\r\n return\r\n except:\r\n print('Please make sure thesaurus is available')\r\n return\r\n # Set mode to 'w' to overwrite the file.\r\n _ = Utils.save_as_file(self.__thesaurus, mode='w', saved_existing_file=self.__thesaurus.tracker) \r\n\r\n \r\n # Option 9: Allow user to save into new text file\r\n def __choice9(self):\r\n if not self.__thesaurus:\r\n print('You dont have any thesaurus! 
Please choose New or Open to load a thesaurus')\r\n return \r\n print('Save As')\r\n saved_file = Utils.save_as_file(self.__thesaurus) # Save thesaurus into text file.\r\n # Sets the tracker to the newly saved as file\r\n if saved_file is not None:\r\n self.__thesaurus.tracker = saved_file \r\n\r\n # Friendly goodbye message for user\r\n def __choice10(self):\r\n print()\r\n print(f'Bye, thanks for using {self.__module_code} DSAA: Thesaurus Based Text Processor')","repo_name":"lhurr/Thesaurus-Based-Text-Processing-Application","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"26942797357","text":"\"\"\"\nUtility functions for computing with modular forms for Weil representations.\n\"\"\"\nfrom fqm_weil.modules.finite_quadratic_module.finite_quadratic_module_base import \\\n FiniteQuadraticModule_base\nfrom fqm_weil.modules.weil_module.weil_module import WeilModule\nimport logging\nfrom logging import getLogger\n\nfrom sage.all import ZZ, QQ\nfrom sage.arith.misc import gcd, xgcd\nfrom sage.matrix.constructor import matrix\nfrom sage.rings.integer import Integer\nfrom sage.rings.number_field.number_field import CyclotomicField\nfrom sage.rings.rational import Rational\n\nlog = getLogger(__name__)\n\n\ndef cusp_normalisers_and_stabilisers(group) -> dict:\n cusp_normalisers = {}\n cusp_stabilisers = {}\n cusps = group.cusps()\n cusps.sort(reverse=True)\n for cusp in cusps:\n a = cusp.numerator()\n c = cusp.denominator()\n w = group.level() / gcd(group.level(), c**2)\n Tp = matrix(ZZ, [[1-c*a*w, a**2*w], [-c**2*w, 1+a*c*w]])\n g, s, t = xgcd(a, c)\n Ai = matrix([[a, -t], [c, s]])\n cusp_normalisers[cusp] = Ai\n cusp_stabilisers[cusp] = Tp\n return {\n 'cusp_normalisers': cusp_normalisers,\n 'cusp_stabilisers': cusp_stabilisers\n }\n\n\ndef exp_as_zN_power(N, arg):\n \"\"\"\n Return e(arg) as a power of a primitive N-th root of unit z_N if possible.\n\n INPUT:\n\n - ``N`` -- positive integer\n - ``arg`` -- rational number\n \"\"\"\n if not isinstance(N, (int, Integer)):\n raise ValueError(\"N must be an integer\")\n if not isinstance(arg, Rational):\n raise ValueError(\"arg must be a rational number\")\n\n zN = CyclotomicField(N).gen()\n m = arg.numerator()\n n = arg.denominator()\n # We must have n | N\n if N % n != 0:\n raise ArithmeticError(f\"Denominator `{n}` should divide the level `{N}`\")\n return zN ** (m * N // n)","repo_name":"fredstro/fqm-weil","sub_path":"src/fqm_weil/modular/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"10208164787","text":"from flask import Flask\r\nfrom ibm_watson import ToneAnalyzerV3\r\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\r\nimport pandas as pd \r\nimport numpy as np\r\nimport json\r\ndf = pd.read_csv('7282_1.csv')\r\napikey='9d_xot7IbgklVQ3j6cEieTfozGgDh4ojZ9snN_MVuIQH'\r\nURL='https://gateway-lon.watsonplatform.net/tone-analyzer/api'\r\nversion='2019-10-23'\r\nauthenticator = IAMAuthenticator(apikey)\r\ntone_analyzer = ToneAnalyzerV3(\r\n version=version,\r\n authenticator=authenticator\r\n)\r\ntone_analyzer.set_service_url(URL)\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/hotels/')\r\ndef hello_world(hotel_name):\r\n #tones=[]\r\n hotel_analysis=[]\r\n tones_dict = {\r\n\t\t\"Anger\" : 0,\r\n\t\t\"Fear\" : 0,\r\n\t\t\"Joy\" : 
0,\r\n\t\t\"Sadness\" : 0,\r\n\t\t\"Analytical\" : 0,\r\n\t\t\"Confident\" : 0\r\n\t}\r\n hotels = df['name'] == hotel_name\r\n \r\n hotel_df=df[hotels]\r\n text=hotel_df['reviews.text'].tolist()\r\n size = np.shape(text)[0]\r\n count_joy=0;count_fear=0;count_anger=0; count_sadness=0;count_analytical=0;count_confident=0\r\n for i in range(size):\r\n tone_analysis = tone_analyzer.tone(\r\n {'text': text[i]},\r\n content_type='text/plain',\r\n sentences=False\r\n ).get_result()\r\n if(tone_analysis['document_tone']['tones']):\r\n tones=tone_analysis['document_tone']['tones']\r\n hotel_analysis.append(tone_analysis['document_tone']['tones'])\r\n for j in range(np.shape(tones)[0]):\r\n if tones[j]['tone_id']=='anger':\r\n tones_dict['Anger']+=tones[j]['score']\r\n count_anger = count_anger+1\r\n elif tones[j]['tone_id']=='fear':\r\n tones_dict['Fear']+=tones[j]['score']\r\n count_fear = count_fear+1\r\n elif tones[j]['tone_id']=='joy':\r\n tones_dict['Joy']+=tones[j]['score']\r\n count_joy = count_joy+1\r\n elif tones[j]['tone_id']=='sadness':\r\n tones_dict['Sadness']+=tones[j]['score']\r\n count_sadness = count_sadness+1\r\n elif tones[j]['tone_id']=='analytical':\r\n tones_dict['Analytical']+=tones[j]['score']\r\n count_analytical = count_analytical+1\r\n elif tones[j]['tone_id']=='confident':\r\n tones_dict['Confident']+=tones[j]['score']\r\n count_confident = count_confident+1\r\n tones_dict['Anger'] = round(tones_dict['Anger']/count_anger,2) if count_anger !=0 else 0\r\n tones_dict['Fear'] = round(tones_dict['Fear']/count_fear,2) if count_fear !=0 else 0\r\n tones_dict['Joy'] = round(tones_dict['Joy']/count_joy,2) if count_joy !=0 else 0\r\n tones_dict['Sadness']=round(tones_dict['Sadness']/count_sadness,2) if count_sadness != 0 else 0\r\n tones_dict['Analytical']=round(tones_dict['Analytical']/count_analytical,2) if count_analytical !=0 else 0\r\n tones_dict['Confident']=round(tones_dict['Confident']/count_confident,2) if count_confident !=0 else 0\r\n\r\n\r\n \r\n \r\n return json.dumps(tones_dict, indent=2)\r\n\r\nif __name__ == '__main__':\r\n app.run()","repo_name":"Mohamed19966/Hotel-Tone-Analyzer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"14871906471","text":"\"\"\"Added is_viewable column to Pattern class\n\nRevision ID: d970b2cf54bf\nRevises: 3613b8d53bbe\nCreate Date: 2021-10-16 18:46:52.284892\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd970b2cf54bf'\ndown_revision = '3613b8d53bbe'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('pattern', sa.Column('is_viewable', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('pattern', 'is_viewable')\n # ### end Alembic commands ###\n","repo_name":"zaraconsulting/comececeme","sub_path":"migrations/versions/d970b2cf54bf_added_is_viewable_column_to_pattern_.py","file_name":"d970b2cf54bf_added_is_viewable_column_to_pattern_.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70961110663","text":"# still needs debugging.\r\n# documentation completed.\r\n# code cleaned and refactored.\r\n\r\nfrom flask import Flask,request,json\r\nimport random, numpy as np, torch\r\nimport os,sys\r\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\r\nimport SUPERVISOR\r\nimport UTILITY\r\nimport PARAMS\r\n\r\nrobot = SUPERVISOR.Nao()\r\n\r\nif PARAMS.ALGORITHM=='DDPGV2':\r\n noise = UTILITY.OUNoise(action_space={'dim':len(robot.__getActuators__()),'low':PARAMS.ACTOR_MIN_OUTPUT,'high':PARAMS.ACTOR_MAX_OUTPUT},\r\n mu=PARAMS.NOISE_LIST[0]['mu'], # mean of the process\r\n theta=PARAMS.NOISE_LIST[0]['theta'], # frequency\r\n max_sigma=PARAMS.NOISE_LIST[0]['sigma'], # min volatility \r\n min_sigma=PARAMS.NOISE_LIST[0]['sigma'], # max volatility \r\n decay_period=PARAMS.DECAY_PERIOD)\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/update_networks\",methods=['POST'])\r\ndef update_networks():\r\n \"\"\"Will update worker actor and critic networks using provided arguments.\r\n\r\n Returns:\r\n dict: Emptry dictonary will be returned.\r\n \"\"\"\r\n # initiate values\r\n req_in = json.loads(request.json)\r\n\r\n # update actor network \r\n robot.actor = torch.load(req_in['actor_network'])\r\n\r\n # update critic network\r\n robot.critic = torch.load(req_in['critic_network'])\r\n\r\n # return empty dictonary\r\n return {}\r\n\r\n@app.route(\"/generate_samples\",methods=['POST'])\r\ndef generate_samples(episodes=None):\r\n \"\"\"Will generate list of samples of form (s,a,r,s2) using specified actor and critic networks in the agent.\r\n\r\n Args:\r\n episodes (int): Number of the episodes to run worker and collect samples.\r\n\r\n Raises:\r\n KeyError: Input request must contain a dictionary with episodes:int element otherwise a KeyError will be raised.\r\n\r\n Returns:\r\n Dict: Dictionary with sample_list key and value equals to sample_list of form (s,a,r,s2) that will be used to trian the controller target networks.\r\n \"\"\"\r\n # initiate values\r\n req_in = json.loads(request.json)\r\n robot.actor.eval()\r\n robot.critic.eval()\r\n if PARAMS.ALGORITHM=='DDPGV2':\r\n noise.reset()\r\n\r\n # load argument from input json request\r\n try : episodes = req_in['episodes']\r\n except : raise KeyError('Request must contain a dictoinary with episodes:int element.')\r\n\r\n trajectory_list = []\r\n log_list = []\r\n episodes_reward_list = []\r\n\r\n for _ in range(episodes):\r\n\r\n # Set the location and pose of the robot to initiate state\r\n robot.__setCurrentState__('initialState')\r\n\r\n if PARAMS.WORKER_RANDOM_INITIALIZATION==True:\r\n # We will make initial state random and undependent of previous trajectory to minimize forgetting and maximize exploration\r\n # uniform initialization is a good way to go. 
we will perform two random actions after initialization.\r\n if (random.randint(1,4)%2==0):\r\n for _ in range(random.randint(2,4)):\r\n a = noise.get_action(np.random.rand(len(robot.__getActuators__())))\r\n robot.__act__(a)\r\n robot.__stepSimulaiton__()\r\n else:\r\n robot.__setRandomPosiiton__()\r\n\r\n # sample a trajectory\r\n trajectory,log_info,episode_steps_rewards_list = robot.__eval__()\r\n\r\n # store trajectory\r\n trajectory_list += [trajectory]\r\n log_list += [log_info]\r\n episodes_reward_list += [episode_steps_rewards_list]\r\n\r\n return {'sample_list':trajectory_list, 'log_list': log_list, 'list_of_episodes_steps_rewards' : episodes_reward_list}\r\n\r\ndef main():\r\n\r\n # Store initiate state to use in future sampling process\r\n robot.__stepSimulaiton__()\r\n \r\n # Store init position to ensure zero velocity and acceleration\r\n robot.supervisor.getFromDef(robot.supervisor.getName()).saveState(stateName='initialState')\r\n\r\n # Start the flask server app and wait for main_agent requests\r\n app.run(host='localhost', port=int(robot.supervisor.getCustomData()))\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"KKMOfficial/usb_nao_bot","sub_path":"controllers/WORKER_AGENT_SUPERVISOR/WORKER_AGENT_SUPERVISOR.py","file_name":"WORKER_AGENT_SUPERVISOR.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"32516323941","text":"# https://www.geeksforgeeks.org/stepping-numbers/\n\nstart_num = 0\nend_num = 50\nstep_num = []\nfor num in range(start_num, end_num + 1):\n num = str(num)\n if len(num) == 1:\n step_num.append(int(num))\n else:\n for pos in range(1, len(num)):\n if abs(int(num[pos - 1]) - int(num[pos])) != 1:\n continue\n step_num.append(int(num))\n\nprint(step_num)\n","repo_name":"kiranlvs93/PythonPrograms","sub_path":"interview_questions/stepping_numbers.py","file_name":"stepping_numbers.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"27623552874","text":"import pandas as pd\nimport itertools\nimport numpy as np\nimport matchingStrategies as ms\nimport json\nimport logging\nimport jinja2\nimport collections\n\nlogger = logging.getLogger('DataProcessor')\nlogger.setLevel(logging.DEBUG)\nclass DataProcessor:\n def __init__(self):\n self.df = []\n self.pairedDf = []\n self.filteredDf = []\n self.finalPairs = []\n self.scoreBoard = []\n self.ratedWeight = []\n self.strategies = {}\n self.dropped = {}\n self.strategiesEngine = ms.MatchingStrategies()\n\n def loadDataFromFile(self, fileName, strategyName):\n self.df = pd.read_csv(fileName, dtype = str)\n with open(strategyName) as strategiesFile: \n self.strategies = json.load(strategiesFile) \n self.mapToPairs()\n\n def mapToPairs(self):\n a,b = map(list, zip(*itertools.combinations(self.df.index, 2)))\n merged = pd.concat([self.df.loc[a].reset_index(drop=True), self.df.loc[b].reset_index(drop=True)], axis=1)\n merged.columns = list(map('.'.join, itertools.product([\"matchingUser\", \"matchedUser\"], ['UserName'] + list(self.df.columns)[1:])))\n self.pairedDf = merged.infer_objects()\n logger.info(\"Finished mapToPairs, remaining entries\")\n logger.info(self.pairedDf)\n logger.info(self.pairedDf.shape)\n\n def removeImpossiblePairs(self):\n self.filteredDf = self.pairedDf.copy()\n for ruleColumn in self.strategies[\"hardConstraints\"]:\n logger.info(\"Processing rule named {}\".format(ruleColumn))\n if 
(('matchingUser.{}'.format(ruleColumn) not in set(self.filteredDf.columns)) or ('matchedUser.{}'.format(ruleColumn) not in set(self.filteredDf.columns))):\n logger.info('{} not in existing columns, skipping this rule'.format(ruleColumn))\n continue\n ruleType = self.strategies[\"hardConstraints\"][ruleColumn][\"ruleType\"]\n if (ruleType in self.strategiesEngine.hardConstraintsAlgoMap):\n options = self.strategies[\"hardConstraints\"][ruleColumn].copy()\n if (\"postfixes\" in options):\n extraColumnNames = list(map('.'.join, itertools.product([\"matchingUser\", \"matchedUser\"], ['{}{}'.format(ruleColumn, post) for post in options[\"postfixes\"]])))\n options[\"extraData\"] = [self.filteredDf[x] for x in extraColumnNames]\n needDrop = self.strategiesEngine.hardConstraintsAlgoMap[ruleType](self.filteredDf['matchingUser.{}'.format(ruleColumn)], self.filteredDf['matchedUser.{}'.format(ruleColumn)], optional = options)\n self.dropped[ruleColumn] = self.filteredDf.iloc[needDrop, :].copy()\n self.filteredDf = self.filteredDf.drop(needDrop, axis=0).reset_index(drop=True)\n else:\n logger.info('Method for {} : {} not yet implemented'.format(ruleColumn, ruleType))\n logger.info(\"Finished removeImpossiblePairs, remaining entries\")\n logger.info(self.filteredDf.shape)\n\n # this function computes ratings for created pairs based on \n def computeRatingsForPairs(self):\n logger.debug(\"computeRatingsForPairs for dataset\")\n filteredDf = self.filteredDf.copy()\n logger.debug(filteredDf.columns)\n self.ratedWeight = []\n self.scoreBoard = filteredDf[['matchingUser.UserName', 'matchedUser.UserName']]\n if (filteredDf.shape[0] == 0):\n return\n for ruleColumn in self.strategies[\"ratedConstraints\"]:\n logger.info(\"Processing rule named {}\".format(ruleColumn))\n if (('matchingUser.{}'.format(ruleColumn) not in set(filteredDf.columns)) or ('matchedUser.{}'.format(ruleColumn) not in set(filteredDf.columns))):\n logger.info('{} not in existing columns, skipping this rule'.format(ruleColumn))\n continue\n ruleType = self.strategies[\"ratedConstraints\"][ruleColumn][\"ruleType\"]\n if (ruleType in self.strategiesEngine.ratingAlgoMap):\n options = self.strategies[\"ratedConstraints\"][ruleColumn].copy()\n if (\"postfixes\" in options):\n extraColumnNames = list(map('.'.join, itertools.product([\"matchingUser\", \"matchedUser\"], ['{}{}'.format(ruleColumn, post) for post in options[\"postfixes\"]])))\n options[\"extraData\"] = [filteredDf[x] for x in extraColumnNames]\n score = self.strategiesEngine.ratingAlgoMap[ruleType](filteredDf['matchingUser.{}'.format(ruleColumn)], filteredDf['matchedUser.{}'.format(ruleColumn)], optional = options)\n print(score)\n scoreColumnName = '{}Score'.format(ruleColumn)\n self.scoreBoard = self.scoreBoard.assign(**{scoreColumnName: score.values})\n self.ratedWeight.append(self.strategies[\"ratedConstraints\"][ruleColumn][\"weight\"])\n else:\n logger.info('Method for {} : {} not yet implemented'.format(ruleColumn, ruleType))\n self.scoreBoard[\"totalScore\"] = np.matmul(np.array(self.scoreBoard.iloc[:, 2:]), np.array(self.ratedWeight))\n self.scoreBoard = self.scoreBoard.sort_values(by=['totalScore'], ascending = False)\n def getPairs(self):\n pairNumSoFar = 0\n existingUser = set()\n pairIndex = []\n logger.debug('Final score board:')\n logger.debug(self.scoreBoard)\n logger.debug(self.scoreBoard.shape)\n for i in range(self.scoreBoard.shape[0]):\n matchingUser, matchedUser = self.scoreBoard[['matchingUser.UserName', 'matchedUser.UserName']].iloc[i]\n if ( (matchingUser not 
in existingUser) and (matchedUser not in existingUser)):\n pairNumSoFar += 1\n pairIndex.append(i)\n existingUser.update([matchingUser, matchedUser])\n if (pairNumSoFar == self.strategies[\"totalPairs\"]):\n logger.info(\"Required {} pairs found, end searching\".format(self.strategies[\"totalPairs\"]))\n break\n self.finalPairs = self.scoreBoard.iloc[pairIndex]\n self.finalPairs = self.finalPairs.merge(self.df, how='inner', left_on=\"matchingUser.UserName\", right_on=\"UserName\", suffixes=('', 'MatchingUser'))\n self.finalPairs = self.finalPairs.merge(self.df, how='inner', left_on=\"matchedUser.UserName\", right_on=\"UserName\", suffixes=('', 'MatchedUser'))\n self.finalPairs = self.finalPairs[list(self.finalPairs.columns[:3+len(self.ratedWeight)]) + list(sorted(self.finalPairs.columns[3+len(self.ratedWeight): ]))]\n self.scoreBoard = self.scoreBoard.merge(self.df, how='inner', left_on=\"matchingUser.UserName\", right_on=\"UserName\", suffixes=('', 'MatchingUser'))\n self.scoreBoard = self.scoreBoard.merge(self.df, how='inner', left_on=\"matchedUser.UserName\", right_on=\"UserName\", suffixes=('', 'MatchedUser'))\n self.scoreBoard = self.scoreBoard[list(self.scoreBoard.columns[:3+len(self.ratedWeight)]) + list(sorted(self.scoreBoard.columns[3+len(self.ratedWeight): ]))]\n\n def generateReports(self):\n # output final pairs\n self.finalPairs.to_csv(\"../output/finalPairs.csv\")\n self.scoreBoard.to_csv(\"../output/scoreBoard.csv\")\n for key in self.dropped:\n self.dropped[key].to_csv(\"../output/droppedBy{}.csv\".format(key))\n\n\n # create a more detailed report\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='../configs'))\n template = env.get_template('reportTemplate.html')\n\n # get # of dropped pairs by feature\n self.finalDroppedTable = pd.DataFrame.from_dict(self.dropped, orient='index')\n droppedCounter = {}\n total = self.pairedDf.shape[0]\n droppedCounter = {\"totalPairsPossible\": total}\n for element in self.dropped:\n droppedCounter[element] = self.dropped[element].shape[0]\n total -= droppedCounter[element]\n droppedCounter[\"remaining\"] = total\n droppedCount = pd.DataFrame.from_dict(droppedCounter, orient='index')\n droppedCount.columns = ['#pairsDropped']\n\n # get most matched users\n allUsersMatched = list(self.scoreBoard['matchingUser.UserName']) + list(self.scoreBoard['matchedUser.UserName'])\n dict(collections.Counter(allUsersMatched))\n userMatchedCountTable = pd.DataFrame.from_dict(dict(collections.Counter(allUsersMatched)), orient='index').reset_index()\n try:\n userMatchedCountTable.columns = [\"userName\", \"matchCount\"]\n userTableWithMatchCount = self.df.merge(userMatchedCountTable, how='left', left_on=\"Unnamed: 0\", right_on=\"userName\")\n userTableWithMatchCountSorted = userTableWithMatchCount.sort_values(by=['matchCount'], ascending = False)\n except:\n userTableWithMatchCountSorted = userMatchedCountTable\n html = template.render(\n table=self.finalPairs.to_html(),\n droppedCountTable=droppedCount.to_html(),\n userByMatchCount = userTableWithMatchCountSorted.iloc[:, :].to_html()\n )\n with open('../output/matchingReport.html', 'w+') as f:\n f.write(html)\n\n \n\n\n \n","repo_name":"xiwu5/matchingAlgo","sub_path":"dataProcessor.py","file_name":"dataProcessor.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"24329933014","text":"# LINK TO THE PROBLEM => https://www.hackerrank.com/challenges/quicksort2/problem\n\ndef 
quickSort(arr):\n if len(arr) == 1:\n return arr\n smaller, larger, p = divide(arr)\n if len(smaller) > 1:\n smaller = quickSort(smaller)\n if len(larger) > 1:\n larger = quickSort(larger)\n print(' '.join([str(x) for x in smaller + [p] + larger]))\n return smaller + [p] + larger\n \ndef divide(arr):\n p = arr[0]\n smaller = []\n larger = []\n for i in arr:\n if i > p:\n larger.append(i)\n elif i < p:\n smaller.append(i)\n return smaller, larger, p\n\ninput_1 = input()\ninput_2 = input().split(' ')\narr = []\nfor i in input_2:\n arr.append(int(i))\n\nquickSort(arr)\n","repo_name":"Natneam/competitive-programming","sub_path":"a2sv camp problems contests and more/Day 8/Quick_sort_2.py","file_name":"Quick_sort_2.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"74771302022","text":"\"\"\"This module patch the ``flair.models.sequence_tagger_model.SequenceTagger\\\n.predict`` method in order to not reverse sort all sequences by their length \\\nbefore creating the batch that will be feed to the model. By doing this we \\\npreserve the possibility for the ``flair.data.Sentence`` to have the good \\\nleft and right context (see Flair issues #2350 and #2650).\n\"\"\"\nimport logging\nfrom typing import List, Optional, Union\n\nimport flair.nn\nimport torch\nimport torch.nn\nfrom fastcore.basics import patch_to\nfrom flair.data import Sentence, Span\nfrom flair.datasets import DataLoader, FlairDatapointDataset\nfrom flair.models.sequence_tagger_model import SequenceTagger\nfrom flair.models.sequence_tagger_utils.bioes import get_spans_from_bio\nfrom flair.training_utils import store_embeddings\nfrom tqdm import tqdm\n\nlog = logging.getLogger(\"flair\")\n\n\n@patch_to(SequenceTagger)\ndef predict(\n self: SequenceTagger,\n sentences: Union[List[Sentence], Sentence],\n mini_batch_size: int = 32,\n return_probabilities_for_all_classes: bool = False,\n verbose: bool = False,\n label_name: Optional[str] = None,\n return_loss=False,\n embedding_storage_mode=\"none\",\n force_token_predictions: bool = False,\n): # type: ignore\n \"\"\"\n Predicts labels for current batch with CRF or Softmax.\n :param sentences: List of sentences in batch\n :param mini_batch_size: batch size for test data\n :param return_probabilities_for_all_classes: Whether to return probabilities for all classes\n :param verbose: whether to use progress bar\n :param label_name: which label to predict\n :param return_loss: whether to return loss value\n :param embedding_storage_mode: determines where to store embeddings - can be \"gpu\", \"cpu\" or None.\n \"\"\"\n if label_name is None:\n label_name = self.tag_type\n\n with torch.no_grad():\n if not sentences:\n return sentences\n\n # make sure its a list\n if not isinstance(sentences, list) and not isinstance(\n sentences, flair.data.Dataset\n ):\n sentences = [sentences]\n\n # filter empty sentences\n sentences = [sentence for sentence in sentences if len(sentence) > 0]\n\n # reverse sort all sequences by their length\n reordered_sentences = (\n sentences # sorted(sentences, key=lambda s: len(s), reverse=True)\n )\n\n if len(reordered_sentences) == 0:\n return sentences\n\n dataloader = DataLoader(\n dataset=FlairDatapointDataset(reordered_sentences),\n batch_size=mini_batch_size,\n )\n # progress bar for verbosity\n if verbose:\n dataloader = tqdm(dataloader, desc=\"Batch inference\")\n\n overall_loss = torch.zeros(1, device=flair.device)\n batch_no = 0\n label_count = 0\n for 
batch in dataloader:\n\n batch_no += 1\n\n # stop if all sentences are empty\n if not batch:\n continue\n\n # get features from forward propagation\n features, gold_labels = self.forward(batch)\n\n # remove previously predicted labels of this type\n for sentence in batch:\n sentence.remove_labels(label_name)\n\n # if return_loss, get loss value\n if return_loss:\n loss = self._calculate_loss(features, gold_labels)\n overall_loss += loss[0]\n label_count += loss[1]\n\n # Sort batch in same way as forward propagation\n lengths = torch.LongTensor([len(sentence) for sentence in batch])\n _, sort_indices = lengths.sort(dim=0, descending=True)\n batch = [batch[i] for i in sort_indices]\n\n # make predictions\n if self.use_crf:\n predictions, all_tags = self.viterbi_decoder.decode(\n features, return_probabilities_for_all_classes, batch\n )\n else:\n predictions, all_tags = self._standard_inference(\n features, batch, return_probabilities_for_all_classes\n )\n\n # add predictions to Sentence\n for sentence, sentence_predictions in zip(batch, predictions):\n\n # BIOES-labels need to be converted to spans\n if self.predict_spans and not force_token_predictions:\n sentence_tags = [\n label[0] for label in sentence_predictions\n ]\n sentence_scores = [\n label[1] for label in sentence_predictions\n ]\n predicted_spans = get_spans_from_bio(\n sentence_tags, sentence_scores\n )\n for predicted_span in predicted_spans:\n span: Span = sentence[\n predicted_span[0][0] : predicted_span[0][-1] + 1\n ]\n span.add_label(\n label_name,\n value=predicted_span[2],\n score=predicted_span[1],\n )\n\n # token-labels can be added directly (\"O\" and legacy \"_\" predictions are skipped)\n else:\n for token, label in zip(\n sentence.tokens, sentence_predictions\n ):\n if label[0] in [\"O\", \"_\"]:\n continue\n token.add_label(\n typename=label_name, value=label[0], score=label[1]\n )\n\n # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided\n for (sentence, sent_all_tags) in zip(batch, all_tags):\n for (token, token_all_tags) in zip(\n sentence.tokens, sent_all_tags\n ):\n token.add_tags_proba_dist(label_name, token_all_tags)\n\n store_embeddings(sentences, storage_mode=embedding_storage_mode)\n\n if return_loss:\n return overall_loss, label_count\n","repo_name":"GuiGel/MedDocAn","sub_path":"meddocan/models/sequence_tagger_model.py","file_name":"sequence_tagger_model.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"4584593365","text":"# from tkinter import *\nimport Tkinter as tk\nfrom winsound import *\n\nroot = tk.Tk() # create tkinter window\n\nplay = lambda: PlaySound('Sound.wav', SND_FILENAME)\nbutton = tk.Button(root, text = 'Play', command = play)\n\nbutton.pack()\nroot.mainloop()","repo_name":"RishabhBrajabasi/Internship_IITB","sub_path":"Rough Space 3.py","file_name":"Rough Space 3.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"23581992057","text":"# problem statement\n\"\"\"Max Min List Implement a function called max_min(lst) which will re-arrange the elements of a sorted list such\nthat the 0th index will have the largest number, the 1st index will have the smallest, and the 2nd index will have\nsecond-largest, and so on. 
In other words, all the even-numbered indices will have the largest numbers in the list in\ndescending order and the odd-numbered indices will have the smallest numbers in ascending order.\n\nInput -\nA sorted list of size n\n\nOutput -\nMax min list\n\n[1,2,3,4] -> [1,2] [3,4]\niterate forward in first list and backwards in second list.\n\n[4, 1, 3, 2]\n\n[1,2,3,4,5] -> [5,1,4,2,3]\n\nApproach 1:\nDivide the list in half\nif even number of elements:\n floor(size - 1)/2\nif odd no of elem:\n size/2\n\nwhile index1 < l1.size && index2 < l2.size:\n res.append(l2[index2])\n res.append(l1[index1])\n index1 += 1\n index2 += 1\n\nreturn res\n\"\"\"\n\n\ndef approach_1(lst):\n res = []\n ind1 = 0\n ind2 = len(lst) - 1\n if len(lst) % 2 == 0:\n split = len(lst) // 2 - 1\n else:\n split = len(lst) // 2\n\n print(split)\n l1 = lst[:2]\n l2 = lst[2:]\n\n # while ind1 < split < ind2:\n # # if ind1 > ind2:\n # # break\n # res.append(lst[ind2])\n # res.append(lst[ind1])\n # ind2 -= 1\n # ind1 += 1\n\n for i, j in l1 and l2:\n print(i, j)\n\n return res\n\n\nl1 = list(range(4))\n# print(approach_1(l1))\n\n\ndef approach_2(lst):\n i1 = 0\n i2 = len(lst) - 1\n res = []\n div = len(lst) % 2\n while i1 <= i2:\n if i1 == i2:\n if div == 0:\n break\n else:\n res.append(lst[i2])\n break\n res.append(lst[i2])\n res.append(lst[i1])\n i2 -= 1\n i1 += 1\n return res\n\n\nprint(approach_2(l1))\n","repo_name":"apnork/DSA","sub_path":"Lists/max_min_list.py","file_name":"max_min_list.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"42344002219","text":"#! python3\n# The fraction 49/98 is a curious fraction, as an inexperienced mathematician in \n# attempting to simplify it may incorrectly believe that 49/98 = 4/8, \n# which is correct, is obtained by cancelling the 9s.\n#\n# We shall consider fractions like, 30/50 = 3/5, to be trivial examples.\n# There are exactly four non-trivial examples of this type of fraction, \n# less than one in value, and containing two digits in the numerator and denominator.\n#\n# If the product of these four fractions is given in its lowest common terms, \n# find the value of the denominator.\n\nimport fractions\nfrom functools import reduce\n\n\ndef getdigits(num):\n digits = set([])\n while num > 0:\n digits.add(num % 10)\n num = num // 10\n return digits\n\t\n\ndef dumb_simplify(num, denum):\n\tnum_digits = getdigits(num)\n\tdenum_digits = getdigits(denum)\n\tif len(num_digits) == 2 and len(denum_digits) == 2:\n\t\tif num_digits & denum_digits == set():\n\t\t\treturn [0, 1]\n\t\telif num_digits & denum_digits == {0}:\n\t\t\treturn [0, 1]\n\t\telse:\n\t\t\tnew_num_digits = num_digits - denum_digits\n\t\t\tnew_denum_digits = denum_digits - num_digits\n\t\t\tif 0 in new_denum_digits:\n\t\t\t\treturn [0, 1]\n\t\t\telif len(new_num_digits) == 0:\n\t\t\t\treturn [0, 1]\n\t\t\telse:\n\t\t\t\treturn [new_num_digits.pop(), new_denum_digits.pop()]\n\telse:\n\t\treturn [0, 1]\n\t\t\n\t\t\nres = []\n\nfor n in range(10, 100):\n\tfor d in range(n + 1, 100):\n\t\tfrac = fractions.Fraction(n, d)\n\t\tdumb_simp = dumb_simplify(n, d)\n\t\tdumb_frac = fractions.Fraction(dumb_simp[0], dumb_simp[1])\n\t\tif frac == dumb_frac:\n\t\t\tres.append(frac)\n\t\t\tprint(f'{n}/{d}')\n\nproduct = reduce((lambda x, y: x*y), 
res)\nprint(product)","repo_name":"bruyss/Project-Euler","sub_path":"33_digitcancellingsum.py","file_name":"33_digitcancellingsum.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70925627463","text":"import os\nimport math\nimport sys\n\nimport pygame\n\nfrom get_map import Map\nfrom buttons import LayersButton, SearchButton, ResetButton, CheckButton\nfrom input_field import InputField\nfrom geocoder_funcs import get_response, get_object_info\nfrom info_field import InfoField\n\n\nclass MapWindow(object):\n LON_STEP, LAT_STEP = 0.02, 0.008\n\n def __init__(self, width, height):\n self.z = 15\n self.coordinates = ['37.620070', '55.753630'] # Долгота (lon), Широта (lat)\n self.pts = list()\n self.type_layer = 'map'\n self.org = ''\n self.buttons = pygame.sprite.Group()\n self.l_btn = LayersButton(self.buttons, self)\n self.reset_btn = ResetButton(self.buttons, 10, 49, 'Сброс поискового результата', self)\n self.search = InputField(self)\n self.btn_search = SearchButton(self.buttons, self.search.outer_rect.x + 10 + self.search.outer_rect.width,\n self.search.outer_rect.y, self, self.search)\n self.postal_code_btn = CheckButton(self.buttons, self)\n self.info = InfoField('')\n self.last_search = ''\n self.map = Map(self.coordinates, self.z, self.pts, self.type_layer)\n self.get_map()\n self.w, self.h = width, height\n pygame.init()\n self.screen = pygame.display.set_mode((width, height))\n pygame.display.flip()\n\n def update_map(self):\n self.map = Map(self.coordinates, self.z, self.pts, self.type_layer)\n self.get_map()\n\n def update(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_PAGEUP:\n self.z = self.z + 1 if self.z < 19 else 19\n self.update_map()\n if event.key == pygame.K_PAGEDOWN:\n self.z = self.z - 1 if self.z > 2 else 2\n self.update_map()\n if event.key == pygame.K_DOWN:\n lat = self.LAT_STEP * math.pow(2, 15 - self.z)\n lat = 70 + float(self.coordinates[1]) if float(self.coordinates[1]) - lat < -70 else lat\n self.coordinates = self.coordinates[0], str(float(self.coordinates[1]) - lat)\n self.update_map()\n if event.key == pygame.K_UP:\n lat = self.LAT_STEP * math.pow(2, 15 - self.z)\n lat = 70 - float(self.coordinates[1]) if float(self.coordinates[1]) + lat > 70 else lat\n self.coordinates = self.coordinates[0], str(float(self.coordinates[1]) + lat)\n self.update_map()\n if event.key == pygame.K_LEFT:\n lon = self.LON_STEP * math.pow(2, 15 - self.z)\n lon = 160 + float(self.coordinates[0]) if float(self.coordinates[0]) - lon < -160 else lon\n self.coordinates = str(float(self.coordinates[0]) - lon), self.coordinates[1]\n self.update_map()\n if event.key == pygame.K_RIGHT:\n lon = self.LON_STEP * math.pow(2, 15 - self.z)\n lon = 160 - float(self.coordinates[0]) if float(self.coordinates[0]) + lon > 160 else lon\n self.coordinates = str(float(self.coordinates[0]) + lon), self.coordinates[1]\n self.update_map()\n if event.type == pygame.MOUSEMOTION:\n pass\n if event.type == pygame.MOUSEBUTTONDOWN:\n btns_array = [self.postal_code_btn.rect.collidepoint(event.pos[0], event.pos[1]),\n self.l_btn.rect.collidepoint(event.pos[0], event.pos[1]),\n self.search.outer_rect.collidepoint(event.pos[0], event.pos[1]),\n self.reset_btn.rect.collidepoint(event.pos[0], event.pos[1]),\n self.btn_search.rect.collidepoint(event.pos[0], event.pos[1])]\n 
btns_array.extend([x.rect.collidepoint(event.pos[0], event.pos[1]) for x in self.l_btn.layers_buttons])\n if not any(btns_array):\n step_lon, step_lat, upper_corner_left = self.get_step()\n coordinates = [str(float(upper_corner_left[0]) + step_lon * event.pos[0]),\n str(float(upper_corner_left[1]) - step_lat * event.pos[1])]\n if event.button == 1:\n self.reset_search()\n self.append_pt(coordinates[0], coordinates[1])\n self.search_object(','.join(x for x in coordinates), type_of_request='click')\n elif event.button == 3:\n self.reset_search()\n data = self.map.search_org(coordinates)\n if data is not None:\n self.org = data.get('name', '')\n org_coordinates = data.get('coordinates')\n self.append_pt(org_coordinates[0], org_coordinates[1])\n self.search_object(','.join(data.get('coordinates')), type_of_request='click', org=True)\n\n self.l_btn.update(event)\n self.search.update(event)\n self.btn_search.update(event)\n self.reset_btn.update(event)\n self.postal_code_btn.update(event)\n\n def draw(self):\n self.screen.blit(pygame.image.load(os.path.join('map_parts/', self.map.name)), (0, 0))\n self.l_btn.draw(self.screen)\n self.search.draw(self.screen)\n self.btn_search.draw(self.screen)\n self.reset_btn.draw(self.screen)\n self.info.draw(self.screen)\n self.postal_code_btn.draw(self.screen)\n pygame.display.flip()\n self.update()\n\n def get_step(self):\n lon = self.LON_STEP * math.pow(2, 15 - self.z) / 1.55\n lat = self.LAT_STEP * math.pow(2, 15 - self.z) / 1.47\n upper_corner_right = str(float(self.coordinates[0]) + lon), str(float(self.coordinates[1]) + lat)\n lower_corner_left = str(float(self.coordinates[0]) - lon), str(float(self.coordinates[1]) - lat)\n upper_corner_left = str(float(self.coordinates[0]) - lon), str(float(self.coordinates[1]) + lat)\n step_lon = abs(float(lower_corner_left[0]) - float(upper_corner_right[0])) / self.w\n step_lat = abs(float(lower_corner_left[1]) - float(upper_corner_right[1])) / self.h\n return step_lon, step_lat, upper_corner_left\n\n def append_pt(self, lon, lat):\n self.pts.append('{},{},round'.format(lon, lat))\n\n def reset_search(self):\n self.pts.clear()\n self.org = ''\n self.last_search = ''\n self.info.change_address('')\n self.update_map()\n\n def update_search(self):\n self.search_object(self.last_search)\n\n def search_object(self, text, type_of_request=None, org=False):\n if text != '':\n self.last_search = text\n if type_of_request is None:\n self.pts.clear()\n self.search.text = ''\n data = get_object_info(get_response(text))\n if data is not None:\n coords = data.get('coordinates')[0], data.get('coordinates')[1]\n self.append_pt(coords[0], coords[1])\n self.coordinates = coords\n if self.postal_code_btn.state:\n if org:\n self.info.change_address('{}, {}. Индекс: {}'.format(data.get('address'),\n self.org,\n data.get('postal_code')))\n else:\n self.info.change_address('{}. Индекс: {}'.format(data.get('address'),\n data.get('postal_code')))\n else:\n if org:\n self.info.change_address('{}, {}'.format(data.get('address'), self.org))\n else:\n self.info.change_address(data.get('address'))\n else:\n data = get_object_info(get_response(text))\n if data is not None:\n self.coordinates = text.split(',')\n if self.postal_code_btn.state:\n if org:\n self.info.change_address('{}, {}. Индекс: {}'.format(data.get('address'),\n self.org,\n data.get('postal_code')))\n else:\n self.info.change_address('{}. 
Индекс: {}'.format(data.get('address'),\n data.get('postal_code')))\n else:\n if org:\n self.info.change_address('{}, {}'.format(data.get('address'), self.org))\n else:\n self.info.change_address(data.get('address'))\n self.update_map()\n\n def get_map(self):\n try:\n self.map.get_map()\n except BaseException as e:\n print('Возникла ошибка при получении карты: {}. Работа программы завершена.'.format(e))\n pygame.quit()\n sys.exit(0)\n\n\nmap_show = MapWindow(600, 450)\nwhile True:\n map_show.draw()\n","repo_name":"Bopobywek/simple_map_YandexAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"32269459948","text":"from django.conf.urls import url\nfrom . import views\nfrom .views import RegisterView\n\nurlpatterns = [\n url(r'^login/$', views.login, name='login'),\n url(r'^register/$', RegisterView.as_view(), name='register_view'),\n url(r'^logout/$', views.logout_view, name='logout'),\n url(r'^cargo/(?P\\d+)/$', views.cargo_detail, name='cargo_detail'),\n url(r'^cargo/$', views.cargo_list, name='cargo_list'),\n url(r'^cargo/$', views.cargos_list, name='cargos_list'),\n url(r'^cargo/center/$', views.cargo_center_list, name='cargo_center_list'),\n]\n","repo_name":"xbsonmez/CargoSystem","sub_path":"CargoSystem/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"26039277470","text":"from uiautomator import device as d\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport numpy as np\r\nimport requests\r\nimport os\r\nimport time\r\ndef setAndroidHome():\r\n os.environ[\"ANDROID_HOME\"] = \"/Users/abg/Library/Android/sdk\"\r\n os.environ[\"PATH\"] = \"${PATH}:$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools\"\r\n#setAndroidHome()\r\ncenter = [500, 800]\r\nprint(\"started..\")\r\ndef getXmlSnapshot():\r\n xml = d.dump()\r\n #message_1=d(resourceId=\"com.whatsapp:id/single_msg_tv\" ).text\r\n soup = BeautifulSoup(xml, 'xml')\r\n #print(message_1)\r\n print(xml)\r\n#getXmlSnapshot()\r\n# swiping functions\r\ndef fullPageSwipe():\r\n d.swipe(100, 1400, 100, 150, steps=50)\r\n\r\ndef miniSwipe():\r\n d.swipe(100, 1000, 100, 600, steps=50)\r\nprint(\"started..\")\r\ndef read_message():\r\n new_message =d(resourceId=\"com.whatsapp:id/message_text\").text\r\n print(new_message)\r\n return new_message\r\n#getXmlSnapshot()\r\nprint(\"started..\")\r\ndef sendMessageOnWhatsApp(msg):\r\n d(resourceId='com.whatsapp:id/entry').set_text(msg)\r\n d(resourceId='com.whatsapp:id/send').click()\r\nprint(\"started..\")\r\ndef readLastMsg ():\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage)\r\n#readLastMsg()\r\n#readLastMsg()\r\ndef openunseenmsg ():\r\n number=d(resourceId=\"com.whatsapp:id/conversations_row_message_count\").text\r\n d(resourceId=\"com.whatsapp:id/conversations_row_message_count\").click()\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n sendMessageOnWhatsApp(\" welcome to queen's press : 1,2\")\r\n check_new_msg()\r\n\r\n#sendMessageOnWhatsApp(read_message())\r\ndef sharee ():\r\n d(resourceId=\"com.whatsapp:id/input_attach_button\").click()\r\n d(text=\"Gallery\").click()\r\n d(text=\"Download\").click()\r\n 
count=d(className=\"android.widget.ImageView\").count\r\n #print(count)\r\n d(resourceId=\"com.whatsapp:id/menuitem_select_multiple\").click()\r\n for i in range(1,6):\r\n d(className=\"android.widget.ImageView\",instance=i).click()\r\n time.sleep(1)\r\n d(text=\"OK\").click()\r\n d(resourceId=\"com.whatsapp:id/send\").click()\r\ndef moresharee():\r\n d(resourceId=\"com.whatsapp:id/input_attach_button\").click()\r\n d(text=\"Gallery\").click()\r\n d(text=\"Download\").click()\r\n count=d(className=\"android.widget.ImageView\").count\r\n #print(count)\r\n d(resourceId=\"com.whatsapp:id/menuitem_select_multiple\").click()\r\n for i in range(7,8):\r\n d(className=\"android.widget.ImageView\",instance=i).click()\r\n time.sleep(1)\r\n d(text=\"OK\").click()\r\n d(resourceId=\"com.whatsapp:id/send\").click()\r\n \r\ndef kurti ():\r\n d(resourceId=\"com.whatsapp:id/input_attach_button\").click()\r\n d(text=\"Gallery\").click()\r\n d(text=\"Download\").click()\r\n count=d(className=\"android.widget.ImageView\").count\r\n d(resourceId=\"com.whatsapp:id/menuitem_select_multiple\").click()\r\n for i in range(9,14):\r\n d(className=\"android.widget.ImageView\",instance=i).click()\r\n time.sleep(1)\r\n d(text=\"OK\").click()\r\n d(resourceId=\"com.whatsapp:id/send\").click()\r\ndef morekurti():\r\n d(resourceId=\"com.whatsapp:id/input_attach_button\").click()\r\n d(text=\"Gallery\").click()\r\n d(text=\"Download\").click()\r\n count=d(className=\"android.widget.ImageView\").count\r\n print(count)\r\n d(resourceId=\"com.whatsapp:id/menuitem_select_multiple\").click()\r\n for i in range(15,18):\r\n d(className=\"android.widget.ImageView\",instance=i).click()\r\n time.sleep(1)\r\n d(text=\"OK\").click()\r\n d(resourceId=\"com.whatsapp:id/send\").click()\r\n#media (\"Kurti\")\r\ndef getCoordinatesFromMatrix(index):\r\n d.wait.idle()\r\n soup = BeautifulSoup(d.dump(), 'xml')\r\n w, h = getWindowSize(soup)\r\n w0, h0, y0 = getItemSizeAndStartY(soup)\r\n print(\"w0, h0, y0 : \", w0, h0, y0)\r\n m = int(w/w0) #no of pics in a row\r\n print(\"m: \", m)\r\n x_req = ((index % m) - 0.5) * w0\r\n y_req = y0 + (int(index/m) + 0.5) * h0\r\n return [x_req, y_req]\r\ndef getWindowSize(soup):\r\n bounds =soup.find(lambda tag:tag.name == \"node\" and tag[\"package\"] == \"com.whatsapp\")[\"bounds\"]\r\n x, y, w, h = getCornersFromBounds(bounds)\r\n print(w, \" - \", h)\r\n return [w, h]\r\ndef getItemSizeAndStartY(soup):\r\n # soup = BeautifulSoup(d.dump(), 'xml')\r\n bounds = soup.find(lambda tag:tag.name == \"node\" and \"ImageView\" in tag[\"class\"] and tag[\"package\"] == \"com.whatsapp\")[\"bounds\"]\r\n x, y, w, h = getCornersFromBounds(bounds)\r\n return [w-x, h-y, y]\r\n\r\ndef getCornersFromBounds(bounds):\r\n corners = bounds.replace('][',',').replace(']','').replace('[','').split(',')\r\n x, y, w, h = [int(corner) for corner in corners]\r\n return [x,y,w,h]\r\n\r\ndef sendMedia(index):\r\n d(resourceId=\"com.whatsapp:id/input_attach_button\").click()\r\n d(text=\"Gallery\").click()\r\n d.wait.idle()\r\n # d(resourceId=\"com.whatsapp:id/title\").click()\r\n d(text=\"All media\").click()\r\n d.wait.update()\r\n count=d(className=\"android.widget.ImageView\").count \r\n # d(className=\"android.widget.ImageView\").click()\r\n clickOnImage(index)\r\n #print(count)\r\n d.wait.idle()\r\n d(resourceId=\"com.whatsapp:id/send\").click()\r\n\r\ndef clickOnImage(index):\r\n print(\"here\")\r\n d.click(*getCoordinatesFromMatrix(index))\r\n\r\ndef keepCheckingForNewMessage():\r\n while(not 
d(resourceId=\"com.whatsapp:id/conversations_row_message_count\").exists):\r\n time.sleep(2)\r\n openunseenmsg()\r\n#sendMedia(3)\r\ndef read_inside_msg():\r\n a=d.dump()\r\n count_1=d(resourceId='com.whatsapp:id/message_text').count\r\n time.sleep(30)\r\n count_2=d(resourceId='com.whatsapp:id/message_text').count\r\n b=d.dump()\r\n if a != b and count_1!=count_2:\r\n sendMessageOnWhatsApp(\"Hi\")\r\ndef check_new_msg():\r\n counter=0\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage_1=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_1)\r\n print(\".....\")\r\n while(counter<30):\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n time.sleep(2)\r\n lastmessage_2=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_2)\r\n counter=counter+1\r\n if lastmessage_1!=lastmessage_2:\r\n break\r\n print(counter)\r\n if counter < 30:\r\n print(\"wait..\")\r\n if lastmessage_2== '1':\r\n sharee()\r\n sendMessageOnWhatsApp(\"To see More press 0\")\r\n sendMessageOnWhatsApp(\"TO buy press the resource id\")\r\n sendMessageOnWhatsApp(\"To see kurti press 2\")\r\n counter_1=0\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage_1=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_1)\r\n print(\"....\") \r\n while(counter_1<30):\r\n time.sleep(2)\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage_2=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_2)\r\n counter_1=counter_1+1\r\n if lastmessage_1!=lastmessage_2:\r\n break\r\n print(counter_1)\r\n if counter_1 < 30:\r\n print(\"wait..\")\r\n print(lastmessage_2)\r\n if lastmessage_2== \"0\":\r\n moresharee()\r\n if lastmessage_2 == \"2\":\r\n kurti()\r\n else:\r\n sendMessageOnWhatsApp(\"Thanks For Buying Product\") \r\n if lastmessage_2=='2':\r\n kurti()\r\n sendMessageOnWhatsApp(\"To see More press 0\")\r\n sendMessageOnWhatsApp(\"TO buy press the resource id\")\r\n sendMessageOnWhatsApp(\"To see sharee press 1\")\r\n counter_1=0\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage_1=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_1)\r\n print(\"....\") \r\n while(counter_1<30):\r\n time.sleep(2)\r\n count=d(resourceId='com.whatsapp:id/message_text').count\r\n lastmessage_2=d(resourceId='com.whatsapp:id/message_text',instance=count-1).text\r\n print(lastmessage_2)\r\n counter_1=counter_1+1\r\n if lastmessage_1!=lastmessage_2:\r\n break\r\n print(counter_1)\r\n if counter_1 < 30:\r\n print(\"wait..\")\r\n print(lastmessage_2)\r\n if lastmessage_2== \"0\":\r\n morekurti()\r\n if lastmessage_2 == \"2\":\r\n sharee()\r\n else:\r\n sendMessageOnWhatsApp(\"Thanks For Buying Product\") \r\nopenunseenmsg() \r\n#read_inside_msg()\r\n#keepCheckingForNewMessage()\r\n#def downloadImage():\r\n #message_1=d(resourceId=\"com.whatsapp:id/single_msg_tv\" ).text\r\n #os.system(\"adb shell input text \" + url)\r\n #print(message_1)\r\n #d.press.enter()\r\n #time.sleep(3)\r\n #d.long_click(*center)\r\n # d(text='Download image').click()\r\n #d(text='Save image').click()\r\n\r\n\r\n#url = 
\"https://s3.ap-south-1.amazonaws.com/static.queensapp.in/queensSmall.png\"\r\n#downloadImage()\r\n#getXmlSnapshot()\r\n\r\n","repo_name":"Shau1111/whatsapp_uiautomator","sub_path":"queen_s.py","file_name":"queen_s.py","file_ext":"py","file_size_in_byte":9580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"72542275141","text":"import unittest\n\n# complexity O(n) Time and Space\ndef is_binary_search_tree(root):\n \n # Determine if the tree is a valid binary search tree\n \n # idea (inspired by interviewcake):\n ## We do a depth-first walk through the tree, \n ## testing each node for validity as we go. \n ## If a node appears in the left subtree of an ancestor, \n ## it must be less than that ancestor.\n ## If a node appears in the right subtree of an ancestor, \n ## it must be greater than that ancestor. \n\n \n node_and_bounds_stack = [(root,-float('inf'),float('inf'))]\n \n while len(node_and_bounds_stack):\n \n node, lower_bound, upper_bound = node_and_bounds_stack.pop() \n # 2 cases, node or leaf.\n # If this node is invalid, we return false right away\n if node.value <= lower_bound or node.value >= upper_bound:\n return False\n \n if node.left:\n # This node must be less than the current node\n node_and_bounds_stack.append([node.left,lower_bound,node.value])\n # This node must be greater than the current node\n if node.right:\n node_and_bounds_stack.append([node.right,node.value,upper_bound])\n\n \n return True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Tests (by interview cake)\n\nclass Test(unittest.TestCase):\n\n class BinaryTreeNode(object):\n\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def insert_left(self, value):\n self.left = Test.BinaryTreeNode(value)\n return self.left\n\n def insert_right(self, value):\n self.right = Test.BinaryTreeNode(value)\n return self.right\n\n def test_valid_full_tree(self):\n tree = Test.BinaryTreeNode(50)\n left = tree.insert_left(30)\n right = tree.insert_right(70)\n left.insert_left(10)\n left.insert_right(40)\n right.insert_left(60)\n right.insert_right(80)\n result = is_binary_search_tree(tree)\n self.assertTrue(result)\n\n def test_both_subtrees_valid(self):\n tree = Test.BinaryTreeNode(50)\n left = tree.insert_left(30)\n right = tree.insert_right(80)\n left.insert_left(20)\n left.insert_right(60)\n right.insert_left(70)\n right.insert_right(90)\n result = is_binary_search_tree(tree)\n self.assertFalse(result)\n\n def test_descending_linked_list(self):\n tree = Test.BinaryTreeNode(50)\n left = tree.insert_left(40)\n left_left = left.insert_left(30)\n left_left_left = left_left.insert_left(20)\n left_left_left.insert_left(10)\n result = is_binary_search_tree(tree)\n self.assertTrue(result)\n\n def test_out_of_order_linked_list(self):\n tree = Test.BinaryTreeNode(50)\n right = tree.insert_right(70)\n right_right = right.insert_right(60)\n right_right.insert_right(80)\n result = is_binary_search_tree(tree)\n self.assertFalse(result)\n\n def test_one_node_tree(self):\n tree = Test.BinaryTreeNode(50)\n result = is_binary_search_tree(tree)\n self.assertTrue(result)\n\n\nunittest.main(verbosity=2)\n","repo_name":"alaouiib/DS_and_Algorithms_Training","sub_path":"is_bst.py","file_name":"is_bst.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"33720273876","text":"from json import loads\nfrom typing import Dict, List\nfrom urllib.error import 
URLError\nfrom urllib.request import Request, urlopen\n\nfrom hp_class_action.hp_database.mdb_handlers import (fetch_query)\n\n\ndef get_visitors_info() -> List[Dict]:\n \"\"\"Returns True if csv row has already been uploaded to mdb\"\"\"\n sql_query = \"\"\"\n SELECT visit_datetime \"last_visit_datetime\", ip_address, \n country_name, city, org, user_agent, COUNT(*) \"visits\"\n FROM hp_trial.website_visitors_info\n GROUP BY ip_address, country_name, city, org\n ORDER BY visit_datetime ASC\n \"\"\"\n parameters = None\n results = fetch_query(sql_query=sql_query, variables=parameters)\n\n return results\n\n\ndef get_json_request(url):\n req = Request(url)\n try:\n response = urlopen(url=req, timeout=20, )\n except URLError as e:\n if hasattr(e, 'reason'):\n print('We failed to reach a server.')\n print('Reason: ', e.reason)\n elif hasattr(e, 'code'):\n print(\"The server couldn't fulfill the request.\")\n print('Error code: ', e.code)\n return None\n\n # read JSOn data\n # https://stackoverflow.com/questions/32795460/loading-json-object-in-python-using-urllib-request-and-json-modules\n encoding = response.info().get_content_charset('utf-8')\n data = response.read()\n response = loads(data.decode(encoding))\n return response\n\n\ndef print_arin_info(mdb_row: dict) -> bool:\n \"\"\"https://www.arin.net/resources/registry/whois/rdap/\"\"\"\n \"\"\" same without using requests lib\n https://github.com/rush-dev/arin-whois/blob/master/whois.py\n \"\"\"\n response_format = \"json\"\n url = f'http://whois.arin.net/rest/ip/{mdb_row[\"ip_address\"]}.{response_format}'\n\n print(\"#\" * 50, \"ARIN Information\", \"#\" * 50)\n print(f\"getting info for ip: {mdb_row['ip_address']} \"\n f\"in country: {mdb_row['country_name']}/{mdb_row['city']} \"\n f\"for org: {mdb_row['org']}\")\n ip_response = get_json_request(url=url)\n\n print(ip_response.get('net').get('resources').get(\"limitExceeded\"))\n # IP network categories\n start_address = ip_response['net']['startAddress']['$']\n end_address = ip_response['net']['endAddress']['$']\n handle = ip_response['net']['handle']['$']\n name = ip_response['net']['name']['$']\n try:\n org_name = ip_response['net']['orgRef']['@name']\n org_handle = ip_response['net']['orgRef']['@handle']\n except Exception as ex:\n print(f'Error with org_name/org_handle: {ex}')\n return False\n last_updated = ip_response['net']['updateDate']['$']\n rest_link = ip_response['net']['ref']['$']\n\n # Second GET request with organization name\n\n url = f'https://whois.arin.net/rest/org/{org_handle}.{response_format}'\n org_response = get_json_request(url=url)\n\n # Organization categories\n\n city = org_response['org']['city']['$']\n postal = org_response['org']['postalCode']['$']\n country = org_response['org']['iso3166-1']['code2']['$']\n org_last_updated = org_response['org']['updateDate']['$']\n org_rest_link = org_response['org']['ref']['$']\n\n # Try statements to catch commonly blank fields and differences in indexing on ARIN's side\n\n try:\n cidr = ip_response['net']['netBlocks']['netBlock']['cidrLength']['$']\n except TypeError:\n cidr = ip_response['net']['netBlocks']['netBlock'][0]['cidrLength']['$']\n\n try:\n net_type = ip_response['net']['netBlocks']['netBlock']['description']['$']\n except TypeError:\n net_type = ip_response['net']['netBlocks']['netBlock'][0]['description']['$']\n\n try:\n parent_name = ip_response['net']['parentNetRef']['@name']\n parent_handle = ip_response['net']['parentNetRef']['@handle']\n except KeyError:\n parent_name = ''\n parent_handle = ''\n\n 
try:\n origin_as = ip_response['net']['originASes']['originAS'][0]['$']\n except KeyError:\n origin_as = ''\n\n try:\n reg_date = ip_response['net']['registrationDate']['$']\n except KeyError:\n reg_date = ''\n\n try:\n org_reg_date = org_response['org']['registrationDate']['$']\n except KeyError:\n org_reg_date = ''\n\n try:\n state = org_response['org']['iso3166-2']['$']\n except KeyError:\n state = ''\n\n try:\n street = org_response['org']['streetAddress']['line']['$']\n except TypeError:\n street = org_response['org']['streetAddress']['line'][0]['$']\n\n # Output to terminal\n print('Network')\n print(f'NetRange: {start_address} - {end_address}')\n print(f'CIDR: {start_address}/{cidr}')\n print(f'Name: {name}')\n print(f'Handle: {handle}')\n print(f'Parent: {parent_name} ({parent_handle})')\n print(f'NetType: {net_type}')\n print(f'OriginAS: {origin_as}')\n print(f'Organization: {org_name} ({org_handle})')\n print(f'RegistrationDate: {reg_date}')\n print(f'LastUpdated: {last_updated}')\n print(f'RESTful Link: {rest_link}\\n')\n print('Organization')\n print(f'Name: {org_name}')\n print(f'Handle: {org_handle}')\n print(f'Street: {street}')\n print(f'City: {city}')\n print(f'State/Province: {state}')\n print(f'PostalCode: {postal}')\n print(f'Country: {country}')\n print(f'RegistrationDate: {org_reg_date}')\n print(f'LastUpdated: {org_last_updated}')\n print(f'RESTful Link: {org_rest_link}')\n return True\n\n\ndef print_all_visitors_info():\n visitors = get_visitors_info()\n errors = []\n for visitor in visitors:\n if not print_arin_info(mdb_row=visitor):\n errors.append(visitor)\n if errors:\n print(\"-\" * 100)\n print(f\"ARIN couldn't find information for the followings IPs:\")\n [print(f\" {index + 1:02d}-\", error) for index, error in enumerate(errors)]\n\n\nif __name__ == '__main__':\n get_visitors_info()\n print_all_visitors_info()\n","repo_name":"nono-london/hp_class_action","sub_path":"hp_class_action/hp_website/visitors_ip/arin_whois.py","file_name":"arin_whois.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"28486287076","text":"#!/usr/bin/env python\n\nimport rospy, math\nfrom numpy import interp\nfrom std_msgs.msg import Float32, Float64MultiArray\nfrom geometry_msgs.msg import Twist, TwistStamped, Pose, Point, Quaternion, TransformStamped\nfrom nav_msgs.msg import Odometry\nfrom tf2_ros import TransformBroadcaster\nimport tf\n\n\n\nclass Odometry_Calculator:\n def __init__(self):\n ## tbd \n rospy.init_node(\"odometry_calculator\", anonymous=True)\n self.throttle = 0.0\n self.steering = 0.0\n self.pose = Pose()\n self.angular_vel = 0.0\n self.last_timestamp = rospy.Time.now()\n self.odom_pub = rospy.Publisher(\"odom\", Odometry, queue_size=10)\n self.odom_trans_broadcaster = TransformBroadcaster()\n\n def calculate_odometry(self):\n # distance traveled in x per second = 208 * cmd_vel.linear.x - 43.64\n # Auto bewegt sich nicht bei cmd_vel.linear.x Werten unter 0.2 -> zurueckgelegte Strecke 0\n duration = (rospy.Time.now() - self.last_timestamp).to_sec()\n print(\"Duration between calculation: {0}\".format(duration))\n self.last_timestamp = rospy.Time.now()\n\n explicit_quat = [\n self.pose.orientation.x,\n self.pose.orientation.y,\n self.pose.orientation.z,\n self.pose.orientation.w\n ]\n\n print(\"Position: ( {0} | {1} | {2} ) \\n Orientation: ( {3} | {4} | {5} | {6}\".format(\n self.pose.position.x, \n self.pose.position.y,\n self.pose.position.z,\n 
self.pose.orientation.x,\n self.pose.orientation.y,\n self.pose.orientation.z,\n self.pose.orientation.w\n ))\n\n #explicit_quat = [0.0, 0.0, 0.0, 0.0]\n\n roll, pitch, yaw = tf.transformations.euler_from_quaternion(explicit_quat)\n print(\"Yaw: {0}\".format(yaw))\n x_dot = 0\n y_dot = 0\n\n if self.throttle == 0:\n x_dot = 0\n y_dot = 0\n else: \n x_dot = (208 * self.throttle - 43) * math.cos(yaw) # euler[2] == yaw\n y_dot = (208 * self.throttle -43) * math.sin(yaw)\n\n self.pose.position.x += x_dot * duration\n self.pose.position.y += y_dot * duration\n\n yaw += self.angular_vel * duration # update yaw \n\n \n explicit_quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n\n #Reihenfolge von x y z w unklar im nparray was von quaternion_from_euler zurueckkomt -> pruefen\n self.pose.orientation.x = explicit_quat[0]\n self.pose.orientation.y = explicit_quat[1]\n self.pose.orientation.z = explicit_quat[2]\n self.pose.orientation.w = explicit_quat[3]\n\n odom = Odometry()\n odom.header.frame_id = \"/odom\"\n odom.header.stamp = rospy.Time.now()\n odom.pose = self.pose\n odom.twist.covariance[0] = 0.2 #x\n odom.twist.covariance[7] = 0.2 #y \n odom.twist.covariance[35] = 0.4 #yaw\n odom.twist.twist.linear.x = self.throttle\n odom.twist.twist.linear.y = 0\n odom.twist.twist.angular.z = self.angular_vel\n\n\n # Odom Transform publishen\n\n odom_stamped = TransformStamped()\n odom_stamped.header.stamp = self.last_timestamp\n odom_stamped.header.frame_id = \"odom\"\n odom_stamped.child_frame_id = \"base_link\"\n odom_stamped.transform.translation.x = x_dot * duration\n odom_stamped.transform.translation.y = y_dot * duration\n self.odom_trans_broadcaster.sendTransform(odom_stamped)\n\n return odom\n\n def throttle_callback(self, throttle):\n self.throttle = throttle.data\n\n def steering_callback(self, steering):\n self.steering = steering.data\n\n def racecar_data_callback(self, racecar_data):\n self.throttle = racecar_data.data[0]\n self.steering = racecar_data.data[1]\n self.angular_vel = racecar_data.data[2]\n \n\n\n def odometry_node(self):\n # rospy.Subscriber(\"throttle\", Float32, self.throttle_callback, queue_size=1)\n # rospy.Subscriber(\"steering\", Float32, self.steering_callback, queue_size=1)\n rospy.Subscriber(\"racecar_data\", Float64MultiArray, self.racecar_data_callback, queue_size=1)\n print(\"Odometry started.\")\n while not rospy.is_shutdown():\n odom = self.calculate_odometry()\n self.odom_pub.publish(odom)\n\n rospy.sleep(0.1)\n\n #rospy.spin()\n \n # def convert_rotational_vel_to_steering_angle(self, v, omega):\n # if omega == 0:\n # return 0\n \n # radius = v / omega\n # print(\"Radius : {0}\".format(radius))\n # print(\"Steering Angle in Radian: {0}\".format(math.atan(self.wheelbase/radius)))\n # print(\"Steering Angle in Degree: {0}\".format(math.degrees(math.atan(self.wheelbase/radius))))\n # return math.atan(self.wheelbase/radius)\n\n\nif __name__ == '__main__':\n \n odom_calc = Odometry_Calculator()\n odom_calc.odometry_node()\n print(\"Shutting down\")\n\n","repo_name":"mounteverset/jetracer_ros","sub_path":"Software/odom_calculator/src/odometry_calculation.py","file_name":"odometry_calculation.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"75198659782","text":"import os\n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nfrom tensorflow.keras.applications import 
VGG16\n\nfrom tensorflow.keras import models, layers, optimizers\n\nfrom tensorflow.keras import callbacks\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nlen(os.listdir('../input/dogs-vs-cats/train/train'))\ntrain_dir = \"../input/dogs-vs-cats/train/train\"\n\ntest_dir = \"../input/dogs-vs-cats/test1/test1\"\n\n\n\ntrain_images = os.listdir(train_dir)\n\ntrain_labels = []\n\nfor image in train_images:\n\n label = image.split('.')[0]\n\n train_labels.append(label)\n\ndf = pd.DataFrame({\n\n 'id': train_images,\n\n 'label': train_labels\n\n})\ntrain_set, val_set = train_test_split(df, test_size=0.2)\n\n\n\ntrain_gen = ImageDataGenerator(rescale=1./255,\n\n horizontal_flip=True,\n\n rotation_range=45,\n\n zoom_range=0.2,\n\n shear_range=0.2,\n\n height_shift_range=0.2,\n\n width_shift_range=0.2,\n\n fill_mode='nearest')\n\nval_gen = ImageDataGenerator(rescale=1./255)\n\n\n\ntrain_data = train_gen.flow_from_dataframe(\n\n train_set, \n\n train_dir, \n\n x_col='id',\n\n y_col='label',\n\n target_size=(150, 150),\n\n class_mode='binary',\n\n batch_size=100)\n\n\n\nval_data = val_gen.flow_from_dataframe(\n\n val_set, \n\n train_dir, \n\n x_col='id',\n\n y_col='label',\n\n target_size=(150, 150),\n\n class_mode='binary',\n\n batch_size=100)\nmodel = models.Sequential([layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),\n\n layers.Conv2D(16, (3, 3), activation='relu'),\n\n layers.BatchNormalization(),\n\n layers.MaxPooling2D(2, 2),\n\n \n\n layers.Conv2D(32, (3, 3), activation='relu'),\n\n layers.Conv2D(32, (3, 3), activation='relu'),\n\n layers.BatchNormalization(),\n\n layers.MaxPooling2D(2, 2),\n\n \n\n layers.Dropout(0.2),\n\n \n\n layers.Conv2D(64, (3, 3), activation='relu'),\n\n layers.Conv2D(64, (3, 3), activation='relu'),\n\n layers.BatchNormalization(),\n\n layers.MaxPooling2D(2, 2),\n\n \n\n layers.Flatten(),\n\n layers.Dense(256, activation='relu'),\n\n layers.Dense(1, activation='sigmoid')])\nmodel.summary()\nmodel.compile(loss='binary_crossentropy',\n\n optimizer=optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),\n\n metrics=['accuracy'])\nhistory = model.fit_generator(train_data,\n\n steps_per_epoch=200,\n\n epochs=10,\n\n validation_data=val_data,\n\n validation_steps=50)\nmodel.save('model.h5')\ndef plot_history(history):\n\n acc = history.history['accuracy']\n\n val_acc = history.history['val_accuracy']\n\n loss = history.history['loss']\n\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(acc)+1)\n\n \n\n plt.plot(epochs, acc, 'b--', label='acc')\n\n plt.plot(epochs, val_acc, 'r--', label='val_acc')\n\n plt.xlabel('epochs')\n\n plt.ylabel('accuracy')\n\n plt.legend()\n\n plt.grid()\n\n \n\n plt.figure()\n\n plt.plot(epochs, loss, 'b--', label='loss')\n\n plt.plot(epochs, val_loss, 'r--', label='val_loss')\n\n plt.xlabel('epochs')\n\n plt.ylabel('accuracy')\n\n plt.legend()\n\n plt.grid()\ntest_images = os.listdir(test_dir)\n\nsubmission = pd.DataFrame({\n\n 'id': test_images\n\n})\n\n\n\n\n\ntest_gen = ImageDataGenerator(rescale=1./255)\n\n\n\ntest_data = test_gen.flow_from_dataframe(\n\n submission, \n\n test_dir, \n\n x_col='id',\n\n y_col=None,\n\n class_mode=None,\n\n target_size=(150, 150),\n\n batch_size=100,\n\n shuffle=False\n\n)\n\n\n\npredictions = model.predict_generator(test_data, steps=125)\npredictions = [1 if pred > 0.5 else 0 for pred in predictions]\n\n\n\nsubmission['label'] = predictions\n\n\n\nlabel_maps = dict((i, j) for j, i in train_data.class_indices.items())\n\nsubmission['label'] = 
submission['label'].replace(label_maps)\n\n\n\n\n\nsubmission['label'] = submission['label'].replace({ 'dog': 1, 'cat': 0 })\n\n\n\nsubmission.to_csv('submission.csv', index=False)\n\n\n\nsubmission.head()","repo_name":"aorursy/new-nb-4","sub_path":"kanametov_cats-vs-dogs.py","file_name":"kanametov_cats-vs-dogs.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"16362693704","text":"import json\n\nimport yaml\nfrom tqdm.auto import tqdm\n\nfrom utils.captioner import SceneDescripter\nfrom utils.splitter import SceneSplitter\nfrom utils.synter import TextToSpeech\nfrom pathlib import Path\n\n\nclass PipelineVideoPrepare:\n def __init__(self):\n with open(\"./configs/pipeline.cfg.yml\") as config:\n params = yaml.safe_load(config)\n\n self.splitter = SceneSplitter(\n params[\"splitter\"][\"threshold_splitter\"],\n params[\"splitter\"][\"frame_skip\"],\n )\n\n self.descriptor = SceneDescripter(\n ckpt_scene=params[\"descriptor_scene\"][\"model\"],\n device_scene=params[\"descriptor_scene\"][\"device\"],\n ckpt_background=params[\"descriptor_background\"][\"model\"],\n device_background=params[\"descriptor_background\"][\"device\"],\n model_trans_name=params[\"translation\"][\"model\"],\n device_trans=params[\"translation\"][\"device\"],\n background_threshold=params[\"descriptor_background\"][\"threshold\"], scene_threshold=params[\"descriptor_scene\"][\"threshold\"],\n window_len=params[\"descriptor_scene\"][\"window_len\"]\n )\n\n self.txt2speech = TextToSpeech()\n\n def run(self, path_to_video: Path):\n self.scenes_frames = self.splitter.get_scenes_frames(str(path_to_video))\n\n self.scenes_backbones = {\n timestamp: self.descriptor.detect_backbones(scene_frame)\n for timestamp, scene_frame in tqdm(self.scenes_frames.items())\n }\n\n self.scenes_descriptions = {\n timestamp: self.descriptor.get_scene_description(**scene_backbone) \n for timestamp, scene_backbone in tqdm(self.scenes_backbones.items())\n }\n self.descriptor.back_prev = None\n self.descriptor.scenes_window = []\n \n self.scenes_speeches = {\n timestamp: {'sound': self.txt2speech.save_audio(\n self.txt2speech.create_audio(scene_desc['text']),\n name=(path_to_video.parent / f\"{i}.mp3\")\n ),\n 'state_pause': scene_desc['state_pause'],\n 'state_using_frame': scene_desc['state_using_frame']}\n for i, (timestamp, scene_desc) in enumerate(\n tqdm(self.scenes_descriptions.items())\n )\n }\n\n with open(path_to_video.parent / \"timecodes.json\", \"w\") as f:\n output_dict = {\n \"timecodes\": [\n {\n \"time\": time,\n \"sound\": data[\"sound\"][\"name\"],\n \"duration\": data[\"sound\"][\"duration\"],\n \"state_pause\": data[\"state_pause\"],\n \"state_using_frame\": data[\"state_using_frame\"]\n }\n for time, data in self.scenes_speeches.items()\n ]\n }\n json.dump(output_dict, f)\n\n with open(path_to_video.parent / \"timecodes_text.json\", \"w\", encoding=\"utf-8\") as f:\n output_dict = {\n \"timecodes\": [\n {\n \"time\": time,\n \"text\": data['text'],\n \"state_pause\": data['state_pause'],\n \"state_using_frame\": data['state_using_frame']\n }\n for time, data in self.scenes_descriptions.items()\n ]\n }\n print(output_dict)\n json.dump(output_dict, f)\n","repo_name":"m-danya/aitech-audio-accompaniment","sub_path":"neuro-container/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"8"} 
+{"seq_id":"38277712230","text":"\"\"\"Scene object that store all level's obstacles.\"\"\"\n\nfrom pygame.math import Vector2\nfrom pygame.rect import Rect\nimport pygame\nfrom pygame import gfxdraw\nfrom . import Game\nfrom .GameObject import PlayerTank\nfrom .Enemy import EnemyTank\nfrom .Spawner import Spawner\n\n\nclass Scene():\n \"\"\"Scene.\n\n :var pygame.Rect bbox: scene bounding box\n :var int cellSize: size of single cell, for both X and Y axis\n :var list[pygame.Rect] bricks: list with brick blocks\n :var pygame.Surface surface: texture for scene rendering\n \"\"\"\n\n def __init__(self, sceneName):\n \"\"\"Load scene from file.\n\n :param str sceneName: file name with scene\n \"\"\"\n w = -1\n h = 0\n self.bricks = []\n self.cellSize = 64\n with open(sceneName) as sceneFile:\n for line in sceneFile:\n line = line.rstrip()\n if w == -1:\n w = len(line)\n else:\n assert(w == len(line))\n for i, cell in enumerate(line):\n self._processCell(cell, Vector2(i, h))\n h += 1\n self.bbox = Rect(Vector2(0, 0), Vector2(w, h) * self.cellSize)\n try:\n self.bricksImage = pygame.image.load(\"res/bricks.png\")\n except FileNotFoundError:\n self.bricksImage = pygame.image.load(\"../res/bricks.png\")\n halfSize = self.cellSize // 2\n self.bricksImage = pygame.transform.scale(self.bricksImage, (halfSize, halfSize))\n self.surface = pygame.Surface(self.bbox.size)\n print(\"Use {}x{} texture for intermediate rendering\".format(\n self.surface.get_width(), self.surface.get_height()))\n\n def _processCell(self, cell, pos):\n \"\"\"Add cell to scene.\n\n :param str cell: type of cell\n :param pygame.math.Vector2 pos: cell position in scene\n \"\"\"\n if cell == 'b':\n self.bricks.append(Rect(pos * self.cellSize, Vector2(1, 1) * self.cellSize))\n elif cell == '.':\n pass # empty cell\n elif cell == 'p':\n Game.all_objects.append(Spawner(PlayerTank, pos=(pos * self.cellSize), size=self.cellSize))\n elif cell == 'e':\n Game.all_objects.append(Spawner(EnemyTank, numSpawns=10,\n pos=(pos * self.cellSize), size=self.cellSize))\n else:\n assert False, f\"Unknown cell type \\\"{cell}\\\"\"\n\n def testCollision(self, rect):\n \"\"\"Test collision between rect and scene.\n\n :param pygame.Rect rect: rectangle to be tested\n \"\"\"\n fittedRect = rect.clamp(self.bbox)\n return fittedRect.topleft != rect.topleft or rect.collidelist(self.bricks) != -1\n\n def render(self):\n \"\"\"Draw scene into its texture.\"\"\"\n backgroundColor = (0, 0, 0)\n self.surface.fill(backgroundColor)\n\n for brick in self.bricks:\n points = [brick.topleft, brick.topright, brick.bottomright, brick.bottomleft]\n gfxdraw.textured_polygon(self.surface, points, self.bricksImage, 2, -1)\n\n def damage(self, rect, direction):\n \"\"\"Damage scene obstacles.\n\n :param pygame.Rect rect: rectangle that dealing damage\n :param pygame.math.Vector2 direction: rectangle's movement direction\n \"\"\"\n hits = rect.collidelistall(self.bricks)\n halfSize = self.cellSize // 2\n forRemove = []\n for idx in hits:\n b = self.bricks[idx]\n if b.w == self.cellSize:\n forRemove.append(idx)\n self.bricks.append(Rect((b.x, b.y), (halfSize, halfSize)))\n self.bricks.append(Rect((b.x + halfSize, b.y), (halfSize, halfSize)))\n self.bricks.append(Rect((b.x, b.y + halfSize), (halfSize, halfSize)))\n self.bricks.append(Rect((b.x + halfSize, b.y + halfSize), (halfSize, halfSize)))\n for idx in rect.collidelistall(self.bricks[-4:]):\n idx = -4 + idx\n b = self.bricks[idx]\n self.bricks[idx] = b.clip(Rect(Vector2(b.topleft) + direction * halfSize // 2, b.size))\n 
else:\n self.bricks[idx] = b.clip(Rect(Vector2(b.topleft) + direction * halfSize // 2, b.size))\n if self.bricks[idx].size == (0, 0):\n forRemove.append(idx)\n for idx in forRemove[-1::-1]:\n self.bricks.pop(idx)\n","repo_name":"myCloudStrife/DendyTanks","sub_path":"DendyTanks/Scene.py","file_name":"Scene.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"73404458822","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.todo_page, name='todo'), \n path('tasks/', views.items_page, name='tasks'), \n path('delete_todo_list/', views.delete_todo_list, name='delete_todo_list'), \n path('task_complete/', views.task_complete, name='task_complete'), \n path('task_not_complete/', views.task_not_complete, name='task_not_complete'), \n path('task_delete/', views.task_delete, name='task_delete'), \n]\n","repo_name":"s-shifat/home-finance","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70460197062","text":"# -*- coding: UTF-8 -*-\nfrom django.conf.urls import patterns, url\nfrom app import views\n\nurlpatterns = patterns('',\n url(r'^$', views.home, name=\"home\"),\n url(r'^index/', views.index, name=\"index\"),\n url(r'^edit/', views.edit),\n url(r'^tracker/', views.tracker),\n #url(r'^members/', views.members),\n #url(r'^send/', views.message),\n #url(r'^buddy_match/', views.buddy_match),\n #url(r'^send_match/', views.message_match),\n url(r'^workout_plan/', views.workout_plan),\n url(r'^workout/days/(?P
\\w+)/$', views.workout_day),\n url(r'^delete_exercise/', views.delete_exercise),\n url(r'^plan_manage/', views.plan_manage),\n url(r'^permission_denied/', views.permission_denied),\n url(r'^delete_plan_msg/', views.delete_plan_msg),\n url(r'^delete_athlete_msg/', views.delete_athlete_msg),\n url(r'^screenings/(?P\\w+)/$', views.screenings),\n url(r'^screenings/$', views.screenings),\n url(r'^create_screening/(?P\\w+)/$', views.create_screening),\n url(r'^delete_screening/', views.delete_screening),\n url(r'^alunos/$', views.AlunoList.as_view()),\n url(r'^alunos_list/$', views.alunos_list_json),\n url(r'^add_aluno/$', views.AddAluno.as_view()),\n url(r'^manage_workout/(?P\\w+)/$', views.ManageWorkout.as_view()),\n url(r'^manage_personal/', views.ManagePersonal.as_view()),\n url(r'^subscribe/', views.SubscribeView.as_view(), name='subscribe'),\n)\n","repo_name":"brunoliveira8/fibrando","sub_path":"project/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"32253661578","text":"import imaplib\nimport email\nfrom email.header import decode_header\nimport os\n# import gspread\nimport pygsheets\n\n# from oauth2client.service_account import ServiceAccountCredentials\n\n# Configura tus credenciales de Gmail\nemail_address = \"camorcillos@gmail.com\"\npassword = \"jayb ilui iwct kaew\"\n\n# Conéctate al servidor IMAP de Gmail\nmail = imaplib.IMAP4_SSL(\"imap.gmail.com\")\nmail.login(email_address, password)\n\n# Selecciona la bandeja de entrada\nmail.select(\"inbox\")\n\n# Define el criterio de búsqueda (en este caso, buscar correos con un texto específico en el cuerpo)\nsearch_criteria = '(BODY \"#18228\")'\n\nsave_directory = \"./files\"\n\nif not os.path.exists(save_directory):\n os.makedirs(save_directory)\n\n# Realiza la búsqueda y obtén el ID del correo que contiene el adjunto\nstatus, email_ids = mail.search(None, search_criteria)\n\nif status == \"OK\":\n email_ids = email_ids[0].split() # Convierte los IDs de correo en una lista de números\n if len(email_ids) > 0:\n # Obtén el primer correo que cumple con el criterio de búsqueda\n email_id = email_ids[0]\n\n # Descarga el correo completo\n status, msg_data = mail.fetch(email_id, \"(RFC822)\")\n\n if status == \"OK\":\n raw_email = msg_data[0][1]\n msg = email.message_from_bytes(raw_email)\n\n # Recorre las partes del correo para buscar adjuntos\n for part in msg.walk():\n if part.get_content_maintype() == \"multipart\":\n continue\n if part.get(\"Content-Disposition\") is None:\n continue\n\n # Si es un adjunto, descárgalo\n filename = part.get_filename()\n if filename:\n # Decodifica el nombre del archivo si es necesario\n filename = email.header.decode_header(filename)[0][0]\n if filename:\n # Construye la ruta completa al archivo en la carpeta \"files\"\n file_path = os.path.join(save_directory, filename)\n \n # Extrae el número del nombre del archivo descargado\n numero_de_orden = filename.split(\"TicketOrder\")[1].split(\".\")[0]\n\n print(f\"Número de Orden del archivo descargado: {numero_de_orden}\")\n\n # Guarda el adjunto en la carpeta \"files\"\n with open(file_path, \"wb\") as f:\n f.write(part.get_payload(decode=True))\nelse:\n print(\"Error al buscar correos.\")\n\n# Cierra la conexión\nmail.logout()\n\n# # Obtiene el ID de la hoja de cálculo\n# spreadsheet_url = \"https://docs.google.com/spreadsheets/d/189UzdG5vAlmprVxlSmewp-T7vfPfUPl6vIyWf4SR5xk/edit?usp=sharing\"\n\n# # spreadsheet = 
gspread.open_by_url(spreadsheet_id)\n# # Abre la hoja de cálculo\n# # spreadsheet = gspread.open_by_url(spreadsheet_id)\n\n# # # Obtiene la primera hoja de cálculo\n# # worksheet = spreadsheet.get_worksheet(0)\n\n# # # Escribe en la celda A1\n# # worksheet.update_cell(2, 2, numero_de_orden)\n\n# # # Guarda los cambios\n# # worksheet.save()\n\n# gc = pygsheets.authorize()\n# # spreadsheet = gc.open_by_url(spreadsheet_url)\n# sh = gc.open(spreadsheet_url)\n# # Selecciona la primera hoja de cálculo\n# worksheet = sh.sheet1\n\n# # Escribe en la celda B2\n# worksheet.update_value(\"B2\", numero_de_orden)\n","repo_name":"camorcillos/parque-del-cafe","sub_path":"read_email.py","file_name":"read_email.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"23835217898","text":"import datetime\nimport numpy as np\nimport pickle\nfrom scipy import interpolate\nimport scipy.signal\n\nfrom collections import Counter\n\nimport sys\nsys.path.append('/home/larda3/larda/')\nimport pyLARDA.helpers as h\n\n# flags for cloud particles\ncloud_particles=[1,3,4,5,6,7]\nliquid=[1,3,5,7]\ndroplets_only=[1,5]\nice=[4,5,6]\nice_only=4\nmelting=[6,7]\ndrizzle=[2,3]\n\nclass cloud_feature :\n \n def __init__(self):\n \n self.time=-1\n \n self.top=-1\n self.base=-1\n \n self.classifications=[]\n self.hasl=[]\n self.temperature=[]\n self.has_melting=False\n self.has_drizzle=False\n self.type=\"none\"\n self.valid=False\n \n self.measurements={}\n self.measurements[\"IWC\"]=[]\n self.measurements[\"LWC\"]=[]\n self.measurements[\"Z\"]=[]\n self.measurements[\"LDR\"]=[]\n self.measurements[\"LDR_min\"]=[]\n self.measurements[\"v\"]=[]\n self.measurements[\"width\"]=[]\n self.measurements[\"T\"]=[]\n self.measurements[\"p\"]=[]\n self.measurements[\"SNR\"]=[]\n self.measurements[\"alpha_hogan\"]=[]\n self.measurements[\"beta\"]=[]\n self.measurements[\"voldepol\"]=[]\n #self.measurements[\"tfv\"]=[]\n #self.measurements[\"vair\"]=[]\n\n self.liquid_layers=0\n self.liquid_layer_base=[]\n self.liquid_layer_top=[]\n \n self.precipitation_top=-1\n self.precipitation_base=-1\n \n self.cloud_system=-1\n \n def show(self):\n \n print(\"Top height:\", self.top, \"- Base height:\", self.base, \"- Type:\", self.type, \"- Time:\", self.time)\n\n\ndef get_only_valid(data):\n return data['var'][~data['mask']]\n\ndef fill_with(data, fill):\n \"\"\"fill an array where mask is true with fill value\"\"\"\n filled = data['var'].copy()\n filled[data['mask']] = fill\n return filled\n\ndef flatten(xs):\n \"\"\"flatten inhomogeneous deep lists\n e.g. 
``[[1,2,3],4,5,[6,[7,8],9],10]``\n \"\"\"\n result = []\n if isinstance(xs, (list, tuple)):\n for x in xs:\n result.extend(flatten(x))\n else:\n result.append(xs)\n return result\n\n\ndef autocorr(x):\n result = np.correlate(x, x, mode='full')\n return result[result.size // 2:]\n\n\n\ndef time_analysis_from_vel(locations_of_vel, idx):\n \"\"\"get the autocorrelation and the periodogram from the dl observations\n\n Args:\n locations_of_vel: list of tupels (ts, rg, vel)\n\n Returns:\n (f, Pxx_den), (time_shifts, v_autocorr)\n \"\"\"\n\n locations_of_vel_s = sorted(locations_of_vel, key=lambda k: k[0])\n\n sep_time = np.array([e[0] for e in locations_of_vel_s])\n #sep_range = [e[1] for e in locations_of_vel_s]\n vel_line = np.array([e[idx] for e in locations_of_vel_s])\n\n #print(vel_line)\n\n delta_t = np.median(sep_time[1:] - sep_time[:-1])\n print('delta t', delta_t, vel_line[:10])\n\n if len(vel_line) > 0:\n f, Pxx_den = scipy.signal.welch(vel_line[vel_line != None],\n fs=1/delta_t)\n\n v_autocorr = autocorr(vel_line[vel_line != None])\n v_autocorr = v_autocorr/float(v_autocorr.max())\n time_shifts = np.arange(v_autocorr.shape[0])*delta_t\n else:\n f, Pxx_den = np.array([]), np.array([])\n time_shifts, v_autocorr = np.array([]), np.array([])\n\n return (f, Pxx_den), (time_shifts[:500], v_autocorr[:500])\n\n\nclass cloud():\n \n def __init__(self):\n \n self.cloud_type=\"none\"\n \n self.begin_time=-1\n self.end_time=-1\n \n #self.top = -1\n #self.base = -1\n self.top_range = -1\n self.base_range = -1\n \n self.times=[]\n self.tops=[]\n self.bases=[]\n self.types=[]\n \n self.features=[]\n\n self.n_valid=0\n self.n_invalid=0\n \n def update_geometry(self):\n \n begin_t,base_h,duration_t,extent_h=self.geometry()\n self.begin_time=begin_t\n self.end_time=begin_t+duration_t\n self.base=base_h\n self.top=base_h+extent_h\n \n def append_feature(self,feature):\n \n self.features.append(feature)\n \n self.times.append(feature.time)\n self.tops.append(feature.top_range)\n self.bases.append(feature.base_range)\n self.types.append(feature.type)\n \n self.update_geometry()\n\n def validate_features(self):\n\n sep_height=[]\n\n for f in self.features:\n\n ll_base=-1\n if len(f.liquid_layer_base)>0:\n ll_base=f.liquid_layer_base[0]\n elif f.type==\"pure liquid\":\n ll_base=f.base\n sep_height.append(ll_base)\n \n med_sep_height=np.median(sep_height)\n\n print('validate list of liquid layer bases', med_sep_height, sep_height)\n \n avg_th, med_th, std_th, _ = self.cloud_top_thickness()\n\n valid,invalid=0,0\n\n for f in self.features:\n\n ll_base=-1\n if len(f.liquid_layer_base)>0:\n ll_base=f.liquid_layer_base[0]\n elif f.type==\"pure liquid\":\n ll_base=f.base\n\n print('time of feature ', h.ts_to_dt(f.time), ll_base)\n if np.abs(ll_base-med_sep_height)>150.0:\n f.valid=False\n invalid+=1\n else:\n f.valid=True\n valid+=1\n\n self.n_valid=valid\n self.n_invalid=invalid\n\n def n_profiles(self,types=[]):\n \n n=0\n if len(types)==0:\n n=len(self.features)\n else:\n for f in self.features:\n if f.type in types:\n n+=1\n\n return n\n \n def geometry(self):\n \"\"\"return the geometrical boundaries of the cloud\"\"\" \n cloud_min_t = np.min(self.times)\n cloud_min_h = np.min(self.bases)\n cloud_width = self.time_length()\n cloud_height = np.max(self.tops)-np.min(self.bases)\n \n #print('current geometry ', datetime.datetime.utcfromtimestamp(cloud_min_t),\n # \" dur \", cloud_width, \" range \", cloud_min_h, cloud_height)\n return cloud_min_t,cloud_min_h,cloud_width,cloud_height\n \n def 
most_common_type(self):\n types_count = Counter(self.types)\n #s_types=np.sort(self.types\n #return s_types[len(s_types)/2]\n return types_count.most_common(1)[0][0]\n \n def top_variation(self):\n \"\"\"get the std from tops\"\"\"\n\n print(self.tops)\n \n return np.std(self.tops)\n\n def top_diffsum(self):\n\n return np.average(np.abs(np.diff(self.tops)))\n\n def length(self):\n \n return (max(self.times) - min(self.times)) * 10.0\n \n def time_length(self):\n \n return max(self.times) - min(self.times)\n \n def fill_factor(self):\n \n return self.n_profiles()*30.0/self.time_length()\n\n def return_values(self,name,particle_types=[]):\n\n values=[]\n for f in self.features:\n #if f.valid==False:\n # continue\n if particle_types:\n cc_mask = np.isin(f.classifications, particle_types)\n else:\n cc_mask = np.full(f.classifications, True)\n var = f.measurements[name]['var'][cc_mask]\n mask = f.measurements[name]['mask'][cc_mask]\n values += var[~mask].tolist()\n\n values = np.array(values).ravel().astype(np.float64)\n assert np.all(np.isfinite(values)), values\n\n return values\n\n def return_values_separation(self,name,spacing):\n\n print(\"separation average\", name, spacing)\n if not(name in self.features[0].measurements.keys()):\n print('name not in features')\n return np.array([])\n\n values=[]\n print('no_features', len(self.features))\n for f in self.features:\n\n\n #if f.valid==False:\n # continue\n #print('precip top',f.precipitation_top, f.precipitation_top-spacing)\n #print('cc_profile', f.classifications )\n\n if f.precipitation_top!=-1 and f.precipitation_top-spacing>0:\n mask = f.measurements[name]['mask'][f.precipitation_top-spacing]\n values += f.measurements[name]['var'][f.precipitation_top-spacing][~mask].tolist()\n\n values=np.array(values).ravel()\n return values\n\n\n def combined_average(self,name,ref_name,particle_types=[]):\n\n if not(name in self.features[0].measurements.keys()) or not(ref_name in self.features[0].measurements.keys()):\n return 0,0,0,0\n\n values=[0]\n for f in self.features:\n\n #if f.valid==False:\n # continue\n if not(ref_name in f.measurements.keys()) or len(f.measurements[name])!=len(f.measurements[ref_name]):\n continue\n\n for v in range(len(f.measurements[name])):\n\n\n if f.measurements[name][v]!=0 and f.measurements[ref_name][v]!=0:\n value=f.measurements[name][v]\n else:\n value=0 \n\n if len(particle_types)>0:\n if f.classifications[v] in particle_types:\n values.append(value)\n else:\n values.append(value)\n\n values=np.array(values)\n\n if len(values)>0 and ~np.all(values==0):\n avg=np.average(values[values!=0])\n med=np.median(values[values!=0])\n std=np.std(values[values!=0])\n n=len(values[values!=0])\n else:\n avg=0.0\n med=0.0\n std=0.0\n n=0\n\n return avg,med,std,n\n\n\n def average(self,name,particle_types=[]):\n\n values=[]\n print('assemble', name, h.ts_to_dt(self.features[0].time))\n for f in self.features:\n #if f.valid==False:\n # continue\n if particle_types:\n cc_mask = np.isin(f.classifications, particle_types)\n else:\n cc_mask = np.full(f.classifications, True)\n\n #print('classification ', f.classifications)\n #print('cc_mask ', cc_mask)\n if name in f.measurements.keys() and not type(f.measurements[name]) == list \\\n and any(cc_mask):\n var = f.measurements[name]['var'][cc_mask]\n mask = f.measurements[name]['mask'][cc_mask].astype(bool)\n #print('var', type(var), var.dtype, var)\n #print('mask', type(mask), mask.dtype, mask)\n if not np.all(mask) and np.all(~np.isnan(var)):\n values += var[~mask].tolist()\n\n 
#print('average', name, values[:10], values[-10:])\n values = np.array(values).ravel().astype(np.float64)\n assert np.all(np.isfinite(values)), values\n if len(values)>0:\n avg=np.average(values)\n med=np.median(values)\n std=np.std(values)\n n=len(values)\n else:\n avg=0.0\n med=0.0\n std=0.0\n n=0\n print('assembled ', name, values[:10], values[-10:], ' avg ', avg)\n \n return avg,med,std,n\n \n def pp90(self,name,particle_types=[]):\n\n values=[]\n for f in self.features:\n #if f.valid==False:\n # continue\n if particle_types:\n cc_mask = np.isin(f.classifications, particle_types)\n else:\n cc_mask = np.full(f.classifications, True)\n var = f.measurements[name]['var'][cc_mask]\n mask = f.measurements[name]['mask'][cc_mask]\n values += var[~mask].tolist()\n\n values = np.array(values).ravel().astype(np.float64)\n assert np.all(np.isfinite(values)), values\n\n if len(values)>1 and ~np.all(values==0):\n sorted_vals=np.sort(values[values!=0])\n ln=len(sorted_vals)\n\n return sorted_vals[int(0.1*ln)],sorted_vals[int(0.9*ln)]\n\n else:\n\n return 0,0\n\n \n def n_values(self,name,particle_types=[]):\n \n n_values=0\n for f in self.features:\n for v in range(len(f.measurements[name])):\n value=f.measurements[name][v]\n if len(particle_types)>0:\n if (f.classifications[v] in particle_types) and value!=0:\n n_values+=1\n else:\n if value!=0:\n n_values+=1\n \n return n_values\n\n def temperature_range(self):\n \"\"\"loop over features, take the base and top temperature\n and return min(base), med(top), max(top)\n \"\"\" \n top_temps=[]\n base_temps=[]\n\n for f in self.features:\n top_temps.append(f.measurements[\"T\"]['var'][-1])\n base_temps.append(f.measurements[\"T\"]['var'][0])\n \n return np.max(base_temps),np.median(top_temps),np.min(top_temps)\n\n def velocities_radar(self):\n\n v_top=[]\n v_top.append(0)\n\n width_top=[]\n width_top.append(0)\n\n Z_top=[]\n Z_top.append(0)\n\n for f in self.features:\n if f.valid==False:\n continue\n ll_base=-1\n if len(f.liquid_layer_base)>0:\n ll_base=f.liquid_layer_base[0]\n elif f.type==\"pure liquid\":\n ll_base=f.base\n\n if not f.measurements[\"Z\"][\"mask\"][-1]:\n Z_top.append(f.measurements[\"Z\"]['var'][-1])\n\n #for i in range(len(f.measurements[\"v\"])):\n vr=f.measurements[\"v\"]\n wt=f.measurements[\"width\"]\n rg_valid = vr[\"rg\"] >= ll_base\n v_top += flatten(fill_with(vr, 0)[rg_valid].tolist())\n width_top += flatten(fill_with(wt, 0)[rg_valid].tolist())\n\n v_top=np.array(v_top)\n v_top=v_top[v_top!=0]\n v_top_gt0=v_top[v_top>0.0]\n\n width_top=np.array(width_top)\n width_top=width_top[width_top!=0]\n\n Z_top=np.array(Z_top)\n Z_top=Z_top[Z_top!=0]\n\n if len(v_top)>0:\n v_mean=np.average(v_top)\n v_std=np.std(v_top)\n v_n=len(v_top)\n else:\n v_mean=0\n v_std=0\n v_n=0\n\n if len(v_top_gt0)>0:\n v_mean_gt0=np.average(v_top_gt0)\n v_std_gt0=np.std(v_top_gt0)\n v_n_gt0=len(v_top_gt0)\n else:\n v_mean_gt0=0\n v_std_gt0=0\n v_n_gt0=0\n\n if len(width_top)>0:\n width_avg=np.average(width_top)\n else:\n width_avg=0\n\n if len(Z_top)>1:\n z_top=np.median(Z_top)\n else:\n z_top=0\n\n return v_mean,v_std,v_n,v_mean_gt0,v_std_gt0,v_n_gt0,width_avg,v_top,z_top\n\n def velocities_liquid_radar(self, where):\n \"\"\"\n retrun the velocities of the radar at the specified position of the liquid layer\n ``top``, ``whole``, ``base``\n \"\"\"\n\n v_base=[]\n locations_of_vel = []\n\n for f in self.features:\n \n if f.valid==False or \"v\" not in f.measurements.keys() \\\n or len(f.measurements['v']['rg'].shape) == 0:\n print('no measurements in this 
feature, valid? ', f.valid)\n continue\n\n ll_idx = None\n if len(f.liquid_layer_base)>0:\n #print('liquid layer bases',f.liquid_layer_base)\n #select highest liquid layer\n if where == 'base':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.liquid_layer_base[-1])\n elif where == 'top-90':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.liquid_layer_top[-1])-3\n elif where == 'top':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.liquid_layer_top[-1])\n elif where == 'whole':\n i_base = h.argnearest(f.measurements['v']['rg'], f.liquid_layer_base[-1])\n i_top = h.argnearest(f.measurements['v']['rg'], f.liquid_layer_top[-1])\n ll_idx = slice(i_base, i_top+1)\n\n elif f.type==\"pure_liquid\":\n if where == 'base':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.base_range)\n elif where == 'top-90':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.top_range)-3\n elif where == 'top':\n ll_idx = h.argnearest(f.measurements['v']['rg'], f.top_range)\n elif where == 'whole':\n i_base = h.argnearest(f.measurements['v']['rg'], f.base_range)\n i_top = h.argnearest(f.measurements['v']['rg'], f.top_range)\n ll_idx = slice(i_base, i_top+1)\n\n #print('velocities', f.measurements.keys())\n #print('ll_idx', ll_idx, f.type)\n #print('found indices ', ll_idx, f.liquid_layer_base, f.liquid_layer_top)\n if ll_idx is not None and len(f.measurements['v']['rg'].shape) > 0:\n v = f.measurements[\"v\"]\n #v_lidar['mask'] = np.logical_or(v_lidar['mask'], a_lidar['var']0:\n v_mean=np.average(v_base)\n v_std=np.std(v_base)\n v_n=len(v_base)\n else:\n v_mean=0\n v_std=0\n v_n=0\n\n return v_mean,v_std,v_n,v_base,locations_of_vel\n\n def velocities_fixed_height(self, sample_height):\n \"\"\"read the doppler lidar velocities at a fixed height of maximum backscatter\n [Suggestion by referee2]\n \n \"\"\"\n\n a_thr=0\n #a_thr=8e4\n\n v_base=[]\n v_base.append(0)\n locations_of_vel = []\n\n for f in self.features:\n\n # 2020-10-12: try without the validity check\n # if f.valid==False or \"v_lidar\" not in f.measurements.keys():\n # print('no measurements in this feature, valid? 
', f.valid)\n # continue\n\n \n if 'v_lidar' not in f.measurements:\n print('v_lidar missing at ', h.ts_to_dt(f.time))\n\n #print('here ', ll_base)\n \n if 'v_lidar' in f.measurements and sample_height >= 0 and len(f.measurements[\"v_lidar\"]) > 0:\n v_lidar = f.measurements[\"v_lidar\"]\n a_lidar = f.measurements[\"a_lidar\"]\n\n v_lidar['mask'] = np.logical_or(v_lidar['mask'], a_lidar['var'] 1 and v_lidar['var'].shape[0] > 1:\n mx_ind = h.argnearest(v_lidar['rg'], sample_height)\n else:\n mx_ind = 0\n #print(ll_base, mx_ind)\n #print(len(a_lidar['var']), a_lidar['var'].shape)\n \n for it in range(a_lidar['var'].shape[0]):\n #index of bsc max above liquid base\n idx = np.argmax(fill_with(a_lidar, -99)[it,mx_ind:])\n #print(it, idx, mx_ind+idx)\n if not v_lidar['mask'][it, mx_ind+idx]:\n v_base.append(v_lidar['var'][it, mx_ind+idx])\n #print('v', v_lidar['var'][it, mx_ind+idx])\n #print('a',a_lidar['var'][it, mx_ind+idx])\n # sometimes rg is only a float\n if isinstance(v_lidar['rg'], np.ndarray):\n rg = v_lidar['rg'][mx_ind+idx]\n else:\n rg = v_lidar['rg']\n locations_of_vel.append((v_lidar['ts'][it], rg, v_lidar['var'][it, mx_ind+idx], f.valid))\n #print(v_lidar['ts'][it], v_lidar['rg'][mx_ind+idx])\n #if v_lidar['var'].shape[1] > 1:\n \n #else:\n #locations_of_vel.append((v_lidar['ts'][it], v_lidar['rg']))\n\n \n v_base=np.array(v_base)\n v_base=v_base[v_base != 0]\n v_base=v_base[v_base != None]\n print('v_base', v_base)\n\n if len(v_base)>0:\n v_mean=np.mean(v_base)\n v_std=np.std(v_base)\n v_n=len(v_base)\n else:\n v_mean=0\n v_std=0\n v_n=0\n\n return v_mean,v_std,v_n,v_base,locations_of_vel\n\n\n\n def velocities(self):\n \"\"\"refactored\n reads out the doppler lidar velocities of each feature\n\n Returns:\n v_mean,v_std,v_n,v_base,locations_of_vel\n\n\n with locations_of_vel = v_lidar['ts'][it], v_lidar['rg'][mx_ind+idx], v_lidar['var'][it, mx_ind+idx], f.valid\n \n \"\"\"\n a_thr=0\n #a_thr=8e4\n\n v_base=[]\n v_base.append(0)\n locations_of_vel = []\n\n for f in self.features:\n\n # 2020-10-12: try without the validity check\n # if f.valid==False or \"v_lidar\" not in f.measurements.keys():\n # print('no measurements in this feature, valid? 
', f.valid)\n # continue\n\n ll_base=-1\n if len(f.liquid_layer_base)>0:\n #select highest liquid layer\n ll_base=f.liquid_layer_base[-1]\n elif f.type==\"pure_liquid\":\n ll_base=f.base_range\n #print('velocities', f.measurements.keys())\n #print('ll_base', ll_base, f.type)\n \n if 'v_lidar' not in f.measurements:\n print('v_lidar missing at ', h.ts_to_dt(f.time))\n\n #print('here ', ll_base)\n \n if 'v_lidar' in f.measurements and ll_base >= 0 and len(f.measurements[\"v_lidar\"]) > 0:\n v_lidar = f.measurements[\"v_lidar\"]\n a_lidar = f.measurements[\"a_lidar\"]\n\n v_lidar['mask'] = np.logical_or(v_lidar['mask'], a_lidar['var'] 1 and v_lidar['var'].shape[0] > 1:\n mx_ind = h.argnearest(v_lidar['rg'], ll_base)\n else:\n mx_ind = 0\n #print(ll_base, mx_ind)\n #print(len(a_lidar['var']), a_lidar['var'].shape)\n \n for it in range(a_lidar['var'].shape[0]):\n #index of bsc max above liquid base\n idx = np.argmax(fill_with(a_lidar, -99)[it,mx_ind:])\n #print(it, idx, mx_ind+idx)\n if not v_lidar['mask'][it, mx_ind+idx]:\n v_base.append(v_lidar['var'][it, mx_ind+idx])\n #print('v', v_lidar['var'][it, mx_ind+idx])\n #print('a',a_lidar['var'][it, mx_ind+idx])\n # sometimes rg is only a float\n if isinstance(v_lidar['rg'], np.ndarray):\n rg = v_lidar['rg'][mx_ind+idx]\n else:\n rg = v_lidar['rg']\n locations_of_vel.append((v_lidar['ts'][it], rg, v_lidar['var'][it, mx_ind+idx], f.valid))\n #print(v_lidar['ts'][it], v_lidar['rg'][mx_ind+idx])\n #if v_lidar['var'].shape[1] > 1:\n \n #else:\n #locations_of_vel.append((v_lidar['ts'][it], v_lidar['rg']))\n\n \n v_base=np.array(v_base)\n v_base=v_base[v_base != 0]\n v_base=v_base[v_base != None]\n print('v_base', v_base)\n\n if len(v_base)>0:\n v_mean=np.mean(v_base)\n v_std=np.std(v_base)\n v_n=len(v_base)\n else:\n v_mean=0\n v_std=0\n v_n=0\n\n return v_mean,v_std,v_n,v_base,locations_of_vel\n\n def no_node_hist(self):\n \"\"\"histogram over number of nodes for full cloud\n\n Returns:\n histogram of the node numbers\n \"\"\"\n\n no_nodes=np.array([])\n\n for f in self.features:\n if 'pT_no' in f.measurements:\n var = f.measurements[\"pT_no\"]['var'].ravel()\n no_nodes = np.append(no_nodes, var, axis=0)\n \n hist, bins = np.histogram(no_nodes, bins=[0,1,3,5,7,9,11,13,15,17,19,21,23])\n\n return hist.tolist()\n\n def no_node_hist_above_cb(self):\n \"\"\"histogram over number of nodes for full cloud\n\n\n Returns:\n histogram of the node numbers\n \"\"\"\n\n no_nodes=np.array([])\n\n for f in self.features:\n print(f.liquid_layer_base)\n if 'pT_no' in f.measurements:\n rg = f.measurements[\"pT_no\"]['rg']\n print('pT range', rg)\n rg_lt_base = np.where(rg > f.liquid_layer_base[0])[0]\n i_base = rg_lt_base[0]\n print(i_base)\n #print(f.measurements[\"pT_no\"]['var'].shape)\n #print(f.measurements[\"pT_no\"]['var'][:,i_base:].shape)\n var = f.measurements[\"pT_no\"]['var'][:,i_base:].ravel()\n no_nodes = np.append(no_nodes, var, axis=0)\n \n hist, bins = np.histogram(no_nodes, bins=[0,1,3,5,7,9,11,13,15,17,19,21,23])\n\n return hist.tolist()\n\n\n def no_node_hist_ice_liq(self):\n \"\"\"histogram over number of nodes for full cloud\n\n\n Returns:\n histogram of the node numbers\n \"\"\"\n\n no_nodes=np.array([])\n\n for f in self.features:\n print(f.classifications)\n print(f.ranges)\n if 'pT_no' in f.measurements:\n rg = f.measurements[\"pT_no\"]['rg']\n print('pT range', rg)\n lowest_ice = np.where(np.isin(f.classifications, [1,4,5]))[0]\n print('lowest ice ', lowest_ice)\n if len(lowest_ice) > 0:\n rg_lowest = f.ranges[lowest_ice][0]\n rg_lt_base = 
np.where(rg > rg_lowest)[0]\n i_base = rg_lt_base[0]\n print(i_base)\n var = f.measurements[\"pT_no\"]['var'][:,i_base:].ravel()\n no_nodes = np.append(no_nodes, var, axis=0)\n \n hist, bins = np.histogram(no_nodes, bins=[0,1,3,5,7,9,11,13,15,17,19,21,23])\n return hist.tolist()\n\n\n def horizontal_wind(self, cth, h_range):\n\n N_features=len(self.features)\n f=self.features[N_features/2]\n\n wp_hasl=f.measurements[\"wp_hasl\"]\n wp_vel=f.measurements[\"wp_vel\"]\n\n cth_index = np.argmin(np.abs(wp_hasl-cth))\n \n top = cth_index+h_range\n bottom = cth_index-h_range\n\n if top>=len(wp_vel):\n top=len(wp_vel)-1\n\n if bottom<0:\n bottom=0\n\n #Advection profile around cloud top\n vel_profile=np.array(wp_vel[bottom:top])\n hasl_profile=np.array(wp_hasl[bottom:top])\n\n clean_index=(~np.isnan(vel_profile))*(vel_profile>0.0)*(vel_profile<100.0)\n \n vel_profile=vel_profile[clean_index]\n hasl_profile=hasl_profile[clean_index]\n\n if len(vel_profile)>0:\n \n std=np.std(vel_profile)\n avg=np.average(vel_profile)\n mx=np.max(vel_profile)\n mn=np.min(vel_profile)\n dvdh=np.average(np.diff(vel_profile)/np.diff(hasl_profile))\n\n else:\n \n std,avg,mx,mn,dvdh = 0,0,0,0,0\n\n return avg, std, mx, mn, dvdh\n\n \n def ilr(self, spacing):\n\n iwc_top=[]\n lwc_top=[]\n\n for f in self.features:\n if f.precipitation_top!=-1 and f.precipitation_top-spacing>0:\n iwc_top.append(f.measurements[\"IWC\"]['var'][f.precipitation_top-spacing])\n if np.sum(f.measurements[\"LWC\"]['var'])>0:\n #print('lwc', f.measurements[\"LWC\"]['var'], f.measurements[\"LWC\"]['mask'])\n lwc_top.append(np.average(f.measurements[\"LWC\"]['var'][f.measurements[\"LWC\"]['var']!=0]))\n else:\n lwc_top.append(0)\n\n iwc_top=np.array(iwc_top)\n lwc_top=np.array(lwc_top)\n\n\n if len(lwc_top!=0)>0:\n ilcr=iwc_top[lwc_top!=0]/lwc_top[lwc_top!=0]\n else:\n ilcr=[0]\n\n print('at ilr; iwc, lwc, ilcr average', \n np.average(iwc_top), np.average(lwc_top), \n np.average(ilcr))\n\n #n_ice=len(ilcr>0.9)/float(len(ilcr))\n #n_liq=len(ilcr<0.1)/float(len(ilcr))\n #n_mix=(len(ilcr)-n_ice-n_liq)/float(len(ilcr)\n\n\n bins = np.logspace(-5, 0, 20)\n if len(ilcr)>0:\n histogr=np.histogram(ilcr,bins=bins)[0]\n else:\n histogr=np.repeat(0,len(bins))\n\n return np.average(ilcr),np.median(ilcr),len(ilcr),list(histogr)\n\n def separation_location(self, spacing):\n\n times=[]\n ranges=[]\n for f in self.features:\n if f.precipitation_top!=-1 and f.precipitation_top-spacing>0:\n ranges.append(f.ranges[f.precipitation_top-spacing])\n times.append(f.time)\n\n return times, ranges\n\n\n def separation_average(self,name,spacing):\n\n print(\"separation average\", name, spacing)\n if not(name in self.features[0].measurements.keys()):\n print('name not in features')\n return 0,0,0,0\n\n values=[]\n print('no_features', len(self.features))\n for f in self.features:\n\n\n #if f.valid==False:\n # continue\n #print('precip top',f.precipitation_top, f.precipitation_top-spacing)\n #print('cc_profile', f.classifications )\n\n if f.precipitation_top!=-1 and f.precipitation_top-spacing>0:\n mask = f.measurements[name]['mask'][f.precipitation_top-spacing].astype(bool)\n #print(type(mask), mask.dtype, mask.data)\n values += f.measurements[name]['var'][f.precipitation_top-spacing][~mask].tolist()\n\n\n values=np.array(values).ravel()\n\n #print(values)\n if len(values)>0 and ~np.all(values==0):\n values_avg=np.average(values[values!=0])\n values_med=np.median(values[values!=0])\n values_std=np.std(values[values!=0])\n n=len(values[values!=0])\n else:\n values_avg=0\n 
values_med=0\n values_std=0\n n=0\n\n return values_avg,values_med,values_std,n\n\n\n def separation_values(self,name,spacing):\n\n print(\"separation values\", name, spacing)\n if not(name in self.features[0].measurements.keys()):\n print('name not in features')\n return 0,0,0,0\n\n values=[]\n print('no_features', len(self.features))\n for f in self.features:\n\n\n #if f.valid==False:\n # continue\n print('precip top',f.precipitation_top, f.precipitation_top-spacing)\n print('cc_profile', f.classifications )\n\n if f.precipitation_top!=-1 and f.precipitation_top-spacing>0:\n mask = f.measurements[name]['mask'][f.precipitation_top-spacing]\n values += f.measurements[name]['var'][f.precipitation_top-spacing][~mask].tolist()\n\n\n values=np.array(values).ravel()\n\n return values\n\n\n def liquid_layer_variation(self):\n\n cb=[]\n for f in self.features:\n if f.liquid_layer_base!=[]:\n cb.append(f.liquid_layer_base[0])\n else:\n cb.append(f.base)\n\n return np.std(cb)\n\n def cloud_top_thickness(self):\n \"\"\"thickness of the liquid layer\n \n if a liquid base is detected?\n\n ! change mr:\n omit non liquid thickness\n\n Returns:\n np.average(thickness), np.median(thickness), np.std(thickness), thickness_with_time\n\n\n where (f.time, f.top_range-f.liquid_layer_base[0], f.liquid_layer_base[0], f.top_range, flag)\n\n flag 0: feature.liquid_layer_base[0]\n \n flag 1: feature.base_range [omitted as base_range is frequently the base of the ice]\n \"\"\"\n\n thickness_with_time = []\n for f in self.features:\n\n print(f.time, f.top_range, f.liquid_layer_base, f.base_range)\n # 2020-10-12 f.base_range and liquid_layer_base are not equal???\n #\n\n if f.liquid_layer_base!=[]:\n thickness_with_time.append((f.time, f.top_range-f.liquid_layer_base[0], f.liquid_layer_base[0], f.top_range, 0))\n else:\n pass\n #\n #thickness_with_time.append((f.time, f.top_range-f.base_range, f.base_range, f.top_range, 1))\n\n print('cloud top thickness: no tops ', len(self.tops), ' no thickness ', len(thickness_with_time))\n thickness = [e[1] for e in thickness_with_time]\n return np.average(thickness), np.median(thickness), np.std(thickness), thickness_with_time\n\n\n def cloud_top_avg(self, frac=0.5):\n \"\"\"calculated the mean height of the mid of the liquid layer\n (based on cloud_top_thickness())\n \n Returns:\n np.average(thickness), np.median(thickness), np.std(thickness), thickness_with_time\n\n \"\"\"\n\n mid_with_time = []\n for f in self.features:\n\n print(f.time, f.top_range, f.liquid_layer_base, f.base_range)\n # 2020-10-12 f.base_range and liquid_layer_base are not equal???\n #\n\n if f.liquid_layer_base!=[]:\n mid = f.liquid_layer_base[0] + frac*(f.top_range-f.liquid_layer_base[0])\n mid_with_time.append((f.time, mid))\n\n mids = [e[1] for e in mid_with_time]\n return np.average(mids), mid_with_time\n\n\n\n def average_paths(self):\n \n lwp=[]\n lwp_s=[]\n iwp=[]\n\n #dh=self.features[0].ranges[1]-self.features[0].ranges[0]\n #dh = self.features[0].measurements[\"LWC\"]['rg'][1] - self.features[0].measurements[\"LWC\"]['rg'][0]\n dh = self.features[0].dh\n for f in self.features:\n lwp.append(np.sum(get_only_valid(f.measurements[\"LWC\"]))*dh)\n lwp_s.append(np.sum(get_only_valid(f.measurements[\"LWC_S\"]))*dh)\n iwp.append(np.sum(get_only_valid(f.measurements[\"IWC\"]))*dh)\n\n #ATTENTION!\n\n lwp=np.array(lwp)\n lwp_s=np.array(lwp_s)\n iwp=np.array(iwp)\n print('paths lwp, lwp_s', lwp, lwp_s)\n\n if lwp!=[] and ~np.all(lwp==0) and len(lwp[lwp_s!=0])>0:\n lwp_average=np.average(lwp[lwp_s!=0])\n 
else:\n lwp_average=0\n\n if lwp_s!=[] and ~np.all(lwp_s==0):\n lwp_s_average=np.average(lwp_s[lwp_s!=0])\n else:\n lwp_s_average=0\n\n if iwp!=[] and ~np.all(iwp==0):\n iwp_average=np.average(iwp[iwp!=0])\n iwp_std=np.std(iwp[iwp!=0])\n else:\n iwp_average=0\n iwp_std=0\n\n print('path averages ', lwp_average, lwp_s_average, iwp_average)\n\n return lwp_average, lwp_s_average, iwp_average, iwp_std\n\n def pressure_range(self):\n \"\"\"loop over features, take the base and top pressure\n and return max(base), min(top)\n \"\"\" \n top_pressure=[]\n base_pressure=[]\n\n for f in self.features:\n top_pressure.append(f.measurements[\"p\"]['var'][-1])\n base_pressure.append(f.measurements[\"p\"]['var'][0])\n\n return np.max(base_pressure),np.min(top_pressure)\n\n def print_values(self, name1, name2):\n for f in self.features:\n for v in range(len(f.measurements[name1])):\n if f.measurements[name1][v]!=0 and f.measurements[name2][v]!=0:\n print(f.measurements[name1][v], f.measurements[name2][v])\n\n def n_melting(self):\n\n n_melt=0\n\n for f in self.features:\n if f.has_melting:\n n_melt+=1\n\n return n_melt\n\n def n_drizzle(self):\n\n n_driz=0\n\n for f in self.features:\n if f.has_drizzle:\n n_driz+=1\n\n return n_driz\n\n def correct_LDR(self,snr_co=-20,ldr_limit=-33):\n # isn't that done during reading?\n pass\n #for f in self.features:\n\n # LDR_min=snr_co-f.measurements[\"SNR\"]\n # f.measurements[\"LDR\"][f.measurements[\"LDR\"] None:\n \"\"\"Switch setup for nefit easy.\"\"\"\n entities: list[NefitEntity] = []\n\n client = hass.data[DOMAIN][config_entry.entry_id][\"client\"]\n data = config_entry.data\n\n for description in SWITCHES:\n if description.key == \"hot_water\":\n entities.append(NefitHotWater(description, client, data))\n elif description.key == \"lockui\":\n entities.append(NefitSwitch(description, client, data, \"true\", \"false\"))\n elif description.key == \"weather_dependent\":\n entities.append(NefitSwitch(description, client, data, \"weather\", \"room\"))\n elif description.key == \"home_entrance_detection\":\n await setup_home_entrance_detection(\n entities,\n client,\n data,\n description.key,\n description.name,\n description.icon,\n )\n else:\n entities.append(NefitSwitch(description, client, data))\n\n async_add_entities(entities, True)\n\n\nasync def setup_home_entrance_detection(\n entities: list[NefitEntity],\n client: NefitEasy,\n data: MappingProxyType[str, Any],\n basekey: str,\n basename: str,\n baseicon: str,\n) -> None:\n \"\"\"Home entrance detection setup.\"\"\"\n for i in range(0, 10):\n endpoint = \"/ecus/rrc/homeentrancedetection\"\n name = await client.async_init_presence(endpoint, i)\n\n if name is not None:\n description = NefitSwitchEntityDescription(\n key=f\"presence{i}_detected\",\n name=basename.format(name),\n url=f\"{endpoint}/userprofile{i}/detected\",\n icon=baseicon,\n )\n entities.append(NefitSwitch(description, client, data))\n\n\nclass NefitSwitch(NefitEntity, SwitchEntity):\n \"\"\"Representation of a NefitSwitch entity.\"\"\"\n\n entity_description: NefitSwitchEntityDescription\n\n def __init__(\n self,\n entity_description: NefitSwitchEntityDescription,\n client: NefitEasy,\n data: MappingProxyType[str, Any],\n on_value: str = \"on\",\n off_value: str = \"off\",\n ) -> None:\n \"\"\"Init Nefit Switch.\"\"\"\n super().__init__(entity_description, client, data)\n\n self._on_value = on_value\n self._off_value = off_value\n\n @property\n def is_on(self) -> bool:\n \"\"\"Get whether the switch is in on state.\"\"\"\n return bool(\n 
self.coordinator.data.get(self.entity_description.key) == self._on_value\n )\n\n @property\n def assumed_state(self) -> bool:\n \"\"\"Return true if we do optimistic updates.\"\"\"\n return False\n\n async def async_turn_on(self, **kwargs: Any) -> None:\n \"\"\"Turn the entity on.\"\"\"\n self._client.nefit.put_value(self.get_endpoint(), self._on_value)\n\n self._client.nefit.get(self.get_endpoint())\n\n _LOGGER.debug(\n \"Switch Nefit %s to %s, endpoint=%s.\",\n self.entity_description.key,\n self._on_value,\n self.get_endpoint(),\n )\n\n async def async_turn_off(self, **kwargs: Any) -> None:\n \"\"\"Turn the entity off.\"\"\"\n self._client.nefit.put_value(self.get_endpoint(), self._off_value)\n\n self._client.nefit.get(self.get_endpoint())\n\n _LOGGER.debug(\n \"Switch Nefit %s to %s, endpoint=%s.\",\n self.entity_description.key,\n self._off_value,\n self.get_endpoint(),\n )\n\n\nclass NefitHotWater(NefitSwitch):\n \"\"\"Class for nefit hot water entity.\"\"\"\n\n def get_endpoint(self) -> str:\n \"\"\"Get end point.\"\"\"\n endpoint = (\n \"dhwOperationClockMode\"\n if self.coordinator.data.get(\"user_mode\") == \"clock\"\n else \"dhwOperationManualMode\"\n )\n return \"/dhwCircuits/dhwA/\" + endpoint\n","repo_name":"ksya/ha-nefiteasy","sub_path":"custom_components/nefiteasy/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"8"} +{"seq_id":"36117935515","text":"import sys\nfrom collections import Counter\n\nn, m = map(int, input().split())\n\nd_list = []\nfor _ in range(n):\n d = sys.stdin.readline().strip()\n d_list.append(d)\n\nb_list = []\nfor _ in range(m):\n b = sys.stdin.readline().strip()\n b_list.append(b)\n\ndb_list = d_list + b_list\n\ncounter = dict(Counter(db_list))\nresult = {key: value for key, value in counter.items() if value > 1}\n\nprint(len(result))\nfor value in sorted(result):\n print(value)\n","repo_name":"pppp-qqqq/algorithm","sub_path":"CLASS3/04_듣보잡/1764_sj.py","file_name":"1764_sj.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"12491990926","text":"from math import gcd\n\n\ndef find_e(phi):\n for i in range(2, phi):\n if gcd(i, phi) == 1:\n return i\n return None\n\n\ndef find_d(phi, e):\n for i in range(2, phi):\n if (i * e) % phi == 1:\n return i\n return None\n\n\ndef encrypt(message, public_key):\n n, e = public_key\n encrypted_message = message ** e % n\n return encrypted_message\n\n\ndef decrypt(encrypted_message, private_key):\n n, d = private_key\n decrypted_message = encrypted_message ** d % n\n return decrypted_message\n\n\n# Generate public and private keys\np = 17\nq = 23\n\nn = p * q\n\nphi = (p - 1) * (q - 1)\n\ne = find_e(phi)\nif e is None:\n print(\"No e found\")\n exit(1)\n\nd = find_d(phi, e)\nif d is None:\n print(\"No d found\")\n exit(1)\n\npublic_key = (n, e)\nprivate_key = (n, d)\n\nprint(\"Public key: \", public_key)\nprint(\"Private key: \", private_key)\n\n\n# User input\nuser_choice = input(\"Encrypt or decrypt? 
(e/d): \")\n\nif user_choice == \"e\":\n message = input(\"Enter message to encrypt: \")\n crypted_message = \"\"\n for char in message:\n crypted_chars = encrypt(ord(char), public_key)\n crypted_message += str(crypted_chars) + \" \"\n print(\"Encrypted message: \", crypted_message)\n\nelif user_choice == \"d\":\n crypted_message = input(\"Message: \")\n crypted_chars = crypted_message.split(\" \")\n message = \"\"\n for char in crypted_chars:\n # Decryption\n decrypted_char = decrypt(int(char), private_key)\n message += chr(decrypted_char)\n print(\"Decrypted message: \", message)\n\nelse:\n print(\"Invalid choice\")\n","repo_name":"BawBaw31/rsa-encryption","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"11529465966","text":"from __future__ import absolute_import, print_function\n\nfrom flask import g\n\nfrom .base_api import Resource\nfrom ..service import DataService\n\n\nclass Datas(Resource):\n \"\"\"\n ref: web_api.yaml\n \"\"\"\n\n def get(self):\n \"\"\"\n ref: web_api.yaml\n :return:\n \"\"\"\n pattern = None\n if 'pattern' in g.args:\n pattern = g.args['pattern']\n datas = DataService.list(pattern)\n\n for data_no, data in enumerate(datas):\n datas[data_no] = {\n \"id\": data.id,\n \"name\": data.name,\n \"uri\": '/v1/data/%s' % data.name,\n \"createTime\": data.create_time * 1000,\n \"updateTime\": data.update_time * 1000,\n \"labelRatio\": data.label_ratio,\n \"period\": {\n \"length\": data.period,\n \"ratio\": data.period_ratio\n },\n \"display\": {\n \"start\": data.start_time * 1000,\n \"end\": min(data.start_time + 86400, data.end_time) * 1000\n },\n \"time\": {\n \"start\": data.start_time * 1000,\n \"end\": data.end_time * 1000\n }\n }\n\n return self.render(data=datas), 200, None\n","repo_name":"curleywang/Curve","sub_path":"api/curve/v1/api/datas.py","file_name":"datas.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"8"} +{"seq_id":"42764032361","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ndef readfile(data_path):\n\tdf = pd.read_csv(data_path, header=0)\n\tInput = df['Input']\n\tOutput = df['Output']\n\tm1 = []\n\tfor x in Input:\n\t\tm1.append(1)\n\treturn Input, Output, m1\n\ndef linearRegression(Input, Output, m1):\t \n\tm = np.matrix([m1, Input], dtype=int)\n\tm = m.T\n\ty = np.asmatrix(Output,dtype=int)\n\ty = y.T\n\n\tB = (m.T * m).I * (m.T * y)\t\t# B = (m^t * m)^(-1) * (m^t * y) is the least square matrix\n\tB = np.asmatrix(B, dtype=int)\t# B stores w0 and w1\n\n\tinput_size = np.size(Input)\n\tinput_range = Input.values\n\tinput_range = np.sort(input_range)\n\tmax_num = input_range[input_size-1]\n\tmin_num = input_range[0]\n\n\tlin_x = []\n\tlin_y = []\n\tfor x in range(min_num,max_num+1):\n\t\tlin_x.append(x)\n\t\tX = np.matrix([1, x])\n\t\tlin_y.append(int(X * B))\t# y = w0 + w1 * x\n\treturn lin_x, lin_y, B\n\t\n\ndata_path = './data.csv'\nInput, Output, m1 = readfile(data_path)\nlin_x, lin_y, B = linearRegression(Input, Output, m1)\n\nprint(B)\nprint(lin_x)\nprint(lin_y) \n\n\n#======================plot============================\n\nfig, a1 = plt.subplots()\na1.plot(Input, Output, 'ro')\t# plot original data\na1.plot(lin_x, lin_y, label='linear regression')\t# plot the linear regression 
line.\na1.set_xlabel('Input')\na1.set_ylabel('Output')\nplt.show()\n","repo_name":"Jyun-Neng/Machine_Learning","sub_path":"linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"71749420742","text":"import pytest\n\n\ndef eq_(a, b, msg=None):\n __tracebackhide__ = True\n assert a == b, msg or \"{!r} != {!r}\".format(a, b)\n\n\ndef callcounter():\n def f(*args, **kwargs):\n f.callcount += 1\n\n f.callcount = 0\n return f\n\n\nclass CallLogger:\n \"\"\"This is a dummy object that logs all calls made to it.\n\n It is used to simulate the GUI layer.\n \"\"\"\n\n def __init__(self):\n self.calls = []\n\n def __getattr__(self, func_name):\n def func(*args, **kw):\n self.calls.append(func_name)\n\n return func\n\n def clear_calls(self):\n del self.calls[:]\n\n def check_gui_calls(self, expected, verify_order=False):\n \"\"\"Checks that the expected calls have been made to 'self', then clears the log.\n\n `expected` is an iterable of strings representing method names.\n If `verify_order` is True, the order of the calls matters.\n \"\"\"\n __tracebackhide__ = True\n if verify_order:\n eq_(self.calls, expected)\n else:\n eq_(set(self.calls), set(expected))\n self.clear_calls()\n\n def check_gui_calls_partial(self, expected=None, not_expected=None, verify_order=False):\n \"\"\"Checks that the expected calls have been made to 'self', then clears the log.\n\n `expected` is an iterable of strings representing method names. Order doesn't matter.\n Moreover, if calls have been made that are not in expected, no failure occur.\n `not_expected` can be used for a more explicit check (rather than calling `check_gui_calls`\n with an empty `expected`) to assert that calls have *not* been made.\n \"\"\"\n __tracebackhide__ = True\n if expected is not None:\n not_called = set(expected) - set(self.calls)\n assert not not_called, f\"These calls haven't been made: {not_called}\"\n if verify_order:\n max_index = 0\n for call in expected:\n index = self.calls.index(call)\n if index < max_index:\n raise AssertionError(f\"The call {call} hasn't been made in the correct order\")\n max_index = index\n if not_expected is not None:\n called = set(not_expected) & set(self.calls)\n assert not called, f\"These calls shouldn't have been made: {called}\"\n self.clear_calls()\n\n\nclass TestApp:\n def __init__(self):\n self._call_loggers = []\n\n def clear_gui_calls(self):\n for logger in self._call_loggers:\n logger.clear_calls()\n\n def make_logger(self, logger=None):\n if logger is None:\n logger = CallLogger()\n self._call_loggers.append(logger)\n return logger\n\n def make_gui(self, name, class_, view=None, parent=None, holder=None):\n if view is None:\n view = self.make_logger()\n if parent is None:\n # The attribute \"default_parent\" has to be set for this to work correctly\n parent = self.default_parent\n if holder is None:\n holder = self\n setattr(holder, f\"{name}_gui\", view)\n gui = class_(parent)\n gui.view = view\n setattr(holder, name, gui)\n return gui\n\n\n# To use @with_app, you have to import app in your conftest.py file.\ndef with_app(setupfunc):\n def decorator(func):\n func.setupfunc = setupfunc\n return func\n\n return decorator\n\n\n@pytest.fixture\ndef app(request):\n setupfunc = request.function.setupfunc\n if hasattr(setupfunc, \"__code__\"):\n argnames = setupfunc.__code__.co_varnames[: setupfunc.__code__.co_argcount]\n\n def getarg(name):\n if name == 
\"self\":\n return request.function.__self__\n else:\n return request.getfixturevalue(name)\n\n args = [getarg(argname) for argname in argnames]\n else:\n args = []\n app = setupfunc(*args)\n return app\n\n\ndef _unify_args(func, args, kwargs, args_to_ignore=None):\n \"\"\"Unify args and kwargs in the same dictionary.\n\n The result is kwargs with args added to it. func.func_code.co_varnames is used to determine\n under what key each elements of arg will be mapped in kwargs.\n\n if you want some arguments not to be in the results, supply a list of arg names in\n args_to_ignore.\n\n if f is a function that takes *args, func_code.co_varnames is empty, so args will be put\n under 'args' in kwargs.\n\n def foo(bar, baz)\n _unifyArgs(foo, (42,), {'baz': 23}) --> {'bar': 42, 'baz': 23}\n _unifyArgs(foo, (42,), {'baz': 23}, ['bar']) --> {'baz': 23}\n \"\"\"\n result = kwargs.copy()\n if hasattr(func, \"__code__\"): # built-in functions don't have func_code\n args = list(args)\n if getattr(func, \"__self__\", None) is not None: # bound method, we have to add self to args list\n args = [func.__self__] + args\n defaults = list(func.__defaults__) if func.__defaults__ is not None else []\n arg_count = func.__code__.co_argcount\n arg_names = list(func.__code__.co_varnames)\n if len(args) < arg_count: # We have default values\n required_arg_count = arg_count - len(args)\n args = args + defaults[-required_arg_count:]\n for arg_name, arg in zip(arg_names, args):\n # setdefault is used because if the arg is already in kwargs, we don't want to use default values\n result.setdefault(arg_name, arg)\n else:\n # 'func' has a *args argument\n result[\"args\"] = args\n if args_to_ignore:\n for kw in args_to_ignore:\n del result[kw]\n return result\n\n\ndef log_calls(func):\n \"\"\"Logs all func calls' arguments under func.calls.\n\n func.calls is a list of _unify_args() result (dict).\n\n Mostly used for unit testing.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n unified_args = _unify_args(func, args, kwargs)\n wrapper.calls.append(unified_args)\n return func(*args, **kwargs)\n\n wrapper.calls = []\n return wrapper\n","repo_name":"arsenetar/dupeguru","sub_path":"hscommon/testutil.py","file_name":"testutil.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":4244,"dataset":"github-code","pt":"8"} +{"seq_id":"70425535941","text":"import threading\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor, wait\n\ntarget = 'http://192.168.1.162:8080/index.php'\nsession = requests.session()\nflag = 'helloworld'\n\n\ndef upload(e: threading.Event):\n files = [\n ('file', ('load.png', b'a' * 40960, 'image/png')),\n ]\n data = {'PHP_SESSION_UPLOAD_PROGRESS': rf''''); echo('{flag}'); ?>'''}\n\n while not e.is_set():\n requests.post(\n target,\n data=data,\n files=files,\n cookies={'PHPSESSID': flag},\n )\n\n\ndef write(e: threading.Event):\n while not e.is_set():\n response = requests.get(\n f'{target}?file=/tmp/sess_{flag}',\n )\n\n if flag.encode() in response.content:\n e.set()\n\n\nif __name__ == '__main__':\n futures = []\n event = threading.Event()\n pool = ThreadPoolExecutor(15)\n for i in range(10):\n futures.append(pool.submit(upload, event))\n\n for i in range(5):\n futures.append(pool.submit(write, event))\n\n 
wait(futures)\n","repo_name":"Hpd0ger/CTF-Scripts","sub_path":"thread/contend2.py","file_name":"contend2.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"8"} +{"seq_id":"2184384282","text":"\"\"\"Base classes for NiMARE.\"\"\"\nimport gzip\nimport inspect\nimport logging\nimport pickle\nfrom abc import ABCMeta, abstractmethod\nfrom collections import defaultdict\n\nfrom nimare.results import MetaResult\n\nLGR = logging.getLogger(__name__)\n\n\nclass NiMAREBase(metaclass=ABCMeta):\n \"\"\"Base class for NiMARE.\n\n This class contains a few features that are useful throughout the library:\n\n - Custom __repr__ method for printing the object.\n - get_params from scikit-learn, with which parameters provided at __init__ can be viewed.\n - set_params from scikit-learn, with which parameters provided at __init__ can be overwritten.\n I'm not sure that this is actually used or useable in NiMARE.\n - save to save the object to a Pickle file.\n - load to load an instance of the object from a Pickle file.\n\n TODO: Actually write/refactor class methods. They mostly come directly from sklearn\n https://github.com/scikit-learn/scikit-learn/blob/\n 2a1e9686eeb203f5fddf44fd06414db8ab6a554a/sklearn/base.py#L141\n \"\"\"\n\n def __init__(self):\n pass\n\n def __repr__(self):\n \"\"\"Show basic NiMARE class representation.\n\n Specifically, this shows the name of the class, along with any parameters\n that are **not** set to the default.\n \"\"\"\n # Get default parameter values for the object\n signature = inspect.signature(self.__init__)\n defaults = {\n k: v.default\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty\n }\n\n # Eliminate any sub-parameters (e.g., parameters for a MetaEstimator's KernelTransformer),\n # as well as default values\n params = self.get_params()\n params = {k: v for k, v in params.items() if \"__\" not in k}\n params = {k: v for k, v in params.items() if defaults.get(k) != v}\n\n # Convert to strings\n param_strs = []\n for k, v in params.items():\n if isinstance(v, str):\n # Wrap string values in single quotes\n param_str = f\"{k}='{v}'\"\n else:\n # Keep everything else as-is based on its own repr\n param_str = f\"{k}={v}\"\n param_strs.append(param_str)\n\n rep = f\"{self.__class__.__name__}({', '.join(param_strs)})\"\n return rep\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator.\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p\n for p in init_signature.parameters.values()\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\" % (cls, init_signature)\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this 
estimator.\n\n Parameters\n ----------\n deep : :obj:`bool`, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : :obj:`dict`\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key, None)\n if deep and hasattr(value, \"get_params\"):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as pipelines). The latter have parameters of the form\n ``__`` so that it's possible to update each\n component of a nested object.\n\n Returns\n -------\n self\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if key not in valid_params:\n raise ValueError(\n \"Invalid parameter %s for estimator %s. \"\n \"Check the list of available parameters \"\n \"with `estimator.get_params().keys()`.\" % (key, self)\n )\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def save(self, filename, compress=True):\n \"\"\"Pickle the class instance to the provided file.\n\n Parameters\n ----------\n filename : :obj:`str`\n File to which object will be saved.\n compress : :obj:`bool`, optional\n If True, the file will be compressed with gzip. Otherwise, the\n uncompressed version will be saved. Default = True.\n \"\"\"\n if compress:\n with gzip.GzipFile(filename, \"wb\") as file_object:\n pickle.dump(self, file_object)\n else:\n with open(filename, \"wb\") as file_object:\n pickle.dump(self, file_object)\n\n @classmethod\n def load(cls, filename, compressed=True):\n \"\"\"Load a pickled class instance from file.\n\n Parameters\n ----------\n filename : :obj:`str`\n Name of file containing object.\n compressed : :obj:`bool`, optional\n If True, the file is assumed to be compressed and gzip will be used\n to load it. Otherwise, it will assume that the file is not\n compressed. 
Default = True.\n\n Returns\n -------\n obj : class object\n Loaded class object.\n \"\"\"\n if compressed:\n try:\n with gzip.GzipFile(filename, \"rb\") as file_object:\n obj = pickle.load(file_object)\n except UnicodeDecodeError:\n # Need to try this for python3\n with gzip.GzipFile(filename, \"rb\") as file_object:\n obj = pickle.load(file_object, encoding=\"latin\")\n else:\n try:\n with open(filename, \"rb\") as file_object:\n obj = pickle.load(file_object)\n except UnicodeDecodeError:\n # Need to try this for python3\n with open(filename, \"rb\") as file_object:\n obj = pickle.load(file_object, encoding=\"latin\")\n\n if not isinstance(obj, cls):\n raise IOError(f\"Pickled object must be {cls}, not {type(obj)}\")\n\n return obj\n\n\nclass Estimator(NiMAREBase):\n \"\"\"Estimators take in Datasets and return MetaResults.\n\n All Estimators must have a ``_fit`` method implemented, which applies algorithm-specific\n methods to a Dataset and returns a dictionary of arrays to be converted into a MetaResult.\n\n Users will interact with the ``_fit`` method by calling the user-facing ``fit`` method.\n ``fit`` takes in a ``Dataset``, calls ``_collect_inputs``, then ``_preprocess_input``,\n then ``_fit``, and finally converts the dictionary returned by ``_fit`` into a ``MetaResult``.\n \"\"\"\n\n # Inputs that must be available in input Dataset. Keys are names of\n # attributes to set; values are strings indicating location in Dataset.\n _required_inputs = {}\n\n def _collect_inputs(self, dataset, drop_invalid=True):\n \"\"\"Search for, and validate, required inputs as necessary.\n\n This method populates the ``inputs_`` attribute.\n\n .. versionchanged:: 0.0.12\n\n Renamed from ``_validate_input``.\n\n Parameters\n ----------\n dataset : :obj:`~nimare.dataset.Dataset`\n drop_invalid : :obj:`bool`, optional\n Whether to automatically drop any studies in the Dataset without valid data or not.\n Default is True.\n\n Attributes\n ----------\n inputs_ : :obj:`dict`\n A dictionary of required inputs for the Estimator, extracted from the Dataset.\n The actual inputs collected in this attribute are determined by the\n ``_required_inputs`` variable that should be specified in each child class.\n \"\"\"\n if not hasattr(dataset, \"slice\"):\n raise ValueError(\n f\"Argument 'dataset' must be a valid Dataset object, not a {type(dataset)}.\"\n )\n\n if self._required_inputs:\n data = dataset.get(self._required_inputs, drop_invalid=drop_invalid)\n # Do not overwrite existing inputs_ attribute.\n # This is necessary for PairwiseCBMAEstimator, which validates two sets of coordinates\n # in the same object.\n # It makes the *strong* assumption that required inputs will not changes within an\n # Estimator across fit calls, so all fields of inputs_ will be overwritten instead of\n # retaining outdated fields from previous fit calls.\n if not hasattr(self, \"inputs_\"):\n self.inputs_ = {}\n\n for k, v in data.items():\n if v is None:\n raise ValueError(\n f\"Estimator {self.__class__.__name__} requires input dataset to contain \"\n f\"{k}, but no matching data were found.\"\n )\n self.inputs_[k] = v\n\n @abstractmethod\n def _preprocess_input(self, dataset):\n \"\"\"Perform any additional preprocessing steps on data in self.inputs_.\n\n Parameters\n ----------\n dataset : :obj:`~nimare.dataset.Dataset`\n The Dataset\n \"\"\"\n pass\n\n @abstractmethod\n def _fit(self, dataset):\n \"\"\"Apply estimation to dataset and output results.\n\n Must return a dictionary of results, where keys are names of images\n and 
values are ndarrays.\n \"\"\"\n pass\n\n def fit(self, dataset, drop_invalid=True):\n \"\"\"Fit Estimator to Dataset.\n\n Parameters\n ----------\n dataset : :obj:`~nimare.dataset.Dataset`\n Dataset object to analyze.\n drop_invalid : :obj:`bool`, optional\n Whether to automatically ignore any studies without the required data or not.\n Default is False.\n\n Returns\n -------\n :obj:`~nimare.results.MetaResult`\n Results of Estimator fitting.\n\n Attributes\n ----------\n inputs_ : :obj:`dict`\n Inputs used in _fit.\n\n Notes\n -----\n The `fit` method is a light wrapper that runs input validation and\n preprocessing before fitting the actual model. Estimators' individual\n \"fitting\" methods are implemented as `_fit`, although users should\n call `fit`.\n \"\"\"\n self._collect_inputs(dataset, drop_invalid=drop_invalid)\n self._preprocess_input(dataset)\n maps, tables = self._fit(dataset)\n\n if hasattr(self, \"masker\") and self.masker is not None:\n masker = self.masker\n else:\n masker = dataset.masker\n\n return MetaResult(self, mask=masker, maps=maps, tables=tables)\n","repo_name":"liuzhenqi77/NiMARE","sub_path":"nimare/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":12363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"8"} +{"seq_id":"420699233","text":"import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision\nfrom torchvision.transforms import ToTensor\nimport os\n\nclass MyDataset(Dataset):\n def __init__(self, is_train=True, transform=None):\n super().__init__()\n self.is_train = is_train\n self.transform = transform \n ###################################\n # load all data if it fits into memory, otherwise put references (e. g. file paths) in a list\n if self.is_train:\n self.data = [(11.,1), (12.,1), (13.,1), (14.,0), (15.,0), (16.,0), (17.,0), (18.,0)]\n else:\n self.data = [(11.5,1), (12.5,1), (9.,1), (14.5,0), (17.,0)]\n ###################################\n\n def __getitem__(self, idx):\n ###################################\n # get the sample with index idx, it is self.data[idx] if all data is in memory, \n # otherwise acquire the data associated with the reference self.data[idx]\n sample = self.data[idx]\n ###################################\n if self.transform:\n sample = self.transform(sample) # modify raw data if necessary\n return sample\n\n def __len__(self):\n return len(self.data)\n\n\ndef demo1(dl):\n ''' Using iterators and next with an iterable such as a Dataset or a DataLoader, \n a StopIteration exception may be thrown if no further data is available.\n ''' \n it = iter(dl)\n sample = next(it)\n print(sample)\n sample = next(it)\n print(sample)\n\n \ndef demo2(dl):\n ''' Using a while loop with an iterable such as a Dataset or a DataLoader\n ''' \n for sample in dl:\n print(sample)\n\n\nif __name__ == '__main__':\n # Test your dataset first in a pure Python/Numpy environment, you do not need to know\n # much about Torch for it\n train_ds = MyDataset(is_train=False)\n demo1(train_ds)\n demo2(train_ds)\n \n train_dl = DataLoader(train_ds, batch_size=3, shuffle=False)\n demo1(train_dl)\n demo2(train_dl)\n \n # There are also well-known datasets already prepared.\n # These datasets yield images in the standard Python PIL format. You may convert them to numpy arrays by\n # np.asarray(pil_img). 
We will use a transform to automatically convert the image to a torch tensor and rescale\n # its pixel values in the range 0..1\n root_dir = os.path.dirname(__file__)\n train_ds = torchvision.datasets.FashionMNIST(root_dir+'/fashion_mnist_data', download=True, transform=ToTensor())\n train_dl = DataLoader(train_ds, batch_size=3, shuffle=False)\n demo1(train_ds)\n demo1(train_dl)\n\n# also try:\n# train_ds = torchvision.datasets.MNIST(root_dir+'/mnist_data', download=True, transform=ToTensor())\n# for the MNIST data. If the download is not successful, search and copy the files \n# t10k-labels-idx1-ubyte.gz and train-labels-idx1-ubyte.gz\n# from the internet and copy it into the mnist_data/MNIST/raw directory.\n\n# Remark: The MNIST server rejects standard Python requests. You may change the requesting user by adding:\n# -----------------------------------------------------\n# import urllib\n# opener = urllib.request.build_opener()\n# opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n# urllib.request.install_opener(opener)\n# -----------------------------------------------------\n\n","repo_name":"SamuelMMT/Projekt_ML","sub_path":"dataloader_demo.py","file_name":"dataloader_demo.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"6808260249","text":"import os\nimport argparse \nimport json\nfrom tabnanny import check\nfrom geopy.distance import great_circle\nfrom requests import get\nfrom geopy.geocoders import Nominatim\n\nJSON_MIN_RTT = \"min rtt\"\nJSON_AVG_RTT = \"avg rtt\"\nJSON_MAX_RTT = \"max rtt\"\nJSON_MDEV_RTT = \"mdev\"\nJSON_STATUS = \"status\"\nJSON_INFO = \"info\"\n\nOUTPUT_DIRS = [\"./GEOINFO\", \"./PINGS\"]\nGEOINFO_DIR = 0\nPINGS_DIR = 1\n\nPING_FILE = \"pinginfo.txt\"\nGEOFILE = \"geotriplets.txt\"\n\nPING_COUNT = 100\n\nTARGET_HEADERS = [\"TO COUNTRY\", \"TO CITY\", \"COMPLETE TO\", \"TO IP\", \"TO LAT\", \"TO LON\"]\nDISTANCE_HEADER = \"DISTANCE (KM)\"\nSOURCE_HEADERS = [\"FROM COUNTRY\", \"FROM CITY\", \"COMPLETE FROM\", \"FROM IP\", \"FROM LAT\", \"FROM LON\"]\nH_COUNTRY = 0\nH_CITY = 1\nH_COMPLETE_LOC = 2\nH_IP = 3\nH_LATITUDE = 4\nH_LONGITUDE = 5\n\ndef checkDirs():\n print(\"========== CHECKING NECESSARY DIRECTORIES ==========\")\n for dir in OUTPUT_DIRS:\n if not os.path.exists(dir):\n os.makedirs(dir)\n print(\"{} directory created\".format(dir))\n print(\"============ ALL DIRECTORIES ARE READY =============\")\n\ndef getArgParser():\n parser = argparse.ArgumentParser(description='Realiza una cantidad especificada de pings hacia múltiples destinos')\n parser.add_argument('-c', metavar='--ping_count', type=int, required = False, default = PING_COUNT,\n help='Número de pings a ejecutar (por defecto, {})'.format(PING_COUNT))\n parser.add_argument('-fin', metavar='--inputFile', type=str, required = False, default = GEOFILE,\n help='Fichero de entrada con las tripletas Ciudad-IP-Usuario_Remoto (por defecto, {})'.format(GEOFILE))\n\n return parser\n\n\ndef getHeaderValue(headers, idx):\n return headers[idx].split(\":\")[1].strip()\n\n\ndef getHeaders(file,targetFile=False):\n input1 = open(file,\"rt\",encoding=\"utf8\",errors=\"ignore\")\n lines = [line.strip() for line in list(filter(lambda line: line != \"\\n\", input1.readlines()))]\n input1.close()\n\n headers_len = len(TARGET_HEADERS)\n headers = [h+\":\"+v for (h,v) in zip(SOURCE_HEADERS if not targetFile else TARGET_HEADERS, lines[0:headers_len])]\n\n return headers\n\n\n\ndef getDistance(sourcePoint, 
targetPoint):\n (sLat, sLon) = sourcePoint\n (tLat, tLon) = targetPoint\n\n return great_circle((sLat,sLon),(tLat,tLon)).km\n\n\ndef getSourceFile(files):\n ip = get('https://api.ipify.org').text\n for file in files:\n if ip == list(filter(lambda x: x != \"\\n\" and x != \"\\r\" and x != \"\\r\\n\", open(file,\"rt\",encoding=\"utf8\",errors=\"ignore\").readlines()))[3].strip():\n return file\n\ndef dumpGeoData(geotriplets):\n files = []\n geolocator = Nominatim(user_agent=\"http\")\n print(\"\\n========== PARSING GEO DATA ==========\")\n\n for (city,ip,_) in geotriplets:\n fname = os.path.join(OUTPUT_DIRS[GEOINFO_DIR], \"geo\"+city+\".txt\")\n geofile = open(fname,\"w+\")\n \n loc = geolocator.geocode(city, language=\"en\")\n data = [loc.address.split(\",\")[-1].strip(), city, loc, ip, loc.latitude, loc.longitude]\n geofile.write(\"\\n\".join(map(str,data)))\n \n print(\"{} file was provided with {} geo data\".format(fname, city))\n geofile.close()\n\n files.append(fname)\n\n print(\"========= PARSING TERMINATED =========\")\n\n return files\n \n\nparser = getArgParser()\nargs = parser.parse_args()\ncheckDirs()\ngeofin = open(GEOFILE,\"rt\")\n\n# lista de tripletas (ciudad,ip,nombreMaquina)\ngeotriplets = [tuple(pair.strip().split(\"-\")) for pair in list(filter(lambda x: x != \"\\n\" and x != \"\\r\" and x != \"\\r\\n\",geofin.readlines()[1:]))]\ngeofin.close()\nfiles = dumpGeoData(geotriplets) # vuelca la geoinformacion de cada tripleta en su correspondiente fichero de tipo geo[ciudad].txt\n\nsource = getSourceFile(files)\nsource_h = getHeaders(source)\n\nfor target in files:\n\n baseSource = os.path.basename(source)\n baseTarget = os.path.basename(target)\n if baseSource == baseTarget or not (baseTarget.startswith(\"geo\") and baseTarget.endswith(\".txt\")):\n continue\n\n print(\"File: \"+ target)\n\n target_h = getHeaders(target,True)\n\n dist = getDistance((getHeaderValue(source_h, H_LATITUDE), getHeaderValue(source_h, H_LONGITUDE)), \n (getHeaderValue(target_h, H_LATITUDE), getHeaderValue(target_h, H_LONGITUDE)))\n\n\n outname = OUTPUT_DIRS[PINGS_DIR]+\"/\"+getHeaderValue(source_h,H_CITY).replace(\" \",\"\")+\"-\"+getHeaderValue(target_h,H_CITY).replace(\" \",\"\")+\"-\"+PING_FILE \n out = open(outname,\"w+\")\n out.write(\"\\n\".join(map(str,source_h)))\n out.write(\"\\n\")\n out.write(\"\\n\".join(map(str,target_h)))\n out.write(\"\\n{}:{}\\n\".format(DISTANCE_HEADER,dist))\n\n toIP = getHeaderValue(target_h,H_IP)\n\n minSize = len(out.readlines())+4 #4 es el minimo numero de lineas que genera como output un ping\n\n print(\"PINGING FROM {} TO {}...\".format(getHeaderValue(source_h,H_CITY), getHeaderValue(target_h,H_CITY)))\n print(\"Outputting in {}...\".format(outname))\n pingCMD = \"ping -{} {} {} >> {}\".format(\"n\" if os.name == \"nt\" else \"c\", args.c, toIP, outname)\n print(\"Command: \" + pingCMD)\n response = os.system(pingCMD)\n\n if (response == 1):\n\n data_set = {JSON_STATUS: \"failure\", JSON_INFO: \"FAILURE --> Ping returned error code\"}\n\n else:\n\n flines = list(filter(lambda line: line != \"\\n\", out.readlines()))\n\n if (len(flines) <= minSize):\n\n data_set = {JSON_STATUS: \"failure\", JSON_INFO: \"FAILURE --> Couldn't connect to {}\".format(toIP)}\n\n else:\n\n print(\"SUCCESSFULL PING\")\n if (os.name == \"posix\"):\n\n # dat = [min rtt, avg rtt, max rtt, mdev, status]\n dat = flines[-1::][0].split()[3].split(\"/\")\n data_set = {JSON_MIN_RTT: dat[0], JSON_AVG_RTT: dat[1], JSON_MAX_RTT: dat[2], JSON_MDEV_RTT: dat[3], JSON_STATUS: \"success\"} \n\n 
else:\n\n # dat = [min rtt, avg rtt, max rtt, status]\n dat = [token.split(\"=\")[1] for token in flines[-1::][0].split(\",\")]\n data_set = {JSON_MIN_RTT: dat[0], JSON_AVG_RTT: dat[2], JSON_MAX_RTT: dat[1], JSON_STATUS: \"success\"} \n\n\n #json_dump = json.dump(data_set, open(JSON_FILE,\"w\"))\n print(json.dumps(data_set))\n print(\"\\n\")\n out.close()\n","repo_name":"HOP-Ubiquitous/geontology","sub_path":"SCRIPTS-ALL/others/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"1000682057","text":"# %% Imports\nimport pandas as pd\nimport numpy as np\nimport logging\nimport matplotlib.pyplot as plt\nimport peakutils # for basic peak-finding utils\nimport pywt # for wavelet transform bits\nimport wfdb # for physionet tools\nimport biosppy\nimport neurokit2 as nk\nimport scipy\nimport seaborn as sns\nimport heartpy\n\nimport os\nrepopath = '/Users/inespereira/Documents/Github/aml2020'\nos.chdir(repopath)\n\n# %% Load training dataset from csv\nX = pd.read_csv(f'{repopath}/project3_ines/X_train_small.csv')\n# X = X.iloc[:, 1:]\ny = pd.read_csv(f'{repopath}/project3_ines/y_train_small.csv')\n# y = y.iloc[:, 1:]\nX_u = pd.read_csv(f'{repopath}/project3_ines/X_test.csv')\nlogging.info('I have imported your training dataset! :D')\nprint(f'Shape of training set is {X.shape}')\nsampling_rate = 300\n\n# %%\nX.iloc[:,17979]\n\n# %% Save smaller dataset to not waste hours waiting for loading\nNROWS = 30\nX.iloc[:NROWS, :].to_csv('project3_ines/X_train_small.csv', index=False)\ny.iloc[:NROWS, :].to_csv('project3_ines/y_train_small.csv', index=False)\n\n# %%\nNROWS = 10\nX_u.iloc[:NROWS, :].to_csv('project3_ines/X_test_small.csv', index=False)\n\n# %%\n# print(X)\nX.describe()\n# print(y)\ny.describe()\n\n# %% Making class imbalance apparent\ny['y'].value_counts()\n\n# %% Get indices from the smaller classes\nclass1 = y.index[y['y'] == 1].tolist()\nclass2 = y.index[y['y'] == 2].tolist()\nclass3 = y.index[y['y'] == 3].tolist()\n\n# %%\nprint(class2)\n\n# %% Observations:\n# - a lot of NaNs: but we probably need to extract features anyway\n# - Class imbalance\n\n# %% Allocate space for new training data\ncol_names = [\n 'mean_HR',\n 'std_HR',\n 'P_waved'\n]\nnew_X_train = pd.DataFrame(columns=col_names)\nprint(new_X_train)\n\n# %% Plot some time series\n# TODO: write for loop wrapper over this to apply preprocessing over all time series\nn = 3\necg = X.iloc[n, :].dropna().to_numpy()\nplt.plot(ecg)\nplt.show()\nprint(f'The corresponding class is: {y.iloc[n]}')\nprint(ecg)\n\n# %% Apply Fourier transform: use this to exclude class 3 samples?\necg_fft = np.fft.fft(ecg)\nplt.plot(abs(ecg_fft))\nplt.show()\ntype(ecg_fft)\nprint(ecg_fft)\n# %% Biosppy\necg_biosppy = biosppy.signals.ecg.ecg(\n signal=ecg,\n sampling_rate=sampling_rate,\n show=True)\n\n# %% Analyse biosppy summary\n# type(ecg_biosppy)\n# ecg_biosppy['ts']\n# ecg_biosppy['rpeaks']\nplt.plot(ecg_biosppy['filtered'])\nplt.show()\n# ecg_biosppy['templates_ts']\n# ecg_biosppy['templates']\n# ecg_biosppy['heart_rate_ts']\necg_biosppy['heart_rate']\n\n# %% Populate new_X_train\nnew_X_train.loc[n, 'mean_HR'] = np.mean(ecg_biosppy['heart_rate'])\nnew_X_train.loc[n, 'std_HR'] = np.std(ecg_biosppy['heart_rate'])\nprint(new_X_train)\n\n# %% Save filtered data to mat file\nscipy.io.savemat('test.mat', {'mydata': ecg_biosppy['filtered']})\n\n# %% Wavelets\nwavelets = pywt.wavedec(\n data=ecg,\n wavelet='db4', # from YouTube Video\n 
level=5\n)\n\n# %% Neurokit2\n# %% Also do analysis\ndf, info = nk.ecg_process(ecg_biosppy['filtered'], sampling_rate=sampling_rate)\nanalyze_df = nk.ecg_analyze(df, sampling_rate=sampling_rate)\nanalyze_df\n\n# %%\ndf\n# %% Download data\n# ecg_signal = nk.data(dataset=\"ecg_3000hz\")['ECG']\n# ecg_signal = pd.Series(ecg_biosppy['filtered'],dtype='float64')\necg_signal = pd.Series(df['ECG_Clean'], dtype='float64')\n\n# Analyze the ecg signal\ntype(ecg_signal)\nprint(ecg_signal)\n# %%\n\n# Extract R-peaks locations\n_, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=sampling_rate)\n\n# Delineate\nsignal, waves = nk.ecg_delineate(\n ecg_signal, rpeaks, sampling_rate=sampling_rate, method=\"dwt\", show=True, show_type='all')\n\n\n# %% Plan: define the features you want to look at.\n# 1. Preprocessing of data\n# 1.1. (Ines) Removal of low frequency components. R peaks should be at the same height\n# + Smoothing of signal → ecg function from biosppy does filtering.\n# 1.3. (Raffi) Artefact removal (outlier removal): visuelle Überprüfung\n# 1.4. (Francesco) Identify QRS complex and verify polarity\n# + Find features related to waves\n# For an example: https://www.youtube.com/watch?v=WyjGCEWU4zY\n\n# (Raffi) To isolate class 3:\n# - Extract Fourier transform and plot mean for each class. You should see a peak\n# - Number of artefacts\n# Detect inverted QRS and reinvert them?\n\n# Data-driven: convolutional nets? A mean wave with the variance?\n\n# References and software to check out:\n# Watch (general): https://www.youtube.com/watch?v=WyjGCEWU4zY\n# https://pypi.org/project/py-ecg-detectors/\n# Plotting: https://pypi.org/project/ecg-plot/\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.electrocardiogram.html\n# Time series anomalies: https://www.youtube.com/watch?v=qN3n0TM4Jno\n# Other ideas: consider nested CV https://www.youtube.com/watch?v=DuDtXtKNpZs\n\n# Preprocessing pipeline steps\n# 1. Detection and exclusion of class 3 from training set (TODO Raffi)\n# 2. Detection of flipped signals and flipping (TODO Raffi + Inês)\n# 3. Filtering (getting isoelectric line and smoothing)\n# 4. Waveform detection\n# 4.1 R-peaks and HR:\n# - mean_HR (class 1)\n# - std_HR (class 1)\n# 4.2 P, QRS and T: TODO (Francesco)\n# - number of P waves, amplitude_P_wave (class 1)\n# - mean_S_amplitude, ?? 
std_S_amplitude (class 2)\n# - mean_QRS_duration, (class 2)\n# - std_QRS_duration (class 2)\n\n# Build preprocessing pipeline TODO (Inês)\n# - Remove preprocessing from last project (PCA, outlier detection, standardization)\n# - Write down new Estimator\n#\n# Diagnostics plots: TODO (Raffi + Francesco): interactive, that show full time series and segmented P-QRS-T waves\n# import plotly.express as px\n#\n# Sanity checks\n# - Check that HR and R-peaks between biosppy and Neurokit\n","repo_name":"ratheile/aml2020","sub_path":"project3_ines/prototyping.py","file_name":"prototyping.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"70927345223","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Console script for mooquant.\"\"\"\n\nimport os\nimport sys\nimport shutil\nimport locale\nimport zipfile\n\nimport click\nfrom tqdm import tqdm\n\nfrom mooquant_history import history\n\n\n@click.group()\n@click.option('-v', '--verbose', count=True)\n@click.pass_context\ndef cli(ctx, verbose):\n ctx.obj[\"VERBOSE\"] = verbose\n\n\n@cli.command(help='A股的历史数据下载(sina源).')\n@click.option('-d', '--directory', default=os.path.expanduser(\"~/.mooquant/bundle\"), type=click.Path(file_okay=False), help='历史数据下载目录, 默认 ~/.mooquant/bundle.')\n@click.option('-i', '--initial', is_flag=True, help='初始化历史, 第一次下载使用该参数, 默认 False.')\n@click.option('-s', '--symbol', default=None, help='更新单个股票的代码, 默认为空.')\n@click.option('-a', '--append', is_flag=True, help='更新股票的代码 增项增量模式.')\n@click.option('-t', '--thread', default='2', help='同时请求线程数, 默认2,建议不要超过5个, 会被封IP.')\n@click.option('--delay', default='0.5', help='默认每次请求后等待时间')\ndef bundle(directory, initial, symbol, append, delay, thread):\n history.bundle(dtype='day', export='csv', **locals()) \n\n@cli.command(help='更新股票代码.')\n@click.option('-d', '--directory', default=os.path.expanduser(\"~/.mooquant/bundle\"), type=click.Path(file_okay=False), help='历史数据下载目录.')\ndef symbol(directory):\n from mooquant_history.helpers.symbol import update_stock_codes\n update_stock_codes()\n\n@cli.command(help='实时行情(支持sina, qq, ).')\n@click.option('-s', '--symbol', default=None, help='单个股票的代码行情.')\ndef quotes(symbol):\n click.echo('运行实时行情')\n\n\n@cli.command(help='导出 bundle.')\n@click.option('-d', '--directory', default=os.path.expanduser(\"~/.mooquant/bundle\"), type=click.Path(file_okay=False), help='历史数据下载目录.')\n@click.option('-o', '--output', default=os.path.expanduser(\"./bundle\"), type=click.Path(file_okay=False), help='导出文件目录, 默认当前目录.')\ndef export(directory, output):\n data_path = os.path.join(directory, 'day', 'raw_data')\n file_name = os.path.join(output,'bundle.zip')\n file_list = [f for f in os.listdir(data_path) if f.endswith('.csv')]\n\n click.echo('Starting...')\n z = zipfile.ZipFile(file_name, 'w', zipfile.ZIP_DEFLATED)\n\n if file_list: \n for f in tqdm(file_list):\n z.write(os.path.join(data_path, f), f) \n\n z.close()\n click.echo('done.')\n\n\n@cli.command(help='转换为 MooQaunt 格式.')\n@click.option('-s', '--strategy', default='', help='运行回测规则路径.')\ndef covert(strategy):\n click.echo('运行回测规则')\n\ndef main():\n cli(obj={})\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bopo/mooquant_history","sub_path":"mooquant_history/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"11868526940","text":"import configparser\nimport os\nimport shutil\n\nCONFIG = 
configparser.ConfigParser()\n\nif not os.path.exists('config.ini'):\n shutil.copy('config.ini.example', 'config.ini')\nCONFIG.read('config.ini')\n\nif not os.path.exists(CONFIG['app']['data_dir']):\n os.mkdir(CONFIG['app']['data_dir'])\n\nif not os.path.exists(CONFIG['app']['note_dir']):\n os.mkdir(CONFIG['app']['note_dir'])\n\nif not os.path.exists(CONFIG['app']['index_dir']):\n os.mkdir(CONFIG['app']['index_dir'])\n","repo_name":"fzdp/wxpython-note","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"zh","doc_type":"code","stars":12,"dataset":"github-code","pt":"8"} +{"seq_id":"73818992263","text":"from flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///todo.db\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\nclass Todo(db.Model):\n sno = db.Column(db.Integer,primary_key = True)\n your_name = db.Column(db.String(200),nullable =False)\n item = db.Column(db.String(500),nullable = False)\n quantity = db.Column(db.Integer,nullable = False)\n date_created = db.Column(db.DateTime,default = datetime.now())\n\n def __repr__(self) ->str:\n return f\"{self.sno} - {self.your_name}\"\n\n@app.route(\"/\",methods = [\"GET\",\"POST\"])\n\ndef home():\n if request.method == \"POST\":\n your_name = request.form['your_name']\n item = request.form['item']\n quantity = request.form['quantity']\n todo = Todo(your_name = your_name,item=item,quantity=quantity)\n db.session.add(todo)\n db.session.commit()\n allTodo = Todo.query.all()\n return render_template(\"index.html\",allTodo = allTodo)\n\n@app.route(\"/delete/\")\ndef delete(sno):\n todo = Todo.query.filter_by(sno=sno).first()\n db.session.delete(todo)\n db.session.commit()\n return redirect(\"/\")\n\n@app.route(\"/update/\",methods = ['GET','POST'])\ndef update(sno):\n if request.method == \"POST\":\n your_name = request.form['your_name']\n item = request.form['item']\n quantity = request.form['quantity']\n todo = Todo.query.filter_by(sno=sno).first()\n todo.your_name = your_name\n todo.item = item\n todo.quantity = quantity\n db.session.add(todo)\n db.session.commit()\n return redirect(\"/\")\n todo = Todo.query.filter_by(sno=sno).first()\n return render_template(\"update.html\",todo=todo)\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\nif __name__ == \"__main__\":\n app.run(debug=True,port=5000)\n","repo_name":"heera-ai/OurFood","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"16346400541","text":"from copy import deepcopy\n\nfrom django.core.urlresolvers import reverse\n\nfrom regulations.generator import api_reader, node_types\nfrom regulations.generator.html_builder import PreambleHTMLBuilder\nfrom regulations.generator.section_url import SectionUrl\nfrom regulations.generator.versions import fetch_grouped_history\nfrom regulations.views.partial import PartialView\nimport math\n\nPAGE_SIZE = 10\n\n\nurl_rules = {\n 'cfr': 'chrome_search',\n 'preamble': 'chrome_search_preamble',\n}\n\n\nclass PartialSearch(PartialView):\n \"\"\"Display search results without any chrome.\"\"\"\n template_name = 'regulations/search-results.html'\n\n def add_prev_next(self, current_page, context):\n total = float(context['results']['total_hits']) 
/ PAGE_SIZE\n total = int(math.ceil(total))\n context['current'] = {'page': current_page + 1, 'total': total}\n\n if current_page > 0:\n context['previous'] = {'length': PAGE_SIZE,\n 'page': current_page - 1}\n max_this_page = (current_page + 1) * PAGE_SIZE\n remaining = context['results']['total_hits'] - max_this_page\n if remaining > 0:\n context['next'] = {'page': current_page + 1,\n 'length': min(remaining, PAGE_SIZE)}\n\n def get_context_data(self, doc_type, **kwargs):\n # We don't want to run the content data of PartialView -- it assumes\n # we will be applying layers\n context = super(PartialView, self).get_context_data(**kwargs)\n\n context['q'] = self.request.GET.get('q')\n context['version'] = self.request.GET.get('version')\n context['doc_type'] = doc_type\n\n context['regulation'] = context['label_id'].split('-')[0]\n context['url_rule'] = url_rules[doc_type]\n\n try:\n page = int(self.request.GET.get('page', '0'))\n except ValueError:\n page = 0\n\n context['warnings'] = []\n if not context['q']:\n context['warnings'].append('Please provide a query.')\n if doc_type == 'cfr' and not context['version']:\n context['warnings'].append('Please provide a version.')\n\n if context['warnings']:\n results = {'results': [], 'total_hits': 0}\n else:\n results = api_reader.ApiReader().search(\n context['q'], context['doc_type'],\n version=context['version'], regulation=context['regulation'],\n page=page, page_size=PAGE_SIZE,\n is_root='false', is_subpart='false',\n )\n\n if doc_type == 'cfr':\n context['results'] = process_cfr_results(results,\n context['version'])\n for version in fetch_grouped_history(context['regulation']):\n for notice in version['notices']:\n if notice['document_number'] == context['version']:\n context['version_by_date'] = notice['effective_on']\n else:\n context['results'] = process_preamble_results(results)\n\n self.add_prev_next(page, context)\n\n return context\n\n\ndef add_cfr_headers(result):\n \"\"\"We always want a title to click, even if the search result doesn't have\n one. 
We also want to prevent duplication, so we'll only show additional\n levels of headings if they differ.\"\"\"\n if result.get('title'):\n result['header'] = result['title']\n else:\n result['header'] = node_types.label_to_text(result['label'])\n if result.get('match_title') and result['match_title'] != result['title']:\n result['subheader'] = result['match_title']\n if (result.get('paragraph_title') and\n result['paragraph_title'] != result['match_title']):\n result['subsubheader'] = result['paragraph_title']\n return result\n\n\ndef process_cfr_results(results, version):\n \"\"\"Modify the results of a search over the CFR by adding a human-readable\n label, appropriate links, and version information\"\"\"\n section_url = SectionUrl()\n results = deepcopy(results)\n for result in results['results']:\n add_cfr_headers(result)\n result['section_id'] = section_url.view_label_id(\n result['label'], version)\n result['url'] = section_url.fetch(\n result['label'], version, sectional=True)\n return results\n\n\ndef process_preamble_results(results):\n \"\"\"Modify the results of a search over a notice preamble by adding a\n human-readable label, appropriate links, etc.\"\"\"\n results = deepcopy(results)\n for result in results['results']:\n result['header'] = PreambleHTMLBuilder.human_label(result)\n if 'title' in result:\n result['header'] += ' ' + result['title']\n result['section_id'] = '-'.join(\n [result['label'][0], 'preamble'] + result['label'])\n result['url'] = '{}#{}'.format(\n reverse(\n 'chrome_preamble',\n kwargs={'paragraphs': '/'.join(result['label'][:2])},\n ),\n '-'.join(result['label']),\n )\n return results\n","repo_name":"eregs/regulations-site","sub_path":"regulations/views/partial_search.py","file_name":"partial_search.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"8"} +{"seq_id":"30862146189","text":"from datetime import timedelta\n\nfrom django.utils import timezone\nfrom django.db.models import Q\nfrom rest_framework import serializers\nfrom movielist.serializers import MovieSerializer\nfrom movielist.models import Movie\nfrom .models import Cinema, Screening\n\n\nclass CinemaSerializer(serializers.ModelSerializer):\n movies = serializers.HyperlinkedIdentityField(\n many=True,\n read_only=True,\n view_name='movie-detail',\n )\n\n class Meta:\n model = Cinema\n fields = ['id', 'name', 'city', 'movies']\n depth = 1\n\n\n\n\nclass ScreeningSerializer(serializers.ModelSerializer):\n cinema = serializers.SlugRelatedField(\n slug_field='name',\n queryset=Cinema.objects.all()\n )\n\n movie = serializers.SlugRelatedField(\n slug_field='title',\n queryset=Movie.objects.all()\n )\n\n class Meta:\n model = Screening\n fields = ('movie', 'cinema', 'date')","repo_name":"marcinpelszyk/Django","sub_path":"project/showtimes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"74777304581","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QWidget, QLCDNumber,\n QSlider, QVBoxLayout,\n QApplication, QGridLayout,\n QLabel)\n\n\nclass Example(QWidget):\n \"\"\"\n This is a simple example demonstrating signals and slots in PyQt5.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n\n def init_ui(self):\n lcd = QLCDNumber(self)\n sld = QSlider(Qt.Horizontal, self)\n\n sld.setRange(1, 
100)\n sld.setSingleStep(1)\n\n vbox = QVBoxLayout()\n vbox.addWidget(lcd)\n vbox.addWidget(sld)\n\n self.setLayout(vbox)\n\n sld.valueChanged.connect(lcd.display)\n # x and y coordinate w, h for frame\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('Signal and slot demo')\n self.show()\n\n # Reimplementing event handler - Events in PyQt5 are processed often by reimplementing event handlers.\n # Inherited from QWidget\n ##############################################\n # keyPressEvent(self, e) is the event handler#\n ##############################################\n def keyPressEvent(self, e):\n\n if e.key() == Qt.Key_Escape:\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n","repo_name":"loveplay1983/pyqt_pro","sub_path":"2_further_pyqt/0_extending_signals/1_event_and_signal_event_handler.py","file_name":"1_event_and_signal_event_handler.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"8"} +{"seq_id":"16431988274","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import SongApi, SingerApi\n\nrouter = DefaultRouter()\nrouter.register('singer', SingerApi, basename='singer')\nrouter.register('song', SongApi, basename='song')\n\nurlpatterns = [\n path('', include(router.urls)),\n path('auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n","repo_name":"pranavelric/DRF_Practice","sub_path":"SerializerRelations/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"8"} +{"seq_id":"36272002805","text":"import random\n\nplay_game = 'y'\n\nwhile (play_game == 'y'):\n answer = random.randint(1, 100)\n try_number = int(input('Guess a number between 1 and 100: '))\n count = 1\n while try_number != answer:\n if try_number > answer:\n print(\"Your number is too large\")\n if try_number < answer:\n print(\"Your number is too small.\")\n try_number = int(input('Guess a number between 1 and 100: '))\n count += 1\n print(\"You got it! You tried \" + str(count) + ' times')\n play_game = input(\"Continue? (y/n) \").lower()\n\n if (play_game != \"y\" or play_game != \"n\"):\n print(\"Click y for yes and n for no!\")\n play_game = input(\"Continue? 
(y/n) \").lower()\n\n if (play_game == \"n\"):\n print(\"See you later...\")\n","repo_name":"BraceZin/Number-Guessing-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36617492956","text":"import tkinter as tk\nfrom random import randrange as rnd, choice\nimport math\n\nWIDTH = 800\nHEIGHT = 600\ng = 10 # g per time unit\ndt = 50\ncs = 1.01 # collision speedup\n\n\nclass Ball:\n def __init__(self):\n self.R = rnd(20, 50)\n self.x = rnd(self.R, WIDTH - self.R)\n self.y = rnd(self.R, HEIGHT - self.R)\n self.dx, self.dy = (+2, +2)\n self.color = choice(['green',\n 'blue',\n 'red',\n 'pink',\n 'white',\n 'brown'])\n self.check_inside()\n self.ball_id = canvas.create_oval(self.x - self.R,\n self.y - self.R,\n self.x + self.R,\n self.y + self.R,\n fill=self.color)\n\n def move(self):\n global g, dt\n\n self.x += self.dx\n self.y += self.dy\n og = game.direction\n self.dy += g * dt * math.cos(math.pi * og / 8) / 1000\n self.dx += g * dt * math.sin(math.pi * og / 8) / 1000\n if self.x + self.R > WIDTH:\n self.dx = -self.dx - 1\n if self.x - self.R <= 0:\n self.dx = 2\n if self.y + self.R > HEIGHT:\n self.dy = - self.dy - 1\n elif self.y - self.R <= 0:\n self.dy = 2\n self.check_collision()\n\n def show(self):\n canvas.move(self.ball_id, self.dx, self.dy)\n\n def check_collision(self):\n global cs\n\n for ball in balls:\n if self != ball and (\n (ball.R + self.R) ** 2 >= (ball.x - self.x) ** 2 + (ball.y - self.y) ** 2):\n self.dx = - cs * self.dx\n self.dy = - cs * self.dy\n\n def check_inside(self):\n for i in balls:\n while (i.R + self.R) ** 2 >= (i.x - self.x) ** 2 + (i.y - self.y) ** 2:\n self.x = rnd(self.R, WIDTH - self.R)\n self.y = rnd(self.R, HEIGHT - self.R)\n\n\ndef tick():\n global dt\n\n for ball in balls:\n ball.move()\n ball.show()\n root.after(dt, tick)\n\n\nclass Game:\n def __init__(self):\n self.score = 0\n self.direction = 0\n\n def canvas_click_handler(self, event):\n for ball in balls:\n if ball.x + ball.R >= event.x >= ball.x - ball.R and ball.y + ball.R >= event.y >= ball.y - ball.R:\n self.score += 1\n print(str(self.score))\n self.direction += 1\n self.direction = self.direction % 16\n\n def main(self):\n global root, canvas, balls\n root = tk.Tk()\n root.geometry(str(WIDTH) + 'x' + str(HEIGHT))\n canvas = tk.Canvas(root, bg='black')\n canvas.pack(fill=tk.BOTH, expand=1)\n canvas.bind('', self.canvas_click_handler)\n # balls = [Ball() for i in range(5)]\n balls = []\n for i in range(5):\n balls.append(Ball())\n tick()\n root.mainloop()\n\n\ngame = Game()\ngame.main()\n","repo_name":"belskii/lab3_belskii","sub_path":"ball_game.py","file_name":"ball_game.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41735938797","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# import RPi.GPIO as GPIO\nimport socket\n\n# GPIO.setmode(GPIO.BCM)\n# GPIO.setup(22, GPIO.OUT)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# 绑定端口:\ns.bind(('10.10.100.83', 50000))\n\nprint('Bind UDP on 50000...')\n\nwhile True:\n # 接收数据:\n data, addr = s.recvfrom(1024)\n print('Received from %s:%s.' 
% addr)\n\n result = data.decode('utf-8')\n\n print(result)\n if result == '01':\n print('let on')\n s.sendto(b'let on', ('10.10.100.40', 50000))\n # GPIO.output(22, GPIO.HIGH)\n else:\n print('let off')\n s.sendto(b'let off', ('10.10.100.40', 50000))\n # GPIO.output(22, GPIO.LOW)\n\n\n","repo_name":"maxlee12/Python","sub_path":"Raspbian/Learn_Demo/UDP.py","file_name":"UDP.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70610797102","text":"import unittest\nfrom unittest.mock import Mock, patch\nfrom ttrest import TTAuthenticator\nfrom ttrest import TTLedgerClient\nfrom ttrest import TTEnvironments\n\n\nclass TestTTLedgerClient(unittest.TestCase):\n def setUp(self):\n self.auth_handler = Mock(spec=TTAuthenticator)\n self.ledger_client = TTLedgerClient(self.auth_handler)\n\n @patch(\"ttrest.rest_client.TTRestClient._authenticated_get\")\n def test_get_fills_with_query_parameters(self, mock_authenticated_get):\n mock_response = Mock()\n mock_response.json.return_value = {}\n mock_authenticated_get.return_value = mock_response\n\n self.ledger_client.auth_handler.environment = TTEnvironments.UAT # Set the environment to UAT\n\n self.ledger_client.get_fills(\n account_id=0,\n max_timestamp=1690930800000000000,\n min_timestamp=1690844400000000000,\n order_id=1234,\n product_id=5678,\n include_otc=True\n )\n\n expected_url = f\"{self.ledger_client.TT_BASE_URL}/ttledger/{TTEnvironments.UAT.value}/fills\"\n expected_query = {\n \"accountId\": 0,\n \"maxTimestamp\": 1690930800000000000,\n \"minTimestamp\": 1690844400000000000,\n \"orderId\": 1234,\n \"productId\": 5678,\n \"includeOTC\": \"true\"\n }\n\n # Verify that the method is called with the expected URL and query\n mock_authenticated_get.assert_called_once_with(\n expected_url,\n query=expected_query\n )\n\n @patch(\"ttrest.rest_client.TTRestClient._authenticated_get\")\n def test_get_fills_return_json(self, mock_authenticated_get):\n mock_response = Mock()\n mock_response.json.return_value = {\"data\": \"fill_data\"}\n mock_authenticated_get.return_value = mock_response\n\n result = self.ledger_client.get_fills()\n\n self.assertEqual(result, {\"data\": \"fill_data\"})\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"isaaclm/tt-rest-api","sub_path":"tests/unit_tests/test_ledger.py","file_name":"test_ledger.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"32417808384","text":"import os\nimport os.path\nimport numpy as np\nimport torch\nimport glob\nimport torchvision.transforms as transforms\nfrom utils.utils_image import *\nimport scipy.io as sio\n\n\ndef extract_metainfo(path='0151_METADATA_RAW_010.MAT'):\n meta = sio.loadmat(path)['metadata']\n mat_vals = meta[0][0]\n mat_keys = mat_vals.dtype.descr\n\n keys = []\n for item in mat_keys:\n keys.append(item[0])\n\n py_dict = {}\n for key in keys:\n py_dict[key] = mat_vals[key]\n\n device = py_dict['Model'][0].lower()\n bitDepth = py_dict['BitDepth'][0][0]\n if 'iphone' in device or bitDepth != 16:\n noise = py_dict['UnknownTags'][-2][0][-1][0][:2]\n iso = py_dict['DigitalCamera'][0, 0]['ISOSpeedRatings'][0][0]\n pattern = py_dict['SubIFDs'][0][0]['UnknownTags'][0][0][1][0][-1][0]\n time = py_dict['DigitalCamera'][0, 0]['ExposureTime'][0][0]\n\n else:\n noise = py_dict['UnknownTags'][-1][0][-1][0][:2]\n iso = py_dict['ISOSpeedRatings'][0][0]\n pattern = 
py_dict['UnknownTags'][1][0][-1][0]\n time = py_dict['ExposureTime'][0][0] # the 0th row and 0th line item\n\n rgb = ['R', 'G', 'B']\n pattern = ''.join([rgb[i] for i in pattern])\n\n asShotNeutral = py_dict['AsShotNeutral'][0]\n b_gain, _, r_gain = asShotNeutral\n\n # only load ccm1\n ccm = py_dict['ColorMatrix1'][0].astype(float).reshape((3, 3))\n\n return {'device': device,\n 'pattern': pattern,\n 'iso': iso,\n 'noise': noise,\n 'time': time,\n 'wb': np.array([r_gain, 1, b_gain]),\n 'ccm': ccm, }\n\ndef raw2stack(var):\n h, w = var.shape\n if var.is_cuda:\n res = torch.cuda.FloatTensor(4, h // 2, w // 2).fill_(0)\n else:\n res = torch.FloatTensor(4, h // 2, w // 2).fill_(0)\n res[0] = var[0::2, 0::2]\n res[1] = var[0::2, 1::2]\n res[2] = var[1::2, 0::2]\n res[3] = var[1::2, 1::2]\n return res\n\ndef transform_to_rggb(img, pattern):\n assert len(img.shape) == 2\n res = img.copy()\n\n if pattern.lower() == 'bggr': # same pattern\n res[0::2, 0::2] = img[1::2, 1::2]\n res[1::2, 1::2] = img[0::2, 0::2]\n elif pattern.lower() == 'rggb':\n pass\n elif pattern.lower() == 'grbg':\n res[0::2, 0::2] = img[0::2, 1::2]\n res[0::2, 1::2] = img[0::2, 0::2]\n res[1::2, 0::2] = img[1::2, 1::2]\n res[1::2, 1::2] = img[1::2, 0::2]\n elif pattern.lower() == 'gbrg':\n res[0::2, 0::2] = img[1::2, 0::2]\n res[0::2, 1::2] = img[0::2, 0::2]\n res[1::2, 0::2] = img[1::2, 1::2]\n res[1::2, 1::2] = img[0::2, 1::2]\n else:\n assert 'no support'\n\n return res\n\n\nclass SIDDSubdataset_fromArray(torch.utils.data.Dataset):\n def __init__(self, size, seed, mode='train', patch_size='128',supervised=True):\n super().__init__()\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n load_transform = transforms.Compose([ \n transforms.ToTensor(), \n ])\n \n rng_dataset = np.random.default_rng(seed)\n train_set_indices = rng_dataset.choice(125594, size=size, replace=False, p=None)\n\n # Path to training data pre-processed with create_training_data.py\n path_to_train = \"/tobit/N2N_SIDD_denoising/\"\n # Path to \"ValidationNoisyBlocksRaw.mat\" and \"ValidationGtBlocksRaw.mat\"\n path_to_validation = \"/media/ssd1/SIDD/validation/\"\n # Path to SIDD_Benchmark_Data containing the meta data for the validation set (and benchmark set)\n path_to_benchmark_images = \"/tobit/N2N_SIDD_denoising/raw_validation_data/SIDD_Benchmark_Data/\"\n\n if mode=='train':\n inputs = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_input_array_raw.npy') \n if supervised:\n targets = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_gt_array_raw.npy')\n else:\n targets = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_noisy_target_array_raw.npy')\n\n self.examples = []\n\n for i in train_set_indices:\n input_image = inputs[i,:,:]\n\n target_image = targets[i,:,:]\n\n input_image = load_transform(input_image).to(device)\n target_image = load_transform(target_image).to(device)\n input_image = raw2stack(input_image[0])\n target_image = raw2stack(target_image[0])\n\n self.examples.append((input_image,target_image))\n \n elif mode=='val' or mode=='test':\n \n # the 40 scenes used for validation and benchmarking\n validation_scenes_list = sorted(glob.glob(path_to_benchmark_images + \"0*\"))\n \n noisy_images = sio.loadmat(path_to_validation + \"ValidationNoisyBlocksRaw.mat\")\n gt_images = sio.loadmat(path_to_validation + \"ValidationGtBlocksRaw.mat\")\n\n noisy_images = noisy_images['ValidationNoisyBlocksRaw']\n 
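            # Aside: a minimal sketch of what raw2stack (defined above) does -- it packs a
            # full-resolution RGGB Bayer mosaic of shape (H, W) into a (4, H/2, W/2) stack,
            # one channel per CFA site. The tiny synthetic mosaic below is arbitrary.
            _demo_mosaic = torch.arange(16, dtype=torch.float32).reshape(4, 4)
            _demo_stack = raw2stack(_demo_mosaic)  # shape (4, 2, 2)
            # channel 0 holds the R sites, 1 and 2 the two G sites, 3 the B sites (RGGB).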
gt_images = gt_images['ValidationGtBlocksRaw']\n\n transform = transforms.Compose([ \n transforms.ToTensor(), \n ]) \n \n self.examples = []\n if mode=='val':\n pick_val_images = np.arange(0,size)\n elif mode=='test':\n pick_val_images = np.arange(40-size,40)\n else:\n raise ValueError('size must be 3 or 37')\n for i in pick_val_images:\n # here we need to implement the correction of the bayer pattern\n scene = validation_scenes_list[i]\n img_name, extension = os.path.splitext(os.path.basename(scene))\n scene_tag = img_name[0:4]\n py_meta = extract_metainfo(scene +\"/\" + scene_tag + \"_METADATA_RAW_010.MAT\")\n pattern = py_meta['pattern']\n\n for j in range(noisy_images.shape[1]):\n noisy_image = noisy_images[i,j,:,:]\n gt_image = gt_images[i,j,:,:]\n\n noisy_image = transform_to_rggb(noisy_image, pattern)\n gt_image = transform_to_rggb(gt_image, pattern)\n\n noisy_image = transform(noisy_image).to(device)\n gt_image = transform(gt_image).to(device)\n\n noisy_image = raw2stack(noisy_image[0])\n gt_image = raw2stack(gt_image[0])\n self.examples.append((noisy_image,gt_image))\n\n\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, index):\n\n sample = self.examples[index]\n\n return sample\n\n\nclass SIDDSubdataset_fromArray_gradDiff(torch.utils.data.Dataset):\n def __init__(self, size, seed, mode='train', patch_size='128'):\n super().__init__()\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n load_transform = transforms.Compose([ \n transforms.ToTensor(), \n ])\n \n rng_dataset = np.random.default_rng(seed)\n train_set_indices = rng_dataset.choice(125594, size=size, replace=False, p=None)\n\n # Path to training data pre-processed with create_training_data.py\n path_to_train = \"/tobit/N2N_SIDD_denoising/\"\n\n\n if mode=='train':\n inputs = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_input_array_raw.npy') \n \n targets_sup = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_gt_array_raw.npy')\n targets_self = np.load(f'{path_to_train}SIDD_train_all_scenes_patchSize_{patch_size}_patches_125594_noisy_target_array_raw.npy')\n\n self.examples = []\n\n for i in train_set_indices:\n input_image = inputs[i,:,:]\n\n target_image_sup = targets_sup[i,:,:]\n target_image_self = targets_self[i,:,:]\n\n input_image = load_transform(input_image).to(device)\n target_image_sup = load_transform(target_image_sup).to(device)\n target_image_self = load_transform(target_image_self).to(device)\n input_image = raw2stack(input_image[0])\n target_image_sup = raw2stack(target_image_sup[0])\n target_image_self = raw2stack(target_image_self[0])\n\n self.examples.append((input_image,target_image_sup,target_image_self))\n \n\n\n\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, index):\n\n sample = self.examples[index]\n\n return sample\n\n","repo_name":"MLI-lab/sample_complexity_ss_recon","sub_path":"Image_denoising_SIDD_figure3/utils/data_helpers/load_datasets_helpers.py","file_name":"load_datasets_helpers.py","file_ext":"py","file_size_in_byte":8799,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"507271809","text":"from auto_scan_test import OPConvertAutoScanTest, BaseNet\nfrom hypothesis import reproduce_failure\nimport hypothesis.strategies as st\nimport numpy as np\nimport unittest\nimport paddle\nimport random\n\n\nclass Net(BaseNet):\n \"\"\"\n simple Net\n \"\"\"\n\n def 
forward(self, x):\n \"\"\"\n forward\n \"\"\"\n scale = self.config[\"scale\"]\n if self.config['isTensor']:\n scale = paddle.to_tensor(np.array(scale).astype('float32'))\n x = paddle.scale(\n x,\n scale=scale,\n bias=self.config[\"bias\"],\n bias_after_scale=self.config[\"bias_after_scale\"])\n return x\n\n\nclass TestScaleConvert(OPConvertAutoScanTest):\n \"\"\"\n api: paddle.scale\n OPset version: 7, 9, 15\n \"\"\"\n\n def sample_convert_config(self, draw):\n input_shape = draw(\n st.lists(\n st.integers(\n min_value=2, max_value=20), min_size=0, max_size=5))\n # int32, int64 has a bug\n dtype = draw(st.sampled_from([\"float32\", \"float64\"]))\n\n scale = draw(st.floats(min_value=-20, max_value=20))\n isTensor = draw(st.booleans())\n\n bias = draw(st.floats(min_value=-20, max_value=20))\n bias_after_scale = draw(st.booleans())\n\n config = {\n \"op_names\": [\"scale\"],\n \"test_data_shapes\": [input_shape],\n \"test_data_types\": [[dtype]],\n \"opset_version\": [7, 9, 15],\n \"input_spec_shape\": [],\n \"scale\": scale,\n \"bias\": bias,\n \"bias_after_scale\": bias_after_scale,\n \"isTensor\": isTensor,\n }\n\n models = Net(config)\n\n return (config, models)\n\n def test(self):\n self.run_and_statis(max_examples=30)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"PaddlePaddle/Paddle2ONNX","sub_path":"tests/test_auto_scan_scale.py","file_name":"test_auto_scan_scale.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":591,"dataset":"github-code","pt":"91"} +{"seq_id":"2490893956","text":"#!/usr/bin/env python2\n# coding: utf-8\n\nimport web\nimport model\nimport datetime\n\nurls = (\n '/', 'Index',\n '/add', 'Add',\n '/admin', 'Admin',\n '/del/(\\d+)', 'Delete'\n )\n\n\nrender = web.template.Render('templates', base='base')\n\n\nclass Index:\n \"\"\"Index page, \"\"\"\n def GET(self):\n \"\"\"Show page\"\"\"\n cash = model.get_cash_details()\n return render.index(cash)\n\n\nclass Add:\n\n def GET(self):\n \"\"\"Show add page.\"\"\"\n return render.add(today=datetime.date.today())\n\n def POST(self):\n \"\"\"Add new entry.\"\"\"\n post = web.input()\n model.new_item(post.date, post.item, post.yuan, post.type)\n raise web.seeother('/')\n\n\nclass Delete:\n def POST(self, id):\n \"\"\"Delete based on ID\"\"\"\n id = int(id)\n model.del_item(id)\n raise web.seeother('/admin')\n\n\nclass Admin:\n\n def GET(self):\n \"\"\"Show page\"\"\"\n cash = model.get_cash_details()\n return render.admin(cash)\n\n\napp = web.application(urls, globals())\n\nif __name__ == '__main__':\n app.run()\nelse:\n application = app.wsgifunc()\n","repo_name":"wangwangwar/cashlist","sub_path":"cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"73479553584","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 18 11:56:29 2021\n\n@author: Fede\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\n\n\ndef RandomForestclf(dataset, param_list, target_class='Type', n_estimators=400):\n '''\n This function creates an ensamble of decision trees classifier and tune the hyperparameter thanks to randomsearchCV from sklearn.\n For more information, please 
visit:\n https://scikit-learn.org/stable/modules/tree.html\n param_list should be a dictionary with the ranges of the parameters.\n dataset should be a dataframe from pandas.\n\n Parameters\n ----------\n min_samples_split : integer\n min number of samples for splitting\n max_depth : integer\n Depth of the tree\n min_samples_leaf : integer\n min number of samples to define a leaf\n criterion : str\n Criterion for splitting the nodes.\n n_estimators : integer\n number of trees that partecipate to the classification problem\n ---------- \n\n '''\n #controllo che siano passati i giusti tipi di variabili.\n assert type(dataset)==pd.core.frame.DataFrame, 'Your dataset should be a pandas dataframe'\n\n assert type(param_list)==dict, 'Your param_list should be a dictionary. For more info about parameters, please check the documentation'\n\n assert type(target_class)==str, 'Your target_class should be a str with your target class name'\n\n ##Separo la target class dal dataset\n attributes = [col for col in df.columns if col != target_class]\n X = dataset[attributes].values\n y = dataset[target_class]\n\n ##Adesso trovo migliori iperparametri e performance con la nested cross-validation\n #definisco la crossvalidation interna\n cv_inner = KFold(n_splits=3, shuffle=True, random_state=42)\n #definisco il modello\n clf = RandomForestClassifier(n_estimators=n_estimators, criterion='gini', max_depth=None,\n min_samples_split=2, min_samples_leaf=1, class_weight=None)\n random_search = RandomizedSearchCV(clf, param_distributions=param_list,\n n_iter=100, n_jobs=-1, cv=cv_inner,\n scoring='accuracy')\n #definisco la crossvalidation esterna\n cv_outer = KFold(n_splits=10, shuffle=True, random_state=42)\n #faccio la nested crossvalidation\n scores = cross_val_score(random_search, X, y, scoring='accuracy', cv=cv_outer, n_jobs=-1)\n return scores\n\n\nif __name__ == '__main__':\n PATH_ACTUAL = os.getcwd()\n PATH = PATH_ACTUAL + \"/data/Stars.csv\"\n df = pd.read_csv(PATH)\n\n del df['L']\n\n ##Trasformiamo la target class in un valore categorico\n stars_type = ['Red Dwarf', 'Brown Dwarf', 'White Dwarf',\n 'Main Sequence', 'Super Giants', 'Hyper Giants']\n df['Type'] = df['Type'].replace(df['Type'].unique(), stars_type)\n\n ##Trasformo alcune variabili usando il logaritmo in base 10\n df['R'] = np.log10(df['R'].values)\n\n ##Trasformo le variabili categoriche in numeri interi\n column2encode = ['Spectral_Class', 'Color']\n for col in column2encode:\n le = LabelEncoder()\n df[col] = le.fit_transform(df[col])\n\n\n ##Creiamo l'ensemble di alberi e eseguiamo la nested cross validation\n leaf_list = list(np.arange(1, 100, 2))\n samples_list = list(np.arange(2, 100, 2))\n param_list = {'max_depth': [None] + list(np.arange(2, 20)),\n 'min_samples_split': samples_list,\n 'min_samples_leaf': leaf_list,\n 'criterion': ['gini', 'entropy']}\n scores = RandomForestclf(df, param_list=param_list, target_class='Type', n_estimators=100)\n\n ##Vediamo quali sono gli attributi che più impattano nella classificazione\n '''attributes = [col for col in df.columns if col != 'Type']\n for col, imp in zip(attributes, clf[0].feature_importances_):\n print(col, imp)'''\n\n #nested crossvalidation, veduamo le performance\n print('Cross validation Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 
2))\n","repo_name":"fededalba/SCcmep2021","sub_path":"StarclassML/Randomforestclassifier.py","file_name":"Randomforestclassifier.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32249729200","text":"class Solution:\n def asteroidCollision(self, asteroids: List[int]) -> List[int]:\n res = []\n for asteroid in asteroids:\n while len(res) and asteroid < 0 and res[-1] > 0:\n if res[-1]== -asteroid:\n res.pop()\n break\n elif res[-1] < -asteroid:\n res.pop()\n continue\n elif res[-1] > -asteroid:\n break\n else:\n res.append(asteroid)\n return res","repo_name":"monishshah18/Leetcode","sub_path":"asteroid-collision/asteroid-collision.py","file_name":"asteroid-collision.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"9709777952","text":"from SpecificSizePerformanceTest import SpecificSizePerformanceTest\nfrom SpecificSizeQualityTest import SpecificSizeQualityTest\nfrom Partitioning import *\n\n\nclass TestRunner:\n def __init__(self):\n self.tests = [SpecificSizeQualityTest(30, bisection_methods=[Partitioning.sga, Partitioning.kla,\n Partitioning.rbha],\n graphs_number=100)\n ]\n\n def run(self):\n for test in self.tests:\n test.run()\n\n\nif __name__ == \"__main__\":\n TestRunner().run()\n","repo_name":"bwroblew/graph_bisection","sub_path":"Test/TestRunner.py","file_name":"TestRunner.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6165786503","text":"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom polygon import *\nfrom gjk_epa import *\nfrom cvxopt import solvers, matrix\n\nsys.path.append(\"../\") \nimport my_map as mymap\n\ndef test_polygon_self():\n\t'''\n\ttest item: the functions of polygon class\n\t'''\n\tplg = Polygon()\n\t# vers=[[0,0],[1,1],[2,0],[3,3],[0,4],[-1,3.5], [0,2]]\n\t# vers = [[0,0],[1,1],[2,0],[3,4],[2,1.5],[0,3]]\n\tvers = [[1,2], [2,2], [2.5,3], [1.5, 4], [0.5,3], [4,1], [5,1], [5.5,2], [4.5,3], [3.5,2]]\n\tplg.set_vertexes(vers)\n\tplg.find_min_convex()\n\tplg1 = move_along_dir_by_dis(plg, [1,0], 5)\n\tfig, ax = plt.subplots()\n\tplg.plot_in_ax(ax) # plot this plg\n\tplg1.plot_in_ax(ax) # plot this plg\n\tplt.show()\n\ndef test_decomposition():\n\t'''\n\ttest item: the decomposition of a concave polygon\n\t'''\n\tplg = Polygon()\n\t# vers=[[0,0],[1,1],[2,0],[3,3],[0,4],[-1,3.5], [0,2]]\n\tvers=[[0,0],[1,1],[2,0],[3,4],[2,1.5],[0,3]]\n\tplg.set_vertexes(vers)\n\tplg.plot() # plot this plg\n\n\t# decomposition the polygon\n\tconvex_plg_list=[]\n\tplg_to_convexplg(plg, convex_plg_list)\n\n\t# plot every convex polygon\n\t# for i in range(len(convex_plg_list)):\n\t# \tconvex_plg_list[i].plot()\n\n\t# plot the polygon the every convex subpolygon\n\tplt.ion()\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tplg.plot_in_ax(ax)\n\tplt.title(\"Decompositon of a Concave Polygon\")\n\n\tsubplg, = ax.plot([], [], 'r--')\n\tfig.canvas.draw()\n\tfig.canvas.flush_events()\n\tfor i in range(len(convex_plg_list)):\n\t\tshowtimes = 1\n\t\tif i== len(convex_plg_list)-1:\n\t\t\tshowtimes = 10000000000\n\t\tj = 0\n\t\twhile j 0.5) == v_is_member_labels.data.cpu().numpy())\r\n losses.update(loss.item(), model_input.size(0))\r\n top1.update(prec1, model_input.size(0))\r\n\r\n # compute gradient and do SGD step\r\n 
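            # Aside: the statements below follow the standard PyTorch update idiom --
            # gradients accumulate across backward() calls, so they are cleared first,
            # then backpropagated, then applied by the optimizer. A generic sketch
            # (names are placeholders, not the variables used in this script):
            #     optimizer.zero_grad()
            #     loss.backward()
            #     optimizer.step()
            # The fp16 branch below instead calls backward() on the optimizer wrapper.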
optimizer_mia.zero_grad()\r\n if args.fp16:\r\n optimizer_mia.backward(loss)\r\n else:\r\n loss.backward()\r\n\r\n optimizer_mia.step()\r\n\r\n # measure elapsed time\r\n batch_time.update(time.time() - end)\r\n end = time.time()\r\n\r\n if batch_idx - first_id >= num_batchs:\r\n break\r\n\r\n # plot progress\r\n if batch_idx % 10 == 0:\r\n print_and_log(\r\n '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | | Loss: {loss:.4f} | top1: {top1: .4f} '.format(\r\n batch=batch_idx,\r\n size=size,\r\n data=data_time.avg,\r\n bt=batch_time.avg,\r\n loss=losses.avg,\r\n top1=top1.avg,\r\n ))\r\n\r\n return (losses.avg, top1.avg)","repo_name":"JiePKU/SafeCompress","sub_path":"MIA/.ipynb_checkpoints/white_train-checkpoint.py","file_name":"white_train-checkpoint.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"2101072606","text":"from barplot import SixSidedDie_BarPlot as d6_plot\n\ndef lists():\n \"\"\" Application Entry point \"\"\"\n print(\"Main Started...\")\n c=[-45, 6, 0, 72, 1543]\n d=['Mary', 'Smith', 3.57, 2022]\n\n c+= [5]\n\n print(c)\n\n a_list = []\n\n for number in range(1, 6):\n a_list+=[number]\n\n print(a_list)\n\n letters = []\n\n letters+='Python'\n\n print(letters)\n\n\n list1 = [10, 20, 30]\n\n list2 = [40, 50]\n\n list3 = list1 + list2 + list1\n\n for i in range(len(list3)):\n print(f'{i}: {list3[i]}')\n\n \n lista = [1, 2, 3]\n\n listb = [1, 2, 3]\n\n listc = [3, 4, 5]\n\n print(lista==listb)\n\n print(lista==listc)\n\n someOtherList=[19, 3, 15, 7, 11]\n aNewList=someOtherList[1:3]\n endList=someOtherList[:2]\n\n print(endList)\n\n sentence=\"hello, my name is Tom\"\n\n if \"Tom\" in sentence:\n print(\"Tom is in the sentence\")\n else:\n print(\"Error: not in the sentence.\")\n\n del(someOtherList[-1])\n\n print(someOtherList)\n\n someOtherList.clear()\n\n print(someOtherList)\n\ndef tuples():\n john='John', 'Green', 3.3\n\n print(john)\n\n mary='Mary', 'Red', 3.4\n\n mary+=('Blue',)\n\n print(mary)\n\n singleton=('red',)\n\n print(singleton)\n\n print(mary[1])\n\ndef tuples_and_lists():\n numbers=[1, 2, 3, 4, 5]\n\n numbers+=(6, 7)\n\n print(numbers)\n\ndef unpack():\n student_tuple=('Amanda', [98, 85, 87])\n\n first_name, grades = student_tuple\n\n print(first_name)\n\n print(grades)\n\n science, math, english = grades\n print(science)\n print(math)\n print(english)\n\n first, second = 'hi'\n\n first, second = second, first\n\n print(first, second)\n\n number1, number2, number3 = [2, 3, 5]\n print(number1)\n print(number2)\n print(number3)\n\n number99 = 99\n number22 = 22\n number44 = 44\n number55 = 55\n\n number99, number22, number44, number55 = number22, number99, number55, number44\n\n print(number99, number22, number44, number55)\n\ndef iterate():\n colors=['red', 'orange', 'yellow']\n print(list(enumerate(colors)))\n\n print(tuple(enumerate(colors)))\n\ndef dictionaries():\n my_dictionary = {}\n my_dictionary[0]=\"0\"\n my_dictionary[\"Tom\"]=\"Tom\"\n my_dictionary[\"Mark\"]=\"Sensei\"\n my_dictionary[\"Toronto\"]=\"Hot\"\n\n print(my_dictionary)\n\n another_one={'Tom': 'Tom', 'Mark': 'Sensei', 'Toronto': 'Hot'}\n\n if(my_dictionary == another_one):\n print(\"both dictionaries are the same!\")\n\n print(dict(enumerate(another_one)))\n\n print(my_dictionary[0])\n\n print(list(my_dictionary.items()))\n\ndef is_odd(x):\n \"\"\"Returns True only if x is odd.\"\"\"\n return x % 2 !=0\n\ndef is_even(x):\n \"\"\"Returns True only if x is even. 
\"\"\"\n return x % 2 == 0\n\ndef counting():\n responses = [1, 2, 5, 4, 3, 5, 2, 1, 3, 3, 1, 4, 3, 3, 3, 2, 3, 3, 2, 2]\n\n for i in range(1, 6):\n print(f'{i} appears {responses.count(i)} times in responses')\n\n responses.reverse()\n\n print(responses)\n\n new_responses = responses.copy()\n\n new_responses.reverse()\n\n print(new_responses)\n\n print(list(filter(is_even, new_responses)))\n\n\nd6_plot(60000)","repo_name":"CentennialCollege/COMP301-M2020-Lesson9","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26913476749","text":"#实现二路归并排序算法\n#输入:[2,5,3,7,5,3,8,10]\n#输出:[2,3,3,5,5,7,8,10]\n#输入:[1,9,56,3,0,47,2,6]\n#输出:[0,1,2,3,6,9,47,56]\n#复杂度为:O(nlogn)\ndef merge(a,b): #合并\n i=0\n j=0\n c=[]\n #以下操作是将a和b数组合并操作\n while i '+\n '\\n -> '.join(avtnames))\n \n sys.exit()\n cam['name'] = camids[0][1]\n if not 'TriggerSource' in cam.keys():\n cam['TriggerSource'] = 'Line1'\n if not 'TriggerMode' in cam.keys():\n cam['TriggerMode'] = 'LevelHigh'\n if not 'TriggerSelector' in cam.keys():\n cam['TriggerSelector'] = 'FrameStart'\n if not 'AcquisitionMode' in cam.keys():\n cam['AcquisitionMode'] = 'Continuous'\n if not 'AcquisitionFrameCount' in cam.keys():\n cam['AcquisitionFrameCount'] = 1000\n if not 'nFrameBuffers' in cam.keys():\n cam['nFrameBuffers'] = 6\n self.cams.append(AVTCam(camId=camids[0][0],\n outQ = self.camQueues[-1],\n frameRate=cam['frameRate'],\n gain=cam['gain'],\n triggered = self.triggered,\n triggerSource = cam['TriggerSource'],\n triggerMode = cam['TriggerMode'],\n triggerSelector = cam['TriggerSelector'],\n acquisitionMode = cam['AcquisitionMode'],\n nTriggeredFrames = cam['AcquisitionFrameCount'],\n nFrameBuffers = cam['nFrameBuffers'],\n recorderpar = recorderpar))\n connected_avt_cams.append(camids[0][0])\n elif cam['driver'].lower() == 'qimaging':\n try:\n from .qimaging import QImagingCam\n except Exception as err:\n print(err)\n print(''' \n\n Could not load the QImaging driver. \n If you want to record from QImaging cameras install the QImaging driver.\n If not you have the wrong config file.\n\n Edit the file in USERHOME/labcams/default.json and delete the QImaging cam or use the -c option \n\n''')\n sys.exit(1)\n \n if not 'binning' in cam.keys():\n cam['binning'] = 2\n self.cams.append(QImagingCam(camId=cam['id'],\n outQ = self.camQueues[-1],\n exposure=cam['exposure'],\n gain=cam['gain'],\n binning = cam['binning'],\n triggerType = cam['triggerType'],\n triggered = self.triggered,\n recorderpar = recorderpar))\n \n elif cam['driver'].lower() == 'opencv':\n self.cams.append(OpenCVCam(camId=cam['id'],\n outQ = self.camQueues[-1],\n triggered = self.triggered,\n **cam,\n recorderpar = recorderpar))\n elif cam['driver'].lower() == 'pco':\n try:\n from .pco import PCOCam\n except Exception as err:\n print(err)\n print(''' \n\n Could not load the PCO driver. 
\n\n If you want to record from PCO cameras install the PCO.sdk driver.\n If not you have the wrong config file.\n\n Edit the file in USERHOME/labcams/default.json and delete the PCO cam or use the -c option\n\n''')\n\n sys.exit(1)\n \n if 'CamStimTrigger' in cam.keys():\n if not cam['CamStimTrigger'] is None:\n self.camstim_widget = CamStimTriggerWidget(\n port = cam['CamStimTrigger']['port'],\n outQ = self.camQueues[-1])\n camstim = self.camstim_widget.ino\n else:\n camstim = None\n if not 'binning' in cam.keys():\n cam['binning'] = None\n self.cams.append(PCOCam(camId=cam['id'],\n binning = cam['binning'],\n exposure = cam['exposure'],\n outQ = self.camQueues[-1],\n acquisition_stim_trigger = camstim,\n triggered = self.triggered,\n recorderpar = recorderpar))\n elif cam['driver'].lower() == 'basler':\n try:\n from .basler import BaslerCam\n except Exception as err:\n print(err)\n print(''' \n\n Could not load the Basler driver. \n\n If you want to record from BASLER cameras install the pypylon driver (pip install).\n If not you have the wrong config file.\n\n Edit the file in USERHOME/labcams/default.json and delete the Basler cam or use the -c option\n\n''')\n\n sys.exit(1)\n \n if 'CamStimTrigger' in cam.keys():\n if not cam['CamStimTrigger'] is None:\n self.camstim_widget = CamStimTriggerWidget(\n port = cam['CamStimTrigger']['port'],\n outQ = self.camQueues[-1])\n camstim = self.camstim_widget.ino\n else:\n camstim = None\n if not 'binning' in cam.keys():\n cam['binning'] = None\n self.cams.append(BaslerCam(camId=cam['id'],\n binning = cam['binning'],\n exposure = cam['exposure'],\n outQ = self.camQueues[-1],\n triggered = self.triggered,\n recorderpar = recorderpar))\n\n \n elif cam['driver'].lower() == 'ximea':\n try:\n from .ximeacam import XimeaCam\n except Exception as err:\n print(''' \n\n Could not load the Ximea driver. 
\n\n If you want to record from Ximea cameras install the Ximea driver.\n If not you have the wrong config file.\n\n Edit the file in USERHOME/labcams/default.json and delete the ximea cam or use the -c option\n\n''')\n raise(err)\n self.cams.append(XimeaCam(camId=cam['id'],\n binning = cam['binning'],\n exposure = cam['exposure'],\n outQ = self.camQueues[-1],\n triggered = self.triggered,\n recorderpar = recorderpar))\n elif cam['driver'].lower() == 'pointgrey':\n try:\n from .pointgreycam import PointGreyCam\n except Exception as err:\n print(err)\n\n print(''' \n\n Could not load the PointGrey driver.\n \n If you want to record from PointGrey/FLIR cameras install the Spinaker SDK.\n If not you have the wrong config file.\n\n Edit the file in USERHOME/labcams/default.json and delete the PointGrey cam or use the -c option\n\n''')\n sys.exit()\n if not 'roi' in cam.keys():\n cam['roi'] = []\n if not 'pxformat' in cam.keys():\n cam['pxformat'] = 'Mono8' #'BayerRG8'\n if not 'serial' in cam.keys():\n # camera serial number\n cam['serial'] = None \n if not 'binning' in cam.keys():\n cam['binning'] = None\n if not 'exposure' in cam.keys():\n cam['exposure'] = None\n if not 'gamma' in cam.keys():\n cam['gamma'] = None\n if not 'hardware_trigger' in cam.keys():\n cam['hardware_trigger'] = None\n if cam['roi'] is str:\n if ',' in cam['roi']:\n cam['roi'] = [int(c.strip('[').strip(']')) for c in cam['roi'].split(',')]\n else:\n cam['roi'] = []\n self.cams.append(PointGreyCam(camId=cam['id'],\n serial = cam['serial'],\n gain = cam['gain'],\n roi = cam['roi'],\n frameRate = cam['frameRate'],\n pxformat = cam['pxformat'],\n exposure = cam['exposure'],\n binning = cam['binning'],\n gamma = cam['gamma'],\n outQ = self.camQueues[-1],\n triggered = self.triggered,\n recorderpar = recorderpar,\n hardware_trigger = cam['hardware_trigger']))\n else: \n display('[WARNING] -----> Unknown camera driver' +\n cam['driver'])\n self.camQueues.pop()\n self.saveflags.pop()\n if not 'recorder_sleep_time' in self.parameters.keys():\n self.parameters['recorder_sleep_time'] = 0.3\n if 'SaveMethod' in cam.keys():\n cam['recorder'] = cam['SaveMethod']\n display('SaveMethod is deprecated, use recorder instead.')\n if not 'noqueue' in cam['recorder']:\n towriter = dict(inQ = self.camQueues[-1],\n datafolder=self.parameters['recorder_path'],\n pathformat = self.parameters['recorder_path_format'],\n framesperfile=self.parameters['recorder_frames_per_file'],\n sleeptime = self.parameters['recorder_sleep_time'],\n filename = expName,\n dataname = cam['description'])\n if cam['recorder'] == 'tiff':\n display('Recording to TIFF')\n self.writers.append(TiffWriter(compression = cam['compress'],\n **towriter))\n elif cam['recorder'] == 'ffmpeg':\n display('Recording with ffmpeg')\n if not 'hwaccel' in cam.keys():\n cam['hwaccel'] = None\n self.writers.append(FFMPEGWriter(compression = cam['compress'],\n hwaccel = cam['hwaccel'],\n **towriter))\n elif cam['recorder'] == 'binary':\n display('Recording binary')\n self.writers.append(BinaryWriter(**towriter))\n elif cam['recorder'] == 'opencv':\n display('Recording opencv')\n self.writers.append(OpenCVWriter(compression = cam['compress'],**towriter))\n else:\n print(''' \n\nThe available recorders are:\n - tiff (multiple tiffstacks - the default) \n - binary \n - ffmpeg Records video format using ffmpeg (hwaccel options: intel, nvidia - remove for no hardware acceleration)\n - opencv Records video format using openCV\n\nThe recorders can be specified with the '\"recorder\":\"ffmpeg\"' 
option in each camera setting of the config file.\n''')\n raise ValueError('Unknown recorder {0} '.format(cam['recorder']))\n else:\n self.writers.append(None)\n \n if 'CamStimTrigger' in cam.keys():\n self.camstim_widget.outQ = self.camQueues[-1]\n # Print parameters\n display('\\t Camera: {0}'.format(cam['name']))\n for k in np.sort(list(cam.keys())):\n if not k == 'name' and not k == 'recorder':\n display('\\t\\t - {0} {1}'.format(k,cam[k]))\n #self.resize(100,100)\n\n self.initUI()\n \n self.camerasRunning = False\n if hasattr(self,'camstim_widget'):\n self.camstim_widget.ino.start()\n self.camstim_widget.ino.disarm()\n\n for cam,writer in zip(self.cams[::-1],self.writers[::-1]):\n cam.start()\n if not writer is None:\n writer.init(cam)\n writer.start()\n \n camready = 0\n while camready != len(self.cams):\n camready = np.sum([cam.camera_ready.is_set() for cam in self.cams])\n display('Initialized cameras.')\n\n if hasattr(self,'camstim_widget'):\n self.camstim_widget.ino.arm()\n\n self.triggerCams(soft_trigger = self.software_trigger,\n save=self.saveOnStart)\n\n def setExperimentName(self,expname):\n # Makes sure that the experiment name has the right slashes.\n if os.path.sep == '/':\n expname = expname.replace('\\\\',os.path.sep)\n expname = expname.strip(' ')\n for flg,writer,cam in zip(self.saveflags,self.writers,self.cams):\n if flg:\n if not writer is None:\n writer.set_filename(expname)\n else:\n display('Setting serial recorder filename.')\n cam.eventsQ.put('filename='+expname)\n #time.sleep(0.15)\n self.recController.experimentNameEdit.setText(expname)\n \n def serverActions(self):\n if self.parameters['server'] == 'zmq':\n try:\n message = self.zmqSocket.recv_pyobj(flags=zmq.NOBLOCK)\n except:\n return\n self.zmqSocket.send_pyobj(dict(action='handshake'))\n elif self.parameters['server'] == 'udp':\n try:\n msg,address = self.udpsocket.recvfrom(N_UDP)\n except:\n return\n msg = msg.decode().split('=')\n message = dict(action=msg[0])\n if len(msg) > 1:\n message = dict(message,value=msg[1])\n #display('Server received message: {0}'.format(message))\n if message['action'].lower() == 'expname':\n self.setExperimentName(message['value'])\n self.udpsocket.sendto(b'ok=expname',address)\n elif message['action'].lower() == 'softtrigger':\n self.recController.softTriggerToggle.setChecked(\n int(message['value']))\n self.udpsocket.sendto(b'ok=software_trigger',address)\n elif message['action'].lower() == 'trigger':\n for cam in self.cams:\n cam.stop_acquisition()\n # make sure all cams closed\n for c,(cam,writer) in enumerate(zip(self.cams,self.writers)):\n cam.stop_saving()\n #if not writer is None: # Logic moved to inside camera.\n # writer.write.clear()\n self.triggerCams(soft_trigger = self.software_trigger,save = True)\n self.udpsocket.sendto(b'ok=save_hardwaretrigger',address)\n elif message['action'].lower() == 'settrigger':\n self.recController.camTriggerToggle.setChecked(\n int(message['value']))\n self.udpsocket.sendto(b'ok=hardware_trigger',address)\n elif message['action'].lower() in ['setmanualsave','manualsave']:\n self.recController.saveOnStartToggle.setChecked(\n int(message['value']))\n self.udpsocket.sendto(b'ok=save',address)\n elif message['action'].lower() == 'log':\n for cam in self.cams:\n cam.eventsQ.put('log={0}'.format(message['value']))\n # write on display\n #self.camwidgets[0].text_remote.setText(message['value'])\n self.udpsocket.sendto(b'ok=log',address)\n self.recController.udpmessages.setText(message['value'])\n elif message['action'].lower() == 
'ping':\n display('Server got PING.')\n self.udpsocket.sendto(b'pong',address)\n elif message['action'].lower() == 'quit':\n self.udpsocket.sendto(b'ok=bye',address)\n self.close()\n def triggerCams(self,soft_trigger = True, save=False):\n # stops previous saves if there were any\n display(\"Waiting for the cameras to be ready.\")\n for c,cam in enumerate(self.cams):\n while not cam.camera_ready.is_set():\n time.sleep(0.001)\n display('Camera [{0}] ready.'.format(c))\n display('Doing save ({0}) and trigger'.format(save))\n if save:\n for c,(cam,flg,writer) in enumerate(zip(self.cams,\n self.saveflags,\n self.writers)):\n if flg:\n cam.saving.set()\n if not writer is None:\n writer.write.set()\n else:\n for c,(cam,flg,writer) in enumerate(zip(self.cams,\n self.saveflags,\n self.writers)):\n if flg:\n if not writer is None:\n cam.stop_saving()\n #writer.write.clear() # cam stops writer\n #time.sleep(2)\n if soft_trigger:\n for c,cam in enumerate(self.cams):\n cam.start_trigger.set()\n display('Software triggered cameras.')\n \n def experimentMenuTrigger(self,q):\n if q.text() == 'Set refresh time':\n self.timer.stop()\n res = QInputDialog().getDouble(self,\"What refresh period do you want?\",\"GUI refresh period\",\n self.updateFrequency)\n if res[1]:\n self.updateFrequency = res[0]\n self.timer.start(self.updateFrequency)\n #display(q.text()+ \"clicked. \")\n \n def initUI(self):\n # Menu\n self.setDockOptions(QMainWindow.AllowTabbedDocks |\n QMainWindow.AllowNestedDocks\n)\n bar = self.menuBar()\n editmenu = bar.addMenu(\"Options\")\n editmenu.addAction(\"Set refresh time\")\n editmenu.triggered[QAction].connect(self.experimentMenuTrigger)\n self.setWindowTitle(\"labcams\")\n self.tabs = []\n self.camwidgets = []\n self.recController = RecordingControlWidget(self)\n #self.setCentralWidget(self.recController)\n self.recControllerTab = QDockWidget(\"\",self)\n self.recControllerTab.setWidget(self.recController)\n self.addDockWidget(\n Qt.TopDockWidgetArea,\n self.recControllerTab)\n self.recController.setFixedHeight(self.recController.layout.sizeHint().height())\n for c,cam in enumerate(self.cams):\n tt = ''\n if self.saveflags[c]:\n tt += ' - ' + self.cam_descriptions[c]['description'] +' ' \n self.tabs.append(QDockWidget(\"Camera: \"+str(c) + tt,self))\n self.camwidgets.append(CamWidget(frame = np.zeros((cam.h,cam.w,cam.nchan),\n dtype=cam.dtype),\n iCam = c,\n parent = self,\n parameters = self.cam_descriptions[c]))\n self.tabs[-1].setWidget(self.camwidgets[-1])\n self.tabs[-1].setFloating(False)\n self.tabs[-1].setAllowedAreas(Qt.LeftDockWidgetArea |\n Qt.RightDockWidgetArea |\n Qt.BottomDockWidgetArea |\n Qt.TopDockWidgetArea)\n self.tabs[-1].setFeatures(QDockWidget.DockWidgetMovable |\n QDockWidget.DockWidgetFloatable)\n self.addDockWidget(\n Qt.BottomDockWidgetArea,\n self.tabs[-1])\n self.tabs[-1].setMinimumHeight(300)\n # there can only be one of these for now?\n if hasattr(self,'camstim_widget'):\n self.camstim_tab = QDockWidget(\"Camera excitation control\",self)\n self.camstim_tab.setWidget(self.camstim_widget)\n self.addDockWidget(\n Qt.LeftDockWidgetArea,\n self.camstim_tab)\n display('Init view: ' + str(c))\n self.timer = QTimer()\n self.timer.timeout.connect(self.timerUpdate)\n self.timer.start(self.updateFrequency)\n #self.move(0, 0)\n self.show()\n \t\n def timerUpdate(self):\n for c,cam in enumerate(self.cams):\n try:\n #self.camwidgets[c].image(frame,cam.nframes.value)\n frame = cam.get_img()\n if not frame is None:\n self.camwidgets[c].image(frame,cam.nframes.value) 
#frame\n except Exception as e:\n display('Could not draw cam: {0}'.format(c))\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(\n exc_tb.tb_frame.f_code.co_filename)[1]\n print(e, fname, exc_tb.tb_lineno)\n\n def closeEvent(self,event):\n if hasattr(self,'serverTimer'):\n self.serverTimer.stop()\n self.timer.stop()\n if hasattr(self,'camstim_widget'):\n self.camstim_widget.ino.disarm()\n self.camstim_widget.close()\n \n display('Acquisition stopped (close event).')\n for c,(cam,flg,writer) in enumerate(zip(self.cams,\n self.saveflags,\n self.writers)):\n if flg:\n cam.stop_saving()\n #writer.write.clear() # logic moved inside writer\n if not writer is None:\n writer.stop()\n cam.close()\n for c in self.cams:\n c.join()\n for c,(cam,flg,writer) in enumerate(zip(self.cams,\n self.saveflags,\n self.writers)):\n if flg:\n if not writer is None:\n writer.join()\n pg.setConfigOption('crashWarning', False)\n event.accept()\n\n\ndef main():\n\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n import os\n import json\n parser = ArgumentParser(description=LOGO + '\\n\\n Multiple camera control and recording.',formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('file',\n metavar='file',\n type=str,\n default=None,\n nargs=\"?\")\n parser.add_argument('-d','--make-config',\n type=str,\n default = None,\n action='store')\n parser.add_argument('-w','--wait',\n default = False,\n action='store_true')\n parser.add_argument('-t','--triggered',\n default=False,\n action='store_true')\n parser.add_argument('-c','--cam-select',\n type=int,\n nargs='+',\n action='store')\n parser.add_argument('--no-server',\n default=False,\n action='store_true')\n parser.add_argument('--bin-to-mj2',\n default=False,\n action='store_true')\n parser.add_argument('--mj2-rate',\n default=30.,\n action='store')\n \n opts = parser.parse_args()\n\n if opts.bin_to_mj2:\n from labcams.io import mmap_dat\n \n fname = opts.file\n \n assert not fname is None, \"Need to supply a binary filename to compress.\"\n assert os.path.isfile(fname), \"File {0} not found\".format(fname)\n ext = os.path.splitext(fname)[-1]\n assert ext in ['.dat','.bin'], \"File {0} needs to be binary.\".format(fname) \n stack = mmap_dat(fname)\n stack_to_mj2_lossless(stack, fname, rate = opts.mj2_rate)\n print('Converted {0}'.format(fname.replace(ext,'.mov')))\n sys.exit(0)\n \n if not opts.make_config is None:\n from .widgets import SettingsDialog\n app = QApplication(sys.argv)\n s = SettingsDialog(getPreferences())\n sys.exit(app.exec_())\n fname = opts.make_config\n getPreferences(fname, create=True)\n sys.exit(s.exec_())\n parameters = getPreferences(opts.file)\n cams = parameters['cams']\n if not opts.cam_select is None:\n cams = [parameters['cams'][i] for i in opts.cam_select]\n\n app = QApplication(sys.argv)\n w = LabCamsGUI(app = app,\n camDescriptions = cams,\n parameters = parameters,\n server = not opts.no_server,\n software_trigger = not opts.wait,\n triggered = opts.triggered)\n sys.exit(app.exec_())\n \nif __name__ == '__main__':\n main()\n","repo_name":"TrendingTechnology/labcams","sub_path":"labcams/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":33000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"9470085501","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport pandas as pd \nimport numpy as np\n\nimport pymysql\nfrom sqlalchemy import create_engine\npymysql.install_as_MySQLdb()\n\nimport html5lib\nfrom 
datetime import datetime\n\n\n# In[8]:\n\n\n# db에서 종목코드 가져오기\n# db 추출 연결\n#종목에서 끌어올 코드번호 데이터 프레임에 삽입\n\ndb = pymysql.connect(host = 'sp-articledb.clwrfz92pdul.ap-northeast-2.rds.amazonaws.com', port = 3306,user ='admin',passwd = 'sogangsp',db='mydb',charset = 'utf8')\ncursor = db.cursor()\n\nsql = \"\"\"\n select * from CODE_INFORMATION ;\n \"\"\"\ncursor.execute(sql)\ndb.commit()\n\ncode_data = pd.DataFrame(cursor.fetchall(), columns=['code', 'name'])\n\n#str 형태로 저장되어있는 코드명을 int로 치환 후 6자리 int형으로 고정\ncode_data.code = code_data.code.astype(int)\ncode_data.code=code_data.code.map('{:06d}'.format)\n\n\n# In[9]:\n\n\n\ndef get_url(item_name, code_data): \n code = code_data.query(\"name=='{}'\".format(item_name))['code'].to_string(index=False)\n code = code.strip(' ')\n url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code=code)\n\n print(\"요청 URL = {}\".format(url)) \n return url\n\ndef crawler_chart(item_name) :\n url = get_url(item_name, code_data) \n\n # 일자 데이터를 담을 df라는 DataFrame 정의 \n df = pd.DataFrame() \n\n for page in range(1, 2): \n pg_url = '{url}&page={page}'.format(url=url, page=page) \n df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\n df = df.dropna()\n \n return df \n\ndef oneday_crawl(item_name) :\n update_chart = pd.DataFrame(columns=['CORP_NAME','TRADE_TIME','CLOSE_PRICE','DIFF','OPEN_PRICE','MAX_PRICE','MIN_PRICE','TRADE_AMOUNT'])\n now = datetime.now()\n for i in crawler_chart(item_name).get_values() :\n if i[0] == '{:04d}.{:02d}.{:02d}'.format(now.year,now.month,now.day) :\n data = {\n 'CORP_NAME' : item_name\n ,'TRADE_TIME' : i[0]\n ,'CLOSE_PRICE' : int(i[1])\n ,'DIFF' : int(i[2])\n ,'OPEN_PRICE' : int(i[3])\n ,'MAX_PRICE' : int(i[4])\n ,'MIN_PRICE' : int(i[5])\n ,'TRADE_AMOUNT' : int(i[6])\n }\n update_chart=update_chart.append(data,ignore_index=True)\n \n return update_chart\n\n\n# In[10]:\n\n\nupdate_chart = pd.DataFrame(columns=['CORP_NAME','TRADE_TIME','CLOSE_PRICE','DIFF','OPEN_PRICE','MAX_PRICE','MIN_PRICE','TRADE_AMOUNT'])\nfor i in code_data.name :\n update_chart=update_chart.append(oneday_crawl(i))\n\n\n# In[330]:\n\n\n# 오늘 데이터 삽입\nengine = create_engine(\"mysql+pymysql://admin:\"+\"sogangsp\"+\"@sp-articledb.clwrfz92pdul.ap-northeast-2.rds.amazonaws.com:3306/mydb?charset=utf8\", encoding='utf-8')\nconn = engine.connect()\nupdate_chart.to_sql(name = 'CORP_STOCK',con = engine, if_exists = 'append',index = False)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ptaejoon/Stock_Prediction","sub_path":"Crawling/one_day_crawler_final.py","file_name":"one_day_crawler_final.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33663458692","text":"\nimport numpy as np\nimport pandas as pd\nfrom TextRepresenter import PorterStemmer\nfrom Weighter import WeighterVector\nfrom IRModel import BM25Model\nfrom RandomWalk import whole_graph, PageRank2\n\ndef idf(N, num_documents):\n return np.log(N / (num_documents + 1))\n\n\nclass Featurer:\n\n def __init__(self, index, models=None):\n self.index = index\n print(\"precalcul features\")\n self.doc_N, self.corpus_features = self.pre_calcul_features()\n self.stemmer = PorterStemmer()\n # for each query, stock thing calculated in calcul_features_query and calcul_features_query_doc\n self.query_feature_cache = {}\n self.ranking_feature_cache = {}\n self.models = models if models is not None else [BM25Model(WeighterVector(index))]\n\n def get_features(self, doc_id, query):\n assert doc_id in 
self.corpus_features.index, \"The given document id {} is not present in corpus\".format(doc_id)\n\n corpus_feats = dict(zip(self.corpus_features.columns, self.corpus_features.loc[doc_id]))\n\n query_array = self.stemmer.getTextRepresentation(query)\n # calcul features for query\n query_feats = self.calculate_features_query(query_array, query)\n\n # calcul features for query and corpus\n query_corpus_feats = self.calculate_features_query_doc(doc_id, query)\n\n all_feats = {**corpus_feats, **query_feats, **query_corpus_feats}\n keys = np.sort(list(all_feats.keys()))\n return keys, [all_feats[f] for f in keys]\n\n\n def pre_calcul_features(self, pagerank_iters=5):\n \"\"\"\n Let's precalcule features that are independent from the query.\n\n -longueur du document\n -nombre de termes\n -somme des idf des termes du document\n -pagerank\n\n \"\"\"\n doc_N, doc_features = self.index.getDocFeatures()\n # add pagerank\n df = pd.DataFrame(doc_features[:, 1:], columns=['doc_len', 'doc_stems', 'doc_idfs'], index=doc_features[:, 0])\n pagerank = PageRank2(whole_graph(self.index))\n rank = pagerank.compute_mus(pagerank_iters)\n df['pagerank'] = rank\n return doc_N, df\n\n\n def calculate_features_query(self, query_array, query):\n \"\"\"\n For already calculated request, check cache.\n For new query, calculates\n -sum of idfs\n -sum of words\n -length of query\n\n :param query_array: contains all words in query\n :param query as plain text\n :return:\n \"\"\"\n if query not in self.query_feature_cache:\n idfs = np.array([idf(self.doc_N, len(self.index.getTfsForStem(w).keys())) for w in query_array])\n self.query_feature_cache[query] = {'query_idf':idfs.sum(), 'query_char_count':len(query)}\n return self.query_feature_cache[query]\n\n def calculate_features_query_doc(self, doc_id, query):\n \"\"\"\n For new request, calculates the ranking according the models.\n For existing request, check cache.\n :param doc_id:\n :param query:\n :return: Ranking of given doc_id for given query\n \"\"\"\n if query not in self.ranking_feature_cache:\n rankings = {}\n for model in self.models:\n model_name = type(model).__name__\n ranking = model.getRanking(query)\n ranking_array = np.array(ranking, dtype=object)\n doc_index, rank = ranking_array.T # split columns\n df = pd.DataFrame({'ranking': rank}, index=doc_index)\n rankings[model_name] = df\n self.ranking_feature_cache[query] = rankings\n\n query_rankings = self.ranking_feature_cache[query]\n #import pdb; pdb.set_trace()\n return {model_name:query_rankings[model_name].loc[doc_id].values[0] for model_name in query_rankings.keys()}\n\n\n\n","repo_name":"samutamm/RI","sub_path":"Featurer.py","file_name":"Featurer.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11863518469","text":"import json\nimport requests\nimport datetime\nimport time\nimport dateutil.parser\nimport pytz\n\n# YELP TIMEZONE BUG:\n# All events are assumed to originally have been in Pacific time.\n# To query Yelp, change the timezone of your query to Pacific (don't change time, just timezone)\n# and then convert that to UTC.\n# When getting results from Yelp, convert from UTC to Pacific time, then set the Timezone to the local time (without changing time)\ndef str_to_datetime(date_string, timezone_name):\n dt = dateutil.parser.parse(date_string)\n return pytz.timezone(timezone_name).localize(dt)\n\ndef str_to_unix(date_string, timezone_name):\n # dt = str_to_datetime(date_string, 
timezone_name)\n dt = str_to_datetime(date_string, 'America/Los_Angeles')\n return int(dt.timestamp())\n\ndef to_local_time(dt, timezone_name):\n # Convert from UTC to PDT\n dt = dt.astimezone(pytz.timezone(\"America/Los_Angeles\"))\n # Set timezone to actual timezone instead of PDT and remove timezone info.\n return pytz.timezone(timezone_name).localize(dt.replace(tzinfo=None)).replace(tzinfo=None)\n # return dt.astimezone(pytz.timezone(timezone_name))\n\nclass BusinessSearchResult():\n def __init__(self,\n rating=None,\n price=None,\n phone=None,\n id=None,\n alias=None,\n is_closed=None,\n categories=None,\n review_count=None,\n name=None,\n url=None,\n latitude=None,\n longitude=None,\n image_url=None,\n location=None,\n distance=None,\n transactions=None,\n hours=None):\n self. rating = rating\n self.price = price\n self.phone = phone\n self.id = id\n self.alias = alias\n self. is_closed = is_closed\n self.categories = categories\n self.review_count = review_count\n self.name = name\n self.url = url\n self.latitude = latitude\n self.longitude = longitude\n self.image_url = image_url\n self.location = location\n self.distance = distance\n self.transactions = transactions\n self.hours = hours # Will be none unless using graphql\n\nclass BusinessDetails():\n def __init__(self,\n id=None,\n alias=None,\n name=None,\n image_url=None,\n is_claimed=None,\n is_closed=None,\n url=None,\n price=None,\n rating=None,\n review_count=None,\n phone=None,\n photos=None,\n hours=None,\n categories=None,\n latitude=None,\n longitude=None,\n location=None,\n transactions=None\n ):\n self.id = id\n self.alias = alias\n self.name = name\n self.image_url = image_url\n self.is_claimed = is_claimed\n self.is_closed = is_closed\n self.url = url\n self.price = price\n self.rating = rating\n self.review_count = review_count\n self.phone = phone\n self.photos = photos\n self.hours = hours\n self.categories = categories\n self.latitude = latitude\n self.longitude = longitude\n self.location = location\n self.transactions = transactions\n\nclass EventSearchResult():\n def __init__(self,\n attending_count=None,\n categories=None,\n cost=None,\n cost_max=None,\n description=None,\n url=None,\n id=None,\n image_url=None,\n interested_count=None,\n is_canceled=None,\n is_free=None,\n is_official=None,\n latitude=None,\n longitude=None,\n name=None,\n tickets_url=None,\n start_time=None,\n end_time=None,\n location=None,\n cross_streets=None,\n business_alias=None):\n self.attending_count = attending_count\n self.categories = categories\n self.cost = cost\n self.cost_max = cost_max\n self.description = description\n self.url = url\n self.id = id\n self.image_url = image_url\n self.interested_count = interested_count\n self.is_canceled = is_canceled\n self.is_free = is_free\n self.is_official = is_official\n self.latitude = latitude\n self.longitude = longitude\n self.name = name\n self.tickets_url = tickets_url\n self.start_time = start_time\n self.end_time = end_time\n self.location = location\n self.cross_streets = cross_streets\n self.business_alias = business_alias\n\n\nclass YelpAPI():\n def __init__(self, apikey, categories_file):\n self.API_HOST = r\"https://api.yelp.com/v3/\"\n self.API_KEY = apikey\n self.BIZ_CATEGORIES = self._read_categories(categories_file)\n self.EVENT_CATEGORIES = {\n \"music\": {\"title\": \"Music\", \"parents\": []},\n \"visual-arts\": {\"title\": \"Visual Arts\", \"parents\": []},\n \"performing-arts\": {\"title\": \"Performing Arts\", \"parents\": []},\n \"film\": {\"title\": 
\"Film\", \"parents\": []},\n \"lectures-books\": {\"title\": \"Lectures & Books\", \"parents\": []},\n \"fashion\": {\"title\": \"Fashion\", \"parents\": []},\n \"food-and-drink\": {\"title\": \"Food & Drink\", \"parents\": []},\n \"festivals-fairs\": {\"title\": \"Festivals & Fairs\", \"parents\": []},\n \"charities\": {\"title\": \"Charities\", \"parents\": []},\n \"sports-active-life\": {\"title\": \"Sports & Active Life\", \"parents\": []},\n \"nightlife\": {\"title\": \"Nightlife\", \"parents\": []},\n \"kids-family\": {\"title\": \"Kids & Family\", \"parents\": []},\n \"other\": {\"title\": \"Other\", \"parents\": []}\n }\n\n def _read_categories(self, categories_file):\n with open(categories_file, \"r\") as f:\n cats = json.load(f)\n categories = {}\n for elem in cats:\n categories[elem['alias']] = {\"title\": elem['title'], \"parents\": elem['parents']}\n return categories\n\n def _request(self, endpoint, url_params):\n headers = {\n \"Authorization\": \"Bearer {0}\".format(self.API_KEY)\n }\n url = \"{0}{1}\".format(self.API_HOST, endpoint)\n response = requests.request('GET', url, headers=headers, params=url_params)\n return response.json()\n\n def _graphql_request(self, query):\n headers = {\n \"Authorization\": \"Bearer {0}\".format(self.API_KEY),\n \"Content-Type\": \"application/graphql\",\n \"Accept-Language\": \"en_US\"\n }\n url = \"{0}graphql\".format(self.API_HOST)\n response = requests.request('POST', url, headers=headers, data=query)\n resp = response.json()\n if 'data' in resp:\n return resp['data']\n else:\n return resp\n\n def flatten_raw_categories(self, categories):\n return set([cat['alias'] for cat in categories]) if categories is not None else None\n\n def add_parent_categories(self, categories):\n if categories is None:\n return None\n enhanced_categories = set()\n queue = []\n queue.extend(categories)\n while(len(queue) > 0):\n cat = queue.pop()\n if cat not in enhanced_categories:\n if cat in self.BIZ_CATEGORIES:\n parents = self.BIZ_CATEGORIES[cat]['parents']\n queue.extend(parents)\n enhanced_categories.add(cat)\n return enhanced_categories\n\n # https://www.yelp.com/developers/documentation/v3/business_search\n # Fusion endpoint. Doesn't return hours.\n def business_search(self,\n term=None,\n location=None,\n latitude=None,\n longitude=None,\n radius=None,\n categories=None,\n locale=None,\n limit=20,\n offset=0,\n sort_by=None,\n price=None,\n open_now=None,\n open_at=None,\n attributes=None,\n add_parent_categories=False,\n max_num_businesses=20):\n limit = min(max_num_businesses, limit)\n params = dict(locals())\n del params['add_parent_categories']\n del params['max_num_businesses']\n del params['self']\n keys = list(params.keys())\n for k in keys:\n if params[k] is None:\n del params[k]\n response = self._request(\"businesses/search\", params)\n businesses = response['businesses']\n business_objs = self._process_business_search_businesses(businesses, add_parent_categories)\n\n num_businesses_left = max_num_businesses - len(business_objs) - offset\n if num_businesses_left > 0 and len(business_objs) == limit:\n print(num_businesses_left, \"businesses left. 
Found\", len(business_objs),\" just now.\")\n additional_business_objs = self.business_search(term=term,\n location=location,\n latitude=latitude,\n longitude=longitude,\n radius=radius,\n categories=categories,\n locale=locale,\n limit=min(limit, num_businesses_left),\n offset= offset + len(business_objs),\n sort_by=sort_by,\n price=price,\n open_now=open_now,\n open_at=open_at,\n attributes=attributes,\n add_parent_categories=add_parent_categories,\n max_num_businesses=max_num_businesses)['businesses']\n business_objs.extend(additional_business_objs)\n\n result = {\n \"total\": response['total'],\n \"businesses\": business_objs,\n \"latitude\": response['region']['center']['latitude'],\n \"longitude\": response['region']['center']['longitude']\n }\n return result\n\n def _process_business_search_businesses(self, businesses, add_parent_categories):\n business_objs = []\n for biz in businesses:\n cats = self.flatten_raw_categories(biz.get('categories'))\n if add_parent_categories:\n cats = self.add_parent_categories(cats)\n business_objs.append(\n BusinessSearchResult(\n rating=biz.get('rating'),\n price=biz.get('price'),\n phone=biz.get('phone'),\n id=biz['id'],\n alias=biz['alias'],\n is_closed=biz.get('is_closed'),\n categories=cats,\n review_count=biz.get('review_count'),\n name=biz.get('name'),\n url=biz.get('url'),\n latitude=biz['coordinates']['latitude'],\n longitude=biz['coordinates']['longitude'],\n image_url=biz.get('image_url'),\n location=biz.get('location'),\n distance=biz.get('distance'),\n transactions=biz.get('transactions'),\n hours=biz.get('hours')\n )\n )\n return business_objs\n\n # https://www.yelp.com/developers/documentation/v3/business\n def business_details(self, id, add_parent_categories=False):\n result = self._request(\"businesses/{0}\".format(id), {})\n return self._process_business_detail(result, add_parent_categories)\n\n def graphql_bulk_business_hours(self, ids):\n business_query_str = \"\"\"\n b{0}: business(id: \"{1}\") {{\n name\n id\n hours {{\n is_open_now\n open {{\n day\n is_overnight\n start\n end\n }}\n }}\n }}\n \"\"\"\n formatted_businesses = []\n for idx, biz_id in enumerate(ids):\n formatted_businesses.append(business_query_str.format(idx, biz_id))\n query = \"{{ {0} }}\".format(\"\\n\".join(formatted_businesses))\n results = self._graphql_request(query)\n\n all_hours = []\n print(results)\n for i in range(len(ids)):\n result = results[\"b{0}\".format(i)]\n hours_raw = result.get('hours')\n hours = None\n if hours_raw is not None:\n hours = self._reformat_business_hours(hours_raw)\n all_hours.append(hours)\n return all_hours\n\n def add_hours_to_search_results(self, results):\n ids = [r.id for r in results]\n hours = self.graphql_bulk_business_hours(ids)\n for i in range(len(results)):\n results[i].hours = hours[i]\n\n def _reformat_business_hours(self, hours_raw):\n # Change format of hours to a list. 
One element for each day,\n # and each element is a list of the various hours for that day,\n # where the \"various hours\" are specified in dictionaries.\n # \"REGULAR\" is omitted since it's always that.\n hours = None\n if hours_raw is not None:\n hours = [[] for _ in range(7)]\n for hrs in hours_raw:\n for elem in hrs['open']:\n stime = time.strptime(elem['start'], \"%H%M\")\n etime = time.strptime(elem['end'], \"%H%M\")\n hours[elem['day']].append({\n \"is_overnight\": elem['is_overnight'],\n \"start\": datetime.time(stime.tm_hour, stime.tm_min),\n \"end\": datetime.time(etime.tm_hour, etime.tm_min)\n })\n return hours\n\n def _process_business_detail(self, result, add_parent_categories):\n cats = self.flatten_raw_categories(result.get('categories'))\n if add_parent_categories:\n cats = self.add_parent_categories(cats)\n\n hours_raw = result.get('hours')\n hours = None\n if hours_raw is not None:\n hours = self._reformat_business_hours(hours_raw)\n\n return BusinessDetails(\n id=result['id'],\n alias=result['alias'],\n name=result.get('name'),\n image_url=result.get('image_url'),\n is_claimed=result.get('is_claimed'),\n is_closed=result.get('is_closed'),\n url=result.get('url'),\n price=result.get('price'),\n phone=result.get('phone'),\n photos=result.get('photos'),\n hours=hours,\n categories=cats,\n latitude=result['coordinates']['latitude'],\n longitude=result['coordinates']['longitude'],\n transactions=result.get('transactions')\n )\n\n def event_search(self,\n offset=None,\n limit=None,\n sort_by=None,\n sort_on=None,\n start_date=None, # Datetime object\n end_date=None, # Datetime object\n categories=None,\n is_free=None,\n location=None,\n latitude=None,\n longitude=None,\n radius=None,\n excluded_events=None,\n timezone=None,\n duration_if_no_end=datetime.timedelta(seconds=3600 * 2)): # Default duration 2hrs if no end time\n if timezone is None:\n raise Exception(\"Must specify timezone\")\n start_date = str_to_unix(start_date, timezone)\n end_date = str_to_unix(end_date, timezone)\n params = dict(locals())\n #del params['add_parent_categories']\n keys = list(params.keys())\n for k in keys:\n if params[k] is None:\n del params[k]\n response = self._request(\"events\", params)\n events = response[\"events\"]\n event_objs = []\n print([x['id'] for x in events])\n for event in events:\n if \"category\" in event:\n cats = set([\"event\", event.get('category')]) # Events only have 1 category. 
Manually adding \"event\" as parent category.\n else:\n cats = set([\"event\"])\n print(event['time_start'], event['time_end'])\n stime = to_local_time(dateutil.parser.parse(event['time_start']), timezone)\n if event['time_end'] is not None:\n etime = to_local_time(dateutil.parser.parse(event['time_end']), timezone)\n elif duration_if_no_end is not None:\n etime = stime + duration_if_no_end\n else:\n etime = None\n event_objs.append(\n EventSearchResult(\n attending_count=event.get('attending_count'),\n categories=cats,\n cost=event.get('cost'),\n cost_max=event.get('cost_max'),\n url=event.get('event_site_url'),\n id=event['id'],\n image_url=event.get('image_url'),\n interested_count=event.get('interested_count'),\n is_canceled=event.get('is_canceled'),\n is_free=event.get('is_free'),\n is_official=event.get('is_official'),\n latitude=event.get('latitude'),\n longitude=event.get('longitude'),\n name=event.get('name'),\n tickets_url=event.get('tickets_url'),\n start_time=stime,\n end_time=etime,\n\n )\n )\n print(events[0]['time_start'])\n return event_objs\n\n\n","repo_name":"alexandermoore/dayplanner","sub_path":"api/yelp/yelp.py","file_name":"yelp.py","file_ext":"py","file_size_in_byte":18743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24833969555","text":"\"\"\"\nThese tests cover the navigation from the Get Started Page.\n\"\"\"\nimport pytest\nfrom pytest_bdd import scenarios, scenario, given, when, then, parsers\nfrom functools import partial\nfrom pages.getstarted import SleepioLandingPage\nfrom pages.sleepscoreinitialpage import SleepScoreHowWouldYouLikeToImproveYourSleepPage\n\n#scenarios = partial(scenario, '../tests/features/getstarted.feature')\n\n@scenario('../tests/features/getstarted.feature', 'Basic Sleepio Navigation From Landing Page')\ndef test_getstarted():\n pass\n\n@pytest.mark.usefixtures(\"browser\")\ndef test_navigation_from_landing_page(browser):\n landing_page = SleepioLandingPage(browser)\n result_page = SleepScoreHowWouldYouLikeToImproveYourSleepPage(browser)\n Expected_header = \"YOUR SLEEP SCORE\"\n return landing_page\n\n# Given the Sleepio home page is displayed\n@given(\"the Sleepio home page is displayed\")\ndef test_given_on_landing_page(browser):\n landing_page = SleepioLandingPage(browser)\n landing_page.load()\n\n# When the user clicks \"Get Started\"\n@when('the user clicks \"Get Started\"')\ndef getstarted_click(browser):\n landing_page = SleepioLandingPage(browser)\n landing_page.click()\n\n# Then the page header contains \"YOUR SLEEP SCORE\"\n@then('the page header contains \"YOUR SLEEP SCORE\"')\ndef page_header_assertion(browser):\n Expected_header = \"YOUR SLEEP SCORE\"\n result_page = SleepScoreHowWouldYouLikeToImproveYourSleepPage(browser)\n assert Expected_header in result_page.result_header_value()\n\n# And the page with the question \"How would you like to improve your sleep?\" appears\n@then('the page with the question \"How would you like to improve your sleep?\" appears')\ndef page_assertion(browser):\n result_page = SleepScoreHowWouldYouLikeToImproveYourSleepPage(browser)\n assert \"How would you like to improve your sleep?\" in result_page.result_link_title()\n","repo_name":"JoshiArpitaDemo/SleepioUITests","sub_path":"tests/test_getstarted_first.py","file_name":"test_getstarted_first.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73334560302","text":"import 
abc\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport copy\nimport numpy as np\n\n\n# There might be some dimension errors which can be fixed based on the input. The general idea is that you compute the feature distribution mean of a past window and compare it to the present window and if they're close, keep that past window in an exemplar set to replay.\n\n\nclass ExemplarHandler(nn.Module, metaclass=abc.ABCMeta):\n \"\"\"Abstract module for a classifier that can store and use exemplars.\n Adds a exemplar-methods to subclasses, and requires them to provide a 'feature-extractor' method.\"\"\"\n\n def __init__(self):\n super().__init__()\n\n # list with exemplar-sets\n self.exemplar_sets = (\n []\n ) # --> each exemplar_set is an of N images with shape (N, Ch, H, W)\n self.exemplar_means = []\n self.compute_means = True\n hidden_sizes = [12,12] # Change Accordingly\n self.net = nn.Sequential()\n sequence_length = 58 # Change Accordingly\n self.net.add_module(\"input\", nn.Linear(sequence_length, hidden_sizes[0]))\n\n for i in range(1, len(hidden_sizes)):\n # Each layer will divide the size of feature map by 2\n self.net.add_module(\n \"linear%d\" % i,\n nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]),\n )\n self.net.add_module(\"relu%d\" % i, nn.ReLU(True))\n # settings\n self.memory_budget = 2000\n self.norm_exemplars = True\n self.herding = True\n\n def _device(self):\n return next(self.parameters()).device\n\n def _is_on_cuda(self):\n return next(self.parameters()).is_cuda\n\n def feature_extractor(self, batch_x):\n\n return self.net(batch_x)\n \n\n ####----MANAGING EXEMPLAR SETS----####\n\n def reduce_exemplar_sets(self, m):\n for y, P_y in enumerate(self.exemplar_sets):\n self.exemplar_sets[y] = P_y[:m]\n\n def construct_exemplar_set(self, dataset, n):\n \"\"\"Construct set of [n] exemplars from [dataset] using 'herding'.\n Note that [dataset] should be from specific class; selected sets are added to [self.exemplar_sets] in order.\"\"\"\n\n # set model to eval()-mode\n mode = self.training\n self.eval()\n\n n_max = len(dataset)\n exemplar_set = []\n\n if self.herding:\n # compute features for each example in [dataset]\n first_entry = True\n\n # dataloader = define your dataloader\n\n # image_batch is just the batch of timeseries\n for i in range(0, len(dataset), 32): # Change Accordingly\n image_batch = torch.tensor(dataset[i:i+32]).type(torch.float32) # Change Accordingly\n # print(image_batch.shape)\n image_batch = image_batch.to(self._device())\n with torch.no_grad():\n feature_batch = self.feature_extractor(image_batch).cpu()\n if first_entry:\n features = feature_batch\n first_entry = False\n else:\n features = torch.cat([features, feature_batch], dim=0)\n if self.norm_exemplars:\n features = F.normalize(features, p=2, dim=1)\n\n # calculate mean of all features\n class_mean = torch.mean(features, dim=0, keepdim=True)\n if self.norm_exemplars:\n class_mean = F.normalize(class_mean, p=2, dim=1)\n\n # one by one, select exemplar that makes mean of all exemplars as close to [class_mean] as possible\n exemplar_features = torch.zeros_like(features[: min(n, n_max)])\n list_of_selected = []\n for k in range(min(n, n_max)):\n if k > 0:\n exemplar_sum = torch.sum(exemplar_features[:k], dim=0).unsqueeze(0)\n features_means = (features + exemplar_sum) / (k + 1)\n features_dists = features_means - class_mean\n else:\n features_dists = features - class_mean\n index_selected = np.argmin(torch.norm(features_dists, p=2, dim=1))\n if index_selected in 
list_of_selected:\n raise ValueError(\"Exemplars should not be repeated!!!!\")\n list_of_selected.append(index_selected)\n\n exemplar_set.append(dataset[index_selected])\n exemplar_features[k] = copy.deepcopy(features[index_selected])\n\n # make sure this example won't be selected again\n features[index_selected] = features[index_selected] + 10000\n else:\n indeces_selected = np.random.choice(\n n_max, size=min(n, n_max), replace=False\n )\n for k in indeces_selected:\n exemplar_set.append(dataset[k])\n\n # add this [exemplar_set] as a [n]x[ich]x[isz]x[isz] to the list of [exemplar_sets]\n self.exemplar_sets.append(np.array(exemplar_set))\n\n # set mode of model back\n print(np.array(self.exemplar_sets).shape)\n #self.train(mode=mode)\n return self.exemplar_sets\n","repo_name":"digantamisra98/CL_Timeseries","sub_path":"experience_replay/experience_replay.py","file_name":"experience_replay.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"43449022339","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium.webdriver.common.keys import Keys\n \ndriver =webdriver.Chrome()\ndriver.maximize_window()\n\nbaseUrl = \"https://www.bongobd.com/\"\ndriver.get(baseUrl)\n\ndriver.find_element_by_xpath(\"//*[@id='content']/div[1]/nav/div/div/div[2]/div[4]/ul/li[1]/a\").click()\n\nwindow_before = driver.window_handles[0]\nwindow_before_title = driver.title\ndriver.find_element_by_xpath(\"//*[@id='regNext']/span[2]\").click()\n\nwindow_after = driver.window_handles[1]\ndriver.switch_to_window(window_after)\n\n\ndriver.find_element_by_xpath(\"//*[@id='u_0_6q']\").send_keys(\"1710493613\")\n\ndriver.find_element_by_id(\"u_0_6r\").click()\ndriver.implicitly_wait(10)\n\n\nmyElement = driver.find_element_by_name(\"confirmation_code\")\nmyElement.send_keys(\"556723\")\nmyElement.send_keys(Keys.ENTER)\n\ndriver.implicitly_wait(10)\nbrowser.quit()\n\n\n","repo_name":"rafayet13/BongoBdD","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"35956126787","text":"from django.conf.urls import url\n\nfrom views import index, editperson, listpersons\nurlpatterns = [\n url(r'^$',index, name='pim-index'),\n url(r'^edit/(?P[0-9]+)/$',editperson,name='pim-edit'),\n url(r'^list/$',listpersons,name='pim-list')\n\n]\n\n\n\n\n\n","repo_name":"cogitoergozum/sample","sub_path":"pim/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37267746577","text":"#!/usr/bin/python3\r\n#coding: utf-8\r\n\r\n# post 13/12/2017\r\nDEFAULT_MATCHES_8 = [[\"ac-ajaccio\", \"aj-auxerre\"], [\"bourg-peronnas\", \"lorient\"], [\"chateauroux\", \"orléans\"], [\"clermont-foot\", \"sochaux\"]\r\n , [\"gfc-ajaccio\", \"nimes\"], [\"le-havre-ac\", \"niort\"], [\"as-nancy-lorraine\", \"paris-fc\"], [\"us-quevilly\", \"brest\"]\r\n , [\"stade-de-reims\", \"valenciennes\"], [\"lens\", \"tours\"],]\r\n\r\n# post 13/12/2017\r\nDEFAULT_MATCHES_9 = [[\"ac-ajaccio\", \"aj-auxerre\"], [\"bourg-peronnas\", \"lorient\"], [\"chateauroux\", \"orléans\"], [\"clermont-foot\", \"sochaux\"]\r\n , [\"gfc-ajaccio\", \"nimes\"], [\"le-havre-ac\", \"niort\"], [\"as-nancy-lorraine\", \"paris-fc\"], 
[\"us-quevilly\", \"brest\"]\r\n , [\"stade-de-reims\", \"valenciennes\"], [\"lens\", \"tours\"],]\r\n \r\n# post 02/02/2018\r\nDEFAULT_MATCHES_10 = [[\"bourg-peronnas\", \"clermont-foot\"], [\"tours\", \"aj-auxerre\"], [\"orléans\", \"le-havre-ac\"], [\"paris-fc\", \"niort\"]\r\n , [\"us-quevilly\", \"chateauroux\"], [\"valenciennes\", \"as-nancy-lorraine\"], [\"nimes\", \"ac-ajaccio\"], [\"sochaux\", \"gfc-ajaccio\"]\r\n , [\"lorient\", \"lens\"], [\"brest\", \"stade-de-reims\"],]\r\n \r\n# post 07/02/2018\r\nDEFAULT_MATCHES_11 = [[\"as-nancy-lorraine\", \"us-quevilly\"], [\"clermont-foot\", \"gfc-ajaccio\"], [\"chateauroux\", \"bourg-peronnas\"], \r\n [\"ac-ajaccio\", \"lorient\"], [\"tours\", \"paris-fc\"], [\"stade-de-reims\", \"sochaux\"], [\"le-havre-ac\", \"brest\"], \r\n [\"niort\", \"orléans\"], [\"lens\", \"valenciennes\"], [\"aj-auxerre\", \"nimes\"],]\r\n \r\n# post 15/02/2018\r\nDEFAULT_MATCHES_12 = [[\"orléans\", \"paris-fc\"], [\"gfc-ajaccio\", \"stade-de-reims\"], [\"nimes\", \"tours\"], \r\n [\"sochaux\", \"le-havre-ac\"], [\"clermont-foot\", \"chateauroux\"], [\"brest\", \"niort\"], [\"bourg-peronnas\", \"as-nancy-lorraine\"], \r\n [\"valenciennes\", \"ac-ajaccio\"], [\"lorient\", \"aj-auxerre\"], [\"us-quevilly\", \"lens\"],]\r\n \r\n# post 22/02/2018\r\nDEFAULT_MATCHES_13 = [[\"chateauroux\", \"gfc-ajaccio\"], [\"niort\", \"nimes\"], [\"le-havre-ac\", \"bourg-peronnas\"], \r\n [\"lens\", \"clermont-foot\"], [\"tours\", \"orléans\"], [\"aj-auxerre\", \"valenciennes\"], [\"ac-ajaccio\", \"sochaux\"], \r\n [\"stade-de-reims\", \"us-quevilly\"], [\"paris-fc\", \"brest\"], [\"as-nancy-lorraine\", \"lorient\"],]\r\n \r\n# post 01/03/2018\r\nDEFAULT_MATCHES_14 = [[\"chateauroux\", \"as-nancy-lorraine\"], [\"clermont-foot\", \"stade-de-reims\"], [\"valenciennes\", \"orléans\"], \r\n [\"us-quevilly\", \"niort\"], [\"brest\", \"tours\"], [\"sochaux\", \"aj-auxerre\"], [\"bourg-peronnas\", \"ac-ajaccio\"], \r\n [\"gfc-ajaccio\", \"lens\"], [\"nimes\", \"paris-fc\"], [\"lorient\", \"le-havre-ac\"],]\r\n \r\n# post 08/03/2018\r\nDEFAULT_MATCHES_15 = [[\"niort\", \"lorient\"], [\"orléans\", \"nimes\"], [\"tours\", \"valenciennes\"], \r\n [\"le-havre-ac\", \"clermont-foot\"], [\"stade-de-reims\", \"chateauroux\"], [\"ac-ajaccio\", \"us-quevilly\"], [\"aj-auxerre\", \"brest\"], \r\n [\"as-nancy-lorraine\", \"gfc-ajaccio\"], [\"paris-fc\", \"sochaux\"], [\"lens\", \"bourg-peronnas\"],]\r\n\r\n# post 15/03/2018\r\nDEFAULT_MATCHES_X = [[\"lorient\", \"orléans\"], [\"clermont-foot\", \"as-nancy-lorraine\"], [\"valenciennes\", \"paris-fc\"], \r\n [\"chateauroux\", \"le-havre-ac\"], [\"bourg-peronnas\", \"niort\"], [\"sochaux\", \"tours\"], [\"us-quevilly\", \"aj-auxerre\"], \r\n [\"gfc-ajaccio\", \"ac-ajaccio\"], [\"stade-de-reims\", \"lens\"], [\"brest\", \"nimes\"],] \r\n \r\nLIGUETWO_TEAMS = [\"stade-de-reims\", \"nimes\", \"ac-ajaccio\", \"le-havre-ac\", \"brest\", \"paris-fc\", \"lorient\", \"sochaux\"\r\n , \"clermont-foot\", \"chateauroux\", \"niort\", \"valenciennes\", \"gfc-ajaccio\", \"lens\", \"as-nancy-lorraine\", \"aj-auxerre\"\r\n , \"us-quevilly\", \"tours\", \"bourg-peronnas\", \"orléans\"]\r\n\r\nGENERAL_COEFF = \"gen\"\r\nOFF_COEFF = \"off\"\r\nDEF_COEFF = \"def\"\r\nGENERAL_HOME = \"home_gen\"\r\nOFF_HOME = \"home_off\"\r\nDEF_HOME = \"home_def\"\r\nGENERAL_AWAY = \"away_gen\"\r\nOFF_AWAY = \"away_off\"\r\nDEF_AWAY = \"away_def\"\r\n\r\nCOEFF_VICT_GEN = 2\r\nCOEFF_DEF_GEN = -1\r\nCOEFF_DRAW_GEN = 1\r\nADD_MATCHES_GEN = 10\r\nCOEFF_SCORED_GEN = 1\r\nCOEFF_TAKEN_GEN = 
-1\r\nADD_GOALS_GEN = 10\r\n\r\nCOEFF_VICT_OFF = 3/2\r\nCOEFF_DEF_OFF = -1\r\nCOEFF_DRAW_OFF = 1/2\r\nCOEFF_MATCHES_OFF = 5\r\nADD_MATCHES_OFF = 5\r\nFIRST_COEFF_OFF = 1\r\nCOEFF_SCORED_OFF = 1/3\r\nCOEFF_TAKEN_OFF = -1/2\r\nCOEFF_GOALS_OFF = 5/2\r\nADD_GOALS_OFF = 5\r\nSCD_COEFF_OFF = 2/3\r\n\r\nCOEFF_VICT_DEF = 1\r\nCOEFF_DEF_DEF = -4/3\r\nCOEFF_DRAW_DEF = 1/3\r\nCOEFF_MATCHES_DEF = 5\r\nADD_MATCHES_DEF = 6\r\nFIRST_COEFF_DEF = 1\r\nCOEFF_SCORED_DEF = 1/2\r\nCOEFF_TAKEN_DEF = -2\r\nCOEFF_GOALS_DEF = 5/2\r\nADD_GOALS_DEF = 5\r\nSCD_COEFF_DEF = 1\r\n\r\nMUTABLE_VARS = {}\r\n\r\n## OLD SCHOOL OUTPUT\r\n##MUTABLE_VARS[\"GLOBAL_COEFF\"] = 1\r\n##\r\n##MUTABLE_VARS[\"COEFF_VICT_GEN\"] = 3\r\n##MUTABLE_VARS[\"COEFF_DEF_GEN\"] = -9/2\r\n##MUTABLE_VARS[\"COEFF_DRAW_GEN\"] = 0\r\n##\r\n##MUTABLE_VARS[\"COEFF_SCORED_GEN\"] = 3\r\n##MUTABLE_VARS[\"COEFF_TAKEN_GEN\"] = -17/2\r\n##\r\n##MUTABLE_VARS[\"COEFF_LEARNING\"] = 13/2\r\n##MUTABLE_VARS[\"COEFF_GOALS\"] = 7/2\r\n##MUTABLE_VARS[\"COEFF_VALUE\"] = 7/1200\r\n##MUTABLE_VARS[\"COEFF_EXP\"] = 27/1000\r\n##MUTABLE_VARS[\"COEFF_WIN_STREAK\"] = 1\r\n##MUTABLE_VARS[\"SIMILAR_TEAM_RANGE\"] = 32\r\n\r\n## 66.45% \r\nMUTABLE_VARS[\"COEFF_VICT_GEN\"] = 7.7\r\nMUTABLE_VARS[\"COEFF_DEF_GEN\"] = 10.0\r\nMUTABLE_VARS[\"COEFF_DRAW_GEN\"] = -14.0\r\nMUTABLE_VARS[\"COEFF_SCORED_GEN\"] = -9.8\r\nMUTABLE_VARS[\"COEFF_TAKEN_GEN\"] = 12.7\r\nMUTABLE_VARS[\"COEFF_LEARNING\"] = -2.25333\r\nMUTABLE_VARS[\"COEFF_GOALS\"] = 2.98667\r\nMUTABLE_VARS[\"COEFF_VALUE\"] = -0.00219\r\nMUTABLE_VARS[\"COEFF_EXP\"] = -0.02102\r\nMUTABLE_VARS[\"SIMILAR_TEAM_RANGE\"] = 68.46667\r\nMUTABLE_VARS[\"COEFF_WIN_STREAK\"] = 12.13333\r\nMUTABLE_VARS[\"GLOBAL_COEFF\"] = 2.0\r\n\r\n\r\n\r\n\r\nDEFAULT_MATCHES_X = [\"['ac-ajaccio', 'clermont-foot']\", \"['aj-auxerre', 'bourg-peronnas']\", \"['le-havre-ac', 'us-quevilly']\", \"['nimes', 'valenciennes']\", \"['niort', 'sochaux']\", \"['orléans', 'brest']\", \"['paris-fc', 'gfc-ajaccio']\", \"['tours', 'lorient']\", \"['as-nancy-lorraine', 'stade-de-reims']\", \"['lens', 'chateauroux']\"]\r\n\r\nDEFAULT_MATCHES_X = [['ac-ajaccio', 'clermont-foot'], ['aj-auxerre', 'bourg-peronnas'], ['le-havre-ac', 'us-quevilly'], ['nimes', 'valenciennes'], ['niort', 'sochaux'], ['orléans', 'brest'], ['paris-fc', 'gfc-ajaccio'], ['tours', 'lorient'], ['as-nancy-lorraine', 'stade-de-reims'], ['lens', 'chateauroux']]\r\n\r\nWEEK_MATCHES = [['clermont-foot', 'niort'], ['lorient', 'paris-fc'], ['us-quevilly', 'nimes'], ['bourg-peronnas', 'tours'], ['brest', 'valenciennes'], ['chateauroux', 'ac-ajaccio'], ['gfc-ajaccio', 'aj-auxerre'], ['sochaux', 'orléans'], ['stade-de-reims', 'le-havre-ac'], ['lens', 'as-nancy-lorraine']]","repo_name":"Gargarok/FriendlyTortoise","sub_path":"LTAlgorithmConstants.py","file_name":"LTAlgorithmConstants.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72204608944","text":"from unittest import mock\n\nimport pytest\n\nfrom valsys.utils.utils import read_env\n\n\nMODULE_PREFIX = \"valsys.utils.utils\"\n\n\nclass TestReadEnv:\n\n @mock.patch(f\"{MODULE_PREFIX}.os.getenv\")\n def test_works_ok(self, mock_getenv):\n varn = 'variableName'\n mock_getenv.return_value = 42\n assert read_env(varn) == 42\n mock_getenv.assert_called_with(varn)\n\n @mock.patch(f\"{MODULE_PREFIX}.os.getenv\")\n def test_not_found_required(self, mock_getenv):\n varn = 'variableName'\n mock_getenv.return_value = None\n with pytest.raises(ValueError) as err:\n 
read_env(varn)\n assert varn in str(err)\n\n @mock.patch(f\"{MODULE_PREFIX}.os.getenv\")\n def test_not_found_not_required(self, mock_getenv):\n varn = 'variableName'\n mock_getenv.return_value = None\n assert read_env(varn, required=False) == None\n","repo_name":"valsysinc/valsys-api","sub_path":"valsys/utils/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"74045984624","text":"from questionModel import AskQues\nfrom data import question_data\nfrom quiz_brain import Score, CheckAns\n\n\naskQues = AskQues()\nquestion_checker = CheckAns()\n\n# print(askQues.get_ques(question_data[0]))\n\n\n# //ask as question\n# take a input\n# check asnwer\n# if true\n# continue till score reaches 10\n# else\n# print score and ask user to play again\nplay_again = True\nwhile (play_again):\n\n scoreCalc = Score()\n for i in range(0, 11):\n print(f\"{i+1}. {askQues.get_ques(question_data[i])}\\n(True/False)\")\n\n userAnswer = input().capitalize()\n answer = askQues.get_ans(question_data[i])\n\n result = question_checker.check_ans(userAnswer, answer=answer)\n\n scoreCalc.update_score(result)\n\n if (result):\n print(\"You got it right: \")\n print(f\"your current score is: {scoreCalc.get_score()}/{i}.\\n\\n\\n\")\n else:\n print(\"Thats Wrong: \")\n print(f\"your current score is: {scoreCalc.get_score()}/{i}.\\n\\n\\n\")\n break\n\n play_again = input(\n \"Do you want to play again:\\nType 'y' for playing again else any key to exit : \")\n\n print(\"\\n\\n\\n\")\n if play_again.lower() == \"y\":\n is_true = True\n else:\n is_true = False\n","repo_name":"Princechorasiya/NGO","sub_path":"Week_4/quizGameOOP/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28603566086","text":"#!/usr/bin/env python3\n\n\nimport os\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom astropy.io import fits\n\n\n# To ignore warning\nnp.seterr(divide='ignore', invalid='ignore')\n\n# A dictionary of window sizes and frame rates.\nwindow_rate_dict = {'511': 28.7185,\n '349': 61.0,\n '299': 82.0,\n '249': 115.0,\n '199': 180.0,\n '149': 300.0,\n '99' : 640.0}\n \n# To detect sawtooth in data. 
\ndef find_breaks(data):\n if len(data) == 0:\n return None\n data1 = np.roll(data,1)\n data1[0] = 0\n mask = data < data1\n indices = [i for i, m in enumerate(mask) if m ==True]\n return np.array(indices) \n\n# Function to create images.\ndef centroid_check(L1_FITS):\n hdulist = fits.open(L1_FITS)\n \n # To get the frame rate.\n window = str(hdulist[0].header['win_x_sz'])\n frame_rate = window_rate_dict[str(window)]\n \n # ~25 seconds of initial data could be BOD check.\n BOD_frame_length = frame_rate * 25\n frames = hdulist[2].data['SecHdrImageFrameCount']\n breaks = find_breaks(frames)\n if breaks is None:\n return\n BOD_breaks = breaks[breaks < BOD_frame_length]\n BOD_mask = np.ones(len(frames), dtype = bool)\n BOD_mask[frames == 1] = False\n if len(BOD_breaks) != 0:\n BOD_mask[: BOD_breaks[-1]] = False\n \n if len(frames[BOD_mask]) == 0:\n return\n \n # The Centroid column read as 8-bit integers (2016 columns).\n droid_array = hdulist[2].data['Centroid']\n droid_array = droid_array[BOD_mask]\n\n # Unpacks elements of an 8-bit int array into a binary-valued output array.\n # Now with 16128 columns.\n bit_data = np.unpackbits(droid_array, axis =1) \n\n # Reshaping the array with only 3 words (48 bits) in the row.\n len_row = len(bit_data) * 336\n bit_dat = bit_data.reshape(len_row, 48)\n\n # bit data gets converted to useful events data. Hold my beer! \n Rx = bit_dat[:, 0] * 256\n Lx = np.packbits(bit_dat[:, 1:9])\n Ix = Rx + Lx\n\n Ry = bit_dat[:, 16] * 256\n Ly = np.packbits(bit_dat[:, 17:25])\n Iy = Ry + Ly\n\n powers = np.array([32, 16, 8, 4, 2, 1], dtype=np.int8)\n\n Fx = bit_dat[:, 9:15]\n Fx = Fx.dot(powers)\n\n Fy = bit_dat[:, 25:31]\n Fy = Fy.dot(powers)\n\n # To convert the 6-bit integers to subpixels.\n substep = 0.03125\n\n Fx[Fx > 31] = Fx[Fx > 31] - 64\n Fx = Fx * substep\n\n Fy[Fy > 31] = Fy[Fy > 31] - 64\n Fy = Fy * substep\n\n # Adding the integer and float parts together.\n X_pos = Ix + Fx\n Y_pos = Iy + Fy\n \n hot_pixel_mask = ~np.logical_and(Ix == 131, Iy == 216)\n \n X_pos = X_pos[hot_pixel_mask]\n Y_pos = Y_pos[hot_pixel_mask]\n\n # Keeping some standards.\n X_p = X_pos[X_pos > 0.]\n Y_p = Y_pos[X_pos > 0.]\n\n fig, axs = plt.subplots(5, 2, figsize=(15, 9))\n\n marker_size = 0.2\n alpha = 0.2\n\n axs[0][0].scatter(X_p, Y_p, s = marker_size, alpha = alpha)\n axs[0][0].set_xlim(0, 102)\n axs[1][0].scatter(X_p, Y_p, s = marker_size, alpha = alpha)\n axs[1][0].set_xlim(102, 204)\n axs[2][0].scatter(X_p, Y_p, s = marker_size, alpha = alpha)\n axs[2][0].set_xlim(204, 306)\n axs[3][0].scatter(X_p, Y_p, s = marker_size, alpha = alpha)\n axs[3][0].set_xlim(306, 408)\n axs[4][0].scatter(X_p, Y_p, s = marker_size, alpha = alpha)\n axs[4][0].set_xlim(408, 512)\n axs[4][0].set_xlabel('X-centroids')\n\n axs[0][1].scatter(Y_p, X_p, s = marker_size, alpha = alpha)\n axs[0][1].set_xlim(0, 102)\n axs[1][1].scatter(Y_p, X_p, s = marker_size, alpha = alpha)\n axs[1][1].set_xlim(102, 204)\n axs[2][1].scatter(Y_p, X_p, s = marker_size, alpha = alpha)\n axs[2][1].set_xlim(204, 306)\n axs[3][1].scatter(Y_p, X_p, s = marker_size, alpha = alpha)\n axs[3][1].set_xlim(306, 408)\n axs[4][1].scatter(Y_p, X_p, s = marker_size, alpha = alpha)\n axs[4][1].set_xlim(408, 512)\n axs[4][1].set_xlabel('Y-centroids')\n\n path = os.path.normpath(L1_FITS)\n if len(path.split(os.sep)) > 1:\n parent = path.split(os.sep)[-2]\n filename = path.split(os.sep)[-1]\n figure_name = filename.replace('.fits', '_stretched_data.png')\n figure_name = parent + '_' + figure_name\n else:\n parent = os.getcwd() + os.sep\n 
figure_name = L1_FITS.replace('.fits', '_stretched_data.png')\n \n plt.savefig(figure_name,\n format = 'png',\n bbox_inches = 'tight',\n dpi = 100, \n facecolor = 'w', \n transparent = False)\n \n plt.clf()\n \n fig, axs = plt.subplots(5, 2, figsize=(15, 9))\n\n bins = np.arange(0, 512, 0.5)\n X_array, bin_edges = np.histogram(X_p, bins = bins)\n Y_array, bin_edges = np.histogram(Y_p, bins = bins)\n\n axs[0][0].plot(bin_edges[1:], X_array)\n axs[0][0].set_xlim(0, 102)\n axs[1][0].plot(bin_edges[1:], X_array)\n axs[1][0].set_xlim(102, 204)\n axs[2][0].plot(bin_edges[1:], X_array)\n axs[2][0].set_xlim(204, 306)\n axs[3][0].plot(bin_edges[1:], X_array)\n axs[3][0].set_xlim(306, 408)\n axs[4][0].plot(bin_edges[1:], X_array)\n axs[4][0].set_xlim(408, 512)\n axs[4][0].set_xlabel('X-centroids')\n\n axs[0][1].plot(bin_edges[1:], Y_array)\n axs[0][1].set_xlim(0, 102)\n axs[1][1].plot(bin_edges[1:], Y_array)\n axs[1][1].set_xlim(102, 204)\n axs[2][1].plot(bin_edges[1:], Y_array)\n axs[2][1].set_xlim(204, 306)\n axs[3][1].plot(bin_edges[1:], Y_array)\n axs[3][1].set_xlim(306, 408)\n axs[4][1].plot(bin_edges[1:], Y_array)\n axs[4][1].set_xlim(408, 512)\n axs[4][1].set_xlabel('Y-centroids')\n\n filename = path.split(os.sep)[-1]\n figure_name = filename.replace('.fits', '_stretched_data_histogram.png')\n figure_name = parent + '_' + figure_name\n\n plt.savefig(figure_name,\n format = 'png',\n bbox_inches = 'tight',\n dpi = 100, \n facecolor = 'w', \n transparent = False)\n\n plt.close('all')\n \n bins = np.arange(0, 512)\n X_array, bin_edges = np.histogram(X_p, bins = bins)\n Y_array, bin_edges = np.histogram(Y_p, bins = bins)\n\n X_array = X_array[12: 501]\n if len(X_array[X_array == 0]) > 0:\n print('\\nPossible sparse data, the gap detection could be UNRELIABLE for:')\n print(L1_FITS) \n \n X_left_ratio = X_array[1:] / X_array[:-1]\n X_right_ratio = X_array[:-1] / X_array[1:]\n X_ratio_product = X_left_ratio[1:] * X_right_ratio[:-1]\n\n Y_array = Y_array[12: 501]\n Y_left_ratio = Y_array[1:] / Y_array[:-1]\n Y_right_ratio = Y_array[:-1] / Y_array[1:]\n Y_ratio_product = Y_left_ratio[1:] * Y_right_ratio[:-1]\n \n threshold = 9\n Xgap_locations = np.argwhere(X_ratio_product > threshold)\n Ygap_locations = np.argwhere(Y_ratio_product > threshold) \n\n if len(Xgap_locations) != 0:\n print('\\nPossible gap along X-centroids, check images to confirm.')\n print(L1_FITS, Xgap_locations[0] + 13) \n \n if len(Ygap_locations) != 0:\n print('\\nPossible gap along Y-centroids, check images to confirm.')\n print(L1_FITS, Ygap_locations[0] + 13) \n\ndef check_centroid_gaps(L1_dir):\n print('\\nPlease wait, this may take time.')\n \n for dirpath, dirnames, files in os.walk(L1_dir):\n for s in files:\n fnam = os.path.join(dirpath, s)\n if fnam[-5:] == \".fits\" and fnam[-21] in ['N', 'F']:\n centroid_check(fnam) \n \n print('\\nDone! 
Please inspect the plots.\\n')\n\n\n\n\n\n\n","repo_name":"prajwel/uvitility","sub_path":"src/uvitility/uvitility.py","file_name":"uvitility.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29165765159","text":"import ure\nimport bh_plugin\nfrom bh_logging import debug, log\n\n\ndef exclude_bracket(enabled, filter_type, language_list, language):\n \"\"\"\n Exclude or include brackets based on filter lists.\n \"\"\"\n\n exclude = True\n if enabled:\n # Black list languages\n if filter_type == 'blacklist':\n exclude = False\n if language is not None:\n for item in language_list:\n if language == item.lower():\n exclude = True\n break\n # White list languages\n elif filter_type == 'whitelist':\n if language is not None:\n for item in language_list:\n if language == item.lower():\n exclude = False\n break\n return exclude\n\n\ndef is_valid_definition(params, language):\n \"\"\"\n Ensure bracket definition should be and can be loaded.\n \"\"\"\n\n return (\n not exclude_bracket(\n params.get(\"enabled\", True),\n params.get(\"language_filter\", \"blacklist\"),\n params.get(\"language_list\", []),\n language\n ) and\n params[\"open\"] is not None and params[\"close\"] is not None\n )\n\n\nclass BracketDefinition(object):\n \"\"\"\n Normal bracket definition.\n \"\"\"\n\n def __init__(self, bracket):\n \"\"\"\n Setup the bracket object by reading the passed in dictionary.\n \"\"\"\n\n self.name = bracket[\"name\"]\n self.style = bracket.get(\"style\", \"default\")\n self.compare = bracket.get(\"compare\")\n sub_search = bracket.get(\"find_in_sub_search\", \"false\")\n self.find_in_sub_search_only = sub_search == \"only\"\n self.find_in_sub_search = sub_search == \"true\" or self.find_in_sub_search_only\n self.post_match = bracket.get(\"post_match\")\n self.validate = bracket.get(\"validate\")\n self.scope_exclude_exceptions = bracket.get(\"scope_exclude_exceptions\", [])\n self.scope_exclude = bracket.get(\"scope_exclude\", [])\n self.ignore_string_escape = bracket.get(\"ignore_string_escape\", False)\n\n\nclass ScopeDefinition(object):\n \"\"\"\n Scope bracket definition.\n \"\"\"\n\n def __init__(self, bracket):\n \"\"\"\n Setup the bracket object by reading the passed in dictionary.\n \"\"\"\n\n self.style = bracket.get(\"style\", \"default\")\n self.open = ure.compile(\"\\\\A\" + bracket.get(\"open\", \".\"), ure.MULTILINE | ure.IGNORECASE)\n self.close = ure.compile(bracket.get(\"close\", \".\") + \"\\\\Z\", ure.MULTILINE | ure.IGNORECASE)\n self.name = bracket[\"name\"]\n sub_search = bracket.get(\"sub_bracket_search\", \"false\")\n self.sub_search_only = sub_search == \"only\"\n self.sub_search = self.sub_search_only is True or sub_search == \"true\"\n self.compare = bracket.get(\"compare\")\n self.post_match = bracket.get(\"post_match\")\n self.validate = bracket.get(\"validate\")\n self.scopes = bracket[\"scopes\"]\n\n\nclass SearchRules(object):\n def __init__(self, brackets, scopes, string_escape_mode, outside_adj):\n self.bracket_rules = brackets\n self.scope_rules = scopes\n self.enabled = False\n self.string_escape_mode = string_escape_mode\n self.outside_adj = outside_adj\n\n def load_rules(self, language, modules):\n self.enabled = False\n self.brackets = []\n self.scopes = []\n self.check_compare = False\n self.check_validate = False\n self.check_post_match = False\n self.parse_bracket_definition(language, modules)\n self.parse_scope_definition(language, modules)\n if 
len(self.scopes) or len(self.brackets):\n self.enabled = True\n\n def parse_bracket_definition(self, language, loaded_modules):\n \"\"\"\n Parse the bracket defintion\n \"\"\"\n\n names = []\n subnames = []\n find_regex = []\n sub_find_regex = []\n\n for params in self.bracket_rules:\n if is_valid_definition(params, language):\n try:\n bh_plugin.load_modules(params, loaded_modules)\n entry = BracketDefinition(params)\n if not self.check_compare and entry.compare is not None:\n self.check_compare = True\n if not self.check_validate and entry.validate is not None:\n self.check_validate = True\n if not self.check_post_match and entry.post_match is not None:\n self.check_post_match = True\n self.brackets.append(entry)\n if not entry.find_in_sub_search_only:\n find_regex.append(params[\"open\"])\n find_regex.append(params[\"close\"])\n names.append(params[\"name\"])\n else:\n find_regex.append(r\"([^\\s\\S])\")\n find_regex.append(r\"([^\\s\\S])\")\n\n if entry.find_in_sub_search:\n sub_find_regex.append(params[\"open\"])\n sub_find_regex.append(params[\"close\"])\n subnames.append(params[\"name\"])\n else:\n sub_find_regex.append(r\"([^\\s\\S])\")\n sub_find_regex.append(r\"([^\\s\\S])\")\n except Exception as e:\n log(e)\n\n if len(self.brackets):\n self.brackets = tuple(self.brackets)\n debug(\n \"Bracket Pattern: (%s)\\n\" % ','.join(names) +\n \" (Opening|Closing): (?:%s)\\n\" % '|'.join(find_regex)\n )\n debug(\n \"SubBracket Pattern: (%s)\\n\" % ','.join(subnames) +\n \" (Opening|Closing): (?:%s)\\n\" % '|'.join(sub_find_regex)\n )\n self.sub_pattern = ure.compile(\"(?:%s)\" % '|'.join(sub_find_regex), ure.MULTILINE | ure.IGNORECASE)\n self.pattern = ure.compile(\"(?:%s)\" % '|'.join(find_regex), ure.MULTILINE | ure.IGNORECASE)\n\n def parse_scope_definition(self, language, loaded_modules):\n \"\"\"\n Parse the scope defintion\n \"\"\"\n\n scopes = {}\n scope_count = 0\n for params in self.scope_rules:\n if is_valid_definition(params, language):\n try:\n bh_plugin.load_modules(params, loaded_modules)\n entry = ScopeDefinition(params)\n if not self.check_compare and entry.compare is not None:\n self.check_compare = True\n if not self.check_validate and entry.validate is not None:\n self.check_validate = True\n if not self.check_post_match and entry.post_match is not None:\n self.check_post_match = True\n for x in entry.scopes:\n if x not in scopes:\n scopes[x] = scope_count\n scope_count += 1\n self.scopes.append({\"name\": x, \"brackets\": [entry]})\n else:\n self.scopes[scopes[x]][\"brackets\"].append(entry)\n debug(\"Scope Regex (%s)\\n Opening: %s\\n Closing: %s\\n\" % (entry.name, entry.open.pattern, entry.close.pattern))\n except Exception as e:\n log(e)\n","repo_name":"herove/dotfiles","sub_path":"sublime/Packages/BracketHighlighter/bh_rules.py","file_name":"bh_rules.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"10254112985","text":"from pyteal import *\n\n# algosandbox goal app create --creator $acc1 --approval-prog contracts/petshop_approval.teal --clear-prog contracts/petshop_clear.teal --note pet-shop:uPets --global-byteslices 5 --global-ints 2 --local-byteslices 0 --local-ints 0 --app-arg str:TestPet --app-arg str:TestImage --app-arg int:5 --app-arg str:TestBreed --app-arg str:TestLocation\n\n\nclass Pet:\n class Variables:\n name = Bytes(\"NAME\") # ByteSlice\n image = Bytes(\"IMAGE\") # ByteSlice\n age = Bytes(\"AGE\") # ByteSlice\n breed = Bytes(\"BREED\") # 
ByteSlice\n location = Bytes(\"LOCATION\") # ByteSlice\n adopted = Bytes(\"ADOPTED\") # Uint64 0 means false, 1 means true\n owner = Bytes(\"OWNER\") # ByteSlice\n fee = Bytes(\"ADOPT_FEE\") # Uint64\n\n class AppMethods:\n adopt = Bytes(\"adopt\")\n\n # to create a new pet listed for adoption\n def application_creation(self):\n return Seq([\n # The number of arguments attached to the transaction should be exactly 6.\n Assert(Txn.application_args.length() == Int(6)),\n\n # The note attached to the transaction must be \"tutorial-marketplace:uv1\", which we define to be the note that marks a product within our marketplace\n Assert(Txn.note() == Bytes(\"pet-shop:uPetsv2\")),\n\n # Store the transaction arguments into the applications's global's state\n App.globalPut(self.Variables.name, Txn.application_args[0]),\n App.globalPut(self.Variables.image, Txn.application_args[1]),\n App.globalPut(self.Variables.age, Txn.application_args[2]),\n App.globalPut(self.Variables.breed, Txn.application_args[3]),\n App.globalPut(self.Variables.location, Txn.application_args[4]),\n App.globalPut(self.Variables.adopted, Int(0)),\n App.globalPut(self.Variables.owner, Txn.application_args[5]),\n\n Approve(),\n ])\n\n # get adoption fee from mod contract\n def getAdoptFee(self, mod_contract: Expr):\n # gets fee from mod_contract\n get_global_fee = App.globalGetEx(mod_contract, Bytes(\"FEE\"))\n\n return Seq(\n get_global_fee,\n If(get_global_fee.hasValue(), App.globalPut(self.Variables.fee,\n get_global_fee.value()), App.globalPut(self.Variables.fee, Int(0))),\n )\n\n def adopt(self):\n scratch_adopter = ScratchVar(TealType.bytes)\n\n return Seq([\n scratch_adopter.store(App.globalGet(self.Variables.owner)),\n # first sanity checks to check transaction params\n Assert(\n And(\n # The number of transactions within the group transaction must be exactly 2.\n # first one being the adopt function and the second being the payment transactions\n Global.group_size() == Int(2),\n\n # check that the adopt call is made ahead of the payment transaction\n Txn.group_index() == Int(0),\n\n # The number of external applications must be == 1. 
as a call is made to the Mod_contract to get the adoptionFee\n # Txn.applications[0] is a special index denoting the current app being interacted with\n Txn.applications.length() == Int(1),\n\n # The number of arguments attached to the transaction should be exactly 2.\n Txn.application_args.length() == Int(2),\n\n # Check that current owner is not the transaction sender as that's redundant\n scratch_adopter.load() != Txn.application_args[1],\n ),\n ),\n\n # get fee from the mod contract\n self.getAdoptFee(Txn.applications[1]),\n\n # checks for second transaction\n Assert(\n And(\n # check if fee is greater is zero\n App.globalGet(self.Variables.fee) > Int(0),\n # The second transaction of the group must be the payment transaction.\n Gtxn[1].type_enum() == TxnType.Payment,\n # The receiver of the payment should be the creator of the app\n Gtxn[1].receiver() == Global.creator_address(),\n # The payment amount should match the product's price multiplied by the number of products bought\n Gtxn[1].amount() == App.globalGet(self.Variables.fee),\n # The sender of the payment transaction should match the sender of the smart contract call transaction.\n Gtxn[1].sender() == Gtxn[0].sender(),\n )\n ),\n\n # The global state is updated using App.globalPut()\n\n App.globalPut(self.Variables.adopted, Int(1)),\n App.globalPut(self.Variables.owner, Txn.application_args[1]),\n Approve()\n\n ])\n\n # To delete a product.\n\n def application_deletion(self):\n scratch_owner = ScratchVar(TealType.bytes)\n return Seq(\n scratch_owner.store(App.globalGet(self.Variables.owner)),\n # The number of arguments attached to the transaction should be exactly 1.\n Assert(Txn.application_args.length() == Int(1)),\n Return(\n scratch_owner.load() == Txn.application_args[0],\n ),\n )\n\n # Check transaction conditions\n def application_start(self):\n return Cond(\n # checks if the application_id field of a transaction matches 0.\n # If this is the case, the application does not exist yet, and the application_creation() method is called\n [Txn.application_id() == Int(0), self.application_creation()],\n # If the the OnComplete action of the transaction is DeleteApplication, the application_deletion() method is called\n [Txn.on_completion() == OnComplete.DeleteApplication,\n self.application_deletion()],\n # if the irst argument of the transaction matches the AppMethods.buy value, the buy() method is called.\n [Txn.application_args[0] == self.AppMethods.adopt, self.adopt()],\n )\n\n # The approval program is responsible for processing all application calls to the contract.\n def approval_program(self):\n return self.application_start()\n\n # The clear program is used to handle accounts using the clear call to remove the smart contract from their balance record.\n def clear_program(self):\n return Return(Int(1))\n","repo_name":"osas2211/algo-petshop","sub_path":"src/contracts/petshop_contract.py","file_name":"petshop_contract.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40452157895","text":"import json\nimport pandas as pd\nimport os\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport argparse\nimport sys\nimport logging\n\n\nparser = argparse.ArgumentParser(description='A script to query the European Nucleotide Archive (ENA) website to create a CSV file with information for each read sequence file.')\nparser.add_argument(\"csv\", help=\"The input CSV file. 
It requires a column for the ENA_PROJECT with the project accesion numbers. \\\n It also expects columns for METAGENOMICS_ANALYSES and METAGENOMICS_SAMPLES if the ENA_PROJECT is missing.\")\nparser.add_argument(\"cache\", help=\"The path to a directory to cache the downloads of the ENA report files.\")\nparser.add_argument(\"-o\",\"--output\", help=\"The output CSV file for the individual read files. Each read file is a separate row in the table.\")\nargs = parser.parse_args()\n\ndef human_readable_file_size(size):\n \"\"\"\n Returns a human readable file size string for a size in bytes. \n \n Adapted from https://stackoverflow.com/a/25613067)\n \"\"\"\n\n from math import log2 \n _suffixes = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']\n\n # determine binary order in steps of size 10 \n # (coerce to int, // still returns a float)\n order = int(log2(size) / 10) if size else 0\n # format file size\n # (.4g results in rounded numbers for exact matches and max 3 decimals, \n # should never resort to exponent values)\n return '{:.4g} {}'.format(size / (1 << (order * 10)), _suffixes[order])\n\n\ndef cached_download( url, local_path ):\n \"\"\"\n Downloads a file if a local file does not already exist.\n\n Args:\n url: The url of the file to download.\n local_path: The local path of where the file should be. If this file isn't there or the file size is zero then this function downloads it to this location.\n\n Raises:\n Exception: Raises an exception if it cannot download the file.\n\n \"\"\"\n if not os.path.isfile( local_path ) or os.path.getsize(local_path) == 0:\n try:\n print(f\"Downloading {url} to {local_path}\")\n urllib.request.urlretrieve(url, local_path)\n except:\n raise Exception(f\"Error downloading {url}\")\n\n if not os.path.isfile( local_path ):\n raise Exception(f\"Error reading {local_path}\")\n\ndef download_ena_report( accession, result_type, cache_dir ):\n \"\"\"\n Downloads a TSV file report from the European Nucleotide Archive (ENA) website if it is not already cached.\n\n If the file already is cached then it is not downloaded again.\n Args:\n accession: The accession id for the query. It can be a project accession id or a sample accession id.\n result_type: The type of report we are seeking (e.g. 'analysis' or 'read_run').\n cache_dir: The path to the directory where the downloaded files are stored.\n \n Returns:\n The local path to the ENA file report.\n\n Raises:\n Exception: Raises an exception if it cannot download the file.\n \"\"\" \n path = f\"{cache_dir}/{accession}.{result_type}.tsv\"\n tsv_url = f\"https://www.ebi.ac.uk/ena/data/warehouse/filereport?accession={accession}&result={result_type}\"\n cached_download( tsv_url, path )\n return path\n\ndef ena_report_df( accession, result_type, cache_dir ):\n \"\"\" Same as 'download_ena_report' but this function opens the TSV file as a Pandas dataframe. 
\"\"\"\n\n path = download_ena_report( accession, result_type, cache_dir )\n return pd.read_csv( path, sep='\\t' )\n\n\n\ndf = pd.read_csv(args.csv, encoding=\"ISO-8859-1\")\n\ndata = []\n\nprojects_count = len(df.index)\nfor index,row in df.iterrows():\n project_accession = row['ENA_PROJECT']\n print(f\"Project {index} of {projects_count}: {project_accession}\")\n\n # Download 'analysis' data for project to get the sample accession ids\n print(project_accession, \"analysis\" )\n try:\n analysis_df = ena_report_df( project_accession, \"analysis\", args.cache )\n except:\n # Sometimes the ENA_PROJECT element in the CSV files for this project for some reason.\n # If this is the case, we can try the METAGENOMICS_SAMPLES element instead.\n # However, since the first column (i.e. ENA_PROJECT) is missing, we get the 'METAGENOMICS_SAMPLES' value from the 'METAGENOMICS_ANALYSES' column.\n project_accession = row['METAGENOMICS_ANALYSES']\n try:\n analysis_df = ena_report_df( project_accession, \"analysis\", args.cache )\n except:\n logging.warning(f\"WARNING: Cannot read row: {row}\")\n continue\n\n\n sample_accessions = analysis_df['sample_accession'].unique()\n\n # Occasionally the 'analysls' table for the ENA project accession number is empty. \n # Usually when this happens, the project accession ID can be used as the 'sample' accession id to download the 'read_run' table\n if len(sample_accessions) == 0 or str(sample_accessions) == \"[nan]\":\n sample_accessions = [project_accession]\n\n print('sample_accessions:', sample_accessions)\n for sample_accession in sample_accessions:\n read_run_df = ena_report_df( sample_accession, \"read_run\", args.cache )\n print(read_run_df)\n #assert len(read_run_df.index) > 0\n if len(read_run_df.index) == 0:\n logging.warning(f\"WARNING: No reads found for {project_accession}\")\n\n for _, sample_row in read_run_df.iterrows():\n if pd.isna(sample_row)['fastq_ftp']:\n logging.warning(f\"WARNING: No FASTQ files found for {project_accession}\")\n continue\n\n fastq_bytes_list = str(sample_row['fastq_bytes']).split(\";\")\n fastq_md5_list = str(sample_row['fastq_md5']).split(\";\")\n fastq_ftp_list = str(sample_row['fastq_ftp']).split(\";\")\n fastq_aspera_list = str(sample_row['fastq_aspera']).split(\";\")\n #fastq_galaxy_list = str(sample_row['fastq_galaxy']).split(\";\")\n\n for file_index, (fastq_bytes, fastq_md5, fastq_ftp, fastq_aspera) in enumerate(zip(fastq_bytes_list, fastq_md5_list, fastq_ftp_list, fastq_aspera_list)):\n #Cast to float first in case there are decimal points in the string for bytes. 
See https://stackoverflow.com/a/8948303\n fastq_bytes = int(float(fastq_bytes))\n data.append( [project_accession, sample_row['sample_accession'], file_index, fastq_bytes, human_readable_file_size(fastq_bytes), fastq_md5, fastq_ftp, fastq_aspera] )\n\nftp_df = pd.DataFrame( data, columns=[\"project_accession\", \"sample_accession\", \"file_index\", \"fastq_bytes\", \"fastq_bytes_human_readable\", \"fastq_md5\", \"fastq_ftp\", \"fastq_aspera\"])\nprint(ftp_df)\n\noutput_path = args.output\n\nif not output_path:\n csv_filename, _ = os.path.splitext(args.csv)\n output_path = csv_filename + \"-FASTQ.csv\"\n\nftp_df.to_csv( output_path )\ntotal_bytes = ftp_df['fastq_bytes'].sum()\nprint(human_readable_file_size(total_bytes))","repo_name":"ParkvilleData/MetaGenePipe","sub_path":"scripts/query_ena.py","file_name":"query_ena.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71923790384","text":"#! python3\n# rgxSearch.py\n# opens all .txt files in a folder and searches for any line that matches a user-supplied regular expression\n\nimport os\nimport re\n\n# Enter the desired folder\npath = ''\nwhile os.path.isdir(path) != True:\n #path = input('Insert the path to the file: ')\n # just for example, remove this line and insert the line above to receive user input\n path = '/home/theeam/Documents/learning/python/automate the boring stuff with python/Chapter 8/regex search'\n\nos.chdir(path)\nprint('\\n\\t### You are at: ' + os.getcwd() + '\\n')\n\nfiles = os.listdir(path)\nprint('\\n\\t### Files found: ')\nprint(files)\n\n#regExpr = input('Insert the regex: ')\n# just for example, remove this line and insert the line above to receive user input\nregExpr = '(ADJECTIVE|NOUN|ADVERB|VERB)'\nregexSearch = re.compile(regExpr)\n\nmatches = []\nfor fileName in files:\n if '.txt' in fileName:\n textFile = open(fileName)\n fileContent = textFile.read()\n match = regexSearch.findall(fileContent)\n if match:\n for i in match:\n matches.append(i)\n textFile.close()\n\nprint('\\n\\t### Matches found: ')\nprint(matches)\n","repo_name":"barbixxxa/python","sub_path":"automate the boring stuff with python/Chapter 8/regex search/rgxSearch.py","file_name":"rgxSearch.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36698246673","text":"# -*- coding: utf-8 -*-\n# QuestApplyIf.py - A script to let an item be applied only if a quest\n# reached a certain step\n# Arguments are:\n# - quest name\n# - a list of steps, either single value (x) or range (x-y)\n# If any matches, then the item can be applied, else it is not applied\n\nimport Crossfire\n\nplayer = Crossfire.WhoIsActivator()\nparams = Crossfire.ScriptParameters()\nargs = params.split()\n\nquestname = args[0]\ncurrentstep = player.QuestGetState(questname)\n\n# by default, forbid applying\nCrossfire.SetReturnValue(1)\n\nfor rule in args[1:]:\n if rule.find(\"-\") == -1:\n startstep = int(rule)\n endstep = startstep\n else:\n startstep = int(rule.split(\"-\")[0])\n endstep= int(rule.split(\"-\")[1])\n if currentstep >= startstep and currentstep <= endstep:\n Crossfire.SetReturnValue(0)\n","repo_name":"TitusCF/HeroWorld","sub_path":"maps/python/quests/QuestApplyIf.py","file_name":"QuestApplyIf.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"91"} 
+{"seq_id":"1908011467","text":"from __future__ import division\nimport numpy as np\nfrom typing import List\nfrom data import DatasetManager\n\n\ndef predicate(x: List[float], cof: List[float]) -> List[float]:\n \"\"\"\n 根据给定的x和多项式,求解y\n\n :param x: 横坐标\n :param cof: 多项式系数\n \"\"\"\n\n n = len(x)\n degree = len(cof)\n y: List[float] = []\n\n for i in range(n):\n res = 0\n pow = 1\n for j in range(degree):\n res += cof[j] * pow\n pow *= x[i]\n y.append(res)\n\n return np.array(y)\n\n\ndef analysis_without_regular(data: DatasetManager, degree: int):\n \"\"\"\n 根据数据集,通过解析式拟合给定阶数的多项式,不含正则化项\n\n :param data: 二维点集数据集\n :param degree: 多项式的阶数\n \"\"\"\n\n n = len(data.x())\n X = np.ones((n, degree))\n for i in range(n):\n for j in range(1, degree):\n X[i][j] = X[i][j - 1] * data.x()[i]\n\n return np.ravel(np.mat(X.T @ X).I @ X.T @ data.y())\n\n\ndef analysis_with_regular(data: DatasetManager, degree: int, lam: float):\n \"\"\"\n 根据数据集,通过解析式拟合给定阶数的多项式,含正则化项\n\n :param data: 二维点集数据集\n :param degree: 多项式的阶数\n :param lam: 回归系数\n \"\"\"\n\n degree += 1\n\n n = len(data.x())\n X = np.ones((n, degree))\n for i in range(n):\n for j in range(1, degree):\n X[i][j] = X[i][j - 1] * data.x()[i]\n\n return np.ravel(np.mat(X.T @ X + lam * np.eye(degree)).I @ X.T @ data.y())\n\n\ndef calc_gradient(w: List[float], x: List[float], y: List[float], lam: float):\n \"\"\"\n 求解给定系数的多项式在某一点的梯度,含正则化项\n \"\"\"\n\n return (1 / len(y)) * (np.dot(x.T, np.dot(x, w) - y) + lam * w)\n\n\ndef gradient_descent(data: DatasetManager, degree: int, lam: float, lr: float,\n eps: float):\n \"\"\"\n 根据数据集,通过梯度下降法拟合给定阶数的多项式,含正则化项\n\n :param data: 二维点集数据集\n :param degree: 多项式的阶数\n :param lam: 回归系数\n :param lr: 学习率\n :param eps: 梯度控制精度\n \"\"\"\n\n degree += 1\n\n n = len(data.x())\n x = np.ones((n, degree))\n for i in range(n):\n for j in range(1, degree):\n x[i][j] = x[i][j - 1] * data.x()[i]\n y = data.y().reshape(n, 1)\n w = np.mat(np.zeros(degree)).T\n grad = calc_gradient(w, x, y, lam)\n\n while np.sum(np.absolute(grad)) > eps:\n w = w - lr * grad\n grad = calc_gradient(w, x, y, lam)\n\n return np.ravel(w)\n\n\ndef conjugate_gradient(data: DatasetManager, degree: int, lam: float):\n \"\"\"\n 根据数据集,通过共轭梯度法拟合给定阶数的多项式,含正则化项\n\n :param data: 二维点集数据集\n :param degree: 多项式的阶数\n :param lam: 回归系数\n \"\"\"\n\n degree += 1\n\n n = len(data.x())\n x = np.ones((n, degree))\n for i in range(n):\n for j in range(1, degree):\n x[i][j] = x[i][j - 1] * data.x()[i]\n y = data.y().reshape(n, 1)\n\n Q = (1 / n) * (x.T @ x + lam * np.mat(np.eye(degree)))\n W = np.mat(np.zeros(degree)).T\n\n r = -calc_gradient(W, x, y, lam)\n p = r\n for i in range(1, n):\n a = float((r.T * r) / (p.T * Q * p))\n r_prev = r\n W = W + a * p\n r = r - a * Q * p\n p = r + float((r.T * r) / (r_prev.T * r_prev)) * p\n\n return np.ravel(W)\n","repo_name":"zirui-HIT/HIT_Lab","sub_path":"Machine_Learning/Lab1/src/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"7246690380","text":"import sys\r\n\r\ndef solution():\r\n N, K = map(int,sys.stdin.readline().split(\" \"))\r\n cost = []\r\n ways = []\r\n parents = []\r\n complete = [False] * N\r\n pre = [0] * N\r\n queue = []\r\n ans = [0] * N\r\n cost = list(map(int,sys.stdin.readline().split(\" \")))\r\n for i in range(N):\r\n ways.append([])\r\n parents.append([])\r\n for _ in range(K):\r\n first, second = map(int,sys.stdin.readline().split(\" \"))\r\n pre[second-1] += 1\r\n 
parents[second-1].append(first-1)\r\n ways[first-1].append(second-1)\r\n for i in range(N):\r\n if len(parents[i])==0:\r\n queue.append(i)\r\n ans[i]=cost[i]\r\n\r\n out = int(input())\r\n\r\n while(not complete[out-1]):\r\n complete[queue[0]] = True\r\n for node in ways[queue[0]]:\r\n pre[node] -= 1\r\n if pre[node]==0: queue.append(node)\r\n ans[node] = max(ans[node] , ans[queue[0]] + cost[node])\r\n del queue[0]\r\n\r\n print(ans[out-1])\r\n \r\nfor _ in range(int(sys.stdin.readline())):\r\n solution()\r\n","repo_name":"changdae20/BOJ","sub_path":"백준/Gold/1005. ACM Craft/ACM Craft.py","file_name":"ACM Craft.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29303947932","text":"import imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\nimport cv2\n\nimport numpy as np\nfrom PIL import Image\n\nseqRot90 = iaa.Sequential([\n iaa.Rotate(90)\n])\n\nseqFlipH = iaa.Sequential([\n iaa.Fliplr(1)\n])\n\nseqFlipV = iaa.Sequential([\n iaa.Flipud(1)\n])\n\nseqRot180 = iaa.Sequential([\n iaa.Rotate(180)\n])\n\nseqRot270 = iaa.Sequential([\n iaa.Rotate(270)\n])\n\ndef aug_test(image, all_boxes):\n\n lista_boxes = []\n #Se transforman las cajas a un formato manejable por imgaug\n for c, boxes_tmp in enumerate(all_boxes):\n for box in boxes_tmp:\n obj = BoundingBox(x1=box[0], y1=box[1],x2=box[0]+box[2], y2=box[1]+box[3], label=c)\n lista_boxes.append(obj)\n \n bbs = BoundingBoxesOnImage(lista_boxes, shape=image.shape)\n # Se especifican las aumentaciones a realizar\n lista_aumentaciones = [seqRot90, seqFlipH, seqFlipV, seqRot180, seqRot270]\n # Se crea una lista que almacena una tupla (imagen, etiquetado),\n # Almacenando la original en la primera posicion\n images_with_boxes = [(image, bbs)]\n # Por cada aumentación, se genera una nueva imagen, junto con su etiquetado\n for aument in lista_aumentaciones:\n image_aug, bbs_aug = aument(image=image, bounding_boxes=bbs)\n #Se eliminan boxes exteriores\n bbs_aug = bbs_aug.remove_out_of_image().clip_out_of_image()\n\n images_with_boxes.append((image_aug, bbs_aug))\n \n return images_with_boxes\n","repo_name":"nerfshacopls/tfg_2023","sub_path":"img_augmentator.py","file_name":"img_augmentator.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36976763892","text":"\nfrom traceback import print_tb\n\n\nvocabulary = {}\n\nvocabulary['student'] = '학생'\nvocabulary['teacher'] = '선생님'\nvocabulary['classroom'] = '교실'\nvocabulary['smart'] = '똑똑한'\nvocabulary['lunch'] = '점심'\n\n# # 저장\n# word = input('저장할 단어를 입력하세요 >> ')\n\n\n# # 최대 5개까지\n# # 중복 불가\n# if len(vocabulary) == 0 :\n# meaning = input('단어의 뜻을 입력하세요 >> ')\n# vocabulary[word] = meaning\n# elif len(vocabulary) > 0 and len(vocabulary) < 5 :\n# for key in vocabulary.keys() :\n# if key == word :\n# print('이미 등록된 단어입니다.')\n# break\n# else :\n# meaning = input('단어의 뜻을 입력하세요 >> ')\n# vocabulary[word] = meaning\n# break\n# else :\n# print('단어는 최대 5개까지만 입력가능합니다.')\n\n# print(vocabulary)\n\n# 검색\n# word = input('찾을 단어를 입력하세요 >> ')\n# lengthOfWord = len(word)\n\n# for key in vocabulary.keys() :\n# trimmedKey = key[0:lengthOfWord]\n# if trimmedKey == word :\n# print('{} : {}' .format(key, vocabulary[key]))\n# break\n# else :\n# print('단어를 검색할 수 없습니다')\n# break\n\n\n# 수정\n# word = input('수정할 단어를 입력하세요 >> ')\n# isExist = False\n\n# for key in 
vocabulary.keys() :\n# if key == word : \n# meaning = input('뜻을 입력하세요 >> ')\n# vocabulary[key] = meaning\n# isExist = True\n# break\n\n# if not isExist :\n# print('단어를 검색할 수 없습니다')\n\n# print(vocabulary)\n\n# 삭제\n# word = input('삭제할 단어를 입력하세요 >> ')\n# isExist = False\n\n# for key in vocabulary.keys() :\n# if key == word : \n# del vocabulary[key]\n# isExist = True\n# print('단어를 삭제하였습니다')\n# break\n\n# if not isExist :\n# print('단어를 검색할 수 없습니다')\n\n# print(vocabulary)\n\n# 목록\n# print('1.오름차순 2.내림차순')\n# submenu = input('원하는 정렬 방식을 입력하세요 >> ')\n# sortedVocabulary = sorted(vocabulary.items())\n# reversedVocabulary = reversed(sortedVocabulary)\n\n# if submenu == '1' :\n# for item in sortedVocabulary :\n# print('{} : {}' .format(item[0], item[1]))\n# elif submenu == '2' :\n# for item in reversedVocabulary :\n# print('{} : {}' .format(item[0], item[1]))\n# else :\n# print('잘못 입력하셨습니다.') \n# pass\n\n\n# 통계\ndef infoOfVocabulary() :\n print('저장된 단어 갯수 : {}' .format(len(vocabulary)))\n\n longestWord = None\n for word1 in vocabulary.keys() :\n longestWord = word1\n for word2 in vocabulary.keys() :\n if len(longestWord) <= len(word2) :\n longestWord = word2 \n print('단어의 문자 수가 가장 많은 단어 : {}' .format(longestWord))\n\n import operator \n vocabularyWithLength = {}\n for item1 in vocabulary.keys() :\n for item2 in vocabulary.keys() :\n result = operator.ge(len(item1), len(item2))\n print(result)\n if result :\n item1\n else :\n item2\n # print(sortedVocabulary)\n print('단어 글자 수 내림차순 출력(단어만)')\n\ninfoOfVocabulary()\n# 종료\n\n# 공통\n# 파일 IO","repo_name":"k-juyeong/backup","sub_path":"vocabulary/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41818180160","text":"\"\"\"\n1초마다 반복해서 동작하는 코드 만들기\n\"\"\"\n\nimport tkinter\nimport tkinter.font\n\n# window 객체 생성\nwindow = tkinter.Tk()\n\n# 타이틀을 정의\nwindow.title(\"가상화폐 금액표시\")\n\n# GUI의 사이즈를 설정\nwindow.geometry(\"400x200\")\n\n# 가로세로의 크기를 조절하지 못하도록 설정\nwindow.resizable(False, False)\n\n\n\n# 폰트를 적용하여 hello의 문자열을 출력\nfont = tkinter.font.Font(size=30)\nlabel = tkinter.Label(window, text=\"hello\", font=font)\nlabel.pack()\n\ncnt = 0\n\n# 1초마다 실행되는 함수\ndef get_coin_1sec():\n # 함수 안에서 cnt의 전역변수를 사용하기 위해 global를 붙여 전역변수인 cnt를 사용\n global cnt\n now_btc_price = str(cnt)\n cnt = cnt + 1\n \n # 라벨의 text를 변경\n label.config(text=now_btc_price)\n \n # 1초 후에 get_coin_1sec 함수를 불러온다.\n window.after(1000, get_coin_1sec)\n\n# 한 번 실행 후 자기 자신을 1초마다 호출\nget_coin_1sec()\n\n# GUI를 계속 실행하기 위해 mainloop를 살행\nwindow.mainloop()","repo_name":"woogieReal/python-40-programs","sub_path":"tkinter/30.가상화폐_금액표시_GUI_프로그램_만들기/main30-03.py","file_name":"main30-03.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"23978697425","text":"class Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n i, reach = 0, 0 \n while i < n and i <= reach:\n reach = max(i + nums[i], reach)\n i += 1\n return i == n\n","repo_name":"wuhao007/LeetCode","sub_path":"jump-game_1.py","file_name":"jump-game_1.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"9871183495","text":"from fastapi import APIRouter, HTTPException\n\nfrom back.crud.async_gallery import get_crud_async_gallery, get_gallery_by_gallery_id\nfrom back.dependency.security 
import api_security\nfrom back.model.gallery import Gallery\nfrom back.model.scope import ScopeEnum\n\nrouter = APIRouter(tags=[\"Gallery Tag\"])\n\n\n@router.get(\n \"/{gallery_id}/tag\",\n response_model=Gallery,\n dependencies=[api_security([ScopeEnum.gallery_tag_get.value])],\n)\nasync def get_tag(gallery_id: str) -> Gallery:\n gallery = await get_gallery_by_gallery_id(gallery_id)\n return gallery\n\n\n@router.post(\n \"/{gallery_id}/tag\",\n response_model=Gallery,\n dependencies=[api_security([ScopeEnum.gallery_tag_post.value])],\n)\nasync def post_tag(gallery_id: str, gallery: Gallery) -> Gallery:\n if gallery_id != gallery.id:\n raise HTTPException(\n status_code=409, detail=\"Conflict between post body and url parameter\"\n )\n\n crud = await get_crud_async_gallery(gallery_id)\n return await crud.update(gallery)\n","repo_name":"ZetsuBouKyo/ZetsuBou","sub_path":"back/api/v1/gallery/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"42447858136","text":"import setuptools\nimport agrimetscraper\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\n\nsetuptools.setup(\n name=\"agrimetscraper\",\n version=agrimetscraper.__version__,\n author=\"Xiaoping Li\",\n author_email=\"lixiaopi@oregonstate.edu\",\n description=\"A package to scrape AgriMet weather data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/lixiaopi1985/agrimet_scraper\",\n scripts=['agrimetscraper/bin/startproject.py',],\n packages=setuptools.find_packages(),\n install_requires=['pandas', 'numpy', 'python-crontab', 'requests', 'fake_useragent', 'bs4', 'pymongo', 'dnspython'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n )\n","repo_name":"lixiaopi1985/agrimet_scraper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29567180494","text":"import tkinter as tk\nfrom tkinter import filedialog\nimport pandas as pd\n\ndef fechar_janela():\n janela.quit()\n\ndef center_window(win, width=350, height=300):\n screen_width = win.winfo_screenwidth()\n screen_height = win.winfo_screenheight()\n \n x = (screen_width/2) - (width/2)\n y = (screen_height/2) - (height/2)\n\n win.geometry(f'{width}x{height}+{int(x)}+{int(y)}')\n\ndef carregar_arquivo():\n janela.lift()\n filepath = filedialog.askopenfilename(filetypes=[(\"Planilhas Excel\", \"*.xlsx\")])\n if filepath:\n global df\n df = pd.read_excel(filepath)\n exibir_janela_resultados()\n\ndef voltar_e_mostrar_principal(janela_resultados):\n janela_resultados.destroy()\n janela.deiconify()\n\ndef atualizar_informacoes():\n texto_resultados.delete(\"1.0\", tk.END)\n texto_resultados.insert(tk.END, df.to_string(index=True, justify='center'))\n\ndef adaptar_geometria(janela):\n largura = janela.winfo_screenwidth()\n altura = janela.winfo_screenheight()\n\n proporcao_largura = 0.8\n proporcao_altura = 0.8\n\n largura_janela = int(largura * proporcao_largura)\n altura_janela = int(altura * proporcao_altura)\n\n posicao_x = (largura - largura_janela) // 2\n posicao_y = (altura - altura_janela) // 2\n\n janela.geometry(f\"{largura_janela}x{altura_janela}+{posicao_x}+{posicao_y}\")\n janela.minsize(width=667, 
height=375)\n\ndef exibir_janela_resultados():\n janela_resultados = tk.Toplevel(janela)\n janela_resultados.title(\"Resultados\")\n adaptar_geometria(janela_resultados)\n\n global texto_resultados\n\n frame_topo = tk.Frame(janela_resultados)\n frame_topo.pack(side=tk.TOP, pady=10)\n\n botao_voltar = tk.Button(frame_topo, text=\"Voltar\", command=lambda: voltar_e_mostrar_principal(janela_resultados), bg=\"green\", fg=\"white\")\n botao_voltar.pack(side=tk.LEFT, padx=5)\n\n frame_baixo = tk.Frame(janela_resultados)\n frame_baixo.pack(side=tk.BOTTOM, pady=10)\n\n botao_carregar_outro_arquivo = tk.Button(frame_baixo, text=\"Carregar Outro Arquivo\", command=carregar_arquivo, bg=\"green\", fg=\"white\")\n botao_carregar_outro_arquivo.pack(side=tk.BOTTOM, padx=10)\n\n texto_resultados = tk.Text(janela_resultados, width=105, height=50)\n texto_resultados.insert(tk.END, df.to_string(index=True, justify='center'))\n texto_resultados.tag_configure(\"left\", justify='left')\n scrollbar_horizontal = tk.Scrollbar(janela_resultados, orient=\"horizontal\", command=texto_resultados.xview)\n scrollbar_horizontal.pack(side=tk.BOTTOM, fill=tk.X)\n texto_resultados.configure(xscrollcommand=scrollbar_horizontal.set)\n texto_resultados.insert(tk.END, df.to_string(index=True, justify='center'))\n texto_resultados.pack(expand=True, fill=tk.BOTH)\n texto_resultados.config(wrap=\"none\")\n\njanela = tk.Tk()\njanela.title(\"Projeto Dashboard\")\n\ncenter_window(janela)\n\nrotulo_titulo = tk.Label(janela, text=\"LEITOR DE EXCEL\")\nrotulo_titulo.pack()\nrotulo_titulo.place(relx=0.5, rely=0.3, anchor=\"center\")\n\nbotao_carregar = tk.Button(janela, text=\"Selecionar Arquivo\", command=carregar_arquivo, bg=\"green\", fg=\"white\")\nbotao_carregar.pack()\nbotao_carregar.place(relx=0.5, rely=0.5, anchor=\"center\")\n\nbotao_sair = tk.Button(janela, text=\"SAIR\", command=fechar_janela, bg=\"red\", fg=\"white\")\nbotao_sair.pack()\nbotao_sair.place(relx=1, rely=1, anchor=\"se\")\n\njanela.mainloop()\n","repo_name":"robertosilvati/Dashbord-Python-Excel","sub_path":"Dash.py","file_name":"Dash.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73090627184","text":"import argparse\nimport asyncio\nimport traceback\n\nfrom slacker import Slacker\n\nfrom JenkinsBuildBot import JenkinsBuildBot\n\n\nasync def main():\n parser = argparse.ArgumentParser(description=\"Simple jenkins build invoke bot\")\n\n parser.add_argument('bot_token', help='Bot User OAuth Access Token')\n parser.add_argument('user_token', help='OAuth Access Token')\n parser.add_argument('jenkins_url', help='jenkins server url')\n\n args = parser.parse_args()\n\n bot_slack = Slacker(args.bot_token)\n user_slack = Slacker(args.user_token)\n\n bot = JenkinsBuildBot(bot_slack, user_slack, args.jenkins_url)\n while True:\n try:\n await bot._listen()\n except:\n traceback.print_exc()\n await asyncio.sleep(60)\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"kdw9502/jenkins-build-slack-bot","sub_path":"StartBot.py","file_name":"StartBot.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"4503309933","text":"from django.shortcuts import render\nfrom core.models import Streak, Profile\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import CoinsEarnerLeaderBoard, CreatorLeaderBoard, ReferralLeaderBoard\n# Create your 
views here.\n\n\n\"\"\"\nAdd Pagination To All The Leaderboard\n\nAnd also make all the leaderboard daily\n\"\"\"\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef Leaderboard(request):\n user = request.user\n profile = Profile.objects.get(user=user)\n context={\n 'nav': 'leaderboard',\n 'profile': profile,\n }\n return render(request, 'leaderboard/leaderboard.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef StreakLeaderBoard(request):\n leaders = Streak.objects.all().order_by('-length', '-question')[0:1000]\n # add the get absolute url function to the profile\n # add pagination and waypoint or ajax I think Ajax will be more controllable\n\n context = {\n 'leaders': leaders,\n }\n\n return render(request, 'leaderboard/streak.html', context)\n\n\n# add the function for reward that will be triggered by celery. it wont't be a view function\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef WealthLeaderBoard(request,*args, **kwargs):\n leaders = CoinsEarnerLeaderBoard.objects.all().order_by('-coins')[0:1000]\n context = {\n 'leaders' : leaders,\n }\n\n return render(request, 'leaderboard/wealth.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef CreatorsLeaderBoard(request,*args, **kwargs):\n leaders = CreatorLeaderBoard.objects.all().order_by('-coins')[0:1000]\n context = {\n 'leaders' : leaders,\n }\n\n return render(request, 'leaderboard/wealth.html', context)\n\n\n\n\n@login_required(redirect_field_name='next', login_url='account_login')\ndef ReferralLeaderBoard(request,*args, **kwargs):\n leaders = ReferralLeaderBoard.objects.all().order_by('-refers')\n context = {\n 'leaders' : leaders,\n }\n\n return render(request, 'leaderboard/referral.html', context)\n\n\n\n\n\n","repo_name":"PeaceTem/student","sub_path":"leaderboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15415327131","text":"from setuptools import find_packages, setup\n\n# some RDKit versions are not recognized by setuptools\n# -> check if RDKit is installed by attempting to import it\n# -> if RDKit can be imported, do not add it to install_requires\nrdkit = False\ntry:\n from rdkit import Chem\n\n rdkit = True\nexcept ImportError:\n pass\n\nrdkit_requirement = [\"rdkit>=2022.3.3\"] if not rdkit else []\n\nsetup(\n name=\"hypothesis-rdkit\",\n version=\"0.5.2\",\n maintainer=\"Steffen Hirte\",\n maintainer_email=\"shirte@users.noreply.github.com\",\n packages=find_packages(),\n url=\"https://github.com/shirte/hypothesis-rdkit\",\n description=\"Hypothesis strategies for RDKit\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=(\"LICENSE\",),\n install_requires=[\"hypothesis\", \"tqdm\"] + rdkit_requirement,\n extras_require={\"dev\": [\"black\", \"isort\"], \"test\": [\"pytest\"]},\n entry_points={\"hypothesis\": {\"_ = hypothesis_rdkit.hook:_hypothesis_setup_hook\"}},\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Scientific/Engineering :: Chemistry\",\n \"Framework :: Hypothesis\",\n \"License :: OSI Approved :: MIT License\",\n 
],\n)\n","repo_name":"shirte/hypothesis-rdkit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1869531019","text":"# coding:utf-8\nimport json\n\nimport time\nfrom tornado.websocket import WebSocketHandler\nimport threading\nimport commands\n\nfrom logger_conf import logger\n\n__author__ = 'uv2sun'\n\n\nclass ExecHandler(WebSocketHandler):\n def data_received(self, chunk):\n pass\n\n def on_message(self, message):\n logger.debug('received cmd, %s' % (message,))\n msg = json.loads(message)\n if msg and msg['cmds']:\n Executor(msg['cmds'], self).start()\n else:\n self.write_message(json.dumps({'msg_type': 'cmd_err', 'msg': 'no commands to execute'}))\n self.close(code=9, reason='no commands to execute')\n\n def open(self, *args, **kwargs):\n logger.debug(\"received exec request\")\n logger.debug(self.request)\n\n def on_close(self):\n logger.debug('close websocket request')\n\n def check_origin(self, origin):\n \"\"\"跨域检查永远通过\"\"\"\n return True\n\n\nclass Executor(threading.Thread):\n \"\"\"命令执行器,单独线程执行\"\"\"\n\n def __init__(self, cmds, websocket):\n super(Executor, self).__init__()\n self.cmds = cmds\n self.websocket = websocket\n\n def exec_cmd(self):\n for cmd in self.cmds:\n msg = 'begin to exec [%s]' % (cmd,)\n self.websocket.write_message(json.dumps({'msg_type': 'status', 'msg': msg}))\n logger.debug(msg)\n result = commands.getstatusoutput(cmd=cmd)\n logger.debug('执行结果:%s' % (result,))\n exit_code = result[0]\n echo = result[1]\n if exit_code == 0:\n msg = {'msg_type': 'status', 'msg': 'exec [%s] ok' % (cmd,)}\n time.sleep(5)\n self.websocket.write_message(json.dumps(msg))\n logger.debug(msg)\n else:\n logger.error('exec [%s] error, %s' % (cmd, result))\n # self.websocket.write_message('exec [%s] error, exit=%s, echo=%s', (cmd, exit_code, echo))\n self.websocket.write_message(json.dumps({'msg_type': 'cmd_err', 'msg': echo}))\n self.websocket.close(code=exit_code, reason=echo)\n return\n msg = {'msg_type': 'cmd_end', 'msg': 'exec cmds success'}\n logger.debug(msg)\n self.websocket.write_message(json.dumps(msg))\n logger.debug(\"exec over ok, send close event.\")\n self.websocket.close(code=0)\n\n def run(self):\n self.exec_cmd()\n","repo_name":"rogetsun/PM-Node","sub_path":"router/exec_router.py","file_name":"exec_router.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"27434860539","text":"import sys\nimport os\nimport astropy.io.fits as pyfits\nimport numpy\nimport math\nimport datetime\n\nimport matplotlib\nimport matplotlib.pyplot\n\nfrom podi_observingplots import *\nfrom podi_definitions import *\nfrom podi_commandline import *\n\n\n\nif __name__ == \"__main__\":\n\n print(\"Reading data\")\n direntry, arrays = read_data_from_files(get_clean_cmdline()[1:])\n obstype, exptime, filtername, photzp, photzpe, mjd, dateobs, airmass = arrays\n\n # Now create the plots\n\n\n # This is the MJD of 01/01/0001\n mjd_zeropoint = 1721424.500000 - 2400000.5 + (7./24.0)\n # Find out the start and end times of the data block\n time_start = numpy.min(mjd)-mjd_zeropoint\n time_end = numpy.max(numpy.array(mjd)+numpy.array(exptime)/86400)-mjd_zeropoint\n\n print(time_start*24, time_end*24.)\n hour_start = math.floor(time_start*24.)\n hour_end = math.ceil(time_end*24.)\n n_hours = int(hour_end - hour_start)\n print(n_hours, hour_start, hour_end)\n\n 
\n print(\"setting up plot\")\n fig = matplotlib.pyplot.figure()\n\n all_axes = []\n for cur_hour in range(n_hours):\n\n subplot_id = n_hours * 100 + 10 + (cur_hour+1)\n if (len(all_axes) == 0):\n ax = fig.add_subplot(n_hours, 1, cur_hour+1)\n else:\n ax = fig.add_subplot(n_hours, 1, cur_hour+1, sharey=all_axes[0])\n\n fiveminutes = matplotlib.dates.MinuteLocator(interval=5)\n minutes = matplotlib.dates.MinuteLocator(interval=1)\n hfmt = matplotlib.dates.DateFormatter(':%M')\n ax.xaxis.set_major_locator(fiveminutes)\n ax.xaxis.set_minor_locator(minutes)\n ax.xaxis.set_major_formatter(hfmt)\n ax.set_xlim(((hour_start+cur_hour)/24., (hour_start+cur_hour+1)/24.-1e-6))\n ax.axes.yaxis.set_ticks([])\n\n hour_of_the_day = int(hour_start + cur_hour) % 24\n #print hour_of_the_day\n\n date = datetime.date.fromordinal(int(math.floor((hour_start + cur_hour)/24.))).strftime(\"%d/%m/%y\")\n \n if (n_hours > 7):\n ylabel = \"%s-%02dh\" % (date, hour_of_the_day)\n else:\n ylabel = \"%s\\n%02dh MST\" % (date, hour_of_the_day)\n\n ax.set_ylabel(ylabel,\n rotation=\"horizontal\",\n verticalalignment=\"center\",\n horizontalalignment=\"right\")\n all_axes.append(ax)\n\n all_axes[0].set_ylim((0,1))\n# all_axes[0].set_title(\"Observing efficiency\")\n fig.suptitle(\"Observing efficiency\", fontsize=20)\n\n for i in range(len(all_axes)-1): #ax in all_axes[:-1]:\n #ax.axes.get_xaxis().set_visible(False)\n all_axes[i].axes.xaxis.set_ticklabels([])\n\n cc = matplotlib.colors.ColorConverter()\n\n\n def dzp_to_transparency(d_zp):\n return 100.*numpy.power(10., 0.4*d_zp)\n\n matplotlib.pyplot.subplots_adjust(bottom=.05, \n top=0.92, \n right=0.98, \n left=0.13 if n_hours <= 7 else 0.18)\n\n #\n # Now add the polygons to show the shutter-open efficiency.\n #\n # First, compute all times for each of the frames\n top_level = 1\n height = 1\n efficiency_plot = []\n efficiency_colors = []\n\n poly_for_axes = [[]] * n_hours\n polyc_for_axes = [[]] * n_hours\n print(poly_for_axes)\n\n for filename in direntry:\n \n this_file = direntry[filename]\n\n # compute all times\n # but first apply the MJD zeropoint to convert times to the matplotlib\n # format\n mjdobs = this_file['MJD-OBS'] - mjd_zeropoint\n init = mjdobs - seconds2mjd(10.)\n start = mjdobs\n end = mjdobs + seconds2mjd(this_file['EXPMEAS'])\n complete = end + seconds2mjd(25.)\n \n # Determine which hour we need\n hour_slot = int( math.floor(init*24.) - hour_start )\n print(hour_start, init*24, hour_slot)\n hour_slot=0\n\n this_color = 'grey'\n if (this_file['FILTER'] in known_filters):\n zp,amt,col = known_filters[this_file['FILTER']]\n this_color = col\n \n if (math.floor(init*24.) 
< math.floor(complete*24.)):\n # This block spans multiple hours\n hour_break = math.floor(complete*24.)/24.\n poly_start = [[init, top_level],\n [start if start < hour_break else hour_break , top_level-height],\n [end if end < hour_break else hour_break, top_level-height],\n [complete if end < hour_break else hour_break, top_level]\n ]\n poly_end = [[init if init > hour_break else hour_break, top_level],\n [start if start > hour_break else hour_break , top_level-height],\n [end if end > hour_break else hour_break, top_level-height],\n [complete if end > hour_break else hour_break, top_level]\n ]\n\n poly_for_axes[hour_slot].append(poly_start)\n polyc_for_axes[hour_slot].append(this_color)\n\n poly_for_axes[hour_slot+1].append(poly_end)\n polyc_for_axes[hour_slot+1].append(this_color)\n\n pass\n else:\n this_poly = [[init, top_level],\n [start, top_level-height],\n [end, top_level-height],\n [complete, top_level]\n ]\n poly_for_axes[hour_slot].append(this_poly)\n polyc_for_axes[hour_slot].append(this_color)\n\n\n for i in range(len(all_axes)):\n\n # and then plots all the polygons\n coll = matplotlib.collections.PolyCollection(poly_for_axes[i],\n facecolor=polyc_for_axes[i],\n #edgecolor='#808080', \n edgecolor=polyc_for_axes[i],\n #edgecolor='none',\n linestyle='-')\n all_axes[i].add_collection(coll)\n\n\n #tax.set_ylim((1,200))\n #tax.set_yscale('log')\n #tax.legend(loc='best', borderaxespad=1)\n\n # Set output size to 900x500 pixels\n fig.set_size_inches(9,5)\n output_filename = cmdline_arg_set_or_default(\"-output\", \"shutter_open.png\")\n print(\"Saving output to file\",output_filename)\n fig.savefig(output_filename, dpi=100)\n\n #tfig.savefig(\"transparency_trend.png\")\n\n if (cmdline_arg_isset(\"-show\")):\n matplotlib.pyplot.show()\n\n\n","repo_name":"WIYN-ODI/QuickReduce","sub_path":"podi_openshutterplot.py","file_name":"podi_openshutterplot.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"10762264666","text":"lista = []\r\nmaior = 0\r\nposicao = 0\r\nn = 0\r\nwhile n < 100:\r\n x = int(input())\r\n lista.append(x)\r\n n += 1\r\n if x > maior:\r\n maior = x\r\n posicao = lista.index(x) + 1\r\nprint(maior)\r\nprint(posicao)","repo_name":"LoboNeves/URI-Python","sub_path":"1080-Highest and Position.py","file_name":"1080-Highest and Position.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"42436035587","text":"import os\nimport sys\nimport sysconfig\n\n# define the path to mapserver test data\nTESTS_PATH = \"../../tests\"\n\nTESTMAPFILE = os.path.join(TESTS_PATH, \"test.map\")\nXMARKS_IMAGE = os.path.join(TESTS_PATH, \"xmarks.png\")\nTEST_IMAGE = os.path.join(TESTS_PATH, \"test.png\")\n\n# Put local build directory on head of python path\nplatformdir = \"-\".join(\n (sysconfig.get_platform(), \".\".join(map(str, sys.version_info[0:2])))\n)\nsys.path.insert(0, os.path.join(\"build\", \"lib.\" + platformdir))\n\n# import mapscript from the local build directory\nimport mapscript\n\n# normalize names, allows testing of module that uses the experimental\n# next generation names\nclassnames = [\n \"mapObj\",\n \"layerObj\",\n \"classObj\",\n \"styleObj\",\n \"shapeObj\",\n \"lineObj\",\n \"pointObj\",\n \"rectObj\",\n \"outputFormatObj\",\n \"symbolObj\",\n \"symbolSetObj\",\n \"colorObj\",\n \"imageObj\",\n \"shapefileObj\",\n \"projectionObj\",\n \"fontSetObj\",\n 
\"hashTableObj\",\n]\n\nfor name in classnames:\n try:\n new_name = name.replace(\"Obj\", \"\")\n new_name = new_name.capitalize()\n new_object = getattr(mapscript, new_name)\n setattr(mapscript, name, new_object)\n except AttributeError:\n pass\n","repo_name":"MapServer/MapServer","sub_path":"src/mapscript/python/tests/timing/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":934,"dataset":"github-code","pt":"91"} +{"seq_id":"3616064624","text":"#! /usr/bin/python\nfrom __future__ import division\n\nfrom pytronica import *\n\n\nf = Controller(1000)\nf.lineto(.01, 250)\nf.lineto(.08, 40)\nf.lineto(1, 40)\n\ne = Controller(1)\ne.lineto(.13, 1)\ne.lineto(.18, 0)\na = e * Saw(f)\n\nstep = 60/120\nc = Chain()\nfor _ in range(4):\n c.add(a, step)\nc = Chain([c]*16)\nc.play()\n#c.audacity()\n","repo_name":"chriswatrous/pytronica","sub_path":"songs/experiments/bass_drum.py","file_name":"bass_drum.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2655457558","text":"import pandas as pd\nimport numpy as np\nimport random\nimport logging\nfrom collections import Counter\nfrom boruta import BorutaPy\nfrom lightgbm import LGBMRegressor\nimport gc\n\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\ndef haversine(lat1, lon1, lat2, lon2, to_radians=True, earth_radius=6371):\n \"\"\"\n slightly modified version: of http://stackoverflow.com/a/29546836/2901002\n\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees or in radians)\n\n All (lat, lon) coordinates must have numeric dtypes and be of equal length.\n\n \"\"\"\n if to_radians:\n lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])\n\n a = np.sin((lat2-lat1)/2.0)**2 + \\\n np.cos(lat1) * np.cos(lat2) * np.sin((lon2-lon1)/2.0)**2\n\n return earth_radius * 2 * np.arcsin(np.sqrt(a))\n\n\n\n\ndef get_distance(df:pd.DataFrame):\n '''\n function to calculates distances to each corner, calculated from minimum and\n maximum latitude and longitude\n df: pd.DataFrame\n '''\n\n\n df['min_latitude'] = min(df['latitude'])\n df['min_longitude'] = min(df['longitude'])\n \n df['max_latitude'] = max(df['latitude'])\n df['max_longitude'] = max(df['longitude'])\n\n \n for i in ['min','max']:\n for j in ['min','max']:\n \n df['distance_'+i+'_'+j] = df[[i+'_latitude', j+'_longitude', 'latitude','longitude']].apply(lambda x: haversine(x[1], x[0], x[3], x[2]), axis=1)\n \n \n df.drop(columns=['min_latitude','min_longitude','max_latitude','max_longitude'],inplace=True)\n \n \n return df\n\n\ndef price_interpol(grouper:list,shft:list):\n '''\n interpolate the missing values for the missing period for each grouper dataframe\n grouper: list of pd.DataFrames\n shft: amount of periods in the past to account for \n '''\n\n grouper = grouper.iloc[:-shft,:]\n grouper['year_month'] = grouper['year_month'].astype(str)+'-01'\n grouper['year_month'] = pd.to_datetime(grouper['year_month'])\n \n months = pd.date_range(\n start=str(grouper['year_month'].min()), end=str(grouper['year_month'].max()), freq='MS')\n \n grouper['year_month'] = grouper['year_month'].astype(str).str[:7]\n \n col = grouper.iloc[:,0] \n for month in months:\n for group in col.unique():\n if group != group:\n continue\n \n if str(month)[:7] not in grouper[grouper[col.name]==group]['year_month'].values:\n 
grouper = grouper.append(pd.DataFrame([[group,str(month)[:7],np.nan,np.nan]],\n columns=grouper.columns)).reset_index(drop=True)\n \n grouper = grouper.sort_values(by='year_month').reset_index(drop=True)\n \n grouper['year_month'] = grouper['year_month'].astype(str)+'-01'\n grouper['year_month'] = pd.to_datetime(grouper['year_month'])\n grouper.set_index('year_month',inplace=True)\n \n grouper[grouper.columns[1]] = grouper[grouper.columns[1]].astype(float)\n grouper[grouper.columns[2]] = grouper[grouper.columns[2]].astype(float)\n \n for group in col.unique():\n grouper.loc[grouper[col.name]==group,grouper.columns[1]] = grouper.loc[grouper[col.name]==group,grouper.columns[1]].interpolate(method='time').bfill().ffill()\n grouper.loc[grouper[col.name]==group,grouper.columns[2]] = grouper.loc[grouper[col.name]==group,grouper.columns[2]].interpolate(method='time').bfill().ffill()\n \n grouper = grouper.reset_index()\n \n grouper['year_month'] = grouper['year_month'].astype(str).str[:7] \n return grouper\n\n\ndef create_group(df:pd.DataFrame,groupers:list,col:str,period:str,shft:list,interpol=False):\n '''\n creates the groups of 'price' mean and median by `col` and `period`, shifts it\n by each value of `shft`, calls the `price_interpol` functon if `interpol` is True\n and store them in the `groupers` list\n '''\n \n for sh in shft:\n \n temp = df.copy()\n \n temp['price'] = temp.groupby([col,period])['price'].shift(-sh)\n\n grouper = temp.groupby([col,period]).agg(price_mean=('price','mean'),price_median=('price','median')).reset_index()\n\n if interpol: grouper = price_interpol(grouper,max(shft))\n \n grouper.rename(columns={'price_mean':col+'_'+str(sh)+'_'+period+'_price_mean','price_median':col+'_'+str(sh)+'_'+period+'_price_median'},inplace=True)\n groupers.append(grouper)\n df = pd.merge(df,grouper,on=[col,period],how='left')\n df.drop_duplicates(subset=['id'],inplace=True)\n \n return df,groupers\n\n\ndef grouping(df:pd.DataFrame):\n '''\n calls `create_group` function for each of three specific columns to get the values\n for the `price` of 3 months ago and 1 year ago\n '''\n\n groupers = []\n df[df.dtypes[df.dtypes=='object'].index.tolist()] = df[df.dtypes[df.dtypes=='object'].index.tolist()].astype(str)\n\n for col in ['room_type','neighbourhood','neighbourhood_group']:\n df,groupers = create_group(df,groupers,col,'year',[1])\n df,groupers = create_group(df,groupers,col,'year_month',[3],True)\n \n return df,groupers\n\n\ndef fill_missing(df:pd.DataFrame,groupers:list, dummies:list,to_remove=[]):\n '''\n brings the wanted columns of list `dummies` to the dataframe `df`, fills missing month \n and year dates by finding the 10 most closest `prices` of each dataframe in `groupers`\n and computing the overall most common date and concatenates `df` with each dataframe\n in the `groupers` list\n '''\n\n\n df = get_distance(df)\n df[df.dtypes[df.dtypes=='object'].index.tolist()] = df[df.dtypes[df.dtypes=='object'].index.tolist()].astype(str)\n\n if not to_remove:\n to_remove = ['neighbourhood', 'year','year_month','id','host_id']\n\n merging_groups = [grouper for grouper in groupers if len([x for x in grouper.columns if 'month' in x])>0]\n\n df['year'] = df['last_review'].astype(str).str[:4]\n df['year_month'] = df['last_review'].astype(str).str[:7]\n\n missings = df[df['last_review'].isna()]\n df.dropna(subset=['last_review'],inplace=True)\n \n if len(missings)>0:\n \n idxs = missings.index.tolist()\n for idx in idxs:\n dates = []\n\n price = missings.loc[idx,'price']\n\n for grouper in 
merging_groups:\n\n col = grouper.columns[1]\n class_ = missings.loc[idx,col]\n\n prices = grouper[grouper[col] == class_].iloc[:,-2]\n closest_idx = prices.iloc[(prices-price).abs().argsort()[:10]].index\n dates+=grouper.iloc[closest_idx,0].values.tolist()\n\n date = sorted(Counter(dates).items(), key=lambda item: item[1])[-1][0]\n\n missings.loc[idx,'year_month'] = str(date)[:7]\n missings.loc[idx,'year'] = str(date)[:4]\n \n df = pd.concat([df,missings],axis=1)\n \n \n for grouper in groupers:\n\n period = grouper.columns[1]\n col = grouper.columns[0]\n df[period] = df[period].astype(str)\n grouper[period] = grouper[period].astype(str)\n df = pd.merge(df,grouper,on=[col,period],how='left')\n\n missing_dummies = pd.get_dummies(df[['neighbourhood','neighbourhood_group','room_type']],dummy_na=True)\n df = pd.concat([df,missing_dummies],axis=1)\n df = df.drop(columns=to_remove)\n df[dummies.columns[~dummies.columns.isin(missing_dummies)]] = 0\n df.drop(columns = missing_dummies.columns[~missing_dummies.columns.isin(dummies)],inplace=True)\n\n df.drop(columns = 'room_type_nan',inplace=True)\n \n return df\n\n\ndef final_adjustments(df:pd.DataFrame): \n '''\n drop unwanted columns, get dummies for specific columns and redefine some columns types\n ''' \n\n df.drop_duplicates(subset=['id'],inplace=True) \n \n dummies = pd.get_dummies(df[['neighbourhood','neighbourhood_group','room_type']],dummy_na=True)\n df = pd.concat([df,dummies],axis=1)\n df = df.drop(columns=['room_type_nan', 'neighbourhood', 'year','year_month','id','host_id'])\n df.columns = [x.replace('/','_').replace('-','_').replace(',',' ').replace(\"'\",'') for x in df.columns]\n \n slicer = (df.dtypes == 'uint8')\n df[df.columns[slicer]] = df[df.columns[slicer]].astype(int)\n\n return df, dummies.columns.tolist()\n\n\ndef interpolating(df:pd.DataFrame):\n '''\n transforms `year_month` into an accountable float column ot then interpolate it to fill\n some missing values\n '''\n\n df['year'] = df['last_review'].astype(str).str[:4]\n df['year_month'] = df['last_review'].astype(str).str[:7]\n\n df = df.sort_values(by=['room_type','neighbourhood','longitude','latitude'])\n df.loc[df['year_month']!='nan','year_month'] = df.loc[df['year_month']!='nan','year_month'].str[:4].astype(float)*1000+(df.loc[df['year_month']!='nan','year_month'].str[5:].astype(int)*99/11).astype(float)\n df.loc[df['year_month']=='nan','year_month'] = np.nan\n df['year_month'] = df['year_month'].astype(float).interpolate(method='linear')\n df['year_month'] = df['year_month'].astype(str).str[:4]+'-0'+(df['year_month'].astype(str).str[5:].astype(float)*11/99).apply(np.ceil).astype(int).astype(str)\n df.dropna(subset=['year_month'],inplace=True)\n\n df.loc[df['year_month'].str[-3:].isin(['010','011','012']),'year_month'] = df.loc[df['year_month'].str[-3:].isin(['010','011','012']),'year_month'].str.replace('-0','-')\n df.loc[df['year']=='nan','year'] = df.loc[df['year']=='nan','year_month'].str[:4].astype(int)\n\n return df\n\ndef reducing_features(df:pd.DataFrame):\n '''\n uses Boruta to find the most important features\n '''\n\n X = df.drop(columns=['name','neighbourhood_group','room_type','host_name','last_review','reviews_per_month']).dropna()\n y = X.pop('price')\n\n\n model = LGBMRegressor(n_estimators=300, max_depth=50, random_state=42)\n\n # let's initialize Boruta\n feat_selector = BorutaPy(\n verbose=1,\n estimator=model,\n n_estimators='auto',\n max_iter=50 # number of iterations to perform\n )\n\n # train Boruta\n # N.B.: X and y must be numpy arrays\n 
feat_selector.fit(np.array(X), np.array(y))\n\n # print support and ranking for each feature\n\n gc.collect()\n cols = list(X.columns[feat_selector.support_])\n logger.info(f'{len(cols)} features remaining')\n return cols","repo_name":"dangstan/build-ml-pipeline-for-short-term-rental-prices","sub_path":"src/advanced_feature_engineer/feature_engineer.py","file_name":"feature_engineer.py","file_ext":"py","file_size_in_byte":10433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"37316543338","text":"#!/usr/bin/python3\n\n# Fishtank master control unit\n\n# We expect Rainbow Swirl or other suitable effect to be running at priority 100\n\nimport socket\nimport time\nimport subprocess\n\nimport emailCheck\n# Define those functions first\n\n\ndef flash_lights(server, port, msg):\n\tx = 0\n\t# echo '{\"color\":[0,255,0],\"command\":\"color\",\"priority\":1}' | nc localhost 19444\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\tsock.connect( (server, port) )\n\twhile x < 5:\n\t\t#result = sock.recv(4096)\n\t\tsock.sendall(msg)\n\t\t#print(result)\n\t\ttime.sleep(0.8)\n\t\tblank = '{\"color\":[0,0,0],\"command\":\"color\",\"priority\":1}\\n' # Black\n\t\tsock.sendall(blank.encode('utf8'))\n\t\ttime.sleep(0.3)\n\t\tx += 1\n\t# Reset to previous state\n\ttime.sleep(0.5)\n\tclear = '{\"command\":\"clear\",\"priority\":1}\\n'\n\tsock.sendall(clear.encode('utf8'))\n\n# Main function\n\nif __name__ == '__main__':\n\n\tred = [255,0,0]\n\tgreen = [0,255,0]\n\tblue = [0,0,255]\n\n\t#response = subprocess.call([\"./emailCheck.py\", \"-q\"])\n\tresponse = subprocess.check_output(['./emailCheck.py', '-q'])\n\tif response == b'up\\n':\n\t\tmessage = '{\"color\":[0,255,0],\"command\":\"color\",\"priority\":1}\\n'\n\t\tflash_lights('192.168.1.47', 19444, message.encode('utf8'))\n","repo_name":"djsmiley2k/smileys-random-tools","sub_path":"fishtank.py","file_name":"fishtank.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"4339572246","text":"class Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n m = len(s1)\n n = len(s2)\n if m+n != len(s3):\n return False\n ans = [[False]*(n+1) for _ in range(m+1)]\n ans[0][0] = True\n for i in range(1, m+1):\n ans[i][0] = ans[i-1][0] and (s1[i-1] == s3[i-1])\n for j in range(1, n+1):\n ans[0][j] = ans[0][j-1] and (s2[j-1] == s3[j-1])\n for i in range(1, m+1):\n for j in range(1, n+1):\n ans[i][j] = s1[i-1] == s3[i+j-1] and ans[i-1][j] or s2[j-1] == s3[i+j-1] and ans[i][j-1]\n return ans[m][n]\n ","repo_name":"ShallowAlex/leetcode-py","sub_path":"1-100/97.py","file_name":"97.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"771271108","text":"\"\"\"Miscallaneous testing\"\"\"\n\n# pylint: disable=redefined-outer-name, import-error, missing-function-docstring, missing-class-docstring, invalid-name, attribute-defined-outside-init, unused-import\n\nfrom pathlib import Path\nimport shutil\nimport pytest\n\nfrom maize.core.component import Component\nfrom maize.core.interface import Input, Output\nfrom maize.core.node import Node\nfrom maize.core.workflow import Workflow\nfrom maize.steps.io import LoadFile, LoadFiles, Log, SaveFile, SaveFiles\nfrom maize.steps.plumbing import Copy, Delay, RoundRobin, Scatter, Multiply\nfrom maize.utilities.testing import TestRig\nfrom 
maize.utilities.macros import parallel\n\n\n@pytest.fixture\ndef mock_parent():\n return Component(name=\"parent\")\n\n\n@pytest.fixture\ndef node_hybrid(mock_parent):\n class TestNode(Node):\n inp: Input[int] = Input()\n inp_default: Input[int] = Input(default=17)\n inp_optional: Input[int] = Input(optional=True)\n out: Output[int] = Output()\n\n def run(self):\n a = self.inp.receive()\n b = self.inp_default.receive()\n c = 0\n if self.inp_optional.ready():\n c = self.inp_optional.receive()\n self.out.send(a + b + c)\n\n return TestNode\n\n\nclass Test_interfaces:\n def test_input(self, node_hybrid):\n rig = TestRig(node_hybrid)\n res = rig.setup_run(inputs={\"inp\": [42]})\n assert res[\"out\"].get() == 42 + 17\n\n rig = TestRig(node_hybrid)\n res = rig.setup_run(inputs={\"inp\": [42], \"inp_optional\": [2]})\n assert res[\"out\"].get() == 42 + 17 + 2\n\n rig = TestRig(node_hybrid)\n res = rig.setup_run(inputs={\"inp\": [42], \"inp_default\": [16], \"inp_optional\": [2]})\n assert res[\"out\"].get() == 42 + 16 + 2\n\n\ndef test_multi_file_load_save(shared_datadir, tmp_path):\n flow = Workflow(level=\"debug\")\n data = flow.add(\n LoadFiles[Path],\n parameters={\n \"files\": [shared_datadir / \"testorigin.abc\", shared_datadir / \"testorigin2.abc\"]\n },\n )\n save = flow.add(SaveFiles[Path], parameters={\"destination\": tmp_path})\n flow.connect(data.out, save.inp)\n flow.execute()\n assert (tmp_path / \"testorigin.abc\").exists()\n assert (tmp_path / \"testorigin2.abc\").exists()\n\n\ndef test_multi_file_load_copy_save(shared_datadir, tmp_path):\n dest1 = tmp_path / \"save1\"\n dest2 = tmp_path / \"save2\"\n dest1.mkdir(), dest2.mkdir()\n flow = Workflow(level=\"debug\")\n data = flow.add(\n LoadFiles[Path],\n parameters={\n \"files\": [shared_datadir / \"testorigin.abc\", shared_datadir / \"testorigin2.abc\"]\n },\n )\n copy = flow.add(Copy[list[Path]])\n log = flow.add(Log[list[Path]])\n save1 = flow.add(SaveFiles[Path], name=\"save1\", parameters={\"destination\": dest1})\n save2 = flow.add(SaveFiles[Path], name=\"save2\", parameters={\"destination\": dest2})\n flow.connect(data.out, log.inp)\n flow.connect(log.out, copy.inp)\n flow.connect(copy.out, save1.inp)\n flow.connect(copy.out, save2.inp)\n flow.execute()\n assert (dest1 / \"testorigin.abc\").exists()\n assert (dest1 / \"testorigin2.abc\").exists()\n assert (dest2 / \"testorigin.abc\").exists()\n assert (dest2 / \"testorigin2.abc\").exists()\n\n\ndef test_parallel_file(shared_datadir, tmp_path):\n flow = Workflow(level=\"debug\")\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n mult = flow.add(Multiply[Path], parameters={\"n_packages\": 2})\n scat = flow.add(Scatter[Path])\n dela = flow.add(parallel(Log[Path], n_branches=2, loop=True))\n roro = flow.add(RoundRobin[Path])\n out1 = flow.add(SaveFile[Path], name=\"out1\", parameters={\"destination\": tmp_path / \"test1.abc\"})\n out2 = flow.add(SaveFile[Path], name=\"out2\", parameters={\"destination\": tmp_path / \"test2.abc\"})\n flow.connect(data.out, mult.inp)\n flow.connect(mult.out, scat.inp)\n flow.connect(scat.out, dela.inp)\n flow.connect(dela.out, roro.inp)\n flow.connect(roro.out, out1.inp)\n flow.connect(roro.out, out2.inp)\n flow.execute()\n assert (tmp_path / \"test1.abc\").exists()\n assert (tmp_path / \"test2.abc\").exists()\n\n\ndef test_parallel_file_many(shared_datadir, tmp_path):\n class _Test(Node):\n inp: Input[Path] = Input(mode=\"copy\")\n out: Output[Path] = Output(mode=\"copy\")\n\n def run(self) -> None:\n 
file = self.inp.receive()\n out = Path(\"local.abc\")\n shutil.copy(file, out)\n self.out.send(out)\n\n n_files = 2\n flow = Workflow(level=\"debug\", cleanup_temp=False)\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n mult = flow.add(Multiply[Path], parameters={\"n_packages\": n_files})\n scat = flow.add(Scatter[Path])\n dela = flow.add(parallel(_Test, n_branches=4, loop=True))\n roro = flow.add(RoundRobin[Path])\n for i in range(n_files):\n out = flow.add(\n SaveFile[Path], name=f\"out{i}\", parameters={\"destination\": tmp_path / f\"test{i}.abc\"}\n )\n flow.connect(roro.out, out.inp)\n\n flow.connect(data.out, mult.inp)\n flow.connect(mult.out, scat.inp)\n flow.connect(scat.out, dela.inp)\n flow.connect(dela.out, roro.inp)\n flow.execute()\n for i in range(n_files):\n assert (tmp_path / f\"test{i}.abc\").exists()\n\n\ndef test_file_copy(shared_datadir, tmp_path):\n flow = Workflow()\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n copy = flow.add(Copy[Path])\n out1 = flow.add(SaveFile[Path], name=\"out1\", parameters={\"destination\": tmp_path / \"test1.abc\"})\n out2 = flow.add(SaveFile[Path], name=\"out2\", parameters={\"destination\": tmp_path / \"test2.abc\"})\n flow.connect(data.out, copy.inp)\n flow.connect(copy.out, out1.inp)\n flow.connect(copy.out, out2.inp)\n flow.execute()\n assert (tmp_path / \"test1.abc\").exists()\n assert (tmp_path / \"test2.abc\").exists()\n\n\ndef test_file_copy_delay(shared_datadir, tmp_path):\n flow = Workflow()\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n copy = flow.add(Copy[Path])\n del1 = flow.add(Delay[Path], name=\"del1\", parameters={\"delay\": 2})\n del2 = flow.add(Delay[Path], name=\"del2\", parameters={\"delay\": 5})\n out1 = flow.add(SaveFile[Path], name=\"out1\", parameters={\"destination\": tmp_path / \"test1.abc\"})\n out2 = flow.add(SaveFile[Path], name=\"out2\", parameters={\"destination\": tmp_path / \"test2.abc\"})\n flow.connect(data.out, del1.inp)\n flow.connect(del1.out, copy.inp)\n flow.connect(copy.out, out2.inp)\n flow.connect(copy.out, del2.inp)\n flow.connect(del2.out, out1.inp)\n flow.execute()\n assert (tmp_path / \"test1.abc\").exists()\n assert (tmp_path / \"test2.abc\").exists()\n\n\ndef test_nested_file_copy(shared_datadir, tmp_path):\n flow = Workflow()\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n copy1 = flow.add(Copy[Path], name=\"copy1\")\n copy2 = flow.add(Copy[Path], name=\"copy2\")\n out1 = flow.add(SaveFile[Path], name=\"out1\", parameters={\"destination\": tmp_path / \"test1.abc\"})\n out2 = flow.add(SaveFile[Path], name=\"out2\", parameters={\"destination\": tmp_path / \"test2.abc\"})\n out3 = flow.add(SaveFile[Path], name=\"out3\", parameters={\"destination\": tmp_path / \"test3.abc\"})\n flow.connect(data.out, copy1.inp)\n flow.connect(copy1.out, out1.inp)\n flow.connect(copy1.out, copy2.inp)\n flow.connect(copy2.out, out2.inp)\n flow.connect(copy2.out, out3.inp)\n flow.execute()\n assert (tmp_path / \"test1.abc\").exists()\n assert (tmp_path / \"test2.abc\").exists()\n assert (tmp_path / \"test3.abc\").exists()\n\n\ndef test_file_load(shared_datadir, tmp_path):\n flow = Workflow()\n data = flow.add(LoadFile[Path], parameters={\"file\": shared_datadir / \"testorigin.abc\"})\n out = flow.add(SaveFile[Path], parameters={\"destination\": tmp_path / \"test.abc\"})\n flow.connect(data.out, out.inp)\n flow.execute()\n 
assert (tmp_path / \"test.abc\").exists()\n assert (shared_datadir / \"testorigin.abc\").exists()\n","repo_name":"MolecularAI/maize","sub_path":"tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":8097,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"3737951053","text":"import os\nimport pickle\nimport numpy as np\nfrom collections import Counter\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef load_prediction(filepath):\n \"\"\"load prediction index\n Params:\n ------------------\n filepath : string\n path to prediction index file\n\n Returns:\n ------------------\n pred_list : list\n list of cluster idx (prediction)\n \"\"\"\n f = open(filepath)\n pred_list = []\n counter = 0\n for idx, pred in enumerate(f):\n if idx % 5 == 0:\n pred_list.append(int(pred[:-1]))\n\n return pred_list\n\n\nwith open(\"../voxel/test_data_filename.pkl\", \"rb\") as f:\n test_filename = pickle.load(f)\n\npred_list = load_prediction(\"../what3d_clusters/predictions.txt\")\npred_list = np.asarray(pred_list)\n\n\n# class_list = {}\n# for file in test_filename:\n# class_type = file.split(\"/\")[0]\n# if class_type not in class_list:\n# class_list[class_type] = 1\n# else:\n# class_list[class_type] += 1\n#\n# overall_class = [k for k in class_list.keys()]\n# overall_class.sort()\n# class_index = []\n# for file in test_filename:\n# class_type = file.split(\"/\")[0]\n# idx = overall_class.index(class_type)\n# class_index.append(idx)\n# class_index = np.asarray(class_index)\n\n\n###################### Plot Clustering ######################\ndistance_matrix = np.load(\"distance_matrix/Cluster_all_distance_matrix.npy\") #distance_matrix/Cluster_all_distance_matrix.npy Pred_all_distance_matrix.npy\nmodel = TSNE(perplexity=30, n_jobs=8, metric=\"precomputed\")\nembeddings = model.fit_transform(distance_matrix)\n# np.save(\"Pred_embedding.npy\", embeddings)\n\n# embeddings = np.load(\"clustering_embedding.npy\") # \"Pred_embedding.npy\" \"clustering_embedding.npy\"\n\n# LINE_STYLES = ['.', 'o', '+', 'dotted']\nNUM_COLORS = 500\n# # cm = plt.get_cmap('gist_rainbow')\n# cmap = plt.get_cmap('jet')\n# colors = cmap(np.linspace(0, 1.0, 500))\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n# for i in range(NUM_COLORS):\n# data_idx = np.where(pred_list[:]==i)[0]\n# if i == 1:\n# c = 2\n# if len(data_idx) == 0:\n# continue\n# cx, cy = embeddings[data_idx, 0], embeddings[data_idx, 1]\n# # ax.plot(cx, cy, marker='.', markersize=2, linestyle='None', color=float(i) / 500.)\n# # ax.scatter(cx, cy, c=float(i) / 500.)\n# # lines = ax.plot(cx, cy, marker='.', markersize=2, linestyle='None', colors=float(i)/500.)\n# # x = 1\n# # lines[0].set_color(rgba_color)\n# # lines[0].set_color(cm(i//3*3.0/NUM_COLORS))\n# # lines[0].set_color(cm(i / NUM_COLORS))\n\ncluster = np.arange(0,NUM_COLORS)\ncmap = plt.get_cmap('jet')\ncolors = cmap(np.linspace(0, 1.0, 500))\nfig = plt.figure()\nax = fig.add_subplot(111)\nfor cl, color in zip(cluster, colors):\n data_idx = np.where(pred_list[:]==cl)[0]\n if len(data_idx) == 0:\n continue\n cx, cy = embeddings[data_idx, 0], embeddings[data_idx, 1]\n plt.plot(cx, cy, '.', linestyle='None', color=color, markersize=2)\n\nplt.xticks([], [])\nplt.yticks([], [])\nplt.xlim([-70, 70])\nplt.ylim([-70, 70])\nfig.savefig('Clustering_TSNE.png')\nplt.show()\n\n###################### Plot Clustering 
######################\n","repo_name":"arqam-ai/SVR","sub_path":"eval/TSNE.py","file_name":"TSNE.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38868874927","text":"class Solution:\n def isMatch(self, s: str, p: str) -> bool:\n dp=[[False for i in range(len(p)+1)] for i in range(len(s)+1)]\n dp[0][0]=True\n # * can match empty string if * is the first char\n for j in range(1,len(p)+1):\n if p[j - 1] == '*':\n dp[0][j] = dp[0][j - 1]\n for i in range(1,len(s)+1):\n for j in range(1,len(p)+1):\n if p[j-1]=='?'or s[i-1]==p[j-1]:\n dp[i][j]=dp[i-1][j-1]\n elif p[j-1]=='*':\n dp[i][j]=(dp[i][j-1] or dp[i-1][j])\n return dp[len(s)][len(p)]\n#https://www.youtube.com/watch?v=DSJ9kFsA_gs\n#https://www.geeksforgeeks.org/wildcard-pattern-matching/\n\n ","repo_name":"junone/Algorithme","sub_path":"Dynamic programming/WhildCard-matching.py","file_name":"WhildCard-matching.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36814382129","text":"import time\n\nfrom acts.controllers.anritsu_lib._anritsu_utils import AnritsuError\nfrom acts.controllers.anritsu_lib.md8475a import MD8475A\nfrom acts.controllers.anritsu_lib.md8475a import CBCHSetup\nfrom acts.controllers.anritsu_lib.md8475a import CTCHSetup\nfrom acts_contrib.test_utils.tel.anritsu_utils import ETWS_WARNING_EARTHQUAKETSUNAMI\nfrom acts_contrib.test_utils.tel.anritsu_utils import ETWS_WARNING_OTHER_EMERGENCY\nfrom acts_contrib.test_utils.tel.anritsu_utils import cb_serial_number\nfrom acts_contrib.test_utils.tel.anritsu_utils import etws_receive_verify_message_lte_wcdma\nfrom acts_contrib.test_utils.tel.anritsu_utils import set_system_model_gsm\nfrom acts_contrib.test_utils.tel.anritsu_utils import set_system_model_lte\nfrom acts_contrib.test_utils.tel.anritsu_utils import set_system_model_wcdma\nfrom acts_contrib.test_utils.tel.anritsu_utils import set_usim_parameters\nfrom acts_contrib.test_utils.tel.anritsu_utils import set_post_sim_params\nfrom acts_contrib.test_utils.tel.tel_defines import NETWORK_MODE_CDMA\nfrom acts_contrib.test_utils.tel.tel_defines import NETWORK_MODE_GSM_ONLY\nfrom acts_contrib.test_utils.tel.tel_defines import NETWORK_MODE_GSM_UMTS\nfrom acts_contrib.test_utils.tel.tel_defines import NETWORK_MODE_LTE_GSM_WCDMA\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_1XRTT\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_GSM\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_LTE\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_WCDMA\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_FAMILY_CDMA2000\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_FAMILY_GSM\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_FAMILY_LTE\nfrom acts_contrib.test_utils.tel.tel_defines import RAT_FAMILY_UMTS\nfrom acts_contrib.test_utils.tel.tel_test_utils import ensure_network_rat\nfrom acts_contrib.test_utils.tel.tel_test_utils import ensure_phones_idle\nfrom acts_contrib.test_utils.tel.tel_test_utils import toggle_airplane_mode\nfrom acts_contrib.test_utils.tel.tel_test_utils import start_qxdm_loggers\nfrom acts_contrib.test_utils.tel.TelephonyBaseTest import TelephonyBaseTest\nfrom acts.test_decorators import test_tracker_info\n\nWAIT_TIME_BETWEEN_REG_AND_MSG = 15 # default 15 sec\n\n\nclass TelLabEtwsTest(TelephonyBaseTest):\n SERIAL_NO = cb_serial_number()\n\n def setup_class(self):\n 
super().setup_class()\n self.ad = self.android_devices[0]\n self.ad.sim_card = getattr(self.ad, \"sim_card\", None)\n self.md8475a_ip_address = self.user_params[\n \"anritsu_md8475a_ip_address\"]\n self.wlan_option = self.user_params.get(\"anritsu_wlan_option\", False)\n self.md8475_version = self.user_params.get(\"md8475\", \"A\")\n self.ad.adb.shell(\"settings put secure cmas_additional_broadcast_pkg \"\n \"com.googlecode.android_scripting\")\n self.wait_time_between_reg_and_msg = self.user_params.get(\n \"wait_time_between_reg_and_msg\", WAIT_TIME_BETWEEN_REG_AND_MSG)\n\n try:\n self.anritsu = MD8475A(self.md8475a_ip_address, self.wlan_option,\n self.md8475_version)\n except AnritsuError:\n self.log.error(\"Error in connecting to Anritsu Simulator\")\n return False\n return True\n\n def setup_test(self):\n if getattr(self, \"qxdm_log\", True):\n start_qxdm_loggers(self.log, self.android_devices)\n ensure_phones_idle(self.log, self.android_devices)\n toggle_airplane_mode(self.log, self.ad, True)\n return True\n\n def teardown_test(self):\n self.log.info(\"Stopping Simulation\")\n self.anritsu.stop_simulation()\n toggle_airplane_mode(self.log, self.ad, True)\n\n def teardown_class(self):\n self.anritsu.disconnect()\n return True\n\n def _send_receive_etws_message(self, set_simulation_func, rat, message_id,\n warning_message):\n try:\n [self.bts1] = set_simulation_func(self.anritsu, self.user_params,\n self.ad.sim_card)\n set_usim_parameters(self.anritsu, self.ad.sim_card)\n if rat == RAT_LTE:\n set_post_sim_params(self.anritsu, self.user_params,\n self.ad.sim_card)\n self.anritsu.start_simulation()\n\n if rat == RAT_LTE:\n preferred_network_setting = NETWORK_MODE_LTE_GSM_WCDMA\n rat_family = RAT_FAMILY_LTE\n elif rat == RAT_WCDMA:\n self.bts1.wcdma_ctch = CTCHSetup.CTCH_ENABLE\n self.ad.droid.telephonyToggleDataConnection(False)\n preferred_network_setting = NETWORK_MODE_GSM_UMTS\n rat_family = RAT_FAMILY_UMTS\n elif rat == RAT_GSM:\n self.bts1.gsm_cbch = CBCHSetup.CBCH_ENABLE\n self.ad.droid.telephonyToggleDataConnection(False)\n preferred_network_setting = NETWORK_MODE_GSM_ONLY\n rat_family = RAT_FAMILY_GSM\n elif rat == RAT_1XRTT:\n preferred_network_setting = NETWORK_MODE_CDMA\n rat_family = RAT_FAMILY_CDMA2000\n else:\n self.log.error(\"No valid RAT provided for ETWS test.\")\n return False\n\n if not ensure_network_rat(\n self.log,\n self.ad,\n preferred_network_setting,\n rat_family,\n toggle_apm_after_setting=True):\n self.log.error(\n \"Failed to set rat family {}, preferred network:{}\".format(\n rat_family, preferred_network_setting))\n return False\n\n self.anritsu.wait_for_registration_state()\n if not etws_receive_verify_message_lte_wcdma(\n self.log, self.ad, self.anritsu,\n next(TelLabEtwsTest.SERIAL_NO), message_id,\n warning_message):\n self.log.error(\"Phone {} Failed to receive ETWS message\"\n .format(self.ad.serial))\n return False\n except AnritsuError as e:\n self.log.error(\"Error in connection with Anritsu Simulator: \" +\n str(e))\n return False\n except Exception as e:\n self.log.error(\"Exception during ETWS send/receive: \" + str(e))\n return False\n return True\n\n def test_carrier_tmobile(self):\n \"\"\" Sets the Carrier to TMO.\n Returns: None\n \"\"\"\n setattr(self.ad, \"sim_card\", \"FiTMO\")\n\n def test_carrier_sprint(self):\n \"\"\" Sets the Carrier to SPR.\n Returns: None\n \"\"\"\n setattr(self.ad, \"sim_card\", \"FiSPR\")\n\n def test_carrier_uscc(self):\n \"\"\" Sets the Carrier to USCC.\n Returns: None\n \"\"\"\n setattr(self.ad, \"sim_card\", 
\"FiUSCC\")\n\n \"\"\" Tests Begin \"\"\"\n\n @test_tracker_info(uuid=\"af4a00d0-9a91-45d5-9f65-9541e64a57f2\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_earthquake_tsunami_lte(self):\n \"\"\"ETWS Earthquake and Tsunami warning message reception on LTE\n\n Tests the capability of device to receive and inform the user\n about the ETWS Earthquake and Tsunami warning message when camped on\n LTE newtork\n\n Steps:\n 1. Make Sure Phone is camped on LTE network\n 2. Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(set_system_model_lte, RAT_LTE,\n ETWS_WARNING_EARTHQUAKETSUNAMI,\n \"LTE Earthquake and Tsunami\")\n\n @test_tracker_info(uuid=\"03785878-0319-413c-9190-d4e08f0edc33\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_other_emergency_lte(self):\n \"\"\"ETWS Other emergency warning message reception on LTE\n\n Tests the capability of device to receive and inform the user\n about the ETWS Other emergency warning message when camped on\n LTE newtork\n\n Steps:\n 1. Make Sure Phone is camped on LTE network\n 2. Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(set_system_model_lte, RAT_LTE,\n ETWS_WARNING_OTHER_EMERGENCY,\n \"LTE ETWS Other Emergency\")\n\n @test_tracker_info(uuid=\"1ef4a5d7-9ceb-49eb-8ec7-5538625c8bd4\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_earthquake_tsunami_wcdma(self):\n \"\"\"ETWS Earthquake and Tsunami warning message reception on WCDMA\n\n Tests the capability of device to receive and inform the user\n about the ETWS Earthquake and Tsunami warning message when camped on\n WCDMA newtork\n\n Steps:\n 1. Make Sure Phone is camped on WCDMA network\n 2. Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(\n set_system_model_wcdma, RAT_WCDMA, ETWS_WARNING_EARTHQUAKETSUNAMI,\n \"WCDMA Earthquake and Tsunami\")\n\n @test_tracker_info(uuid=\"71dc9650-d00a-4533-99f5-5cc301c21334\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_other_emergency_wcdma(self):\n \"\"\"ETWS Other emergency warning message reception on WCDMA\n\n Tests the capability of device to receive and inform the user\n about the ETWS Other emergency warning message when camped on\n WCDMA newtork\n\n Steps:\n 1. Make Sure Phone is camped on WCDMA network\n 2. Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(\n set_system_model_wcdma, RAT_WCDMA, ETWS_WARNING_OTHER_EMERGENCY,\n \"WCDMA ETWS Other Emergency\")\n\n @test_tracker_info(uuid=\"a9fd9c0e-21bf-41d1-81d2-c34679052fe0\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_earthquake_tsunami_gsm(self):\n \"\"\"ETWS Earthquake and Tsunami warning message reception on GSM\n\n Tests the capability of device to receive and inform the user\n about the ETWS Earthquake and Tsunami warning message when camped on\n GSM newtork\n\n Steps:\n 1. Make Sure Phone is camped on GSM network\n 2. 
Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(set_system_model_gsm, RAT_GSM,\n ETWS_WARNING_EARTHQUAKETSUNAMI,\n \"GSM Earthquake and Tsunami\")\n\n @test_tracker_info(uuid=\"0ae42f8d-1720-449c-9200-e88f7f1d2cbe\")\n @TelephonyBaseTest.tel_test_wrap\n def test_etws_other_emergency_gsm(self):\n \"\"\"ETWS Other emergency warning message reception on GSM\n\n Tests the capability of device to receive and inform the user\n about the ETWS Other emergency warning message when camped on\n GSM newtork\n\n Steps:\n 1. Make Sure Phone is camped on GSM network\n 2. Send ETWS Earthquake and Tsunami warning message from Anritsu\n\n Expected Result:\n Phone receives ETWS Earthquake and Tsunami warning message\n\n Returns:\n True if pass; False if fail\n \"\"\"\n return self._send_receive_etws_message(set_system_model_gsm, RAT_GSM,\n ETWS_WARNING_OTHER_EMERGENCY,\n \"GSM ETWS Other Emergency\")\n\n \"\"\" Tests End \"\"\"\n","repo_name":"rickdynasty/Aosp11","sub_path":"tools/test/connectivity/acts_tests/tests/google/tel/lab/TelLabEtwsTest.py","file_name":"TelLabEtwsTest.py","file_ext":"py","file_size_in_byte":12446,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"25772370672","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom treecat.format import export_rows\nfrom treecat.format import guess_schema\nfrom treecat.format import import_data\nfrom treecat.format import import_rows\nfrom treecat.format import load_data\nfrom treecat.format import load_schema\nfrom treecat.format import pd_outer_join\nfrom treecat.format import pickle_dump\nfrom treecat.format import pickle_load\nfrom treecat.testutil import TESTDATA\nfrom treecat.testutil import assert_equal\nfrom treecat.testutil import tempdir\n\nDATA_CSV = os.path.join(TESTDATA, 'tiny_data.csv')\nTYPES_CSV = os.path.join(TESTDATA, 'tiny_types.csv')\nVALUES_CSV = os.path.join(TESTDATA, 'tiny_values.csv')\nGROUPS_CSV = os.path.join(TESTDATA, 'tiny_groups.csv')\n\nEXAMPLE_DATA = [\n u'foo',\n [1, 2, 3],\n {\n u'foo': 0\n },\n np.array([[0, 1], [2, 3]], dtype=np.int8),\n]\n\n\n@pytest.mark.parametrize('data', EXAMPLE_DATA)\n@pytest.mark.parametrize('ext', ['pkz', 'jz'])\ndef test_pickle(data, ext):\n with tempdir() as dirname:\n filename = os.path.join(dirname, 'test.{}'.format(ext))\n pickle_dump(data, filename)\n actual = pickle_load(filename)\n assert_equal(actual, data)\n\n\ndef test_pd_outer_join():\n dfs = [\n pd.DataFrame({\n 'id': [0, 1, 2, 3],\n 'a': ['foo', 'bar', 'baz', np.nan],\n 'b': ['panda', 'zebra', np.nan, np.nan],\n }),\n pd.DataFrame({\n 'id': [1, 2, 3, 4],\n 'b': ['mouse', np.nan, 'tiger', 'egret'],\n 'c': ['toe', 'finger', 'nose', np.nan],\n }),\n ]\n expected = pd.DataFrame({\n 'id': [0, 1, 2, 3, 4],\n 'a': ['foo', 'bar', 'baz', np.nan, np.nan],\n 'b': ['panda', 'zebra', np.nan, 'tiger', 'egret'],\n 'c': [np.nan, 'toe', 'finger', 'nose', np.nan],\n }).set_index('id')\n actual = pd_outer_join(dfs, on='id')\n print(expected)\n print(actual)\n assert expected.equals(actual)\n\n\ndef test_guess_schema():\n with tempdir() as dirname:\n types_csv_out = os.path.join(dirname, 'types.csv')\n values_csv_out = os.path.join(dirname, 'values.csv')\n 
guess_schema(DATA_CSV, types_csv_out, values_csv_out)\n expected_types = open(TYPES_CSV).read()\n expected_values = open(VALUES_CSV).read()\n actual_types = open(types_csv_out).read()\n actual_values = open(values_csv_out).read()\n assert actual_types == expected_types\n assert actual_values == expected_values\n\n\ndef test_load_schema():\n load_schema(TYPES_CSV, VALUES_CSV, GROUPS_CSV)\n\n\ndef test_load_data():\n schema = load_schema(TYPES_CSV, VALUES_CSV, GROUPS_CSV)\n load_data(schema, DATA_CSV)\n\n\ndef test_import_data():\n with tempdir() as dirname:\n dataset_out = os.path.join(dirname, 'dataset.pkz')\n assert not os.path.exists(dataset_out)\n import_data(DATA_CSV, TYPES_CSV, VALUES_CSV, GROUPS_CSV, dataset_out)\n assert os.path.exists(dataset_out)\n\n\ndef test_export_import_rows():\n schema = load_schema(TYPES_CSV, VALUES_CSV, GROUPS_CSV)\n data = load_data(schema, DATA_CSV)\n print(schema['feature_index'])\n print(data)\n rows = export_rows(schema, data)\n assert len(rows) == data.shape[0]\n actual_data = import_rows(schema, rows)\n print(actual_data)\n assert np.all(actual_data == data)\n","repo_name":"posterior/treecat","sub_path":"treecat/format_test.py","file_name":"format_test.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"91"} +{"seq_id":"26262440259","text":"#!/usr/bin/env python\n\n\nimport numpy as np\nimport cv2\n\ndef filter_color(rgb_image, Lower_bound, Upper_bound):\n hsv_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2HSV)\n #cv2.imshow(\"HSV Image\", hsv_image)\n\n mask = cv2.inRange(hsv_image, Lower_bound, Upper_bound)\n\n return mask\n\ndef getContours(binary_image):\n contours, hierarchy = cv2.findContours(binary_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n return contours\n\ndef draw_ball(binary_image, rgb_image, contours):\n black_image = np.zeros([binary_image.shape[0], binary_image.shape[1],3],'uint8')\n\n for c in contours:\n area = cv2.contourArea(c)\n perimeter= cv2.arcLength(c, True)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n if (area>1000):\n cv2.drawContours(rgb_image, [c], -1, (150,250,150), 1)\n cv2.drawContours(black_image, [c], -1, (150,250,150), 1)\n cx, cy = contour_center(c)\n cv2.circle(rgb_image, (cx,cy),(int)(radius),(0,0,255),1)\n cv2.circle(black_image, (cx,cy),(int)(radius),(0,0,255),1)\n cv2.circle(black_image, (cx,cy),5,(150,150,255),-1)\n print (\"Area: {}, Perimeter: {}\".format(area, perimeter))\n print (\"number of contours: {}\".format(len(contours)))\n cv2.imshow(\"RGB Image Contours\",rgb_image)\n cv2.imshow(\"Black Image Contours\",black_image)\n\ndef contour_center(contour):\n M = cv2.moments(contour)\n cx=-1\n cy=-1\n if (M['m00']!=0):\n cx= int(M['m10']/M['m00'])\n cy= int(M['m01']/M['m00'])\n return cx, cy\n\ndef ball_tracking(frame):\n YellowLower = (30,150,100)\n YellowUpper = (50,255,255)\n rgb_image = frame\n binary_image_mask = filter_color(rgb_image,YellowLower,YellowUpper)\n contours = getContours(binary_image_mask)\n draw_ball(binary_image_mask,rgb_image,contours)\n\n\ndef main():\n \n #video_capture = cv2.VideoCapture(0)\n video_capture = cv2.VideoCapture('/home/darsh/catkin_ws/src/ball_tracking/src/video/tennis-ball-video.mp4')\n \n while(True):\n ret, frame = video_capture.read()\n ball_tracking(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"Darshshah23/Ball-Tracking-using-ROS","sub_path":"src/ball_tracking.py","file_name":"ball_tracking.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37797391429","text":"from PIL import Image\nfrom cassandra.auth import PlainTextAuthProvider\nfrom getpass import getpass\nfrom tqdm import tqdm\nimport io\nimport numpy as np\nimport os\nimport uuid\nimport yaml\nfrom cassandradl import CassandraWriter\n\n\ndef get_data(path):\n img = Image.open(path).convert(\"RGB\")\n # resize and crop to 224x224\n tg = 224\n sz = np.array(img.size)\n min_d = sz.min()\n sc = float(tg) / min_d\n new_sz = (sc * sz).astype(int)\n img = img.resize(new_sz)\n off = (new_sz.max() - tg) // 2\n if new_sz[0] > new_sz[1]:\n box = [off, 0, off + tg, tg]\n else:\n box = [0, off, tg, off + tg]\n img = img.crop(box)\n # save to stream\n out_stream = io.BytesIO()\n img.save(out_stream, format=\"JPEG\")\n # write to db\n out_stream.flush()\n data = out_stream.getvalue()\n return data\n\n\ndef save_images(cassandra_ip, cass_user, cass_pass):\n auth_prov = PlainTextAuthProvider(cass_user, cass_pass)\n\n def ret(jobs):\n cw = CassandraWriter(\n auth_prov,\n [cassandra_ip],\n table_ids=\"isic.ids_224\",\n table_data=\"isic.data_224\",\n table_metadata=\"isic.metadata_224\",\n id_col=\"patch_id\",\n label_col=\"label\",\n data_col=\"data\",\n cols=[\"or_split\", \"or_label\"],\n get_data=get_data,\n )\n for path, label, partition_items in tqdm(jobs):\n cw.save_image(path, label, partition_items)\n\n return ret\n\n\ndef get_jobs(src_dir):\n fn = \"isic_classification_2018.yml\"\n fn = os.path.join(src_dir, fn)\n print(\"Reading YAML file...\", flush=True)\n with open(fn, \"r\") as f:\n isic = yaml.safe_load(f)\n print(\"Resizing and inserting in DB...\", flush=True)\n jobs = []\n labels = dict(enumerate(isic[\"classes\"]))\n labels = {v: k for k, v in labels.items()}\n for or_split in isic[\"split\"].keys():\n for num in isic[\"split\"][or_split]:\n item = isic[\"images\"][num]\n or_label = item[\"label\"]\n label = labels[or_label]\n partition_items = (or_split, or_label)\n path = os.path.join(src_dir, item[\"location\"])\n jobs.append((path, label, partition_items))\n return jobs\n","repo_name":"fversaci/CassandraDL","sub_path":"examples/isic_2018/isic_common.py","file_name":"isic_common.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72600951022","text":"import torch\r\nfrom model import resnet152\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nimport matplotlib.pyplot as plt\r\nimport json\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\ndata_transform = transforms.Compose(\r\n [transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # 预处理\r\n\r\n# load image\r\nimg = Image.open(\"H:/鸟类数据集/train/ABBOTTS BABBLER/001.jpg\") # 导入需要检测的图片\r\nplt.imshow(img)\r\n# [N, C, H, W]\r\nimg = data_transform(img)\r\n# expand batch dimension\r\nimg = torch.unsqueeze(img, dim=0)\r\n\r\n# read class_indict\r\ntry:\r\n json_file = open('./class_indices.json', 'r')\r\n class_indict = json.load(json_file)\r\nexcept Exception as e:\r\n print(e)\r\n exit(-1)\r\n\r\n# create model\r\nmodel = resnet152(num_classes=400) # 修改为你训练时一共的种类数\r\n# load model 
weights\r\nmodel_weight_path = \"./resNet152.pth\" # 导入训练好的模型\r\nmodel.load_state_dict(torch.load(model_weight_path, map_location=device))\r\nmodel.eval()\r\n# predict_list = []\r\nwith torch.no_grad(): # 不对损失梯度进行跟踪\r\n # predict class\r\n output = torch.squeeze(model(img)) # 压缩batch维度\r\n predict = torch.softmax(output, dim=0) # 得到概率分布\r\n predict_cla = torch.argmax(predict).numpy() # argmax寻找最大值对应的索引\r\n # predict_list.append(predict.tolist()[0])\r\nprint(class_indict[str(predict_cla)], predict[predict_cla].numpy())\r\nplt.show()\r\n","repo_name":"xia1jiao/11","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"35900462778","text":"class Solution:\n # APP1: for loop of 32.\n # Time: O(1) Space: O(1) Runtime: 50%\n def reverseBits(self, n: int) -> int:\n res = 0\n for _ in range(32):\n res = (res << 1) + (n & 1)\n n = n >> 1\n return res\n\n # APP2: reverse to the correct pos directly\n # Time: O(1) space: O(1) Runtime: 80%\n def reverseBits(self, n: int) -> int:\n res, power = 0, 31\n while n:\n res += (n & 1) << power\n n = n >> 1\n power -= 1\n return res","repo_name":"ruifengli-cs/leetcode","sub_path":"Bit manipulation/190. Reverse Bits.py","file_name":"190. Reverse Bits.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17022118014","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.utils.crypto import get_random_string\n\nfrom kitap.models import Category, Kitap\nfrom home.models import Setting, UserProfile\nfrom order.models import ShopCartForm, ShopCart, OrderForm, Order, OrderProduct\n\n\ndef index(request):\n return HttpResponse(\"Order App\")\n\n@login_required(login_url='/login')\ndef addtocart(request,id):\n url=request.META.get('HTTP_REFERER')\n current_user=request.user\n\n checkproduct=ShopCart.objects.filter(product_id=id)\n if checkproduct:\n control=1\n else:\n control=0\n if request.method=='POST':\n form=ShopCartForm(request.POST)\n if form.is_valid():\n if control==1:\n data=ShopCart.objects.get(product_id=id)\n data.quantity+=form.cleaned_data['quantity']\n data.save()\n else:\n data = ShopCart()\n data.user_id = current_user.id\n data.product_id = id\n data.quantity=form.cleaned_data['quantity']\n data.save()\n request.session['cart_items'] = ShopCart.objects.filter(user_id=current_user.id).count()\n messages.success(request,\"Ürün başarı ile sepete eklenmiştir.Teşekkür Ederiz\")\n return HttpResponseRedirect(url)\n else:\n if control == 1:\n data = ShopCart.objects.get(product_id=id)\n data.quantity +=1\n data.save()\n else:\n data=ShopCart()\n data.user_id=current_user.id\n data.product_id=id\n data.quantity=1\n data.save()\n request.session['cart_items'] = ShopCart.objects.filter(user_id=current_user.id).count()\n messages.success(request,\"Ürün başarı ile sepete eklenmiştir.Teşekkür Ederiz\")\n return HttpResponseRedirect(url)\n messages.warning(request,\"Ürün sepete eklemede hata oluştu.Lütfen kontrol ediniz\")\n return HttpResponseRedirect(url)\n\n@login_required(login_url='/login')\ndef shopcart(request):\n category=Category.objects.all()\n current_user=request.user\n setting = Setting.objects.get(pk=1)\n 
shopcart=ShopCart.objects.filter(user_id=current_user.id)\n request.session['cart_items'] = ShopCart.objects.filter(user_id=current_user.id).count()\n\n total = 0\n for rs in shopcart:\n total+= rs.product.fiyat*rs.quantity\n context={'category':category,\n 'shopcart':shopcart,\n 'setting':setting,\n 'total' : total,\n }\n return render(request,'Shopcart_products.html',context)\n\n@login_required(login_url='/login')\ndef deletefromcart(request,id):\n category=Category.objects.all()\n setting = Setting.objects.get(pk=1)\n ShopCart.objects.filter(id=id).delete()\n current_user=request.user\n request.session['cart_items'] = ShopCart.objects.filter(user_id=current_user.id).count()\n messages.success(request, \"Ürün sepetten silinmiştir\")\n context={'category':category,\n 'setting': setting,\n\n }\n return HttpResponseRedirect(\"/shopcart\",context)\n\n@login_required(login_url='/login')\ndef orderproduct(request):\n category = Category.objects.all()\n current_user=request.user\n shopcart=ShopCart.objects.filter(user_id=current_user.id)\n total=0\n for rs in shopcart:\n total += rs.product.fiyat * rs.quantity\n\n if request.method =='POST':\n form = OrderForm(request.POST)\n\n if form.is_valid():\n data = Order()\n data.first_name=form.cleaned_data['first_name']\n data.last_name = form.cleaned_data['last_name']\n data.address=form.cleaned_data['address']\n data.city=form.cleaned_data['city']\n data.phone=form.cleaned_data['phone']\n data.user_id=current_user.id\n data.total=total\n data.ip=request.META.get('REMOTE_ADDR')\n ordercode=get_random_string(5).upper()\n data.code=ordercode\n data.save()\n\n #move shopcrt items for order products items\n shopcart=ShopCart.objects.filter(user_id=current_user.id)\n for rs in shopcart: #schopcart\n detail=OrderProduct()\n detail.order_id =data.id\n detail.product_id =rs.product_id\n detail.user_id =current_user.id\n detail.quantity =rs.quantity\n detail.fiyat =rs.product.fiyat\n detail.stok_durum =rs.stok_durum\n detail.save()\n\n # ***************< Reduce >***************#\n product = Kitap.objects.get(id=rs.product_id)\n product.stok_durum -= rs.quantity\n product.save()\n #***************<^^^^^^^^^^>***************#\n\n ShopCart.objects.filter(user_id=current_user.id).delete()\n request.session['cart_items']=0\n messages.success(request,\"Your order has been completed , thank you!\" )\n return render(request,\"Order_Complated.html\",{'ordercode':ordercode,'category':category})\n else:\n messages.warning(request,form.errors)\n return HttpResponseRedirect(\"/order/orderproduct\")\n\n\n\n form = OrderForm()\n profile = UserProfile.objects.get(user_id=current_user.id)\n context = {'shopcart':shopcart,# hoca Schopcart yazmis\n 'category': category,\n 'total': total,\n 'profile': profile,\n 'form':form,\n }\n\n return render(request,'Order_Form.html',context)","repo_name":"maga62/DjangoKitap","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"13888305755","text":"import os\nfrom datetime import datetime\n\nimport humanize\n\n\ndef file_properties(path):\n \"\"\"Return the name, modification time and size of a file.\n\n args:\n path (str or Path): path to file of interest\n\n returns:\n (dict): file properties with keys \"name\", \"mtime\" and \"file_size\"\n \"\"\"\n stats = os.stat(path)\n return dict(\n name=path.name,\n mtime=humanize.naturaltime(datetime.fromtimestamp(stats.st_mtime)),\n 
file_size=humanize.naturalsize(stats.st_size),\n )\n","repo_name":"ImperialCollegeLondon/champ","sub_path":"main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"8637102796","text":"from UCB1_Learner import *\n\n\nclass SWUCB1_Learner(UCB1_Learner):\n\n def __init__(self, n_arms, margins, window_size, classes=[]):\n \"\"\"\n Initialization of SWUCB1\n :param n_arms: number of arms\n :param margins: margins vector\n :param window_size: the size of the sliding window\n :param classes: classes that are learned\n :self.sample_timestamp = for each arm: array of timestamps when the arm was pulled\n \"\"\"\n super().__init__(n_arms, margins)\n self.window_size = window_size\n self.sample_timestamp = [[] for _ in range(n_arms)]\n self.classes = classes\n # print(self.window_size)\n\n def pull_arm(self):\n \"\"\"\n Selection of the arm:\n We select the index of the arm with the maximum bound\n (in case of ties we choose randomly)\n :return: the pulled arm\n \"\"\"\n # if self.t < self.n_arms:\n # return self.t\n\n # for i in range(0, self.n_arms):\n # if len(self.samples_per_arm[i]) == 0:\n # return i\n\n margin_bounds = self.bounds * self.margins\n idxs = np.argwhere(margin_bounds == margin_bounds.max()).reshape(-1)\n pulled_arm = np.random.choice(idxs)\n return pulled_arm\n\n def update2(self, pulled_arm, reward):\n \"\"\"\n Function that update the parameters of the pulled arm\n :param pulled_arm: the selected arm\n :param reward: the associated reward\n \"\"\"\n self.update_observations(pulled_arm, reward)\n\n # if self.t < self.n_arms:\n # self.bounds[pulled_arm] = 0\n # else:\n # n_rounds_arm = len(self.samples_per_arm[pulled_arm][-self.window_size:])\n # windowed_mean = np.mean(self.samples_per_arm[pulled_arm][-self.window_size:])\n # self.bounds[pulled_arm] = windowed_mean + np.sqrt(2 * np.log(self.t + 1) / n_rounds_arm)\n\n n_rounds_arm = len(self.samples_per_arm[pulled_arm][-self.window_size:])\n windowed_mean = np.mean(self.samples_per_arm[pulled_arm][-self.window_size:])\n\n self.bounds[pulled_arm] = windowed_mean + np.sqrt(2 * np.log(self.t + 1) / n_rounds_arm)\n\n self.t += 1\n\n def update(self, pulled_arm, reward):\n \"\"\"\n Update of the bound of the selected arm\n :param pulled_arm: the selected arm\n :param reward: the reward\n \"\"\"\n\n # if self.t > self.window_size:\n # print(\"ok\")\n\n # 1. Move the window (discard old samples for all the arms)\n self.sample_timestamp[pulled_arm].append(self.t)\n for i in range(0, self.n_arms):\n if len(self.sample_timestamp[i]) > 0:\n while self.sample_timestamp[i][0] < (self.t - self.window_size):\n self.sample_timestamp[i] = self.sample_timestamp[i][1:]\n self.samples_per_arm[i] = self.samples_per_arm[i][1:]\n # print(i, self.sample_timestamp[i])\n # print(i, self.samples_per_arm[i])\n if len(self.sample_timestamp[i]) == 0:\n break\n\n # 2. 
Update the confidence bounds of all the arms since the window has been moved\n self.update_observations(pulled_arm, reward)\n\n for i in range(0, self.n_arms):\n if len(self.sample_timestamp[i]) == 0:\n self.bounds[i] = 1000\n else:\n n_rounds_arm = len(self.samples_per_arm[i])\n windowed_mean = np.mean(self.samples_per_arm[i])\n self.bounds[i] = windowed_mean+np.sqrt(2*np.log(self.t+1)/n_rounds_arm)\n\n self.t += 1\n","repo_name":"lucabonali/PricingDia","sub_path":"SWUCB1_Learner.py","file_name":"SWUCB1_Learner.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"39554017923","text":"import json\nimport datetime\nfrom collections import Iterable\nfrom logging.config import dictConfig\nfrom bson import ObjectId\nfrom flask import Flask, request, jsonify\nfrom werkzeug.exceptions import HTTPException\nfrom flask_log_request_id import RequestID\nfrom eclogue.config import config\nfrom eclogue.middleware import Middleware\nfrom eclogue.api import router_v1\nfrom eclogue.api.routes import routes\nfrom eclogue.scheduler import scheduler\nfrom eclogue.lib.logger import get_logger\nfrom eclogue.model import Model\nfrom flask_socketio import SocketIO\n\nsocketio = SocketIO(async_mode='gevent')\n\n\ndef json_parser(o):\n if hasattr(o, '__dict__'):\n return o.__dict__()\n # if hasattr(o, 'to_dict')\n if isinstance(o, Model):\n return o.__dict__()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, datetime.datetime):\n return str(o)\n if isinstance(o, bytes):\n return o.decode('utf8')\n if isinstance(o, Iterable):\n return list(o)\n # return list(map(json_parser, o))\n\nclass JSONEncoder(json.JSONEncoder):\n # extend json-encoder class\n def default(self, o):\n res = json_parser(o)\n return res if res else json.JSONEncoder.default(self, o)\n\n\ndef create_app(schedule=True):\n dictConfig(config.logging)\n # root_path = os.path.join(config.home_path, 'public')\n root_path = config.home_path\n\n app = Flask(__name__, root_path=root_path, static_folder='public', static_url_path='')\n app.json_encoder = JSONEncoder\n app.config.from_object(config)\n app.config['LOG_REQUEST_ID_LOG_ALL_REQUESTS'] = True\n RequestID(app=app)\n Middleware(app)\n bp = router_v1(routes)\n app.register_blueprint(bp)\n # app.register_blueprint(static())\n if schedule:\n scheduler.start()\n socketio.init_app(app=app, cors_allowed_origins='*')\n import eclogue.socket\n # api.add_resource(Menus, '/menus')\n\n @app.route('/', methods=['get'])\n def index():\n return app.send_static_file('index.html')\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n 'message': 'not found',\n 'code': 404,\n 'error': str(error)\n }), 404\n\n @app.errorhandler(500)\n def server_error(error):\n\n return jsonify({\n 'message': 'server error',\n 'code': 500,\n 'error': str(error)\n }), 500\n\n @app.errorhandler(405)\n def metho_not_allow(error):\n return jsonify({\n 'message': 'method not allow',\n 'code': 405,\n 'error': str(error)\n }), 405\n\n @app.errorhandler(HTTPException)\n def handle_exception(e):\n \"\"\"Return JSON instead of HTML for HTTP errors.\"\"\"\n # start with the correct headers and status code from the error\n # replace the body with JSON\n log_info = {\n \"code\": e.code,\n \"title\": e.name,\n \"description\": e.description,\n }\n get_logger().error('api server error %s' % e.description, extra=log_info)\n\n return jsonify({\n 'code': 500,\n 'message': 'api server error'\n })\n\n return 
app\n\n\n__version__ = '0.0.1'\n","repo_name":"eclogue/eclogue","sub_path":"eclogue/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"} +{"seq_id":"70563766702","text":"from dependency_injector import containers, providers\nfrom dependency_injector.ext import aiohttp as ext_aiohttp\n\nfrom app.game.controllers import (\n get_last_session_info,\n start_session,\n complete_stage,\n close_session_due_to_failure,\n cancel_session,\n get_leaderboard,\n compare_images\n)\nfrom app.game.repositories import PlayerRepository\nfrom app.game.services import ImageComparator\nfrom app.game.transformer import PlayerTransformer, LeaderboardTransformer\n\n\nclass GamePackageContainer(containers.DeclarativeContainer):\n\n mappers = providers.DependenciesContainer()\n\n config = providers.Configuration()\n\n application_utils = providers.DependenciesContainer()\n\n # services\n\n image_comparator = providers.Factory(\n ImageComparator,\n deepai_api_url=config.deepai.api_url,\n deepai_api_key=config.deepai.api_key,\n logger=application_utils.logger\n )\n\n # transformers\n\n player_transformer = providers.Singleton(PlayerTransformer)\n\n leaderboard_transformer = providers.Singleton(\n LeaderboardTransformer,\n player_transformer=player_transformer\n )\n\n # repositories\n\n player_repository = providers.Singleton(\n PlayerRepository,\n user_mapper=mappers.user_mapper,\n session_mapper=mappers.session_mapper\n )\n\n # controllers\n\n get_last_session_info = ext_aiohttp.View(\n get_last_session_info,\n player_repository=player_repository,\n player_transformer=player_transformer,\n )\n\n start_session = ext_aiohttp.View(\n start_session,\n player_repository=player_repository,\n player_transformer=player_transformer,\n )\n\n complete_stage = ext_aiohttp.View(\n complete_stage,\n player_repository=player_repository,\n player_transformer=player_transformer,\n )\n\n close_session_due_to_failure = ext_aiohttp.View(\n close_session_due_to_failure,\n player_repository=player_repository,\n player_transformer=player_transformer,\n )\n\n cancel_session = ext_aiohttp.View(\n cancel_session,\n player_repository=player_repository,\n player_transformer=player_transformer,\n )\n\n get_leaderboard = ext_aiohttp.View(\n get_leaderboard,\n player_repository=player_repository,\n leaderboard_transformer=leaderboard_transformer,\n )\n\n compare_images = ext_aiohttp.View(\n compare_images,\n image_comparator=image_comparator\n )\n","repo_name":"BogdanGrebenuk/brain-game-back","sub_path":"app/game/containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"91"} +{"seq_id":"21630717390","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom scipy import stats\r\nfrom scipy.stats import randint\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import preprocessing\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.preprocessing import binarize, LabelEncoder, MinMaxScaler\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nfrom mlxtend.classifier import StackingClassifier\r\nimport tensorflow as tf\r\nimport 
argparse\r\n\r\nsurvey_data=pd.read_csv('survey.csv')\r\n# removing unwanted features\r\nsurvey_data.drop(['comments','state','Country'],axis=1,inplace=True)\r\nmissing_data1=survey_data.isnull().sum()\r\n#Now that we have removed unwanted features, lets check for NaN values and replace them\r\ninteger = 0\r\nString = 'NaN'\r\n# Create lists by data type\r\nintegerFeatures = ['Age']\r\nstringFeatures = ['Gender', 'self_employed', 'work_interfere']\r\n#Find if there are any outliers\r\nfor feature in survey_data:\r\n if feature in integerFeatures:\r\n survey_data[feature] = survey_data[feature].fillna(integer)\r\n elif feature in stringFeatures:\r\n survey_data[feature] = survey_data[feature].fillna(String)\r\nfor i in survey_data['Age']:\r\n if i>100 or i<0:\r\n survey_data['Age']=survey_data['Age'].replace(i,0)\r\n# Clean the NaN's\r\nmean_age=np.mean(survey_data['Age'])\r\nsurvey_data['Age']=survey_data['Age'].replace(0,mean_age).round()\r\ngender=survey_data['Gender'].unique()\r\n# As gender is in all different kinds we make sure that only 3 gender available male,female,trans\r\nmale = [\"male\", \"m\", \"male-ish\", \"maile\", \"mal\", \"male (cis)\", \"make\", \"male \", \"man\",\"msle\", \"mail\", \"malr\",\"cis man\", \"Cis Male\", \"cis male\"]\r\ntrans = [\"trans-female\", \"something kinda male?\", \"queer/she/they\", \"non-binary\",\"nah\", \"all\", \"enby\", \"fluid\", \"genderqueer\", \"androgyne\", \"agender\", \"male leaning androgynous\", \"guy (-ish) ^_^\", \"trans woman\", \"neuter\", \"female (trans)\", \"queer\", \"ostensibly male, unsure what that really means\"]\r\nfemale = [\"cis female\", \"f\", \"female\", \"woman\", \"femake\", \"female \",\"cis-female/femme\", \"female (cis)\", \"femail\"]\r\nfor (row, col) in survey_data.iterrows():\r\n\r\n if str.lower(col.Gender) in male:\r\n survey_data['Gender'].replace(to_replace=col.Gender, value='male', inplace=True)\r\n\r\n if str.lower(col.Gender) in female:\r\n survey_data['Gender'].replace(to_replace=col.Gender, value='female', inplace=True)\r\n\r\n if str.lower(col.Gender) in trans:\r\n survey_data['Gender'].replace(to_replace=col.Gender, value='trans', inplace=True)\r\n\r\n#Get rid of unknown\r\nunknown = ['A little about you', 'p']\r\nsurvey_data = survey_data[~survey_data['Gender'].isin(unknown)]\r\n\r\nsurvey_data['self_employed'] = survey_data['self_employed'].replace(String, 'No')\r\nsurvey_data['work_interfere'] = survey_data['work_interfere'].replace(String, 'Don\\'t know')\r\n# As we have replaced all NaN and null values we check if there are any other missing values\r\nmissing_data2=survey_data.isnull().sum()\r\n\r\n# We should convert the data into numbers to perform the analysis\r\n# Encoding the data\r\nlabels = {}\r\nfor feature in survey_data:\r\n encoder = preprocessing.LabelEncoder()\r\n encoder.fit(survey_data[feature])\r\n encoder_mapping = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))\r\n survey_data[feature] = encoder.transform(survey_data[feature])\r\n labelKey = 'label_' + feature\r\n labelValue = [*encoder_mapping]\r\n labels[labelKey] = labelValue\r\n# to print labels and thier encoded values\r\n#for key, value in labels.items():\r\n # print(key, value)\r\n#Covariance testing- Variability comparison between categories of variables\r\ncorr_matrix=survey_data.corr().round(2)\r\nf, ax = plt.subplots(figsize=(14, 10))\r\nsns.heatmap(corr_matrix,cmap=\"YlGnBu\", annot=True);\r\n\r\n# Distribution and No.of patients by Age\r\nsns.displot(survey_data[\"Age\"], 
bins=12)\r\nplt.title(\"Distribution and density by Age\")\r\nplt.xlabel(\"Age\")\r\nplt.ylabel(\"No.of Patients\")\r\n\r\n# Distribution with no.of patients and treatment\r\ng = sns.FacetGrid(survey_data, col='treatment', height=7)\r\ng.map(sns.distplot, \"Age\")\r\n\r\n#How many people has been treated?\r\nplt.figure(figsize=(12,8))\r\ng = sns.countplot(x=\"treatment\", data=survey_data)\r\nplt.title('Distribution whether treated or not')\r\n# Scaling Age by using MinMaxScalar\r\nscaler = MinMaxScaler()\r\nsurvey_data['Age'] = scaler.fit_transform(survey_data[['Age']])\r\n# Assigning the required data features for the model to train\r\nfeatures_needed = ['Age', 'Gender', 'family_history', 'benefits', 'care_options', 'anonymity', 'leave', 'work_interfere']\r\nfeatures = survey_data[features_needed]\r\ntarget = survey_data['treatment']\r\n\r\n# split X and y into training and testing sets\r\nbatch_size = 100\r\ntrain_steps = 10000\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, random_state=0)\r\n\r\ndef train_input_fn(features, labels, batch_size):\r\n \"\"\"An input function for training\"\"\"\r\n # Convert the inputs to a Dataset.\r\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\r\n\r\n # Shuffle, repeat, and batch the examples.\r\n return dataset.shuffle(100).repeat().batch(batch_size)\r\n\r\ndef eval_input_fn(features, labels, batch_size):\r\n features=dict(features)\r\n if labels is None:\r\n # No labels, use only features.\r\n inputs = features\r\n else:\r\n inputs = (features, labels)\r\n\r\n # Convert the inputs to a Dataset.\r\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\r\n\r\n # Batch the examples\r\n assert batch_size is not None, \"batch_size must not be None\"\r\n dataset = dataset.batch(batch_size)\r\n\r\n # Return the dataset.\r\n return dataset\r\n\r\n# Define Tensorflow feature columns\r\nage = tf.feature_column.numeric_column(\"Age\")\r\ngender = tf.feature_column.numeric_column(\"Gender\")\r\nfamily_history = tf.feature_column.numeric_column(\"family_history\")\r\nbenefits = tf.feature_column.numeric_column(\"benefits\")\r\ncare_options = tf.feature_column.numeric_column(\"care_options\")\r\nanonymity = tf.feature_column.numeric_column(\"anonymity\")\r\nleave = tf.feature_column.numeric_column(\"leave\")\r\nwork_interfere = tf.feature_column.numeric_column(\"work_interfere\")\r\nfeature_columns = [age, gender, family_history, benefits, care_options, anonymity, leave, work_interfere]\r\n\r\n# Build a DNN with 2 hidden layers and 10 nodes in each hidden layer.\r\nmodel = tf.estimator.DNNClassifier(feature_columns=feature_columns,hidden_units=[7,10],optimizer=tf.optimizers.Adam,activation_fn=tf.nn.relu)\r\n\r\nmodel.train(input_fn=lambda:train_input_fn(X_train, y_train, batch_size), steps=train_steps)\r\n\r\n# Evaluate the model.\r\neval_result = model.evaluate(input_fn=lambda:eval_input_fn(X_test, y_test, batch_size))\r\n\r\nprint('\\nTest set accuracy: {accuracy:0.2f}\\n'.format(**eval_result))\r\nmethodDict={}\r\n\r\n#Data for final graph\r\naccuracy = eval_result['accuracy'] * 100\r\nmethodDict['Neural Network'] = accuracy\r\npredictions = list(model.predict(input_fn=lambda:eval_input_fn(X_train, y_train, batch_size=batch_size)))\r\n\r\n# Generate predictions from the model\r\ntemplate = ('\\nIndex: \"{}\", Prediction is \"{}\" ({:.1f}%), expected \"{}\"')\r\n\r\n# Dictionary for predictions\r\ncol1 = []\r\ncol2 = []\r\ncol3 = []\r\n\r\nfor idx, input, p in zip(X_train.index, y_train, 
predictions):\r\n v = p[\"class_ids\"][0]\r\n # Adding to dataframe\r\n col1.append(idx) # Index\r\n col2.append(input)\r\n col3.append(v) # Prediction\r\nresults = pd.DataFrame({'index': col1, 'expected':col2,'prediction': col3})\r\nprint(results.head(10))","repo_name":"sashank5/Prediction-of-Schizophrenia","sub_path":"Schizophrenia.py","file_name":"Schizophrenia.py","file_ext":"py","file_size_in_byte":7684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72175834224","text":"#!/usr/bin/env python3\nimport numpy as np\nimport math as m\nfrom foundations_assignment.srv import GetJointVelocities\nimport rospy\nfrom rospy.rostime import switch_to_wallclock\nfrom rospy.service import ServiceException\nfrom std_msgs.msg import Float64\nfrom controller_manager_msgs.srv import SwitchController\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import Pose, Twist\n\nq1Pos = -1.27\nq2Pos = -0.3\nq3Pos = 0.0\n\n# q1Pos = 1.57\n# q2Pos = -2.8973\n# q3Pos = 0.0\n\np_q1 = 10.0\np_q2 = 10.0\np_q3 = 10.0\n\nd_q1 = 1.0\nd_q2 = 1.0\nd_q3 = 1.0\n\nflag = False\nrefFlag = False\njoint1_velocity_publisher = []\njoint2_velocity_publisher = []\njoint3_velocity_publisher = []\nrefPose = Pose()\ncurPose = Pose()\ndef control(msg):\n global q1Cur, q2Cur, q3Cur\n q1Cur = msg.velocity[0]\n q2Cur = msg.velocity[1]\n q3Cur = msg.velocity[2] \n# def getEndEffectorPose(msg):\n# global refPose, curPose, refFlag\n# curPose = msg\n# if refFlag:\n# refPose = curPose\n# refFlag = False\n\nif __name__=='__main__':\n try:\n rospy.init_node('velocity_controller_node')\n rate = rospy.Rate(100)\n joint1_position_publisher = rospy.Publisher('/robot/joint1_position_controller/command', Float64, queue_size=1)\n joint2_position_publisher = rospy.Publisher('/robot/joint2_position_controller/command', Float64, queue_size=1)\n joint3_position_publisher = rospy.Publisher('/robot/joint3_position_controller/command', Float64, queue_size=1)\n\n joint1_velocity_publisher = rospy.Publisher('/robot/joint1_velocity_controller/command', Float64, queue_size=1)\n joint2_velocity_publisher = rospy.Publisher('/robot/joint2_velocity_controller/command', Float64, queue_size=1)\n joint3_velocity_publisher = rospy.Publisher('/robot/joint3_velocity_controller/command', Float64, queue_size=1)\n\n\n rospy.Subscriber('/robot/joint_states', JointState, control)\n # rospy.Subscriber('/robot/pose', Pose, getEndEffectorPose)\n rospy.sleep(1)\n\n # move away from singularity\n joint1_position_publisher.publish(q1Pos)\n joint2_position_publisher.publish(q2Pos)\n joint3_position_publisher.publish(q3Pos)\n rospy.sleep(5)\n # refFlag = True\n\n rospy.wait_for_service('/robot/controller_manager/switch_controller')\n try:\n switch_service = rospy.ServiceProxy('/robot/controller_manager/switch_controller', SwitchController)\n start_controllers = ['joint1_velocity_controller', 'joint2_velocity_controller', 'joint3_velocity_controller']\n stop_controllers = ['joint1_position_controller', 'joint2_position_controller', 'joint3_position_controller']\n strictness = 2\n start_asap = False\n timeout = 0.0\n res = switch_service(start_controllers, stop_controllers, strictness, start_asap, timeout)\n print(\"Switched Controllers\")\n except rospy.ServiceException as e:\n print(\"Switch Service Call Failed\")\n print(e.what())\n flag = True\n twist = Twist()\n twist.linear.x = 0\n twist.linear.y = 1\n twist.linear.z = 0\n twist.angular.x = 0\n twist.angular.y = 0\n twist.angular.z = 0\n 
rospy.wait_for_service('/robot/get_joint_velocities_service')\n try:\n while True:\n f = open(\"src/foundations_assignment/velocity_plot.csv\", \"a\")\n get_joint_velocities_service = rospy.ServiceProxy('/robot/get_joint_velocities_service', GetJointVelocities)\n print(\"Twist: \", twist)\n q = get_joint_velocities_service(twist)\n print(\"Q = \", q)\n joint1_velocity_publisher.publish(q.q[0].data)\n joint2_velocity_publisher.publish(q.q[1].data)\n joint3_velocity_publisher.publish(q.q[2].data)\n val = \"\"+str(q.q[0].data)+\",\"+str(q.q[1].data)+\",\"+str(q.q[2].data)+\",\"+str(q1Cur)+\",\"+str(q2Cur)+\",\"+str(q3Cur)+\"\\n\"\n f.write(val)\n f.close()\n rospy.sleep(0.1)\n except rospy.ServiceException as e:\n print(\"Service Call Failed\")\n print(e.what())\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n # forward_kinematics(0.9, 1.57, 1.57)","repo_name":"chinmaytodankar/foundations_assignment","sub_path":"src/velocity_controller.py","file_name":"velocity_controller.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13456958800","text":"from rest_framework import serializers\nfrom .models import RawRefresco, RawRefrescoIntegrity, TimestampTransaction\nfrom django.core.validators import RegexValidator\n\n\nclass TimestampTransactionSerializer(serializers.ModelSerializer):\n\n txid = serializers.CharField(\n max_length=64,\n validators=[\n RegexValidator(\n regex='^.{64}$',\n message='Incorrect txid length, must be 64',\n code='txid64')])\n sender_raddress = serializers.CharField(\n max_length=34,\n validators=[\n RegexValidator(\n regex='^.{34}$',\n message='Incorrect raddress length, must be 34',\n code='raddress34')])\n\n class Meta:\n model = TimestampTransaction\n fields = ('id', 'sender_raddress', 'sender_name', 'tsintegrity', 'txid')\n\n\nclass RawRefrescoIntegritySerializer(serializers.ModelSerializer):\n id = serializers.UUIDField(read_only=True)\n tx_list = TimestampTransactionSerializer(many=True, read_only=True)\n\n class Meta:\n model = RawRefrescoIntegrity\n fields = ('id', 'integrity_address', 'integrity_pre_tx', 'integrity_post_tx', 'batch', 'tx_list', 'batch_lot_raddress')\n\n\nclass RawRefrescoSerializer(serializers.ModelSerializer):\n id = serializers.UUIDField(read_only=True)\n integrity_details = RawRefrescoIntegritySerializer(read_only=True)\n\n class Meta:\n model = RawRefresco\n fields = ('id', 'anfp', 'dfp', 'bnfp', 'pds', 'pde', 'jds', 'jde', 'bbd', 'pc', 'pl', 'rmn', 'pon', 'pop', 'mass', 'raw_json', 'integrity_details')\n","repo_name":"Open-Food-Chain/api-import","sub_path":"src/raw_refresco/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8900940685","text":"# By submitting this assignment, I agree to the following:\r\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\r\n# \"I have not given or received any unauthorized aid on this assignment.\"\r\n#\r\n# Name: Caleb Lewis\r\n# Section: 521\r\n# Assignment: Lab 2b\r\n# Date: 10 04 2021\r\n#\r\n\r\n# Prompt the user for a stress value\r\nx0 = float(input(\"Enter the strain: \"))\r\n# If the value is not in the range of the graph, print an error message and end the program, otherwise continue\r\nif(x0<0 or x0>.27):\r\n print(\"Strain is undefined in that region\")\r\n# Find the two nearest points on the graph to the left and right of 
the user’s input using a series of if-else statements\r\n# Set x1, y1, x2, and y2 to the values of those points\r\nelse:\r\n if(x0<=.01):\r\n x1 = 0\r\n y1 = 0\r\n x2 = .01\r\n y2 = 43\r\n elif(x0<=.06):\r\n x1 = .01\r\n y1 = 43\r\n x2 = .06\r\n y2 = 43.5\r\n elif(x0<=.18):\r\n x1 = .06\r\n y1 = 43.5\r\n x2 = .18\r\n y2 = 60\r\n else:\r\n x1 = .18\r\n y1 = 60\r\n x2 = .27\r\n y2 = 51\r\n# Plug the values into the linear interpolation equation\r\n y0 = ((y2-y1)/(x2-x1))*(x0-x1)+y1\r\n# Print y0\r\n print(\"The stress is approximately {:.1f}\".format(y0))\r\n\r\n\r\n\r\n\r\n","repo_name":"CalebLewis0917/Lab-5b","sub_path":"Lab5b_Act2.py","file_name":"Lab5b_Act2.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"75051495023","text":"from odoo import fields, models, api, _\nfrom odoo.exceptions import UserError, ValidationError\nfrom datetime import datetime, timedelta\n\nclass BmgAppointment(models.Model):\n _name= 'bmg.appointment'\n _inherit = ['mail.thread', 'mail.activity.mixin', 'portal.mixin']\n _description = \"Appointment\"\n _order = 'id desc'\n\n appointment_sequence = fields.Char(string='Appointment Sequence')\n appointment_name = fields.Char(string='Appointment Name')\n date_appointment = fields.Date(string='Date Appointment')\n time_appointment = fields.Float(string='Time Appointment')\n duration_appoinment =fields.Integer(string='Duration Appoinment')\n private_appointment = fields.Boolean(string='Private Appointment')\n appointment_type = fields.Selection([('person','Person'),('umkm','UMKM'),('small company','Small Company'),('big company','Big Company'),('other','Other')],string='Appointment Type')\n company_name = fields.Char(string='Company Name')\n company_contacts = fields.Many2one('res.partner',string='Company Contacts')\n company_phone = fields.Char(string='Company Phone')\n company_email = fields.Char(string='Company Email')\n person_name = fields.Char(string='Person Name')\n person_contacts = fields.Many2one('res.partner',string='Person Contacts')\n person_phone = fields.Char(string='Person Phone')\n person_email = fields.Char(string='Person Email')\n employee_in_charge = fields.Many2one('hr.employee',string='Employee in Charge to Consult')\n state = fields.Selection([('new','New'),('ongoing','Ongoing'),('done','Done'),('cancel','Cancel')],default='new',string='Status')\n client_ids = fields.One2many('res.partner','client_id')\n descriptions = fields.Text('Description')\n\n @api.constrains('date_appointment')\n def check_date_appointment(self):\n date_appointment = datetime.strftime()\n days_name = date.today().strftime(\"%A\")\n print('days_name constrain..',days_name)\n if days_name in ('Saturday','Sunday'):\n raise UserError(_('Cannot fill weekend')) \n \n\n @api.model\n def create(self, vals):\n if vals.get('appointment_sequence', 'New') == 'New':\n vals['appointment_sequence'] = self.env['ir.sequence'].next_by_code('bmg.appointment.seq') or '/'\n return super(BmgAppointment, self).create(vals)\n\nclass ResParnter(models.Model):\n _inherit = 'res.partner'\n\n client_id = fields.Many2one('bmg.appointment',\"Appointment\")","repo_name":"tubagusuhendra/bmg","sub_path":"bmg_appointment/models/bmg_appointment.py","file_name":"bmg_appointment.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"27910772648","text":"#メルカリのデータをスクレイピング\r\nfrom selenium import 
webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.common.exceptions import StaleElementReferenceException\r\nfrom selenium.common.exceptions import ElementNotVisibleException\r\nfrom selenium.common.exceptions import WebDriverException\r\nimport re\r\nimport time\r\nimport datetime\r\n#import requests\r\nimport xlwt\r\nimport os\r\nimport urllib.request\r\nfrom openpyxl import Workbook, load_workbook\r\n\r\n# Excelのシート作成\r\n#book = xlwt.Workbook()\r\n#sheet1=book.add_sheet('sheet1')\r\nwb = Workbook()\r\nbook = wb['Sheet']\r\n\r\nbook['b1'].value = 'タイトル'\r\nbook['c1'].value = '価格'\r\nbook['d1'].value = '詳細'\r\nbook['e1'].value = '商品の状態'\r\nbook['f1'].value = '画像1'\r\nbook['g1'].value = '画像2'\r\nbook['h1'].value = '画像3'\r\nbook['i1'].value = '画像4'\r\n\r\n# chroを定義\r\ntry:\r\n chro = webdriver.Chrome()\r\nexcept:\r\n chro = webdriver.Chrome('C:/selenium/chromedriver')\r\n\r\ndef searchLink(linkString):\r\n elem_last_btn=chro.find_element_by_link_text(linkString)\r\n elem_last_btn.click()\r\n time.sleep(1)\r\n\r\ndef xpath_click(xpath):\r\n input = chro.find_element_by_xpath(xpath)\r\n input.click()\r\n time.sleep(1)\r\n\r\n#メルカリでログイン\r\nchro.get('https://www.mercari.com/jp/')\r\n\r\nchro.find_element_by_link_text('ログイン').click()\r\ntime.sleep(1)\r\n\r\ninput = chro.find_element_by_xpath('/html/body/div[1]/main/div/form/div/div[1]/input')\r\ntime.sleep(1)\r\n\r\ninput.send_keys('XXXXX@yahoo.co.jp')\r\ntime.sleep(1)\r\n\r\ninput = chro.find_element_by_xpath('/html/body/div[1]/main/div/form/div/div[2]/input')\r\n\r\ntime.sleep(1)\r\n\r\ninput.send_keys('XXXXX')\r\n\r\ntime.sleep(60)# 私はロボットではありませんを回避する\r\n\r\ninput = chro.find_element_by_xpath('/html/body/div[1]/main/div/form/div/button')\r\ninput.click()\r\ntime.sleep(1)\r\n\r\n\r\n\"\"\"page_dic = {}\"\"\"\r\ntry:\r\n os.mkdir('C:/Users/Fukasawa-gu/Desktop/merukari_data')\r\nexcept:\r\n None\r\n\r\npage = 1\r\ntry:\r\n k = 0\r\n while(True):\r\n if page == 1:\r\n url = 'https://www.mercari.com/jp/mypage/listings/listing/'\r\n else:\r\n url= chro.current_url\r\n print(page, url)\r\n for i in range(1,51):\r\n chro.get(url)\r\n time.sleep(1)\r\n \"\"\"if i == 51:\r\n print(i)\r\n break\"\"\"\r\n\r\n open = chro.find_element_by_xpath('//*[@id=\"mypage-tab-transaction-now\"]/li[{}]/a/div/div[2]/div'.format(i)).get_attribute('innerHTML')\r\n time.sleep(1)\r\n print(open)\r\n if '公開停止中' in open:\r\n print(open + 'なので飛ばします。')\r\n continue\r\n\r\n title = chro.find_element_by_xpath('//*[@id=\"mypage-tab-transaction-now\"]/li[{}]/a/div/div[1]'.format(i)).get_attribute('innerHTML')\r\n time.sleep(1)\r\n print(title)\r\n # 指定の文字がタイトルに含まれていた時は以下のコメントアウトを外す\r\n \"\"\"s = ''\r\n if s in title:\r\n print(s + 'が含まれているので飛ばします。')\r\n continue\"\"\"\r\n\r\n # 商品ページに移動\r\n xpath_click('//*[@id=\"mypage-tab-transaction-now\"]/li[{}]/a/div/div[2]/div'.format(i))\r\n\r\n # タイトル、詳細、値段、発送元、商品の状態\r\n title = chro.find_element_by_xpath('/html/body/div/main/div[1]/section/h2').get_attribute('innerHTML')\r\n description = chro.find_element_by_xpath('/html/body/div/main/div[1]/section/div[3]').get_attribute('innerHTML')\r\n price = chro.find_element_by_xpath('/html/body/div/main/div[1]/section/div[2]/span[1]').get_attribute('innerHTML')\r\n price = price.replace(',','')\r\n price = price.replace('¥','')\r\n price = price.replace(' ','')\r\n condition = chro.find_element_by_xpath('/html/body/div/main/div[1]/section/div[1]/table/tbody/tr[4]/td').get_attribute('innerHTML')\r\n\r\n print('-----------------------------------')\r\n 
print(chro.current_url)\r\n print('-----------------------------------')\r\n print(title)\r\n print('-----------------------------------')\r\n print(description)\r\n print('-----------------------------------')\r\n print(price)\r\n print('-----------------------------------')\r\n print(condition)\r\n\r\n gazos = {}\r\n filenames = []\r\n for j in range(1,5):\r\n try:\r\n xpath_click('/html/body/div/main/div[1]/section/div[1]/div/div/div[3]/div[%s]'%j)\r\n gazos[j] = chro.find_element_by_xpath('/html/body/div/main/div[1]/section/div[1]/div/div/div[1]/div/div[%s]/div/img'%j).get_attribute('src')\r\n print('gazos[j]',gazos[j])\r\n #res = requests.get(gazos[j])\r\n print('open',page,i,j)\r\n path = 'C:/Users/Fukasawa-gu/Desktop/merukari_data/img{0}_{1}_{2}.jpg'.format(page,i,j)\r\n filenames.append('img{0}_{1}_{2}.jpg'.format(page,i,j))\r\n print(path)\r\n urllib.request.urlretrieve(gazos[j], path)\r\n\r\n except:\r\n print('画像が4枚未満')\r\n pass\r\n\r\n print(gazos)\r\n\r\n book = wb['Sheet']\r\n book['b'+str(i+k+1)].value = title\r\n book['c'+str(i+k+1)].value = price\r\n book['d'+str(i+k+1)].value = description\r\n book['e'+str(i+k+1)].value = condition\r\n try:\r\n book['f'+str(i+k+1)].value = filenames[0]\r\n book['g'+str(i+k+1)].value = filenames[1]\r\n book['h'+str(i+k+1)].value = filenames[2]\r\n book['i'+str(i+k+1)].value = filenames[3]\r\n except:\r\n pass\r\n wb.save('C:/Users/Fukasawa-gu/Desktop/merukari_data/merukari_data.xlsx')\r\n\r\n chro.back()\r\n time.sleep(1)\r\n k += 50\r\n page += 1\r\n xpath_click('/html/body/div/main/div[1]/ul/li[2]/a')\r\n\r\nexcept Exception as e:\r\n print(e)\r\n print('最終ページまで行きつきました。')\r\n\r\nprint('end')\r\n","repo_name":"Fukasawa-gu/practice","sub_path":"merukari_get_data.py","file_name":"merukari_get_data.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25003170389","text":"# two pointers双指针算法\n# 给你一个递增排序的整数阵列nums,return每个数字的平方并组成新的整数阵列,并且新的阵列也须依照递增排序\n# 譬如:nums = [-4,-1,0,3,10] 输出:[0,1,9,16,100],因平方后,整数变为[16,1,0,9,100],排序后变为[0,1,9,16,100]\n\nclass Solution:\n def sortedSquares(self, nums:int) -> int:\n # n=5\n n = len(nums)\n # i=0, j=4, k=4\n i,j,k = 0, n-1, n-1\n # 宣告ans阵列=[-1, -1, -1, -1, -1]\n ans = [-1] * n\n\n while i <= j:\n # 整数阵列的最左边\n lm = nums[i] ** 2\n # 整数阵列的最右边\n rm = nums[j] ** 2\n \n if lm > rm:\n # 如果最左边大于最右边,就把最左边的数,移到ans阵列最右边\n ans[k] = lm\n i += 1\n else:\n # 如果最左边没有大于最右边,就把最右边的数,移到ans阵列最右边\n ans[k] = rm\n j -= 1\n # 当最右边的数放好后,ans的k指标-1\n k -= 1\n return ans\n\nnums = [-4,-1,0,3,10]\nAns = Solution()\nprint(\"The new array is %s\" %(Ans.sortedSquares(nums)))\n# The new array is [0, 1, 9, 16, 100]","repo_name":"blive0321/python","sub_path":"algorithm/two_pointers.py","file_name":"two_pointers.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24050477462","text":"\"\"\"\nProcedures for fitting marginal regression models to dependent data\nusing Generalized Estimating Equations.\n\nReferences\n----------\nKY Liang and S Zeger. \"Longitudinal data analysis using\ngeneralized linear models\". Biometrika (1986) 73 (1): 13-22.\n\nS Zeger and KY Liang. \"Longitudinal Data Analysis for Discrete and\nContinuous Outcomes\". Biometrics Vol. 42, No. 1 (Mar., 1986),\npp. 121-130\n\nA Rotnitzky and NP Jewell (1990). 
\"Hypothesis testing of regression\nparameters in semiparametric generalized linear models for cluster\ncorrelated data\", Biometrika, 77, 485-497.\n\nXu Guo and Wei Pan (2002). \"Small sample performance of the score\ntest in GEE\".\nhttp://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf\n\nLA Mancl LA, TA DeRouen (2001). A covariance estimator for GEE with\nimproved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.\n\"\"\"\nfrom statsmodels.compat.python import lzip\nfrom statsmodels.compat.pandas import Appender\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport patsy\nfrom collections import defaultdict\nfrom statsmodels.tools.decorators import cache_readonly\nimport statsmodels.base.model as base\n# used for wrapper:\nimport statsmodels.regression.linear_model as lm\nimport statsmodels.base.wrapper as wrap\n\nfrom statsmodels.genmod import families\nfrom statsmodels.genmod.generalized_linear_model import GLM, GLMResults\nfrom statsmodels.genmod import cov_struct as cov_structs\n\nimport statsmodels.genmod.families.varfuncs as varfuncs\nfrom statsmodels.genmod.families.links import Link\n\nfrom statsmodels.tools.sm_exceptions import (ConvergenceWarning,\n DomainWarning,\n IterationLimitWarning,\n ValueWarning)\nimport warnings\n\nfrom statsmodels.graphics._regressionplots_doc import (\n _plot_added_variable_doc,\n _plot_partial_residuals_doc,\n _plot_ceres_residuals_doc)\nfrom statsmodels.discrete.discrete_margins import (\n _get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,\n _check_at_is_all, _transform_names, _check_discrete_args,\n _get_dummy_index, _get_count_index)\n\n\nclass ParameterConstraint:\n \"\"\"\n A class for managing linear equality constraints for a parameter\n vector.\n \"\"\"\n\n def __init__(self, lhs, rhs, exog):\n \"\"\"\n Parameters\n ----------\n lhs : ndarray\n A q x p matrix which is the left hand side of the\n constraint lhs * param = rhs. The number of constraints is\n q >= 1 and p is the dimension of the parameter vector.\n rhs : ndarray\n A 1-dimensional vector of length q which is the right hand\n side of the constraint equation.\n exog : ndarray\n The n x p exognenous data for the full model.\n \"\"\"\n\n # In case a row or column vector is passed (patsy linear\n # constraints passes a column vector).\n rhs = np.atleast_1d(rhs.squeeze())\n\n if rhs.ndim > 1:\n raise ValueError(\"The right hand side of the constraint \"\n \"must be a vector.\")\n\n if len(rhs) != lhs.shape[0]:\n raise ValueError(\"The number of rows of the left hand \"\n \"side constraint matrix L must equal \"\n \"the length of the right hand side \"\n \"constraint vector R.\")\n\n self.lhs = lhs\n self.rhs = rhs\n\n # The columns of lhs0 are an orthogonal basis for the\n # orthogonal complement to row(lhs), the columns of lhs1 are\n # an orthogonal basis for row(lhs). 
The columns of lhsf =\n # [lhs0, lhs1] are mutually orthogonal.\n lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)\n self.lhs0 = lhs_u[:, len(lhs_s):]\n self.lhs1 = lhs_u[:, 0:len(lhs_s)]\n self.lhsf = np.hstack((self.lhs0, self.lhs1))\n\n # param0 is one solution to the underdetermined system\n # L * param = R.\n self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /\n lhs_s)\n\n self._offset_increment = np.dot(exog, self.param0)\n\n self.orig_exog = exog\n self.exog_fulltrans = np.dot(exog, self.lhsf)\n\n def offset_increment(self):\n \"\"\"\n Returns a vector that should be added to the offset vector to\n accommodate the constraint.\n\n Parameters\n ----------\n exog : array_like\n The exogeneous data for the model.\n \"\"\"\n\n return self._offset_increment\n\n def reduced_exog(self):\n \"\"\"\n Returns a linearly transformed exog matrix whose columns span\n the constrained model space.\n\n Parameters\n ----------\n exog : array_like\n The exogeneous data for the model.\n \"\"\"\n return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]\n\n def restore_exog(self):\n \"\"\"\n Returns the full exog matrix before it was reduced to\n satisfy the constraint.\n \"\"\"\n return self.orig_exog\n\n def unpack_param(self, params):\n \"\"\"\n Converts the parameter vector `params` from reduced to full\n coordinates.\n \"\"\"\n\n return self.param0 + np.dot(self.lhs0, params)\n\n def unpack_cov(self, bcov):\n \"\"\"\n Converts the covariance matrix `bcov` from reduced to full\n coordinates.\n \"\"\"\n\n return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))\n\n\n_gee_init_doc = \"\"\"\n Marginal regression model fit using Generalized Estimating Equations.\n\n GEE can be used to fit Generalized Linear Models (GLMs) when the\n data have a grouped structure, and the observations are possibly\n correlated within groups but not between groups.\n\n Parameters\n ----------\n endog : array_like\n 1d array of endogenous values (i.e. responses, outcomes,\n dependent variables, or 'Y' values).\n exog : array_like\n 2d array of exogeneous values (i.e. covariates, predictors,\n independent variables, regressors, or 'X' values). A `nobs x\n k` array where `nobs` is the number of observations and `k` is\n the number of regressors. An intercept is not included by\n default and should be added by the user. See\n `statsmodels.tools.add_constant`.\n groups : array_like\n A 1d array of length `nobs` containing the group labels.\n time : array_like\n A 2d array of time (or other index) values, used by some\n dependence structures to define similarity relationships among\n observations within a cluster.\n family : family class instance\n%(family_doc)s\n cov_struct : CovStruct class instance\n The default is Independence. To specify an exchangeable\n structure use cov_struct = Exchangeable(). See\n statsmodels.genmod.cov_struct.CovStruct for more\n information.\n offset : array_like\n An offset to be included in the fit. If provided, must be\n an array whose length is the number of rows in exog.\n dep_data : array_like\n Additional data passed to the dependence structure.\n constraint : (ndarray, ndarray)\n If provided, the constraint is a tuple (L, R) such that the\n model parameters are estimated under the constraint L *\n param = R, where L is a q x p matrix and R is a\n q-dimensional vector. 
If constraint is provided, a score\n test is performed to compare the constrained model to the\n unconstrained model.\n update_dep : bool\n If true, the dependence parameters are optimized, otherwise\n they are held fixed at their starting values.\n weights : array_like\n An array of case weights to use in the analysis.\n %(extra_params)s\n\n See Also\n --------\n statsmodels.genmod.families.family\n :ref:`families`\n :ref:`links`\n\n Notes\n -----\n Only the following combinations make sense for family and link ::\n\n + ident log logit probit cloglog pow opow nbinom loglog logc\n Gaussian | x x x\n inv Gaussian | x x x\n binomial | x x x x x x x x x\n Poisson | x x x\n neg binomial | x x x x\n gamma | x x x\n\n Not all of these link functions are currently available.\n\n Endog and exog are references so that if the data they refer\n to are already arrays and these arrays are changed, endog and\n exog will change.\n\n The \"robust\" covariance type is the standard \"sandwich estimator\"\n (e.g. Liang and Zeger (1986)). It is the default here and in most\n other packages. The \"naive\" estimator gives smaller standard\n errors, but is only correct if the working correlation structure\n is correctly specified. The \"bias reduced\" estimator of Mancl and\n DeRouen (Biometrics, 2001) reduces the downward bias of the robust\n estimator.\n\n The robust covariance provided here follows Liang and Zeger (1986)\n and agrees with R's gee implementation. To obtain the robust\n standard errors reported in Stata, multiply by sqrt(N / (N - g)),\n where N is the total sample size, and g is the average group size.\n %(notes)s\n Examples\n --------\n %(example)s\n\"\"\"\n\n_gee_nointercept = \"\"\"\n The nominal and ordinal GEE models should not have an intercept\n (either implicit or explicit). Use \"0 + \" in a formula to\n suppress the intercept.\n\"\"\"\n\n_gee_family_doc = \"\"\"\\\n The default is Gaussian. To specify the binomial\n distribution use `family=sm.families.Binomial()`. Each family\n can take a link instance as an argument. See\n statsmodels.genmod.families.family for more information.\"\"\"\n\n_gee_ordinal_family_doc = \"\"\"\\\n The only family supported is `Binomial`. The default `Logit`\n link may be replaced with `probit` if desired.\"\"\"\n\n_gee_nominal_family_doc = \"\"\"\\\n The default value `None` uses a multinomial logit family\n specifically designed for use with GEE. Setting this\n argument to a non-default value is not currently supported.\"\"\"\n\n_gee_fit_doc = \"\"\"\n Fits a marginal regression model using generalized estimating\n equations (GEE).\n\n Parameters\n ----------\n maxiter : int\n The maximum number of iterations\n ctol : float\n The convergence criterion for stopping the Gauss-Seidel\n iterations\n start_params : array_like\n A vector of starting values for the regression\n coefficients. If None, a default is chosen.\n params_niter : int\n The number of Gauss-Seidel updates of the mean structure\n parameters that take place prior to each update of the\n dependence structure.\n first_dep_update : int\n No dependence structure updates occur before this\n iteration number.\n cov_type : str\n One of \"robust\", \"naive\", or \"bias_reduced\".\n ddof_scale : scalar or None\n The scale parameter is estimated as the sum of squared\n Pearson residuals divided by `N - ddof_scale`, where N\n is the total sample size. 
If `ddof_scale` is None, the\n number of covariates (including an intercept if present)\n is used.\n scaling_factor : scalar\n The estimated covariance of the parameter estimates is\n scaled by this value. Default is 1, Stata uses N / (N - g),\n where N is the total sample size and g is the average group\n size.\n scale : str or float, optional\n `scale` can be None, 'X2', or a float\n If a float, its value is used as the scale parameter.\n The default value is None, which uses `X2` (Pearson's\n chi-square) for Gamma, Gaussian, and Inverse Gaussian.\n The default is 1 for the Binomial and Poisson families.\n\n Returns\n -------\n An instance of the GEEResults class or subclass\n\n Notes\n -----\n If convergence difficulties occur, increase the values of\n `first_dep_update` and/or `params_niter`. Setting\n `first_dep_update` to a greater value (e.g. ~10-20) causes the\n algorithm to move close to the GLM solution before attempting\n to identify the dependence structure.\n\n For the Gaussian family, there is no benefit to setting\n `params_niter` to a value greater than 1, since the mean\n structure parameters converge in one step.\n\"\"\"\n\n_gee_results_doc = \"\"\"\n Attributes\n ----------\n\n cov_params_default : ndarray\n default covariance of the parameter estimates. Is chosen among one\n of the following three based on `cov_type`\n cov_robust : ndarray\n covariance of the parameter estimates that is robust\n cov_naive : ndarray\n covariance of the parameter estimates that is not robust to\n correlation or variance misspecification\n cov_robust_bc : ndarray\n covariance of the parameter estimates that is robust and bias\n reduced\n converged : bool\n indicator for convergence of the optimization.\n True if the norm of the score is smaller than a threshold\n cov_type : str\n string indicating whether a \"robust\", \"naive\" or \"bias_reduced\"\n covariance is used as default\n fit_history : dict\n Contains information about the iterations.\n fittedvalues : ndarray\n Linear predicted values for the fitted model.\n dot(exog, params)\n model : class instance\n Pointer to GEE model instance that called `fit`.\n normalized_cov_params : ndarray\n See GEE docstring\n params : ndarray\n The coefficients of the fitted model. 
Note that\n interpretation of the coefficients often depends on the\n distribution family and the data.\n scale : float\n The estimate of the scale / dispersion for the model fit.\n See GEE.fit for more information.\n score_norm : float\n norm of the score at the end of the iterative estimation.\n bse : ndarray\n The standard errors of the fitted GEE parameters.\n\"\"\"\n\n_gee_example = \"\"\"\n Logistic regression with autoregressive working dependence:\n\n >>> import statsmodels.api as sm\n >>> family = sm.families.Binomial()\n >>> va = sm.cov_struct.Autoregressive()\n >>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)\n >>> result = model.fit()\n >>> print(result.summary())\n\n Use formulas to fit a Poisson GLM with independent working\n dependence:\n\n >>> import statsmodels.api as sm\n >>> fam = sm.families.Poisson()\n >>> ind = sm.cov_struct.Independence()\n >>> model = sm.GEE.from_formula(\"y ~ age + trt + base\", \"subject\",\n data, cov_struct=ind, family=fam)\n >>> result = model.fit()\n >>> print(result.summary())\n\n Equivalent, using the formula API:\n\n >>> import statsmodels.api as sm\n >>> import statsmodels.formula.api as smf\n >>> fam = sm.families.Poisson()\n >>> ind = sm.cov_struct.Independence()\n >>> model = smf.gee(\"y ~ age + trt + base\", \"subject\",\n data, cov_struct=ind, family=fam)\n >>> result = model.fit()\n >>> print(result.summary())\n\"\"\"\n\n_gee_ordinal_example = \"\"\"\n Fit an ordinal regression model using GEE, with \"global\n odds ratio\" dependence:\n\n >>> import statsmodels.api as sm\n >>> gor = sm.cov_struct.GlobalOddsRatio(\"ordinal\")\n >>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)\n >>> result = model.fit()\n >>> print(result.summary())\n\n Using formulas:\n\n >>> import statsmodels.formula.api as smf\n >>> model = smf.ordinal_gee(\"y ~ 0 + x1 + x2\", groups, data,\n cov_struct=gor)\n >>> result = model.fit()\n >>> print(result.summary())\n\"\"\"\n\n_gee_nominal_example = \"\"\"\n Fit a nominal regression model using GEE:\n\n >>> import statsmodels.api as sm\n >>> import statsmodels.formula.api as smf\n >>> gor = sm.cov_struct.GlobalOddsRatio(\"nominal\")\n >>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)\n >>> result = model.fit()\n >>> print(result.summary())\n\n Using formulas:\n\n >>> import statsmodels.api as sm\n >>> model = sm.NominalGEE.from_formula(\"y ~ 0 + x1 + x2\", groups,\n data, cov_struct=gor)\n >>> result = model.fit()\n >>> print(result.summary())\n\n Using the formula API:\n\n >>> import statsmodels.formula.api as smf\n >>> model = smf.nominal_gee(\"y ~ 0 + x1 + x2\", groups, data,\n cov_struct=gor)\n >>> result = model.fit()\n >>> print(result.summary())\n\"\"\"\n\n\ndef _check_args(endog, exog, groups, time, offset, exposure):\n\n if endog.size != exog.shape[0]:\n raise ValueError(\"Leading dimension of 'exog' should match \"\n \"length of 'endog'\")\n\n if groups.size != endog.size:\n raise ValueError(\"'groups' and 'endog' should have the same size\")\n\n if time is not None and (time.size != endog.size):\n raise ValueError(\"'time' and 'endog' should have the same size\")\n\n if offset is not None and (offset.size != endog.size):\n raise ValueError(\"'offset and 'endog' should have the same size\")\n\n if exposure is not None and (exposure.size != endog.size):\n raise ValueError(\"'exposure' and 'endog' should have the same size\")\n\n\nclass GEE(GLM):\n\n __doc__ = (\n \" Marginal Regression Model using Generalized Estimating \"\n \"Equations.\\n\" + _gee_init_doc %\n 
{'extra_params': base._missing_param_doc,\n 'family_doc': _gee_family_doc,\n 'example': _gee_example,\n 'notes': \"\"})\n\n cached_means = None\n\n def __init__(self, endog, exog, groups, time=None, family=None,\n cov_struct=None, missing='none', offset=None,\n exposure=None, dep_data=None, constraint=None,\n update_dep=True, weights=None, **kwargs):\n\n if type(self) is GEE:\n self._check_kwargs(kwargs)\n if family is not None:\n if not isinstance(family.link, tuple(family.safe_links)):\n msg = (\"The {0} link function does not respect the \"\n \"domain of the {1} family.\")\n warnings.warn(msg.format(family.link.__class__.__name__,\n family.__class__.__name__),\n DomainWarning)\n\n groups = np.asarray(groups) # in case groups is pandas\n\n if \"missing_idx\" in kwargs and kwargs[\"missing_idx\"] is not None:\n # If here, we are entering from super.from_formula; missing\n # has already been dropped from endog and exog, but not from\n # the other variables.\n ii = ~kwargs[\"missing_idx\"]\n groups = groups[ii]\n if time is not None:\n time = time[ii]\n if offset is not None:\n offset = offset[ii]\n if exposure is not None:\n exposure = exposure[ii]\n del kwargs[\"missing_idx\"]\n\n self.missing = missing\n self.dep_data = dep_data\n self.constraint = constraint\n self.update_dep = update_dep\n\n self._fit_history = defaultdict(list)\n\n # Pass groups, time, offset, and dep_data so they are\n # processed for missing data along with endog and exog.\n # Calling super creates self.exog, self.endog, etc. as\n # ndarrays and the original exog, endog, etc. are\n # self.data.endog, etc.\n super(GEE, self).__init__(endog, exog, groups=groups,\n time=time, offset=offset,\n exposure=exposure, weights=weights,\n dep_data=dep_data, missing=missing,\n family=family, **kwargs)\n\n _check_args(\n self.endog,\n self.exog,\n self.groups,\n self.time,\n getattr(self, \"offset\", None),\n getattr(self, \"exposure\", None),\n )\n\n self._init_keys.extend([\"update_dep\", \"constraint\", \"family\",\n \"cov_struct\"])\n # remove keys added by super that are not supported\n try:\n self._init_keys.remove(\"freq_weights\")\n self._init_keys.remove(\"var_weights\")\n except ValueError:\n pass\n\n # Handle the family argument\n if family is None:\n family = families.Gaussian()\n else:\n if not issubclass(family.__class__, families.Family):\n raise ValueError(\"GEE: `family` must be a genmod \"\n \"family instance\")\n self.family = family\n\n # Handle the cov_struct argument\n if cov_struct is None:\n cov_struct = cov_structs.Independence()\n else:\n if not issubclass(cov_struct.__class__, cov_structs.CovStruct):\n raise ValueError(\"GEE: `cov_struct` must be a genmod \"\n \"cov_struct instance\")\n\n self.cov_struct = cov_struct\n\n # Handle the constraint\n self.constraint = None\n if constraint is not None:\n if len(constraint) != 2:\n raise ValueError(\"GEE: `constraint` must be a 2-tuple.\")\n if constraint[0].shape[1] != self.exog.shape[1]:\n raise ValueError(\n \"GEE: the left hand side of the constraint must have \"\n \"the same number of columns as the exog matrix.\")\n self.constraint = ParameterConstraint(constraint[0],\n constraint[1],\n self.exog)\n\n if self._offset_exposure is not None:\n self._offset_exposure += self.constraint.offset_increment()\n else:\n self._offset_exposure = (\n self.constraint.offset_increment().copy())\n self.exog = self.constraint.reduced_exog()\n\n # Create list of row indices for each group\n group_labels, ix = np.unique(self.groups, return_inverse=True)\n se = 
pd.Series(index=np.arange(len(ix)), dtype=\"int\")\n gb = se.groupby(ix).groups\n dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]\n self.group_indices = dict(dk)\n self.group_labels = group_labels\n\n # Convert the data to the internal representation, which is a\n # list of arrays, corresponding to the groups.\n self.endog_li = self.cluster_list(self.endog)\n self.exog_li = self.cluster_list(self.exog)\n\n if self.weights is not None:\n self.weights_li = self.cluster_list(self.weights)\n\n self.num_group = len(self.endog_li)\n\n # Time defaults to a 1d grid with equal spacing\n if self.time is not None:\n if self.time.ndim == 1:\n self.time = self.time[:, None]\n self.time_li = self.cluster_list(self.time)\n else:\n self.time_li = \\\n [np.arange(len(y), dtype=np.float64)[:, None]\n for y in self.endog_li]\n self.time = np.concatenate(self.time_li)\n\n if (self._offset_exposure is None or\n (np.isscalar(self._offset_exposure) and\n self._offset_exposure == 0.)):\n self.offset_li = None\n else:\n self.offset_li = self.cluster_list(self._offset_exposure)\n if constraint is not None:\n self.constraint.exog_fulltrans_li = \\\n self.cluster_list(self.constraint.exog_fulltrans)\n\n self.family = family\n\n self.cov_struct.initialize(self)\n\n # Total sample size\n group_ns = [len(y) for y in self.endog_li]\n self.nobs = sum(group_ns)\n # The following are column based, not on rank see #1928\n self.df_model = self.exog.shape[1] - 1 # assumes constant\n self.df_resid = self.nobs - self.exog.shape[1]\n\n # Skip the covariance updates if all groups have a single\n # observation (reduces to fitting a GLM).\n maxgroup = max([len(x) for x in self.endog_li])\n if maxgroup == 1:\n self.update_dep = False\n\n # Override to allow groups and time to be passed as variable\n # names.\n @classmethod\n def from_formula(cls, formula, groups, data, subset=None,\n time=None, offset=None, exposure=None,\n *args, **kwargs):\n \"\"\"\n Create a GEE model instance from a formula and dataframe.\n\n Parameters\n ----------\n formula : str or generic Formula object\n The formula specifying the model\n groups : array_like or string\n Array of grouping labels. If a string, this is the name\n of a variable in `data` that contains the grouping labels.\n data : array_like\n The data for the model.\n subset : array_like\n An array-like object of booleans, integers, or index\n values that indicate the subset of the data to used when\n fitting the model.\n time : array_like or string\n The time values, used for dependence structures involving\n distances between observations. If a string, this is the\n name of a variable in `data` that contains the time\n values.\n offset : array_like or string\n The offset values, added to the linear predictor. If a\n string, this is the name of a variable in `data` that\n contains the offset values.\n exposure : array_like or string\n The exposure values, only used if the link function is the\n logarithm function, in which case the log of `exposure`\n is added to the offset (if any). If a string, this is the\n name of a variable in `data` that contains the offset\n values.\n %(missing_param_doc)s\n args : extra arguments\n These are passed to the model\n kwargs : extra keyword arguments\n These are passed to the model with two exceptions. `dep_data`\n is processed as described below. The ``eval_env`` keyword is\n passed to patsy. It can be either a\n :class:`patsy:patsy.EvalEnvironment` object or an integer\n indicating the depth of the namespace to use. 
For example, the\n default ``eval_env=0`` uses the calling namespace.\n If you wish to use a \"clean\" environment set ``eval_env=-1``.\n\n Optional arguments\n ------------------\n dep_data : str or array_like\n Data used for estimating the dependence structure. See\n specific dependence structure classes (e.g. Nested) for\n details. If `dep_data` is a string, it is interpreted as\n a formula that is applied to `data`. If it is an array, it\n must be an array of strings corresponding to column names in\n `data`. Otherwise it must be an array-like with the same\n number of rows as data.\n\n Returns\n -------\n model : GEE model instance\n\n Notes\n -----\n `data` must define __getitem__ with the keys in the formula\n terms args and kwargs are passed on to the model\n instantiation. E.g., a numpy structured or rec array, a\n dictionary, or a pandas DataFrame.\n \"\"\" % {'missing_param_doc': base._missing_param_doc}\n\n groups_name = \"Groups\"\n if isinstance(groups, str):\n groups_name = groups\n groups = data[groups]\n\n if isinstance(time, str):\n time = data[time]\n\n if isinstance(offset, str):\n offset = data[offset]\n\n if isinstance(exposure, str):\n exposure = data[exposure]\n\n dep_data = kwargs.get(\"dep_data\")\n dep_data_names = None\n if dep_data is not None:\n if isinstance(dep_data, str):\n dep_data = patsy.dmatrix(dep_data, data,\n return_type='dataframe')\n dep_data_names = dep_data.columns.tolist()\n else:\n dep_data_names = list(dep_data)\n dep_data = data[dep_data]\n kwargs[\"dep_data\"] = np.asarray(dep_data)\n\n family = None\n if \"family\" in kwargs:\n family = kwargs[\"family\"]\n del kwargs[\"family\"]\n\n model = super(GEE, cls).from_formula(formula, data=data, subset=subset,\n groups=groups, time=time,\n offset=offset,\n exposure=exposure,\n family=family,\n *args, **kwargs)\n\n if dep_data_names is not None:\n model._dep_data_names = dep_data_names\n model._groups_name = groups_name\n\n return model\n\n def cluster_list(self, array):\n \"\"\"\n Returns `array` split into subarrays corresponding to the\n cluster structure.\n \"\"\"\n\n if array.ndim == 1:\n return [np.array(array[self.group_indices[k]])\n for k in self.group_labels]\n else:\n return [np.array(array[self.group_indices[k], :])\n for k in self.group_labels]\n\n def compare_score_test(self, submodel):\n \"\"\"\n Perform a score test for the given submodel against this model.\n\n Parameters\n ----------\n submodel : GEEResults instance\n A fitted GEE model that is a submodel of this model.\n\n Returns\n -------\n A dictionary with keys \"statistic\", \"p-value\", and \"df\",\n containing the score test statistic, its chi^2 p-value,\n and the degrees of freedom used to compute the p-value.\n\n Notes\n -----\n The score test can be performed without calling 'fit' on the\n larger model. The provided submodel must be obtained from a\n fitted GEE.\n\n This method performs the same score test as can be obtained by\n fitting the GEE with a linear constraint and calling `score_test`\n on the results.\n\n References\n ----------\n Xu Guo and Wei Pan (2002). \"Small sample performance of the score\n test in GEE\".\n http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf\n \"\"\"\n\n # Since the model has not been fit, its scaletype has not been\n # set. 
So give it the scaletype of the submodel.\n self.scaletype = submodel.model.scaletype\n\n # Check consistency between model and submodel (not a comprehensive\n # check)\n submod = submodel.model\n if self.exog.shape[0] != submod.exog.shape[0]:\n msg = \"Model and submodel have different numbers of cases.\"\n raise ValueError(msg)\n if self.exog.shape[1] == submod.exog.shape[1]:\n msg = \"Model and submodel have the same number of variables\"\n warnings.warn(msg)\n if not isinstance(self.family, type(submod.family)):\n msg = \"Model and submodel have different GLM families.\"\n warnings.warn(msg)\n if not isinstance(self.cov_struct, type(submod.cov_struct)):\n warnings.warn(\"Model and submodel have different GEE covariance \"\n \"structures.\")\n if not np.equal(self.weights, submod.weights).all():\n msg = \"Model and submodel should have the same weights.\"\n warnings.warn(msg)\n\n # Get the positions of the submodel variables in the\n # parent model\n qm, qc = _score_test_submodel(self, submodel.model)\n if qm is None:\n msg = \"The provided model is not a submodel.\"\n raise ValueError(msg)\n\n # Embed the submodel params into a params vector for the\n # parent model\n params_ex = np.dot(qm, submodel.params)\n\n # Attempt to preserve the state of the parent model\n cov_struct_save = self.cov_struct\n import copy\n cached_means_save = copy.deepcopy(self.cached_means)\n\n # Get the score vector of the submodel params in\n # the parent model\n self.cov_struct = submodel.cov_struct\n self.update_cached_means(params_ex)\n _, score = self._update_mean_params()\n if score is None:\n msg = \"Singular matrix encountered in GEE score test\"\n warnings.warn(msg, ConvergenceWarning)\n return None\n\n if not hasattr(self, \"ddof_scale\"):\n self.ddof_scale = self.exog.shape[1]\n\n if not hasattr(self, \"scaling_factor\"):\n self.scaling_factor = 1\n\n _, ncov1, cmat = self._covmat()\n score2 = np.dot(qc.T, score)\n\n try:\n amat = np.linalg.inv(ncov1)\n except np.linalg.LinAlgError:\n amat = np.linalg.pinv(ncov1)\n\n bmat_11 = np.dot(qm.T, np.dot(cmat, qm))\n bmat_22 = np.dot(qc.T, np.dot(cmat, qc))\n bmat_12 = np.dot(qm.T, np.dot(cmat, qc))\n\n amat_11 = np.dot(qm.T, np.dot(amat, qm))\n amat_12 = np.dot(qm.T, np.dot(amat, qc))\n\n try:\n ab = np.linalg.solve(amat_11, bmat_12)\n except np.linalg.LinAlgError:\n ab = np.dot(np.linalg.pinv(amat_11), bmat_12)\n\n score_cov = bmat_22 - np.dot(amat_12.T, ab)\n\n try:\n aa = np.linalg.solve(amat_11, amat_12)\n except np.linalg.LinAlgError:\n aa = np.dot(np.linalg.pinv(amat_11), amat_12)\n\n score_cov -= np.dot(bmat_12.T, aa)\n\n try:\n ab = np.linalg.solve(amat_11, bmat_11)\n except np.linalg.LinAlgError:\n ab = np.dot(np.linalg.pinv(amat_11), bmat_11)\n\n try:\n aa = np.linalg.solve(amat_11, amat_12)\n except np.linalg.LinAlgError:\n aa = np.dot(np.linalg.pinv(amat_11), amat_12)\n\n score_cov += np.dot(amat_12.T, np.dot(ab, aa))\n\n # Attempt to restore state\n self.cov_struct = cov_struct_save\n self.cached_means = cached_means_save\n\n from scipy.stats.distributions import chi2\n try:\n sc2 = np.linalg.solve(score_cov, score2)\n except np.linalg.LinAlgError:\n sc2 = np.dot(np.linalg.pinv(score_cov), score2)\n score_statistic = np.dot(score2, sc2)\n score_df = len(score2)\n score_pvalue = 1 - chi2.cdf(score_statistic, score_df)\n return {\"statistic\": score_statistic,\n \"df\": score_df,\n \"p-value\": score_pvalue}\n\n def estimate_scale(self):\n \"\"\"\n Estimate the dispersion/scale.\n \"\"\"\n\n if self.scaletype is None:\n if 
isinstance(self.family, (families.Binomial, families.Poisson,\n families.NegativeBinomial,\n _Multinomial)):\n return 1.\n elif isinstance(self.scaletype, float):\n return np.array(self.scaletype)\n\n endog = self.endog_li\n cached_means = self.cached_means\n nobs = self.nobs\n varfunc = self.family.variance\n\n scale = 0.\n fsum = 0.\n for i in range(self.num_group):\n\n if len(endog[i]) == 0:\n continue\n\n expval, _ = cached_means[i]\n sdev = np.sqrt(varfunc(expval))\n resid = (endog[i] - expval) / sdev\n\n if self.weights is not None:\n f = self.weights_li[i]\n scale += np.sum(f * (resid ** 2))\n fsum += f.sum()\n else:\n scale += np.sum(resid ** 2)\n fsum += len(resid)\n\n scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))\n\n return scale\n\n def mean_deriv(self, exog, lin_pred):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n exog : array_like\n The exogeneous data at which the derivative is computed.\n lin_pred : array_like\n The values of the linear predictor.\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n\n Notes\n -----\n If there is an offset or exposure, it should be added to\n `lin_pred` prior to calling this function.\n \"\"\"\n\n idl = self.family.link.inverse_deriv(lin_pred)\n dmat = exog * idl[:, None]\n return dmat\n\n def mean_deriv_exog(self, exog, params, offset_exposure=None):\n \"\"\"\n Derivative of the expected endog with respect to exog.\n\n Parameters\n ----------\n exog : array_like\n Values of the independent variables at which the derivative\n is calculated.\n params : array_like\n Parameter values at which the derivative is calculated.\n offset_exposure : array_like, optional\n Combined offset and exposure.\n\n Returns\n -------\n The derivative of the expected endog with respect to exog.\n \"\"\"\n\n lin_pred = np.dot(exog, params)\n if offset_exposure is not None:\n lin_pred += offset_exposure\n\n idl = self.family.link.inverse_deriv(lin_pred)\n dmat = np.outer(idl, params)\n return dmat\n\n def _update_mean_params(self):\n \"\"\"\n Returns\n -------\n update : array_like\n The update vector such that params + update is the next\n iterate when solving the score equations.\n score : array_like\n The current value of the score equations, not\n incorporating the scale parameter. 
If desired,\n multiply this vector by the scale parameter to\n incorporate the scale.\n \"\"\"\n\n endog = self.endog_li\n exog = self.exog_li\n weights = getattr(self, \"weights_li\", None)\n\n cached_means = self.cached_means\n\n varfunc = self.family.variance\n\n bmat, score = 0, 0\n for i in range(self.num_group):\n\n expval, lpr = cached_means[i]\n resid = endog[i] - expval\n dmat = self.mean_deriv(exog[i], lpr)\n sdev = np.sqrt(varfunc(expval))\n\n if weights is not None:\n w = weights[i]\n wresid = resid * w\n wdmat = dmat * w[:, None]\n else:\n wresid = resid\n wdmat = dmat\n\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (wdmat, wresid))\n if rslt is None:\n return None, None\n vinv_d, vinv_resid = tuple(rslt)\n\n bmat += np.dot(dmat.T, vinv_d)\n score += np.dot(dmat.T, vinv_resid)\n\n try:\n update = np.linalg.solve(bmat, score)\n except np.linalg.LinAlgError:\n update = np.dot(np.linalg.pinv(bmat), score)\n\n self._fit_history[\"cov_adjust\"].append(\n self.cov_struct.cov_adjust)\n\n return update, score\n\n def update_cached_means(self, mean_params):\n \"\"\"\n cached_means should always contain the most recent calculation\n of the group-wise mean vectors. This function should be\n called every time the regression parameters are changed, to\n keep the cached means up to date.\n \"\"\"\n\n endog = self.endog_li\n exog = self.exog_li\n offset = self.offset_li\n\n linkinv = self.family.link.inverse\n\n self.cached_means = []\n\n for i in range(self.num_group):\n\n if len(endog[i]) == 0:\n continue\n\n lpr = np.dot(exog[i], mean_params)\n if offset is not None:\n lpr += offset[i]\n expval = linkinv(lpr)\n\n self.cached_means.append((expval, lpr))\n\n def _covmat(self):\n \"\"\"\n Returns the sampling covariance matrix of the regression\n parameters and related quantities.\n\n Returns\n -------\n cov_robust : array_like\n The robust, or sandwich estimate of the covariance, which\n is meaningful even if the working covariance structure is\n incorrectly specified.\n cov_naive : array_like\n The model-based estimate of the covariance, which is\n meaningful if the covariance structure is correctly\n specified.\n cmat : array_like\n The center matrix of the sandwich expression, used in\n obtaining score test results.\n \"\"\"\n\n endog = self.endog_li\n exog = self.exog_li\n weights = getattr(self, \"weights_li\", None)\n varfunc = self.family.variance\n cached_means = self.cached_means\n\n # Calculate the naive (model-based) and robust (sandwich)\n # covariances.\n bmat, cmat = 0, 0\n for i in range(self.num_group):\n\n expval, lpr = cached_means[i]\n resid = endog[i] - expval\n dmat = self.mean_deriv(exog[i], lpr)\n sdev = np.sqrt(varfunc(expval))\n\n if weights is not None:\n w = weights[i]\n wresid = resid * w\n wdmat = dmat * w[:, None]\n else:\n wresid = resid\n wdmat = dmat\n\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (wdmat, wresid))\n if rslt is None:\n return None, None, None, None\n vinv_d, vinv_resid = tuple(rslt)\n\n bmat += np.dot(dmat.T, vinv_d)\n dvinv_resid = np.dot(dmat.T, vinv_resid)\n cmat += np.outer(dvinv_resid, dvinv_resid)\n\n scale = self.estimate_scale()\n\n try:\n bmati = np.linalg.inv(bmat)\n except np.linalg.LinAlgError:\n bmati = np.linalg.pinv(bmat)\n\n cov_naive = bmati * scale\n cov_robust = np.dot(bmati, np.dot(cmat, bmati))\n\n cov_naive *= self.scaling_factor\n cov_robust *= self.scaling_factor\n return cov_robust, cov_naive, cmat\n\n # Calculate the bias-corrected sandwich estimate of Mancl and\n # 
DeRouen.\n def _bc_covmat(self, cov_naive):\n\n cov_naive = cov_naive / self.scaling_factor\n endog = self.endog_li\n exog = self.exog_li\n varfunc = self.family.variance\n cached_means = self.cached_means\n scale = self.estimate_scale()\n\n bcm = 0\n for i in range(self.num_group):\n\n expval, lpr = cached_means[i]\n resid = endog[i] - expval\n dmat = self.mean_deriv(exog[i], lpr)\n sdev = np.sqrt(varfunc(expval))\n\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (dmat,))\n if rslt is None:\n return None\n vinv_d = rslt[0]\n vinv_d /= scale\n\n hmat = np.dot(vinv_d, cov_naive)\n hmat = np.dot(hmat, dmat.T).T\n\n f = self.weights_li[i] if self.weights is not None else 1.\n\n aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (aresid,))\n if rslt is None:\n return None\n srt = rslt[0]\n srt = f * np.dot(dmat.T, srt) / scale\n bcm += np.outer(srt, srt)\n\n cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))\n cov_robust_bc *= self.scaling_factor\n\n return cov_robust_bc\n\n def _starting_params(self):\n\n if np.isscalar(self._offset_exposure):\n offset = None\n else:\n offset = self._offset_exposure\n\n model = GLM(self.endog, self.exog, family=self.family,\n offset=offset, freq_weights=self.weights)\n result = model.fit()\n return result.params\n\n @Appender(_gee_fit_doc)\n def fit(self, maxiter=60, ctol=1e-6, start_params=None,\n params_niter=1, first_dep_update=0,\n cov_type='robust', ddof_scale=None, scaling_factor=1.,\n scale=None):\n\n self.scaletype = scale\n\n # Subtract this number from the total sample size when\n # normalizing the scale parameter estimate.\n if ddof_scale is None:\n self.ddof_scale = self.exog.shape[1]\n else:\n if not ddof_scale >= 0:\n raise ValueError(\n \"ddof_scale must be a non-negative number or None\")\n self.ddof_scale = ddof_scale\n\n self.scaling_factor = scaling_factor\n\n self._fit_history = defaultdict(list)\n\n if self.weights is not None and cov_type == 'naive':\n raise ValueError(\"when using weights, cov_type may not be naive\")\n\n if start_params is None:\n mean_params = self._starting_params()\n else:\n start_params = np.asarray(start_params)\n mean_params = start_params.copy()\n\n self.update_cached_means(mean_params)\n\n del_params = -1.\n num_assoc_updates = 0\n for itr in range(maxiter):\n\n update, score = self._update_mean_params()\n if update is None:\n warnings.warn(\"Singular matrix encountered in GEE update\",\n ConvergenceWarning)\n break\n mean_params += update\n self.update_cached_means(mean_params)\n\n # L2 norm of the change in mean structure parameters at\n # this iteration.\n del_params = np.sqrt(np.sum(score ** 2))\n\n self._fit_history['params'].append(mean_params.copy())\n self._fit_history['score'].append(score)\n self._fit_history['dep_params'].append(\n self.cov_struct.dep_params)\n\n # Do not exit until the association parameters have been\n # updated at least once.\n if (del_params < ctol and\n (num_assoc_updates > 0 or self.update_dep is False)):\n break\n\n # Update the dependence structure\n if (self.update_dep and (itr % params_niter) == 0\n and (itr >= first_dep_update)):\n self._update_assoc(mean_params)\n num_assoc_updates += 1\n\n if del_params >= ctol:\n warnings.warn(\"Iteration limit reached prior to convergence\",\n IterationLimitWarning)\n\n if mean_params is None:\n warnings.warn(\"Unable to estimate GEE parameters.\",\n ConvergenceWarning)\n return None\n\n bcov, ncov, _ = self._covmat()\n if bcov is 
None:\n warnings.warn(\"Estimated covariance structure for GEE \"\n \"estimates is singular\", ConvergenceWarning)\n return None\n bc_cov = None\n if cov_type == \"bias_reduced\":\n bc_cov = self._bc_covmat(ncov)\n\n if self.constraint is not None:\n x = mean_params.copy()\n mean_params, bcov = self._handle_constraint(mean_params, bcov)\n if mean_params is None:\n warnings.warn(\"Unable to estimate constrained GEE \"\n \"parameters.\", ConvergenceWarning)\n return None\n\n y, ncov = self._handle_constraint(x, ncov)\n if y is None:\n warnings.warn(\"Unable to estimate constrained GEE \"\n \"parameters.\", ConvergenceWarning)\n return None\n\n if bc_cov is not None:\n y, bc_cov = self._handle_constraint(x, bc_cov)\n if x is None:\n warnings.warn(\"Unable to estimate constrained GEE \"\n \"parameters.\", ConvergenceWarning)\n return None\n\n scale = self.estimate_scale()\n\n # kwargs to add to results instance, need to be available in __init__\n res_kwds = dict(cov_type=cov_type,\n cov_robust=bcov,\n cov_naive=ncov,\n cov_robust_bc=bc_cov)\n\n # The superclass constructor will multiply the covariance\n # matrix argument bcov by scale, which we do not want, so we\n # divide bcov by the scale parameter here\n results = GEEResults(self, mean_params, bcov / scale, scale,\n cov_type=cov_type, use_t=False,\n attr_kwds=res_kwds)\n\n # attributes not needed during results__init__\n results.fit_history = self._fit_history\n self.fit_history = defaultdict(list)\n results.score_norm = del_params\n results.converged = (del_params < ctol)\n results.cov_struct = self.cov_struct\n results.params_niter = params_niter\n results.first_dep_update = first_dep_update\n results.ctol = ctol\n results.maxiter = maxiter\n\n # These will be copied over to subclasses when upgrading.\n results._props = [\"cov_type\", \"use_t\",\n \"cov_params_default\", \"cov_robust\",\n \"cov_naive\", \"cov_robust_bc\",\n \"fit_history\",\n \"score_norm\", \"converged\", \"cov_struct\",\n \"params_niter\", \"first_dep_update\", \"ctol\",\n \"maxiter\"]\n\n return GEEResultsWrapper(results)\n\n def _update_regularized(self, params, pen_wt, scad_param, eps):\n\n sn, hm = 0, 0\n\n for i in range(self.num_group):\n\n expval, _ = self.cached_means[i]\n resid = self.endog_li[i] - expval\n sdev = np.sqrt(self.family.variance(expval))\n\n ex = self.exog_li[i] * sdev[:, None]**2\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (resid, ex))\n sn0 = rslt[0]\n sn += np.dot(ex.T, sn0)\n hm0 = rslt[1]\n hm += np.dot(ex.T, hm0)\n\n # Wang et al. 
divide sn here by num_group, but that\n # seems to be incorrect\n\n ap = np.abs(params)\n clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)\n en = pen_wt * clipped * (ap > pen_wt)\n en /= (scad_param - 1) * pen_wt\n en += pen_wt * (ap <= pen_wt)\n en /= eps + ap\n\n hm.flat[::hm.shape[0] + 1] += self.num_group * en\n sn -= self.num_group * en * params\n try:\n update = np.linalg.solve(hm, sn)\n except np.linalg.LinAlgError:\n update = np.dot(np.linalg.pinv(hm), sn)\n msg = \"Encountered singularity in regularized GEE update\"\n warnings.warn(msg)\n hm *= self.estimate_scale()\n\n return update, hm\n\n def _regularized_covmat(self, mean_params):\n\n self.update_cached_means(mean_params)\n\n ma = 0\n\n for i in range(self.num_group):\n\n expval, _ = self.cached_means[i]\n resid = self.endog_li[i] - expval\n sdev = np.sqrt(self.family.variance(expval))\n\n ex = self.exog_li[i] * sdev[:, None]**2\n rslt = self.cov_struct.covariance_matrix_solve(\n expval, i, sdev, (resid,))\n ma0 = np.dot(ex.T, rslt[0])\n ma += np.outer(ma0, ma0)\n\n return ma\n\n def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,\n ddof_scale=None, update_assoc=5,\n ctol=1e-5, ztol=1e-3, eps=1e-6, scale=None):\n \"\"\"\n Regularized estimation for GEE.\n\n Parameters\n ----------\n pen_wt : float\n The penalty weight (a non-negative scalar).\n scad_param : float\n Non-negative scalar determining the shape of the Scad\n penalty.\n maxiter : int\n The maximum number of iterations.\n ddof_scale : int\n Value to subtract from `nobs` when calculating the\n denominator degrees of freedom for t-statistics, defaults\n to the number of columns in `exog`.\n update_assoc : int\n The dependence parameters are updated every `update_assoc`\n iterations of the mean structure parameter updates.\n ctol : float\n Convergence criterion, default is one order of magnitude\n smaller than proposed in section 3.1 of Wang et al.\n ztol : float\n Coefficients smaller than this value are treated as\n being zero, default is based on section 5 of Wang et al.\n eps : non-negative scalar\n Numerical constant, see section 3.2 of Wang et al.\n scale : float or string\n If a float, this value is used as the scale parameter.\n If \"X2\", the scale parameter is always estimated using\n Pearson's chi-square method (e.g. as in a quasi-Poisson\n analysis). If None, the default approach for the family\n is used to estimate the scale parameter.\n\n Returns\n -------\n GEEResults instance. Note that not all methods of the results\n class make sense when the model has been fit with regularization.\n\n Notes\n -----\n This implementation assumes that the link is canonical.\n\n References\n ----------\n Wang L, Zhou J, Qu A. (2012). Penalized generalized estimating\n equations for high-dimensional longitudinal data analysis.\n Biometrics. 2012 Jun;68(2):353-60.\n doi: 10.1111/j.1541-0420.2011.01678.x.\n https://www.ncbi.nlm.nih.gov/pubmed/21955051\n http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf\n \"\"\"\n\n self.scaletype = scale\n\n mean_params = np.zeros(self.exog.shape[1])\n self.update_cached_means(mean_params)\n converged = False\n fit_history = defaultdict(list)\n\n # Subtract this number from the total sample size when\n # normalizing the scale parameter estimate.\n if ddof_scale is None:\n self.ddof_scale = self.exog.shape[1]\n else:\n if not ddof_scale >= 0:\n raise ValueError(\n \"ddof_scale must be a non-negative number or None\")\n self.ddof_scale = ddof_scale\n\n # Keep this private for now. 
In some cases the early steps are\n # very small so it seems necessary to ensure a certain minimum\n # number of iterations before testing for convergence.\n miniter = 20\n\n for itr in range(maxiter):\n\n update, hm = self._update_regularized(\n mean_params, pen_wt, scad_param, eps)\n if update is None:\n msg = \"Singular matrix encountered in regularized GEE update\"\n warnings.warn(msg, ConvergenceWarning)\n break\n if itr > miniter and np.sqrt(np.sum(update**2)) < ctol:\n converged = True\n break\n mean_params += update\n fit_history['params'].append(mean_params.copy())\n self.update_cached_means(mean_params)\n\n if itr != 0 and (itr % update_assoc == 0):\n self._update_assoc(mean_params)\n\n if not converged:\n msg = \"GEE.fit_regularized did not converge\"\n warnings.warn(msg)\n\n mean_params[np.abs(mean_params) < ztol] = 0\n\n self._update_assoc(mean_params)\n ma = self._regularized_covmat(mean_params)\n cov = np.linalg.solve(hm, ma)\n cov = np.linalg.solve(hm, cov.T)\n\n # kwargs to add to results instance, need to be available in __init__\n res_kwds = dict(cov_type=\"robust\", cov_robust=cov)\n\n scale = self.estimate_scale()\n rslt = GEEResults(self, mean_params, cov, scale,\n regularized=True, attr_kwds=res_kwds)\n rslt.fit_history = fit_history\n\n return GEEResultsWrapper(rslt)\n\n def _handle_constraint(self, mean_params, bcov):\n \"\"\"\n Expand the parameter estimate `mean_params` and covariance matrix\n `bcov` to the coordinate system of the unconstrained model.\n\n Parameters\n ----------\n mean_params : array_like\n A parameter vector estimate for the reduced model.\n bcov : array_like\n The covariance matrix of mean_params.\n\n Returns\n -------\n mean_params : array_like\n The input parameter vector mean_params, expanded to the\n coordinate system of the full model\n bcov : array_like\n The input covariance matrix bcov, expanded to the\n coordinate system of the full model\n \"\"\"\n\n # The number of variables in the full model\n red_p = len(mean_params)\n full_p = self.constraint.lhs.shape[1]\n mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]\n\n # Get the score vector under the full model.\n save_exog_li = self.exog_li\n self.exog_li = self.constraint.exog_fulltrans_li\n import copy\n save_cached_means = copy.deepcopy(self.cached_means)\n self.update_cached_means(mean_params0)\n _, score = self._update_mean_params()\n\n if score is None:\n warnings.warn(\"Singular matrix encountered in GEE score test\",\n ConvergenceWarning)\n return None, None\n\n _, ncov1, cmat = self._covmat()\n scale = self.estimate_scale()\n cmat = cmat / scale ** 2\n score2 = score[red_p:] / scale\n amat = np.linalg.inv(ncov1)\n\n bmat_11 = cmat[0:red_p, 0:red_p]\n bmat_22 = cmat[red_p:, red_p:]\n bmat_12 = cmat[0:red_p, red_p:]\n amat_11 = amat[0:red_p, 0:red_p]\n amat_12 = amat[0:red_p, red_p:]\n\n score_cov = bmat_22 - np.dot(amat_12.T,\n np.linalg.solve(amat_11, bmat_12))\n score_cov -= np.dot(bmat_12.T,\n np.linalg.solve(amat_11, amat_12))\n score_cov += np.dot(amat_12.T,\n np.dot(np.linalg.solve(amat_11, bmat_11),\n np.linalg.solve(amat_11, amat_12)))\n\n from scipy.stats.distributions import chi2\n score_statistic = np.dot(score2,\n np.linalg.solve(score_cov, score2))\n score_df = len(score2)\n score_pvalue = 1 - chi2.cdf(score_statistic, score_df)\n self.score_test_results = {\"statistic\": score_statistic,\n \"df\": score_df,\n \"p-value\": score_pvalue}\n\n mean_params = self.constraint.unpack_param(mean_params)\n bcov = self.constraint.unpack_cov(bcov)\n\n self.exog_li = 
save_exog_li\n self.cached_means = save_cached_means\n self.exog = self.constraint.restore_exog()\n\n return mean_params, bcov\n\n def _update_assoc(self, params):\n \"\"\"\n Update the association parameters\n \"\"\"\n\n self.cov_struct.update(params)\n\n def _derivative_exog(self, params, exog=None, transform='dydx',\n dummy_idx=None, count_idx=None):\n \"\"\"\n For computing marginal effects, returns dF(XB) / dX where F(.)\n is the fitted mean.\n\n transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.\n\n Not all of these make sense in the presence of discrete regressors,\n but checks are done in the results in get_margeff.\n \"\"\"\n # This form should be appropriate for group 1 probit, logit,\n # logistic, cloglog, heckprob, xtprobit.\n offset_exposure = None\n if exog is None:\n exog = self.exog\n offset_exposure = self._offset_exposure\n\n margeff = self.mean_deriv_exog(exog, params, offset_exposure)\n\n if 'ex' in transform:\n margeff *= exog\n if 'ey' in transform:\n margeff /= self.predict(params, exog)[:, None]\n if count_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_count_effects)\n margeff = _get_count_effects(margeff, exog, count_idx, transform,\n self, params)\n if dummy_idx is not None:\n from statsmodels.discrete.discrete_margins import (\n _get_dummy_effects)\n margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,\n self, params)\n return margeff\n\n def qic(self, params, scale, cov_params, n_step=1000):\n \"\"\"\n Returns quasi-information criteria and quasi-likelihood values.\n\n Parameters\n ----------\n params : array_like\n The GEE estimates of the regression parameters.\n scale : scalar\n Estimated scale parameter\n cov_params : array_like\n An estimate of the covariance matrix for the\n model parameters. Conventionally this is the robust\n covariance matrix.\n n_step : integer\n The number of points in the trapezoidal approximation\n to the quasi-likelihood function.\n\n Returns\n -------\n ql : scalar\n The quasi-likelihood value\n qic : scalar\n A QIC that can be used to compare the mean and covariance\n structures of the model.\n qicu : scalar\n A simplified QIC that can be used to compare mean structures\n but not covariance structures\n\n Notes\n -----\n The quasi-likelihood used here is obtained by numerically evaluating\n Wedderburn's integral representation of the quasi-likelihood function.\n This approach is valid for all families and links. Many other\n packages use analytical expressions for quasi-likelihoods that are\n valid in special cases where the link function is canonical. These\n analytical expressions may omit additive constants that only depend\n on the data. Therefore, the numerical values of our QL and QIC values\n will differ from the values reported by other packages. However only\n the differences between two QIC values calculated for different models\n using the same data are meaningful. Our QIC should produce the same\n QIC differences as other software.\n\n When using the QIC for models with unknown scale parameter, use a\n common estimate of the scale parameter for all models being compared.\n\n References\n ----------\n .. [*] W. Pan (2001). Akaike's information criterion in generalized\n estimating equations. 
Biometrics (57) 1.\n \"\"\"\n\n varfunc = self.family.variance\n\n means = []\n omega = 0.0\n # omega^-1 is the model-based covariance assuming independence\n\n for i in range(self.num_group):\n expval, lpr = self.cached_means[i]\n means.append(expval)\n dmat = self.mean_deriv(self.exog_li[i], lpr)\n omega += np.dot(dmat.T, dmat) / scale\n\n means = np.concatenate(means)\n\n # The quasi-likelihood, use change of variables so the integration is\n # from -1 to 1.\n endog_li = np.concatenate(self.endog_li)\n du = means - endog_li\n qv = np.empty(n_step)\n xv = np.linspace(-0.99999, 1, n_step)\n for i, g in enumerate(xv):\n u = endog_li + (g + 1) * du / 2.0\n vu = varfunc(u)\n qv[i] = -np.sum(du**2 * (g + 1) / vu)\n qv /= (4 * scale)\n\n try:\n from scipy.integrate import trapezoid\n except ImportError:\n # Remove after minimum is SciPy 1.7\n from scipy.integrate import trapz as trapezoid\n ql = trapezoid(qv, dx=xv[1] - xv[0])\n\n qicu = -2 * ql + 2 * self.exog.shape[1]\n qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))\n\n return ql, qic, qicu\n\n\nclass GEEResults(GLMResults):\n\n __doc__ = (\n \"This class summarizes the fit of a marginal regression model \"\n \"using GEE.\\n\" + _gee_results_doc)\n\n def __init__(self, model, params, cov_params, scale,\n cov_type='robust', use_t=False, regularized=False,\n **kwds):\n\n super(GEEResults, self).__init__(\n model, params, normalized_cov_params=cov_params,\n scale=scale)\n\n # not added by super\n self.df_resid = model.df_resid\n self.df_model = model.df_model\n self.family = model.family\n\n attr_kwds = kwds.pop('attr_kwds', {})\n self.__dict__.update(attr_kwds)\n\n # we do not do this if the cov_type has already been set\n # subclasses can set it through attr_kwds\n if not (hasattr(self, 'cov_type') and\n hasattr(self, 'cov_params_default')):\n self.cov_type = cov_type # keep alias\n covariance_type = self.cov_type.lower()\n allowed_covariances = [\"robust\", \"naive\", \"bias_reduced\"]\n if covariance_type not in allowed_covariances:\n msg = (\"GEE: `cov_type` must be one of \" +\n \", \".join(allowed_covariances))\n raise ValueError(msg)\n\n if cov_type == \"robust\":\n cov = self.cov_robust\n elif cov_type == \"naive\":\n cov = self.cov_naive\n elif cov_type == \"bias_reduced\":\n cov = self.cov_robust_bc\n\n self.cov_params_default = cov\n else:\n if self.cov_type != cov_type:\n raise ValueError('cov_type in argument is different from '\n 'already attached cov_type')\n\n @cache_readonly\n def resid(self):\n \"\"\"\n The response residuals.\n \"\"\"\n return self.resid_response\n\n def standard_errors(self, cov_type=\"robust\"):\n \"\"\"\n This is a convenience function that returns the standard\n errors for any covariance type. The value of `bse` is the\n standard errors for whichever covariance type is specified as\n an argument to `fit` (defaults to \"robust\").\n\n Parameters\n ----------\n cov_type : str\n One of \"robust\", \"naive\", or \"bias_reduced\". Determines\n the covariance used to compute standard errors. 
Defaults\n to \"robust\".\n \"\"\"\n\n # Check covariance_type\n covariance_type = cov_type.lower()\n allowed_covariances = [\"robust\", \"naive\", \"bias_reduced\"]\n if covariance_type not in allowed_covariances:\n msg = (\"GEE: `covariance_type` must be one of \" +\n \", \".join(allowed_covariances))\n raise ValueError(msg)\n\n if covariance_type == \"robust\":\n return np.sqrt(np.diag(self.cov_robust))\n elif covariance_type == \"naive\":\n return np.sqrt(np.diag(self.cov_naive))\n elif covariance_type == \"bias_reduced\":\n if self.cov_robust_bc is None:\n raise ValueError(\n \"GEE: `bias_reduced` covariance not available\")\n return np.sqrt(np.diag(self.cov_robust_bc))\n\n # Need to override to allow for different covariance types.\n @cache_readonly\n def bse(self):\n return self.standard_errors(self.cov_type)\n\n def score_test(self):\n \"\"\"\n Return the results of a score test for a linear constraint.\n\n Returns\n -------\n Adictionary containing the p-value, the test statistic,\n and the degrees of freedom for the score test.\n\n Notes\n -----\n See also GEE.compare_score_test for an alternative way to perform\n a score test. GEEResults.score_test is more general, in that it\n supports testing arbitrary linear equality constraints. However\n GEE.compare_score_test might be easier to use when comparing\n two explicit models.\n\n References\n ----------\n Xu Guo and Wei Pan (2002). \"Small sample performance of the score\n test in GEE\".\n http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf\n \"\"\"\n\n if not hasattr(self.model, \"score_test_results\"):\n msg = \"score_test on results instance only available when \"\n msg += \" model was fit with constraints\"\n raise ValueError(msg)\n\n return self.model.score_test_results\n\n @cache_readonly\n def resid_split(self):\n \"\"\"\n Returns the residuals, the endogeneous data minus the fitted\n values from the model. The residuals are returned as a list\n of arrays containing the residuals for each cluster.\n \"\"\"\n sresid = []\n for v in self.model.group_labels:\n ii = self.model.group_indices[v]\n sresid.append(self.resid[ii])\n return sresid\n\n @cache_readonly\n def resid_centered(self):\n \"\"\"\n Returns the residuals centered within each group.\n \"\"\"\n cresid = self.resid.copy()\n for v in self.model.group_labels:\n ii = self.model.group_indices[v]\n cresid[ii] -= cresid[ii].mean()\n return cresid\n\n @cache_readonly\n def resid_centered_split(self):\n \"\"\"\n Returns the residuals centered within each group. The\n residuals are returned as a list of arrays containing the\n centered residuals for each cluster.\n \"\"\"\n sresid = []\n for v in self.model.group_labels:\n ii = self.model.group_indices[v]\n sresid.append(self.centered_resid[ii])\n return sresid\n\n def qic(self, scale=None, n_step=1000):\n \"\"\"\n Returns the QIC and QICu information criteria.\n\n See GEE.qic for documentation.\n \"\"\"\n\n # It is easy to forget to set the scale parameter. 
Sometimes\n # this is intentional, so we warn.\n if scale is None:\n warnings.warn(\"QIC values obtained using scale=None are not \"\n \"appropriate for comparing models\")\n\n if scale is None:\n scale = self.scale\n\n _, qic, qicu = self.model.qic(self.params, scale,\n self.cov_params(),\n n_step=n_step)\n\n return qic, qicu\n\n # FIXME: alias to be removed, temporary backwards compatibility\n split_resid = resid_split\n centered_resid = resid_centered\n split_centered_resid = resid_centered_split\n\n @Appender(_plot_added_variable_doc % {'extra_params_doc': ''})\n def plot_added_variable(self, focus_exog, resid_type=None,\n use_glm_weights=True, fit_kwargs=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_added_variable\n\n fig = plot_added_variable(self, focus_exog,\n resid_type=resid_type,\n use_glm_weights=use_glm_weights,\n fit_kwargs=fit_kwargs, ax=ax)\n\n return fig\n\n @Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})\n def plot_partial_residuals(self, focus_exog, ax=None):\n\n from statsmodels.graphics.regressionplots import plot_partial_residuals\n\n return plot_partial_residuals(self, focus_exog, ax=ax)\n\n @Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})\n def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,\n ax=None):\n\n from statsmodels.graphics.regressionplots import plot_ceres_residuals\n\n return plot_ceres_residuals(self, focus_exog, frac,\n cond_means=cond_means, ax=ax)\n\n def conf_int(self, alpha=.05, cols=None, cov_type=None):\n \"\"\"\n Returns confidence intervals for the fitted parameters.\n\n Parameters\n ----------\n alpha : float, optional\n The `alpha` level for the confidence interval. i.e., The\n default `alpha` = .05 returns a 95% confidence interval.\n cols : array_like, optional\n `cols` specifies which confidence intervals to return\n cov_type : str\n The covariance type used for computing standard errors;\n must be one of 'robust', 'naive', and 'bias reduced'.\n See `GEE` for details.\n\n Notes\n -----\n The confidence interval is based on the Gaussian distribution.\n \"\"\"\n # super does not allow to specify cov_type and method is not\n # implemented,\n # FIXME: remove this method here\n if cov_type is None:\n bse = self.bse\n else:\n bse = self.standard_errors(cov_type=cov_type)\n params = self.params\n dist = stats.norm\n q = dist.ppf(1 - alpha / 2)\n\n if cols is None:\n lower = self.params - q * bse\n upper = self.params + q * bse\n else:\n cols = np.asarray(cols)\n lower = params[cols] - q * bse[cols]\n upper = params[cols] + q * bse[cols]\n return np.asarray(lzip(lower, upper))\n\n def summary(self, yname=None, xname=None, title=None, alpha=.05):\n \"\"\"\n Summarize the GEE regression results\n\n Parameters\n ----------\n yname : str, optional\n Default is `y`\n xname : list[str], optional\n Names for the exogenous variables, default is `var_#` for ## in\n the number of regressors. Must match the number of parameters in\n the model\n title : str, optional\n Title for the top table. 
If not None, then this replaces\n the default title\n alpha : float\n significance level for the confidence intervals\n cov_type : str\n The covariance type used to compute the standard errors;\n one of 'robust' (the usual robust sandwich-type covariance\n estimate), 'naive' (ignores dependence), and 'bias\n reduced' (the Mancl/DeRouen estimate).\n\n Returns\n -------\n smry : Summary instance\n this holds the summary tables and text, which can be\n printed or converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary : class to hold summary results\n \"\"\"\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['Generalized']),\n ('', ['Estimating Equations']),\n ('Family:', [self.model.family.__class__.__name__]),\n ('Dependence structure:',\n [self.model.cov_struct.__class__.__name__]),\n ('Date:', None),\n ('Covariance type: ', [self.cov_type, ])\n ]\n\n NY = [len(y) for y in self.model.endog_li]\n\n top_right = [('No. Observations:', [sum(NY)]),\n ('No. clusters:', [len(self.model.endog_li)]),\n ('Min. cluster size:', [min(NY)]),\n ('Max. cluster size:', [max(NY)]),\n ('Mean cluster size:', [\"%.1f\" % np.mean(NY)]),\n ('Num. iterations:', ['%d' %\n len(self.fit_history['params'])]),\n ('Scale:', [\"%.3f\" % self.scale]),\n ('Time:', None),\n ]\n\n # The skew of the residuals\n skew1 = stats.skew(self.resid)\n kurt1 = stats.kurtosis(self.resid)\n skew2 = stats.skew(self.centered_resid)\n kurt2 = stats.kurtosis(self.centered_resid)\n\n diagn_left = [('Skew:', [\"%12.4f\" % skew1]),\n ('Centered skew:', [\"%12.4f\" % skew2])]\n\n diagn_right = [('Kurtosis:', [\"%12.4f\" % kurt1]),\n ('Centered kurtosis:', [\"%12.4f\" % kurt2])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' +\\\n \"Regression Results\"\n\n # Override the exog variable names if xname is provided as an\n # argument.\n if xname is None:\n xname = self.model.exog_names\n\n if yname is None:\n yname = self.model.endog_names\n\n # Create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname,\n title=title)\n smry.add_table_params(self, yname=yname, xname=xname,\n alpha=alpha, use_t=False)\n smry.add_table_2cols(self, gleft=diagn_left,\n gright=diagn_right, yname=yname,\n xname=xname, title=\"\")\n\n return smry\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Parameters\n ----------\n at : str, optional\n Options are:\n\n - 'overall', The average of the marginal effects at each\n observation.\n - 'mean', The marginal effects at the mean of each regressor.\n - 'median', The marginal effects at the median of each regressor.\n - 'zero', The marginal effects at zero for each regressor.\n - 'all', The marginal effects at each observation. If `at` is 'all'\n only margeff will be available.\n\n Note that if `exog` is specified, then marginal effects for all\n variables not specified by `exog` are calculated using the `at`\n option.\n method : str, optional\n Options are:\n\n - 'dydx' - dy/dx - No transformation is made and marginal effects\n are returned. 
This is the default.\n - 'eyex' - estimate elasticities of variables in `exog` --\n d(lny)/d(lnx)\n - 'dyex' - estimate semi-elasticity -- dy/d(lnx)\n - 'eydx' - estimate semi-elasticity -- d(lny)/dx\n\n Note that tranformations are done after each observation is\n calculated. Semi-elasticities for binary variables are computed\n using the midpoint method. 'dyex' and 'eyex' do not make sense\n for discrete variables.\n atexog : array_like, optional\n Optionally, you can provide the exogenous variables over which to\n get the marginal effects. This should be a dictionary with the key\n as the zero-indexed column number and the value of the dictionary.\n Default is None for all independent variables less the constant.\n dummy : bool, optional\n If False, treats binary variables (if present) as continuous. This\n is the default. Else if True, treats binary variables as\n changing from 0 to 1. Note that any variable that is either 0 or 1\n is treated as binary. Each binary variable is treated separately\n for now.\n count : bool, optional\n If False, treats count variables (if present) as continuous. This\n is the default. Else if True, the marginal effect is the\n change in probabilities when each observation is increased by one.\n\n Returns\n -------\n effects : ndarray\n the marginal effect corresponding to the input options\n\n Notes\n -----\n When using after Poisson, returns the expected number of events\n per period, assuming that the model is loglinear.\n \"\"\"\n\n if self.model.constraint is not None:\n warnings.warn(\"marginal effects ignore constraints\",\n ValueWarning)\n\n return GEEMargins(self, (at, method, atexog, dummy, count))\n\n def plot_isotropic_dependence(self, ax=None, xpoints=10,\n min_n=50):\n \"\"\"\n Create a plot of the pairwise products of within-group\n residuals against the corresponding time differences. This\n plot can be used to assess the possible form of an isotropic\n covariance structure.\n\n Parameters\n ----------\n ax : AxesSubplot\n An axes on which to draw the graph. If None, new\n figure and axes objects are created\n xpoints : scalar or array_like\n If scalar, the number of points equally spaced points on\n the time difference axis used to define bins for\n calculating local means. 
If an array, the specific points\n that define the bins.\n min_n : int\n The minimum sample size in a bin for the mean residual\n product to be included on the plot.\n \"\"\"\n\n from statsmodels.graphics import utils as gutils\n\n resid = self.model.cluster_list(self.resid)\n time = self.model.cluster_list(self.model.time)\n\n # All within-group pairwise time distances (xdt) and the\n # corresponding products of scaled residuals (xre).\n xre, xdt = [], []\n for re, ti in zip(resid, time):\n ix = np.tril_indices(re.shape[0], 0)\n re = re[ix[0]] * re[ix[1]] / self.scale ** 2\n xre.append(re)\n dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))\n xdt.append(dists)\n\n xre = np.concatenate(xre)\n xdt = np.concatenate(xdt)\n\n if ax is None:\n fig, ax = gutils.create_mpl_ax(ax)\n else:\n fig = ax.get_figure()\n\n # Convert to a correlation\n ii = np.flatnonzero(xdt == 0)\n v0 = np.mean(xre[ii])\n xre /= v0\n\n # Use the simple average to smooth, since fancier smoothers\n # that trim and downweight outliers give biased results (we\n # need the actual mean of a skewed distribution).\n if np.isscalar(xpoints):\n xpoints = np.linspace(0, max(xdt), xpoints)\n dg = np.digitize(xdt, xpoints)\n dgu = np.unique(dg)\n hist = np.asarray([np.sum(dg == k) for k in dgu])\n ii = np.flatnonzero(hist >= min_n)\n dgu = dgu[ii]\n dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])\n dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])\n\n ax.plot(dgx, dgy, '-', color='orange', lw=5)\n ax.set_xlabel(\"Time difference\")\n ax.set_ylabel(\"Product of scaled residuals\")\n\n return fig\n\n def sensitivity_params(self, dep_params_first,\n dep_params_last, num_steps):\n \"\"\"\n Refits the GEE model using a sequence of values for the\n dependence parameters.\n\n Parameters\n ----------\n dep_params_first : array_like\n The first dep_params in the sequence\n dep_params_last : array_like\n The last dep_params in the sequence\n num_steps : int\n The number of dep_params in the sequence\n\n Returns\n -------\n results : array_like\n The GEEResults objects resulting from the fits.\n \"\"\"\n\n model = self.model\n\n import copy\n cov_struct = copy.deepcopy(self.model.cov_struct)\n\n # We are fixing the dependence structure in each run.\n update_dep = model.update_dep\n model.update_dep = False\n\n dep_params = []\n results = []\n for x in np.linspace(0, 1, num_steps):\n\n dp = x * dep_params_last + (1 - x) * dep_params_first\n dep_params.append(dp)\n\n model.cov_struct = copy.deepcopy(cov_struct)\n model.cov_struct.dep_params = dp\n rslt = model.fit(start_params=self.params,\n ctol=self.ctol,\n params_niter=self.params_niter,\n first_dep_update=self.first_dep_update,\n cov_type=self.cov_type)\n results.append(rslt)\n\n model.update_dep = update_dep\n\n return results\n\n # FIXME: alias to be removed, temporary backwards compatibility\n params_sensitivity = sensitivity_params\n\n\nclass GEEResultsWrapper(lm.RegressionResultsWrapper):\n _attrs = {\n 'centered_resid': 'rows',\n }\n _wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,\n _attrs)\nwrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305\n\n\nclass OrdinalGEE(GEE):\n\n __doc__ = (\n \" Ordinal Response Marginal Regression Model using GEE\\n\" +\n _gee_init_doc % {'extra_params': base._missing_param_doc,\n 'family_doc': _gee_ordinal_family_doc,\n 'example': _gee_ordinal_example,\n 'notes': _gee_nointercept})\n\n def __init__(self, endog, exog, groups, time=None, family=None,\n cov_struct=None, missing='none', 
offset=None,\n dep_data=None, constraint=None, **kwargs):\n\n if family is None:\n family = families.Binomial()\n else:\n if not isinstance(family, families.Binomial):\n raise ValueError(\"ordinal GEE must use a Binomial family\")\n\n if cov_struct is None:\n cov_struct = cov_structs.OrdinalIndependence()\n\n endog, exog, groups, time, offset = self.setup_ordinal(\n endog, exog, groups, time, offset)\n\n super(OrdinalGEE, self).__init__(endog, exog, groups, time,\n family, cov_struct, missing,\n offset, dep_data, constraint)\n\n def setup_ordinal(self, endog, exog, groups, time, offset):\n \"\"\"\n Restructure ordinal data as binary indicators so that they can\n be analyzed using Generalized Estimating Equations.\n \"\"\"\n\n self.endog_orig = endog.copy()\n self.exog_orig = exog.copy()\n self.groups_orig = groups.copy()\n if offset is not None:\n self.offset_orig = offset.copy()\n else:\n self.offset_orig = None\n offset = np.zeros(len(endog))\n if time is not None:\n self.time_orig = time.copy()\n else:\n self.time_orig = None\n time = np.zeros((len(endog), 1))\n\n exog = np.asarray(exog)\n endog = np.asarray(endog)\n groups = np.asarray(groups)\n time = np.asarray(time)\n offset = np.asarray(offset)\n\n # The unique outcomes, except the greatest one.\n self.endog_values = np.unique(endog)\n endog_cuts = self.endog_values[0:-1]\n ncut = len(endog_cuts)\n\n nrows = ncut * len(endog)\n exog_out = np.zeros((nrows, exog.shape[1]),\n dtype=np.float64)\n endog_out = np.zeros(nrows, dtype=np.float64)\n intercepts = np.zeros((nrows, ncut), dtype=np.float64)\n groups_out = np.zeros(nrows, dtype=groups.dtype)\n time_out = np.zeros((nrows, time.shape[1]),\n dtype=np.float64)\n offset_out = np.zeros(nrows, dtype=np.float64)\n\n jrow = 0\n zipper = zip(exog, endog, groups, time, offset)\n for (exog_row, endog_value, group_value, time_value,\n offset_value) in zipper:\n\n # Loop over thresholds for the indicators\n for thresh_ix, thresh in enumerate(endog_cuts):\n\n exog_out[jrow, :] = exog_row\n endog_out[jrow] = int(np.squeeze(endog_value > thresh))\n intercepts[jrow, thresh_ix] = 1\n groups_out[jrow] = group_value\n time_out[jrow] = time_value\n offset_out[jrow] = offset_value\n jrow += 1\n\n exog_out = np.concatenate((intercepts, exog_out), axis=1)\n\n # exog column names, including intercepts\n xnames = [\"I(y>%.1f)\" % v for v in endog_cuts]\n if type(self.exog_orig) is pd.DataFrame:\n xnames.extend(self.exog_orig.columns)\n else:\n xnames.extend([\"x%d\" % k for k in range(1, exog.shape[1] + 1)])\n exog_out = pd.DataFrame(exog_out, columns=xnames)\n\n # Preserve the endog name if there is one\n if type(self.endog_orig) is pd.Series:\n endog_out = pd.Series(endog_out, name=self.endog_orig.name)\n\n return endog_out, exog_out, groups_out, time_out, offset_out\n\n def _starting_params(self):\n exposure = getattr(self, \"exposure\", None)\n model = GEE(self.endog, self.exog, self.groups,\n time=self.time, family=families.Binomial(),\n offset=self.offset, exposure=exposure)\n result = model.fit()\n return result.params\n\n @Appender(_gee_fit_doc)\n def fit(self, maxiter=60, ctol=1e-6, start_params=None,\n params_niter=1, first_dep_update=0,\n cov_type='robust'):\n\n rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,\n params_niter, first_dep_update,\n cov_type=cov_type)\n\n rslt = rslt._results # use unwrapped instance\n res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))\n # Convert the GEEResults to an OrdinalGEEResults\n ord_rslt = OrdinalGEEResults(self, rslt.params,\n 
rslt.cov_params() / rslt.scale,\n rslt.scale,\n cov_type=cov_type,\n attr_kwds=res_kwds)\n # for k in rslt._props:\n # setattr(ord_rslt, k, getattr(rslt, k))\n # TODO: document or delete\n\n return OrdinalGEEResultsWrapper(ord_rslt)\n\n\nclass OrdinalGEEResults(GEEResults):\n\n __doc__ = (\n \"This class summarizes the fit of a marginal regression model\"\n \"for an ordinal response using GEE.\\n\"\n + _gee_results_doc)\n\n def plot_distribution(self, ax=None, exog_values=None):\n \"\"\"\n Plot the fitted probabilities of endog in an ordinal model,\n for specified values of the predictors.\n\n Parameters\n ----------\n ax : AxesSubplot\n An axes on which to draw the graph. If None, new\n figure and axes objects are created\n exog_values : array_like\n A list of dictionaries, with each dictionary mapping\n variable names to values at which the variable is held\n fixed. The values P(endog=y | exog) are plotted for all\n possible values of y, at the given exog value. Variables\n not included in a dictionary are held fixed at the mean\n value.\n\n Example:\n --------\n We have a model with covariates 'age' and 'sex', and wish to\n plot the probabilities P(endog=y | exog) for males (sex=0) and\n for females (sex=1), as separate paths on the plot. Since\n 'age' is not included below in the map, it is held fixed at\n its mean value.\n\n >>> ev = [{\"sex\": 1}, {\"sex\": 0}]\n >>> rslt.distribution_plot(exog_values=ev)\n \"\"\"\n\n from statsmodels.graphics import utils as gutils\n\n if ax is None:\n fig, ax = gutils.create_mpl_ax(ax)\n else:\n fig = ax.get_figure()\n\n # If no covariate patterns are specified, create one with all\n # variables set to their mean values.\n if exog_values is None:\n exog_values = [{}, ]\n\n exog_means = self.model.exog.mean(0)\n ix_icept = [i for i, x in enumerate(self.model.exog_names) if\n x.startswith(\"I(\")]\n\n for ev in exog_values:\n\n for k in ev.keys():\n if k not in self.model.exog_names:\n raise ValueError(\"%s is not a variable in the model\"\n % k)\n\n # Get the fitted probability for each level, at the given\n # covariate values.\n pr = []\n for j in ix_icept:\n\n xp = np.zeros_like(self.params)\n xp[j] = 1.\n for i, vn in enumerate(self.model.exog_names):\n if i in ix_icept:\n continue\n # User-specified value\n if vn in ev:\n xp[i] = ev[vn]\n # Mean value\n else:\n xp[i] = exog_means[i]\n\n p = 1 / (1 + np.exp(-np.dot(xp, self.params)))\n pr.append(p)\n\n pr.insert(0, 1)\n pr.append(0)\n pr = np.asarray(pr)\n prd = -np.diff(pr)\n\n ax.plot(self.model.endog_values, prd, 'o-')\n\n ax.set_xlabel(\"Response value\")\n ax.set_ylabel(\"Probability\")\n ax.set_ylim(0, 1)\n\n return fig\n\n\ndef _score_test_submodel(par, sub):\n \"\"\"\n Return transformation matrices for design matrices.\n\n Parameters\n ----------\n par : instance\n The parent model\n sub : instance\n The sub-model\n\n Returns\n -------\n qm : array_like\n Matrix mapping the design matrix of the parent to the design matrix\n for the sub-model.\n qc : array_like\n Matrix mapping the design matrix of the parent to the orthogonal\n complement of the columnspace of the submodel in the columnspace\n of the parent.\n\n Notes\n -----\n Returns None, None if the provided submodel is not actually a submodel.\n \"\"\"\n\n x1 = par.exog\n x2 = sub.exog\n\n u, s, vt = np.linalg.svd(x1, 0)\n v = vt.T\n\n # Get the orthogonal complement of col(x2) in col(x1).\n a, _ = np.linalg.qr(x2)\n a = u - np.dot(a, np.dot(a.T, u))\n x2c, sb, _ = np.linalg.svd(a, 0)\n x2c = x2c[:, sb > 1e-12]\n\n # x1 * qm = x2\n 
ii = np.flatnonzero(np.abs(s) > 1e-12)\n qm = np.dot(v[:, ii], np.dot(u[:, ii].T, x2) / s[ii, None])\n\n e = np.max(np.abs(x2 - np.dot(x1, qm)))\n if e > 1e-8:\n return None, None\n\n # x1 * qc = x2c\n qc = np.dot(v[:, ii], np.dot(u[:, ii].T, x2c) / s[ii, None])\n\n return qm, qc\n\n\nclass OrdinalGEEResultsWrapper(GEEResultsWrapper):\n pass\nwrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305\n\n\nclass NominalGEE(GEE):\n\n __doc__ = (\n \" Nominal Response Marginal Regression Model using GEE.\\n\" +\n _gee_init_doc % {'extra_params': base._missing_param_doc,\n 'family_doc': _gee_nominal_family_doc,\n 'example': _gee_nominal_example,\n 'notes': _gee_nointercept})\n\n def __init__(self, endog, exog, groups, time=None, family=None,\n cov_struct=None, missing='none', offset=None,\n dep_data=None, constraint=None, **kwargs):\n\n endog, exog, groups, time, offset = self.setup_nominal(\n endog, exog, groups, time, offset)\n\n if family is None:\n family = _Multinomial(self.ncut + 1)\n\n if cov_struct is None:\n cov_struct = cov_structs.NominalIndependence()\n\n super(NominalGEE, self).__init__(\n endog, exog, groups, time, family, cov_struct, missing,\n offset, dep_data, constraint)\n\n def _starting_params(self):\n exposure = getattr(self, \"exposure\", None)\n model = GEE(self.endog, self.exog, self.groups,\n time=self.time, family=families.Binomial(),\n offset=self.offset, exposure=exposure)\n result = model.fit()\n return result.params\n\n def setup_nominal(self, endog, exog, groups, time, offset):\n \"\"\"\n Restructure nominal data as binary indicators so that they can\n be analyzed using Generalized Estimating Equations.\n \"\"\"\n\n self.endog_orig = endog.copy()\n self.exog_orig = exog.copy()\n self.groups_orig = groups.copy()\n if offset is not None:\n self.offset_orig = offset.copy()\n else:\n self.offset_orig = None\n offset = np.zeros(len(endog))\n if time is not None:\n self.time_orig = time.copy()\n else:\n self.time_orig = None\n time = np.zeros((len(endog), 1))\n\n exog = np.asarray(exog)\n endog = np.asarray(endog)\n groups = np.asarray(groups)\n time = np.asarray(time)\n offset = np.asarray(offset)\n\n # The unique outcomes, except the greatest one.\n self.endog_values = np.unique(endog)\n endog_cuts = self.endog_values[0:-1]\n ncut = len(endog_cuts)\n self.ncut = ncut\n\n nrows = len(endog_cuts) * exog.shape[0]\n ncols = len(endog_cuts) * exog.shape[1]\n exog_out = np.zeros((nrows, ncols), dtype=np.float64)\n endog_out = np.zeros(nrows, dtype=np.float64)\n groups_out = np.zeros(nrows, dtype=np.float64)\n time_out = np.zeros((nrows, time.shape[1]),\n dtype=np.float64)\n offset_out = np.zeros(nrows, dtype=np.float64)\n\n jrow = 0\n zipper = zip(exog, endog, groups, time, offset)\n for (exog_row, endog_value, group_value, time_value,\n offset_value) in zipper:\n\n # Loop over thresholds for the indicators\n for thresh_ix, thresh in enumerate(endog_cuts):\n\n u = np.zeros(len(endog_cuts), dtype=np.float64)\n u[thresh_ix] = 1\n exog_out[jrow, :] = np.kron(u, exog_row)\n endog_out[jrow] = (int(endog_value == thresh))\n groups_out[jrow] = group_value\n time_out[jrow] = time_value\n offset_out[jrow] = offset_value\n jrow += 1\n\n # exog names\n if isinstance(self.exog_orig, pd.DataFrame):\n xnames_in = self.exog_orig.columns\n else:\n xnames_in = [\"x%d\" % k for k in range(1, exog.shape[1] + 1)]\n xnames = []\n for tr in endog_cuts:\n xnames.extend([\"%s[%.1f]\" % (v, tr) for v in xnames_in])\n exog_out = pd.DataFrame(exog_out, columns=xnames)\n exog_out 
= pd.DataFrame(exog_out, columns=xnames)\n\n # Preserve endog name if there is one\n if isinstance(self.endog_orig, pd.Series):\n endog_out = pd.Series(endog_out, name=self.endog_orig.name)\n\n return endog_out, exog_out, groups_out, time_out, offset_out\n\n def mean_deriv(self, exog, lin_pred):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n exog : array_like\n The exogeneous data at which the derivative is computed,\n number of rows must be a multiple of `ncut`.\n lin_pred : array_like\n The values of the linear predictor, length must be multiple\n of `ncut`.\n\n Returns\n -------\n The derivative of the expected endog with respect to the\n parameters.\n \"\"\"\n\n expval = np.exp(lin_pred)\n\n # Reshape so that each row contains all the indicators\n # corresponding to one multinomial observation.\n expval_m = np.reshape(expval, (len(expval) // self.ncut,\n self.ncut))\n\n # The normalizing constant for the multinomial probabilities.\n denom = 1 + expval_m.sum(1)\n denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))\n\n # The multinomial probabilities\n mprob = expval / denom\n\n # First term of the derivative: denom * expval' / denom^2 =\n # expval' / denom.\n dmat = mprob[:, None] * exog\n\n # Second term of the derivative: -expval * denom' / denom^2\n ddenom = expval[:, None] * exog\n dmat -= mprob[:, None] * ddenom / denom[:, None]\n\n return dmat\n\n def mean_deriv_exog(self, exog, params, offset_exposure=None):\n \"\"\"\n Derivative of the expected endog with respect to exog for the\n multinomial model, used in analyzing marginal effects.\n\n Parameters\n ----------\n exog : array_like\n The exogeneous data at which the derivative is computed,\n number of rows must be a multiple of `ncut`.\n lpr : array_like\n The linear predictor values, length must be multiple of\n `ncut`.\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to exog.\n\n Notes\n -----\n offset_exposure must be set at None for the multinomial family.\n \"\"\"\n\n if offset_exposure is not None:\n warnings.warn(\"Offset/exposure ignored for the multinomial family\",\n ValueWarning)\n\n lpr = np.dot(exog, params)\n expval = np.exp(lpr)\n\n expval_m = np.reshape(expval, (len(expval) // self.ncut,\n self.ncut))\n\n denom = 1 + expval_m.sum(1)\n denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))\n\n bmat0 = np.outer(np.ones(exog.shape[0]), params)\n\n # Masking matrix\n qmat = []\n for j in range(self.ncut):\n ee = np.zeros(self.ncut, dtype=np.float64)\n ee[j] = 1\n qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))\n qmat = np.array(qmat)\n qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)\n bmat = bmat0 * qmat\n\n dmat = expval[:, None] * bmat / denom[:, None]\n\n expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))\n expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))\n\n dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2\n\n return dmat\n\n @Appender(_gee_fit_doc)\n def fit(self, maxiter=60, ctol=1e-6, start_params=None,\n params_niter=1, first_dep_update=0,\n cov_type='robust'):\n\n rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,\n params_niter, first_dep_update,\n cov_type=cov_type)\n if rslt is None:\n warnings.warn(\"GEE updates did not converge\",\n ConvergenceWarning)\n return None\n\n rslt = rslt._results # use unwrapped instance\n res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))\n # Convert the GEEResults to a 
NominalGEEResults\n nom_rslt = NominalGEEResults(self, rslt.params,\n rslt.cov_params() / rslt.scale,\n rslt.scale,\n cov_type=cov_type,\n attr_kwds=res_kwds)\n # TODO: document or delete\n # for k in rslt._props:\n # setattr(nom_rslt, k, getattr(rslt, k))\n\n return NominalGEEResultsWrapper(nom_rslt)\n\n\nclass NominalGEEResults(GEEResults):\n\n __doc__ = (\n \"This class summarizes the fit of a marginal regression model\"\n \"for a nominal response using GEE.\\n\"\n + _gee_results_doc)\n\n def plot_distribution(self, ax=None, exog_values=None):\n \"\"\"\n Plot the fitted probabilities of endog in an nominal model,\n for specified values of the predictors.\n\n Parameters\n ----------\n ax : AxesSubplot\n An axes on which to draw the graph. If None, new\n figure and axes objects are created\n exog_values : array_like\n A list of dictionaries, with each dictionary mapping\n variable names to values at which the variable is held\n fixed. The values P(endog=y | exog) are plotted for all\n possible values of y, at the given exog value. Variables\n not included in a dictionary are held fixed at the mean\n value.\n\n Example:\n --------\n We have a model with covariates 'age' and 'sex', and wish to\n plot the probabilities P(endog=y | exog) for males (sex=0) and\n for females (sex=1), as separate paths on the plot. Since\n 'age' is not included below in the map, it is held fixed at\n its mean value.\n\n >>> ex = [{\"sex\": 1}, {\"sex\": 0}]\n >>> rslt.distribution_plot(exog_values=ex)\n \"\"\"\n\n from statsmodels.graphics import utils as gutils\n\n if ax is None:\n fig, ax = gutils.create_mpl_ax(ax)\n else:\n fig = ax.get_figure()\n\n # If no covariate patterns are specified, create one with all\n # variables set to their mean values.\n if exog_values is None:\n exog_values = [{}, ]\n\n link = self.model.family.link.inverse\n ncut = self.model.family.ncut\n\n k = int(self.model.exog.shape[1] / ncut)\n exog_means = self.model.exog.mean(0)[0:k]\n exog_names = self.model.exog_names[0:k]\n exog_names = [x.split(\"[\")[0] for x in exog_names]\n\n params = np.reshape(self.params,\n (ncut, len(self.params) // ncut))\n\n for ev in exog_values:\n\n exog = exog_means.copy()\n\n for k in ev.keys():\n if k not in exog_names:\n raise ValueError(\"%s is not a variable in the model\"\n % k)\n\n ii = exog_names.index(k)\n exog[ii] = ev[k]\n\n lpr = np.dot(params, exog)\n pr = link(lpr)\n pr = np.r_[pr, 1 - pr.sum()]\n\n ax.plot(self.model.endog_values, pr, 'o-')\n\n ax.set_xlabel(\"Response value\")\n ax.set_ylabel(\"Probability\")\n ax.set_xticks(self.model.endog_values)\n ax.set_xticklabels(self.model.endog_values)\n ax.set_ylim(0, 1)\n\n return fig\n\n\nclass NominalGEEResultsWrapper(GEEResultsWrapper):\n pass\nwrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults) # noqa:E305\n\n\nclass _MultinomialLogit(Link):\n \"\"\"\n The multinomial logit transform, only for use with GEE.\n\n Notes\n -----\n The data are assumed coded as binary indicators, where each\n observed multinomial value y is coded as I(y == S[0]), ..., I(y ==\n S[-1]), where S is the set of possible response labels, excluding\n the largest one. 
Thererefore functions in this class should only\n be called using vector argument whose length is a multiple of |S|\n = ncut, which is an argument to be provided when initializing the\n class.\n\n call and derivative use a private method _clean to trim p by 1e-10\n so that p is in (0, 1)\n \"\"\"\n\n def __init__(self, ncut):\n self.ncut = ncut\n\n def inverse(self, lpr):\n \"\"\"\n Inverse of the multinomial logit transform, which gives the\n expected values of the data as a function of the linear\n predictors.\n\n Parameters\n ----------\n lpr : array_like (length must be divisible by `ncut`)\n The linear predictors\n\n Returns\n -------\n prob : ndarray\n Probabilities, or expected values\n \"\"\"\n\n expval = np.exp(lpr)\n\n denom = 1 + np.reshape(expval, (len(expval) // self.ncut,\n self.ncut)).sum(1)\n denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))\n\n prob = expval / denom\n\n return prob\n\n\nclass _Multinomial(families.Family):\n \"\"\"\n Pseudo-link function for fitting nominal multinomial models with\n GEE. Not for use outside the GEE class.\n \"\"\"\n\n links = [_MultinomialLogit, ]\n variance = varfuncs.binary\n safe_links = [_MultinomialLogit, ]\n\n def __init__(self, nlevels, check_link=True):\n \"\"\"\n Parameters\n ----------\n nlevels : int\n The number of distinct categories for the multinomial\n distribution.\n \"\"\"\n self._check_link = check_link\n self.initialize(nlevels)\n\n def initialize(self, nlevels):\n self.ncut = nlevels - 1\n self.link = _MultinomialLogit(self.ncut)\n\n\nclass GEEMargins:\n \"\"\"\n Estimated marginal effects for a regression model fit with GEE.\n\n Parameters\n ----------\n results : GEEResults instance\n The results instance of a fitted discrete choice model\n args : tuple\n Args are passed to `get_margeff`. This is the same as\n results.get_margeff. See there for more information.\n kwargs : dict\n Keyword args are passed to `get_margeff`. This is the same as\n results.get_margeff. See there for more information.\n \"\"\"\n\n def __init__(self, results, args, kwargs={}):\n self._cache = {}\n self.results = results\n self.get_margeff(*args, **kwargs)\n\n def _reset(self):\n self._cache = {}\n\n @cache_readonly\n def tvalues(self):\n _check_at_is_all(self.margeff_options)\n return self.margeff / self.margeff_se\n\n def summary_frame(self, alpha=.05):\n \"\"\"\n Returns a DataFrame summarizing the marginal effects.\n\n Parameters\n ----------\n alpha : float\n Number between 0 and 1. The confidence intervals have the\n probability 1-alpha.\n\n Returns\n -------\n frame : DataFrames\n A DataFrame summarizing the marginal effects.\n \"\"\"\n _check_at_is_all(self.margeff_options)\n from pandas import DataFrame\n names = [_transform_names[self.margeff_options['method']],\n 'Std. Err.', 'z', 'Pr(>|z|)',\n 'Conf. Int. Low', 'Cont. Int. Hi.']\n ind = self.results.model.exog.var(0) != 0 # True if not a constant\n exog_names = self.results.model.exog_names\n var_names = [name for i, name in enumerate(exog_names) if ind[i]]\n table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,\n self.pvalues, self.conf_int(alpha)))\n return DataFrame(table, columns=names, index=var_names)\n\n @cache_readonly\n def pvalues(self):\n _check_at_is_all(self.margeff_options)\n return stats.norm.sf(np.abs(self.tvalues)) * 2\n\n def conf_int(self, alpha=.05):\n \"\"\"\n Returns the confidence intervals of the marginal effects\n\n Parameters\n ----------\n alpha : float\n Number between 0 and 1. 
The confidence intervals have the\n probability 1-alpha.\n\n Returns\n -------\n conf_int : ndarray\n An array with lower, upper confidence intervals for the marginal\n effects.\n \"\"\"\n _check_at_is_all(self.margeff_options)\n me_se = self.margeff_se\n q = stats.norm.ppf(1 - alpha / 2)\n lower = self.margeff - q * me_se\n upper = self.margeff + q * me_se\n return np.asarray(lzip(lower, upper))\n\n def summary(self, alpha=.05):\n \"\"\"\n Returns a summary table for marginal effects\n\n Parameters\n ----------\n alpha : float\n Number between 0 and 1. The confidence intervals have the\n probability 1-alpha.\n\n Returns\n -------\n Summary : SummaryTable\n A SummaryTable instance\n \"\"\"\n _check_at_is_all(self.margeff_options)\n results = self.results\n model = results.model\n title = model.__class__.__name__ + \" Marginal Effects\"\n method = self.margeff_options['method']\n top_left = [('Dep. Variable:', [model.endog_names]),\n ('Method:', [method]),\n ('At:', [self.margeff_options['at']]), ]\n\n from statsmodels.iolib.summary import (Summary, summary_params,\n table_extend)\n exog_names = model.exog_names[:] # copy\n smry = Summary()\n\n const_idx = model.data.const_idx\n if const_idx is not None:\n exog_names.pop(const_idx)\n\n J = int(getattr(model, \"J\", 1))\n if J > 1:\n yname, yname_list = results._get_endog_name(model.endog_names,\n None, all=True)\n else:\n yname = model.endog_names\n yname_list = [yname]\n\n smry.add_table_2cols(self, gleft=top_left, gright=[],\n yname=yname, xname=exog_names, title=title)\n\n # NOTE: add_table_params is not general enough yet for margeff\n # could use a refactor with getattr instead of hard-coded params\n # tvalues etc.\n table = []\n conf_int = self.conf_int(alpha)\n margeff = self.margeff\n margeff_se = self.margeff_se\n tvalues = self.tvalues\n pvalues = self.pvalues\n if J > 1:\n for eq in range(J):\n restup = (results, margeff[:, eq], margeff_se[:, eq],\n tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])\n tble = summary_params(restup, yname=yname_list[eq],\n xname=exog_names, alpha=alpha,\n use_t=False,\n skip_header=True)\n tble.title = yname_list[eq]\n # overwrite coef with method name\n header = ['', _transform_names[method], 'std err', 'z',\n 'P>|z|',\n '[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]\n tble.insert_header_row(0, header)\n # from IPython.core.debugger import Pdb; Pdb().set_trace()\n table.append(tble)\n\n table = table_extend(table, keep_headers=True)\n else:\n restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)\n table = summary_params(restup, yname=yname, xname=exog_names,\n alpha=alpha, use_t=False, skip_header=True)\n header = ['', _transform_names[method], 'std err', 'z',\n 'P>|z|', '[%3.1f%% Conf. 
Int.]' % (100 - alpha * 100)]\n table.insert_header_row(0, header)\n\n smry.tables.append(table)\n return smry\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n\n self._reset() # always reset the cache when this is called\n # TODO: if at is not all or overall, we can also put atexog values\n # in summary table head\n method = method.lower()\n at = at.lower()\n _check_margeff_args(at, method)\n self.margeff_options = dict(method=method, at=at)\n results = self.results\n model = results.model\n params = results.params\n exog = model.exog.copy() # copy because values are changed\n effects_idx = exog.var(0) != 0\n const_idx = model.data.const_idx\n\n if dummy:\n _check_discrete_args(at, method)\n dummy_idx, dummy = _get_dummy_index(exog, const_idx)\n else:\n dummy_idx = None\n\n if count:\n _check_discrete_args(at, method)\n count_idx, count = _get_count_index(exog, const_idx)\n else:\n count_idx = None\n\n # get the exogenous variables\n exog = _get_margeff_exog(exog, at, atexog, effects_idx)\n\n # get base marginal effects, handled by sub-classes\n effects = model._derivative_exog(params, exog, method,\n dummy_idx, count_idx)\n effects = _effects_at(effects, at)\n\n if at == 'all':\n self.margeff = effects[:, effects_idx]\n else:\n # Set standard error of the marginal effects by Delta method.\n margeff_cov, margeff_se = margeff_cov_with_se(\n model, params, exog, results.cov_params(), at,\n model._derivative_exog, dummy_idx, count_idx,\n method, 1)\n\n # do not care about at constant\n self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]\n self.margeff_se = margeff_se[effects_idx]\n self.margeff = effects[effects_idx]\n","repo_name":"statsmodels/statsmodels","sub_path":"statsmodels/genmod/generalized_estimating_equations.py","file_name":"generalized_estimating_equations.py","file_ext":"py","file_size_in_byte":116206,"program_lang":"python","lang":"en","doc_type":"code","stars":9039,"dataset":"github-code","pt":"91"} +{"seq_id":"20897259302","text":"import pytest\nfrom hamcrest import assert_that, equal_to\n\nfrom nodestream.pipeline import IterableExtractor, Pipeline, PipelineProgressReporter\n\n\n@pytest.mark.asyncio\nasync def test_pipeline_progress_reporter_calls_with_reporting_frequency(mocker):\n pipeline = Pipeline([IterableExtractor(range(100))], 10)\n reporter = PipelineProgressReporter(reporting_frequency=10, callback=mocker.Mock())\n await pipeline.run(reporter)\n assert_that(reporter.callback.call_count, equal_to(10))\n\n\n@pytest.mark.asyncio\nasync def test_pipeline_progress_reporter_for_testing(mocker):\n result = PipelineProgressReporter.for_testing([])\n assert_that(result.reporting_frequency, equal_to(1))\n assert_that(result.logger.name, equal_to(\"test\"))\n","repo_name":"nodestream-proj/nodestream","sub_path":"tests/unit/pipeline/test_pipeline_progress_reporter.py","file_name":"test_pipeline_progress_reporter.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"91"} +{"seq_id":"71708623664","text":"import os\nimport json\nfrom typing import Dict\nfrom pprint import pprint\n\nimport numpy as np\nimport pandas as pd\n\nfrom configs.tabnet import config\n\n\nclass SearchOption:\n def __init__(self, opt, type_):\n type_ = type_.lower()\n assert pd.api.types.is_list_like(opt)\n assert type_ in [\"choice\", \"range\"]\n self._opt = np.array(opt)\n self._type = type_\n if self._type == \"range\":\n assert len(self._opt) == 2\n assert 
self._opt[0] < self._opt[1]\n\n @property\n def opt(self):\n return self._opt\n\n @property\n def type(self):\n return self._type\n\n\nclass RandomParams:\n def __init__(self):\n self._range_dict = {}\n self._counter = 0\n\n def __call__(self, conf, make_d_a_same=True):\n for k, search_range in self._range_dict.items():\n if k in ['name', 'seed', 'device', 'data_dir', 'log_dir', 'bkup_dir', 'sub_dir']:\n continue\n\n # check unknown keys\n if not hasattr(conf, k):\n raise AttributeError(f\"type {type(conf)} does not have attribute {k}.\")\n\n if search_range.type == \"choice\":\n new_value = np.random.choice(search_range.opt)\n elif search_range.type == \"range\":\n min_, max_ = search_range.opt\n new_value = np.random.random() # 0 ~ 1\n new_value = new_value * (max_ - min_) + min_\n else:\n raise NotImplementedError\n\n if isinstance(new_value, np.integer):\n new_value = int(new_value)\n\n # inplace\n setattr(conf, k, new_value)\n\n if make_d_a_same:\n # n_a <- n_d\n setattr(conf, 'n_a', getattr(conf, 'n_d'))\n\n self.save_config(conf)\n self._counter += 1\n\n def save_config(self, conf: config):\n d = {k: getattr(conf, k) for k in dir(conf) if not k.startswith('__')}\n filename = f\"selected_params_{self._counter:02d}.json\"\n save_path = os.path.join(conf.log_dir, filename)\n with open(save_path, 'w') as fp:\n json.dump(d, fp)\n # check\n with open(save_path, 'r') as fp:\n pprint(json.load(fp))\n\n def set(self, param_name, opt, type_):\n self._range_dict[param_name] = SearchOption(opt, type_)\n\n\nif __name__ == \"__main__\":\n rp = RandomParams()\n rp.set(\"n_d\", [8, 16, 32], 'choice')\n\n c = config()\n for _ in range(3):\n rp(c)\n","repo_name":"Pseudo-Lab/20_MoA","sub_path":"tabnet/configs/random_search.py","file_name":"random_search.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"41144294809","text":"from airflow.operators.dummy_operator import DummyOperator\nfrom datetime import timedelta\n\nfrom config.v1.table_definitions import tables_test\nimport slack.msg as slack\nfrom utils import utils\n\n\n# set up DAG\ndag_name = \"dag_ETL_template\"\ndag_args = {}\nschedule_interval = \"@once\"\nconcurrency = 4\nmax_active_runs = 1\n\n# extract the information from the config.py file\nconn_id_extract = \"conn_test_source\"\nconn_id_transform = \"conn_test_transform\"\nconn_id_load = \"conn_test_load\"\nl_tables = tables_test\ndb_version = \"v1\"\n\n\n# DAG OBJECT\ndag = utils.dag_constructor(dag_name, dag_args, catchup=False,\n schedule_interval=schedule_interval, max_active_runs=max_active_runs,\n concurrency=concurrency)\n\n\n# Create a dummy operation just to know when every task is completed\ncompleted = DummyOperator(task_id=\"complete\",\n dag=dag,\n on_success_callback=slack.slack_succeeded_task)\n\n\nd_op_el = utils.get_python_operators_el(dag, conn_id_extract, conn_id_load, l_tables, db_version)\n\ncompleted.set_upstream(d_op_el.values())\n","repo_name":"watxaut/airflow-test","sub_path":"dags_test/dag_ETL_template.py","file_name":"dag_ETL_template.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13745932050","text":"from sys import platform\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\nclass Browser:\n def __enter__(self):\n dcap = dict(DesiredCapabilities.PHANTOMJS)\n 
dcap[\"phantomjs.page.settings.userAgent\"] = (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 \"\n \"(KHTML, like Gecko) Chrome/15.0.87\"\n )\n sarg = [\"--load-images=false\"]\n self.browser = webdriver.PhantomJS(executable_path=self.browser_platform(),\n desired_capabilities=dcap,\n service_args=sarg)\n self.browser.implicitly_wait(15)\n self.browser.set_window_size(1920, 1080)\n return self.browser\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.browser.quit()\n\n @staticmethod\n def browser_platform():\n if platform.startswith('win32'):\n return 'browsers\\windows\\phantomjs.exe'\n elif platform.startswith('linux'):\n return 'phantomjs'\n elif platform.startswith('darwin'):\n return 'browsers/osx/phantomjs'\n","repo_name":"mostaszewski/SleepWell","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"28575428605","text":"from scraper import trade_spider\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\ndef convert_to():\n options = trade_spider()\n flag = 0\n initial_currency = variable1.get()\n final_currency = variable2.get()\n if initial_currency == 'None':\n messagebox.showerror(\"Error input 1\", \"You did not choose the currency type !!!\")\n flag = 1\n if final_currency == 'None':\n messagebox.showerror(\"Error input 2\", \"You did not choose the currency type !!!\")\n flag = 1\n try: \n input_price = float(input_value.get('1.0',END))\n except:\n messagebox.showerror(\"Error input 3\", \"This number does not exist (use . no ,) !!!\")\n flag = 1\n if flag == 0:\n final_price = input_price * options[initial_currency]\n final_price = final_price / options[final_currency]\n output_value.delete('1.0', END)\n output_value.insert(END, str(final_price)) \n\n\nconverter = Tk()\nconverter.title(\"Unit Converter\")\nconverter.geometry(\"800x400\")\nconverter.config(bg=\"#f1e4d3\")\noptions = trade_spider()\nappName = Label(converter,text=\"Currency Converter\",font = (\"Comic Sans MS\", 30, \"bold\"),fg=\"#5b7771\", bg=\"#f1e4d3\")\nappName.place(x=215, y=10)\n\nvariable1 = StringVar(converter)\nvariable1.set(None)\n\ninput_options = OptionMenu(converter,variable1,*options)\ninput_options.place(x= 100 , y=150,width=300, height=50)\ninput_options.config(bg = \"#5b7771\",fg = \"#402e20\",font=(\"Comic Sans MS\",15,\"bold\"), activebackground = \"#bfb48d\" )\n\ninput_value = Text(converter,height=1,width=15,font=(\"arial\",20,\"bold\"),bd=5)\ninput_value.config(bg = \"#5b7771\", fg = \"#402e20\")\ninput_value.place(x=130, y=250)\n\nvariable2 = StringVar(converter)\nvariable2.set(None)\n\noutput_options = OptionMenu(converter,variable2,*options)\noutput_options.place(x= 400 , y=150,width=300, height=50)\noutput_options.config(bg = \"#5b7771\",fg = \"#402e20\",font=(\"Comic Sans MS\",15,\"bold\"), activebackground = \"#bfb48d\" )\n\noutput_value = Text(converter,height=1,width=15,font=(\"arial\",20,\"bold\"),bd=5)\noutput_value.config(bg = \"#5b7771\", fg = \"#402e20\")\noutput_value.place(x = 425, y = 250)\n\nconvert_button = Button(converter, text = \"convert\", bd = 5, command = convert_to)\nconvert_button.config(bg = \"#5b7771\",fg = \"#402e20\",font=(\"Comic Sans MS\",20,\"bold\"), activebackground = \"#bfb48d\")\nconvert_button.place(x = 330, y = 
320)\n\nconverter.mainloop()","repo_name":"Bordei08/Python-course","sub_path":"Proiect/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"30526722244","text":"from flask import Blueprint , request, redirect,render_template,url_for , flash\nfrom flask_login import current_user\nfrom instagram_web.util.helpers import gateway\nfrom models.image import Image\nfrom models.user import User\nfrom models.donation import Donation\nimport stripe\nimport os\nimport requests\n\n\n\n\n\ndonations_blueprint=Blueprint(\"donations\",\n __name__,\n template_folder=\"templates\")\n\n\n\n@donations_blueprint.route('/new')\ndef new(image_id):\n print(image_id)\n client_token=gateway.client_token.generate()\n image=Image.get_by_id(int(image_id))\n\n\n return render_template('donations/new.html' , image=image ,client_token=client_token)\n\n\n@donations_blueprint.route('/' , methods=[\"POST\"])\ndef create(image_id):\n payload_nonce=request.form.get(\"payload_nonce\")\n payment_amount=request.form.get(\"payment_amount\")\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print(payload_nonce)\n \n image=Image.get_by_id(int(image_id))\n user=User.get_by_id(current_user.id)\n\n\n result = gateway.transaction.sale({\n \"amount\": payment_amount,\n \"payment_method_nonce\": payload_nonce,\n \"device_data\": None,\n \"options\": {\n \"submit_for_settlement\": True\n }\n})\n if result.is_success:\n donation=Donation(amount=int(payment_amount), donor=user, image=image)\n donation.save()\n flash(f\"Thanks you for the RM{payment_amount} donation {current_user.username}\" , \"success\")\n def send_simple_message():\n \treturn requests.post(\n\t\t \"https://api.mailgun.net/v3/sandboxeaa30d1922044857bee3122cfe3ac0f9.mailgun.org/messages\",\n\t\t auth=(\"api\", os.environ.get(\"MAILGUN_PRIVATE_KEY\")),\n\t\t data={\"from\": \"Excited User \",\n\t\t\t \"to\": [\"weikhang_93@hotmail.com\"],\n\t\t\t \"subject\": \"Hello hahaha\",\n\t\t\t \"text\": \"changing the textailgun awesomness!\",\n \"html\":f\" \"})\n\n\n print(send_simple_message())\n\n\n\n return redirect(url_for('images.show', image_id=image.id))\n\n\n@donations_blueprint.route('/stripe' , methods=[\"POST\"])\ndef stripenew(image_id):\n print(\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\")\n stripe.api_key=os.environ.get(\"STRIPE_API_KEY\")\n try:\n\n checkout_session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[\n {\n 'price_data': {\n 'currency': 'usd',\n 'unit_amount': 2000,\n 'product_data': {\n 'name': 'Stubborn Attachments',\n 'images': ['https://i.imgur.com/EHyR2nP.png'],\n },\n },\n 'quantity': 1,\n },\n ],\n mode='payment',\n success_url=f'https://localhost:5000/images/{image_id}/donations' + '/success.html',\n cancel_url=f'https://localhost:5000/images/{image_id}/donations' + '/cancel.html',\n )\n return jsonify({'id': checkout_session.id})\n except Exception as e:\n return jsonify(error=str(e)), 403\n\n\n@donations_blueprint.route(\"/success.html\")\ndef success(image_id):\n\n return render_template('donations/success.html')\n\n@donations_blueprint.route(\"/cancel.html\")\ndef cancel(image_id):\n\n return render_template('donations/cancel.html')\n\n\n@donations_blueprint.route('/checkout')\ndef checkout(image_id):\n image=Image.get_by_id(int(image_id))\n\n return 
render_template('donations/checkout.html' , image=image)\n\n\n@donations_blueprint.route('/testing')\ndef testing(image_id):\n stripe.api_key = 'sk_test_51HZvbqHyiZLRnqzJOLZZWOtFBAUiUuhzc4PpnGFbSsHXl3qgtF9AWMfENlstDaDL96LwZNgAhX5yIBjERvtFiyNA00DhvRazOn'\n\n result=stripe.PaymentIntent.create(\n amount=1000,\n currency='myr',\n payment_method_types=['card'],\n receipt_email='jenny.rosen@example.com',\n )\n\n print(result)\n \n\n return \"stripesssss\"","repo_name":"weikhang93/Flasktagram","sub_path":"instagram_web/blueprints/donations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13236548186","text":"import requests\nimport base64\n\n\ndef fetch_crowd_sec_ip_record(ip: str, api_key: str) -> dict[str, any]:\n \"\"\"\n Submits an ip to the CrowdSec API to see if it is deemed suspicious.\n\n Parameters:\n ip: The ip address of the server from the TLS Request.\n api_key: The CrowdSec API Key from the config file.\n\n Returns:\n The raw response from CrowdSec as a parsed JSON object.\n \"\"\"\n headers = {\"x-api-key\": api_key}\n url = f\"https://cti.api.crowdsec.net/v2/smoke/{ip}\"\n response = requests.get(url, headers=headers)\n json_response = response.json()\n if json_response.get(\"message\") != None:\n return None\n else:\n return response.json()\n\n\ndef fetch_virus_total_record(hostname: str, api_key: str) -> dict[str, any]:\n \"\"\"\n Submits the hostname to the VirusTotal API to see if it is deemed malicious.\n\n Parameters:\n hostname: The hostname from the TLS Request.\n api_key: The Virus Total API Key from the config file.\n\n Returns:\n The raw response from Virus Total as a parsed JSON object.\n \"\"\"\n headers = {\"x-apikey\": api_key}\n encoded_hostname = base64.b64encode(hostname.encode(\"utf8\")).decode(\"utf8\").replace(\"=\", \"\")\n url = f\"https://www.virustotal.com/api/v3/urls/{encoded_hostname}\"\n response = requests.get(url, headers=headers)\n\n return response.json()","repo_name":"mmacfadden/csc-842-sm23","sub_path":"cycle-8/lib/threat_intel.py","file_name":"threat_intel.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15701639975","text":"import datetime\n\nfrom wtforms import StringField, PasswordField, SelectField, FileField, RadioField, BooleanField, DateTimeField, SubmitField, SelectMultipleField, HiddenField\nfrom wtforms.validators import DataRequired, Email, MacAddress, IPAddress, URL\nfrom bluecat.wtform_extensions import GatewayForm, validate_element_in_tuple\nfrom bluecat.wtform_fields import *\n\nclass GenericFormTemplate(GatewayForm):\n # When updating the form, remember to make the corresponding changes to the workflow pages\n workflow_name = 'create_address_manager_user'\n workflow_permission = 'create_address_manager_user_page'\n\n username = CustomStringField(\n label='Username',\n is_disabled_on_start=False,\n required=True,\n validators=[]\n )\n\n password = PasswordField(\n label='Password',\n validators=[DataRequired()]\n )\n\n email = CustomStringField(\n label='Email Address',\n is_disabled_on_start=False,\n required=True,\n validators=[DataRequired(), Email()]\n )\n\n phonenumber = CustomStringField(\n label='Phone Number',\n is_disabled_on_start=False,\n required=False,\n )\n\n typeofuser = CustomSelectField(\n label='Type of User',\n is_disabled_on_start=False,\n # Below are the API values 
followed by GUI values\n choices=[('ADMIN', 'Administrator'), ('REGULAR', 'Non-Administrator')],\n clear_below_on_change=False,\n #Javascript call below to enable/disable secpriv and histpriv\n on_complete = ['is_admin']\n )\n\n secpriv = NoPreValidationSelectField(\n label='Security Privilege',\n # Below are the API values followed by GUI values\n choices=[ ('NO_ACCESS', 'No Access'), ('VIEW_MY_ACCESS_RIGHTS', 'View My Access Rights'), ('VIEW_OTHERS_ACCESS_RIGHTS', 'View Others Access Rights'), ('CHANGE_ACCESS_RIGHTS', 'Change Access Rights'), ('ADD_ACCESS_RIGHTS', 'Add Access Rights'), ('DELETE_ACCESS_RIGHTS', 'Delete Access Rights')]\n )\n\n histpriv = NoPreValidationSelectField(\n label='History Privilege',\n # Below are the API values followed by GUI values\n choices=[('HIDE', 'Hide'), ('VIEW_HISTORY_LIST', 'View History List')]\n )\n\n acctype = CustomSelectField(\n label='Access Type',\n # Below are the API values followed by GUI values\n choices=[('GUI', 'GUI'), ('API', 'API'), ('GUI_AND_API', 'GUI And API')],\n clear_below_on_change=False,\n is_disabled_on_start=False\n )\n\n #List of all Gateway groups (UDFs) for users manually, no API\n gateway_groups = CustomSelectField(\n label='Assign to Gateway Group',\n required=False,\n # Below are the UDF values for workflow permissions followed by GUI values\n choices=[('admin', 'admin'), ('all', 'all')],\n clear_below_on_change=False,\n is_disabled_on_start=False,\n result_decorator = None\n )\n\n #List of all Address Manager Groups by API\n usergroups = SelectMultipleField(\n 'Assign to Groups',\n coerce=int\n )\n\n submit = SubmitField(label='Submit')","repo_name":"bluecatlabs/gateway-workflows","sub_path":"Community/create_address_manager_user/create_address_manager_user_form.py","file_name":"create_address_manager_user_form.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"91"} +{"seq_id":"28597255347","text":"'''\n\n* 유형\n DFS, BFS\n\n* 체감 난이도\n *\n\n* 풀이\n 모든 노드에 대해 그래프 탐색을 실시하면 몇 개의 연결된 덩어리가 나오는지 알 수 있음.\n\n 이미 방문이 된 노드는 그 노드로부터 탐색을 할 이유가 없으므로 넘어가고\n 방문이 안 된 노드만 탐색을 하며 체크를 해주면 연결된 네트워크를 모두 체크할 수 있다.\n\n\n'''\n\n\ndef solution(n, computers):\n answer = 0\n graph = {com+1:[] for com in range(n)}\n [[graph[v1+1].append(v2+1) if conn == 1 and v1!=v2 else None for v2, conn in enumerate(l)] for v1, l in enumerate(computers)]\n check = {i+1:False for i in range(len(computers))}\n\n def dfs(here):\n check[here] = True\n [dfs(there) if not check[there] else None for there in graph[here]]\n\n for v in graph:\n if not check[v]:\n answer += 1\n dfs(v)\n return answer","repo_name":"hhebb/algorithm","sub_path":"week_1 [1~8]/네트워크.py","file_name":"네트워크.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41393999440","text":"from Matrix import Matrix\nfrom Matrix import matrix\np=matrix()\n\nclass Jugador1:\n def variable(self,movimientoDerecha,movimientoIzquierda,cutderecha,cutizquierda):\n self.x=[]\n self.y=[]\n self.respuesta=[]\n self.movimientoDerecha=movimientoDerecha\n self.movimientoIzquierda=movimientoIzquierda\n self.cutderecha=cutderecha\n self.cutizquierda=cutizquierda\n def movimientoJ1(self):\n self.movimientoDerecha=False\n self.movimientoIzquierda=False\n self.cutderecha=False\n self.cutizquierda=False\n respuesta=\" \"\n print(\"X turno \")\n x = int(input(\"Entra el numero fila:\"))\n y = int(input(\"Entra el numero columna:\"))\n if 
(str(Matrix[x][y]) == \"X\"):\n if (not y == 7 and not y == 0):\n\n if (str(Matrix[x + 1][y + 1]) == \" \"):\n self.movimientoIzquierda = True\n if (str(Matrix[x + 1][y - 1]) == \" \"):\n self.movimientoIzquierda = True\n elif (y == 7):\n if (str(Matrix[x + 1][y - 1]) == \" \"):\n self.movimientoDerecha = True\n else:\n if (str(Matrix[x + 1][y + 1]) == \" \"):\n self.movimientoDerecha = True\n if (not y > 5 and not y < 2):\n if (str(Matrix[x + 1][y + 1]) == \"O\" and not str(Matrix[x + 2][y + 2]) == \"O\" and not str(\n Matrix[x + 2][y + 2]) == \"X\"):\n self.cutderecha = True\n if (str(Matrix[x + 1][y - 1]) == \"O\" and not str(Matrix[x + 2][y - 2]) and not str(\n Matrix[x + 2][y - 2]) == \"X\"):\n self.cutizquierda = True\n elif(y>5):\n if(str(Matrix[x+1][y+1])==\"O\"and not str(Matrix[x+2][y+2])==\"O\"and not str(Matrix[x+2][y-2])==\"X\"):\n self.cutizquierda=True\n else:\n if(str(Matrix[x+1][y+1])==\"O\"and not str(Matrix[x+2][x-2])==\"O\"and not str(Matrix[x+2][y+2])==\"X\"):\n self.cutderecha=True\n if(any([self.cutizquierda,self.cutderecha])):\n self.movimientoIzquierda=False\n self.movimientoDerecha=False\n if(any([self.movimientoIzquierda,self.movimientoDerecha])):\n if(self.movimientoIzquierda):\n if(self.movimientoDerecha):\n respuesta=input(\"Movimiento Derecha o Izquierda ?, Si es Derecha o Izquierda , si es Derecha responder con D y si es Izquierda con I\")\n respuesta=respuesta.upper()\n else:\n respuesta=\"I\"\n else:\n respuesta=\"D\"\n if(any([respuesta==\"D\",respuesta==\"I\"])):\n if(respuesta==\"D\"):\n Matrix[x+1][y+1]=\"x\"\n Matrix[x][y]=\" \"\n p.printMatrix(Matrix)\n else:\n Matrix[x+1][y-1]=\"x\"\n Matrix[x][y]=\" \"\n p.printMatrix(Matrix)\n if(any([respuesta==\"D\",respuesta==\"I\"])):\n if(respuesta==\"D\"):\n Matrix[x+1][y+1]=\" \"\n Matrix[x][y]=\"x\"\n p.printMatrix(Matrix)\n else:\n Matrix[x+1][y-1]=\" \"\n Matrix[x][y]=\"x\"\n p.printMatrix(Matrix)\n elif(any([self.cutizquierda,self.cutderecha])):\n if(self.cutizquierda):\n if(self.cutderecha):\n respuesta=input(\"Movimiento Derecha o Izquierda,si es Derecha responder con D y si es Izquierda con I\")\n respuesta=respuesta.upper()\n else:\n respuesta=\"D\"\n else:\n respuesta=\"I\"\n if(any([respuesta==\"D\",respuesta==\"L\"])):\n if(respuesta==\"D\"):\n Matrix[x+1][y+1]=\" \"\n Matrix[x+2][y+2]=\"x\"\n Matrix[x][y]=\" \"\n else:\n Matrix[x+1][y-1]=\" \"\n Matrix[x+2][y-2]=\"x\"\n Matrix[x][y]=\" \"\n p.printMatrix(Matrix)\n\n if(not y>5 and not y<2):\n if(str(Matrix[x+1][y+1])==\"O\" and not str(Matrix[x+2][y+2])==\"O\" and not str(Matrix[x+2][y+2])==\"X\"):\n self.cutderecha=True\n if(str(Matrix[x+1][y-1])==\"O\"and not str(Matrix[x+2][y-2])==\"O\" and not str(Matrix[x+2][y-2])==\"X\"):\n self.cutizquierda=True\n elif(y>5):\n if(str(Matrix[x+1][y-1])==\"O\"and not str(Matrix[x+2][y-2])==\"O\" and not str(Matrix[x+2][y-2])==\"X\"):\n self.cutderecha=True\n else:\n if(str(Matrix[x+1][y+1])==\"O\" and not str(Matrix[x+2][y+2])==\"O\" and not str(Matrix[x+2][y+2])==\"X\"):\n self.cutizquierda=True\n\n\n else:\n print(\"Movimiento Invalido\")\n self.movimientoJ1()\n else:\n print(\"No es correcto\")\n self.movimientoJ1()\n","repo_name":"romanfh1998i/DamasFinalMiniProyecto","sub_path":"MovimientoX.py","file_name":"MovimientoX.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7611919587","text":"import os\n\npath_list_IDs = \"/home/leo/Dropbox/CNN for affordances detection/Validation_shapenet_32.txt\"\nf = 
open(path_list_IDs)\nlist_IDs = f.read().splitlines()\nf.close()\npath_root = '/media/leo/Datos/Datasets/Training_32/Validation'\nfor i, name in enumerate(list_IDs):\n print('con nombre: ',path_root+'/X/'+name+\".npy\")\n\n try:\n os.isfile(path_root+'/X/'+name+\".npy\")\n except:\n print('The file do not exits')\n try:\n os.isfile(path_root+'/Y/'+name+\".npy\")\n except:\n print('The file do not exits')\n\n\n\n","repo_name":"hideldt/SCBOV","sub_path":"Networks/PCN/eliminar_archivos.py","file_name":"eliminar_archivos.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1348292403","text":"from django.shortcuts import render\nfrom .forms import ApplicationForm, ContactForm\nfrom .models import Form\nfrom django.contrib import messages\nfrom django.core.mail import EmailMessage\nfrom decouple import config\n\n\nEMAIL_HOST_USER = config(\"EMAIL_HOST_USER\")\n\n\ndef index(request):\n if request.method == \"POST\":\n form = ApplicationForm(request.POST)\n if form.is_valid():\n first_name = form.cleaned_data[\"first_name\"]\n last_name = form.cleaned_data[\"last_name\"]\n email = form.cleaned_data[\"email\"]\n date = form.cleaned_data[\"date\"]\n status = form.cleaned_data[\"status\"]\n\n # Store data in the database \n Form.objects.create(first_name=first_name, last_name=last_name,\n email=email, date=date, status=status)\n \n message_body = f\"A new job application was submitted! \\n{first_name} {last_name} \\nThank you!\"\n email_message = EmailMessage(\"Form submission confirmation\", message_body, to=[email])\n email_message.send()\n \n # Show a message after submitting a form \n messages.success(request, \"Form submitted successfully!\")\n\n return render(request, \"index.html\")\n\n\ndef about(request):\n return render(request, \"about.html\")\n\n\ndef contact(request):\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n if form.is_valid():\n first_name = form.cleaned_data[\"first_name\"]\n last_name = form.cleaned_data[\"last_name\"]\n email = form.cleaned_data[\"email\"]\n subject = form.cleaned_data[\"subject\"]\n message = form.cleaned_data[\"message\"]\n\n message_body = f\"{subject}. From: {email} \\n{message} \\n{first_name} {last_name}\"\n email_message = EmailMessage(\"New message\", message_body, to=[EMAIL_HOST_USER])\n email_message.send()\n\n messages.success(request, \"Your message was successfully submitted! 
Thank you!\")\n return render(request, \"contact.html\")\n","repo_name":"alinatussupova/django_job_form_application","sub_path":"job_application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"23386956608","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport os\nimport zipfile\nimport numpy as np\nimport numpy as np\nimport random\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.optimizers import Adam\nimport keras_tuner as kt\nimport numpy as np\nimport warnings\n\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.keras.layers import ZeroPadding2D\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing import image\nimport tensorflow.keras.backend as K\n#from tensorflow.keras.utils import layer_utils\n#from keras.utils.data_utils import get_file\nfrom tensorflow.keras.applications.imagenet_utils import decode_predictions\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\n#from tensorflow.keras.applications.imagenet_utils import _obtain_input_shape\n#from tensorflow.keras.engine.topology import get_source_inputs\n\n\n\n\n#RESNET 50 CODE FROM https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\n\n\n\ndatastuff = np.load(\"/cosma5/data/durham/dc-will10/CNNtensors.npz\")\ntrain_dataset = datastuff[\"traindata\"]\ntest_dataset = datastuff[\"testdata\"]\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\ntrainlabels = datastuff[\"trainlabels\"]\nvallabels = datastuff[\"vallabels\"]\n\"\"\"\nprint(np.shape(trainlabels))\nfulldata = tf.concat([train_dataset, test_dataset], axis = 0)\nfulllabels = tf.concat([trainlabels, vallabels], axis = 0)\ntrainsplit = 0.6\nvalind = int(round(trainsplit*len(fulldata)))\nprint(valind)\ntest_dataset = fulldata[valind:-1]\ntrain_dataset = fulldata[0:valind]\nvallabels = fulllabels[valind:-1]\ntrainlabels = fulllabels[0:valind]\n#dataslice = tf.slice(train_dataset, int(0.8*len(train_dataset)),len(train_dataset) - int(0.8*len(train_dataset)))\n#labelslice = tf.slice(trainlabels, int(0.8*len(trainlabels)), len(trainlabels) - int(0.8*len(trainlabels)))\n\"\"\"\nprint(np.shape(trainlabels))\nprint(np.shape(vallabels))\nprint(np.shape(train_dataset))\nprint(np.shape(test_dataset))\nprint(\"TENSORS LOADED\")\n\"\"\"\nnewtrain = train_dataset\nnewlabels = trainlabels\nextratrain = []\nextralabels = []\nfor i in range(10000):\n r = np.random.randint(0, len(newtrain))\n lab = newlabels[r]\n theta = [90,180,270]\n r2 = np.random.randint(0,3)\n random_bit = random.getrandbits(1)\n random_bit2 = random.getrandbits(1)\n flip_h = bool(random_bit)\n flip_v = bool(random_bit2)\n augmented = ImageDataGenerator().apply_transform(x = newtrain[r], transform_parameters = {\"flip_horizontal\":flip_h, \"flip_vertical\":flip_v})\n augmented = augmented\n lab 
= lab\n extratrain.append(augmented)\n extralabels.append(lab)\n print(i)\nprint(np.shape(extratrain))\nprint(np.shape(extralabels))\nextratrain = np.array(extratrain)\nextralabels = np.array(extralabels)\nnp.savez(\"extradata.npz\", data = extratrain, labels = extralabels)\nimport pdb ; pdb.set_trace()\nextratrain = tf.convert_to_tensor(extratrain)\nextralabels = tf.convert_to_tensor(extralabels)\n\nnewtrain = tf.concat([newtrain,extratrain], axis = 0)\nnewlabels = tf.concat([newlabels, extralabels], axis = 0)\nprint(tf.shape(newtrain))\n\"\"\"\n\n\nWEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size,\n padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = layers.add([x, input_tensor])\n x = Activation('relu')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filterss of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n Note that from stage 3, the first conv layer at main path is with strides=(2,2)\n And the shortcut should have strides=(2,2) as well\n \"\"\"\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), strides=strides,\n name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size, padding='same',\n name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = 
BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n shortcut = Conv2D(filters3, (1, 1), strides=strides,\n name=conv_name_base + '1')(input_tensor)\n shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n x = layers.add([x, shortcut])\n x = Activation('relu')(x)\n return x\n\n\ndef ResNet50(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates the ResNet50 architecture.\n Optionally loads weights pre-trained\n on ImageNet. Note that when using TensorFlow,\n for best performance you should set\n `image_data_format=\"channels_last\"` in your Keras config\n at ~/.keras/keras.json.\n The model and the weights are compatible with both\n TensorFlow and Theano. The data format\n convention used by the model is the one\n specified in your Keras config file.\n # Arguments\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization)\n or \"imagenet\" (pre-training on ImageNet).\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or `(3, 224, 244)` (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 197.\n E.g. `(200, 200, 3)` would be one valid value.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n # Returns\n A Keras model instance.\n # Raises\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n \"\"\"\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n #input_shape = tf.keras.applications.imagenet_utils._obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n input_shape = (224,224,5)\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = ZeroPadding2D((3, 3))(img_input)\n x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 
256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n\n x = AveragePooling2D((7, 7), name='avg_pool')(x)\n\n if include_top:\n x = Flatten()(x)\n x = Dense(classes, name='fc1000')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = tf.keras.engine.topology.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='resnet50')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = tf.keras.utils.data_utils.get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = tf.keras.utils.data_utils.get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n tf.keras.utilslayer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n tf.keras.utilslayer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). 
'\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model\n\nepochs = 200\n#batch_size = 300\nmodel = ResNet50(include_top=True, weights=None,\n input_tensor=None, input_shape=(224,224,5),\n pooling=None,\n classes=16)\noptimizer = Adam(lr=0.0001)\n\nmodel.compile(optimizer = optimizer , loss = tf.keras.losses.MeanSquaredError(), metrics=[\"mean_absolute_error\"])\nhistory = model.fit(x = train_dataset, y = trainlabels, validation_data = (test_dataset, vallabels), epochs = epochs)\nmodel.save(\"/cosma/home/durham/dc-will10/CNNmodelRes\")\nval_results = []\nfor data in test_dataset:\n alt = np.expand_dims(data, axis = 0)\n print(np.shape(alt))\n pred = model.predict(alt)\n val_results.append(pred)\nval_data = vallabels\nloss = history.history[\"loss\"]\nvalloss = history.history[\"val_loss\"]\nnp.savez(\"cnnmetricsres.npz\", validation_results = val_results, validation_data = val_data, loss = loss, valloss = valloss)\n","repo_name":"samw187/spectra","sub_path":"Older_Models/CNN_trials_with_ResNet.py","file_name":"CNN_trials_with_ResNet.py","file_ext":"py","file_size_in_byte":15278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40525379698","text":"# class Solution:\n# def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n# if not nums: return []\n# ret = set()\n# l = len(nums)\n# def dfs(curr, rest):\n# if l == len(curr):\n# ret.add(tuple(curr))\n# return\n# for i in range(len(rest)):\n# dfs(curr + [rest[i]], rest[:i] + rest[i+1:])\n# # main\n# dfs([], nums)\n# return list(ret)\n\nclass Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n ret = [[]]\n for n in nums:\n new_ans = []\n for it in ret:\n for i in range(len(it)+1):\n new_ans.append(it[:i] + [n] + it[i:])\n if i < len(it) and n == it[i]: break # 这个判断写起来特别特别考验逻辑思维能力\n ret = new_ans\n return ret","repo_name":"Bieneath/LeetCode_training","sub_path":"Week_03/47.py","file_name":"47.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6008709339","text":"\"\"\"\nFormatter color tag constants.\n\nThese are ANSI strings that paints some terminal text.\n\"\"\"\n\n# Color tags.\nBLACK = '\\033[90m'\nBLUE = '\\033[94m'\nCYAN = '\\033[96m'\nGREEN = '\\033[92m'\nMAGENTA = '\\033[95m'\nRED = '\\033[91m'\nWHITE = '\\033[97m'\nYELLOW = '\\033[93m'\n","repo_name":"marlondecol/python-formatter","sub_path":"modules/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17020495944","text":"#!/usr/bin/env python3\n\"\"\" test for optimal number of clusters \"\"\"\nimport numpy as np\nkmeans = __import__('1-kmeans').kmeans\nvariance = __import__('2-variance').variance\n\n\ndef optimum_k(X, kmin=1, kmax=None, iterations=1000):\n \"\"\"\n tests for optimum number of lcusters by varaince\n X: data set. 
np.ndarray (n, d)\n n: number of data poitns\n \"\"\"\n if type(X) is not np.ndarray or len(X.shape) != 2:\n return None, None\n if not isinstance(kmin, int) or kmin < 1:\n return None, None\n if kmax is None:\n kmax = X.shape[0]\n if type(kmax) is not int or kmax < 1 or kmax < kmin + 1:\n return None, None\n if type(iterations) is not int or iterations < 1:\n return None, None\n\n # n, d = X.shape\n results = []\n d_vars = []\n\n for k in range(kmin, kmax + 1):\n C, clss = kmeans(X, k, iterations)\n results.append((C, clss))\n vari = variance(X, C)\n if k == kmin:\n small_var = vari\n d_vars.append(small_var - vari)\n return results, d_vars\n","repo_name":"mag389/holbertonschool-machine_learning","sub_path":"unsupervised_learning/0x01-clustering/3-optimum.py","file_name":"3-optimum.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25003934356","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nimport argparse\nimport os\n\nfrom sklearn.manifold import TSNE\n\n\nparser = argparse.ArgumentParser(description='Plot Test')\nparser.add_argument('--folder', type=str,\n help=\"folder containing means and stds\")\nparser.add_argument('--num-rooms', type=int,\n help=\"number of rooms in picolmaze\")\n# plt.set_cmap(plt.rcParamsDefault[\"image.cmap\"])\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n means = np.loadtxt(open(\n os.path.join(args.folder, 'inv_head_means.csv'), 'rb'), delimiter=',')\n stds = np.loadtxt(open(\n os.path.join(args.folder, 'inv_head_stds.csv'), 'rb'), delimiter=',')\n stds = np.nan_to_num(stds)\n phis = np.loadtxt(open(\n os.path.join(args.folder, 'inv_head_phis.csv'), 'rb'), delimiter=',')\n\n side = int(args.num_rooms**(1 / 2))\n\n # vmin_mean = np.min(means)\n # vmax_mean = np.max(means)\n vmin_mean = -2\n vmax_mean = 6\n\n # vmin_std = np.min(stds)\n # vmax_std = np.max(stds)\n vmin_std = 0\n vmax_std = 3\n\n cmap = 'viridis'\n\n\n fig, axs = plt.subplots(side, side)\n\n for i in range(args.num_rooms):\n ax = axs[i % side, i // side]\n img = ax.imshow(means[i].reshape(16, 18), vmin=vmin_mean, vmax=vmax_mean)\n img.set_cmap(cmap)\n fig.colorbar(img, ax=ax)\n ax.set_axis_off()\n\n plt.savefig(os.path.join(args.folder, 'inv_head_means.pdf'), dpi=300)\n plt.close()\n\n\n fig, axs = plt.subplots(side, side)\n\n for i in range(args.num_rooms):\n ax = axs[i % side, i // side]\n img = ax.imshow(np.sort(means[i]).reshape(16, 18), vmin=vmin_mean, vmax=vmax_mean)\n img.set_cmap(cmap)\n fig.colorbar(img, ax=ax)\n ax.set_axis_off()\n\n plt.savefig(os.path.join(args.folder, 'inv_head_means_sorted.pdf'), dpi=300)\n plt.close()\n\n\n fig, axs = plt.subplots(side, side)\n\n for i in range(args.num_rooms):\n ax = axs[i % side, i // side]\n img = ax.imshow(stds[i].reshape(16, 18), vmin=vmin_std, vmax=vmax_std)\n img.set_cmap(cmap)\n fig.colorbar(img, ax=ax)\n ax.set_axis_off()\n\n plt.savefig(os.path.join(args.folder, 'inv_head_stds.pdf'), dpi=300)\n plt.close()\n\n\n fig, axs = plt.subplots(side, side)\n\n for i in range(args.num_rooms):\n ax = axs[i % side, i // side]\n img = ax.imshow(np.sort(stds[i]).reshape(16, 18), vmin=vmin_std, vmax=vmax_std)\n img.set_cmap(cmap)\n fig.colorbar(img, ax=ax)\n ax.set_axis_off()\n\n plt.savefig(os.path.join(args.folder, 'inv_head_stds_sorted.pdf'), dpi=300)\n plt.close()\n\n\n phis_tsne = TSNE(n_components=2, n_iter=2000, learning_rate=200).fit_transform(phis[:, 1:])\n\n with plt.style.context('default'):\n img = 
plt.scatter(phis_tsne[:, 0], phis_tsne[:, 1], c=phis[:, 0], cmap='jet')\n plt.axis('off')\n plt.colorbar(img, ticks=range(args.num_rooms))\n\n plt.savefig(os.path.join(args.folder, 'inv_head_phis_tsne.pdf'), dpi=300)\n plt.close()\n","repo_name":"utanashati/curiosity-recast","sub_path":"plot_ideal_predictor.py","file_name":"plot_ideal_predictor.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34625461793","text":"import numpy as np\nfrom keras.layers.embeddings import Embedding\nimport os\nfrom keras.models import Sequential\nimport tensorflow as tf\n\n\n# 将原本的1/0 变成embedding编码\ndef embedding_category():\n model = Sequential()\n model.add(Embedding(2,256,input_length=93))\n half_year_features = np.load(\"pick_4_features_two_year.npy\")\n time = half_year_features[:,:,0].reshape(-1,half_year_features.shape[1],1)\n features = half_year_features[:,:,1:].reshape(-1, half_year_features.shape[2]-1)\n model.compile(\"rmsprop\",'mse')\n output_array = model.predict(features).reshape(-1,half_year_features.shape[1],93,256)\n np.save(\"4_two_year_embedding_features.npy\",output_array)\n np.save(\"4_two_year_time.npy\",time)\n print(output_array)\n print(time.shape)\n\n\nif __name__ == \"__main__\":\n embedding_category()\n","repo_name":"ZJU-BMI/survival-analysis","sub_path":"Embedding.py","file_name":"Embedding.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"12747350245","text":"import fitz\r\nimport pandas as pd\r\nimport os\r\n\r\ndef get_all_info(title,text):\r\n path=os.getcwd()\r\n path=path+'\\Dataset'\r\n for folder in os.listdir(path):\r\n for file in os.listdir(path+'\\\\'+folder):\r\n title.append(folder)\r\n text.append(file)\r\n\r\n return title,text\r\n\r\ndef get_topics():\r\n return os.listdir('Dataset')\r\n\r\ndef get_Data_and_Label(df):\r\n title=[]\r\n text=[]\r\n title,text=get_all_info(title,text)\r\n df['Text']=pd.DataFrame(text)\r\n df['Title']=pd.DataFrame(title) \r\n return df,title\r\n\r\ndef get_Text_from_pdf(info):\r\n path=os.getcwd()\r\n path=path+'\\Dataset'\r\n path=path+'\\\\'+info['Title']\r\n file=info['Text']\r\n doc=fitz.open(path+'\\\\'+file)\r\n page=doc[0]\r\n content=page.get_text()\r\n return content\r\n\r\ndef pre_process_Data_and_Label(df,title):\r\n\r\n #Iterating over each index and getting content of each file.\r\n for i in range(df.shape[0]):\r\n df.at[i,'Text']=get_Text_from_pdf(df.iloc[i])\r\n title=get_topics()\r\n for i in range(len(title)):\r\n df.loc[df['Title']==title[i],'Title']=i\r\n return df\r\n\r\n\r\ndef Generator():\r\n df=pd.DataFrame(columns=['Text','Title'])\r\n df,title=get_Data_and_Label(df) # Here we got dataframe with folder name and file name\r\n df=pre_process_Data_and_Label(df,title) #Getting file names\r\n df.to_csv('Dataset_final.csv')\r\n\r\n\r\nif __name__=='__main__':\r\n Generator()\r\n print('Dataset Generated Successfully')","repo_name":"Siddhant128-bit/Classify_PDF_Multinominal_Naive_Bayes","sub_path":"Dataset_Generator.py","file_name":"Dataset_Generator.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"12447086981","text":"#-------------------------------------------#\n\n# Universidad del Valle de Guatemala\n# Departamento de Computación\n# Organizador de Computadoras y Assembler\n\n# Javier Alejandro 
Ramírez Heredia-21600 \n# Mario Andres Cristales Cardona-21631\n# 24/01/2022\n \n# Ejercicio 5\n\n#-------------------------------------------#\nfrom pickle import TRUE\n\n\ndef main():\n\n estado = True\n\n\n while estado:\n menu()\n opcion = int(input(\"¡Bienvenido!, Que opcion del menu quieres elegir: \"))\n \n if opcion == 1:\n calculadoraCA2()\n input()\n elif opcion== 2:\n\n Respuesta = input('¿Desea salir del Programa: ')\n if Respuesta == \"si\":\n print (\"FeliZ Dia =)\")\n \n estado = False \n \n else:\n main()\n\n else:\n\n print('Ingrese una opcion correcta del menu')\n\n print(main())\n \ndef menu():\n\n print('''\n ------------------------------------------------------\n - ___ _ _ -\n - | _ ) (_) _ _ __ _ _ _ (_) ___ ___ -\n - | _ \\ | | | ' \\ / _` | | '_| | | / _ \\ (_-< -\n - |___/ |_| |_||_| \\__,_| |_| |_| \\___/ /__/ -\n ------------------------------------------------------\n ------------------------------------------------------\n - (1) Ingresar un Numero Binario -\n - (2) Salir -\n ------------------------------------------------------ \n ''')\n\n\n \ndef Com_A_2(lista):\n print(\"------------------------------------------------------\")\n print(\"lista ingresada: \")\n print(\"\".join(lista))\n\n for i in range(len(lista)):\n\n num = int(lista[i])\n if num == 1 :\n lista[i] = '0'\n elif num == 0:\n lista[i] = '1'\n \n newLista=\"\".join(lista)\n \n a = newLista\n b = \"1\"\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n\n resultado = ''\n\n carry = 0\n\n for i in range(max_len - 1, -1, -1):\n r = carry\n r += 1 if a[i] == '1' else 0\n r += 1 if b[i] == '1' else 0\n resultado = ('1' if r % 2 == 1 else '0') + resultado\n\n carry = 0 if r < 2 else 1\n\n if carry != 0:\n resultado = '1' + resultado\n \n print(\"------------------------------------------------------\")\n print(\"el complemento a 2 del número ingresado es: \")\n\n print(resultado.zfill(max_len))\n \ndef bin_a_dec(binario):\n pos = 0\n decimal = 0\n binario = binario[::-1]\n for digito in binario:\n multiplicar = 2**pos\n decimal += int(digito) * multiplicar\n pos += 1\n return decimal\n\n\nEstado = True\n\ndef calculadoraCA2():\n contador = 0\n numBin = input(\"Ingrese un numero binario de 8 bits:\")\n listB = list(numBin)\n if int(len(listB) == 8):\n try :\n for i in range(len(listB)):\n\n num = int(listB[i])\n if num == 1 or num == 0 :\n print(\"correcto\")\n\n elif num != 1 or num != 0:\n print(\"el dígito: \",num,\n \" no es 1 o 0, ingrese el dato de nuevo\")\n contador = contador +1\n\n if contador == 0 :\n Estado = False\n except Exception as e :\n print(\"error en el ciclo for\", e)\n else:\n print(\"los bits ingresados no coinciden con la cantidad requerida (7)\")\n\n print(\"------------------------------------------------------\")\n print(\"el numero ingresado en decimales es:\",bin_a_dec(numBin))\n print( Com_A_2(listB))\n \n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n \n","repo_name":"XJ4v1erX/Assembler","sub_path":"Ejercicio5.py","file_name":"Ejercicio5.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26743334083","text":"# from turtle import color, right\n# from numpy import poly\nfrom xml.dom import ValidationErr\nfrom matplotlib.style import available\nimport pandas as pd\nfrom src.data_structures.graph import Edge\nfrom src.data_structures import Point,scatter_points,poly_as_matplotlib,plot_polygons\nfrom 
src.data_structures.shapes import Polygon #,MultiPoint\nfrom shapely.geometry import LineString\n# from src.data_structures.graph import Graph,Edge\nimport matplotlib.pyplot as plt\n\nfrom src.puzzle_creators import rgon_1988_wrap as Rgon1988\nfrom src.consts import PLOT_COLORS\n# from src.algorithms.sweep_line.sweep_line import SweepLine\nimport logging\nfrom src import setup_logger\nfrom functools import reduce\n\nfrom src.data_structures.shapes import Polygon\nfrom src.puzzle_creators import Direction\n\nfrom math import pi,atan2\nimport numpy as np\n\n\n\nclass PuzzleCreator():\n\n def __init__(self,is_debug=False):\n self.interior_points = []\n self.frame_anchor_points = [] #frame anchor points\n self.frame_points = []\n self.pieces = [] #MultiPolygon\n # self.is_angles_convex = {}\n self.pieces_area = 0\n self.frame_polygon = None\n self.last_possible_rgons ={}\n self.n_iter = 0\n self.is_debug = is_debug\n self.fig, self.ax = plt.subplots()\n log_handler = setup_logger.get_file_handler(setup_logger.get_debug_log_file())\n self.logger = logging.getLogger(\"logger.puzzle_creator\")\n self.logger.addHandler(log_handler)\n self.debug_dir = setup_logger.get_debug_lastrun_dir()\n \n \n # def load_sampled_points(self,file_path):\n # role_points = {\n # \"interior\": self.interior_points,\n # \"frame_anchor\":self.frame_anchor_points,\n # \"frame\":self.frame_points\n # }\n # df = pd.read_csv(file_path,index_col=False)\n\n # for row in df.to_numpy():\n \n # if row[0] < 0 or row[1] < 0:\n # raise ValueError(f\"All points coordinates must not be negative. Recieved as input ({row[0]},{row[1]})\")\n\n # point = Point(row[0],row[1])\n # role_points[row[2]].append(point)\n\n # self.frame_polygon = Polygon(self.frame_points)\n \n # # maybe this should not be here\n # self.scan_direction = Direction.left\n # self._set_direction_scan(self.scan_direction.value)\n\n # for point in self.interior_points:\n # self.is_angles_convex[str(point)] = False\n \n # def plot_puzzle(self,fig,ax,pieces=None,**kwargs):\n # if pieces is None:\n # pieces = self.pieces\n # scatter_points(ax,self.interior_points,color=\"blue\")\n # scatter_points(ax,self.frame_anchor_points,color=\"red\") \n # frame_mat_polygon = poly_as_matplotlib(self.frame_polygon,edgecolor=\"black\",facecolor='white',lw=2)\n # puzzle_mat_polygons = [poly_as_matplotlib(piece,color=PLOT_COLORS[i%len(PLOT_COLORS)],**kwargs) for i,piece in enumerate(pieces)]\n # puzzle_mat_polygons.insert(0, frame_mat_polygon)\n # plot_polygons(ax,puzzle_mat_polygons)\n # for i,mat_poly in enumerate(pieces):\n # ax.text(mat_poly.centroid.x,mat_poly.centroid.y,str(i+1),style='italic',\n # bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})\n\n def _get_points_ahead(self,kernel_point,direction=1):\n self.logger.info(\"Start _get_points_ahead function. 
Filter point in space to get reachable points\")\n self.logger.debug(\"Filter point that are not ahead the scanning direction\")\n # on default - ahead to the left\n filter_condition = lambda item: item.x>=kernel_point.x and item!=kernel_point \n space = self.space_points.copy() #list(self.interior_points+self.frame_anchor_points)\n space.remove(kernel_point)\n\n # if requested ahead to right\n if direction == -1:\n filter_condition = lambda item: item.x<=kernel_point.x and item!=kernel_point \n space.reverse()\n\n space = list(filter(filter_condition,space)) \n space_str = reduce(lambda acc,x: acc + x + \";\" ,[\"\",\"\"] + list(map(lambda x: str(x),space)))\n self.logger.debug(f\"Points ahead are {str(space_str)}\")\n return space\n\n # def _get_accessible_points(self,kernel_point,space,direction=1):\n # self.logger.info(\"Start _get_accessible_points method\")\n # # fig,ax = plt.subplots()\n # # ax.title.set_text(f\"Debug accesible point\")\n # # self.plot_puzzle(fig,ax)\n # visible_points = []\n # ker_to_p_lines = [LineString([kernel_point,point])for point in space]\n\n # for point,curr_ker_to_p_line in zip(space,ker_to_p_lines):\n\n # # If the kernel, current point and other point forms a line, \n # # the far distant point is not visible\n # if any(curr_ker_to_p_line.contains(line) and not line.equals(curr_ker_to_p_line)\\\n # for line in ker_to_p_lines):\n # continue\n \n # # if other piece is blocking view to point\n # if any((curr_ker_to_p_line.crosses(piece) and not curr_ker_to_p_line.touches(piece) or curr_ker_to_p_line.within(piece))\\\n # for piece in self.pieces):\n # continue\n \n # # If it does visible\n # visible_points.append(point)\n # x, y = curr_ker_to_p_line.xy\n # # ax.plot(x, y,color=\"black\")\n\n # # fig.savefig(debug_dir + f\"/Last debug accesible point.png\")\n # # plt.close()\n\n # visible_points_str = reduce(lambda acc,x: acc + x + \";\" ,[\"\",\"\"] + list(map(lambda x: str(x),visible_points)))\n # self.logger.debug(f\"{str(len(visible_points))} points are visible: {str(visible_points_str)}\")\n # return visible_points\n\n def _set_direction_scan(self,direction):\n self.interior_points = sorted(self.interior_points,key=lambda p: p.x,reverse=direction<0)\n self.frame_anchor_points = sorted(self.frame_anchor_points,key=lambda p: p.x,reverse=direction<0)\n self.space_points = sorted(self.interior_points + self.frame_anchor_points,key=lambda p: p.x,reverse=direction<0)\n \n \n def create(self):\n self.logger.info(\"Starts create function\")\n self.n_iter = 0\n self.num_iter_no_new_piece = 0\n\n\n while True:\n # Rgon1988.direction = self.scan_direction\n self.logger.info(f\"Start to scan board to from {str(self.scan_direction.name)}\")\n for kernel_point in self.space_points:\n self.last_kernel_point = kernel_point # for the power group creator\n self.n_iter +=1\n\n try:\n possible_rgons = self.prepare_to_create(kernel_point)\n\n if possible_rgons is not None:\n polygon = self._create_rgon(possible_rgons) \n else:\n polygon = None\n \n self.after_rgon_creation(polygon)\n \n except Exception as err:\n self.logger.exception(err)\n raise err \n \n \n if self._is_finished_scan():\n return\n\n self.scan_direction = Direction(self.scan_direction.value * (-1))\n # Rgon1988.direction = self.scan_direction\n self._set_direction_scan(self.scan_direction.value)\n # plt.close(\"all\")\n\n if self.scan_direction.name == \"left\":\n msg =f\"Attempt to scan the board from left to right again.\"\n self.logger.error(msg)\n raise StopIteration(msg)\n # self.logger.info(\"Finish to 
assemble a puzzle\")\n \n \n\n def plot_results(self,fig_path):\n # fig,ax = plt.subplots()\n fig,ax = self.fig,self.ax\n ax.cla()\n self.plot_puzzle(fig,ax)\n fig.savefig(fig_path)\n # plt.close(fig) \n\n # def _count_piece(self,polygon):\n # self.pieces.append(polygon)\n # self.pieces_area += polygon.area\n \n def prepare_to_create(self,kernel_point):\n raise NotImplementedError(\"need to be implemented\")\n\n def after_rgon_creation(self,polygon):\n if isinstance(polygon,Polygon):\n self.logger.debug(f\"Next Polygon to create is : {str(polygon)}\")\n self.check_sanity_polygon(polygon)\n self._count_piece(polygon)\n\n # def _is_edges_angles_convex(self,center_point):\n # # self.logger.debug(f\"Find out wheter the angles between edges of point {str(center_point)} are all less than 180\")\n # '''Get pieces containing center point'''\n # center_point_coords = list(center_point.coords)[0]\n # pieces_contain_point = [list(piece.exterior.coords) for piece in self.pieces \\\n # if center_point_coords in list(piece.exterior.coords)]\n\n # '''Get neighbor points - sharing an edge with center_point'''\n # neighbors = set()\n # for piece_coords in pieces_contain_point:\n # index = piece_coords.index(center_point_coords)\n # left_neighbor_index = index-1\n # right_neighbot_index = index+1\n # # if it is the origin of the piece it will apear twice in the coordinates\n # # The polygon has at least 3 different verticies\n # if index == 0: #or index==len(piece_coords) - 1:\n # left_neighbor_index = -2\n # right_neighbot_index = 1\n \n # neighbors.add(Point(piece_coords[left_neighbor_index]))\n # neighbors.add(Point(piece_coords[right_neighbot_index]))\n\n # if len(neighbors) < 2:\n # return False\n\n # def calc_angle(neigh_point):\n # delta_y = neigh_point.y - center_point.y\n # delta_x = neigh_point.x - center_point.x\n # res = atan2(delta_y,delta_x)\n # if res < 0:\n # res+=2*pi\n # return np.degrees(res)\n \n\n # neighbors = list(neighbors)\n # angles = list(map(calc_angle,neighbors))\n # angles.sort()\n # neighbors_sorted = [point for _,point in sorted(zip(angles,neighbors))]\n # prev_angle = calc_angle(neighbors_sorted[0])\n # prev_point = neighbors_sorted[0]\n\n # for angle,point in zip(angles[1:] + [angles[0]+360],neighbors_sorted[1:] + [neighbors_sorted[0]]):\n # diff = angle - prev_angle\n # if diff > 180:\n # self.logger.debug(f\"Around the center point {str(center_point)} \\\n # the points {str(prev_point)} and {str(point)} angle is {angle}-{prev_point}={diff}>180\")\n # return False\n # prev_angle = angle\n # prev_point = point\n \n # return True\n\n # def check_sanity_polygon(self,curr_piece:Polygon):\n # if not curr_piece.is_simple:\n # ValidationErr(f\"Polygon must be simple. coords: {str(curr_piece)}\")\n \n # coords = curr_piece.exterior.coords\n\n # if len(coords) < 4:\n # raise ValidationErr(f\"Polygon minimun amount of vertecies is 3. coords: {str(curr_piece)}\")\n\n # if coords[0] != coords[-1]:\n # raise ValidationErr(f\"Polygon must end and open with the same vertex. coords: {str(curr_piece)}\")\n\n # if not all(coords[1:-1].count(c)==1 for c in coords[1:-1]):\n # raise ValidationErr(f\"Polygon coords cannot have duplicates. coords: {str(curr_piece)}\")\n\n # for piece in self.pieces:\n # if not(curr_piece.disjoint(piece) or curr_piece.touches(piece)):\n # raise ValidationErr(f\"piece {str(piece)} intersects with new piece {str(curr_piece)}\")\n # if curr_piece.equals(piece):\n # raise ValidationErr(f\"Tried to create equal piece to exist one. 
piece: {str(curr_piece)}.\")\n\n\n # for inter_point in self.interior_points:\n # if inter_point.within(curr_piece):\n # raise ValidationErr(f\"Piece {str(piece)} created contains interior point {str(inter_point)}\")\n\n # def _is_finished_scan(self):\n # self.logger.info(\"Check whether to stop board scanning or not\")\n # self.logger.debug(\"Check the sum of the pieces area against the whole framework\")\n # if self.pieces_area < self.frame_polygon.area:\n # self.logger.debug(f\"The sum of the pieces is less than the whole framework: {self.pieces_area}<{self.frame_polygon.area}\")\n # return False\n \n # self.logger.debug(\"Checking if all the interior points angles between their edges are less than 180\")\n # for point in self.interior_points:\n # if not self._is_edges_angles_convex(point): #self.is_angles_convex[str(point)]:\n # raise ValidationErr(\"The angle of the polygon are not convex even though the whole puzzle framework is covered\")\n \n \n # return True\n\n def _find_rgons_comb(self,kernel_point,continuity_edges):\n rgons = []\n\n for edge_str in list(continuity_edges.keys()):\n traverses = [list(dict.fromkeys(tr)) for tr in self._get_traverse(Edge(edge_str),continuity_edges)]\n # find all sequential sub combinations:\n for trav in traverses:\n for index_start in range(len(trav)):\n for index_end in range(index_start+1,len(trav)):\n sub_trav = trav[index_start:index_end+1]\n sub_trav.insert(0,str(kernel_point))\n poly = Polygon([Point(eval(point_str)) for point_str in sub_trav])\n\n try:\n self.check_sanity_polygon(poly)\n rgons.append(poly)\n except ValidationErr as err:\n pass\n \n # Remove duplicates\n final_rgons = []\n for rgon in rgons:\n if all(not rgon.equals(poly) for poly in final_rgons):\n final_rgons.append(rgon)\n \n # sorting by the most left point of the polygon without kernel (ease on debug)\n def left_most_point_x(poly):\n xs,ys = poly.exterior.coords.xy\n return min(xs[1:-1])\n\n final_rgons.sort(key = left_most_point_x )\n\n return final_rgons\n\n def _get_traverse(self,origin_edge,continuity_edges):\n if len(continuity_edges[str(origin_edge)]) == 0:\n return [[str(origin_edge.src_point),str(origin_edge.dst_point)]]\n\n travs = []\n available_edges = continuity_edges[str(origin_edge)]\n for next_edge in available_edges:\n cont_travs = self._get_traverse(next_edge,continuity_edges)\n\n if isinstance(cont_travs[0],list):\n flat_travs = [item for sublist in cont_travs for item in sublist]\n flat_travs.insert(0,str(origin_edge.dst_point))\n flat_travs.insert(0,str(origin_edge.src_point))\n\n travs.append(flat_travs)\n \n return travs\n\n def _create_rgon(self,possible_rgons):\n raise NotImplementedError(\"need to be implemented\")\n \n # def write_results(self,output_path):\n # xs = []\n # ys = []\n # piece_id = []\n # for index in range(len(self.pieces)):\n # for coord in self.pieces[index].exterior.coords:\n # xs.append(coord[0])\n # ys.append(coord[1])\n # piece_id.append(index)\n \n # df = pd.DataFrame({\"x\":xs,\"y\":ys,\"id\":piece_id})\n # df.to_csv(output_path)\n\n # def _get_surface(self,kernel_point,scan_direction,n_iter=-1,fig_prefix=\"\"):\n # # observe surface data\n # points_to_connect = self._get_points_ahead(kernel_point,direction=self.scan_direction.value) \n # points_to_connect = self._get_accessible_points(kernel_point,points_to_connect,direction=self.scan_direction.value) \n\n # if len(points_to_connect) < 2:\n # self.logger.debug(f\"Not enough points to connect ({len(points_to_connect)} < 2)\")\n # # self.is_angles_convex[str(kernel_point)] 
= self._is_edges_angles_convex(kernel_point)\n # # return {}\n # raise ValueError(f\"Not enough points to connect ({len(points_to_connect)} < 2)\")\n \n # stared_polygon = Rgon1988.get_stared_shape_polygon(kernel_point,points_to_connect,self.scan_direction)\n # visual_graph_polygon = Rgon1988.get_visualization_graph(kernel_point,stared_polygon,self.scan_direction)\n \n\n # # if self.is_debug:\n # # # fig,ax = plt.subplots()\n # # fig,ax = self.fig,self.ax\n # # ax.cla()\n # # self.plot_puzzle(fig,ax)\n # # [Edge(kernel_point,p).plot(ax,color='black', linestyle='dotted') for p in list(visual_graph_polygon.get_verticies())]\n # # visual_graph_polygon.plot_directed(ax) # way to plot the graph\n # # fig.savefig(self.debug_dir + f\"/visibility-graph-before-filter/{fig_prefix}{str(self.n_iter)}.png\")\n # # # plt.close(fig)\n\n # # Remove edges that are covered by polygons - do it more elegant less naive\n # self.logger.info(\"Filter edges covered by exist pieces\")\n # vs_grph_edges = list(visual_graph_polygon.get_edges()).copy()\n # lines = [LineString([edge.src_point,edge.dst_point]) for edge in vs_grph_edges]\n\n # for edge,line in zip(vs_grph_edges,lines):\n # for piece in self.pieces:\n \n # if line.crosses(piece) and not line.touches(piece):\n # self.logger.debug(f\"Edge {str(edge)} is crossed by piece {str(piece)} ,so remove it from visibility graph\")\n # visual_graph_polygon.remove_edge(edge)\n # break\n\n # if line.within(piece):\n # self.logger.debug(f\"Edge {str(edge)} is within piece {str(piece)} ,so remove it from visibility graph\")\n # visual_graph_polygon.remove_edge(edge)\n # break\n \n # if self.is_debug:\n # # fig,ax = plt.subplots()\n # fig,ax = self.fig,self.ax\n # ax.cla()\n # self.plot_puzzle(fig,ax)\n # [Edge(kernel_point,p).plot(ax,color='black', linestyle='dotted') for p in list(visual_graph_polygon.get_verticies())]\n # visual_graph_polygon.plot_directed(ax) # way to plot the graph\n # fig.savefig(self.debug_dir + f\"/visibility-graph-filtered/{fig_prefix}{str(self.n_iter)}.png\")\n # # plt.close(fig)\n\n # if len(list(visual_graph_polygon.get_edges())) == 0:\n # self.logger.debug(f\"Not enough edge to iterate on the visibility graph\")\n # # self.is_angles_convex[str(kernel_point)] = self._is_edges_angles_convex(kernel_point)\n # # return {}\n # raise ValueError(\"Not enough edge to iterate on the visibility graph\")\n\n # return Rgon1988.get_convex_chain_connectivity(visual_graph_polygon,self.scan_direction)\n\n def _find_first_possible_rgons(self,kernel_point,n_iter=-1):\n try:\n continuity_edges = self._get_surface(kernel_point,self.scan_direction,n_iter)\n except ValueError as err:\n raise err\n\n # num_edges = self._get_next_polygon_num_verticies(continuity_edges,edges_max_chain_length)\n possible_rgons = self._find_rgons_comb(kernel_point,continuity_edges)\n possible_rgons = list(filter(lambda pc:all(pc.disjoint(pc2) or pc.touches(pc2) for pc2 in self.pieces),possible_rgons))\n return possible_rgons\n\n def _filter_poss_rgons(self,last_possible_rgons):\n return list(filter(lambda pc:all(pc.disjoint(pc2) or pc.touches(pc2) for pc2 in self.pieces),last_possible_rgons))#","repo_name":"yanivohayon8/creatingRgons","sub_path":"src/puzzle_creators/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":19869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"531667587","text":"import os\nimport pandas as pd\nfrom glob import glob\n\n# A dictionary of the abbreviations to the lesion type 
for reference\nlesion_type_dict = {\n 'nv': 'Melanocytic nevi',\n 'mel': 'Melanoma',\n 'bkl': 'Benign keratosis-like lesions ',\n 'bcc': 'Basal cell carcinoma',\n 'akiec': 'Actinic keratoses',\n 'vasc': 'Vascular lesions',\n 'df': 'Dermatofibroma'\n}\nclasses = sorted(lesion_type_dict.keys())\n\ndef get_df_train(train_path):\n image_paths = glob(os.path.join(train_path, '*/*.jpg'))\n image_dict = {os.path.splitext(os.path.basename(x))[0]: x for x in image_paths}\n df_train = pd.read_csv(os.path.join(train_path,'HAM10000_metadata.csv'))\n df_train['path'] = df_train['image_id'].map(image_dict.get)\n df_train['class'] = df_train['dx'].apply(lambda x: classes.index(x))\n # Not using patient metadata\n df_train = df_train.drop(['dx_type', 'age', 'sex', 'localization'], axis=1)\n return df_train\n\ndef get_df_test(test_path):\n df_test = pd.read_csv(os.path.join(test_path, 'ISIC2018_Task3_Test_GroundTruth/ISIC2018_Task3_Test_GroundTruth/ISIC2018_Task3_Test_GroundTruth.csv'))\n df_test = df_test.melt(id_vars=['image'], var_name='dx', value_name='belongs_to_class')\\\n .query('belongs_to_class != 0')\\\n .drop(columns=['belongs_to_class'])\n df_test['dx'] = df_test['dx'].str.lower()\n df_test['class'] = df_test['dx'].apply(lambda x: classes.index(x))\n df_test = df_test.rename(columns={'image': 'image_id'})\n df_test['path'] = df_test['image_id'].apply(lambda x: os.path.join(test_path, 'ISIC2018_Task3_Test_Input/ISIC2018_Task3_Test_Input/' + x + '.jpg'))\n df_test = df_test.reset_index(drop=True)\n return df_test \n\ndef get_train_val_split(df_train, df_val):\n # Remove test data from train data\n df_train_unique = df_train[~df_train['lesion_id'].isin(df_val['lesion_id'])]\n df_train_unique.reset_index(inplace=True)\n df_val.reset_index(inplace=True)\n return df_train_unique, df_val","repo_name":"REFYSE/Skin-Lesion-Classifier","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2474972008","text":"from gui import *\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport sqlite3\n\nclass CompanyPage(Others):\n def __init__(self, parent, name, dimensions):\n super().__init__(parent, name, dimensions)\n\n self.tree_frame = Frame(self)\n self.tree_frame.grid(row=0, column=0, pady=10)\n\n # Create scrollbar for treeview\n self.tree_scroll = Scrollbar(self.tree_frame)\n self.tree_scroll.pack(side=RIGHT, fill=Y)\n\n # Create treeview\n self.tree = ttk.Treeview(self.tree_frame, yscrollcommand=self.tree_scroll.set, selectmode=\"extended\")\n self.tree.pack()\n\n # Configure scrollbar\n self.tree_scroll.config(command=self.tree.yview)\n\n # Define the columns of the treeview\n self.tree['columns'] = (\n \"Company ID\", \"Company Name\", \"Company Address\", \"VAT Number\"\n )\n\n # Place the columns\n self.tree.column(\"#0\", width=0, stretch=NO)\n self.tree.column(\"Company ID\", anchor=CENTER, width=100)\n self.tree.column(\"Company Name\", anchor=CENTER, width=200)\n self.tree.column(\"Company Address\", anchor=CENTER, width=300)\n self.tree.column(\"VAT Number\", anchor=CENTER, width=140)\n\n # Create headings for columns\n self.tree.heading(\"#0\", text=\"\", anchor=W)\n self.tree.heading(\"Company ID\", text=\"Company ID\", anchor=CENTER)\n self.tree.heading(\"Company Name\", text=\"Company Name\", anchor=CENTER)\n self.tree.heading(\"Company Address\", text=\"Company Address\", anchor=CENTER)\n self.tree.heading(\"VAT Number\", 
text=\"VAT Number\", anchor=CENTER)\n\n self.data_frame = LabelFrame(self, text=\"Company\")\n self.data_frame.grid(row=1, column=0)\n\n self.name_label = Label(self.data_frame, text=\"Company Name:\")\n self.name_label.grid(row=0, column=0, padx=10, pady=10)\n self.name_entry = Entry(self.data_frame)\n self.name_entry.grid(row=0, column=1, padx=10, pady=10)\n\n self.address_label = Label(self.data_frame, text=\"Company Address:\")\n self.address_label.grid(row=0, column=2, padx=10, pady=10)\n self.address_entry = Entry(self.data_frame)\n self.address_entry.grid(row=0, column=3, padx=10, pady=10)\n\n self.vat_number_label = Label(self.data_frame, text=\"VAT Number:\")\n self.vat_number_label.grid(row=0, column=4, padx=5, pady=10)\n self.vat_number_entry = Entry(self.data_frame)\n self.vat_number_entry.grid(row=0, column=5, padx=5, pady=10)\n\n self.button_frame = LabelFrame(self, text=\"Commands\")\n self.button_frame.grid(row=2, column=0, pady=10)\n\n self.add_company_button = Button(self.button_frame, text=\"Add Company\", command=self.add)\n self.add_company_button.grid(row=0, column=0, padx=10, pady=10)\n\n self.update_company_button = Button(self.button_frame, text=\"Update Company Info\", command=self.update)\n self.update_company_button.grid(row=0, column=1, padx=20, pady=10)\n\n self.remove_all_companies_button = Button(self.button_frame, text=\"Remove All Companies\", command=self.remove_all)\n self.remove_all_companies_button.grid(row=0, column=2, padx=20, pady=10)\n\n self.remove_many_companies_button = Button(self.button_frame, text=\"Remove Selected Companies\", command=self.remove_selected)\n self.remove_many_companies_button.grid(row=0, column=3, padx=20, pady=10)\n\n self.go_back_button = Button(self.button_frame, text=\"Go Back\", command=self.go_back)\n self.go_back_button.grid(row=0, column=4, padx=10, pady=10)\n\n self.clear_entries_button = Button(self.data_frame, text=\"Clear Entries\", command=self.clear_entries)\n self.clear_entries_button.grid(row=0, column=6, padx=10, pady=10)\n\n self.tree.bind(\"\", self.select)\n\n self.query()\n\n def go_back(self):\n self.destroy()\n\n def query(self):\n\n # Clear the treeview table\n for record in self.tree.get_children():\n self.tree.delete(record)\n\n # Create the database or connect to the existing database\n conn = sqlite3.connect('invoice_db.db')\n\n # Create a cursor instance\n cursor = conn.cursor()\n\n # Fetch the data\n cursor.execute(\"SELECT * FROM companies\")\n records = cursor.fetchall()\n\n for record in records:\n self.tree.insert(parent='', index='end', text='', values=(record[0], record[1], record[2], record[3]))\n\n # Commit changes\n conn.commit()\n\n # Close the connection\n conn.close()\n\n def clear_entries(self):\n self.name_entry.delete(0, END)\n self.address_entry.delete(0, END)\n self.vat_number_entry.delete(0, END)\n\n def select(self, e):\n\n self.clear_entries()\n\n selected = self.tree.focus()\n\n values = self.tree.item(selected, 'values')\n\n # Output to entry boxes\n self.name_entry.insert(0, values[1])\n self.address_entry.insert(0, values[2])\n self.vat_number_entry.insert(0, values[3])\n\n def add(self):\n\n # Create the database or connect to the existing database\n conn = sqlite3.connect('invoice_db.db')\n\n # Create a cursor instance\n cursor = conn.cursor()\n\n cursor.execute(\"INSERT INTO companies (company_name, company_address, vat_number) VALUES (?,?,?)\",\n (self.name_entry.get(), self.address_entry.get(), self.vat_number_entry.get()))\n # Commit changes\n conn.commit()\n\n # 
Close the connection\n conn.close()\n\n self.clear_entries()\n\n # Clear The treeview table\n self.tree.delete(*self.tree.get_children())\n\n # Pull data from database\n self.query()\n\n def update(self):\n # Create the database or connect to the existing database\n conn = sqlite3.connect('invoice_db.db')\n\n # Create a cursor instance\n cursor = conn.cursor()\n\n selected = self.tree.focus()\n\n id = self.tree.item(selected).get('values')[0]\n\n cursor.execute(\"\"\"UPDATE companies SET\n company_name = ?,\n company_address = ?,\n vat_number = ?\n WHERE company_id = ?\"\"\", (\n self.name_entry.get(),\n self.address_entry.get(),\n self.vat_number_entry.get(),\n id))\n\n # Commit changes\n conn.commit()\n\n cursor.execute(\"SELECT * FROM companies\")\n\n x = self.tree.index(selected)\n\n records = cursor.fetchall()\n\n self.tree.item(selected, text=\"\", values=(\n records[x][0], records[x][1], records[x][2], records[x][3]))\n\n # Commit changes\n conn.commit()\n\n # Close the connection\n conn.close()\n\n self.clear_entries()\n\n self.query()\n\n def remove_selected(self):\n # Ask a yes/no question\n response = messagebox.askyesno(message=\"Are you sure you want to delete the selected companies?\")\n # Delete the invoice from the treeview\n if response == 1:\n\n selection = self.tree.selection()\n\n self.clear_entries()\n\n # Create a list to store ids of invoices to be removed\n removed = []\n\n # Add selections to the list\n for x in selection:\n removed.append(self.tree.item(x, 'values')[0])\n\n for x in selection:\n self.tree.delete(x)\n\n conn = sqlite3.connect('invoice_db.db')\n\n cursor = conn.cursor()\n\n cursor.executemany(\"DELETE FROM companies WHERE company_id = ?\", [(x,) for x in removed])\n\n conn.commit()\n\n conn.close()\n\n def remove_all(self):\n # Ask a yes/no question\n response = messagebox.askyesno(message=\"Are you sure you want to delete all companies?\")\n # Delete the invoice from the treeview\n if response == 1:\n for x in self.tree.get_children():\n self.tree.delete(x)\n\n self.clear_entries()\n\n conn = sqlite3.connect('invoice_db.db')\n\n cursor = conn.cursor()\n\n cursor.execute(\"DELETE FROM companies\")\n\n conn.commit()\n\n conn.close()","repo_name":"SarpHarbali/invoice-organizer","sub_path":"company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"42740249522","text":"# from __future__ import print_function, division\n# from torch.optim import lr_scheduler\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\n# import torch.nn.functional as F\nimport torch.optim as optim\n# import torch.utils.data as Data\nimport numpy as np\n# import torchvision\n# from torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport io\nimport json\nimport ast\nimport sys\nimport random\n\n\nBATCH_SIZE = 32\nEPOCH = 100\nFILE_NUM = 4\nOUT_FILE_DIR = \"model/\"\n\nmodel_paths = []\n\ndef get_batchs(dataset_split_index, inputs, labels):\n x, y = [], []\n b_x, b_y = [], []\n for i, dsi in enumerate(dataset_split_index):\n if (i % BATCH_SIZE) == 0:\n b_x = list_to_cuda_var(b_x)\n b_y = list_to_cuda_var(b_y)\n x.append(b_x)\n y.append(b_y)\n b_x = []\n b_y = []\n rand_split_in_replay = np.random.randint(0, len(inputs[dsi]))\n b_x.append(inputs[dsi][rand_split_in_replay])\n b_y.append(labels[dsi])\n if b_x != []:\n b_x = list_to_cuda_var(b_x)\n b_y = 
list_to_cuda_var(b_y)\n x.append(b_x)\n y.append(b_y)\n del x[0], y[0]\n return x, y\n\ndef list_to_cuda_var(input_list):\n input_list = torch.tensor(input_list)\n input_list = input_list.type(torch.FloatTensor)\n input_list = Variable(input_list.cuda())\n return input_list\n \n\ndef get_loss_plot_data(losses_his):\n temp = []\n result = []\n for his in losses_his:\n his_result = [[]]\n EPOCH_LENGTH = int(len(his) / EPOCH)\n for item in his:\n if len(temp) == EPOCH_LENGTH:\n his_result[0].append(np.mean(temp))\n temp.clear()\n temp.append(item)\n # print(\"his_length: %d\" % len(his))\n # print(\"EPOCH_LENGTH: %d\" % EPOCH_LENGTH)\n result += his_result\n # print(\"result: %s\" % str(result))\n return result\n \n\ndef get_print_tensor(tensor):\n if tensor.dim() < 2:\n return str(tensor.tolist() )\n else:\n t_str = \"[\"\n for i in range(tensor.size()[0]):\n t_str += get_print_tensor(tensor[i]) + \",\\n\"\n t_str = t_str[:-2]\n t_str += \"]\"\n return t_str\n\ndef normalize_min_max(data, index, max_threashold, min_threashold = 0):\n data[index] = (data[index] - min_threashold) / (max_threashold - min_threashold)\n\ndef get_accuracy(predicts, labels):\n hit_num = 0\n if(len(predicts) != len(labels)):\n print(\"size of predicts and labels didn't match\")\n raise ValueError\n else:\n for predict,label in zip(predicts, labels):\n temp = 1 if predict > 0.5 else 0\n if temp == label:\n hit_num += 1\n return hit_num / len(predicts)\n\ndef save_model_for_cpp(nets, i):\n model = {\n \"name\" : \"net\" + str(i + 1),\n \"net\" : nets[i]\n }\n save_model(model)\n\n\ndef save_model(model):\n name = model['name']\n net = model['net']\n model_dict = net.state_dict()\n with open(OUT_FILE_DIR + name + \".model\",\"w\") as fo:\n for i in model_dict:\n if type(model_dict[i]) is torch.Tensor:\n structure = \"dict[%s]=\" % i\n fo.write(structure + \"\\n\")\n fo.write(str(model_dict[i].size()) + \"\\n\")\n fo.write(get_print_tensor(model_dict[i]))\n fo.write(\"\\n\")\n model_path = OUT_FILE_DIR + name +'.pt'\n model_paths.append(model_path)\n torch.save(net, model_path)\n \nif __name__ == '__main__':\n if not os.path.exists(OUT_FILE_DIR):\n os.mkdir(OUT_FILE_DIR)\n\n cuda0 = torch.cuda.set_device(0)\n\n plt.ion() \n\n inputs = []\n labels = []\n current_file_num = 0\n while current_file_num < FILE_NUM:\n feature_file_name = \"fo_tvt/fo_tvt_\" + str(current_file_num * 100) + \"_\" + str((current_file_num + 1) * 100)\n with open(feature_file_name) as fi:\n while True:\n line = fi.readline()\n if not line:\n break\n input_temp = ast.literal_eval(line)\n for i in range(len(input_temp)):\n # U_mineral normalization\n normalize_min_max(input_temp[i], 1, 4000)\n normalize_min_max(input_temp[i], 51, 4000)\n # U_gas normalization\n normalize_min_max(input_temp[i], 2, 1750)\n normalize_min_max(input_temp[i], 52, 1750)\n # U_supply normalization\n normalize_min_max(input_temp[i], 3, 350)\n normalize_min_max(input_temp[i], 53, 350)\n # I_mineral normalization\n normalize_min_max(input_temp[i], 4, 75000)\n normalize_min_max(input_temp[i], 54, 75000)\n # I_gas normalization\n normalize_min_max(input_temp[i], 5, 38000)\n normalize_min_max(input_temp[i], 55, 38000)\n # I_supply normalization\n normalize_min_max(input_temp[i], 6, 400)\n normalize_min_max(input_temp[i], 56, 400)\n # base_num normalization\n normalize_min_max(input_temp[i], 7, 10)\n normalize_min_max(input_temp[i], 57, 10)\n # building_score normalization\n normalize_min_max(input_temp[i], 8, 40000)\n normalize_min_max(input_temp[i], 58, 40000)\n # 
building_variety normalization\n normalize_min_max(input_temp[i], 9, 60)\n normalize_min_max(input_temp[i], 59, 60)\n # unit_num normalization\n normalize_min_max(input_temp[i], 10, 160)\n normalize_min_max(input_temp[i], 60, 160)\n # unit_score normalization\n normalize_min_max(input_temp[i], 11, 70000)\n normalize_min_max(input_temp[i], 61, 70000)\n # unit_variety normalization\n normalize_min_max(input_temp[i], 12, 30)\n normalize_min_max(input_temp[i], 62, 30)\n # vm_action_num normalization\n normalize_min_max(input_temp[i], 13, 100)\n normalize_min_max(input_temp[i], 63, 100)\n # unique_region normalization\n normalize_min_max(input_temp[i], 14, 30)\n normalize_min_max(input_temp[i], 64, 30)\n # building_slots normalization\n for j in range(15,31):\n normalize_min_max(input_temp[i], j, 25)\n for j in range(65,81):\n normalize_min_max(input_temp[i], j, 25)\n # unit_slots normalization\n for j in range(31,49):\n if j == 31 + 8:\n normalize_min_max(input_temp[i], j, 120)\n continue\n normalize_min_max(input_temp[i], j, 60)\n for j in range(81,99):\n if j == 81 + 8:\n normalize_min_max(input_temp[i], j, 120)\n continue\n normalize_min_max(input_temp[i], j, 60)\n # region_value normalization\n normalize_min_max(input_temp[i], 49, 20)\n normalize_min_max(input_temp[i], 99, 20)\n # chokedist normalization\n normalize_min_max(input_temp[i], 108, 600, 200)\n\n # delete features can not get from real system\n for i in range(len(input_temp)):\n del input_temp[i][108] # chokedist\n del input_temp[i][107] # walkable_num (16 binary values)\n del input_temp[i][64] # oppo unique_region\n del input_temp[i][51:57] # oppo resourse related features\n del input_temp[i][14] # self unique_region\n \n inputs.append(input_temp)\n label_file_name = feature_file_name + \"_label\"\n with open(label_file_name) as fi:\n for line in fi.readlines():\n label = ast.literal_eval(line)[-1]\n labels.append(label)\n current_file_num += 1\n\n if len(labels) != len(inputs):\n print(\"input length does not match label length!!!\")\n sys.exit(-1)\n feature_num = len(inputs[0][0])\n assert feature_num == 99\n\n # put dateset into torch dataset\n # inputs = torch.Tensor(inputs)\n # print(inputs.size())\n # labels = torch.Tensor(labels)\n # labels = labels.view(-1,1)\n # print(labels.size())\n # dataset = Data.TensorDataset(inputs, labels)\n\n winner_count = 0\n lose_count = 0\n for item in labels:\n if item == 1:\n winner_count += 1\n elif item == 0:\n lose_count += 1\n print(\"winner_count: %d\" % winner_count)\n print(\"lose_count: %d\" % lose_count)\n # use permutation to split train|dev|test dataset after shuffle whole data\n replay_num = len(inputs)\n replay_index = np.random.permutation(np.arange(replay_num))\n print(\"replay_num: %d\" % replay_num)\n print(\"replay_index: %s\" % str(replay_index))\n train_split_index = int(replay_num * 0.8)\n dev_split_index = int(replay_num * 0.9)\n train_dataset_index = replay_index[ : train_split_index]\n dev_dataset_index = replay_index[train_split_index : dev_split_index]\n test_dataset_index = replay_index[dev_split_index : ]\n\n #xavier initialization\n linear1 = torch.nn.Linear(feature_num, 256)\n linear2 = torch.nn.Linear(256, 256)\n linear3 = torch.nn.Linear(256, 1)\n # linear3 = torch.nn.Linear(256, 2)\n torch.nn.init.xavier_uniform_(linear1.weight)\n torch.nn.init.xavier_uniform_(linear2.weight)\n torch.nn.init.xavier_uniform_(linear3.weight)\n\n net1 = torch.nn.Sequential(\n linear1,\n torch.nn.Tanh(),\n linear2,\n torch.nn.ReLU(),\n linear3,\n torch.nn.Sigmoid()\n )\n\n 
net2, net3 = copy.deepcopy(net1), copy.deepcopy(net1)\n\n # net1 = torch.load(OUT_FILE_DIR + \"net1.pt\")\n # net2 = torch.load(OUT_FILE_DIR + \"net2.pt\")\n # net3 = torch.load(OUT_FILE_DIR + \"net3.pt\")\n\n print(net1) # net 的结构\n\n # Adam optimizer\n optimizer_1 = torch.optim.Adam(net1.parameters(), lr=1e-2) # 传入 net 的所有参数, 学习率\n optimizer_2 = torch.optim.Adam(net2.parameters(), lr=1e-3) # 传入 net 的所有参数, 学习率\n optimizer_3 = torch.optim.Adam(net3.parameters(), lr=1e-4) # 传入 net 的所有参数, 学习率\n optimizers = [optimizer_1, optimizer_2, optimizer_3]\n loss_func = torch.nn.BCELoss() # 二分类交叉熵loss\n train_losses_his = [[], [], []] # 记录 training 时train不同学习率的 loss\n dev_losses_his = [[], [], []] # 记录 training 时dev不同学习率的 loss\n test_losses_his = [[], [], []] # 记录 training 时test不同学习率的 loss\n test_acc_his = [[], [], []] # 记录 training 时test不同学习率的 acc\n highest_accuracies = [0, 0, 0] # 记录最高的准确率\n nets = [net1, net2, net3]\n\n # cuda\n if torch.cuda.is_available():\n net1.cuda()\n net2.cuda()\n net3.cuda()\n loss_func.cuda()\n\n\n # training\n since = time.time()\n for epoch in range(EPOCH):\n print('Epoch: ', epoch)\n # train dataset\n x, y = get_batchs(train_dataset_index, inputs, labels)\n for b_x, b_y in zip(x, y):\n for net, opt, l_his in zip(nets, optimizers, train_losses_his):\n output = net(b_x) # get output for every net\n output = output.squeeze()\n loss_func.zero_grad()\n loss = loss_func(output, b_y) # compute loss for every net\n opt.zero_grad() # clear gradients for next train\n loss.backward() # backpropagation, compute gradients\n opt.step() # apply gradients\n l_his.append(loss.data.cpu().numpy()) # loss recoder\n\n # dev dataset\n x, y = get_batchs(dev_dataset_index, inputs, labels)\n accuracies = [[], [], []]\n for b_x, b_y in zip(x, y):\n for net, opt, l_his, accuracy in zip(nets, optimizers, dev_losses_his, accuracies):\n output = net(b_x) # get output for every net\n output = output.squeeze()\n accuracy.append(get_accuracy(output, b_y))\n loss_func.zero_grad()\n loss = loss_func(output, b_y) # compute loss for every net\n opt.zero_grad() # clear gradients for next train\n l_his.append(loss.data.cpu().numpy()) # loss recoder\n # compare current accuracy to highest accuracy\n for i in range(len(highest_accuracies)):\n accuracy = np.mean(accuracies[i])\n if accuracy > highest_accuracies[i]:\n highest_accuracies[i] = accuracy\n save_model_for_cpp(nets, i)\n\n # test dataset\n x, y = get_batchs(test_dataset_index, inputs, labels)\n for b_x, b_y in zip(x, y):\n for net, opt, l_his, acc_his in zip(nets, optimizers, test_losses_his, test_acc_his):\n output = net(b_x) # get output for every net\n output = output.squeeze()\n acc_his.append(get_accuracy(output, b_y))\n loss_func.zero_grad()\n loss = loss_func(output, b_y) # compute loss for every net\n opt.zero_grad() # clear gradients for next train\n l_his.append(loss.data.cpu().numpy()) # loss recoder\n \n # test dataset\n accuracies = [[], [], []]\n x, y = get_batchs(test_dataset_index, inputs, labels)\n for net, model_path, opt, l_his, accuracy in zip(nets, model_paths, optimizers, test_losses_his, accuracies):\n net = torch.load(model_path)\n output = net(b_x) # get output for every net\n output = output.squeeze()\n accuracy.append(get_accuracy(output, b_y))\n loss = loss_func(output, b_y)\n loss_func.zero_grad()\n l_his.append(loss.data.cpu().numpy())\n test_loss_result = []\n test_accurate_result = []\n for l_his in test_losses_his:\n test_loss_result.append(l_his[-1])\n for accuracy in accuracies:\n 
test_accurate_result.append(accuracy[-1])\n with open(OUT_FILE_DIR + \"test_result.txt\", \"w\") as fo:\n fo.write(\"loss:\\n\")\n for result in test_loss_result:\n fo.write(str(result))\n fo.write(\"\\n\")\n fo.write(\"acc:\\n\")\n for result in test_accurate_result:\n fo.write(str(result))\n fo.write(\"\\n\")\n\n # record time\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed / 60, time_elapsed % 60))\n plot_labels = ['lr 1e-2', 'lr 1e-3', 'lr 1e-4']\n \n tlh_plot_data = get_loss_plot_data(train_losses_his)\n dlh_plot_data = get_loss_plot_data(dev_losses_his)\n test_lh_plot_data = get_loss_plot_data(test_losses_his)\n test_ah_plot_data = get_loss_plot_data(test_acc_his)\n\n # train plot\n for i, l_his in enumerate(tlh_plot_data):\n plt.plot(l_his, label=plot_labels[i])\n plt.legend(loc='best')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + \"train_summary.png\")\n plt.clf()\n # seperate optimizer plot\n for i, l_his in enumerate(tlh_plot_data):\n plt.plot(l_his)\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + plot_labels[i] + \"_train.png\")\n plt.clf()\n # dev plot\n plt.clf()\n for i, l_his in enumerate(dlh_plot_data):\n plt.plot(l_his, label=plot_labels[i])\n plt.legend(loc='best')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + \"dev_summary.png\")\n plt.clf()\n # seperate optimizer plot\n for i, l_his in enumerate(dlh_plot_data):\n plt.plot(l_his)\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + plot_labels[i] + \"_dev.png\")\n plt.clf()\n with open(OUT_FILE_DIR + \"train_model_loss\", \"w\") as fo:\n for train_los in train_losses_his:\n fo.write(\"optimizer\\n\")\n for item in train_los:\n fo.write(str(item))\n fo.write(\"\\n\")\n with open(OUT_FILE_DIR + \"dev_model_loss\", \"w\") as fo:\n for dev_loss in dev_losses_his:\n fo.write(\"optimizer\\n\")\n for item in dev_loss:\n fo.write(str(item))\n fo.write(\"\\n\")\n # test plot\n plt.clf()\n for i, l_his in enumerate(test_lh_plot_data):\n plt.plot(l_his, label=plot_labels[i])\n plt.legend(loc='best')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + \"test_loss_summary.png\")\n plt.clf()\n\n plt.clf()\n for i, l_his in enumerate(test_ah_plot_data):\n plt.plot(l_his, label=plot_labels[i])\n plt.legend(loc='best')\n plt.xlabel('Epochs')\n plt.ylabel('Accurate')\n plt.xlim((0))\n plt.ylim((0, 1.0))\n plt.savefig(OUT_FILE_DIR + \"test_acc_summary.png\")\n plt.clf()\n\n \n","repo_name":"pfan8/PyTorch-NN-C-","sub_path":"py/pytorch_nn_server.py","file_name":"pytorch_nn_server.py","file_ext":"py","file_size_in_byte":17680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"1915281743","text":"import pygame, sys\nfrom pygame.locals import *\nimport FIRST_PART.DRAW.all_drawing\nimport FIRST_PART.MECHANICA.signal_distribution\nimport FIRST_PART.DATAS\nimport FIRST_PART.DATAS.get_datas\n\nall_const = FIRST_PART.DATAS.const\n\n\ndef check_end_part():\n condition = FIRST_PART.DATAS.get_datas.get_condition_game()\n return True if condition == 'end' else False\n\n\ndef main():\n pygame.init()\n\n global all_const\n\n ####################\n SCREEN = pygame.display.set_mode((all_const.WIDTH, all_const.HEIGHT), 0, 32)\n\n draw_game = 
FIRST_PART.DRAW.all_drawing.DRAW_MOVING_AND_FON(SCREEN)\n accept_signals_user = FIRST_PART.MECHANICA.signal_distribution.USERS_SIGNAL()\n ###################\n\n while True:\n if check_end_part():\n break\n\n pygame.time.Clock().tick(all_const.FPS) # Частота обновления экрана\n\n for event in pygame.event.get():\n\n if event.type == QUIT:\n # Тип проверяемого события НАЖАТ Х В ОКНЕ ИГРЫ\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n # ESC key pressed\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n accept_signals_user.give_signals(pygame.mouse.get_pos())\n\n #################\n # MOVING GAME\n keys = pygame.key.get_pressed()\n side = None\n\n if keys[pygame.K_LEFT]:\n side = 'left'\n if keys[pygame.K_RIGHT]:\n side = 'right'\n if keys[pygame.K_UP]:\n side = 'up'\n if keys[pygame.K_DOWN]:\n side = 'down'\n\n accept_signals_user.give_signals(side) if side is not None \\\n else accept_signals_user.give_signals(side, absence=True)\n #################\n\n draw_game.processor()\n\n pygame.display.update()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ffidls/game-engine","sub_path":"GAME.py","file_name":"GAME.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"15084157839","text":"import numpy as np\n\nclass SpeedCalculator:\n\n def __init__(self, sampleTime):\n\n \"\"\"\n this function initializes speenCalc\n\n :param sampleTime: sysrem sample time\n \"\"\"\n\n self.dt = sampleTime\n\n def __calc_rotation_speed(self, angle):\n\n \"\"\"\n This function calculates reference speed of robot rotation atound Z axit\n :param angle: angle which robot need to turn\n :return:\n \"\"\"\n\n return angle*angle*angle/self.dt\n\n def calc_speed(self, x_ref, y_ref, angle):\n\n if angle < 0.1 and angle > -0.1:\n vx = x_ref[0]/self.dt\n vy = 0.0\n omega = 0.0\n else:\n omega = self.__calc_rotation_speed(angle)\n\n transition_mat = np.matrix([[np.sin(angle)/omega, -(1 - np.cos(angle))/omega], [(1 - np.cos(angle))/omega, np.sin(angle)/omega]])\n ref_position = np.matrix([x_ref, y_ref])\n print(transition_mat)\n ref_speed = np.matmul(np.linalg.inv(transition_mat), ref_position)\n vx = ref_speed[0, 0]\n vy = ref_speed[1, 0]\n\n return [2*vx, 2*vy, omega]\n\n","repo_name":"LlirikOknessu/RobotinoLab","sub_path":"src/speedCalculator.py","file_name":"speedCalculator.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"18397756256","text":"import os\nimport time\nfrom datetime import datetime, timedelta\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom datetime import datetime\nimport pika\nfrom influxdb_client import InfluxDBClient, Point\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nimport pandas as pd\nimport numpy as np\nimport pytz\nimport const\nimport utils\nimport alert_sms\n\ncolumns = [\n const.DATE_KEY,\n const.SECONDS_KEY,\n const.TEMPERATURE_TANK_KEY,\n const.TEMPERATURE_MAIN_TANK_KEY,\n const.TEMPERATURE_DOME_KEY,\n const.TEMPERATURE_PLANTS_KEY,\n const.TEMPERATURE_ATM_KEY,\n const.HUMEDITY_1_KEY,\n const.HUMEDITY_2_KEY,\n]\n\ncolumns_days = [\n const.DATE_DAY_KEY,\n const.DAY_OF_THE_YEAR_KEY,\n const.MEAN_TEMPERATURE_KEY,\n]\n\n\nclass Analytics():\n influx_bucket = 'rabbit'\n influx_token = 'token-secret'\n influx_url = 'http://influxDB:8086'\n influx_org = 'org'\n 
current_date = None\n alert = 0\n dataframe = pd.DataFrame(columns=columns)\n dataframe_prediction = pd.DataFrame(columns=columns_days)\n random_forest_regressor = AdaBoostRegressor(\n DecisionTreeRegressor(max_depth=5), n_estimators=10)\n counts = {\n 'COUNT_MIN_{}'.format(const.TEMPERATURE_MAIN_TANK_KEY): 0,\n 'COUNT_MIN_{}'.format(const.TEMPERATURE_TANK_KEY): 0,\n 'COUNT_MIN_{}'.format(const.HUMEDITY_1_KEY): 0,\n 'COUNT_MIN_{}'.format(const.HUMEDITY_2_KEY): 0,\n 'COUNT_MAX_{}'.format(const.TEMPERATURE_MAIN_TANK_KEY): 0,\n 'COUNT_MAX_{}'.format(const.TEMPERATURE_TANK_KEY): 0,\n 'COUNT_MAX_{}'.format(const.HUMEDITY_1_KEY): 0,\n 'COUNT_MAX_{}'.format(const.HUMEDITY_2_KEY): 0,\n }\n\n def write_db(self, tag, key, value, timestamp, tag_value='value'):\n client = InfluxDBClient(url=self.influx_url,\n token=self.influx_token, org=self.influx_org)\n write_api = client.write_api(write_options=SYNCHRONOUS)\n point = Point('Analytics').tag(tag, tag_value).field(\n key, value).time(timestamp, write_precision='ns')\n write_api.write(bucket=self.influx_bucket, record=point)\n\n # Get max values from every var and post to InfluxDB\n def get_max_values(self):\n if len(self.dataframe) > 0:\n for item in const.VARS_MAX_MIN:\n max_temperature = self.dataframe[item].max()\n max_temperature_idx = self.dataframe[item].idxmax()\n max_temperature_row = self.dataframe.iloc[max_temperature_idx]\n max_temperature_time = max_temperature_row[const.DATE_KEY]\n self.write_db(\n tag='max_values',\n key='MAX_{}'.format(item),\n value=max_temperature,\n timestamp=utils.get_time_to_influx(max_temperature_time)\n )\n\n # Get min values from every var and post to InfluxDB\n def get_min_values(self):\n if len(self.dataframe) > 0:\n for item in const.VARS_MAX_MIN:\n min_temperature = self.dataframe[item].min()\n min_temperature_idx = self.dataframe[item].idxmin()\n min_temperature_row = self.dataframe.iloc[min_temperature_idx]\n min_temperature_time = min_temperature_row[const.DATE_KEY]\n self.write_db(\n tag='min_values',\n key='MIN_{}'.format(item),\n value=min_temperature,\n timestamp=utils.get_time_to_influx(min_temperature_time)\n )\n\n def get_counts_dataframe(self):\n if len(self.dataframe) > 0:\n for key, value in const.VARS_COUNT.items():\n count_max = (self.dataframe[key] > value['max']).sum()\n count_min = (self.dataframe[key] <= value['min']).sum()\n self.write_db(\n tag='counts',\n tag_value='counts_max',\n key='COUNT_MAX_{}'.format(key),\n value=count_max,\n timestamp=utils.get_time_to_influx()\n )\n self.write_db(\n tag='counts',\n tag_value='counts_min',\n key='COUNT_MIN_{}'.format(key),\n value=count_min,\n timestamp=utils.get_time_to_influx()\n )\n\n for key, value in const.VARS_DOME.items():\n if key == 'day':\n # Count min from day\n dataframe_day_min = self.dataframe.apply(lambda x: utils.get_count_dome_day_min(\n x, value['min'], const.DOME_DAY_HOURS['min'], const.DOME_DAY_HOURS['max']), axis=1)\n count_day_min = len(\n dataframe_day_min[dataframe_day_min == True].index)\n self.write_db(\n tag='counts',\n tag_value='counts_min',\n key='COUNT_MIN_{}_{}'.format(\n key.upper(), const.TEMPERATURE_DOME_KEY),\n value=count_day_min,\n timestamp=utils.get_time_to_influx()\n )\n # Count max from day\n dataframe_day_max = self.dataframe.apply(lambda x: utils.get_count_dome_day_max(\n x, value['max'], const.DOME_DAY_HOURS['min'], const.DOME_DAY_HOURS['max']), axis=1)\n count_day_max = len(\n dataframe_day_max[dataframe_day_max == True].index)\n self.write_db(\n tag='counts',\n tag_value='counts_max',\n 
key='COUNT_MAX_{}_{}'.format(\n key.upper(), const.TEMPERATURE_DOME_KEY),\n value=count_day_max,\n timestamp=utils.get_time_to_influx()\n )\n else:\n # Count min from night\n dataframe_night_min = self.dataframe.apply(lambda x: utils.get_count_dome_night_min(\n x, value['min'], const.DOME_NIGHT_HOURS['min'], const.DOME_NIGHT_HOURS['max']), axis=1)\n count_night_min = len(\n dataframe_night_min[dataframe_night_min == True].index)\n self.write_db(\n tag='counts',\n tag_value='counts_min',\n key='COUNT_MIN_{}_{}'.format(\n key.upper(), const.TEMPERATURE_DOME_KEY),\n value=count_night_min,\n timestamp=utils.get_time_to_influx()\n )\n # Count max from night\n dataframe_night_max = self.dataframe.apply(lambda x: utils.get_count_dome_night_max(\n x, value['max'], const.DOME_NIGHT_HOURS['min'], const.DOME_NIGHT_HOURS['max']), axis=1)\n count_night_max = len(\n dataframe_night_max[dataframe_night_max == True].index)\n self.write_db(\n tag='counts',\n tag_value='counts_max',\n key='COUNT_MAX_{}_{}'.format(\n key.upper(), const.TEMPERATURE_DOME_KEY),\n value=count_night_max,\n timestamp=utils.get_time_to_influx()\n )\n\n def add_to_dataframe_prediction(self):\n timezone = pytz.timezone(\"America/Bogota\")\n last_timestamp = float(\n self.dataframe.iloc[-1][const.DATE_KEY])/1000000000\n last_day = datetime.fromtimestamp(last_timestamp, timezone).date()\n last_day_of_the_year = int((last_day).strftime('%j'))\n\n if len(self.dataframe_prediction) == 0:\n data_to_append = pd.DataFrame([[last_day, last_day_of_the_year, self.dataframe[const.TEMPERATURE_DOME_KEY].mean(\n )]], columns=self.dataframe_prediction.columns)\n self.dataframe_prediction = pd.concat(\n [self.dataframe_prediction, data_to_append], ignore_index=True)\n else:\n if last_day_of_the_year == self.dataframe_prediction.iloc[-1][const.DAY_OF_THE_YEAR_KEY]:\n self.dataframe_prediction = self.dataframe_prediction.drop(\n len(self.dataframe_prediction) - 1)\n data_to_append = pd.DataFrame([[last_day, last_day_of_the_year, self.dataframe[const.TEMPERATURE_DOME_KEY].mean(\n )]], columns=self.dataframe_prediction.columns)\n self.dataframe_prediction = pd.concat(\n [self.dataframe_prediction, data_to_append], ignore_index=True)\n else:\n data_to_append = pd.DataFrame([[last_day, last_day_of_the_year, self.dataframe[const.TEMPERATURE_DOME_KEY].mean(\n )]], columns=self.dataframe_prediction.columns)\n self.dataframe_prediction = pd.concat(\n [self.dataframe_prediction, data_to_append], ignore_index=True)\n\n print(\"Dataframe prediction \", self.dataframe_prediction, flush=True)\n\n def get_prediction_next_day(self, actual_time):\n if len(self.dataframe_prediction) > 0:\n X = self.dataframe_prediction[const.DAY_OF_THE_YEAR_KEY].to_numpy(\n ).reshape(-1, 1)\n Y = self.dataframe_prediction[const.MEAN_TEMPERATURE_KEY].to_numpy(\n )\n self.random_forest_regressor.fit(X, Y)\n timezone = pytz.timezone(const.TIME_ZONE)\n actual_time = float(actual_time)/1000000000\n next_day = datetime.fromtimestamp(actual_time, timezone).date() + timedelta(\n days=1)\n value_to_predict = np.array(\n int(next_day.strftime('%j'))).reshape(-1, 1)\n value_predicted = self.random_forest_regressor.predict(\n value_to_predict)\n # print(\"Predicted temperature for {} is {} \".format(\n # next_day, value_predicted[0]), flush=True)\n self.write_db(\n tag='predictions',\n tag_value='next_day',\n key='MEAN_{}'.format(const.TEMPERATURE_DOME_KEY),\n value=value_predicted[0],\n timestamp=time.time_ns()\n )\n else:\n print(\n \"No se puede realizar una prediccion en 
get_prediction_next_day()\", flush=True)\n\n def get_prediction_seconds(self, actual_time):\n if len(self.dataframe_prediction) > 0:\n timezone = pytz.timezone(const.TIME_ZONE)\n max_seconds = 86400\n forward_seconds = 10\n dataframe = self.dataframe.copy()\n X = dataframe[const.SECONDS_KEY].to_numpy().reshape(-1, 1)\n Y = dataframe[const.TEMPERATURE_DOME_KEY].to_numpy()\n regressor = AdaBoostRegressor(\n DecisionTreeRegressor(max_depth=5), n_estimators=10)\n regressor.fit(X, Y)\n actual_time = float(actual_time)/1000000000\n value_to_predict = actual_time + forward_seconds\n if value_to_predict > max_seconds:\n value_to_predict = np.array(value_to_predict - max_seconds)\n else:\n value_to_predict = np.array(value_to_predict)\n value_predicted = regressor.predict(\n value_to_predict.reshape(-1, 1))\n prediction_time = datetime.fromtimestamp(\n float(actual_time), timezone).strftime(\"%Y-%m-%dT%H:%M:%S\")\n # print(\"Predicted temperature for {} is {} \".format(\n # prediction_time, value_predicted[0]), flush=True)\n self.write_db(\n tag='predictions',\n tag_value='few_seconds',\n key='VALUE_OF_{}'.format(const.TEMPERATURE_DOME_KEY),\n value=value_predicted[0],\n timestamp=time.time_ns()\n )\n else:\n print(\n \"No se puede realizar una prediccion en get_prediction_seconds()\", flush=True)\n\n def check_counts(self):\n for key, value in self.counts.items():\n if value > 3:\n self.counts[key] = 0\n if int(self.alert):\n print('Sending messages', flush=True)\n alert_sms.send_alert_sms(const.ALERT_MESSAGES[key])\n print(self.counts, flush=True)\n\n def get_counts_alert(self, current_measurements):\n for key, value in const.VARS_COUNT.items():\n # print('---> key: {}, value: {}'.format(key, value), flush=True)\n if current_measurements[key] > value['max']:\n self.counts['COUNT_MAX_{}'.format(key)] += 1\n if current_measurements[key] <= value['min']:\n self.counts['COUNT_MIN_{}'.format(key)] += 1\n self.check_counts()\n\n def take_measurement(self, _message):\n measurements = {}\n splitted_message = _message.split(\" \")\n # Payload\n payload = splitted_message[1]\n #print(payload, flush=True)\n # Datetime in ns\n timestamp = splitted_message[2] if len(splitted_message) > 2 else time.time_ns()\n # print(timestamp, flush=True)\n measurements_string = payload.split(\",\")\n\n timezone = pytz.timezone(const.TIME_ZONE)\n timestamp_in_sec = float(timestamp)/1000000000\n incoming_datetime_str = datetime.fromtimestamp(\n float(timestamp_in_sec), timezone).strftime(\"%Y-%m-%dT%H:%M:%S\")\n # print(incoming_datetime_str, flush=True)\n incoming_datetime_str = incoming_datetime_str.split('T')[0]\n hour = datetime.fromtimestamp(\n float(timestamp_in_sec), timezone).strftime(\"%H\")\n minutes = datetime.fromtimestamp(\n float(timestamp_in_sec), timezone).strftime(\"%M\")\n seconds = datetime.fromtimestamp(\n float(timestamp_in_sec), timezone).strftime(\"%S\")\n time_second = (int(hour)*60 + int(minutes))*60 + int(seconds)\n\n if self.current_date is None:\n self.current_date = incoming_datetime_str\n else:\n if incoming_datetime_str > self.current_date:\n self.current_date = incoming_datetime_str\n self.dataframe = pd.DataFrame(columns=columns)\n\n # print('current_date {}'.format(self.current_date), flush=True)\n for measurement in measurements_string:\n measurement_key_value = measurement.split(\"=\")\n #print(measurement_key_value, flush=True)\n measurements[measurement_key_value[0]] = float(\n measurement_key_value[1])\n new_measurement = [\n float(timestamp),\n int(time_second),\n 
measurements[const.Temperatura_Tanque_de_reserva],\n measurements[const.Temperatura_Tanque_Principal],\n measurements[const.Temperatura_Domo],\n measurements[const.Temperatura_Plantas],\n measurements[const.Temperatura_medio_ambiente],\n measurements[const.Humedad_Planta_1],\n measurements[const.Humedad_Planta_2]\n ]\n new_measurement_dict = {\n const.DATE_KEY: float(timestamp),\n const.SECONDS_KEY: int(time_second),\n const.TEMPERATURE_TANK_KEY: measurements[const.Temperatura_Tanque_de_reserva],\n const.TEMPERATURE_MAIN_TANK_KEY: measurements[const.Temperatura_Tanque_Principal],\n const.TEMPERATURE_DOME_KEY: measurements[const.Temperatura_Domo],\n const.TEMPERATURE_PLANTS_KEY: measurements[const.Temperatura_Plantas],\n const.TEMPERATURE_ATM_KEY: measurements[const.Temperatura_medio_ambiente],\n const.HUMEDITY_1_KEY: measurements[const.Humedad_Planta_1],\n const.HUMEDITY_2_KEY: measurements[const.Humedad_Planta_2],\n }\n data_to_append = pd.DataFrame(\n [new_measurement], columns=self.dataframe.columns)\n self.dataframe = pd.concat(\n [self.dataframe, data_to_append], ignore_index=True)\n print(\"Normal Dataframe \", self.dataframe, flush=True)\n self.add_to_dataframe_prediction()\n self.get_counts_alert(new_measurement_dict)\n self.get_max_values()\n self.get_min_values()\n self.get_counts_dataframe()\n\n def get_prediction(self, _message):\n print('get_prediction - message {}'.format(_message), flush=True)\n\n def toggle_alerts(self, _message):\n print('toggle_alerts - value {}'.format(_message), flush=True)\n payload = _message.split(\" \")[1]\n value = payload.split('=')[1]\n self.alert = value\n\n\nif __name__ == '__main__':\n\n analytics = Analytics()\n\n def callback(ch, method, properties, body):\n global analytics\n message = body.decode(\"utf-8\")\n current_topic = method.routing_key\n if current_topic == 'data':\n analytics.take_measurement(message)\n elif current_topic == 'prediction':\n print('--------------->Prediction', flush=True)\n timestamp = message.split(\" \")[2]\n analytics.get_prediction_next_day(timestamp)\n analytics.get_prediction_seconds(timestamp)\n elif current_topic == 'alert':\n analytics.toggle_alerts(message)\n\n url = os.environ.get('AMQP_URL', 'amqp://guest:guest@rabbit:5672/%2f')\n params = pika.URLParameters(url)\n connection = pika.BlockingConnection(params)\n\n channel = connection.channel()\n channel.queue_declare(queue='messages')\n channel.queue_bind(exchange='amq.topic', queue='messages', routing_key='#')\n channel.basic_consume(\n queue='messages', on_message_callback=callback, auto_ack=True)\n channel.start_consuming()\n","repo_name":"gnaguerrer/server-hydroponic","sub_path":"analytics/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"41754031032","text":"# WasGo API Generator v2\n# Previous implementations of api generators used ClassDB and GDNative's api.json which missed some crucial information\n# In this script we will use C header files for ultimate accuracy\nimport json\nfrom pathlib import Path\n\ndef find_files_recursively(dir, glob_pattern):\n return [\"{0}\".format(path.relative_to(dir)) for path in Path(dir).rglob(glob_pattern)]\n\ndef parse_method(method_line, class_name):\n if (\"(\" not in method_line):\n return {}\n\n method = {}\n pre_paren = method_line[0:method_line.find(\"(\")]\n post_paren = method_line[method_line.find(\"(\") :]\n pre_words = [word.strip() for word in pre_paren.split()]\n 
method[\"virtual\"] = \"virtual\" in pre_words\n method[\"static\"] = \"static\" in pre_words\n pre_word_names = [word for word in pre_words if word not in [\"virtual\", \"static\", \"inline\"]]#TODO add other qualifiers\n if (not pre_words):\n return {}\n method[\"name\"] = pre_word_names[-1]\n if (pre_word_names.size() > 1):\n method[\"return_type\"] = pre_words[-2]\n method[\"constructor\"] = False\n method[\"destructor\"] = False\n elif class_name in method[\"name\"]:\n method[\"return_type\"] = \"\"\n if method[\"name\"][0] == \"~\":\n method[\"constructor\"] = False\n method[\"destructor\"] = True\n else:\n method[\"constructor\"] = True\n method[\"destructor\"] = False\n else:#might have been a preprocessor macro or something\n return {}\n\n if (\")\" not in post_paren):\n return {}\n argument_string = post_paren[0: post_paren.rfind(\")\")]\n if (argument_string.size() < 2):\n method[\"arguments\"] = []\n \n args = []\n arg_type = \"\"\n arg_name = \"\"\n arg_value = \"\"\n default_value = False\n arg_count = 0\n for character in argument_string:\n if character == \",\":\n if (arg_name):\n arg = {}\n arg[\"name\"] = arg_name.strip()\n arg[\"type\"] = arg_type.strip()\n arg[\"default_value\"] = arg_value.strip()\n arg[\"has_default_value\"] = default_value\n args[arg_count] = arg\n arg_count += 1\n arg_name = \"\"\n arg_type = \"\"\n arg_value = \"\"\n default_value = False\n elif character == \"=\":\n default_value = True\n elif default_value:\n arg_value += character\n elif character == \" \" or arg_name:\n arg_name += character\n else:\n arg_type += character\n return method\n\ndef parse_struct(struct_lines):\n return parse_class(struct_lines, True)\n\ndef parse_class(class_lines, public_default = False):\n if not class_lines:\n return {}\n class_dict = {}\n class_dict[\"classes\"] = []\n class_dict[\"structs\"] = []\n class_dict[\"enums\"] = []\n class_dict[\"constants\"] = []\n class_dict[\"methods\"] = []\n public = public_default #change to true for structs\n \n curly_brace_count = 0\n class_lines = []\n struct_lines = []\n enum_lines = []\n for index, line in enumerate(class_lines):\n if (index == 0):\n print(line)\n #extract class names and parent classes from the first line\n class_name = line[5:] #remove the class keyword\n parent_string = \"\"\n if \"{\" in class_name:\n class_name = class_name[0:class_name.find(\"{\")]\n if \":\" in class_name:\n char_index = class_name.find(\":\")\n parent_string = class_name[char_index:]\n class_name = class_name[0:char_index]\n #remove whitespaces\n class_dict[\"name\"] = class_name.strip()\n #TODO check for multiple inheritance\n parent_words = [s.strip() for s in parent_string.split() if s]\n if parent_words.size() > 1:\n class_dict[\"inheritance\"] = parent_words[0]\n class_dict[\"base_class\"] = parent_words[1]\n else:\n class_dict[\"inheritance\"] = \"\"\n class_dict[\"base_class\"] = \"\"\n elif line.startswith(\"public:\"): #we assume that's gonna be the whole line because the input is prettyfied\n public = True\n elif public:\n if \"{\" in line:\n curly_brace_count += 1\n\n if class_lines:\n class_lines.append(line)\n elif struct_lines:\n struct_lines.append(line)\n elif enum_lines:\n enum_lines.append(line)\n else:\n if line.startswith(\"class\"):\n class_lines.append(line)\n elif line.startswith(\"struct\"):\n struct_lines.append(line)\n elif line.startswith(\"enum\"):\n enum_lines.append(line)\n elif \"(\" in line:\n class_dict[\"methods\"].append(parse_method(line))\n\n if \"}\" in line:\n curly_brace_count -= 1\n if 
curly_brace_count == 0:\n if class_lines:\n class_dict[\"classes\"].append(parse_class(class_lines))\n class_lines.clear()\n if struct_lines:\n class_dict[\"structs\"].append(parse_struct(struct_lines))\n struct_lines.clear()\n if enum_lines:\n class_dict[\"enums\"].append(enum_lines)#just keep the whole thing without changing it\n enum_lines.clear()\n\n if curly_brace_count == 0:\n if line.startswith(\"protected:\") or line.startswith(\"private:\"):\n public = False\n #TODO figure out if singleton\n #TODO figure out if instanciable\n return class_dict\n\n \n \n\n\ndef parse_header_file(file_path):\n #assumes that header files are prettified and have newlines at the regular places\n #if that's not the case, just run a prettifyier on the files and try again\n #note the opening brace must be on the same line for class, struct, enum, and method defiitions\n input_file = open(file_path, 'r')\n lines = input_file.readlines()\n\n classes = []\n structs = []\n enums = []\n\n class_lines = []\n struct_lines = []\n enum_lines = []\n\n curly_brace_count = 0\n for raw_line in lines:\n line = raw_line.strip()\n if \"{\" in line:\n curly_brace_count += 1\n\n #continue the current definition\n if class_lines:\n class_lines.append(line)\n elif struct_lines:\n struct_lines.append(line)\n elif enum_lines:\n enum_lines.append(line)\n elif curly_brace_count == 1: # else see if we should start a new definition\n if line.startswith(\"class\"):\n print(line)\n class_lines.append(line)\n elif line.startswith(\"struct\"):\n struct_lines.append(line)\n elif line.startswith(\"enum\") and \"{\" in line:\n enum_lines.append(line)\n\n if \"}\" in line:\n curly_brace_count -= 1\n\n if (curly_brace_count == 0):\n if class_lines:\n classes.append(parse_class(class_lines))\n class_lines.clear()\n elif struct_lines:\n structs.append(parse_struct(struct_lines))\n struct_lines.clear()\n elif enum_lines:\n enums.append(enum_lines)\n enum_lines.clear()\n input_file.close()\n header_file = {}\n empty_class = {\n \"classes\": [],\n \"structs\": [],\n \"enums\": [],\n \"constants\": [],\n \"methods\": []\n }\n # header_file[\"classes\"] = [c for c in classes if c != empty_class]\n # header_file[\"structs\"] = [s for s in structs if s != empty_class]\n header_file[\"classes\"] = classes\n header_file[\"structs\"] = structs\n header_file[\"enums\"] = enums\n return header_file\n\n\nif __name__ == \"__main__\":\n input_folder = \"./ToBeGenerated\"\n output_folder = \"./Generated\"\n\n #Step 1 make a dictionary that shows the relationship of files to class to methods\n #Step 2 create the empty skeleton class that will live on the Wasm side. Our current target is C++, so these will be C++ style header and cpp files\n #Step 3 create the wasm side wrapper functions, these are all extern \"C\" functions which means they'll be exported in the global namespace with a _wasgo_ header. We'll keep these in a separate header file to not polute the global namespace too much\n #Step 4 create the Godot side wrapper functions, these should have the same name and signature as their wasm counterparts, but they don't have to be extern \"C\". 
Unlike the wasm wrappers, these will have bodies that call Godot functions on behalf of wasm\n #Step 5 create the function table that links the Godot wrappers to the wasm wrappers\n\n #******* STEP 1 ********\n input_files = find_files_recursively(input_folder, \"*.h\")\n\n file_classes_dict = {}\n for input_file in input_files:\n file_classes_dict[input_file] = parse_header_file(input_folder + \"/\" + input_file)\n\n with open('test_run.json', 'w') as file:\n file.write(json.dumps(file_classes_dict)) # use `json.loads` to do the reverse\n","repo_name":"MMMaellon/WasGo","sub_path":"api/api_generator_v2.py","file_name":"api_generator_v2.py","file_ext":"py","file_size_in_byte":9312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22781579394","text":"from src import api_app \n\nDrink = api_app.Drink\ndb = api_app.db\n\n# cola = Drink(title='cola', recipe='{\"color\":\"black\", \"name\":\"cola\", \"parts\":\"60\"}')\n# cola.insert()\n\nf = Drink.query.get(1)\nprint(f.short())\nprint(f.long())\n\n# f.delete()","repo_name":"MohamedDiaaEldin/Coffee-Shop","sub_path":"backend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"70002734608","text":"import tensorflow as tf\nfrom adanet.examples import simple_dnn\n\nfrom pipeline import SEED\n\n\nclass DNNGenerator:\n \"\"\"DNN subnetwork Generator using `adanet.examples.simple_dnn.Generator`\"\"\"\n\n FEATURE_KEY = \"image\"\n\n def __init__(self, learning_rate: int):\n self.learning_rate = learning_rate\n\n def build_subnetwork_generator(self):\n feature_columns = [\n tf.feature_column.numeric_column(self.FEATURE_KEY, shape=[28, 28, 1])\n ]\n\n return simple_dnn.Generator(\n feature_columns=feature_columns,\n optimizer=tf.train.AdamOptimizer(self.learning_rate),\n seed=SEED,\n )\n","repo_name":"novdov/ml-pipeline","sub_path":"pipeline/model_generator/dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72832724686","text":"import streamlit as st\r\nfrom streamlit_chat import message\r\nfrom langchain.chat_models import ChatOpenAI\r\nfrom langchain.chains import ConversationalRetrievalChain\r\nfrom langchain.prompts.prompt import PromptTemplate\r\nfrom langchain.callbacks import get_openai_callback\r\nfrom langchain import LLMChain\r\nfrom langchain.chains.llm import LLMChain\r\nfrom langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT\r\nfrom langchain.chains.question_answering import load_qa_chain\r\nimport os\r\nimport pickle\r\nimport tempfile\r\nimport pandas as pd\r\nimport pdfplumber\r\nimport datetime\r\nfrom langchain.vectorstores import FAISS\r\nfrom langchain.embeddings.openai import OpenAIEmbeddings\r\nfrom langchain.document_loaders import PyPDFLoader\r\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\r\nfrom langchain.callbacks import get_openai_callback\r\nfrom dotenv import load_dotenv\r\nfrom io import StringIO\r\nfrom tqdm.auto import tqdm\r\nfrom typing import List, Union\r\nfrom langchain.chat_models import ChatOpenAI\r\nfrom langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate\r\nfrom langchain.memory import ConversationBufferWindowMemory\r\n\r\n\r\nclass Utilities:\r\n\r\n @staticmethod\r\n def load_api_key():\r\n \"\"\"\r\n Loads the OpenAI API key 
from the .env file or \r\n from the user's input and returns it\r\n \"\"\"\r\n if os.path.exists(\".env\") and os.environ.get(\"OPENAI_API_KEY\") is not None:\r\n user_api_key = os.environ[\"OPENAI_API_KEY\"]\r\n st.sidebar.success(\"API key loaded from .env\", icon=\"­Ъџђ\")\r\n else:\r\n user_api_key = st.sidebar.text_input(\r\n label=\"#### Enter OpenAI API key ­ЪЉЄ\", placeholder=\"Paste your openAI API key, sk-\", type=\"password\"\r\n )\r\n if user_api_key:\r\n st.sidebar.success(\"API keys loaded\", icon=\"­Ъџђ\")\r\n\r\n return user_api_key\r\n \r\n @staticmethod\r\n def handle_upload():\r\n \"\"\"\r\n Handles the file upload and displays the uploaded file\r\n \"\"\"\r\n uploaded_file = st.sidebar.file_uploader(\"upload\", type=[\"pdf\"], label_visibility=\"collapsed\", accept_multiple_files = True)\r\n if uploaded_file is not None:\r\n\r\n def show_pdf_file(uploaded_file):\r\n file_container = st.expander(\"Your PDF file :\")\r\n for i in range(len(uploaded_file)):\r\n with pdfplumber.open(uploaded_file[i]) as pdf:\r\n pdf_text = \"\"\r\n for page in pdf.pages:\r\n pdf_text += page.extract_text() + \"\\n\\n\"\r\n file_container.write(pdf_text)\r\n \r\n file_extension = \".pdf\" \r\n\r\n if file_extension== \".pdf\" : \r\n show_pdf_file(uploaded_file)\r\n\r\n else:\r\n st.sidebar.info(\r\n \"­ЪЉє Upload your PDF file to get started..!\"\r\n )\r\n st.session_state[\"reset_chat\"] = True\r\n\r\n #print(uploaded_file)\r\n return uploaded_file\r\n\r\n @staticmethod\r\n def setup_chatbot(uploaded_file, model, temperature,):\r\n \"\"\"\r\n Sets up the chatbot with the uploaded file, model, and temperature\r\n \"\"\"\r\n embeds = Embedder()\r\n # Use RecursiveCharacterTextSplitter as the default and only text splitter\r\n splitter_type = \"RecursiveCharacterTextSplitter\"\r\n with st.spinner(\"Processing...\"):\r\n #uploaded_file.seek(0)\r\n file = uploaded_file\r\n \r\n # Get the document embeddings for the uploaded file\r\n vectors = embeds.getDocEmbeds(file, \"Docs\")\r\n\r\n # Create a Chatbot instance with the specified model and temperature\r\n chatbot = Chatbot(model, temperature,vectors)\r\n st.session_state[\"ready\"] = True\r\n\r\n return chatbot\r\n\r\n def count_tokens_agent(agent, query):\r\n \"\"\"\r\n Count the tokens used by the CSV Agent\r\n \"\"\"\r\n with get_openai_callback() as cb:\r\n result = agent(query)\r\n st.write(f'Spent a total of {cb.total_tokens} tokens')\r\n\r\n return result\r\n\r\nclass Layout:\r\n \r\n def show_header(self):\r\n \"\"\"\r\n Displays the header of the app\r\n \"\"\"\r\n st.markdown(\r\n \"\"\"\r\n

Ask Anything: Your Personal AI Assistant

\r\n \"\"\",\r\n unsafe_allow_html=True,\r\n )\r\n\r\n def show_api_key_missing(self):\r\n \"\"\"\r\n Displays a message if the user has not entered an API key\r\n \"\"\"\r\n st.markdown(\r\n \"\"\"\r\n
\r\n

Enter your OpenAI API key to start the conversation

\r\n
\r\n \"\"\",\r\n unsafe_allow_html=True,\r\n )\r\n\r\n def prompt_form(self):\r\n \"\"\"\r\n Displays the prompt form\r\n \"\"\"\r\n with st.form(key=\"my_form\", clear_on_submit=True):\r\n user_input = st.text_area(\r\n \"Query:\",\r\n placeholder=\"Ask me anything about the document...\",\r\n key=\"input\",\r\n label_visibility=\"collapsed\",\r\n )\r\n submit_button = st.form_submit_button(label=\"Send\")\r\n \r\n is_ready = submit_button and user_input\r\n return is_ready, user_input\r\n\r\n\r\nclass Sidebar:\r\n\r\n MODEL_OPTIONS = [\"gpt-3.5-turbo\", \"gpt-4\"]\r\n TEMPERATURE_MIN_VALUE = 0.0\r\n TEMPERATURE_MAX_VALUE = 1.0\r\n TEMPERATURE_DEFAULT_VALUE = 0.0\r\n TEMPERATURE_STEP = 0.01\r\n\r\n @staticmethod\r\n def about():\r\n about = st.sidebar.expander(\"­ЪДа About\")\r\n sections = [\r\n \"#### Welcome to our AI Assistant, a cutting-edge solution to help you find the answers you need quickly and easily. Our AI Assistant is designed to provide you with the most relevant information from PDF sources.\",\r\n \"#### With our AI Assistant, you can ask questions on any topic, and our intelligent algorithms will search through our vast database to provide you with the most accurate and up-to-date information available. Whether you need help with a school assignment, are researching a topic for work, or simply want to learn something new, our AI Assistant is the perfect tool for you.\",\r\n ]\r\n for section in sections:\r\n about.write(section)\r\n\r\n @staticmethod\r\n def reset_chat_button():\r\n if st.button(\"Reset chat\"):\r\n st.session_state[\"reset_chat\"] = True\r\n st.session_state.setdefault(\"reset_chat\", False)\r\n\r\n def model_selector(self):\r\n model = st.selectbox(label=\"Model\", options=self.MODEL_OPTIONS)\r\n st.session_state[\"model\"] = model\r\n\r\n def temperature_slider(self):\r\n temperature = st.slider(\r\n label=\"Temperature\",\r\n min_value=self.TEMPERATURE_MIN_VALUE,\r\n max_value=self.TEMPERATURE_MAX_VALUE,\r\n value=self.TEMPERATURE_DEFAULT_VALUE,\r\n step=self.TEMPERATURE_STEP,\r\n )\r\n st.session_state[\"temperature\"] = temperature\r\n \r\n def csv_agent_button(self, uploaded_file):\r\n st.session_state.setdefault(\"show_csv_agent\", False)\r\n\r\n def show_options(self, uploaded_file):\r\n with st.sidebar.expander(\"­ЪЏа№ИЈ Tools\", expanded=False):\r\n\r\n self.reset_chat_button()\r\n self.csv_agent_button(uploaded_file)\r\n # self.model_selector()\r\n # self.temperature_slider()\r\n st.session_state.setdefault(\"model\", model_name)\r\n st.session_state.setdefault(\"temperature\", temperature)\r\n\r\noriginal_filename=\"Docs\"\r\nclass Embedder:\r\n\r\n def __init__(self):\r\n self.PATH = \"embeddings\"\r\n self.createEmbeddingsDir()\r\n\r\n def createEmbeddingsDir(self):\r\n \"\"\"\r\n Creates a directory to store the embeddings vectors\r\n \"\"\"\r\n if not os.path.exists(self.PATH):\r\n os.mkdir(self.PATH)\r\n\r\n def storeDocEmbeds(self, file, original_filename=\"Docs\"):\r\n \"\"\"\r\n Stores document embeddings using Langchain and FAISS\r\n \"\"\"\r\n with tempfile.NamedTemporaryFile(mode=\"wb\", delete=False) as tmp_file:\r\n tmp_file.write(file)\r\n tmp_file_path = tmp_file.name\r\n\r\n \r\n text_splitter = RecursiveCharacterTextSplitter(\r\n # Set a really small chunk size, just to show.\r\n chunk_size = 2000,\r\n chunk_overlap = 50,\r\n length_function = len,\r\n )\r\n file_extension = \".pdf\" #get_file_extension(original_filename)\r\n\r\n\r\n if file_extension == \".pdf\":\r\n loader = PyPDFLoader(file_path=tmp_file_path) \r\n data = 
loader.load_and_split(text_splitter)\r\n \r\n \r\n embeddings = OpenAIEmbeddings()\r\n\r\n vectors = FAISS.from_documents(data, embeddings)\r\n os.remove(tmp_file_path)\r\n\r\n # Save the vectors to a pickle file\r\n with open(f\"{self.PATH}/{original_filename}.pkl\", \"wb\") as f:\r\n pickle.dump(vectors, f)\r\n\r\n\r\n def getDocEmbeds(self, file, original_filename):\r\n \"\"\"\r\n Retrieves document embeddings\r\n \"\"\"\r\n # Use RecursiveCharacterTextSplitter as the default and only text splitter\r\n splitter_type = \"RecursiveCharacterTextSplitter\"\r\n # Load and process the uploaded PDF or TXT files.\r\n loaded_text = load_docs(file)\r\n #st.write(\"Documents uploaded and processed.\")\r\n\r\n # Split the document into chunks\r\n splits = split_texts(loaded_text, chunk_size=500,\r\n overlap=0, split_method=splitter_type)\r\n embeddings = OpenAIEmbeddings()\r\n vectors = create_retriever(embeddings, splits, retriever_type=\"SIMILARITY SEARCH\")\r\n return vectors\r\n\r\nclass ChatHistory:\r\n \r\n def __init__(self):\r\n self.history = st.session_state.get(\"history\", [])\r\n st.session_state[\"history\"] = self.history\r\n\r\n def default_greeting(self):\r\n return \"Hey! ­ЪЉІ\"\r\n\r\n def default_prompt(self, topic):\r\n return f\"Hello ! Ask me anything about {topic} ­ЪцЌ\"\r\n\r\n def initialize_user_history(self):\r\n st.session_state[\"user\"] = [self.default_greeting()]\r\n\r\n def initialize_assistant_history(self, uploaded_file):\r\n st.session_state[\"assistant\"] = [self.default_prompt(original_filename)]\r\n\r\n def initialize(self, uploaded_file):\r\n if \"assistant\" not in st.session_state:\r\n self.initialize_assistant_history(original_filename)\r\n if \"user\" not in st.session_state:\r\n self.initialize_user_history()\r\n\r\n def reset(self, uploaded_file):\r\n st.session_state[\"history\"] = []\r\n \r\n self.initialize_user_history()\r\n self.initialize_assistant_history(original_filename)\r\n st.session_state[\"reset_chat\"] = False\r\n\r\n def append(self, mode, message):\r\n st.session_state[mode].append(message)\r\n\r\n def generate_messages(self, container):\r\n con = []\r\n if st.session_state[\"assistant\"]:\r\n with container:\r\n for i in range(len(st.session_state[\"assistant\"])):\r\n message(\r\n st.session_state[\"user\"][i],\r\n is_user=True,\r\n key=f\"{i}_user\",\r\n avatar_style=\"big-smile\",\r\n )\r\n message(st.session_state[\"assistant\"][i], key=str(i), avatar_style=\"thumbs\")\r\n con.append(\"Human: \" + str(st.session_state[\"user\"][i]))\r\n con.append(\"AI: \" + str(st.session_state[\"assistant\"][i]))\r\n con.append(\"\\n\")\r\n return con\r\n\r\n def load(self):\r\n if os.path.exists(self.history_file):\r\n with open(self.history_file, \"r\") as f:\r\n self.history = f.read().splitlines()\r\n\r\n def save(self):\r\n with open(self.history_file, \"w\") as f:\r\n f.write(\"\\n\".join(self.history))\r\n\r\n\r\nfrom langchain.chains.question_answering import load_qa_chain\r\nfrom langchain.memory import ConversationBufferMemory\r\nclass Chatbot:\r\n\r\n def __init__(self, model_name, temperature, vectors):\r\n self.model_name = model_name\r\n self.temperature = temperature\r\n self.vectors = vectors\r\n\r\n\r\n _template = \"\"\"Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.\r\n Chat History:\r\n {chat_history}\r\n Follow-up entry: {question}\r\n Standalone question:\"\"\"\r\n CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\r\n\r\n qa_template = 
\"\"\"You are a friendly conversational assistant, designed to answer questions and chat with the user from a contextual file.\r\n You receive data from a user's files and a question, you must help the user find the information they need. \r\n Your answers must be user-friendly and respond to the user.\r\n You will get questions and contextual information.\r\n\r\n question: {question}\r\n =========\r\n context: {context}\r\n =======\"\"\"\r\n QA_PROMPT = PromptTemplate(template=qa_template, input_variables=[\"question\", \"context\"])\r\n\r\n def conversational_chat(self, query):\r\n \"\"\"\r\n Start a conversational chat with a model via Langchain\r\n \"\"\"\r\n llm = ChatOpenAI(model_name=model_name, temperature=temperature)\r\n\r\n retriever = self.vectors#.as_retriever()\r\n\t \r\n memory = ConversationBufferMemory(memory_key=\"chat_history\")\r\n question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT,verbose=True)\r\n doc_chain = load_qa_chain(llm=llm, \r\n \r\n prompt=self.QA_PROMPT,\r\n verbose=True,\r\n chain_type= \"stuff\"\r\n )\r\n\r\n chain = ConversationalRetrievalChain(\r\n retriever=retriever, combine_docs_chain=doc_chain, question_generator=question_generator, memory=memory, verbose=True)#, return_source_documents=True)\r\n\r\n\r\n chain_input = {\"question\": query}#, \"chat_history\": st.session_state[\"history\"]}\r\n result = chain(chain_input)\r\n\r\n st.session_state[\"history\"].append((query, result[\"answer\"]))\r\n #count_tokens_chain(chain, chain_input)\r\n return result[\"answer\"]\r\n\r\ndef count_tokens_chain(chain, query):\r\n with get_openai_callback() as cb:\r\n result = chain.run(query)\r\n st.write(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')\r\n return result \r\n# from langchain.vectorstores import Chroma\r\n# from langchain.document_loaders import UnstructuredPDFLoader\r\nimport PyPDF2\r\n@st.cache_data\r\ndef load_docs(files):\r\n st.sidebar.info(\"`Reading doc ...`\")\r\n all_text = \"\"\r\n for file_path in files:\r\n file_extension = os.path.splitext(file_path.name)[1]\r\n if file_extension == \".pdf\":\r\n pdf_reader = PyPDF2.PdfReader(file_path)\r\n text = \"\"\r\n for page in pdf_reader.pages:\r\n text += page.extract_text()\r\n all_text += text\r\n elif file_extension == \".txt\":\r\n stringio = StringIO(file_path.getvalue().decode(\"utf-8\"))\r\n text = stringio.read()\r\n all_text += text\r\n else:\r\n st.warning('Please provide txt or pdf.', icon=\"Рџа№ИЈ\")\r\n return all_text\r\n\r\n\r\n\r\n\r\n@st.cache_resource\r\ndef create_retriever(_embeddings, splits, retriever_type):\r\n if retriever_type == \"SIMILARITY SEARCH\":\r\n try:\r\n vectorstore = FAISS.from_texts(splits, _embeddings)\r\n except (IndexError, ValueError) as e:\r\n st.error(f\"Error creating vectorstore: {e}\")\r\n return\r\n retriever = vectorstore.as_retriever(k=5)\r\n elif retriever_type == \"SUPPORT VECTOR MACHINES\":\r\n retriever = SVMRetriever.from_texts(splits, _embeddings)\r\n\r\n return retriever\r\n\r\n@st.cache_resource\r\ndef split_texts(text, chunk_size, overlap, split_method):\r\n\r\n # Split texts\r\n # IN: text, chunk size, overlap, split_method\r\n # OUT: list of str splits\r\n\r\n st.sidebar.info(\"`Splitting doc ...`\")\r\n\r\n split_method = \"RecursiveTextSplitter\"\r\n text_splitter = RecursiveCharacterTextSplitter(\r\n chunk_size=chunk_size, chunk_overlap=overlap, separators=[\" \", \",\", \"\\n\"])\r\n\r\n splits = text_splitter.split_text(text)\r\n if not splits:\r\n st.error(\"Failed to split 
document\")\r\n st.stop()\r\n\r\n return splits\r\ndef doc_search(temperature):\r\n def generate(query):\r\n template = \"\"\"Assistant is a large language model trained by OpenAI.\r\n\r\n Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\r\n\r\n Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\r\n\r\n Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\r\n\r\n {history}\r\n Human: {human_input}\r\n Assistant:\"\"\"\r\n\r\n prompt = PromptTemplate(\r\n input_variables=[\"history\", \"human_input\"], \r\n template=template\r\n )\r\n\r\n\r\n chatgpt_chain = LLMChain(\r\n llm=OpenAI(temperature=0), \r\n prompt=prompt, \r\n verbose=True, \r\n memory=ConversationBufferWindowMemory(k=2),\r\n )\r\n\r\n output = chatgpt_chain.predict(human_input=query)\r\n return output\r\n def get_text():\r\n input_text = st.text_input(\"\", key=\"input\")\r\n return input_text \r\n def prompt_form():\r\n \"\"\"\r\n Displays the prompt form\r\n \"\"\"\r\n with st.form(key=\"my_form\", clear_on_submit=True):\r\n user_input = st.text_area(\r\n \"Query:\",\r\n placeholder=\"Ask me anything about the document...\",\r\n key=\"input_\",\r\n label_visibility=\"collapsed\",\r\n )\r\n submit_button = st.form_submit_button(label=\"Send\")\r\n \r\n is_ready = submit_button and user_input\r\n return is_ready, user_input\r\n col1, col2 = st.columns([1,0.19])\r\n col1.write(\"Write your query here:­Ъњг\")\r\n\r\n if 'generated' not in st.session_state:\r\n st.session_state['generated'] = ['I am ready to help you!']\r\n\r\n if 'past' not in st.session_state:\r\n st.session_state['past'] = ['Hey there!']\r\n #user_input = get_text()\r\n is_ready, user_input = prompt_form()\r\n #is_readyy = st.button(\"Send\")\r\n convo = []\r\n output = \"\"\r\n if is_ready: # user_input:\r\n output = generate(user_input)\r\n st.session_state.past.append(user_input)\r\n st.session_state.generated.append(output)\r\n \r\n\r\n if st.session_state['generated']:\r\n\r\n for i in range(len(st.session_state['generated'])-1, -1, -1):\r\n message(st.session_state[\"generated\"][i], key=str(i))\r\n message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')\r\n convo.append(\"Human: \" + str(st.session_state['past'][i]))\r\n convo.append(\"AI: \" + str(st.session_state[\"generated\"][i]))\r\n convo.append(\"\\n\")\r\n text_conv = '\\n'.join(convo)\r\n # Provide download link for text file\r\n col2.download_button(\r\n label=\"Download Conversation\",\r\n data=text_conv,\r\n 
file_name=f\"Conversation_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt\",\r\n mime=\"text/plain\"\r\n )\r\n\r\n\r\ndef main(temperature):\r\n # Instantiate the main components\r\n layout, sidebar, utils = Layout(), Sidebar(), Utilities()\r\n\r\n layout.show_header()\r\n\r\n if not user_api_key:\r\n layout.show_api_key_missing()\r\n else:\r\n os.environ[\"OPENAI_API_KEY\"] = user_api_key\r\n\r\n uploaded_file = utils.handle_upload()\r\n\r\n if uploaded_file:\r\n # Initialize chat history\r\n history = ChatHistory()\r\n\r\n # Configure the sidebar\r\n sidebar.show_options(uploaded_file)\r\n\r\n try:\r\n chatbot = utils.setup_chatbot(\r\n uploaded_file, st.session_state[\"model\"], st.session_state[\"temperature\"]\r\n )\r\n st.session_state[\"chatbot\"] = chatbot\r\n\r\n if st.session_state[\"ready\"]:\r\n # Create containers for chat responses and user prompts\r\n response_container, prompt_container = st.container(), st.container()\r\n\r\n with prompt_container:\r\n # Display the prompt form\r\n is_ready, user_input = layout.prompt_form()\r\n\r\n # Initialize the chat history\r\n history.initialize(uploaded_file)\r\n\r\n # Reset the chat history if button clicked\r\n if st.session_state[\"reset_chat\"]:\r\n history.reset(uploaded_file)\r\n\r\n if is_ready:\r\n # Update the chat history and display the chat messages\r\n history.append(\"user\", user_input)\r\n output = st.session_state[\"chatbot\"].conversational_chat(user_input)\r\n history.append(\"assistant\", output)\r\n\r\n con = history.generate_messages(response_container)\r\n con = '\\n'.join(con)\r\n # Provide download link for text file\r\n st.download_button(\r\n label=\"Download Conversation\",\r\n data=con,\r\n file_name=f\"Conversation_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt\",\r\n mime=\"text/plain\"\r\n )\r\n except Exception as e:\r\n st.error(f\"Error: {str(e)}\")\r\n\r\n sidebar.about()\r\n\r\n\r\n\r\n# Define a dictionary with the function names and their respective functions\r\nfunctions = [\r\n \"Select a Chat\",\r\n \"Chat with Docs\",\r\n \"Chat with AI\"\r\n]\r\n\r\nst.set_page_config(layout=\"wide\", page_icon=\"­Ъњг\", page_title=\"AI Chatbot ­Ъцќ\")\r\n#st.markdown(\"# AI Chat with Docs and Web!­ЪЉй\")\r\nst.markdown(\r\n \"\"\"\r\n
\r\n

Chat with Docs and AI!

\r\n
\r\n \"\"\",\r\n unsafe_allow_html=True,\r\n )\r\n#st.title(\"\")\r\n\r\nst.subheader(\"Select any chat type­ЪЉЄ\")\r\n# Create a selectbox with the function names as options\r\nselected_function = st.selectbox(\"Select a Chat\", functions, index = 0)\r\n \r\n\r\nif os.path.exists(\".env\") and os.environ.get(\"OPENAI_API_KEY\") is not None:\r\n user_api_key = os.environ[\"OPENAI_API_KEY\"]\r\n st.sidebar.success(\"API key loaded from .env\", icon=\"­Ъџђ\")\r\nelse:\r\n user_api_key = st.sidebar.text_input(\r\n label=\"#### Enter OpenAI API key ­ЪЉЄ\", placeholder=\"Paste your openAI API key, sk-\", type=\"password\"\r\n )\r\n if user_api_key:\r\n st.sidebar.success(\"OpenAI API key loaded\", icon=\"­Ъџђ\")\r\n MODEL_OPTIONS = [\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-32k\"]\r\n max_tokens = {\"gpt-4\":7000, \"gpt-4-32k\":31000, \"gpt-3.5-turbo\":3000}\r\n TEMPERATURE_MIN_VALUE = 0.0\r\n TEMPERATURE_MAX_VALUE = 1.0\r\n TEMPERATURE_DEFAULT_VALUE = 0.9\r\n TEMPERATURE_STEP = 0.01\r\n model_name = st.sidebar.selectbox(label=\"Model\", options=MODEL_OPTIONS)\r\n top_p = st.sidebar.slider(\"Top_P\", 0.0, 1.0, 1.0, 0.1)\r\n freq_penalty = st.sidebar.slider(\"Frequency Penalty\", 0.0, 2.0, 0.0, 0.1)\r\n temperature = st.sidebar.slider(\r\n label=\"Temperature\",\r\n min_value=TEMPERATURE_MIN_VALUE,\r\n max_value=TEMPERATURE_MAX_VALUE,\r\n value=TEMPERATURE_DEFAULT_VALUE,\r\n step=TEMPERATURE_STEP,)\r\n\r\nif selected_function == \"Chat with Docs\":\r\n main(temperature)\r\nelif selected_function == \"Chat with AI\":\r\n os.environ[\"OPENAI_API_KEY\"] = user_api_key\r\n doc_search(temperature)\r\nelif selected_function == \"Select a Chat\":\r\n st.markdown(\r\n \"\"\"\r\n
\r\n

Enter your OpenAI API key first, then select a chat type!

\r\n
\r\n \"\"\",\r\n unsafe_allow_html=True,\r\n )\r\nelse:\r\n st.warning(\"You haven't selected any AI Chat!!\")\r\n \r\n \r\n","repo_name":"vantagecorp-jarvis/AI-Chat-with-PDF","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":25808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"26131885691","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\nprint(iris)\nprint(iris.data)\n\nimport seaborn as sns\ndf = sns.load_dataset('iris')\nprint(df.head)\n\nX = df.iloc[:, :-1] # species non independent feature\nY = iris.target\n\nprint(X)\nprint(Y)\n\n# model training\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)\n\nprint(X_train)\n\n# making decision tree model\nfrom sklearn.tree import DecisionTreeClassifier\ntreemodel = DecisionTreeClassifier() # post pruning\ntreemodel.fit(X_train, Y_train)\n\nfrom sklearn import tree\nplt.figure(figsize=(10, 8))\ntree.plot_tree(treemodel, filled=True)\n# plt.show()\n\n# prediction tyme\ny_pred = treemodel.predict(X_test)\nprint(y_pred)\n\nfrom sklearn.metrics import accuracy_score, classification_report\nscore = accuracy_score(y_pred, Y_test)\nprint(score) # won't be good as we have not done any pruning\n\nprint(classification_report(y_pred, Y_test))\n","repo_name":"Suga7772/Deep-Learning-BWF-Mahrukh-Khan","sub_path":"Task 18/more decision tree.py","file_name":"more decision tree.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22901042297","text":"\"\"\"Definition of useful linear operators\"\"\"\nimport numpy as np\nfrom copy import copy\nfrom interface import LinearOperator\n\nclass NDOperator(LinearOperator):\n \"\"\"Subclass of LinearOperator that handle multidimensional inputs and outputs\"\"\"\n def __init__(self, shapein, shapeout, matvec, rmatvec=None, matmat=None, rmatmat=None,\n dtypein=None, dtypeout=None, dtype=None):\n\n sizein = np.prod(shapein)\n sizeout = np.prod(shapeout)\n shape = (sizeout, sizein)\n\n ndmatvec = lambda x: matvec(x.reshape(shapein)).ravel()\n\n if rmatvec is not None:\n ndrmatvec = lambda x: rmatvec(x.reshape(shapeout)).ravel()\n else:\n ndrmatvec = None\n\n LinearOperator.__init__(self, shape, ndmatvec, ndrmatvec, dtype=dtype,\n dtypein=dtypein, dtypeout=dtypeout)\n\n self.ndmatvec = matvec\n self.ndrmatvec = rmatvec\n self.shapein = shapein\n self.shapeout = shapeout\n\n\nclass NDSOperator(NDOperator):\n def __init__(self, shapein=None, shapeout=None, classin=None,\n classout=None, dictin=None, dictout=None, xin=None, xout=None,\n matvec=None, rmatvec=None, dtype=np.float64, dtypein=None,\n dtypeout=None):\n \"Wrap linear operation working on ndarray subclasses in InfoArray style\"\n if xin is not None:\n shapein = xin.shape\n classin = xin.__class__\n dictin = xin.__dict__\n dtype = xin.dtype\n\n if xout is not None:\n shapeout = xout.shape\n classout = xout.__class__\n dictout = xout.__dict__\n\n sizein = np.prod(shapein)\n sizeout = np.prod(shapeout)\n shape = (sizeout, sizein)\n\n self.ndsmatvec = matvec\n self.ndsrmatvec = rmatvec\n self.classin = classin\n self.classout = classout\n self.dictin = dictin\n self.dictout = dictout\n self.shapein = shapein\n self.shapeout = shapeout\n\n if matvec is not None:\n def smatvec(x):\n xi = classin(data=x)\n 
xi.__dict__ = dictin\n return matvec(xi)\n else:\n raise ValueError('Requires a matvec function')\n\n if rmatvec is not None:\n def srmatvec(x):\n xo = classout(data=x)\n xo.__dict__ = dictout\n return rmatvec(xo)\n else:\n rmatvec = None\n\n NDOperator.__init__(self, shapein, shapeout, smatvec, rmatvec=srmatvec,\n dtypein=dtypein, dtypeout=dtypeout, dtype=dtype)\n \ndef ndoperator(*kargs, **kwargs):\n \"Transform n-dimensional linear operators into LinearOperators\"\n return NDOperator(*kargs, **kwargs)\n\ndef masubclass(xin=None, xout=None, shapein=None, shapeout=None, classin=None,\n classout=None, dictin=None, dictout=None,\n matvec=None, rmatvec=None, dtype=np.float64, dtypein=None, dtypeout=None):\n \"Wrap linear operation working on ndarray subclasses in MaskedArray style\"\n if xin is not None:\n shapein = xin.shape\n classin = xin.__class__\n dictin = xin.__dict__\n dtype = xin.dtype\n if xout is not None:\n shapeout = xout.shape\n classout = xout.__class__\n dictout = xout.__dict__\n sizein = np.prod(shapein)\n sizeout = np.prod(shapeout)\n shape = (sizeout, sizein)\n if matvec is not None:\n def ndmatvec(x):\n xi = classin(x.reshape(shapein))\n xi.__dict__ = dictin\n return matvec(xi).reshape(sizeout)\n else:\n raise ValueError('Requires a matvec function')\n if rmatvec is not None:\n def ndrmatvec(x):\n xo = classout(x.reshape(shapeout))\n xo.__dict__ = dictout\n return rmatvec(xo).reshape(sizein)\n else:\n ndrmatvec = None\n return LinearOperator(shape, matvec=ndmatvec, rmatvec=ndrmatvec, dtype=dtype,\n dtypein=dtypein, dtypeout=dtypeout)\n\ndef ndsubclass(**kwargs):\n \"Wrap linear operation working on ndarray subclasses in InfoArray style\"\n return NDSOperator(**kwargs)\n\ndef diag(d, shape=None, dtype=None):\n \"Returns a diagonal Linear Operator\"\n if shape is None:\n shape = 2 * (d.size,)\n if shape[0] != shape[1]:\n raise ValueError('Diagonal operators must be square')\n def matvec(x):\n return d * x\n if dtype is None:\n dtype = d.dtype\n return LinearOperator(shape, matvec=matvec, rmatvec=matvec, dtype=dtype)\n\ndef identity(shape, dtype=np.float64):\n \"Returns the identity linear Operator\"\n if shape[0] != shape[1]:\n raise ValueError('Identity operators must be square')\n def matvec(x):\n return x\n return LinearOperator(shape, matvec=matvec, rmatvec=matvec, dtype=dtype)\n\ndef eye(shape, dtype=np.float64):\n \"Returns the identity linear Operator\"\n if shape[0] == shape[1]:\n return identity(shape, dtype=dtype)\n else:\n def matvec(x):\n return x[:shape[0]]\n def rmatvec(x):\n return np.concatenate(x, np.zeros(shape[0] - shape[1]))\n return LinearOperator(shape, matvec=matvec, rmatvec=rmatvec, dtype=dtype)\n\ndef fftn(shapein, dtypein=np.float64, dtypeout=np.complex128, s=None, axes=None):\n \"fftn LinearOperator\"\n import numpy.fft\n if s is None:\n shapeout = shapein\n else:\n shapeout = s\n def matvec(x):\n return np.fft.fftn(x, s=s, axes=axes)\n def rmatvec(x):\n return np.fft.ifftn(x, s=s, axes=axes)\n return ndoperator(shapein, shapeout, matvec, rmatvec, dtypein=dtypein,\n dtypeout=dtypeout)\n\ndef fft2(shapein, dtypein=np.float64, dtypeout=np.complex128, s=None, axes=(-2, -1)):\n \"fft2 LinearOperator\"\n import numpy.fft\n if len(shapein) != 2:\n raise ValueError('Error expected 2 dimensional shape')\n if s is None:\n shapeout = shapein\n else:\n shapeout = s\n def matvec(x):\n return np.fft.fftn(x, s=s, axes=axes)\n def rmatvec(x):\n return np.fft.ifftn(x, s=s, axes=axes)\n return ndoperator(shapein, shapeout, matvec, rmatvec, \n dtypein=dtypein, 
dtypeout=dtypeout)\n\ndef convolve(shapein, kernel, mode='full'):\n \"\"\" Linear Operator to convolve two N-dimensional arrays\n\n See Also:\n scipy.signal.convolve\n \"\"\"\n from scipy.signal import convolve\n #if len(shapein) != 2:\n # raise ValueError('Error expected 2 dimensional shape')\n if mode == 'full':\n shapeout = [s + ks - 1 for s, ks in zip(shapein, kernel.shape)]\n if mode == 'valid':\n shapeout = [s - ks + 1 for s, ks in zip(shapein, kernel.shape)]\n if mode == 'same':\n shapeout = shapein\n # reverse kernel\n s = (slice(None, None, -1), ) * kernel.ndim\n rkernel = kernel[s]\n def matvec(x):\n return convolve(x, kernel, mode=mode)\n def rmatvec(x):\n if mode == 'full':\n rmode = 'valid'\n elif mode == 'valid':\n rmode = 'full'\n elif mode == 'same':\n rmode = 'same'\n return convolve(x, rkernel, mode=rmode)\n return ndoperator(shapein, shapeout, matvec, rmatvec, dtype=kernel.dtype)\n\ndef mask(mask, dtype=np.float64, copy_array=False, remove_nan=False):\n \"Masking as a LinearOperator\"\n shapein = mask.shape\n shapeout = mask.shape\n # make a copy to be sure mask does not change\n op_mask = copy(1 - mask)\n def matvec(x):\n if copy_array:\n y = copy(x)\n else:\n y = x\n x *= op_mask\n if remove_nan:\n x[np.isnan(x)] = 0.\n return y\n\n return ndoperator(shapein, shapeout, matvec, matvec, dtype=dtype)\n\ndef decimate(mask, dtype=np.float64):\n \"Masking as a LinearOperator\"\n shapein = mask.shape\n shapeout = np.sum(mask == False)\n def matvec(x):\n return x[mask==False]\n def rmatvec(x):\n y = np.zeros(shapein, dtype=dtype)\n y[mask==False] = x\n return y\n return ndoperator(shapein, shapeout, matvec, rmatvec, dtype=dtype)\n\ndef diff(shapein, axis=-1, dtype=np.float64):\n shapeout = np.asarray(shapein)\n shapeout[axis] -= 1\n shapetmp = list(shapeout)\n shapetmp[axis] += 2\n tmp = np.zeros(shapetmp)\n s = [slice(None),] * len(shapein)\n s[axis] = slice(1, -1)\n def matvec(x):\n return np.diff(x, axis=axis)\n def rmatvec(x):\n tmp[s] = x\n return - np.diff(tmp, axis=axis)\n return ndoperator(shapein, shapeout, matvec, rmatvec, dtype=dtype)\n\ndef binning(shapein, factor, axis=-1, dtype=np.float64):\n shapeout = np.asarray(copy(shapein))\n shapeout[axis] /= factor\n def matvec(x):\n return bin(x, factor, axis=axis)\n def rmatvec(x):\n return replicate(x, factor, axis=axis)\n return ndoperator(shapein, shapeout, matvec=matvec, rmatvec=rmatvec, \n dtype=dtype)\n\ndef bin(arr, factor, axis=-1):\n shapeout = np.asarray(arr.shape)\n shapeout[axis] /= factor\n outarr = np.zeros(shapeout)\n s0 = [slice(None),] * arr.ndim\n s1 = [slice(None),] * arr.ndim\n for i in xrange(arr.shape[axis]):\n s0[axis] = i\n s1[axis] = np.floor(i / factor)\n outarr[s1] += arr[s0]\n return outarr\n\ndef replicate(arr, factor, axis=-1):\n shapeout = np.asarray(arr.shape)\n shapeout[axis] *= factor\n outarr = np.zeros(shapeout)\n s0 = [slice(None),] * arr.ndim\n s1 = [slice(None),] * arr.ndim\n for i in xrange(shapeout[axis]):\n s0[axis] = i\n s1[axis] = np.floor(i / factor)\n outarr[s0] = arr[s1]\n return outarr\n\ndef axis_mul(shapein, vect, axis=-1, dtype=np.float64):\n shapeout = shapein\n def matvec(x):\n y = np.empty(x.shape)\n s = [slice(None), ] * x.ndim\n for i in xrange(x.shape[axis]):\n s[axis] = i\n y[s] = x[s] * vect\n return y\n return ndoperator(shapein, shapeout, matvec=matvec, rmatvec=matvec,\n dtype=dtype)\n\ndef mul(shapein, num, dtype=np.float64):\n if not np.isscalar(num):\n raise ValueError('mul expect a scalar as input')\n def matvec(x):\n y = num * x\n return y\n return 
ndoperator(shapein, shapein, matvec=matvec, rmatvec=matvec,\n dtype=dtype)\n","repo_name":"admin-root-123456/apliHRTest2","sub_path":"venv/Lib/site-packages/lo/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"39358042648","text":"# file: nonblocking_post.py\n\n# Dynamic Twisted imports not recognized.\n# pylint: disable-msg=E1101\n\n\"\"\"\nExample for a non-blocking HTTP server with Twisted.\n\nTwo things are important:\n1. The render method of a page returns `server.NOT_DONE_YET` to\n signal that it will deliver its content later.\n2. We use the decorator `@defer.inlineCallbacks` to make\n using deferreds easier.\n\"\"\"\n\n\nimport math\nimport time\n\nfrom twisted.internet import defer, threads\nfrom twisted.web import resource, server\n\nimport plain_pi\n\n\nclass EntryPage(resource.Resource):\n \"\"\"Start page with form for inputing calculation values.\n \"\"\"\n\n def render(self, request):\n \"\"\"Render the page.\n \"\"\"\n return \"\"\"\n \n \n Pi with Twisted\n \n \n

Twisted-based Calculation of pi with\n the Monte Carlo Method

\n
\n
\n                Number of iterations: \n                \n            
\n \n \n \"\"\"\n\n\nclass PiPost(resource.Resource):\n \"\"\"Page showing the calculation results.\n \"\"\"\n\n def render(self, request):\n \"\"\"Render asynchronously using `server.NOT_DONE_YET`.\n \"\"\"\n self._calculatePi(request).addCallback(self._gotResult, request)\n return server.NOT_DONE_YET\n\n @staticmethod\n @defer.inlineCallbacks\n def _calculatePi(request):\n \"\"\"Generator to calculate pi.\n\n We use the decorator `@defer.inlineCallbacks to write\n this method sequentially but still use a deferred.\n With suspend `piPlan` to a thread to make it nonblocking.\n \"\"\"\n iterations = int(float(request.args['n'][0]))\n start = time.time()\n calc_pi = yield threads.deferToThread(plain_pi.piPlain, iterations)\n duration = time.time() - start\n defer.returnValue({'iterations': iterations,\n 'pi': calc_pi,\n 'duration': duration,\n 'math_pi': math.pi})\n\n @staticmethod\n def _gotResult(result, request):\n \"\"\"Render the result page.\n\n This staticmethod, i.e. function inside a class without\n access to `self` is added as a callback.\n \"\"\"\n request.write('''\n \n Pi with Twisted\n \n

\n Twisted - Result of Monte Carlo Calculation of Pi\n

\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
<tr><td>Number of iterations</td><td>%(iterations)d</td></tr>
<tr><td>Pi by Monte Carlo Method</td><td>%(pi)15.12f</td></tr>
<tr><td>Exact pi</td><td>%(math_pi)15.12f</td></tr>
<tr><td>Calculation time</td><td>%(duration)15.3f seconds</td></tr>
\n \n \n ''' % result) #6\n request.finish()\n\n\ndef main():\n \"\"\"Setup and start the server.\n \"\"\"\n from twisted.internet import reactor\n root = resource.Resource()\n root.putChild('', EntryPage())\n root.putChild('calcpi', PiPost())\n site = server.Site(root)\n reactor.listenTCP(8080, site)\n reactor.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gansell/python","sub_path":"python examples/network/twisted/http/nonblocking_post.py","file_name":"nonblocking_post.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"15609878064","text":"import pickle\nimport pandas as pd\nimport argparse\n\n\ndef highlight_max(s):\n '''\n highlight the maximum in a Series yellow.\n '''\n is_max = s == s.max()\n return ['background-color: yellow' if v else '' for v in is_max]\n\n\ndef load_record_dict(record_dict_fn):\n print(record_dict_fn)\n with open(record_dict_fn, 'rb') as f:\n record_dict = pickle.load(f)\n data = {'epoch time': [time[1] for time in record_dict['epoch_time']],\n 'train loss': [loss[1] for loss in record_dict['train_loss']],\n 'AP': [abs(ap[1][-1]) for ap in record_dict['validation_loss']]\n }\n print(data)\n# df = pd.DataFrame(data)\n# df.style.apply(highlight_max)\n# print(df)\n\ndef load_options_dict(options_dict_fn):\n print(options_dict_fn)\n with open(options_dict_fn, 'rb') as f:\n options_dict = pickle.load(f)\n\n# df = pd.DataFrame(options_dict, index=0)\n for key, value in options_dict.items():\n print(str(key)+ \": \" +str(value))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--rec_dict', type=str)\n parser.add_argument('--opt_dict', type=str) \n\n args = parser.parse_args()\n\n if args.rec_dict is not None:\n load_record_dict(args.rec_dict)\n\n if args.opt_dict is not None:\n load_options_dict(args.opt_dict)\n","repo_name":"christiaanjacobs/nchlt_awe","sub_path":"embeddings/read_stats.py","file_name":"read_stats.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"1547023368","text":"import math\n\n\ndef main_():\n n, k, q = map(int, input().split())\n array = list(map(int, input().split()))\n for i in range(q):\n queries = list(map(int, input().split()))\n if queries[0] == 1:\n l, r = queries[1] - 1, queries[2]\n product = 1\n for j in range(l, r):\n product *= array[j]\n special_value = math.pow(product, 1 / k)\n if math.ceil(special_value) == math.floor(special_value):\n print(\"Yes\")\n else:\n print(\"No\")\n elif queries[0] == 2:\n l, r, x, y = queries[1] - 1, queries[2], queries[3], queries[4]\n for j in range(l, r):\n array[j] = array[j] * math.pow(x, y)\n elif queries[0] == 3:\n l, r, x = queries[1] - 1, queries[2], queries[3]\n for j in range(l, r):\n array[j] = x\n\n\nmain_()\n","repo_name":"shivam221098/hackerearth-codes","sub_path":"k th root subarray.py","file_name":"k th root subarray.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"75047784526","text":"def solution(msg):\n answer = []\n dictionary = ['ㅁ', 'A', 'B', 'C', 'D', 'E', 'F',\n 'G', 'H', 'I', 'J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S',\n 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n message = list(msg + '0')\n temp = ''\n for char in message:\n if temp + char not in dictionary:\n dictionary.append(temp + char)\n 
answer.append(dictionary.index(temp))\n temp = char\n else:\n temp += char\n print(answer)\n print(dictionary)\n return answer\n\n\nsolution('KAKAO')\nsolution('TOBEORNOTTOBEORTOBEORNOT')\nsolution('ABABABABABABABAB')\n","repo_name":"YanZisuka/ALgorithm_sTUDY","sub_path":"LimJaeHyun/압축.py","file_name":"압축.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"2908577621","text":"from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nimport nltk\nimport os\n\ndef create_title(text):\n #nltk.download('punkt')\n\n tokenizer = AutoTokenizer.from_pretrained(\"fabiochiu/t5-small-medium-title-generation\") # czearing/article-title-generator\n model = AutoModelForSeq2SeqLM.from_pretrained(\"fabiochiu/t5-small-medium-title-generation\")# czearing/article-title-generator\n\n\n inputs = [\"summarize: \" + text]\n\n inputs = tokenizer(inputs, truncation=True, return_tensors=\"pt\")\n output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=10, max_length=64)\n decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]\n predicted_title = nltk.sent_tokenize(decoded_output.strip())[0]\n return predicted_title\n# Conversational AI: The Future of Customer Service\n","repo_name":"MuratCelik3506/NLP_HealthCare","sub_path":"TitleGeneration.py","file_name":"TitleGeneration.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"29544898372","text":"import unittest\nfrom cpyroot import *\nimport time\n\nfile1 = 'test1.root'\nfile2 = 'test2.root'\n\nclass TreeStackTestCase(unittest.TestCase):\n\n def setUp(self):\n self.t1 = Chain(file1)\n self.t2 = Chain(file2)\n \n def test_1(self):\n self.assertTrue(self.t1)\n self.assertTrue(self.t2)\n stack = TreeStack('MyStack')\n stack.add('bulk', self.t1, sBlue)\n stack.add('peak', self.t2, sRed)\n stack.project('x','1', 100, -5, 5)\n stack.draw()\n gPad.Update()\n time.sleep(5)\n # stack.histsum.Draw()\n # gPad.Update()\n # time.sleep(1)\n \n\nif __name__ == '__main__':\n from testtree import create_tree\n import os\n if not os.path.isfile(file1):\n create_tree(file1, 0, 1, 5000)\n if not os.path.isfile(file2):\n create_tree(file2, 1, 0.2, 5000)\n unittest.main() \n","repo_name":"cbernet/cpyroot","sub_path":"testing/test_treestack.py","file_name":"test_treestack.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"36676483874","text":"import asyncio, discord, dotenv\nfrom discord.ext import commands\nimport ExplodingDice, Cards, Chips\n\nTOKEN = dotenv.get_key(\".env\", \"TOKEN\")\n\nIntents = discord.Intents.default()\nIntents.message_content = True\n\nbot = discord.ext.commands.Bot(command_prefix=\"!\", intents=Intents)\nbot.help_command = commands.MinimalHelpCommand()\n\n\n@bot.event\nasync def on_ready():\n print(f\"We have logged in as {bot.user}\")\n\n\nasync def main():\n await Chips.setup(bot)\n await ExplodingDice.setup(bot)\n await Cards.setup(bot)\n await bot.start(TOKEN)\n\n\nasync def close(bot):\n await bot.close()\n\n\ntry:\n asyncio.run(main())\nexcept KeyboardInterrupt:\n cog1 = bot.get_cog(\"Chips\")\n cog1.save()\n 
asyncio.run(close(bot))\n","repo_name":"BlakePR/DeadlandsDiscordBot","sub_path":"DeadlandsBot.py","file_name":"DeadlandsBot.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"37210763603","text":"pe_1 = {\n 'first_name': 'Daniel', \n 'last_name':'mashmaniuk', \n 'age' : 17,\n 'city' : 'mokrets',\n }\npe_2 = {\n 'first_name': 'Anna', \n 'last_name':'tomashuk', \n 'age' : 14,\n 'city' : 'dulibu',\n }\npe_3 = {\n 'first_name': 'vlad', \n 'last_name':'borovets', \n 'age' : 15,\n 'city' : 'mokrets',\n }\npeoples = (pe_1, pe_2,pe_3)\nfor people in peoples: \n print(people)\n","repo_name":"DanielMashmaniuk/python","sub_path":"peoples_R6.py","file_name":"peoples_R6.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"35185295747","text":"import multiprocessing\nfrom multiprocessing import Process, Queue\nimport time \nfrom indicators import kc\nimport requests\nfrom pandas import DataFrame, Series\nfrom finta import TA\nfrom finta.utils import to_dataframe\nfrom notifications.twilio_notification import TwilioNotifications\nimport datetime\nimport pandas as pd\n\nclass TradingAlarmProcess:\n def __init__(self, alarm):\n self.alarm = alarm\n self.status = 'inactive'\n self.main_queu = Queue()\n self.tickerSymbol = alarm['tickerSymbol']\n self.frequencyType = 'minute'\n if 'day' in alarm['chartPeriod']:\n self.frequencyType = 'daily' \n ##TODO add other frequencies\n ##For now it works only with min 1,5,10,15,30 and 1 day\n self.params = ( \n ('apikey', alarm['ameritrade_key']), \n ('periodType', 'day'), \n ('period', '1'), \n ('frequencyType', 'minute'), \n ('frequency', alarm['chartPeriod'][0] ), \n ) \n self.headers = { \n 'Authorization': '', \n } \n self.price_history_request_url =\\\n 'https://api.tdameritrade.com/v1/marketdata/' + self.tickerSymbol\\\n + '/pricehistory' \n self.set_indicator_function() \n self.active = True\n self.condition = True \n self.start_alarm()\n\n def set_indicator_function(self ):\n indicator = self.alarm['indicator']\n if indicator == 'Keltner Channel':\n self.indicator_main = kc,\n\n def start_alarm(self):\n self.process = Process(target=self.main_alarm)\n self.process.start()\n\n\n def main_alarm(self):\n while self.active: \n #Currently for minutes only 1,5,10,15,30 \n time.sleep(5 * int(self.alarm['chartPeriod'][0] ))\n response = requests.get(self.price_history_request_url, headers=self.headers, params=self.params)\n df = pd.DataFrame(response.json()['candles'])\n kc = TA.KC(df)\n kc_tail = kc.tail(2)\n df_tail = df.tail(2)\n comparing = ''\n comparing_av = ''\n if self.alarm['kcband'] == 'Upper':\n comparing = kc_tail.KC_UPPER.values\n elif self.alarm['kcband'] == 'Lower':\n comparing = kc_tail.KC_LOWER.values\n if self.alarm['price'] == 'close':\n comparing_av = df_tail.close\n elif self.alarm['price'] == 'high':\n comparing_av = df_tail.high\n elif self.alarm['price'] == 'low':\n comparing_av = df_tail.low\n elif self.alarm['price'] == 'open':\n comparing_av = df_tail.open\n if self.alarm['crossingType'] == 'Above':\n if comparing_av.values[0] >= comparing[0] and comparing_av.values[1] <= comparing[1]:\n message == self.tickerSymbol + ' / ' + self.alarm['chartPeriod'] +\\\n ' input ' + self.alarm['title'] + ' has been met - '+\\\n datetime.datetime.fromtimestamp(df_tail.datetime.values[1]).strftime(\"%H:%M:%S\")\n \n elif self.alarm['crossingType'] == 
'Below':\n if comparing_av.values[0] <= comparing[0] and comparing_av.values[1] >= comparing[1]:\n message == self.tickerSymbol + ' / ' + self.alarm['chartPeriod'] +\\\n ' input ' + self.alarm['title'] + ' has been met - '+\\\n datetime.datetime.fromtimestamp(df_tail.datetime.values[1]).strftime(\"%H:%M:%S\")\n print('Alarm: ' + self.alarm['title'] + ' running')\n\n def create_notifications(self, message):\n notifications = TwilioNotifications(self.alarm['twilio_sid'], self.alarm['twilio_key'], message, self.alarm['from_phone'], self.alarm['to_phones'])\n if self.alarm['phone_call']:\n notifications.send_call()\n if self.alarm['sms']:\n pass\n #notifications.send_sms()\n\n def main_stop_alarm(self):\n self.active = False\n self.process.join()\n\n\n","repo_name":"eilst/notifications-bot","sub_path":"backend/trading_alarms/trading_alarm_process.py","file_name":"trading_alarm_process.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"16923727797","text":"class Solution:\n def largestLocal(self, grid: List[List[int]]) -> List[List[int]]:\n n = len(grid)\n maxLocal = [[0]*(n-2) for i in range(n-2)]\n\n for i in range(n-2):\n for j in range(n-2):\n\n for k in range(i, i+3):\n for l in range(j, j+3):\n maxLocal[i][j] = max(grid[k][l], maxLocal[i][j])\n\n return maxLocal","repo_name":"nahubn1/A2sv","sub_path":"2373-largest-local-values-in-a-matrix/2373-largest-local-values-in-a-matrix.py","file_name":"2373-largest-local-values-in-a-matrix.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"32177197944","text":"\"\"\"\npython implementation of Flexihash; all the stuff\nin one .py file because it's pretty short\n\"\"\"\nimport zlib\nimport hashlib\nimport bisect\nfrom typing import List, Tuple, Optional, Any, Dict, Union\n\n\nPosition = Any # Any orderable type - md5sum (bytes), crc32 (int)\nTarget = Union[str, bytes]\nResource = Union[str, bytes]\n\n\nclass FlexihashException(Exception):\n pass\n\n\nclass Hasher(object):\n def hash(self, value: Union[Resource, Target]) -> Position:\n raise NotImplementedError()\n\n\nclass Md5Hasher(Hasher):\n def hash(self, value: Union[Resource, Target]) -> Position:\n if hasattr(value, \"encode\"):\n value = value.encode()\n return hashlib.md5(value).hexdigest()\n\n\nclass Crc32Hasher(Hasher):\n def hash(self, value: Union[Resource, Target]) -> Position:\n if hasattr(value, \"encode\"):\n value = value.encode()\n return zlib.crc32(value)\n\n\nclass Flexihash(object):\n def __init__(self, hasher: Optional[Hasher] = None, replicas: Optional[int] = None):\n self.replicas = replicas or 64\n self.hasher = hasher or Crc32Hasher()\n self.positionToTarget: Dict[Position, Target] = {}\n self.positionToTargetSorted: List[Tuple[Position, Target]] = []\n self.targetToPositions = {}\n\n def addTarget(self, target: Target, weight: int = 1) -> \"Flexihash\":\n if target in self.targetToPositions:\n raise FlexihashException(\"Target '%s' already exists\" % target)\n\n self.targetToPositions[target] = []\n\n for i in range(0, self.replicas * weight):\n position = self.hasher.hash(target + str(i))\n self.positionToTarget[position] = target\n self.targetToPositions[target].append(position)\n\n self.positionToTargetSorted = []\n\n return self\n\n def addTargets(self, targets: List[Target]) -> \"Flexihash\":\n for target in targets:\n self.addTarget(target)\n\n return self\n\n 
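# Hedged usage sketch (not part of the original module; the target names and lookup key are hypothetical, for illustration only):\n    #   fh = Flexihash()                                  # defaults: Crc32Hasher, 64 replicas per target\n    #   fh.addTargets([\"cache-1\", \"cache-2\", \"cache-3\"])\n    #   fh.lookup(\"user:42\")         # returns the same target on every call while the ring is unchanged\n    #   fh.lookupList(\"user:42\", 2)  # that target plus the next distinct target on the ring\n    #   fh.removeTarget(\"cache-2\")   # only keys that hashed to cache-2's positions remap\n\n    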
def removeTarget(self, target: Target) -> \"Flexihash\":\n if target not in self.targetToPositions:\n raise FlexihashException(\"Target '%s' does not exist\" % target)\n\n for position in self.targetToPositions[target]:\n del self.positionToTarget[position]\n\n del self.targetToPositions[target]\n\n self.positionToTargetSorted = []\n\n return self\n\n def getAllTargets(self) -> List[Target]:\n return sorted(list(self.targetToPositions.keys()))\n\n def lookup(self, resource: Resource) -> Target:\n targets = self.lookupList(resource, 1)\n if not targets:\n raise FlexihashException(\"No targets exist\")\n return targets[0]\n\n def lookupList(self, resource: Resource, requestedCount: int) -> List[Target]:\n if not requestedCount:\n raise FlexihashException(\"Invalid count requested\")\n\n if len(self.targetToPositions) == 0:\n return []\n\n if len(self.targetToPositions) == 1:\n return [list(self.positionToTarget.values())[0]]\n\n resourcePosition = self.hasher.hash(resource)\n\n ptts = self.sortPositionTargets()\n\n offset = bisect.bisect_left(ptts, (resourcePosition, \"\"))\n n_targets = len(self.targetToPositions)\n\n results = []\n for _, value in self.offsetIterator(ptts, offset):\n if value not in results:\n results.append(value)\n\n if len(results) == requestedCount or len(results) == n_targets:\n return results\n\n return results\n\n def offsetIterator(self, lst, offset):\n l = len(lst)\n for n in range(l):\n yield lst[(n + offset) % l]\n\n def sortPositionTargets(self) -> List[Tuple[Position, Target]]:\n if not self.positionToTargetSorted:\n self.positionToTargetSorted = sorted(self.positionToTarget.items())\n return self.positionToTargetSorted\n","repo_name":"shish/flexihash-py","sub_path":"flexihash/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"13167016360","text":"import socket\r\nimport sys\r\n\r\ndef get_equation(res):\r\n result = ''\r\n for i in res:\r\n if i.isdigit() or i == '+':\r\n result += i\r\n return str(eval(result)).encode()\r\n\r\ndef main():\r\n data = ''\r\n hostname, port = sys.argv[1], int(sys.argv[2])\r\n\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.connect((hostname, port))\r\n data = s.recv(1024)\r\n while 'KSI{' not in data.decode().rstrip() and 'Hmm' not in data.decode().rstrip():\r\n response = get_equation(data.decode().rstrip())\r\n s.sendall(response)\r\n data = s.recv(1024)\r\n else:\r\n print(data.decode().rstrip())\r\n\r\nif __name__ == '__main__':\r\n try:\r\n sys.argv[1]\r\n except:\r\n exit()\r\n main()\r\n\r\n\r\n\r\n#KSI{jak_je_tedy_sob_ve_dne_v_noci,_ktere_nepretrzite_musely_by_pocitat}","repo_name":"ValachPatrik/telnetfun","sub_path":"2b.py","file_name":"2b.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"2110597267","text":"# A test case for building up to a Keyforge prototype,\r\n# testing random.shuffle(X) to shuffle decks and play war.\r\n# I am playing with a more complicated ruleset of war than just the base version.\r\n# Glitch found! There's something wrong with how I'm handling holdings!\r\n# Fixed that glitch! Now I've got a glitch in how I'm handling a deck with less than 5 cards.\r\n # It's because I haven't handled the edge case of a player's last card causing a war!\r\n# Next glitch! 
If a player's deck is empty, it just loops trying to shuffle their deck instead of declaring a winner!\r\n # I believe the problem was in shuffle(), with an if statement that checked if discard == 0 instead of len(discard)\r\n# Now it ended, but only after losing 16 cards, which means I have a different problem with holding, I think only if you get a double war into a a double war.\r\n # Confirmed with a special built test case.\r\n # Now I realized it's not a glitch, it's just because my test statements tracking number of cards don't account for the cards in holding.\r\n# Also it turns out return doesn't return strings, fixed game winning statements.\r\n\r\nimport random\r\n\r\ndeck = [[15, 'A of Spades'], [14, 'K of Spades'], [13, 'Q of Spades'], [12, 'J of Spades'], [11, '10 of Spades'], [10, '9 of Spades'], [9, '8 of Spades'], [8, '7 of Spades'], [7, '6 of Spades'], [6, '5 of Spades'], [5, '4 of Spades'], [4, '3 of Spades'], [3, '2 of Spades'], \\\r\n [15, 'A of Diamonds'], [14, 'K of Diamonds'], [13, 'Q of Diamonds'], [12, 'J of Diamonds'], [11, '10 of Diamonds'], [10, '9 of Diamonds'], [9, '8 of Diamonds'], [8, '7 of Diamonds'], [7, '6 of Diamonds'], [6, '5 of Diamonds'], [5, '4 of Diamonds'], [4, '3 of Diamonds'], [3, '2 of Diamonds'], \\\r\n [15, 'A of Clubs'], [14, 'K of Clubs'], [13, 'Q of Clubs'], [12, 'J of Clubs'], [11, '10 of Clubs'], [10, '9 of Clubs'], [9, '8 of Clubs'], [8, '7 of Clubs'], [7, '6 of Clubs'], [6, '5 of Clubs'], [5, '4 of Clubs'], [4, '3 of Clubs'], [3, '2 of Clubs'], \\\r\n [15, 'A of Hearts'], [14, 'K of Hearts'], [13, 'Q of Hearts'], [12, 'J of Hearts'], [11, '10 of Hearts'], [10, '9 of Hearts'], [9, '8 of Hearts'], [8, '7 of Hearts'], [7, '6 of Hearts'], [6, '5 of Hearts'], [5, '4 of Hearts'], [4, '3 of Hearts'], [3, '2 of Hearts']]\r\n\r\n# print (len(deck))\r\n\r\ndef shuffle(discard):\r\n '''Shuffles discard piles, and if a discard pile is empty, declares a winner.'''\r\n if len(discard) == 0:\r\n return True\r\n else:\r\n print(\"Shuffling!\")\r\n print('')\r\n random.shuffle(discard)\r\n return discard\r\n\r\ndef empty(deckA, deckB, discardA, discardB):\r\n '''Handles an empty deck then calls flip() or declares a winner'''\r\n if len(deckA) == 0:\r\n print('Player A\\'s deck is empty. Shuffling!')\r\n print('')\r\n # print(\"Discard A contains \", len(discardA), \" cards.\")\r\n # print(\"Discard B contains \", len(discardB), \" cards.\")\r\n # print(\"Total cards: \", (len(discardA) + len(discardB)))\r\n if shuffle(discardA) == True:\r\n print('Player A has no cards to shuffle.')\r\n print('')\r\n print('Player B wins the game!')\r\n return\r\n else:\r\n deckA.extend(shuffle(discardA))\r\n discardA = []\r\n # print('Called from empty deck A: Discard B (', len(discardB), ') + Discard A (', len(discardA), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardB) + len(deckA) + len(deckB)))\r\n flip(deckA, deckB, discardA, discardB)\r\n elif len(deckB) == 0:\r\n print('Player B\\'s deck is empty. 
Shuffling!')\r\n print('')\r\n # print(\"Discard A contains \", len(discardA), \" cards.\")\r\n # print(\"Discard B contains \", len(discardB), \" cards.\")\r\n # print(\"Total cards (first time only): \", (len(deckA) + len(discardB)))\r\n if shuffle(discardB) == True:\r\n print('Player B has no cards to shuffle.')\r\n print('')\r\n print('Player A wins the game!')\r\n return\r\n else:\r\n deckB.extend(shuffle(discardB))\r\n discardB = []\r\n # print('Called from empty deck B: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardA) + len(deckA) + len(deckB)))\r\n flip(deckA, deckB, discardA, discardB)\r\n else:\r\n if len(deckA) < 5:\r\n deckA.extend(shuffle(discardA))\r\n discardA = []\r\n # print('Called from <5 Deck A: Discard B (', len(discardB), ') + Discard A (', len(discardA), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardB) + len(discardA) + len(deckA) + len(deckB)))\r\n flip(deckA, deckB, discardA, discardB)\r\n elif len(deckB) < 5:\r\n deckB.extend(shuffle(discardB))\r\n discardB = []\r\n # print('Called from <5 Deck B: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardA) + len(discardB) + len(deckA) + len(deckB)))\r\n flip(deckA, deckB, discardA, discardB)\r\n\r\ndef tie(deckA, deckB, discardA, discardB, holding = []):\r\n '''Handles ties, then passes back off to flip(), or to itself'''\r\n print('')\r\n print(\"War!\")\r\n print('')\r\n # print('Called from War: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardA) + len(discardB) + len(deckA) + len(deckB)))\r\n #base case empty deck, leading to shuffle()\r\n if len(deckA) == 0 or len(deckB) == 0:\r\n # print(\"Base case\")\r\n empty(deckA, deckB, discardA, discardB)\r\n #if the deck has less than 5 cards and discard contains something, call empty()\r\n elif (len(deckA) < 5 and len(discardA) > 0) or (len(deckB) < 5 and len(discardB) > 0):\r\n # print(\"Less than 5 but discard not empty.\")\r\n # print('Called from tie() before sending to <5 shuffle(): Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', len(deckA), ') + Deck B (', len(deckB), ') = ', (len(discardA) + len(discardB) + len(deckA) + len(deckB)))\r\n empty(deckA, deckB, discardA, discardB)\r\n #now if deck has less than 5 cards discard will be empty\r\n elif len(deckA) < 5: #impossible for both to be\r\n print('Player A reveals a ', deckA[(len(deckA)-1)][1])\r\n print('Player B reveals a ', deckB[4][1])\r\n print('')\r\n #find length of deckA - if it's 1, because discard will be empty, they lose by forfeit\r\n if len(deckA) == 1:\r\n print('')\r\n print(\"Player A's deck has no remaining cards with which to fight this war and is forced to forfeit.\")\r\n print('')\r\n print(\"Player B wins the game.\")\r\n return\r\n elif deckA[(len(deckA)-1)][0] > deckB[4][0]:\r\n print('Player A wins this war!')\r\n print('')\r\n #put cards in index 0-4 into winner's discard and call flip()\r\n winnings = deckA[0:] + deckB[0:5]\r\n discardA.extend(winnings)\r\n # print(len(discardA), 'in discard A after winnings.')\r\n discardA.extend(holding)\r\n # print(len(discardA), 'in discard A after holdings.')\r\n if len(deckB) >= 6:\r\n # print('Called from Player A winnings if B has 6+ cards: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', 0, 
') + Deck B (', (len(deckB) - 5), ') = ', (len(discardA) + len(discardB) + 0 + len(deckB) - 5))\r\n                flip([], deckB[5:], discardA, discardB)\r\n            else:\r\n                # print('Called from Player A winnings if B has <6 cards: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', 0, ') + Deck B (', 0, ') = ', (len(discardA) + len(discardB) + 0 + 0))\r\n                flip([], [], discardA, discardB)\r\n        elif deckA[(len(deckA)-1)][0] < deckB[4][0]:\r\n            print(\"Player B wins the game!\")\r\n            return\r\n        else:\r\n            print('')\r\n            print(\"Player A has run out of cards and is forced to forfeit\")\r\n            print('')\r\n            print(\"Player B wins the game!\")\r\n            return\r\n    elif len(deckB) < 5: #impossible for both to be\r\n        print('Player A reveals a ', deckA[4][1])\r\n        print('Player B reveals a ', deckB[(len(deckB)-1)][1])\r\n        print('')\r\n        if len(deckB) == 1:\r\n            print('')\r\n            print(\"Player B's deck has no remaining cards with which to fight this war and is forced to forfeit.\")\r\n            print('')\r\n            print(\"Player A wins the game!\")\r\n            return\r\n        elif deckB[(len(deckB)-1)][0] > deckA[4][0]:\r\n            print('Player B wins this war!')\r\n            print('')\r\n            #put cards in index 0-4 into winner's discard and call flip()\r\n            winnings = deckA[0:5] + deckB[0:]\r\n            discardB.extend(winnings)\r\n            # print(len(discardB), 'in discard B after winnings.')\r\n            discardB.extend(holding)\r\n            # print(len(discardB), 'in discard B after holdings.')\r\n            if len(deckA) >= 6:\r\n                # print('Called from Player B winnings if A has 6+ cards: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', (len(deckA) - 5), ') + Deck B (', 0, ') = ', (len(discardA) + len(discardB) + 0 + len(deckA) - 5))\r\n                flip(deckA[5:], [], discardA, discardB)\r\n            else:\r\n                # print('Called from Player B winnings if A has 6+ cards: Discard A (', len(discardA), ') + Discard B (', len(discardB), ') + Deck A (', 0, ') + Deck B (', 0, ') = ', (len(discardA) + len(discardB) + 0 + 0))\r\n                flip([], [], discardA, discardB)\r\n        elif deckB[(len(deckB)-1)][0] < deckA[4][0]:\r\n            print(\"Player A wins the game!\")\r\n            return\r\n        else:\r\n            print('')\r\n            print(\"Player B has run out of cards and is forced to forfeit\")\r\n            print('')\r\n            print(\"Player A wins the game!\")\r\n            return\r\n    elif deckA[4][0] == deckB[4][0]: \r\n        print('Player A reveals a ', deckA[4][1])\r\n        print('Player B reveals a ', deckB[4][1])\r\n        print('')\r\n        print('Double War!')\r\n        print('')\r\n        #put cards into holding and call tie() again\r\n        holdings = deckA[0:4] + deckB[0:4]\r\n        holding.extend(holdings)\r\n        print(holding)\r\n        tie(deckA[4:], deckB[4:], discardA, discardB, holding)\r\n    elif deckA[4][0] > deckB[4][0]:\r\n        print('Player A reveals a ', deckA[4][1])\r\n        print('Player B reveals a ', deckB[4][1])\r\n        print('')\r\n        print('Player A wins this war!')\r\n        print('')\r\n        #put cards in index 0-4 into winner's discard\r\n        winnings = deckA[0:5] + deckB[0:5]\r\n        discardA.extend(winnings)\r\n        # print(len(discardA), 'in discard A after winnings.')\r\n        discardA.extend(holding)\r\n        # print(len(discardA), 'in discard A after holdings.')\r\n        if len(deckA) >= 6 and len(deckB) >= 6:\r\n            flip(deckA[5:], deckB[5:], discardA, discardB)\r\n        elif len(deckA) < 6:\r\n            flip([], deckB[5:], discardA, discardB)\r\n        elif len(deckB) < 6:\r\n            flip(deckA[5:], [], discardA, discardB)\r\n        else: #if both are less than 6\r\n            flip([], [], discardA, discardB)\r\n    elif deckA[4][0] < deckB[4][0]: \r\n        print('Player A reveals a ', deckA[4][1])\r\n        print('Player B reveals a ', deckB[4][1])\r\n        print('')\r\n        print('Player B wins 
this war!')\r\n print('')\r\n #put cards in index 0-4 into winner's discard\r\n winnings = deckA[0:5] + deckB[0:5]\r\n discardB.extend(winnings)\r\n # print(len(discardB), 'in discard B after winnings.')\r\n discardB.extend(holding)\r\n # print(len(discardB), 'in discard B after holdings.')\r\n if len(deckA) >= 6 and len(deckB) >= 6:\r\n flip(deckA[5:], deckB[5:], discardA, discardB)\r\n elif len(deckA) < 6:\r\n flip([], deckB[5:], discardA, discardB)\r\n elif len(deckB) < 6:\r\n flip(deckA[5:], [], discardA, discardB)\r\n else: #if both are less than 6\r\n flip([], [], discardA, discardB)\r\n else:\r\n print(\"You found a different else statment that should never have been found.\")\r\n return\r\n\r\n\r\ndef flip(deckA, deckB, discardA = [], discardB = []):\r\n '''The battles of the actual game'''\r\n #base case empty deck, calls empty(), which either calls shuffle() or declares a winner.\r\n if len(deckA) == 0 or len(deckB) == 0:\r\n empty(deckA, deckB, discardA, discardB) \r\n #if flipped cards have same value calls tie(), which handles everything else\r\n elif deckA[0][0] == deckB[0][0]:\r\n print('Player A reveals a ', deckA[0][1])\r\n print('Player B reveals a ', deckB[0][1])\r\n print('')\r\n tie(deckA, deckB, discardA, discardB, [])\r\n elif deckA[0][0] > deckB[0][0]:\r\n print('Player A reveals a ', deckA[0][1])\r\n print('Player B reveals a ', deckB[0][1])\r\n print('')\r\n print('Player A wins this battle!')\r\n print('')\r\n # note to self: need append over extend in this case\r\n discardA.append(deckA[0])\r\n discardA.append(deckB[0])\r\n if len(deckA) == 1 and len(deckB) == 1:\r\n flip([], [], discardA, discardB)\r\n elif len(deckA) == 1:\r\n flip([], deckB[1:], discardA, discardB)\r\n elif len(deckB) == 1:\r\n flip(deckA[1:], [], discardA, discardB)\r\n else:\r\n flip(deckA[1:], deckB[1:], discardA, discardB)\r\n elif deckA[0][0] < deckB[0][0]:\r\n print('Player A reveals a ', deckA[0][1])\r\n print('Player B reveals a ', deckB[0][1])\r\n print('')\r\n print('Player B wins this battle!')\r\n print('')\r\n discardB.append(deckA[0])\r\n discardB.append(deckB[0])\r\n if len(deckA) == 1 and len(deckB) == 1:\r\n flip([], [], discardA, discardB)\r\n elif len(deckA) == 1:\r\n flip([], deckB[1:], discardA, discardB)\r\n elif len(deckB) == 1:\r\n flip(deckA[1:], [], discardA, discardB)\r\n else:\r\n flip(deckA[1:], deckB[1:], discardA, discardB)\r\n else:\r\n print(\"Congratulations, you've somehow broken the program and found a secret else statement!\")\r\n return\r\n\r\n\r\ndef war(deck):\r\n '''Deals cards and starts the match by calling flip()'''\r\n # random.shuffle(deck)\r\n #deckA = deck[0:26]\r\n deckA = deck[1::2]\r\n #deckB = deck[26:37] + deck[50:52] + deck[37:50]\r\n deckB = deck[0::2]\r\n # print(len(deckA))\r\n # print(len(deckB))\r\n yes = input('Flip the next card? 
')\r\n print('')\r\n if yes == 'Y' or yes == 'Yes' or yes == 'yes' or yes == 'y' or yes == '':\r\n flip(deckA, deckB)\r\n else:\r\n print(\"Well, ok then.\")\r\n return\r\n\r\nwar(deck)","repo_name":"doomnonius/keyforging","sub_path":"war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":14999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"30937344731","text":"import argparse\nimport xml.etree.ElementTree as ET\nimport logging\nimport sys\nfrom datetime import datetime\nfrom os import path\n\nif not __name__ == \"__main__\":\n sys.stderr.write(\"File must be run as the main module\")\n\n# Logging\nFORMAT = '%(asctime)s %(levelname)s %(message)s'\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\nsh = logging.StreamHandler(stream=sys.stdout)\nsf = logging.Formatter(fmt=FORMAT)\nsh.setFormatter(sf)\nlog.addHandler(sh)\n\n# Arguments\nparser = argparse.ArgumentParser(prog=\"SVG resizer\")\nparser.add_argument(\"file\", help=\"SVG file\")\nparser.add_argument(\"w\", help=\"New width\")\nparser.add_argument(\"h\", help=\"New height\")\nparser.add_argument(\"-o\", \"--output\", help=\"Output file name\")\nparser.add_argument(\"-r\", \"--round\", help=\"max digits to round to\",\n action=\"store\", default=2, type=int)\nparser.add_argument(\"-s\", \"--stroke\", help=\"force new stroke width\",\n action=\"store\", default=None, type=int)\nargs = parser.parse_args()\n\nFILE = args.file\nOUTFILE = args.output\nW = int(args.w)\nH = int(args.h)\nROUND = args.round\nSTROKE = args.stroke\n\nlog.debug(\"Starting\")\n\nif not path.exists(FILE):\n log.error(\"File does not exists\")\n exit(1)\n\noutfile = OUTFILE if OUTFILE is not None else \"{}_resized.svg\".format(\n path.splitext(FILE)[0])\n\nif path.exists(outfile):\n log.error(\"Outfile exists already\")\n exit(1)\n\ntree = ET.parse(FILE)\nroot = tree.getroot()\n\nif \"svg\" not in root.tag or \"width\" not in root.attrib or \"height\" not in root.attrib:\n log.error(\"Invalid SVG\")\n exit(1)\n\nROOT_WIDTH = int(root.attrib[\"width\"])\nROOT_HEIGHT = int(root.attrib[\"height\"])\n\n# Calculate scaling ratio\nRATIO_WIDTH = W / ROOT_WIDTH\nRATIO_HEIGHT = H / ROOT_HEIGHT\n\nlog.debug(\"W ratio {}\".format(RATIO_WIDTH))\nlog.debug(\"H ratio {}\".format(RATIO_HEIGHT))\n\n\ndef _set_new(elem, attr_name, ratio):\n old_val_str = elem.attrib[attr_name]\n old_val = float(old_val_str)\n new_val = old_val * ratio\n if '.' 
in old_val_str or ',' in old_val_str:\n new_val = \"{0:.2f}\".format(round(new_val, 2))\n else:\n new_val = round(new_val)\n elem.set(attr_name, str(new_val))\n log.debug(\"Resizing '{}' from '{}' to '{}'\".format(\n attr_name, old_val, new_val))\n\n\ndef _resize_element(elem):\n if elem is None:\n return\n tag_name = elem.tag\n log.info(\"Processing element {}\".format(tag_name))\n if \"x\" in elem.attrib: _set_new(elem, \"x\", RATIO_WIDTH)\n if \"cx\" in elem.attrib: _set_new(elem, \"cx\", RATIO_WIDTH)\n if \"x1\" in elem.attrib: _set_new(elem, \"x1\", RATIO_WIDTH)\n if \"x2\" in elem.attrib: _set_new(elem, \"x2\", RATIO_WIDTH)\n if \"y\" in elem.attrib: _set_new(elem, \"y\", RATIO_HEIGHT)\n if \"cy\" in elem.attrib: _set_new(elem, \"cy\", RATIO_HEIGHT)\n if \"y1\" in elem.attrib: _set_new(elem, \"y1\", RATIO_HEIGHT)\n if \"y2\" in elem.attrib: _set_new(elem, \"y2\", RATIO_HEIGHT)\n if \"width\" in elem.attrib: _set_new(elem, \"width\", RATIO_WIDTH)\n if \"height\" in elem.attrib: _set_new(elem, \"height\", RATIO_HEIGHT)\n if \"stroke-width\" in elem.attrib: _set_new(elem, \"stroke-width\", RATIO_WIDTH)\n if \"r\" in elem.attrib: _set_new(elem, \"r\", RATIO_WIDTH)\n # Recurse\n for child in elem:\n _resize_element(child)\n\n\n_resize_element(root)\nlog.info(\"Done resizing\")\n\n# Flush to a new file\nlog.info(\"Writing into {}\".format(outfile))\ntree.write(outfile)\nwith open(outfile, \"r+\") as f:\n text = f.read()\n f.seek(0)\n f.write(text.replace(\"ns0:\", \"\").replace(\":ns0\", \"\"))\n f.truncate()\nlog.info(\"All done\")\n","repo_name":"samlinz/scripts","sub_path":"resizesvg/resizesvg.py","file_name":"resizesvg.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"35572516012","text":"import logging\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple, Type, Union\n\nimport grpc\nfrom grpc import ServicerContext\n\nfrom core import utils\nfrom core.api.grpc import common_pb2, core_pb2\nfrom core.api.grpc.common_pb2 import MappedConfig\nfrom core.api.grpc.configservices_pb2 import ConfigServiceConfig\nfrom core.api.grpc.emane_pb2 import GetEmaneModelConfig\nfrom core.api.grpc.services_pb2 import (\n NodeServiceConfig,\n NodeServiceData,\n ServiceConfig,\n ServiceDefaults,\n)\nfrom core.config import ConfigurableOptions\nfrom core.emane.nodes import EmaneNet\nfrom core.emulator.data import InterfaceData, LinkData, LinkOptions, NodeOptions\nfrom core.emulator.distributed import DistributedServer\nfrom core.emulator.enumerations import LinkTypes, NodeTypes\nfrom core.emulator.session import Session\nfrom core.errors import CoreError\nfrom core.location.mobility import BasicRangeModel, Ns2ScriptedMobility\nfrom core.nodes.base import CoreNode, CoreNodeBase, NodeBase\nfrom core.nodes.docker import DockerNode\nfrom core.nodes.interface import CoreInterface\nfrom core.nodes.lxd import LxcNode\nfrom core.nodes.network import WlanNode\nfrom core.services.coreservices import CoreService\n\nWORKERS = 10\n\n\nclass CpuUsage:\n def __init__(self) -> None:\n self.stat_file: Path = Path(\"/proc/stat\")\n self.prev_idle: int = 0\n self.prev_total: int = 0\n\n def run(self) -> float:\n lines = self.stat_file.read_text().splitlines()[0]\n values = [int(x) for x in lines.split()[1:]]\n idle = sum(values[3:5])\n non_idle = sum(values[:3] + values[5:8])\n total = idle + non_idle\n total_diff = total - self.prev_total\n idle_diff = idle - self.prev_idle\n self.prev_idle = 
idle\n self.prev_total = total\n return (total_diff - idle_diff) / total_diff\n\n\ndef add_node_data(node_proto: core_pb2.Node) -> Tuple[NodeTypes, int, NodeOptions]:\n \"\"\"\n Convert node protobuf message to data for creating a node.\n\n :param node_proto: node proto message\n :return: node type, id, and options\n \"\"\"\n _id = node_proto.id\n _type = NodeTypes(node_proto.type)\n options = NodeOptions(\n name=node_proto.name,\n model=node_proto.model,\n icon=node_proto.icon,\n image=node_proto.image,\n services=node_proto.services,\n config_services=node_proto.config_services,\n canvas=node_proto.canvas,\n )\n if node_proto.emane:\n options.emane = node_proto.emane\n if node_proto.server:\n options.server = node_proto.server\n # 添加判断position,以防止grpc不传xy参数 这里也默认赋值 0.0 0.0,用于后面设置位置时能根据geo值计算xy\n # 即api修改为 跟edit node 一样 只能传xy 或 geo 另一个值默认换算。\n if node_proto.HasField(\"position\"):\n position = node_proto.position\n options.set_position(position.x, position.y)\n if node_proto.HasField(\"geo\"):\n geo = node_proto.geo\n options.set_location(geo.lat, geo.lon, geo.alt)\n return _type, _id, options\n\n\ndef link_iface(iface_proto: core_pb2.Interface) -> InterfaceData:\n \"\"\"\n Create interface data from interface proto.\n\n :param iface_proto: interface proto\n :return: interface data\n \"\"\"\n iface_data = None\n if iface_proto:\n name = iface_proto.name if iface_proto.name else None\n mac = iface_proto.mac if iface_proto.mac else None\n ip4 = iface_proto.ip4 if iface_proto.ip4 else None\n ip6 = iface_proto.ip6 if iface_proto.ip6 else None\n iface_data = InterfaceData(\n id=iface_proto.id,\n name=name,\n mac=mac,\n ip4=ip4,\n ip4_mask=iface_proto.ip4_mask,\n ip6=ip6,\n ip6_mask=iface_proto.ip6_mask,\n )\n return iface_data\n\n\ndef add_link_data(\n link_proto: core_pb2.Link,\n) -> Tuple[InterfaceData, InterfaceData, LinkOptions, LinkTypes]:\n \"\"\"\n Convert link proto to link interfaces and options data.\n\n :param link_proto: link proto\n :return: link interfaces and options\n \"\"\"\n iface1_data = link_iface(link_proto.iface1)\n iface2_data = link_iface(link_proto.iface2)\n link_type = LinkTypes(link_proto.type)\n options = LinkOptions()\n options_proto = link_proto.options\n if options_proto:\n options.delay = options_proto.delay\n options.bandwidth = options_proto.bandwidth\n options.loss = options_proto.loss\n options.dup = options_proto.dup\n options.jitter = options_proto.jitter\n options.mer = options_proto.mer\n options.burst = options_proto.burst\n options.mburst = options_proto.mburst\n options.buffer = options_proto.buffer\n options.unidirectional = options_proto.unidirectional\n options.key = options_proto.key\n return iface1_data, iface2_data, options, link_type\n\n\ndef create_nodes(\n session: Session, node_protos: List[core_pb2.Node]\n) -> Tuple[List[NodeBase], List[Exception]]:\n \"\"\"\n Create nodes using a thread pool and wait for completion.\n\n :param session: session to create nodes in\n :param node_protos: node proto messages\n :return: results and exceptions for created nodes\n \"\"\"\n funcs = []\n for node_proto in node_protos:\n _type, _id, options = add_node_data(node_proto)\n _class = session.get_node_class(_type)\n args = (_class, _id, options)\n funcs.append((session.add_node, args, {}))\n start = time.monotonic()\n results, exceptions = utils.threadpool(funcs, workers=6)\n total = time.monotonic() - start\n logging.debug(\"grpc created nodes time: %s\", total)\n return results, exceptions\n\n\ndef create_links(\n session: Session, link_protos: 
List[core_pb2.Link]\n) -> Tuple[List[NodeBase], List[Exception]]:\n \"\"\"\n Create links using a thread pool and wait for completion.\n\n :param session: session to create nodes in\n :param link_protos: link proto messages\n :return: results and exceptions for created links\n \"\"\"\n funcs = []\n for link_proto in link_protos:\n node1_id = link_proto.node1_id\n node2_id = link_proto.node2_id\n iface1, iface2, options, link_type = add_link_data(link_proto)\n args = (node1_id, node2_id, iface1, iface2, options, link_type)\n funcs.append((session.add_link, args, {}))\n start = time.monotonic()\n results, exceptions = utils.threadpool(funcs, workers=5)\n total = time.monotonic() - start\n logging.debug(\"grpc created links time: %s\", total)\n return results, exceptions\n\n\ndef edit_links(\n session: Session, link_protos: List[core_pb2.Link]\n) -> Tuple[List[None], List[Exception]]:\n \"\"\"\n Edit links using a thread pool and wait for completion.\n\n :param session: session to create nodes in\n :param link_protos: link proto messages\n :return: results and exceptions for created links\n \"\"\"\n funcs = []\n for link_proto in link_protos:\n node1_id = link_proto.node1_id\n node2_id = link_proto.node2_id\n iface1, iface2, options, link_type = add_link_data(link_proto)\n args = (node1_id, node2_id, iface1.id, iface2.id, options, link_type)\n funcs.append((session.update_link, args, {}))\n start = time.monotonic()\n results, exceptions = utils.threadpool(funcs)\n total = time.monotonic() - start\n logging.debug(\"grpc edit links time: %s\", total)\n return results, exceptions\n\n\ndef convert_value(value: Any) -> str:\n \"\"\"\n Convert value into string.\n\n :param value: value\n :return: string conversion of the value\n \"\"\"\n if value is not None:\n value = str(value)\n return value\n\n\ndef get_config_options(\n config: Dict[str, str],\n configurable_options: Union[ConfigurableOptions, Type[ConfigurableOptions]],\n) -> Dict[str, common_pb2.ConfigOption]:\n \"\"\"\n Retrieve configuration options in a form that is used by the grpc server.\n\n :param config: configuration\n :param configurable_options: configurable options\n :return: mapping of configuration ids to configuration options\n \"\"\"\n results = {}\n for configuration in configurable_options.configurations():\n value = config.get(configuration.id, configuration.default)\n config_option = common_pb2.ConfigOption(\n label=configuration.label,\n name=configuration.id,\n value=value,\n type=configuration.type.value,\n select=configuration.options,\n )\n results[configuration.id] = config_option\n for config_group in configurable_options.config_groups():\n start = config_group.start - 1\n stop = config_group.stop\n options = list(results.values())[start:stop]\n for option in options:\n option.group = config_group.name\n return results\n\n\ndef get_node_proto(session: Session, node: NodeBase) -> core_pb2.Node:\n \"\"\"\n Convert CORE node to protobuf representation.\n\n :param session: session containing node\n :param node: node to convert\n :return: node proto\n \"\"\"\n node_type = session.get_node_type(node.__class__)\n position = core_pb2.Position(\n x=node.position.x, y=node.position.y, z=node.position.z\n )\n geo = core_pb2.Geo(\n lat=node.position.lat, lon=node.position.lon, alt=node.position.alt\n )\n services = [x.name for x in node.services]\n model = node.type\n node_dir = None\n config_services = []\n if isinstance(node, CoreNodeBase):\n node_dir = node.nodedir\n config_services = [x for x in node.config_services]\n 
channel = None\n if isinstance(node, CoreNode):\n channel = node.ctrlchnlname\n emane_model = None\n if isinstance(node, EmaneNet):\n emane_model = node.model.name\n image = None\n if isinstance(node, (DockerNode, LxcNode)):\n image = node.image\n server = None # by@lk233 补充 get node 的 server 序列化参数\n if isinstance(node.server, DistributedServer):\n server = node.server.name\n return core_pb2.Node(\n id=node.id,\n name=node.name,\n emane=emane_model,\n model=model,\n type=node_type.value,\n position=position,\n geo=geo,\n services=services,\n icon=node.icon,\n image=image,\n server=server,\n config_services=config_services,\n dir=node_dir,\n channel=channel,\n canvas=node.canvas,\n )\n\n\ndef get_links(node: NodeBase):\n \"\"\"\n Retrieve a list of links for grpc to use.\n\n :param node: node to get links from\n :return: protobuf links\n \"\"\"\n links = []\n for link in node.links():\n link_proto = convert_link(link)\n links.append(link_proto)\n return links\n\n\ndef convert_iface(iface_data: InterfaceData) -> core_pb2.Interface:\n return core_pb2.Interface(\n id=iface_data.id,\n name=iface_data.name,\n mac=iface_data.mac,\n ip4=iface_data.ip4,\n ip4_mask=iface_data.ip4_mask,\n ip6=iface_data.ip6,\n ip6_mask=iface_data.ip6_mask,\n )\n\n\ndef convert_link_options(options_data: LinkOptions) -> core_pb2.LinkOptions:\n return core_pb2.LinkOptions(\n jitter=options_data.jitter,\n key=options_data.key,\n mburst=options_data.mburst,\n mer=options_data.mer,\n loss=options_data.loss,\n bandwidth=options_data.bandwidth,\n burst=options_data.burst,\n delay=options_data.delay,\n dup=options_data.dup,\n buffer=options_data.buffer,\n unidirectional=options_data.unidirectional,\n )\n\n\ndef convert_link(link_data: LinkData) -> core_pb2.Link:\n \"\"\"\n Convert link_data into core protobuf link.\n\n :param link_data: link to convert\n :return: core protobuf Link\n \"\"\"\n iface1 = None\n if link_data.iface1 is not None:\n iface1 = convert_iface(link_data.iface1)\n iface2 = None\n if link_data.iface2 is not None:\n iface2 = convert_iface(link_data.iface2)\n options = convert_link_options(link_data.options)\n return core_pb2.Link(\n type=link_data.type.value,\n node1_id=link_data.node1_id,\n node2_id=link_data.node2_id,\n iface1=iface1,\n iface2=iface2,\n options=options,\n network_id=link_data.network_id,\n label=link_data.label,\n color=link_data.color,\n )\n\n\ndef get_net_stats() -> Dict[str, Dict]:\n \"\"\"\n Retrieve status about the current interfaces in the system\n\n :return: send and receive status of the interfaces in the system\n \"\"\"\n with open(\"/proc/net/dev\", \"r\") as f:\n data = f.readlines()[2:]\n\n stats = {}\n for line in data:\n line = line.strip()\n if not line:\n continue\n line = line.split()\n line[0] = line[0].strip(\":\")\n stats[line[0]] = {\"rx\": float(line[1]), \"tx\": float(line[9])}\n\n return stats\n\n\ndef session_location(session: Session, location: core_pb2.SessionLocation) -> None:\n \"\"\"\n Set session location based on location proto.\n\n :param session: session for location\n :param location: location to set\n :return: nothing\n \"\"\"\n session.location.refxyz = (location.x, location.y, location.z)\n session.location.setrefgeo(location.lat, location.lon, location.alt)\n session.location.refscale = location.scale\n\n\ndef service_configuration(session: Session, config: ServiceConfig) -> None:\n \"\"\"\n Convenience method for setting a node service configuration.\n\n :param session: session for service configuration\n :param config: service configuration\n 
:return:\n \"\"\"\n session.services.set_service(config.node_id, config.service)\n service = session.services.get_service(config.node_id, config.service)\n if config.files:\n service.configs = tuple(config.files)\n if config.directories:\n service.dirs = tuple(config.directories)\n if config.startup:\n service.startup = tuple(config.startup)\n if config.validate:\n service.validate = tuple(config.validate)\n if config.shutdown:\n service.shutdown = tuple(config.shutdown)\n\n\ndef get_service_configuration(service: CoreService) -> NodeServiceData:\n \"\"\"\n Convenience for converting a service to service data proto.\n\n :param service: service to get proto data for\n :return: service proto data\n \"\"\"\n return NodeServiceData(\n executables=service.executables,\n dependencies=service.dependencies,\n dirs=service.dirs,\n configs=service.configs,\n startup=service.startup,\n validate=service.validate,\n validation_mode=service.validation_mode.value,\n validation_timer=service.validation_timer,\n shutdown=service.shutdown,\n meta=service.meta,\n )\n\n\ndef iface_to_data(iface: CoreInterface) -> InterfaceData:\n ip4 = iface.get_ip4()\n ip4_addr = str(ip4.ip) if ip4 else None\n ip4_mask = ip4.prefixlen if ip4 else None\n ip6 = iface.get_ip6()\n ip6_addr = str(ip6.ip) if ip6 else None\n ip6_mask = ip6.prefixlen if ip6 else None\n return InterfaceData(\n id=iface.node_id,\n name=iface.name,\n mac=str(iface.mac),\n ip4=ip4_addr,\n ip4_mask=ip4_mask,\n ip6=ip6_addr,\n ip6_mask=ip6_mask,\n )\n\n\ndef iface_to_proto(node_id: int, iface: CoreInterface) -> core_pb2.Interface:\n \"\"\"\n Convenience for converting a core interface to the protobuf representation.\n\n :param node_id: id of node to convert interface for\n :param iface: interface to convert\n :return: interface proto\n \"\"\"\n if iface.node and iface.node.id == node_id:\n _id = iface.node_id\n else:\n _id = iface.net_id\n net_id = iface.net.id if iface.net else None\n node_id = iface.node.id if iface.node else None\n net2_id = iface.othernet.id if iface.othernet else None\n ip4_net = iface.get_ip4()\n ip4 = str(ip4_net.ip) if ip4_net else None\n ip4_mask = ip4_net.prefixlen if ip4_net else None\n ip6_net = iface.get_ip6()\n ip6 = str(ip6_net.ip) if ip6_net else None\n ip6_mask = ip6_net.prefixlen if ip6_net else None\n mac = str(iface.mac) if iface.mac else None\n return core_pb2.Interface(\n id=_id,\n net_id=net_id,\n net2_id=net2_id,\n node_id=node_id,\n name=iface.name,\n mac=mac,\n mtu=iface.mtu,\n flow_id=iface.flow_id,\n ip4=ip4,\n ip4_mask=ip4_mask,\n ip6=ip6,\n ip6_mask=ip6_mask,\n )\n\n\ndef get_nem_id(\n session: Session, node: CoreNode, iface_id: int, context: ServicerContext\n) -> int:\n \"\"\"\n Get nem id for a given node and interface id.\n\n :param session: session node belongs to\n :param node: node to get nem id for\n :param iface_id: id of interface on node to get nem id for\n :param context: request context\n :return: nem id\n \"\"\"\n iface = node.ifaces.get(iface_id)\n if not iface:\n message = f\"{node.name} missing interface {iface_id}\"\n context.abort(grpc.StatusCode.NOT_FOUND, message)\n net = iface.net\n if not isinstance(net, EmaneNet):\n message = f\"{node.name} interface {iface_id} is not an EMANE network\"\n context.abort(grpc.StatusCode.INVALID_ARGUMENT, message)\n nem_id = session.emane.get_nem_id(iface)\n if nem_id is None:\n message = f\"{node.name} interface {iface_id} nem id does not exist\"\n context.abort(grpc.StatusCode.INVALID_ARGUMENT, message)\n return nem_id\n\n\ndef 
get_emane_model_configs(session: Session) -> List[GetEmaneModelConfig]:\n configs = []\n for _id in session.emane.node_configurations:\n if _id == -1:\n continue\n model_configs = session.emane.node_configurations[_id]\n for model_name in model_configs:\n model = session.emane.models[model_name]\n current_config = session.emane.get_model_config(_id, model_name)\n config = get_config_options(current_config, model)\n node_id, iface_id = utils.parse_iface_config_id(_id)\n iface_id = iface_id if iface_id is not None else -1\n model_config = GetEmaneModelConfig(\n node_id=node_id, model=model_name, iface_id=iface_id, config=config\n )\n configs.append(model_config)\n return configs\n\n\ndef get_wlan_configs(session: Session) -> Dict[int, MappedConfig]:\n configs = {}\n for node_id in session.mobility.node_configurations:\n model_config = session.mobility.node_configurations[node_id]\n if node_id == -1:\n continue\n for model_name in model_config:\n if model_name != BasicRangeModel.name:\n continue\n current_config = session.mobility.get_model_config(node_id, model_name)\n config = get_config_options(current_config, BasicRangeModel)\n mapped_config = MappedConfig(config=config)\n configs[node_id] = mapped_config\n return configs\n\n\ndef get_mobility_configs(session: Session) -> Dict[int, MappedConfig]:\n configs = {}\n for node_id in session.mobility.node_configurations:\n model_config = session.mobility.node_configurations[node_id]\n if node_id == -1:\n continue\n for model_name in model_config:\n if model_name != Ns2ScriptedMobility.name:\n continue\n current_config = session.mobility.get_model_config(node_id, model_name)\n config = get_config_options(current_config, Ns2ScriptedMobility)\n mapped_config = MappedConfig(config=config)\n configs[node_id] = mapped_config\n return configs\n\n\ndef get_hooks(session: Session) -> List[core_pb2.Hook]:\n hooks = []\n for state in session.hooks:\n state_hooks = session.hooks[state]\n for file_name, file_data in state_hooks:\n hook = core_pb2.Hook(state=state.value, file=file_name, data=file_data)\n hooks.append(hook)\n return hooks\n\n\ndef get_emane_models(session: Session) -> List[str]:\n emane_models = []\n for model in session.emane.models.keys():\n if len(model.split(\"_\")) != 2:\n continue\n emane_models.append(model)\n return emane_models\n\n\ndef get_default_services(session: Session) -> List[ServiceDefaults]:\n default_services = []\n for name, services in session.services.default_services.items():\n default_service = ServiceDefaults(node_type=name, services=services)\n default_services.append(default_service)\n return default_services\n\n\ndef get_node_service_configs(session: Session) -> List[NodeServiceConfig]:\n configs = []\n for node_id, service_configs in session.services.custom_services.items():\n for name in service_configs:\n service = session.services.get_service(node_id, name)\n service_proto = get_service_configuration(service)\n config = NodeServiceConfig(\n node_id=node_id,\n service=name,\n data=service_proto,\n files=service.config_data,\n )\n configs.append(config)\n return configs\n\n\ndef get_node_config_service_configs(session: Session) -> List[ConfigServiceConfig]:\n configs = []\n for node in session.nodes.values():\n if not isinstance(node, CoreNodeBase):\n continue\n for name, service in node.config_services.items():\n if not service.custom_templates and not service.custom_config:\n continue\n config_proto = ConfigServiceConfig(\n node_id=node.id,\n name=name,\n templates=service.custom_templates,\n 
config=service.custom_config,\n )\n configs.append(config_proto)\n return configs\n\n\ndef get_emane_config(session: Session) -> Dict[str, common_pb2.ConfigOption]:\n current_config = session.emane.get_configs()\n return get_config_options(current_config, session.emane.emane_config)\n\n\ndef get_mobility_node(\n session: Session, node_id: int, context: ServicerContext\n) -> Union[WlanNode, EmaneNet]:\n try:\n return session.get_node(node_id, WlanNode)\n except CoreError:\n try:\n return session.get_node(node_id, EmaneNet)\n except CoreError:\n context.abort(grpc.StatusCode.NOT_FOUND, \"node id is not for wlan or emane\")\n","repo_name":"liangkang233/core-test","sub_path":"daemon/core/api/grpc/grpcutils.py","file_name":"grpcutils.py","file_ext":"py","file_size_in_byte":22239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"35164095864","text":"from geocomp.common import point\nfrom geocomp.common import prim\n\nclass IntersectionBST:\n \n class Node:\n def __init__(self, k, h, s, r, l):\n self.key = k #key is a point\n self.height = h\n self.size = s\n self.right = r\n self.left = l\n\n def __init__(self):\n self.root = None\n\n def isEmpty(self):\n return self.root == None\n \n def size (self):\n return self.size_aux(self.root)\n \n def size_aux(self, n):\n if (n == None): \n return 0\n return n.size\n\n def height (self):\n return self.height_aux(self.root)\n \n def height_aux(self, n):\n if (n == None): \n return -1\n return n.height\n\n def removeMinKey(self):\n node = self.root\n while (node.left != None): \n node = node.left\n self.remove(node.key)\n return node\n\n def compare_to (self, a, b):\n if(prim.left(b.init, b.to, a.to)):\n return 1\n elif (prim.left_on(b.init, b.to, a.to)):\n return 0\n return -1\n \n def insert (self, segment):\n if (self.root == None):\n self.root = self.Node(segment, 0, 1, None, None) \n else:\n self.root = self.insert_aux(self.root, segment)\n \n def insert_aux (self, node, segment):\n if (node == None):\n return self.Node(segment, 0, 1, None, None)\n cmp = self.compare_to(node.key, segment)\n if (cmp > 0):\n node.left = self.insert_aux(node.left, segment)\n elif (cmp < 0):\n node.right = self.insert_aux(node.right, segment)\n \n node.size = 1 + self.size_aux(node.left) + self.size_aux(node.right)\n node.height = 1 + max(self.height_aux(node.left), self.height_aux(node.right))\n return self.balance(node)\n\n def balance_factor(self, node):\n return self.height_aux(node.left) - self.height_aux(node.right)\n\n def balance (self, node):\n if (self.balance_factor(node) < -1):\n if (self.balance_factor(node.right) > 0):\n node.right = self.rotate_right(node.right)\n node = self.rotate_left(node)\n\n elif (self.balance_factor(node) > 1):\n if (self.balance_factor(node.left) < 0):\n node.left = self.rotate_left(node.left)\n node = self.rotate_right(node)\n\n return node \n\n def rotate_right(self, node):\n node2 = node.left\n node.left = node2.right\n node2.right = node\n node2.size = node.size\n node.size = 1 + self.size_aux(node.left) + self.size_aux(node.right)\n node.height = 1 + max(self.height_aux(node.left), self.height_aux(node.right))\n node2.height = 1 + max(self.height_aux(node2.left), self.height_aux(node2.right))\n return node2\n\n def rotate_left(self, node):\n node2 = node.right\n node.right = node2.left\n node2.left = node\n node2.size = node.size\n node.size = 1 + self.size_aux(node.left) + self.size_aux(node.right)\n node.height = 1 + max(self.height_aux(node.left), 
self.height_aux(node.right))\n        node2.height = 1 + max(self.height_aux(node2.left), self.height_aux(node2.right))\n        return node2\n\n    def contains(self, segment):\n        if (segment == None): return False\n        return self.contains_aux(self.root, segment)\n\n    def contains_aux(self, node, segment):\n        if (node == None):\n            return node\n        \n        cmp = self.compare_to(node.key, segment)\n        \n        if (cmp > 0):\n            return self.contains_aux(node.left, segment)\n        elif (cmp < 0):\n            return self.contains_aux(node.right, segment)\n        \n        return node\n\n    def remove(self, segment):\n        if (self.contains(segment)):\n            self.root = self.remove_aux(self.root, segment)\n\n    def remove_aux(self, node, segment):\n        cmp = self.compare_to(node.key, segment)\n        if (cmp > 0): node.left = self.remove_aux(node.left, segment)\n        elif (cmp < 0): node.right = self.remove_aux(node.right, segment)\n        else:\n            if (node.left == None):\n                return node.right\n            elif(node.right == None):\n                return node.left\n            else:\n                node_y = self.Node(node.key, node.height, node.size, node.right, node.left)\n                node = self.min_aux(node_y.right)\n                node.right = self.remove_min_aux(node_y.right)\n                node.left = node_y.left \n        \n        node.size = 1 + self.size_aux(node.left) + self.size_aux(node.right)\n        node.height = 1 + max(self.height_aux(node.left), self.height_aux(node.right))\n        return self.balance(node)\n\n    def remove_min(self):\n        if(not self.isEmpty()):\n            self.root = self.remove_min_aux(self.root)\n\n    def remove_min_aux(self, node):\n        if (node.left == None): return node.right\n        node.left = self.remove_min_aux(node.left)\n        node.size = 1 + self.size_aux(node.left) + self.size_aux(node.right)\n        node.height = 1 + max(self.height_aux(node.left), self.height_aux(node.right))\n        return self.balance(node)\n\n    def min(self):\n        if (not self.isEmpty()):\n            return self.min_aux(self.root)\n\n    def min_aux(self, node):\n        if(node.left == None):\n            return node\n        return self.min_aux(node.left)\n\n    def max(self):\n        if (not self.isEmpty()):\n            return self.max_aux(self.root)\n\n    def max_aux(self, node):\n        if(node.right == None):\n            return node\n        return self.max_aux(node.right)\n\n    def imprime(self):\n        if (self.root == None): return\n        print_root = 'init: ' + str(self.root.key.init) + ', to: ' + str(self.root.key.to) \n        print(\"The root is: \" + print_root)\n        self.imprime_aux(self.root)\n    \n    def imprime_aux(self, node):\n        if (node == None):\n            return\n        self.imprime_aux(node.left)\n        print('init: ' + str(node.key.init) + ', to: ' + str(node.key.to))\n        self.imprime_aux(node.right)\n    \n    \n    def plot_segments(self, color):\n        self.plot_segments_aux(color, self.root)\n    \n    \n    def plot_segments_aux(self, color, node):\n        if (node == None): return\n        \n        self.plot_segments_aux(color, node.left)\n        node.key.hilight(color)\n        self.plot_segments_aux(color, node.right)\n    \n    def remove_from_sweepline(self, segment_tree, event_key):\n        self.remove_from_sweepline_aux(segment_tree, event_key, self.root)\n    \n    def remove_from_sweepline_aux(self, segment_tree, event_key, node):\n        if (node == None): return\n\n        self.remove_from_sweepline_aux(segment_tree, event_key, node.left)\n        segment_tree.remove(node.key, event_key)\n        self.remove_from_sweepline_aux(segment_tree, event_key, node.right)\n\n    def insert_in_sweepline(self, segment_tree, event_key):\n        self.insert_in_sweepline_aux(segment_tree, event_key, self.root)\n\n    def insert_in_sweepline_aux(self, segment_tree, event_key, node):\n        if (node == None): return\n\n        self.insert_in_sweepline_aux(segment_tree, event_key, node.left)\n        segment_tree.insert(node.key, event_key)\n        
self.insert_in_sweepline_aux(segment_tree, event_key, node.right)\n","repo_name":"biamarou/MAC0331","sub_path":"geocomp/lineintersections/IntersectionBST.py","file_name":"IntersectionBST.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"3652892553","text":"from flask import Flask, jsonify, request, render_template\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.externals import joblib\n\nNB_spam_model = open('model.pkl','rb')\nmodel = joblib.load(NB_spam_model)\n\n\napp =Flask(__name__)\n\n\n@app.route('/')\ndef Home():\n return render_template('index.html')\n\n\n\ndef new_prediction(new_review):\n new_review = re.sub('[^a-zA-Z]', ' ', new_review)\n new_review = new_review.lower()\n new_review = new_review.split()\n ps = PorterStemmer()\n all_stopwords = stopwords.words('english')\n all_stopwords.remove('not')\n new_review = [ps.stem(word) for word in new_review if not word in set(all_stopwords)]\n new_review = ' '.join(new_review)\n new_corpus = [new_review]\n \n model=pickle.load(open('model.pkl','rb'))\n new_X_test = cv.transform(new_corpus).toarray()\n new_y_pred = model.predict(new_X_test)\n return(new_y_pred[0])\n \n@app.route('/predict',methods=['POST','GET'])\ndef predict_review():\n dataset = pd.read_csv('Restaurant_Reviews.tsv',delimiter='\\t',quoting=3)\n corpus = []\n for i in range(0,1000):\n review = re.sub('[^a-zA-z]',' ',dataset['Review'][i])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer()\n all_stopwords = stopwords.words('english')\n all_stopwords.remove('not')\n review = [ps.stem(word) for word in review if word not in set(all_stopwords)]\n review = \" \".join(review)\n corpus.append(review)\n cv = CountVectorizer()\n X = cv.fit_transform(corpus).toarray()\n y = dataset.iloc[:,-1].values \n \n if request.method == 'POST':\n new_review = request.form['review']\n #print(review)\n new_review = re.sub('[^a-zA-Z]', ' ', new_review)\n new_review = new_review.lower()\n new_review = new_review.split()\n ps = PorterStemmer()\n all_stopwords = stopwords.words('english')\n all_stopwords.remove('not')\n new_review = [ps.stem(word) for word in new_review if not word in set(all_stopwords)]\n new_review = ' '.join(new_review)\n new_corpus = [new_review]\n new_X_test = cv.transform(new_corpus).toarray()\n result = model.predict(new_X_test)\n \n #return render_template('index.html',pred=\"done\")\n #print(int(result[0]))\n if(int(result[0])==1):\n return render_template('index.html',pred='This is a positive review')\n else:\n return render_template('index.html',pred='This is a negative review')\n\napp.run() \n","repo_name":"Princeshaw/machine-learning","sub_path":"NLP analysis of Restaurant reviews/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"70101580686","text":"from pathlib import Path\nfrom xmlrpc.client import Boolean\nfrom discord import Guild, User\nfrom json import dump, load\n\n\nclass UserManager:\n def __init__(self, save_path) -> None:\n try:\n path = Path(save_path)\n if not path.exists():\n path.mkdir(parents=True, exist_ok=True)\n\n self.save_path = path\n except Exception as e:\n print(\"Invalid 
path given\")\n\n def _check_guild(self, guild: Guild):\n guild_path = self.save_path / str(guild.id)\n guild_path.mkdir(parents=True, exist_ok=True)\n\n guild_info = f\"\"\"\"guild_id\": {guild.id}\n\"guild_name\": {guild.name}\n\"guild_members\": {guild.members}\n\"guild_owner\": {guild.owner}\n\"voice_channels\": {guild.voice_channels}\n\"text_channels\": {guild.text_channels}\n \"\"\"\n\n guild_info_path = guild_path / \"info.txt\"\n\n with open(guild_info_path, \"w+\") as file:\n file.write(str(guild_info))\n\n def _check_user(self, guild: Guild, user: User):\n guild_path = self.save_path / str(guild.id)\n user_file = guild_path / f\"{user.id}.json\"\n\n if not user_file.exists():\n user_info = {\n \"user_name\": user.name,\n \"user_id\": user.id,\n \"user_score\": 0,\n \"user_tries\": 0,\n }\n\n with open(user_file, \"w+\") as file:\n dump(user_info, file)\n\n def _write_user_data(self, user_data, guild: Guild, user: User):\n guild_path = self.save_path / str(guild.id)\n user_file = guild_path / f\"{user.id}.json\"\n\n with open(user_file, \"w+\") as file:\n dump(user_data, file)\n\n def get_user_data(self, guild: Guild, user: User) -> dict:\n guild_path = self.save_path / str(guild.id)\n user_file = guild_path / f\"{user.id}.json\"\n\n with open(user_file, \"r\") as file:\n user_data = load(file)\n\n return user_data\n\n def increment_user(self, guild: Guild, user: User, correct_answer: Boolean = True):\n self._check_guild(guild)\n self._check_user(guild, user)\n\n user_data = self.get_user_data(guild, user)\n\n if correct_answer:\n user_data[\"user_score\"] += 1\n user_data[\"user_tries\"] += 1\n\n self._write_user_data(user_data, guild, user)\n","repo_name":"tjallo/DiscordPyBot","sub_path":"src/api/user_manager.py","file_name":"user_manager.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"65"} +{"seq_id":"71921746127","text":"from typing import *\n\n\nclass Solution:\n def vowelStrings(self, words: List[str], queries: List[List[int]]) -> List[int]:\n vowel = {'a', 'e', 'i', 'o', 'u'}\n vowel_array = [1 if word[0] in vowel and word[-1] in vowel else 0 for word in words]\n\n prefix_sum = []\n acc = 0\n for vowel in vowel_array:\n acc += vowel\n prefix_sum.append(acc)\n\n\n def get_query(query):\n if query[0] <= 0:\n return prefix_sum[query[1]]\n elif query[1] >= len(prefix_sum):\n return prefix_sum[-1]\n\n return prefix_sum[query[1]] - prefix_sum[query[0] - 1]\n\n return [get_query(query) for query in queries]\n","repo_name":"Bluntsord/InterviewPrep","sub_path":"Contest/Weekly/331/Question2.py","file_name":"Question2.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"21783239762","text":"\"\"\"\nModule containing class for Basic card of 0th generation\n\"\"\"\nfrom typing import (\n List,\n NamedTuple,\n Tuple\n)\n\nfrom cryptography.hazmat.primitives.asymmetric import ec\n\nfrom . import base\nfrom .base import Base\nfrom .. 
import exceptions\nfrom ..binary_utils import path_to_bytes\nfrom ..cryptos import encode_pubkey\nfrom ..enums import (\n AuthType,\n Derivation,\n KeyType,\n SeedSource,\n SlotIndex\n)\n\n\nclass BasicG0(Base):\n \"\"\"\n Class containing functionality for Basic cards of the 0th generation\n \"\"\"\n select_apdu = [0xA0, 0x00, 0x00, 0x10, 0x00, 0x01, 0x01]\n puk_rule = \"15 digits\"\n\n _ALGORITHM = ec.SECP256K1\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._initialized = False\n\n self._check_init()\n\n def change_pairing_key(self, index: int, pairing_key: bytes, puk: str = \"\") -> None:\n if len(pairing_key) != 32:\n raise exceptions.DataValidationException(\"Pairing key has to be 32 bytes.\")\n if not 0 <= index <= 7:\n raise exceptions.DataValidationException(\"Index must be between 0 and 7\")\n\n self.connection.send_encrypted([0x80, 0xDA, 0x00, 0x00],\n index.to_bytes(1, \"big\") + pairing_key)\n\n def derive(self, key_type: KeyType = KeyType.K1, path: str = \"\"):\n self.get_public_key(Derivation.DERIVE_AND_MAKE_CURRENT, key_type, path=path)\n\n def dual_seed_public_key(self, pin: str = \"\") -> bytes:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def dual_seed_load(self, data: bytes, pin: str = \"\") -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n @property\n def extended_public_key(self) -> bool:\n return False\n\n def generate_random_number(self, size: int) -> bytes:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def generate_seed(self, pin=\"\") -> bytes:\n try:\n gen_resp = self.connection.send_encrypted([0x80, 0xD4, 0x00, 0x00], b\"\")\n except exceptions.GenericException as error:\n if error.status[0] == 0x69 and error.status[1] == 0x86:\n raise exceptions.KeyAlreadyGenerated(\"The card already has a key generated\\n\\n\"\n \"It is not possible to generate another one \"\n \"without resetting the card\") from error\n raise\n\n if len(gen_resp) != 32:\n raise exceptions.KeyGenerationException(\"Bad data received during key generation\")\n\n return gen_resp\n\n def get_public_key(self, derivation: Derivation, key_type: KeyType = KeyType.K1, path: str = \"\",\n compressed: bool = True, hexed: bool = True) -> str:\n if not self.valid_key:\n raise exceptions.SeedException()\n\n derivation = Derivation(derivation)\n key_type = KeyType(key_type)\n\n if derivation == Derivation.PINLESS_PATH:\n raise exceptions.DerivationSelectionException(\"This operation doesn't support this \"\n \"derivation form\")\n\n message = [0x80, 0xC2, derivation + key_type, 1]\n binary_path = path_to_bytes(path) if path else b\"\"\n data = self.connection.send_encrypted(message, binary_path)\n\n if data[3:5] != b\"\\x41\\x04\":\n raise exceptions.ReadPublicKeyException(\"Invalid data received during public key \"\n \"reading\")\n\n result = data[4:].hex() if hexed else data[4:]\n if compressed:\n result = encode_pubkey(result, \"bin_compressed\").hex()\n\n return result\n\n def history(self, index: int = 0) -> NamedTuple:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n @property\n def initialized(self) -> bool:\n return self._initialized\n\n def load_seed(self, seed: bytes, pin: str = \"\") -> None:\n try:\n result = self.connection.send_encrypted([0x80, 0xD0, 0x03, 0x00], seed)\n except exceptions.GenericException as error:\n if error.status[0] == 0x69 and error.status[1] == 0x86:\n raise exceptions.KeyAlreadyGenerated(\"The card already has a key 
generated\\n\\n\"\n \"It is not possible to generate another one \"\n \"without resetting the card\") from error\n raise\n\n if len(result) != 32:\n raise exceptions.KeyGenerationException(\"Bad data received during key generation\")\n\n @property\n def pin_authentication(self) -> bool:\n return True\n\n @property\n def pinless_enabled(self) -> bool:\n return False\n\n def reset(self, puk: str) -> None:\n puk = self.valid_puk(puk)\n\n message = [0x80, 0xC0, Derivation.CURRENT_KEY, 0x00]\n\n self.connection.send_encrypted(message, puk.encode(\"ascii\"))\n self.auth_type = AuthType.NO_AUTH\n\n @property\n def seed_source(self) -> SeedSource:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def set_pin_authentication(self, status: bool, puk: str) -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def set_pinless_path(self, path: str, puk: str) -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def set_extended_public_key(self, status: bool, puk: str) -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def sign(self, data: bytes, derivation: Derivation, key_type: KeyType = KeyType.K1,\n path: str = \"\", pin: str = \"\", filter_eos: bool = False) -> bytes:\n pin = self.valid_pin(pin)\n derivation = Derivation(derivation)\n key_type = KeyType(key_type)\n\n message = [0x80, 0xC0, derivation + key_type, 0x00]\n\n derivation_base = (derivation + key_type) & 0x0F\n if derivation_base in (1, 2):\n data += path_to_bytes(path)\n\n result = self._sign_eos(message, data, pin) if filter_eos else \\\n self.connection.send_encrypted(message, data)\n\n if not result or result[70] != 0x30:\n raise exceptions.DataException(\"Invalid data received during signature\")\n\n return result[70:]\n\n @property\n def signing_counter(self) -> int:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_add(self, slot: SlotIndex, data_info: str, public_key: bytes, puk_code: str,\n cred_id: bytes = b\"\") -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_delete(self, slot: SlotIndex, puk_code: str) -> None:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_info(self, slot: SlotIndex) -> Tuple[str, str]:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_enabled(self, slot_index: SlotIndex) -> bool:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_challenge_response_nonce(self) -> bytes:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_challenge_response_open(self, slot: SlotIndex, signature: bytes) -> bool:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def user_key_signature_open(self, slot: SlotIndex, message: bytes, signature: bytes) -> bool:\n raise NotImplementedError(\"Card doesn't have this functionality\")\n\n def _sign_eos(self, apdu: List[int], data: bytes, pin: str) -> bytes:\n count = 0\n\n while True:\n result = self.connection.send_encrypted(apdu, data)\n len_r = int(result[73])\n len_s = int(result[75 + len_r])\n if len_r == 32 and len_s == 32:\n break\n\n count += 1\n if count >= 10:\n raise exceptions.EOSKeyError(\"The signature wasn't compatible with EOS standard \"\n \"after 10 tries\")\n self.verify_pin(pin)\n\n return result\n\n @staticmethod\n def valid_puk(puk: str, puk_name: str = \"puk\") -> str:\n if len(puk) != 
BasicG0.PUK_LENGTH:\n raise exceptions.DataValidationException(f\"The {puk_name} must have \"\n f\"{BasicG0.PUK_LENGTH} numeric \"\n f\"characters\")\n if not puk.isdigit():\n raise exceptions.DataValidationException(f\"The {puk_name} must be numeric.\")\n\n return puk\n\n @property\n def valid_key(self) -> bool:\n \"\"\"\n Check if the card has a valid key\n\n :return: Whether the card has a valid key.\n :rtype: bool\n \"\"\"\n return self._data and self._data != [0] * 32\n\n def verify_pin(self, pin: str) -> None:\n pin = self.valid_pin(pin)\n apdu = [0x80, 0x20, 0x00, 0x00]\n\n self.connection.send_encrypted(apdu, bytes(pin, 'ascii'))\n\n if not self.open:\n self.auth_type = AuthType.PIN\n\n def _check_init(self) -> None:\n apdu = [0x80, 0xFE, 0x00, 0x00, 0x01, 0x01]\n\n try:\n _, code1, code2 = self.connection.send_apdu(apdu)\n except exceptions.DataValidationException:\n return\n\n self._initialized = code1 == 0x6D and code2 == 0x00\n\n @property\n def _owner(self) -> base.User:\n message = [0x80, 0xFA, 0x00, 0x00]\n try:\n data = self.connection.send_encrypted(message, bytes([0]))\n except exceptions.CryptnoxException:\n return base.User(\"\", \"\")\n\n name_length = data[0]\n name = data[1:name_length + 1].decode(\"ascii\")\n email_length = data[name_length + 1]\n user_list_offset = email_length + 2 + name_length\n email = data[name_length + 2:user_list_offset].decode(\"ascii\")\n\n return base.User(name, email)\n","repo_name":"Cryptnox-Software/cryptnoxpy","sub_path":"cryptnoxpy/card/basic_g0.py","file_name":"basic_g0.py","file_ext":"py","file_size_in_byte":10093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"71028859087","text":"'''\nTkinter_tutorial.py demonstrates some of the standard widgets \nand the grid() geometry manager\n'''\nfrom Tkinter import *\nroot = Tk()\n'''\n# A canvas widget\ndrawpad = Canvas(root, background='brown')\ndrawpad.grid(row=0, column=1)\nitem = drawpad.create_oval(10, 50, 100, 100, fill='green')\n\n# A checkbox widget\nbox = Checkbutton(root, text=\"Check here.\")\nbox.grid(row=0, column=0)\n'''\n\n'''\n# An editor widget\neditor = Text(width=80, height=10)\neditor.grid(row=2, column=1, rowspan=2, sticky=SE)\n'''\n\n# A button. This widget is demonstrating using an event handler with \n# the command argument\ntimes_pressed = 0\ndef pressed():\n global times_pressed\n times_pressed = 'You have just recived a treasure map, your goal is to decipher it. '\n editor.insert(END, times_pressed)\n editor.see(END)\nbutton = Button(root, text='Explorer Ready?', \n command=pressed)\nbutton.grid(row=1, column=0)\n\n# An editor widget\neditor = Text(width=80, height=10)\neditor.grid(row=2, column=1, rowspan=2, sticky=SE)\n\n\nnextroot = Tk()\n\n\n\ndef start(self):\n times_pressed = 1\ndef pressed():\n global times_pressed\n times_pressed = 'How do you find the back of an egg? 
Click the one of the next two buttons '\n editor.insert(END, times_pressed)\n editor.see(END)\nbutton = Button(root, text='Solve this', \n command=pressed)\nbutton.grid(row=2, column=0)\n'''\neditor = Text(width=80, height=10)\neditor.grid(row=4, column=1, rowspan=2, sticky=SE)\n'''\nself.new_window = name_w\n\n\n\nnextroot = Tk()\n\n\ntimes_pressed = 1\ndef pressed():\n global times_pressed\n times_pressed = 'If you said pick the front then you are wrong '\n editor.insert(END, times_pressed)\n editor.see(END)\nbutton = Button(root, text='Front', \n command=pressed)\nbutton.grid(row=3, column=0)\ndef quit_window(self):\n self.Q1()\nself.new_window.desroy()\n'''\ntimes_pressed = 1\ndef pressed():\n global times_pressed\n times_pressed = 'Correct! You have solved our story that is completley suffiencet to pass Computer Scinece given....everything '\n editor.insert(END, times_pressed)\n editor.see(END)\nbutton = Button(root, text='Who cares', \n command=pressed)\nbutton.grid(row=4, column=0)\n\n'''\n# Slider\nspeed = IntVar()\nslider = Scale(root, from_=1, to=10, \n label='Speed', variable=speed)\nslider.grid(row=2, column=0)\nspeed.get()\n'''\n# Radio buttons: you can only select one\nradio = [0]*4 # create a list\ndata = IntVar()\nfor i in range(4):\n radio[i] = Radiobutton(root, text=str(i),\n variable=data, value=i)\n radio[i].grid(row=i,column=2)\ndata.set(3)\n'''\nroot.mainloop()","repo_name":"I-browz/csp1718","sub_path":"GUI-Project/1.5.2.PY Tkinter_tutorial.py","file_name":"1.5.2.PY Tkinter_tutorial.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"74344919566","text":"#BOJ11967 블켜기 20210528\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\ndef inScope(x,y,n):\n return 1 <= x <= n and 1 <= y <= n\n\ndef main():\n n, m = map(int, input().rstrip().split())\n info = {}\n for _ in range(m):\n x,y,a,b = map(int, input().rstrip().split())\n if (x,y) not in info:\n info[(x,y)] = [(a,b)]\n else:\n info[(x,y)].append((a,b))\n visited = set([(1,1)])\n lightedRoom = set([(1,1)])\n q = deque([(1,1)])\n dx = [-1,0,1,0]\n dy = [0,1,0,-1]\n while q:\n x,y = q.popleft()\n if (x,y) in info:\n for a,b in info[(x,y)]:\n lightedRoom.add((a,b))\n if (a,b) not in visited:\n for i in range(4):\n pa = a + dx[i]\n pb = b + dy[i]\n if inScope(pa,pb,n) and (pa,pb) in visited:\n visited.add((a,b))\n q.append((a,b))\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if inScope(nx,ny,n) and (nx,ny) not in visited:\n if (nx,ny) in lightedRoom:\n visited.add((nx,ny))\n q.append((nx,ny))\n print(len(lightedRoom))\n\nif __name__ == '__main__':\n main()","repo_name":"ccc96360/Algorithm","sub_path":"BOJ/Gold III/BOJ11967.py","file_name":"BOJ11967.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"20247150164","text":"#coding: utf-8\n#!usr/bin/env python\n\nclass A:\n x = 7\n\nprint(A.x)\nA.y = 9 #对类新增一个属性\nprint(A.y)\ndel A.x\n#print(A.x) #AttributeError: type object 'A' has no attribute 'x'\n\n#创建实例\nclass person:\n \"\"\"\n This is a sample class\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def color(self, color):\n d = {}\n d[self.name] = color\n return d\n\n #创建实例, 就是调用类. 当类被调用后:\n #1. 创建实例对象.\n #2. 检查是否有____专业的说法: 是否实现__()方法. 如果没有, 则返回实例对象.\n #3. 
如果有__init__(),则调用该方法并且将实例对象作为第一个参数self传进去\n #__init()__作为一个特殊方法, 是比较特殊的, 在它里面, 一般是规定一些属性或者\n #做一些初始化让类具有一些特征, 但是, 他没有语句.\n\n #__init__()初始化函数, 除了第一个参数必须是self, 不能有return语句之外, 其他方面和普通函数一样.\n #设置参数和里面的属性\nclass Room:\n def __init__(self, name, lang=\"golang\", website=\"www.google.com\"):\n self.name = name\n self.lang = lang\n self.website = website\n self.email = \"dsads@gmail.com\"\n\n\n\n","repo_name":"sf1025/python-study","sub_path":"test_demo/class2.py","file_name":"class2.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"38154165359","text":"import requests, json, os, pickle\nimport networkx as nx\nimport GOSTnets as gn\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom time import sleep\nimport pandas as pd\nimport geopandas as gpd\nimport rasterio\nfrom rasterio.windows import Window\nfrom rasterio.plot import * \nfrom rasterio.mask import * \nimport numpy as np\nfrom shapely.geometry import Point\nfrom shapely.geometry import box\nimport contextily as ctx\nimport osmnx as ox\nfrom fiona.crs import from_epsg\nimport pycrs\nimport geoviews as gv\nimport hvplot.pandas\nimport random\nimport utility\nfrom functions import n_closest_geodetic\n\n\nmapbox_tokens = []\nmapbox_tokens.append(os.environ.get(\"MAPBOX_TOKEN\", \"NO_TOKEN\"))\n\n \ndef get_travel_times_mapbox(origins, \n destinations, \n mode, \n d_name, \n dest_id_col=False,\n n_keep = 2,\n num_retries = 2, \n starting_token_index = 0,\n use_pickle = False,\n do_pickle_result=True,\n pickle_region_name = \"\",\n batch_limit=None):\n \"\"\"\n \n \"\"\"\n \n # Immediately return pickled data if requested\n pickle_name = f\"../data/interim/mb_origins_{pickle_region_name}_{d_name}_{mode}\"\n if use_pickle == True:\n return unpickle_data(picke_name)\n \n\n #tries = 0\n token_index = starting_token_index\n #while tries <= num_retries:\n origins = mapbox_matrix_API(\n token = mapbox_tokens[token_index],\n origins = origins,\n destinations = destinations,\n mode = mode,\n d_name = d_name,\n dest_id_col = dest_id_col,\n n_keep = n_keep,\n batch_limit = batch_limit\n )\n origins[f\"closest_{d_name}_geom\"] = gpd.points_from_xy(\n origins.loc[:, f\"closest_{d_name}_geom_lon_x\"],\n origins.loc[:, f\"closest_{d_name}_geom_lat_y\"]\n )\n origins[\"mb_snapped_dest_geom\"] = gpd.points_from_xy(\n origins.loc[:, \"mb_snapped_dest_lon_x\"],\n origins.loc[:, \"mb_snapped_dest_lat_y\"]\n )\n origins[\"mb_snapped_src_geom\"] = gpd.points_from_xy(\n origins.loc[:, \"mb_snapped_src_lon_x\"],\n origins.loc[:, \"mb_snapped_src_lat_y\"]\n )\n \n # Pickle generated origins if requested\n if do_pickle_result == True:\n pickle_data(origins, pickle_name)\n # tries += 1\n\n return origins\n\n\n\n\n\ndef mapbox_matrix_API(token, \n origins, \n destinations, \n mode='driving', \n d_name='poi',\n dest_id_col='osmid',\n n_keep=3, \n batch_limit=None):\n \"\"\"\n Given a geopandas set of origins and destinations, return the origins with extra columns\n with the closest destination in minutes given the mode of transportation for each origin.\n \n Also returns the snap distance to the origin (geodetic distance from origin point to closest road)\n Keywords:\n do_all [False]: By default avoid repeating work that has been done.\n \n \"\"\"\n\n MAPBOX_TOKEN = token\n osrm_server=\"https://api.mapbox.com/directions-matrix/v1\"\n modes=['driving-traffic', 'driving', 'cycling', 'walking']\n if mode not in modes:\n raise ValueError(\"Mode should be 
one of [driving-traffic, driving, cycling, walking]\")\n\n url = f\"{osrm_server}/mapbox/{mode}\"\n\n max_coordinates = 24\n \n if mode == \"driving-traffic\":\n max_coordinates = 10\n\n\n # Limit Coordinates Total = 25\n # Since limit is 25, that means that # sources + # destinations = 24\n\n batch_size = int(np.floor(max_coordinates / (n_keep + 1)))\n\n # Append proper columns\n for c in [\n f\"hrs_to_{d_name}\",\n f\"mins_to_{d_name}\",\n f\"dist_to_{d_name}\",\n f\"closest_{d_name}_id\",\n f\"closest_{d_name}_name\",\n f\"closest_{d_name}_geom_lon_x\",\n f\"closest_{d_name}_geom_lat_y\",\n f\"closest_{d_name}_geodetic_dist\",\n \"mb_snapped_dest_name\",\n \"mb_snapped_dest_dist\",\n \"mb_snapped_dest_lon_x\",\n \"mb_snapped_dest_lat_y\",\n \"mb_snapped_src_name\",\n \"mb_snapped_src_dist\",\n \"mb_snapped_src_lon_x\",\n \"mb_snapped_src_lat_y\"\n\n ]:\n origins[c] = -1\n\n\n \n\n # Out of the unprocessed origins, get the next batch to process\n # Get origins where time to hospital is -1 or unprocessed\n queued_origins = origins.loc[origins[f\"hrs_to_{d_name}\"] == -1, :]\n queued_origins_size = queued_origins.shape[0]\n \n for iteration in np.arange(queued_origins_size / batch_size):\n if ((batch_limit is not None) and int(iteration) >= batch_limit):\n break\n\n queued_origins = origins.loc[origins[f\"hrs_to_{d_name}\"] == -1, :]\n print(f\"\"\"\n Process batch # {int(iteration)} \n Remaining: {queued_origins.shape[0]}\n \"\"\")\n #origins_to_process = queued_origins.iloc[int(batch_size*iteration):].head(batch_size).copy()\n \n\n origins_to_process = queued_origins.head(batch_size).copy()\n \n\n relevant_destinations = n_closest_geodetic(origins_to_process, destinations, n_keep)\n\n # Safety check\n if (len(origins_to_process) + len(relevant_destinations)) > max_coordinates:\n raise ValueError(\"Over limit for Mapbox API\")\n\n origins_url = origins_to_process[['geometry']].copy().reset_index(drop=False)\n dest_url = relevant_destinations[['geometry']].copy().reset_index(drop=False)\n origins_url['type'] = 'origin'\n dest_url['type'] = 'dest'\n\n od = pd.concat([origins_url, dest_url]).reset_index(drop=True)\n\n origins_coords = \";\".join([\",\".join([str(row.centroid.x),str(row.centroid.y)]) for row in od.loc[od['type'] =='origin','geometry']])\n relevant_dest_coords=\";\".join([\",\".join([str(row.centroid.x),str(row.centroid.y)]) for row in od.loc[od['type'] == 'dest', 'geometry']])\n\n\n origin_coords_indices = ';'.join([str(x) for x in od.loc[od['type'] == 'origin', :].index.tolist()])\n dest_coords_indices = ';'.join([str(x) for x in od.loc[od['type'] == 'dest', :].index.tolist()])\n\n\n full_url = f\"{url}/{origins_coords};{relevant_dest_coords}.json?sources={origin_coords_indices}&destinations={dest_coords_indices}&annotations=distance,duration&access_token={MAPBOX_TOKEN}\"\n\n\n response = requests.get(full_url)\n response.raise_for_status()\n\n\n response = json.loads(response.text)\n \n \n durations = response['durations']\n distances = response['distances']\n mb_dests = response['destinations']\n sources = response['sources']\n\n\n for ix, dur_set in enumerate(durations):\n if len(dur_set) != len(relevant_destinations):\n raise ValueError(\"Incorrect response from Mapbox\")\n \n \n #Look up what the index is of this origin\n origin_ix = int(od.loc[:, \"index\"][ix])\n #print(origin_ix)\n \n \n \n\n # Clean the dataset for any non-routes, keeping index\n dur_set = list(map(lambda x: x if x else 99999999999999, dur_set))\n\n travel_time_to_closest_dest = min(dur_set)\n 
closest_dest_ix = np.argmin(dur_set)\n distance_to_closest_dest = distances[ix][closest_dest_ix]\n closest_dest_osm = relevant_destinations.iloc[closest_dest_ix]\n closest_dest_mb = mb_dests[closest_dest_ix]\n source_mb = sources[ix]\n\n\n\n\n # Closest OSM Destination\n origins_to_process.loc[origin_ix, f'hrs_to_{d_name}'] = ((travel_time_to_closest_dest / 60) / 60)\n origins_to_process.loc[origin_ix, f'mins_to_{d_name}'] = (travel_time_to_closest_dest / 60)\n origins_to_process.loc[origin_ix, f'dist_to_{d_name}'] = distance_to_closest_dest\n\n origins_to_process.loc[origin_ix, f\"closest_{d_name}_id\"] = closest_dest_osm[dest_id_col]\n origins_to_process.loc[origin_ix, f\"closest_{d_name}_name\"] = closest_dest_osm['name']\n origins_to_process.loc[origin_ix, f\"closest_{d_name}_geodetic_dist\"] = closest_dest_osm['distance_to_or']\n origins_to_process.loc[origin_ix, f\"closest_{d_name}_geom_lon_x\"] = closest_dest_osm['geometry'].x\n origins_to_process.loc[origin_ix, f\"closest_{d_name}_geom_lat_y\"] = closest_dest_osm['geometry'].y\n\n\n # Closest MB Destination\n origins_to_process.loc[origin_ix, f\"mb_snapped_dest_name\"] = closest_dest_mb['name']\n origins_to_process.loc[origin_ix, f\"mb_snapped_dest_dist\"] = closest_dest_mb['distance']\n origins_to_process.loc[origin_ix, f\"mb_snapped_dest_lon_x\"] = closest_dest_mb['location'][0]\n origins_to_process.loc[origin_ix, f\"mb_snapped_dest_lat_y\"] = closest_dest_mb['location'][1]\n\n # Closest Source\n origins_to_process.loc[origin_ix, f\"mb_snapped_src_name\"] = source_mb['name']\n origins_to_process.loc[origin_ix, f\"mb_snapped_src_dist\"] = source_mb['distance']\n origins_to_process.loc[origin_ix, f\"mb_snapped_src_lon_x\"] = source_mb['location'][0]\n origins_to_process.loc[origin_ix, f\"mb_snapped_src_lat_y\"] = source_mb['location'][1]\n\n\n\n\n \n #dest_geom = relevant_destinations.loc[:, \"geometry\"][closest_dest_ix]\n #origins_to_process.loc[origin_ix, f\"closest_{d_name}_geometry\"] = dest_geom\n\n\n # Get the new stuff into origins\n origins.loc[origins_to_process.index, :] = origins_to_process.loc[:,:].copy()\n\n \n queued_origins = origins.loc[origins[f\"hrs_to_{d_name}\"] == -1, :]\n queued_origins_size = queued_origins.shape[0]\n \n print(f\"There are still {queued_origins_size} unprocessed origins\")\n return origins\n\n\ndef mapbox_analysis(mode,origins,destinations):\n \n \"\"\"\n Given origins, destinations and filtered destinations conducts analysis on travel distane for origin-destination pairs \n \"\"\"\n \n \n # Temp\n #failed_data = origins.loc[origins.t_hospital == -1, :]\n #origins = origins.loc[origins.t_hospital != -1, :].copy()\n #failed_data = failed_data.reset_index(drop=True)\n #failed_data = failed_data.loc[failed_data.index != 1, :]\n #origins = pd.concat([origins, failed_data]).reset_index(drop = True)\n #origins.shape\n \n #Data Formatting \n #origins.to_file(\"./data/origins.geojson\", driver=\"GeoJSON\")\n #destinations.to_file(\"./data/dest.geojson\", driver=\"GeoJSON\")\n #o = gpd.read_file(\"./data/origins.geojson\")\n #h = gpd.read_file(\"./data/dest.geojson\")\n #o.head()\n \n o = origins.copy()\n h = destinations.copy()\n \n #Plotting \n #%matplotlib inline\n plt.figure()\n o['hrs_to_hosp_or_clinic'].plot.hist(mode, alpha=0.7,bins=1000,cumulative=False,density=False,log=False,logx=False,weights=o['tot_pop'])\n plt.xlim((0,2))\n plt.title(mode)\n plt.ylabel('Population 1e7')\n plt.xlabel('Distance to closest: Hospital')\n 
plt.show()\n","repo_name":"datapartnership/hospital-accessibility","sub_path":"src/mapbox.py","file_name":"mapbox.py","file_ext":"py","file_size_in_byte":11159,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"65"} +{"seq_id":"37523271593","text":"import asyncio\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\n\nimport asyncclick as click\nimport sqlalchemy\nfrom core import logging\nfrom core.aws_requester import AwsRequester\nfrom core.queues.sqs import SqsMessageQueue\nfrom core.requester import Requester\nfrom core.slack_client import SlackClient\nfrom core.store.database import Database\nfrom core.util import chain_util\nfrom core.web3.eth_client import RestEthClient\nfrom sqlalchemy.sql import functions as sqlalchemyfunc\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))\nfrom notd.block_processor import BlockProcessor\nfrom notd.manager import NotdManager\nfrom notd.store.retriever import Retriever\nfrom notd.store.saver import Saver\nfrom notd.store.schema import BlocksTable\nfrom notd.store.schema import TokenTransfersTable\nfrom notd.store.schema_conversions import token_transfer_from_row\nfrom notd.token_manager import TokenManager\n\n\n@click.command()\n@click.option('-s', '--start-block-number', 'startBlock', required=True, type=int)\n@click.option('-e', '--end-block-number', 'endBlock', required=True, type=int)\n@click.option('-b', '--batch-size', 'batchSize', required=False, type=int, default=1000)\nasync def reprocess_multi_transfers(startBlock: int, endBlock: int, batchSize: int):\n databaseConnectionString = Database.create_psql_connection_string(username=os.environ[\"DB_USERNAME\"], password=os.environ[\"DB_PASSWORD\"], host=os.environ[\"DB_HOST\"], port=os.environ[\"DB_PORT\"], name=os.environ[\"DB_NAME\"])\n database = Database(connectionString=databaseConnectionString)\n saver = Saver(database=database)\n retriever = Retriever(database=database)\n #NOTE Change to aws credentials before final push\n workQueue = SqsMessageQueue(region='us-east-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.us-east-1.amazonaws.com/113848722427/FemiKiBa')\n tokenQueue = SqsMessageQueue(region='us-east-1', accessKeyId=os.environ['AWS_KEY'], accessKeySecret=os.environ['AWS_SECRET'], queueUrl='https://sqs.us-east-1.amazonaws.com/113848722427/FemiKiBa')\n requester = Requester()\n requester = Requester()\n ethClient = RestEthClient(url=f'https://mainnet.infura.io/v3/{os.environ[\"INFURA_PROJECT_ID\"]}', requester=requester)\n blockProcessor = BlockProcessor(ethClient=ethClient)\n requester = Requester()\n revueApiKey = os.environ['REVUE_API_KEY']\n tokenManager = TokenManager(saver=saver, retriever=retriever, tokenQueue=tokenQueue, collectionProcessor=None, tokenMetadataProcessor=None, tokenOwnershipProcessor=None, collectionActivityProcessor=None)\n notdManager = NotdManager(blockProcessor=blockProcessor, saver=saver, retriever=retriever, workQueue=workQueue, tokenManager=tokenManager, requester=requester, revueApiKey=revueApiKey)\n\n await database.connect()\n await workQueue.connect()\n await tokenQueue.connect()\n currentBlockNumber = startBlock\n while currentBlockNumber < endBlock:\n start = currentBlockNumber\n end = min(currentBlockNumber + batchSize, endBlock)\n logging.info(f'Working on {start} to {end}...')\n async with saver.create_transaction() as connection:\n multiTransferSubquery = (\n TokenTransfersTable.select()\n 
.with_only_columns(TokenTransfersTable.c.transactionHash)\n .filter(TokenTransfersTable.c.blockNumber >= start)\n .filter(TokenTransfersTable.c.blockNumber < end)\n .group_by(TokenTransfersTable.c.transactionHash)\n .having(sqlalchemyfunc.count(TokenTransfersTable.c.transactionHash) > 1)\n .subquery()\n )\n query = (\n TokenTransfersTable.select()\n .with_only_columns(TokenTransfersTable.c.blockNumber)\n .where(sqlalchemy.or_(\n TokenTransfersTable.c.transactionHash.in_(sqlalchemy.select(multiTransferSubquery)),\n TokenTransfersTable.c.registryAddress == '0x57f1887a8BF19b14fC0dF6Fd9B2acc9Af147eA85')\n )\n )\n result = await database.execute(query=query)\n blocksToReprocess = {row[0] for row in result}\n logging.info(f'Reprocessing {len(blocksToReprocess)} blocks')\n await notdManager.process_blocks_deferred(blockNumbers=list(blocksToReprocess), shouldSkipProcessingTokens=True)\n blocksToBackfill = set(list(range(start, end))) - blocksToReprocess\n logging.info(f'Back filling {len(blocksToBackfill)} blocks')\n values = {}\n values[TokenTransfersTable.c.isMultiAddress.key] = False\n values[TokenTransfersTable.c.isInterstitial.key] = False\n values[TokenTransfersTable.c.isSwap.key] = False\n values[TokenTransfersTable.c.isBatch.key] = False\n query = TokenTransfersTable.update(TokenTransfersTable.c.blockNumber.in_(blocksToBackfill)).values(values)\n await database.execute(query=query)\n\n currentBlockNumber = currentBlockNumber + batchSize\n\n await database.disconnect()\n await workQueue.disconnect()\n await tokenQueue.disconnect()\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n asyncio.run(reprocess_multi_transfers())\n","repo_name":"kibalabs/nftoftheday","sub_path":"api/scripts/reprocess_multi_transfers.py","file_name":"reprocess_multi_transfers.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"34787686996","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 1 16:03:11 2016\r\n\r\n@author: LeBaron\r\n\"\"\"\r\n\r\n# uses 2 lists to create a Hash Table class that implements the Map ADT. 
One list, called slots, will hold the key items and a parallel list, called data, will hold the values.\r\n# implements a simple remainder hash function with linear probing rehash\r\n\r\nclass HashTable:\r\n    \r\n    def __init__(self):\r\n        self.size = 11 # arbitrary but should be a prime number\r\n        self.slots = [None] * self.size\r\n        self.data = [None] * self.size\r\n    \r\n    def put(self, key, data):\r\n        hashvalue = self.hashfunction(key,len(self.slots))\r\n        if self.slots[hashvalue] == None:\r\n            self.slots[hashvalue] = key\r\n            self.data[hashvalue] = data\r\n        else:\r\n            if self.slots[hashvalue] == key:\r\n                self.data[hashvalue] = data # replace\r\n            else:\r\n                nextslot = self.rehash(hashvalue, len(self.slots))\r\n                while self.slots[nextslot] != None and self.slots[nextslot] != key:\r\n                    nextslot = self.rehash(nextslot,len(self.slots))\r\n                if self.slots[nextslot] == None:\r\n                    self.slots[nextslot] = key\r\n                    self.data[nextslot] = data\r\n                else:\r\n                    self.data[nextslot] = data # replace\r\n    \r\n    def hashfunction(self,key,size):\r\n        return key % size\r\n    \r\n    def rehash(self, oldhash, size):\r\n        return (oldhash + 1) % size\r\n    \r\n    def get(self,key):\r\n        startslot = self.hashfunction(key,len(self.slots))\r\n        data = None\r\n        stop = False\r\n        found = False\r\n        position = startslot\r\n        while self.slots[position] != None and not found and not stop:\r\n            if self.slots[position] == key:\r\n                found = True\r\n                data = self.data[position]\r\n            else:\r\n                position = self.rehash(position, len(self.slots))\r\n                if position == startslot:\r\n                    stop = True\r\n        return data\r\n    \r\n    def __getitem__(self, key):\r\n        return self.get(key)\r\n    \r\n    def __setitem__(self, key, data):\r\n        self.put(key, data)\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    ","repo_name":"zakkatkk/dataStructures","sub_path":"HashTable.py","file_name":"HashTable.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"26343959913","text":"# importing QISKit\nimport numpy as np\nimport scipy\n\nfrom qiskit import Aer, IBMQ\nfrom qiskit.quantum_info.operators import Operator\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute\nfrom qiskit.extensions import HamiltonianGate\n\ndef getQState(qwalk, n):\n    '''Method to get the quantum state before the measurement.'''\n    sv_simulator = Aer.get_backend('statevector_simulator')\n\n    # Execute and get counts\n    result = execute(qwalk, sv_simulator).result()\n    statevector = result.get_statevector(qwalk)\n\n    # statevector\n\n    b = []\n    for k in range(0,len(statevector)):\n        b.append(k)\n\n    i = iter(statevector)\n    j = iter(b)\n    dct = dict(zip(j, i))\n\n    # Create the dictionary without all the \n    qstate = {}\n    for i in range(0,len(dct)):\n        qstate[format(i, '0'+str(n)+'b')] = dct[i]\n    \n    return qstate\n\ndef getNonZeros(dct):\n    '''Method that returns the positions of the state space that have non-zero amplitudes in\n    the form of a dictionary.'''\n    non_zeros = dict()\n\n    for key in dct.keys():\n        if (dct[key] != (0+0j)):\n            non_zeros[key] = dct[key]\n    \n    return non_zeros\n\ndef getProbabilities(dct, n):\n    '''Methods that calculates the probabilities of each position of the state space from\n    the amplitudes of the quantum state. 
Returns a dictionary that contains the states with\n removed the ancilla and coin qubits.'''\n dct = getNonZeros(dct)\n probs = dict()\n \n probs = {k[-n:]:0 for k,v in dct.items()}\n for k,v in dct.items():\n probs[k[-n:]]+=np.abs(v)**2\n \n dec = [0]*len(probs.keys())\n probs_dec = dict()\n \n for key in probs.keys():\n probs_dec[int(key, 2)] = probs[key]\n \n return probs_dec\n\ndef getPosProb(n, t, meas):\n '''Method that returns the positions of the particle for a single number of coin flips,\n as well as the probabilities for the walker to be at each of the states.'''\n \n qwalk = ctqw(n, t, meas) # Return the CTQW circuit\n qstate = getQState(qwalk, n) # Return the statevector (i.e quantum state before measurement)\n dct = getProbabilities(qstate, n)\n \n pos = list(dct.keys())\n prob = list(dct.values())\n \n return pos,prob\n\ndef unitaryHam(N, t):\n '''Returns the operator resulting from the Hamiltonian exponentiation.'''\n # Define the Hamiltonian\n H = np.array([\n [0.+0.j]*N\n ]*N)\n\n H[0][N-1] = 1/2\n H[N-1][0] = 1/2\n for i in range(0, N):\n for j in range(0, N):\n if (j == i+1) or (j == i-1):\n H[i][j] = 1/2\n\n H = Operator(H)\n \n expHam = HamiltonianGate(H, t, label='CTQW') # This format includes time, t\n\n return expHam\n\n# --------- Previous version -------------\n# def unitaryHam(N, t):\n# # ATTENTION: this representation containes the experssion -iHt, i.e the power of the expression e^{-iHt}, and H=A*1/d => (0.-1.j) = -i\n# '''Returns the operator resulting from the Hamiltonian exponentiation.'''\n# miHt = np.array([\n# [0.+0.j]*N\n# ]*N)\n\n# miHt[0][N-1] = (0.-1.j)*t/2\n# miHt[N-1][0] = (0.-1.j)*t/2\n# for i in range(0, N):\n# for j in range(0, N):\n# if (j == i+1) or (j == i-1):\n# miHt[i][j] = (0.-1.j)*t/2\n\n# # Perform the exponent e^{-iHt}\n# exp_miHt = scipy.linalg.expm(miHt)\n\n# U_c = Operator(exp_miHt)\n\n# return U_c\n# -----------------------------------------\n\ndef ctqw(n, t, meas):\n '''Creates the continues time quantum walk circuit on an N-cycle'''\n N = 2**n\n U_c = unitaryHam(N, t)\n \n q = QuantumRegister(n, 'q')\n c = ClassicalRegister(n, 'c')\n circ = QuantumCircuit(q, c)\n\n circ.append(U_c, [q[i] for i in range(0, n)])\n \n if (meas == True):\n circ.measure(q,c)\n \n return circ","repo_name":"kgeorgopoulos2/programbench","sub_path":"ctqwalk.py","file_name":"ctqwalk.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"30288955059","text":"import tweepy\nimport app_config\n\nconfig = app_config.read_config()\nclient = tweepy.Client(config['twitter']['bearer_token'])\n\nresponse = client.search_recent_tweets(\n \"ddos attack -is:retweet lang:en -is:reply\",\n max_results = 100,\n tweet_fields = ['author_id','created_at','text','source','lang','geo'],\n user_fields = ['name','username','location','verified'],\n expansions = ['geo.place_id', 'author_id'],\n place_fields = ['country','country_code']\n)\n\n# print(response)\n\n# Get users list from the includes object\nusers = {u[\"id\"]: u for u in response.includes['users']}\n\nfor tweet in response.data:\n print(\"----------------------------------------------\")\n print(tweet.text) # print the text\n print(\"Language:\", tweet.data['lang']) # print the language (PL, since we're filtering by it)\n print(\"Source :\", tweet.data['source']) # what did the user use to publish the tweet?\n print(\"Created :\", tweet.data['created_at'])\n\n if users[tweet.author_id]:\n user = 
users[tweet.author_id]\n\n #user = next(\n # (item for item in response.includes['users'] if item['id'] == tweet.author_id),\n # {}\n #)\n\n print(user.name)\n print(user.username)\n print(user.verified)\n\n print(f\"URL: https://twitter.com/{user.username}/status/{tweet.data['id']}\")\n\n\n# https://twitter.com/places/555d6b4e2f6a18f6 # BOZ\n# https://dev.to/twitterdev/a-comprehensive-guide-for-using-the-twitter-api-v2-using-tweepy-in-python-15d9","repo_name":"sbroekhoven/twitterscripts","sub_path":"twitter3.py","file_name":"twitter3.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"19879252386","text":"import csv\r\nimport numpy as np\r\n\r\nclass DecisionTree():\r\n\t\r\n\tdef learn(self, training_set,max_depth):\r\n\t\t\r\n\t\tdef split_tree(attr,val,attr_inner,val_inner,ipdata,type):\r\n\t\t\ttree_left = []\r\n\t\t\ttree_right = []\r\n\t\t\tif type == \"uni\":\r\n\t\t\t\tfor ix in ipdata:\r\n\t\t\t\t\tif ix[attr] < val:\r\n\t\t\t\t\t\ttree_left.append(ix)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttree_right.append(ix)\r\n\t\t\telse:\r\n\t\t\t\tfor ix in ipdata:\r\n\t\t\t\t\tif ix[attr] < val and ix[attr_inner] > val_inner:\r\n\t\t\t\t\t\ttree_left.append(ix)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttree_right.append(ix)\r\n\t\t\treturn tree_left,tree_right\r\n\t\t\r\n\t\tdef calc_entropy(ip_vec):\r\n\t\t\tix0_count = 0\r\n\t\t\tix1_count = 0\r\n\t\t\tfor ix in ip_vec:\r\n\t\t\t\tif int(ix) == 0:\r\n\t\t\t\t\tix0_count += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tix1_count += 1\r\n\t\t\tif ix0_count == 0 or ix1_count == 0:\r\n\t\t\t\treturn 0\r\n\t\t\telse:\r\n\t\t\t\treturn (-1*(((ix0_count/(ix0_count+ix1_count))*(np.log2((ix0_count/(ix0_count+ix1_count)))))+((ix1_count/(ix0_count+ix1_count))*(np.log2(ix1_count/(ix0_count+ix1_count))))))\r\n\t\t\t\r\n\t\tdef get_ratio(child_tree):\r\n\t\t\tcnt0 = 0\r\n\t\t\tcnt1 = 0\r\n\t\t\tfor ix in child_tree:\r\n\t\t\t\tif ix == 0:\r\n\t\t\t\t\tcnt0 += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tcnt1 += 1\r\n\t\t\tif cnt0 == 0 and cnt1 == 0:\r\n\t\t\t\treturn(0.5)\r\n\t\t\treturn(1-((cnt0/(cnt0+cnt1))**2)-((cnt1/(cnt0+cnt1))**2))\t\t\r\n\t\t\r\n\t\tdef calc_gini_score(parent_gini,tree_lt,tree_rt):\r\n\t\t\tlt_gini = get_ratio([ix[-1] for ix in tree_lt])\r\n\t\t\trt_gini = get_ratio([ix[-1] for ix in tree_rt])\r\n\t\t\ttmp_gini = parent_gini - ((len(tree_lt)/(len(tree_lt)+len(tree_rt)))*lt_gini) - ((len(tree_rt)/(len(tree_lt)+len(tree_rt)))*rt_gini)\r\n\t\t\treturn tmp_gini\t\r\n\t\t\r\n\t\tdef calc_info_gain(parent_ent,i,tree_lt,tree_rt):\r\n\t\t\tlt_entropy = calc_entropy([ix[-1] for ix in tree_lt])\r\n\t\t\trt_entropy = calc_entropy([ix[-1] for ix in tree_rt])\r\n\t\t\ttmp_gain = parent_ent - ((len(tree_lt)/(len(tree_lt)+len(tree_rt)))*lt_entropy) - ((len(tree_rt)/(len(tree_lt)+len(tree_rt)))*rt_entropy)\r\n\t\t\treturn tmp_gain\r\n\t\t\r\n\t\tdef chk_last_node(ipSet):\r\n\t\t\tclass_val = [elem[-1] for elem in ipSet]\r\n\t\t\tresponse = max(set(class_val),key=class_val.count)\r\n\t\t\treturn(response)\r\n\t\t\t\r\n\t\t\r\n\t\tdef get_child(ipdata):\r\n\t\t\tdef sortByIndex(item):\r\n\t\t\t\treturn item[2]\r\n\t\t\t\t\r\n\t\t\tdecision_cols = list(range(len(ipdata[0])-1))\r\n\t\t\tparent_gini = get_ratio([ip[-1] for ip in ipdata])\r\n\t\t\tgini_gain = []\t\t\t\r\n\r\n\t\t\tfor ix in decision_cols:\r\n\t\t\t\tix_values = set(ix1[ix] for ix1 in ipdata)\r\n\t\t\t\tfor ix_val in ix_values:\r\n\t\t\t\t\tresult_left, result_right = 
split_tree(ix,ix_val,None,None,ipdata,\"uni\")\r\n\t\t\t\t\tgini_gain.append((ix,ix_val,calc_gini_score(parent_gini,result_left,result_right)))\r\n\t\t\t\r\n\t\t\tsorted_info_gain_first = sorted(gini_gain,key=sortByIndex,reverse=True)\r\n\t\t\t\r\n\t\t\tdecision_cols = list((set(decision_cols) - set([sorted_info_gain_first[0][0]])))\r\n\t\t\tgini_gain = []\r\n\t\t\tfor ix in decision_cols:\r\n\t\t\t\tix_values = set(ix1[ix] for ix1 in ipdata)\r\n\t\t\t\tfor ix_val in ix_values:\r\n\t\t\t\t\tresult_left, result_right = split_tree(ix,ix_val,None,None,ipdata,\"uni\")\r\n\t\t\t\t\tgini_gain.append((ix,ix_val,calc_gini_score(parent_gini,result_left,result_right)))\r\n\t\t\t\r\n\t\t\tsorted_info_gain_second = sorted(gini_gain,key=sortByIndex,reverse=True)\r\n\t\t\t\r\n\t\t\tresult_left, result_right = split_tree(sorted_info_gain_first[0][0],sorted_info_gain_first[0][1],sorted_info_gain_second[1][0],sorted_info_gain_second[1][1],ipdata,\"bi\")\r\n\t\t\t\r\n\t\t\treturn {'attribute1':sorted_info_gain_first[0][0],'chk_value1':sorted_info_gain_first[0][1],'attribute2':sorted_info_gain_second[1][0],'chk_value2':sorted_info_gain_second[1][1],'left_branch':result_left,'right_branch':result_right}\r\n\t\t\r\n\t\tdef splitTree(main_tree,max_depth,curr_depth):\r\n\t\t\tleft_child = main_tree['left_branch']\r\n\t\t\tright_child = main_tree['right_branch']\r\n\r\n\t\t\tdel(main_tree['left_branch'])\r\n\t\t\tdel(main_tree['right_branch'])\r\n\t\t\t\r\n\t\t\tif not left_child and right_child:\r\n\t\t\t\tmain_tree['left_child'] = chk_last_node(right_child)\r\n\t\t\t\tmain_tree['right_child'] = main_tree['left_child']\r\n\t\t\t\treturn\r\n\t\t\t\r\n\t\t\tif not right_child and left_child:\r\n\t\t\t\tmain_tree['right_child'] = chk_last_node(left_child)\r\n\t\t\t\tmain_tree['left_child'] = main_tree['right_child']\r\n\t\t\t\treturn\r\n\t\t\t\r\n\t\t\tif curr_depth >= max_depth:\r\n\t\t\t\tmain_tree['left_child'] = chk_last_node(left_child)\r\n\t\t\t\tmain_tree['right_child'] = chk_last_node(right_child)\r\n\t\t\t\treturn\r\n\r\n\t\t\tmain_tree['left_child'] = get_child(left_child)\r\n\t\t\tsplitTree(main_tree['left_child'],max_depth,curr_depth+1)\r\n\t\t\t\t\r\n\t\t\tmain_tree['right_child'] = get_child(right_child)\r\n\t\t\tsplitTree(main_tree['right_child'],max_depth,curr_depth+1)\r\n\t\t\t\t\r\n\t\t\treturn main_tree\r\n\t\t\r\n\t\t## Start of the functin\r\n\t\tself.tree = {}\r\n\t\tself.tree = get_child(training_set)\r\n\t\tsplitTree(self.tree,max_depth,1)\r\n\t\r\n\r\n # implement this function\r\n\tdef classify(self, test_instance):\r\n\t\tresult = 0 # baseline: always classifies as 0\r\n\t\t\r\n\t\tdef get_class(tree_input,test_data):\r\n\t\t\tif test_data[tree_input['attribute1']] < tree_input['chk_value1'] and test_data[tree_input['attribute2']] > tree_input['chk_value2']:\r\n\t\t\t\tif isinstance(tree_input['left_child'], dict):\r\n\t\t\t\t\treturn get_class(tree_input['left_child'],test_data)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn tree_input['left_child']\r\n\t\t\telse:\r\n\t\t\t\tif isinstance(tree_input['right_child'], dict):\r\n\t\t\t\t\treturn get_class(tree_input['right_child'],test_data)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn tree_input['right_child']\r\n\t\t\r\n\t\tresult = get_class(self.tree,test_instance)\r\n\t\treturn result\r\n\r\n\r\n# Load data set\r\nwith open(\"wine-dataset.csv\") as f:\r\n next(f, None)\r\n data = [tuple(line) for line in csv.reader(f, delimiter=\",\")]\r\nprint(\"Number of records: %d\" % len(data))\r\n\r\ndef run_decision_tree():\r\n\r\n\ttree = 
DecisionTree()\r\n\r\n\t# Split training/test sets\r\n\t# You need to modify the following code for cross validation.\r\n\tK = 10\r\n\r\n\taccuracy_list = []\r\n\tfor ix in list(range(10)):\r\n\t\ttraining_set = [tuple(map(float,x)) for i, x in enumerate(data) if i % K != ix]\r\n\t\ttest_set = [tuple(map(float,x)) for i, x in enumerate(data) if i % K == ix]\r\n\r\n\t\t# Construct a tree using training set, set maximum depth to prevent looping\r\n\t\tmax_depth = 50\r\n\t\toutput = tree.learn(training_set,max_depth)\r\n\r\n\t\t# Classify the test set using the tree we just constructed\r\n\t\tresults = []\r\n\t\tfor instance in test_set:\r\n\t\t\tresult = tree.classify( instance[:-1] )\r\n\t\t\tresults.append( result == instance[-1])\r\n\r\n\t\t# Accuracy\r\n\t\taccuracy = float(results.count(True))/float(len(results))\r\n\t\t\r\n\t\t# Create a list of accuracy scores for each fold\r\n\t\taccuracy_list.append(accuracy)\r\n\r\n\taccuracy = sum(accuracy_list)/float(len(accuracy_list))\r\n\tprint(\"Mean Accuracy using 10 fold validation (Multivariate Decision Tree): \" + str(accuracy))\r\n\r\n\r\n\t# Writing results to a file\r\n\tf = open(\"result.txt\", \"w\")\r\n\tf.write(\"accuracy: %.4f\" % accuracy)\r\n\tf.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    run_decision_tree()","repo_name":"srinivasksh/DecisionTree-ID3-Based-implementation-of-ML-Algorithm","sub_path":"Decision_Tree_Multivariate.py","file_name":"Decision_Tree_Multivariate.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"38862996570","text":"\"\"\"\r\nstatus_frame.py\r\n\r\nStatus Bar frame class for PyGPSClient application.\r\n\r\nThis handles the status bar notifications at the foot of the window.\r\n\r\nCreated on 12 Sep 2020\r\n\r\n:author: semuadmin\r\n:copyright: SEMU Consulting © 2020\r\n:license: BSD 3-Clause\r\n\"\"\"\r\n\r\nfrom tkinter import VERTICAL, E, Frame, Label, N, S, StringVar, W, ttk\r\n\r\n\r\nclass StatusFrame(Frame):\r\n    \"\"\"\r\n    Status bar frame class.\r\n    \"\"\"\r\n\r\n    def __init__(self, app, *args, **kwargs):\r\n        \"\"\"\r\n        Constructor\r\n\r\n        :param Frame app: reference to main tkinter application\r\n        :param args: optional args to pass to Frame parent class\r\n        :param kwargs: optional kwargs to pass to Frame parent class\r\n        \"\"\"\r\n\r\n        self.__app = app # Reference to main application class\r\n        self.__master = self.__app.appmaster # Reference to root class (Tk)\r\n        Frame.__init__(self, self.__master, *args, **kwargs)\r\n\r\n        self._status = StringVar()\r\n        self._connection = StringVar()\r\n        self.width, self.height = self.get_size()\r\n        self._body()\r\n\r\n        self.bind(\"<Configure>\", self._on_resize)\r\n\r\n    def _body(self):\r\n        \"\"\"\r\n        Set up frame and widgets.\r\n        \"\"\"\r\n\r\n        self.grid_rowconfigure(0, weight=1)\r\n\r\n        self.option_add(\"*Font\", self.__app.font_md)\r\n\r\n        self._lbl_connection = Label(self, textvariable=self._connection, anchor=W)\r\n        self._lbl_status_preset = Label(self, 
textvariable=self._status, anchor=W)\r\n self._lbl_connection.grid(column=0, row=0, sticky=(W, E))\r\n ttk.Separator(self, orient=VERTICAL).grid(column=1, row=0, sticky=(N, S))\r\n self._lbl_status_preset.grid(column=2, row=0, sticky=(W, E))\r\n\r\n def set_connection(self, connection: str, color: str = \"\"):\r\n \"\"\"\r\n Sets connection description in status bar.\r\n\r\n :param str connection: description of connection\r\n :param str color: rgb color string (default=blue)\r\n\r\n \"\"\"\r\n\r\n if len(connection) > 100:\r\n connection = \"...\" + connection[-100:]\r\n if color != \"\":\r\n self._lbl_connection.config(fg=color)\r\n self._connection.set(\" \" + connection)\r\n\r\n def set_status(self, message, color: str = \"\"):\r\n \"\"\"\r\n Sets message in status bar.\r\n\r\n :param str message: message to be displayed in status bar\r\n :param str color: rgb color string (default=blue)\r\n\r\n \"\"\"\r\n\r\n if len(message) > 200:\r\n message = \"...\" + message[-200:]\r\n if color != \"\":\r\n self._lbl_status_preset.config(fg=color)\r\n self._status.set(\" \" + message)\r\n\r\n def clear_status(self):\r\n \"\"\"\r\n Clears status bar.\r\n \"\"\"\r\n\r\n self._connection.set(\"\")\r\n self._status.set(\"\")\r\n\r\n def _on_resize(self, event): # pylint: disable=unused-argument\r\n \"\"\"\r\n Resize frame\r\n\r\n :param event event: resize event\r\n\r\n \"\"\"\r\n\r\n self.width, self.height = self.get_size()\r\n\r\n def get_size(self):\r\n \"\"\"\r\n Get current frame size.\r\n\r\n :return: window size (width, height)\r\n :rtype: tuple\r\n \"\"\"\r\n\r\n self.update_idletasks() # Make sure we know about any resizing\r\n width = self.winfo_width()\r\n height = self.winfo_height()\r\n return (width, height)\r\n","repo_name":"semuconsulting/PyGPSClient","sub_path":"src/pygpsclient/status_frame.py","file_name":"status_frame.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"65"} +{"seq_id":"37342126990","text":"import numpy as np\nimport data_loader.data_helper as helper\nimport Utils.CONFIG\n\n\nclass DataGenerator:\n def __init__(self, config):\n self.config = config\n # load data here\n self.batch_size = self.config.batch_size\n self.load_data()\n\n # load the specified dataset in the config to the data_generator instance\n def load_data(self):\n graphs, labels = helper.hierarchy_load_dataset(self.config.dataset_name, self.config.input_order)\n graphs, labels = helper.shuffle(graphs, labels)\n idx = len(graphs) // 10\n self.train_graphs, self.train_labels, self.val_graphs, self.val_labels = graphs[idx:], labels[idx:], graphs[:idx], labels[:idx]\n # change validation graphs to the right shape\n self.val_graphs = [np.expand_dims(g, 0) for g in self.val_graphs]\n self.train_size = len(self.train_graphs)\n self.val_size = len(self.val_graphs)\n\n def next_batch(self):\n return next(self.iter)\n\n # initialize an iterator from the data for one training epoch\n def initialize(self, is_train):\n if is_train:\n self.reshuffle_data()\n else:\n self.iter = zip(self.val_graphs, self.val_labels)\n\n # resuffle data iterator between epochs\n def reshuffle_data(self):\n graphs, labels = helper.group_same_size(self.train_graphs, self.train_labels)\n graphs, labels = helper.shuffle_same_size(graphs, labels)\n graphs, labels = helper.split_to_batches(graphs, labels, self.batch_size)\n self.num_iterations_train = len(graphs)\n graphs, labels = helper.shuffle(graphs, labels)\n self.iter = zip(graphs, 
labels)\n\n","repo_name":"JiaHe-yogurt/Equivariant-Graph-Learning-with-Provable-Expressive-Power","sub_path":"gnn/data_loader/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"34839552813","text":"from datetime import datetime\nfrom dateutil import tz\nimport uuid\n\nclass Event:\n\t\"\"\"Represents a calendar ics\"\"\"\n\tdef __init__(self, name, begin, end, location, person, uid=None, cancel=False):\n\t\tself.name = name\n\t\tself.begin = begin\n\t\tself.end = end\n\t\tself.location = location\n\t\tself.person = person\n\t\tself.uid = uid\n\t\tself.cancel = cancel\n\tdef create_string_event(self):\n\t\tx = \"\"\n\t\tx = x + \"BEGIN:VEVENT\\n\"\n\t\tto_zone = tz.tzutc()\n\t\tfrom_zone = tz.tzlocal()\n\t\tself.begin = self.begin.replace(tzinfo=from_zone)\n\t\tself.begin = self.begin.astimezone(to_zone)\n\t\tself.end = self.end.replace(tzinfo=from_zone)\n\t\tself.end = self.end.astimezone(to_zone)\n\n\t\tstr_people = \"\"\n\t\ti = 1\n\t\tfor person in self.person:\n\t\t\tif i == len(self.person):\n\t\t\t\tstr_people += person\n\t\t\telse:\n\t\t\t\tstr_people += person + \", \"\n\t\t\ti = i + 1\n\t\tif self.uid:\n\t\t\tuid = self.uid + '\\n'\n\t\t\tif self.cancel:\n\t\t\t\tuid += 'STATUS:CANCELLED\\n'\n\t\telse:\n\t\t\tuid = str(uuid.uuid1()) + '\\n'\n\t\tx = x + \"DTSTAMP:20151231T000000Z\\n\"\n\t\tx = x + \"DTSTART:\" + self.begin.strftime(\"%Y%m%dT%H%M%SZ\")+ '\\n'\n\t\tx = x + \"DTEND:\" + self.end.strftime(\"%Y%m%dT%H%M%SZ\") + '\\n'\n\t\tx = x + \"SUMMARY:\" + self.name + '\\n'\n\t\tx = x + \"DESCRIPTION:\" + \"with \" + str_people + '\\n'\n\t\tx = x + \"LOCATION:\" + self.location + '\\n'\n\t\tx = x + \"UID:\" + uid\n\t\tx = x + \"END:VEVENT\"\n\t\treturn x","repo_name":"kl2532/LifePlanner","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"33354796625","text":"__author__ = 'elip'\n\nfrom setuptools import setup\n\nCOSMO_CELERY_VERSION = \"0.3\"\nCOSMO_CELERY_BRANCH = \"develop\"\nCOSMO_CELERY = \"https://github.com/CloudifySource\" \\\n \"/cosmo-celery-common/tarball/{0}\"\\\n .format(COSMO_CELERY_BRANCH)\n\n# Replace the place holders with values for your project\n\nsetup(\n name='${PLUGIN_NAME}',\n version='${VERSION}',\n author='${AUTHOR}',\n author_email='${AUTHOR_EMAIL}',\n packages=['plugin'],\n license='LICENSE',\n description='${DESCRIPTION}',\n zip_safe=False,\n install_requires=[\n \"cosmo-celery-common\" # Necessary dependency for developing plugins. 
do not remove\n ],\n test_requires=[\n \"nose\"\n ],\n dependency_links=[\"{0}#egg=cosmo-celery-common-{1}\"\n .format(COSMO_CELERY, COSMO_CELERY_VERSION)]\n)\n","repo_name":"CloudifySource/cloudify-plugin-template","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"18356616319","text":"N = int(input())\nd = {}\nfor i in range(N):\n s = list(map(str, input()))\n s.sort()\n s = \"\".join(s)\n d[s] = d[s] + 1 if s in d else 1\nans = 0\nfor i in d.values():\n ans += i * (i-1) // 2\nprint(ans)\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02947/s374464972.py","file_name":"s374464972.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18191380379","text":"# abc171_b.py\n# https://atcoder.jp/contests/abc171/tasks/abc171_b\n\n# B - Mix Juice /\n# 実行時間制限: 2 sec / メモリ制限: 1024 MB\n# 配点 : 200点\n\n# 問題文\n# ある店で N種類の果物、果物 1,…,N が売られており、それぞれの価格は一個あたり p1,…,pN円です。\n# この店で K種類の果物を一個ずつ買うとき、それらの合計価格として考えられる最小の金額を求めてください。\n\n# 制約\n# 1≤K≤N≤1000\n# 1≤pi≤1000\n# 入力中の値はすべて整数である。\n\n# 入力\n# 入力は以下の形式で標準入力から与えられる。\n# N K\n# p1 p2 … pN\n\n# 出力\n# 果物の最小の合計価格を表す整数を出力せよ。\n\n# 入力例 1\n# 5 3\n# 50 100 80 120 80\n\n# 出力例 1\n# 210\n\n# この店では、果物 1,2,3,4,5がそれぞれ 50 円、100 円、80 円、120 円、80円で売られています。\n# これらから 3種類を買うときの最小合計価格は、果物 1,3,5 を買うときの 50+80+80=210円です。\n\n# 入力例 2\n# 1 1\n# 1000\n\n# 出力例 2\n# 1000\n\n\nglobal FLAG_LOG\nFLAG_LOG = False\n\n\ndef log(value):\n # FLAG_LOG = True\n FLAG_LOG = False\n if FLAG_LOG:\n print(str(value))\n\n\ndef calculation(lines):\n # S = lines[0]\n # N = int(lines[0])\n N, K = list(map(int, lines[0].split()))\n values = list(map(int, lines[1].split()))\n # values = list(map(int, lines[2].split()))\n # values = list()\n # for i in range(N):\n # values.append(int(lines[i]))\n # valueses = list()\n # for i in range(N):\n # valueses.append(list(map(int, lines[i+1].split())))\n\n values.sort()\n tmps = values[:K]\n result = sum(tmps)\n\n return [result]\n\n\n# 引数を取得\ndef get_input_lines(lines_count):\n lines = list()\n for _ in range(lines_count):\n lines.append(input())\n return lines\n\n\n# テストデータ\ndef get_testdata(pattern):\n if pattern == 1:\n lines_input = ['5 3', '50 100 80 120 80']\n lines_export = [210]\n if pattern == 2:\n lines_input = ['1 1', '1000']\n lines_export = [1000]\n return lines_input, lines_export\n\n\n# 動作モード判別\ndef get_mode():\n import sys\n args = sys.argv\n global FLAG_LOG\n if len(args) == 1:\n mode = 0\n FLAG_LOG = False\n else:\n mode = int(args[1])\n FLAG_LOG = True\n return mode\n\n\n# 主処理\ndef main():\n import time\n started = time.time()\n mode = get_mode()\n if mode == 0:\n lines_input = get_input_lines(2)\n else:\n lines_input, lines_export = get_testdata(mode)\n\n lines_result = calculation(lines_input)\n\n for line_result in lines_result:\n print(line_result)\n\n # if mode > 0:\n # print(f'lines_input=[{lines_input}]')\n # print(f'lines_export=[{lines_export}]')\n # print(f'lines_result=[{lines_result}]')\n # if lines_result == lines_export:\n # print('OK')\n # else:\n # print('NG')\n # finished = time.time()\n # duration = finished - started\n # print(f'duration=[{duration}]')\n\n\n# 起動処理\nif __name__ == '__main__':\n 
main()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02628/s600423209.py","file_name":"s600423209.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27775683513","text":"from art.tests.factories import ProductFactory\nfrom django.test import TestCase\nfrom account.models import User\nfrom shop.models import Client\n\n\nclass AddressTests(TestCase):\n def setUp(self):\n self.user = User.objects.create(\n username=\"c1\",\n first_name=\"name_c1\",\n last_name=\"sure_name_c1\",\n password=\"123456Mp\",\n email=\"c1@c1.c1\",\n )\n self.shop_client = Client.objects.create(user=self.user)\n ProductFactory().save()\n\n def test__should_return_add_address_form_if_method_is_get(self):\n response = self.client.get(\"/shop/add_address/\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.context[\"form\"])\n\n def test_should_add_address_to_desired_clinet_if_form_is_valid(self):\n self.client.force_login(user=self.user)\n session = self.client.session\n session[\"cart\"] = [{\"product_id\": 1, \"quantity\": 2}]\n session.save()\n address_data = {\n \"city\": \"city_4\",\n \"street\": \"street_4\",\n \"number\": \"44\",\n \"zip_code\": \"44-111\",\n }\n\n response = self.client.post(\"/shop/add_address/\", address_data)\n\n self.assertEqual(response.status_code, 200)\n","repo_name":"mario-pe/art_gallery","sub_path":"gallery/shop/tests/test_address.py","file_name":"test_address.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38206631885","text":"from torch.onnx import register_custom_op_symbolic\nfrom torch.onnx.symbolic_helper import parse_args, _get_tensor_dim_size, _get_tensor_sizes\nimport torch.onnx.symbolic_helper as sym_help\nimport torch\n\n\nclass CustomOpSymbolicRegistry:\n _SYMBOLICS = {}\n\n @classmethod\n def register(cls, name, domain, fn):\n cls._SYMBOLICS[domain + '::' + name] = fn\n\n @classmethod\n def register_all(cls):\n for name, fn in cls._SYMBOLICS.items():\n # Symbolic name is in format: domain::name\n register_custom_op_symbolic(name, fn, 1)\n\n\ndef register_symbolic(name, domain=''):\n def symbolic_wrapper(fn):\n CustomOpSymbolicRegistry.register(name, domain, fn)\n return fn\n return symbolic_wrapper\n\n\n@register_symbolic('cross_entropy_loss')\n@parse_args('v', 'v', 'v', 'i', 'v', 'v')\ndef cross_entropy_loss(g, self, target, weight, reduction, ignore_index, label_smoothing=0.0):\n label_smoothing = sym_help._maybe_get_const(label_smoothing, \"f\")\n if label_smoothing > 0.0:\n raise RuntimeError(\"Unsupported: ONNX does not support label_smoothing\")\n\n # reduction: 0->none, 1->mean, 2->sum\n reduction = sym_help._maybe_get_const(reduction, 'i')\n reduction_vals = ['none', 'mean', 'sum']\n reduction = reduction_vals[reduction]\n output, log_prob = g.op(\"com.microsoft::SoftmaxCrossEntropyLossInternal\",\n self, target, weight, ignore_index,\n reduction_s=reduction, outputs=2)\n output.setType(self.type())\n log_prob.setType(self.type())\n return output\n\n\n@register_symbolic('nll_loss')\n@parse_args('v', 'v', 'v', 'i', 'v')\ndef nll_loss(g, self, target, weight, reduction, ignore_index):\n # reduction: 0->none, 1->mean, 2->sum\n reduction = sym_help._maybe_get_const(reduction, 'i')\n reduction_vals = ['none', 'mean', 'sum']\n reduction = reduction_vals[reduction]\n output = 
g.op(\"com.microsoft::NegativeLogLikelihoodLossInternal\",\n self, target, weight, ignore_index, reduction_s=reduction)\n output.setType(self.type())\n return output\n\n\n@register_symbolic('embedding')\ndef embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):\n output = g.op(\"org.pytorch.aten::ATen\", weight, indices, padding_idx, scale_grad_by_freq, sparse,\n operator_s='aten::embedding')\n indices_shape = _get_tensor_sizes(indices)\n if indices_shape is not None and hasattr(weight.type(), 'with_sizes'):\n output_type = weight.type().with_sizes(\n indices_shape + [_get_tensor_dim_size(weight, 1)])\n output.setType(output_type)\n return output\n\n@register_symbolic('bitwise_or')\ndef bitwise_or(g, self, other):\n return g.op(\"org.pytorch.aten::ATen\", self, other,\n operator_s='aten::bitwise_or', overload_name_s='Tensor')\n\n@register_symbolic('diagonal')\ndef diagonal(g, self, offset, dim1, dim2):\n return g.op(\"org.pytorch.aten::ATen\", self, offset, dim1, dim2,\n operator_s='aten::diagonal')\n\n\n@register_symbolic('multinomial')\ndef multinomial(g, self, num_samples, replacement=False, generator=None):\n if generator is not None and not sym_help._is_none(generator):\n raise RuntimeError(\"Unsupported: ONNX does not support generator for multinomial\")\n return g.op(\"org.pytorch.aten::ATen\", self, num_samples, replacement, generator,\n operator_s='aten::multinomial')\n\n\n@register_symbolic('max_pool2d')\ndef max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode):\n stride_val = sym_help._maybe_get_const(stride, 'is')\n if not stride_val:\n stride = kernel_size\n return g.op(\"org.pytorch.aten::ATen\", self, kernel_size, stride, padding, dilation, ceil_mode,\n operator_s='aten::max_pool2d_with_indices', outputs=2)[0]\n\n\n@register_symbolic('unfold')\ndef unfold(g, input, dimension, size, step):\n return g.op(\"org.pytorch.aten::ATen\", input, dimension, size, step, operator_s='aten::unfold')\n\n\n@register_symbolic('argmax')\ndef argmax(g, input, dim, keepdim):\n return g.op(\"org.pytorch.aten::ATen\", input, dim, keepdim, operator_s='aten::argmax')\n\n\n@register_symbolic('avg_pool2d')\ndef avg_pool2d(g, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override):\n stride_val = sym_help._maybe_get_const(stride, 'is')\n if not stride_val:\n stride = kernel_size\n return g.op(\"org.pytorch.aten::ATen\", self, kernel_size, stride, padding, ceil_mode,\n count_include_pad, divisor_override, operator_s='aten::avg_pool2d')\n\n\n@register_symbolic('adaptive_avg_pool2d')\ndef adaptive_avg_pool2d(g, self, output_size):\n return g.op(\"org.pytorch.aten::ATen\", self, output_size, operator_s='aten::_adaptive_avg_pool2d')\n\n\n@register_symbolic('binary_cross_entropy_with_logits')\ndef binary_cross_entropy_with_logits(g, self, target, weight, pos_weight, reduction):\n # If weight is not None, we need to check if it requires grad and add gradient graph accordingly.\n # But current custom_gradient_registry doesn't support such None checking,\n # So doesn't support non-None weight for now.\n if weight is None or sym_help._is_none(weight):\n return g.op(\"org.pytorch.aten::ATen\", self, target, weight, pos_weight, reduction,\n operator_s='aten::binary_cross_entropy_with_logits')\n from torch.onnx.symbolic_opset12 import binary_cross_entropy_with_logits as bce\n return bce(g, self, target, weight, pos_weight, reduction)\n\n@register_symbolic('numpy_T')\ndef numpy_T(g, self):\n # Numpy-style `a.T`: returns the tensor\n # with dims 
reversed\n rank = sym_help._get_tensor_rank(self)\n if rank is not None:\n axes = list(reversed(range(rank)))\n return g.op(\"Transpose\", self, perm_i=axes)\n else:\n # if we don't have dim information we cannot\n # output a permute so use ATen instead\n return g.op(\"com.microsoft::ATenOp\", self, name_s='aten::numpy_T')\n\n# For torch.einsum.\ndef parse_equation(equation):\n pos_comma = equation.find(',')\n pos_arrow = equation.find('->')\n assert pos_comma != -1 and pos_arrow > pos_comma\n lhs_labels = [label for label in equation[:pos_comma] if label != ' ']\n rhs_labels = [label for label in equation[pos_comma + 1:pos_arrow] if label != ' ']\n result_labels = [label for label in equation[pos_arrow + 2:] if label != ' ']\n # Two operands and result are not empty, and are all alpha characters.\n assert lhs_labels and rhs_labels and result_labels\n assert all(label.isalpha() for label in lhs_labels + rhs_labels + result_labels)\n # Output has no repeated label, each label must be in at least one operand.\n assert len(result_labels) == len(set(result_labels))\n assert all(label in lhs_labels or label in rhs_labels for label in result_labels)\n return lhs_labels, rhs_labels, result_labels\n\ndef need_permute(perm):\n return any(idx != axis for idx, axis in enumerate(perm))\n\ndef map_labels_to_output(input_labels, label_perm_map):\n output_len = len(label_perm_map)\n perm = [-1] * output_len\n unsqueeze_axes = []\n idx = 0\n for label in input_labels:\n # Lookup output index for label.\n perm[label_perm_map[label]] = idx\n idx += 1\n\n # Add dimensions for missing labels.\n for i in range(output_len):\n if perm[i] == -1:\n unsqueeze_axes.append(idx)\n perm[i] = idx\n idx += 1\n\n return perm, unsqueeze_axes\n\ndef unsqueeze_and_permute_for_mul(g, tensor, unsqueeze_axes, perm):\n # If perm is sorted after removing unsqueeze axes, then permute is not needed.\n # For example, a.unsqueeze(2).permute([0, 2, 1]) is same as a.unsqueeze(1).\n if unsqueeze_axes:\n new_perm = [v for v in perm if v not in unsqueeze_axes]\n sorted = all(new_perm[i] < new_perm[i + 1] for i in range(len(new_perm) - 1))\n if sorted:\n return sym_help._unsqueeze_helper(g, tensor, [perm.index(axis) for axis in unsqueeze_axes])\n\n if len(unsqueeze_axes) > 0:\n tensor = sym_help._unsqueeze_helper(g, tensor, unsqueeze_axes)\n if need_permute(perm):\n tensor = g.op(\"Transpose\", tensor, perm_i=perm)\n return tensor\n\ndef combine_unsqueeze_and_permute_for_matmul(unsqueeze_axes, perm1, perm2):\n # When going here, the unsqueeze axes must be some axes at the end.\n # We can combine two permutes and remove unsqueeze axes, because we will reshape it after this.\n # For example, a.unsqueeze([2,3]).permute([2,3,1,0]).permute([0,1,3,2])\n # = a.unsqueeze([2,3]).permute([2,3,0,1]) = a.permute([0,1]) = a.\n new_perm = [perm1[axis] for axis in perm2]\n new_perm = [axis for axis in new_perm if axis not in unsqueeze_axes]\n return new_perm\n\ndef is_axes_contiguous(axes):\n return len(axes) < 2 or all(axes[axis] + 1 == axes[axis + 1] for axis in range(len(axes) - 1))\n\ndef get_shape_tensor_by_axes(g, input, input_shape, axes, need_numel_shape):\n if input_shape is None:\n input_shape = g.op(\"Shape\", input)\n shape_tensor = g.op(\"Gather\", input_shape, g.op(\"Constant\", value_t=torch.tensor(axes, dtype=torch.int64)), axis_i=0)\n numel_shape_tensor = None\n if need_numel_shape:\n assert len(axes) > 1\n numel_shape_tensor = g.op(\"ReduceProd\", shape_tensor)\n return shape_tensor, numel_shape_tensor, input_shape\n\ndef 
reshape_tensor(g, input, shape_tensors):\n shape_tensor = g.op(\"Concat\", *shape_tensors, axis_i=0) if len(shape_tensors) > 1 else shape_tensors[0]\n return g.op(\"Reshape\", input, shape_tensor)\n\ndef permute_and_reshape_tensor(g, tensor, is_lhs, rank, perm, matmul_output_axes, contraction_axes,\n batch_length, matmul_output_numel_tensor, contraction_numel_tensor, shape_tensor):\n # If matmul_output_axes and contraction_axes are contiguous in input tensor,\n # we can move Reshape to before Transpose, so it's possible that the Transpoase is fused to MatMul.\n # Otherwise, we have to Transpose first to move those axes together and then Reshape.\n is_matmul_output_axes_contiguous = is_axes_contiguous(matmul_output_axes)\n is_contraction_axes_contiguous = is_axes_contiguous(contraction_axes)\n if is_matmul_output_axes_contiguous and is_contraction_axes_contiguous:\n # Combine contiguous axes to one axis.\n first_matmul_output_axis = matmul_output_axes[0] if len(matmul_output_axes) > 1 else -1\n first_contraction_axis = contraction_axes[0] if len(contraction_axes) > 1 else -1\n # If length of matmul_output_axes and contraction_axes are less than 2, no need to Reshape,\n # it needs an Unsqueeze and a Transpose if needed.\n if first_matmul_output_axis == -1 and first_contraction_axis == -1:\n assert not matmul_output_axes and len(contraction_axes) == 1\n if need_permute(perm):\n new_tensor = sym_help._unsqueeze_helper(g, tensor, [-1])\n pos = batch_length if is_lhs else len(perm)\n perm = perm[:pos] + [len(perm)] + perm[pos:]\n new_tensor = g.op(\"Transpose\", new_tensor, perm_i=perm)\n else:\n new_tensor = sym_help._unsqueeze_helper(g, tensor, [batch_length if is_lhs else -1])\n else:\n axes_to_remove = contraction_axes[1:] # contraction_axes can't be empty.\n if len(matmul_output_axes) > 1:\n axes_to_remove = axes_to_remove + matmul_output_axes[1:]\n remaining_axes = [axis for axis in range(rank) if axis not in axes_to_remove]\n # Calculate the new shape, use 0 or -1 if possible.\n shape_tensors = []\n all_zeros = True\n for axis in remaining_axes:\n if axis == first_matmul_output_axis:\n shape_tensors.append(matmul_output_numel_tensor)\n all_zeros = False\n elif axis == first_contraction_axis:\n shape_tensors.append(contraction_numel_tensor)\n all_zeros = False\n elif all_zeros:\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.int64)))\n elif axis == remaining_axes[-1]:\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([-1], dtype=torch.int64)))\n else:\n single_axis_shape_tensor, _, shape_tensor = get_shape_tensor_by_axes(\n g, tensor, shape_tensor, [axis], False)\n shape_tensors.append(single_axis_shape_tensor)\n # Adjust the perm.\n perm = [axis for axis in perm if axis not in axes_to_remove]\n new_axis = 0\n for axis in remaining_axes:\n perm[perm.index(axis)] = new_axis\n new_axis += 1\n # If matmul_output_axes is empty, need to add a dim-1 axis.\n if not matmul_output_axes:\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([1], dtype=torch.int64)))\n pos = batch_length if is_lhs else len(perm)\n perm = perm[:pos] + [new_axis] + perm[pos:]\n new_tensor = reshape_tensor(g, tensor, shape_tensors)\n if need_permute(perm):\n new_tensor = g.op(\"Transpose\", new_tensor, perm_i=perm)\n else:\n if need_permute(perm):\n new_tensor = g.op(\"Transpose\", tensor, perm_i=perm)\n # Calculate the new shape, use 0 or -1 if possible.\n shape_tensors = [g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.int64))] * batch_length\n if 
is_lhs:\n if matmul_output_numel_tensor is None:\n matmul_output_numel_tensor = g.op(\"Constant\", value_t=torch.tensor([1 - len(matmul_output_axes)], dtype=torch.int64))\n shape_tensors.append(matmul_output_numel_tensor)\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([-1], dtype=torch.int64)))\n else:\n if contraction_numel_tensor is None: # contraction_axes can't be empty, None here means only one contraction axis.\n contraction_numel_tensor = g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.int64))\n shape_tensors.append(contraction_numel_tensor)\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([-1], dtype=torch.int64)))\n new_tensor = reshape_tensor(g, new_tensor, shape_tensors)\n return new_tensor, shape_tensor\n\n@register_symbolic('einsum')\n@parse_args('s', 'v')\ndef einsum(g, equation, tensor_list):\n tensors = sym_help._unpack_list(tensor_list)\n num_ops = len(tensors)\n assert num_ops > 0\n\n # Doesn't support implicit output is ellipsis or more than 2 oprands for now.\n # Doesn't support ellipsis ('...') for now as not easy to get sizes of oprands.\n if num_ops != 2 or equation.find('->') == -1 or '.' in equation:\n return g.op(\"Einsum\", *tensors, equation_s=equation)\n\n # Take \"ks,ksm->sm\" as example. After prcoess inputs,\n # lhs_labels = [k,s], rhs_labels = [k,s,m], result_labels = [s,m].\n lhs_labels, rhs_labels, result_labels = parse_equation(equation)\n\n # Doesn't support repeated label in operand for now as it needs to take extra diagonal.\n if len(lhs_labels) != len(set(lhs_labels)) or len(rhs_labels) != len(set(rhs_labels)):\n return g.op(\"Einsum\", *tensors, equation_s=equation)\n\n # Add contraction labels (labels not present in output).\n # After process contraction labels, contraction_labels = [k],\n # label_perm_map = {(s, 0), (m, 1), (k, 2)}, out_size = 2, perm_size = 3.\n out_size = len(result_labels)\n label_perm_map = dict([(label, idx) for idx, label in enumerate(result_labels)])\n perm_size = out_size\n contraction_labels = []\n lhs_reduce_sum_axes = []\n rhs_reduce_sum_axes = []\n for label in lhs_labels + rhs_labels:\n if label not in label_perm_map:\n if label in lhs_labels and label in rhs_labels:\n label_perm_map[label] = perm_size\n contraction_labels.append(label)\n perm_size += 1\n elif label in lhs_labels:\n lhs_reduce_sum_axes.append(lhs_labels.index(label))\n else:\n rhs_reduce_sum_axes.append(rhs_labels.index(label))\n\n lhs_tensor = tensors[0]\n rhs_tensor = tensors[1]\n\n # If lhs_reduce_sum_axes/rhs_reduce_sum_axes is not empty, ReduceSum on that axes, update lhs_labels/rhs_labels,\n # and use the output as original_lhs_tensor/original_rhs_tensor.\n if lhs_reduce_sum_axes:\n lhs_tensor = sym_help._reducesum_helper(g, lhs_tensor, lhs_reduce_sum_axes, keepdims_i=False)\n lhs_labels = [lhs_labels[axis] for axis in range(len(lhs_labels)) if axis not in lhs_reduce_sum_axes]\n\n if rhs_reduce_sum_axes:\n rhs_tensor = sym_help._reducesum_helper(g, rhs_tensor, rhs_reduce_sum_axes, keepdims_i=False)\n rhs_labels = [rhs_labels[axis] for axis in range(len(rhs_labels)) if axis not in rhs_reduce_sum_axes]\n\n # Need to unsqueeze and permute the inputs to order of output with contraction labels.\n # lhs_perm = [1,2,0], lhs_unsqueeze_axes = [2].\n # rhs_perm = [1,2,0], rhs_unsqueeze_axes = [].\n lhs_perm, lhs_unsqueeze_axes = map_labels_to_output(lhs_labels, label_perm_map)\n rhs_perm, rhs_unsqueeze_axes = map_labels_to_output(rhs_labels, label_perm_map)\n\n # If there is no contraction labels, unsqueeze and 
permute the inputs and Mul them to get final result.\n if not contraction_labels:\n lhs_tensor = unsqueeze_and_permute_for_mul(g, lhs_tensor, lhs_unsqueeze_axes, lhs_perm)\n rhs_tensor = unsqueeze_and_permute_for_mul(g, rhs_tensor, rhs_unsqueeze_axes, rhs_perm)\n return g.op(\"Mul\", lhs_tensor, rhs_tensor)\n\n # If contraction_labels is not empty, need a BatchedMatMul.\n # Batched labels are those in all inputs and output. Below axes are based on output.\n # batched_labels = [s], batched_axes = [0] for the example.\n # Matmul output labels are those in one of inputs and output.\n # matmul_output_labels = [m], matmul_output_axes = [1] for the example.\n # contraction_labels = [k], contraction_axes = [2] for the example.\n batched_axes = []\n matmul_output_axes = []\n contraction_axes = [axis for axis in range(out_size, perm_size)]\n for axis in range(out_size):\n label = result_labels[axis]\n if label in lhs_labels and label in rhs_labels:\n batched_axes.append(axis)\n else:\n matmul_output_axes.append(axis)\n\n # Based on above unsqueeze and permute on inputs, need to permute again.\n # For lhs input, the new permute is batched_axes + matmul_output_axes + contraction_axes: [0, 1, 2],\n # i.e., a.unsqueeze([2]).permute([1,2,0]).permute([0,1,2]) = [s,1,k] for the example.\n # For rhs input, the new permute is batched_axes + contraction_axes + matmul_output_axes: [0, 2, 1].\n # i.e., b.unsqueeze([]).permute([1,2,0]).permute([0,2,1]) = [s,k,m] for the example.\n lhs_perm = combine_unsqueeze_and_permute_for_matmul(lhs_unsqueeze_axes, lhs_perm, batched_axes + matmul_output_axes + contraction_axes)\n rhs_perm = combine_unsqueeze_and_permute_for_matmul(rhs_unsqueeze_axes, rhs_perm, batched_axes + contraction_axes + matmul_output_axes)\n\n # Need to Reshape two input tensors before the BatchedMatMul and Reshape result to output shape.\n # Reshape lhs input to [[batched_shapes], Mul(lhs_matmul_output_shapes), Mul(contraction_shapes)].\n # Reshape rhs input to [[batched_shapes], Mul(contraction_shapes), Mul(rhs_matmul_output_shapes)]\n # Convert all axes based on inputs.\n # lhs_contraction_axes = [0], rhs_contraction_axes = [0], lhs_matmul_output_axes = [], rhs_matmul_output_axes = [2] for the example.\n lhs_contraction_axes = [lhs_labels.index(label) for label in contraction_labels]\n rhs_contraction_axes = [rhs_labels.index(label) for label in contraction_labels]\n lhs_matmul_output_axes = [lhs_labels.index(result_labels[axis]) for axis in matmul_output_axes if result_labels[axis] in lhs_labels]\n rhs_matmul_output_axes = [rhs_labels.index(result_labels[axis]) for axis in matmul_output_axes if result_labels[axis] in rhs_labels]\n\n # Caches of input shape tensors to avoid generating duplicated graph.\n lhs_shape_tensor = None\n rhs_shape_tensor = None\n\n # contraction_numel_tensor should be tensor([size(k)]) for the example, but since length is 1, it's None here.\n contraction_numel_tensor = None\n if len(lhs_contraction_axes) > 1:\n _, contraction_numel_tensor, lhs_shape_tensor = get_shape_tensor_by_axes(\n g, lhs_tensor, lhs_shape_tensor, lhs_contraction_axes, True)\n\n # Prepare some shape tensors for Reshape if needed.\n # Both lhs_matmul_output_shape_tensor and lhs_matmul_output_numel_tensor is None for the example.\n lhs_matmul_output_shape_tensor = None\n lhs_matmul_output_numel_tensor = None\n if len(lhs_matmul_output_axes) > 1:\n lhs_matmul_output_shape_tensor, lhs_matmul_output_numel_tensor, lhs_shape_tensor = get_shape_tensor_by_axes(\n g, lhs_tensor, lhs_shape_tensor, 
lhs_matmul_output_axes, True)\n\n # Both rhs_matmul_output_shape_tensor and rhs_matmul_output_numel_tensor is None for the example.\n rhs_matmul_output_shape_tensor = None\n rhs_matmul_output_numel_tensor = None\n if len(rhs_matmul_output_axes) > 1:\n rhs_matmul_output_shape_tensor, rhs_matmul_output_numel_tensor, rhs_shape_tensor = get_shape_tensor_by_axes(\n g, rhs_tensor, rhs_shape_tensor, rhs_matmul_output_axes, True)\n\n new_lhs_tensor = lhs_tensor\n # Need to Reshape lhs_tensor if lhs_matmul_output_axes or lhs_contraction_axes is not 1, otherwise permute it directly.\n # Need to Reshape the lhs_tensor for the example, the new shape is [size(s), 1, size(k)].\n if len(lhs_matmul_output_axes) != 1 or len(lhs_contraction_axes) != 1:\n new_lhs_tensor, lhs_shape_tensor = permute_and_reshape_tensor(\n g, lhs_tensor, True, len(lhs_labels), lhs_perm, lhs_matmul_output_axes, lhs_contraction_axes,\n len(batched_axes), lhs_matmul_output_numel_tensor, contraction_numel_tensor, lhs_shape_tensor)\n else:\n if need_permute(lhs_perm):\n new_lhs_tensor = g.op(\"Transpose\", lhs_tensor, perm_i=lhs_perm)\n\n # Need to Reshape rhs_tensor if rhs_matmul_output_axes or rhs_contraction_axes is not 1, otherwise permute it directly.\n # rhs_tensor's new shape should be [size(s), size(k), size(m)], but doesn't need to Reshape for the example.\n new_rhs_tensor = rhs_tensor\n if len(rhs_matmul_output_axes) != 1 or len(rhs_contraction_axes) != 1:\n new_rhs_tensor, rhs_shape_tensor = permute_and_reshape_tensor(\n g, rhs_tensor, False, len(rhs_labels), rhs_perm, rhs_matmul_output_axes, rhs_contraction_axes,\n len(batched_axes), rhs_matmul_output_numel_tensor, contraction_numel_tensor, rhs_shape_tensor)\n else:\n if need_permute(rhs_perm):\n new_rhs_tensor = g.op(\"Transpose\", rhs_tensor, perm_i=rhs_perm)\n\n # Perform final BatchedMatMul. 
Output is shape [size(s), 1, size(m)] for the example.\n result = g.op(\"MatMul\", new_lhs_tensor, new_rhs_tensor)\n\n # Need to Reshape the result if lhs_matmul_output_axes or rhs_matmul_output_axes is not 1.\n # Need to Reshape the result for the example, the new shape is [size(s), size(m)].\n if len(lhs_matmul_output_axes) != 1 or len(rhs_matmul_output_axes) != 1:\n shape_tensors = [g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.int64))] * len(batched_axes)\n if lhs_matmul_output_axes:\n if len(lhs_matmul_output_axes) == 1:\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.int64)))\n else:\n shape_tensors.append(lhs_matmul_output_shape_tensor)\n if rhs_matmul_output_axes:\n if len(rhs_matmul_output_axes) == 1:\n shape_tensors.append(g.op(\"Constant\", value_t=torch.tensor([-1], dtype=torch.int64)))\n else:\n shape_tensors.append(rhs_matmul_output_shape_tensor)\n result = reshape_tensor(g, result, shape_tensors)\n\n # Now output axes is ordered by [batched_axes, lhs_matmul_output_axes, rhs_matmut_output_axes],\n # if this is not same as output, need one permute.\n labels = [result_labels[axis] for axis in batched_axes] + [\n lhs_labels[axis] for axis in lhs_matmul_output_axes] + [rhs_labels[axis] for axis in rhs_matmul_output_axes]\n assert len(labels) == out_size\n output_perm = [labels.index(label) for label in result_labels]\n assert all(axis in output_perm for axis in range(out_size))\n if need_permute(output_perm):\n result = g.op(\"Transpose\", result, perm_i=output_perm)\n\n return result\n# End of torch.einsum.\n","repo_name":"fengbingchun/PyTorch_Test","sub_path":"src/onnxruntime/orttraining/orttraining/python/training/ortmodule/_custom_op_symbolic_registry.py","file_name":"_custom_op_symbolic_registry.py","file_ext":"py","file_size_in_byte":24815,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"90"} +{"seq_id":"18464436901","text":"from random import random\ndef posEvalEndgameVariation(obs):\n '''\n input: obs: the board\n n: end game start at n empty spaces left \n '''\n valueMap = [[100, -20, 10, 5, 5, 10, -20, 100],\n [-20, -50, -2, -2, -2, -2, -50, -20],\n [10, -2, -1, -1, -1, -1, -2, 10],\n [5, -2, -1, -1, -1, -1, -2, 5],\n [5, -2, -1, -1, -1, -1, -2, 5],\n [10, -2, -1, -1, -1, -1, -2, 10],\n [-20, -50, -2, -2, -2, -2, -50, -20],\n [100, -20, 10, 5, 5, 10, -20, 100]]\n s = 0\n\n # if near the end of the game -> optimize stone amount\n simple_s = 0 # let all squares have same value -> just get stone amount\n empty = 0\n for i in range(8):\n for j in range(8):\n \n # Static score\n s += valueMap[i][j] * obs[i*8+j]\n # Endgame stone count\n simple_s += obs[i*8+j]\n # empty spaces\n if obs[i*8+j] == 0:\n empty += 1\n\n if empty <= 5:\n return simple_s * 16\n\n return s+random()\n\ndef posEval(obs):\n '''\n A simple sum of value for every tile.\n Should be good enough for testing.\n Took from: \n Reinforcement Learning and its Application to Othello.\n by Nees Jan van Eck, Michiel van Wezel\n '''\n valueMap = [[100, -20, 10, 5, 5, 10, -20, 100],\n [-20, -50, -2, -2, -2, -2, -50, -20],\n [10, -2, -1, -1, -1, -1, -2, 10],\n [5, -2, -1, -1, -1, -1, -2, 5],\n [5, -2, -1, -1, -1, -1, -2, 5],\n [10, -2, -1, -1, -1, -1, -2, 10],\n [-20, -50, -2, -2, -2, -2, -50, -20],\n [100, -20, 10, 5, 5, 10, -20, 100]]\n s = 0\n for i in range(8):\n for j in range(8): \n s += valueMap[i][j] * obs[i*8+j]\n return s+random()\n\ndef enhancedPosEval(obs, color):\n valueMap = [[500, -86, 96, 26, 26, 96, -86, 500],\n 
[-86, -1219, -6, 0, 0, -6, -1219, -86],\n [96, -6, 52, 15, 15, 52, -6, 96],\n [26, 0, 15, -17, -17, 15, 0, 26],\n [26, 0, 15, -17, -17, 15, 0, 26],\n [96, -6, 52, 15, 15, 52, -6, 96],\n [-86, -1219, -6, 0, 0, -6, -1219, -86],\n [500, -86, 96, 26, 26, 96, -86, 500]]\n s = 0\n liberty = 0\n # change tiles around corner to 0 if corner is taken\n direct = [(1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]\n if obs[0]:\n valueMap[0][1] = 0\n valueMap[1][1] = 0\n valueMap[1][0] = 0\n if obs[56]:\n valueMap[7][1] = 0\n valueMap[6][1] = 0\n valueMap[6][0] = 0\n if obs[7]:\n valueMap[0][6] = 0\n valueMap[1][6] = 0\n valueMap[1][7] = 0\n if obs[63]:\n valueMap[7][6] = 0\n valueMap[6][6] = 0\n valueMap[6][7] = 0\n\n # print( valueMap ) \n\n for i in range(8):\n for j in range(8):\n # static score\n s += valueMap[i][j] * obs[i*8+j]\n # liberty: difference empty space adjacent of two colors\n if obs[i*8+j] == 0:\n for d in direct:\n if 0<=i+d[0]<8 and 0<=j+d[1]<8:\n liberty += obs[ (i+d[0])*8+j+d[1] ]\n \n # TODO: edge/corner bonus\n bonusRecord = [0 for _ in range(64)]\n if obs[0] == color:\n for i in range(8): # right\n if obs[i*8] == color:\n bonusRecord[i*8] = 1\n else: break\n for i in range(8): # down\n if obs[i] == color:\n bonusRecord[i] = 1\n else: break\n if obs[7] == color:\n for i in range(8): # right\n if obs[i*8+7] == color:\n bonusRecord[i*8+7] = 1\n else: break\n for i in range(8): # up\n if obs[7-i] == color:\n bonusRecord[7-i] = 1\n else: break\n if obs[56] == color:\n for i in range(8): # left\n if obs[(7-i)*8] == color:\n bonusRecord[(7-i)*8] = 1\n else: break\n for i in range(8): # down\n if obs[56+i] == color:\n bonusRecord[56+i] = 1\n else: break\n if obs[63] == color:\n for i in range(8): # left\n if obs[(7-i)*8+7] == color:\n bonusRecord[(7-i)*8+7] = 1\n else: break\n for i in range(8): # up\n if obs[7*8+7-i] == color:\n bonusRecord[7*8+7-i] = 1\n else: break\n\n # print(bonusRecord)\n # print(liberty)\n\n return s + sum(bonusRecord)*26*color - liberty * 104 * color\n\nif __name__ == \"__main__\":\n board = [ 0, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 1,-1, 0, 0,-1,\n 0, 0, 0,-1, 1, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0]\n \n print(enhancedPosEval(board, 1))","repo_name":"GWFrank/ItC-Reversi-Agent","sub_path":"agent/GWFrank_func/eval_funcs.py","file_name":"eval_funcs.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72211369578","text":"# 누적 합\r\n# 각 층의 종유석과 석순의 개수를 누적합을 이용하면 효율적으로 계산할 수 있다.\r\n# 종유석의 경우 거꾸로 매달려있어 인덱싱에 주의를 요해야하는데\r\n# 종유석의 길이가 i라면, 종유석의 끝이 가리키는 층은 h - i + 1이 된다.\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn, h = map(int, input().split())\r\n\r\ndown = [0] * (h + 1)\r\nup = [0] * (h + 1)\r\nfor i in range(n):\r\n a = int(input())\r\n if i % 2 == 0:\r\n down[a] += 1\r\n else:\r\n # 종유석의 끝이 가리키는 층은 h - i + 1\r\n up[h - a + 1] += 1\r\n\r\n# 누적 합, O(h)\r\n# 석순은 5 -> 1, 종유석은 1 -> 5 순으로 누적 합\r\nfor i in range(2, h+1):\r\n # 석순은 5 -> 1\r\n down[h - i + 1] = down[h - i + 2] + down[h - i + 1]\r\n # 종유석은 1 -> 5\r\n up[i] = up[i-1] + up[i]\r\n\r\n# 각 층끼리 더해준다. O(h)\r\ndestroy = []\r\nfor i in range(1, h+1):\r\n destroy.append(down[i] + up[i])\r\n\r\n# 최솟값과 최솟값의 개수를 센다. O(h + h)\r\ndestroy_min = min(destroy)\r\nprint(destroy_min, destroy.count(destroy_min))\r\n\r\n\"\"\"\r\n문제\r\n개똥벌레 한 마리가 장애물(석순과 종유석)로 가득찬 동굴에 들어갔다. 동굴의 길이는 N미터이고, 높이는 H미터이다. 
\r\n(N은 짝수) 첫 번째 장애물은 항상 석순이고, 그 다음에는 종유석과 석순이 번갈아가면서 등장한다.\r\n\r\n아래 그림은 길이가 14미터이고 높이가 5미터인 동굴이다. (예제 그림)\r\n\r\n\r\n\r\n이 개똥벌레는 장애물을 피하지 않는다. 자신이 지나갈 구간을 정한 다음 일직선으로 지나가면서 만나는 모든 장애물을 파괴한다.\r\n\r\n위의 그림에서 4번째 구간으로 개똥벌레가 날아간다면 파괴해야하는 장애물의 수는 총 여덟개이다. \r\n(4번째 구간은 길이가 3인 석순과 길이가 4인 석순의 중간지점을 말한다)\r\n\r\n하지만, 첫 번째 구간이나 다섯 번째 구간으로 날아간다면 개똥벌레는 장애물 일곱개만 파괴하면 된다.\r\n\r\n동굴의 크기와 높이, 모든 장애물의 크기가 주어진다. \r\n이때, 개똥벌레가 파괴해야하는 장애물의 최솟값과 그러한 구간이 총 몇 개 있는지 구하는 프로그램을 작성하시오.\r\n\r\n입력\r\n첫째 줄에 N과 H가 주어진다. N은 항상 짝수이다. (2 ≤ N ≤ 200,000, 2 ≤ H ≤ 500,000)\r\n\r\n다음 N개 줄에는 장애물의 크기가 순서대로 주어진다. 장애물의 크기는 H보다 작은 양수이다.\r\n\r\n출력\r\n첫째 줄에 개똥벌레가 파괴해야 하는 장애물의 최솟값과 그러한 구간의 수를 공백으로 구분하여 출력한다.\r\n\r\n예제 입력 1 \r\n14 5\r\n1\r\n3\r\n4\r\n2\r\n2\r\n4\r\n3\r\n4\r\n3\r\n3\r\n3\r\n2\r\n3\r\n3\r\n예제 출력 1 \r\n7 2\r\n예제 입력 2\r\n14 7\r\n1\r\n4\r\n5\r\n3\r\n3\r\n5\r\n4\r\n5\r\n4\r\n4\r\n4\r\n3\r\n4\r\n4\r\n예제 출력 2\r\n6 1\r\n\"\"\"\r\n","repo_name":"khyup0629/Algorithm","sub_path":"누적 합(Prefix sum)/개똥벌레.py","file_name":"개똥벌레.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"21308315581","text":"import config\nimport discord\nimport utils\nfrom discord.ext import commands\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Baby(bot))\n\n\nclass Baby(commands.Cog, name=\"Baby Shark\"):\n def __init__(self, bot):\n self.bot = bot\n self.doos = 6\n self.family = [\n 'Baby',\n 'Mommy',\n 'Daddy',\n 'Grandma',\n 'Grandpa',\n ]\n self.repetitions = 3\n \n @commands.command(brief=\"Sings the baby shark song\")\n async def baby(self, ctx: commands.Context, *args):\n name = ' '.join(args) or 'Violet'\n sections = []\n for role in self.family:\n section = []\n line = f'{role} {name}, ' + ', '.join(['doo'] * self.doos)\n for _ in range(self.repetitions):\n section.append(line)\n\n section.append(f'{role} {name}!')\n sections.append('\\n'.join(section))\n\n await ctx.send('\\n\\n'.join(sections))\n\n","repo_name":"qmatias/hyli-bot","sub_path":"src/cogs/baby.py","file_name":"baby.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"18569336139","text":"import sys\n\nN = int(sys.stdin.readline())\n\nl = 0\nr = N - 1\n\np = 0\nprint(p, flush=True)\nfirst = sys.stdin.readline().strip()\nif first == \"Vacant\":\n sys.exit()\nif first == \"Male\":\n second = \"Female\"\nelse:\n second = \"Male\"\n\nwhile True:\n p = (l + r) // 2\n print(p, flush=True)\n sex = sys.stdin.readline().strip()\n if sex == \"Vacant\":\n # print(\"end!\")\n sys.exit()\n if p % 2 == 0:\n # pまでの間に空席がない場合は性別が同じ\n if sex == first:\n l = p + 1\n # ずれているので、そこまでに空席がある\n else:\n r = p\n else:\n if sex == second:\n l = p + 1\n else:\n r = p","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03439/s880307854.py","file_name":"s880307854.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"19362778384","text":"\"\"\"\n\nSphinx Autodoc coverage checker.\n================================\n\nThis builder extension makes sure all modules in the documented\npackage is represented in the autodoc API reference.\n\nUsage\n-----\n\n.. code-block:: console\n\n $ sphinx-build -b apicheck -d _build/doctrees . 
_build/apicheck\n\nConfiguration\n-------------\n\napicheck_ignore_modules\n~~~~~~~~~~~~~~~~~~~~~~~\n\nList of modules to ignore, either as module names or regexes.\n\nExample:\n\n.. code-block:: python\n\n apicheck_ignore_modules = [\n 'django.utils.functional',\n r'django.db.*',\n ]\n\nTest packages are ignored by default, even if this setting is defined.\n\napicheck_package\n~~~~~~~~~~~~~~~~\n\nThe package to verify, can be the fully-qualified name of a module\nor an actual module.\n\nExample:\n\n.. code-block:: python\n\n apicheck_package = 'django'\n\nDefault is the value of the ``project`` configuration key in all lowercase.\n\n\napicheck_domains\n~~~~~~~~~~~~~~~~\n\nList of domains to check.\n\nDefault is ``['py']`` and Python is the only domain currently supported.\n\n\"\"\"\n\nimport importlib\nimport os\nfrom collections import defaultdict\n\nimport sphinx\nfrom sphinx.ext import autodoc\nfrom sphinx.util.console import bold, darkgreen, green, red\n\nfrom .builders import BaseBuilder\nfrom .utils import bytes_if_py2\n\nDEFAULT_IGNORE = [r'.*?\\.tests.*']\n\nTITLEHEADER = '='\nSUBHEADER = '-'\n\nERR = 'ERROR'\nERR_MISSING = '{error}: In index but module does not exist: {module}'\nERR_UNDOCUMENTED = 'Undocumented Autodoc Modules'\nOK_STATUS = 'OK: All modules documented :o)'\n\nNOK_STATUS = \"\"\"\n{title}\n\n{undocumented}\\\n\"\"\"\n\nDOMAIN_FORMAT = \"\"\"\\\n{domain}\n\n{modules}\n\"\"\"\n\nMODULE_FORMAT = '- {module}'\n\n\nclass ModuleDocumenter(autodoc.ModuleDocumenter):\n missing_modules = set()\n\n def import_object(self):\n if not super().import_object():\n self.missing_modules.add(self.modname)\n return False\n return True\n\n\ndef title(s, spacing=2, sep=TITLEHEADER):\n return '\\n'.join([\n sep * (len(s) + spacing),\n '{0}{1}{0}'.format(' ' * (spacing // 2), red(s)),\n sep * (len(s) + spacing),\n ])\n\n\ndef header(s, sep=SUBHEADER):\n return '\\n'.join([bold(s), sep * len(s)])\n\n\ndef find_python_modules(package):\n if isinstance(package, str):\n package = importlib.import_module(package)\n name, path = package.__name__, package.__file__\n current_dist_depth = len(name.split('.')) - 1\n current_dist = os.path.join(os.path.dirname(path),\n *([os.pardir] * current_dist_depth))\n abs = os.path.abspath(current_dist)\n dist_name = os.path.basename(abs)\n\n for dirpath, _, filenames in os.walk(abs):\n package = (dist_name + dirpath[len(abs):]).replace('/', '.')\n if '__init__.py' in filenames:\n yield package\n for filename in filenames:\n if filename.endswith('.py') and filename != '__init__.py':\n yield '.'.join([package, filename])[:-3]\n\n\nclass APICheckBuilder(BaseBuilder):\n\n name = 'apicheck'\n pickle_filename = 'apicheck.pickle'\n\n find_modules = {\n 'py': find_python_modules,\n }\n\n def init(self):\n self.ignore_patterns = self.compile_regexes(\n self.config.apicheck_ignore_modules + DEFAULT_IGNORE,\n )\n self.check_domains = self.config.apicheck_domains\n self.check_package = (\n self.config.apicheck_package or self.config.project.lower())\n\n self.undocumented = defaultdict(list)\n self.all_modules = defaultdict(set)\n\n def is_ignored_module(self, module):\n return any(regex.match(module) for regex in self.ignore_patterns)\n\n def write(self, *ignored):\n for domain in self.check_domains:\n self.build_coverage(domain)\n self.check_missing()\n if not self.app.statuscode:\n self.write_coverage(self.check_domains)\n\n def build_coverage(self, domain):\n self.all_modules[domain].update(self.find_modules[domain](\n self.check_package,\n ))\n 
self.undocumented[domain].extend(self.find_undocumented(\n domain, self.env.domaindata[domain]['modules'],\n ))\n\n def find_undocumented(self, domain, documented):\n return (\n mod for mod in self.all_modules[domain]\n if mod not in documented and not self.is_ignored_module(mod)\n )\n\n def write_coverage(self, domains):\n status = any(self.undocumented.values())\n if status:\n self.app.statuscode = 2\n print(self.format_undocumented_domains(domains))\n else:\n print(green(OK_STATUS))\n\n def check_missing(self):\n for mod in ModuleDocumenter.missing_modules:\n self.app.statuscode = 3\n print(ERR_MISSING.format(\n error=red(ERR),\n module=bold(mod),\n ))\n\n def format_undocumented_domains(self, domains):\n return NOK_STATUS.format(\n title=title(ERR_UNDOCUMENTED),\n undocumented='\\n'.join(\n self.format_undocumented_domain(domain) for domain in domains\n ),\n )\n\n def format_undocumented_domain(self, domain):\n return DOMAIN_FORMAT.format(domain=header(domain), modules='\\n'.join(\n self.format_undocumented_module(module)\n for module in self.undocumented[domain]\n ))\n\n def format_undocumented_module(self, module):\n return MODULE_FORMAT.format(module=darkgreen(module))\n\n def as_dict(self):\n return {\n 'undocumented': dict(self.undocumented),\n }\n\n\ndef _add_documenter_override(app, cls):\n # Install documenter for automodule without generating warning.\n from sphinx.ext.autodoc.directive import AutodocDirective\n app.registry.add_documenter(cls.objtype, cls)\n directive_name = 'auto' + cls.objtype\n if sphinx.version_info < (1, 8):\n try:\n from docutils.parsers.rst import directives\n except ImportError:\n pass\n else:\n directives._directives.pop(directive_name, None)\n app.add_directive(directive_name, AutodocDirective)\n else:\n # override was added in Sphinx 1.8\n app.add_directive(directive_name, AutodocDirective, override=True)\n\n\ndef setup(app):\n app.add_builder(APICheckBuilder)\n app.add_config_value(\n bytes_if_py2('apicheck_ignore_modules'), [], False)\n app.add_config_value(\n bytes_if_py2('apicheck_domains'), ['py'], False)\n app.add_config_value(\n bytes_if_py2('apicheck_package'), None, False)\n _add_documenter_override(app, ModuleDocumenter)\n\n return {\n 'parallel_read_safe': True,\n }\n","repo_name":"celery/sphinx_celery","sub_path":"sphinx_celery/apicheck.py","file_name":"apicheck.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"90"} +{"seq_id":"41633866887","text":"#!/usr/bin/python3\n\nimport sys\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nimport csv\nfrom collections import namedtuple, Counter\nfrom collections import deque\nsys.path.append(os.path.join(os.path.dirname(sys.argv[0]), \"..\", \"lib\", \"python\"))\nimport tgt\nimport util\nimport xlsbatch\nimport re\nimport gui\n\n\ncachedir = \".\"\nBeepTuple = namedtuple(\"beep_iv_tuple\", [\"t_start\", \"t_end\", \"correlation\"])\nIntensityVal = namedtuple(\"IntensityVal\", [\"t\", \"intensity\"])\n\n\ndef segment_beeps(infile, outfile, wavfile, beepchannel, refbeep,\n silencethreshold, minsoundingduration, seekflank, mincorrelation):\n logging.info(\"Segmenting beeps in %s\" % wavfile)\n tmpdir = tempfile.mkdtemp()\n try:\n duration, _ = util.get_wav_duration(wavfile)\n tg, tier = util.init_textgrid(infile, duration, \"seg.beep\")\n beepsegmentscript = os.path.join(os.path.dirname(sys.argv[0]), \"beepsegment.praat\")\n beeplists = []\n for rb in refbeep:\n 
logging.info(\"Running correlation search for beep %s\" % rb)\n result = util.call_check([\"praat\", \"--run\", beepsegmentscript, os.path.realpath(wavfile), str(beepchannel),\n os.path.realpath(rb), str(silencethreshold), str(minsoundingduration),\n str(int(seekflank))], True)\n beeplist = []\n for line in result.decode().split(\"\\n\"):\n if line:\n t_start, t_end, correlation = line.split(\"\\t\")\n bt = BeepTuple(float(t_start), float(t_end), float(correlation))\n beeplist.append(bt)\n beeplists.append(beeplist)\n\n max_index_list = []\n for btlist in zip(*beeplists):\n bt = max(btlist, key=lambda t: t.correlation)\n max_index_list.append(btlist.index(bt))\n if bt.correlation < mincorrelation:\n logging.warning(\"low correlation (%.02f) with reference beep at %.4f seconds\" %\n (bt.correlation, bt.t_start))\n continue\n overlapping_beeps = tier.get_annotations_between_timepoints(bt.t_start, bt.t_end,\n left_overlap=True, right_overlap=True)\n can_add = True\n for x in overlapping_beeps:\n if x.correlation < bt.correlation:\n tier.delete_annotation_by_start_time(x.start_time)\n else:\n can_add = False\n if can_add:\n anno = tgt.Annotation(bt.t_start, bt.t_end, \"beep\")\n anno.correlation = bt.correlation\n tier.add_annotation(anno)\n cnt = Counter(max_index_list)\n logging.info(\"Found %d beep instances with (index, count) distribution %s\" %\n (len(max_index_list), sorted(cnt.items())))\n tier = tier.get_copy_with_gaps_filled(empty_string=\"speech\")\n if len(tier) > 0:\n tier[0].text = \"\"\n logging.info(\"Note: setting first (silence) interval empty\")\n tg.add_tier(tier)\n logging.info(\"Writing %s\" % outfile)\n tgt.io.write_to_file(textgrid=tg, filename=outfile, format=\"long\")\n finally:\n shutil.rmtree(tmpdir)\n\n\ndef segment_speech_praat(wavfile, channel=1, denoise=False, trainbegin=0, trainwindow=1, threshold=50,\n min_sil_duration=0.02, min_snd_duration=0.02):\n vadscript = os.path.join(os.path.dirname(sys.argv[0]), \"vad.praat\")\n result = util.call_check([\"praat\", \"--run\", vadscript, os.path.realpath(wavfile), str(channel),\n str(int(denoise)), str(trainbegin), str(trainwindow), str(threshold),\n str(min_sil_duration), str(min_snd_duration)], True)\n speech_chunks = []\n intensities = []\n for line in result.decode().split(\"\\n\"):\n items = line.split(\"\\t\")\n if line.startswith(\"silence threshold\"):\n logging.info(line)\n elif len(items) > 2:\n if items[0] == \"chunk\":\n iv = tgt.Interval(float(items[1]), float(items[2]), items[3])\n iv.as_db = float(items[3])\n speech_chunks.append(iv)\n elif items[0] == \"itn\":\n intensities.append(IntensityVal(float(items[1]), float(items[2])))\n return speech_chunks, intensities\n\n\ndef window(seq, n):\n it = iter(seq)\n win = deque((next(it, None) for _ in range(n)), maxlen=n)\n yield win\n append = win.append\n for e in it:\n append(e)\n yield win\n\n\ndef find_silence_level(intensities, window_size):\n min_max_val = None\n for w in window(intensities, int(window_size*100)):\n max_int = max(w, key=lambda x: x.intensity)\n if min_max_val is None or min_max_val.intensity > max_int.intensity:\n min_max_val = max_int\n return min_max_val.intensity\n\n\ndef filter_chunks(speech_chunks, silencelevel, speechthresh=0.8):\n dbvalues = [float(x.text) - silencelevel for x in speech_chunks]\n dbfilterthreshold = silencelevel + (sum(dbvalues) / len(dbvalues) * speechthresh)\n logging.info(\"speech filtering threshold: %s\" % dbfilterthreshold)\n logging.info(\"vad segments: %s\" % len(speech_chunks))\n\n dbfilteredivs = 
tgt.IntervalTier(name=\"seg.filt\")\n for siv in [x for x in speech_chunks if float(x.text) > dbfilterthreshold]:\n dbfilteredivs.add_annotation(siv)\n\n logging.info(\"dropped %s intervals\" % (len(speech_chunks)-len(dbfilteredivs)))\n return dbfilteredivs\n\n\ndef segment_speech(infile, outfile, wavfile, channel, filtertiername, shiftonset, shiftoffset, denoise,\n trainbegin, trainwindow, speechthresh, snradd):\n logging.info(\"Segmenting speech in %s\" % wavfile)\n duration, _ = util.get_wav_duration(wavfile)\n tg, tier = util.init_textgrid(infile, duration, \"seg.speech\")\n\n logging.info(\"Floor estimation...\")\n _, intensities = segment_speech_praat(wavfile, channel,\n denoise=denoise, trainbegin=trainbegin, trainwindow=trainwindow)\n silencelevel = find_silence_level(intensities, trainwindow) + snradd\n logging.info(\"estimated floor noise level: %s\" % silencelevel)\n logging.info(\"Segmentation...\")\n speech_chunks, intensities = segment_speech_praat(wavfile, channel, threshold=silencelevel,\n denoise=denoise, trainbegin=trainbegin, trainwindow=trainwindow)\n for iv in speech_chunks:\n tier.add_annotation(iv)\n\n dbvalues = [x.as_db - silencelevel for x in tier]\n dbfilterthreshold = silencelevel + (sum(dbvalues) / len(dbvalues) * speechthresh)\n logging.info(\"speech filtering threshold: %s\" % dbfilterthreshold)\n logging.info(\"vad segments: %s\" % len(tier))\n\n if filtertiername is None:\n filtertier = tgt.IntervalTier()\n filtertier.add_annotation(tgt.Annotation(tier.start_time, tier.end_time, \"speech\"))\n filtertiername = \"\"\n else:\n filtertier = tg.get_tier_by_name(filtertiername)\n resulttier = tgt.IntervalTier(name=\"seg.speech\")\n speechsegments = [s for s in filtertier if s.text == \"speech\"]\n logging.info(\"expected speech segments: %s\" % len(speechsegments))\n stats_filtered = 0\n stats_all = 0\n for speechseg in speechsegments:\n speechivs = tier.get_annotations_between_timepoints(speechseg.start_time, speechseg.end_time)\n if len(speechivs) == 0:\n speechivs = tier.get_annotations_between_timepoints(speechseg.start_time, speechseg.end_time,\n left_overlap=True, right_overlap=True)\n if len(speechivs) > 0:\n logging.warning(\"Speech segments overlap with the boundaries of %s in %s. \"\n \"VAD problem? Shortening...\" % (\n speechseg, filtertiername))\n for siv in speechivs:\n siv.start_time = max(speechseg.start_time, siv.start_time)\n siv.end_time = min(speechseg.end_time, siv.end_time)\n\n if len(speechivs) == 0:\n logging.warning(\"No speech segments in %s overlap with %s\" % (filtertiername, speechseg))\n continue\n\n dbfilteredivs = tgt.IntervalTier()\n for siv in [x for x in speechivs if x.as_db > dbfilterthreshold]:\n dbfilteredivs.add_annotation(siv)\n stats_filtered += len(speechivs) - len(dbfilteredivs)\n stats_all += len(speechivs)\n if len(dbfilteredivs) == 0:\n logging.warning(\"All speech segments in %s dropped since their energy is below %.2f\" % (\n speechseg, dbfilterthreshold))\n continue\n start_time = min([x.start_time for x in dbfilteredivs])\n end_time = max([x.end_time for x in dbfilteredivs])\n\n resulttier.add_annotation(tgt.Interval(\n start_time + shiftonset,\n end_time + shiftoffset, \"speech\"))\n assert stats_all > 0, \"VAD was unable segment speech. Check silence region: calculated threshold %.2f db. 
\" \\\n \"Speech threshold: %.2f db.\" % (silencelevel, dbfilterthreshold)\n tier = resulttier\n logging.info(\"Dropped %d of %d speech segments (%.2f%%) with energy below %.2f db\" %\n (stats_filtered, stats_all, (stats_filtered / stats_all * 100), dbfilterthreshold))\n\n tier.name = \"seg.speech\"\n tg.add_tier(tier)\n logging.info(\"Writing %s\" % outfile)\n tgt.io.write_to_file(textgrid=tg, filename=outfile, format=\"long\")\n\n\ndef add_tier(infile, outfile, wavfile, mode, sourcetier, filtertier, desttier, text, pattern):\n if mode in ['trim', 'copy']:\n assert sourcetier is not None, \"source tier required for mode %s\" % mode\n logging.info(\"Adding tier using mode: \" + mode)\n duration, _ = util.get_wav_duration(wavfile)\n tg, tier = util.init_textgrid(infile, duration, desttier)\n duration, _ = util.get_wav_duration(wavfile)\n tier.start_time = 0\n tier.end_time = duration\n if mode == \"trim\":\n annotier = tg.get_tier_by_name(sourcetier)\n if len(annotier) > 0:\n tier.add_annotation(tgt.Annotation(annotier[0].start_time, annotier[-1].end_time, text))\n elif mode == \"all\":\n tier.add_annotation(tgt.Annotation(0, duration, text))\n elif mode == \"copy\":\n annotier = tg.get_tier_by_name(sourcetier)\n for iv in annotier:\n if re.match(pattern, iv.text) is not None:\n tier.add_annotation(tgt.Annotation(iv.start_time, iv.end_time, text))\n elif mode == \"trimright\":\n overlaptier = tg.get_tier_by_name(sourcetier)\n trimtier = tg.get_tier_by_name(filtertier)\n for overlapseg in overlaptier.intervals:\n if re.match(pattern, overlapseg.text) is not None:\n overlaps = trimtier.get_annotations_between_timepoints(overlapseg.start_time, overlapseg.end_time,\n left_overlap=True, right_overlap=True)\n if len(overlaps) > 0:\n tier.add_annotation(tgt.Annotation(overlapseg.start_time,\n min(overlaps[-1].end_time+0.5, overlapseg.end_time), text))\n tg.add_tier(tier)\n logging.info(\"Writing %s\" % outfile)\n tgt.io.write_to_file(textgrid=tg, filename=outfile, format=\"long\")\n\n\ndef load_dict(filename):\n pdict = {}\n with open(filename, 'r') as dictfile:\n dictreader = csv.reader(dictfile, delimiter=';', quoting=csv.QUOTE_NONE)\n for row in dictreader:\n pdict[row[0]] = row[1]\n return pdict\n\n\ndef save_dict(pdict, filename):\n with open(filename, 'w') as dictfile:\n dictwriter = csv.writer(dictfile, delimiter=';', quoting=csv.QUOTE_NONE, lineterminator=\"\\n\")\n for key, val in pdict.items():\n dictwriter.writerow([key, val])\n\n\ndef get_phonetic_transcriptions(tmpdir, segtier, annotier, language):\n intervalcnt = 0\n logging.info(\"Preparing transcription dictionary\")\n pdict = {}\n cachefilename = os.path.join(cachedir, \"aligntool.%s.cache\" % language)\n if os.path.exists(cachefilename):\n pdict = load_dict(cachefilename)\n\n missing_kan = set()\n for speechseg in segtier.intervals:\n if speechseg.text == \"speech\":\n intervalcnt += 1\n wordsegments = annotier.get_annotations_between_timepoints(speechseg.start_time, speechseg.end_time,\n left_overlap=True, right_overlap=True)\n if len(wordsegments) == 0:\n continue\n annotation = \" \".join([x.text for x in wordsegments]).split()\n for w in annotation:\n assert \"_\" not in w, \"Word %s contains invalid character _\" % w\n if w not in pdict.keys():\n missing_kan.add(w)\n logging.info(\"%s missing phonetic transcriptions\" % len(missing_kan))\n if len(missing_kan) > 0:\n lexfile = os.path.join(tmpdir, \"lexicon.txt\")\n txtout = open(lexfile, 'w')\n for w in missing_kan:\n print(w, file=txtout)\n txtout.close()\n g2pscript = 
os.path.join(os.path.dirname(sys.argv[0]), \"rung2pwebservice.sh\")\n util.call_check([g2pscript, lexfile, language])\n lextransfile = os.path.join(tmpdir, \"lexicon.tab\")\n newdictlines = open(lextransfile, 'r').readlines()\n for w, response in zip(missing_kan, newdictlines):\n responsew, responset = response[:-1].split(';')\n assert responset != \"\", \"word '%s' was mapped to empty string. Invalid chars?\" % w\n if w != responsew:\n logging.warning(\"g2p expanded %s to %s\" % (w, responsew))\n pdict[w] = \"\".join(responset.split(\" \"))\n save_dict(pdict, cachefilename)\n return pdict\n\n\ndef split_utterances(tmpdir, speechtiername, infile, wavfile, channel, denoise):\n logging.info(\"Splitting audio into utterance segments\")\n splitaudioscript = os.path.join(os.path.dirname(sys.argv[0]), \"splitaudio.praat\")\n if denoise:\n logging.warning(\"Assuming 1-4 seconds are non-speech for denoising\")\n result = util.call_check([\"praat\", \"--run\", splitaudioscript, os.path.realpath(wavfile), os.path.realpath(infile),\n \"%s/iv\" % tmpdir, str(channel), speechtiername, str(int(denoise))], True)\n offsets = []\n for line in result.decode().split(\"\\n\"):\n items = line.split(\"\\t\")\n if len(items) == 3:\n foffset = tgt.Interval(start_time=float(items[0]), end_time=float(items[1]), text=items[2])\n offsets.append(foffset)\n logging.info(\"Split completed: %s segments\" % len(offsets))\n return offsets\n\n\ndef parse_maus_par(parfilename, sample_rate):\n ort_ivs = []\n mau_ivs = []\n with open(parfilename, 'r') as parfile:\n # print(parfilename)\n parreader = csv.reader(parfile, delimiter='\\t', quotechar=None)\n for row in parreader:\n if row[0] == \"ORT:\":\n oiv = tgt.Interval(0, 0, row[2])\n oiv.has_begin_set = False\n ort_ivs.append(oiv)\n assert len(ort_ivs) == int(row[1]) + 1\n elif row[0] == \"MAU:\":\n ivbegin = float(row[1]) / sample_rate\n ivend = (float(row[1]) + float(row[2]) + 1) / sample_rate\n wnum = int(row[3])\n # print(wnum, ivbegin, ivend, row[4])\n ort_ivs[wnum].end_time = ivend\n if wnum >= 0 and not ort_ivs[wnum].has_begin_set:\n ort_ivs[wnum].start_time = ivbegin\n ort_ivs[wnum].has_begin_set = True\n mau_ivs.append(tgt.Interval(ivbegin, ivend, row[4]))\n if not mau_ivs:\n return [], []\n for iv in ort_ivs:\n assert iv.has_begin_set, \"Incomplete MAU tier in %s\" % parfilename\n return ort_ivs, mau_ivs\n\n\ndef read_maus_alignments(tmpdir, offsets, orttier, mautier, sample_rate):\n logging.info(\"Reading MAUS alignments\")\n for i, foffset in enumerate(offsets):\n intervalcnt = i+1\n parfile = \"%s/iv%s.par\" % (tmpdir, intervalcnt)\n try:\n ort_ivs, mau_ivs = parse_maus_par(parfile, sample_rate)\n if not ort_ivs and foffset.transcription_valid:\n logging.warning(\"No alignment imported for interval %s: %s\" % (intervalcnt, foffset))\n for iv in ort_ivs:\n orttier.add_annotation(tgt.Interval(iv.start_time + foffset.start_time,\n iv.end_time + foffset.start_time, iv.text))\n for iv in mau_ivs:\n mautier.add_annotation(tgt.Interval(iv.start_time + foffset.start_time,\n iv.end_time + foffset.start_time, iv.text))\n except IOError:\n if foffset.transcription_valid:\n logging.warning(\"No alignment imported for interval %s: %s\" % (intervalcnt, foffset))\n except:\n logging.error(\"Exception while parsing TextGrid %s\" % parfile)\n raise\n\n\ndef generate_maus_transcriptions(tmpdir, segtier, offsets, annotier, pdict):\n logging.info(\"Generating transcriptions for MAUS alignment\")\n for intervalcnt, foffset in enumerate(offsets):\n foffset.transcription_valid = 
False\n seg_ivs = segtier.get_annotations_between_timepoints(foffset.start_time, foffset.end_time,\n left_overlap=True, right_overlap=True)\n seg_ivs = [x for x in seg_ivs if x.text == \"speech\"]\n assert len(seg_ivs) <= 1, \"Invalid segmentation hierarchy: %s is overlap-contained by multiple \" \\\n \"pre-segmentation intervals %s\" % (foffset, seg_ivs)\n if not seg_ivs:\n logging.warning(\"%s does not seem to correspond to any pre-segmentation interval\" % foffset)\n continue\n seg_iv = seg_ivs[0]\n assert seg_iv.text == \"speech\", \"speech interval %s contained by non-speech pre-segmentation %s\" % (foffset, seg_iv)\n wordsegments = annotier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n if len(wordsegments) == 0:\n logging.warning(\"Ignoring empty annotation for interval %s\" % seg_iv)\n continue\n foffset.transcription_valid = True\n txtout = open(\"%s/iv%s.par\" % (tmpdir, intervalcnt+1), 'w')\n words = \" \".join([x.text for x in wordsegments]).split()\n for i, w in enumerate(words):\n print(\"ORT:\\t%s\\t%s\" % (i, w), file=txtout)\n for i, w in enumerate(words):\n print(\"KAN:\\t%s\\t%s\" % (i, pdict[w]), file=txtout)\n txtout.close()\n\n\ndef align_maus(infile, wavfile, outfile, denoise, channel, segtiername, filtertiername, initialsilence, remote,\n language):\n logging.info(\"Aligning %s based on segmentation in %s\" % (wavfile, infile))\n tmpdir = tempfile.mkdtemp()\n try:\n duration, _ = util.get_wav_duration(wavfile)\n tg, orttier, mautier = util.init_textgrid(infile, duration, \"maus.ort\", \"maus.pho\")\n annotier = tg.get_tier_by_name(\"anno.trans\")\n segtier = tg.get_tier_by_name(filtertiername)\n\n pdict = get_phonetic_transcriptions(tmpdir, segtier, annotier, language)\n offsets = split_utterances(tmpdir, segtiername, infile, wavfile, channel, denoise)\n generate_maus_transcriptions(tmpdir, segtier, offsets, annotier, pdict)\n\n logging.info(\"Performing MAUS alignment\")\n if not os.path.exists(os.path.join(os.path.dirname(sys.argv[0]), \"..\", \"external\", \"maus\")):\n mausscript = os.path.join(os.path.dirname(sys.argv[0]), \"runmauswebservice.sh\")\n else:\n mausscript = os.path.join(os.path.dirname(sys.argv[0]), \"runmauslocal.sh\")\n util.call_check([mausscript, tmpdir, str(not initialsilence), language])\n\n duration, sample_rate = util.get_wav_duration(wavfile)\n read_maus_alignments(tmpdir, offsets, orttier, mautier, sample_rate)\n tg.add_tier(orttier)\n tg.add_tier(mautier)\n logging.info(\"Writing %s\" % outfile)\n tgt.io.write_to_file(textgrid=tg, filename=outfile, format=\"long\")\n except:\n logging.error(\"Exception while running maus alignment. 
Retained temp dir: %s\" % tmpdir)\n raise\n else:\n shutil.rmtree(tmpdir)\n\n\nclass DumpRecord:\n pass\n\n\ndef dump_boundaries(infile, outfile, filtertiername, ref_seg_tiername):\n logging.info(\"Dumping boundaries in %s to %s\" % (infile, outfile))\n tg = tgt.io.read_textgrid(infile)\n anno_tier = tg.get_tier_by_name(\"anno.trans\")\n meta_tier = tg.get_tier_by_name(\"anno.meta\")\n align_tier = tg.get_tier_by_name(\"maus.ort\")\n pho_tier = tg.get_tier_by_name(\"maus.pho\")\n seg_tier = tg.get_tier_by_name(filtertiername)\n if ref_seg_tiername is not None:\n ref_seg_tier = tg.get_tier_by_name(ref_seg_tiername)\n else:\n ref_seg_tier = None\n with open(outfile, \"w\") as fout:\n varnames = (\"meta_ref\", \"meta_valid\", \"meta_orig\", \"meta_edited\", \"meta_ignore\", \"meta_shortened\",\n \"meta_error\", \"transcription\", \"anno_onset\", \"anno_offset\", \"align_onset\", \"align_offset\",\n \"word_cnt\", \"nwords\", \"onset_pho\", \"offset_pho\", \"anno_preseg_begin\", \"preseg_begin\")\n print(\"\\t\".join(varnames), file=fout)\n # loop over utterances\n # loop over meta intervals\n # all valid intervals must have an matching anno.trans interval (ground truth)\n # hard index correspondence between maus.ort intervals and anno.trans intervals\n # one line output for each valid meta interval to catch edits and ignores\n for seg_iv in [s for s in seg_tier if s.text == \"speech\"]:\n word_cnt = 1\n anno_ivs = anno_tier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n meta_ivs = meta_tier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n align_ivs = align_tier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n ref_seg_iv = None\n if ref_seg_tier is not None:\n ref_seg_ivs = ref_seg_tier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n ref_seg_ivs = [s for s in ref_seg_ivs if s.text == \"speech\"]\n assert len(ref_seg_ivs) == 1, \"unclear mapping to reference pre-segmentation %s\" % ref_seg_ivs\n ref_seg_iv = ref_seg_ivs[0]\n\n anno_iter, align_iter = [iter(x) for x in (anno_ivs, align_ivs)]\n for metaiv in meta_ivs:\n record = DumpRecord()\n record.nwords = len(anno_ivs)\n metadata = util.MetaData.from_json(metaiv.text)\n record.meta_ref = metadata.ref\n record.meta_valid = int(metadata.valid)\n record.meta_orig = metadata.orig\n record.meta_edited = int(metadata.edited)\n record.meta_ignore = int(metadata.ignore)\n record.meta_shortened = int(metadata.shortened)\n record.meta_error = 0\n record.anno_onset = metaiv.start_time\n record.anno_offset = metaiv.end_time\n record.preseg_begin = seg_iv.start_time\n if ref_seg_iv is not None:\n record.anno_preseg_begin = ref_seg_iv.start_time\n if metadata.valid:\n try:\n annoiv, aligniv = [next(x) for x in (anno_iter, align_iter)]\n assert annoiv.start_time == metaiv.start_time and annoiv.end_time == metaiv.end_time, \\\n (\"Metadata interval %s not matching annotation interval %s\", metaiv, annoiv)\n if annoiv.text != aligniv.text:\n logging.warning(\n \"Mismatch between annotation %s and alignment result %s\" % (annoiv, aligniv))\n record.transcription = annoiv.text\n record.align_onset = aligniv.start_time\n record.align_offset = aligniv.end_time\n pho_ivs = pho_tier.get_annotations_between_timepoints(aligniv.start_time, aligniv.end_time,\n left_overlap=True, right_overlap=True)\n 
pho_ivs_cleaned = [x.text for x in pho_ivs if x.text not in (\"?\", \"\", \"\")]\n if pho_ivs_cleaned:\n record.onset_pho = pho_ivs_cleaned[0]\n record.offset_pho = pho_ivs_cleaned[-1]\n else:\n logging.warning(\"Phonetic alignment for %s seems empty after filtering: %s\"\n % (aligniv, pho_ivs_cleaned))\n record.meta_error = 1\n record.word_cnt = word_cnt\n word_cnt += 1\n except StopIteration:\n logging.warning(\"Alignment in %s is missing %s\" % (seg_iv, metadata.orig))\n record.meta_error = 1\n print(\"\\t\".join([str(getattr(record, x, \"\")) for x in varnames]), file=fout)\n\n\ndef export_boundaries(infile, outfile, filtertiername):\n logging.info(\"Exporting boundaries in %s to %s\" % (infile, outfile))\n tg = tgt.io.read_textgrid(infile)\n align_tier = tg.get_tier_by_name(\"maus.ort\")\n pho_tier = tg.get_tier_by_name(\"maus.pho\")\n seg_tier = tg.get_tier_by_name(filtertiername)\n with open(outfile, \"w\") as fout:\n varnames = (\"meta_error\", \"transcription\", \"align_onset\", \"align_offset\", \"word_cnt\", \"nwords\", \"onset_pho\",\n \"offset_pho\", \"preseg_begin\")\n print(\"\\t\".join(varnames), file=fout)\n for seg_iv in [s for s in seg_tier if s.text == \"speech\"]:\n word_cnt = 1\n align_ivs = align_tier.get_annotations_between_timepoints(seg_iv.start_time, seg_iv.end_time,\n left_overlap=True, right_overlap=True)\n for aligniv in align_ivs:\n record = DumpRecord()\n record.transcription = aligniv.text\n record.nwords = len(align_ivs)\n record.meta_error = 0\n record.preseg_begin = seg_iv.start_time\n record.align_onset = aligniv.start_time\n record.align_offset = aligniv.end_time\n pho_ivs = pho_tier.get_annotations_between_timepoints(aligniv.start_time, aligniv.end_time,\n left_overlap=True, right_overlap=True)\n pho_ivs_cleaned = [x.text for x in pho_ivs if x.text not in (\"?\", \"\", \"\")]\n if pho_ivs_cleaned:\n record.onset_pho = pho_ivs_cleaned[0]\n record.offset_pho = pho_ivs_cleaned[-1]\n else:\n logging.warning(\"Phonetic alignment for %s seems empty after filtering: %s\"\n % (aligniv, pho_ivs_cleaned))\n record.meta_error = 1\n record.word_cnt = word_cnt\n word_cnt += 1\n print(\"\\t\".join([str(getattr(record, x, \"\")) for x in varnames]), file=fout)\n\n\ndef add_textgrid_options(parser):\n parser.add_argument('-i', \"--input-textgrid\", dest='infile', metavar='', action='store', required=False,\n help='input TextGrid')\n parser.add_argument('-o', \"--output-textgrid\", dest='outfile', metavar='', action='store', required=True,\n help='output TextGrid')\n\n\ndef add_language_options(parser):\n langs = [\"deu-DE\", \"gsw-CH\", \"eng-GB\", \"fin-FI\", \"fra-FR\", \"eng-AU\", \"eng-US\",\n \"nld-NL\", \"spa-ES\", \"ita-IT\", \"por-PT\", \"hun-HU\", \"ekk-EE\", \"pol-PL\",\n \"eng-NZ\", \"kat-GE\", \"rus-RU\"]\n parser.add_argument('-l', \"--language\", dest='language', metavar='', action='store',\n required=False, choices=langs, default=\"deu-DE\", help='language code')\n\n\ndef parse_arguments(argv, parser=argparse.ArgumentParser(prog=os.path.basename(__file__), add_help=True)):\n sub_cmd_parser = parser.add_subparsers(dest='cmd', title='subcommands (-h for more help)')\n sub_cmd_parser.required = True\n\n xlsbatch_parser = sub_cmd_parser.add_parser('xlsbatch', help='batch process xls files ...',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n sub_xlsbatch_parser = xlsbatch_parser.add_subparsers(dest='cmd', title='import commands')\n sub_xlsbatch_parser.required = True\n xlsbatch.setup(sub_xlsbatch_parser)\n\n segment_beep_parser = 
sub_cmd_parser.add_parser('segmentBeeps', help='segment audio files on beeps',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n segment_beep_parser.set_defaults(cmd=segment_beeps)\n add_textgrid_options(segment_beep_parser)\n segment_beep_parser.add_argument('-w', \"--wav-file\", dest='wavfile', metavar='', action='store',\n required=True,\n help='input wav file')\n segment_beep_parser.add_argument('-b', dest='beepchannel', type=int, default=2, metavar='', action='store',\n required=False, help='beep channel')\n segment_beep_parser.add_argument('-r', \"--refbeep\", dest='refbeep', metavar='', action='append',\n required=True,\n help='reference beep signal')\n segment_beep_parser.add_argument('-x', \"--mincorrelation\", dest='mincorrelation', type=float, default=0,\n metavar='<0-1>', action='store', help='minimum required cross correlation peak')\n segment_beep_parser.add_argument('-s', \"--silencethreshold\", dest='silencethreshold', type=float, default=-25,\n metavar='', action='store',\n help='silence threshold for beep segmentation (default: -25)')\n segment_beep_parser.add_argument('-m', \"--minsounding\", dest='minsoundingduration', type=float, default=0.18,\n metavar='', action='store', help='min sounding duration')\n segment_beep_parser.add_argument('--seekflank', dest='seekflank', action='store_true',\n help='enable seekflank heuristic for postprocessing the correlation method')\n\n segment_speech_parser = sub_cmd_parser.add_parser('segmentSpeech',\n help='segment audio file into speech and non-speech',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n segment_speech_parser.set_defaults(cmd=segment_speech)\n add_textgrid_options(segment_speech_parser)\n segment_speech_parser.add_argument('-w', \"--wav-file\", dest='wavfile', metavar='', action='store',\n required=True,\n help='input wav file')\n segment_speech_parser.add_argument('-c', \"--speech-channel\", dest='channel', type=int, default=1,\n metavar='', action='store',\n required=False, help='speech channel')\n segment_speech_parser.add_argument('-f', \"--filter-tier\", dest='filtertiername', metavar='', action='store',\n required=False,\n help='filter/suppress speech intervals based on existing speech interval tier')\n segment_speech_parser.add_argument(\"-d\", \"--denoise\", dest='denoise', action='store_true', help='enable denoising')\n segment_speech_parser.add_argument('--shiftonsets', dest='shiftonset', metavar='', action='store', type=float,\n default=0,\n required=False, help='shift detected onsets by seconds')\n segment_speech_parser.add_argument('--shiftoffsets', dest='shiftoffset', metavar='', action='store', type=float,\n default=0,\n required=False, help='shift detected offsets by seconds')\n\n segment_speech_parser.add_argument('--trainbegin', dest='trainbegin', metavar='', action='store', type=float,\n default=3.3,\n required=False,\n help='interval for calculating reference energy ends at seconds')\n segment_speech_parser.add_argument('--trainwindow', dest='trainwindow', metavar='', action='store', type=float,\n default=1,\n required=False,\n help='interval for calculating reference energy ends at seconds')\n segment_speech_parser.add_argument('--speechthresh', dest='speechthresh', metavar='', action='store', type=float,\n default=0.5,\n required=False,\n help=\"Relation of the average interval intensity to the valid speech interval \"\n \"intensity\")\n segment_speech_parser.add_argument('--snradd', dest='snradd', metavar='', action='store', type=float,\n default=1,\n required=False, 
help=\"add to the silence threshold\")\n\n add_tier_parser = sub_cmd_parser.add_parser('addTier',\n help='add a tier, e.g. to mark the whole utterance as speech',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n add_tier_parser.set_defaults(cmd=add_tier)\n add_tier_parser.add_argument('-m', \"--mode\", dest='mode', metavar='',\n action='store', required=True, choices=['all', 'trim', 'copy', 'trimright'],\n help='set mode (all, trim, copy)')\n add_tier_parser.add_argument('-w', \"--wav-file\", dest='wavfile', metavar='', action='store',\n required=True, help='input wav file')\n add_tier_parser.add_argument('-s', \"--source-tier\", dest='sourcetier', metavar='',\n action='store', default=None, required=False,\n help='use as source for copying or trimming')\n add_tier_parser.add_argument('-d', \"--dest-tier\", dest='desttier', metavar='',\n action='store', required=False, default=\"seg.speech\",\n help='destination tier ')\n add_tier_parser.add_argument('-t', \"--text\", dest='text', metavar='',\n action='store', required=False, default=\"speech\",\n help=\"set new tier's intervals content to \")\n add_tier_parser.add_argument('-f', \"--filter\", dest='pattern', metavar='',\n action='store', required=False, default=\"speech\",\n help=\"only copy intervals with content matching pattern \")\n add_tier_parser.add_argument(\"--filter-tier\", dest='filtertier', metavar='',\n action='store', default=None, required=False,\n help='use as source for overlap based trimming')\n add_textgrid_options(add_tier_parser)\n\n align_maus_parser = sub_cmd_parser.add_parser('alignMAUS',\n help='align files using the MAUS webservice',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n align_maus_parser.set_defaults(cmd=align_maus)\n add_textgrid_options(align_maus_parser)\n add_language_options(align_maus_parser)\n\n align_maus_parser.add_argument('-w', \"--wav-file\", dest='wavfile', metavar='', action='store',\n required=True,\n help='input wav file')\n align_maus_parser.add_argument('-c', \"--speech-channel\", dest='channel', metavar='', action='store',\n type=int, default=1,\n help='input wav file')\n align_maus_parser.add_argument('-f', \"--filter-tier\", dest='filtertiername', metavar='', action='store',\n default=\"seg.beep\",\n help=' providing initial segmentation, overlapping ground truth')\n align_maus_parser.add_argument('-s', \"--segmentation-tier\", dest='segtiername', metavar='', action='store',\n default=\"seg.beep\",\n help=' providing speech segmentation')\n align_maus_parser.add_argument(\"-d\", \"--denoise\", dest='denoise', action='store_true', help='enable denoising')\n align_maus_parser.add_argument(\"--initialsilence\", dest='initialsilence', action='store_true',\n help='enable initial and final silence models')\n align_maus_parser.add_argument(\"--remote\", dest='remote', action='store_true',\n help='use maus online service')\n\n dump_boundaries_parser = sub_cmd_parser.add_parser('dumpBoundaries',\n help='dump alignment and reference alignments for evaluation',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n dump_boundaries_parser.set_defaults(cmd=dump_boundaries)\n dump_boundaries_parser.add_argument('-i', \"--input-textgrid\", dest='infile', metavar='', action='store',\n required=False,\n help='input TextGrid')\n dump_boundaries_parser.add_argument('-o', \"--output-file\", dest='outfile', metavar='', action='store',\n required=True,\n help='statistics file')\n dump_boundaries_parser.add_argument('-f', \"--filter-tier\", dest='filtertiername', metavar='', 
action='store',\n default=\"seg.beep\",\n help='filter/suppress speech intervals based on existing speech interval tier')\n dump_boundaries_parser.add_argument('-r', \"--ref_seg_tier\", dest='ref_seg_tiername', metavar='', action='store',\n help='reference segmentation/filter tier')\n\n export_boundaries_parser = sub_cmd_parser.add_parser('exportBoundaries', help='export alignment results',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n export_boundaries_parser.set_defaults(cmd=export_boundaries)\n export_boundaries_parser.add_argument('-i', \"--input-textgrid\", dest='infile', metavar='', action='store',\n required=False,\n help='input TextGrid')\n export_boundaries_parser.add_argument('-o', \"--output-file\", dest='outfile', metavar='', action='store',\n required=True,\n help='statistics file')\n export_boundaries_parser.add_argument('-f', \"--filter-tier\", dest='filtertiername', metavar='', action='store',\n default=\"seg.beep\",\n help='pre-segmentation tier')\n gui_parser = sub_cmd_parser.add_parser('gui', help='open a simple graphical user interface',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n gui_parser.set_defaults(cmd=gui.setup)\n\n args = parser.parse_args(argv)\n return args\n\n\ndef main():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('-=%(levelname)s=- [%(asctime)s.%(msecs)d] %(message)s', datefmt='%H:%M:%S')\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n try:\n args = parse_arguments(sys.argv[1:])\n args.cmd(**util.extract_args(args))\n except subprocess.CalledProcessError as e:\n print(sys.stderr, e)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n sys.exit(1)\n","repo_name":"lschilli/aligntool","sub_path":"src/aligntool.py","file_name":"aligntool.py","file_ext":"py","file_size_in_byte":41830,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"90"} +{"seq_id":"5376196169","text":"__author__ = 'Splitty'\n\nfrom yapsy.PluginManager import PluginManagerSingleton\nfrom yapsy.IPlugin import IPlugin\n\nclass AdminUtilsPlugin(IPlugin):\n app = None\n config = None\n manager = None\n wittyconf = None\n\n def __init__(self):\n self.default_config = {\n 'admins': []\n }\n super(AdminUtilsPlugin, self).__init__()\n\n def init(self):\n self.manager = PluginManagerSingleton.get()\n self.app = self.manager.app\n self.wittyconf = self.manager.wittyconf\n\n def privmsg(self, user, channel, msg):\n authenticated = False if user not in self.config['admins'] else True\n\n # list operators\n if msg == '_ops':\n self.list_operators(channel)\n\n # help\n elif msg == '_help':\n self.help(channel)\n\n # plugin help\n elif msg.startswith('_help'):\n plugin_name = msg[5:].strip()\n self.help_plugin(channel, plugin_name)\n\n # reload config + rehash plugins\n elif authenticated and msg == '_rehash':\n self.manager.wittyconf.reload_config()\n self.rehash(channel)\n\n # give operator status\n elif authenticated and msg.startswith('_op'):\n _user = msg[3:].strip()\n self.give_operator_status(channel, _user)\n\n # take operator status\n elif authenticated and msg.startswith('_deop'):\n _user = msg[5:].strip()\n self.take_operator_status(channel, _user)\n\n # quit\n elif authenticated and msg == '_quit':\n self.app.quit('bye')\n\n # quit with message\n elif authenticated and msg.startswith('_quit'):\n self.app.quit(str(msg[5:]).strip())\n\n # send latest logs to user\n elif 
authenticated and msg.startswith('_log'):\n count = int(msg[5:].strip())\n self.send_logs(user, count)\n\n def list_operators(self, channel):\n if not self.config['admins']:\n self.app.say(channel, 'No administrators found.')\n else:\n msg = 'Operators: %s' % str(', '.join(self.config['admins']))\n self.app.say(channel, msg)\n\n def give_operator_status(self, channel, user):\n if user not in self.config['admins']:\n self.config['admins'].append(user)\n self.wittyconf.update_plugin_config(self.plugin_name, self.config)\n self.app.say(channel, 'Gave %s permission to use adminutils.' % user)\n\n def take_operator_status(self, channel, user):\n removed = 0\n while user in self.config['admins']:\n self.config['admins'].remove(user)\n removed += 1\n if removed > 0:\n self.app.say(channel, 'Took admin permissions from %s' % user)\n self.wittyconf.update_plugin_config(self.plugin_name, self.config)\n\n def rehash(self, channel):\n PluginManagerSingleton._PluginManagerSingleton__instance = None\n self.app.load_plugins()\n self.manager = PluginManagerSingleton.get()\n self.app.say(channel, 'Plugins rehashed.')\n\n def send_logs(self, user, count):\n f = open('witty.log', 'r')\n lines = []\n for line in f:\n lines.append(line)\n f.close()\n if count <= len(lines):\n result = []\n for i in range(len(lines) - count, len(lines), 1):\n result.append('%i %s' % (i, lines[i]))\n self.app.msg(user, '\\n'.join(result))\n else:\n self.app.msg(user, 'Only %i lines in witty.log' % len(lines))\n\n def help(self, channel):\n plugins = []\n for plugin in self.manager.getAllPlugins():\n plugins.append(plugin.name)\n msg = 'Plugins: %s' % str(' | '.join(plugins))\n self.app.say(channel, msg)\n\n def help_plugin(self, channel, plugin_name):\n description = None\n usage = None\n for plugin in self.manager.getAllPlugins():\n if plugin_name == plugin.name:\n description = str(plugin.description)\n if hasattr(plugin.plugin_object, 'usage'):\n usage = str(plugin.plugin_object.usage)\n if usage is not None:\n self.app.say(channel, 'Usage: %s' % usage)\n if description is not None:\n self.app.say(channel, 'Description: %s' % description)\n","repo_name":"SplittyDev/witty","sub_path":"src/plugins/Admin/adminutils.py","file_name":"adminutils.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"4067597245","text":"from __future__ import absolute_import\n\nfrom ..types import TypedList\nfrom ..util import prep\nfrom datetime import datetime\nimport json\nimport logging\nimport six\n\nclass CellLink(object):\n\n \"\"\"Smartsheet CellLink data model.\"\"\"\n\n def __init__(self, props=None, base_obj=None):\n \"\"\"Initialize the CellLink model.\"\"\"\n self._base = None\n if base_obj is not None:\n self._base = base_obj\n self._pre_request_filter = None\n\n self.allowed_values = {\n 'status': [\n 'OK',\n 'BROKEN',\n 'INACCESSIBLE',\n 'NOT_SHARED',\n 'BLOCKED',\n 'CIRCULAR',\n 'INVALID',\n 'DISABLED']}\n\n self._column_id = None\n self._row_id = None\n self._sheet_id = None\n self._sheet_name = None\n self._status = None\n\n if props:\n # account for alternate variable names from raw API response\n if 'columnId' in props:\n self.column_id = props['columnId']\n if 'column_id' in props:\n self.column_id = props['column_id']\n if 'rowId' in props:\n self.row_id = props['rowId']\n if 'row_id' in props:\n self.row_id = props['row_id']\n if 'sheetId' in props:\n self.sheet_id = props['sheetId']\n if 'sheet_id' in props:\n self.sheet_id = 
props['sheet_id']\n if 'sheetName' in props:\n self.sheet_name = props['sheetName']\n if 'sheet_name' in props:\n self.sheet_name = props['sheet_name']\n if 'status' in props:\n self.status = props['status']\n\n @property\n def column_id(self):\n return self._column_id\n\n @column_id.setter\n def column_id(self, value):\n if isinstance(value, six.integer_types):\n self._column_id = value\n\n @property\n def row_id(self):\n return self._row_id\n\n @row_id.setter\n def row_id(self, value):\n if isinstance(value, six.integer_types):\n self._row_id = value\n\n @property\n def sheet_id(self):\n return self._sheet_id\n\n @sheet_id.setter\n def sheet_id(self, value):\n if isinstance(value, six.integer_types):\n self._sheet_id = value\n\n @property\n def sheet_name(self):\n return self._sheet_name\n\n @sheet_name.setter\n def sheet_name(self, value):\n if isinstance(value, six.string_types):\n self._sheet_name = value\n\n @property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, value):\n if isinstance(value, six.string_types):\n if value not in self.allowed_values['status']:\n raise ValueError(\n (\"`{0}` is an invalid value for CellLink`status`,\"\n \" must be one of {1}\").format(\n value, self.allowed_values['status']))\n self._status = value\n\n def to_dict(self, op_id=None, method=None):\n obj = {\n 'columnId': prep(self._column_id),\n 'rowId': prep(self._row_id),\n 'sheetId': prep(self._sheet_id),\n 'sheetName': prep(self._sheet_name),\n 'status': prep(self._status)}\n return obj\n\n def to_json(self):\n return json.dumps(self.to_dict(), indent=2)\n\n def __str__(self):\n return json.dumps(self.to_dict())\n","repo_name":"sudeepag/csap","sub_path":"csap-bot/botenv/lib/python3.4/dist-packages/smartsheet/models/cell_link.py","file_name":"cell_link.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"21362750343","text":"import socket\nimport time\nimport sys\n\nTCP_IP = '10.0.0.3'\nTCP_PORT = 8900\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\ns.connect((TCP_IP, TCP_PORT))\n\nwhile True:\n msg = \"test filebeat tcp message send.\"\n s.send(msg.encode())\n print(msg)\n time.sleep(1)\n data = s.recv(1024)\n print(data.decode())","repo_name":"SummerQiuye/devops-api","sub_path":"test/socketMessage.py","file_name":"socketMessage.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"90"} +{"seq_id":"72798963817","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# read the csv file\ndf = pd.read_csv(\n \"D:\\\\Study meterial\\\\PLACEMENT\\\\WINTER TRANNING PROGRAM\\\\PYTHON & SQL\\\\DAY 8\\\\dataset.csv\")\n# print(df.head(10))\n\n# # find total number of runs kohli has scored\n# totalruns = df[\"Runs\"].sum()\n# no_of_match = len(df[\"Runs\"])\n# print(f\"Total no. 
of runs kohli scored {no_of_match} matches: {totalruns}\")\n\n# # avarage of number of runs he has\n# avg_runs = df[\"Runs\"].mean()\n# print(f\"Average runs of kohli in {no_of_match} maches : {int(avg_runs)}\")\n\n# # number of matchs he played different position\n# position = df[\"Pos\"].unique()\n# print(position)\n\n# df[\"Pos\"] = df[\"Pos\"].map({\n# 3.0: \"Batting at 3\",\n# 4.0: \"Batting at 4\",\n# 2.0: \"Batting at 2\",\n# 1.0: \"Batting at 1\",\n# 7.0: \"Batting at 7\",\n# 5.0: \"Batting at 5\",\n# 6.0: \"Batting at 6\"\n# })\n\n# print(df[[\"Runs\", \"Pos\", \"Opposition\"]].head)\n\n\ndef show_pie_plot(df, key):\n counts = df[key].value_counts()\n count_values = counts.values\n count_labels = counts.index\n\n fig = plt.figure(figsize=(10, 7))\n plt.pie(count_values, labels=count_labels)\n plt.show()\n\n\n# show_pie_plot(df, \"Pos\")\n# show_pie_plot(df, \"Opposition\")\n# show_pie_plot(df, \"Ground\")\n\n\n# pos_counts = df[\"Pos\"].value_counts()\n# print(pos_counts)\n# print(type(pos_counts))\n\n# pos_values = pos_counts.values\n# pos_labels = pos_counts.index\n# print(pos_values)\n\n# fig = plt.figure(figsize=(10, 7))\n# plt.pie(pos_values, labels=pos_labels)\n# plt.show()\n\n# total runs scored in diffirent position\nruns_at_pos = df.groupby(\"Pos\")[\"Runs\"].sum()\nruns_at_pos_values = runs_at_pos.values\nruns_at_pos_labels = runs_at_pos.index\n\n# fig = plt.figure(figsize=(10, 7))\n# plt.pie(runs_at_pos_values, labels=runs_at_pos_labels)\n# plt.show()\n\n# Total sixes scored with different opposition\nsixes_with_ops = df.groupby(\"Opposition\")[\"6s\"].sum()\nsixes_with_ops_values = sixes_with_ops.values\nsixes_with_ops_labels = sixes_with_ops.index\n\n# fig = plt.figure(figsize=(10, 7))\n# plt.pie(sixes_with_ops_values, labels=sixes_with_ops_labels)\n# plt.show()\n\n# number of countries scoered by kohli in first\ncenturies = df.query(\"Runs >=100\")\nprint(centuries)\n\ninnings = centuries[\"Inns\"]\ntons = centuries[\"Runs\"]\n\n# fig = plt.figure(figsize=(10, 7))\n# plt.bar(innings, tons, color=\"blue\", width=0.2)\n# plt.show()\n\n# calculate the dismissals of kohli\ndismissals = df[\"Dismissal\"].value_counts()\nprint(dismissals)\n\ndismissals_counts = dismissals.values\ndismissals_labels = dismissals.index\n# show_pie_plot(df, \"Dismissal\")\n\n# Against which team he has scored the most runs\n# fig = plt.figure(figsize=(10, 7))\n# plt.bar(\n# runsdf[\"\"] -------------------------------- code not complete\n# )\n# plt.show()\n\n# Against which team he has scored the most centuries\nfig = plt.figure(figsize=(10, 7))\nplt.bar(\n centuries[\"Opposition\"], centuries[\"Runs\"], color=\"green\", width=0.2\n)\nplt.show()\n\n# Analyze the strike rate\n","repo_name":"SayanGhorai/kohli_performance_analysis","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"44803039196","text":"def get_num(row, col):\n return row * 10 + col\n\nwidth = 9\nheight = 4\nfor row in range(0, height):\n for col in range(row + 1, width - row + 1):\n room_num = get_num(row, col)\n with open(f'room-{room_num}.json', 'x') as f:\n f.write('{\\n')\n f.write(f'\"data_file\": \"room-{room_num}.png\"')\n if col > row + 1:\n f.write(',\\n')\n f.write(f'\"left_neighbour\": \"room-{get_num(row, col - 1)}\"')\n if col < width - row:\n f.write(',\\n')\n f.write(f'\"right_neighbour\": \"room-{get_num(row, col + 1)}\"')\n if row > 0:\n f.write(',\\n')\n 
f.write(f'\"bottom_neighbour\": \"room-{get_num(row - 1, col)}\"')\n if row < 3:\n f.write(',\\n')\n f.write(f'\"top_neighbour\": \"room-{get_num(row + 1, col)}\"')\n f.write('\\n}\\n')\n","repo_name":"Kripner/MinAtar","sub_path":"data/montezumas_revenge/json-gen/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"30022818137","text":"\"\"\"\r\nScript asks user for e-mail. It's supposed e-mail is valid one.\r\nAfter script print greeting message with user registered name took from e-mail and e-mail domain.\r\nIf user's domain is in the list of popular ones, script prints approving message.\r\nIf it is not, script prints message that user's domain is rare.\r\n\"\"\"\r\n\r\n# importing regular expressions support\r\nimport re\r\n\r\n# Making dictionary with two popular domain as a demonstrator of \"list\"\r\npopular_domains = {'Mail.ru': 'mail.ru', 'Google mail': 'gmail.com'}\r\n\r\nuser_email = input(\"Enter your e-mail: \").strip()\r\n\r\n# Short regular expression to exctract user's name\r\nuser_name = re.findall('(\\\\w+)', user_email)[0].title()\r\n# Exctracting domain name after \"@\"\r\nuser_domain = user_email.split('@')[-1]\r\n\r\n# Check if domain name presents in the list of popular ones and print according message\r\nif user_domain in popular_domains.values():\r\n print(f'\\nHello {user_name}! We are happy with your popular \"{user_domain}\" email domain!')\r\nelse:\r\n print(f'\\nHello {user_name}! How rare your \"{user_domain}\" email domain is!')\r\n\r\n","repo_name":"RVSRD/backyard_sandbox","sub_path":"email_slicer.py","file_name":"email_slicer.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"320163474","text":"def msg_load_best_model_complete(metrics_type: str,\n load_model: dict):\n \"\"\"\n Message load best model complete\n\n :param metrics_type: metrics type\n :param load_model: load model dictionary\n \"\"\"\n\n print(\"LOADED BEST MODEL ({}):\".format(metrics_type.upper()),\n \"\\ntrained for {} epochs with Sensitivity Work Point: {:.3f}\".format(load_model['epoch'],\n load_model[metrics_type]))\n\n\ndef msg_load_resume_model_complete(load_model: dict):\n \"\"\"\n Message load resume model complete\n\n :param load_model: load model dictionary\n \"\"\"\n\n print(\"LOADED RESUME-MODEL:\"\n \"\\ntrained for {} epochs with\".format(load_model['epoch']),\n \"\\n- Sensitivity Work Point: {:.3f}\".format(load_model['sensitivity work point']),\n \"\\n- AUFROC [0, 10]: {:.3f}\".format(load_model['AUFROC [0, 10]']))\n","repo_name":"cirorusso2910/GravityNet","sub_path":"net/utility/msg/msg_load_model_complete.py","file_name":"msg_load_model_complete.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"} +{"seq_id":"17706523866","text":"__author__ = \"Samvid Mistry\"\n\nfrom abc import abstractmethod\n\nfrom PySide.QtCore import Signal\n\nfrom MComponents.MShape import MShape\n\n\nclass MTwoStateShape(MShape):\n \"\"\"\n An abstract class representing any MShape which can have binary state, which is checked and unchecked.\n Examples of this can be a checkbox, switch or a radio button.\n \"\"\"\n checkedChangeSignal = Signal()\n\n def __init__(self):\n MShape.__init__(self)\n self.__checked = False\n\n @property\n def checked(self) -> bool:\n return 
self.__checked\n\n @checked.setter\n def checked(self, checked: bool):\n self.__checked = checked\n\n def when_check_changes(self, slot):\n if slot is not None:\n self.__checkedChangeSignal.connect(slot)\n\n def disconnect_slot(self, slot):\n if slot is not None:\n self.__checkedChangeSignal.disconnect(slot)\n\n @abstractmethod\n def check(self) -> bool:\n \"\"\"\n Implementations should properly indicate the change in the state of the widget in implementation.\n WARNING: Subclasses implementing this method should call through super to make sure that the slots\n connected to change signal get notified properly.\n \"\"\"\n if self.__checked is True:\n return False\n\n self.__checked = True\n self.checkedChangeSignal.emit()\n return True\n\n @abstractmethod\n def uncheck(self) -> bool:\n \"\"\"\n Implementations should properly indicate the change in the state of the widget in implementation.\n WARNING: Subclasses implementing this method should call through super to make sure that the slots\n connected to change signal get notified properly.\n \"\"\"\n if self.__checked is False:\n return False\n\n self.__checked = False\n self.checkedChangeSignal.emit()\n return True\n","repo_name":"GelaniNijraj/PyMaterial","sub_path":"MComponents/MTwoStateShape.py","file_name":"MTwoStateShape.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"90"} +{"seq_id":"29721613300","text":"from dana import *\nimport matplotlib.pyplot as plt\n\n# Parameters\n# -----------------------------------------------------------------------------\n# Population size\nn = 4\n\n# Default trial duration\nduration = 3.0*second\n\n# Default Time resolution\ndt = 1.0*millisecond\n\n# Initialization of the random generator (reproductibility !)\n# np.random.seed(1)\n\n# Sigmoid parameter\nVmin = 0.0\nVmax = 20.0\nVh = 16.0\nVc = 3.0\n\n# Thresholds\nCortex_h = -3.0\nStriatum_h = 0.0\nSTN_h = -10.0\nGPi_h = 10.0\nThalamus_h = -40.0\n\n# Time constants\nCortex_tau = 0.01\nStriatum_tau = 0.01\nSTN_tau = 0.01\nGPi_tau = 0.01\nThalamus_tau = 0.01\n\n# Noise level (%)\nCortex_N = 0.01\nStriatum_N = 0.001\nSTN_N = 0.001\nGPi_N = 0.03\nThalamus_N = 0.001\n\n\n# Helper functions\n# -----------------------------------------------------------------------------\ndef sigmoid(V,Vmin=Vmin,Vmax=Vmax,Vh=Vh,Vc=Vc):\n return Vmin + (Vmax-Vmin)/(1.0+np.exp((Vh-V)/Vc))\n\ndef noise(Z, level):\n Z = (1+np.random.uniform(-level/2,level/2,Z.shape))*Z\n return np.maximum(Z,0.0)\n\ndef init_weights(L, gain=1):\n Wmin, Wmax = 0.25, 0.75\n W = L._weights\n N = np.random.normal(0.5, 0.005, W.shape)\n N = np.minimum(np.maximum(N, 0.0),1.0)\n L._weights = gain*W*(Wmin + (Wmax - Wmin)*N)\n\n\n# Populations\n# -----------------------------------------------------------------------------\nCortex_cog = zeros((n,1), \"\"\"dV/dt = (-V + I + L + Iext - Cortex_h)/Cortex_tau;\n U = noise(V,Cortex_N); L; I; Iext\"\"\")\nCortex_mot = zeros((1,n), \"\"\"dV/dt = (-V + I + L + Iext - Cortex_h)/Cortex_tau;\n U = noise(V,Cortex_N); L; I; Iext\"\"\")\nCortex_ass = zeros((n,n), \"\"\"dV/dt = (-V + I + Iext - Cortex_h)/Cortex_tau;\n U = noise(V,Cortex_N); I; Iext\"\"\")\nStriatum_cog = zeros((n,1), \"\"\"dV/dt = (-V + I - Striatum_h)/Striatum_tau;\n U = noise(sigmoid(V), Striatum_N); I\"\"\")\nStriatum_mot = zeros((1,n), \"\"\"dV/dt = (-V + I - Striatum_h)/Striatum_tau;\n U = noise(sigmoid(V), Striatum_N); I\"\"\")\nStriatum_ass = zeros((n,n), \"\"\"dV/dt = (-V + I - Striatum_h)/Striatum_tau;\n U 
= noise(sigmoid(V), Striatum_N); I\"\"\")\nSTN_cog = zeros((n,1), \"\"\"dV/dt = (-V + I - STN_h)/STN_tau;\n U = noise(V,STN_N); I\"\"\")\nSTN_mot = zeros((1,n), \"\"\"dV/dt = (-V + I - STN_h)/STN_tau;\n U = noise(V,STN_N); I\"\"\")\nGPi_cog = zeros((n,1), \"\"\"dV/dt = (-V + I - GPi_h)/GPi_tau;\n U = noise(V,GPi_N); I\"\"\")\nGPi_mot = zeros((1,n), \"\"\"dV/dt = (-V + I - GPi_h)/GPi_tau;\n U = noise(V,GPi_N); I\"\"\")\nThalamus_cog = zeros((n,1), \"\"\"dV/dt = (-V + I - Thalamus_h)/Thalamus_tau;\n U = noise(V,Thalamus_N); I\"\"\")\nThalamus_mot = zeros((1,n), \"\"\"dV/dt = (-V + I - Thalamus_h)/Thalamus_tau;\n U = noise(V, Thalamus_N); I\"\"\")\n\n\n# Connectivity\n# -----------------------------------------------------------------------------\nif 1:\n L = DenseConnection( Cortex_cog('U'), Striatum_cog('I'), 1.0)\n init_weights(L)\n L = DenseConnection( Cortex_mot('U'), Striatum_mot('I'), 1.0)\n init_weights(L)\n L = DenseConnection( Cortex_ass('U'), Striatum_ass('I'), 1.0)\n init_weights(L)\n L = DenseConnection( Cortex_cog('U'), Striatum_ass('I'), np.ones((1,2*n+1)))\n init_weights(L,0.2)\n L = DenseConnection( Cortex_mot('U'), Striatum_ass('I'), np.ones((2*n+1,1)))\n init_weights(L,0.2)\n\n DenseConnection( Cortex_cog('U'), STN_cog('I'), 1.0 )\n DenseConnection( Cortex_mot('U'), STN_mot('I'), 1.0 )\n DenseConnection( Striatum_cog('U'), GPi_cog('I'), -2.0 )\n DenseConnection( Striatum_mot('U'), GPi_mot('I'), -2.0 )\n DenseConnection( Striatum_ass('U'), GPi_cog('I'), -2.0*np.ones((1,2*n+1)))\n DenseConnection( Striatum_ass('U'), GPi_mot('I'), -2.0*np.ones((2*n+1,1)))\n DenseConnection( STN_cog('U'), GPi_cog('I'), 1.0*np.ones((2*n+1,1)) )\n DenseConnection( STN_mot('U'), GPi_mot('I'), 1.0*np.ones((1,2*n+1)) )\n\n DenseConnection( GPi_cog('U'), Thalamus_cog('I'), -0.5 )\n DenseConnection( GPi_mot('U'), Thalamus_mot('I'), -0.5 )\n\n DenseConnection( Thalamus_cog('U'), Cortex_cog('I'), 0.4 )\n DenseConnection( Thalamus_mot('U'), Cortex_mot('I'), 0.4 )\n DenseConnection( Cortex_cog('U'), Thalamus_cog('I'), 0.1 )\n DenseConnection( Cortex_mot('U'), Thalamus_mot('I'), 0.1 )\n\n K = -np.ones((2*n+1,1)) * 0.5\n K[n,0] = +0.5\n DenseConnection( Cortex_cog('U'), Cortex_cog('L'), K)\n\n K = -np.ones((1,2*n+1)) * 0.5\n K[0,n] = +0.5\n DenseConnection( Cortex_mot('U'), Cortex_mot('L'), K)\n\n\n\n# Trial setup\n# -----------------------------------------------------------------------------\n@clock.at(500*millisecond)\ndef set_trial(t):\n m1,m2 = np.random.randint(0,4,2)\n while m2 == m1:\n m2 = np.random.randint(4)\n c1,c2 = np.random.randint(0,4,2)\n while c2 == c1:\n c2 = np.random.randint(4)\n Cortex_mot['Iext'] = 0\n Cortex_cog['Iext'] = 0\n Cortex_ass['Iext'] = 0\n v = 7\n Cortex_mot['Iext'][0,m1] = v + np.random.normal(0,v*Cortex_N)\n Cortex_mot['Iext'][0,m2] = v + np.random.normal(0,v*Cortex_N)\n Cortex_cog['Iext'][c1,0] = v + np.random.normal(0,v*Cortex_N)\n Cortex_cog['Iext'][c2,0] = v + np.random.normal(0,v*Cortex_N)\n Cortex_ass['Iext'][c1,m1] = v + np.random.normal(0,v*Cortex_N)\n Cortex_ass['Iext'][c2,m2] = v + np.random.normal(0,v*Cortex_N)\n\n@clock.at(2500*millisecond)\ndef set_trial(t):\n Cortex_mot['Iext'] = 0\n Cortex_cog['Iext'] = 0\n Cortex_ass['Iext'] = 0\n\n\n# Measurements\n# -----------------------------------------------------------------------------\nsize = int(duration/dt)\n\ntimesteps = np.zeros(size)\nmotor = np.zeros((5, n, size))\ncognitive = np.zeros((5, n, size))\nassociative = np.zeros((2, n*n, size))\n\n@after(clock.tick)\ndef register(t):\n index = int(t*1000)\n\n 
timesteps[index] = t\n\n motor[0,:,index] = Cortex_mot['U'].ravel()\n motor[1,:,index] = Striatum_mot['U'].ravel()\n motor[2,:,index] = STN_mot['U'].ravel()\n motor[3,:,index] = GPi_mot['U'].ravel()\n motor[4,:,index] = Thalamus_mot['U'].ravel()\n\n cognitive[0,:,index] = Cortex_cog['U'].ravel()\n cognitive[1,:,index] = Striatum_cog['U'].ravel()\n cognitive[2,:,index] = STN_cog['U'].ravel()\n cognitive[3,:,index] = GPi_cog['U'].ravel()\n cognitive[4,:,index] = Thalamus_cog['U'].ravel()\n\n associative[0,:,index] = Cortex_ass['U'].ravel()\n associative[1,:,index] = Striatum_ass['U'].ravel()\n\n\n# Simulation\n# -----------------------------------------------------------------------------\nrun(time=duration, dt=dt)\n\n\n# Display 1\n# -----------------------------------------------------------------------------\nif 1:\n fig = plt.figure(figsize=(12,5))\n plt.subplots_adjust(bottom=0.15)\n\n fig.patch.set_facecolor('.9')\n ax = plt.subplot(1,1,1)\n\n plt.plot(timesteps, cognitive[0,0],c='r', label=\"Cognitive Cortex\")\n plt.plot(timesteps, cognitive[0,1],c='r')\n plt.plot(timesteps, cognitive[0,2],c='r')\n plt.plot(timesteps, cognitive[0,3],c='r')\n\n plt.plot(timesteps, motor[0,0],c='b', label=\"Motor Cortex\")\n plt.plot(timesteps, motor[0,1],c='b')\n plt.plot(timesteps, motor[0,2],c='b')\n plt.plot(timesteps, motor[0,3],c='b')\n\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Activity (Hz)\")\n plt.legend(frameon=False, loc='upper left')\n plt.xlim(0.0,duration)\n plt.ylim(-5.0,80.0)\n\n plt.xticks([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0],\n ['0.0','0.5\\n(Trial start)','1.0','1.5', '2.0','2.5\\n(Trial stop)','3.0'])\n plt.savefig(\"model-without-gpi.pdf\")\n plt.show()\n\n\n# Display 2\n# -----------------------------------------------------------------------------\nif 0:\n fig = plt.figure(figsize=(18,12))\n fig.patch.set_facecolor('1.0')\n\n def subplot(rows,cols,n, alpha=0.0):\n ax = plt.subplot(rows,cols,n)\n ax.patch.set_facecolor(\"k\")\n ax.patch.set_alpha(alpha)\n\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.yaxis.set_ticks_position('left')\n ax.yaxis.set_tick_params(direction=\"outward\")\n return ax\n\n ax = subplot(5,3,1)\n ax.set_title(\"MOTOR\", fontsize=24)\n ax.set_ylabel(\"STN\", fontsize=24)\n for i in range(4):\n plt.plot(timesteps, motor[2,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,2)\n ax.set_title(\"COGNITIVE\", fontsize=24)\n for i in range(4):\n plt.plot(timesteps, cognitive[2,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,3,alpha=0)\n ax.set_title(\"ASSOCIATIVE\", fontsize=24)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.spines['left'].set_color('none')\n\n\n ax = subplot(5,3,4)\n ax.set_ylabel(\"CORTEX\", fontsize=24)\n for i in range(4):\n ax.plot(timesteps, motor[0,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,5)\n for i in range(4):\n plt.plot(timesteps, cognitive[0,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,6)\n for i in range(16):\n plt.plot(timesteps, associative[0,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,7)\n ax.set_ylabel(\"STRIATUM\", fontsize=24)\n for i in range(4):\n plt.plot(timesteps, motor[1,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,8)\n for i in range(4):\n plt.plot(timesteps, cognitive[1,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,9)\n for i in range(16):\n plt.plot(timesteps, associative[1,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,10)\n 
ax.set_ylabel(\"GPi\", fontsize=24)\n for i in range(4):\n plt.plot(timesteps, motor[3,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,11)\n for i in range(4):\n plt.plot(timesteps, cognitive[3,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,13)\n ax.set_ylabel(\"THALAMUS\", fontsize=24)\n for i in range(4):\n plt.plot(timesteps, motor[4,i], c='k', lw=.5)\n ax.set_xticks([])\n\n ax = subplot(5,3,14)\n for i in range(4):\n plt.plot(timesteps, cognitive[4,i], c='k', lw=.5)\n ax.set_xticks([])\n\n plt.savefig(\"model-results-all.pdf\")\n plt.show()\n","repo_name":"rougier/Neurosciences","sub_path":"basal-ganglia/topalidou-et-al-2014/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"90"} +{"seq_id":"18141394799","text":"from itertools import combinations\n\nif __name__ == '__main__':\n while True:\n # ?????????????????\\???\n data = [int(x) for x in input().split(' ')]\n upper_limit = data[0]\n target_number = data[1]\n if upper_limit == 0 and target_number == 0:\n break\n\n # ????????????????????????\n choices = combinations(range(1, upper_limit+1), 3)\n\n # ????¨???????X????????´????????°????¢????????????????\n hit_count = 0\n for c in choices:\n # print(c)\n if sum(c) == target_number:\n hit_count += 1\n print(hit_count)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02412/s584734885.py","file_name":"s584734885.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"8984306619","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 20 13:08:34 2020\r\n\r\n@author: Aki z\r\n\"\"\"\r\n\"\"\"\r\n專題\r\n先想好策略,再透過歷史資料,看看是否得益\r\n透過RSI指標去尋找買進賣出的點\r\n交易策略:\r\n 當短周期超過長週期時買進;\r\n IRS6>80賣出\"\"\"\r\n\r\n \r\nfrom Methodology import *\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport matplotlib.dates as md\r\nfrom datetime import datetime\r\nimport datetime\r\nfrom pylab import figure,show\r\n\r\nticker=2330\r\nstart='2019-03-30'\r\nend='2020-03-09'\r\n\r\ngetpricedata(ticker,start,end)\r\n\r\n#讀取檔案\r\ndata=pd.read_csv(str(ticker)+'.csv')\r\n\r\n#把每一日的價-前一日的價\r\na=1\r\ndata['Ut']=Ut(a,data)#注意新增欄位size要相符\r\ndata['Dt']=Dt(a,data)\r\n\r\n#UPt.DNt\r\nfor a in [6,12,24]:\r\n data['UPt'+str(a)]=UPt(a,data)\r\n data['DNt'+str(a)]=DNt(a,data)\r\n data['RSI'+str(a)]=RSI(a,data['UPt'+str(a)],data['DNt'+str(a)])\r\n#畫圖(分析資料)\r\nplt.figure(figsize=(20,10))\r\nplt.style.use('ggplot')\r\n#data.index=list(data['Date'])#將index改為日期\r\n\r\n#設圖表x軸--時間\r\ndate=[]\r\nfor i in range(0,len(data)):\r\n s=data['Date'][i]\r\n y=int(s[0:4])\r\n m=int(s[5:7])\r\n d=int(s[8:10])\r\n dt=datetime.datetime(int(y),int(m),int(d))\r\n date.append(md.date2num(dt))\r\nx=date \r\n#print(x)\r\n#圖名,軸名\r\nplt.title(str(ticker)+' RSI Analysis')\r\nplt.xlabel('Date')\r\nplt.ylabel('RSI')\r\n#x=[datetime.strptime(d, '%Y-%m-%d').date() for d in data['Date']]\r\ny0=data['RSI6'].tolist()#.astype('str')\r\ny1=data['RSI12'].tolist()#.astype('str')\r\ny2=data['RSI24'].tolist()#.astype('str')\r\n#畫圖\r\n#print(type(y0))\r\n#print(y0)\r\n\r\n\r\nplt.plot(x[6:],y0[6:],label='RSI6')\r\nplt.plot(x[12:],y1[12:],label='RSI12')\r\nplt.plot(x[24:],y2[24:],label='RSI24')\r\n\r\nplt.gca().xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))\r\nplt.gca().xaxis.set_major_locator(md.DayLocator())\r\nplt.gcf().autofmt_xdate()\r\nplt.legend(loc = \"best\", 
fontsize=20)\r\n#y_sticks=np.arange(0,100,1)\r\n#plt.yticks(y_sticks)\r\nplt.show()\r\n\r\n#長期:\r\n#策略:當短周期超過長週期時買進\r\nbuy=[]\r\nsell=[]\r\nfor i in range(0,len(data)-1):\r\n if data['RSI6'][i]==' ' or data['RSI12'][i]==' ' or data['RSI24'][i]==' ':\r\n #print(i)\r\n continue\r\n elif ((data['RSI6'][i]>=data['RSI12'][i]) and (data['RSI12'][i]>=data['RSI24'][i])) or \\\r\n data['RSI6'][i]<=20:\r\n #print(i)\r\n temp=data['Open'][i+1]\r\n buy.append(temp)\r\n if data['RSI6'][i]==' ':\r\n #print(i)\r\n continue\r\n elif ((data['RSI6'][i]<=data['RSI12'][i]) and (data['RSI12'][i]<=data['RSI24'][i])) or \\\r\n data['RSI6'][i]>=80:\r\n temp=data['Open'][i+1]\r\n sell.append(temp)\r\nprint('平均買進價: ',sum(buy)/len(buy))\r\nprint('平均賣出價: ',sum(sell)/len(sell))\r\nif sum(buy)/len(buy) > sum(sell)/len(sell):\r\n print('長期:我���棒')\r\nelse:\r\n print('長期:我就爛') \r\n#短期\r\nBuy=0\r\nbuy=[]\r\nsell=[]\r\nfor i in range(0,len(data)-1):\r\n if data['RSI6'][i]==' ' or data['RSI12'][i]==' ' or data['RSI24'][i]==' ':\r\n #print(i)\r\n continue\r\n elif ((data['RSI6'][i]>=data['RSI12'][i]) and (data['RSI12'][i]>=data['RSI24'][i])) or \\\r\n data['RSI6'][i]<=20:\r\n #print(i)\r\n temp=data['Open'][i+1]\r\n buy.append(temp)\r\n for j in range(i,len(data)):\r\n if data['RSI6'][i]==' ':\r\n #print(i)\r\n continue\r\n elif ((data['RSI6'][i]<=data['RSI12'][i]) and (data['RSI12'][i]<=data['RSI24'][i])) or \\\r\n data['RSI6'][i]>=80:\r\n temp=data['Open'][i+1]\r\n sell.append(temp)\r\n break\r\n \r\n\r\n","repo_name":"AkiLin7110/Python","sub_path":"RSI回測/RSI回測.py","file_name":"RSI回測.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"28362106290","text":"# AUTHOR: Brooke Baker\n# ASSIGNMENT: Final Project\n# OVERVIEW: This script scrapes the https://www.schneier.com/cgi-bin/mt/mt-search.cgi?search=vulnerabilities website for articles discussing various vulnerabilities\n\n### IMPORT STATEMENTS ###\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport urllib.parse\n\n### FUNCTIONS ###\n# function to scrape security links for vulnerability articles\ndef get_articles(url):\n links = []\n for i in range(1, 33):\n # set url page number\n if i <= 10:\n url = url[:-1] + str(i)\n else:\n url = url[:-2] + str(i)\n\n # get html code and parse to BeautifulSoup object\n webpage = requests.get(url)\n soup = bs(webpage.text, 'html.parser')\n\n # get page links\n links += get_article_links(soup)\n\n # call function to store article text\n store_article_text(links)\n\n# function to store page links\ndef get_article_links(soup):\n # article links are all located in h3 tags with entry class (entry class is unique identifier on page), get those objects\n article_titles = soup.find(class_='entry')\n article_links_a_tags = article_titles.find_all('a')\n article_links = []\n for link in article_links_a_tags:\n try:\n article_links.append(link.get('href'))\n except:\n print('problem with link; moving on to next one')\n \n # keep only unique links\n article_links = set(article_links)\n article_links = list(article_links)\n\n return article_links\n\n# function to store article text\ndef store_article_text(links):\n counter = 1\n\n for link in links:\n print('Writing Security Article file #' + str(counter) + '...')\n # open webpage\n webpage = requests.get(link)\n soup = bs(webpage.text, 'html.parser')\n\n # store article title and content\n try:\n title = soup.find(class_='entry')\n title = title.get_text()\n except:\n title = 'Title Not 
Listed.'\n\n # get article content\n article_content = soup.find(class_='article')\n\n # remove unneeded footer items that are wrapped in p tags\n try:\n footer_items = article_content.find(class_='entry-tags')\n footer_items.decompose()\n footer_items = article_content.find(class_='posted')\n footer_items.decompose()\n except:\n print('no footer_items to remove')\n\n # store all article p tags\n paragraphs = article_content.find_all('p')\n\n # store article to txt file\n with open('C:/Users/Brooke/Documents/My Documents/BYU/Winter 2019/LING 360/Final Project/Security/Security_' + str(counter) + '.txt', 'w', encoding='utf-8') as fout:\n\n # write link and article title to file\n fout.write(link + '\\n')\n fout.write(title + '.\\n\\n')\n\n # write paragraphs to file\n for i in paragraphs:\n fout.write(i.get_text() + '\\n')\n\n counter += 1\n\n\n### FUNCTION CALLS ###\nget_articles('https://www.schneier.com/cgi-bin/mt/mt-search.cgi?search=vulnerabilities&__mode=tag&IncludeBlogs=2&blog_id=2&limit=10&page=1')\n","repo_name":"bbaker10/Vulnerable-Project","sub_path":"Scripts_SecurityScrapper.py","file_name":"Scripts_SecurityScrapper.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73155333737","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import getdate, validate_email_add, cint,flt\n\nclass SMSCreditsRequest(Document):\n\n\tdef validate(self):\n\t\tif not self.get(\"__islocal\"):\n\t\t\tif not self.allocated_credits and frappe.session.user=='Administrator':\n\t\t\t\tfrappe.throw(\"Please enter Allocated Credits before save..!\")\n\t\tif self.requested_credits <=0 :\n\t\t\tfrappe.throw(\"Please enter Valid Credits before save..!\")\n\t\tif self.allocated_credits and self.allocated_credits <=0 :\n\t\t\tfrappe.throw(\"Please enter Valid Credits before save..!\")\n\t\t#frappe.sendmail(recipients=\"email.kadam@gmail.com\", sender='gangadhar.k@indictranstech.com', content=\"msg_member\", subject='')\n\t\n\tdef on_submit(self):\n\t\tobj=frappe.get_doc(\"User\",self.user_name)\n\t\tobj.sms_credits=cint(obj.sms_credits)+cint(self.allocated_credits)\n\t\tobj.save(ignore_permissions=True)\n\t\tobj1=frappe.get_doc(\"User\",\"Administrator\")\n\t\tobj1.sms_credits=cint(obj1.sms_credits)-cint(self.allocated_credits)\n\t\tobj1.save(ignore_permissions=True)\n\n\n@frappe.whitelist()\ndef check_balance():\n\treturn frappe.db.sql(\"select sms_credits from tabUser where name='Administrator'\")\n\n\n","repo_name":"gangadharkadam/verve_local","sub_path":"church_ministry/church_ministry/doctype/sms_credits_request/sms_credits_request.py","file_name":"sms_credits_request.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"40303126494","text":"from random import random\nfrom pyglet.gl import *\nfrom pyglet import shapes\nfrom math import sqrt, sin, cos, atan\n\nwindow = pyglet.window.Window(900, 700)\n\nglClearColor(0, 0, 0, 1)\nx = 50\ny = 0\nt = 0.0\nang0 = 0.7853\ng = 9.81\nlength = 150\n\ndef draw_ball():\n circle = shapes.Circle(x, y, 5, color=(90, 225, 30))\n circle.draw()\n\n\ndef calculate():\n global t, x, y \n t = t + 0.25\n \n ang = ang0 * cos(sqrt(g / length)*t + 1.0)\n\n x = 200 + length * sin(ang);\n y = 200 - length * cos(ang);\n \n\n@window.event\ndef on_draw():\n window.clear()\n draw_ball() \n\n\ndef update(dt):\n 
calculate()\n\n\npyglet.clock.schedule_interval(update, 2 / 60.0)\npyglet.app.run()","repo_name":"dougles/pyglet-practice","sub_path":"examples/pendular.py","file_name":"pendular.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10947220983","text":"#!python3\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\nimport datetime\nimport json\nimport logging\nimport os\nimport random\nimport string\nimport sys\nfrom enum import Enum\nfrom pathlib import Path\nfrom pprint import pprint as pp\nfrom typing import List, Literal, NamedTuple, Tuple, Union\n\nimport jwt\nimport requests\nfrom redo import retry\n\nlogging.getLogger(\"requests\").setLevel(logging.DEBUG)\n\nATN_UPLOAD_URL = \"https://addons.thunderbird.net/api/v3/addons/langpack-{langcode}@thunderbird.mozilla.org/versions/{version}/\"\nCHUNK_SIZE = 128 * 1024\n\n\nclass ATNChannel(Enum):\n LISTED = \"listed\"\n UNLISTED = \"unlisted\"\n\n\nLocales = List[str]\nVersion = str\nApiParam = str\nEnvVars = NamedTuple(\n \"EnvVars\",\n [\n (\"langpack_version\", Version),\n (\"locales\", Locales),\n (\"langpack_dir\", Path),\n (\"langpack_channel\", Literal[ATNChannel.LISTED, ATNChannel.UNLISTED]),\n (\"api_key\", ApiParam),\n (\"api_secret\", ApiParam),\n ],\n)\nResult = Tuple[str, Union[object, None]]\n\n\ndef print_line(message):\n msg_bytes = message.encode(\"utf-8\")\n written = 0\n while written < len(msg_bytes):\n written += sys.stdout.buffer.write(msg_bytes[written:]) or 0\n sys.stdout.buffer.flush()\n\n\nclass ATNUploader:\n def __init__(self, options: EnvVars):\n self.api_key = options.api_key\n self.api_secret = options.api_secret\n self.langpack_dir = options.langpack_dir\n self.langpack_version = options.langpack_version\n self.langpack_channel = options.langpack_channel\n self.locales = options.locales\n\n def mk_headers(self) -> dict:\n now = datetime.datetime.utcnow()\n payload = {\n \"iss\": self.api_key,\n \"jti\": \"\".join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(64)\n ),\n \"exp\": now + datetime.timedelta(seconds=60),\n \"iat\": now,\n }\n headers = {\n \"Authorization\": \"JWT {0}\".format(\n jwt.encode(payload, self.api_secret, algorithm=\"HS256\")\n )\n }\n return headers\n\n def upload_langpack(self, locale: str) -> Result:\n langpack_path = self.langpack_dir / locale / \"target.langpack.xpi\"\n headers = self.mk_headers()\n langpack_fd = open(langpack_path, \"rb\")\n file = {\"upload\": (\"upload\", langpack_fd)}\n data = {\"channel\": self.langpack_channel}\n\n url = ATN_UPLOAD_URL.format(version=self.langpack_version, langcode=locale)\n with requests.put(url, files=file, data=data, headers=headers, verify=False) as resp:\n if not resp.ok:\n print_line(f\"Failed {locale}\")\n return resp.json()\n else:\n return resp.json()\n\n def upload_all_locales(self) -> Tuple[List[Result], List[Result]]:\n failed = []\n success = []\n for locale in self.locales:\n try:\n rv = retry(self.upload_langpack, args=(locale,), attempts=3, sleeptime=10)\n if \"error\" not in rv:\n success.append((locale, rv))\n else:\n failed.append((locale, rv))\n except requests.HTTPError as e:\n print_line(e)\n failed.append((locale, None))\n return success, failed\n\n\ndef get_secret(name: str) -> Tuple[ApiParam, ApiParam]:\n secret = {}\n if \"MOZ_SCM_LEVEL\" in 
os.environ:\n level = os.environ.get(\"MOZ_SCM_LEVEL\", \"1\")\n taskcluster_url = os.environ.get(\"TASKCLUSTER_PROXY_URL\") or os.environ.get(\n \"TASKCLUSTER_ROOT_URL\", \"\"\n )\n secrets_url = (\n f\"{taskcluster_url}/secrets/v1/secret/project/comm/thunderbird/releng\"\n f\"/build/level-{level}/{name}\"\n )\n res = requests.get(secrets_url)\n res.raise_for_status()\n secret = res.json()\n elif \"SECRET_FILE\" in os.environ: # For local dev/debug\n with open(os.environ[\"SECRET_FILE\"]) as fp:\n secret = json.load(fp)[\"secret\"]\n secret = secret.get(\"secret\")\n api_key = secret[\"api_key\"] if \"api_key\" in secret else None\n api_secret = secret[\"api_secret\"] if \"api_secret\" in secret else None\n if api_key is None or api_secret is None:\n raise Exception(f\"Unable to get secret. {secret.keys()}\")\n\n return api_key, api_secret\n\n\ndef read_env_vars() -> EnvVars:\n try:\n langpack_version = os.environ[\"LANGPACK_VERSION\"]\n locales_json = os.environ[\"LOCALES\"]\n langpack_dir = Path(os.environ[\"MOZ_FETCHES_DIR\"]).resolve()\n langpack_channel = os.environ[\"ATN_CHANNEL\"]\n except KeyError:\n raise Exception(\"Missing environment variable(s)\")\n\n locales = json.loads(locales_json)\n api_key, api_secret = get_secret(\"atn_langpack\")\n\n return EnvVars(\n langpack_version, locales, langpack_dir, ATNChannel(langpack_channel), api_key, api_secret\n )\n\n\ndef main():\n options = read_env_vars()\n\n atn_uploader = ATNUploader(options)\n success, failed = atn_uploader.upload_all_locales()\n\n pp(success)\n if failed:\n pp(failed)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mozilla/releases-comm-central","sub_path":"taskcluster/docker/tb-atn/atn_langpack.py","file_name":"atn_langpack.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"90"} +{"seq_id":"73456693096","text":"import wx\nimport os\nimport constant as const\nimport datetime as dt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import (\n FigureCanvasWxAgg as FigureCanvas)\n\n\nclass BoardTab(wx.Panel):\n\n\tdef __init__(self, parent, mainframe):\n\t\twx.Panel.__init__(self, parent)\n\t\tself.mainframe = mainframe\n\t\tself.CellVoltage = [ (i) for i in range(0, 18) ]\n\t\tself.GPIOVoltage = [ (i) for i in range(0, 9) ]\n\t\tself.StatsValue = [ (i) for i in range (0,7) ]\n\t\treturn\n\n\tdef createBoardTab(self):\n\t\tself.createTabHeader()\n\t\tself.ui_fault_panel_design()\n\t\tself.ui_cell_voltage_panel_design()\n\t\tself.ui_gpio_voltage_panel_design()\n\t\tself.ui_stats_panel_design()\n\t\tself.ui_select_cell_panel_design()\n\t\tself.ui_select_gpio_panel_design()\n\t\tself.ui_graph_panel_design()\n\t\treturn\n\n\n\tdef createTabHeader(self):\n\n\t\tself.SetBackgroundColour('WHITE') # background colour tab 1\n\t\tpng = wx.Image(os.path.join('analog_logo.png'), # image for tab 1\n\t\t\t\t wx.BITMAP_TYPE_ANY).ConvertToBitmap()\n\t\twx.StaticBitmap(\n\t\t\tself,\n\t\t\t-1,\n\t\t\tpng,\n\t\t\tpos=(1060, 2),\n\t\t\tsize=(png.GetWidth(), png.GetHeight()))\n\n\n\t\t# Add Text\n\t\tst = wx.StaticText(self, label=const.CONFIG_PAGE_TITLE, pos=(423, 5), size=(20,40), style=wx.TE_RICH)\n\t\tst.SetForegroundColour((const.CONG_TXT_RGB_COLOR))\n\t\tfont = wx.Font(pointSize=30, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD,\n\t\t\t\t underline=False) \n\t\tst.SetFont(font)\n\n\n\t\tst_left_title = wx.StaticText(self, 
label=const.CONFIG_PAGE_LEFT_TITLE, pos=(2, 5), size=(20,40), style=wx.TE_RICH)\n\t\ttxtfont = wx.Font(pointSize=11, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL, weight=wx.FONTWEIGHT_BOLD, underline=False)\n\t\tst_left_title.SetForegroundColour((const.CONG_TXT_RGB_COLOR))\n\t\tst_left_title.SetFont(txtfont)\n\n\t\ttxttopline = wx.TextCtrl(self, id=wx.ID_ANY, pos=(0, 50), size=(1160, 8), style= wx.BORDER_NONE | wx.TE_READONLY)\n\t\ttxttopline.SetBackgroundColour((const.CONG_TXT_RGB_COLOR))\n\n\t\treturn\n\n\n\tdef ui_fault_panel_design(self):\n\n\t\t# Board Fault Box\n\t\tFaultBox = wx.StaticBox( # box for tab 2\n\t\t\tself,\n\t\t\twx.ID_ANY,\n\t\t\t'Fault',\n\t\t\tpos=(30, 61),\n\t\t\tsize=(455, 69))\n\t\tfont = wx.Font(11, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\t\tFaultBox.SetFont(font)\n\t\tFaultBox.SetForegroundColour((40, 96, 134))\n\t\tfault_font = wx.Font(7, wx.DECORATIVE, wx.NORMAL, wx.FONTWEIGHT_BOLD)\n\n\t\t# Cell Over voltage\n\t\tself.CellOV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[0], pos=(60, 84), size=(10, 5),\n\t\t\t\t style=0) # defualt checked\n\t\tself.CellOV.SetFont(fault_font)\n\t\tself.txtCellOV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(39, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# Cell Under voltage\n\t\tself.CellUV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[1], pos=(60, 107),\n\t\t\t\t size=(10, 5),\n\t\t\t\t style=0) # defualt checked\n\t\tself.CellUV.SetFont(fault_font)\n\t\tself.txtCellUV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(39, 105), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# Cell Open-wire\n\t\tself.CellOW = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[2], pos=(125, 84), size=(10, 5),\n\t\t\t\t style=0) # defualt checked\n\t\tself.CellOW.SetFont(fault_font)\n\t\tself.txtCellOW = wx.TextCtrl(self, id=wx.ID_ANY, pos=(105, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# Line Open-wire\n\t\tself.LineOW = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[3], pos=(125, 107), size=(10, 5),\n\t\t\t\t style=0) # defualt checked\n\t\tself.LineOW.SetFont(fault_font)\n\t\tself.txtLineOW = wx.TextCtrl(self, id=wx.ID_ANY, pos=(105, 105), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# VReg Over voltage\n\t\tself.VREGOV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[4], pos=(190, 84), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.VREGOV.SetFont(fault_font)\n\t\tself.txtVREGOV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(171, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# VReg Under voltage\n\t\tself.VREGUV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[5], pos=(190, 107), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.VREGUV.SetFont(fault_font)\n\t\tself.txtVREGUV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(171, 105), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# VRegD Over voltage\n\t\tself.VREGDOV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[6], pos=(256, 84), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.VREGDOV.SetFont(fault_font)\n\t\tself.txtVREGDOV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(237, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# VRegD Over voltage\n\t\tself.VREGDUV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[7], pos=(256, 107), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.VREGDUV.SetFont(fault_font)\n\t\tself.txtVREGDUV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(237, 105), size=(17, 17), style=wx.TE_READONLY)\n\n\t\t# Stack Over voltage\n\t\tself.STACKOV = wx.StaticText(self, 
label=list(const.FAULT_TYPES.keys())[8], pos=(330, 84), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.STACKOV.SetFont(fault_font)\n\t\tself.txtSTACKOV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(310, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# Stack Under voltage\n\t\tself.STACKUV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[9], pos=(330, 107), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.STACKUV.SetFont(fault_font)\n\t\tself.txtVSTACKUV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(310, 105), size=(17, 17), style=wx.TE_READONLY) \n\n\t\t# GPIO Over voltage\n\t\tself.GPIO_OV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[10], pos=(400, 84), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.GPIO_OV.SetFont(fault_font)\n\t\tself.txtGPIO_OV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(380, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# GPIO Under voltage\n\t\tself.GPIO_UV = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[11], pos=(400, 107), size=(20, 10),\n\t\t\t\t style=0) # defualt checked\n\t\tself.GPIO_UV.SetFont(fault_font)\n\t\tself.txtGPIO_UV = wx.TextCtrl(self, id=wx.ID_ANY, pos=(380, 105), size=(17, 17), style=wx.TE_READONLY)\n\n\t\t# Die Over temperature\n\t\tself.DIE_OT = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[12], pos=(462, 84),\n\t\t\t\t style=0) # defualt checked\n\t\tself.DIE_OT.SetFont(fault_font)\n\t\tself.txtDIE_OT = wx.TextCtrl(self, id=wx.ID_ANY, pos=(443, 82), size=(16, 16), style=wx.TE_READONLY)\n\n\t\t# Die Under temperature\n\t\tself.DIE_UT = wx.StaticText(self, label=list(const.FAULT_TYPES.keys())[13], pos=(462, 107),\n\t\t\t\t style=0) # defualt checked\n\t\tself.DIE_UT.SetFont(fault_font)\n\t\tself.txtDIE_UT = wx.TextCtrl(self, id=wx.ID_ANY, pos=(443, 105), size=(16, 16), style=wx.TE_READONLY)\n\t\treturn\n\n\n\tdef ui_cell_voltage_panel_design(self):\n\n\t\tfont = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\t\tbtn_font = wx.Font(7, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\n\t\t# Cell Voltage Box\n\t\tbox5 = wx.StaticBox( # box for tab 2\n\t\t\tself,\n\t\t\twx.ID_ANY,\n\t\t\t'Cell Voltages',\n\t\t\tpos=(30, 138),\n\t\t\tsize=(200, 480))#(200, 548))\n\t\tbox5.SetFont(font)\n\t\tbox5.SetForegroundColour((40, 96, 134))\n\t\tfont = wx.Font(9, wx.DECORATIVE, wx.NORMAL, wx.FONTWEIGHT_NORMAL)\n\n\t\tself.i = 0\n\t\ty_pos = 159\n\t\tfor i in range(18):\n\t\t\tLab = (\"C\" + str(i + 1) + \"V\")\n\t\t\tself.CellVoltageText = wx.StaticText(self, label=Lab, pos=(48, y_pos))\n\t\t\tself.CellVoltageText.SetFont(font)\n\t\t\tself.CellVoltage[i] = wx.TextCtrl(self, id=wx.ID_ANY, pos=(118, y_pos), size=(80, 20))\n\t\t\ty_pos = y_pos + 25\n\n\t\treturn\n\n\n\n\tdef ui_gpio_voltage_panel_design(self):\n\n\t\tfont = wx.Font(pointSize=10, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL,\n\t\t\t\t weight=wx.BOLD, underline=False)\n\n\t\t# GPIO Voltage Box\n\t\tbox6 = wx.StaticBox( # box for tab 2\n\t\t\tself,\n\t\t\twx.ID_ANY,\n\t\t\t'GPIO Voltages',\n\t\t\tpos=(270, 137),\n\t\t\tsize=(200, 260))\n\t\tfont = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\n\t\tbox6.SetFont(font)\n\t\tbox6.SetForegroundColour((40, 96, 134))\n\n\t\ty_pos = 155\n\t\tfor i in range(9):\n\t\t\tLab = (\"G\" + str(i + 1) + \"V\")\n\t\t\tself.GPIOVoltageText = wx.StaticText(self, label=Lab, pos=(288, y_pos))\n\t\t\tself.GPIOVoltageText.SetFont(font)\n\t\t\tself.GPIOVoltage[i] = wx.TextCtrl(self, id=wx.ID_ANY, pos=(357, y_pos), size=(80, 20))\n\t\t\ty_pos = y_pos + 27\n\n\t\treturn\n\n\tdef 
ui_stats_panel_design(self):\n\t\t# Stat Value Box\n\t\tbox7 = wx.StaticBox( # box for tab 2\n\t\tself,\n\t\twx.ID_ANY,\n\t\t'Stat Value',\n\t\tpos=(270, 400),\n\t\tsize=(200, 218))\n\t\tfont = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\t\tbox7.SetFont(font)\n\t\tbox7.SetForegroundColour((40, 96, 134))\n\n\t\tStateValueName = const.StateNameList\n\t\tfont = wx.Font(pointSize=10, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL,\n\t\t\t\t weight=wx.FONTWEIGHT_NORMAL, underline=False)\n\n\t\ty_pos = 425\n\t\tfor i in range(7):\n\t\t\tLab = StateValueName[i]\n\t\t\tself.StatsValueText = wx.StaticText(self, label=Lab, pos=(276, y_pos))\n\t\t\tself.StatsValue[i] = wx.TextCtrl(self, id=wx.ID_ANY, pos=(357, y_pos), size=(80, 20))\n\t\t\ty_pos = y_pos + 27\n\n\t\treturn\n\n\n\tdef ui_graph_panel_design(self):\n\n\t\t# plot text\n\t\tPlots = wx.StaticText(self, label=const.Graph_title, pos=const.pos_graph_title, size=(20,40))\n\t\tfont = wx.Font(pointSize=15, family=wx.FONTFAMILY_DEFAULT, style=wx.FONTSTYLE_NORMAL,\n\t\t\t\t weight=wx.FONTWEIGHT_NORMAL, underline=False)\n\t\tPlots.SetFont(font)\n\t\tPlots.SetForegroundColour((40, 96, 134))\n\n\n\t\t# geenrate empty list initially for cell selection\n\t\tself.CellList = [] # cell selected list\n\t\tself.GpioList = []\t# selected Gpio list\n\t\tself.legendCellList = [] # legend cell label display list\n\t\tself.legendCellColorCellList = [] # final legend display color lest\n\t\tself.legendColorGpioList =[]\n\n\t\t# pre-asigned legend colour to each cell\n\t\tself.legendColorCellList = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\", \"#8c564b\",\n\t\t\t\t \t\"#e377c2\", \"#7f7f7f\", \"#bcbd22\", \"#17becf\", \"#1f77b4\", \"#ff7f0e\",\n\t\t\t\t \t\"#2ca02c\", \"#d62728\", \"#9467bd\", \"#8c564b\", \"#e377c2\", \"#7f7f7f\"]\n\n\t\tself.legendColorGpioList = [\"#2c03fc\", \"#857bba\", \"#d61a91\", \"#179c0e\", \"#d16981\", \"#170f30\",\n\t\t\t\t \t\"#f0e00a\", \"#e06c07\", \"#07e0bc\"]\n\n\t\tself.legendList =[]\n\t\tself.legendCellList =[]\n\t\tself.legendGpioList =[]\n\t\t\n\t\tself.dates = [(dt.datetime.now() + dt.timedelta(seconds=i)).strftime('%H:%M:%S') for i in range(7)]\n\n\n\n\t\t# Add figure graph\n\t\tself.figure1 = Figure(figsize=(5, 1), dpi=90, facecolor='white')\n\t\tself.figure1.autofmt_xdate(bottom=0.2, rotation=90, ha='center')\n\n\t\tself.axes1tab2 = self.figure1.add_subplot(111)\n\t\tself.axes1tab2.axis([0, 6, 0, 7])\n\t\tself.axes1tab2.spines['bottom'].set_color('black')\n\t\tself.axes1tab2.spines['left'].set_color('black')\n\t\tself.axes1tab2.spines['bottom'].set_linewidth(0.5)\n\t\tself.axes1tab2.spines['left'].set_linewidth(0.3)\n\t\tself.axes1tab2.set_xticklabels(self.dates)\n\t\tself.axes1tab2.set_yticks([0, 1, 2, 3, 4, 5, 6, 7])\n\t\tself.axes1tab2.set_ylabel(const.Graph_y_label, fontsize=8, color='black', loc=\"center\") ##TODO\n\t\tself.axes1tab2.set_xlabel(const.Graph_x_label, fontsize=8, color='black', loc=\"center\") ##TODO\n\n\t\t# axis ticks customization\n\t\tself.axes1tab2.tick_params(axis='x', labelsize=9, color='white',\n\t\t\t\t colors='black') # axis line param , NOTE: color = axis tick color and colors: axis label color\n\t\tself.axes1tab2.tick_params(axis='y', labelsize=9, color='white', colors='black') # axis line param\n\n\t\tself.axes1tab2.grid()\n\t\t\n\t\tself.canvas1 = FigureCanvas(self, -1, self.figure1)\n\t\tself.canvas1.SetSize((650, 250))\n\t\tself.canvas1.SetPosition((485, 110))\n\t\tself.canvas1.draw()\n\t\treturn\n\n\n\tdef ui_select_cell_panel_design(self):\n\n\t\tbox7 
= wx.StaticBox( \n\t\t\tself,\n\t\t\twx.ID_ANY,\n\t\t\t'Select Cell',\n\t\t\tpos=(567, 359),\n\t\t\tsize=(508, 150))\n\t\tfont = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\t\tbox7.SetFont(font)\n\t\tbox7.SetForegroundColour((40, 96, 134))\n\n\t\t# Check box for Cell\n\t\tself.CheckBoxCVAll = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(590, 380), size=(15, 15))\n\t\tself.CheckBoxCVAll.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCVALL)\n\t\tself.CheckBoxCV1 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(665, 380), size=(15, 15))\n\t\tself.CheckBoxCV1.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV1)\n\t\tself.CheckBoxCV2 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(740, 380), size=(15, 15))\n\t\tself.CheckBoxCV2.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV2)\n\t\tself.CheckBoxCV3 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(815, 380), size=(15, 15))\n\t\tself.CheckBoxCV3.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV3)\n\t\tself.CheckBoxCV4 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(890, 380), size=(15, 15))\n\t\tself.CheckBoxCV4.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV4)\n\t\tself.CheckBoxCV5 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(965, 380), size=(15, 15))\n\t\tself.CheckBoxCV5.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV5)\n\t\tself.CheckBoxCV6 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(1040, 380), size=(15, 15))\n\t\tself.CheckBoxCV6.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV6)\n\t\tself.CheckBoxCV7 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(590, 425), size=(15, 15))\n\t\tself.CheckBoxCV7.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV7)\n\t\tself.CheckBoxCV8 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(665, 425), size=(15, 15))\n\t\tself.CheckBoxCV8.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV8)\n\t\tself.CheckBoxCV9 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(740, 425), size=(15, 15))\n\t\tself.CheckBoxCV9.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV9)\n\t\tself.CheckBoxCV10 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(815, 425), size=(15, 15))\n\t\tself.CheckBoxCV10.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV10)\n\t\tself.CheckBoxCV11 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(890, 425), size=(15, 15))\n\t\tself.CheckBoxCV11.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV11)\n\t\tself.CheckBoxCV12 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(965, 425), size=(15, 15))\n\t\tself.CheckBoxCV12.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV12)\n\t\tself.CheckBoxCV13 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(1040, 425), size=(15, 15))\n\t\tself.CheckBoxCV13.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV13)\n\t\tself.CheckBoxCV14 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(590, 465), size=(15, 15))\n\t\tself.CheckBoxCV14.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV14)\n\t\tself.CheckBoxCV15 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(665, 465), size=(15, 15))\n\t\tself.CheckBoxCV15.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV15)\n\t\tself.CheckBoxCV16 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(740, 465), size=(15, 15))\n\t\tself.CheckBoxCV16.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV16)\n\t\tself.CheckBoxCV17 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(815, 465), size=(15, 15))\n\t\tself.CheckBoxCV17.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV17)\n\t\tself.CheckBoxCV18 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(890, 465), size=(15, 15))\n\t\tself.CheckBoxCV18.Bind(wx.EVT_CHECKBOX, self.onCheckBoxCV18)\n\n\t\t# Static Text for Checkbox\n\t\tLable_List_Cell = [\"ALL\", \"C1V\", \"C2V\", \"C3V\", \"C4V\", \"C5V\", \"C6V\",\n\t\t\t\t \"C7V\", \"C8V\", 
\"C9V\", \"C10V\", \"C11V\", \"C12V\", \"C13V\",\n\t\t\t\t \"C14V\", \"C15V\", \"C16V\", \"C17V\", \"C18V\"]\n\n\t\tX_List_Cell = [587, 660, 735, 810, 885, 960, 1035,\n\t\t\t\t 585, 660, 735, 807, 882, 956, 1032,\n\t\t\t\t 581, 656, 731, 807, 882]\n\n\t\tY_List_Cell = [395, 395, 395, 395, 395, 395, 395,\n\t\t\t\t 440, 440, 440, 440, 440, 440, 440,\n\t\t\t\t 480, 480, 480, 480, 480]\n\n\t\tfor i in range(19):\n\t\t\tself.CGV = wx.StaticText(self, id=wx.ID_ANY, label=Lable_List_Cell[i], pos=(X_List_Cell[i], Y_List_Cell[i]))\n\t\t\tself.CGV.SetFont(font)\n\t\t\tself.CGV.SetForegroundColour('black')\n\t\treturn\n\n\n\tdef ui_select_gpio_panel_design(self):\n\t\t# Select GPIO Box\n\t\tbox8 = wx.StaticBox( # box for tab 2\n\t\t\tself,\n\t\t\twx.ID_ANY,\n\t\t\t'Select GPIO',\n\t\t\tpos=(567, 526),\n\t\t\tsize=(508, 82))\n\t\tfont = wx.Font(10, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n\t\tbox8.SetFont(font)\n\t\tbox8.SetForegroundColour((40, 96, 134))\n\t\t# Check box for GPIO\n\t\tself.CheckBoxGVAll = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(590, 545), size=(12, 12))\n\t\tself.CheckBoxGVAll.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGVALL)\n\t\tself.CheckBoxGV1 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(665, 545), size=(12, 12))\n\t\tself.CheckBoxGV1.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV1)\n\t\tself.CheckBoxGV2 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(740, 545), size=(12, 12))\n\t\tself.CheckBoxGV2.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV2)\n\t\tself.CheckBoxGV3 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(815, 545), size=(12, 12))\n\t\tself.CheckBoxGV3.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV3)\n\t\tself.CheckBoxGV4 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(890, 545), size=(12, 12))\n\t\tself.CheckBoxGV4.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV4)\n\t\tself.CheckBoxGV5 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(965, 545), size=(12, 12))\n\t\tself.CheckBoxGV5.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV5)\n\t\tself.CheckBoxGV6 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(1040, 545), size=(12, 12))\n\t\tself.CheckBoxGV6.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV6)\n\t\tself.CheckBoxGV7 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(590, 575), size=(12, 12))\n\t\tself.CheckBoxGV7.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV7)\n\t\tself.CheckBoxGV8 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(665, 575), size=(12, 12))\n\t\tself.CheckBoxGV8.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV8)\n\t\tself.CheckBoxGV9 = wx.CheckBox(self, id=wx.ID_ANY, label=\"\", pos=(740, 575), size=(12, 12))\n\t\tself.CheckBoxGV9.Bind(wx.EVT_CHECKBOX, self.onCheckBoxGV9)\n\n\t\tLable_List_GPIO = [\"ALL\", \"G1V\", \"G2V\", \"G3V\", \"G4V\", \"G5V\", \"G6V\",\n\t\t\t\t\t \"G7V\", \"G8V\", \"G9V\"]\n\n\t\tX_List_GPIO = [587, 660, 735, 810, 885, 960, 1035,\n\t\t\t\t 585, 660, 735]\n\n\t\tY_List_GPIO = [557, 557, 557, 557, 557, 557, 557,\n\t\t\t\t 588, 588, 588]\n\n\t\tfor i in range(10):\n\t\t\tself.CGV = wx.StaticText(self, id=wx.ID_ANY, label=Lable_List_GPIO[i], pos=(X_List_GPIO[i], Y_List_GPIO[i]))\n\t\t\tself.CGV.SetFont(font)\n\t\t\tself.CGV.SetForegroundColour('black')\n\t\treturn\n\n\n\tdef graph_init(self):\n\t\tself.axes1tab2.clear() # clear previous plottings if any\n\n\t\tself.dates = [(dt.datetime.now() + dt.timedelta(seconds=i)).strftime('%H:%M:%S') for i in range(7)]\n\n\t\tself.axes1tab2.set_facecolor('white') # set graph background 
colour\n\t\tself.axes1tab2.spines['bottom'].set_color('black')\n\t\tself.axes1tab2.spines['left'].set_color('black')\n\t\tself.axes1tab2.spines['bottom'].set_linewidth(0.5)\n\t\tself.axes1tab2.spines['left'].set_linewidth(0.3)\n\t\tself.axes1tab2.grid()\n\t\t# axis ticks customization\n\t\tself.axes1tab2.tick_params(axis='x', labelsize=9, color='white',\n\t\t\t\t colors='black') # axis line param , NOTE: color = axis tick color and colors: axis label color\n\t\tself.axes1tab2.tick_params(axis='y', labelsize=9, color='white', colors='black') # axis line param\n\t\treturn\n\n\t# if checkbox is selected, ennable cell display value\n\tdef onCheckBoxCVALL(self, event):\n\t\tif self.CheckBoxCVAll.GetValue() == True:\n\t\t\tself.CheckBoxCV1.SetValue(True)\n\t\t\tself.CheckBoxCV2.SetValue(True)\n\t\t\tself.CheckBoxCV3.SetValue(True)\n\t\t\tself.CheckBoxCV4.SetValue(True)\n\t\t\tself.CheckBoxCV5.SetValue(True)\n\t\t\tself.CheckBoxCV6.SetValue(True)\n\t\t\tself.CheckBoxCV7.SetValue(True)\n\t\t\tself.CheckBoxCV8.SetValue(True)\n\t\t\tself.CheckBoxCV9.SetValue(True)\n\t\t\tself.CheckBoxCV10.SetValue(True)\n\t\t\tself.CheckBoxCV11.SetValue(True)\n\t\t\tself.CheckBoxCV12.SetValue(True)\n\t\t\tself.CheckBoxCV13.SetValue(True)\n\t\t\tself.CheckBoxCV14.SetValue(True)\n\t\t\tself.CheckBoxCV15.SetValue(True)\n\t\t\tself.CheckBoxCV16.SetValue(True)\n\t\t\tself.CheckBoxCV17.SetValue(True)\n\t\t\tself.CheckBoxCV18.SetValue(True)\n\t\t\tdel self.CellList[:]\n\t\t\tfor i in range(0, 18):\n\t\t\t\tself.CellList.append(i+1)\n\t\telse:\n\t\t\tself.CheckBoxCV1.SetValue(False)\n\t\t\tself.CheckBoxCV2.SetValue(False)\n\t\t\tself.CheckBoxCV3.SetValue(False)\n\t\t\tself.CheckBoxCV4.SetValue(False)\n\t\t\tself.CheckBoxCV5.SetValue(False)\n\t\t\tself.CheckBoxCV6.SetValue(False)\n\t\t\tself.CheckBoxCV7.SetValue(False)\n\t\t\tself.CheckBoxCV8.SetValue(False)\n\t\t\tself.CheckBoxCV9.SetValue(False)\n\t\t\tself.CheckBoxCV10.SetValue(False)\n\t\t\tself.CheckBoxCV11.SetValue(False)\n\t\t\tself.CheckBoxCV12.SetValue(False)\n\t\t\tself.CheckBoxCV13.SetValue(False)\n\t\t\tself.CheckBoxCV14.SetValue(False)\n\t\t\tself.CheckBoxCV15.SetValue(False)\n\t\t\tself.CheckBoxCV16.SetValue(False)\n\t\t\tself.CheckBoxCV17.SetValue(False)\n\t\t\tself.CheckBoxCV18.SetValue(False)\n\t\t\tfor i in range(0, 18):\n\t\t\t\tself.CellList.remove(i+1)\n\n\t\treturn\n\n\n\tdef onCheckBoxCV1(self, event):\n\t\tif self.CheckBoxCV1.IsChecked() == True: # if checkbox for a particular cell selected\n\t\t\tself.CellList.append(1) # appen the cell number to CellList to know which cells are selected by the user\n\t\telse:\n\t\t\tself.CellList.remove(1) # remove item from list as it was unselected by the user\n\n\t\treturn\n\n\tdef onCheckBoxCV2(self, event):\n\t\tif self.CheckBoxCV2.IsChecked() == True:\n\t\t\tself.CellList.append(2)\n\t\telse:\n\t\t\tself.CellList.remove(2)\n\n\t\treturn\n\n\tdef onCheckBoxCV3(self, event):\n\t\tif self.CheckBoxCV3.IsChecked() == True:\n\t\t\tself.CellList.append(3)\n\t\telse:\n\t\t\tself.CellList.remove(3)\n\n\t\treturn\n\n\tdef onCheckBoxCV4(self, event):\n\t\tif self.CheckBoxCV4.IsChecked() == True:\n\t\t\tself.CellList.append(4)\n\t\telse:\n\t\t\tself.CellList.remove(4)\n\n\t\treturn\n\n\tdef onCheckBoxCV5(self, event):\n\t\tif self.CheckBoxCV5.IsChecked() == True:\n\t\t\tself.CellList.append(5)\n\t\telse:\n\t\t\tself.CellList.remove(5)\n\n\t\treturn\n\n\tdef onCheckBoxCV6(self, event):\n\t\tif self.CheckBoxCV6.IsChecked() == 
True:\n\t\t\tself.CellList.append(6)\n\t\telse:\n\t\t\tself.CellList.remove(6)\n\n\t\treturn\n\n\tdef onCheckBoxCV7(self, event):\n\t\tif self.CheckBoxCV7.IsChecked() == True:\n\t\t\tself.CellList.append(7)\n\t\telse:\n\t\t\tself.CellList.remove(7)\n\n\t\treturn\n\n\tdef onCheckBoxCV8(self, event):\n\t\tif self.CheckBoxCV8.IsChecked() == True:\n\t\t\tself.CellList.append(8)\n\t\telse:\n\t\t\tself.CellList.remove(8)\n\n\t\treturn\n\n\tdef onCheckBoxCV9(self, event):\n\t\tif self.CheckBoxCV9.IsChecked() == True:\n\t\t\tself.CellList.append(9)\n\t\telse:\n\t\t\tself.CellList.remove(9)\n\n\t\treturn\n\n\tdef onCheckBoxCV10(self, event):\n\t\tif self.CheckBoxCV10.IsChecked() == True:\n\t\t\tself.CellList.append(10)\n\t\telse:\n\t\t\tself.CellList.remove(10)\n\n\t\treturn\n\n\tdef onCheckBoxCV11(self, event):\n\t\tif self.CheckBoxCV11.IsChecked() == True:\n\t\t\tself.CellList.append(11)\n\t\telse:\n\t\t\tself.CellList.remove(11)\n\n\t\treturn\n\n\tdef onCheckBoxCV12(self, event):\n\t\tif self.CheckBoxCV12.IsChecked() == True:\n\t\t\tself.CellList.append(12)\n\t\telse:\n\t\t\tself.CellList.remove(12)\n\n\tdef onCheckBoxCV13(self, event):\n\t\tif self.CheckBoxCV13.IsChecked() == True:\n\t\t\tself.CellList.append(13)\n\t\telse:\n\t\t\tself.CellList.remove(13)\n\n\t\treturn\n\n\tdef onCheckBoxCV14(self, event):\n\t\tif self.CheckBoxCV14.IsChecked() == True:\n\t\t\tself.CellList.append(14)\n\t\telse:\n\t\t\tself.CellList.remove(14)\n\n\t\treturn\n\n\tdef onCheckBoxCV15(self, event):\n\t\tif self.CheckBoxCV15.IsChecked() == True:\n\t\t\tself.CellList.append(15)\n\t\telse:\n\t\t\tself.CellList.remove(15)\n\n\t\treturn\n\n\tdef onCheckBoxCV16(self, event):\n\t\tif self.CheckBoxCV16.IsChecked() == True:\n\t\t\tself.CellList.append(16)\n\t\telse:\n\t\t\tself.CellList.remove(16)\n\n\t\treturn\n\n\tdef onCheckBoxCV17(self, event):\n\t\tif self.CheckBoxCV17.IsChecked() == True:\n\t\t\tself.CellList.append(17)\n\t\telse:\n\t\t\tself.CellList.remove(17)\n\n\t\treturn\n\n\tdef onCheckBoxCV18(self, event):\n\t\tif self.CheckBoxCV18.IsChecked() == True:\n\t\t\tself.CellList.append(18)\n\t\telse:\n\t\t\tself.CellList.remove(18)\n\n\t\treturn\n\n\tdef onCheckBoxGVALL(self, event):\n\t\tif self.CheckBoxGVAll.GetValue() == True:\n\t\t\tself.CheckBoxGV1.SetValue(True)\n\t\t\tself.CheckBoxGV2.SetValue(True)\n\t\t\tself.CheckBoxGV3.SetValue(True)\n\t\t\tself.CheckBoxGV4.SetValue(True)\n\t\t\tself.CheckBoxGV5.SetValue(True)\n\t\t\tself.CheckBoxGV6.SetValue(True)\n\t\t\tself.CheckBoxGV7.SetValue(True)\n\t\t\tself.CheckBoxGV8.SetValue(True)\n\t\t\tself.CheckBoxGV9.SetValue(True)\n\t\t\tdel self.GpioList[:]\n\t\t\tfor i in range(0, 9):\n\t\t\t\tself.GpioList.append(i+1)\n\t\telse:\n\t\t\tself.CheckBoxGV1.SetValue(False)\n\t\t\tself.CheckBoxGV2.SetValue(False)\n\t\t\tself.CheckBoxGV3.SetValue(False)\n\t\t\tself.CheckBoxGV4.SetValue(False)\n\t\t\tself.CheckBoxGV5.SetValue(False)\n\t\t\tself.CheckBoxGV6.SetValue(False)\n\t\t\tself.CheckBoxGV7.SetValue(False)\n\t\t\tself.CheckBoxGV8.SetValue(False)\n\t\t\tself.CheckBoxGV9.SetValue(False)\n\t\t\tfor i in range(0, 9):\t\t\t\t\n\t\t\t\tself.GpioList.remove(i+1)\n\n\t\treturn\n\n\tdef onCheckBoxGV1(self, event):\n\t\tif self.CheckBoxGV1.IsChecked() == True:\n\t\t\tself.GpioList.append(1)\n\t\telse:\n\t\t\tself.GpioList.remove(1)\n\n\t\treturn\n\n\tdef onCheckBoxGV2(self, event):\n\t\tif self.CheckBoxGV2.IsChecked() == True:\n\t\t\tself.GpioList.append(2)\n\t\telse:\n\t\t\tself.GpioList.remove(2)\n\n\t\treturn\n\n\tdef onCheckBoxGV3(self, event):\n\t\tif self.CheckBoxGV3.IsChecked() 
== True:\n\t\t\tself.GpioList.append(3)\n\t\telse:\n\t\t\tself.GpioList.remove(3)\n\n\t\treturn\n\n\tdef onCheckBoxGV4(self, event):\n\t\tif self.CheckBoxGV4.IsChecked() == True:\n\t\t\tself.GpioList.append(4)\n\t\telse:\n\t\t\tself.GpioList.remove(4)\n\n\t\treturn\n\n\tdef onCheckBoxGV5(self, event):\n\t\tif self.CheckBoxGV5.IsChecked() == True:\n\t\t\tself.GpioList.append(5)\n\t\telse:\n\t\t\tself.GpioList.remove(5)\n\n\t\treturn\n\n\tdef onCheckBoxGV6(self, event):\n\t\tif self.CheckBoxGV6.IsChecked() == True:\n\t\t\tself.GpioList.append(6)\n\t\telse:\n\t\t\tself.GpioList.remove(6)\n\n\t\treturn\n\n\tdef onCheckBoxGV7(self, event):\n\t\tif self.CheckBoxGV7.IsChecked() == True:\n\t\t\tself.GpioList.append(7)\n\t\telse:\n\t\t\tself.GpioList.remove(7)\n\n\t\treturn\n\n\tdef onCheckBoxGV8(self, event):\n\t\tif self.CheckBoxGV8.IsChecked() == True:\n\t\t\tself.GpioList.append(8)\n\t\telse:\n\t\t\tself.GpioList.remove(8)\n\n\t\treturn\n\n\tdef onCheckBoxGV9(self, event):\n\t\tif self.CheckBoxGV9.IsChecked() == True:\n\t\t\tself.GpioList.append(9)\n\t\telse:\n\t\t\tself.GpioList.remove(9)\n\n\t\treturn\n\n\n\tdef ui_update_measurements(self, AFE_Id, df_shadow):\n\n\t\t# update measurement values in text boxes based on response\n\t\tfor z in range(0,18):\n\t\t\tself.CellVoltage[z].SetValue(str(df_shadow.AFE_cell_y[AFE_Id][z][6]))\n\n\t\tfor z in range(0,9):\n\t\t\tself.GPIOVoltage[z].SetValue(str(df_shadow.AFE_gpio_y[AFE_Id][z][6]))\n\n\t\tfor z in range(0,7):\n\t\t\tself.StatsValue[z].SetValue(str(df_shadow.AFE_stat_y[AFE_Id][z][6]))\n\n\t\treturn\n\n\tdef ui_update_fault(self, AFE_Id, df_shadow):\n\n\t\tdebugPrint( \"FAULTS:\")\n\t\t\n\t\t# Update Cell voltage and GPIO Text box background colour based on fault\n\t\tfor z in range(0,18):\n\t\t\tif (df_shadow.AFE_cell_ow_fault_list[AFE_Id][z]== '1'):\n\t\t\t\tself.CellVoltage[z].SetBackgroundColour(const.CELL_OW_COLOR)\n\t\t\telif (df_shadow.AFE_cell_ov_fault_list[AFE_Id][z]== '1'):\n\t\t\t\tself.CellVoltage[z].SetBackgroundColour(const.CELL_OV_COLOR)\n\t\t\telif (df_shadow.AFE_cell_uv_fault_list[AFE_Id][z]== '1'):\n\t\t\t\tself.CellVoltage[z].SetBackgroundColour(const.CELL_UV_COLOR)\n\t\t\telse:\n\t\t\t\tself.CellVoltage[z].SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.CellVoltage[z].Refresh()\n\n\t\tfor z in range(0,9):\n\t\t\tif (df_shadow.AFE_gpio_uv_fault_list[AFE_Id][z]== '1'):\n\t\t\t\tself.GPIOVoltage[z].SetBackgroundColour(const.GPIO_UV_COLOR)\n\t\t\telif (df_shadow.AFE_gpio_ov_fault_list[AFE_Id][z]== '1'):\n\t\t\t\tself.GPIOVoltage[z].SetBackgroundColour(const.GPIO_OV_COLOR)\n\t\t\telse:\n\t\t\t\tself.GPIOVoltage[z].SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.GPIOVoltage[z].Refresh()\n\n\t\t# update fault panel\n\t\t\n\t\tfault = '1'\n\t\tif fault in (df_shadow.AFE_cell_uv_fault_list[AFE_Id]):\n\t\t\tself.txtCellUV.SetBackgroundColour(const.CELL_UV_COLOR)\n\t\t\tself.txtCellUV.Refresh()\n\t\telse:\n\t\t\tself.txtCellUV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtCellUV.Refresh()\n\t\tif fault in (df_shadow.AFE_cell_ov_fault_list[AFE_Id]):\n\t\t\tself.txtCellOV.SetBackgroundColour(const.CELL_OV_COLOR)\n\t\t\tself.txtCellOV.Refresh()\n\t\telse:\n\t\t\tself.txtCellOV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtCellOV.Refresh()\n\t\tif fault in 
(df_shadow.AFE_cell_ow_fault_list[AFE_Id]):\t\t\n\t\t\tself.txtCellOW.SetBackgroundColour(const.CELL_OW_COLOR)\n\t\t\tself.txtCellOW.Refresh()\n\t\telse:\n\t\t\tself.txtCellOW.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtCellOW.Refresh()\n\t\tif fault in (df_shadow.AFE_gpio_uv_fault_list[AFE_Id]):\n\t\t\tself.txtGPIO_UV.SetBackgroundColour(const.GPIO_UV_COLOR)\n\t\t\tself.txtGPIO_UV.Refresh()\n\t\telse:\n\t\t\tself.txtGPIO_UV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtGPIO_UV.Refresh()\n\t\tif fault in (df_shadow.AFE_gpio_ov_fault_list[AFE_Id]):\n\t\t\tself.txtGPIO_OV.SetBackgroundColour(const.GPIO_OV_COLOR)\n\t\t\tself.txtGPIO_OV.Refresh()\n\t\telse:\n\t\t\tself.txtGPIO_OV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtGPIO_OV.Refresh()\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tself.txtLineOW.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n \n\t\tif (df_shadow.AFE_VAuv[AFE_Id] == '1'):\n\t\t\tself.txtVREGUV.SetBackgroundColour(const.VREG_UV_COLOR)\n\t\t\tself.txtVREGUV.Refresh()\n\t\telse:\n\t\t\tself.txtVREGUV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtVREGUV.Refresh()\n\t\tif (df_shadow.AFE_VAov[AFE_Id] == '1'):\n\t\t\tself.txtVREGOV.SetBackgroundColour(const.VREG_OV_COLOR)\n\t\t\tself.txtVREGOV.Refresh()\n\t\telse:\n\t\t\tself.txtVREGOV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtVREGOV.Refresh()\n\t\tif (df_shadow.AFE_VDuv[AFE_Id] == '1'):\n\t\t\tself.txtVREGDUV.SetBackgroundColour(const.VREGD_UV_COLOR)\n\t\t\tself.txtVREGDUV.Refresh()\n\t\telse:\n\t\t\tself.txtVREGDUV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtVREGDUV.Refresh()\n\t\tif (df_shadow.AFE_VDov[AFE_Id] == '1'):\n\t\t\tself.txtVREGDOV.SetBackgroundColour(const.VREGD_OV_COLOR)\n\t\t\tself.txtVREGDOV.Refresh()\n\t\telse:\n\t\t\tself.txtVREGDOV.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtVREGDOV.Refresh()\n\t\tif (df_shadow.AFE_STKov[AFE_Id] == '1'):\n\t\t\tself.txtVSTACKUV.SetBackgroundColour(const.STACK_UV_COLOR) \n\t\t\tself.txtVSTACKUV.Refresh()\n\t\telse:\n\t\t\tself.txtVSTACKUV.SetBackgroundColour(const.FAULT_WHITE_COLOR) \n\t\t\tself.txtVSTACKUV.Refresh()\n\t\tif (df_shadow.AFE_STKov[AFE_Id] == '1'):\n\t\t\tself.txtSTACKOV.SetBackgroundColour(const.STACK_OV_COLOR) \n\t\t\tself.txtSTACKOV.Refresh()\n\t\telse:\n\t\t\tself.txtSTACKOV.SetBackgroundColour(const.FAULT_WHITE_COLOR) \n\t\t\tself.txtSTACKOV.Refresh()\n\t\tif (df_shadow.AFE_DIEut[AFE_Id] == '1'):\n\t\t\tdebugPrint(\"AFE_DIEut -- if\", df_shadow.AFE_DIEut)\n\t\t\tself.txtDIE_UT.SetBackgroundColour(const.DIET_UT_COLOR)\n\t\t\tself.txtDIE_UT.Refresh()\n\t\telse:\n\t\t\tdebugPrint(\"AFE_DIEut -- else\", df_shadow.AFE_DIEut)\n\t\t\tself.txtDIE_UT.SetBackgroundColour(const.FAULT_WHITE_COLOR)\n\t\t\tself.txtDIE_UT.Refresh()\n\t\tif (df_shadow.AFE_DIEot[AFE_Id] == '1'):\n\t\t\tself.txtDIE_OT.SetBackgroundColour(const.DIET_OT_COLOR) \n\t\t\tself.txtDIE_OT.Refresh()\n\t\telse:\n\t\t\tself.txtDIE_OT.SetBackgroundColour(const.FAULT_WHITE_COLOR) \n\t\t\tself.txtDIE_OT.Refresh()\n\n\t\treturn\n \n\n\tdef draw_graph(self, AFE_Id, df_shadow):\n\t\tdebugPrint(\"draw_graph\")\n\t\tself.graph_init()\n\n\t\tself.axes1tab2.axis([0, 6, 0, 7])\n\t\tdebugPrint(str(AFE_Id))\n\t\tdel self.legendCellList[:]\n\t\tself.CellList.sort()\t\n\t\t# draw cell voltage lines\n\t\tfor i in range(0,len(self.CellList)):\n\n\t\t\tself.y = df_shadow.AFE_cell_y[AFE_Id][self.CellList[i]-1]\n\t\t\tdebugPrint(\"Cell 
values\")\n\t\t\tdebugPrint(self.dates)\n\t\t\tdebugPrint(self.y)\n\t\t\n\t\t\tself.legendCellList.append(\"C%dV\" % (self.CellList[i]))\n\t\t\tself.axes1tab2.plot(self.dates, self.y,\n\t\t\t\t\t\t self.legendColorCellList[self.CellList[i]- 1],\n\t\t\t\t\t\t linewidth=1.0, label='linear')\n\n\t\tdel self.legendGpioList[:]\n\t\tself.GpioList.sort()\n\t\t# draw gpio voltage lines\n\t\tfor i in range(0,len(self.GpioList)):\n\t\t\t# gp_index= int(self.GpioList[i])\n\n\t\t\tself.y = df_shadow.AFE_gpio_y[AFE_Id][self.GpioList[i] - 1]\n\t\t\tdebugPrint(\"Gpio values\")\n\t\t\tdebugPrint(self.dates)\t\n\t\t\tdebugPrint(self.y)\n\t\t\n\t\t\tself.legendGpioList.append(\"GP%dV\" % (self.GpioList[i]))\n\t\t\tself.axes1tab2.plot(self.dates, self.y,\n\t\t\t\t\t\t self.legendColorGpioList[self.GpioList[i] - 1],\n\t\t\t\t\t\t linewidth=1.0, label='linear')\n\n\t\tself.legendList= (self.legendCellList + self.legendGpioList)\n\t\t\n\t\tdebugPrint (self.legendCellList)\n\t\tdebugPrint (self.legendList)\n\n\t\tself.axes1tab2.legend(labels=self.legendList, fontsize=7, frameon=False,\n\t\t\t\t bbox_to_anchor=(1.01, 1.15), loc=\"best\") # add label\n\n\t\tself.canvas1.draw()\n\n\t\t# update measurement values in text boxes based on response\t\n\t\tself.ui_update_measurements(AFE_Id, df_shadow)\n\t\tself.ui_update_fault(AFE_Id, df_shadow)\n\t\tself.mainframe.tab[0].ui_update_system_fault(AFE_Id, 0, 0, df_shadow)\n\n\t\treturn\n\n \ndef debugPrint(strn, val=\"\"):\n\tprint(f\"{strn} : {val}\")\n\treturn\t\n\n","repo_name":"ArrowElectronics/BMCU","sub_path":"Source/ADI_BMS_UI/BoardTab.py","file_name":"BoardTab.py","file_ext":"py","file_size_in_byte":32003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"7199526586","text":"\"\"\"movie_task URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom myapp import views\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('register/',views.Register_user.as_view(),name='reg_user'),\n path('collection/',views.Create_collection_movie.as_view(),name='coll_movie'),\n path('collection//',views.List_collections.as_view(),name='colls'),\n path('movies/',views.Movies_list.as_view(),name='mov_lst'),\n path('request-count/',views.Count_value_request.as_view(),name='rqst-cnt'),\n path('request-count/reset/',views.Count_request_reset.as_view(),name='cnt_reset')\n]\n","repo_name":"uday99/one_fin_task","sub_path":"movie_task/movie_task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"15781627188","text":"#!/usr/bin/env python\n\n\nimport numpy as np\n\n\ndef main():\n \"\"\" main body \"\"\"\n perc = [0.4, 0.7, 0.8, 0.9, 1.0]\n data = {}\n for i in range(len(perc)):\n data[perc[i]] = np.sqrt(125 * np.pi * (0.5 ** 2 + 0.7 ** 2) / perc[i])\n\n print(data)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ali-mahani/ComputationalPhysics-Fall2020","sub_path":"Final/p3/system_size.py","file_name":"system_size.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"90"} +{"seq_id":"73246480296","text":"import re\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\n\n\ndef clean_str(string, TREC=False):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Every dataset is lower cased except for TREC\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip() if TREC else string.strip().lower()\n\n\ndef read_data(path): \n features = []\n labels = []\n with open(path, encoding='utf8') as f:\n for line in f:\n y, _, X = line.partition(' ')\n y = int(y)\n \n # clean the text\n X = clean_str(X.strip()) \n # lower the word \n X = X.lower() \n \n features.append(X)\n labels.append(y)\n \n\n return features, labels \n\n\nclass FakeDataset(Dataset):\n def __init__(self, X, y, tokenizer):\n self.X = X\n self.y = y\n self.tokenizer = tokenizer\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n X = self.X[index]\n y = self.y[index]\n\n inputs = self.tokenizer.encode_plus(\n X, \n None,\n add_special_tokens = True,\n padding = 'max_length',\n return_token_type_ids = True,\n truncation = True\n )\n\n ids = inputs['input_ids']\n mask = inputs['attention_mask']\n\n return {\n 'ids': torch.LongTensor(ids),\n 'mask': torch.LongTensor(mask),\n 'y': torch.LongTensor([y])\n }\n\ndef build_vocab(data_path, tokenizer):\n X, y = read_data(data_path)\n data_set = FakeDataset(X, y, tokenizer)\n return data_set\n\n\ndef build_iter(data_set, batch_size):\n data_iter = DataLoader(data_set, batch_size=batch_size, shuffle=True, num_workers=4)\n\n return data_iter","repo_name":"andr2w/Malicious-Attack","sub_path":"ma/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"72030785897","text":"import logging\nimport boto3\nfrom botocore.exceptions import ClientError\nimport os\ns3_client = boto3.client('s3')\nBUCKET = \"bucket-name\"\nfolder = './tmp/'\nfilename = 'test.xlsx'\n\ndef upload_file(bucket_name, file_name, object_name=None):\n \"\"\"Upload a file to an S3 bucket\n\n :param bucket_name: Bucket to upload to\n :param file_name: File to upload\n :param object_name: S3 object name. 
If not specified then file_name is used\n :return: True if file was uploaded, else False\n \"\"\"\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket_name, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\ndef generate_presigned_url(bucket_name, object_name, expiration=3600):\n try:\n response = s3_client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': bucket_name,\n 'Key': object_name\n },\n ExpiresIn=expiration\n )\n except ClientError as e:\n logging.error(e)\n return None\n\n # The response contains the presigned URL\n return response\n\ndef handler(event, context):\n # store a file in s3\n stored = upload_file(BUCKET, folder+filename)\n # generate a presigned url to the file\n url = generate_presigned_url(BUCKET, filename)\n \n if stored and url:\n # return the url as a redirect\n return {\n 'statusCode': 301,\n 'headers': {\n 'Location': url\n }\n }\n else:\n return {\n 'statusCode': 500,\n 'body': 'Error storing file in s3'\n }\n\nprint(handler(None, None))\n","repo_name":"tonythree/s3-upload-and-redirect","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"21304834645","text":"from permpy import Permutation as Perm\n\n\ndef test_cycle_decomp():\n p = Perm(53814276)\n expected = [[4, 3, 0], [6], [7, 5, 1, 2]]\n result = p.cycle_decomp()\n assert result == expected, (\n f\"Perm({p}).cycle_decomp() returned {result},\"\n f\" but it should return {expected}.\"\n )\n","repo_name":"engenmt/permpy","sub_path":"tests/test_permmisc.py","file_name":"test_permmisc.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"90"} +{"seq_id":"7219152675","text":"def heapify(a, i, N):\n child1 = i * 2 + 1\n if child1 >= N:\n return\n if child1 + 1 < N and a[child1 + 1] > a[child1]:\n child1 += 1\n\n if a[child1] <= a[i]:\n return\n\n a[i], a[child1] = a[child1], a[i]\n print(\"in: \", i, a)\n heapify(a, child1, N)\n\n\ndef heapsort(a):\n N = len(a)\n\n print(\"init: \", a)\n for i in range(int(N / 2) - 1, -1, -1):\n heapify(a, i, N)\n\n print(\"heap: \", a)\n for i in range(N - 1, 0, -1):\n a[0], a[i] = a[i], a[0]\n print(\"each step: \", a)\n heapify(a, 0, i)\n\n\ndef main():\n v = [7, 6, 1, 7, 10, 5, 3, 12, 19, 2, 15]\n heapsort(v)\n print(\"fin: \", v)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"meshidenn/algorithm_and_data_structure","sub_path":"python/shakyo/code12.4.py","file_name":"code12.4.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"2059361524","text":"import rtmidi\n\n# CONFIGURE BUTTONS ONLY\n\nmidiin = rtmidi.MidiIn()\nin_ports = midiin.get_ports()\nmidiout = rtmidi.MidiOut()\nout_ports = midiout.get_ports()\n\noutidx = 1\n\nprint(out_ports, outidx)\nif outidx:\n midiout.open_port(outidx)\nelse:\n raise Exception('NO Output-PORTS FOUND')\n\n\nmsg = [0xB6, 64, 63]\nprint(msg)\ninput('Press Enter to send a rotary Message: ')\nmidiout.send_message(msg)\n\ninput('Final Enter')\n\ndel midiin\ndel 
midiout\n\n\n","repo_name":"emvasilopoulos/ai_auto_dj","sub_path":"midi_controller/Rekordbox/configuration_rotarty2.py","file_name":"configuration_rotarty2.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"2428736271","text":"import tensorflow as tf\nimport math\n\nclass DilatedRNN:\n\n type_to_func_dict = {\n \"VanillaRNN\": tf.contrib.rnn.BasicRNNCell,\n \"LSTM\": tf.contrib.rnn.BasicLSTMCell,\n \"GRU\": tf.contrib.rnn.GRUCell\n }\n available_cell_types = type_to_func_dict.keys()\n\n \n def __init__(self, typeof_cell, num_hidden_units, list_of_dilations, dropout = None):\n\n if typeof_cell not in self.available_cell_types:\n raise ValueError(\"Valid cell type is 'VanillaRNN' or 'LSTM' or 'GRU'\")\n \n self.cell_type = typeof_cell\n self.hidden_unit_size = num_hidden_units\n \n cell_func = self.type_to_func_dict[typeof_cell]\n self.cell_list = [cell_func(num_hidden_units) for _ in range(len(list_of_dilations))]\n\n self.list_of_dilations = list_of_dilations\n\n if dropout is not None:\n self.cell_list[0] = tf.contrib.rnn.DropoutWrapper(cell_list[0], dropout, 1, 1)\n self.dropout = dropout\n\n \n \n def drnn(self, x_data):\n \n input_data = x_data.copy()\n timesteps = len(input_data)\n\n \n layer = 1\n \n for dilation, cell_layer in zip(self.list_of_dilations, self.cell_list):\n \n # Define the number of timestamps\n\n input_data = tf.convert_to_tensor(input_data)\n \n scope = \"Layer_%d\" %layer\n\n # Input has shape (T, batch_size, input_size)\n # For dilation d we want to transorm it to shape (T/d, batch_size*d, input_size)\n\n # Pad the sequence with 0s in order to make T divisible by d\n \n # We want dilation to divide exactly the timestamps\n if (timesteps % dilation == 0):\n # Reduce the sequence length by `dilation` times\n reduced_timesteps = timesteps // dilation\n else:\n reduced_timesteps = math.ceil(timesteps/dilation)\n n_timesteps_to_add = (reduced_timesteps * dilation) - timesteps\n zero_padding = tf.zeros_like(input_data[0])\n zero_padding = tf.tile(tf.expand_dims(zero_padding, axis=0), tf.constant([n_timesteps_to_add, 1, 1]))\n input_data = tf.concat([input_data, zero_padding], axis=0)\n \n input_data = tf.split(input_data, dilation)\n input_data = tf.concat(input_data, axis=1)\n\n input_data = tf.unstack(input_data)\n reduced_input_to_layer = input_data\n \n reduced_output_from_layer, _ = tf.contrib.rnn.static_rnn(cell_layer, reduced_input_to_layer, dtype=tf.float32, scope = scope)\n \n splitted_tensors = [tf.split(tensor, dilation) for tensor in reduced_output_from_layer]\n output_from_layer = [item for sublist in splitted_tensors for item in sublist]\n\n input_data = output_from_layer[:timesteps]\n layer += 1\n \n return input_data\n \n\n def classification(self, input_data, class_num, experiment):\n \n # Change Tensor's shape from (batch_size, T, input_size) to \n # list of Tensors with shape (batch_size, input_size)\n # and length of T\n rnn_data = tf.unstack(input_data, axis=1)\n \n outputs = self.drnn(rnn_data)\n\n if experiment == \"mnist\":\n start_dilation = self.list_of_dilations[0]\n if start_dilation == 1:\n out_weights = tf.Variable(tf.random_normal(shape=[self.hidden_unit_size, class_num]))\n out_bias = tf.Variable(tf.random_normal(shape=[class_num]))\n fuse_outputs = outputs[-1]\n else:\n out_weights = tf.Variable(tf.random_normal(shape=[self.hidden_unit_size*start_dilation, class_num]))\n out_bias = tf.Variable(tf.random_normal(shape=[class_num]))\n 
fuse_outputs = outputs[-start_dilation]\n for i in range(-start_dilation+1, 0, 1):\n fuse_outputs = tf.concat([fuse_outputs, outputs[i]], axis = 1)\n \n log_predictions = tf.add(tf.matmul(fuse_outputs, out_weights), out_bias)\n \n elif experiment == \"copy_memory\" or experiment == \"PTB\":\n \n out_weights = tf.Variable(tf.random_normal(shape=[self.hidden_unit_size, class_num]))\n out_bias = tf.Variable(tf.random_normal(shape=[class_num]))\n \n outputs = tf.stack(outputs, axis = 0)\n out_h = tf.einsum('ijk,kl->jil', outputs, out_weights)\n log_predictions = tf.add(out_h, out_bias)\n \n else:\n \n print(\"Wrong selection for the variable 'experiment'\")\n \n return log_predictions","repo_name":"petrosi/DilatedRNN","sub_path":"models/dilated_rnn.py","file_name":"dilated_rnn.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70546681577","text":"from flask import Flask,Blueprint,request,jsonify\nfrom api import db\nfrom api.Upvotes.upvotes_model import Upvote\nfrom api.User.user_model import User\n\n\nupvotes = Blueprint(\"upvotes\",__name__)\n\n@upvotes.route('/add_upvote', methods=[\"POST\"])\ndef add_upvote():\n data = request.get_json()\n\n user = User.query.filter_by(id=data[\"user_id\"]).first()\n\n\n upvoted_data=Upvote(user_id=user.id,upvoted_blog_id=data[\"blog_id\"])\n\n db.session.add(upvoted_data)\n db.session.commit()\n\n return jsonify(\"Upvoted!!\")\n\n@upvotes.route('/downvote//', methods=[\"DELETE\"])\ndef downvote(user_id,blog_id):\n upvote = Upvote.query.filter_by(user_id=user_id,upvoted_blog_id=blog_id).first()\n print(upvote)\n db.session.delete(upvote)\n db.session.commit()\n return jsonify(\"Downvoted\");\n\n@upvotes.route('/upvotes_blog/', methods=[\"GET\"])\ndef get_blog_upvotes(id):\n upvotes = Upvote.query.filter_by(upvoted_blog_id=id).all()\n\n upvotes_array = []\n for upvote in upvotes:\n upvotes_array.append(upvote)\n return jsonify({\"count\": len(upvotes_array)})\n\n@upvotes.route('/check_upvote//', methods=[\"GET\"])\ndef check_user_upvote(user_id,blog_id):\n upvote = Upvote.query.filter_by(user_id=user_id,upvoted_blog_id=blog_id).first()\n count = 0;\n if(upvote):\n count+=1\n \n return jsonify({\"count\": count}) \n\n \n\n \n","repo_name":"Bviveksingh/dank-football-backend","sub_path":"api/Upvotes/upvotes.py","file_name":"upvotes.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10725191228","text":"from PyQt5 import QtGui, QtCore\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtGui import QCursor, QIcon\nfrom PyQt5.QtWidgets import QLabel, QComboBox, QLineEdit, QPushButton, QTableView, QAbstractItemView, QVBoxLayout, \\\n QWidget, QGridLayout, QMenu, QMessageBox\n\nfrom DcsUi.variablecoercion.model import variableGroupModel\nfrom utils import core\nfrom xps.ExploreTable import smallTableModel\nfrom xps.mythreads import mythread\n\n\nclass searchWindow(QWidget):\n my_Signal = QtCore.pyqtSignal(str)\n count = 0\n\n def __init__(self, group_name=None):\n super().__init__()\n self.setWindowIcon(QIcon(':/static/default.png'))\n\n self.group_name = group_name\n\n self.comboboxColumnlist = [\n '强制值', '当前值', 'sig_name', 'sig_type', 'chr',\n 'slot', 'engineering_unit', 'rlo',\n 'rhi', 'elo', 'ehi',\n 'channel', 'initial', 'reg', 'block',\n 'offset', 'bit'\n ]\n self.comboboxlist2 = ['And', 'Or']\n\n # 查询模型\n self.queryModel = None\n\n # 数据表\n 
self.tableView = None\n\n self.date = {\n 'header': core.MainWindowConfig.header,\n 'date': []\n }\n\n self.initUI()\n\n def initUI(self):\n label1 = QLabel(self)\n label2 = QLabel(self)\n label3 = QLabel(self)\n label4 = QLabel(self)\n label5 = QLabel(self)\n label1.setText('列:')\n label2.setText('值:')\n label3.setText('关联:')\n label4.setText('列:')\n label5.setText('值:')\n\n self.comboboxColumn1 = QComboBox(self, minimumWidth=170, minimumHeight=40)\n self.combobox = QComboBox(self, minimumWidth=100, minimumHeight=40)\n self.comboboxColumn2 = QComboBox(self, minimumWidth=170, minimumHeight=40)\n self.combobox.addItems(self.comboboxlist2)\n\n self.initComomboboxColumn1()\n self.initComomboboxColumn2()\n\n self.line1 = QLineEdit(self)\n self.line2 = QLineEdit(self)\n\n self.buttonsearch = QPushButton('查找')\n self.buttonsearch.clicked.connect(self.searchButtonClicked)\n\n self.timer = QTimer(self)\n self.timer.start(1000)\n self.timer.timeout.connect(self.dateUpdate)\n\n self.tableView = QTableView()\n self.tableView.horizontalHeader().setStretchLastSection(True)\n\n self.queryModel = smallTableModel(self.date['header'], self.date['date'])\n self.tableView.setModel(self.queryModel)\n self.tableView.setContextMenuPolicy(Qt.CustomContextMenu)\n self.tableView.customContextMenuRequested.connect(self.showContextMenu)\n self.tableView.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.searchButtonClicked()\n self.queryModel.err.connect(self.errwindow)\n\n self.thread = mythread()\n self.thread.start()\n\n layout = QVBoxLayout(self)\n grid = QGridLayout()\n grid.addWidget(label1, 1, 0)\n grid.addWidget(label2, 1, 1)\n grid.addWidget(label3, 1, 2)\n grid.addWidget(label4, 1, 3)\n grid.addWidget(label5, 1, 4)\n grid.addWidget(self.comboboxColumn1, 2, 0)\n grid.addWidget(self.line1, 2, 1)\n grid.addWidget(self.combobox, 2, 2)\n grid.addWidget(self.comboboxColumn2, 2, 3)\n grid.addWidget(self.line2, 2, 4)\n grid.addWidget(self.buttonsearch, 2, 5)\n layout.addLayout(grid)\n layout.addWidget(self.tableView)\n self.setLayout(layout)\n\n def initComomboboxColumn1(self):\n for i in range(len(self.comboboxColumnlist)):\n self.comboboxColumn1.addItem(self.comboboxColumnlist[i])\n self.comboboxColumn1.setCurrentIndex(-1)\n\n def initComomboboxColumn2(self):\n for i in range(len(self.comboboxColumnlist)):\n self.comboboxColumn2.addItem(self.comboboxColumnlist[i])\n self.comboboxColumn2.setCurrentIndex(-1)\n\n def showContextMenu(self):\n self.tableView.contextMenu = QMenu(self)\n self.action = self.tableView.contextMenu.addAction('移除选中行')\n self.tableView.contextMenu.popup(QCursor.pos()) # 1菜单显示的位置\n self.action.triggered.connect(self.actionHandler)\n self.tableView.contextMenu.show()\n\n def onComboboxActivate(self):\n pass\n\n def searchButtonClicked(self):\n pass\n\n def actionHandler(self):\n pass\n\n def dateUpdate(self):\n pass\n\n def closeEvent(self, a0: QtGui.QCloseEvent) -> None:\n self.my_Signal.emit('exit')\n self.close()\n\n def errwindow(self, text):\n pass\n\n @classmethod\n def getResult(cls):\n cls.count += 1\n return cls.count\n\n @classmethod\n def deleteResult(cls):\n cls.count -= 1\n return cls.count\n\n\nclass mySearchWindow(searchWindow):\n def __init__(self, group_name=None):\n searchWindow.__init__(self, group_name)\n\n def onComboboxActivate(self):\n \"\"\"搜索窗口下拉框函数\"\"\"\n text1 = self.line1.text()\n text2 = self.line2.text()\n conditiontext1 = self.comboboxColumn1.currentText()\n conditiontext2 = self.comboboxColumn2.currentText()\n conditiontext3 = 
self.combobox.currentText()\n if self.group_name == None:\n group_points = variableGroupModel.searchDate(\n column1=conditiontext1,\n column2=conditiontext2,\n value1=text1,\n value2=text2,\n relation=conditiontext3\n )\n else:\n group_points = variableGroupModel.selectGroupData(\n name=self.group_name,\n column1=conditiontext1,\n column2=conditiontext2,\n value1=text1,\n value2=text2,\n relation=conditiontext3\n )\n self.queryModel.datas = group_points\n\n def searchButtonClicked(self):\n \"\"\"查询按钮功能函数\"\"\"\n self.onComboboxActivate()\n self.queryModel.layoutChanged.emit()\n\n def actionHandler(self):\n \"\"\"移除选中行功能函数\"\"\"\n row = self.tableView.currentIndex().row()\n self.queryModel.remove_row(row)\n\n def dateUpdate(self):\n \"\"\"时间卡绑定函数\"\"\"\n self.queryModel.layoutChanged.emit()\n\n def errwindow(self, text):\n \"\"\"变量强制报错时触发函数\"\"\"\n QMessageBox.information(\n self,\n \"信息提示\",\n '强制值设置出错,错误原因:%s' % text,\n QMessageBox.Yes | QMessageBox.No\n )\n","repo_name":"a452669850/DCSNEW","sub_path":"DcsUi/variablecoercion/smallWindow.py","file_name":"smallWindow.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"41948521567","text":"import math\ndef checkIfNumberOk(self, array, number, x, y): #function to check numbers in y and x axis and in the 'square'\n if number not in array[:,x] and number not in array[y,:]:\n squareSize = int(math.pow(len(array),0.5))\n squarePosX = int(math.floor(x/squareSize) * squareSize)\n squarePosY = int(math.floor(y/squareSize) * squareSize)\n for y in range(squarePosY, squarePosY + squareSize):\n for x in range(squarePosX, squarePosX + squareSize):\n if number != array[y,x]:\n continue\n else:\n return False\n return True\n else:\n return False","repo_name":"Fcek/SudokuSolver","sub_path":"CheckNumber.py","file_name":"CheckNumber.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"13685119194","text":"def checkUnimodal(arr, n):\r\n i = 1\r\n while (i < n and arr[i] > arr[i - 1]):\r\n i += 1\r\n\r\n while (i < n and arr[i] == arr[i - 1]):\r\n i += 1\r\n\r\n while (i < n and arr[i] < arr[i - 1]):\r\n i += 1\r\n\r\n return (i == n)\r\n\r\nif __name__ == '__main__':\r\n arr = [1,0,100]\r\n n = len(arr)\r\n if (checkUnimodal(arr, n)):\r\n print(\"Yes\")\r\n else:\r\n print(\"No\")\r\n\r\n","repo_name":"Rigasha/temple-steps","sub_path":"beautiful array.py","file_name":"beautiful array.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"17952400779","text":"A,B,C,D,E,F = map(int,input().split())\n\nws = set()\nfor a in range(0,F+1,100*A):\n for b in range(0,F+1,100*B):\n if a+b > F: break\n ws.add(a+b)\nws.remove(0)\n\nss = set()\nfor c in range(0,F+1,C):\n for d in range(0,F+1,D):\n if c+d > F: break\n ss.add(c+d)\n\nbest_s = -1\nbest_w = 1\nfor w in ws:\n for s in ss:\n if w+s > F: continue\n if E*w < s*100: continue\n if best_s * (s+w) < s * (best_s + best_w):\n best_s = s\n best_w = w\nprint(best_s+best_w, best_s)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03599/s545517105.py","file_name":"s545517105.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"26035051695","text":"from collections import defaultdict, deque\nN, M, V = map(int, 
input().split())\ng = defaultdict(dict)\nfor _ in range(M):\n a, b = map(int, input().split())\n g[a][b] = True\n g[b][a] = True\n\n# DFS\ndfs, visited, order = deque([V]), set(), []\nwhile len(dfs) > 0:\n v = dfs.pop()\n if v in visited:\n continue\n order.append(v)\n visited.add(v)\n for k in sorted(g[v].keys(), reverse=True):\n if k not in visited:\n dfs.append(k)\nprint(' '.join(map(str, order)))\n\n# BFS\nbfs, visited, order = deque([V]), set(), []\nwhile len(bfs) > 0:\n v = bfs.popleft()\n if v in visited:\n continue\n order.append(v)\n visited.add(v)\n for k in sorted(g[v].keys()):\n if k not in visited:\n bfs.append(k)\nprint(' '.join(map(str, order)))\n","repo_name":"yeonghoey/boj","sub_path":"1260/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"38844785966","text":"# coding:utf-8\nimport json\nimport pytest\nfrom datetime import datetime\nfrom apis.device_management.device_account.apis_device_account import Apis\n\n\n@pytest.mark.bvt\n@pytest.mark.device\n@pytest.mark.flaky(reruns=3, reruns_delay=3)\ndef test_get_model_templates_by_type():\n \"\"\"\n 获取默认采集规格\n \"\"\"\n try:\n params = {\n \"type\": 0,\n \"_t\": datetime.now()\n }\n\n res = Apis().api_measure_get_model_templates_by_type(params=params)\n assert res.status_code <= 200, \"Http请求状态码错误\"\n # assert len(json.loads(res.text)['catalogs']) > 0, \"采集规格为空!\" # 存在但可能会为空,instances必须存在即可\n assert len(json.loads(res.text)['instances']) > 0, \"采集规格为空!\"\n\n except Exception as e:\n raise e\n","repo_name":"zj1995-09-09/supercare_api","sub_path":"testcase/device_management/device_account/test_measure_get_model_templates_by_type.py","file_name":"test_measure_get_model_templates_by_type.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"10995261845","text":"#!/usr/bin/env python3\n\n\nimport logging\nimport sys\nfrom os import path\nimport requests\nimport json\n\nsys.path.append('../src')\n\nimport testapi\nfrom testapi import Rule, Response, Collection\n\n\nimport unittest\n\n\ncommandServer = None\nhttpServer = None\n\nclass TestCommandProt(unittest.TestCase):\n def setUp(self):\n httpServer.reset()\n\n\n def test_1(self):\n httpServer.expect(Rule().method(\"GET\").url(\"apa\")\n .respondWith(Response()))\n\n r = requests.get(url = \"http://127.0.0.1:8090/bepa\")\n\n self.assertTrue(*httpServer.checkStatus())\n\n def tearDown(self):\n pass\n\n\nif __name__ == \"__main__\":\n # configure logging\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s.%(msecs)03d [%(levelname)s: %(threadName)s] %(filename)s:%(lineno)d > %(message)s', datefmt='%H:%M:%S',\n handlers=[\n logging.FileHandler(filename=\"tests.log\", mode='w'),\n logging.StreamHandler()\n ])\n\n cmdClient = testapi.CommandClient()\n cmdClient.connect()\n httpServer = cmdClient.startTestServer()\n unittest.main(exit=False)\n httpServer.kill()\n \n","repo_name":"birgkr/test_backend","sub_path":"tests/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"70416458217","text":"#-*- coding: UTF-8 -*-\nimport datetime\nimport timestamp_utils\nimport re\nfrom bs4 import BeautifulSoup\nfrom WebParser 
import WebParser\n\nclass GuanchaParser(WebParser):\n def translate_timestamp(self, timeStr):\n if re.match('\\d{4}\\-\\d{2}\\-\\d{2}\\s+\\d{2}:\\d{2}', timeStr) != None:\n [date_str, time_str] = timeStr.split(' ')\n date_array = date_str.split('-')\n year = int(date_array[0])\n month = int(date_array[1])\n day = int(date_array[2])\n time_array = time_str.split(':')\n hh = int(time_array[0])\n mm = int(time_array[1])\n ss = 0\n return [year, month, day, hh, mm, ss]\n return None\n\n\n def get_full_description(self, entry):\n if entry['published'] == None:\n entry['published'] = [1970, 1, 1, 0, 0, 0]\n r = self.httpClient.get(entry['link'])\n if r.status_code != 200:\n return entry\n r.encoding = 'utf-8'\n pattern = re.compile('!wap\\.jpg\\\"') # fix the img node\n new_text = pattern.sub('\">', r.text)\n html = BeautifulSoup(new_text, 'html5lib')\n if html == None:\n return entry\n textPage = html.find('section', attrs={'class': 'textPageContInner'})\n if textPage == None:\n return entry\n h1s = textPage.find_all('h1')\n for h1 in h1s:\n h1.decompose()\n entry['description'] = textPage.prettify()\n time_span = textPage.find('span', attrs={'class': 'time'})\n timestamp_str = ''\n if time_span != None:\n if re.match('\\d{4}\\-\\d{2}\\-\\d{2}\\s+\\d{2}:\\d{2}', time_span.string.strip()) != None:\n timestamp_str = time_span.string.strip()\n if timestamp_str != '':\n beijing_time = self.translate_timestamp(timestamp_str)\n beijing_time.append(8)\n entry['published'] = timestamp_utils.adjustTimeByTimezon(*beijing_time)\n #print entry['title']\n #print ' '.join(map(lambda x:'%d' % x, entry['published']))\n return entry\n\n def __fetch_entry_from_node_a(self, a):\n if a == None:\n return None\n h3 = a.find('h3')\n if a['href'] != None and h3 != None:\n #print(h3.string)\n return {\n 'title': h3.string,\n 'link': self.url + a['href'],\n 'published': None,\n 'description': h3.string\n }\n else:\n return None\n\n def get_abstract_feed(self):\n feed = {\n 'title': 'Guancha News',\n 'link': self.url,\n 'description': 'Guancha News',\n 'entries': []\n }\n r = self.httpClient.get(self.url)\n if r.status_code != 200:\n return feed\n r.encoding = 'utf-8'\n html = BeautifulSoup(r.text, 'html5lib')\n if html == None:\n return feed\n headline = html.find('li', attrs={'class': 'headline'})\n if headline != None:\n a = headline.find('a')\n entry = self.__fetch_entry_from_node_a(a)\n if entry != None:\n feed['entries'].append(entry)\n box_rights = html.find_all('div', attrs={'class': 'box-right'})\n for br in box_rights:\n a_s = br.find_all('a')\n for a in a_s:\n entry = self.__fetch_entry_from_node_a(a)\n if entry != None:\n feed['entries'].append(entry)\n return feed\n\nif __name__ == \"__main__\":\n feed_info = {}\n feed_info['url'] = 'https://m.guancha.cn/'\n feed_info['name'] = 'GuanchaNews'\n feed_info['keywords'] = []\n feed_info['update'] = ''\n feed_info['conf_file'] = '../config/config.xml'\n feed_info['log_file'] = '../log/log.log'\n parser = GuanchaParser(feed_info)\n feed_data = parser.parse()\n print(' '*1 + 'feed_title: ' + feed_data['title'])\n print(' '*1 + 'entries: ')\n for entry in feed_data['entries']:\n print(' '*3 + 'entry_link: ' + entry['link'])\n print(' '*3 + 'entry_title: ' + entry['title'])\n #print ' '*3 + 'entry_des: ' + entry['description']\n #print ' '*3 + 'published: ' + 
entry['pubDate']\n\n","repo_name":"zjuerkzhang/RssFullTrans","sub_path":"src/GuanchaParser.py","file_name":"GuanchaParser.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"71695122536","text":"import random, asyncio, discord, random\r\nfrom discord.ext import commands\r\nfrom Cogs import PickList\r\n\r\nMAX_ROLLS = \"MAX_ROLLS\"\r\nMAX_SIDES = \"MAX_SIDES\"\r\nMAX_DICE = \"MAX_DICE\"\r\nMAX_CHARS = \"MAX_CHARS\"\r\nINVALID = \"INVALID\"\r\n\r\nclass RollParser:\r\n '''\r\n Class to parse individual NdN±N(a|d) roll strings\r\n '''\r\n def __init__(self, **kwargs):\r\n self.roll_string = kwargs.get(\"roll\",\"1d20\")\r\n self.index = kwargs.get(\"index\",0)\r\n # Stages are 0 = dice, 1 = sides, 2 = modifiers, 3 = (dis)advantage\r\n self.stage = kwargs.get(\"stage\",0)\r\n self.roll = {}\r\n\r\n def reset(self):\r\n self.index = 0\r\n self.stage = 0\r\n self.roll = {}\r\n\r\n def parse(self):\r\n # Walks the next character, and parses accordingly\r\n if self.index >= len(self.roll_string):\r\n # Let's assign any defaults\r\n if \"dice\" in self.roll and not \"sides\" in self.roll:\r\n # used 2-5d type syntax - assume d20\r\n self.roll[\"sides\"] = self.roll[\"dice\"]\r\n self.roll[\"dice\"] = 1\r\n self.roll[\"dice\"] = int(self.roll.get(\"dice\",1))\r\n self.roll[\"sides\"] = int(self.roll.get(\"sides\",20))\r\n self.roll[\"mod_sign\"] = self.roll.get(\"mod_sign\",True)\r\n self.roll[\"mod\"] = int(self.roll.get(\"mod\",0))\r\n self.roll[\"adv_dis\"] = self.roll.get(\"adv_dis\",True if self.roll_string.lower() == \"a\" else False if self.roll_string.lower() == \"d\" else None)\r\n return self.roll\r\n char = self.roll_string[self.index]\r\n self.index += 1 # Increment for the next loop\r\n # Check which stage - and verify values\r\n if self.stage == 0: # We're checking only for numbers - \"d+-\" breaks it\r\n if char.lower() in \"da+-\":\r\n if char.lower() == \"a\" and len(self.roll_string) > 1: return INVALID # Can't start with an \"a\"\r\n self.stage += 1\r\n if char.lower() in \"+-\": self.index -= 1\r\n return self.parse()\r\n if not char.isnumeric(): return INVALID # Not \"d\" and not numeric - bail\r\n # Got what we want, add it to our dice count\r\n self.roll[\"dice\"] = self.roll.get(\"dice\",\"\")+char\r\n elif self.stage == 1: # We're checking for the number of sides\r\n if char.lower() in \"da\": # We skipped the modifier\r\n self.index -= 1\r\n self.stage += 1\r\n return self.parse()\r\n if char in \"+-\": # We got a modifier break\r\n self.roll[\"mod_sign\"] = (char == \"+\") # True for +, False for -\r\n self.stage += 1\r\n return self.parse()\r\n if not char.isnumeric(): return INVALID # Not \"+-\" and not numeric - bail\r\n self.roll[\"sides\"] = self.roll.get(\"sides\",\"\")+char\r\n elif self.stage == 2: # We're checking for the modifier\r\n if char.lower() in \"da\":\r\n self.roll[\"adv_dis\"] = (char.lower() == \"a\")\r\n self.stage += 1\r\n # We got our last char - let's force exit\r\n return self.parse()\r\n if not char.isnumeric(): return INVALID # Not \"da\" and not numeric - bail\r\n self.roll[\"mod\"] = self.roll.get(\"mod\",\"\")+char\r\n else: return INVALID # Extra chars - this isn't right\r\n return self.parse()\r\n\r\nclass Roller:\r\n '''\r\n Class to manage rolling and displaying of dice rolls\r\n '''\r\n def __init__(self):\r\n self.max_sides = 1000 # Max number of sides per die\r\n self.max_rolls = 10 # Max number of rolls per parse\r\n self.max_dice = 
1000 # Max number of dice per roll\r\n self.print_pad = 40 # Pad length when printing\r\n # self.max_chars = 2000 # Max characters per message\r\n\r\n def _roll(self, roll):\r\n rolls = [random.randint(1,roll[\"sides\"]) for x in range(roll[\"dice\"])]\r\n roll_dict = {\r\n \"rolls\":rolls,\r\n \"pretotal\":sum(rolls),\r\n \"total\":sum(rolls) + roll[\"mod\"] if roll[\"mod_sign\"] else sum(rolls) - roll[\"mod\"],\r\n \"crit\":any(x == roll[\"sides\"] for x in rolls),\r\n \"fail\":any(x == 1 for x in rolls)\r\n }\r\n roll_dict[\"crit_string\"] = \"{}{}\".format(\"C\" if roll_dict[\"crit\"] else \"\", \"F\" if roll_dict[\"fail\"] else \"\")\r\n return roll_dict\r\n\r\n def _string_from_roll(self, roll):\r\n roll_string = \"{}d{}\".format(roll[\"dice\"],roll[\"sides\"])\r\n if roll[\"mod\"]: roll_string += (\"+\" if roll[\"mod_sign\"] else \"-\") + str(roll[\"mod\"])\r\n if roll[\"adv_dis\"] != None: roll_string += \"a\" if roll[\"adv_dis\"] else \"d\"\r\n return roll_string\r\n\r\n def roll(self, roll_string = None):\r\n roll_string = roll_string if roll_string else \"1d20\" # Default to a single d20 if nothing passed\r\n # Let's walk our string - up to 10 valid dice rolls can exist here\r\n rolls = roll_string.split()\r\n if len(rolls) > self.max_rolls: return MAX_ROLLS\r\n parsed_rolls = []\r\n for roll in rolls:\r\n out = RollParser(roll=roll).parse()\r\n if out == INVALID: return INVALID\r\n if out[\"dice\"] > self.max_dice: return MAX_DICE\r\n if out[\"sides\"] > self.max_sides: return MAX_SIDES\r\n out[\"roll_string\"] = self._string_from_roll(out)\r\n # Actually roll the dice\r\n roll_list = [self._roll(out)]\r\n if out[\"adv_dis\"] != None:\r\n # Roll again\r\n roll_list.append(self._roll(out))\r\n roll_list = sorted(roll_list,key=lambda x:x[\"total\"],reverse=out[\"adv_dis\"])\r\n out[\"rolls\"] = roll_list\r\n parsed_rolls.append(out)\r\n return parsed_rolls\r\n\r\n def rolls_string(self, roll_list = None):\r\n if not roll_list: return\r\n return \"\\n\".join([\"{}. 
{}\".format(x+1,y) for x,y in enumerate(self.rolls_list(roll_list))])\r\n\r\n def rolls_list(self, roll_list = None):\r\n if not roll_list: return\r\n return [\"{} = {:,}{}\".format(x[\"roll_string\"],x[\"rolls\"][0][\"total\"],\" ({})\".format(x[\"rolls\"][0][\"crit_string\"]) if x[\"rolls\"][0][\"crit_string\"] else \"\") for x in roll_list]\r\n\r\n def roll_string(self, roll = None):\r\n # Only get the first roll if a list is passed\r\n if isinstance(roll, list) and len(roll): roll = roll[0]\r\n # Add our headers - padding them out as needed\r\n roll_string = \"= Dice Roll{} \".format(\"\" if roll[\"dice\"]==1 else \"s\").ljust(self.print_pad,\"=\")+\"\\n\"\r\n roll_string += \"\\n{}\\n\".format(\"-\"*self.print_pad).join([\", \".join([\"{:,}\".format(x) for x in y[\"rolls\"]]) for y in roll[\"rolls\"]])\r\n if roll[\"mod\"]: # We have a modifier - give a pre-total, then the modifier\r\n roll_string += \"\\n\\n\"+\"= Pre-Total \".ljust(self.print_pad,\"=\")+\"\\n\"\r\n roll_string += \"\\n{}\\n\".format(\"-\"*self.print_pad).join([str(x[\"pretotal\"]) for x in roll[\"rolls\"]])\r\n roll_string += \"\\n\\n\"+\"= Modifier \".ljust(self.print_pad,\"=\")+\"\\n\"\r\n roll_string += \"{}{:,}\".format(\"+\" if roll[\"mod_sign\"] else \"-\",roll[\"mod\"])\r\n if roll[\"adv_dis\"] != None: # We have either advantage or disadvantage - highlight which via *{}*\r\n roll_string += \"\\n\\n\"+\"= {}\".format(\"Advantage\" if roll[\"adv_dis\"] else \"Disadvantage\").ljust(self.print_pad,\"=\")+\"\\n\"\r\n roll_string += \"\\n{}\\n\".format(\"-\"*self.print_pad).join([\"*{:,}*\".format(x[\"total\"]) if i == 0 else \"{:,}\".format(x[\"total\"]) for i,x in enumerate(roll[\"rolls\"])])\r\n # Print our final total and return the results\r\n roll_string += \"\\n\\n\"+\"= Final Total \".ljust(self.print_pad,\"=\")+\"\\n\"\r\n roll_string += \"{:,}\".format(roll[\"rolls\"][0][\"total\"])\r\n return roll_string\r\n\r\ndef setup(bot):\r\n # Add the bot and deps\r\n settings = bot.get_cog(\"Settings\")\r\n bot.add_cog(Dice())\r\n\r\nclass Dice(commands.Cog):\r\n '''\r\n The Dice class exists to roll dice for table top games like Dungeons and Dragons.\r\n '''\r\n @commands.command()\r\n async def roll(self, ctx, *, dice = None):\r\n \"\"\"Performs up to 10 space-delimited dice rolls in NdN±Na|d format.\"\"\"\r\n # Display the table then wait for a reaction\r\n d = Roller()\r\n r = d.roll(dice)\r\n if r == MAX_ROLLS: return await ctx.send(\"I can only perform {:,} roll{} at a time!\".format(d.max_rolls,\"\" if d.max_rolls == 1 else \"s\"))\r\n if r == MAX_DICE: return await ctx.send(\"I can only roll up to {:,} dice per roll!\".format(d.max_dice))\r\n if r == MAX_SIDES: return await ctx.send(\"I can only roll dice with up to {:,} face{} per roll!\".format(d.max_sides,\"\" if d.max_sides == 1 else \"s\"))\r\n if r == INVALID: return await ctx.send(\"Dice rolls must be in `NdN±Na|d` format! Rolling a single d10 with a -5 modifier and disadvantage would look like: `{}roll 1d10-5d`\".format(ctx.prefix))\r\n message = None\r\n while True:\r\n index, message = await PickList.Picker(list=d.rolls_list(r), title=\"Pick a roll to show details:\", ctx=ctx, timeout=300, message=message).pick()\r\n if index < 0:\r\n # Edit message to replace the pick title\r\n return await message.edit(content=message.content.replace(\"Pick a roll to show details:\", \"Roll results:\"))\r\n # Show what we need\r\n new_mess = \"{}. 
{}:\\n```\\n{}\\n```\".format(index+1, r[index][\"roll_string\"], d.roll_string(r[index]))\r\n if len(new_mess) > 2000: # Tooooooo big\r\n new_mess = \"{}. {}:\\n```\\nThe details of this roll are longer than 2,000 characters :(```\".format(index+1, r[index][\"roll_string\"])\r\n await message.edit(content=new_mess)\r\n # Add the return reaction - then wait for it or the timeout\r\n await message.add_reaction(\"◀\")\r\n # Setup a check function\r\n def check(reaction, user):\r\n return user == ctx.author and reaction.message.id == message.id and str(reaction.emoji) == \"◀\"\r\n # Attempt to wait for a response\r\n try:\r\n reaction, user = await ctx.bot.wait_for('picklist_reaction', timeout=30, check=check)\r\n except:\r\n # Didn't get a reaction\r\n pass\r\n # Clear our our reactions\r\n try: await message.clear_reactions()\r\n except: pass\r\n # Reset back to our totals\r\n continue","repo_name":"corpnewt/CorpBot.py","sub_path":"Cogs/Dice.py","file_name":"Dice.py","file_ext":"py","file_size_in_byte":10430,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"90"} +{"seq_id":"35902028258","text":"from collections import OrderedDict\nfrom zope.interface import implements, Interface\n\n\nclass IRenderable(Interface):\n def render(request):\n '''Render (normally to html) this object\n '''\n\n\nclass IResource(Interface):\n def render(request):\n '''Render the given resource'''\n\n\nclass Resource(object):\n implements(IResource, IRenderable)\n\n html = u'%(path)s\\n'\n\n def __init__(self, path, use_static_url=False):\n self.path = path\n self.use_static_url = use_static_url\n\n def render(self, request):\n path = self.path\n if self.use_static_url:\n path = request.static_url(path)\n return self.html % {'path': path}\n\n def __unicode__(self):\n return u'<%s path=%s>' % (self.__class__.__name__, self.path)\n\n\nclass JavascriptResource(Resource):\n html = (u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n","repo_name":"rayeshman/howtobecomeahacker","sub_path":"cache/.mako.tmp/post_helper.tmpl.py","file_name":"post_helper.tmpl.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"17943478049","text":"#!/usr/local/bin/python3\n# https://atcoder.jp/contests/abc075/tasks/abc075_b\n\nH, W = map(int, input().split())\nS = [list(input()) for _ in range(H)]\n\n\ndef check(S, i, j):\n if S[i][j] == '#':\n return '#'\n\n num = 0\n for _i in range(max(0, i-1), min(H, i+2)):\n for _j in range(max(0, j-1), min(W, j+2)):\n if S[_i][_j] == '#':\n num += 1\n return str(num)\n\n\nfor i in range(H):\n for j in range(W):\n S[i][j] = check(S, i, j)\n\nfor i in range(H):\n for j in range(W):\n print(S[i][j], end=\"\")\n print()\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03574/s479978438.py","file_name":"s479978438.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"36127084420","text":"from operator import attrgetter\nfrom ReservationV2 import *\nfrom copy import deepcopy\n\nclass Link:\n def __init__(self, nodes, length):\n self.timeWindow = []\n self.availSlots = []\n\n for i in range(TIME_WNDW_SIZE):\n row = [EMPTY] * MAX_NUM_FREQ\n self.timeWindow.append(row)\n\n total = MAX_NUM_FREQ\n self.availSlots.append(total)\n\n self.nodes = nodes\n self.length = length\n\n # ===================================== R e s e r v a t i 
o n W i n d o w ===================================\n\n def UpdateSize(self, startT, depth):\n windowLength = len(self.timeWindow)\n newFrames = []\n newAvail = []\n if windowLength < startT + depth + STRT_WNDW_SIZE:\n for x in range(10 * (startT + depth + STRT_WNDW_SIZE - windowLength)):\n row = [EMPTY] * MAX_NUM_FREQ\n newFrames.append(row)\n total = MAX_NUM_FREQ\n newAvail.append(total)\n\n self.timeWindow += newFrames\n self.availSlots += newAvail\n\n # For checking if a space exists starting from a current start slot\n def CheckSpaceFull(self, startSlot, size, checkTime, depth):\n\n# for row in self.availSlots[startT:startT+depth]:\n# if row < size:\n# return True\n\n for row in range(0, depth):\n for space in range(0, size):\n try:\n curSlot = self.timeWindow[checkTime + row][startSlot + space]\n except:\n print(checkTime, startSlot)\n raise\n if curSlot != EMPTY:\n return True\n elif curSlot == EMPTY:\n continue\n else:\n raise\n\n return False\n\n def GetTimeAvailSlots(self, startT, depth):\n self.UpdateSize(startT, depth)\n return self.availSlots[startT]\n\n def GetListOfOpenSpaces(self, size, startT, depth):\n listOfSpaces = [] # List of spaces of size(size) in the current row\n numAvail = []\n startSlot = []\n for x in range(STRT_WNDW_RANGE):\n listOfSpaces.append([])\n numAvail.append(0)\n startSlot.append(None)\n endT = startT+depth + STRT_WNDW_SIZE\n i = 0\n while (i < MAX_NUM_FREQ):\n spaceList = []\n for x in range(STRT_WNDW_RANGE):\n spaceList.append(EMPTY)\n numEmpty = STRT_WNDW_RANGE\n for row in range(startT,endT):\n if self.timeWindow[row][i] != EMPTY:\n for space in range(STRT_WNDW_RANGE):\n if startT + space <= row and row < startT + space + depth:\n if spaceList[space] == EMPTY: # If space was not already invalidated\n numEmpty -= 1 # Deincrement the number of possible valid spaces\n spaceList[space] = FULL # Mark space Full\n if numEmpty == 0: # If no spaces can be empty after this point\n break\n# freeSpaces = CheckLineIsFull(self.timeWindow[startT:endT], i, depth, STRT_WNDW_RANGE)\n for space in range(STRT_WNDW_RANGE):\n if spaceList[space] == EMPTY:\n numAvail[space] += 1\n if startSlot[space] == None:\n startSlot[space] = i\n if numAvail[space] >= size:\n listOfSpaces[space].append(startSlot[space])\n startSlot[space] += 1\n else:\n numAvail[space] = 0\n startSlot[space] = None\n #print(\"For Column\", i)\n #print(freeSpaces)\n #input()\n i += 1\n\n return listOfSpaces # If at least one suitable space is found, return true and the list of suitable spaces\n\n def PlaceRes(self, startSlot, size, depth, startDepth):\n i = 0\n j = 0\n numSlotsFilled = 0\n\n for row in range(startDepth,startDepth+depth):\n self.availSlots[row] -= size\n\n for i in range(depth):\n for j in range(size):\n if self.timeWindow[startDepth + i][startSlot + j] == EMPTY:\n if j == 0 and i == 0:\n self.timeWindow[startDepth + i][startSlot + j] = START\n else:\n self.timeWindow[startDepth + i][startSlot + j] = FULL\n numSlotsFilled += 1\n elif self.timeWindow[startDepth + i][startSlot + j] == FULL or self.timeWindow[startDepth + i][startSlot + j] == START:\n print(\"LINK\", self.nodes)\n self.PrintGraphic(startDepth + depth + 10)\n print(\"tried to fill slot that was aready full: StartSlot\", startSlot, j, \"StartDepth\", startDepth, i)\n print(\"Dimensions\", size, \"by\", depth)\n raise\n else:\n print(self.timeWindow[startDepth + i][startSlot + j])\n raise\n\n if numSlotsFilled != (size*depth):\n print(numSlotsFilled, size, depth)\n raise\n\n # =================================== O t h e r 
L i n k F u n c t i o n s =================================\n\n def PrintInfo(self):\n print(\"Link\", self.nodes, \"of cost\", self.length)\n\n def PrintGraphic(self, end):\n i = 0\n for row in self.timeWindow[0:end]:\n print(\"{:5} \".format(i), end='')\n for column in row:\n if column == FULL:\n print(\"X\", end='')\n elif column == START:\n print(\"O\", end='')\n elif column == EMPTY:\n print(\"-\", end='')\n else:\n print(column)\n raise\n\n print('')\n i += 1\n print(\" \", end='')\n for x in range(MAX_NUM_FREQ):\n print(str(int((x % 1000)/100)), end='')\n print(\"\\n \", end='')\n for x in range(MAX_NUM_FREQ):\n print(str(int((x % 100)/10)), end='')\n print(\"\\n \", end='')\n for x in range(MAX_NUM_FREQ):\n print(str(x % 10), end='')\n print('')\n\n def GetLinkNodes(self):\n return self.nodes[0], self.nodes[1]\n\n# testNode = Network()\n# i = 1\n# total = 0\n# while(i < 21):\n# for x in range(0,20):\n# total += testNode.RunProcess(i)\n#\n# print(\"Total Number of Blocks with lambda\", i, \"equal to:\", total/20)\n# i += 1\n#\n#\n# print(\"done\")\n\ndef CheckLineIsFull(window, slot, depth, slideSize):\n spaceList = []\n for x in range(slideSize):\n spaceList.append(EMPTY)\n numEmpty = slideSize\n for row in range(0,len(window)):\n if window[row][slot] != EMPTY:\n for space in range(slideSize):\n if space <= row and row < space + depth:\n if spaceList[space] == EMPTY: # If space was not already invalidated\n numEmpty -= 1 # Deincrement the number of possible valid spaces\n spaceList[space] = FULL # Mark space Full\n if numEmpty == 0: # If no spaces can be empty after this point\n break\n return spaceList\n\ndef UpdateSizeOuter(length, startT, depth):\n windowLength = length\n newFrames = []\n if windowLength < startT + depth + STRT_WNDW_SIZE:\n for x in range(10 * (startT + depth + STRT_WNDW_SIZE - windowLength)):\n row = []\n row.append(MAX_NUM_FREQ)\n row.append([EMPTY] * MAX_NUM_FREQ)\n newFrames.append(row)\n\n return newFrames\n\nif __name__ == '__main__':\n link = Link('AB', 500)\n #link.PlaceRes(startSlot, size, depth, startDepth)\n link.PlaceRes(0, 4, 6, 0)\n link.PlaceRes(6, 6, 2, 0)\n link.PlaceRes(4, 2, 3, 4)\n #link.PlaceRes(5, 1, 1, 2)\n link.PrintGraphic(7)\n #link.GetListOfOpenSpaces(size, startT, depth)\n listOf = link.GetListOfOpenSpaces(2, 0, 4)\n for thing in listOf:\n print(thing)\n","repo_name":"NetLab/reservation-testbed","sub_path":"LinkV2.py","file_name":"LinkV2.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"74979202535","text":"# it is all settings for project like size of screen, color of cube etc.\n\n\nNAME_GAME = \"GRATE TENNIS\"\nFPS = 60\nSCREEN_SIZE = (900, 300) # (x, y)\n\nBOARD_START_POS = (0, 0)\nBOARD_SIZE_X = 10\nBOARD_SIZE_Y = SCREEN_SIZE[1] // 3\nBOARD_SPEED = 2\n\nCUBE_START_POS = (350, 175)\nCUBE_SIZE = 10\nCUBE_SPEED_X = 4\nCUBE_SPEED_Y = 3\n\n\nSCORE_FONT = None\nSCORE_FONT_SIZE = 25\nSCORE_POS = (SCREEN_SIZE[0] // 2 - 70, SCREEN_SIZE[1] // 15) # 70 - half of size text in px if font_size = 25\n\n\nBUTTON_SIZE_Y = 40\nBUTTON_SIZE_X = 100\n\n\n\n# COLORS IN RGB\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n","repo_name":"kirillprotsenko03/tennis","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37463037105","text":"#!/usr/local/bin/python3.7\n\nfrom seating import 
Seating\n\nif __name__ == '__main__':\n \"\"\"\n Updates seating for each selected period.\n \"\"\"\n\n print('******* New Table Groups! *******')\n user_input = input(\"Enter period(s) or 'all':\")\n seating_chart = Seating(user_input) # Retrieves current seating.\n seating_chart.update() # New seating for selected periods.\n seating_chart.write_names() # Record in Google Sheet.\n seating_chart.update_storage() # Add new seating to storage.\n seating_chart.verify_seating() # Print out current state of seating object.\n print('******* Finished *******')\n","repo_name":"brett-ford/seating-charts","sub_path":"src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"22043403045","text":"# -*- coding: utf 8 -*-\nfrom django.conf.urls import url\nfrom . import views\n\napp_name = 'core'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^register/$', views.register, name='register'),\n url(r'^login/$', views.login_user, name='login_user'),\n url(r'^logout_user/$', views.logout_user, name='logout_user'),\n url(r'^editprofile/$', views.edit_details_profile, name='editprofile'),\n url(r'^editprofile/settings/$', views.settings, name='settings'),\n url(r'^editprofile/settings/blocked/$', views.lista_bloqueados, name='blockedlist'),\n url(r'^friendsactivities/$', views.friendsactivities, name='friendsactivities'),\n url(r'^notificaIndex/$', views.notificaIndex, name='notifica'),\n url(r'^notificaIndexParticipation/$', views.notificaIndexParticipation, name='notificaParticipation'),\n url(r'^notificaIndexPosts/$', views.notificaIndexPosts, name='notificaPosts'),\n url(r'^atualizaVisto/$', views.atualizaVisto, name='atualizaVisto'),\n url(r'^verificadispo/$', views.verificaDispo, name='verificaDispo'),\n url(r'^allmessages/$', views.all_messages, name='all_messages'),\n url(r'^verifica_leitura/$', views.verifica_leitura, name='verifica_leitura'),\n url(r'^deleta_conversa/$', views.deleta_conversa, name='deleta_conversa'),\n url(r'^notificaall/$', views.notificaAll, name='notificaAll'),\n url(r'^timeline/$', views.quantTL, name='quantTL'),\n url(r'^feedback/$', views.anon_feedback, name='feedback'),\n url(r'^sobre/$', views.sobre, name='sobre'),\n url(r'^excluiUser/$', views.excluiUser),\n\n url(r'^verificaexp/$', views.verifica_exp),\n url(r'^comecar/$', views.comecar),\n \n #url(r'^login/$', views.login_user, name='login_user'),\n\n #Páginas de fix\n url(r'^fix/new/$', views.create_fix, name='create_fix'),\n url(r'^fix/(?P[0-9]+)/$', views.fix_detail, name='fix_detail'),\n url(r'^fix/myfixies/$', views.my_fixies, name='myfixies'),\n url(r'^fix/myfixies/notify/$', views.my_fixiesN, name='myfixiesN'),\n url(r'^fix/participations/$', views.participationsSemUser, name='myparticipations'),\n url(r'^fix/participations/notify/$', views.participationsSemUserN, name='myparticipationsN'),\n url(r'^fix/favorites/$', views.favoritesSemUser, name='myfavorites'),\n url(r'^fix/favorites/notify/$', views.favoritesSemUserN, name='myfavoritesN'),\n\n #Páginas de profile\n url(r'^profile/(?P[\\w.@+-]+)/$', views.profile, name='profile'),\n url(r'^profile/(?P[\\w.@+-]+)/participations/$', views.participations, name='participations'),\n url(r'^profile/(?P[\\w.@+-]+)/favorites/$', views.favorites, name='favorites'),\n url(r'^profile/(?P[\\w.@+-]+)/following/$', views.following, name='following'),\n url(r'^profile/(?P[\\w.@+-]+)/followers/$', views.follower, 
name='follower'),\n url(r'^participations/$', views.participationsSemUser, name='participations'),\n\n #Páginas de post\n url(r'^post/create_post/$', views.create_post, name='create_post'),\n url(r'^post/(?P[0-9]+)/$', views.post_detail, name='post_detail'),\n url(r'^post/myposts/$', views.my_posts, name='my_posts'),\n url(r'^post/myposts/notify/$', views.my_postsN, name='my_postsN'),\n url(r'^post/(?P[0-9]+)/edit/$', views.edit_post, name='edit_post'),\n\n #Rotas de requisiçoes AJAX - Fixies\n #My fixies\n url(r'^fix/(?P[0-9]+)/getnotifymyfix/$', views.getnotifymyfix, name='getnotifymyfix'),\n url(r'^fix/(?P[0-9]+)/inativenotifyfix/$', views.inativeNotifyMyFixies, name='inativeNotifyMyFixies'),\n url(r'^fix/(?P[0-9]+)/ativenotifyfix/$', views.ativeNotifyMyFixies, name='ativeNotifyMyFixies'),\n url(r'^fix/(?P[0-9]+)/delete_confirm/$', views.confirm_delete_fix, name='confirm_delete_fix'),\n url(r'^fix/(?P[0-9]+)/fixed/$', views.mark_fixed_code, name='mark_fixed_code'),\n url(r'^fix/(?P[0-9]+)/restore/$', views.to_restore_fixed_code, name='restore_fixed_code'),\n url(r'^fix/(?P[0-9]+)/best_answer/$', views.best_answer, name='best_answer'),\n url(r'^fix/(?P[0-9]+)/report/$', views.report_coment, name='report_coment'),\n\n #My participations\n url(r'^fix/(?P[0-9]+)/getnotifyparticipation/$', views.getnotifyparticipation, name='getnotifyparticipation'),\n url(r'^fix/(?P[0-9]+)/inativenotifyparticipate/$', views.inativeNotifyParticipations, name='inativeNotifyParticipations'),\n url(r'^fix/(?P[0-9]+)/ativenotifyparticipate/$', views.ativeNotifyParticipations, name='ativeNotifyParticipations'),\n url(r'^fix/(?P[0-9]+)/deleteparticipation/$', views.deleteParticipation, name='deleteParticipation'),\n\n #Visitante\n url(r'^fix/(?P[0-9]+)/getRelationshipFavorite/$', views.getRelationshipFavorite, name='getRelationshipFavorite'),\n url(r'^fix/(?P[0-9]+)/favorite/$', views.favorite_fix, name='favorite_fix'),\n url(r'^fix/(?P[0-9]+)/un_favorite/$', views.un_favorite_fix, name='un_favorite_fix'),\n\n #Rotas de requisições AJAX - Posts\n url(r'^post/(?P[0-9]+)/getkeypostprofile/$', views.getkeypostprofile, name='getkeypostprofile'),\n url(r'^post/(?P[0-9]+)/ativepostprofile/$', views.ativepostprofile, name='getkeypostprofile'),\n url(r'^post/(?P[0-9]+)/inativepostprofile/$', views.inativepostprofile, name='getkeypostprofile'),\n url(r'^post/(?P[0-9]+)/delete_post/$', views.delete_post, name='delete_post'),\n url(r'^post/(?P[0-9]+)/getkeyactivepost/$', views.getkeyactivepost, name='getkeyactivepost'),\n url(r'^post/(?P[0-9]+)/activenotifypost/$', views.ativeNotifyPost, name='activenotifypost'),\n url(r'^post/(?P[0-9]+)/inactivenotifypost/$', views.inativeNotifyPost, name='inactivenotifypost'),\n url(r'^post/(?P[0-9]+)/report/$', views.report_coment_post, name='report'),\n\n #Profile\n url(r'^profile/(?P\\w+)/getrelationship/$', views.getrelationship, name='getrelationship'),\n url(r'^profile/(?P\\w+)/follower/$', views.followajax, name='followajax'),\n url(r'^profile/(?P\\w+)/unfollower/$', views.unfollowajax, name='unfollowajax'),\n url(r'^bloquear/$', views.bloquear_user, name='bloquear'),\n url(r'^desbloquear/$', views.desbloquear_user, name='desbloquear'),\n\n\n #Paginas de inbox\n url(r'^inbox/$', views.my_contacts, name='my_contacts'),\n url(r'^inbox/message/(?P[0-9]+)/$', views.sala, name='sala'),\n url(r'^inbox/message/(?P[0-9]+)/notview/$', views.messages_not_view, name='messages_not_view'),\n url(r'^inbox/message/(?P[0-9]+)/read/$', views.read_messages, name='read_messages'),\n 
url(r'^inbox/message/(?P[0-9]+)/send/$', views.send_message, name='send_message'),\n\n #Páginas de search - areas\n url(r'^area/(?P[0-9]+)/$', views.search_area_fix, name='search_area_fix'),\n url(r'^area/(?P[0-9]+)/posts/$', views.search_area_post, name='search_area_posts'),\n url(r'^area/(?P[0-9]+)/users/$', views.search_area_user, name='search_area_users'),\n\n #Páginas de search - geral\n url(r'^search=(?P.+)$', views.search_fix, name='search_fix'),\n url(r'^posts/search=(?P.+)$', views.search_post, name='search_post'),\n url(r'^users/search=(?P.+)$', views.search_user, name='search_user'),\n\n]\n","repo_name":"tonifc007/FixCodeProject","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"72912461327","text":"# import itertools\nimport warnings\n\nimport abjad\nimport quicktions as fractions\n\nfrom mutwo import abjad_converters\nfrom mutwo import abjad_parameters\nfrom mutwo import cdd_parameters\nfrom mutwo import music_parameters\n\n\n# Monkey patch Arpeggio process_leaf, so that\n# arpeggio above staves can handle direction.\n# Furthermore minimal length of arpeggio is defined here.\ndef Arpeggio_process_leaf_monkey_patched(self, leaf: abjad.Leaf):\n thickness = 3\n abjad.attach(\n abjad.LilyPondLiteral(\"\\\\override Arpeggio.thickness = #'{}\".format(thickness)),\n leaf,\n )\n direction = self.direction\n if direction in self._string_to_direction:\n direction = self._string_to_direction[direction]\n move_direction = \"UP\" if direction == abjad.enums.Up else \"DOWN\"\n abjad.attach(\n abjad.LilyPondLiteral(\n r\"\\override PianoStaff.Arpeggio.arpeggio-direction = #\" + move_direction\n ),\n leaf,\n )\n else:\n warnings.warn(f\"Found unknown direction '{direction}'.\")\n abjad.attach(abjad.Arpeggio(direction=direction), leaf)\n\n # If interval is small (smaller than a third) arpeggio will\n # be very short. 
This is an ugly tweak to increase length\n # of arpeggio line.\n # See https://lists.gnu.org/archive/html/lilypond-user/2006-05/msg00181.html\n\n # THIS DOESN'T WORK DUE TO DURATION LINES\n\n # if leaf.note_heads:\n # named_pitch_list = sorted(\n # [note_head.named_pitch for note_head in leaf.note_heads]\n # )\n # interval_list = [\n # pitch0 - pitch1\n # for pitch0, pitch1 in itertools.combinations(named_pitch_list, 2)\n # ]\n # max_interval = max([abs(interval.cents) for interval in interval_list])\n # if max_interval < 500:\n # new_pitch = named_pitch_list[0] + abjad.NumberedInterval(10)\n # new_note_head = abjad.NoteHead(new_pitch)\n # abjad.tweak(new_note_head).transparent = \"##t\"\n # leaf.note_heads.append(new_note_head)\n return leaf\n\n\nabjad_parameters.Arpeggio.process_leaf = Arpeggio_process_leaf_monkey_patched\n\n\nclass CentDeviation(\n cdd_parameters.CentDeviation, abjad_parameters.abc.BangFirstAttachment\n):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n if self.deviation % 100 != 0:\n if self.deviation > 0:\n prefix = \"+\"\n else:\n prefix = \"-\"\n adjusted_deviation = round(abs(self.deviation))\n markup = abjad.Markup(\n \"\\\\tiny { \" + f\"{prefix}{adjusted_deviation}\" + \" } \", direction=\"up\"\n )\n abjad.attach(\n markup,\n leaf,\n )\n return leaf\n\n\nclass DurationLine(\n cdd_parameters.DurationLine, abjad_parameters.abc.BangFirstAttachment\n):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n abjad.attach(\n abjad.LilyPondLiteral(\n (\n fr\"\\once \\override DurationLine.dash-period = #'{self.dash_period} \"\n fr\"\\once \\override DurationLine.thickness = #'{self.thickness} \"\n fr\"\\once \\override DurationLine.bound-details.right.end-style = #'{self.end_style} \"\n fr\"\\override DurationLine.bound-details.hook-direction = #{self.hook_direction} \"\n )\n ),\n leaf,\n )\n if self.style != \"line\":\n abjad.attach(\n abjad.LilyPondLiteral(\n (fr\"\\once \\override DurationLine.style = #'{self.style} \")\n ),\n leaf,\n )\n return leaf\n\n\nclass FancyGlissando(\n cdd_parameters.FancyGlissando, abjad_parameters.abc.BangFirstAttachment\n):\n @staticmethod\n def _command_to_lilypond_string(command: tuple[tuple[float, ...], ...]):\n lilypond_string = \"\"\n for part in command:\n lilypond_string += \"({})\\n\".format(\" \".join(map(str, part)))\n return fr\"\\fancy-gliss #'({lilypond_string})\"\n\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n leaf = abjad.mutate.copy(leaf)\n fancy_glissando = FancyGlissando._command_to_lilypond_string(self.command)\n abjad.attach(abjad.LilyPondLiteral(fancy_glissando, format_slot=\"before\"), leaf)\n abjad.attach(abjad.Glissando(), leaf)\n return leaf\n\n\nclass Optional(\n music_parameters.abc.ExplicitPlayingIndicator,\n abjad_parameters.abc.BangFirstAttachment,\n):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n if hasattr(leaf, \"note_head\"):\n note_head_list = [leaf.note_head]\n elif hasattr(leaf, \"note_heads\"):\n note_head_list = leaf.note_heads\n else:\n note_head_list = []\n for note_head in note_head_list:\n note_head.is_parenthesized = True\n return leaf\n\n\nclass IrregularGlissando(\n music_parameters.abc.ExplicitPlayingIndicator,\n abjad_parameters.abc.BangFirstAttachment,\n):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n leaf = abjad.mutate.copy(leaf)\n fancy_glissando = r\"\"\"\n\\fancy-gliss\n #'(\n (1 3 0.2 2 1 1)\n (2 -2)\n (3 2)\n (4 1)\n (5 2.5)\n (6 0)\n (7 0 8 6 12 0))\n\"\"\"\n abjad.attach(\n abjad.LilyPondLiteral(fancy_glissando, 
format_slot=\"before\"),\n leaf,\n )\n if hasattr(leaf, \"note_head\"):\n pitch = leaf.note_head.written_pitch\n elif hasattr(leaf, \"note_heads\"):\n pitch = leaf.note_heads[0].written_pitch\n else:\n raise NotImplementedError()\n hidden_leaf = abjad.Note(pitch, fractions.Fraction(1, 64))\n omit = r\"\\once \\omit\"\n abjad.attach(\n abjad.LilyPondLiteral(\n (\n f\"{omit} Accidental \"\n f\"{omit} NoteHead \"\n f\"{omit} Beam \"\n f\"{omit} Stem \"\n f\"{omit} Flag \"\n ),\n format_slot=\"before\",\n ),\n hidden_leaf,\n )\n\n voice = abjad.Voice([leaf, hidden_leaf])\n\n abjad.attach(abjad.Glissando(), leaf)\n\n return voice\n\n\nclass NoteHead(cdd_parameters.NoteHead, abjad_parameters.abc.BangEachAttachment):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n abjad.attach(\n abjad.LilyPondLiteral(\n fr\"\\override NoteHead.style = #'{self.style}\",\n format_slot=\"absolute_before\",\n ),\n leaf,\n )\n abjad.attach(\n abjad.LilyPondLiteral(\n fr\"\\override NoteHead.style = #'{self.default_style}\",\n format_slot=\"absolute_after\",\n ),\n leaf,\n )\n return leaf\n\n\nclass NoteHeadHintList(\n cdd_parameters.NoteHeadHintList, abjad_parameters.abc.BangFirstAttachment\n):\n def process_leaf(self, leaf: abjad.Leaf) -> abjad.Leaf:\n abjad.attach(\n abjad.LilyPondLiteral(\n (\n r\"\\set fingeringOrientations = #'(right) \"\n rf\"\\override Fingering.font-size = #{self.font_size}\"\n ),\n format_slot=\"absolute_before\",\n ),\n leaf,\n )\n\n if hasattr(leaf, \"note_head\"):\n note_head_list = [leaf.note_head]\n elif hasattr(leaf, \"note_heads\"):\n note_head_list = leaf.note_heads\n else:\n note_head_list = []\n\n for note_head, hint in zip(note_head_list, self.hint_list):\n note_head.lilypond_literal = fr'\\finger \"({hint})\"'\n\n return leaf\n\n\n# override mutwo default value\nabjad_converters.configurations.DEFAULT_ABJAD_ATTACHMENT_CLASS_TUPLE = (\n abjad_converters.configurations.DEFAULT_ABJAD_ATTACHMENT_CLASS_TUPLE\n + (\n CentDeviation,\n FancyGlissando,\n IrregularGlissando,\n NoteHead,\n NoteHeadHintList,\n Optional,\n DurationLine,\n )\n)\n","repo_name":"levinericzimmermann/cdd","sub_path":"mutwo.ext-cdd/mutwo/cdd_parameters/abjad_attachments.py","file_name":"abjad_attachments.py","file_ext":"py","file_size_in_byte":8099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"34249810593","text":"\"\"\"Написать класс треугольник. В конструкторе определить длины сторон. 
Добавить методы для вычисления площади,\r\nпериметра, определения типа треугольника (прямой, равнобедренный, равносторонний, разносторонний)\"\"\"\r\n\r\n\r\ndef _triangle_exist(triangle):\r\n if (triangle.side1 + triangle.side2 > triangle.side3 and triangle.side1 + triangle.side3 > triangle.side2 and\r\n triangle.side2 + triangle.side3 > triangle.side1):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass Triangle(object):\r\n\r\n def __init__(self, side1, side2, side3):\r\n self.side1 = float(side1)\r\n self.side2 = float(side2)\r\n self.side3 = float(side3)\r\n self.is_exist = _triangle_exist(self)\r\n\r\n def triangle_perimeter(self):\r\n if self.is_exist is True:\r\n perimeter = self.side1 + self.side2 + self.side3\r\n return round(perimeter, 2)\r\n else:\r\n return \"Треугольник не существует.\"\r\n\r\n def triangle_square(self):\r\n if self.is_exist is True:\r\n half_perimeter = (self.side1 + self.side2 + self.side3)/2\r\n square = (half_perimeter * (half_perimeter - self.side1) * (half_perimeter - self.side2) *\r\n (half_perimeter - self.side3)) ** 0.5\r\n return round(square, 2)\r\n else:\r\n return \"Треугольник не существует.\"\r\n\r\n def triangle_type(self):\r\n if self.is_exist is True:\r\n if self.side1 == self.side2 == self.side3:\r\n return \"Треугольник равностронний\"\r\n elif ((self.side1 == self.side2 and self.side2 != self.side3) or (self.side2 == self.side3 and\r\n self.side3 != self.side1) or (self.side3 == self.side1 and self.side1 != self.side2)):\r\n return \"Треугольник равнобедренный\"\r\n elif ((self.side1 ** 2 == self.side2 ** 2 + self.side3 ** 2) or\r\n (self.side2 ** 2 == self.side1 ** 2 + self.side3 ** 2) or\r\n (self.side3 ** 2 == self.side1 ** 2 + self.side2 ** 2)):\r\n return \"Треугольник прямоугольный\"\r\n else:\r\n return \"Треугольник разносторонний\"\r\n else:\r\n return \"Треугольник не существует.\"\r\n","repo_name":"Primula13/Review","sub_path":"Review/Tasks/Triangle_task.py","file_name":"Triangle_task.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"17971593081","text":"import cv2 as cv\r\nimport numpy as np\r\nfrom djitellopy import tello\r\nfrom Other import Other\r\nimport time\r\n\r\nhsv = 95\r\nlower_blue1 = np.array([95, 95, 95])\r\nupper_blue1 = np.array([105, 255, 255])\r\nlower_blue2 = np.array([85, 95, 95])\r\nupper_blue2 = np.array([95, 255, 255])\r\nlower_blue3 = np.array([85, 95, 95])\r\nupper_blue3 = np.array([95, 255, 255])\r\n\r\ncv.namedWindow('img_color')\r\n\r\ncv.namedWindow('img_result')\r\n\r\n\r\ndef GetMiddle(img_color):\r\n\r\n # 원본 영상을 HSV 영상으로 변환\r\n img_hsv = cv.cvtColor(img_color, cv.COLOR_BGR2HSV)\r\n img_mask1 = cv.inRange(img_hsv, lower_blue1, upper_blue1)\r\n img_mask2 = cv.inRange(img_hsv, lower_blue2, upper_blue2)\r\n img_mask3 = cv.inRange(img_hsv, lower_blue3, upper_blue3)\r\n img_mask = img_mask1 | img_mask2 | img_mask3\r\n\r\n kernel = np.ones((11, 11), np.uint8)\r\n img_mask = cv.morphologyEx(img_mask, cv.MORPH_OPEN, kernel)\r\n img_mask = cv.morphologyEx(img_mask, cv.MORPH_CLOSE, kernel)\r\n\r\n # 마스크 이미지로 원본 이미지에서 범위값에 해당되는 영상 부분을 획득\r\n img_result = cv.bitwise_and(img_color, img_color, mask=img_mask)\r\n\r\n numOfLabels, img_label, stats, centroids = cv.connectedComponentsWithStats(img_mask)\r\n returnX = -1\r\n returnY = -1\r\n\r\n for idx, centroid in enumerate(centroids):\r\n if stats[idx][0] == 0 and stats[idx][1] == 0:\r\n continue\r\n\r\n if np.any(np.isnan(centroid)):\r\n 
continue\r\n\r\n x, y, width, height, area = stats[idx]\r\n centerX, centerY = int(centroid[0]), int(centroid[1])\r\n print(centerX, centerY)\r\n\r\n if area > 900:\r\n returnX = centerX\r\n returnY = centerY\r\n cv.circle(img_color, (centerX, centerY), 10, (0, 0, 255), 10)\r\n cv.rectangle(img_color, (x, y), (x + width, y + height), (0, 0, 255))\r\n\r\n cv.imshow('img_color', img_color)\r\n cv.imshow('img_mask', img_mask)\r\n cv.imshow('img_result', img_result)\r\n return returnX, returnY\r\n\r\ncap = cv.VideoCapture(0)\r\nprint(\"{}*{}\\nfps : {}\".format(cap.get(cv.CAP_PROP_FRAME_HEIGHT), cap.get(cv.CAP_PROP_FRAME_WIDTH),\r\n cap.get(cv.CAP_PROP_FPS)))\r\n\r\npid_x = Other(320, 0.25, 0.0, 0.0, \"X\")\r\npid_y = Other(240, 0.25, 0.0, 0.0, \"Y\")\r\n\r\ndrone = tello.Tello()\r\ndrone.connect()\r\ndrone.takeoff()\r\n\r\nStart_time = time.time()\r\n\r\nwhile True:\r\n ret, img = cap.read()\r\n if not ret :\r\n continue\r\n x, y = GetMiddle(img)\r\n velocity_x, velocity_y = pid_x.do(x, time.time() - Start_time), pid_y.do(y, time.time() - Start_time) # 제어값 계산\r\n print(str(velocity_x)+\" \"+str(velocity_y))\r\n drone.send_rc_control(-int(velocity_x), 0, int(velocity_y), 0) # 제어 값 전송\r\n\r\n print(\"\")\r\n if (cv.waitKey(1) & 0xff) == 27:\r\n break\r\ndrone.land()\r\ncap.release()\r\ncv.destroyAllWindows()\r\n","repo_name":"daehwan5024/Tello_Marker","sub_path":"Detect.py","file_name":"Detect.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"34031974462","text":"# coding: utf-8\n# Django settings for hermanmiller project.\nimport os\nfrom settings_defaults import * # DO NOT delete, there are default settings!\n\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nWWW_ROOT = os.path.join(SITE_ROOT, 'wwwroot')\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nUSE_DEBUG_TOOLBAR = True\n\nALLOWED_HOST = 'herman-miller.com.ua'\nALLOWED_SUBDOMAIN = 'hermanmiller.picassoft.com.ua'\nALLOWED_HOSTS = [\n 'localhost',\n '.{}'.format(ALLOWED_HOST), # Allow domain and subdomains\n '.{}.'.format(ALLOWED_HOST), # Also allow FQDN and subdomains\n '.{}'.format(ALLOWED_SUBDOMAIN), # Allow domain and subdomains\n '.{}.'.format(ALLOWED_SUBDOMAIN), # Also allow FQDN and subdomains\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'hermanmiller', # Or path to database file if using sqlite3.\n 'USER': 'postgres', # Not used with sqlite3.\n 'PASSWORD': 'c', # Not used with sqlite3.\n 'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Kiev'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'uk'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(WWW_ROOT, 'media')\nDEFAULT_FILE_STORAGE = 'picassoft.files.storage.HashedFileSystemStorage'\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(WWW_ROOT, 'static')\nSTATIC_SOURCE = os.path.join(SITE_ROOT, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\nADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n STATIC_SOURCE,\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'u12k&hqz8+agf%q3ob5fg6e2-4t#4s+do9$d_#!63=6r9$ou3)'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.i18n',\n 'mediatools.context_processors.settings',\n 'constance.context_processors.config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n)\n\nif USE_DEBUG_TOOLBAR:\n MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n )\n\n# After DebugToolbarMiddleware to show SQL queries.\nMIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n)\n\nROOT_URLCONF = 'hermanmiller.urls'\n\n# Python dotted path to the WSGI application used by Django's 
runserver.\nWSGI_APPLICATION = 'hermanmiller.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(SITE_ROOT, 'templates'),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.sitemaps',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.webdesign',\n\n 'debug_toolbar',\n #'django_extensions',\n 'south',\n 'sorl.thumbnail',\n 'paging',\n #'modeltranslation',\n 'feincms',\n 'mptt',\n 'haystack',\n 'constance',\n 'constance.backends.database',\n\n 'picassoft.utils',\n 'picassoft.sprite_bundler',\n\n 'search',\n 'navigation',\n 'catalog',\n 'content',\n 'shop',\n 'slideshow',\n)\n\n# Debug Bar settings\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n}\n\n# Cache settings\nCACHE_MIDDLEWARE_KEY_PREFIX = os.path.split(SITE_ROOT)[1]\nCACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n}\n\n# Languages settings\n_ = lambda s: s\nLANGUAGES = (\n ('ru', _(\"Russian\")),\n)\n\nLOCALE_PATHS = os.path.join(SITE_ROOT, 'locale'),\n\n# Model translation settings\nTRANSLATION_REGISTRY = 'hermanmiller.translations'\n\n# Sprite bundles settings\nSPRITE_BUNDLES = [\n {\n 'format': \"sprite-{name}\",\n 'source': os.path.join(SITE_ROOT, 'sprites', '*.*'),\n 'url': \"../img/sprites.png\",\n 'sprite_file': os.path.join(STATIC_SOURCE, 'img', 'sprites.png'),\n 'css_file': os.path.join(STATIC_SOURCE, 'css', 'sprites.css'),\n }\n]\n\n#Thumbnail settings\nTHUMBNAIL_DEBUG = DEBUG\n\nTHUMBNAIL_STORAGE = 'django.core.files.storage.FileSystemStorage' # Set explicitly to override default storage\n\nTHUMBNAIL_SETTINGS = {\n 'FULLSCREEN_SIZE': '870',\n 'ICON_SIZE': '250x250',\n 'ONE_COLUMN_ICON_SIZE': '270',\n 'CONTENT_PICTURE': '250x187',\n 'CONTENT_PICTURE_NEWS': '270x202',\n}\n\nTHUMBNAIL_PADDING = True\n\n#Search settings\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.xapian_backend.XapianEngine',\n 'PATH': os.path.join(SITE_ROOT, 'index', 'xapian'),\n 'INCLUDE_SPELLING': True,\n },\n}\n\n#Constance setings\nCONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'\nCONSTANCE_DATABASE_CACHE_BACKEND = 'default'\n\nCONSTANCE_CONFIG = {\n 'CURRENCY_RATE_EUR': (12.5, _(\"Currency rate UAH to EUR (per 1 EUR).\")),\n 'INVOICE_SUPPLIER': (\"\", _(\"Invoice supplier. 
New lines are taken into account.\")),\r\n    'LIQPAY_PUBLIC_KEY': ('i1887235161', _(\"LiqPay public key.\")),\r\n    'LIQPAY_PRIVATE_KEY': ('1LjptrUnv71ecOv1W62MOWK8QgVYd82ObadL48Iw', _(\"LiqPay private key.\")),\r\n}\r\n\r\n#Session settings\r\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\r\n\r\n#Email settings\r\nEMAIL_FILE_PATH = os.path.join(SITE_ROOT, 'mailbox')\r\n\r\nORDER_RECIPIENTS = (\r\n    'sales@picassoft.com.ua',\r\n    'a.butsan@picassoft.com.ua',\r\n)\r\n\r\nORDER_SENDER = 'sales@hermanmiller.com.ua'\r\n\r\n#Auth settings\r\nLOGIN_URL = 'registration:login'\r\nLOGIN_REDIRECT_URL = 'home'\r\n","repo_name":"alexgula/django_sites","sub_path":"sites/hermanmiller/hermanmiller/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"42304781705","text":"# Graph Theory\nclass Graph:\n    def __init__(self, gdict=None):\n        # default to an empty adjacency dict and always assign it to the instance\n        if gdict is None:\n            gdict = {}\n        self.gdict = gdict\n    def addEdge(self, vertex, edge):\n        self.gdict[vertex].append(edge)\n\n    def bfs(self, vertex):\n        visited = [vertex]\n        queue = [vertex]\n        while queue:\n            dequeue = queue.pop(0)\n            print(dequeue)\n            for adjacentVertex in self.gdict[dequeue]:\n                if adjacentVertex not in visited:\n                    visited.append(adjacentVertex)\n                    queue.append(adjacentVertex)\n\n    def dfs(self, vertex):\n        visited = [vertex]\n        stack = [vertex]\n        while stack:\n            deStack = stack.pop()\n            print(deStack)\n            for adjacentVertex in self.gdict[deStack]:\n                if adjacentVertex not in visited:\n                    visited.append(adjacentVertex)\n                    stack.append(adjacentVertex)\n\n\ncustomDict = { \"a\" : [\"b\",\"c\"],\n            \"b\" : [\"a\",\"d\",\"e\"],\n            \"c\" : [\"a\",\"e\"],\n            \"d\" : [\"b\",\"e\",\"f\"],\n            \"e\" : [\"b\",\"c\",\"d\"],\n            \"f\" : [\"d\",\"e\"]\n            }\ngraph = Graph(customDict)\ngraph.addEdge(\"e\", \"f\")\ngraph.bfs(\"a\")\ngraph.dfs(\"a\")\nprint(graph.gdict)\n","repo_name":"Odion-Sonny/open","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"70663243408","text":"import re\nfrom openerp import netsvc\nfrom openerp.osv import osv, fields\n\nclass role_book(osv.osv):\n    \"\"\"\"\"\"\n    \n    _name = 'nautical.role_book'\n    _description = 'role_book'\n\n    _columns = {\n        'estimated_dep_date': fields.datetime(string='Estimated Departure Date', required=True),\n        'partner_id': fields.many2one('res.partner', string='Partner', readonly=True, required=True),\n        'destiny': fields.char(string='Destiny'),\n        'crew_qty': fields.integer(string='Crew'),\n        'est_arrival_date': fields.datetime(string='Estimated Arrival Date'),\n        'craft_id': fields.many2one('nautical.craft', string='Craft', readonly=True, required=True, ondelete='cascade'), \n    }\n\n    _defaults = {\n    }\n\n\n    _constraints = [\n    ]\n\n\n\n\nrole_book()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"ingadhoc/odoo-nautical","sub_path":"nautical/role_book.py","file_name":"role_book.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"65"} +{"seq_id":"16133510627","text":"##from abaqus import *\r\n##from odbAccess import *\r\nfrom numpy import *\r\nimport os\r\n##import load\r\nimport random\r\nimport itertools\r\nimport numpy as np\r\n##from jobMessage import ANY_JOB, ANY_MESSAGE_TYPE\r\nloadList=['Load-bt1','Load-bt2','Load-bt3']\r\nbase_dir=os.getcwd()\r\n\r\ndef 
change_of_load(pathName,modelName,jobName,bias):\r\n mdb=openMdb(pathName=pathName)\r\n myModel=mdb.models[modelName]\r\n for i in range(len(loadList)):\r\n myModel.loads[loadList[i]].setValuesInStep(stepName='Step-2',magnitude=float(bias[i]))\r\n myJob=mdb.Job(name=jobName,model=modelName,numCpus=8,numDomains=8)\r\n print('start job')\r\n myJob.submit()\r\n myJob.waitForCompletion()\r\n print('finish job')\r\n \r\ndef extract_result(odbPath,outPath,option):\r\n odb=openOdb(path=odbPath)\r\n stepkeys=odb.steps.keys()\r\n\r\n oface=odb.rootAssembly.nodeSets['SET-OUTPUT']\r\n S=odb.steps[stepkeys[-1]].frames[-1].fieldOutputs[option]\r\n oS=S.getSubset(region=oface)\r\n\r\n labels, xyz = [], []\r\n for node in oface.nodes[0]:\r\n labels.append(node.label)\r\n xyz.append(node.coordinates)\r\n cc = dict(zip(labels, xyz))\r\n\r\n ovalues=oS.values\r\n \r\n cpFile=file(outPath,'w')\r\n for i in range(len(ovalues)):\r\n coord=cc[ovalues[i].nodeLabel]\r\n cpFile.write(str(ovalues[i].nodeLabel)+' '+str(coord[0])+' '+str(coord[1])+' '+str(ovalues[i].data[-1])+'\\n')\r\n cpFile.close()\r\n odb.close()\r\ndef getloss(filePath):\r\n fp=open(filePath)\r\n cpress=[]\r\n for line in fp.readlines():\r\n values=line.strip().split(' ')\r\n cpress.append(float(values[3]))\r\n varcpress=var(cpress)\r\n return varcpress\r\ndef cal_modelval(inputval,modelpath,modelname):\r\n change_of_load(modelpath,modelname,'test',inputval)\r\n extract_result(base_dir+'test.odb',base_dir+'/test.txt','U')\r\n loss=getloss(base_dir+'/test.txt')\r\n return loss\r\n#初始采样\r\ndef all_sampling(mindata,maxdata,ptnum):\r\n length=len(mindata)\r\n ptlist=[]\r\n for i in range(ptnum):\r\n for j in range(ptnum):\r\n a=[i,j]\r\n smallist=[]\r\n for m in range(len(a)):\r\n downval=mindata[m]+a[m]*(maxdata[m]-mindata[m])/ptnum\r\n upval=mindata[m]+(a[m]+1)*(maxdata[m]-mindata[m])/ptnum\r\n smallist.append(random.uniform(downval,upval))\r\n ptlist.append(smallist)\r\n return transpose(array(ptlist))\r\n#均匀采样 \r\ndef uni_sampling(mindata,maxdata,ptnum):\r\n length=len(mindata)\r\n num=int(sqrt(ptnum))\r\n ptlist=[]\r\n for i in range(num):\r\n for j in range(num):\r\n a=[i,j]\r\n smallist=[]\r\n for m in range(len(a)):\r\n downval=mindata[m]+a[m]*(maxdata[m]-mindata[m])/num\r\n upval=mindata[m]+(a[m]+1)*(maxdata[m]-mindata[m])/num\r\n smallist.append((downval+upval)/2)\r\n ptlist.append(smallist)\r\n return transpose(array(ptlist))\r\n#拉丁超立方采样,返回长度*点数\r\ndef latian1_sampling(mindata,maxdata,ptnum,trans=True):\r\n length=len(mindata)\r\n ptlist=[]\r\n for i in range(length):\r\n smalist=[]\r\n for j in range(ptnum):\r\n downval=mindata[i]+j*(maxdata[i]-mindata[i])/ptnum\r\n upval=mindata[i]+(j+1)*(maxdata[i]-mindata[i])/ptnum\r\n smalist.append(random.uniform(downval,upval))\r\n random.shuffle(smalist)\r\n ptlist.append(array(smalist))\r\n pts=vstack(ptlist)\r\n if trans:\r\n pts=pts.T\r\n return pts\r\n#最远区域采样.data:2*n\r\ndef fps_sampling(data,ptnum):\r\n dimsize,allnum=shape(data)\r\n newids=[0]\r\n data1=expand_dims(data,axis=1)\r\n data2=expand_dims(data,axis=2)\r\n dismat=sqrt(sum(square(data1-data2),axis=0))#n*n\r\n for i in range(ptnum-1):\r\n farmat=dismat[newids]\r\n newid=argmax(amin(farmat,axis=0))\r\n newids.append(newid)\r\n## newdata=data[newids]\r\n return newids\r\n#修改版的超立方采样,可以去除中间区域\r\ndef latian2_sampling(mindata,maxdata,minv,maxv,ptnum):\r\n length=len(mindata)\r\n ptlist=[]\r\n ptnum1=ptnum+1\r\n for i in range(length):\r\n smalist=[]\r\n valrange=(maxdata[i]-mindata[i])-(maxv[i]-minv[i])\r\n sym=0\r\n index=0\r\n for j in 
range(ptnum1):\r\n if sym==0:\r\n downval=mindata[i]+j*valrange/ptnum1\r\n upval=mindata[i]+(j+1)*valrange/ptnum1\r\n else:\r\n downval=maxdata[i]-(ptnum-index)*valrange/ptnum1\r\n upval=downval+valrange/ptnum1\r\n\r\n if upval>minv[i] and upval0\r\ndef list_same(l1,mat2):\r\n err=sum(np.min(square(array(l1)-array(mat2)),axis=0))\r\n if err<0.01:\r\n return False\r\n else:\r\n return True\r\ndef elem_same(l1,l2):\r\n sl1=sorted(list(l1))\r\n sl2=sorted(list(l2))\r\n if sl1==sl2:\r\n return True\r\n else:\r\n return False\r\ndef elem_exist(l1,mat):\r\n al=array(l1)\r\n err=np.min(np.sum(np.square(al-array(mat)),axis=-1))\r\n if err<0.001:\r\n return False\r\n else:\r\n return True\r\ndef err_evaluate(data,pts,b,degree=2,limit=4,\\\r\n getdegree=True,idin=None,use_idlist=False,k=0.5,threshold=0.3,eva_type='k',delta=5):\r\n ptnum,dimnum=np.shape(pts)\r\n dis=np.sum(np.square(expand_dims(pts,axis=0)-expand_dims(pts,axis=1)),axis=-1)#ptnum*ptnum\r\n maxdis=np.max(dis)\r\n evadis=np.sum(np.square(expand_dims(pts,axis=1)-expand_dims(data,axis=0)),axis=-1)#ptnum*evanum\r\n judge=np.sum(np.where(evadis0:\r\n pts_data=np.concatenate([pts_data,xins],axis=0)\r\n ys_data=np.concatenate([ys_data,yins],axis=0)\r\n else:\r\n pts_data=xins\r\n ys_data=yins\r\n local_pts.append(pts_data)\r\n local_ys.append(ys_data)\r\n return region_r,x,local_ys,local_pts\r\n#data:n*dimnum\r\n#output:1\r\ndef monte_integral(data,func):\r\n## data=latian1_sampling(down,up,num)\r\n dev2,dev=cal_dev2(data,func)#n*dimnum\r\n## result=np.exp(-5*np.min(np.abs(dev2),axis=1))\r\n result=np.max(np.max(np.abs(dev2),axis=1),axis=1)\r\n result=np.sum(result)\r\n## print(dev,dev2)\r\n## assert False\r\n result2=np.exp(-5*np.min(np.min(np.abs(dev2),axis=1),axis=1))\r\n## result2=np.sum(np.abs(dev),axis=-1)\r\n result2=np.sum(result2)\r\n return dev2,dev,result,result2\r\n \r\n#data:n*dimnum\r\n#output:n*dimnum\r\ndef cal_dev2(data,func):\r\n ptnum,dimnum=np.shape(data)\r\n tiny_dis=1e-5\r\n movemat=np.eye(dimnum)*tiny_dis\r\n alldata=np.expand_dims(movemat,axis=0)+np.expand_dims(data,axis=1)#n*dimnum*dimnum\r\n alldata=np.reshape(alldata,[-1,dimnum])\r\n yy=func(alldata)#(n*dimnum)*1\r\n## print(np.shape(alldata))\r\n yy=np.reshape(yy,[ptnum,dimnum,-1])\r\n y=np.expand_dims(func(data),axis=1)#n*1*dimnum\r\n## print(y)\r\n## print(yy)\r\n## print(data,alldata)\r\n dev2=(yy-y)/tiny_dis#n*dimnum*dimnum\r\n## print(np.shape(yy),np.shape(y),ptnum,dimnum)\r\n## print(y)\r\n## dev2=np.reshape(dev2,[ptnum,-1])\r\n## dev=np.reshape(np.array(y),[ptnum,-1])\r\n return dev2,np.array(y)\r\n \r\n \r\nif __name__=='__main__':\r\n## x1=array([1.0,2.0,3.0,4.0,5.0,6.0]).T\r\n## x2=array([[1.0,2.0,3.0,4.0,5.0,6.0],[1.5,2.5,3.5,4.0,5.0,6.0]]).T\r\n## print(hartmann_func6(x1),hartmann_func6(x2))\r\n## pts=latian1_sampling([0,0,0,0,0,0],[1,1,1,1,1,1],36)\r\n pts=latian1_sampling([-100,-100],[100,100],36)\r\n pts2=latian1_sampling([-100,-100],[100,100],50)\r\n## b=goldstein_price(pts)\r\n b=expand_dims(GN_func(pts),axis=1)\r\n y=expand_dims(GN_func(pts2),axis=1)\r\n pts=transpose(pts)\r\n pts2=pts2.T\r\n## polys(pts,3)\r\n## b=hartmann_func6(pts)\r\n## b=branin_hoo(pts)\r\n## degree,idlist=get_degree(pts,b,4,delta=5)\r\n## degree,idlist=err_evaluate(pts2,pts,b,degree=2,limit=4,getdegree=True,idin=None,use_idlist=False,k=0.5,threshold=0.3,eva_type='w')\r\n## print(degree,idlist)\r\n beta,idlist=solve_polysrbf(pts,b,2,None,False,delta=5)\r\n result=cal_polysrbf(beta,pts2,pts,idlist,2,delta=5)\r\n print(shape(result))\r\n err=sqrt(mean(square(result-y)))\r\n 
print(err)\r\n##    beta,idlist=solve_polysrbf(pts,b,degree,delta=5)\r\n##    gra=cal_gradient(pts2,pts,degree,beta,idlist,delta=5)\r\n##    print(gra)\r\n    \r\n    \r\n##    print(fps_sampling(pts,6))\r\n##    sampts=pts[:,fps_sampling(pts,6)]\r\n##    print(sampts)\r\n    \r\n    \r\n    \r\n","repo_name":"Tianxinhuang/DRS","sub_path":"tf_util.py","file_name":"tf_util.py","file_ext":"py","file_size_in_byte":24829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"29159830252","text":"from transformers import BertModel, BertConfig\n\n# Load the Bert config\nconfig = BertConfig()\n# print(config)\n\n# Train the Bert with the state config\nmodel = BertModel(config)\n\n# Or just reuse the pretrained model, the cache is located at ~/.cache/huggingface/transformers.\nmodel_pre = BertModel.from_pretrained(\"bert-base-cased\")\n\n# Save the model\n# model_pre.save_pretrained(\"bert_retrained\")\n\nsequences = [\"Hello!\", \"Cool.\", \"Nice!\"]\n\n# Tokenizer converts the sequences to the encoded_sequences\nencoded_sequences = [\n    [101, 7592, 999, 102],\n    [101, 4658, 1012, 102],\n    [101, 3835, 999, 102],\n]\n\nimport torch\nmodel_inputs = torch.tensor(encoded_sequences)\n\noutput = model_pre(model_inputs)\nprint(output)\n\n\n","repo_name":"Y-Sui/transformers","sub_path":"transformers_huggingface/using_transformer/transformer_code_05.py","file_name":"transformer_code_05.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"8673664573","text":"def slow_lcm(maximum: int)-> int:\n    '''lowest common multiple of natural numbers from 1 to maximum'''\n    i = maximum\n    while True:\n        divisible = True\n        for num in range(1, maximum + 1):\n            if i % num != 0:\n                divisible = False\n                break\n        if divisible:\n            return i\n        i += 1\n\ndef fast_lcm(factors: list, lcm = 1) -> int:\n    '''Least common multiple of integers in list, multiplied by lcm'''\n    if len(factors) == 1: \n        return lcm * factors[0]\n    #Iterates function with cutting down\n    return fast_lcm(\n        [item/factors[0] * (not bool(item%factors[0])) + item * bool(item%factors[0]) for item in factors[1:]],\n        lcm * factors[0]\n    )\n\n#print(slow_lcm(10))\nprint(fast_lcm(list(range(1,100))))","repo_name":"CDL-Project-Euler/solutions","sub_path":"000-025/p005/inle.py","file_name":"inle.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"29272678588","text":"import pygame\nfrom pygame.constants import FULLSCREEN, RESIZABLE\nfrom pygame.locals import *\nfrom Game.Common import SpriteState\nfrom Game.Components.SpriteSheet import *\n\n# Initialize PyGame\npygame.init()\nSCREEN_W, SCREEN_H = 800, 600\ncanvas = pygame.Surface((SCREEN_W, SCREEN_H))\nwindow = pygame.display.set_mode((SCREEN_W,SCREEN_H))\n###################\n\n# Test Code Here\nactor = SpriteSheet(\"Character1\")\nanimslist = actor.getAnimationList(\"Idle\", SpriteState.DOWN)\nanimindex = 0\n################\n\n# PyGame Game Loop\n\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n    \n    canvas.fill((0,100,100))\n\n    window.blit(canvas,(0,0))\n    
pygame.display.update()\n\n##################","repo_name":"bryanfassett/TinyFarm","sub_path":"run_game.py","file_name":"run_game.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"25674749711","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport settings\nimport numpy as np\n\nfrom components.test_item import TestItem\nimport math\n\nimport os\nimport time\nimport csv\n\n\nclass TestSeriesResultController:\n def __init__(self, main_ctrl):\n self.main_ctrl = main_ctrl\n self.page = None\n self.plot = None\n self.canvas = None\n self.result_polar = None\n self.result_disperse = None\n self.result_total = None\n\n def connect_page(self, page):\n self.page = page\n self.canvas = page.canvas\n self.plot = page.plot\n\n self.page.save_button.config(command=self.save_series)\n\n def before_show(self):\n test_list = self.main_ctrl.get_test_list()\n x_values = []\n y_values = []\n dy_values = []\n for test in test_list:\n fluid_data = self.main_ctrl.get_fluid_data(test.fluid)\n if(fluid_data is not None):\n # calculate x,y based on fluid and angle\n x = (\n math.sqrt(fluid_data[settings.FLUID_IDX_POLAR])\n / math.sqrt(fluid_data[settings.FLUID_IDX_DISPERSE])\n )\n\n y = (\n (1.0 + math.cos(test.fit_result[\"angle\"]))\n * (\n (fluid_data[settings.FLUID_IDX_IFT])\n / (\n 2.0 * math.sqrt(fluid_data[settings.FLUID_IDX_DISPERSE])\n )\n )\n )\n\n # calculate deviation values\n # dx has no deviation\n dy1 = (\n (1.0 + math.cos(test.fit_result[\"angle\"] + test.fit_result[\"deviation\"]))\n * (\n (fluid_data[settings.FLUID_IDX_IFT])\n / (\n 2.0 * math.sqrt(fluid_data[settings.FLUID_IDX_DISPERSE])\n )\n )\n )\n\n dy2 = (\n (1.0 + math.cos(test.fit_result[\"angle\"] - test.fit_result[\"deviation\"]))\n * (\n (fluid_data[settings.FLUID_IDX_IFT])\n / (\n 2.0 * math.sqrt(fluid_data[settings.FLUID_IDX_DISPERSE])\n )\n )\n )\n\n dy = abs(dy2 - dy1) / 2.0\n\n print(\"DY1: {0:.8f} - DY2: {1:.8f}\".format(dy1, dy2))\n print(\"X: {0:.8f}, Y: {1:.8f}\".format(x, y))\n x_values.append(x)\n y_values.append(y)\n dy_values.append(dy)\n else:\n print(\"Fehler: Ungültiges Fluid \" + test.fluid)\n # end for test in test_list\n print(x_values, y_values)\n x_values = np.array(x_values)\n y_values = np.array(y_values)\n\n z = np.polyfit(x_values, y_values, 1)\n z_1d = np.poly1d(z)\n\n m = z[0]\n b = z[1]\n\n self.result_polar = m * m\n self.result_disperse = b * b\n\n self.result_total = self.result_polar + self.result_disperse\n\n # start drawing\n self.plot.cla()\n # self.plot.scatter(\n # x_values,\n # y_values,\n # marker=\"+\",\n # color=\"r\"\n # )\n self.plot.errorbar(\n x_values,\n y_values,\n dy_values,\n marker=\".\",\n linestyle=\"None\",\n capsize=3,\n color=\"r\"\n )\n\n line_x = np.arange(0, 2, 0.01)\n self.plot.plot(\n line_x,\n z_1d(line_x)\n )\n\n self.plot.grid(\n True\n )\n\n self.plot.set_title(\"Gesamtergebnis\")\n self.canvas.draw()\n\n self.page.line_label.config(\n text=\"Steigung: {0:.5f}, Achsenabschnitt: {1:.5f}\".format(\n m, b\n )\n )\n\n self.page.result_label.config(\n text=\"Polar: {0:.5f}, Dispers: {1:.5f}, Total: {2:.5f}\".format(\n self.result_polar, self.result_disperse, self.result_total\n )\n )\n\n # end before_show\n\n def save_series(self):\n time_label = time.strftime(\"%Y%m%d_%H%M%S\")\n dir_path = \"test_series_data/test_series_\" + time_label\n\n test_list = self.main_ctrl.get_test_list()\n\n params_list = []\n\n print(\"Testserie speichern\")\n print(\"Zeitpunkt: 
\" + time_label)\n try:\n # check if all tests are finished\n for test in test_list:\n if(not test.is_finished()):\n print(\"Alle Tests müssen abgeschlossen sein!\")\n raise ValueError()\n\n # create directory\n os.mkdir(dir_path)\n\n # title lines for csv\n label_list = [None] * settings.TEST_SERIES_FILE_COL_COUNT\n\n label_list[settings.SAVE_IDX_INDEX] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_INDEX]\n label_list[settings.SAVE_IDX_LABEL] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_LABEL]\n label_list[settings.SAVE_IDX_FLUID] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_FLUID]\n label_list[settings.SAVE_IDX_FIT_METHOD] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_FIT_METHOD]\n label_list[settings.SAVE_IDX_EDGE_METHOD] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_EDGE_METHOD]\n label_list[settings.SAVE_IDX_EDGE_TOP_BOTTOM] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_EDGE_TOP_BOTTOM]\n label_list[settings.SAVE_IDX_DROP_CROP] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_DROP_CROP]\n label_list[settings.SAVE_IDX_NEEDLE_CROP] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_NEEDLE_CROP]\n label_list[settings.SAVE_IDX_BASELINE_FIRST_SECOND] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_BASELINE_FIRST_SECOND]\n label_list[settings.SAVE_IDX_ANGLE] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_ANGLE]\n label_list[settings.SAVE_IDX_DEVIATION] = settings.SAVE_COL_LABELS[settings.SAVE_IDX_DEVIATION]\n\n params_list.append(label_list)\n\n # save images and gather test data\n for index, test in enumerate(test_list):\n test.original_image.save(dir_path + \"/\" + str(index) + \".png\")\n\n test_list = [None] * settings.TEST_SERIES_FILE_COL_COUNT\n\n test_list[settings.SAVE_IDX_INDEX] = index\n test_list[settings.SAVE_IDX_LABEL] = test.label\n test_list[settings.SAVE_IDX_FLUID] = test.fluid\n test_list[settings.SAVE_IDX_FIT_METHOD] = test.fit_method\n test_list[settings.SAVE_IDX_EDGE_METHOD] = test.edge_method\n test_list[settings.SAVE_IDX_EDGE_TOP_BOTTOM] = str([test.edge_value_top, test.edge_value_bottom])\n test_list[settings.SAVE_IDX_DROP_CROP] = str(test.drop_crop)\n test_list[settings.SAVE_IDX_NEEDLE_CROP] = str(test.needle_crop)\n test_list[settings.SAVE_IDX_BASELINE_FIRST_SECOND] = str(test.baseline.first_point + test.baseline.second_point)\n test_list[settings.SAVE_IDX_ANGLE] = test.fit_result[\"angle\"]\n test_list[settings.SAVE_IDX_DEVIATION] = test.fit_result[\"deviation\"]\n\n params_list.append(test_list)\n\n with open(dir_path + \"/\" + settings.TEST_RESULT_FILE_NAME, \"w\") as csv_file:\n # write params\n writer = csv.writer(\n csv_file, delimiter=\";\"\n )\n for line in params_list:\n writer.writerow(line)\n\n # save result of complete series result\n with open(dir_path + \"/\" + settings.TEST_SERIES_RESULT_FILE_NAME, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\n \"result_polar\", \"result_disperse\", \"result_total\"\n ])\n writer.writerow([\n self.result_polar, self.result_disperse, self.result_total\n ])\n\n\n except ValueError:\n print(\"Etwas ist schief gelaufen\")\n\n def before_hide(self):\n pass\n","repo_name":"bit-stone/sessile-drop-python","sub_path":"ctrl/test_series_result_ctrl.py","file_name":"test_series_result_ctrl.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"27759437916","text":"from omegaconf import DictConfig\nfrom typing import Any, Dict, Optional\n\nfrom torch.optim import Optimizer\nfrom sce.optimizers.utils import 
scale_learning_rate\n\nfrom sce.schedulers.utils import _SCHEDULERS\n\n\ndef scheduler_factory(\n optimizer: Optimizer,\n name: str,\n params: DictConfig = {},\n interval: str = 'epoch',\n num_steps_per_epoch: Optional[int] = None,\n scaler: Optional[str] = None,\n batch_size: Optional[int] = None,\n) -> Dict[str, Any]:\n \"\"\"Scheduler factory.\n\n Args:\n optimizer (Optimizer): Optimizer to wrap around.\n name (str): Name of the scheduler to retrieve the scheduler constructor from the _SCHEDULERS dict.\n params (DictConfig): Scheduler parameters for the scheduler constructor. Defaults to {}.\n interval (str): Interval to call step, if epoch call `.step()` at each epoch. Defaults to 'epoch'.\n num_steps_per_epoch (Optional[int]): Number of steps per epoch. Usefull for some schedulers. Defaults to None.\n scaler (Optional[str]): Scaler rule for the initial learning rate. Defaults to None.\n batch_size (Optional[int]): Batch size for the input of the model. Defaults to None.\n\n Returns:\n Dict[str, Any]: Scheduler configuration for pytorch lightning.\n \"\"\"\n\n if interval == 'step':\n if name == 'linear_warmup_cosine_annealing_lr':\n params.max_epochs = num_steps_per_epoch * params.max_epochs\n params.warmup_epochs = num_steps_per_epoch * params.warmup_epochs\n if params.get('eta_min'):\n params.eta_min = scale_learning_rate(params.eta_min, scaler, batch_size)\n elif name == 'cosine_annealing_lr':\n if params.get('eta_min'):\n params.eta_min = scale_learning_rate(params.eta_min, scaler, batch_size)\n\n scheduler = _SCHEDULERS[name](optimizer=optimizer, **params)\n\n return {'scheduler': scheduler, 'interval': interval}\n","repo_name":"CEA-LIST/SCE","sub_path":"sce/schedulers/scheduler_factory.py","file_name":"scheduler_factory.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"65"} +{"seq_id":"39446758920","text":"import os\nimport tarfile\nimport shutil\nfrom google.cloud import storage\nimport threading\n\n\ndef unpack(out_dir, tar_file, learning_curve=False):\n \"\"\"\n Function to instantiate data sets from tar files\n :param learning_curve: used to indicate if the tar file was made by learning_curve.py\n :param out_dir: output directory to extract tar file to\n :param tar_file: tar file name as it shows in the a cloud bucket\n :return: None, file should be extracted into out_dir\n \"\"\"\n if 'tar.gz' in tar_file and learning_curve is False:\n download_gs(tar_file=tar_file, tar_bucket=os.environ.get('TAR_BUCKET'), out_dir=os.path.join(out_dir, 'data'))\n print('Unpacking {} to {}'.format(tar_file, out_dir))\n tar = tarfile.open(os.path.join(out_dir, 'data', tar_file))\n tar.extractall(path=os.path.join(out_dir, 'data'))\n tar.close()\n elif 'tar.gz' in tar_file and learning_curve is True:\n download_gs(tar_file=tar_file, tar_bucket=os.environ.get('TAR_BUCKET'), out_dir=os.path.join(out_dir, 'data'))\n def threaded_extract(tar_file):\n tar = tarfile.open(os.path.join(out_dir, 'data', tar_file))\n tar.extractall(path=os.path.join(out_dir, 'data'))\n tar.close()\n thread1 = threading.Thread(target=threaded_extract(tar_file))\n thread1.start()\n thread1.join()\n if 'train' in tar_file and 'temp' in os.listdir(os.path.join(out_dir, 'data')):\n shutil.rmtree(os.path.join(out_dir, 'data', 'train'))\n os.rename(os.path.join(out_dir, 'data', 'temp'), os.path.join(out_dir, 'data', 'train'))\n elif os.path.isfile(tar_file) and 'tar.gz' in tar_file and 's3' not in tar_file:\n print('Unpacking 
{}'.format(tar_file))\n tar = tarfile.open(tar_file)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n tar.extractall(path=out_dir)\n tar.close()\n elif 'tar.gz' in tar_file and 's3' in tar_file:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n # download first then untar\n t = download_s3(tar_file, out_dir)\n print('Unpacking {}'.format(t))\n tar = tarfile.open(t)\n tar.extractall(path=out_dir)\n tar.close()\n\ndef make_blc_dir(blc_dir, blc_species):\n os.mkdir(blc_dir)\n for specie in blc_species:\n os.mkdir(os.path.join(blc_dir, specie))\n\ndef has_number(tar_file):\n return any(letter.isdigit() for letter in tar_file)\n\n\ndef download_gs(tar_file, tar_bucket, out_dir):\n \"\"\"\n Downloading from google storage buckets\n :param tar_file: tar file name in bucket. If location is gs://mbari-bucket/train.tar.gz, tar_file = 'train.tar.gz'\n :param tar_bucket: tar bucket to look in. If location is gs://mbari-bucket/train.tar.gz, tar_bucket = 'mbari-bucket'\n :param out_dir: output directory, usually something like a /data folder.\n :return: None, tar file should be downloaded from cloud to local.\n \"\"\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(tar_bucket)\n blob = bucket.blob(tar_file)\n out_file = os.path.join(out_dir, tar_file)\n blob.download_to_filename(out_file)\n\n\ndef list_bucket_contents(bucket):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket)\n classes = []\n blobs = bucket.list_blobs()\n uniques = set()\n for item in blobs:\n l = str(item).split(', ')\n current_species = l[1].split('/')[1]\n if current_species not in uniques:\n classes.append(current_species)\n uniques.add(current_species)\n return classes\n\n\ndef download_s3(source_file, target_dir):\n try:\n import boto3\n from botocore.client import Config\n import botocore\n import os\n env = os.environ.copy()\n from urllib.parse import urlparse\n urlp = urlparse(source_file)\n endpoint_url = 'http://' + urlp.netloc\n bucket_name = urlp.path.split('/')[1]\n KEY_IN = urlp.path.split(bucket_name + '/')[1]\n KEY_OUT = os.path.join(target_dir, os.path.basename(urlp.path))\n print('Downloading {} from {} to {}'.format(KEY_IN, endpoint_url, KEY_OUT))\n s3 = boto3.resource('s3',\n endpoint_url=endpoint_url,\n aws_access_key_id=env['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=env['AWS_SECRET_ACCESS_KEY'],\n config=Config(signature_version='s3v4'),\n region_name='us-east-1')\n\n try:\n s3.Bucket(bucket_name).download_file(KEY_IN, KEY_OUT)\n return KEY_OUT\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n print(e)\n except Exception as e:\n raise(e)\n\n","repo_name":"AtlasHale/ml_classify","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"19585297254","text":"from MenuItem import resources, menu\n\ndef report():\n resourcesDict = []\n for resource in resources:\n resourcesDict.append(resources[resource])\n\n return resourcesDict\n\ndef is_resource_sufficient(drink):\n for _ in drink['ingredients']:\n if (drink['ingredients']['coffee'] > resources['coffee'] or drink['ingredients']['milk'] > resources['milk'] or\n drink['ingredients']['water'] > resources['water']):\n return False\n return True\n\n\ndef messageResources(resource):\n message = \"\"\n\n if resource[0] < 50:\n message += \" water\"\n if resource[1] < 100:\n 
message += \" milk\"\n if resource[2] < 18:\n message += \" coffee\"\n\n return message\n\ndef make_coffee(order, coffeType):\n print(\"Prepare the coffee...\")\n # rest water\n resources['water'] -= order['ingredients']['water']\n print(\"####\")\n print(\"Turn water\")\n # rest milk\n if coffeType != 'espresso':\n resources['milk'] -= order['ingredients']['milk']\n print(\"########\")\n print(\"Turn milk\")\n # rest coffee\n resources['coffee'] -= order['ingredients']['coffee']\n print(\"############\")\n print(\"Turn coffee\")\n # add money\n resources['money'] += order['cost']\n print(\"################\")\n print(f\"Here is your {coffeType}. Enjoy!\")\n print(\"####################\")\n existCoffeInDict = False","repo_name":"BrandConstantin/python","sub_path":"projects/Intermediate/02_CoffeeMachine_OOP/CoffeeMaker.py","file_name":"CoffeeMaker.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"23760206700","text":"# Given a string and a pattern, find the smallest substring in the given string which has all the characters of the given pattern.\n# Grokking https://www.educative.io/courses/grokking-the-coding-interview/xoyL4q6ApNE\n# Leetcode https://leetcode.com/problems/minimum-window-substring/\n# Solution https://leetcode.com/problems/minimum-window-substring/solution/\n# Time Complexity O(N+M)\n# Space Complexity O(M), where M is the number of distinct characters in given pattern\n\n\ndef find_substring(str1, pattern):\n char_frequency_map = {}\n window_start, matched = 0, 0\n min_substr_len = float('inf')\n substr_start = 0\n for c in pattern:\n char_frequency_map[c] = char_frequency_map.get(c, 0)+1\n\n # expand the window\n for window_end in range(len(str1)):\n right_char = str1[window_end]\n\n # if character is in freq map, reduce its count\n # if freq for a char is 0, that means its permuation was matched in given window\n if right_char in char_frequency_map:\n char_frequency_map[right_char] -= 1\n if char_frequency_map[right_char] == 0:\n matched += 1\n\n # if matched and keys of char_frequency_map match, then permutation exists\n # shrink the window\n while matched == len(char_frequency_map):\n if min_substr_len > window_end-window_start+1:\n min_substr_len = window_end-window_start+1\n substr_start = window_start\n\n left_char = str1[window_start]\n window_start += 1\n if left_char in char_frequency_map:\n if char_frequency_map[left_char] == 0:\n matched -= 1\n char_frequency_map[left_char] += 1\n if min_substr_len > len(str1):\n return \"\"\n return str1[substr_start:substr_start+min_substr_len]\n\n\ndef main():\n print(find_substring(\"aabdec\", \"abc\"))\n print(find_substring(\"abdbca\", \"abc\"))\n print(find_substring(\"adcad\", \"abc\"))\n print(find_substring(\"ADOBECODEBANC\", \"ABC\"))\n print(find_substring(\"ab\", \"a\"))\n\nmain()\n","repo_name":"harsh2kumar/TechSeries","sub_path":"Data Structures and Algorithms/Pattern - Sliding Window/Smallest Window containing Substring.py","file_name":"Smallest Window containing Substring.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"72688274446","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Intro\n# \n# - Tutorial source: https://docs.tweepy.org/en/latest/getting_started.html\n# - Original plan: https://realpython.com/twitter-sentiment-python-docker-elasticsearch-kibana/#twitter-streaming-api\n# - Later 
found out that this was obsolote tutorial, from more than 5 years ago, with old docker version\n\n# # Hello Tweepy\n\nfrom config_twitter import *\nimport tweepy\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\npublic_tweets = api.home_timeline()\nfor tweet in public_tweets:\n print(tweet.text)\n\n\n# ## Models\n\n# Get the User object for twitter...\nuser = api.get_user('twitter')\n\nprint(user.screen_name)\nprint(user.followers_count)\nfor friend in user.friends():\n print(friend.screen_name)\n\n\n# # Streaming with Tweepy\n\n# ## Create StreamListener\n\nimport tweepy\n#override tweepy.StreamListener to add logic to on_status\nclass MyStreamListener(tweepy.StreamListener):\n\n def on_status(self, status):\n print(status.text)\n\n\n# ## Creating a Stream\n\nmyStreamListener = MyStreamListener()\nmyStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)\n\n\n# ## Starting a Stream\n\nmyStream.filter(track=['python'])\nmyStream.filter(follow=[\"enlik\"])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"enliktjioe/28daysofnlp","sub_path":"week2_sentiment_analysis/script/day11_tweepy_streaming.py","file_name":"day11_tweepy_streaming.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"65"} +{"seq_id":"9698888201","text":"class nodeLCS:\n def __init__(self,tam=0,dir='') -> None:\n self.tam = tam\n self.dir = dir\n\n\ndef LongaSubseqComum(X,m,Y,n,LCS):\n #ANTES DA CHAMADA:\n #m = len(X)+1\n #n = len(Y)+1\n #LCS = [None]*m\n #for i in range(0,m):\n # LCS[i] = [None]*n\n #for i in range(0,m):\n # for j in range(0,n):\n # LCS[i][j] = nodeLCS()\n \n #casos base\n for i in range(0,m):\n LCS[i][0].tam = 0\n LCS[i][0].dir = '*'\n for j in range(0,n):\n LCS[0][j].tam = 0\n LCS[0][j].dir = '*'\n\n #caso geral\n for i in range(1,m):\n for j in range(1,n):\n if(X[i-1] == Y[j-1]):\n LCS[i][j].tam = 1 + LCS[i-1][j-1].tam\n LCS[i][j].dir = 'D'\n else:\n if(LCS[i-1][j].tam >= LCS[i][j-1].tam):\n LCS[i][j].tam = LCS[i-1][j].tam\n LCS[i][j].dir = 'A'\n else:\n LCS[i][j].tam = LCS[i][j-1].tam\n LCS[i][j].dir = 'E'\n \n for i in range(0,m):\n for j in range(0,n):\n print(LCS[i][j].tam,end='-')\n print(LCS[i][j].dir,end=' ')\n print()\n\ndef printLCS(LCS,i,j,X):\n #printLCS(LCS,m-1,n-1,X)\n if not(i==0 or j==0):\n if(LCS[i][j].dir == 'D'):\n printLCS(LCS,i-1,j-1,X)\n print(X[i-1])\n elif(LCS[i][j].dir == 'A'):\n printLCS(LCS,i-1,j,X)\n else:\n printLCS(LCS,i,j-1,X)\n\nX = \"xcxagx\"\nY = \"gcagf\"\n#X = \"abcbdab\"\n#Y = \"bcdb\"\nm = len(X)+1\nn = len(Y)+1\nLCS = [None]*m\nfor i in range(0,m):\n LCS[i] = [None]*n\n\nfor i in range(0,m):\n for j in range(0,n):\n LCS[i][j] = nodeLCS()\n\nLongaSubseqComum(X,m,Y,n,LCS)\nprint(\"Size of LCS: \",LCS[m-1][n-1].tam)\nprintLCS(LCS,m-1,n-1,X)","repo_name":"Jp9910/Projeto-e-Analise-de-Algoritmos","sub_path":"ProgramacaoDinamica/LongestCommonSubsequence.py","file_name":"LongestCommonSubsequence.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"43052970094","text":"#selenium, pandas, lxml, time, html5lib\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import Keys\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.by import By\r\nfrom 
selenium.webdriver.support.wait import WebDriverWait\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom openpyxl import Workbook\r\nfrom openpyxl import load_workbook\r\nfrom pathlib import Path\r\nfrom selenium.webdriver.chrome.service import Service\r\n\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\n#opens chrome\r\n#driver = webdriver.Chrome('./chromedriver')\r\n#driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\r\n\r\n#option to keep browser open\r\n#chrome_options = Options()\r\n#chrome_options.add_experimental_option(\"detach\", True)\r\n\r\nExcel_File_Path = 'VinList.xlsx'\r\npath = Path(Excel_File_Path)\r\n\r\nStart_Column = 1\r\nCheck_Start_Row = 2\r\n\r\n#vinList = ['1C4SJVFJ1NS103023','1C4SJVFJ1NS103023','1C4SJVDT6NS110263','1C4SJVDT3NS102718','1C4SJVDT9NS104439','1C4SJVDT7NS104178','1C4SJVFJ9NS101827','1C4SJVFJ7NS108484','1C4SJVDT6NS104026','1C4SJVDT6NS131985','1C4SJVDT9NS103761','1C4SJVGJ3NS101739','1C4SJVGJ3NS101739','1C4SJVDT3NS106672','1C4SJVDT3NS103982','1C4SJVDT6NS101434','1C4SJVDT6NS101434','1C4SJVDT6NS101434','1C4SJVDT6NS101434','1C4SJVFJ0NS107130','1C4SJVDT4NS101691','1C4SJVDT2NS105660','1C4SJVDT2NS105660','1C4SJVGJ5NS101533','1C4SJVGJ3NS115625','1C4SJVDT3NS104372','1C4SJVDT2NS104931','1C4SJVDT2NS104931','1C4SJVDT6NS104088','1C4SJVDT1NS104886','1C4SJVDT7NS105279','1C4SJVDT8NS103122','1C4SJVDT8NS103122','1C4SJVDT9NS103033','1C4SJVDT8NS107512','1C4SJVGJ5NS110197','1C4SJVDT7NS106612','1C4SJVGJ9NS101227','1C4SJVDT1NS103673']\r\n#claimList=['039840','039840','657362','758780','636675','183029','537066','748895','342780','182977','16774C','318069','318069','034132','001369','071988','071988','071988','071988','768720','265941','C20078','D00780','013806','098504','228829','441425','A40605','354140','660492','574547','142479','142479','244156','597720','A51246','433961','A88588','W81202']\r\nvinList=[]\r\nclaimList=[]\r\n\r\nif path.is_file():\r\n print(f'The file {Excel_File_Path} exists')\r\n workbook = load_workbook(filename=\"VinList.xlsx\")\r\nelse:\r\n print(f'The file {Excel_File_Path} does not exist')\r\n workbook = Workbook()\r\n print(f'Created file:{Excel_File_Path}')\r\nsheet = workbook.active\r\n\r\nusername = str(sheet.cell(row = 1, column = 1).value)\r\npassword = str(sheet.cell(row = 1, column = 2).value)\r\nloopIndex=0\r\n\r\nwhile sheet.cell(row = Check_Start_Row, column = 1).value != None:\r\n #print(sheet.cell(row = 2, column = Start_Column).value)\r\n #print(\"column: \", Start_Column)\r\n vinList.append(str(sheet.cell(row = Check_Start_Row, column = 1).value))\r\n claimList.append(str(sheet.cell(row=Check_Start_Row, column=2).value))\r\n Check_Start_Row += 1\r\n\r\ndef button_clicked(browser):\r\n global loopIndex\r\n if (browser.find_element(By.ID, 'next_claim_button').get_attribute('value')==\"true\"):\r\n loopIndex+=1\r\n if (loopIndex > (len(vinList)-1)):\r\n loopIndex=0\r\n browser.execute_script(\"alert('already at last claim, Going to first claim in 3 seconds');\")\r\n time.sleep(3)\r\n search_for_claim(browser)\r\n return True\r\n if (browser.find_element(By.ID, 'previous_claim_button').get_attribute('value')==\"true\"):\r\n loopIndex-=1\r\n if (loopIndex < 0):\r\n loopIndex=(len(vinList)-1)\r\n browser.execute_script(\"alert('Already at first claim, Going to last claim in 3 seconds');\")\r\n time.sleep(3)\r\n search_for_claim(browser)\r\n return 
True\r\n\r\nprint(\"claimTabView('{vinFirst}','{vinLast}','','{claimNum}','1','C','','');\".format( vinFirst=vinList[0][0:9:1] , vinLast=vinList[0][9:17:1] , claimNum=claimList[0] ))\r\ndef Open_GCS():\r\n try:\r\n # option to keep browser open\r\n chrome_options = Options()\r\n #chrome_options = webdriver.ChromeOptions()\r\n chrome_options.binary_location = \"drivers\\chrome-win\\chrome.exe\"\r\n #service_obj = Service(\"/drivers/chromedriver.exe\")\r\n driverPath = \"drivers\\chromedriver.exe\"\r\n\r\n #chrome_options.add_experimental_option(\"detach\", True)\r\n\r\n #chrome_options = webdriver.Chrome(service=Service(ChromeDriverManager().install()))\r\n ##chrome_options.add_argument(\"user-data-dir=C:\\\\Users\\\\Desktop\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\")\r\n global browser # this will prevent the browser variable from being garbage collected\r\n\r\n #this adds user optiosn for chrome\r\n #browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)\r\n browser = webdriver.Chrome(executable_path=driverPath,options=chrome_options)\r\n # Connect\r\n browser.set_window_size(900, 900)\r\n\r\n browser.get(\"https://sts.fiatgroup.com/adfs/ls/IdpInitiatedSignOn.aspx?RelayState=RPID%3Dhttps%253A%252F%252Fwww.esupplierconnect.com%252Fsaml2%252Fsp%252Facs%26RelayState%3Dfiat&fhr=default\")\r\n #browser.get(\"https://webprod.extra.chrysler.com/VehiInquiryWeb/viTabsExecute?urlFlag=\")\r\n\r\n\r\n #login\r\n try:\r\n element = WebDriverWait(browser, 10).until(\r\n EC.presence_of_element_located((By.ID, \"userNameInput\"))\r\n )\r\n finally:\r\n browser.find_element(By.ID, 'userNameInput').send_keys(username, Keys.TAB, password, Keys.ENTER)\r\n #browser.find_element(By.Id(\"showMoreappBtnTxt\")).Click()\r\n time.sleep(15)\r\n start_search_for_claim(browser)\r\n\r\n #browser.get(\"https://webprod.extra.chrysler.com/VehiInquiryWeb/viTabsExecute?urlFlag=\")\r\n #browser.find_element(By.Id(\"showMoreappBtnTxt\")).Click()\r\n #https://webprod.extra.chrysler.com/VehiInquiryWeb/searchExecute\r\n #browser.find_element(By.NAME, 'username').send_keys('MYEMAIL', Keys.TAB, 'MYPW', Keys.ENTER)\r\n except Exception as e:\r\n print (e, 'GCS')\r\n\r\ndef start_search_for_claim(browser):\r\n browser.execute_script(\"showMoreApps();\")\r\n browser.find_element_by_xpath('//span[contains(text(),\"Supplier Warranty Management - WIS,EWT,GCS,QNA\")]').click()\r\n time.sleep(5)\r\n browser.get(\"https://webprod.extra.chrysler.com/VehiInquiryWeb/viTabsExecute?urlFlag=\")\r\n time.sleep(5)\r\n\r\n search_for_claim(browser)\r\n\r\n #browser.find_element_by_link_text('Supplier Warranty Management - WIS,EWT,GCS,QNA').click()\r\n #browser.find_element_by_link_text('System Links').click()\r\n #browser.find_element_by_link_text('GCS - Vehicle Claims Inquiry').click()\r\n #browser.find_element(By.ID, 'viVin2').send_keys(vinList[0], Keys.ENTER)\r\n ## JSCommand=\"claimTabView('{vinFirst}','{vinLast}','','{claimNum}','1','C','','');\".format( vinFirst=vinList[0][0:9:1] , vinLast=vinList[0][9:17:1] , claimNum=claimList[0])\r\n ## browser.execute_script(str(JSCommand))\r\n ## browser.execute_script(\"loadTabAction(6);\")\r\n ## time.sleep(1)\r\n## mycode = \"\"\"document.getElementById('tabMain').insertAdjacentHTML('afterend', '
');\"\"\"\r\n## browser.execute_script(str(mycode))\r\n\r\ndef search_for_claim(browser):\r\n while True:\r\n JSCommand = \"claimTabView('{vinFirst}','{vinLast}','','{claimNum}','1','C','','');\".format(\r\n vinFirst=vinList[loopIndex][0:9:1], vinLast=vinList[loopIndex][9:17:1], claimNum=claimList[loopIndex])\r\n browser.execute_script(str(JSCommand))\r\n browser.execute_script(\"loadTabAction(6);\")\r\n time.sleep(.5)\r\n\r\n mycode = \"\"\"document.getElementById('tabMain').insertAdjacentHTML('afterend', '

1/85

');\"\"\"\r\n browser.execute_script(str(mycode))\r\n browser.execute_script(\"document.querySelector('#gview_clmnarrGrid > div.ui-jqgrid-bdiv').style = 'height:500px;width:750px;'\")\r\n WebDriverWait(browser, timeout=100000).until(button_clicked)\r\n\r\n\r\n\r\n## double pound is working code\r\n## htmlSRC = browser.page_source\r\n #soup = BeautifulSoup(htmlSRC, 'html.parser')\r\n\r\n #div = soup.select_one(\"id#clmnarrGrid\")\r\n #print(div)\r\n #table = pd.read_html(div)\r\n\r\n #HTMLTableRead = pd.read_html(browser.page_source, attrs={'id': 'clmnarrGrid'})\r\n ## pd.set_option('display.max_colwidth', None)\r\n ## pd.set_option(\"max_columns\", None) # show all cols\r\n ## pd.set_option('max_colwidth', None) # show full width of showing cols\r\n\r\n ## HTMLTableRead = pd.read_html(htmlSRC, attrs={'id': 'clmnarrGrid'})\r\n ## print(HTMLTableRead)\r\n\r\n ## dataFromTables = HTMLTableRead[0]\r\n ## print(dataFromTables)\r\n\r\n\r\n\r\nOpen_GCS()\r\n\r\n\r\n\r\n#opens website\r\n#driver.get(\"https://webprod.extra.chrysler.com/VehiInquiryWeb/viTabsExecute?urlFlag=\")\r\n\r\n\r\n\r\n#print(driver.title)\r\n\r\n\r\n# Press the green button in the gutter to run the script.\r\n#if __name__ == '__main__':\r\n\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n\r\n\r\n#
\r\n # \r\n # \r\n#
\r\n#document.getElementById('tabMain').insertAdjacentHTML('afterend', '
');\r\n","repo_name":"ismaeel96/WS-claim-Assist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"41437011845","text":"#!/usr/bin/env python\nimport ugali.candidate.associate\nimport ugali.utils.parser\nimport numpy as np\nfrom ugali.utils.projector import gal2cel,ang2const,ang2iau\n\n#CATALOGS = ['McConnachie12','Rykoff14', 'Harris96', 'Corwen04', 'Nilson73', 'Webbink85', 'Kharchenko13', 'WEBDA14']\nCATALOGS = ['McConnachie12', 'Harris96', 'Corwen04', 'Nilson73', 'Webbink85', 'Kharchenko13', 'Bica08', 'WEBDA14', 'ExtraDwarfs','ExtraClusters']\n\nif __name__ == \"__main__\":\n import argparse\n description = \"python script\"\n parser = ugali.utils.parser.Parser(description=description)\n parser.add_coords(required=True,radius=True,targets=True)\n parser.add_argument('-n','--nnearest',default=1,type=int)\n opts = parser.parse_args()\n\n catalog = ugali.candidate.associate.SourceCatalog()\n for i in CATALOGS:\n catalog += ugali.candidate.associate.catalogFactory(i)\n\n for name,(glon,glat,radius) in zip(opts.names, opts.coords):\n ra,dec = gal2cel(glon,glat)\n iau = ang2iau(glon,glat)\n const = ang2const(glon,glat)[0]\n if radius <= 0: radius = None\n \n idx1,idx2,sep = catalog.match([glon],[glat],tol=radius,nnearest=opts.nnearest)\n match = catalog[idx2]\n\n if len(match) > 0:\n n = match[0]['name']\n s = sep[0]\n l,b = match[0]['glon'],match[0]['glat']\n r,d = match[0]['ra'],match[0]['dec']\n else:\n n = 'NONE'\n s = np.nan\n l,b = np.nan,np.nan\n r,d = np.nan,np.nan\n\n \n if opts.gal is not None:\n msg='%s [%s, %s] (GLON=%.2f,GLAT=%.2f) --> %s (GLON=%.2f,GLAT=%.2f): %.4f'%(name,iau,const,glon,glat,n,l,b,s)\n else:\n msg='%s [%s, %s] (RA=%.2f,DEC=%.2f) --> %s (RA=%.2f,DEC=%.2f): %.4f'%(name,iau,const,ra,dec,n,r,d,s)\n print(msg)\n\n\n #for i,c in enumerate(opts.coords):\n # glon,glat,radius\n # if i in idx1:\n # name = catalog[idx2[np.where(idx1==i)[0]]]['name']\n # s = sep[np.where(idx1==i)[0]]\n # else:\n # name = \"NONE\"\n # s = np.nan\n # \n # if opts.gal is not None:\n # msg='%s (GLON=%.2f,GLAT=%.2f): %.4f'%(name,c[1],c[2],s)\n # else:\n # ra,dec = gal2cel(c1,c2)\n # msg='%s (RA=%.2f,DEC=%.2f): %.4f'%(name,ra,dec,s)\n # print msg\n","repo_name":"DarkEnergySurvey/ugali","sub_path":"ugali/scratch/lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"65"} +{"seq_id":"675810445","text":"import gym\nimport numpy as np\nfrom ppo_torch_cont import Agent\n# from ppo_torch import Agent\nimport time\n\nif __name__ == '__main__':\n # initialize environment and initial conditions\n start_time = time.time()\n env_name = 'MountainCarContinuous-v0'\n env = gym.make(env_name)\n N = 20 # trajectories to store in memory\n batch_size = 5 # batchsize for update\n n_epochs = 4\n alpha = 0.001\n\n # initialize agent\n agent = Agent(env=env, batch_size=batch_size,\n alpha=alpha, n_epochs=n_epochs)\n n_games = 10000\n\n figure_file = f'plots/{env_name}_cont.png'\n\n best_score = env.reward_range[0]\n score_history = []\n\n learn_iters = 0\n avg_score = 0\n n_steps = 0\n\n for i in range(n_games):\n observation = env.reset()\n done = False\n score = 0\n while not done:\n env.render()\n action, prob, val = agent.choose_action(observation)\n observation_, reward, done, info = env.step([action]) # need to make into array to convert to tensor\n 
n_steps += 1\n\n score += reward\n agent.remember(observation, action, val, prob, reward, done) # store trajectory in PPO memory\n\n # update parameters when memory is full\n if n_steps % N == 0:\n agent.learn()\n learn_iters += 1\n observation = observation_\n score_history.append(score)\n avg_score = np.mean(score_history[-100:])\n\n if avg_score > best_score:\n best_score = avg_score\n agent.save_models()\n\n print('episode', i, 'score %.1f' % score, 'avg score %.1f' % avg_score,\n 'time_steps', n_steps, 'learning_steps', learn_iters)\n\n\n agent.save_agent(actor_path=env_name + '_PPO_actor.h5',critic_path=env_name + '_PPO_critic.h5')\n np.save(env_name+'_PPO.npy',score_history)\n x = [i+1 for i in range(len(score_history))]\n env.close()\n \n end_time = time.time() - start_time\n time_text = f'the time it takes to run is {end_time}'\n print('time recorded')\n with open('times/'+env_name+'time.txt', 'w') as f:\n f.write(time_text)\n","repo_name":"shoang22/rl-methods-review","sub_path":"ppo/ppo_main_cont.py","file_name":"ppo_main_cont.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"672735702","text":"import os\nimport sys\nfrom datetime import datetime, timezone\nfrom sqlalchemy import Column, ForeignKey, Integer, String, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom eralchemy import render_er\n\nBase = declarative_base()\n\nclass User(Base):\n __tablename__ = 'user'\n # Here we define columns for the table person\n # Notice that each column is also a normal Python instance attribute.\n id = Column(Integer, primary_key=True)\n name = Column(String(112), nullable=False)\n username = Column(String(10), nullable=False, unique=True)\n password = Column(String(100), nullable=False)\n email_address = Column(String(100), nullable=False, unique=True)\n posts = relationship(\"Post\", backref=\"author\")\n comments = relationship(\"Comment\", backref=\"author\")\n sent_messages = relationship(\"Messages\", backref=\"sender\")\n received_messages = relationship(\"Messages\", backref=\"recipient\")\n\nclass Post(Base):\n __tablename__ = 'post'\n # Here we define columns for the table address.\n # Notice that each column is also a normal Python instance attribute.\n id = Column(Integer, primary_key=True)\n user_id = Column(String(250), ForeignKey('user.id'))\n photo_url = Column(String(250))\n caption = Column(String(250), nullable=False)\n date = Column(DateTime())\n comments = relationship(\"Comment\", backref=\"post\")\n\nclass Comment(Base):\n __tablename__ = 'comment'\n id = Column(Integer, primary_key=True)\n user_id = Column(String(250), ForeignKey('user.id'))\n post_id = Column(Integer, ForeignKey('user.id'))\n text = Column(String)\n date = Column(DateTime())\n\nclass Message(Base):\n __tablename__ = 'message'\n sender_id = Column(Integer, ForeignKey('user.id'))\n recipient_id = Column(Integer, ForeignKey('user.id'))\n id = Column(Integer, primary_key=True)\n text = Column(String(250))\n created_at = Column(DateTime())\n\n def to_dict(self):\n return {}\n\n## Draw from SQLAlchemy base\nrender_er(Base, 'diagram.png')","repo_name":"NizaV/Instagram-Data-Modeling","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} 
+{"seq_id":"17044202134","text":"import support.bokeh_helper as bh\nimport qsim.sim_analyze as simeval\nfrom dashboard.settings import *\nfrom support.common import *\nfrom support.cache import Cache\nfrom qsim.sims.tfidf_cos_sim import TfidfCosSim\nfrom qsim.sims.exact_sim import ExactSim\n\n\nclass Model:\n sim = TfidfCosSim()\n base_df = None\n cache = Cache()\n\n @classmethod\n def _compute_sim_matrix(cls, df, cs_only):\n return cls.sim.get_similarity_matrix(df, cs_only)\n\n # --- caching -----------------------------------------------------------\n\n @classmethod\n def _using_cache(cls, method, payload, relevant):\n key = method.__name__ + str([(k, None if k not in payload else payload[k]) for k in relevant])\n\n res = cls.cache.retrieve(key)\n if res is not None:\n return res\n\n res = method(payload)\n\n cls.cache.store(key, res)\n\n return res\n\n @classmethod\n def get_res_sim_matrix(cls, payload):\n return cls._using_cache(cls._get_res_sim_matrix, payload, relevant=[KW])\n\n @classmethod\n def get_res_df(cls, payload):\n return cls._using_cache(cls._get_res_df, payload, relevant=[KW])\n\n @classmethod\n def get_bar_chart_df(cls, payload):\n return cls._using_cache(cls._get_bar_chart_df, payload, relevant=[KW, SELECTED_RES_INDEX, CS_ONLY])\n\n @classmethod\n def get_heatmap_df(cls, payload):\n return cls._get_heatmap_df(payload) # don't use cache here - we always want a new random chart\n\n @classmethod\n def get_comp_df(cls, payload):\n return cls._get_comp_df(payload)\n\n # --- functions -----------------------------------------------------------\n\n @classmethod\n def _get_res_sim_matrix(cls, payload):\n res_df = cls.get_res_df(payload)\n cs_only = payload[CS_ONLY] if CS_ONLY in payload else False\n return cls._compute_sim_matrix(res_df, cs_only)\n\n @classmethod\n def _get_res_df(cls, payload):\n df = cls.base_df[cls.base_df['all_text'].str.contains(payload[KW], na=False, case=False)]\n\n return df.iloc[:MAX_SEARCH_RES]\n\n @classmethod\n def _get_bar_chart_df(cls, payload):\n if SELECTED_RES_INDEX not in payload:\n return None\n\n df = cls.get_res_df(payload)\n selected_res_index = payload[SELECTED_RES_INDEX]\n cs_only = payload[CS_ONLY] if CS_ONLY in payload else False\n\n sim_matrix = cls.get_res_sim_matrix(payload)\n\n df['similarity'] = sim_matrix[selected_res_index]\n df['color'] = df['survey_id'].apply(lambda si: 'green' if si == df.iloc[selected_res_index]['survey_id'] else 'red')\n\n df = df.drop(df.index[selected_res_index])\n\n if cs_only:\n df = df[df['color'] == 'red']\n\n df = df.sort_values(by='similarity', ascending=False)\n\n df['index'] = range(len(df))\n\n df = df.iloc[:MAX_BARS]\n\n return df\n\n @classmethod\n def _get_heatmap_df(cls, payload):\n df = cls.get_res_df(payload)\n cs_only = payload[CS_ONLY] if CS_ONLY in payload else False\n\n if len(df) > MAX_HEATMAP_ITEMS:\n df = df.sample(MAX_HEATMAP_ITEMS)\n\n sim_matrix = cls._compute_sim_matrix(df, cs_only)\n\n hm_df = bh.get_heatmap_df(df, sim_matrix, 'similarity')\n\n return hm_df\n\n @classmethod\n def _get_comp_df(cls, payload):\n if COMPARED_BASE not in payload:\n return None\n\n if payload[COMPARED_BASE] == COMPARED_BASE_BAR:\n res_df = cls.get_res_df(payload)\n selected_res_index = payload[SELECTED_RES_INDEX]\n bar_chart_df = cls.get_bar_chart_df(payload)\n selected_bar_index = payload[SELECTED_BAR_INDEX]\n\n qx = res_df.iloc[selected_res_index]\n qy = bar_chart_df.iloc[selected_bar_index]\n\n return cls._create_comp_df(qx, qy)\n\n if payload[COMPARED_BASE] == COMPARED_BASE_HM:\n uuid_x = 
payload[SELECTED_HM_X]\n uuid_y = payload[SELECTED_HM_Y]\n\n qx = cls.base_df.loc[uuid_x]\n qy = cls.base_df.loc[uuid_y]\n\n return cls._create_comp_df(qx, qy)\n\n @classmethod\n def _create_comp_df(cls, qx, qy):\n col2doc_sim = [(c, cls.sim.get_text_sim) for c in ANALYSED_COLS + ['survey_name']]\n exact_sim = ExactSim()\n col2doc_sim.extend([(c, exact_sim.get_text_sim) for c in ['survey_id', 'form_type', 'tr_code']])\n\n df = simeval.create_comp_df(qx, qy, DISPLAYED_COLS, dict(col2doc_sim))\n\n return df\n\n @classmethod\n def init(cls):\n try:\n cls.base_df = load_clean_df()\n except:\n fpath = BUNDLED_DATA_DIR + '/clean-light.csv'\n cls.base_df = load_clean_df(fpath=fpath)\n","repo_name":"ONSBigData/qbank-tools","sub_path":"dashboard/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"65"} +{"seq_id":"28365002569","text":"# Aula 21: Funções - Parte 2\ndef somar(a=0,b=0,c=0):\n \"\"\"\n Faz a soma de três valores e mostra o resultado na tela. Os três valores são opcionais.\n :param a: primeiro valor\n :param b: segundo valor\n :param c: terceiro valor\n Função criada por Bruna Manzi\n \"\"\"\n s = a + b + c\n print(f'A soma vale {s}')\nsomar(3,2,5)\nsomar()","repo_name":"brunamanzi/Aulas.CursoemVideo","sub_path":"aula021.py","file_name":"aula021.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"12102561842","text":"import os\r\nimport json\r\nimport requests # pip install requests\r\n\r\n# The authentication key (API Key).\r\n# Get your own by registering at https://app.pdf.co\r\nAPI_KEY = \"****************************************************\"\r\n\r\n# Base URL for PDF.co Web API requests\r\nBASE_URL = \"https://api.pdf.co/v1\"\r\n\r\n### STEP 1: GENERATE BARCODE ###\r\n\r\n# Result file name\r\nResultFile = \".\\\\barcode.png\"\r\n# Barcode type. 
See valid barcode types in the documentation https://apidocs.pdf.co\r\nBarcodeType = \"Code128\"\r\n# Barcode value\r\nBarcodeValue = \"qweasd123456\"\r\n\r\n# Barcode Source File\r\nSourceFile = ResultFile\r\n\r\n\r\ndef main(args = None):\r\n # Generate Barcode\r\n generateBarcode(ResultFile) \r\n\r\n # Upload Barcode to Cloud \r\n uploadedBarcodeUrl = uploadFile(SourceFile)\r\n \r\n # Convert HTML to PDF\r\n GeneratePDFFromHtml(SampleHtml, DestinationFile)\r\n \r\n # Upload PDF to Cloud\r\n uploadedPdfUrl = uploadFile(SourcePdfFile)\r\n\r\n # PDF with Barcode Output\r\n addImageToExistingPdf(uploadedPdfUrl, uploadedBarcodeUrl, DestinationFile)\r\n\r\n\r\n\r\ndef generateBarcode(destinationFile):\r\n \"\"\"Generates Barcode using PDF.co Web API\"\"\"\r\n\r\n # Prepare requests params as JSON\r\n # See documentation: https://apidocs.pdf.co\r\n parameters = {}\r\n parameters[\"name\"] = os.path.basename(destinationFile)\r\n parameters[\"type\"] = BarcodeType\r\n parameters[\"value\"] = BarcodeValue\r\n\r\n # Prepare URL for 'Barcode Generate' API request\r\n url = \"{}/barcode/generate\".format(BASE_URL)\r\n\r\n # Execute request and get response as JSON\r\n response = requests.post(url, data=parameters, headers={ \"x-api-key\": API_KEY })\r\n if (response.status_code == 200):\r\n json = response.json()\r\n\r\n if json[\"error\"] == False:\r\n # Get URL of result file\r\n resultFileUrl = json[\"url\"] \r\n # Download result file\r\n r = requests.get(resultFileUrl, stream=True)\r\n if (r.status_code == 200):\r\n with open(destinationFile, 'wb') as file:\r\n for chunk in r:\r\n file.write(chunk)\r\n #print(f\"Result file saved as \\\"{destinationFile}\\\" file.\")\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n else:\r\n # Show service reported error\r\n print(json[\"message\"])\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n\r\n\r\n### STEP 2: UPLOAD FILE AS TEMPORARY FILE ###\r\n\r\n\r\n\r\ndef uploadFile(fileName):\r\n \"\"\"Uploads file to the cloud\"\"\"\r\n \r\n # 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.\r\n\r\n # Prepare URL for 'Get Presigned URL' API request\r\n url = \"{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}\".format(\r\n BASE_URL, os.path.basename(fileName))\r\n \r\n # Execute request and get response as JSON\r\n response = requests.get(url, headers={ \"x-api-key\": API_KEY })\r\n if (response.status_code == 200):\r\n json = response.json()\r\n \r\n if json[\"error\"] == False:\r\n # URL to use for file upload\r\n uploadUrl = json[\"presignedUrl\"]\r\n # URL for future reference\r\n uploadedFileUrl = json[\"url\"]\r\n\r\n # 2. 
UPLOAD FILE TO CLOUD.\r\n with open(fileName, 'rb') as file:\r\n requests.put(uploadUrl, data=file, headers={ \"x-api-key\": API_KEY, \"content-type\": \"application/octet-stream\" })\r\n\r\n return uploadedFileUrl\r\n else:\r\n # Show service reported error\r\n print(json[\"message\"]) \r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n\r\n return None\r\n\r\n\r\n### STEP 3: CONVERT HTML TO PDF ###\r\n\r\n\r\n# HTML template\r\nfile_read = open(\".\\\\sample.html\", mode='r', encoding= 'utf-8')\r\nSampleHtml = file_read.read()\r\nfile_read.close()\r\n\r\n# Destination PDF file name\r\nDestinationFile = \".\\\\result.pdf\"\r\n\r\n\r\ndef GeneratePDFFromHtml(SampleHtml, destinationFile):\r\n \"\"\"Converts HTML to PDF using PDF.co Web API\"\"\"\r\n\r\n # Prepare requests params as JSON\r\n # See documentation: https://apidocs.pdf.co/?#1-json-pdfconvertfromhtml\r\n parameters = {}\r\n\r\n # Input HTML code to be converted. Required.\r\n parameters[\"html\"] = SampleHtml\r\n\r\n # Name of resulting file\r\n parameters[\"name\"] = os.path.basename(destinationFile)\r\n\r\n # Set to css style margins like 10 px or 5px 5px 5px 5px.\r\n parameters[\"margins\"] = \"5px 5px 5px 5px\"\r\n\r\n # Can be Letter, A4, A5, A6 or custom size like 200x200\r\n parameters[\"paperSize\"] = \"Letter\"\r\n\r\n # Set to Portrait or Landscape. Portrait by default.\r\n parameters[\"orientation\"] = \"Portrait\"\r\n\r\n # true by default. Set to false to disable printing of background.\r\n parameters[\"printBackground\"] = \"true\"\r\n\r\n # If large input document, process in async mode by passing true\r\n parameters[\"async\"] = \"false\"\r\n\r\n # Set to HTML for header to be applied on every page at the header.\r\n parameters[\"header\"] = \"\"\r\n\r\n # Set to HTML for footer to be applied on every page at the bottom.\r\n parameters[\"footer\"] = \"\"\r\n\r\n # Prepare URL for 'HTML To PDF' API request\r\n url = \"{}/pdf/convert/from/html\".format(\r\n BASE_URL\r\n )\r\n\r\n # Execute request and get response as JSON\r\n\r\n response = requests.post(url, data=parameters, headers={ \"x-api-key\": API_KEY })\r\n if (response.status_code == 200):\r\n json = response.json()\r\n\r\n if json[\"error\"] == False:\r\n # Get URL of result file\r\n resultFileUrl = json[\"url\"] \r\n # Download result file\r\n r = requests.get(resultFileUrl, stream=True)\r\n if (r.status_code == 200):\r\n with open(destinationFile, 'wb') as file:\r\n for chunk in r:\r\n file.write(chunk)\r\n #print(f\"Result file saved as \\\"{destinationFile}\\\" file.\")\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n else:\r\n # Show service reported error\r\n print(json[\"message\"])\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n\r\n\r\n### STEP 4: UPLOAD PDF AS TEMPORARY FILE ###\r\n\r\n\r\n# Source PDF file\r\nSourcePdfFile = DestinationFile\r\n\r\n\r\n### STEP 5: ADD BARCODE TO PDF ###\r\n\r\n\r\n# Destination PDF file name\r\nDestinationFile = \".//result.pdf\"\r\n\r\n# Image params\r\nType = \"image\"\r\nX = 400\r\nY = 20\r\nWidth = 150\r\nHeight = 50\r\n\r\ndef addImageToExistingPdf(PdfUrl, barcodeUrl, destinationFile):\r\n import json\r\n \"\"\"Add image using PDF.co Web API\"\"\"\r\n\r\n # Prepare requests params as JSON\r\n # See documentation: https://apidocs.pdf.co\r\n payload = json.dumps({\r\n \"name\": os.path.basename(destinationFile),\r\n \"url\": PdfUrl,\r\n \"images\": [{\r\n \"url\": barcodeUrl,\r\n \"x\": X,\r\n \"y\": 
Y,\r\n \"width\": Width,\r\n \"height\": Height\r\n }]\r\n })\r\n\r\n # Prepare URL for 'PDF Edit' API request\r\n url = \"{}/pdf/edit/add\".format(BASE_URL)\r\n\r\n # Execute request and get response as JSON\r\n response = requests.post(url, data=payload, headers={ \"x-api-key\": API_KEY })\r\n if (response.status_code == 200):\r\n json = response.json()\r\n\r\n if json[\"error\"] == False:\r\n # Get URL of result file\r\n resultFileUrl = json[\"url\"] \r\n # Download result file\r\n r = requests.get(resultFileUrl, stream=True)\r\n if (r.status_code == 200):\r\n with open(destinationFile, 'wb') as file:\r\n for chunk in r:\r\n file.write(chunk)\r\n print(f\"Result file saved as \\\"{destinationFile}\\\" file.\")\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n else:\r\n # Show service reported error\r\n print(json[\"message\"])\r\n else:\r\n print(f\"Request error: {response.status_code} {response.reason}\")\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"bytescout/ByteScout-SDK-SourceCode","sub_path":"PDF.co Web API/PDF from HTML/Python/Generate PDF From HTML And Add Barcode/GeneratePdfFromHtmAndAddBarcode.py","file_name":"GeneratePdfFromHtmAndAddBarcode.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"65"} +{"seq_id":"71930083087","text":"import sys\nimport heapq\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\n\nadj = [[] for _ in range(N+1)]\n\nfor _ in range(M):\n A, B, C = map(int, input().split())\n adj[A].append((B, C))\n adj[B].append((A, C))\n\nq = []\nS = set([1])\ncosts = []\n\n\nfor next, cost in adj[1]:\n heapq.heappush(q, (cost, next))\n\nwhile len(S) != N:\n curr_cost, curr = heapq.heappop(q)\n if curr not in S:\n S.add(curr)\n heapq.heappush(costs, -curr_cost)\n\n for next, cost in adj[curr]:\n if next not in S:\n heapq.heappush(q, (cost, next))\nheapq.heappop(costs)\nprint(-sum(costs))","repo_name":"harveydev24/baekjoon","sub_path":"백준/Gold/1647. 
도시 분할 계획/도시 분할 계획.py","file_name":"도시 분할 계획.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"69831875409","text":"import pandas as pd\nimport numpy as np\nimport spacy\nimport os\nimport requests\nimport logging\nimport bz2\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom joblib import Parallel, delayed\n\n\ndef get_tfidf_file():\n url = 'https://www.ideals.illinois.edu/items/91826/bitstreams/285420/object?dl=1'\n fn = '../../data/experiment_data/tfidf_weights.csv'\n if not os.path.exists(fn):\n response = requests.get(url)\n response.raise_for_status()\n os.makedirs(os.path.dirname(fn), exist_ok=True)\n bz2_path = fn + '.bz2'\n with open(bz2_path, 'wb') as f:\n f.write(response.content)\n with bz2.BZ2File(bz2_path, 'rb') as f_in, open(fn, 'wb') as f_out:\n f_out.write(f_in.read())\n os.remove(bz2_path)\n\ndef process_text(tfidf_filtered, tokens, stopwords):\n n_tokens = len(tokens)\n idf_scores_ibf = tfidf_filtered[tfidf_filtered['token'].isin(tokens)]['IBF'].values\n sum_ibf = np.sum(idf_scores_ibf)\n return sum_ibf, n_tokens\n\n\nif __name__ == \"__main__\":\n LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'\n logging.basicConfig(filename=f'{os.path.basename(__file__)}.log', level=logging.INFO, format=LOG_FORMAT,\n datefmt='%Y-%m-%d %H:%M:%S', filemode='w')\n get_tfidf_file()\n tfidf = pd.read_csv(\"../../data/experiment_data/tfidf_weights.csv\")\n df = pd.read_csv(\"../../data/experiment_data/data_clean.csv\")\n logging.info(\"length of df before dropping na: \" + str(len(df)))\n df = df.dropna(subset=['response_text'])\n logging.info(\"length of df after dropping na: \" + str(len(df)))\n\n nlp = spacy.load(\"en_core_web_sm\", disable=[\"tagger\", \"parser\", \"ner\"])\n\n # get all unique tokens in one go\n all_docs = list(nlp.pipe(df['response_text'], batch_size=500))\n all_tokens = {token.text for doc in all_docs for token in doc if token.is_alpha}\n logging.info(\"All tokens extracted\")\n\n # filter tfidf once for all tokens\n tfidf_filtered = tfidf[tfidf['token'].isin(all_tokens)]\n logging.info(\"TFIDF filtered\")\n\n results = Parallel(n_jobs=-1)(\n delayed(process_text)(tfidf_filtered, [token.text for token in doc if token.is_alpha], STOP_WORDS) for doc in\n all_docs)\n\n\n df['elab_ibf'], df['elab_n_tokens'] = zip(*results)\n logging.info(\"length of df\" + str(len(df)))\n logging.info(\"Elaboration features added\")\n df.to_csv(\"../../data/experiment_data/data_clean_with_elab.csv\", index=False)\n logging.info(\"Data saved\")\n","repo_name":"josh-ashkinaze/net_create","sub_path":"scripts/analyze_experiment/add_elaboration_feats.py","file_name":"add_elaboration_feats.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"8859632414","text":"import argparse\nimport os\nfrom skimage.io import imread, imsave\n\nimport CCID\nfrom CCID.denoiser.denoiser import *\nfrom CCID.fusion.fusion import fuse_image\nfrom CCID.confidence.confidence import predict_confidence\n\n\"\"\" This .py file will control the full logic of the program \"\"\"\n\n\ndef parse_args():\n module_root = os.path.dirname(CCID.__file__)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-directory\", default=os.path.join(module_root, \"library/dataset/clean_data/Test/Set12\"),\n type=str, help=\"Directory containing input images.\")\n parser.add_argument(\"--output-directory\", 
default=os.path.join(module_root, \"report_data\"), type=str,\n help=\"Directory to which the output will be written to.\")\n parser.add_argument(\"--dncnn-model\",\n default=os.path.join(module_root,\n \"library/DnCNN/TrainingCodes/dncnn_pytorch/models/DnCNN_sigma25/model.pth\"),\n type=str, help=\"Path to the DnCNN model to use.\")\n parser.add_argument(\"--sigma\", default=55, type=int, help=\"Noise level to be artificially added.\")\n parser.add_argument(\"--weight\", default=0.5, type=float, help=\"Fusion weight.\")\n return parser.parse_args()\n\n\ndef main():\n \"\"\"The main test code.\"\"\"\n\n args = parse_args()\n\n sigma_normalized = args.sigma / 255.0\n\n \"\"\" For reproducibility \"\"\"\n np.random.seed(seed=0)\n\n if not os.path.isdir(args.input_directory):\n raise Exception(\"Not a directory: %s\" % args.input_directory)\n\n for image_name in os.listdir(args.input_directory):\n assert any(image_name.endswith(ext) for ext in [\"png\", \"jpg\", \"jpeg\", \"bmp\"])\n\n model = torch.load(args.dncnn_model,\n map_location=torch.device(\"cpu\"))\n model.eval()\n original_image = np.array(imread(os.path.join(args.input_directory, image_name)), dtype=np.float32) / 255.0\n noisy_image = original_image + np.random.normal(0, sigma_normalized, original_image.shape)\n noisy_image = noisy_image.astype(np.float32)\n noisy_image_torch = torch.from_numpy(noisy_image).view(1, -1, noisy_image.shape[0],\n noisy_image.shape[1])\n\n \"\"\" Take noisy image, return the denoised one.\n The denoise is done using DnCNN model, trained on sigma=25\n \"\"\"\n denoised_image = model(noisy_image_torch)\n denoised_image = denoised_image.view(noisy_image.shape[0], noisy_image.shape[1])\n denoised_image = denoised_image.cpu()\n denoised_image = denoised_image.detach().numpy().astype(np.float32)\n\n \"\"\" Take noisy image and denoised image, return the predicted confidence map.\n The confidence is per region based, each 8x8 region have a confidence value.\n \"\"\"\n confidence_map = predict_confidence(noisy_image, denoised_image)\n residual_image = noisy_image - denoised_image\n\n \"\"\" Take noisy image, denoised image and confidence map, together with the user input\n fusion weight, return the fused image \n \"\"\"\n reliable_image, fused_image = fuse_image(noisy_image, denoised_image, args.weight,\n confidence_map=confidence_map)\n # print(\"Here\")\n # imsave(os.path.join(args.output_directory, \"different_sigma_values/sigma=55/denoised_image/{}\".format(image_name)),\n # (np.clip(denoised_image, 0, 1) * 255).astype(np.uint8))\n # imsave(os.path.join(args.output_directory, \"different_sigma_values/sigma=55/reliable_image/{}\".format(image_name)),\n # (np.clip(reliable_image, 0, 1) * 255).astype(np.uint8))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IVRL/CCID","sub_path":"CCID/pipeline_no_gui.py","file_name":"pipeline_no_gui.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"65"} +{"seq_id":"19847725588","text":"\"\"\"empty message\n\nRevision ID: 3f29be0e0f54\nRevises: c7f07906f020\nCreate Date: 2020-06-18 17:40:15.011481\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3f29be0e0f54'\ndown_revision = 'c7f07906f020'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('migrate_version')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('migrate_version',\n sa.Column('repository_id', sa.VARCHAR(length=250), nullable=False),\n sa.Column('repository_path', sa.TEXT(), nullable=True),\n sa.Column('version', sa.INTEGER(), nullable=True),\n sa.PrimaryKeyConstraint('repository_id')\n )\n # ### end Alembic commands ###\n","repo_name":"kgorshkoff/architecture","sub_path":"migrations/versions/3f29be0e0f54_.py","file_name":"3f29be0e0f54_.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"29378887708","text":"import os\nimport sys\nimport pandas as pd\n\n\ndef parse_pdb(pdb_in):\n import Bio.PDB\n if os.path.dirname(pdb_in):\n dir_pdb = os.path.dirname(pdb_in)\n pdb_file = os.path.basename(pdb_in)\n else:\n dir_pdb = \"\"\n pdb_file = pdb_in\n\n pdb_id, ext = os.path.splitext(pdb_file)\n chain_lengths = []\n parser = Bio.PDB.PDBParser()\n struct = parser.get_structure(pdb_id, pdb_in)\n model = struct[0]\n chains = [ch_id for ch_id in model.get_list()]\n return chains\n\n\ndef calc_min_dist(res_a, res_b):\n import numpy as np\n na_atoms = len(res_a.get_list())\n nb_atoms = len(res_b.get_list())\n atomic_matrix = np.zeros((na_atoms, nb_atoms))\n for i, atom_i in enumerate(res_a):\n for j, atom_j in enumerate(res_b):\n if atom_i.mass > 2 and atom_j.mass > 2:\n atomic_matrix[i][j] = np.linalg.norm(atom_i.get_coord() - atom_j.get_coord())\n mindist = np.amin(atomic_matrix[np.nonzero(atomic_matrix)])\n atom_i_index, atom_j_index = np.where(atomic_matrix == mindist) # gives index of mindist atoms i,j\n min_atom_i = res_a.get_list()[atom_i_index[0]]\n min_atom_j = res_b.get_list()[atom_j_index[0]]\n return mindist, (min_atom_i.id, min_atom_j.id)\n\n\ndef calc_ca_distance(res_a, res_b):\n \"\"\"\n Calculates the distance between a pair of CA atoms\n :param res_a: Biopython residue object - residue a\n :param res_b: Biopython residue object - residue b\n :return: Distance between CA atoms\n \"\"\"\n import numpy as np\n a = res_a[\"CA\"].get_coord()\n b = res_b[\"CA\"].get_coord()\n dist = np.linalg.norm(a - b)\n return dist\n\n\ndef three2one(sequence):\n \"\"\" Lookup table - translate a protein sequence from 3 to 1 letter code\n \"\"\"\n\n code = {\"GLY\": \"G\", \"ALA\": \"A\", \"LEU\": \"L\", \"ILE\": \"I\",\n \"ARG\": \"R\", \"LYS\": \"K\", \"MET\": \"M\", \"CYS\": \"C\",\n \"TYR\": \"Y\", \"THR\": \"T\", \"PRO\": \"P\", \"SER\": \"S\",\n \"TRP\": \"W\", \"ASP\": \"D\", \"GLU\": \"E\", \"ASN\": \"N\",\n \"GLN\": \"Q\", \"PHE\": \"F\", \"HIS\": \"H\", \"VAL\": \"V\",\n \"M3L\": \"K\", \"MSE\": \"M\", \"CAS\": \"C\"}\n\n newprot = \"\"\n for a in sequence:\n newprot += code.get(a, \"?\")\n\n return newprot\n\n\ndef get_residues(chains, seq=False):\n \"\"\"\n Build a simple list of residues from a single chain of a PDB file.\n :param chains: Bio PDB Chain Object\n :param seq: Boolean (Default: False) - Outputs sequence if True.\n :return: A list of Bio.PDB.Residue objects.\n \"\"\"\n import Bio.PDB\n # from parse_pdb import parse_pdb\n # chains = parse_pdb(pdb)\n chain_lengths = []\n residues = []\n sequence = []\n for ch in chains:\n # make sure res are standard AA\n num_residues = 0\n for res in filter(lambda r: Bio.PDB.is_aa(r), ch.get_residues()):\n # if Bio.PDB.is_aa(res, standard=True):\n is_regular_res = res.has_id('CA') and res.has_id('O')\n res_id = 
res.get_id()[0]\n num_residues += 1\n if (\n res_id == ' ' or res_id == 'H_MSE' or res_id == 'H_M3L' or res_id == 'H_CAS' or res_id == 'HMS') and is_regular_res:\n residues.append(res)\n sequence.append(res.get_resname())\n else:\n sys.stderr.write(\"WARNING: non-standard AA at %r%s\" %\n (res.get_id(), os.linesep))\n chain_lengths.append(num_residues)\n\n if seq:\n sequence = three2one(sequence)\n seq_a = sequence[:chain_lengths[0]]\n seq_b = sequence[chain_lengths[0]:]\n return seq_a, seq_b\n else:\n return residues, chain_lengths\n\n\ndef get_residues_web(pdb, seq=False, chain_ids=None):\n \"\"\"\n Build a simple list of residues from a single chain of a PDB file.\n :param pdb: String - PDB filename\n :param seq: Boolean (Default: False) - Outputs sequence if True.\n :param chain_ids:\n :return: A list of Bio.PDB.Residue objects.\n \"\"\"\n import Bio.PDB\n\n if chain_ids is None:\n chain_ids = [\"A\", \"B\"]\n\n # chain_ids = [msa_name.split(\"_\")[1], msa_name.split(\"_\")[3]]\n chain_lengths = []\n parser = Bio.PDB.PDBParser()\n\n struct = parser.get_structure(pdb, pdb)\n model = struct[0]\n # if len(self.chain_ids) == 0:\n # get residues from every chain.\n # chains = model.get_list()\n # else:\n chains = [model[ch_id] for ch_id in chain_ids]\n\n residues = []\n sequence = []\n for ch in chains:\n # make sure res are standard AA\n num_residues = 0\n for res in filter(lambda r: Bio.PDB.is_aa(r), ch.get_residues()):\n # if Bio.PDB.is_aa(res, standard=True):\n is_regular_res = res.has_id('CA') and res.has_id('O')\n res_id = res.get_id()[0]\n if (res_id == ' ' or res_id == 'H_MSE' or res_id == 'H_M3L' or res_id == 'H_CAS') and is_regular_res:\n residues.append(res)\n sequence.append(res.get_resname())\n num_residues += 1\n else:\n sys.stderr.write(\"WARNING: non-standard AA at %r%s\" %\n (res.get_id(), os.linesep))\n chain_lengths.append(num_residues)\n\n if seq:\n sequence = three2one(sequence)\n seq_a = sequence[:chain_lengths[0]]\n seq_b = sequence[chain_lengths[0]:]\n return seq_a, seq_b\n else:\n return residues, chain_lengths\n\n\ndef distance_matrix(pdb_in, heavy_atom=False):\n \"\"\"\n Calculates distance matrix and outputs it to a csv file.\n :param pdb_in:String; Full PDB file path and name.\n :param heavy_atom: Boolean; Calculate heavy atom distance or Calpha.\n :return: Dataframe and List of chain lengths\n \"\"\"\n import time\n from itertools import combinations_with_replacement\n\n # Checks if pdb_in has a directory included, if not set output dir to working dir\n if os.path.dirname(pdb_in):\n pdb = os.path.basename(pdb_in)\n pdb_dir = os.path.dirname(pdb_in)\n else:\n pdb = pdb_in\n pdb_dir = \"\"\n # array initialization\n resi_list = []\n resj_list = []\n actual_i_list = []\n actual_j_list = []\n distance_list = []\n chain_1_list = []\n chain_2_list = []\n atom_id_list = []\n residue_list = []\n\n # Function information output\n print(\"\\t(distance matrix) begin loop\")\n chain_model = parse_pdb(pdb_in)\n\n residues, chain_lengths = get_residues(chain_model) # Output list of residues from pdb\n # count every pair of residues NOTE: INDEX BEGINS AT 0 BUT 1 IS ADDED BELOW\n pair_list = combinations_with_replacement(range(len(residues)), 2)\n\n start_time = time.time()\n for i, j in pair_list:\n if i != j: # ensure residue i not equal to j\n res_a = residues[int(i)]\n res_b = residues[int(j)]\n actual_i_list.append(res_a.id[1])\n actual_j_list.append(res_b.id[1])\n # get chain id\n chain_1_list.append(res_a.get_parent().id)\n chain_2_list.append(res_b.get_parent().id)\n # 
resets res index to 1 SEE NOTE ABOVE.\n resi_list.append(i + 1)\n resj_list.append(j + 1)\n residue_list.append((res_a.resname, res_b.resname))\n if heavy_atom:\n mindist, atom_ids = calc_min_dist(res_a, res_b)\n distance_list.append(mindist)\n atom_id_list.append(atom_ids)\n else:\n if res_a.has_id(\"CA\") and res_b.has_id(\"CA\"):\n distance_list.append(calc_ca_distance(res_a, res_b))\n else:\n print(f\"NOTE:Res {res_a.get_full_id()}\\n\\tor {res_b.get_full_id()} not calculated! (missing CA)\\n\")\n # fileout.close()\n print(\"\\t -- LOOP TIME -- {}\".format(time.time() - start_time))\n # makes a pandas dataframe\n dir_matrix = os.path.join(pdb_dir, \"matrix\")\n if not os.path.exists(dir_matrix):\n os.makedirs(dir_matrix)\n if heavy_atom:\n df_pdb = pd.DataFrame({'i': resi_list, 'j': resj_list, 'd': distance_list,\n 'si': actual_i_list, 'sj': actual_j_list, 'chain_1': chain_1_list,\n 'chain_2': chain_2_list, 'resnames': residue_list, 'atom_id': atom_id_list})\n filename = os.path.join(dir_matrix, f\"atom_distance_matrix_{pdb.split('.pdb')[0]}.txt\")\n header = \"i\\tj\\tdist_aa\\tsi\\tsj\\tchain_1\\tchain_2\\tresnames\\tatom_id\"\n df_pdb.to_csv(filename, sep='\\t', index=False, header=header, float_format='%.5f')\n else:\n df_pdb = pd.DataFrame({'i': resi_list, 'j': resj_list, 'd': distance_list,\n 'si': actual_i_list, 'sj': actual_j_list,\n 'chain_1': chain_1_list, 'chain_2': chain_2_list, 'resnames': residue_list})\n filename = os.path.join(dir_matrix, f\"ca_distance_matrix_{pdb.split('.pdb')[0]}.txt\")\n header = \"i\\tj\\tdist_ca\\tsi\\tsj\\tchain_1\\tchain_2\\tresnames\"\n df_pdb.to_csv(filename, sep='\\t', index=False, header=header, float_format='%.5f')\n\n print(f\"wrote {filename}\")\n return df_pdb, chain_lengths\n\n\ndef pipeline_pdb(pdb_id, dir_pdb):\n # PDB distance matrix\n # PDB_id = \"5pti\"\n # PDB_id = \"1or7\"\n matrix_file = os.path.join(dir_pdb, \"matrix\", f\"atom_distance_matrix_{pdb_id}.txt\")\n if os.path.exists(matrix_file):\n print(\"Path exists! 
Reading from file instead...\")\n pdb_dataframe = pd.read_csv(matrix_file, header=0, delimiter=\"\\t\")\n else:\n # uncomment if not reading\n pdb_path = os.path.join(dir_pdb, f\"{pdb_id}.pdb\")\n pdb_dataframe, chain_len = distance_matrix(pdb_path, heavy_atom=True)\n\n # min_i = pdb_dataframe[pdb_dataframe.si > 0][\"i\"].min()\n # pdb_dataframe[\"i\"] = pdb_dataframe[\"i\"] - (min_i - 1)\n # pdb_dataframe[\"j\"] = pdb_dataframe[\"j\"] - (min_i - 1)\n # out_df = pdb_dataframe[pdb_dataframe.si > 0]\n # return out_df.reset_index(drop=True)\n return pdb_dataframe\n","repo_name":"nissmogt/sequence_degradation","sub_path":"data/tools/pdb.py","file_name":"pdb.py","file_ext":"py","file_size_in_byte":9910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"12807435923","text":"from rest_framework import serializers\nfrom .models import Category, Brand, Product, ProductImage\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n created = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n updated = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n count = serializers.IntegerField()\n\n class Meta:\n model = Category\n fields = (\n \"title\",\n \"image\",\n \"updated\",\n \"created\",\n \"is_published\",\n \"count\"\n )\n\n\nclass BrandSerializer(serializers.ModelSerializer):\n created = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n updated = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n\n class Meta:\n model = Brand\n fields = (\"title\", \"image\", \"updated\", \"created\", \"is_published\")\n\n\nclass ProductImageSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductImage\n fields = (\"image\",)\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"regular_price\",\n \"sale_price\",\n \"is_published\",\n \"category_id\",\n \"slug\",\n )\n\n\nclass SingleProductSerializer(serializers.ModelSerializer):\n created = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n updated = serializers.DateTimeField(format=\"%d-%m-%Y %H:%M\", read_only=True)\n images = ProductImageSerializer(many=True)\n category = serializers.CharField(source='category.title')\n brand = serializers.CharField(source='brand.title')\n\n class Meta:\n model = Product\n fields = (\n \"id\",\n \"title\",\n \"title_ru\",\n \"description\",\n \"description_ru\",\n \"cover\",\n \"brand\",\n \"images\",\n \"regular_price\",\n \"sale_price\",\n \"stock\",\n \"category\",\n \"created\",\n \"updated\",\n \"is_published\",\n \"slug\",\n )\n","repo_name":"KalilovM/liveme","sub_path":"liveme_django/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"36769832256","text":"from selenium import webdriver\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\nimport shutil\n\nmin_diff = input(\"Enter minimum difficulty level\\n\")\nmax_diff = input(\"Enter maximum difficulty level\\n\")\ndriver = webdriver.Firefox(executable_path=\"geckodriver.exe\")\nscrape_link = 
\"https://codeforces.com/problemset?tags=\"+min_diff+\"-\"+max_diff\ndriver.get(scrape_link)\ndriver.implicitly_wait(10)\nproblem_links_table = driver.find_elements_by_xpath(\"//*[@class='problems']/tbody/tr/td[1]\")\nproblem_links = []\nfor problem_link in problem_links_table:\n problem_links.append(problem_link.find_element_by_tag_name(\"a\").get_attribute(\"href\"))\ncurrent_directory = os.getcwd()\nfor problem_link in problem_links:\n driver.get(problem_link)\n problem_index = problem_link.split(\"/\")[-2] + problem_link.split(\"/\")[-1]\n try:\n problem_frame = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME,\"problem-statement\"))\n )\n except:\n driver.close()\n \n problem_path = os.path.join(os.getcwd(),problem_index)\n if(os.path.exists(problem_index)):\n shutil.rmtree(problem_index)\n os.mkdir(problem_path)\n problem_frame.screenshot(os.path.join(problem_path,\"problem.png\"))\n\n inputs = problem_frame.find_elements_by_class_name(\"input\")\n i=1\n for input_element in inputs:\n inputText = input_element.find_element_by_tag_name(\"pre\").text\n inputFile = open(os.path.join(problem_path,\"input\"+str(i)+\".txt\"),\"w+\")\n inputFile.write(inputText)\n inputFile.close()\n i=i+1\n\n outputs = problem_frame.find_elements_by_class_name(\"output\")\n i=1\n for output_element in outputs:\n outputText = output_element.find_element_by_tag_name(\"pre\").text\n outputFile = open(os.path.join(problem_path,\"output\"+str(i)+\".txt\"),\"w+\")\n outputFile.write(outputText)\n outputFile.close()\n i=i+1\n\n\ndriver.close()","repo_name":"AwesomePaneer/DevClubWebScrapingAssignment","sub_path":"Codeforces/fetch_round_difficulty_filter.py","file_name":"fetch_round_difficulty_filter.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"65"} +{"seq_id":"7043431121","text":"class Solution:\n def kSum(self, nums: List[int], k: int) -> int:\n N = len(nums)\n posSum = sum(x for x in nums if x > 0)\n A = sorted(abs(x) for x in nums)\n res = posSum\n maxHeap = [(-posSum + A[0], 0)]\n\n for _ in range(k - 1):\n nextSum, index = heappop(maxHeap)\n\n if index + 1 < N:\n # -(-nextSum - A[index + 1])\n heappush(maxHeap, (nextSum + A[index + 1], index + 1))\n # -(-nextSum + A[index] - A[index + 1])\n heappush(maxHeap, (nextSum - A[index] + A[index + 1], index + 1))\n \n res = -nextSum\n \n return res","repo_name":"hwennnn/leetcode-solutions","sub_path":"problems/find_the_k-sum_of_an_array/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"} +{"seq_id":"71364632624","text":"import cv2\nimport math\nimport numpy as np\nimport sys\nimport cv3\n\nparser = sys.argv\nvidname, vidformat = parser[1].split('.')\nsmoothing_radius = int(parser[2])\nsrc = cv2.VideoCapture(\"{}.{}\".format(vidname,vidformat))\n\nret, prev = src.read()\nprev_gray = cv2.cvtColor(prev,cv2.COLOR_BGR2GRAY)\nprev_corner = cv2.goodFeaturesToTrack(prev_gray,200,0.01,30)\nlast_transform, prev_to_cur_transform, trajectory, smooth_trajectory, smooth_prev_to_cur_transform = [], [], [], [], []\na = 0\noutput_traj=open(\"output_traj.txt\",'w')\nwhile(src.isOpened()):\n ret, cur = src.read()\n if not ret:\n break\n cur_gray = cv2.cvtColor(cur,cv2.COLOR_BGR2GRAY)\n cur_corner = cv2.goodFeaturesToTrack(cur_gray,200,0.01,30)\n cur_corner, status, err = 
cv2.calcOpticalFlowPyrLK(prev_gray,cur_gray,prev_corner,cur_corner)\n prev_corner2 = prev_corner[np.where(status==1)[0]]\n cur_corner2 = cur_corner[np.where(status==1)[0]]\n transform, ret2 = cv2.estimateAffinePartial2D(prev_corner2,cur_corner2)\n if transform is None:\n transform = last_transform\n last_transform = transform\n da = np.arctan2(transform[1][0],transform[0][0])*180/math.pi\n a += da\n trajectory.append(a)\n prev_to_cur_transform.append(da)\n output_traj.write(\"{}\\n\".format(da))\n prev, prev_gray, prev_corner = cur, cur_gray, cur_corner\noutput_traj.close()\n\n\noutput_smoothtraj = open(\"output_smoothtraj.txt\",'w')\nfor traj in range(len(trajectory)):\n sum_a, k = 0, 0\n for dist in range(-smoothing_radius,smoothing_radius):\n if 0 <= traj+dist < len(trajectory):\n sum_a += trajectory[traj+dist]\n k += 1\n smooth_trajectory.append(sum_a/k)\n output_smoothtraj.write(\"{}\\n\".format(sum_a/k))\noutput_smoothtraj.close()\n\noutput_trans=open(\"output_smoothtrans.txt\",'w')\na = 0\nfor trans in range(len(prev_to_cur_transform)):\n a += prev_to_cur_transform[trans]\n smooth_prev_to_cur_transform.append(prev_to_cur_transform[trans] + smooth_trajectory[trans] - a)\n output_trans.write(\"{}\\n\".format(prev_to_cur_transform[trans] + smooth_trajectory[trans] - a))\noutput_trans.close()\n\nsrc.set(cv2.CAP_PROP_POS_FRAMES, 0)\nhoriz, vert = src.get(cv2.CAP_PROP_FRAME_WIDTH), src.get(cv2.CAP_PROP_FRAME_HEIGHT)\ndiag = int(math.sqrt(vert*vert+horiz*horiz))\nout = cv2.VideoWriter(\"{}_rotstab{}.{}\".format(vidname,smoothing_radius,vidformat) , cv2.VideoWriter_fourcc(*'XVID'), src.get(cv2.CAP_PROP_FPS), (diag,diag))\nk = 0\n\nfor angle in smooth_prev_to_cur_transform:\n ret, cur = src.read()\n if not ret:\n break\n cur = cv3.pad_rotate(cur,angle)\n out.write(cur) \n\nsrc.release()\nout.release()\ncv2.destroyAllWindows() \n\n\n","repo_name":"bsulyok/cv","sub_path":"rotstab.py","file_name":"rotstab.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33610258565","text":"# SCG\n# yep, it's happening! 
:D\n# whee\n\nimport sys\nfrom inter import *\nfrom sparser import *\n\n\ntry:\n path = sys.argv[1]\n f = open(path, encoding='utf-8')\n program = f.read()\n f.close()\nexcept Exception:\n exit()\n\n\nprogram = fully_parse(program)\ninter = Inter()\ninter.run_code(program)\ninter.output()\n","repo_name":"an-OK-squirrel/scg","sub_path":"scg.py","file_name":"scg.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41989939356","text":"# use: \n# python make_supercell.py POSCAR_original # # # POSCAR_new\n##### made by Joohee_Lee #####\n##### modified by Yong #####\n##### (20190306) #####\n# This is a code to build supercell to find magnetic primitive cell.\n\nimport sys, string\n\ndef isNum(s) :\n\ttry :\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError :\n\t\treturn False\n\ntry:\n poscar_original = sys.argv[1]\n expand_a = int(sys.argv[2])\n expand_b = int(sys.argv[3])\n expand_c = int(sys.argv[4])\n poscar_new = sys.argv[5]\nexcept:\n sys.exit()\n\n# file open\nf=open(poscar_original,'r')\npnw = open(poscar_new,'w')\n\n\n# title, scale, and lattice\nf.readline(); line=f.readline().split(); scale=float(line[0])\nline=f.readline().split(); A1=scale*float(line[0]); A2=scale*float(line[1]); A3=scale*float(line[2])\nline=f.readline().split(); B1=scale*float(line[0]); B2=scale*float(line[1]); B3=scale*float(line[2])\nline=f.readline().split(); C1=scale*float(line[0]); C2=scale*float(line[1]); C3=scale*float(line[2])\n\n# name and number, or just number \nname=0\nline=f.readline()\nnumber_list=line.split()\nif isNum(number_list[0])==0:\n name=line\n number_list=f.readline().split()\n\n# total atom number calculation \ntotnum=0\nfor i in range(len(number_list)):\n\ttotnum=totnum+int(number_list[i])\n\n# Selective dynamics or D/C\nDorC=f.readline()\nif DorC[0]=='S':\n\tDorC=f.readline()\n\n# coordination list by the line order (list of lines)\ncoor_list=[]\nfor t in range(totnum):\n coor_list.append(f.readline())\n\n# printing, title, scale, new_lattices, name_list, new number_list, Selective dynamics \nif name: pnw.write(\"make supercell\"+' '+name) #if atom names are written there \nelse: pnw.write(\"make supercell\\n\")\npnw.write(\" 1.00000000000000\\n\")\npnw.write(\" %(a1)16.13F %(a2)16.13F %(a3)16.13F\\n\" % {'a1':A1*expand_a,'a2':A2*expand_a,'a3':A3*expand_a})\npnw.write(\" %(b1)16.13F %(b2)16.13F %(b3)16.13F\\n\" % {'b1':B1*expand_b,'b2':B2*expand_b,'b3':B3*expand_b})\npnw.write(\" %(c1)16.13F %(c2)16.13F %(c3)16.13F\\n\" % {'c1':C1*expand_c,'c2':C2*expand_c,'c3':C3*expand_c} )\nif name: #if atom names are written there\n pnw.write(' '+name)\npnw.write(' '+' '.join([str(int(x)*expand_a*expand_b*expand_c) for x in number_list])+'\\n')\npnw.write(\"Selective dynamics\\n\")\n\n# print if it is Direct\nif DorC[0]=='D':\n\tpnw.write(\"Direct\\n\")\n\tfor t in range(len(coor_list)):\n\t\tfor i in range(expand_a):\n\t\t\tfor j in range(expand_b):\n\t\t\t\tfor k in range(expand_c):\n\t\t\t\t\ttempX=float(coor_list[t].split()[0]); tempY=float(coor_list[t].split()[1]); tempZ=float(coor_list[t].split()[2])\n\t\t\t\t\tif '!' 
in coor_list[t]:\n\t\t\t\t\t\tat_inform = coor_list[t].split('!')[-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tat_inform = '\\n'\n\t\t\t\t\tpnw.write(\" %(tx)19.16F %(ty)19.16F %(tz)19.16F T T T !\" % {'tx':(i+tempX)/expand_a,'ty':(j+tempY)/expand_b,'tz':(k+tempZ)/expand_c} + at_inform)\n\n# else if it is Cartesian\nelif DorC[0]=='C':\n\tpnw.write(\"Cartesian\\n\")\n\tfor t in range(len(coor_list)):\n\t\tfor i in range(expand_a):\n\t\t\tfor j in range(expand_b):\n\t\t\t\tfor k in range(expand_c):\n\t\t\t\t\ttempX=float(coor_list[t].split()[0]); tempY=float(coor_list[t].split()[1]); tempZ=float(coor_list[t].split()[2])\n\t\t\t\t\tif '!' in coor_list[t]:\n\t\t\t\t\t\tat_inform = coor_list[t].split('!')[-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tat_inform = '\\n'\n\n\t\t\t\t\tpnw.write(\" %(tx)19.16F %(ty)19.16F %(tz)19.16F T T T !\" % {'tx':tempX+(i*A1)+(j*B1)+(k*C1),'ty':tempY+(i*A2)+(j*B2)+(k*C2),'tz':tempZ+(i*A3)+(j*B3)+(k*C3)}+at_inform)\n","repo_name":"MDIL-SNU/AMP2","sub_path":"src/make_supercell.py","file_name":"make_supercell.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"91"} +{"seq_id":"19877916249","text":"from libqtile.config import Key, ScratchPad, DropDown\nfrom libqtile.config import Group, Match\nfrom libqtile.lazy import lazy\n\nfrom utils.settings import terminal\n\nfrom .keys import keys, MOD, SHIFT\n\ngroups = [\n Group(\n # exclusive=True,\n name=\"code\",\n label=\"\",\n layout=\"monadtall\",\n matches=[Match(wm_instance_class=[\"code\", \"Code\"])],\n ),\n Group(\n # exclusive=True,\n name=\"web\",\n label=\"\",\n layout=\"columns\",\n matches=[Match(wm_instance_class=[\"microsoft-edge\",\"firefox\", \"Firefox\", \"Navigator\"])],\n ),\n Group(\n # exclusive=True,\n name=\"terminal\",\n label=\"\",\n layout=\"monadtall\",\n # matches=[Match(wm_instance_class=[\"alacritty\", \"Alacritty\"])],\n ),\n Group(\n # exclusive=True,\n name=\"messaging\",\n label=\"\",\n layout=\"monadtall\",\n matches=[Match(wm_instance_class=[\"discord\", \"Discord\"])],\n ),\n Group(\n name=\"misc\",\n label=\"\",\n layout=\"monadtall\",\n )\n]\n\nkeys.extend([\n Key([MOD], \"ampersand\", lazy.group[\"code\"].toscreen(), desc=\"Switch to group code\"),\n Key([MOD, SHIFT], \"ampersand\", lazy.window.togroup(\"code\", switch_group=True), desc=\"Switch to & move focused window to group code\"),\n\n Key([MOD], \"eacute\", lazy.group[\"web\"].toscreen(), desc=\"Switch to group web\"),\n Key([MOD, SHIFT], \"eacute\", lazy.window.togroup(\"web\", switch_group=True), desc=\"Switch to & move focused window to group web\"),\n\n Key([MOD], \"quotedbl\", lazy.group[\"terminal\"].toscreen(), desc=\"Switch to group terminal\"),\n Key([MOD, SHIFT], \"quotedbl\", lazy.window.togroup(\"terminal\", switch_group=True), desc=\"Switch to & move focused window to group terminal\"),\n\n Key([MOD], \"apostrophe\", lazy.group[\"messaging\"].toscreen(), desc=\"Switch to group messaging\"),\n Key([MOD, SHIFT], \"apostrophe\", lazy.window.togroup(\"messaging\", switch_group=True), desc=\"Switch to & move focused window to group messaging\"),\n\n Key([MOD], \"parenleft\", lazy.group[\"misc\"].toscreen(), desc=\"Switch to group misc\"),\n Key([MOD, SHIFT], \"parenleft\", lazy.window.togroup(\"misc\", switch_group=True), desc=\"Switch to & move focused window to group misc\"),\n])\n# group_hotkeys=[\"ampersand\", \"eacute\", \"quotedbl\", \"apostrophe\", \"parenleft\"]\n# for i, group in enumerate(groups):\n# if not isinstance(i, ScratchPad):\n# 
group = group.name\n# keys.extend([\n# Key([MOD], group_hotkeys[i], lazy.group[group].toscreen(), desc=\"Switch to group {}\".format(group)),\n# Key([MOD, SHIFT], group_hotkeys[i], lazy.window.togroup(group, switch_group=True), desc=\"Switch to & move focused window to group {}\".format(group)),\n# ])\n\n\n\ngroups.append(\n ScratchPad(\n \"scratchpad\",\n [\n DropDown(\"alacritty\", terminal, opacity=0.9, width=0.8, height=0.5, x=0.1, y=0.25),\n DropDown(\"pulsemixer\", \"alacritty --command=\\\"pulsemixer\\\"\", opacity=0.9, width=0.8, height=0.5, x=0.1, y=0.25)\n ],\n )\n)\n","repo_name":"webflo-dev/dotfiles","sub_path":"dotfiles/config/qtile/core/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70577803182","text":"import sys\nsys.stdin = open('백준10250. ACM 호텔.txt', 'r')\n\n# 간단한 수학문제 - 나머지, 몫이 0일 때만 주의하면 된다.\nT = int(input())\nfor tc in range(T):\n H, W, N = map(int, input().split())\n\n if N % H == 0:\n row = H\n col = (N // H)\n else:\n row = N % H\n col = (N // H) + 1\n\n print(str(row) + '0' + str(col)) if col < 10 else print(str(row) + str(col))","repo_name":"djagmlrhks3/Algorithm_Problem_Solving","sub_path":"Baekjoon/브론즈 III/백준10250. ACM 호텔.py","file_name":"백준10250. ACM 호텔.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"42482618315","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nimport json\nfrom .models import Person\nfrom django.contrib.auth import logout\nfrom .forms import AddPerson, AddSkills\nfrom django.contrib.auth.models import User\n\ndef get_name(request):\n if request.method == 'POST':\n form = AddPerson(request.POST)\n if form.is_valid():\n temp = Person()\n temp.first_name=request.POST.get('first_name')\n temp.last_name=request.POST.get('last_name')\n temp.save()\n return HttpResponseRedirect('/candidates')\n else:\n form = AddPerson()\n data=Person.objects.values('id', 'first_name', 'last_name', 'skills')\n data=json.dumps(list(data))\n return render(request, 'addperson/index.html', {'form':form, 'data':data})\n\ndef logout_user(request):\n if request.method == 'GET':\n logout(request)\n return HttpResponseRedirect('/candidates')\n\ndef redirect(request):\n if request.method == 'GET':\n return HttpResponseRedirect('/candidates')\n\ndef get_skills(request, pk):\n if request.method == 'POST':\n form = AddSkills(request.POST)\n if form.is_valid():\n person=Person.objects.get(id=pk)\n print(person.skills)\n skills=person.skills\n if skills!='':\n skills=json.loads(skills)\n else:\n skills={}\n temp = {}\n for i in form.fields:\n if i!='person' and i!='recruter':\n temp.update({i: request.POST.get(i)})\n recname=request.user.id\n skills[recname]=temp\n person.skills=json.dumps(skills)\n person.save()\n return HttpResponseRedirect('/candidates')\n else:\n form = AddSkills()\n person=Person.objects.get(id=pk)\n name = person.first_name+\" \"+person.last_name\n return render(request, 'addskills/index.html', {'form':form, 'person':name})\n\ndef delete_person(request, pk):\n if request.method == 'GET':\n person = Person.objects.get(id=pk)\n print(person)\n person.delete()\n print(person)\n return 
HttpResponseRedirect('/candidates')\n","repo_name":"toshani08/P-VS-Z-","sub_path":"django-drf-react-quickstart/project/candidates/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74525128943","text":"import logging\n\nfrom aiohttp import web\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\nconfig = {'host': 'mongo', 'port': 27017, 'name': 'stock-exchanges'}\n\nmongo_client = AsyncIOMotorClient(host=config['host'], port=config['port'])\ndatabase = mongo_client[config['name']]\n\nroutes = web.RouteTableDef()\n\n\n@routes.get('/api/v1/markets')\nasync def handle_markets(request):\n collections = await database.list_collection_names()\n data = {\n collection: await database[collection].distinct('symbol')\n for collection in collections\n }\n logging.info(msg=f\"Returned count: {len(data)}\")\n return web.json_response(data, status=200)\n\n\n@routes.get('/api/v1/trades')\nasync def trades_handle(request):\n \"\"\"\n\n :param request:\n exchange\n symbol\n since\n limit\n :return:\n \"\"\"\n params = request.rel_url.query\n\n exchange = params['exchange']\n query = {}\n\n if 'symbol' in params:\n query.update({'symbol': params['symbol'].upper().replace('_', '/')})\n\n if 'till' in params:\n query.update({'timestamp': {'$lte': int(params['till'])}})\n\n limit = min(int(params['limit']), 10000) if 'limit' in params else 100\n\n cursor = database[exchange]\\\n .find(query, {'_id': 0})\\\n .sort(\"timestamp\", -1)\\\n .limit(limit)\n\n data = [item async for item in cursor]\n logging.info(msg=f\"Returned count: {len(data)}\")\n\n return web.json_response(data, status=200)\n","repo_name":"AdamovskyiAnatolii/bdit4da02","sub_path":"services/api/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"91"} +{"seq_id":"34281911131","text":"# Plot AUC curves for classifiers\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\n 3 Features\n'''\n# feature_whole = pd.read_csv('Data/precision_recall_3Features_filtered_False.csv')\n# feature_subset = pd.read_csv('Data/precision_recall_3Features_filtered_True.csv')\n# n_feature = 3\n# whole_auc = {'mlp':0.935, 'logistic_regression':0.930, 'svc':0.905} # scores need to be added manually\n# subset_auc = {'mlp':0.891, 'logistic_regression':0.872, 'svc':0.887} # scores need to be added manually\n\n'''\n 7 Features\n'''\nfeature_whole = pd.read_csv('Data/precision_recall_7Features_filtered_False.csv')\nfeature_subset = pd.read_csv('Data/precision_recall_7Features_filtered_True.csv')\nn_feature = 7\nwhole_auc = {'mlp':0.921, 'logistic_regression':0.923, 'svc':0.912} # scores need to be added manually\nsubset_auc = {'mlp':0.882, 'logistic_regression':0.887, 'svc':0.885} # scores need to be added manually\n\nset_indx = 1 if n_feature == 7 else 2\n\nprint(feature_whole, feature_subset)\nprint(feature_whole.columns, feature_subset.columns)\n\nfor idx_feature_whole in feature_whole.index:\n feature_whole.loc[idx_feature_whole,'precisions'] = \\\n np.array(feature_whole.loc[idx_feature_whole,'precisions'].replace('[','').replace(']','').split()).astype(np.float)\n\n feature_whole.loc[idx_feature_whole,'recalls'] = \\\n np.array(feature_whole.loc[idx_feature_whole,'recalls'].replace('[','').replace(']','').split()).astype(np.float)\n\nfor idx_feature_subset in feature_subset.index:\n 
feature_subset.loc[idx_feature_subset,'precisions'] = \\\n np.array(feature_subset.loc[idx_feature_subset,'precisions'].replace('[','').replace(']','').split()).astype(np.float)\n\n feature_subset.loc[idx_feature_subset,'recalls'] = \\\n np.array(feature_subset.loc[idx_feature_subset,'recalls'].replace('[','').replace(']','').split()).astype(np.float)\n\n\nfeature_whole_lr_recalls = feature_whole.iloc[0,2]\nfeature_whole_lr_precisons = feature_whole.iloc[0,1]\nfeature_whole_svc_recalls = feature_whole.iloc[1,2]\nfeature_whole_svc_precisons = feature_whole.iloc[1,1]\nfeature_whole_nn_recalls = feature_whole.iloc[2,2]\nfeature_whole_nn_precisons = feature_whole.iloc[2,1]\n\nfeature_subset_lr_recalls = feature_subset.iloc[0,2]\nfeature_subset_lr_precisons = feature_subset.iloc[0,1]\nfeature_subset_svc_recalls = feature_subset.iloc[1,2]\nfeature_subset_svc_precisons = feature_subset.iloc[1,1]\nfeature_subset_nn_recalls = feature_subset.iloc[2,2]\nfeature_subset_nn_precisons = feature_subset.iloc[2,1]\n\nfig = plt.figure()\ndataset_str = '(Dataset ' + str(set_indx) + ')'\nplt.plot(feature_whole_nn_recalls,feature_whole_nn_precisons, c='black', label=('Neural Network ' + dataset_str + ', AUC=' + str(whole_auc['mlp'])))\nplt.plot(feature_whole_lr_recalls,feature_whole_lr_precisons, c='orange', label=('Logistic Regression ' + dataset_str + ', AUC=' + str(whole_auc['logistic_regression'])))\nplt.plot(feature_whole_svc_recalls,feature_whole_svc_precisons, c='green', label=('Support Vector Machine ' + dataset_str + ', AUC=' + str(whole_auc['svc'])))\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.legend(loc='lower left')\nfig.savefig('../Figures/' + str(n_feature) + '_features_precision_recall_whole.png')\nplt.show()\n\nfig = plt.figure()\nsubset_str = '(Subset ' + str(set_indx) + ')'\nplt.plot(feature_subset_nn_recalls,feature_subset_nn_precisons, c='black', label=('Neural Network ' + subset_str + ', AUC=' + str(subset_auc['mlp'])))\nplt.plot(feature_subset_lr_recalls,feature_subset_lr_precisons, c='orange', label=('Logistic Regression ' + subset_str + ', AUC=' + str(subset_auc['logistic_regression'])))\nplt.plot(feature_subset_svc_recalls,feature_subset_svc_precisons, c='green', label=('Support Vector Machine ' + subset_str + ', AUC=' + str(subset_auc['svc'])))\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.legend(loc='lower left')\nfig.savefig('../Figures/' + str(n_feature) + '_features_precision_recall_subset.png')\nplt.show()\n\n","repo_name":"renshuangxia/Predict-PaO2-with-SpO2","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9259501629","text":"# -*- coding: utf-8 -*-\r\nfrom odoo import models,fields,api,tools\r\n\r\nclass is_account_move_line(models.Model):\r\n _name='is.account.move.line'\r\n _description='is.account.move.line'\r\n _order='move_id desc, id'\r\n _auto = False\r\n\r\n move_id = fields.Many2one('account.move', 'Facture')\r\n #internal_number = fields.Char(\"N°Facture\")\r\n invoice_date = fields.Date(\"Date Facture\")\r\n #order_id = fields.Many2one('sale.order', 'Commande')\r\n product_id = fields.Many2one('product.product', 'Article')\r\n description = fields.Text(\"Description\")\r\n quantity = fields.Float('Quantité' , digits=(14,2))\r\n price_unit = fields.Float('Prix unitaire', digits=(14,4))\r\n price_subtotal = fields.Float('Montant HT' , digits=(14,4))\r\n partner_id = fields.Many2one('res.partner', 
u'Client/Fournisseur Facturé')\r\n #move_id = fields.Many2one('stock.move', 'Mouvement')\r\n move_type = fields.Char(\"Type Facture\")\r\n state = fields.Char(\"Etat Facture\")\r\n\r\n\r\n def init(self):\r\n cr=self._cr\r\n tools.drop_view_if_exists(cr, 'is_account_move_line')\r\n cr.execute(\"\"\"\r\n CREATE OR REPLACE view is_account_move_line AS (\r\n select\r\n aml.id,\r\n aml.move_id,\r\n am.invoice_date,\r\n aml.product_id,\r\n aml.name description,\r\n aml.quantity,\r\n aml.price_unit,\r\n aml.price_subtotal,\r\n am.partner_id,\r\n am.move_type,\r\n am.state\r\n from account_move_line aml inner join account_move am on aml.move_id=am.id\r\n );\r\n \"\"\")\r\n# bsa14=# select * from sale_order_line_invoice_rel ;\r\n# invoice_line_id | order_line_id \r\n# -----------------+---------------\r\n# 1 | 2\r\n","repo_name":"tonygalmiche/is_bsa14","sub_path":"models/is_account_move_line.py","file_name":"is_account_move_line.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19487844560","text":"import sys\nimport random\n\nfrom cowboysmall.sims.simulation import Simulation\n\n\nclass Pi(Simulation):\n\n def step(self, iteration: int, data: dict) -> None:\n x, y = random.random(), random.random()\n if (x ** 2) + (y ** 2) <= 1:\n data['total'] += 1\n\n\ndef main(argv):\n random.seed(1337)\n\n iterations = int(argv[0])\n\n sim = Pi({'total': 0})\n data = sim.run(iterations)\n\n print()\n print('Pi - %s iterations' % (iterations))\n print()\n print(' Total: %8d' % (data['total']))\n print(' Pi: %8f' % (data['total'] * 4 / float(iterations)))\n print()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"cowboysmall-research/simulations","sub_path":"examples/pi/pi_01.py","file_name":"pi_01.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11685469971","text":"from typing import List, NewType\n\n\nclass ListNode: \n Data = \"\" \n Pointer = -1\n def __init__(self,Data:str = \"\",Pointer:int = 0):\n self.Data = Data\n self.Pointer = Pointer\n def __str__(self):\n return(self.Data+\" \"+str(self.Pointer))\n\nclass LinkedList():\n CONST_NULLPOINTER = -1\n def __init__(self):\n self.List = []\n self.startPointer = self.CONST_NULLPOINTER\n self.freeListPointer = 0\n for i in range(4):\n self.List.append(ListNode(Pointer = i+1))\n self.List.append(ListNode(Pointer = self.CONST_NULLPOINTER)) \n def print(self):\n for i in range(len(self.List)):\n print(self.List[i])\n def InsertNode(self,newItem):\n if self.freeListPointer != self.CONST_NULLPOINTER:\n self.newNodePointer = self.freeListPointer\n self.List[self.newNodePointer].Data = newItem \n self.freeListPointer = self.List[self.freeListPointer].Pointer\n self.thisNodePointer = self.startPointer\n self.previousNodePointer = self.CONST_NULLPOINTER\n while self.thisNodePointer != self.CONST_NULLPOINTER and self.List[self.thisNodePointer].Data < newItem:\n self.previousNodePointer = self.thisNodePointer\n self.thisNodePointer = self.List[self.thisNodePointer].Pointer\n if self.previousNodePointer == self.CONST_NULLPOINTER:\n self.List[self.newNodePointer].Pointer = self.startPointer\n self.startPointer = self.newNodePointer\n else:\n self.List[self.newNodePointer].Pointer = self.List[self.previousNodePointer].Pointer\n self.List[self.previousNodePointer].Pointer = self.newNodePointer\n def FindNode(self,dataItem):\n 
self.currentNodePointer = self.startPointer\n while self.currentNodePointer != self.CONST_NULLPOINTER and self.List[self.currentNodePointer].Data != dataItem:\n self.currentNodePointer = self.List[self.currentNodePointer].Pointer\n return self.currentNodePointer\n def DeleteNode(self,dataItem):\n self.thisNodePointer = self.startPointer\n while self.thisNodePointer != self.CONST_NULLPOINTER and self.List[self.thisNodePointer].Data != dataItem:\n self.previousNodePointer =self.thisNodePointer\n self.thisNodePointer = self.List[self.thisNodePointer].Pointer\n if self.thisNodePointer != self.CONST_NULLPOINTER:\n if self.thisNodePointer == self.startPointer:\n self.startPointer = self.List[self.startPointer].Pointer\n else:\n self.List[self.previousNodePointer].Pointer = self.List[self.thisNodePointer].Pointer\n self.List[self.thisNodePointer].Pointer = self.freeListPointer\n self.freeListPointer = self.thisNodePointer\n def OutputAllNodes(self):\n self.currentNodePointer = self.startPointer# start at beginning of list\n if self.startPointer == self.CONST_NULLPOINTER:\n print(\"No data in list\")\n print(self.currentNodePointer)\n while self.currentNodePointer != self.CONST_NULLPOINTER: # while not end of list\n print(self.currentNodePointer, \"\",self.List[self.currentNodePointer].Data)\n \n # follow the pointer to the next node\n self.currentNodePointer = List[self.currentNodePointer].Pointer\n\n\n\n","repo_name":"shlegg4/CS-Programming","sub_path":"Questions_Misc/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"24762006595","text":"class NumMatrix(object):\n def __init__(self, matrix):\n if not matrix: return \n self.matrix = matrix\n self.row = len(matrix[0]) + 1\n self.col = len(matrix) + 1\n self.bit = [[0 for _ in xrange(self.row)] for _ in xrange(self.col)]\n for i in xrange(len(self.matrix)):\n for j in xrange(len(self.matrix[0])):\n self.add(i, j , matrix[i][j])\n return\n\n def update(self, row, col, val):\n delta = val - self.matrix[row][col]\n if delta != 0:\n self.add(row, col, delta)\n self.matrix[row][col] = val\n return \n\n def sumRegion(self, row1, col1, row2, col2):\n\n def sumRegion_bit(row, col):\n row = row + 1\n col = col + 1\n i = row\n ret = 0\n while i > 0:\n j = col\n while j > 0 :\n ret += self.bit[i][j]\n j -= (j & -j)\n i -= (i & -i)\n return ret\n\n ret = sumRegion_bit(row2, col2)\n if row1 > 0 and col1 > 0:\n ret += sumRegion_bit(row1 - 1, col1 -1)\n if col1 > 0:\n ret -= sumRegion_bit(row2, col1 - 1)\n if row1 > 0:\n ret -= sumRegion_bit(row1 - 1, col2)\n return ret\n\n def add(self, row, col, val):\n row = row + 1\n col = col + 1\n i = row\n while i < self.col :\n j = col\n while j < self.row:\n self.bit[i][j] += val\n j += (j & -j)\n i += (i & -i)\n return\n\n","repo_name":"Omega094/lc_practice","sub_path":"range_sum_query_2d_mutable/RangeSum_BIT.py","file_name":"RangeSum_BIT.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32081458854","text":"import logging\nimport voluptuous as vol\nimport homeassistant.helpers.config_validation as cv\nimport requests\nimport geopy.distance\nimport datetime\nimport time\n\n_LOGGER = logging.getLogger(__name__)\n\n# Domain and component constants and validation\nDOMAIN = 'open_sense'\nCONF_USERNAME = \"username\"\nCONF_PASSWORD = \"password\"\nCONF_LAT = 
\"latitude\"\nCONF_LON = \"longitude\"\nCONF_MEASURANDS = \"measurands\"\nCONF_TOKEN = \"token\"\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Optional(CONF_USERNAME): cv.string,\n vol.Optional(CONF_PASSWORD): cv.string,\n vol.Required(CONF_LAT): cv.latitude,\n vol.Required(CONF_LON): cv.longitude,\n vol.Required(CONF_MEASURANDS): cv.string,\n vol.Required(CONF_TOKEN): cv.string,\n }),\n}, extra=vol.ALLOW_EXTRA)\n\n\ndef setup(hass, config):\n \"\"\" setup OpenSense domain \"\"\"\n\n config = config.get(DOMAIN)\n\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n lat = config.get(CONF_LAT)\n lon = config.get(CONF_LON)\n measurands = config.get(CONF_MEASURANDS)\n token = config.get(CONF_TOKEN)\n\n # Attempt to login\n \"\"\"api_key = get_api_key()\n if api_key == -1:\n _LOGGER.error(\"OpenSense login failed.\")\n return False\"\"\"\n if measurands == \"all\":\n sensors = get_sensors_for_all_measurands(lat, lon)\n else:\n sensors = get_sensors_for_given_measurands(measurands, lat, lon)\n\n for sensor in sensors:\n sensor.set_state(hass)\n\n return True\n\n\nclass Sensor:\n\n def __init__(self, sensor_id, measurand):\n \"\"\"Initialize the sensor\"\"\"\n self.id = sensor_id\n if sensor_id == -1:\n self.measurand = measurand\n self.latitude = \"\"\n self.longitude = \"\"\n self.altitude_above_ground = \"\"\n self.sensor_model = \"\"\n self.accuracy = \"\"\n self.attribution_text = \"\"\n self.value = \"no sensors near\"\n self.unit = \"\"\n else:\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/{0}\".format(sensor_id)\n r = requests.get(link)\n data = r.json()\n self.measurand = measurand\n self.latitude = data['location']['lat']\n self.longitude = data['location']['lng']\n self.altitude_above_ground = data['altitudeAboveGround']\n self.sensor_model = data['sensorModel']\n self.accuracy = data['accuracy']\n self.attribution_text = data['attributionText']\n self.value, self.unit = OpenSense.get_last_value(sensor_id)\n self.attributes = {\n \"friendly name\": \"OpenSense {0}\".format(self.measurand),\n \"id\": self.id,\n \"position\": \"{0}; {1}\".format(self.get_latitude, self.get_longitude),\n \"sensor model\": self.get_sensor_model,\n \"altitude above ground\": self.get_altitude_above_ground,\n \"accuracy\": self.get_accuracy,\n \"attribution text\": self.get_attribution_text\n }\n\n @property\n def get_id(self):\n return self.id\n\n @property\n def get_measurand(self):\n return self.measurand\n\n @property\n def get_value(self):\n return self.value\n\n @property\n def get_latitude(self):\n return self.latitude\n\n @property\n def get_longitude(self):\n return self.longitude\n\n @property\n def get_altitude_above_ground(self):\n return self.altitude_above_ground\n\n @property\n def get_sensor_model(self):\n return self.sensor_model\n\n @property\n def get_accuracy(self):\n return self.accuracy\n\n @property\n def get_attribution_text(self):\n return self.attribution_text\n\n @property\n def get_unit(self):\n return self.unit\n\n @property\n def get_attributes(self):\n return self.attributes\n\n def set_state(self, hass):\n if self.get_id == -1:\n hass.states.set(\"OpenSense.{0}\".format(self.get_measurand), self.get_value, self.get_attributes)\n else:\n hass.states.set(\"OpenSense.{0}\".format(self.get_measurand), \"%.2f\" % self.get_value +\n \" {0}\".format(self.get_unit), self.get_attributes)\n\n\nclass OpenSense:\n\n @staticmethod\n def find_closest_sensor(data, lat, lon):\n min_dist = float(\"inf\")\n sensor_id = -1\n 
location = (lat, lon)\n for json in data:\n j_lat = json['location']['lat']\n j_lon = json['location']['lng']\n location2 = (j_lat, j_lon)\n dist = geopy.distance.geodesic(location, location2).m\n if dist < min_dist:\n if OpenSense.get_last_value(json['id'])[0] is not None:\n min_dist = dist\n sensor_id = json['id']\n return sensor_id\n\n @staticmethod\n def get_id_of_closest_sensor(lat, lon, measurand_id):\n dist = 100\n data = []\n while len(data) <= 1 and dist < 10000:\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors?measurandId={0}&refPoint={1}, \" \\\n \"{2}&maxDistance={3}\" \\\n .format(measurand_id, lat, lon, dist)\n r = requests.get(link)\n data = r.json()\n if len(data) == 1:\n if OpenSense.get_last_value(data[0]['id'])[0] is not None:\n return data[0]['id']\n dist += 200\n if len(data) == 0:\n return -1\n if len(data) == 1:\n return -1\n return OpenSense.find_closest_sensor(data, lat, lon)\n\n @staticmethod\n def get_last_value(sensor_id):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/{0}/values\".format(sensor_id)\n r = requests.get(link)\n data = r.json()\n last_index = len(data['values']) - 1\n if last_index == -1:\n return None, None\n return data['values'][last_index]['numberValue'], OpenSense.get_unit_name_from_unit_id(data['unitId'])\n\n @staticmethod\n def get_measurand_id_from_sensor(sensor_id):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/{0}\".format(sensor_id)\n r = requests.get(link)\n data = r.json()\n return data['measurandId']\n\n @staticmethod\n def get_measurand_name_from_measurand_id(measurand_id):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/measurands/{0}\".format(measurand_id)\n r = requests.get(link)\n data = r.json()\n return data['name']\n\n @staticmethod\n def get_measurand_id_from_measurand_name(measurand_name):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/measurands?name={0}\".format(measurand_name)\n r = requests.get(link)\n data = r.json()\n return data[0]['id']\n\n @staticmethod\n def create_sensor(measurand_id, unit_id, lat, lon, license_id, altitude_above_ground, direction_vertical,\n direction_horizontal, sensor_model, accuracy, attribution_text, attribution_url):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/addSensor\"\n json_data = \\\n {\n \"measurandId\": measurand_id,\n \"unitId\": unit_id,\n \"location\":\n {\n \"lat\": lat,\n \"lng\": lon\n },\n \"licenseId\": license_id,\n \"altitudeAboveGround\": altitude_above_ground,\n \"directionVertical\": direction_vertical,\n \"directionHorizontal\": direction_horizontal,\n \"sensorModel\": sensor_model,\n \"accuracy\": accuracy,\n \"attributionText\": attribution_text,\n \"attributionURL\": attribution_url\n }\n headers = \\\n {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": OpenSense.get_api_key()\n }\n r = requests.post(link, headers=headers, json=json_data)\n data = r.json()\n return data['id']\n\n @staticmethod\n def get_api_key(username=\"smarthome\", password=\"8KO9koE+\"):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/users/login\"\n json_data = \\\n {\n \"username\": username,\n \"password\": password\n }\n r = requests.post(link, json=json_data)\n if r.status_code != 200:\n return -1\n return r.json()['id']\n\n @staticmethod\n def get_unit_name_from_unit_id(unit_id):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/units/{0}\".format(unit_id)\n r = 
requests.get(link)\n data = r.json()\n return data['name']\n\n @staticmethod\n def post_value_to_sensor(sensor_id, value, timestamp=-1):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/addValue\"\n if timestamp == -1:\n timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]\n\n json_data = \\\n {\n \"sensorId\": sensor_id,\n \"timestamp\": timestamp,\n \"numberValue\": value\n }\n\n headers = \\\n {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": OpenSense.get_api_key()\n }\n\n r = requests.post(link, headers=headers, json=json_data)\n return r.status_code\n\n @staticmethod\n def collapsed_post_to_sensor(sensor_id, values, timestamps):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/sensors/addMultipleValues\"\n\n collapsed_messages = []\n\n for i in range(len(values)):\n json_value = \\\n {\n \"sensorId\": sensor_id,\n \"timestamp\": timestamps[i],\n \"numberValue\": values[i]\n }\n collapsed_messages.append(json_value)\n\n message = \\\n {\n \"collapsedMessages\": collapsed_messages\n }\n\n headers = \\\n {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": OpenSense.get_api_key()\n }\n\n r = requests.post(link, headers=headers, json=message)\n return r.status_code\n\n\ndef get_sensors_for_all_measurands(lat, lon):\n link = \"https://www.opensense.network/progprak/beta/api/v1.0/measurands\"\n r = requests.get(link)\n data = r.json()\n number_of_measurands = len(data)\n sensors = []\n for i in range(number_of_measurands):\n measurand_id = i + 1\n measurand_name = OpenSense.get_measurand_name_from_measurand_id(measurand_id)\n sensor_id = OpenSense.get_id_of_closest_sensor(lat, lon, measurand_id)\n if sensor_id == -1:\n sensors.append(Sensor(sensor_id, measurand_name))\n else:\n sensors.append(Sensor(sensor_id, measurand_name))\n return sensors\n\n\ndef get_sensors_for_given_measurands(measurands, lat, lon):\n measurands = measurands.replace(\" \", \"\").split(',')\n\n sensors = []\n for measurand in measurands:\n measurand_id = OpenSense.get_measurand_id_from_measurand_name(measurand)\n sensor_id = OpenSense.get_id_of_closest_sensor(lat, lon, measurand_id)\n if sensor_id == -1:\n sensors.append(Sensor(sensor_id, measurand))\n else:\n sensors.append(Sensor(sensor_id, measurand))\n return sensors\n","repo_name":"mateo9686/HomeAssistantComponent","sub_path":"open_sense.py","file_name":"open_sense.py","file_ext":"py","file_size_in_byte":11633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72823554224","text":"import logging\nimport os\n\nfrom argparse import ArgumentParser\n\nimport torch\n\nimport torch.nn as nn\n\n\n\nfrom src.detr.matcher import build_matcher\n\nfrom src.utils.configs import get_default_configuration, load_config\n\n\nfrom src.utils.confusion import BinaryConfusionMatrix\nfrom src.data import data_factory\n\nimport src.utils.visualise as vis_tools\n\nfrom tqdm import tqdm\nimport numpy as np\nfrom PIL import Image\n\nimport time\nimport glob\n\n\nimport pinet.CurveLanes.agent as agent\n\nfrom pinet.CurveLanes.parameters import Parameters\nimport pinet.CurveLanes.util as util\nimport cv2\np = Parameters()\n\nimage_mean=[0.485, 0.456, 0.406]\nimage_std=[0.229, 0.224, 0.225]\n\ndef eliminate_fewer_points(x, y):\n # eliminate fewer points\n out_x = []\n out_y = []\n for i, j in zip(x, y):\n if len(i)>2:\n out_x.append(i)\n out_y.append(j) 
\n return out_x, out_y \n\n############################################################################\n## generate raw output\n############################################################################\ndef generate_result(confidance, offsets,instance, thresh):\n\n mask = confidance > thresh\n\n grid = p.grid_location[mask]\n\n offset = offsets[mask]\n feature = instance[mask]\n\n lane_feature = []\n x = []\n y = []\n for i in range(len(grid)):\n if (np.sum(feature[i]**2))>=0:\n point_x = int((offset[i][0]+grid[i][0])*p.resize_ratio)\n point_y = int((offset[i][1]+grid[i][1])*p.resize_ratio)\n if point_x > p.x_size or point_x < 0 or point_y > p.y_size or point_y < 0:\n continue\n if len(lane_feature) == 0:\n lane_feature.append(feature[i])\n x.append([point_x])\n y.append([point_y])\n else:\n flag = 0\n index = 0\n min_feature_index = -1\n min_feature_dis = 10000\n for feature_idx, j in enumerate(lane_feature):\n dis = np.linalg.norm((feature[i] - j)**2)\n if min_feature_dis > dis:\n min_feature_dis = dis\n min_feature_index = feature_idx\n if min_feature_dis <= p.threshold_instance:\n lane_feature[min_feature_index] = (lane_feature[min_feature_index]*len(x[min_feature_index]) + feature[i])/(len(x[min_feature_index])+1)\n x[min_feature_index].append(point_x)\n y[min_feature_index].append(point_y)\n elif len(lane_feature) < 12:\n lane_feature.append(feature[i])\n x.append([point_x])\n y.append([point_y])\n \n return x, y\ndef test_ori(lane_agent, ori_image, test_images,w_ratio, h_ratio, draw_type, thresh=p.threshold_point): # p.threshold_point:0.81\n\n result = lane_agent.predict_lanes_test(test_images)\n torch.cuda.synchronize()\n confidences, offsets, instances = result[-1]\n test_images = test_images.cpu().numpy()\n# logging.error('TEST TEST IMAGES ' + str(test_images.shape))\n num_batch = len(test_images)\n\n out_x = []\n out_y = []\n out_images = []\n \n for i in range(num_batch):\n # test on test data set\n image = np.copy(test_images[i])\n image = np.rollaxis(image, axis=2, start=0)\n image = np.rollaxis(image, axis=2, start=0)*255.0\n image = image.astype(np.uint8).copy()\n\n# logging.error('TEST LOOP IMAGE ' + str(image.shape))\n\n confidence = confidences[i].view(p.grid_y, p.grid_x).cpu().data.numpy()\n \n offset = offsets[i].cpu().data.numpy()\n offset = np.rollaxis(offset, axis=2, start=0)\n offset = np.rollaxis(offset, axis=2, start=0)\n \n instance = instances[i].cpu().data.numpy()\n instance = np.rollaxis(instance, axis=2, start=0)\n instance = np.rollaxis(instance, axis=2, start=0)\n \n # generate point and cluster\n raw_x, raw_y = generate_result(confidence, offset, instance, thresh)\n\n # eliminate fewer points\n in_x, in_y = eliminate_fewer_points(raw_x, raw_y)\n \n # sort points along y \n in_x, in_y = util.sort_along_y(in_x, in_y) \n\n if draw_type == 'line':\n result_image = util.draw_lines_ori(in_x, in_y, ori_image,w_ratio, h_ratio) \n elif draw_type == 'point':\n result_image = util.draw_point_ori(in_x, in_y, ori_image,w_ratio, h_ratio) \n else:\n result_image = util.draw_points(in_x, in_y,np.copy(image)) \n\n out_x.append(in_x)\n out_y.append(in_y)\n out_images.append(result_image)\n \n return out_x, out_y, out_images\n\n\ndef evaluate(dataloader, model, confusion, config,args):\n \n model.evaluate_mode()\n\n \n logging.error('VALIDATION')\n # Iterate over dataset\n for i, batch in enumerate(tqdm(dataloader)):\n \n seq_images, targets, _ = batch\n if seq_images == None:\n continue\n seq_images = seq_images.cuda()\n cuda_targets = []\n\n \n for b in targets:\n 
temp_dict={}\n temp_dict['center_img'] = b['center_img'].cuda()\n\n temp_dict['labels'] = b['labels'].cuda()\n temp_dict['roads'] = b['roads'].cuda()\n temp_dict['control_points'] = b['control_points'].cuda()\n temp_dict['con_matrix'] = b['con_matrix'].cuda()\n temp_dict['endpoints'] = b['endpoints'].cuda()\n\n temp_dict['mask'] = b['mask'].cuda()\n temp_dict['bev_mask'] = b['bev_mask'].cuda()\n \n temp_dict['obj_corners'] = b['obj_corners'].cuda()\n temp_dict['obj_converted'] = b['obj_converted'].cuda()\n temp_dict['obj_exists'] = b['obj_exists'].cuda()\n \n \n\n temp_dict['left_traffic'] = b['left_traffic'].cuda()\n temp_dict['outgoings'] = b['outgoings']\n temp_dict['incomings'] = b['incomings']\n cuda_targets.append(temp_dict)\n \n \n logging.error('SCENE ' + targets[0]['scene_name'])\n logging.error('SAMPLE ' + targets[0]['sample_token'])\n \n test_image = seq_images/255\n \n\n w_ratio = p.x_size * 1.0 / 800\n h_ratio = p.y_size* 1.0 / 448\n \n ori_image = np.uint8(cv2.resize(np.squeeze(np.transpose(seq_images.data.cpu().numpy(),(0,2,3,1)),axis=0),(800,448)))\n \n out_x, out_y, ti = test_ori(model, ori_image, test_image, w_ratio, h_ratio,draw_type= 'point',thresh=p.threshold_point)\n \n calib = targets[0]['calib'].numpy()\n\n \n coefs_list, boundaries_list, out_dict = vis_tools.get_spline_for_pinet(out_x[0],out_y[0], calib, targets[0])\n \n \n '''\n GET ESTIMATES BASED ON THRESHOLDING\n '''\n \n static_inter_dict = dict()\n static_inter_dict['src_boxes'] = out_dict['src_boxes']\n \n hausdorff_static_dist, hausdorff_static_idx, hausdorff_gt = vis_tools.hausdorff_match(out_dict, targets[0],pinet=True)\n try:\n confusion.update(out_dict, hausdorff_gt, hausdorff_static_idx, targets[0], static=True,pinet=True)\n \n except Exception as e:\n logging.error('EXCEPTION IN CONFUSION ')\n logging.error(str(e))\n continue\n\n# vis_tools.pinet_save_results_eval(seq_images.cpu().numpy(), [out_x, out_y, ti], coefs_list,boundaries_list,targets, config)\n\n \n return confusion\n\ndef load_checkpoint(path, model, load_orig_ckpt=False):\n \n ckpt = torch.load(path)\n \n \n if isinstance(model, nn.DataParallel):\n model = model.module\n \n \n \n model.load_state_dict(ckpt['model'],strict=True)\n # with torch.no_grad():\n # model.left_object_embed.weight.copy_(model.object_embed.weight)\n \n\n \n if 'iteration' not in ckpt.keys():\n to_return_iter = 0\n else:\n to_return_iter = ckpt['iteration']\n # to_return_iter = 0\n logging.error('LOADED MY')\n return ckpt['epoch'], ckpt['best_iou'],to_return_iter\n\n\n\n# Load the configuration for this experiment\ndef get_configuration(args):\n\n # Load config defaults\n config = get_default_configuration()\n\n\n\n return config\n\n\ndef create_experiment(config, resume=None):\n\n # Restore an existing experiment if a directory is specified\n if resume is not None:\n print(\"\\n==> Restoring experiment from directory:\\n\" + resume)\n logdir = resume\n \n else:\n # Otherwise, generate a run directory based on the current time\n # name = datetime.now().strftime('{}_%y-%m-%d--%H-%M-%S').format('run')\n name = 'pinet'\n logdir = os.path.join(os.path.expandvars(config.logdir), name)\n print(\"\\n==> Creating new experiment in directory:\\n\" + logdir)\n os.makedirs(logdir,exist_ok=True)\n os.makedirs(os.path.join(config.logdir,'val_images'),exist_ok=True)\n os.makedirs(os.path.join(config.logdir,'train_images'),exist_ok=True)\n \n # Display the config options on-screen\n print(config.dump())\n \n # Save the current config\n with open(os.path.join(logdir, 'config.yml'), 
'w') as f:\n f.write(config.dump())\n \n return logdir\n\n\n\n \ndef freeze_backbone_layers(model):\n logging.error('MODEL FREEZE')\n for n, p in model.named_parameters():\n# logging.error('STR ' + str(n))\n if \"backbone\" in n and p.requires_grad:\n \n# if (('block14' in n) |('block15' in n) |('block16' in n) |('block17' in n) |('block18' in n) \n# |('block19' in n) | ('block20' in n) | ('block21' in n) | ('spp' in n)):\n if ( ('block18' in n) |('block19' in n) | ('block20' in n) | ('block21' in n) | ('spp' in n)):\n p.requires_grad_(True)\n else:\n p.requires_grad_(False)\n # logging.error(str(n) + ', '+str(p.requires_grad))\n \n# logging.error(str(n) + ', '+str(p.requires_grad))\n\n\nobject_refinement = True\n\napply_poly_loss = True\n\nsplit_pe = True\n\napply_bev_pe = True\nabs_bev = True\n\nonly_bev_pe=False\n\n\nnum_object_classes = 8\n\nbase_dir = '/scratch_net/catweazle/cany/lanefinder'\n\n\ndef main():\n\n large_parameters = dict()\n large_parameters['hidden_dim'] = 256\n large_parameters['dim_feedforward'] = 512\n \n large_parameters['class_embed_dim']=256\n large_parameters['class_embed_num']=3\n \n large_parameters['box_embed_dim']=256\n large_parameters['box_embed_num']=3\n large_parameters['endpoint_embed_dim']=256\n large_parameters['endpoint_embed_num']=3\n large_parameters['assoc_embed_dim']=256\n large_parameters['assoc_embed_last_dim']=128\n large_parameters['assoc_embed_num']=3\n large_parameters['assoc_classifier_dim']=256\n large_parameters['assoc_classifier_num']=3\n \n \n num_queries = 100\n num_enc_layers = 4\n num_dec_layers = 4\n \n# model_name = 'maxi_combined_objects_3'\n model_name = 'pinet'\n \n parser = ArgumentParser()\n\n parser.add_argument('--resume', default=None, \n help='path to an experiment to resume')\n\n parser.add_argument('--apply_poly_loss', type=bool, default=apply_poly_loss,\n help='whether it is on dgx')\n \n parser.add_argument('--objects', type=bool, default=True,\n help='whether estimate objects')\n \n parser.add_argument('--num_object_queries', default=100, type=int,\n help=\"Number of query slots\")\n \n \n \n parser.add_argument('--num_object_classes', default=8, type=int,\n help=\"Num object classes\")\n \n parser.add_argument('--num_spline_points', default=3, type=int,\n help=\"Num object classes\")\n \n \n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. 
If set, only the mask head will be trained\")\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', default=True,\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=num_enc_layers, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=num_dec_layers, type=int,\n help=\"Number of decoding layers in the transformer\")\n \n \n parser.add_argument('--dim_feedforward', default=large_parameters['dim_feedforward'], type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n \n \n parser.add_argument('--hidden_dim', default=large_parameters['hidden_dim'], type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n \n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n \n parser.add_argument('--nheads', default=4, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=num_queries, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--pre_norm', action='store_true')\n\n # * Segmentation\n parser.add_argument('--masks',default=False,\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_obj_cost_class', default=2, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_obj_cost_center', default=3, type=float,\n help=\"Class coefficient in the matching cost\")\n\n parser.add_argument('--set_obj_cost_len', default=0.5, type=float,\n help=\"Class coefficient in the matching cost\")\n\n parser.add_argument('--set_obj_cost_orient', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_obj_cost_image_center', default=0, type=float,\n help=\"Class coefficient in the matching cost\")\n\n parser.add_argument('--set_cost_class', default=2, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=1, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_end', default=1, type=float,\n help=\"L1 endpoint coefficient in the matching cost\")\n \n parser.add_argument('--set_cost_giou', default=1, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n\n \n parser.add_argument('--object_detection_loss_coef', default=4, type=float)\n parser.add_argument('--object_center_loss_coef', default=3, type=float)\n parser.add_argument('--object_len_loss_coef', default=0.5, type=float)\n parser.add_argument('--object_orient_loss_coef', default=0.5, type=float)\n \n parser.add_argument('--polyline_loss_coef', default=2, type=float)\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--assoc_loss_coef', default=1, type=float)\n 
parser.add_argument('--detection_loss_coef', default=3, type=float)\n parser.add_argument('--endpoints_loss_coef', default=2, type=float)\n parser.add_argument('--bbox_loss_coef', default=2, type=float)\n parser.add_argument('--focal_loss_coef', default=0.1, type=float)\n \n parser.add_argument('--loss_end_match_coef', default=1, type=float)\n \n \n parser.add_argument('--giou_loss_coef', default=2, type=float)\n parser.add_argument('--visible_loss_coef', default=1, type=float)\n \n parser.add_argument('--eos_coef', default=0.2, type=float,\n help=\"Relative classification weight of the no-object class\")\n \n parser.add_argument('--object_eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n \n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval',default=False, action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n \n args = parser.parse_args()\n \n \n print('GOT ARGS ')\n logging.error(str(args))\n \n # Load configuration\n config = get_configuration(args)\n \n # Create a directory for the experiment\n logdir = create_experiment(config, args.resume)\n \n config.save_logdir = logdir\n config.n_control_points = args.num_spline_points\n config.freeze()\n \n device = torch.device(args.device)\n # Setup experiment\n model = agent.Agent()\n # lane_agent.load_weights(804, \"tensor(0.5786)\")\n model.load_weights(32, \"tensor(1.1001)\")\n\n model.to(device)\n \n if config.train_dataset == 'nuscenes':\n \n train_loader,train_dataset, val_loader, val_dataset = data_factory.build_nuscenes_dataloader(config,args, val=True, pinet=True)\n \n else:\n train_loader,train_dataset, val_loader, val_dataset = data_factory.build_argoverse_dataloader(config,args, val=True, pinet=True)\n\n \n logging.error('LOADED MY CHECKPOINT')\n\n val_confusion = BinaryConfusionMatrix(1,args.num_object_classes)\n val_con = evaluate(val_loader, model, val_confusion,config, args)\n \n static_res_dict, object_res_dict = val_con.get_res_dict\n file1 = open(os.path.join(logdir,'val_res.txt'),\"a\")\n \n for k in static_res_dict.keys():\n logging.error(str(k) + ' : ' + str(static_res_dict[k]))\n file1.write(str(k) + ' : ' + str(static_res_dict[k]) + ' \\n')\n \n for k in object_res_dict.keys():\n logging.error(str(k) + ' : ' + str(object_res_dict[k]))\n file1.write(str(k) + ' : ' + str(object_res_dict[k]) + ' \\n')\n \n file1.close() \n \n\nif __name__ == '__main__':\n main()\n\n \n\n","repo_name":"ybarancan/STSU","sub_path":"validator_pinet.py","file_name":"validator_pinet.py","file_ext":"py","file_size_in_byte":19617,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"91"} 
+{"seq_id":"73293181742","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\nsys.path.append(\"../../../Asset Wealth\")\n\nimport os\nimport functools\n\nimport ee\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\n\nimport time\nfrom csv import DictReader\nimport json\n\nfrom src.data_utils import truncate\nfrom src.config import csv_path\nfrom src.config import gdrive_dir_viirs\nfrom src.config import download_path_viirs\nfrom src.config import country_code_map\n\n\nee.Authenticate()\nee.Initialize()\n\n\n\n# Function to get a square around point of interest\n# Rural : 10 km Radius\n# Urban : 2 km Radius\ndef bounding_box(loc, urban_rural, urban_radius, rural_radius):\n '''Function to get a square around point of interest.\n Rural : 10 km Radius\n Urban : 2 km Radius\n \n Args:\n loc(ee.Geometry.Point): Geolocation of cluster (from DHS survey)\n urban_rural(int): Binary encoding for type of region: 0 = urban, 1 = rural\n urban_radius(int): Radius around coordinates for Urban regions in meter\n rural_radius(int): Radius around coordinates for Rural regions in meter\n \n Returns:\n intermediate_box (ee.Geometry): bounding box around cluster coordinates\n with a size of 10x10km for rural/ 2x2km for Urban\n '''\n if urban_rural == 0 or urban_rural == '0':\n size = urban_radius\n else:\n size = rural_radius\n\n intermediate_buffer = loc.buffer(size) #buffer radius, half your box width in m\n intermediate_box = intermediate_buffer.bounds() #Draw a bounding box around the circle\n return(intermediate_box)\n\n\ndef get_image(cluster, survey_name, urban_radius, rural_radius):\n '''Extract Information about cluster to get Sentinel-2 image for corresponding year and coordinates.\n \n Args:\n cluster(DictReader object): Information about the Cluster (cluster number, coordinates, survey name, etc.)\n survey_name(str): Name of the survey (COUNTRY_YEAR)\n urban_radius(int): Radius around coordinates for Urban regions in meter\n rural_radius(int): Radius around coordinates for Rural regions in meter\n country_code(str): ISO code for survey country (COUNTRY)\n MAX_CLOUD_PROBABILITY(int): %\n\n Returns:\n Requests Image from Earth Engine. 
Files are named by the following pattern:\n Latitude_Longitude_begin-end_COUNTRY_r/u_sidelength\n coordinates: 4 Nachkommastellen\n date format: YYYYMMDD\n country: Official 3 letters acronym (ISO)\n Rural/Urban: u or r\n side length: Sidelength (size) of tile in km with one decimal place.\n '''\n # Get images collections\n viirs_img = ee.ImageCollection(\"NOAA/VIIRS/DNB/MONTHLY_V1/VCMSLCFG\")\n\n # Get time span\n year = cluster[\"SURVEY_YEAR\"]\n if int(year) < 2016:\n START_DATE = ee.Date('2015-06-01')\n END_DATE = ee.Date('2016-07-01')\n date_range = '20150601-20160701'\n else:\n START_DATE = ee.Date(str(int(year)) + '-01-01')\n END_DATE = ee.Date(str(int(year)) + '-12-31')\n date_range = str(year) + '0101-' + str(year) + '1231'\n\n # Point of interest (longitude, latidude)\n lat_float = float(cluster[\"LATNUM\"])\n lon_float = float(cluster[\"LONGNUM\"])\n loc = ee.Geometry.Point([lon_float, lat_float])\n # Region of interest\n region = bounding_box(loc, cluster['URBAN_RURA'], urban_radius, rural_radius)\n\n viirs_img = viirs_img.filterBounds(region).filterDate(START_DATE, END_DATE)\n\n viirs_img = viirs_img.select('avg_rad').median().clip(region)\n\n if ur == 'u':\n filename = str(truncate(lat_float, 4)) + '_' + \\\n str(truncate(lon_float, 4)) + '_' + \\\n str(date_range) + '_' + \\\n str(country_code) + '_' + \\\n ur + '_' + \\\n str(float(urban_radius / 1000))\n else:\n filename = str(truncate(lat_float, 4)) + '_' + \\\n str(truncate(lon_float, 4)) + '_' + \\\n str(date_range) + '_' + \\\n str(country_code) + '_' + \\\n ur + '_' + \\\n str(float(rural_radius / 1000))\n print(filename)\n task = ee.batch.Export.image.toDrive(**{\n 'image': s2CloudMasked,\n 'description': filename,\n 'folder': 'viirs',\n 'scale': 10})\n print('Create', filename)\n task.start()\n while task.active(): # request status of task\n print('Polling for task (id: {}).'.format(task.id))\n time.sleep(5) # set sleep timer for 5 sec if task is still active\n return loc\n\n\ndef get_survey_images(file_dir, survey_name, urban_radius, rural_radius):\n '''Get VIIRS Image for each Cluster and download from GoogleDrive.\n \n Args:\n file_dir(str): Path to DHS survey csv file\n survey_name(str): Name of the survey (COUNTRY_YEAR)\n urban_radius(int): Radius around coordinates for Urban regions in meter\n rural_radius(int): Radius around coordinates for Rural regions in meter\n '''\n with open(file_dir, 'r') as read_obj:\n # pass the file object to DictReader() to get the DictReader object\n dict_reader = DictReader(read_obj)\n # get a list of dictionaries from dct_reader\n if clusters[0]['COUNTRY'].replace('_', ' ') == 'Democratic Republic of Congo':\n country_code = 'COD'\n elif clusters[0]['COUNTRY'].replace('_', ' ') == 'Cote d\\'Ivoire':\n country_code = 'CIV'\n elif clusters[0]['COUNTRY'].replace('_', ' ') == 'Burkina Faso':\n country_code = 'BFA'\n elif clusters[0]['COUNTRY'].replace('_', ' ') == 'Sierra Leone':\n country_code = 'SLE'\n elif clusters[0]['COUNTRY'].replace('_', ' ') == 'Tanzania':\n country_code = 'TJK'\n\n else:\n country_code = country_code_map[clusters[0]['COUNTRY'].replace('_', ' ')]\n for cluster in clusters:\n loc = get_image(cluster, urban_radius, rural_radius, country_code, MAX_CLOUD_PROBABILITY)\n if float(cluster[\"DHSCLUST\"]) % 50 == 0:\n download_local(os.path.join(img_dir, survey_name))\n download_local(os.path.join(img_dir, survey_name))\n\n\ndef download_local(survey_dir):\n '''Download images from GoogleDrive folder.\n \n Args:\n survey_dir(str): Output directory for download\n 
'''\n # folder which want to download from Drive\n folder_id = gdrive_dir_viirs\n\n\n if survey_dir[-1] != '/':\n survey_dir = survey_dir + '/'\n\n file_list = drive.ListFile({'q': \"'{}' in parents and trashed=false\".format(folder_id)}).GetList()\n for i, file1 in enumerate(sorted(file_list, key=lambda x: x['title']), start=1):\n print('Downloading {} from GDrive ({}/{})'.format(file1['title'], i, len(file_list)))\n title = file1['title']\n if not os.path.exists(survey_dir + title):\n file1.GetContentFile(survey_dir + title)\n file1.Delete()\n else:\n count = 1\n while os.path.exists(survey_dir + title):\n title = title.split('.')[0] + '_' + str(count) + '.tif'\n file1.GetContentFile(survey_dir + title)\n file1.Delete()\n\n# Main functions for getting the viirs images; here: only the directory for each survey is created\ndef viirs_img_survey(img_dir, csv_dir, viirs_done, urban_radius, rural_radius):\n '''Iterate over survey csvs and get VIIRS images for each cluster.\n \n Args:\n img_dir(str): Path to directory where VIIRS images are stored\n csv_dir(str): Path to directory where DHS csv files are stored\n sentinel_done(str): Filepath for file to document for which surveys were are already completed\n urban_radius(int): Radius around coordinates for Urban rgions in meter\n rural_radius(int): Radius around coordinates for Rural regions in meter\n '''\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n if not os.path.isfile(viirs_done):\n open(viirs_done, 'a').close()\n\n csv_directory = os.listdir(csv_dir)\n img_directory = os.listdir(img_dir)\n\n for file in csv_directory:\n if file.endswith('.csv'):\n filename = file[:file.rfind('.')]\n print(filename)\n # Check if survey is already done we skip this survey (viirs_done file has to be edited manually)\n with open(viirs_done) as f:\n if not filename in f.read():\n survey_name = file[:file.rfind('.')]\n survey_dir = os.path.join(img_dir, survey_name)\n if not os.path.exists(survey_dir):\n os.makedirs(survey_dir)\n file_dir = os.path.join(csv_dir, file)\n get_survey_images(file_dir, survey_name, urban_radius, rural_radius)\n download_local(survey_dir)\n # Add survey to txt file which stores all surveys which are done to avoid downloading them again if you reload the program\n file1 = open(viirs_done, \"a\") # append mode\n file1.write(file + \"\\n\")\n file1.close()\n print(file, 'finished')\n\n\n# Main Part\n\n# Parameter\nurban_radius = 2000 # meter\nrural_radius = 10000 # meter\n\n# Paths\n\n# Path to Label Data\ncsv_dir = csv_path\n# Directory where the viirs images are stored\nimg_dir = download_path_viirs\n# Path to txt files which contains all surveys for which all images are already retrieved\nviirs_done = \"./VIIRS_done.txt\"\n\n# Functions\nee.Initialize()\ngauth = GoogleAuth()\ngauth.LoadCredentialsFile(\"mycreds.txt\")\ndrive = GoogleDrive(gauth)\nviirs_img_survey(img_dir, csv_dir, viirs_done, urban_radius, rural_radius)\n\n\n","repo_name":"gheisenberg/FoodSecurity","sub_path":"Asset Wealth/src/ee_viirs.py","file_name":"ee_viirs.py","file_ext":"py","file_size_in_byte":9900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"71751558703","text":"import time\nimport sampling\nimport logging\n\ndef decodeseq(model, seq):\n try:\n s = model.decode_string(seq).decode('utf8', errors='backslashreplace')\n except Exception as e:\n logging.exception(\"Failed to decode sequence\")\n s = '??? 
%s\\n ' % (str(seq))\n return repr(s)\n\ndef req2str(model, reqs):\n out = ''\n for req in reqs:\n if req.key is None:\n continue\n out += '---- %s\\n' % req.key\n for (k,v) in req.__dict__.items():\n if k == 'forced_input':\n out += ' forced_input : %s\\n' % decodeseq(model, v)\n elif k not in ['initial_state', 'samples', 'key', 'on_finish', 'chains']:\n out += ' %s : %s\\n' % (k,v)\n for (i,s) in enumerate(req.samples):\n out += ' ---- sample %d\\n' % i\n for (k,v) in s.__dict__.items():\n if k in ['model_output_scores', 'states', 'model_output_probs', 'probs', 'model_next_states']:\n out += ' %s : [%d values]\\n' % (k, len(v))\n elif k in ['sampled_sequence', 'input_tokens']:\n out += ' %s : %s\\n' % (k,decodeseq(model, v))\n elif k not in ['model_input_token', 'model_input_state']:\n out += ' %s : %s\\n' % (k,v)\n return out\nclass StatsRequestModule():\n def __init__(self, sampler):\n self.sampler = sampler\n def forward(self, request):\n request.start_time = time.time()\n def backward(self, request):\n request.end_time = time.time()\n request.elapsed = request.end_time - request.start_time\n request.requestinfo = req2str(self.sampler.sampler.model, self.sampler.requests)\n\nclass StatsRequest(sampling.SamplerRequest):\n def __init__(self, sampler):\n self.chains = sampling.SamplerChains([StatsRequestModule(sampler)], [], [])\n self.key = None\n self.samples = []\n","repo_name":"antihutka/pytorch-rnn","sub_path":"statsrequest.py","file_name":"statsrequest.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"91"} +{"seq_id":"72845622063","text":"import numpy as np\nimport cv2\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\nclass ProcessImageDF(object):\n \"\"\"Process image dataframe\"\"\"\n def __init__(self, df, max_recognizable_digits=5):\n self.df=df\n self.columns = ['N', 'bb', 'image', 'y']\n self.max_recognizable_digits=max_recognizable_digits\n for column in self.columns: assert column in self.df.columns.values\n\n def get_bbox_corners(self, i_image):\n \"\"\"finds bbox top left and bottom right coords from image indexed at i_image in dataframe\n :returns list(bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax)\"\"\"\n bboxes = self.df.iloc[i_image]['bb']\n image = self.df.iloc[i_image]['image']\n N = self.df.iloc[i_image]['N']\n bbox_xmin, bbox_xmax = image.shape[1], 0\n bbox_ymin, bbox_ymax = image.shape[0], 0\n for j in range(N):\n if j < self.max_recognizable_digits:\n x_left, x_right = bboxes[j, 0], bboxes[j, 0] + bboxes[j, 2]\n y_top, y_bot = bboxes[j, 1], bboxes[j, 1] + bboxes[j, 3]\n bbox_xmin = min(bbox_xmin, x_left)\n bbox_xmax = max(bbox_xmax, x_right)\n # TODO: find bbox_ymin, bbox_ymax\n bbox_ymin = min(bbox_ymin, y_top)\n bbox_ymax = max(bbox_ymax, y_bot)\n\n # - extract a square (or close to square) cropped image from original image\n bbox_xmin, bbox_xmax = int(round(min(bbox_xmin, bbox_xmax))), int(round(max(bbox_xmin, bbox_xmax)))\n bbox_ymin, bbox_ymax = int(round(min(bbox_ymin, bbox_ymax))), int(round(max(bbox_ymin, bbox_ymax)))\n return([bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax])\n\n def get_bboxCoords_afterRotation(self, M, bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax):\n \"\"\"M: rotation matrix\"\"\"\n bbox_org = np.array([[bbox_xmin, bbox_xmin, bbox_xmax, bbox_xmax],\n [bbox_ymin, bbox_ymax, bbox_ymin, bbox_ymax],\n [1., 1., 1., 1.]]).astype(np.float32)\n\n # New coords under rotation\n bbox_rotated = (np.round(M.dot(bbox_org))).astype(np.int)\n bbox_xmin, 
bbox_ymin = np.min(bbox_rotated, axis=1)\n bbox_xmax, bbox_ymax = np.max(bbox_rotated, axis=1)\n return ([bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax])\n\n def getDF_with_resized_images_aroundBB(self, size=32, max_recognizable_digits=5, max_randomExpansion=(0.4,0.6), augment_With_randomExpansion=False, rotationRange=0):\n \"\"\"find bbox around sequence as bbox_xmin = min left coord bbox_xmax = max of all x+w.\n New bbox_w = bbox_xmax - bbox_xmin\n x_center = bbox_xmin + bbox_w/2\n then extend bbox w between 100-125% of original width (check edge cases)\n resize to 32x32\n augment_With_randomExpansion: Add randomly expanded boxes\"\"\"\n x_lefts, x_rights = [], []\n\n # - construct new dataframe\n columns_new = ['image', 'N', 'y', 'box_size', 'aspect_WbyH']\n df_img_resized = pd.DataFrame(columns=columns_new)\n\n w_streches = np.arange(max_randomExpansion[0], max_randomExpansion[1]+0.05, 0.05) + 1.0\n for i in range(self.df.shape[0]): # for each dataset\n N = self.df.iloc[i]['N']\n image = self.df.iloc[i]['image']\n y = self.df.iloc[i]['y']\n bbox_xLL, bbox_xUL = 0, image.shape[1]\n bbox_yLL, bbox_yUL = 0, image.shape[0]\n\n bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax = self.get_bbox_corners(i)\n\n if rotationRange>0:\n h, w = bbox_ymax - bbox_ymin, bbox_xmax - bbox_xmin\n x_rotation_center, y_rotation_center = int(round(bbox_xmin + w / 2.)), int(round(bbox_ymin + h / 2.)) # rotate about digits center\n rotationAngle = np.random.choice(np.arange(-rotationRange, rotationRange)) # random rotation\n cols, rows = bbox_xUL - bbox_xLL, bbox_yUL - bbox_yLL\n M = cv2.getRotationMatrix2D((x_rotation_center, y_rotation_center), rotationAngle, 1)\n image = cv2.warpAffine(image, M, (cols, rows), borderMode=cv2.BORDER_REPLICATE)\n bbox_xmin, bbox_xmax, bbox_ymin, bbox_ymax = self.get_bboxCoords_afterRotation(M,\n bbox_xmin, bbox_xmax,\n bbox_ymin, bbox_ymax)\n\n h, w = bbox_ymax-bbox_ymin, bbox_xmax-bbox_xmin\n x_center, y_center = bbox_xmin + w/2., bbox_ymin + h/2.\n #region - Find a random cropped image 1x-1.45x of found bbox\n box_size = np.int(np.round(1.0 * max(h, w))) # find bounding box for cropping and randomly expand it between 0-25%\n x_tl, y_tl = int(round(max(bbox_xLL, x_center-box_size/2.))), int(round(max(bbox_yLL, y_center-box_size/2.)))\n x_br, y_br = int(round(min(bbox_xUL, x_center+box_size/2.))), int(round(min(bbox_yUL, y_center+box_size/2.)))\n aspect_WbyH = (x_br - x_tl)*1. / (y_br - y_tl) # w by h\n image_resized = cv2.resize(image[y_tl:y_tl+box_size, x_tl:x_tl+box_size], (size, size))\n\n df2 = pd.DataFrame(dict(zip(columns_new, [[item] for item in [image_resized, N, y, box_size, aspect_WbyH]])))\n df_img_resized = df_img_resized.append(df2)\n #endregion\n # region - Augment with Random box expansion\n if augment_With_randomExpansion:\n box_size = np.int(np.round(np.random.choice(w_streches) * max(h, w))) # find bounding box for cropping and randomly expand it between 0-25%\n x_tl, y_tl = int(round(max(bbox_xLL, x_center - box_size / 2.))), int(round(max(bbox_yLL, y_center - box_size / 2.)))\n x_br, y_br = int(round(min(bbox_xUL, x_center + box_size / 2.))), int(round(min(bbox_yUL, y_center + box_size / 2.)))\n aspect_WbyH = (x_br - x_tl) * 1. 
/ (y_br - y_tl) # w by h\n image_resized = cv2.resize(image[y_tl:y_tl + box_size, x_tl:x_tl + box_size], (size, size))\n\n df2 = pd.DataFrame(dict(zip(columns_new, [[item] for item in [image_resized, N, y, box_size, aspect_WbyH]])))\n df_img_resized = df_img_resized.append(df2)\n # endregion\n\n return df_img_resized\n\ndef img_meanSubtraction_compressTo1p0(images):\n \"\"\" subtract mean from each image and compress values between 0-1.0\"\"\"\n images = images.astype('float32') / 255.\n return images - images.mean(axis=(-3,-2,-1), keepdims=1)\n\ndef rotate_image(image, rotationAngle):\n rows, cols, n_channels = image.shape\n M = cv2.getRotationMatrix2D((cols // 2, rows // 2), rotationAngle, 1)\n return(cv2.warpAffine(image, M, (cols, rows), borderMode=cv2.BORDER_REPLICATE))\n\ndef get_rotated_images(df_32x32, rotationRange):\n \"\"\"\n rotate all images in input dataframe df_32x32 while sampling rotation angle uniformly between +-rotationRange\n :param df_32x32: input data frame\n :param rotationRange: rotation range\n :return: dataframe\n \"\"\"\n columns = ['image', 'N', 'y', 'box_size', 'aspect_WbyH']\n assert len(columns) == len(df_32x32.columns) # checks\n for attr in columns: assert attr in df_32x32.columns\n df_new = pd.DataFrame(columns=columns)\n for i_image in range(df_32x32.shape[0]):\n image = df_32x32.iloc[i_image]['image']\n N = df_32x32.iloc[i_image]['N']\n y = df_32x32.iloc[i_image]['y']\n box_size = df_32x32.iloc[i_image]['box_size']\n aspect_WbyH = df_32x32.iloc[i_image]['aspect_WbyH']\n\n rotationAngle = np.random.choice(np.arange(-rotationRange, rotationRange))\n rows, cols, n_channels = image.shape\n M = cv2.getRotationMatrix2D((cols // 2, rows // 2), rotationAngle, 1)\n dst = cv2.warpAffine(image, M, (cols, rows), borderMode=cv2.BORDER_REPLICATE)\n\n df2 = pd.DataFrame(dict(zip(columns,\n [[item] for item in [dst, N, y, box_size, aspect_WbyH]])))\n df_new = df_new.append(df2)\n\n return(df_new)\n\n# - Compress image to min_dim=200 but no aspect ratio change\ndef compress_image(image, minDim_size=200):\n \"\"\"minDim_size: size of minimum dimension\"\"\"\n h, w, d = image.shape\n need_resize = False\n if h < w:\n if h > minDim_size:\n aspect_rat = w * 1. / h\n h, w = minDim_size, np.int(np.round(minDim_size*1. * aspect_rat))\n need_resize = True\n else:\n if w > minDim_size:\n aspect_rat = h * 1. / w\n h, w = np.int(np.round(minDim_size*1. * aspect_rat)), minDim_size\n need_resize = True\n if need_resize: image = cv2.resize(image, (w, h))\n return (image)\n\n\ndef sliding_window_crops(image, bbox, box_size, resizeTO=(32,32), strides=(4, 4), minDim_size=200, debug=False):\n \"\"\"stride: in x,y directions. minDim_size: size of minimum dimension\"\"\"\n x0,y0,w,h=bbox\n bx, by = box_size\n w,h=max(w,bx+1), max(h,by+1)\n assert h > by and w > bx\n if debug: plt.imsave('debug/crop__bbox_{0}_{1}_{2}_{3}__atSize_{4}_{5}.png'.format(x0,y0,w,h,bx,by), image[y0:y0+h, x0:x0+w][...,[2,1,0]]) # TODO: remove\n\n def sliding_window_crop_locations(h, w, box_size, strides):\n \"\"\"height, width, box_size, strides. 
Returns crop locations\"\"\"\n sx, sy = strides\n bx, by = box_size\n xx, yy = np.meshgrid((x0+np.arange(w - bx))[::sx], (y0+np.arange(h - by))[::sy])\n return (np.hstack((xx.reshape(-1, 1), yy.reshape(-1, 1)))) # [[x,y]] locations\n\n crop_locs = sliding_window_crop_locations(h, w, box_size, strides)\n crops = [cv2.resize(image[y:y + by, x:x + bx], resizeTO) for x, y in crop_locs]\n\n if debug:\n test_image=image.copy() # TODO: Remove\n for x, y in crop_locs:\n test_image = cv2.rectangle(test_image, (x,y),(x+bx,y+by),(0,0,255),1)\n plt.imsave('debug/slides__bbox_{0}_{1}_{2}_{3}__atSize_{4}_{5}.png'.format(x0,y0,w,h,bx,by), test_image[...,[2,1,0]])\n\n w,h=resizeTO\n return (np.vstack(crops).reshape(-1, h, w, 3), crop_locs)\n\ndef sliding_window_animation(image, crop_locs, box_size, hit_miss):\n \"\"\"\n crop_locs: [[x,y]] locations\n hit_miss: boolean array. If true, green bbox. Else blue bbox\"\"\"\n for i in range(crop_locs.shape[0]):\n img = image.copy()\n pt1 = crop_locs[i]\n pt1, pt2 = tuple(pt1), tuple(pt1+box_size)\n color = (0,255,0) if hit_miss[i] else (255,0,0)\n img = cv2.rectangle(img, pt1, pt2, color, 1)\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef display_hits(image, bboxes_tl, bbox_size):\n img=image.copy()\n w,h=bbox_size\n for x,y in bboxes_tl:\n img=cv2.rectangle(img, (x,y), (x+w, y+h),(0,255,0),1)\n return(img)\n\nif __name__==\"__main__\":\n test_image = cv2.imread('../data/test_images/house1.jpg', 1)\n\n box_size=[40,40]\n compressed_image = compress_image(test_image, minDim_size=200)\n crops, crop_locs = sliding_window_crops(compressed_image, box_size, minDim_size=200)\n\n hit_miss = np.zeros(crops.shape[0], dtype=np.bool)\n sliding_window_animation(compressed_image, crop_locs, box_size, hit_miss)\n\n\n\n\n\n\n","repo_name":"yogeshluthra/MultiDigit_detection_in_natural_scene","sub_path":"PreProcessing/image_preprocessings.py","file_name":"image_preprocessings.py","file_ext":"py","file_size_in_byte":11266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"33471554017","text":"import logging\nimport json\nfrom channels.sessions import channel_session\n# from replacer.models import TranslateRequest\nfrom replacer.main import rv_main\n\nlog = logging.getLogger(__name__)\n\n\n@channel_session\ndef ws_connect(message):\n prefix = message['path'].strip('/').split('/')[0]\n if prefix != 'replace':\n log.debug('invalid ws path=%s', message['path'])\n return\n\n\n@channel_session\ndef ws_receive(message):\n def reply_json(status, content):\n msg = {'type': status, 'content': content}\n message.reply_channel.send({'text': json.dumps(msg)})\n data = json.loads(message['text'])\n\n for status, content in rv_main(data['text']):\n reply_json(status, content)\n","repo_name":"shannywu/verb-replacer-2.0","sub_path":"verbreplacer/replacer/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2769427392","text":"import string\r\ndef encoding(message,key1):\r\n alphabet=\" \"+string.ascii_lowercase\r\n positions=dict()\r\n for i in range(27):\r\n positions[alphabet[i]]=i\r\n encoded_message=\"\"\r\n key=list(positions.keys())\r\n values=list(positions.values())\r\n index=0\r\n for i in message:\r\n index=positions[i]+key1\r\n if index>26:\r\n index=index-26-1\r\n encoded_message=encoded_message+key[index]\r\n return encoded_message\r\n \r\nmessage = \"hi this is 
my mini project\"\r\nkey=3\r\nprint(encoding(message,key))","repo_name":"VimalanKM/Cipher-Encoder","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15665217458","text":"from google.appengine.ext import webapp\nfrom google.appengine.ext import ndb\nfrom models import ChickenPlace\nimport datetime\nimport logging\n\n\nclass FlushPlaces(webapp.RequestHandler):\n def get(self):\n # Flush old JustEat chickenplaces\n ten_days_ago = datetime.date.today() - datetime.timedelta(days=10)\n cursor = None\n more = True\n keys = []\n while more:\n results, cursor, more = ChickenPlace.query(namespace=\"JustEat\").filter(ChickenPlace.created < ten_days_ago) \\\n .fetch_page(150, keys_only=True, start_cursor=cursor)\n keys.extend(results)\n\n logging.info(\"Deleting %s old ChickenPlaces\"%len(keys))\n ndb.delete_multi(keys)\n logging.info(\"Deleted!\")\n\napp = webapp.WSGIApplication([('/cron/flush_old', FlushPlaces)],\n debug=True)\n\n","repo_name":"orf/FindMeChicken-GAE","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4607810972","text":"import mysql.connector\nimport json\nfrom pathlib import Path \n\nclass DbHelper:\n __connessione = None\n __config = json.loads(Path(r\"config.json\").read_text())\n \n def __init__(self) -> None:\n try:\n # if(self.__connessione == None):\n self.__connessione = mysql.connector.connect(**self.__config)\n except Exception as e:\n print('errore durante la connessione')\n print(e)\n\n def exe_query(self, query):\n try:\n cursore = self.__connessione.cursor() \n cursore.execute(query)\n result = cursore.fetchall()\n return result\n except Exception as e:\n print(e)\n finally:\n # if (self.__connessione != None):\n self.__connessione.close()","repo_name":"Frowingg/Data-Layer","sub_path":"DbHelper.py","file_name":"DbHelper.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70108683182","text":"import json\nimport logging\nimport os\nfrom typing import Dict, Union, List\n\nfrom deepdiff import DeepDiff\n\nfrom ..exceptions import MachineSignatureNotFoundError\nfrom ..foundation.test.Test import Test\nfrom ..model.Lab import Lab\nfrom ..model.Machine import Machine\n\n\nclass BuiltInTest(Test):\n def __init__(self, lab: Lab) -> None:\n Test.__init__(self, lab)\n\n def create_signature(self) -> None:\n for (machine_name, machine) in self.lab.machines.items():\n logging.info(\"Building `builtin` signature for device %s...\" % machine_name)\n\n machine_status = self._get_machine_status(machine)\n\n # Write the signature into the proper file\n with open(\"%s/%s.builtin\" % (self.signature_path, machine_name), 'w') as machine_signature_file:\n machine_signature_file.write(json.dumps(machine_status, indent=4))\n\n def test(self) -> bool:\n test_passed = True\n\n for (machine_name, machine) in self.lab.machines.items():\n logging.info(\"Executing `builtin` tests for device %s...\" % machine_name)\n\n machine_state = self._get_machine_status(machine)\n\n # Read the signature from machine file\n machine_signature_path = \"%s/%s.builtin\" % (self.signature_path, machine.name)\n if os.path.exists(machine_signature_path):\n with open(machine_signature_path, 'r') as machine_signature_file:\n 
machine_signature = json.loads(machine_signature_file.read())\n else:\n raise MachineSignatureNotFoundError(machine_name)\n\n # Save machine state into result file\n machine_result_path = \"%s/%s.builtin\" % (self.results_path, machine.name)\n with open(machine_result_path, 'w') as machine_result_file:\n machine_result_file.write(json.dumps(machine_state, indent=4))\n\n diff = self.check_signature(machine_signature, machine_state)\n test_passed = False if diff else test_passed\n\n machine_diff_path = \"%s/%s.diff\" % (self.results_path, machine.name)\n with open(machine_diff_path, 'w') as machine_diff_file:\n machine_diff_file.write(\"Builtin Test Result:\\n\")\n machine_diff_file.write(json.dumps(diff, indent=4) + \"\\n\" if diff else \"OK\\n\")\n\n return test_passed\n\n def check_signature(self, signature: Union[Dict, str], status: Union[Dict, str]) -> Union[DeepDiff, List]:\n diff = DeepDiff(status, signature, ignore_order=True)\n\n if \"iterable_item_added\" in diff:\n diff[\"it_is\"] = diff.pop(\"iterable_item_added\")\n if \"iterable_item_removed\" in diff:\n diff[\"should_be\"] = diff.pop(\"iterable_item_removed\")\n\n return diff\n\n @staticmethod\n def _get_machine_status(machine: Machine) -> Dict:\n # Machine interfaces\n (ip_addr, _) = Test._get_machine_command_output(lab_hash=machine.lab.hash,\n machine_name=machine.name,\n command=\"ip -j addr show\"\n )\n if ip_addr:\n ip_addr = json.loads(ip_addr)\n\n # Get only relevant information (interface name, state and list of address/prefix)\n ip_addr_clean = {}\n for info in ip_addr:\n ip_addr_clean[info['ifname']] = {'ip_addresses': [x[\"local\"] + \"/\" + str(x[\"prefixlen\"])\n for x in info[\"addr_info\"]\n ],\n 'state': info['operstate']\n }\n else:\n ip_addr_clean = {}\n\n # Machine routes\n (ip_route, _) = Test._get_machine_command_output(lab_hash=machine.lab.hash,\n machine_name=machine.name,\n command=\"ip -j route show\"\n )\n\n ip_route = json.loads(ip_route) if ip_route else []\n\n # Machine opened ports\n (net_stat, _) = Test._get_machine_command_output(lab_hash=machine.lab.hash,\n machine_name=machine.name,\n command=\"netstat -tuwln\"\n )\n # Remove Docker ports and header lines. 
Sort the array alphabetically.\n net_stat = sorted(list(filter(lambda x: \"127.0.0.11\" not in x, net_stat.splitlines()))[2:]) if net_stat else []\n\n # Machine processes\n (processes, _) = Test._get_machine_command_output(lab_hash=machine.lab.hash,\n machine_name=machine.name,\n command=\"ps -e -o command\"\n )\n # Remove header line and sort the array alphabetically.\n processes = sorted([x.strip() for x in processes.splitlines()[1:]]) if processes else []\n\n return {\n \"interfaces\": ip_addr_clean,\n \"route\": ip_route,\n \"listening_ports\": net_stat,\n \"processes\": processes\n }\n","repo_name":"KatharaFramework/Kathara","sub_path":"src/Kathara/test/BuiltinTest.py","file_name":"BuiltinTest.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":335,"dataset":"github-code","pt":"91"} +{"seq_id":"3232413973","text":"from config import utils\n\nimport pandas as pd\n\nimport sqlite3\nfrom sqlite3 import Error\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--url\", \"-u\", help=\"Source url from file csv\")\nargs = parser.parse_args()\n\n\ndef request_api_save_file(url, destination_database):\n data = pd.read_csv(url)\n split = url.split(\"/\")\n name_table = split[-1:][0].replace(\".csv\", \"\")\n columns_create = \"\"\n columns_size = \"\"\n columns = \"\"\n\n for column in data.columns:\n columns_create += (column + \" TEXT, \")\n columns += (column + \",\")\n columns_size += (\"?,\")\n\n if name_table == \"companies\":\n data[\"HEADCOUNT\"] = data[\"HEADCOUNT\"].fillna(\n 0.0).astype(int).astype(str)\n data[\"MOST_RECENT_RAISE\"] = data[\"MOST_RECENT_RAISE\"].fillna(\n 0.0).astype(int).astype(str)\n data[\"MOST_RECENT_VALUATION\"] = data[\"MOST_RECENT_VALUATION\"].fillna(\n 0.0).astype(int).astype(str)\n data[\"KNOWN_TOTAL_FUNDING\"] = data[\"KNOWN_TOTAL_FUNDING\"].fillna(\n 0.0).astype(int).astype(str)\n data[\"COMPANY_LINKEDIN_NAMES\"] = data[\"COMPANY_LINKEDIN_NAMES\"].map(\n lambda x: x.strip().replace('[\\n', \"\").replace('\\n]', \"\").replace('\"', '').strip())\n data[\"INVESTORS\"] = data[\"INVESTORS\"].astype(str).map(lambda x: x.strip().replace(\n '[\\n', \"\").replace('\\n]', \"\").replace('\"', '').replace('\\n', '').strip())\n\n list_data = data.values.tolist()\n\n conn = None\n try:\n conn = sqlite3.connect(destination_database)\n cur = conn.cursor()\n cur.executescript(f\"\"\"\n BEGIN;\n DROP TABLE IF EXISTS {name_table};\n CREATE TABLE IF NOT EXISTS {name_table}({columns_create[:-2]});\n COMMIT;\n \"\"\")\n\n cur.executemany(f\"\"\"\n INSERT INTO {name_table} ({columns[:-1]})\n VALUES ({columns_size[:-1]})\n \"\"\", list_data)\n\n conn.commit()\n print(\"Insert data sucessful.\")\n\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n\n return\n\n\nif __name__ == \"__main__\":\n url = args.url\n request_api_save_file(url, utils.db_raw_dir)\n\n# Example of run script:\n# python.exe ingest.py -u https://contrary-engineering-interview.s3.amazonaws.com/data/companies.csv\n# python.exe ingest.py -u https://contrary-engineering-interview.s3.amazonaws.com/data/people.csv\n","repo_name":"leonnardoo/Contrary-Engineering-Backend-Data-Engineering","sub_path":"ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"5433197290","text":"from github import Github\nimport json\nimport os\n\ndef main():\n #Read config file\n with open('config.json') as 
file:\n data = json.load(file)\n\n #Get Username, accessToken, repository path & code command\n username = data[\"username\"]\n accessToken = data[\"accessToken\"]\n repositoriesPath = data[\"repositoryFolder\"]\n codeCommand = data[\"codeCommand\"]\n\n github = Github(accessToken)\n\n repoName = input(\"Enter a repository name: \")\n description = input(\"Enter a description or leave blank: \")\n website = input(\"Enter a website or leave blank: \")\n\n private = False\n try:\n value = int(input(\"Is this a private repo? 0 => no | 1 => yes: \"))\n\n if value > 1:\n raise Exception(\"value > 1\")\n \n private = bool(value)\n except:\n #If input is not a number or bigger then 1 set to False\n private = False\n\n #Get GitHub user\n user = github.get_user()\n\n #Create new repository on GitHub\n newRepo = user.create_repo(repoName, description, website, private)\n\n print(\"New repo with name\" + newRepo.name + \" created...\\nInitializing repo with README.md\")\n\n #Create Repository folder for the new Repository\n os.system(\"mkdir \" + repositoriesPath + newRepo.name);\n os.chdir(repositoriesPath + newRepo.name)\n #Create README.md and initialize a new Git repository\n #And push it to the new GitHub repository\n os.system(\"echo # \" + newRepo.name +\" >> README.md\");\n os.system(\"git init\")\n os.system(\"git add *\")\n os.system(\"git commit -m \\\"Init Commit\\\"\")\n os.system(\"git branch -M master\")\n os.system(\"git remote add origin https://github.com/\" + username + \"/\" + newRepo.name + \".git\")\n os.system(\"git push -u origin master\")\n\n #Start VsCode or VsCode Insiders\n os.system(codeCommand)\n\nmain()","repo_name":"Founntain/RepositoryGenerator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13634993761","text":"# -------------------------------------------------------------------\n# Uncertainty estimation by the Hamiltonian Monte Carlo method.\n#\n# Author: Liang Ding\n# Email: myliang.ding@mail.utoronto.ca\n# -------------------------------------------------------------------\n\nfrom MTTools.DMomentTensors import DMT_enz\nfrom pyCAPLunar.DCAPUtils import mag2moment\nfrom pyCAPLunar.DPrepare import _prepare_N_staiton_sgt, _prepare_N_staiton_data\n\n\nfrom pyCAPLunar.DCAP import DCAP\nimport numpy as np\nimport pickle\n\n\nclass DHMC_DC(DCAP):\n '''DC component solver that is Based on the Hamiltonian Monte Carlo method. 
'''\n\n def __init__(self, sgts_list, data_list,\n loc_src, loc_stations,\n n_tp_sgt, n_ts_sgt,\n n_tp_data, n_ts_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase=5, n_component=3,\n w_pnl=1.0, w_srf=1.0,\n misfit_threshold=5.0,\n reject_rate=1.0,\n taper_scale=-0.4, taper_rate_srf=0.0,\n cc_threshold=0.2,\n amplitude_ratio_threshold=2.0,\n job_id=None):\n\n super().__init__(sgts_list, data_list,\n loc_src, loc_stations,\n n_tp_sgt, n_ts_sgt,\n n_tp_data, n_ts_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase, n_component,\n w_pnl, w_srf,\n misfit_threshold,\n reject_rate,\n taper_scale, taper_rate_srf,\n cc_threshold,\n amplitude_ratio_threshold)\n\n\n # constant encoding the observational uncertainties ???\n self.sigma_d = 0.1\n self.sigma_q = np.inf\n\n # Double-couple component.\n # strike, dip, rake, magnitude.\n self.Nq = 4\n # delta to calculate derivatives.\n self.delta_q0 = 0.1 * np.ones(self.Nq)\n self.delta_q0[3] = 0.01 # magnitude\n\n self.b_initial_q0 = False\n self.MAX_SAMPLE = 100000\n\n # save samples\n self.b_save_samples = True\n self.saving_dir = None\n # save cache?\n self.b_save_cache = True\n self.n_step_cache = 10\n self.job_id = job_id\n\n\n\n def create_p(self):\n '''Using probability density function to create P. '''\n return np.random.normal(loc=0, scale=1.0, size=self.Nq)\n\n\n def Uq(self, q):\n '''Compute Uq. '''\n mt0 = mag2moment(q[3]) * DMT_enz(np.deg2rad(q[0]), np.deg2rad(q[1]),\n np.deg2rad(q[2]), np.deg2rad(90),\n np.deg2rad(0))\n\n misfit_matrix, _, _ = self.cut_and_paste(mt0)\n Uq = np.average(misfit_matrix[np.where(misfit_matrix != self.MAX_MISFIT)]) / (2.0 * np.power(self.sigma_d, 2))\n return Uq\n\n\n def Kp(self, p):\n '''Compute Kp, as M is identical matrix. '''\n return np.dot(p, p)/2.0\n\n def dUdq(self, q):\n '''Compute du_dqi'''\n self.dU_dqi=np.zeros(self.Nq)\n Uq = self.Uq(q)\n\n # dU/dqi\n for i in range(self.Nq):\n _q = q.copy()\n _q[i] += self.delta_q0[i]\n self.dU_dqi[i] = (self.Uq(_q) - Uq)/(self.delta_q0[i])\n\n # gradient for magnitude\n self.dU_dqi[3] = self.dU_dqi[3]/1000\n\n\n def set_q(self, q):\n '''\n Set initial solution.\n q0 = [strike, dip, rake, magnitude]\n '''\n\n if len(q) < self.Nq:\n print(\"Bad q0 with {} parameters, while {} are required. \".format(len(q), self.Nq))\n return\n\n self.q0 = np.zeros(self.Nq) # 4 = cont([strike, dip, rake, mag])\n self.q0[0] = q[0]\n self.q0[1] = q[1]\n self.q0[2] = q[2]\n self.q0[3] = q[3]\n\n if not self.b_initial_q0:\n self.raw_q0 = self.q0.copy()\n\n self.b_initial_q0 = True\n self.dUdq(self.q0)\n\n\n def hmc(self, epsilon, n_step):\n if not self.b_initial_q0:\n print(\"Un-initial Q0!\")\n return\n\n q = self.q0.copy()\n current_q = self.q0.copy()\n\n # initial p(t)\n p = self.create_p()\n current_p = p.copy()\n\n for i in range(n_step):\n # p(t+epsilon/2)\n p = p - (epsilon / 2.0) * self.dU_dqi\n # q(t+epsilon)\n q = q + epsilon * p\n self.set_q(q)\n p = p - (epsilon/2.0) * self.dU_dqi\n current_Uq = self.Uq(current_q)\n current_Kp = self.Kp(current_p)\n proposed_Uq = self.Uq(q)\n proposed_Kp = self.Kp(p)\n\n if 1.0 < np.exp(current_Uq-proposed_Uq+current_Kp-proposed_Kp):\n print(\"! 
Accept q={}\".format(np.round(q, 2)))\n self.set_q(q)\n return q, True\n else:\n print(\"Reject, q={}\".format(np.round(q, 2)))\n self.set_q(self.raw_q0.copy())\n return current_q, False\n\n\n def sampling(self, epsilon, n_step, n_sample):\n '''Sampling'''\n if not self.b_initial_q0:\n print(\"Un-initial Q0!\")\n return\n\n if self.b_save_samples:\n # todo: checking directory\n if self.saving_dir is None:\n print(\"No saving directory available!\")\n return\n\n samples = []\n _tmp_n_sample = 0\n while(len(samples) < n_sample and _tmp_n_sample < self.MAX_SAMPLE):\n _tmp_n_sample += 1\n\n # save the cache of sampling results.\n if self.b_save_cache:\n if len(samples)!=0 and np.mod(len(samples), self.n_step_cache) == 0:\n self.save_to_file(name_str=\"cache_samples_N{}\".format(len(samples)), samples=samples)\n print(\"Cache including {} samples saved!\".format(len(samples)))\n try:\n q, accept = self.hmc(epsilon, n_step)\n if accept:\n samples.append(q)\n except:\n # reset\n self.set_q(self.raw_q0.copy())\n\n # write samples to file.\n if self.b_save_samples:\n self.save_to_file(name_str=\"Samples_N{}_\".format(len(samples)), samples=samples)\n return samples\n\n\n def set_saving_dir(self, saving_dir):\n self.saving_dir = saving_dir\n\n def save_to_file(self, name_str, samples, format='.pkl'):\n '''Write out the sample to pkl file. '''\n if self.job_id is None:\n file_path = str(self.saving_dir) + str(name_str) + str(format)\n else:\n file_path = str(self.saving_dir) + str(self.job_id) + str('_') + str(name_str) + str(format)\n\n with open(file_path, 'wb') as f:\n pickle.dump(samples, f)\n\n\n\nclass DHMC_MT(DHMC_DC):\n '''Solver using Hamiltonian Monte Carlo. '''\n\n def __init__(self, sgts_list, data_list,\n loc_src, loc_stations,\n n_tp_sgt, n_ts_sgt,\n n_tp_data, n_ts_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase=5, n_component=3,\n w_pnl=1.0, w_srf=1.0,\n misfit_threshold=5.0,\n reject_rate=1.0,\n taper_scale=-0.4, taper_rate_srf=0.0,\n cc_threshold=0.2,\n amplitude_ratio_threshold=2.0,\n job_id=None):\n\n super().__init__(sgts_list, data_list,\n loc_src, loc_stations,\n n_tp_sgt, n_ts_sgt,\n n_tp_data, n_ts_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase, n_component,\n w_pnl, w_srf,\n misfit_threshold,\n reject_rate,\n taper_scale, taper_rate_srf,\n cc_threshold,\n amplitude_ratio_threshold,\n job_id)\n\n self.Nq = 6\n # delta to calculate derivatives.\n self.delta_q0 = 0.1 * np.ones(self.Nq)\n self.delta_q0[3] = 0.01 # magnitude\n\n\n def Uq(self, q):\n '''Compute Uq. '''\n mt0 = mag2moment(q[3]) * DMT_enz(np.deg2rad(q[0]), np.deg2rad(q[1]),\n np.deg2rad(q[2]), np.deg2rad(q[4]),\n np.deg2rad(q[5]))\n misfit_matrix, _, _ = self.cut_and_paste(mt0)\n Uq = np.average(misfit_matrix[np.where(misfit_matrix != self.MAX_MISFIT)]) / (2.0 * np.power(self.sigma_d, 2))\n return Uq\n\n\n def dUdq(self, q):\n '''Compute du_dqi'''\n self.dU_dqi=np.zeros(self.Nq)\n Uq = self.Uq(q)\n\n # dU/dqi\n for i in range(self.Nq):\n _q = q.copy()\n _q[i] += self.delta_q0[i]\n self.dU_dqi[i] = (self.Uq(_q) - Uq)/(self.delta_q0[i])\n\n # gradient for magnitude\n self.dU_dqi[3] = self.dU_dqi[3]/1000\n\n\n def set_q(self, q):\n ''' Set q=[strike, dip, rake, mag, lune_lat, lune_long]. '''\n self.q0 = q\n\n if not self.b_initial_q0:\n self.raw_q0 = self.q0.copy()\n\n self.b_initial_q0 = True\n self.dUdq(self.q0)\n\n\n\n# moment tensor and location.\nclass DHMC_PRO(DHMC_MT):\n '''Solving the moment tensor and source location by the Hamiltonian Monte Carlo method. 
'''\n\n def __init__(self, sgtMgr, station_names,\n sampling_rate,\n p_freqmin, p_freqmax,\n s_freqmin, s_freqmax,\n n_p_length, n_p_offset,\n n_s_length, n_s_offset,\n vp, vs,\n data_n_stations, loc_src, loc_stations,\n n_tp_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase=5, n_component=3,\n w_pnl=1.0, w_srf=1.0,\n misfit_threshold=5.0,\n reject_rate=1.0,\n taper_scale=-0.4, taper_rate_srf=0.0,\n cc_threshold=0.2,\n amplitude_ratio_threshold=2.0,\n job_id=None):\n\n self.sgtMgr = sgtMgr\n self.station_names = station_names\n self.loc_stations = loc_stations\n self.sampling_rate = sampling_rate\n self.p_freqmin = p_freqmin\n self.p_freqmax = p_freqmax\n self.s_freqmin = s_freqmin\n self.s_freqmax = s_freqmax\n self. n_p_length = n_p_length\n self.n_p_offset = n_p_offset\n self.n_tp_data = n_tp_data\n self.n_s_length = n_s_length\n self.n_s_offset = n_s_offset\n self.vp = vp\n self.vs = vs\n\n try:\n sgts_list = self.get_sgt_list_n_stations(loc_src)\n data_list = self.get_data_list_n_stations(data_n_stations)\n except:\n print('Unable to initial the SGT and data.')\n return\n\n super().__init__(sgts_list, data_list,\n loc_src, loc_stations,\n self.n_tp_sgt, self.n_ts_sgt,\n self.n_tp_data, self.n_ts_data,\n n_tp_max_shift, n_ts_max_shift,\n dt, n_phase, n_component,\n w_pnl, w_srf,\n misfit_threshold,\n reject_rate,\n taper_scale, taper_rate_srf,\n cc_threshold,\n amplitude_ratio_threshold,\n job_id)\n\n\n # constant encoding the observational uncertainties ???\n self.sigma_d = 0.3\n self.sigma_q = np.inf\n\n # nine parameters [strike, dip, rake, magnitude, lune_latitude, lune_long, lat, long, depth]\n self.Nq = 9\n\n # delta to calculate derivatives.\n self.delta_q0 = 0.1 * np.ones(self.Nq)\n self.delta_q0[3] = 0.01 # magnitude\n\n # increment for latitude: q0[6], longitude: q0[7], depth: q0[8].\n # as the spacing of the mesh is 500 m.\n self.delta_q0[6] = 0.0051\n self.delta_q0[7] = 0.0051\n self.delta_q0[8] = 1.0\n\n self.b_initial_q0 = False\n\n\n\n def get_sgt_list_n_stations(self, loc_src):\n df = self.sampling_rate\n\n # loc_src=[lat, long, depth in km (the depth is minus)]\n source = loc_src.copy()\n source[2] = -1000 * np.fabs(source[2])\n\n lat, long, z, \\\n utm_x, utm_y, utm_z, \\\n idx_processor, element_index, \\\n xi, eta, gamma = self.sgtMgr.find(x=source[0], y=source[1], z=source[2], n_points=1)\n\n # element index in the ibool file starts from 1, while 0 in our code.\n idx_element = element_index - 1\n\n try:\n self.sgtMgr.Initialize(idx_processor, idx_element, self.station_names)\n interp_sgt_n_stations = self.sgtMgr.get_sgt(source, mode='LAGRANGE')\n except:\n print(\"Unable to get SGT\")\n return None\n\n\n # computing the approximate p and s travel time for cutting the SGT (synthetic waveform).\n self.n_ts_data = np.zeros_like(self.n_tp_data, dtype=int)\n n_tp_sgt_stations = []\n n_ts_sgt_stations = []\n for i, loc_sta in enumerate(self.loc_stations):\n dist = 111.0 * np.sqrt(np.square(loc_sta[0] - loc_src[0]) + np.square(loc_sta[1] - loc_src[1]))\n dist = np.sqrt(np.power(dist, 2) + np.power(loc_src[2] - loc_sta[2], 2))\n n_tp = int((dist / self.vp) * self.sampling_rate)\n n_ts = int((dist / self.vs) * self.sampling_rate)\n n_tp_sgt_stations.append(n_tp)\n n_ts_sgt_stations.append(n_ts)\n self.n_ts_data[i] = int(self.n_tp_data[i]) + (n_ts - n_tp)\n\n self.n_tp_sgt = self.n_p_offset * np.ones_like(n_tp_sgt_stations, dtype=int)\n self.n_ts_sgt = self.n_s_offset * np.ones_like(n_ts_sgt_stations, dtype=int)\n\n sgt_list_n_stations = 
_prepare_N_staiton_sgt(interp_sgt_n_stations, df,\n self.p_freqmin, self.p_freqmax,\n self.s_freqmin, self.s_freqmax,\n n_tp_sgt_stations, self.n_p_length, self.n_p_offset,\n n_ts_sgt_stations, self.n_s_length, self.n_s_offset)\n return sgt_list_n_stations\n\n\n def get_data_list_n_stations(self, data_n_stations):\n df = self.sampling_rate\n data_list_n_stations = _prepare_N_staiton_data(data_n_stations, df,\n self.p_freqmin, self.p_freqmax,\n self.s_freqmin, self.s_freqmax,\n self.n_p_length, self.n_s_length,\n self.n_tp_data, self.n_ts_data)\n return data_list_n_stations\n\n\n\n def Uq(self, q, b_sgt=False):\n '''Compute Uq. '''\n\n if b_sgt:\n sgt_list = self.get_sgt_list_n_stations(q[-3:])\n self.update_sgt(sgt_list)\n self.update_azimuth(q[-3:])\n\n mt0 = mag2moment(q[3]) * DMT_enz(np.deg2rad(q[0]), np.deg2rad(q[1]),\n np.deg2rad(q[2]), np.deg2rad(q[4]),\n np.deg2rad(q[5]))\n\n misfit_matrix, _, _ = self.cut_and_paste(mt0)\n Uq = np.average(misfit_matrix[np.where(misfit_matrix != self.MAX_MISFIT)]) / (2.0 * np.power(self.sigma_d, 2))\n return Uq\n\n\n def dUdq(self, q):\n '''Compute du_dqi'''\n self.dU_dqi=np.zeros(self.Nq)\n Uq = self.Uq(q)\n\n # original code to claculate dU/dqi\n # dU/dqi\n for i in range(self.Nq):\n _q = q.copy()\n _q[i] += self.delta_q0[i]\n if i >= 6:\n b_sgt = True\n else:\n b_sgt = False\n self.dU_dqi[i] = (self.Uq(_q, b_sgt) - Uq)/(self.delta_q0[i])\n\n\n def set_q(self, q):\n '''Set q. '''\n self.q0 = q\n if not self.b_initial_q0:\n self.raw_q0 = self.q0.copy()\n\n self.b_initial_q0 = True\n self.dUdq(self.q0)\n\n","repo_name":"Liang-Ding/pyCAPLunar","sub_path":"pyCAPSolvers/DHMC.py","file_name":"DHMC.py","file_ext":"py","file_size_in_byte":15912,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"91"} +{"seq_id":"1386802363","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# 1.a vidējā vērtība\n# ====================\n# uzrakstīt programmu, kas liek lietotājam ievadīt skaitļus(float).\n# programma pēc katra ievada rāda visu ievadīto skaitļu vidējo vērtību.\n# ps. 1a var iztikt bez lists\n\n# 1.b programma rāda gan skaitļu vidējo vērtību, gan visus ievadītos skaitļus\n# ps iziešana no programmas ir ievadot \"q\"\n\n# 1.c programma nerāda visus ievadītos skaitļus bet gan tikai top3 un bottom3 un protams joprojām vidējo.\n\nskaitli = []\n\nwhile True:\n\n ievads = input(\"Ievadiet daļskaitļi: \")\n\n try:\n skaitlis = float(ievads)\n skaitli.append(skaitlis)\n avg = sum(skaitli) / len(skaitli)\n\n if len(skaitli) > 6:\n print(\n f\"Ievadītie daļskaitļi: \\n {skaitli[:3]} \\n ... \\n {skaitli[-3:]} \\\n \\n Vidējā vērtība: {avg}\")\n else:\n print(f\"Ievadītie skaitļi: {skaitli} \\n Vidējā vērtība: {avg}\")\n\n except ValueError:\n if ievads == \"q\":\n break\n print(\"Nav daļskaitlis. 
Ievadiet q lai izietu.\")\n","repo_name":"b72uno/courses","sub_path":"RTU_Python_101/nod_6_1.py","file_name":"nod_6_1.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"lv","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26802175226","text":"import copy\nimport json\nimport pathlib\nimport os\n\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nimport torchvision.models\nfrom cleverhans.torch.attacks.projected_gradient_descent import (\n projected_gradient_descent,\n)\n\nfrom shrinkbench.experiment import QuantizeExperiment, DNNExperiment\nfrom shrinkbench import models\nfrom shrinkbench.models.head import mark_classifier\nfrom shrinkbench.metrics import model_size, flops, accuracy, correct\nfrom shrinkbench.util import OnlineStats\nfrom experiment_utils import format_path\n\nattacks = {\"pgd\": projected_gradient_descent}\n\n\nclass Model_Evaluator(DNNExperiment):\n default_dl_kwargs = {\n \"batch_size\": 128,\n \"pin_memory\": False,\n \"num_workers\": 4,\n }\n\n def __init__(\n self,\n model_type,\n model_path,\n dataset=\"CIFAR10\",\n gpu=None,\n seed: int=None,\n dl_kwargs: {} = None,\n debug=None,\n attack_method=\"pgd\",\n attack_kwargs=None,\n quantized=False,\n surrogate_model_path =None,\n ):\n super().__init__(seed=seed)\n if seed:\n self.fix_seed(seed, deterministic=True)\n\n # set environment variable to be used by shrinkbench\n os.environ[\"DATAPATH\"] = str(format_path(\"datasets\"))\n\n if dl_kwargs:\n self.dl_kwargs = dl_kwargs\n else:\n self.dl_kwargs = self.default_dl_kwargs\n\n self.path = format_path(model_path)\n self.resume = self.path\n\n self.gpu = gpu\n self.dataset = dataset\n self.debug = debug\n self.model_type = model_type\n self.model_path = model_path\n self.surrogate_model_path = surrogate_model_path\n\n self.build_dataloader(dataset=dataset, **self.dl_kwargs)\n self.quantized = quantized\n if not quantized:\n self.build_model(\n self.model_type,\n pretrained=False,\n resume=self.model_path,\n dataset=dataset,\n )\n self.surrogate_model = None\n else:\n self.surrogate_model = self.build_model(\n self.model_type,\n pretrained=False,\n resume=self.surrogate_model_path,\n dataset=dataset,\n )\n self.model = QuantizeExperiment.load_quantized_model(\n path=self.model_path,\n model_type=self.model_type,\n dataset=self.dataset,\n )\n self.attack_method = attack_method\n self.attack_kwargs = attack_kwargs\n\n def clean_acc(self):\n res = list(\n accuracy(\n model=self.model,\n dataloader=self.train_acc_dl,\n topk=(1, 5),\n debug=self.debug,\n )\n )\n self.clean_train_acc1 = res[0]\n self.clean_train_acc5 = res[1]\n res = list(\n accuracy(\n model=self.model, dataloader=self.val_dl, topk=(1, 5), debug=self.debug\n )\n )\n self.clean_val_acc1 = res[0]\n self.clean_val_acc5 = res[1]\n\n def prune_metrics(self):\n \"\"\"Collect the pruning metrics.\"\"\"\n # Model Size\n size, size_nz = model_size(self.model)\n self.model_size = size\n self.model_size_nz = size_nz\n self.compression_ratio = size / size_nz\n self.model_file_size = pathlib.Path(self.model_path).stat().st_size\n\n x, y = next(iter(self.val_dl))\n if not self.quantized:\n x, y = x.to(self.device), y.to(self.device)\n else:\n x, y = x.to('cpu'), y.to('cpu')\n\n # FLOPS\n ops, ops_nz = flops(self.model, x, quantized=self.quantized)\n self.flops = ops\n self.flops_nz = ops_nz\n self.theoretical_speedup = ops / ops_nz\n\n def adv_acc(self, train=True):\n assert self.attack_method is not None\n assert self.attack_kwargs is not None\n\n 
self.model.eval()\n\n if train:\n dl = self.train_acc_dl\n data = \"train\"\n else:\n dl = self.val_dl\n data = \"test\"\n\n results = {\"adversarial_dataset\": data, \"inputs_tested\": 0}\n\n clean_acc1 = OnlineStats()\n clean_acc5 = OnlineStats()\n adv_acc1 = OnlineStats()\n adv_acc5 = OnlineStats()\n\n epoch_iter = tqdm(dl)\n epoch_iter.set_description(f\"{attack_name} on {data} dataset\")\n\n for i, (x, y) in enumerate(epoch_iter, start=1):\n if self.debug is not None and i > self.debug:\n break\n x, y = x.to(self.device), y.to(self.device)\n if not self.quantized:\n x_adv = attacks[self.attack_method](self.model, x, **self.attack_kwargs)\n else:\n x_adv = attacks[self.attack_method](self.surrogate_model, x, **self.attack_kwargs)\n x, x_adv, y = x.to('cpu'), x_adv.to('cpu'), y.to('cpu')\n y_pred = self.model(x) # model prediction on clean examples\n y_pred_adv = self.model(x_adv) # model prediction on adversarial examples\n\n results[\"inputs_tested\"] += y.size(0)\n\n clean_c1, clean_c5 = correct(y_pred, y, (1, 5))\n clean_acc1.add(clean_c1 / dl.batch_size)\n clean_acc5.add(clean_c5 / dl.batch_size)\n\n adv_c1, adv_c5 = correct(y_pred_adv, y, (1, 5))\n adv_acc1.add(adv_c1 / dl.batch_size)\n adv_acc5.add(adv_c5 / dl.batch_size)\n\n epoch_iter.set_postfix(\n clean_acc1=clean_acc1.mean,\n clean_acc5=clean_acc5.mean,\n adv_acc1=adv_acc1.mean,\n adv_acc5=adv_acc5.mean,\n )\n\n results[\"clean_acc1\"] = clean_acc1.mean\n results[\"clean_acc5\"] = clean_acc5.mean\n results[\"adv_acc1\"] = adv_acc1.mean\n results[\"adv_acc5\"] = adv_acc5.mean\n\n self.adv_results = results\n\n def print_results(self):\n metrics = {}\n for name in [\n \"clean_train_acc1\",\n \"clean_train_acc5\",\n \"clean_val_acc1\",\n \"clean_val_acc5\",\n \"model_size\",\n \"model_size_nz\",\n \"compression_ratio\",\n \"flops\",\n \"flops_nz\",\n \"theoretical_speedup\",\n \"adv_results\",\n \"model_file_size\",\n ]:\n if hasattr(self, name):\n metrics[name] = getattr(self, name)\n\n print(json.dumps(metrics, indent=4))\n return metrics\n\n def run(self, attack=False):\n print(\"Evaluating model ...\\nGetting clean accuracy ...\")\n self.clean_acc()\n print(\"Getting pruning metrics ...\")\n self.prune_metrics()\n if attack and self.attack_method is not None and self.attack_kwargs is not None:\n print(\"Getting adversarial accuracy ...\")\n self.adv_acc()\n return self.print_results()\n\n\nif __name__ == \"__main__\":\n path = \"experiments/experiment_12/googlenet/CIFAR10/googlenet_GreedyPGDGlobalMagGrad_2_compression_5_finetune_iterations/prune/checkpoints/checkpoint-5.pt\"\n model_type = \"googlenet\"\n gpu = 0\n evaluator = Model_Evaluator(\n model_type=model_type, model_path=pathlib.Path(path), gpu=gpu\n )\n evaluator.evaluate()\n","repo_name":"jonahobw/aicas","sub_path":"evaulate_model.py","file_name":"evaulate_model.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2332081816","text":"#!/usr/bin/env python3\n\nimport os\nimport glob\nfrom pydriller import Repository\n\nfrom bugsinpy import AFTER_FOLDER_NAME, BEFORE_FOLDER_NAME\n\nBEFORE_FOLDER_NAME = \"before\"\nAFTER_FOLDER_NAME = \"after\"\n\nGH_JAVA_PROJECTS = { 'apache-commons-cli': 'https://github.com/apache/commons-cli.git', 'google-guava': 'https://github.com/google/guava.git', 'ok-http': 'https://github.com/square/okhttp.git', 'h2': 'https://github.com/h2database/h2database.git', 'zaproxy': 'https://github.com/zaproxy/zaproxy.git', 'jabref': 
'https://github.com/JabRef/jabref.git', 'elastic-search': 'https://github.com/elastic/elasticsearch.git', 'killbill': 'https://github.com/killbill/killbill.git', 'drool': 'https://github.com/kiegroup/drools.git', 'signal-server': 'https://github.com/signalapp/Signal-Server.git'}\nGH_JAVA_PATH = \"gh-java\"\n\nGH_PYTHON_PROJECTS = {'black': 'https://github.com/psf/black.git', 'scikit-learn': 'https://github.com/scikit-learn/scikit-learn.git', 'wagtail': 'https://github.com/wagtail/wagtail.git', 'home-assistant': 'https://github.com/home-assistant/core.git', 'textual': 'https://github.com/Textualize/textual.git', 'pyxel': 'https://github.com/kitao/pyxel.git', 'django': 'https://github.com/django/django.git', 'keras': 'https://github.com/keras-team/keras.git', 'ansible': 'https://github.com/ansible/ansible.git', 'requests': 'https://github.com/psf/requests.git'}\nGH_PYTHON_PATH = \"gh-python\"\n\ndef handle_projects(projects, extension, base_dir, max_files=100):\n    print(f\"Handle {extension} projects in {base_dir}\")\n    for project in projects:\n        already_performed = len(glob.glob(r'' + base_dir + '/before/' + project + '/**/*' + extension, recursive=True))\n        if already_performed >= max_files:\n            print(f\"Already enough files in project {project}\")\n            continue\n        \n        print(f\"Starting gathering files in {project}\")\n        gathered_files = 0\n        for commit in Repository(projects[project], only_no_merge=True, only_modifications_with_file_types=[extension]).traverse_commits():\n            if gathered_files >= max_files:\n                break\n\n            for file in commit.modified_files:\n                if gathered_files < max_files and file.filename.endswith(extension) and file.source_code_before is not None and file.source_code is not None:\n                    before_dir = f\"{base_dir}/{BEFORE_FOLDER_NAME}/{project}/{commit.hash}\"\n                    after_dir = f\"{base_dir}/{AFTER_FOLDER_NAME}/{project}/{commit.hash}\"\n                    os.makedirs(before_dir, exist_ok=True)\n                    os.makedirs(after_dir, exist_ok=True)\n                    clean_file_name = file.filename\n                    if not (os.path.exists(f\"{before_dir}/{clean_file_name}\") or os.path.exists(f\"{after_dir}/{clean_file_name}\")):\n                        with open(f\"{before_dir}/{clean_file_name}\", 'w') as f:\n                            f.write(file.source_code_before)\n                        with open(f\"{after_dir}/{clean_file_name}\", 'w') as f:\n                            f.write(file.source_code)\n                        gathered_files += 1\n        print(f\"Gathered {str(gathered_files)} in project {project}\")\n\nif __name__ == '__main__':\n    handle_projects(GH_JAVA_PROJECTS, \".java\", GH_JAVA_PATH)\n    handle_projects(GH_PYTHON_PROJECTS, \".py\", GH_PYTHON_PATH)\n","repo_name":"GumTreeDiff/datasets","sub_path":"gh-benchs.py","file_name":"gh-benchs.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"71150388142","text":"from random import randint\nfrom time import sleep\ncores = {'padrao': '\\033[m', 'negrito': '\\033[1m', 'verde': '\\033[1;32m'}\n\n\ndef deco():\n    print(f'{cores[\"verde\"]}-{cores[\"padrao\"]}' * 70)\n\n\ndeco()\nprint(f'{cores[\"negrito\"]}Sorteando e somando valores pares através de funções!'.center(70))\ndeco()\n\nvalores = []\n\n\ndef sorteia(lista):\n    print('Sorteando 5 valores da lista: ', end='')\n    for numero in range(0, 5):\n        numero = randint(1, 10)\n        valores.append(numero)\n\n        sleep(0.5)\n        print(numero, end=' ')\n    sleep(0.5)\n    print('PRONTO!')\n\n\ndef somaPar(lista):\n    pares = 0\n    print(f'Somando os valores pares de ', end='')\n    for numero in valores:\n        sleep(0.5)\n        print(numero, end=' ')\n        if numero % 2 == 0:\n            pares += numero\n    sleep(0.5)\n    print(f'temos 
{pares}')\n\n\nsorteia(valores)\nsomaPar(valores)\ndeco()\n","repo_name":"lucasassuino/Exercises-Python3-CEV","sub_path":"ex100.py","file_name":"ex100.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7912196295","text":"import serial\nfrom time import strftime, gmtime\nimport subprocess \n\n# Use hackrf_transfer to log data collected by the HackRF\nop_freq_1 = str(100000000) # 100 MHz\nop_freq_2 = str(400000000) # 400 MHz\ndata_dir = \"/home/pi/Desktop/hackrf_data\"\nnum_samples = str(1000)\n \n# Store the raw IQ in a timestamped data file\nstamp = strftime('%Y-%m-%d-%H_%M_%S', gmtime())\ncmd = \"hackrf_transfer -f \" + op_freq_1 + \" -n \" + num_samples + \" -r\" + data_dir + \"/LOG_IQ_\" + op_freq_1 + \"_\" + stamp\nrun_cmd = subprocess.call(cmd, shell=True)\n \n \nstamp = strftime('%Y-%m-%d-%H_%M_%S', gmtime())\ncmd = \"hackrf_transfer -f \" + op_freq_2 + \" -n \" + num_samples + \" -r\" + data_dir + \"/LOG_IQ_\" + op_freq_2 + \"_\" + stamp\nrun_cmd = subprocess.call(cmd, shell=True)\n","repo_name":"boxyghost/Edge-of-Space-2021","sub_path":"Radio_pi_files/hackrf_auto.py","file_name":"hackrf_auto.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1794426218","text":"# coding=gbk\r\n# @file:04-手刃盗版天堂.py\r\n# @data:2021/7/22 22:07\r\n# Editor:clown\r\n\r\n# 1. Locate the 2021 must-see films section\r\n# 2. Extract the sub-page links from the 2021 must-see films section\r\n# 3. Request each sub-page and grab the download address we want\r\nimport re\r\n\r\nimport requests\r\nimport urllib3\r\nurllib3.disable_warnings() # silences the 'Adding certificate verification is strongly advised' warning\r\nurl=\"https://www.dy2018.com/\"\r\nheads={\r\n\"User-Agent\": \"Mozilla/5.0 (WindowsNT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36\"\r\n}\r\n# verify=False skips SSL certificate verification\r\nresp=requests.get(url,headers=heads,verify=False)\r\nresp.encoding='gb2312' # specify the charset so the page decodes correctly\r\n\r\nobj1=re.compile(r\"2021必看热片.*?