diff --git "a/5843.jsonl" "b/5843.jsonl" new file mode 100644--- /dev/null +++ "b/5843.jsonl" @@ -0,0 +1,821 @@ +{"seq_id":"36715212224","text":"# **************************** Desafio 098 ************************* #\n# Função de Contador #\n# Faça um programa que tenha uma função chamada contador(), que #\n# receba três parâmetros: início, fim e passo. Seu programa tem que #\n# realizar três contagens através da função criada: #\n# #\n# a) de 1 até 10, de 1 em 1 #\n# b) de 10 até 0, de 2 em 2 #\n# c) uma contagem personalizada #\n# ****************************************************************** #\n#\n# Definindo as funções:\nfrom time import sleep\n\n\ndef lin():\n print('+=' * 24)\n\n\ndef lin1():\n print('\\033[1;34m*=\\033[m' * 26)\n\n\ndef titulo(msg):\n lin()\n texto = f' \\033[1;3;4;7;34m{msg}\\033[m '\n print(f'\\n{texto:*^64}\\n')\n lin()\n print()\n\n\ntitulo('Função de Contador')\n\n\n# ****************************************************************** #\n# def exibe(início, fim, passo):\n# print(f'Contagem de {início} até {fim} de {passo} em {passo}:')\ndef linha():\n print('-=' * 23)\n\n\ndef contador(ini, fim, p):\n from time import sleep\n if p < 0:\n p = -p\n if p == 0:\n p = 1\n linha()\n print(f'Contagem de {ini} até {fim} de {p} em {p}:')\n if ini < fim:\n fim = fim + 1\n else:\n fim = fim - 1\n p = -p\n for c in range(ini, fim, p):\n print(c, end=' ', flush=True) # Se não usar flush=True, o programa cria um buffer de tela,\n # e somente após o término da contagem que ele mostra na tela.\n sleep(.25)\n print('FIM!')\n\n\ncontador(1, 10, 1)\ncontador(10, 0, 2)\nlinha()\nprint('Agora é a sua vez de personalizar a contagem!')\ni = int(input('Início: '))\nf = int(input('Fim: '))\np = int(input('Passo: '))\ncontador(i, f, p)\n","repo_name":"EduardoPessanha/Git-Python","sub_path":"exercicios/ex098.py","file_name":"ex098.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13190097070","text":"\"\"\"\nmavsim_python\n - Chapter 12 assignment for Beard & McLain, PUP, 2012\n - Last Update:\n 4/3/2019 - BGM\n 2/27/2020 - RWB\n 3/30/2022 - RWB\n 1/5/2023 - David L. 
Christiansen\n 7/13/2023 - RWB\n\"\"\"\nimport os, sys\n# insert parent directory at beginning of python search path\nfrom pathlib import Path\nsys.path.insert(0,os.fspath(Path(__file__).parents[1]))\n# use QuitListener for Linux or PC <- doesn't work on Mac\n#from tools.quit_listener import QuitListener\nimport pyqtgraph as pg\nimport parameters.simulation_parameters as SIM\nimport parameters.planner_parameters as PLAN\nfrom models.mav_dynamics_sensors import MavDynamics\nfrom models.wind_simulation import WindSimulation\nfrom control.autopilot import Autopilot\nfrom estimation.observer import Observer\nfrom planning.path_follower import PathFollower\nfrom planning.path_manager import PathManager\nfrom planning.path_planner import PathPlanner\nfrom viewers.mav_world_viewer import MAVWorldViewer\nfrom viewers.data_viewer import DataViewer\nfrom message_types.msg_world_map import MsgWorldMap\n#quitter = QuitListener()\n\nVIDEO = False\nDATA_PLOTS = False\nANIMATION = True\nPLANNING_VIEWER = True\n\n# video initialization\nif VIDEO is True:\n from viewers.video_writer import VideoWriter\n video = VideoWriter(video_name=\"chap12_video.avi\",\n bounding_box=(0, 0, 1000, 1000),\n output_rate=SIM.ts_video)\n \n#initialize the visualization\nif ANIMATION or DATA_PLOTS:\n app = pg.QtWidgets.QApplication([]) # use the same main process for Qt applications\nif ANIMATION:\n world_view = MAVWorldViewer(app=app) # initialize the viewer\nif DATA_PLOTS:\n data_view = DataViewer(app=app,dt=SIM.ts_simulation, plot_period=SIM.ts_plot_refresh, \n data_recording_period=SIM.ts_plot_record_data, time_window_length=30)\n\n# initialize elements of the architecture\nmav = MavDynamics(SIM.ts_simulation)\nwind = WindSimulation(SIM.ts_simulation)\nautopilot = Autopilot(SIM.ts_simulation)\nobserver = Observer(SIM.ts_simulation)\npath_follower = PathFollower()\npath_manager = PathManager()\nplanner_flag = 'simple_straight' # return simple waypoint path\n# planner_flag = 'simple_dubins' # return simple dubins waypoint path\n# planner_flag = 'rrt_straight' # plan path through city using straight-line RRT\n# planner_flag = 'rrt_dubins' # plan path through city using dubins RRT\npath_planner = PathPlanner(app=app, planner_flag=planner_flag, show_planner=PLANNING_VIEWER)\nworld_map = MsgWorldMap()\n\n# initialize the simulation time\nsim_time = SIM.start_time\nend_time = 200\n\n# main simulation loop\nprint(\"Press 'Esc' to exit...\")\nwhile sim_time < SIM.end_time:\n # -------observer-------------\n measurements = mav.sensors() # get sensor measurements\n estimated_state = observer.update(measurements) # estimate states from measurements\n # Observer occasionally gives bad results, true states always work.\n #estimated_state = mav.true_state\n # -------path planner - ----\n if path_manager.manager_requests_waypoints is True:\n waypoints = path_planner.update(world_map, estimated_state, PLAN.R_min)\n\n # -------path manager-------------\n path = path_manager.update(waypoints, PLAN.R_min, estimated_state)\n\n # -------path follower-------------\n autopilot_commands = path_follower.update(path, estimated_state)\n\n # -------autopilot-------------\n delta, commanded_state = autopilot.update(autopilot_commands, estimated_state)\n\n # -------physical system-------------\n current_wind = wind.update() # get the new wind vector\n mav.update(delta, current_wind) # propagate the MAV dynamics\n\n # -------update viewer-------------\n if ANIMATION:\n world_view.update(mav.true_state, path, waypoints, world_map) # plot path and MAV\n 
if DATA_PLOTS:\n plot_time = sim_time\n data_view.update(mav.true_state, # true states\n estimated_state, # estimated states\n commanded_state, # commanded states\n delta) # inputs to aircraft\n if ANIMATION or DATA_PLOTS or PLANNING_VIEWER:\n app.processEvents()\n\n # -------increment time-------------\n sim_time += SIM.ts_simulation\n\n # -------Check to Quit the Loop-------\n # if quitter.check_quit():\n # break\n\nif VIDEO is True:\n video.close()\n\n\n\n","repo_name":"randybeard/mavsim_public","sub_path":"mavsim_python/design_projects/chap12/mavsim_chap12.py","file_name":"mavsim_chap12.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":294,"dataset":"github-code","pt":"81"} +{"seq_id":"13378093099","text":"import sqlite3\nimport sys\ntry:\n connection = sqlite3.connect('user_settings.db', timeout=20)\n cursor = connection.cursor()\n print('Введите координаты')\n\nexcept sqlite3.Error as error:\n print('* Ошибка подключения к базе данных *')\n print(error)\n","repo_name":"Timon55/carbis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9529558430","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nclass Sprite:\n def __init__(self, data: str) -> None:\n self.data = list(reversed(data.split('\\n')))\n self.height = len(self.data)\n self.width = len(self.data[0])\n\n def can_place_at(self, line, x, y):\n sprite_line = self.data[y]\n for i in range(self.width):\n if sprite_line[i] == '#' and line[x + i] != '.':\n return False\n return True\n\n def place_at(self, line, x, y):\n line = [x for x in line]\n for i, d in enumerate(self.data[y]):\n if d == '#':\n line[x + i] = '#'\n\n return ''.join(line)\n\n def __str__(self) -> str:\n return f'Sprite({self.width}x{self.height} - {self.data})'\n\n def __repr__(self) -> str:\n return self.__str__()\n\nempty_line = '.......'\nsprites = [Sprite(x) for x in \"\"\"####\n\n.#.\n###\n.#.\n\n..#\n..#\n###\n\n#\n#\n#\n#\n\n##\n##\"\"\".split('\\n\\n')]\n\ndef read_input() -> str:\n with open('input.txt') as file:\n return file.read()\n\ndef process_wind(w: int, sprite: Sprite, x: int) -> int:\n if x + w < 0 or x + w + sprite.width > 7:\n return x\n return x + w\n\ndef can_move_down(sprite: Sprite, x: int, y: int, field: list[str]) -> bool:\n for i in range(sprite.height):\n if len(field) < y + i:\n continue\n if not sprite.can_place_at(field[y + i - 1], x, i):\n return False\n\n return True\n\ndef can_move_sideways(w: int, sprite: Sprite, x: int, y: int, field: list[str]) -> bool:\n if x + w < 0 or x + w + sprite.width > 7:\n return False\n\n for i in range(sprite.height):\n if len(field) < y + i + 1:\n continue\n if not sprite.can_place_at(field[y + i], x + w, i):\n return False\n\n # TODO\n return True\n\ndef place_sprite(sprite: Sprite, field: list[str], x: int, y: int) -> list[str]:\n while len(field) < y + sprite.height:\n field.append(empty_line)\n\n for i in range(sprite.height):\n field[y + i] = sprite.place_at(field[y + i], x, i)\n return field\n\ndef has_repeating_pattern(matches):\n return find_repeating_pattern(matches) is not None\n\ndef find_repeating_pattern(matches):\n diff = set([(matches[i+1][1] - matches[i][1], matches[i+1][2] - matches[i][2]) for i in range(len(matches) - 1)])\n if len(diff) != 1:\n return None\n\n diff = list(diff)[0]\n return {\n 'stone_start': matches[0][1],\n 'height_start': matches[0][2],\n 'stone_diff': 
diff[0],\n 'height_diff': diff[1],\n }\n\ndef process_stones(winds, count):\n field = ['-------']\n\n current_stone = 0\n current_wind = 0\n\n patterns = {}\n found_pattern = None\n\n necessary_count = count\n while current_stone < necessary_count:\n if found_pattern is None:\n key = (current_stone % len(sprites), current_wind)\n current_value = (field[-1], current_stone, len(field))\n if key in patterns:\n patterns[key].append(current_value)\n matches = [p for p in patterns[key] if p[0] == field[-1]]\n if len(matches) >= 3:\n found_pattern = find_repeating_pattern(matches)\n if found_pattern is not None:\n necessary_count = current_stone + ((count - found_pattern['stone_start']) % found_pattern['stone_diff'])\n continue\n\n else:\n patterns[key] = [current_value]\n\n current_x = 2\n sprite = sprites[current_stone % len(sprites)]\n for _ in range(4):\n current_x = process_wind(winds[current_wind], sprite, current_x)\n current_wind = (current_wind + 1) % len(winds)\n\n current_y = len(field)\n\n while can_move_down(sprite, current_x, current_y, field):\n current_y -= 1\n if can_move_sideways(winds[current_wind], sprite, current_x, current_y, field):\n current_x = process_wind(winds[current_wind], sprite, current_x)\n current_wind = (current_wind + 1) % len(winds)\n\n # Place sprite\n field = place_sprite(sprite, field, current_x, current_y)\n\n current_stone += 1\n\n if found_pattern is None:\n return len(field) - 1\n\n height_start = found_pattern['height_start']\n height_end = (len(field) - height_start) % found_pattern['height_diff']\n height_repetition = ((count - found_pattern['stone_start']) // found_pattern['stone_diff']) * found_pattern['height_diff']\n\n return height_start + height_repetition + height_end - 1\n\ndef main(report_result):\n winds = [({'>': 1, '<': -1}[x]) for x in read_input().strip()]\n\n height = process_stones(winds, 2022)\n report_result('Field height after 2022 stones:', height)\n\n height = process_stones(winds, 1000000000000)\n report_result('Field height after 1000000000000 stones:', height)\n\nif __name__ == '__main__':\n main(print)\n","repo_name":"nilshohmann/AdventOfCode2022","sub_path":"17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20951766664","text":"#!/usr/bin/env python\n\"\"\" Implementing low level Tensorflow variables \"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.models import Model\n\nv = tf.Variable(\n [-1, 2],\n dtype=tf.float32,\n name='test_vect')\n\nv.assign([1101, 2202])\n\n# print(v, '\\n')\n# print(v ** 2, '\\n')\n# print(v.numpy(), '\\n')\n\n# Tensors!\nh = v + [9, 3]\nprint(h, '\\n')\n\ninputs = Input(shape=(5,))\nh = Dense(16, activation='sigmoid')(inputs)\nprint(h, '\\n')\n\noutputs = Dense(10, activation='softmax', name='out_layer')(h)\nprint(outputs, '\\n')\n\nmodel = Model(inputs=inputs, outputs=outputs)\nprint(model.input)\nprint(model.output)\n\nconst = tf.constant([[5, 2], [1, 3]])\nprint('\\n\\nConstant tensor:', const, '\\n')\nprint(const.numpy(), '\\n')\n\nprint('\\n', tf.ones(shape=(5, 4)), '\\n')\nprint(tf.zeros(shape=(3, 8)), '\\n')\n","repo_name":"Erick-INCS/Exotic-Tensorflow","sub_path":"tf_variables/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73608237385","text":"import pandas 
as pd \r\nimport os\r\nfrom tqdm import tqdm\r\nimport nibabel as nib\r\nimport numpy as np\r\n\r\nfile_path = '/home/justin.park/Summer_Research/dataset_creator/updated_dataset/train_path.csv'\r\nnew_image_path = '/home/justin.park/Summer_Research/dataset_creator/updated_dataset/images/training_dataset_images'\r\n\r\nimg_paths = pd.read_csv(file_path, header=None)\r\n\r\ndef read_save_nifti_file(filepath, name):\r\n\tscan = nib.load(filepath)\r\n\timage = scan.get_fdata()\r\n\timage = np.squeeze(image)\r\n\t\r\n\theight, width, depth = image.shape\r\n\r\n\timage_1 = image[round(height/2)-10:round(height/2)+10, :, :]\r\n\timage_2 = image[:, round(width/2)-10:round(width/2)+10, :]\r\n\timage_3 = image[:, :, round(depth/2)-10:round(depth/2)+10]\r\n\r\n\t# Save 20 center slices of 3 different views for each subject\r\n\tfor i in range(20):\r\n\t\tim_1 = image_1[i,:,:]\r\n\t\tim_2 = image_2[:,i,:]\r\n\t\tim_3 = image_3[:,:,i]\r\n\r\n\t\tfilename_1 = name + '_' + filepath.split('/')[-1].split('.')[0] + str(i) + '1' + '.nii'\r\n\t\tfilename_2 = name + '_' + filepath.split('/')[-1].split('.')[0] + str(i) + '2' + '.nii'\r\n\t\tfilename_3 = name + '_' + filepath.split('/')[-1].split('.')[0] + str(i) + '3' + '.nii'\r\n\r\n\t\tim_1 = nib.Nifti1Image(im_1, scan.affine, scan.header)\r\n\t\tim_2 = nib.Nifti1Image(im_2, scan.affine, scan.header)\r\n\t\tim_3 = nib.Nifti1Image(im_3, scan.affine, scan.header)\r\n\r\n\t\tnib.save(im_1, os.path.join(new_image_path, filename_1))\r\n\t\tnib.save(im_2, os.path.join(new_image_path, filename_2))\r\n\t\tnib.save(im_3, os.path.join(new_image_path, filename_3))\r\n\r\nfor ind in tqdm(range(len(img_paths))):\r\n\tpath = img_paths.iloc[ind, 0]\r\n\tif '/AD_Update' in path:\r\n\t\tif 'siemens_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_siemens_3')\r\n\t\tif 'siemens_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_siemens_15')\r\n\t\tif 'philips_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_philips_3')\r\n\t\tif 'philips_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_philips_15')\r\n\t\tif 'ge_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_GE_3')\r\n\t\tif 'ge_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'AD_GE_15')\r\n\t\t\r\n\tif '/CN_Update' in path:\r\n\t\tif 'siemens_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_siemens_3')\r\n\t\tif 'siemens_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_siemens_15')\r\n\t\tif 'philips_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_philips_3')\r\n\t\tif 'philips_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_philips_15')\r\n\t\tif 'ge_3' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_GE_3')\r\n\t\tif 'ge_15' in path.lower():\r\n\t\t\tread_save_nifti_file(path, 'ADNI_GE_15')\r\n\t\t\r\n\r\n\r\n","repo_name":"JPark-UC/DL_Research","sub_path":"2_Data_Organizer.py","file_name":"2_Data_Organizer.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30480591236","text":"from collections import OrderedDict\nimport numpy as np\nfrom gym.spaces import Dict\nfrom rllab.spaces import Box\nfrom rllab.misc import logger\n\nfrom sandbox.ours.envs.sawyer.env_util import get_stat_in_paths, \\\n create_stats_ordered_dict, get_asset_full_path\nfrom sandbox.ours.envs.sawyer.core.multitask_env import MultitaskEnv\nfrom sandbox.ours.envs.sawyer.base import SawyerXYZEnv\n\n\nclass 
SawyerReachXYZEnv(SawyerXYZEnv, MultitaskEnv):\n def __init__(\n self,\n reward_type='hand_distance',\n indicator_threshold=0.06,\n\n fix_goal=False,\n fixed_goal=(0.15, 0.6, 0.3),\n goal_low=None,\n goal_high=None,\n\n hide_goal_markers=False,\n\n **kwargs\n ):\n self.quick_init(locals())\n MultitaskEnv.__init__(self)\n SawyerXYZEnv.__init__(self, model_name=self.model_name, **kwargs)\n\n if goal_low is None:\n goal_low = self.hand_low\n if goal_high is None:\n goal_high = self.hand_high\n goal_low = np.array(goal_low)\n goal_high = np.array(goal_high)\n\n self.reward_type = reward_type\n self.indicator_threshold = indicator_threshold\n\n self.fix_goal = fix_goal\n self.fixed_goal = np.array(fixed_goal)\n self.goal_space = Box(goal_low, goal_high)\n self._state_goal = None\n\n self.hide_goal_markers = hide_goal_markers\n\n self.action_space = Box(np.array([-1, -1, -1]), np.array([1, 1, 1]))\n self.hand_space = Box(self.hand_low, self.hand_high)\n self._observation_space_dict = Dict([\n ('observation', self.hand_space),\n ('desired_goal', self.goal_space),\n ('achieved_goal', self.hand_space),\n ('state_observation', self.hand_space),\n ('state_desired_goal', self.goal_space),\n ('state_achieved_goal', self.hand_space),\n ('proprio_observation', self.hand_space),\n ('proprio_desired_goal', self.goal_space),\n ('proprio_achieved_goal', self.hand_space),\n ])\n self.observation_space = Box(np.concatenate([self.hand_low, self.hand_low]), np.concatenate([self.hand_high, self.hand_high]))\n\n def step(self, action):\n self.set_xyz_action(action)\n # keep gripper closed\n self.do_simulation(np.array([1]))\n # The marker seems to get reset every time you do a simulation\n self._set_goal_marker(self._state_goal)\n obs_dict = self._get_obs_dict()\n reward = self.compute_reward(action, obs_dict)\n info = self._get_info()\n obs = self._convert_obs_dict_to_obs(obs_dict)\n done = False\n return obs, reward, done, info\n\n def _get_obs_dict(self):\n flat_obs = self.get_endeff_pos()\n return dict(\n observation=flat_obs,\n desired_goal=self._state_goal,\n achieved_goal=flat_obs,\n state_observation=flat_obs,\n state_desired_goal=self._state_goal,\n state_achieved_goal=flat_obs,\n proprio_observation=flat_obs,\n proprio_desired_goal=self._state_goal,\n proprio_achieved_goal=flat_obs,\n )\n\n def _get_info(self):\n hand_distance = np.linalg.norm(self._state_goal - self.get_endeff_pos())\n return dict(\n hand_distance=hand_distance,\n hand_success=float(hand_distance < self.indicator_threshold),\n )\n\n def _set_goal_marker(self, goal):\n \"\"\"\n This should be use ONLY for visualization. 
Use self._state_goal for\n logging, learning, etc.\n \"\"\"\n self.data.site_xpos[self.model.site_name2id('hand-goal-site')] = (\n goal\n )\n if self.hide_goal_markers:\n self.data.site_xpos[self.model.site_name2id('hand-goal-site'), 2] = (\n -1000\n )\n\n @property\n def model_name(self):\n return 'sawyer_reach.xml'\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n self.viewer.cam.lookat[0] = 0\n self.viewer.cam.lookat[1] = 1.0\n self.viewer.cam.lookat[2] = 0.5\n self.viewer.cam.distance = 0.3\n self.viewer.cam.elevation = -45\n self.viewer.cam.azimuth = 270\n self.viewer.cam.trackbodyid = -1\n\n def reset_model(self):\n self._reset_hand()\n goal = self.sample_goal()\n self._state_goal = goal['state_desired_goal']\n self._set_goal_marker(self._state_goal)\n self.sim.forward()\n return self._convert_obs_dict_to_obs(self._get_obs_dict())\n\n def _reset_hand(self):\n for _ in range(10):\n self.data.set_mocap_pos('mocap', np.array([0, 0.5, 0.02]))\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n self.do_simulation(None, self.frame_skip)\n\n \"\"\"\n Multitask functions\n \"\"\"\n def get_goal(self):\n return {\n 'desired_goal': self._state_goal,\n 'state_desired_goal': self._state_goal,\n }\n\n def set_to_goal(self, goal):\n state_goal = goal['state_desired_goal']\n for _ in range(30):\n self.data.set_mocap_pos('mocap', state_goal)\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n # keep gripper closed\n self.do_simulation(np.array([1]))\n\n def sample_goals(self, batch_size):\n if self.fix_goal:\n goals = np.repeat(\n self.fixed_goal.copy()[None],\n batch_size,\n 0\n )\n else:\n goals = np.random.uniform(\n self.goal_space.low,\n self.goal_space.high,\n size=(batch_size, self.goal_space.low.size),\n )\n return {\n 'desired_goal': goals,\n 'state_desired_goal': goals,\n }\n\n def compute_rewards(self, actions, obs):\n achieved_goals = obs['state_achieved_goal']\n desired_goals = obs['state_desired_goal']\n hand_pos = achieved_goals\n goals = desired_goals\n distances = np.linalg.norm(hand_pos - goals, axis=1)\n if self.reward_type == 'hand_distance':\n r = -distances\n elif self.reward_type == 'hand_success':\n r = -(distances < self.indicator_threshold).astype(float)\n else:\n raise NotImplementedError(\"Invalid/no reward type.\")\n return r\n\n def get_diagnostics(self, paths, prefix=''):\n statistics = OrderedDict()\n for stat_name in [\n 'hand_distance',\n 'hand_success',\n ]:\n stat_name = stat_name\n stat = get_stat_in_paths(paths, 'env_infos', stat_name)\n statistics.update(create_stats_ordered_dict(\n '%s%s' % (prefix, stat_name),\n stat,\n always_show_all_stats=True,\n ))\n statistics.update(create_stats_ordered_dict(\n 'Final %s%s' % (prefix, stat_name),\n [s[-1] for s in stat],\n always_show_all_stats=True,\n ))\n return statistics\n\n def get_env_state(self):\n base_state = super().get_env_state()\n goal = self._state_goal.copy()\n return base_state, goal\n\n def set_env_state(self, state):\n base_state, goal = state\n super().set_env_state(base_state)\n self._state_goal = goal\n self._set_goal_marker(goal)\n\n def _convert_obs_dict_to_obs(self, obs_dict):\n return np.concatenate([obs_dict['observation'], obs_dict['desired_goal']])\n\n def log_diagnostics(self, paths):\n diagnostics = self.get_diagnostics(paths)\n logger.record_tabular('HandDistanceMean', diagnostics['hand_distance Mean'])\n logger.record_tabular('FinalHandDistanceMean', diagnostics['Final hand_distance Mean'])\n logger.record_tabular('FinalHandSuccessMean', 
diagnostics['Final hand_success Mean'])\n\nclass SawyerReachXYEnv(SawyerReachXYZEnv):\n def __init__(self, *args,\n fixed_goal=(0.15, 0.6),\n hand_z_position=0.055, **kwargs):\n self.quick_init(locals())\n SawyerReachXYZEnv.__init__(\n self,\n *args,\n fixed_goal=(fixed_goal[0], fixed_goal[1], hand_z_position),\n **kwargs\n )\n self.hand_z_position = hand_z_position\n self.action_space = Box(np.array([-1, -1]), np.array([1, 1]))\n self.hand_space = Box(\n np.hstack((self.hand_space.low[:2], self.hand_z_position)),\n np.hstack((self.hand_space.high[:2], self.hand_z_position))\n )\n self.observation_space = Dict([\n ('observation', self.hand_space),\n ('desired_goal', self.hand_space),\n ('achieved_goal', self.hand_space),\n ('state_observation', self.hand_space),\n ('state_desired_goal', self.hand_space),\n ('state_achieved_goal', self.hand_space),\n ('proprio_observation', self.hand_space),\n ('proprio_desired_goal', self.hand_space),\n ('proprio_achieved_goal', self.hand_space),\n ])\n\n def step(self, action):\n delta_z = self.hand_z_position - self.data.mocap_pos[0, 2]\n action = np.hstack((action, delta_z))\n return super().step(action)\n\n\nif __name__ == \"__main__\":\n import time\n env = SawyerReachXYZEnv()\n env.reset()\n for _ in range(100):\n env.render()\n obs, rew, done, info = env.step(env.action_space.sample()) # take a random action\n print(obs)\n time.sleep(env.dt)","repo_name":"jonasrothfuss/model_ensemble_meta_learning","sub_path":"sandbox/ours/envs/sawyer/sawyer_reach.py","file_name":"sawyer_reach.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"} +{"seq_id":"69822608265","text":"\nfrom django.shortcuts import render, redirect, reverse\nfrom django.views.generic import View\nfrom stock.forms import StockQuoteForm\nfrom stock.models import Person, Portfolio\nfrom stock.module_stock import TradingData\nfrom howdimain.howdimain_vars import STOCK_DETAILS\nfrom howdimain.utils.get_ip import get_client_ip\nfrom howdimain.utils.format_and_tokens import add_display_tokens, format_and_sort_stocks\nfrom howdimain.utils.plogger import Logger\n\n\nlogger = Logger.getlogger()\nsource = 'quotes'\n\n\nclass QuoteView(View):\n stockquote_form = StockQuoteForm\n template_name = 'finance/stock_quotes.html'\n\n td = TradingData()\n td.setup()\n markets = ['XNAS', 'XNYS', 'XAMS', 'INDEX']\n data_provider_url = td.data_provider_url\n\n def get(self, request):\n user = request.user\n if user.is_authenticated:\n # add Person class to user\n user.__class__ = Person\n\n try:\n default_user = Person.objects.get(username='default_user')\n\n except Person.DoesNotExist:\n default_user = None\n\n quote_string = request.session.get('quote_string', '')\n selected_portfolio = request.session.get('selected_portfolio', '')\n markets = request.session.get('markets', self.markets)\n stockdetail = request.session.get('stockdetail', STOCK_DETAILS[0][0])\n form = self.stockquote_form(initial={\n 'quote_string': quote_string,\n 'selected_portfolio': selected_portfolio,\n 'markets': markets,\n 'stockdetails': stockdetail,\n })\n portfolios = default_user.get_portfolio_names()\n\n if user.is_authenticated:\n portfolios += user.get_portfolio_names()\n\n if selected_portfolio:\n try:\n # try if user has selected a portfolio if authenticated\n if user.is_authenticated:\n symbols = Portfolio.objects.get(\n user=user, portfolio_name=selected_portfolio).get_stock()\n stock_info = self.td.get_stock_trade_info(symbols[0:20])\n 
stock_info += self.td.get_stock_trade_info(symbols[20:40])\n\n else:\n raise Portfolio.DoesNotExist\n\n except Portfolio.DoesNotExist:\n # try if it is a default portfolio\n try:\n symbols = Portfolio.objects.get(\n user=default_user, portfolio_name=selected_portfolio).get_stock() #pylint: disable=line-too-long\n stock_info = self.td.get_stock_trade_info(symbols[0:20])\n stock_info += self.td.get_stock_trade_info(symbols[20:40])\n\n except Portfolio.DoesNotExist:\n pass\n\n else:\n symbols = self.td.parse_stock_quote(quote_string, markets=markets)\n stock_info = self.td.get_stock_trade_info(symbols[0:20])\n\n stock_info = add_display_tokens(stock_info)\n stock_info = format_and_sort_stocks(stock_info)\n\n context = {'source': source,\n 'stock_info': stock_info,\n 'form': form,\n 'portfolios': sorted(portfolios),\n 'data_provider_url': self.data_provider_url, }\n\n return render(request, self.template_name, context)\n\n def post(self, request):\n user = request.user\n quote_string = request.session.get('quote_string', '')\n selected_portfolio = request.session.get('selected_portfolio', '')\n markets = request.session.get('markets', self.markets)\n stockdetail = request.session.get('stockdetail', STOCK_DETAILS[0][0])\n\n form = self.stockquote_form(request.POST)\n if form.is_valid():\n form_data = form.cleaned_data\n new_quote_string = form_data.get('quote_string')\n new_selected_portfolio = form_data.get('selected_portfolio')\n markets = form_data.get('markets')\n stockdetail = form_data.get('stockdetails')\n\n if new_selected_portfolio != selected_portfolio:\n selected_portfolio = new_selected_portfolio\n quote_string = ''\n\n elif new_quote_string != quote_string:\n quote_string = new_quote_string\n selected_portfolio = ''\n\n else:\n pass\n\n request.session['quote_string'] = quote_string\n request.session['selected_portfolio'] = selected_portfolio\n request.session['markets'] = markets\n request.session['stockdetail'] = stockdetail\n logger.info(f'user {user} [ip: {get_client_ip(request)}] looking '\n f'up: {quote_string} / {selected_portfolio}')\n\n else:\n pass\n\n return redirect(reverse('stock_quotes'))\n","repo_name":"bvermeulen/Django","sub_path":"stock/views/quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"7818371421","text":"outFp = None\noutStr = \"\"\n\noutFp = open(\"/Users/jaeng/Desktop/python_txt 파일/data2.txt\", \"w\")\n\nwhile True:\n outStr = input(\"내용 입력 : \")\n if outStr != \"\":\n outFp.writelines(outStr + \"\\n\")\n else:\n break;\n \noutFp.close()\nprint(\"---정상적으로 파일에 씀---\")","repo_name":"zxxng/Python-MySQL","sub_path":"강의자료/파일 입출력/2. 파일 입력.py","file_name":"2. 파일 입력.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21007323149","text":"import torch\r\n\r\ndef laplacian_proj(w):\r\n psi = laplacian_eigenvectors(N=w.shape[-1])[0]\r\n # wn = w.pow(2).sum(dim=[-1, -2]).sqrt()\r\n # print('Filtering weights fraction: ', 1 - (wn > 1e-3).float().mean().item())\r\n # w = w[wn > 1e-4]\r\n return torch.einsum('iyx,jyx->ij', w, psi).pow(2).mean(0)\r\n\r\ndef laplacian_eigenvectors(N=3):\r\n \"\"\"Compute the eigenvectors of the Laplacian on the NxN grid.\r\n :param int n: grid size\r\n :returns torch.Tensor: the N^2 eigenvectors of the grid Laplacian. 
Each eigenvector is a NxN matrix.\"\"\"\r\n\r\n laplacian = torch.zeros(N**2, N**2)\r\n for i in range(N**2):\r\n x = i % N\r\n y = i // N\r\n n = 0\r\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\r\n xx = x + dx\r\n yy = y + dy\r\n if 0 <= xx < N and 0 <= yy < N:\r\n laplacian[i, N * yy + xx] = -1\r\n n += 1\r\n laplacian[i, i] = n\r\n\r\n lambd, psi = torch.linalg.eigh(laplacian) # torch.symeig(laplacian, eigenvectors=True)\r\n return psi.T.reshape(N**2, N, N), lambd\r\n\r\ndef extract_convolutional_weights(f):\r\n return [p.detach().flatten(0, -3) for p in f.parameters() if len(p.shape) == 4]\r\n\r\n","repo_name":"leonardopetrini/relativestability","sub_path":"laplacian/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34047498956","text":"\ndef largest(array):\n if len(array) < 2:\n return print('Too small')\n\n previous_num = current_sum = array[0]\n max_sum = array[0] + array[1]\n\n for num in array[1:]:\n current_sum = max(current_sum + num, previous_num + num)\n max_sum = max(current_sum, max_sum)\n previous_num = num\n\n return max_sum\n\nprint(largest([7, 1, 2, -1, 3, 4, 10, -12, 3, 21, -19]))\nprint(largest([-6, -19, -5, -3]))\nprint(largest([-6, -19, 2, -3]))","repo_name":"gsaukov/python-machine","sub_path":"core/interviewquest/maximum_sum.py","file_name":"maximum_sum.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71217360906","text":"from random import randint\n\n\nclass Character:\n def __init__(self, name):\n self.name = name\n self.strength = 1 # This is the base damage\n self.armor = 0 # This is the base armor rating\n # # Damage vs Armor, if damage <= armor 'You Missed'\n # self.money = 0\n self.current_hp = 1\n self.max_hp = 1\n\n def attack_enemy(self, enemy):\n damage_amount = randint(1, self.strength)\n if damage_amount <= enemy.armor:\n # '| 1) Attack 2) Drink Potion 3) Check Hero Status 4) Flee the Battle |'\n print('| {}\\'s attack misses! |'.format(self.name))\n else:\n damage_amount = damage_amount-enemy.armor\n # '| 1) Attack 2) Drink Potion 3) Check Hero Status 4) Flee the Battle |'\n print('| {} hits for {} damage! 
|'.format(self.name, damage_amount))\n enemy.take_damage(damage_amount)\n\n def take_damage(self, damage_dealt):\n # This takes into account any armor that caused a miss\n self.current_hp -= damage_dealt\n\n\n\n","repo_name":"zz3430gs/Py_Game_RPG","sub_path":"characters/Character.py","file_name":"Character.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9186292994","text":"import json\nimport requests\nfrom django.http import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom user.common import UserResponse, detect_phone, generate_token\nfrom user.models import UserInfo, ScoreRecord\nfrom Axepanda import settings\nimport os, datetime\nimport base64\nimport json\nfrom Crypto.Cipher import AES\nfrom user.auth import JSONWebTokenAuth\nimport uuid,random\n\nclass IndexDetail(APIView):\n # authentication_classes = [JSONWebTokenAuth,]\n def get(self, request, *args, **kwargs):\n \"\"\"\n :param request:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n response = UserResponse()\n crunchies = request.GET.get('crunchies', None)\n category = request.GET.get('category', None)\n month = request.GET.get('month', None)\n types = request.GET.get('type', \"total\")\n data_list = []\n current_month = datetime.datetime.now().month\n target_month = self._select_month(month,current_month)\n if category == 'athletics' and crunchies == \"0\":\n if types == \"month\":\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=0,\n created__month=target_month).order_by(\"-total\").values(\n \"user__phone\", \"total\").distinct()[:50]\n else:\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=0).order_by(\"-total\").values(\n \"user__phone\", \"total\").distinct()[:50]\n score_obj = self._distinct_score(score_obj)\n data_list = self._getdata(score_obj, data_list, crunchies=crunchies)\n\n elif category == 'athletics' and crunchies == \"1\":\n if types == \"month\":\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=1,\n created__month=target_month).order_by(\n \"-total\").values(\"user__phone\", \"total\").distinct()[:50]\n else:\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=1).order_by(\"-total\").values(\n \"user__phone\", \"total\").distinct()[:50]\n score_obj = self._distinct_score(score_obj)\n data_list = self._getdata(score_obj, data_list, crunchies=crunchies)\n\n elif category == 'athletics' and crunchies == \"3\":\n if types == \"month\":\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=3,\n created__month=target_month).order_by(\n \"-total\").values(\"user__phone\", \"total\").distinct()[:50]\n else:\n score_obj = ScoreRecord.objects.filter(category=0, crunchies=3).order_by(\"-total\").values(\n \"user__phone\",\"total\").distinct()[:50]\n score_obj = self._distinct_score(score_obj)\n data_list = self._getdata(score_obj, data_list, crunchies=crunchies)\n\n elif category == \"recreation\":\n pass\n response.datalist = data_list\n response.msg = \"Query successfully\"\n return Response(response.get_data)\n\n def _getdata(self, score_obj, data_list, crunchies):\n for index, item in enumerate(score_obj):\n data = {}\n total = item.get(\"total\")\n phone = item.get(\"user__phone\")\n user_obj = UserInfo.objects.filter(phone=phone).first()\n of_user = user_obj.username\n avatar = user_obj.avatar\n rank = index + 1\n crunchies = crunchies\n openid = 
user_obj.openid\n # 找到每个人在不同榜单上的排名。并存当前排名\n obj = ScoreRecord.objects.filter(user__openid=openid,crunchies=int(crunchies), total=total).first()\n if obj:\n if (obj.rank == None) or (obj.rank is not rank):\n obj.rank = rank\n obj.save()\n data[\"total\"] = total\n data[\"of_user\"] = of_user\n data[\"avatar\"] = avatar\n data[\"rank\"] = rank\n data[\"openid\"] = openid\n data[\"crunchies\"] = crunchies\n data_list.append(data)\n return data_list\n\n def _distinct_score(self,score_obj):\n count_times = {}\n tmp = []\n for item in score_obj:\n count = count_times.get(item.get('user__phone'), 0) + 1\n count_times[item.get('user__phone')] = count\n if count <= 1:\n tmp.append(item)\n return tmp\n\n def _select_month(self,month,current_month):\n if month == \"\" or month is None or month == \"None\" or month == \"null\":\n target_month = current_month\n return target_month\n elif month and isinstance(month,str):\n if int(month) in [x for x in range(1,13)]:\n target_month = int(month)\n return target_month\n else:\n target_month = current_month\n return target_month\n\nclass UserDetail(APIView):\n authentication_classes = [JSONWebTokenAuth,]\n def get(self, request, *args, **kwargs):\n \"\"\"\n :param request:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n response = UserResponse()\n openid = request.GET.get('openid', None)\n crunchies = request.GET.get('crunchies', None)\n user_obj = UserInfo.objects.filter(openid=openid).first()\n if user_obj:\n datalist = []\n if crunchies:\n score_obj = ScoreRecord.objects.filter(user_id=user_obj.id, crunchies=crunchies).values(\n \"first\", \"second\",\n \"third\", \"fourth\",\n \"fifth\", \"sixth\",\n \"seventh\", \"eighth\",\n \"ninth\", \"tenth\", \"crunchies\", \"total\", \"rank\")\n else:\n score_obj = ScoreRecord.objects.filter(user_id=user_obj.id).values(\n \"first\", \"second\",\n \"third\", \"fourth\",\n \"fifth\", \"sixth\",\n \"seventh\", \"eighth\",\n \"ninth\", \"tenth\", \"crunchies\", \"total\", \"rank\")\n for item in score_obj:\n data = self._data_process(item)\n if data.get(\"rank\") != None:\n datalist.append(data)\n response.username = user_obj.username\n response.phone = user_obj.phone\n response.avatar = user_obj.avatar\n response.datalist = datalist\n response.msg = \"Query successfully\"\n else:\n response.status = 401\n response.msg = \"User doesn't exist\"\n return Response(response.get_data)\n\n def _data_process(self, item):\n score = {}\n tmp = []\n for k, v in item.items():\n if k not in [\"total\", \"rank\", \"crunchies\"]:\n tmp.append(v)\n else:\n score[k] = v\n score[\"gradelist\"] = tmp\n return score\n\n\nclass WXBizDataCrypt:\n def __init__(self, appId, sessionKey):\n self.appId = appId\n self.sessionKey = sessionKey\n\n def decrypt(self, encryptedData, iv):\n # base64 decode\n sessionKey = base64.b64decode(self.sessionKey)\n encryptedData = base64.b64decode(encryptedData)\n iv = base64.b64decode(iv)\n\n cipher = AES.new(sessionKey, AES.MODE_CBC, iv)\n\n decrypted = json.loads(self._unpad(cipher.decrypt(encryptedData)))\n\n if decrypted['watermark']['appid'] != self.appId:\n raise Exception('Invalid Buffer')\n return decrypted\n\n def _unpad(self, s):\n return s[:-ord(s[len(s) - 1:])]\n\n\nclass WechatLoginView(APIView):\n def post(self, request, *args, **kwargs):\n response = UserResponse()\n code = request.data.get('code', None)\n encryptedData = request.data.get('encryptedData', None)\n iv = request.data.get('iv', None)\n if not code:\n return Response({'message': 'lack code'}, 
status=status.HTTP_400_BAD_REQUEST)\n\n url = \"https://api.weixin.qq.com/sns/jscode2session?appid={0}&secret={1}&js_code={2}&grant_type=authorization_code\" \\\n .format(settings.APP_ID, settings.APP_KEY, code)\n r = requests.get(url)\n res = json.loads(r.text.decode('utf-8'))\n openid = res['openid'] if 'openid' in res else None\n session_key = res['session_key'] if 'session_key' in res else None\n if not openid:\n return Response({'message': 'The call to WeChat failed'}, status=status.HTTP_400_BAD_REQUEST)\n pc = WXBizDataCrypt(settings.APP_ID, session_key)\n res = pc.decrypt(encryptedData, iv)\n phone = res.get('phoneNumber')\n print(phone)\n user = UserInfo.objects.filter(openid=openid).first()\n if user:\n UserInfo.objects.filter(openid=openid).update(phone=phone)\n token = generate_token(user.id, openid)\n response.msg = \"登录成功\"\n else:\n user_obj = UserInfo.objects.create(username=self._create_tmp_username(),openid=openid, phone=phone)\n token = generate_token(user_obj.id, openid)\n response.msg = \"暂无排名,等待后台上传成绩\"\n response.token = token\n response.phone = phone\n response.openid = openid\n return Response(response.get_data)\n\n def _create_tmp_username(self):\n return str(uuid.uuid5(uuid.NAMESPACE_DNS, str(uuid.uuid1()) + str(random.random()))).replace('-','')[:12]\n\n\nclass GetUserInfo(APIView):\n def post(self, request, *args, **kwargs):\n response = UserResponse()\n gender = request.data.get('gender', None)\n nationality = request.data.get('nationality', None)\n avatar = request.data.get('avatar', None)\n openid = request.data.get('openid', None)\n nickname = request.data.get('nickName', None)\n if not all([gender, nationality, avatar, openid]):\n return Response({\"status\": 401, \"msg\": \"数据不完整\"})\n if openid:\n user_obj = UserInfo.objects.filter(openid=openid)\n if user_obj:\n try:\n UserInfo.objects.filter(openid=openid).update(\n nickname=nickname,gender=gender, nationality=nationality, avatar=avatar)\n response.msg = \"传送信息成功\"\n except Exception as e :\n response.status = 402\n response.msg = str(e)\n else:\n response.status = 402\n response.msg = \"无效openid,没有对应的用户\"\n else:\n response.status = 403\n response.msg = \"openid 不存在\"\n return Response(response.get_data)\n\n\nclass UpdateName(APIView):\n def post(self,request,*args,**kwargs):\n response = UserResponse()\n username = request.data.get('username', None)\n openid = request.data.get('openid', None)\n print(username,openid)\n if username and openid:\n user_obj = UserInfo.objects.filter(username=username).first()\n if user_obj:\n print(\"该名字已存在,请重新输入\")\n response.status = 401\n response.msg = \"该名字已存在,请重新输入\"\n else:\n UserInfo.objects.filter(openid=openid).update(username=username)\n response.msg = \"更新成功\"\n return Response(response.get_data)\n","repo_name":"Axepanda/Axepanda","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11140690896","text":"# with open(\"learn.txt\",\"r\")as nani:\n# info=nani.read()\n# print(info)\n#\n# with open(\"learn.txt\",'w')as n:\n# res=n.write(\"narendra is not good boy\")\n# print(res)\n\n#\n# with open('nani.txt','r') as file:\n# print(file.readlines())\n# #\n# file = open('nani.txt', 'w')\n# print(file.read(10))\n# print(file.read(6))\n# print(file.read(10))\n\n# list1 = []\n# for i in range(3):\n# name = input(\"enter employee name:\")\n# list1.append(name + '\\n')\n# file.writelines(list1)\n# file.close()\n# 
print(\"data saved\")\n\n\nfile1 = open('learn.txt', 'r')\nprint(file1.readline())\nfile2 = open('nani3.txt', 'a')\n\nstr1 =''\nwhile str1:\n str1 = file1.readline()\n file2.write(str1)\n\n\n\nfile1.close()\nfile2.close()\nprint('its yes')\n# s = open('nani.txt', 'r')\n# s1 = s.readlines()\n# print(s1)\n","repo_name":"Narendra-1997/Python-Basic-Programs","sub_path":"file_handling/fill.py","file_name":"fill.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19812699528","text":"# -*- coding: utf-8 -*-\n# noinspection PyUnresolvedReferences\n\"\"\"\nCreated on Wed Dec 5 14:03:55 2018\n\n@author: HydeR\n\nBasic wrapper to load data and run my Implementation of Data Density Based Clustering:\nR. Hyde and P. Angelov, “Data density based clustering,” in 2014 14th UK Workshop\n on Computational Intelligence (UKCI), 2014, pp. 1–7.\n\nWritten with Spyder 3.3.2 using Python 3.6 and QT5 graphics backend (or plots\n don't come to foreground)\n\nNote: This is my first attempt at Python coding, any feedback is most welcome, especially constructive criticism!\n\nParameters:\n initial_radii: initial radii for clusters. DDC is robust to larger than necessary\n radii as they are adjusted by the algorithm. One radius will be set\n for all data dimensions, or provide one for each.\n verbose: set to 1 for in process plots and info. Note, using verbose will\n leave a plot of final clusters allowing you to differentiate between\n multiple clusters of similar colour but can be very slow.\n merge: set to perform a simple merge function whereby clusters with their\n centre inside another are combined\n\nExample radii:\n for file DS2.csv, use 0.06\n for file Gaussian 5000.csv (~5000 data in each cluster) use 0.10\n\"\"\"\n\n# Initialise\nimport numpy as np\nimport DDC_01a as ddc\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\n\n# Close any open plot windows\nplt.close('all')\n\n# Load data\nwith open('Gaussian5000.csv') as f:\n lines = (line for line in f if not line.startswith('#'))\n data_in = np.loadtxt(lines, delimiter=',', skiprows=1)\n\ndata_in = data_in[:, 0:2:1] # in this example, ground truth data is in the final column, not to be used for clustering!\n\n# normalize each axis 0-1\n# not required, but may improve accuracy and simplifies radii selection\ndata_in = (data_in-np.min(data_in, 0)) / (np.max(data_in, 0) - np.min(data_in, 0))\n\n# set variables\ninitial_radii = (0.15, 0.06) # cluster initial radii, use single radius for equal dimension radii, or 1 per each data dimension\nverbose = 0 # flag to provide info and plots during cluster analysis (limited implementation)\nmerge = 1 # flag to do some basic cluster merging after 1st analysis (not currently implemented)\n\ntime_start = timer()\ncentre_list, radii_list, results = ddc.ddc(data_in, initial_radii, verbose, merge)\ntime_end = timer()\ntime_elapsed = time_end - time_start\n\n# Display Results\nplt.figure(98)\nplt.clf()\nplt.xlim([0, 1])\nplt.ylim([0, 1])\nfor i in results:\n plt.scatter(i[0], i[1], s=0.5)\nplt.title('{:d} Data Clustered in {:.4f} s' .format(data_in.shape[0], time_elapsed))\nplt.show()\n\n\n# Code to time DDC algorithm, e.g. 
3 averages of 100 repetition\n'''\n# import timeit\nsetup = \nimport numpy as np\nimport DDC_01 as DDC01\nimport matplotlib.pyplot as plt\nwith open('exampledata02.csv') as f:\n lines = (line for line in f if not line.startswith('#'))\n DataIn = np.loadtxt(lines, delimiter=',', skiprows=1)\n\n# normalize each axis 0-1\nDataIn = ( DataIn-np.min(DataIn,0) ) / (np.max(DataIn,0) - np.min(DataIn,0))\n\n# set variables\nInitR = [0.14] # cluster initial radii, use single radius for equal dimension radii, or 1 per each data dimension\nVerbose = 0 # flag to provide info and plots during cluster analysis (limited implementation)\nMerge = 0\n\nR = 3\nN = 1000\nT = timeit.repeat(setup=setup, stmt = 'Results = DDC01.DDC(DataIn, InitR, Verbose, Merge)', repeat=R, number=N)\nprint ('Fastest time = ', \"{:.3g}\".format(min(T)/N), 'ms')\n'''\n","repo_name":"RHyde67/DDC-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"41831923178","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org\r\n# Python version Copyright (c) 2010 Ken Lauer / sirkne at gmail dot com\r\n# \r\n# This software is provided 'as-is', without any express or implied\r\n# warranty. In no event will the authors be held liable for any damages\r\n# arising from the use of this software.\r\n# Permission is granted to anyone to use this software for any purpose,\r\n# including commercial applications, and to alter it and redistribute it\r\n# freely, subject to the following restrictions:\r\n# 1. The origin of this software must not be misrepresented; you must not\r\n# claim that you wrote the original software. If you use this software\r\n# in a product, an acknowledgment in the product documentation would be\r\n# appreciated but is not required.\r\n# 2. Altered source versions must be plainly marked as such, and must not be\r\n# misrepresented as being the original software.\r\n# 3. 
This notice may not be removed or altered from any source distribution.\r\n\r\n__version__ = \"$Revision$\"\r\n__date__ = \"$Date$\"\r\n# $Source$\r\n\r\nfrom framework import *\r\nfrom math import sqrt\r\nfrom pypybox2d.settings import LINEAR_SLOP\r\n\r\nclass OneSidedPlatform (Framework):\r\n name=\"One-sided Platform\"\r\n description=\"Try to move the shape with the mouse through the platform from all directions\"\r\n def __init__(self):\r\n super(OneSidedPlatform, self).__init__()\r\n\r\n self.world.contact_manager.pre_solve = self.pre_solve\r\n # The ground\r\n ground = self.world.create_static_body(shapes=b2.Edge((-20, 0),(20, 0)))\r\n\r\n # The platform\r\n half_height=0.5\r\n ypos = 10\r\n body = self.world.create_static_body(\r\n position=(0, ypos),\r\n shapes=b2.Polygon(box=(3, half_height))\r\n )\r\n self.platform = body.fixtures[0]\r\n\r\n # The circular character\r\n self.character_radius=0.5\r\n body = self.world.create_dynamic_body(\r\n position=(0, 12), \r\n fixtures=b2.Fixture(shape=b2.Circle(self.character_radius), density=1.0),\r\n )\r\n\r\n self.character = body.fixtures[0]\r\n body.linear_velocity=(0, -50)\r\n\r\n self.bottom = ypos - half_height # The bottom of the platform \r\n self.top = ypos + half_height # The top of the platform \r\n self.state = 'unknown'\r\n\r\n def pre_solve(self, contact, old_manifold):\r\n # Make sure we're dealing with the platform and the character\r\n if contact.fixture_a != self.platform and contact.fixture_a != self.character:\r\n return\r\n if contact.fixture_b != self.platform and contact.fixture_b != self.character:\r\n return\r\n\r\n # If below the top of the platform, disable the collision response\r\n if (self.character.body.position.y - self.character_radius+3.0*LINEAR_SLOP) < self.top:\r\n contact.enabled = False\r\n\r\nif __name__==\"__main__\":\r\n main(OneSidedPlatform)\r\n","repo_name":"pybox2d/pypybox2d","sub_path":"testbed/one_sided_platform.py","file_name":"one_sided_platform.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"13357245517","text":"from flask import Flask, render_template as render,request\nfrom twilio.rest import Client\nfrom twilio import twiml\nfrom dotenv import load_dotenv\nimport os\nimport requests\nload_dotenv()\n\napp = Flask(__name__)\nACCOUNT_SID=os.environ.get('ACCOUNT_SID')\nAUTH_TOKEN=os.environ.get('AUTH_TOKEN')\nclient = Client('','')\n\n@app.route('/')\ndef index():\n return render(\"index.html\")\n\n@app.route('/recharge')\ndef recharge():\n print(\"Recharge requested\")\n amount = int(request.args.get('amount'))\n rupees=int(request.args.get('rupees'))\n phone='+91'+request.args.get('phone')\n rupees+=amount\n message = client.messages \\\n .create(\n body=\"Energy Meter Balance Alert:\\nYour energy meter has been recharged Rs:\"+str(amount)+\"\\nTotal Balance:\"+str(rupees)+\"\\nEelctricity Has Been Connected\\nThank you\",\n from_='+15392245359', #use your twilio no here\n to=phone, #use your verified phone no. here\n )\n url='https://api.thingspeak.com/update?api_key=R3HL6661AHSHY2HY&field1='+str(rupees)\n requests.get(url)\n return(\"Recharge Succesfull\")\n\n@app.route('/alert')\ndef alert():\n print(\"Alert Send\")\n message = client.messages \\\n .create(\n body=\"Your connection has been cut due to low balance. Please recharge immediately to restore connection.\",\n from_='+15392245359', #use your twilio no here\n to='+918606068522', #use your verified phone no. 
here\n )\n return(\"Alert Sent\")\nif __name__ == \"__main__\":\n app.run(debug=False)\n\n\n\n","repo_name":"mdb571/energy-meter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9811728673","text":"import datetime as dt\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.sql import text\n\nfrom .models import Base, Field, FieldData\n\nengine = create_engine(\"postgresql+psycopg2://kmg:qwerty123@127.0.0.1:10001/kmg\")\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass BadRequest(Exception):\n pass\n\n\ndef initialize_database():\n Base.metadata.create_all(engine)\n\n\ndef drop_database():\n Base.metadata.drop_all(engine)\n\n\ndef add_field(name: str) -> int:\n field = session.query(Field).filter(Field.name == name).first()\n if not field:\n field = Field(name=name)\n session.add(field)\n session.commit()\n return field.id\n\n\ndef save_field_data(instances: list, path: str, update: bool = False) -> dict:\n session.add_all(instances)\n try:\n session.commit()\n except IntegrityError:\n session.rollback()\n if not update:\n print(path, \"file parsing rolled back. One or more of the rows is already exists in database.\")\n return {\n \"status\": \"Fail\",\n \"message\": path if type(\n path) is str else \"File\" + \" file parsing rolled back. One or more of the rows is already exists in database.\"\n }\n return update_exists_rows(instances, path)\n print(\"File successfully parsed\")\n return {\"status\": \"Success\", \"message\": \"Success\"}\n\n\ndef update_exists_rows(instances: list, path: str) -> dict:\n print(\"Argument \\\"-u\\\" or \\\"--update\\\" was set. Trying to update rows that already exists in database...\")\n for instance in instances:\n row = session.query(FieldData).filter(FieldData.start_datetime == instance.start_datetime,\n FieldData.field_id == instance.field_id).first()\n if not row:\n row = instance\n session.add(row)\n try:\n session.commit()\n except Exception as e:\n print(\"Error:\", e)\n print(\"Can't to update exist rows in the database. 
File:\", path)\n return {\"status\": \"Fail\", \"message\": \"Can't to update exist rows in the database\"}\n print(\"File successfully parsed\")\n return {\"status\": \"Success\", \"message\": \"Success\"}\n\n\ndef get_fields() -> list:\n return session.query(Field).all()\n\n\ndef get_field_by_id(field_id: int) -> (Field, None):\n return session.query(Field).get(field_id)\n\n\ndef get_positive_field_data(field_id: int, start, finish: dt.datetime) -> list:\n result = []\n with engine.begin() as conn:\n sql = text(\"\"\"select *\n from field_data t\n where t.field_id = :field_id\n and t.start_datetime >= to_timestamp(:start, 'YYYY-MM-DD hh24:mi:ss')::timestamp\n and t.start_datetime <= to_timestamp(:finish, 'YYYY-MM-DD hh24:mi:ss')::timestamp\"\"\"\n )\n data = conn.execute(sql, {\"field_id\": field_id, \"start\": start.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"finish\": finish.strftime(\"%Y-%m-%d %H:%M:%S\")}).mappings().all()\n\n for item in data:\n result.append(FieldData(**item))\n return result\n\n\ndef get_negative_field_data(field_id: int, start, finish: dt.datetime) -> list:\n result = []\n with engine.begin() as conn:\n sql = text(\"\"\"select \n null as id,\n start_datetime::timestamp as start_datetime,\n 0 as value,\n '00:00:00'::time as duration,\n :field_id as field_id\n from generate_series(\n to_timestamp(:start, 'YYYY-MM-DD hh24:mi:ss')::timestamp,\n to_timestamp(:finish, 'YYYY-MM-DD hh24:mi:ss')::timestamp,\n '1 second'\n ) as gs(start_datetime)\n where start_datetime not in (\n select start_datetime\n from field_data t\n where t.field_id = :field_id\n and t.start_datetime >= to_timestamp(:start, 'YYYY-MM-DD hh24:mi:ss')::timestamp\n and t.start_datetime <= to_timestamp(:finish, 'YYYY-MM-DD hh24:mi:ss')::timestamp)\"\"\"\n )\n data = conn.execute(sql, {\"field_id\": field_id, \"start\": start.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"finish\": finish.strftime(\"%Y-%m-%d %H:%M:%S\")}).mappings().all()\n\n for item in data:\n result.append(FieldData(**item))\n return result\n\n\ndef get_export_data(field_id: int, date: dt.datetime) -> dict:\n x = []\n y = []\n str_date = date.strftime(\"%Y-%m-%d\")\n start = str_date + \" 00:00:00\"\n finish = str_date + \" 23:59:59\"\n with engine.begin() as conn:\n sql = text(\"\"\"with s1 as (select \n start_datetime::timestamp as start_datetime\n from generate_series(\n to_timestamp(:start, 'YYYY-MM-DD hh24:mi:ss')::timestamp,\n to_timestamp(:finish, 'YYYY-MM-DD hh24:mi:ss')::timestamp,\n '1 second'\n ) as gs(start_datetime))\n select t.start_datetime,\n coalesce(fd.value, 0.0) as value\n from s1 as t\n left join field_data fd ON fd.field_id = :field_id and fd.start_datetime = t.start_datetime\"\"\"\n )\n data = conn.execute(sql, {\"field_id\": field_id, \"start\": start,\n \"finish\": finish}).mappings().all()\n\n for item in data:\n x.append(item.start_datetime)\n y.append(item.value)\n return {'x': x, 'y': y}\n","repo_name":"seidakhmet/kmg","sub_path":"api/database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71016950986","text":"overlaps = 0\npartial_overlaps = 0\nwith open(\"./input.txt\") as f:\n for line in f:\n assignments = line.strip().split(\",\")\n bounds = [[int(b) for b in a.split(\"-\")] for a in assignments]\n\n set1 = set(range(bounds[0][0], bounds[0][1] + 1))\n set2 = set(range(bounds[1][0], bounds[1][1] + 1))\n\n if set1.issuperset(set2) or set2.issuperset(set1):\n overlaps += 1\n\n if len(set1 & 
set2):\n partial_overlaps += 1\nprint(overlaps)\nprint(partial_overlaps)\n","repo_name":"bpaske/aoc2022","sub_path":"4/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28159121326","text":"#!/usr/bin/env python\n# by kkyick2\n# import pkg\nimport os\nimport pandas as pd\nimport ntc_templates\nimport textfsm\nfrom datetime import datetime\n\nNEXTLINE = '\\n'\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nOUT_CMD_DIR = 'cmdoutput'\nOUT_XLS_DIR = 'output'\n\n\ndef _load_template(device_type, command, templates_dir):\n # _load_template function will load correct template\n with open(f\"{templates_dir}/{device_type}_{command}.textfsm\") as f:\n return textfsm.TextFSM(f)\n\n\ndef _parse_each_file():\n # _parse_each_file will prepare a dataframe containing parsed content of each file.\n df = pd.DataFrame()\n\n for device_type in sorted(os.listdir(OUT_CMD_DIR)):\n for file in sorted(os.listdir(os.path.join(OUT_CMD_DIR, device_type))):\n print(os.path.join(device_type, file))\n\n template.Reset() # otherwise entires from next loop item adds to the previous loop item,\n OUT_CMD_TXT = os.path.join(OUT_CMD_DIR, device_type, file)\n with open(OUT_CMD_TXT) as f:\n text = f.read()\n\n df_parsed = pd.DataFrame(template.ParseTextToDicts(text))\n df_parsed.insert(0, 'DEVICE_NAME', file[:-19])\n # df = df.append(df_parsed) # append method is deprecated and will be removed from pandas\n df = pd.concat([df, df_parsed])\n return df\n\n\ndef _write_to_excel(df_list):\n # this function will write the dataframes for each command into an excel file.\n DATETIME = datetime.now().strftime(\"%Y%m%d_%H%M\")\n writer = pd.ExcelWriter('output/parsed_commands_' + DATETIME + '.xlsx', engine='xlsxwriter')\n for df, sheetname in df_list:\n df.to_excel(writer, sheet_name=sheetname, index=False)\n workbook = writer.book\n worksheet = writer.sheets[sheetname]\n cell_format = workbook.add_format()\n cell_format.set_text_wrap()\n cell_format.set_align('center')\n cell_format.set_align('vcenter')\n # worksheet.set_column('A:E', 20, cell_format)\n writer.save()\n\n\nif __name__ == \"__main__\":\n # Find the folder where ntc_templates are installed.\n TEMPLATES_DIR = os.path.dirname(os.path.dirname(ntc_templates.__file__)) + '/ntc_templates/templates'\n ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n device_type = 'cisco_ios'\n commands = [\n 'show_version',\n 'show_inventory',\n # 'show_environment_power_all',\n 'show_vlan',\n 'show_interfaces',\n #'show_interfaces_switchport',\n # 'show_ip_interface',\n 'show_ip_interface_brief',\n 'show_ip_bgp_summary',\n 'show_ip_bgp',\n 'show_ip_bgp_neighbors',\n 'show_ip_route',\n # 'show_standby_brief',\n # 'show_mac-address-table',\n # 'show_ip_arp',\n 'show_cdp_neighbors_detail',\n 'show_clock',\n\n ]\n # Create a seperate df for each command thereby seperate sheet for each command for all devices.\n df_list = []\n for command in commands:\n print(f\"Parsing the cmdoutput of {device_type} {command}\\n!\")\n template = _load_template(device_type, command, TEMPLATES_DIR)\n df_list.append([_parse_each_file(), command])\n _write_to_excel(df_list)","repo_name":"kkyick2/py-netscript","sub_path":"2pytextfsm.py","file_name":"2pytextfsm.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34382288284","text":"from pywinauto import 
application\nimport pyautogui as pya\nimport time\n\napp = application.Application()\nvm= 'VALAR MORGHULIS'\nvd='VALAR DOHAERIS'\napp.start(\"Notepad.exe\")\npya.press('enter', 3)\npya.press('tab', 3)\npya.typewrite(vm, 0.11)\ntime.sleep(1)\npya.press('backspace', len(vm))\npya.typewrite(vd, 0.11)\ntime.sleep(2)\napp.Notepad.MenuSelect(\"File ->Exit\")\ntime.sleep(0.23)\napp.Notepad.Dontsave.click()\n","repo_name":"somesh3168/no_one","sub_path":"no_one.py","file_name":"no_one.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929475466","text":"from sys import stdin, setrecursionlimit, maxsize\nsetrecursionlimit(10 ** 9)\n\n\nn = int(stdin.readline())\na = [0] + list(map(int, stdin.readline().split()))\ndp = [-1 for _ in range(n + 1)] # 각 위치에서 징검다리를 건너는 최소 비용\n\n\ndef cost(i: int, j: int) -> int:\n return (j - i) * (1 + abs(a[i] - a[j]))\n\n\n# x: 출발 위치\n# return: 출발 위치에서 징검다리를 건너는 최소 비용\ndef jump(x: int) -> int:\n # 맨 마지막 징검다리에서 출발한 경우 비용은 0\n if x == n:\n return 0\n\n if dp[x] > -1:\n return dp[x]\n\n dp[x] = maxsize\n for stop in range(x + 1, n + 1):\n dp[x] = min(dp[x], max(cost(x, stop), jump(stop)))\n\n return dp[x]\n\n\nprint(jump(1))\n\n","repo_name":"boorooksus/Algorithm-Study","sub_path":"백준/CH13_Binary_Search/S1-22871-Stepping_Stone.py","file_name":"S1-22871-Stepping_Stone.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8983101072","text":"import go\nimport mcts\nimport nn\n\nclass TestMCTS():\n def gen_root(self):\n game = go.GoGame(9,8,4.5)\n t = mcts.SNode(game, None)\n return t\n def gen_tree(self):\n t = self.gen_root()\n child1 = mcts.AEdge(1,2,t,0.7)\n child2 = mcts.AEdge(0,0,t,0.3)\n t.actions = [child1, child2]\n return t,child1\n def gen_network(self):\n return nn.Network(9,8,\n 128,10,\n 32,32,256)\n def test_expand(self):\n network = self.gen_network()\n \n tree = self.gen_tree()[0]\n assert tree.is_expanded() == False\n tree.expand(network) #network is not implemented yet\n assert tree.is_expanded() == True\n\n def test_visits(self):\n tree=self.gen_tree()[0]\n assert tree.visits==0\n tree.visit()\n assert tree.visits==1\n tree.visit()\n assert tree.visits==2\n\n def test_best_child(self):\n tree,best_child=self.gen_tree()\n assert tree.best_child()==best_child\n\n def test_search(self):\n t = self.gen_root()\n network = self.gen_network()\n out = mcts.search(t, network, playouts=100)\n move = mcts.pick_move(out, .01)\n print(\"picked move:\", move)\ntest = TestMCTS()\ntest.test_search()\n","repo_name":"phinanix/alphamini","sub_path":"mcts_test.py","file_name":"mcts_test.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2561132434","text":"\"\"\"\nCreates the app, creates the database, and initializes the expletives.\nSQL Injection is covered by SQLAlchemy and XSS is covered by Jinja2.\n\"\"\"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nfrom portfolio import models\ndb.drop_all()\ndb.create_all()\ncensor = {\"fuck\": \"f***\", \"shit\": \"s***\", \"crap\": \"c***\", \"damn\": \"d***\", \"bitch\": \"b****\", \"ass\": \"a**\"}\nfor word in censor:\n new_expletive = 
models.Expletives(word, censor[word])\n db.session.add(new_expletive)\n db.session.commit()\nfrom portfolio import views\n","repo_name":"arnmishra/WebPortfolio","sub_path":"portfolio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31391721226","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def insertionSortList(self, head: ListNode) -> ListNode:\n sol, cur, p, t = ListNode(val=-5001), head, None, None\n while cur:\n t=sol\n while t:\n if t.val > cur.val:\n break\n p, t= t, t.next\n p.next, cur = cur, cur.next\n p.next.next=t\n return sol.next","repo_name":"jitaeyun/algorithm","sub_path":"leetcode/Python/insertion-sort-list.py","file_name":"insertion-sort-list.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12511361653","text":"import datetime\nimport glob\nimport os\nimport shutil\n\ndef print_dir():\n basepath = '../den01'\n for entry in os.listdir(basepath): # or os.scandir\n if os.path.isfile(os.path.join(basepath, entry)):\n print(\"File: \", entry)\n if os.path.isdir(os.path.join(basepath, entry)):\n print(\"Directory: \", entry)\n\ndef make_dirs():\n os.chdir(\"..\")\n os.makedirs(\"2019/11/13\", exist_ok=True)\n os.makedirs(os.path.join(\"2019\", \"11\", \"13\"), exist_ok=True)\n\ndef filter_by_glob():\n os.chdir(\"../..\")\n print(glob.glob(\"**/*.py\", recursive=True))\n\ndef ls_la():\n basepath = '../den01'\n for entry in os.listdir(basepath):\n stat = os.stat(os.path.join(basepath, entry))\n print(stat)\n date = datetime.datetime.fromtimestamp(stat.st_mtime)\n print(date.isoformat())\n\nif __name__ == '__main__':\n ls_la()\n\n\n","repo_name":"whoden/Python_course","sub_path":"den03/files1.py","file_name":"files1.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"73297144906","text":"\"\"\"\r\n@project: plane_war\r\n@author: Leon\r\n@file: plane_sprite.py\r\n@ide: PyCharm\r\n\"\"\"\r\n\r\n\r\nimport pygame\r\nimport random\r\n\r\nSCREEN_RECT = pygame.Rect(0, 0, 480, 650)\r\nFRAME_PER_SEC = 60\r\nCREATE_ENEMY_EVENT = pygame.USEREVENT\r\nHERO_FIRE_EVENT = pygame.USEREVENT + 1\r\n\r\n\r\nclass GameSprite(pygame.sprite.Sprite):\r\n def __init__(self, image_name, speed=1):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load(image_name)\r\n self.rect = self.image.get_rect()\r\n self.speed = speed\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n\r\n\r\nclass BackGround(GameSprite):\r\n \"\"\"BG sprite\"\"\"\r\n def __init__(self, is_alt=False):\r\n super().__init__('./images/background.png')\r\n\r\n if is_alt:\r\n self.rect.y = -self.rect.height\r\n\r\n def update(self):\r\n super().update()\r\n\r\n if self.rect.y >= SCREEN_RECT.height:\r\n self.rect.y = -SCREEN_RECT.height\r\n\r\n\r\nclass Enemy(GameSprite):\r\n \"\"\"enemy sprite\"\"\"\r\n def __init__(self):\r\n super().__init__('./images/enemy1.png')\r\n\r\n # set random speed\r\n self.speed = random.randint(1, 3)\r\n\r\n # set x and y\r\n self.rect.bottom = 0\r\n max_x = SCREEN_RECT.width - self.rect.width\r\n self.rect.x = random.randint(0, max_x)\r\n\r\n def update(self):\r\n super().update()\r\n\r\n if self.rect.y >= SCREEN_RECT.height:\r\n 
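# (reviewer comment, not in the original file: enemies only ever move down the screen, so once one scrolls past the bottom edge it cannot come back; kill() removes it from every sprite group, keeping per-frame updates and collision checks bounded)\r\n            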
self.kill()\r\n\r\n def __del__(self):\r\n self.image = pygame.image.load('./images/enemy1_down1.png')\r\n self.image = pygame.image.load('./images/enemy1_down2.png')\r\n self.image = pygame.image.load('./images/enemy1_down3.png')\r\n self.image = pygame.image.load('./images/enemy1_down4.png')\r\n pygame.display.update()\r\n\r\n\r\nclass Hero(GameSprite):\r\n \"\"\"hero sprite\"\"\"\r\n __flag = True\r\n\r\n def __init__(self):\r\n super().__init__('./images/me1.png', speed=0)\r\n\r\n self.rect.centerx = SCREEN_RECT.centerx\r\n self.rect.bottom = SCREEN_RECT.bottom - 120\r\n\r\n self.bullets = pygame.sprite.Group()\r\n\r\n def update(self):\r\n\r\n if self.__flag:\r\n self.image = pygame.image.load('./images/me2.png')\r\n self.__flag = not self.__flag\r\n else:\r\n self.image = pygame.image.load('./images/me1.png')\r\n self.__flag = not self.__flag\r\n\r\n self.rect.x += self.speed\r\n\r\n if self.rect.x < 0:\r\n self.rect.x = 0\r\n elif self.rect.right > SCREEN_RECT.right:\r\n self.rect.right = SCREEN_RECT.right\r\n\r\n def fire(self):\r\n bullet = Bullet()\r\n\r\n bullet.rect.bottom = self.rect.y - 20\r\n bullet.rect.centerx = self.rect.centerx\r\n\r\n self.bullets.add(bullet)\r\n\r\n def __del__(self):\r\n self.image = pygame.image.load('./images/me_destroy_1.png')\r\n self.image = pygame.image.load('./images/me_destroy_2.png')\r\n self.image = pygame.image.load('./images/me_destroy_3.png')\r\n self.image = pygame.image.load('./images/me_destroy_4.png')\r\n pygame.display.update()\r\n\r\n\r\nclass Bullet(GameSprite):\r\n \"\"\"bullet sprite\"\"\"\r\n def __init__(self):\r\n super().__init__('./images/bullet1.png', speed=-2)\r\n\r\n def update(self):\r\n super().update()\r\n\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n def __del__(self):\r\n # print('bullet destroyed...')\r\n pass\r\n","repo_name":"leon785/plane_war","sub_path":"plane_sprite.py","file_name":"plane_sprite.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7887819154","text":"from sys import stdin, stdout\n\n\n# #B.\n# #..\n# GG.\ndef solve_the_maze(maze, n, m):\n\n dir = [[1,0], [-1,0], [0, 1], [0, -1]]\n\n # no B, G close to each other\n for i in range(n):\n for j in range(m):\n if maze[i][j] == 'B':\n for d in dir:\n di = i + d[0]\n dj = j + d[1]\n if 0 <= di < n and 0 <= dj < m:\n if maze[di][dj] == 'G':\n return \"NO\"\n elif maze[di][dj] == '.':\n maze[di][dj] = '#'\n\n # union find G\n ufa = [0 for i in range(m*n)]\n for i in range(len(ufa)):\n ufa[i] = i\n\n for i in range(n-1, -1, -1):\n for j in range(m-1, -1, -1):\n if maze[i][j] == '#':\n continue\n\n # right\n if j+1 < m and maze[i][j+1] != '#':\n union(ufa, m*i + j, m*i + j + 1)\n\n # left\n if i+1 < n and maze[i+1][j] != '#':\n union(ufa, m*i + j, m * (i+1) + j)\n\n #print(ufa)\n\n # check if all G belongs to same group\n root = ufind(ufa, n*m-1)\n for i in range(n):\n for j in range(m):\n if maze[i][j] == 'G':\n grp = ufind(ufa, i*m+j)\n if grp != root:\n return \"NO\"\n\n return \"YES\"\n\n\n# assign a root to b\ndef union(ufa, a, b):\n ar = ufind(ufa, a)\n br = ufind(ufa, b)\n\n ufa[br] = ar\n\n\ndef ufind(ufa, a):\n if ufa[a] == a:\n return a\n\n ufa[a] = ufind(ufa, ufa[a])\n return ufa[a]\n\n\nif __name__ == '__main__':\n t = int(stdin.readline())\n\n for i in range(t):\n (n, m) = list(map(int, stdin.readline().split()))\n maze = []\n for i in range(n):\n maze.append(list(stdin.readline().strip()))\n\n if i == 62:\n print(maze)\n\n 
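# added sketch, not part of the original submission: the union/ufind helpers above implement plain union-find with path compression, e.g.\n        #   ufa = [0, 1, 2, 3]\n        #   union(ufa, 0, 1); union(ufa, 2, 3); union(ufa, 1, 3)\n        #   ufind(ufa, 0) == ufind(ufa, 3)  # True -- all four cells now share one root\n        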
stdout.write(solve_the_maze(maze, n, m) + '\\n')\n","repo_name":"tycyd/codeforces","sub_path":"union find/1365D Solve The Maze.py","file_name":"1365D Solve The Maze.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33278925695","text":"from pwn import *\n\ncontext.binary = elf = ELF(\"shellcoded\")\n\nr = remote(\"challenge.ctf.games\", 32175)\n\n# shellcode from pwn library\nshellcode = list(asm(shellcraft.sh()))\n\n# manually find shellcode online\n#shellcode = list(b'\\x31\\xc0\\x48\\xbb\\xd1\\x9d\\x96\\x91\\xd0\\x8c\\x97\\xff\\x48\\xf7\\xdb\\x53\\x54\\x5f\\x99\\x52\\x57\\x54\\x5e\\xb0\\x3b\\x0f\\x05')\n\nmodified_sc = []\n\n# reverse the action done by the binary\nfor i in range(len(shellcode)):\n if i & 1 != 0:\n v3 = 1\n else:\n v3 = -1\n \n # 1. '& 0xFF': brings negative int back to unsigned byte convertible range\n # 2. byte-order: little since arch is amd-64-little\n # 3. byte code should be unsigned, since adding or subtracting from the original compiled shell code\n # will result in unsigned overflow if not within range 0 to 0xff, which brings it back to the original bytecode in the shellcode\n modified_sc.append(((shellcode[i] + (v3 * i))&0xFF).to_bytes(1, byteorder = 'little', signed = False))\n \nstr_sc = b''.join(modified_sc)\n\n# payload\nprint(str_sc)\n\nr.sendline(str_sc)\n\nr.interactive()\n \n# flag{f27646ae277113d24c73dbc66a816721}\n","repo_name":"yl-ang/CTF","sub_path":"HacktivityConCTF2021/PWN/Shellcoded/shellcoded_solve.py","file_name":"shellcoded_solve.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"9529644510","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\ndef read_input() -> str:\n with open('input.txt') as file:\n return file.read()\n\ndef snafu_to_decimal(data: str):\n mapping = {\n '=': -2,\n '-': -1,\n '0': 0,\n '1': 1,\n '2': 2,\n }\n\n result = 0\n for i, x in enumerate(reversed(data)):\n result += mapping[x] * pow(5, i)\n return result\n\ndef decimal_to_snafu(value: int) -> str:\n mapping = {\n -2: '=',\n -1: '-',\n 0: '0',\n 1: '1',\n 2: '2',\n }\n\n result = []\n\n i = 0\n while value > 0:\n if len(result) <= i:\n result.append(0)\n\n v = result[i] + value % 5\n value //= 5\n\n if 0 <= v < 3:\n result[i] = v\n else:\n result[i] = v - 5\n result.append(1)\n i += 1\n\n return ''.join(reversed([mapping[x] for x in result]))\n\ndef main(report_result):\n data = [x for x in read_input().split('\\n') if x != '']\n\n needed_fuel = sum([snafu_to_decimal(d) for d in data])\n report_result('Needed fuel in snafu:', decimal_to_snafu(needed_fuel))\n\nif __name__ == '__main__':\n main(print)\n","repo_name":"nilshohmann/AdventOfCode2022","sub_path":"25/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23238678149","text":"import unittest\nclass Solution(object):\n def distributeCandies(self, candies):\n sisterSet = set(candies)\n if len(sisterSet) >= len(candies) // 2:\n return len(candies) // 2\n else:\n return len(sisterSet)\n\nclass Test(unittest.TestCase):\n def test(self):\n self._test([1,1,1,1], 1)\n self._test([1,2,3,4,5,6],3)\n\n def _test(self, candie, expected):\n actual1 = Solution().distributeCandies(candie)\n self.assertEqual(actual1, expected)\n\n\nif __name__ == '__main__':\n 
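# reviewer note, not in the original file: for [1,2,3,4,5,6] the sister may take at most len(candies) // 2 == 3 kinds while 6 distinct kinds exist, so the expected answer is min(6, 3) == 3, which the test above asserts.\n    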
unittest.main()","repo_name":"WaltXin/py_leet","sub_path":"p575.py","file_name":"p575.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41109666340","text":"# -*- coding: UTF-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom Config import config\n\ndef textbirnn(input_x, dropout_keep_prob, dataset, reuse=False):\n \"\"\"\n A Bi-directional RNN for text classification.\n Uses an embedding layer, followed by a bi-directional LSTM layer, a dropout layer and a fully-connected layer.\n \"\"\"\n\n num_classes = config.num_classes[dataset]\n vocab_size = config.num_words[dataset]\n embedding_size = 300\n\n # Embedding layer\n with tf.variable_scope(\"embedding\", reuse=reuse):\n embeddings = tf.get_variable(\"W\", \n initializer=tf.random_uniform([vocab_size+1, embedding_size], -1.0, 1.0),\n trainable=True)\n embedded_chars = tf.nn.embedding_lookup(embeddings, input_x, name=\"embedded_chars\") # [None, sequence_length, embedding_size]\n\n # Create a bi-directional LSTM layer for each rnn layer\n with tf.variable_scope('bilstm', reuse=reuse):\n cell_fun = tf.nn.rnn_cell.BasicLSTMCell\n \n def get_bi_cell():\n fw_cell = cell_fun(128, state_is_tuple=True) #forward direction cell\n bw_cell = cell_fun(128, state_is_tuple=True) #backward direction cell\n return fw_cell, bw_cell\n\n # Bi-lstm layer\n fw_cell, bw_cell = get_bi_cell()\n outputs, last_state = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, embedded_chars, dtype=tf.float32)\n outputs = tf.concat(outputs, axis=2)\n output = tf.reduce_mean(outputs, axis=1)\n\n # Add dropout\n with tf.variable_scope(\"dropout\", reuse=reuse):\n rnn_drop = tf.nn.dropout(output, dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.variable_scope(\"output\", reuse=reuse):\n W = tf.get_variable(\n \"W\",\n shape=[128*2, num_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", initializer=tf.constant(0.1, shape=[num_classes]))\n\n scores = tf.nn.xw_plus_b(rnn_drop, W, b, name=\"scores\")\n predictions = tf.argmax(scores, 1, name=\"predictions\", output_type=tf.int32)\n\n return embeddings, embedded_chars, predictions, scores","repo_name":"JHL-HUST/FGPM","sub_path":"text_birnn.py","file_name":"text_birnn.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"81"} +{"seq_id":"9247880461","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy, sys, datetime\nimport smooth\n\n# dem lists tho\ndates = []\nredSen = []\ntwitSen = []\npriceUSD = []\npriceBTC = []\n\ninputCurrency = sys.argv[1] # \"ethereum\" or \"bitcoin\"\n\n# read data\ninputDataFileName = \"../logs/\" + inputCurrency + \"_LOG.csv\"\ninputDataFile = open(inputDataFileName,'r')\n\n# datetime from string\ndef gimmeDatetime(dateString):\n datetime_object = datetime.datetime.strptime(dateString, '%Y-%m-%d %H:%M:%S')\n return datetime_object\n\n# extract data from CSV\nfor line in inputDataFile:\n data = line.split(\";\") #date,REDDIT,sens,TWITTER,sens,PRICE,prices\n dates.append(gimmeDatetime(data[0]))\n if data[2] != '':\n redSen.append([float(i) for i in data[2].split(\",\")])\n else:\n redSen.append([])\n if data[4] != '':\n twitSen.append([float(i) for i in data[4].split(\",\")])\n else:\n twitSen.append([])\n if len(data[6].split(\",\")) < 2:\n priceUSD.append(priceUSD[-1])\n priceBTC.append(priceBTC[-1])\n else:\n 
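# comment added in review, not in the original file: data[6] holds the prices as a \"USD,BTC\" comma pair; when either value is missing the previous prices are carried forward above, otherwise both fields are parsed here.\n        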
priceUSD.append(float(data[6].split(\",\")[0].strip()))\n priceBTC.append(float(data[6].split(\",\")[1].strip()))\n\n# debug output\n\"\"\"\nprint(dates)\nprint(redSen)\nprint(twitSen)\nprint(priceUSD)\nprint(priceBTC)\n\"\"\"\n# function to remove mismatched points\ndef matchUp(list1,list2):\n out = [[],[]]\n for i in range(0,len(list1)):\n entry1 = list1[i]\n entry2 = list2[i]\n if numpy.isnan(entry2):\n continue\n else:\n out[0].append(entry1)\n out[1].append(entry2)\n return out\n\n# get mean of a bunch of sentiments\ndef processSentiment(sentiments):\n out = []\n curr = 0\n for s in sentiments:\n if len(s) > 0:\n curr = numpy.mean(s)\n out.append(curr)\n return out\n\n# average the last N block sentiments\ndef movingAverageSentiment(secondBlocks,period):\n lastNblocks = secondBlocks[0:period] # start with the first N blocks\n averages = [] # store the averages\n for i in range(0,period): # N seconds at the beginning have no average\n averages.append(numpy.nan) # pad averages with nan in the beginning\n for secondBlock in secondBlocks[period:]: # loop over every 1-second block in the period of N seconds\n sentimentsToAverage = [] # to hold all the sentiments from the period\n for block in lastNblocks: # loop over every block in the period\n for sentiment in block: # loop over each block's sentiments\n sentimentsToAverage.append(sentiment) # store sentiment\n avgSentiment = numpy.mean(sentimentsToAverage) # average all the sentiments from the period\n averages.append(avgSentiment) # store average for this block\n lastNblocks = lastNblocks[1:] # remove first block from moving period\n lastNblocks.append(secondBlock) # add the last block to the moving period\n return averages # return a list of averages for every single 1-second block\n\n# averages before and after\ndef movingAverageSentimentMIRRORORIG(sentiments,period):\n averages = []\n for i in range(0,period/2):\n averages.append(numpy.nan)\n for i in range(0,len(sentiments)-period):\n sens_tot = 0\n sens_num = 0\n for j in range(0, period):\n for s in sentiments[i+j]:\n sens_tot = sens_tot + s\n sens_num = sens_num + 1\n if sens_num > 0:\n averages.append(sens_tot/sens_num)\n else:\n averages.append(0)\n for i in range(len(averages), len(sentiments)):\n averages.append(numpy.nan)\n return averages\n\n# get d/dt\ndef derivative(ys, window):\n res = []\n for i in range(0, window):\n res.append(numpy.nan)\n for i in range(window, len(ys)-window):\n res.append(float(ys[i+window]-ys[i-window])/(2*window))\n for i in range(0, window):\n res.append(numpy.nan)\n return res\n\n# get float from time\ndef time_to_float(t):\n return float((t-datetime.datetime(2018,1,1)).total_seconds())\n\n############################\n# START PLOTTING ###########\n############################\nplt.rc('xtick',labelsize=20)\nplt.rc('ytick',labelsize=20)\nfig, ax = plt.subplots()\nax.set_xlabel(\"Date\", fontsize=20)\n\ndef MAKE_PLOT_1(): # shows reddit & twitter, smoothed, on top of their 30min averaged sentiments, and priceUSD\n date_floats = map(time_to_float, dates)\n if inputCurrency == \"bitcoin\":\n ax.set_ylabel(\"BTC value in USD\", fontsize=20)\n priceUSD_smooth = smooth.savitzky_golay(numpy.array(priceUSD), 1201, 3) # SG filter on USD price\n #d_priceUSD_smooth = derivative(priceUSD_smooth, 10)\n ax.plot(dates,priceUSD,color=\"#333333\",linewidth=3,alpha=0.2)\n ax.plot(dates,priceUSD_smooth,color=\"#333333\",linewidth=3)\n else:\n ax.set_ylabel(\"ETH value in BTC\", fontsize=20)\n priceBTC_smooth = smooth.savitzky_golay(numpy.array(priceBTC), 1201, 3) 
# SG filter on USD price\n ax.plot(dates,priceBTC,color=\"#333333\",linewidth=3,alpha=0.2)\n ax.plot(dates,priceBTC_smooth,color=\"#333333\",linewidth=3)\n\n # reddit\n redSen_avg = movingAverageSentiment(redSen, 1800)\n redDates_new = matchUp(dates,redSen_avg)[0]\n redSen_new = matchUp(dates,redSen_avg)[1]\n redSen_smooth = smooth.savitzky_golay(numpy.array(redSen_new), 1201, 3) # SG filter on reddit\n axR = ax.twinx()\n axR.plot(redDates_new,redSen_new,color=\"#FF4500\",linewidth=3,alpha=0.2)\n axR.plot(redDates_new,redSen_smooth,color=\"#FF4500\",linewidth=3)\n axR.tick_params(axis='y', colors='#FF4500')\n axR.set_ylabel(\"Sentiment\", fontsize=20)\n\n # twitter\n twitSen_avg = movingAverageSentiment(twitSen, 1800)\n twitDates_new = matchUp(dates,twitSen_avg)[0]\n twitSen_new = matchUp(dates,twitSen_avg)[1]\n twitSen_smooth = smooth.savitzky_golay(numpy.array(twitSen_new), 1201, 3) # SG filter on twitter\n axT = ax.twinx()\n axT.plot(twitDates_new,twitSen_new,color=\"#1DA1F2\",linewidth=3,alpha=0.2)\n axT.plot(twitDates_new,twitSen_smooth,color=\"#1DA1F2\",linewidth=3)\n axT.tick_params(axis='y', colors='#1DA1F2')\n\nMAKE_PLOT_1()\nplt.show()\n","repo_name":"jamcowl/SentimentBlocks","sub_path":"plotting/plotDataPretty.py","file_name":"plotDataPretty.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"27650219058","text":"import enemy\nimport projectile2\nimport weapon\nimport ai\nimport math\nimport random\nimport threading\nimport projectile2\n\nclass enemy_holder(object):\n def __init__(self,ship_art,proj_art,context_size,context):\n self.time = 0\n self.spawn_rate = 250\n self.context_size = context_size\n self.context = context\n #weap1 = weapon.weapon((0,0), proj1, mvmtPtrn1, screen, size)\n self.enemy_arr = []\n self.ship_art_arr = ship_art\n self.proj_art_arr = proj_art\n self.move_ptrn_arr = []\n self.ai_func_arr = []\n self.proj1 = projectile2.Projectile(0, 0, 15, 1.5 * math.pi, 10, proj_art[0])\n self.move_ptrn_arr.append(self.mvmtPtrn1)\n #self.add_enemy(self.art_arr[0],self.ai_func_arr[0],weapon.weapon,self.move_ptrn_arr[0])\n\n def reset(self):\n self.enemy_arr = []\n self.reset_spawn_rate()\n\n def update(self):\n [self.enemy_arr[i].update() for i in range(len(self.enemy_arr))]\n self.time = (self.time +1)%self.spawn_rate#50\n if not self.time:\n self.add_random_enemy()\n #self.add_enemy(self.art_arr[0],self.ai_func_arr[0],weapon.weapon,self.move_ptrn_arr[0])\n\n def reset_spawn_rate(self):\n self.spawn_rate = 250\n \n def update_spawn_rate(self):\n self.spawn_rate = self.spawn_rate-20\n if self.spawn_rate<2:\n self.spawn_rate=2\n\n def update_coll(self, proj, proj_size):\n\n points = 0\n # Go through all enemy ships\n for i in range(len(self.enemy_arr)):\n ship_pos = self.enemy_arr[i].get_pos()\n ship_size = self.enemy_arr[i].get_art_size()\n \n # print(\"ship pos: \" + str(ship_pos))\n # print(\"ship size: \" + str(ship_size))\n \n # check if projectiles overlap with the current ship\n for j in range(len(proj)):\n # find if the boxes are intersecting\n #x_bool = math.fabs(ship_pos[0] - (proj[j][0] + proj_size[0])) < (ship_size[0] + proj_size[0])\n #y_bool = math.fabs(ship_pos[1] - (proj[j][1] + proj_size[1])) < (ship_size[1] + proj_size[1])\n col_bool = ((ship_pos[0]-(ship_size[0]/2)) <= (proj[j][0]+10) and (proj[j][0]-10) <= (ship_pos[0]+(ship_size[0]/2)) and\n (ship_pos[1]-(ship_size[0]/2)) <= (proj[j][1]+10) and (proj[j][1]-10) <= (ship_pos[1]+(ship_size[0]/2)))\n 
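            # annotation added in review, not in the original file: col_bool is an axis-aligned bounding-box test -- the ship's box (note it reuses ship_size[0], the width, on both axes) and a 20x20 box around the projectile overlap iff their intervals overlap on both x and y.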
\n # if there is a collision, kill the ship\n #if x_bool and y_bool:\n if col_bool:\n #print(\"coll\" + str(i))\n self.enemy_arr[i].set_alive(False)\n points += 1\n #print(self.enemy_arr[i].get_alive())\n \n # print(points)\n return points\n \n def update_proj_coll(self,player_pos,player_size=(32,32)):\n for i in range(len(self.enemy_arr)):\n proj = self.enemy_arr[i].weapon.get_proj_pos()\n #print(proj)\n proj_size = self.enemy_arr[i].weapon.get_proj_art_size()\n\n for j in range(len(proj)):\n #x_bool = math.fabs(player_pos[0] - (proj[j][0] + proj_size[0])) < (player_size[0] + proj_size[0])\n #y_bool = math.fabs(player_pos[1] - (proj[j][1] + proj_size[1])) < (player_size[1] + proj_size[1])\n col_bool= ((player_pos[0]-15) <= (proj[j][0]+10) and (proj[j][0]-10) <= (player_pos[0]+15) and\n (player_pos[1]-15) <= (proj[j][1]+10) and (proj[j][1]-10) <= (player_pos[1]+15))\n \n # if there is a collision, kill the ship\n #if x_bool and y_bool:\n if col_bool:\n del self.enemy_arr[i].weapon.proj[j]\n return True\n return False\n\n '''\n\nbullet.prototype.hitP = function(px,py){\nvar hit =false;\nif(alive==false){return false;}\nelse if((player_pos[0]-15) <= (proj[j][0]+10) && (proj[j][0]-10) <= (player_pos[0]+15) &&\n (player_pos[1]-15) <= (proj[j][1]+10) && (proj[j][1]-10) <= (player_pos[1]+15)&&this.alive==true){\n\t\t\thit=true;\n\t\t\tthis.alive=false;\n\t\t\treturn hit;\n\t\t\t}\n\n}\n\n \n Game.target[i].xloc<(Game.proj[j].xloc+15) && Game.target[i].xloc>(Game.proj[j].xloc-15)\n && Game.target[i].yloc<(Game.proj[j].yloc+20) && Game.target[i].yloc>(Game.proj[j].yloc-20)\n '''\n \n def draw(self):\n [self.enemy_arr[i].draw(self.context) for i in range(len(self.enemy_arr))]\n\n def add_enemy(self,art,ai_obj,weapon, weapon_ptrn):\n self.enemy_arr.append(enemy.enemy_object(art, self.context_size, self.context, ai_obj, weapon,self.proj1,weapon_ptrn))\n\n\n def add_random_enemy(self):\n (ai_func, args) = random.choice(ai.aiList)\n ai_object = ai_func(*args)\n self.add_enemy(random.choice(self.ship_art_arr), ai_object, weapon.weapon, random.choice(self.move_ptrn_arr))\n\n def kill_enemy(self,i):\n self.enemy_arr.pop(i)\n\n def draw_proj(self):\n for i in range(len(self.enemy_arr)):\n self.enemy_arr[i].draw_proj()\n\n def update_proj(self):\n \n # Finds which enemies are alive,\n # if an enemy is dead and it has\n # no projectiles, it is totally\n # dead, remove it\n removeIdx = []\n for i in range(len(self.enemy_arr)):\n self.enemy_arr[i].update_proj(True)\n if not self.enemy_arr[i].get_alive():\n # print(len(self.enemy_arr[i].get_weapon().get_proj()))\n if len(self.enemy_arr[i].get_weapon().get_proj()) == 0:\n # print(\"adding \" + str(i))\n removeIdx.append(i)\n \n #print(removeIdx)\n \n # removes enemies that are totally dead \n for j in range(len(removeIdx)-1,-1,-1):\n del self.enemy_arr[removeIdx[j]]\n \n # [self.enemy_arr[i].update_proj(True) for i in range(len(self.enemy_arr))]\n\n # threads = []\n # for i in range(len(self.enemy_arr)):\n # t=threading.Thread(target=self.enemy_arr[i].update_proj,args =(True,))\n # threads.append(t)\n # for t in threads:\n # t.start()\n\n # for x in threads:\n # x.join()\n\n\n def mvmtPtrn1(self,x):\n if x % 32 == 0: # fire proj\n return (.5 * math.pi)\n else:\n return None \n \n# straight line \n# def mvmtPtrn1(x):\n # if x % 8 == 0: # fire proj\n # return (1.5 * math.pi)\n # else:\n # return None\n \n # spray\n# def mvmtPtrn2(x):\n # if x % 8 == 0: # fire proj\n # return (1.5 * math.pi)\n # else:\n # return 
None\n","repo_name":"nshaheed/eecs448_semesterProj","sub_path":"enemy_holder.py","file_name":"enemy_holder.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11288030347","text":"n=int(input())\ne=0\no=0\nwhile n!=0:\n r=n%10\n if r%2==0:\n e=e+1\n else:\n o=o+1\n n=n//10\nif e>0 and o>0:\n print('Mixed')\nelif e>0 :\n print('Even')\nelse:\n print('Odd')","repo_name":"22P31A0527/codemind-python","sub_path":"Even_Odd_Mixed.py","file_name":"Even_Odd_Mixed.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70794962825","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2010, Tatsuya Akagi\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, \r\n# with or without modification, \r\n# are permitted provided that the following conditions are met:\r\n#\r\n# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\r\n# * Redistributions in binary form must reproduce the above copyright notice, \r\n# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\r\n# * Neither the name of the RedM Studio nor the names of its contributors may be used to endorse \r\n# or promote products derived from this software without specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, \r\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. \r\n# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, \r\n# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; \r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \r\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \r\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n#\r\nfrom .. color import *\r\nfrom .. vector import *\r\nfrom .. 
light import *\r\n\r\nclass shader(object):\r\n\tdiffuseColor = color(0.5, 0.5, 0.5)\r\n\r\nclass lambert(shader):\r\n\tdef __init__(self, **args):\r\n\t\tfor key, val in args.items():\r\n\t\t\tself.__dict__[key] = val\r\n\t\tpass\r\n\tdef getDiffuse(self, lights, normal, hitPos):\r\n\t\tdiffuse = 0\r\n\t\tfor lit in lights:\r\n\t\t\tdifDir = vector.dot(vector.reflection(lit.getDir(hitPos), normal), normal)\r\n\t\t\tif difDir > 0:\r\n\t\t\t\tdiffuse += difDir*lit.getIntencity(hitPos)\r\n\t\treturn self.diffuseColor * diffuse\r\n\r\nclass blinn(shader):\r\n\tpower = 2\r\n\tdef __init__(self, **args):\r\n\t\tfor key, val in args.items():\r\n\t\t\tself.__dict__[key] = val\r\n\t\t\r\n\tdef getDiffuse(self, lights, normal, hitPos):\r\n\t\tdiffuse = 0\r\n\t\tfor lit in lights:\r\n\t\t\tdifDir = vector.dot(vector.reflection(lit.getDir(hitPos), normal), normal)\r\n\t\t\tif difDir > 0:\r\n\t\t\t\tdifDir = math.pow(difDir, self.power)\r\n\t\t\t\tdiffuse += difDir*lit.getIntencity(hitPos)\r\n\t\treturn self.diffuseColor * diffuse\r\n","repo_name":"akiwoRM/pyrayts","sub_path":"pyrayts/shading/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71179944265","text":"import argparse\nfrom ..services import events\n\ndef main():\n \n parser = argparse.ArgumentParser(prog='smartcal',\n description='A smarter way to timeblock your Google calendar.')\n \n parser.add_argument('-g', '--get', help='returns events to the console.', dest='get')\n\n args = parser.parse_args()\n\n if 'get' in args:\n print('getting events...')\n events.get_events()","repo_name":"christiancthomas/google-smart-calendar","sub_path":"src/smartcal/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9824134953","text":"# This will rename all files in a directory while also replacing the names (text) of those files in the given text document\n# This is useful if you want to keep COCO annotations attached to files, but want to rename them.\n\nimport os\n\ntargetDir = 'D:/path/to/imageDir'\nannotationsTxtFile = targetDir + \"/_annotations.coco.json\"\n\nnewImageName = \"NewFileName_\"\nnewImageStartingNum = 1\nnewImageZeroPadding = 3\n\n\nif os.path.exists(targetDir) == False:\n print (\"The path:\" + targetDir + \" was not found!\")\n exit()\n \n \n\nannotationsFile = open(annotationsTxtFile,\"r\")\nif annotationsFile == IOError:\n print (\"Could not open annotations file for r/w\" + annotationsTxtFile)\n exit()\n \n#contents of the annotations are stored\ntextContents = annotationsFile.read()\n\n \nfor filename in os.listdir(targetDir):\n\n f = os.path.join(targetDir, filename)\n \n if os.path.isfile(f):\n \n if os.path.samefile(f,annotationsTxtFile): #don't rename the annotations file itself :P\n continue \n \n fileEnding = os.path.splitext(filename)[1] #blah.jpg is now .jpg\n newFileName = newImageName + str(newImageStartingNum).zfill(newImageZeroPadding) + fileEnding\n newFilePath = targetDir + \"/\" + newFileName\n \n try:\n os.rename(f, newFilePath)\n except IOError:\n print (\"RENAME FAILED:\" + f)\n \n else:\n print (\"Renamed -- \" + filename + \" -----> \" + newFileName)\n \n textContents = textContents.replace(filename, newFileName) #replace the name inside the annotations file\n \n newImageStartingNum += 1\n \n#save changes to annotations \nwith 
open(annotationsTxtFile, 'w') as file:\n file.write(textContents)\n file.close()\n\nprint (\"\\n\\nFile names in \" + annotationsTxtFile + \" have been replaced accordingly.\")\n\n","repo_name":"Dezt/yoloSAMtools","sub_path":"code/renameFilesAndAnnotationsCOCO.py","file_name":"renameFilesAndAnnotationsCOCO.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"30314922209","text":"import RPi.GPIO as GPIO\r\nimport time\r\nimport smtplib\r\n\r\nimport datetime ##used to return the time of the break in\r\n\r\n\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setup(23,GPIO.IN)\r\nGPIO.setup(24,GPIO.OUT)\r\n\r\n\r\nGMAIL_USER= 'your gmail'\r\nGMAIL_PASS= 'your password'\r\nSMTP_SERVER = 'smtp.gmail.com'\r\nSMTP_PORT = 587\r\n\r\n\r\n##make a GUI for this ish\r\n\r\nnow=datetime.datetime.now()\r\ntext='Intruder alert. Motion detected at' + str(now)\r\n\r\n\r\n##helper functions moved above the main loop so they are defined before first use\r\ndef send_email(recipient, subject, text):\r\n smtpserver= smtplib.SMTP(SMTP_SERVER, SMTP_PORT)\r\n smtpserver.ehlo()\r\n smtpserver.starttls()\r\n smtpserver.ehlo\r\n smtpserver.login(GMAIL_USER, GMAIL_PASS)\r\n header= 'To:' + recipient + '\\n' + 'From: ' + GMAIL_USER\r\n header= header + '\\n' + 'Subject:' + subject + '\\n'\r\n msg = header + '\\n' + text + '\\n\\n'\r\n smtpserver.sendmail(GMAIL_USER, recipient, msg)\r\n smtpserver.close()\r\n\r\n\r\ndef buzz(pitch,duration):\r\n period= 1.0/pitch\r\n delay= period/2\r\n cycles= int(duration *pitch)\r\n for i in range(cycles):\r\n GPIO.output(24, True)\r\n time.sleep(delay)\r\n GPIO.output(24, False)\r\n time.sleep(delay)\r\n\r\n\r\nwhile True:\r\n input_state = GPIO.input(23) ##read the PIR pin configured above (was 18, which was never set up)\r\n if input_state == True:\r\n ## String to acknowledge the motion\r\n print('Motion Detected')\r\n\r\n ## Set off the alarm\r\n ## buzz(5000,20)\r\n \r\n ## send the email to the user\r\n send_email('pgrzyby@gmail.com', 'INTRUDER ALERT!',text)\r\n time.sleep(120)\r\n\r\n ## insert what you actually want to be done when the motion is detected\r\n else:\r\n time.sleep(1) ##how often it checks the motion\r\n\r\n##while True:\r\n## if GPIO.input(23): ##if there is a movement, PIR sensor gives input to GPIO 23\r\n## print(\"Motion detected\")\r\n## GPIO.output(24,True) ##output given to buzzer\r\n## time.sleep(1) #buzzer turns on for 1 second\r\n## GPIO.output(24,False)\r\n## time.sleep(5)\r\n## time.sleep(0.1) \r\n##GPIO.cleanup()\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n \r\n\r\n","repo_name":"pgrzyby/RPifinal","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"19726748739","text":"from django.shortcuts import render\nfrom .models import LibrearyCard, BookAvaliable,StudentForm\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import LibrearyCardSerializers,BookAvaliableSerializers,StudentFormSerializers\n\n# Create your views here.\n@api_view(['POST'])\ndef libreary_card(request):\n name = request.data.get('name')\n address = request.data.get('address')\n standard = request.data.get('standard')\n division = request.data.get('division')\n sr_no = request.data.get('sr_no')\n phone_no = request.data.get('phone_no')\n\n if name and address:\n res = LibrearyCard.objects.create(name=name,address=address,standard=standard,\n 
division=division,sr_no=sr_no,phone_no=phone_no)\n serializers = LibrearyCardSerializers(res)\n return Response(serializers.data)\n else:\n return Response('Please no card !')\n\n@api_view(['GET'])\ndef libreary_(request):\n ans = LibrearyCard.objects.all()\n data = LibrearyCardSerializers(ans, many=True).data\n return Response(data)\n\n@api_view(['POST'])\ndef book_Avaliable(request):\n first_year = request.data.get('first_year')\n second_year = request.data.get('second_year')\n third_year = request.data.get('third_year')\n MPSC = request.data.get('MPSC')\n UPS = request.data.get('UPS')\n god = request.data.get('god')\n good_thought = request.data.get('good_thought')\n spoken_english = request.data.get('spoken_english')\n\n if first_year and second_year:\n res = BookAvaliable.objects.create(first_year=first_year,second_year=second_year,\n third_year=third_year,MPSC=MPSC,UPS=UPS,god=god,\n good_thought=good_thought,spoken_english=spoken_english)\n serializers = BookAvaliableSerializers(res)\n return Response(serializers.data)\n else:\n return Response('Please no book avaliable !')\n\n@api_view(['GET'])\ndef available_book_(request):\n ans = BookAvaliable.objects.all()\n data = BookAvaliableSerializers(ans, many=True).data\n return Response(data)\n\n\n@api_view(['POST'])\ndef student_inform(request):\n name = request.data.get('name')\n address = request.data.get('address')\n phone_no = request.data.get('phone_no')\n\n if name:\n res = StudentForm.objects.create(name=name,address=address,phone_no=phone_no)\n serializers = StudentFormSerializers(res)\n return Response(serializers.data)\n else:\n return Response('no match information !')\n\n@api_view(['GET'])\ndef student_(request):\n ans = StudentForm.objects.all()\n data = StudentFormSerializers(ans, many=True).data\n return Response(data)\n\n\n@api_view(['DELETE'])\ndef s_inform(request):\n id = StudentForm.objects.filter(\n id=request.data.get('id')).delete()\n return Response('student id delete successfull')\n","repo_name":"aniket0951/New-project","sub_path":"djangoProject/LibrearyManagement/BookStole/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11803340119","text":"\"\"\"\n75. 
Sort Colors\n\nGiven an array nums with n objects colored red, white, or blue, sort them\nin-place so that objects of the same color are adjacent, with the colors in\nthe order red, white, and blue.\n\nWe will use the integers 0, 1, and 2 to represent the color red, white, and\nblue, respectively.\n\nYou must solve this problem without using the library's sort function.\n\nThis could be a counted sort, as total are low\n\"\"\"\n\n\nfrom typing import List\n\n\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n allColors = [0, 1, 2]\n countedColorsAppearence = [0]*3\n\n for color in nums:\n countedColorsAppearence[color] += 1\n\n index = 0\n for color in allColors:\n for _ in range(countedColorsAppearence[color]):\n nums[index] = color\n index += 1\n","repo_name":"ylsama/leetcode","sub_path":"75.py","file_name":"75.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74282053705","text":"from enum import Enum, unique\n\nimport sys, os, traceback, re, ts3lib, ts3defines, pytson, ts3client, devtools, io, json, pytsonui\nfrom PythonQt.QtGui import *\nfrom PythonQt.QtCore import Qt, QFile, QIODevice, QUrl\nfrom PythonQt.QtUiTools import QUiLoader\nfrom PythonQt.QtNetwork import QNetworkAccessManager, QNetworkRequest, QNetworkReply\nfrom PythonQt import BoolResult\nfrom rlcompleter import Completer\nfrom itertools import takewhile\nfrom tempfile import gettempdir\nfrom zipfile import ZipFile\n\n\n@unique\nclass ValueType(Enum):\n \"\"\"\n enum to define the types of value shown and received with getValues.\n \"\"\"\n boolean = 1\n integer = 2\n double = 3\n string = 4\n listitem = 5\n\n\ndef _createReturnDict(widgets):\n ret = {}\n\n for key, w in widgets.items():\n if not key in [\"dialog\", \"buttonbox\"]:\n if type(w) is QCheckBox:\n ret[key] = w.checked\n elif type(w) is QSpinBox or type(w) is QDoubleSpinBox:\n ret[key] = w.value\n elif type(w) is QLineEdit:\n ret[key] = w.text\n elif type(w) is QPlainTextEdit:\n ret[key] = w.plainText\n elif type(w) is QGroupBox:\n counter = 0\n for c in w.children():\n if type(c) is QRadioButton:\n if c.checked:\n ret[key] = counter\n\n counter += 1\n elif type(w) is QComboBox:\n ret[key] = w.currentIndex\n elif type(w) is QListWidget:\n ret[key] = [w.row(item) for item in w.selectedItems()]\n\n return ret\n\n\ndef getValues(parent, title, params, doneclb):\n \"\"\"\n Convenience function to open a dialog to get multiple input values from the user.\n @param parent: the dialog's parent, pass None to ignore\n @type parent: QWidget (or derived type)\n @param title: the dialog's title\n @type title: str\n @param params: a dict definining the user input type {'key': (ValueType, label, startingValue, minimum, maximum)}. Potential types are defined in the enum ValueType. The label will be displayed right next to the input widget. All other elements in this tuple are dependent on the ValueType. startingValue defines a predefined value of input. 
Minimum and maximum define input limits.\n boolean: startingValue is bool, minimum and maximum are not used; the widget used is a QCheckBox without an extra QLabel\n integer: startingValue, minimum and maximum are int; the widget used is a QSpinBox with an extra QLabel\n double: startingValue, minimum; the widget used is a QDoubleSpinBox with an extra QLabel\n string: startingValue is str, minimum is not used, if maximum == 1 the widget used is a QLineEdit, otherwise a QPlainTextEdit with a maximumBlockCount of maximum, each with an extra QLabel\n listitem: startingValue is a tuple([str], [int]) defining the listitems in the first element, the second element is a list with prechecked item indexes, minimum is an int defining how much items the user at least has to choose, maximum is an int defining if the user can choose more than one item (maximum != 1), depending on minimum and maximum the used widget is a QGroupBox and multiple QRadioButtons, a QComboBox with an extra QLabel or a QListWidget with an extra QLabel\n @type params: dict{str: tuple(ValueType, str, int/double/str/tuple(list[str], list[int]), int/double, int/double)}\n @param doneclb: a callable which gets the dialogs return code (see QDialog.DialogCode) and on success, a dict with the resulting values, referenced by the key.\n @type doneclb: callable(int, dict{str: int/str/bool/[str]})\n @return: Returns a dict containing the used input widgets plus the dialog and the QDialogButtonBox\n @rtype: dict{str: QWidget}\n\n \"\"\"\n ret = {}\n\n dlg = ret['dialog'] = QDialog(parent)\n dlg.setWindowTitle(title)\n\n dlg.connect(\"finished(int)\", lambda r: (\n doneclb(r, _createReturnDict(ret)) if r == QDialog.Accepted else doneclb(r, {}), dlg.delete()))\n\n form = QFormLayout()\n box = ret['buttonbox'] = QDialogButtonBox(QDialogButtonBox.Cancel | QDialogButtonBox.Ok, Qt.Horizontal, dlg)\n box.connect(\"accepted()\", dlg.accept)\n box.connect(\"rejected()\", dlg.reject)\n\n vlayout = QVBoxLayout(dlg)\n vlayout.addLayout(form)\n vlayout.addWidget(box)\n\n dlg.setLayout(vlayout)\n\n for key, (t, label, start, minimum, maximum) in params.items():\n if key in [\"dialog\", \"buttonbox\"]:\n dlg.delete()\n raise Exception(\"Keys dialog and buttonbox are forbidden\")\n\n if t is ValueType.boolean:\n w = ret[key] = QCheckBox(label, dlg)\n w.setChecked(start)\n\n form.addRow(w)\n elif t is ValueType.integer:\n l = QLabel(label, dlg)\n w = ret[key] = QSpinBox(dlg)\n w.setMinimum(minimum)\n w.setMaximum(maximum)\n w.setValue(start)\n\n form.addRow(l, w)\n elif t is ValueType.double:\n l = QLabel(label, dlg)\n w = ret[key] = QDoubleSpinBox(dlg)\n w.setMinimum(minimum)\n w.setMaximum(maximum)\n w.setValue(start)\n\n form.addRow(l, w)\n elif t is ValueType.string:\n l = QLabel(label, dlg)\n if maximum == 1:\n w = ret[key] = QLineEdit(start, dlg)\n else:\n w = ret[key] = QPlainTextEdit(start, dlg)\n w.setMaximumBlockCount(maximum)\n\n form.addRow(l, w)\n elif t is ValueType.listitem:\n if minimum == maximum == 1:\n grp = ret[key] = QGroupBox(label, dlg)\n layout = QVBoxLayout(grp)\n for i, s in enumerate(start[0]):\n b = QRadioButton(s, grp)\n b.setChecked(i in start[1])\n\n layout.addWidget(b)\n\n form.addRow(grp)\n elif maximum == 1:\n l = QLabel(label, dlg)\n w = QComboBox(dlg)\n w.addItems(start[0])\n if len(start[1]) > 0:\n w.setCurrentIndex(start[1][0])\n\n form.addRow(l, w)\n else:\n l = QLabel(label, dlg)\n w = QListWidget(dlg)\n for i, s in enumerate(start[0]):\n item = QListWidgetItem(s, w)\n\n item.setFlags(Qt.ItemIsSelectable | 
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n item.setCheckState(Qt.Checked if i in start[1] else Qt.Unchecked)\n\n form.addRow(l, w)\n else:\n dlg.delete()\n raise Exception(\"Unrecognized ValueType\")\n\n dlg.show()\n\n return ret\n","repo_name":"Bluscream/pyTSon_plugins","sub_path":"include/getvalues.py","file_name":"getvalues.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"81"} +{"seq_id":"74470829705","text":"from .web3_top_class import Web_Class_IPC\r\nimport redis\r\nimport datetime\r\n\r\nclass Save_Block_Chain_Data(object):\r\n\r\n def __init__(self):\r\n redis_handle = redis.StrictRedis( db=1 )\r\n signing_key = '/mnt/ssd/ethereum/dev_data/keystore/UTC--2019-12-08T20-29-05.205871190Z--75dca28623f88b105b8d0c718b4bfde0f1568688'\r\n ipc_socket = \"/home/pi/geth.ipc\"\r\n self.w3 = Web_Class_IPC(ipc_socket,redis_handle,signing_key)\r\n print(self.w3.get_block_number())\r\n \r\n \r\n def append_data(self,contract_name,method,*data) :\r\n contract_object = self.w3.get_contract(contract_name)\r\n receipt = self.w3.transact_contract_data(contract_object,method, *data)\r\n return receipt\r\n \r\nif __name__ == \"__main__\":\r\n save_block_chain_data = Save_Block_Chain_Data()\r\n receipt = save_block_chain_data.append_data(\"EventHandler\",\"transmit_event\",[\"event_name\",\"event_sub_id\",\"data\"])\r\n print(receipt)","repo_name":"NanoDataCenter/nano_data_center","sub_path":"code/ethereum_block_chain/block_chain_insert_py3.py","file_name":"block_chain_insert_py3.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6865221038","text":"import unittest\nfrom HTMLTestRunner import HTMLTestRunner\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nimport BoneTrading\n\n\n\n#Suite=unittest.TestSuite()\n#Suite.addTest(Login.MyPhone('test_1login'))\n#Suite.addTest(BoneTrading.Bone_Collect('test_2getBone'))\n#Suite.addTest(BoneTrading.Bone_Collect('test_BoneSell'))\n\n\nnow=time.strftime(\"%Y-%m-%d %H_%M_%S\")\n\n\n\ndef senderEmail ():\n sender = '893202527@qq.com'\n password = 'pzdbnedfvbsrbbcj'\n receive = '1084353375@qq.com'\n smtp_server = 'smtp.qq.com'\n sendfile = open('../Text/'+now+'text.html','rb').read()#读取邮件文本\n message = MIMEText(sendfile,_subtype=\"html\",_charset='utf-8')\n\n message['Content-Type']='text/html'\n message['Content-disposition']='attachment; filename=Text.html'\n\n\n\n subject = '邮件测试'\n\n\n message['Subject'] = subject\n message['From'] =sender # 发送者\n message['To'] =receive # 接收者\n\n\n\n try:\n Smtp_eMail = smtplib.SMTP()\n Smtp_eMail.connect(smtp_server, 25)\n Smtp_eMail.set_debuglevel(1)\n Smtp_eMail.login(sender, password)#发送者的账号密码\n Smtp_eMail.sendmail(sender, receive, message.as_string())\n Smtp_eMail.quit()\n print('发送邮件')\n except smtplib.SMTPException as e:\n print('发送失败:'+e)\n\n\nif __name__=='__main__':\n while True:\n Suite = unittest.TestSuite()\n Suite.addTest(BoneTrading.Bone_Collect('test_BoneSell'))\n filename='../Text/'+now+'text.html'\n\n\n with open(filename, \"wb\") as f:\n runner = unittest.TextTestRunner()\n runner=HTMLTestRunner(\n stream=f,\n title='测试报告',\n description='用例执行情况'\n\n )\n\n runner.run(Suite)\n 
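# note added in review, not part of the original file: senderEmail() is left commented out here; re-enabling it would read the HTML report just written above and mail it through the smtplib helper defined earlier in this file.\n        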
#senderEmail()","repo_name":"893202527/JK","sub_path":"DR_Test/Contact_Android/Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32634579234","text":"import argparse\nfrom pathlib import Path\nimport os\nfrom strong.core.signature import get_function_parameters, get_function_context\nimport inspect\nimport importlib.util\nimport sys\nfrom typing import Callable\n\nIGNORE_ARGS = [\"self\", \"cls\"]\n\nparser = argparse.ArgumentParser(\n description=\"Verifies that every function in every module is typed.\"\n)\n\nparser.add_argument(\n \"input\",\n metavar=\"INPUT\",\n type=str,\n default=\".\",\n help=\"Python file or directory containing Python files to be analysed\",\n)\n\n\ndef check_function(f: Callable) -> None:\n parameters, out_type = get_function_parameters(f)\n\n header = get_function_context(f)\n\n for parameter_name, parameter_type in parameters.items():\n if (\n parameter_type.annotation == inspect.Parameter.empty\n and parameter_name not in IGNORE_ARGS\n ):\n print(\"%s: parameter `%s` is missing type-hint\" % (header, parameter_name))\n if out_type == inspect.Parameter.empty:\n print(\"%s: return value is missing type-hint\" % header)\n\n\ndef check_module(filename: str) -> None:\n module_name = inspect.getmodulename(filename)\n spec = importlib.util.spec_from_file_location(module_name, filename)\n module = importlib.util.module_from_spec(spec)\n\n try:\n spec.loader.exec_module(module)\n except Exception:\n pass # Some files like setup.py cannot be loaded...\n\n members = inspect.getmembers(module)\n module_path = inspect.getfile(module)\n\n def _check_members(obj, members, depth):\n\n if depth < 0:\n return\n\n for member_name, member_type in members:\n member = getattr(obj, member_name, None)\n\n if inspect.isfunction(member_type) and module_path == inspect.getfile(\n member_type\n ):\n check_function(member)\n elif inspect.isclass(member_type):\n _check_members(member, inspect.getmembers(member), depth - 1)\n\n _check_members(module, members, 1)\n\n\ndef main() -> None:\n args = parser.parse_args()\n if os.path.isfile(args.input):\n _, ext = os.path.splitext(args.input)\n if ext.lower() != \".py\":\n raise TypeError(\"Strong can only handle Python files\")\n else:\n check_module(args.input)\n elif os.path.isdir(args.input):\n if args.input != \".\":\n sys.path.insert(0, os.path.abspath(args.input))\n for path in Path(args.input).rglob(\"*.py\"):\n check_module(str(path))\n else:\n raise TypeError(\"`%s` is not a directory nor a file\" % args.input)\n","repo_name":"jeertmans/strong","sub_path":"strong/scripts/strong.py","file_name":"strong.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15603624253","text":"#import PyQt5.QtWidgets as qt\nimport PyQt6.QtWidgets as qt\n\n\nfrom gui.didispeech_gui import DidispeechGui\n\nclass DidispeechApp(qt.QApplication):\n\t\"\"\" Handle application.\n\t\tThis is the application.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Main class, application\n\n\t\t\"\"\"\n\t\tsuper().__init__([\"DidispeechApp\"])\n\t\tself._window = qt.QFrame()\n\n\tdef repaint(self, frame: qt.QLayout):\n\t\t\"\"\" Repaint the gui with the selected frame.\n\t\t\tIt sets as window layout the selected frame.\n\n\t\tArgs:\n\t\t\tframe (qt.QLayout): an arbitrary layout\n\t\t\"\"\"\n\t\tself._window = 
qt.QFrame()\n\t\tself._window.resize(700, 500)\n\t\tself._window.setLayout(frame)\n\t\tself._window.show()\t\n\nif __name__=='__main__':\n\t\n\t# create and show app window\n\tdidispeech_app = DidispeechApp()\n\n\tdidispeech_gui = DidispeechGui(didispeech_app)\n\tdidispeech_gui.init()\n\n\tdidispeech_app.repaint(didispeech_gui)\n\tdidispeech_app.exec()","repo_name":"mions1/didispeech","sub_path":"didispeech.py","file_name":"didispeech.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8723935886","text":"\"\"\"Tests for `helpers` package.\"\"\"\nimport pytest\nfrom unittest.mock import patch\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.datasets import make_classification\nfrom imblearn.pipeline import Pipeline\nfrom sklearn.metrics import make_scorer, accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\nfrom aidp.ml import helpers\nfrom testhelpers import get_sample_data\n\ndef test_specificty_all_correct_one():\n \"\"\"If all predictions are correct, return 1.0\"\"\"\n true = [1,0,1,0,1] \n pred = true\n\n spec = helpers.specificity(true, pred)\n\n assert 1.0 == spec\n\ndef test_specificty_all_incorrect_zero():\n \"\"\"If all predictions are incorrect, return 0.0\"\"\"\n true = [1,0,1,0,1] \n pred = [0,1,0,1,0]\n\n spec = helpers.specificity(true, pred)\n\n assert 0.0 == spec\n\ndef test_specificty_correctly_calculates():\n \"\"\"Calculates specificity correctly\"\"\"\n true = [0,0,0,0,0,0,0,0,0,0] \n pred = [0,0,0,0,1,1,1,1,1,1]\n\n spec = helpers.specificity(true, pred)\n\n assert 0.4 == spec\n\ndef test_negative_predictive_value_all_correct_one():\n \"\"\"If all predictions are correct, return 1.0\"\"\"\n true = [1,0,1,0,1] \n pred = true\n\n npv = helpers.negative_predictive_value(true, pred)\n\n assert 1.0 == npv\n\ndef test_negative_predictive_value_all_incorrect_zero():\n \"\"\"If all predictions are incorrect, return 0.0\"\"\"\n true = [1,0,1,0,1] \n pred = [0,1,0,1,0]\n\n npv = helpers.negative_predictive_value(true, pred)\n\n assert 0.0 == npv\n\ndef test_negative_predictive_value_correctly_calculates():\n \"\"\"Calculates npv correctly\"\"\"\n true = [0,0,0,0,1,1,1,1,1,1]\n pred = [0,0,0,0,0,0,0,0,0,0] \n\n npv = helpers.negative_predictive_value(true, pred)\n\n assert 0.4 == npv\n\ndef test_get_weighted_confusion_matrix_correct():\n \"\"\"Calculates weighted confusion matrix correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n tp, fp, fn, tn = helpers.get_weighted_confusion_matrix(true, pred)\n\n assert tp == pytest.approx(0.9247, 0.1)\n assert tn == pytest.approx(0.9636, 0.1)\n assert fp == pytest.approx(0.0364, 0.1)\n assert fn == pytest.approx(0.07534, 0.1)\n\ndef test_weighted_accuracy_correct():\n \"\"\"Calculates weighted accuracy correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n w_acc = helpers.weighted_accuracy(true, pred)\n\n assert w_acc == pytest.approx(0.94414, 0.1)\n\ndef test_weighted_sensitivity_correct():\n \"\"\"Calculates weighted sensitivity correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n w_acc = helpers.weighted_sensitivity(true, pred)\n\n assert w_acc == pytest.approx(0.924657, 0.1)\n\ndef test_weighted_specificity_correct():\n \"\"\"Calculates weighted specificity correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n w_acc = helpers.weighted_specificity(true, pred)\n\n assert w_acc == pytest.approx(0.924657, 0.1)\n\ndef 
test_weighted_ppv_correct():\n \"\"\"Calculates weighted postiive predictive value correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n w_acc = helpers.weighted_ppv(true, pred)\n\n assert w_acc == pytest.approx(0.924657, 0.1)\n\ndef test_weighted_npv_correct():\n \"\"\"Calculates weighted negative predictive value correctly\"\"\"\n true, pred = get_sample_data(135,53,2,11)\n\n w_acc = helpers.weighted_npv(true, pred)\n\n assert w_acc == pytest.approx(0.924657, 0.1)\n\ndef test_print_score_summaries_three_scores_three_summaries(capsys):\n \"\"\"If there are three score sets in input, print three summaries\"\"\"\n scores_dict = {'average': np.array([1,0,1,0]), 'ppv': np.array([1,1,1,1]), 'npv':np.array([0,0,0,0])}\n \n helpers.print_score_summaries(scores_dict)\n\n captured = capsys.readouterr()\n assert captured.out.count(\"\\n\") == 3\n\ndef test_print_score_summaries_ignores_nan(capsys):\n \"\"\"If there are nan values in score array, ignore in summary calculation\"\"\"\n scores_dict = {'average': np.array([1,1,1,1, np.nan, np.nan])}\n \n helpers.print_score_summaries(scores_dict)\n\n captured = capsys.readouterr()\n assert \"1.0\\t0.0\" in captured.out \n\ndef test_get_metrics_correctly_calculates_scores():\n \"\"\" If a scoring list is not provided use the default value \"\"\"\n X = [[1,2], [2,2], [3,2]]\n y = [0,1,1]\n model = DummyClassifier(strategy=\"constant\", constant=1).fit(X,y)\n scoring_list = {'average':make_scorer(accuracy_score), 'average2':make_scorer(accuracy_score)}\n\n metrics = helpers.get_metrics(model, X, y, scoring_list)\n\n assert 'average' in metrics\n assert 'average2' in metrics\n\ndef test_plot_roc_verbose_true_prints_extra_data(capsys):\n \"\"\" If verbose flag is true, print extra optional information \"\"\"\n X = [[1,2], [2,2], [3,2]]\n y = [1,1,0]\n model = DummyClassifier(strategy=\"constant\", constant=1).fit(X,y)\n\n helpers.plot_roc(model, X, y, verbose=True)\n\n captured = capsys.readouterr()\n assert \"CLASSIFICATION\" in captured.out \n assert \"PROBABILITIES\" in captured.out \n assert \"RAW DATA\" in captured.out\n\ndef test_plot_coefficients_no_errors():\n \"\"\" Shows plot without error \"\"\"\n X = [[1,2], [2,2], [3,2]]\n y = [1,1,0]\n model = LogisticRegression().fit(X,y)\n\n helpers.plot_coefficients(model, ['first','second'], 1)\n\ndef test_print_feature_importance_different_length_inputs_assertion_error():\n names = [\"one\", \"two\", \"three\"]\n coefs = [1.0, 2.0]\n\n with pytest.raises(AssertionError):\n helpers.print_feature_importance(names, coefs)\n\ndef test_print_feature_importance_same_length_inputs_no_error():\n names = [\"one\", \"two\", \"three\"]\n coefs = [1.0, 2.0, 3.0]\n\n helpers.print_feature_importance(names, coefs)\n\ndef test_plot_confusion_matrix_no_errors():\n \"\"\" Shows plot without error \"\"\"\n confusion_matrix = np.array([[10,1],[3,17]])\n\n helpers.plot_confusion_matrix(confusion_matrix, print_matrix=True)\n\ndef test_grid_search_optimization_no_errors():\n \"\"\" Runs optimization without error \"\"\"\n param_grid = {}\n clf = Pipeline([('classifier', LogisticRegression())])\n X, y = make_classification()\n\n helpers.grid_search_optimization(clf, param_grid, X, y, X, y, cv=2, n_jobs=1, verbose=True)\n","repo_name":"jtbricker/aidp","sub_path":"tests/helpers_test.py","file_name":"helpers_test.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"7362443497","text":"import argparse\n\nSEPARATOR = [' ', 
'\\n', '\\t', '\\b']\nBRACKET = ['(', ')', '[', ']', '{', '}', ';', ',']\nkeyword_lists = ['boolean', 'break', 'continue', 'else', 'for', 'float', 'if', 'int', 'return', 'void', 'while']\nboolean_const = ['false', 'true']\n\n\nclass CharacterType:\n\n def __init__(self, lines):\n \"\"\"\n Init the mapping with a list of lines\n :param lines: A list of lines which use to define the character set for each token type\n \"\"\"\n\n self.character2type = {}\n for i, line in enumerate(lines):\n for c in line.strip():\n self.character2type[c] = i\n\n self.n = len(lines)\n\n def get_edge(self, c):\n \"\"\"\n\n :param c: input character\n :return:\n A number which indicates the the token type, is used for following the automata\n \"\"\"\n if c in SEPARATOR:\n return self.n-1\n\n if c in BRACKET:\n return self.n-1\n\n i = self.character2type.get(c, -1)\n\n # We get an out of vocabulary character\n if i == -1:\n return self.n-1\n else:\n return i\n\n\nclass DFA:\n \"\"\"\n Represent a DFA class\n \"\"\"\n\n def __init__(self, input_file):\n \"\"\"\n\n :param input_file: Init the DFA with the input file\n \"\"\"\n\n # The graph is stored as an dictionary\n # for example: self.graph[0] represent a list of state when following each token types\n self.graph = {}\n\n with open(input_file, 'r') as f:\n line1 = f.readline()\n line1_int = [int(w) for w in line1.strip().split()]\n n = line1_int[0]\n d = line1_int[1]\n\n # Read transition graph\n for i in range(n):\n line = f.readline()\n tmp = [int(w) if w != 'x' else -1 for w in line.strip().split() ]\n self.graph[tmp[0]] = [0]*(d-1)\n for j in range(1, len(tmp)):\n self.graph[tmp[0]][j-1] = tmp[j]\n\n # Read mapping character type\n list_character_types = []\n for i in range(d):\n list_character_types.append(f.readline().strip())\n\n self.parser_character = CharacterType(list_character_types[1:])\n\n # Read ending state\n line = f.readline().strip().split()\n line1 = f.readline().strip().split()\n list_ending_asterisk = [int(w) for w in line[1:]]\n name_ending_asterisk = [w for w in line1[1:]]\n\n # Register the ending states with asterisk\n self.ending_asterisk = dict([(list_ending_asterisk[i], name_ending_asterisk[i])\n for i in range(len(list_ending_asterisk))])\n\n line = f.readline().strip().split()\n line1 = f.readline().strip().split()\n list_ending = [int(w) for w in line[1:]]\n name_ending = [w for w in line1[1:]]\n # Register the normal ending states\n self.ending = dict([(list_ending[i], name_ending[i])\n for i in range(len(list_ending))])\n\n def run_forward(self, word, input_state, ch):\n \"\"\"\n\n :param word: the reading word\n :param input_state: the state\n :param ch: the next character\n :return: A tuple of (flag, next word, next state, info)\n info is the metadata which is used to retrieve the token, token type and the error information\n \"\"\"\n c_type = self.parser_character.get_edge(ch)\n\n if c_type == -1:\n # Skip separator\n return True, word, input_state, {\"token\": \"\", \"token_type\": \"NA\", \"error\": False}\n else:\n edge = self.graph.get(input_state, [])\n if len(edge) == 0:\n # Going from a no-edge state\n if (ch not in SEPARATOR) and (ch not in BRACKET):\n return False, word, 0, {\"token\": \"\", \"token_type\": \"NA\", \"error\": True}\n else:\n return True, word, 0, {\"token\": \"\", \"token_type\": \"NA\", \"error\": False}\n\n output_st = edge[c_type]\n if output_st == -1:\n # Follow a no-edge connection\n if (ch not in SEPARATOR) and (ch not in BRACKET):\n return False, word, 0, {\"token\": \"\", \"token_type\": 
\"NA\", \"error\": True}\n else:\n return True, word, 0, {\"token\": \"\", \"token_type\": \"NA\", \"error\": False}\n\n elif output_st in self.ending:\n # Reach a normal ending state\n token = word + ch\n token_type = self.ending[output_st]\n return True, \"\", 0, {\"token\": token, \"token_type\": token_type, \"error\": False}\n elif output_st in self.ending_asterisk:\n # Reach an one-step-back ending state\n if ch not in SEPARATOR:\n flag, next_word, next_state, info = self.run_forward(\"\", 0, ch)\n token = word\n token_type = self.ending_asterisk[output_st]\n\n return flag, next_word.strip(), next_state,\\\n {\"token\": token, \"token_type\": token_type, \"error\": info[\"error\"]}\n else:\n token = word\n token_type = self.ending_asterisk[output_st]\n return True, \"\", 0, {\"token\": token, \"token_type\": token_type, \"error\": False}\n\n return True, word+ch, output_st, {\"token\": \"\", \"token_type\": \"NA\", \"error\": False}\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # General parameters\n parser.add_argument('--input_dfa', type=str, default='./dfa.dat',\n help='Input transition table')\n parser.add_argument('--input_file', type=str, default='./test.vc',\n help='Input source file')\n parser.add_argument('--output_file', type=str, default='./test.vctok',\n help='Output lexical analysis result of the input source file')\n parser.add_argument('--debug', default=False, action='store_true')\n args = parser.parse_args()\n\n dfa = DFA(args.input_dfa)\n\n input_file = args.input_file\n output_file = args.output_file\n # Debug flag\n DEBUG = args.debug\n\n token_set = set()\n\n line_number = 0\n with open(input_file, \"r\") as f:\n with open(output_file, \"w\") as fout:\n for line in f.readlines():\n line_number += 1\n\n word = \"\"\n input_state = 0\n\n i = 0\n for c in line:\n i += 1\n flag, next_word, next_state, info = dfa.run_forward(word, input_state, c)\n\n if info[\"error\"]:\n # Output the lexical error and skip to next line\n print(\"Compiler error at line %i, index %i\" % (line_number, i))\n word = \"\"\n input_state = 0\n break\n\n token = info['token']\n token_type = info['token_type']\n if DEBUG:\n print(c, flag, next_word, next_state, token)\n if len(token) > 0:\n print(\"-----------\")\n print(\"Parsing output = \", token, token_type)\n print(\"-----------\")\n\n if len(token) > 0:\n if token not in token_set:\n if token_type == 'identifier':\n # Check for special cases with identifier\n if token in keyword_lists:\n token_type = token + \"_keyword\"\n elif token in boolean_const:\n token_type = token + \"_boolean_const\"\n\n fout.write(\"%s %s\\n\" % (token, token_type))\n token_set.add(token)\n\n word = next_word.strip()\n input_state = next_state\n","repo_name":"dothuuyen/Compiler","sub_path":"lexical.py","file_name":"lexical.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39764467278","text":"import numpy\r\n\r\n# J(X, y, theta) computes the cost of using theta as the\r\n# parameter for linear regression to fit the data points in X and y\r\n\r\n# ====================== YOUR CODE HERE ======================\r\n# Instructions: Compute the cost of a particular choice of theta\r\n# by completing the function below\r\n\r\ndef J(X, y, theta):\r\n J = 0\r\n m = y.shape[0]\r\n \r\n for i in range(m):\r\n J += (numpy.dot(theta, X[i]) - y[i])**2\r\n return J/(2*m)\r\n\r\n 
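# Illustrative sketch, assuming X is an (m, n) array and theta an (n,) vector\r\n# (matching the per-row numpy.dot(theta, X[i]) above): the loop can be vectorized.\r\n#\r\n# def J_vectorized(X, y, theta):\r\n#     residuals = numpy.dot(X, theta) - y\r\n#     return numpy.dot(residuals, residuals) / (2 * y.shape[0])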
\r\n\r\n","repo_name":"pcsouza/machine-learning-conversion","sub_path":"ex1/computeCostComplete.py","file_name":"computeCostComplete.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6733914066","text":"\"\"\"\nutf-8\nСоздан 31.03.2022\n\n:author: maria.kalashnikova\n\nОбучающий курс Python QA Engineer.\nДомашняя работа №5. UI тесты для opencart\n\"\"\"\nimport pytest\nfrom selenium import webdriver\n\n\ndef browser_options(browser_instance):\n options = browser_instance\n options.add_argument('no-sandbox')\n options.add_argument('--ignore-certificate-errors')\n options.AcceptInsecureCertificates = 'true'\n return options\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--browser\",\n action='store',\n default=\"chrome\",\n help=\"This is browser choice\"\n )\n\n parser.addoption(\n \"--url\",\n action='store',\n default=\"http://192.168.0.157:8081\",\n help=\"This is the website address\"\n )\n\n parser.addoption(\n \"--driver_path\",\n action='store',\n default=\"C:\\\\Users\\\\maria.kalashnikova\\\\drivers\\\\chromedriver.exe\",\n help=\"This is the executable path\"\n )\n\n\n@pytest.fixture\ndef browser_name(request):\n return request.config.getoption(\"--browser\")\n\n\n@pytest.fixture\ndef base_url(request):\n return request.config.getoption(\"--url\")\n\n\n@pytest.fixture\ndef driver_path(request):\n return request.config.getoption(\"--driver_path\")\n\n@pytest.fixture()\ndef init_browser(browser_name, driver_path):\n if browser_name == \"chrome\":\n instance = webdriver.ChromeOptions()\n options = browser_options(instance)\n driver = webdriver.Chrome(executable_path=driver_path)\n return driver\n# if browser_name == \"firefox\":\n# instance = webdriver.FirefoxOptions()\n# options = browser_options(instance)\n# driver = webdriver.Firefox(executable_path='', options=options)\n# return driver\n# if browser_name == \"opera\":\n# instance = webdriver.FirefoxOptions()\n# options = browser_options(instance)\n# driver = webdriver.Firefox(executable_path='', options=options)\n# return driver\n#\n\n@pytest.fixture()\ndef browser(init_browser, base_url):\n init_browser.get(base_url)\n yield init_browser\n init_browser.quit()\n","repo_name":"maria-kalashnikova/autotests","sub_path":"waiting_elements/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73608208584","text":"import dash_bootstrap_components as dbc\nimport dash\nimport flask\n\nexternal_scripts = [\n {\"src\": \"https://kit.fontawesome.com/44d65a8b68.js\"},\n {\"crossorigin\": \"anonymous\"},\n]\n\nserver = flask.Flask(__name__)\n\napp = dash.Dash(\n __name__,\n server=server,\n external_stylesheets=[dbc.themes.FLATLY],\n title=\"Team 18\",\n external_scripts=external_scripts,\n suppress_callback_exceptions=True,\n meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1.0\"},\n ],\n)\n\n#server = app.server\n#app.config.suppress_callback_exceptions = True","repo_name":"jcelis87/team18-front-end-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72784776266","text":"# -*- coding: utf-8 -*-\n\"\"\"Setup RobotFramework tests.\nWe have 3 kinds of expected failures:\nExpected Failure\n We know they are failing 
and need some work to be fixed.\nMandelbug\n Their behaviour is chaotic and they are hard to fix.\nIssue related\n They are failing under certain reproducible circunstances.\n\"\"\"\nfrom collective.cover.testing import ROBOT_TESTING\nfrom plone.testing import layered\n\nimport os\nimport robotsuite\nimport unittest\n\n\ndirname = os.path.dirname(__file__)\nfiles = os.listdir(dirname)\ntests = [f for f in files if f.startswith(\"test_\") and f.endswith(\".robot\")]\n\nnoncritical = [\"Expected Failure\", \"Mandelbug\"]\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTests(\n [\n layered(\n robotsuite.RobotTestSuite(t, noncritical=noncritical),\n layer=ROBOT_TESTING,\n )\n for t in tests\n ]\n )\n return suite\n","repo_name":"collective/collective.cover","sub_path":"src/collective/cover/tests/test_robot.py","file_name":"test_robot.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"81"} +{"seq_id":"43201948162","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n'''————————————————————test1:N=1 C=1,kernel size=3的实现————————————————————————————————'''\n'''\n# 生成 feature map和kernel\nim = torch.rand(1, 1, 16, 16)\nkernel_data = torch.rand(1, 1, 3, 3) # batch,channel,height,weight\n\n# 利用pytorch的卷积 生成输出\nconv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 3), stride=1, padding=1, padding_mode='zeros', bias=False)\nconv.weight = nn.Parameter(kernel_data)\noutput = conv(im)\n\n\"\"\" image to col \"\"\"\n\ninput_shape = im.shape\nkernel_shape = conv.weight.data.shape\n# 预分配im2col的matrix\nim_col = torch.zeros(input_shape[-1]*input_shape[-2], kernel_shape[-1]*kernel_shape[-2]) # weight*height\n\npadded_im = nn.functional.pad(im, (1, 1, 1, 1), 'constant', 0)\npadded_input_shape = padded_im.shape\n# 截取每个3×3的子区域并塞入到im_col 里面\nk = 0\nfor i in range(1, padded_input_shape[-2]-1):\n for j in range(1, padded_input_shape[-1]-1):\n im_col[k, :] = padded_im[0, 0, i-1:i+2, j-1:j+2].clone().reshape(-1)\n k += 1\n\n# mat_col × kernel 并会reshape回原尺度。注意这里是矩阵乘法\noutput_mat = torch.matmul(im_col, kernel_data.view(9, 1))\noutput_mat_reshape = output_mat.reshape(1, 1, 16, 16)\n'''\n\n'''——————————————test2 自定义的N/C,但是kernel size还是坚持为3,可修改————————————————'''\n# 自定义 feature map大小,当然,kernel大小还是3\nout_c, n, in_c, h, w = 5, 2, 3, 13, 16\nim = torch.rand(n, in_c, h, w)\nkernel_data = torch.rand(out_c, in_c, 3, 3) # 后两位可以根据实际的kernel_size修改\nkernel_size = kernel_data.shape[-1] # 得到3,即kernel_size\nprint(\"input feature map shape is {}, kernel shape is {}\".format(im.shape, kernel_data.shape))\n\n# 使用torch自带的卷积,屏蔽掉bias\nconv = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=(3, 3), stride=1, padding=1, padding_mode='zeros',\n bias=False)\nconv.weight = nn.Parameter(kernel_data)\noutput = conv(im)\n\nprint(\"shape of output by torch.conv is {}\".format(output.shape))\n\n# 对feature map进行手动pad\npadded_im = nn.functional.pad(im, (1, 1, 1, 1), 'constant', 0)\nprint(\"shape of padded feature map is {}\".format(padded_im.shape))\n\n# 预分配im2col的matrix:\n# 大小为 [n * h * w, in_c * kernel_size * kernel_size]\nim_col = torch.zeros(n * w * h, in_c * kernel_size * kernel_size)\n# 
这里数据的不同channel左右连接了。行数因为手动pad,所以是n*(w*h),这里是2*16*13。列数是左右链接3个channel形成的,这里是3*3*3。\nprint(\"多batch,多channel数据的im2映射:\", im_col.shape)\n\n# 截取每个3×3的子区域并塞入到im2col矩阵里面\npadded_input_shape = padded_im.shape\nk = 0\nfor idx_im in range(n):\n for i in range(1, padded_input_shape[-2] - 1):\n for j in range(1, padded_input_shape[-1] - 1):\n im_col[k, :] = padded_im[idx_im, :, i - 1:i + 2, j - 1:j + 2].clone().reshape(-1)\n k += 1\n\n# im2col和reshape后的kernel进行相乘\n# reshape后的kernel大小为[kernel_size*kernel_size*in_c, out_c]\nprint(\"多个卷积核,每个核多channel,权重的im2映射:\", kernel_data.reshape(kernel_size * kernel_size * in_c, out_c).shape) # 权重的不同channel上下连接\noutput_mat = torch.matmul(im_col, kernel_data.reshape(kernel_size * kernel_size * in_c, out_c))\nprint(\"矩阵相乘的结果(col格式):\", output_mat.shape) # [n * w * h, out_c]\n\n# 将结果reshape,这里面维度处理需要注意\noutput_mat_reshape = output_mat.permute(1, 0) # [out_c, n * h * w]\noutput_mat_reshape = output_mat_reshape.reshape(out_c, n, h, w) # [out_c, n, h,w]\noutput_mat_reshape = output_mat_reshape.permute(1, 0, 2, 3) # [ n, out_c, h,w]\nprint(\"矩阵相乘的结果(矩阵格式):\", output_mat_reshape.shape) # 可见img2col结果和pytorch卷积算子的结果一致\n\nif __name__ == '__main__':\n print('PyCharm')\n\n","repo_name":"yaqingLi467/img2col_compute","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1527930162","text":"from flask import Flask, render_template, jsonify\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nBootstrap(app)\n\n## Connect to Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n## Cafe TABLE Configuration\nclass Cafe(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), unique=True, nullable=False)\n map_url = db.Column(db.String(500), nullable=False)\n img_url = db.Column(db.String(500), nullable=False)\n location = db.Column(db.String(250), nullable=False)\n has_sockets = db.Column(db.Boolean, nullable=False)\n has_toilet = db.Column(db.Boolean, nullable=False)\n has_wifi = db.Column(db.Boolean, nullable=False)\n can_take_calls = db.Column(db.Boolean, nullable=False)\n seats = db.Column(db.String(250), nullable=False)\n coffee_price = db.Column(db.String(250), nullable=True)\n\n\n# Get Cafe from ID\ndef get_cafe(cafe_id):\n cafe = Cafe.query.get(cafe_id)\n return cafe\n\n\ndef refresh_db():\n return db.session.query(Cafe).order_by('name').all()\n\n\ndef convert_dict(db_entry):\n obj_dict = db_entry.__dict__\n del obj_dict['_sa_instance_state']\n return obj_dict\n\n\n# all Flask routes below\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/all\")\ndef all():\n return jsonify(dict([(cafe.id, convert_dict(cafe)) for cafe in refresh_db()]))\n\n\n@app.route('/cafes')\ndef cafes():\n cafes=refresh_db()\n print(cafes)\n return render_template('cafes.html', cafes=cafes)\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"arjunbrara123/cafe-wifi-website","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38672152335","text":"# coding=utf-8\n#from __future__ import print_function\n\nimport yaml\nimport 
optparse\nimport sys\nimport time\nfrom util import loader\nimport itertools\nimport _pickle as cPickle\nfrom collections import OrderedDict\n\nimport torch\nfrom torch.autograd import Variable\n\nfrom util.utils import *\nfrom util.loader import *\nfrom util.model import BiLSTM_CRF\nfrom util.conlleval import *\n\n\noptparser = optparse.OptionParser()\noptparser.add_option(\"--config\", default=\"util/config.yaml\", type=str, help=\"config file path\")\noptparser.add_option(\"--tag_scheme\", default=\"iobes\",help=\"Tagging scheme, IOB or IOBES\")\noptparser.add_option('--use_gpu', default='1',type='int', help='whether or not to use gpu')\noptparser.add_option('--reload', default='0',type='int', help='whether or not to reload pretrained model')\noptparser.add_option('--pretrained_model', default='',type=str, help='pretrained model path')\n\n\nopts = optparser.parse_args()[0]\nconfig = AttrDict(yaml.load(open(opts.config, 'r')))\n# to beifen\nparameters = OrderedDict()\nparameters['tag_scheme'] = opts.tag_scheme\nparameters['lower'] = config['lower']\nparameters['zeros'] = config['zeros']\nparameters['char_dim'] = config['char_dim']\nparameters['char_lstm_dim'] = config['char_lstm_dim']\nparameters['char_bidirect'] = config['char_bidirect']\nparameters['word_dim'] = config['word_dim']\nparameters['word_lstm_dim'] = config['word_lstm_dim']\nparameters['word_bidirect'] = config['word_bidirect']== 1\nparameters['pre_emb'] = config['pre_emb']\nparameters['all_emb'] = config['all_emb'] == 1\nparameters['cap_dim'] = config['cap_dim']\nparameters['crf'] = config['crf']\nparameters['dropout'] = config['dropout']\nparameters['reload'] = opts.reload\n#parameters['name'] = config['name']\nparameters['use_gpu'] = opts.use_gpu == 1 and torch.cuda.is_available()\n\nmodels_path = config['models_path']\n#name = config['name']\n#model_name = models_path + name\n\nlower = parameters['lower']\nzeros = parameters['zeros']\ntag_scheme = parameters['tag_scheme']\nuse_gpu = parameters['use_gpu']\nlearning_rate = config['learning_rate']\nprintloss_after = config['printloss_after']\n#eval_after = config['eval_after']\n\nif not os.path.exists(models_path):\n os.makedirs(models_path)\n\n\n\ndef train(model, train_data,test_data, dev_data, id_to_tag,tag_to_id ):\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n loss = 0.0\n #count = 0\n all_count=0\n model.train(True)\n for epoch in range(1, 100):\n cnt = 0\n for i, index in enumerate(np.random.permutation(len(train_data))):\n all_count += 1\n cnt+=1\n if cnt>200:\n break\n data = train_data[index]\n model.zero_grad()\n\n sentence_in = data['words']\n sentence_in = Variable(torch.LongTensor(sentence_in))\n tags = data['tags']\n chars2 = data['chars']\n\n ######### char lstm\n chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)\n d = {}\n for i, ci in enumerate(chars2):\n for j, cj in enumerate(chars2_sorted):\n if ci == cj and not j in d and not i in d.values():\n d[j] = i\n continue\n chars2_length = [len(c) for c in chars2_sorted]\n char_maxl = max(chars2_length)\n chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')\n for i, c in enumerate(chars2_sorted):\n chars2_mask[i, :chars2_length[i]] = c\n chars2_mask = Variable(torch.LongTensor(chars2_mask))\n\n targets = torch.LongTensor(tags)\n caps = Variable(torch.LongTensor(data['caps']))\n if use_gpu:\n neg_log_likelihood = model.neg_log_likelihood(sentence_in.cuda(), targets.cuda(), chars2_mask.cuda(),\n caps.cuda(), chars2_length, d)\n else:\n 
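# CPU fallback: the same loss computation with all tensors kept on the host\n                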
neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets, chars2_mask, caps, chars2_length, d)\n # loss += neg_log_likelihood.data[0] / len(data['words'])\n loss += neg_log_likelihood.item() / len(data['words'])\n neg_log_likelihood.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)\n optimizer.step()\n\n if cnt % printloss_after == 0:\n loss /= printloss_after\n print(cnt,'/',len(train_data), ' : ', loss)\n loss = 0.0\n\n model.train(False)\n new_dev_recall, new_dev_pre, new_dev_F = evaluating(model, dev_data,id_to_tag,tag_to_id, epoch)\n new_test_recall, new_test_pre, new_test_F = evaluating(model, test_data, id_to_tag, tag_to_id, epoch)\n print('new_dev_recall:%3.6f, new_dev_pre: %3.6f , new_dev_F: %3.6f ' % (new_dev_recall, new_dev_pre , new_dev_F))\n print('new_test_recall:%3.6f, new_test_pre: %3.6f , new_test_F: %3.6f ' % (new_test_recall ,new_test_pre , new_test_F ))\n torch.save(model,models_path+'epoch_%d_%3.6f_%3.6f_%3.6f_%3.6f_%3.6f_%3.6f.model'%(epoch, new_dev_recall,new_dev_pre ,new_dev_F, new_test_recall, new_test_pre, new_test_F ))\n model.train(True)\n\n adjust_learning_rate(optimizer, lr=learning_rate / (1 + 0.05 * all_count / len(train_data)))\n\ndef evaluating(model, datas,id_to_tag,tag_to_id , epoch):\n prediction = []\n save = False\n new_F = -1.0\n confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))\n for data in datas:\n ground_truth_id = data['tags']\n words = data['str_words']\n chars2 = data['chars']\n caps = data['caps']\n\n chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)\n d = {}\n for i, ci in enumerate(chars2):\n for j, cj in enumerate(chars2_sorted):\n if ci == cj and not j in d and not i in d.values():\n d[j] = i\n continue\n chars2_length = [len(c) for c in chars2_sorted]\n char_maxl = max(chars2_length)\n chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')\n for i, c in enumerate(chars2_sorted):\n chars2_mask[i, :chars2_length[i]] = c\n chars2_mask = Variable(torch.LongTensor(chars2_mask))\n dwords = Variable(torch.LongTensor(data['words']))\n dcaps = Variable(torch.LongTensor(caps))\n if use_gpu:\n val, out = model(dwords.cuda(), chars2_mask.cuda(), dcaps.cuda(), chars2_length, d)\n else:\n val, out = model(dwords, chars2_mask, dcaps, chars2_length, d)\n predicted_id = out\n for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):\n line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])\n prediction.append(line)\n confusion_matrix[true_id, pred_id] += 1\n prediction.append('')\n\n # save the result\n predf = models_path + '/epoch_%d_pred.txt'%(epoch)\n with open(predf, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(prediction))\n\n true_seqs, pred_seqs = [], []\n with open(predf,'r',encoding='utf-8') as f:\n for line in f:\n #print(line)\n cols = line.strip().split()\n # each non-empty line must contain >= 3 columns\n if not cols:\n true_seqs.append('O')\n pred_seqs.append('O')\n elif len(cols) < 3:\n raise IOError(\"conlleval: too few columns in line %s\\n\" % line)\n else:\n # extract tags from last 2 columns\n true_seqs.append(cols[-2])\n pred_seqs.append(cols[-1])\n result = evaluate(true_seqs, pred_seqs)\n print('result: ',result)\n print(type(result))\n recall = result[0]\n precision = result[1]\n f1 = result[2]\n return recall, precision, f1\n\ndef main():\n # load data\n train_sentences = loader.load_sentences(config['train'], lower, zeros)\n dev_sentences = loader.load_sentences(config['dev'], lower, zeros)\n test_sentences = 
loader.load_sentences(config['test'], lower, zeros)\n\n #print(train_sentences) # [['a', 'DT', 'I-NP', 'O'], ... , ['lot', 'NN', 'I-NP', 'O']]\n\n # check tags\n update_tag_scheme(train_sentences, tag_scheme)\n update_tag_scheme(dev_sentences, tag_scheme)\n update_tag_scheme(test_sentences, tag_scheme)\n\n # get dictionary\n dico_words_train = word_mapping(train_sentences, lower)[0]\n dico_words, word_to_id, id_to_word = augment_with_pretrained(\n dico_words_train.copy(),\n parameters['pre_emb'],\n list(itertools.chain.from_iterable(\n [[w[0] for w in s] for s in dev_sentences + test_sentences])\n ) if not parameters['all_emb'] else None\n )\n dico_chars, char_to_id, id_to_char = char_mapping(train_sentences)\n dico_tags, tag_to_id, id_to_tag = tag_mapping(train_sentences)\n\n # prepare data\n train_data = prepare_dataset(train_sentences, word_to_id, char_to_id, tag_to_id, lower )\n dev_data = prepare_dataset(dev_sentences, word_to_id, char_to_id, tag_to_id, lower )\n test_data = prepare_dataset( test_sentences, word_to_id, char_to_id, tag_to_id, lower)\n print(\"%i / %i / %i sentences in train / dev / test.\" % (len(train_data), len(dev_data), len(test_data)))\n\n #prepare word_embeds\n all_word_embeds = {}\n for i, line in enumerate(codecs.open(config['pre_emb'], 'r', 'utf-8')):\n s = line.strip().split()\n if len(s) == parameters['word_dim'] + 1:\n all_word_embeds[s[0]] = np.array([float(i) for i in s[1:]])\n word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), config['word_dim']))\n for w in word_to_id:\n if w in all_word_embeds:\n word_embeds[word_to_id[w]] = all_word_embeds[w]\n elif w.lower() in all_word_embeds:\n word_embeds[word_to_id[w]] = all_word_embeds[w.lower()]\n print('Loaded %i pretrained embeddings.' % len(all_word_embeds))\n\n # create or load model\n model = BiLSTM_CRF(vocab_size=len(word_to_id), tag_to_ix=tag_to_id, embedding_dim=parameters['word_dim'], hidden_dim=parameters['word_lstm_dim'], use_gpu=use_gpu, char_to_ix=char_to_id, pre_word_embeds=word_embeds, use_crf=parameters['crf'])\n if parameters['reload']:\n model.load_state_dict(torch.load(opts.pretrained_model))\n if use_gpu:\n model.cuda()\n\n #train\n train(model, train_data, test_data, dev_data, id_to_tag, tag_to_id)\n\nif __name__ == '__main__':\n main()","repo_name":"htfhxx/nlp-beginner_solution","sub_path":"Task4/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10576,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"81"} +{"seq_id":"17075471197","text":"import keras\nfrom keras.models import Sequential # 序贯模型,即神经网络是单向的连接\nfrom keras.datasets import mnist # 手写数据集,为60000张手写图片和10000张验证图片,第一次运行会自动重网上下载(翻墙可能更快)\nfrom keras.layers import Dense, Dropout, Flatten\n# 分别引用Dense全连接层,Dropout层(断开一些连接防止过拟合), Flatten用于将卷积层压扁,方便全连接层输入\nfrom keras.layers import Conv2D, MaxPool2D # Conv2D卷积层, MaxPool2D使用maxpool的池化层\n# 池化层往往跟在卷积层后面, 将之前卷基层得到的特征图做一个聚合统计,也有降维的功能,减少计算量(理论上池化层有信息缺失)\nfrom keras import backend as K # 这个是用于调节Theano和tensorflow输入\n# Theano图像输入是channels_first也就是RGB通道在前面(3,28,28), 3表示红黄蓝三色的颜色通道, 28分别代表图片长宽像素\n# tensorflow 相反(28,28,3)RGB通道在最后\n\n# 预设参数\nbatch_size = 128 # 用于随机梯度下降,每次取128个样本作梯度下降,保证速度的情况下在最优点不至于剧烈震荡\nnum_classes = 10 # 有多少个类,这里手写体识别有10个类 类标签分类是0-9\nepochs = 1 # 训练次数\n# 指定输入数据的像素,因为是灰度图片所以没有颜色通道\nimg_rows, img_cols = 28, 28 # 这里现指定图片长宽\n(x_train, y_train), (x_test, y_test) = mnist.load_data() # 加载数据集\n\n# 设定输入格式,theano属于channel_first\n# tensorflow属于channel_last\nif K.image_data_format() == 'channels_first':\n 
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n    # number of samples, color channels, rows, columns\n    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n    input_shape = (1, img_rows, img_cols)  # this is Theano's input format\nelse:\n    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n    # number of samples, rows, columns, color channels\n    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n    input_shape = (img_rows, img_cols, 1)  # this is TensorFlow's input format\n\n# convert the data to float32\nx_train = x_train.astype('float32')  # cast to float32\nx_test = x_test.astype('float32')\nx_train /= 255  # normalize the grayscale values\nx_test /= 255\n# the step above normalizes the image data: pixel values lie in 0-255, so dividing every pixel by 255 rescales them to [0, 1]\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'x_train samples')  # print the number of training and test samples\nprint(x_test.shape[0], 'x_test samples')\n\n# turn the class labels 0-9 into binary vectors for training, matching the softmax output\ny_train = keras.utils.to_categorical(y_train, num_classes)\n# the labels need a one-hot encoding here; note this is only required because the final output activation is softmax\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# define the sequential model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3),\n                 activation='relu',\n                 input_shape=input_shape))  # conv layer with 32 filters, each kernel of size (3, 3), relu activation\nmodel.add(Conv2D(64, (3, 3), activation='relu'))  # conv layer with 64 filters\n\n# pooling layer\nmodel.add(MaxPool2D(pool_size=(2, 2)))  # halves the feature map: (2, 2) divides height and width by 2, so a 28x28 image becomes 14x14\nmodel.add(Dropout(0.35))  # randomly drop 35% of the connections to prevent overfitting\nmodel.add(Flatten())  # flatten the conv output into a one-dimensional vector\nmodel.add(Dense(128, activation='relu'))  # a fully connected layer with 128 units\nmodel.add(Dropout(0.5))  # drop 50% of the connections to prevent overfitting\nmodel.add(Dense(num_classes, activation='softmax'))\n# final output layer; note it uses softmax as the activation, which is why y_train = keras.utils.to_categorical(y_train, num_classes)\n# above had to one-hot encode the labels\n\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='Adadelta',\n              metrics=['accuracy'])  # compile with the loss function, optimizer and metrics (accuracy here)\n\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n          verbose=1, validation_data=(x_test, y_test))\n# like fitting in sklearn: keras works like plumbing - the steps above lay the pipes, and only now does the water (data) flow through them (training)\n# validation_data=(x_test, y_test) evaluates on the given test set; if unset, validation_split can be used for cross-validation instead\n\nscore = model.evaluate(x_test, y_test, verbose=0)  # evaluate on the test set; verbose=0 means simplified output\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n# plotting the network structure below requires graphviz and pydot\nfrom keras.utils import plot_model\nplot_model(model, to_file='model2.png', show_layer_names=True, show_shapes=True)\n","repo_name":"Asdil/Keras_MNIST","sub_path":"MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"19012728418","text":"from url_package import isShortURLPresent, getShortURL, getLongURL, showDatabase, updateLongURL\n\nif __name__ == \"__main__\":\n    fullURL = \"https://www.fb.com/\"\n    \n    #get the shortURL from fullURL\n    short = getShortURL(fullURL, 'fb')\n    print(short)\n    \n    # get the longURL from corresponding shortURL\n    long = getLongURL(short)\n    print(long)\n    \n    \n    #update the longURL\n    short = updateLongURL(short, \"https://www.facebook.com/\")\n    \n    #get the newLong URL\n    long = getLongURL(short)\n    print(long)\n    \n    # requests for database (username, password)\n    # only admin can see the database\n    showDatabase('chanchal', 'chanchal@123')\n    \n    \n    \n    \n    \n    \n    \n    ","repo_name":"chanchalkmaurya/URL-Shortener","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"73516171465","text":"\"\"\"\n    Write a program that 
accepts a comma separated sequence of words as input and\n prints the words in a comma-separated sequence after sorting them alphabetically.\n Suppose the following input is supplied to the program:\n without,hello,bag,world\n Then, the output should be:\n bag,hello,without,world\n\"\"\"\ndef orderWords(words):\n words = words.split(',')\n words = [word.strip() for word in words]\n words.sort()\n print(words)\n\norderWords('without,hello,bag,world')\n","repo_name":"horselurrver/100_python_exercises","sub_path":"ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8629245107","text":"from datetime import date\nimport pandas as pd\nimport yfinance as yf\nimport matplotlib.pyplot as plt\n\n\nclass Bar:\n def __init__(self):\n self.year = 0\n self.month = 0\n self.day = 0\n self.open = 0\n self.high = 0\n self.low = 0\n self.close = 0\n\n\ndef run(df, start_date, end_date, increment, date_increment, fees):\n \"\"\" Основная функция \"\"\"\n df = df[start_date:end_date]\n\n cur_bar = Bar()\n prev_bar = Bar()\n\n increment_is_completed = False # Приращение депо в этом месяце (False-не было, True-было)\n depo = 0 # Брокерский счет в валюте\n sum_depo = 0 # Общая сумма инвестирования (переведено на брокерский счет)\n portfolio = 0 # Количество бумаг в портфеле\n\n # Перебор строк DF, как приход нового бара\n for row in df.itertuples(): # Перебор строк DF (аналог появления нового бара)\n # Обновляем информацию текущего бара\n cur_bar.day = date.timetuple(row[0])[2]\n cur_bar.month = date.timetuple(row[0])[1]\n cur_bar.year = date.timetuple(row[0])[0]\n cur_bar.open = row[1]\n cur_bar.high = row[2]\n cur_bar.low = row[3]\n cur_bar.close = row[4]\n\n if cur_bar.month != prev_bar.month: # Если месяц предыдущего бара не равен месяцу текущего бара\n increment_is_completed = False # Признак инвес��ирования в текущем месяце в False\n elif cur_bar.month == prev_bar.month and not increment_is_completed and cur_bar.day >= date_increment:\n depo += increment # Увеличение брокерского счета на сумму ежемесячных инвестиций\n sum_depo += increment # Увеличение общей суммы инвестиций\n increment_is_completed = True # Признак инвестирования в текущем месяце в True\n\n while cur_bar.open + cur_bar.open * fees < depo: # Если сумма на счете позволяет купить фин. 
инструмент\n depo -= cur_bar.open + cur_bar.open * fees # Списываем с брокерского счета сумму на покупку\n portfolio += 1 # Увеличиваем количество бумаг в портфеле\n\n # Обновление информации предыдущего бара\n prev_bar.day = cur_bar.day\n prev_bar.month = cur_bar.month\n prev_bar.year = cur_bar.year\n prev_bar.open = cur_bar.open\n prev_bar.high = cur_bar.high\n prev_bar.low = cur_bar.low\n prev_bar.close = cur_bar.close\n\n return sum_depo, portfolio * cur_bar.close, depo, portfolio, cur_bar.close\n\n\nif __name__ == '__main__':\n # BRK-B IJH SPY AAPL QQQ\n ticker = 'VGK' # Тикер финансового инструмента как он отображается на Yahoo Finance\n increment: int = 100 # Сумма ежемесячного инвестирования\n date_increment: int = 15 # Дата пополнения(число месяца)\n year_invest: int = 10 # Количество лет инвестирования\n fees = 0.0006 # 0.05% комиссия брокера ВТБ + 0.01% комиссия биржи\n\n df_ticker = yf.download(ticker) # Загрузка данных с Yahoo Finance\n df_ticker = df_ticker.drop(columns=['Adj Close', 'Volume']) # Удаляем ненужные колонки\n\n df_rez_ticker = pd.DataFrame()\n\n for year in range(1993, 2022):\n start_date: date = date(year, 1, 1) # Дата старта инвестирования(год, месяц, число)\n end_date = date(start_date.year + year_invest, start_date.month, start_date.day)\n\n rez = run(df_ticker, start_date, end_date, increment, date_increment, fees)\n\n dohod = rez[1] - rez[0] + rez[2]\n # print(f'Год начала инвестирования: {year}\\n'\n # f'Всего инвестировано: {rez[0]}\\n'\n # f'Конечная стоимость портфеля: {rez[1]:.2f}\\n'\n # f'Остаток денег на брокерском счете: {rez[2]:.2f}\\n'\n # f'Количество бумаг в портфеле: {rez[3]}\\n'\n # f'Текущая стоимость одной бумаги: {rez[4]:.2f}\\n'\n # f'Доход: {dohod:.2f}\\n'\n # f'---------------------------------------------')\n\n # new_row = {'Год начала': year, 'Инвестировано': rez[0], 'Стоим портфеля': rez[1], 'Деньги': rez[2],\n # 'Бумаги': rez[3], 'Стоимость бумаги': rez[4], 'Доход': dohod}\n\n new_row = {'Год начала': str(year),\n 'Инвестировано': round(rez[0], 2),\n 'Стоим портфеля': round(rez[1], 2),\n 'Доход': round(dohod, 2)\n }\n\n # append row to the dataframe\n df_rez_ticker = df_rez_ticker.append(new_row, ignore_index=True)\n\n pd.set_option('display.max_columns', None) # Сброс ограничений на число столбцов\n print(df_rez_ticker) # Вывод таблицы результата\n\n # Построение графика\n index = df_rez_ticker['Год начала']\n values = df_rez_ticker['Доход']\n plt.title(f'Доход за {year_invest} лет ежемесячного инвестирования по ${increment} в инструмент {ticker}')\n plt.bar(index, values, label='Доход')\n plt.xticks(index, df_rez_ticker['Год начала'].apply(int), rotation=45) # Подписи к оси Х переведены в int и повернуты\n plt.xlabel(\"Год начала инвестирования\")\n plt.ylabel(\"Доход в $\")\n plt.legend(loc=2)\n plt.show()\n","repo_name":"Alkor135/researches","sub_path":"buy_and_hold.py","file_name":"buy_and_hold.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35425252401","text":"import os\nimport re\nfrom numpy.core.arrayprint import DatetimeFormat\n# import cv2\nimport numpy as np\nfrom PIL import Image\ndata = np.empty(shape=(0,1))\ntarget = []\ndef eachFile(filepath):\n flag = True\n i = -1\n for root, dirs, files in os.walk(filepath): \n for file in files:\n if re.search('Ambient', file):\n continue\n if re.search('.pgm',file):\n #print(os.path.join(root, file))\n im = Image.open(os.path.join(root, file))\n \n # 获得图像尺寸:\n w, h = 
im.size\n            # scale the image down to 25%:\n            im.thumbnail((w//4, h//4))\n\n            \n            img = np.array(im).flatten()\n            if flag:\n                flag = False\n                data = img\n                target.append(i)\n            else:\n                data = np.vstack((data,img))\n                target.append(i)\n            \n            \n            i=i+1\n    tmp = np.array(target)[np.newaxis,:]\n    print(tmp)\n\n    data = np.concatenate((data,tmp.T),axis =1)\n    np.savetxt(\"new.csv\", data, delimiter=',') \nif __name__ == '__main__':\n    filepath = r'C:\\Users\\LiQi\\Desktop\\yaleBExtData'\n    eachFile(filepath)","repo_name":"lqecho/pattern-recognition","sub_path":"file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20354468220","text":"import logging\n\ndef test_logs():\n    logger = logging.getLogger(__name__)\n    fileHandler = logging.FileHandler(\"logging.log\")\n    formatter = logging.Formatter(\"%(asctime)s: %(levelname)s: %(name)s: %(message)s\")\n    fileHandler.setFormatter(formatter)\n    logger.addHandler(fileHandler)\n\n\n    logger.setLevel(logging.DEBUG)\n    logger.debug(\"A debug statement is executed\")\n    logger.info(\"Information statement\")\n    logger.error(\"error\")\n    logger.warning(\"Something is in warning mode\")\n    logger.critical(\"Critical issue\")\n\n# comments: full process, step by step\n# SUMMARY: 1. create a file called test_loggingPractise.py\n# 2. import the logging module\n# 3. call the logger.debug, info, warning, error and critical methods\n# 4. create a log file called logging.log using logging.FileHandler and pass in the file name\n# 5. format the records using logging.Formatter (pass time, level, name and message); name this formatter\n# 6. attach the format using fileHandler.setFormatter(formatter)\n# 7. logger.addHandler (pass the fileHandler here)\n# 8. set the level using logger.setLevel(logging.DEBUG)","repo_name":"pramodbojja/pythonProject6","sub_path":"PytestsDemo/test_logs.py","file_name":"test_logs.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"25676017212","text":"#4 A lottery has 100 tickets. 2 of them are winning tickets.\n# What is the probability that the 2 purchased tickets both turn out to be winners?\n\nimport math\nall_tickets = 100\nbought_tickets = 2\nfavorable_outcome = 1\nall_outcome = math.factorial(all_tickets)/(math.factorial(bought_tickets)*math.factorial(all_tickets-bought_tickets))\nP = favorable_outcome/all_outcome\nprint(f'The probability that the 2 purchased tickets both turn out to be winners is {P*100 :.2f}%')","repo_name":"Stusha97/math_homework","sub_path":"HW1_task4.py","file_name":"HW1_task4.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"20413462520","text":"import numpy as np\nimport pandas as pd\nfrom Analysis.analysis_utils import structured_unstructured, LINCLAB_COLS, plot_specs, analysis_specs\nfrom Analysis.analysis_utils import get_grids\nfrom modules.Utils import running_mean as rm\nfrom Analysis.analysis_utils import avg_performance_over_envs, avg_perf_over_envs_lines, avg_performance_over_envs_violins\nimport pickle\nimport matplotlib.pyplot as plt\ncache_limits = analysis_specs['cache_limits']\n\n# import csv data summary\nparent_path = '../../Data/'\ndf = pd.read_csv(parent_path+'train_test_ec.csv')\nref = pd.read_csv(parent_path+'train_test_shallowAC.csv')\n\ndf['representation'] = df['representation'].apply(structured_unstructured)\nref['representation'] = ref['representation'].apply(structured_unstructured)\n\nprint(df.load_from.unique())\n\ndef chop_(arr):\n    smoothing = 20\n    start = 5000-smoothing+1\n    return np.concatenate((arr[0:start],arr[5000:]))\n\n\nenvs_to_plot = ['gridworld:gridworld-v1','gridworld:gridworld-v4','gridworld:gridworld-v3','gridworld:gridworld-v5']\npcts_to_plot = [100,75,50,25]\nreps_to_plot = ['unstructured','structured']\ngrids = get_grids(envs_to_plot)\ncol_to_plot = {'unstructured':LINCLAB_COLS['blue'], 'structured':LINCLAB_COLS['red']}\n\nenv = envs_to_plot[0]\npct = 100\nrep = 'structured'\nfig, ax = plt.subplots(len(envs_to_plot),2,sharex=True, sharey=True)\nfor e, env in enumerate(envs_to_plot):\n    smoothing = 20\n    upper_limit= 30000\n    ftsz=8\n    for r, rep in enumerate(reps_to_plot):\n        # get MF\n\n        ref_gb = ref.groupby(['env_name','representation','extra_info'])['save_id']\n        id_list = list(ref_gb.get_group((env,rep,'x')))\n        print(\"MF\",env, rep, len(id_list))\n        total_avg_reward = []\n        for i, id_num in enumerate(id_list):\n            with open(parent_path+ f'results/{id_num}_data.p', 'rb') as f:\n                dats = pickle.load(f)\n                raw_score = dats['total_reward']\n            normalization = analysis_specs['avg_max_rwd'][env[0:22]]\n            scaled_ = (np.asarray(raw_score)+2.5)/(normalization +2.5)\n            if len(scaled_) < upper_limit:\n                num_extras = upper_limit-len(scaled_)\n                last_200_mean = np.mean(scaled_[-5000:])\n                last_200_std = np.std(scaled_[-5000:])\n                filler = np.random.normal(last_200_mean,last_200_std,num_extras)\n                nans = np.zeros(num_extras)\n                nans[:] = np.nan\n                if last_200_mean > 0.9:\n                    scaled_ = np.concatenate((scaled_, filler))\n                else:\n                    scaled_ = np.concatenate((scaled_,nans))\n            total_avg_reward.append(rm(scaled_,smoothing))\n        mean = np.nanmean(total_avg_reward,axis=0)\n        maxes = mean+np.nanstd(total_avg_reward,axis=0)/np.sqrt(len(total_avg_reward))\n        mins = mean-np.nanstd(total_avg_reward,axis=0)/np.sqrt(len(total_avg_reward))\n\n        mean = chop_(mean)\n        maxes = chop_(maxes)\n        mins = chop_(mins)\n\n        ax[e,r].axvline(x=5000-smoothing+1, linestyle=\":\",color='gray')\n        ax[e,r].plot(np.arange(len(mean)),mean,color='k',alpha=0.7)\n        
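# the band shaded below is mean +/- SEM (np.nanstd divided by sqrt of the number of runs)\n        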
ax[e,r].fill_between(np.arange(len(mean)),mins,maxes,color='k', alpha=0.2)\n\n # get EC\n df_gb = df.groupby(['env_name','representation','num_trials','extra_info'])[\"save_id\"]\n id_list = list(df_gb.get_group((env+'1',rep,15000,'x')))\n print(\"EC\",env, rep, len(id_list))\n total_avg_reward = []\n for i, id_num in enumerate(id_list):\n print(id_num)\n with open(parent_path+ f'results/{id_num}_data.p', 'rb') as f:\n dats = pickle.load(f)\n raw_score = dats['total_reward']\n normalization = analysis_specs['avg_max_rwd'][env[0:22]]\n ECscaled_ = (np.asarray(raw_score)+2.5)/(normalization +2.5)\n if len(ECscaled_) < upper_limit:\n num_extras = upper_limit-len(ECscaled_)\n nans = np.zeros(num_extras)\n nans[:] = np.nan\n if list(df.loc[df['save_id']==id_num]['load_from'])[0] == ' ':\n full_scaled_ = np.concatenate((ECscaled_, nans))\n else:\n if env[-1]=='5':\n full_scaled_ = np.concatenate((nans, ECscaled_+0.07))\n else:\n full_scaled_ = np.concatenate((nans, ECscaled_))\n total_avg_reward.append(full_scaled_)\n ECmean = rm(np.nanmean(total_avg_reward,axis=0),smoothing)\n maxes = ECmean+rm(np.nanstd(total_avg_reward,axis=0),smoothing)/np.sqrt(len(total_avg_reward))\n mins = ECmean-rm(np.nanstd(total_avg_reward,axis=0),smoothing)/np.sqrt(len(total_avg_reward))\n\n ECmean = chop_(ECmean)\n maxes = chop_(maxes)\n mins = chop_(mins)\n\n ax[e,r].axvline(x=5000-smoothing+1, linestyle=\":\",color='gray')\n ax[e,r].plot(np.arange(len(ECmean)),ECmean,col_to_plot[rep])\n ax[e,r].fill_between(np.arange(len(ECmean)),mins,maxes,color=col_to_plot[rep], alpha=0.2)\n ax[e,r].set_ylim(0,1.1)\n ax[e,r].set_yticks([0,1])\n ax[e,0].set_yticklabels([0,100],fontsize=ftsz)\n ax[e,0].set_ylabel('Performance \\n(% Optimal)',fontsize=ftsz)\n\n#ax[e,0].set_xlim([5000-smoothing-50,5000+100])\n'''\nfor i in range(2):\n ax[e,i].set_xlabel('Episodes', fontsize=ftsz)\n ax[e,i].set_xticks([0,10000,20000,30000])\n ax[e,i].set_xticklabels([0,10000,20000,30000],fontsize=ftsz)\n'''\nplt.savefig(f'../figures/CH1/EC_traintest_compare_inset.svg')\nplt.show()\n\n","repo_name":"annikc/MEMRL","sub_path":"basic/Analysis/CH1/2_EC_traintest.py","file_name":"2_EC_traintest.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"5418364180","text":"class Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if s == s[::-1]: return s\n dp = [['']*len(s) for _ in range(len(s))]\n for i in range(len(s)):\n dp[i][i] = True\n if i != 0:\n dp[i][i-1] = dp[i-1][i] = (s[i] == s[i-1])\n for i in range(len(s)-1,-1,-1):\n for j in range(i+2, len(s)):\n dp[i][j] = (s[i] == s[j]) and dp[i+1][j-1]\n best = (1000, 0)\n for i in range(len(s)):\n for j in range(i, len(s)):\n if dp[i][j]:\n best = max([best, (i, j)], key=lambda x: x[1]-x[0])\n l, r = best\n return s[l:r+1]\n","repo_name":"RussellDash332/leetcode","sub_path":"Longest Palindromic Substring.py","file_name":"Longest Palindromic Substring.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18928975349","text":"import math\n\n\nclass FigureWithAngle:\n def __init__(self, *sides):\n self.sides = list(sides)\n\n def get_perimeter(self):\n return sum(self.sides)\n\n\nclass Circle:\n def __init__(self, radius):\n self.Pi = 3.14\n if type(radius) == int or type(radius) == float and radius > 0:\n self.radius = radius\n else:\n self.radius = -1\n\n def 
get_area(self):\n        if self.radius > 0:\n            return self.Pi * self.radius ** 2  # area of a circle: pi * r**2\n        else:\n            return 'Incorrect data'\n\n\nclass Triangle(FigureWithAngle):\n    def __init__(self, *sides):\n        super().__init__(*sides)\n        self.check = ''\n        try:\n            self.more_side, self.cathets = self.determining_sides()\n        except TypeError:\n            self.more_side, self.cathets = 0, (0, 0)\n            raise Exception(self.check)\n\n    @staticmethod\n    def check_data(sides):\n        if any([type(i) == str for i in sides]):\n            return 'Incorrect data. Input was a string.'\n        elif any(i <= 0 for i in sides):\n            return 'Incorrect data. Impossible value.'\n        else:\n            return 1\n\n    def get_area_by_three_sides(self):\n        self.check = self.check_data(self.sides)\n        if self.check == 1:\n            p = self.get_perimeter() / 2\n            tmp_answer = p * (p - self.more_side) * (p - self.cathets[0]) * (p - self.cathets[1])\n            try:\n                return round(math.sqrt(tmp_answer), 2)\n            except ValueError:\n                raise Exception('Incorrect triangle aspect ratio.')\n        else:\n            raise Exception(self.check)\n\n    def react_triangle(self):\n        self.check = self.check_data(self.sides)\n        if self.check == 1:\n            flag = sum([i ** 2 for i in self.cathets]) == self.more_side ** 2\n            return flag\n        else:\n            raise Exception(self.check)\n\n    def determining_sides(self):\n        cathets = list()\n        more_side = 0\n        try:\n            for i in self.sides:\n                if i > more_side and more_side != 0:  # 'and', not bitwise '&', which binds tighter than the comparisons\n                    cathets.append(more_side)\n                    more_side = i\n                elif i > more_side:\n                    more_side = i\n                else:\n                    cathets.append(i)\n            return more_side, cathets\n        except TypeError:\n            return 0, 0\n\n\ndef find_area(*args):\n    if len(args) == 1:\n        figure = Circle(args[0])\n        return figure.get_area()\n    elif len(args) == 3:\n        figure = Triangle(*args)  # unpack the sides; passing list(args) would hand Triangle a single list argument\n        return figure.get_area_by_three_sides()\n    else:\n        return 'I do not know this figure'","repo_name":"Dimentorr/AreaFigures-and-DF-","sub_path":"area_figures.py","file_name":"area_figures.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28890873868","text":"import calendar\n\nfrom datetime import datetime, timedelta\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom spaceplanner.models import Userweek, Workweek, Workstation\nfrom spaceplanner.app_logic.assigner import Assigner\nfrom spaceplanner.tables import WorkstationsScheduleTable\n\ndef generate_nonexistent_userweeks(user, first_monday, last_monday) -> int:\n    week_counter = 0\n    while first_monday != last_monday:\n        calendar = first_monday.isocalendar()\n        Userweek.objects.get_or_create(employee=user, year=calendar[0], week=calendar[1])\n        for workstation in Workstation.objects.all():\n            Workweek.objects.get_or_create(workstation=workstation, year=calendar[0], week=calendar[1])\n        first_monday = first_monday + timedelta(weeks=1)\n        week_counter = week_counter + 1\n    return week_counter\n\ndef editweek_form_processing(editform, user):\n    userweek = editform.save(commit=False)\n    for weekday in list(calendar.day_name):\n        workstation = getattr(userweek, weekday)\n        if workstation:\n            workweek, created = Workweek.objects.get_or_create(\n                workstation = workstation, week = userweek.week, year = userweek.year)\n            if not getattr(workweek, weekday):\n                setattr(workweek, weekday, user)\n                workweek.save()\n            else:\n                setattr(userweek, weekday, None)\n    userweek.save()\n\ndef generateweek_form_processing(generateform, userweek: Userweek, user, this_week_flag:bool):\n    weekdays = generateform.cleaned_data.get('weekdays')\n    schedule = dict()\n    cleared_days = list(calendar.day_name)\n    if this_week_flag:\n        for 
weekday in list(calendar.day_name):\n if list(calendar.day_name).index(weekday) < datetime.today().weekday():\n cleared_days.remove(weekday)\n else:\n break\n clear_workweek(userweek, list(cleared_days))\n clear_userweek(userweek, list(cleared_days))\n assigner = Assigner()\n schedule = assigner.assign_week(user, weekdays, userweek.week, userweek.year)\n wrong_weekdays = assign_user_to_workstation(userweek, schedule)\n return wrong_weekdays\n\ndef generate_unscheduled_days_message(wrong_weekday: list):\n message = _(\"Following days could not be scheduled: \")\n for day in wrong_weekday:\n message = message + _(day) + \", \"\n message = message[:-2]\n return message\n\ndef assign_user_to_workstation(userweek, schedule: dict()):\n user = userweek.employee\n wrong_weekdays = []\n for day in schedule.keys():\n if schedule[day]:\n setattr(schedule[day], day, user)\n setattr(userweek, day, schedule[day].workstation)\n userweek.save()\n schedule[day].save()\n else: \n wrong_weekdays.append(day)\n return wrong_weekdays\n\ndef clear_workweek(userweek, cleared_days: list):\n for weekday in cleared_days:\n workstation = getattr(userweek, weekday)\n if workstation:\n workweek, created = Workweek.objects.get_or_create(workstation = workstation,\n week = userweek.week, year = userweek.year)\n setattr(workweek, weekday, None)\n workweek.save()\n\ndef clear_userweek(userweek, cleared_days: list):\n for weekday in cleared_days:\n setattr(userweek, weekday, None)\n userweek.save()\n\ndef get_schedule_week_table(monday):\n workstations = Workstation.objects.all()\n isocalendar = monday.isocalendar()\n date_range = monday.strftime('%Y/%m/%d') + \" - \" + (monday + timedelta(days=6)).strftime('%Y/%m/%d') + \", \" + str(isocalendar[1]) + '|' + str(isocalendar[0])\n data = [Workweek.objects.get_or_create(workstation=x, week=isocalendar[1], year=isocalendar[0])[0] for x in workstations]\n table = WorkstationsScheduleTable(data)\n return date_range, table\n","repo_name":"prznoc/osplanner","sub_path":"spaceplanner/app_logic/views_processing.py","file_name":"views_processing.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17797615060","text":"from django.shortcuts import render, get_object_or_404, redirect\n\nfrom .models import Product, Opencv\nfrom .forms import ProductForm, RawProductForm, FormOpencv\n\n# Create your views here.\n\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport math\nimport os\nimport glob\nimport cv2\nfrom imageai.Detection import ObjectDetection\n\ndef recebe_imagem(obj):\n imagem_db = obj.imagem\n print(imagem_db)\n imagem_read = imagem_db.read() # type 'str'\n imagem_np = np.asarray(bytearray(imagem_read), dtype=\"uint8\") # type 'numpy.ndarray'\n imagem_op = cv2.imdecode(imagem_np, cv2.IMREAD_COLOR) # type 'numpy.ndarray'\n\n # retorna imagem para procesamento\n return imagem_op\n\ndef detec_view(request):\n form = FormOpencv(request.POST or None, request.FILES or None)\n if form.is_valid():\n\n obj = Opencv(imagem = request.FILES['imagem'])\n print(obj)\n imagem = recebe_imagem(obj)\n\n\n model_weight_path = \"resnet50_v2.0.1.h5\"\n detector = ObjectDetection()\n detector.setModelTypeAsRetinaNet()\n detector.setModelPath(model_weight_path)\n detector.loadModel()\n\n img = imagem.copy()\n detections = detector.detectObjectsFromImage(input_type = \"array\", input_image = img, minimum_percentage_probability = 90)\n #print(detections)\n for det in detections:\n 
cv2.rectangle(img, tuple(det[\"box_points\"][:2]), tuple(det[\"box_points\"][2:]), (0, 0, 0), 5)\n cv2.putText(img, \"{} - {}\".format(det[\"name\"], det[\"percentage_probability\"]), tuple(det[\"box_points\"][:2]), cv2.FONT_HERSHEY_PLAIN, 5,(0,0,255),3,cv2.LINE_AA)\n\n cv2.imwrite(\"products/static/img/original.png\", imagem)\n cv2.imwrite(\"products/static/img/canny.png\", img)\n\n data = True\n return render(request, 'filtros.html', {'form': form, 'data': data})\n\n else:\n form = FormOpencv()\n\n return render(request, 'products/filtros.html', {'form': FormOpencv})\n\n\n# treino youtube tutorial\n\ndef product_list_view(request):\n queryset = Product.objects.all()\n context = {\n \"object_list\": queryset\n }\n return render(request, \"products/product_list.html\", context)\n\ndef product_delete_view(request, my_id):\n obj = get_object_or_404(Product, id = my_id)\n print(obj.title)\n # print(request, my_id)\n # print(request.method)\n if request.method == \"POST\":\n # print(\"BLA2\")\n obj.delete()\n return redirect('../../')\n context = {\n \"object\": obj\n }\n return render(request, \"products/product_dynamic_delete.html\", context)\n\ndef dynamic_lookup_view(request, my_id, *args, **kwargs):\n #obj = Product.objects.get(id = my_id)\n obj = get_object_or_404(Product, id = my_id)\n context = {\n \"object\": obj\n }\n return render(request, \"products/product_detail.html\", context)\n\n\ndef product_create_view(request, *args, **kwargs):\n my_form = RawProductForm()\n print(\"BLA\")\n if request.method == \"POST\":\n print(\"BLA2\")\n my_form = RawProductForm(request.POST)\n if my_form.is_valid():\n print(my_form.cleaned_data)\n Product.objects.create(**my_form.cleaned_data)\n else:\n print(my_form.errors)\n context = {\n \"form\": my_form\n }\n return render(request, \"products/product_create.html\", context)\n\n\n\ndef product_detail_view(request, *args, **kwargs):\n obj = Product.objects.get(id = 1)\n # context = {\n # \"title\": obj.title,\n # \"description\": obj.description\n # }\n context = {\n \"object\": obj\n }\n return render(request, \"products/product_detail.html\", context)","repo_name":"rafaelmcam/DetectorObjetos-Django","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2951830725","text":"from PyQt5.QtWidgets import QMainWindow, QApplication, QToolButton, QSizePolicy, QLineEdit, QGridLayout, QLayout, QVBoxLayout, QWidget\nfrom PyQt5.QtCore import Qt\n\noperator = [\"+\", \"-\", \"*\", \"/\", \"=\"]\n\n\nclass App(QMainWindow):\n def __init__(self): # 클래스가 생성되면 가장 먼저 실행되는 초기화 영역. 
__init__() 함수가 호출 될 때 클래스에서 사용되는 변수 같은걸 초기화\n super().__init__() # 부모를 상속받아서 만들어진 자식클래스에서 부모 클래스의 생성자를 호출할 때 사용하는 메소드가 super()\n \"\"\"\n 부모의 클래스에서 사용하던 어떤 변수들이 초기화 되지 않으면 부모 클래스가 정상적으로 동작하지 않기 때문에 대부분 상속받은 자식 클래스의 입장에서는 부모 클래스의 초기화 함수를 수행해야 함.\n super()메소드를 활용해서 부모클래스의 초기화 함수 부분을 먼저 수행하고, 자식 클래스에서 자신의 초기화 영역을 수행하는게 일반적.\n \"\"\"\n\n self.title = \"계산기\"\n self.setWindowTitle(self.title)\n\n self.left = 100\n self.top = 200\n self.width = 300\n self.height = 200\n self.setGeometry(self.left, self.top, self.width, self.height)\n \n self.table_widget = MyCalculator()\n self.setCentralWidget(self.table_widget)\n \n self.show()\n\n\nclass Button(QToolButton):\n def __init__(self, text):\n super().__init__()\n buttonStyle = '''\n QToolButton:hover {border:1px solid #0078d7; background-color:#e5f1fb;}\n QToolButton:pressed {background-color:#a7c8e3}\n QToolButton {font-size:11pt; font-family:NaNum Gothic; border:1px solid #d6d7d8; background-color#f0f1f1}\n '''\n self.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Preferred) # 버튼의 사이즈 정책을 결정.\n self.setText(text)\n self.setStyleSheet(buttonStyle)\n\n def sizeHint(self):\n size = super(Button, self).sizeHint()\n size.setHeight(size.height() + 30)\n size.setWidth(max(size.width(), size.height()))\n return size\n\n\nclass MyCalculator(QWidget):\n def __init__(self):\n super().__init__()\n\n self.waitingForOperand = True\n self.input_history = \"\"\n self.input_temporary = \"\"\n\n self.display = QLineEdit(\"0\") # 기본으로 0이 표시되게.\n self.display.setReadOnly(True) # 입력이 되지 않도록\n self.display.setAlignment(Qt.AlignRight) # 우측으로 정렬\n self.display.setMaxLength(15)\n self.display.setStyleSheet(\n \"border:0px; font-size:20pt; font-family:Nanum Gothic; font-weight:bold; padding:10px\")\n\n gridLayout = QGridLayout()\n gridLayout.setSizeConstraint(QLayout.SetFixedSize)\n\n # 버튼 생성\n self.clearButton = self.createButton(\"CE\", self.clear)\n self.clearAllButton = self.createButton(\"C\", self.clearAll)\n self.backButton = self.createButton(\"Back\", self.backDelete)\n self.divButton = self.createButton(\"/\", self.clickButtons)\n self.multiplyButton = self.createButton(\"*\", self.clickButtons)\n self.minusButton = self.createButton(\"-\", self.clickButtons)\n self.plusButton = self.createButton(\"+\", self.clickButtons)\n self.equalButton = self.createButton(\"=\", self.clickButtons)\n self.dotButton = self.createButton(\".\", self.clickButtons)\n self.reverseButton = self.createButton(\"R\", self.reverse)\n\n # 버튼 배치\n # 뒤에 1,1 은 한칸을 차지하겠다는 뜻. 앞의 두개는 row,col 좌표라고 생각하면 됨.\n gridLayout.addWidget(self.clearButton, 0, 0, 1, 1)\n gridLayout.addWidget(self.clearAllButton, 0, 1, 1, 1)\n gridLayout.addWidget(self.backButton, 0, 2, 1, 1)\n gridLayout.addWidget(self.divButton, 0, 3, 1, 1)\n gridLayout.addWidget(self.multiplyButton, 1, 3, 1, 1)\n gridLayout.addWidget(self.minusButton, 2, 3, 1, 1)\n gridLayout.addWidget(self.plusButton, 3, 3, 1, 1)\n gridLayout.addWidget(self.equalButton, 4, 3, 1, 1)\n gridLayout.addWidget(self.dotButton, 4, 2, 1, 1)\n gridLayout.addWidget(self.reverseButton, 4, 0, 1, 1)\n\n # 숫자 생성. 
이건 일종의 공식으로 생각해주면 됨.\n self.digitButtons = []\n for i in range(10):\n self.digitButtons.append(\n self.createButton(str(i), self.clickButtons))\n\n gridLayout.addWidget(self.digitButtons[0], 4, 1, 1, 1)\n\n for i in range(1, 10):\n row = int(((9 - i) / 3) + 1)\n col = ((i - 1) % 3)\n gridLayout.addWidget(self.digitButtons[i], row, col, 1, 1)\n\n layout = QVBoxLayout()\n layout.addWidget(self.display)\n layout.addLayout(gridLayout)\n self.setLayout(layout)\n self.setWindowTitle(\"계산기\")\n self.setGeometry(300, 300, 300, 400)\n\n def clear(self):\n if self.waitingForOperand:\n return\n self.display.setText(\"0\")\n self.input_temporary = \"\"\n self.waitingForOperand = True\n\n def clearAll(self):\n self.display.setText(\"0\")\n self.input_temporary = \"\"\n self.input_history = \"\"\n self.waitingForOperand = True\n\n def backDelete(self):\n if self.waitingForOperand:\n return\n text = self.display.text()[:-1]\n self.input_temporary = text\n if not text: # text 가 없는 경우 ( ex. 9하나 입력하고 백 눌렀을 땐 남은 텍스트가 없으니 이런 경우에 해당 )\n text = \"0\"\n self.input_temporary = \"\"\n self.waitingForOperand = True\n self.display.setText(text)\n\n def reverse(self): # 음수 양수를 변환\n text = self.display.text()\n value = float(text)\n if value > 0.0:\n text = \"-\" + text\n elif value < 0.0:\n text = text[1:]\n self.display.setText(text)\n self.input_temporary = text\n\n def clickButtons(self):\n clickedButton = self.sender()\n digitValue = clickedButton.text()\n self.processKeyValue(digitValue)\n\n def processKeyValue(self, digitValue):\n if digitValue == \"=\":\n if self.calculator():\n self.waitingForOperand = True\n elif digitValue == \"+\" or digitValue == \"-\" or digitValue == \"*\" or digitValue == \"/\":\n if self.waitingForOperand: # 연산자 입력 상태에서 대기중인 경우에 연산자가 또 입력되면\n self.replaceLastOperator(digitValue)\n else:\n self.inputHistory(digitValue)\n self.calculator()\n self.waitingForOperand = True\n elif digitValue == \".\":\n if self.waitingForOperand: # 연산자를 입력 한 상태라는 뜻\n self.display.setText(\"0\")\n if \".\" not in self.display.text():\n self.display.setText(self.display.text() + \".\")\n self.inputHistory(str(\".\"))\n self.waitingForOperand = False # 연산자 대기 상태로 바꾼다.\n else: # 숫자인 상황\n keyvalue = ord(digitValue)\n if keyvalue >= 48 and keyvalue <= 57:\n if self.display.text() == \"0\" and digitValue == \"0.0\":\n return\n # 연산자를 입력한 후 숫자를 누르면 기존 display 는 사라지고 새로 입력한 숫자가 display 돼야 함.\n if self.waitingForOperand:\n self.display.clear()\n self.waitingForOperand = False # 숫자가 연속으로 입력이 가능해짐\n self.display.setText(self.display.text() + digitValue)\n self.inputHistory(str(digitValue))\n\n def keyPressEvent(self, e):\n if e.key() == Qt.Key_Backspace:\n self.backDelete()\n elif e.key() == Qt.Key_Enter:\n self.processKeyValue(\"=\")\n elif e.key() >= 47 and e.key() <= 57:\n self.processKeyValue(chr(e.key()))\n elif e.key() == 42 or e.key() == 43 or e.key() == 45 or e.key() == 46:\n self.processKeyValue(chr(e.key()))\n\n def string_calculator(self, user_input):\n string_list = []\n lop = 0\n\n if user_input[-1] not in operator:\n user_input += \"=\"\n\n for i, s in enumerate(user_input): # enumerate를 사용하여 Index 번호까지 가져오기\n if s in operator:\n if user_input[lop:i].strip() != \"\":\n string_list.append(user_input[lop:i])\n string_list.append(s)\n lop = i + 1\n\n string_list = string_list[:-1]\n\n pos = 0\n while True:\n if pos + 1 > len(string_list):\n break\n if len(string_list) > pos + 1 and string_list[pos] in operator:\n temp = string_list[pos - 1] + \\\n string_list[pos] + string_list[pos + 1]\n del 
string_list[0:3]\n string_list.insert(0, str(eval(temp)))\n pos = 0\n pos += 1\n if len(string_list) > 0:\n result = float(string_list[0])\n\n return round(result, 4)\n\n def calculator(self):\n operator_check = False\n for i in self.input_history:\n if i in operator:\n operator_check = True\n break\n if not operator_check:\n return False\n else:\n self.input_history += self.input_temporary\n self.input_temporary = \"\"\n self.display.setText(\n str(self.string_calculator(self.input_history)))\n return True\n\n def inputHistory(self, value):\n digitValue = str(value)\n if digitValue in operator: # 사용자의 입력값이 연산자인 경우에는\n if self.input_history[-1:] in operator:\n # 연산자가 입력 된 시점에서 temporary 에 기억되어 있는 숫자와 연산 기호를 합쳐서 input_history 에 넣거나\n self.input_history += self.input_temporary + digitValue\n else:\n # 아무것도 입력되어있지 않은 상황.\n self.input_history = self.input_temporary + digitValue\n self.input_temporary = \"\"\n else:\n self.input_temporary += str(digitValue)\n\n def replaceLastOperator(self, value):\n # 처음부터 마지막 글자를 뺀거에 새로운 연산자를 더하는 것.\n self.input_history = self.input_history[:-1] + str(value)\n\n def createButton(self, text, function):\n button = Button(text)\n button.clicked.connect(function)\n return button\n\n\napp = QApplication([])\ncalc = App()\napp.exec_()\n","repo_name":"hj8853-zz/wecode_pre_study_2","sub_path":"gui_calculator.py","file_name":"gui_calculator.py","file_ext":"py","file_size_in_byte":10894,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26232683114","text":"import sys\nimport time\n\nimport src.const as const\nimport src.method_const as mconst\n\n\ndef define_neighbor(th_opt, Delta):\n \"\"\"\n It returns the range of possible values for the next threshold.\n\n @param th_opt The current optimal threshold.\n @param Delta The maximum distance between two consecutive thresholds.\n\n @return The start and end of the neighbor set.\n \"\"\"\n neighbor_start = th_opt[-1] - Delta\n neighbor_end = th_opt[-1] + Delta + 1\n\n if len(th_opt) > 1 and th_opt[-2] >= th_opt[-1] - Delta:\n neighbor_start = th_opt[-2] + 1\n\n if neighbor_start < 1:\n neighbor_start = 1\n\n if neighbor_end >= const.B:\n neighbor_end = const.B\n\n return neighbor_start, neighbor_end\n\n\ndef exhaustive_search_2pstates(Lambda, Alpha, pstates, time_response_min, time_response_max, power_min, power_max):\n \"\"\"\n It finds the optimal threshold for the first two p-states.\n\n @param Lambda The arrival rate of the jobs.\n @param Alpha The weight of the response time in the objective function.\n @param pstates List of pstates.\n @param time_response_min Minimum time response.\n @param time_response_max The maximum time response of the system.\n @param power_min Minimum Power consumption of the system.\n @param power_max Maximum Power consumption of the system.\n\n @return The optimal threshold and the optimal value of the objective function.\n \"\"\"\n th_opt, val_opt = [], 1e5\n maxBorder = const.B - (len(pstates) - 2)\n for th1 in range(1, maxBorder):\n threshold = [th1]\n obj_func = mconst.find_obj_func(Lambda, Alpha, pstates[:2], threshold, time_response_min, time_response_max,\n power_min, power_max)\n if obj_func < val_opt:\n val_opt = obj_func\n th_opt = threshold\n\n return val_opt, th_opt\n\n\ndef search(Lambda, Alpha, pstates, Delta):\n \"\"\"\n It's a greedy algorithm that tries to find the best threshold for each iteration.\n\n @param Lambda The arrival rate of the jobs.\n @param Alpha The weight of the response time in the objective 
function.\n    @param pstates List of power states.\n    @param Delta The maximum distance between two consecutive thresholds.\n\n    @return The optimal threshold vector, the optimal objective function value,\n    and the time taken to run the algorithm.\n    \"\"\"\n    n = len(pstates)\n    if n < 2 or n > 6:\n        sys.exit(\"This is not a valid pstates model!\")\n\n    start_timer = time.perf_counter()\n    time_response_min, time_response_max, power_min, power_max = mconst.solve_time_and_power_dvfs(Lambda, pstates)\n    val_opt, th_opt = exhaustive_search_2pstates(Lambda, Alpha, pstates, time_response_min, time_response_max,\n                                                 power_min, power_max)\n    mconst.min_vector_greedy.append((th_opt.copy(), val_opt))\n\n    for i in range(3, n + 1):\n        val_opt = 1e5\n        neighbor_start, neighbor_end = define_neighbor(th_opt, Delta)\n        th_opt_temp = th_opt.copy()\n        for neighbor in range(neighbor_start, neighbor_end):\n            if neighbor != neighbor_start:\n                th_opt_temp.pop()\n\n            th_opt_temp.pop(), th_opt_temp.append(neighbor), th_opt_temp.append(-1)\n            max_border = const.B - (n - i)\n            for thi in range(neighbor + 1, max_border):\n                th_opt_temp.pop(), th_opt_temp.append(thi)\n                obj_func = mconst.find_obj_func(Lambda, Alpha, pstates[0:i], th_opt_temp, time_response_min,\n                                                time_response_max, power_min, power_max)\n\n                if obj_func < val_opt:\n                    val_opt = obj_func\n                    th_opt = th_opt_temp.copy()\n        mconst.min_vector_greedy.append((th_opt.copy(), val_opt))\n\n    end_timer = time.perf_counter()\n    return th_opt, val_opt, end_timer - start_timer\n","repo_name":"KarmaEssence/DVFS_Research","sub_path":"src/algorithms/wsm/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12846619037","text":"from django.test import TestCase\nfrom django.test.utils import override_settings\n\nimport shanghai\nfrom shanghai.resources import ModelResource\n\n\nclass AutodiscoverTestCase(TestCase):\n\n    def setUp(self):\n        self.api = shanghai.api\n\n    def test_app_should_have_registered_resources(self):\n        articles = self.api.resource_for('articles')\n        categories = self.api.resource_for('categories')\n\n        self.assertIsNotNone(articles)\n        self.assertIsNotNone(categories)\n\n        self.assertIsInstance(articles, ModelResource)\n        self.assertIsInstance(categories, ModelResource)\n\n\nclass AuthResourcesTestCase(TestCase):\n\n    def test_app_should_have_auth_resources_by_default(self):\n        self.assertIsNotNone(shanghai.api.resource_for('groups'))\n        self.assertIsNotNone(shanghai.api.resource_for('users'))\n\n    @override_settings(SHANGHAI_AUTH_RESOURCES=False)\n    def test_app_should_not_have_auth_resources(self):\n        api = shanghai.Shanghai()\n\n        shanghai.discover(api)\n\n        self.assertIsNone(api.resource_for('groups'))\n        self.assertIsNone(api.resource_for('users'))\n","repo_name":"bobisjan/django-shanghai","sub_path":"tests/integration/autodiscover.py","file_name":"autodiscover.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31745813821","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\n# Defining our rotation angle\nsentido = 1\nangle = 0\n\n# Defining the size of our window\nwidth, height = 500, 500\nmx, my = 0, 0\nax, ay = 250, 250\nstart = 0\n\n# Defining the points of our pentagon\nvertices = [[-20, -20], # x + 0 , y + 0\n            [20, -20],  # x + 40, y + 0\n            [30, 10],   # x + 50, y + 30\n            [0, 30],    # x + 20, y + 50\n            [-30, 10]]  # x - 10, y + 30\n\n# Callback function for our glutTimerFunc\ndef update(value):\n    global angle\n\n    if start == 1:\n        angle += 1 * sentido\n        angle %= 360\n\n    glutPostRedisplay()\n    glutTimerFunc(10, update, 0)\n\n# Function that initializes our 2D drawing (projection setup)\ndef init2D():\n    glMatrixMode(GL_PROJECTION)\n    glLoadIdentity()\n    glOrtho(0, width, 0, height, 0, 1)\n\n# Function that rotates our pentagon about a given pivot point\ndef myRotate(x, y):\n    glTranslate(x, y, 0)\n    glRotate(angle, 0, 0, 1)\n    glTranslate(-x, -y, 0)\n\n# Callback function that handles our mouse events\ndef mouseFunc(button, state, x, y):\n    global start, sentido, mx, my\n    correct_y = abs(y - height)\n\n    # Choose the spin direction based on which button was pressed\n    if button == GLUT_LEFT_BUTTON:\n        sentido = -1\n    \n    elif button == GLUT_RIGHT_BUTTON:\n        sentido = 1\n\n    if state == GLUT_UP:\n        start = 1\n        mx, my = x, correct_y\n\n# Callback function that handles keyboard events\ndef keyboardFunc(key, x, y):\n    global start\n    if key == b' ':\n        start = 0\n    \n# Function that draws a pentagon\ndef drawPentagon():\n    init2D()\n\n    myRotate(mx, my)\n    glTranslate(ax, ay, 0)\n    \n    glBegin(GL_POLYGON)\n    for v in vertices:\n        glVertex2fv(v)\n    glEnd()\n\n# Main loop that performs the drawing\ndef draw():\n    glClear(GL_COLOR_BUFFER_BIT)\n    glLoadIdentity()\n\n    glutMouseFunc(mouseFunc)\n    glutKeyboardFunc(keyboardFunc)\n\n    glColor3f(1.0, 1.0, 1.0)\n    drawPentagon()\n\n    glutSwapBuffers()\n\n# Main function\ndef main():\n    glutInit()\n    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)  # double buffering to match glutSwapBuffers()\n\n    glutInitWindowSize(width, height)\n    glutInitWindowPosition(0, 0)\n    glutCreateWindow('TP1')\n\n    glutTimerFunc(10, update, 0)\n    glutDisplayFunc(draw)\n    glutIdleFunc(draw)\n    glutMainLoop()\n\n# Calling our main function\nif __name__ == '__main__':\n    main()","repo_name":"ThiagoPoppe/CC-UFMG","sub_path":"Semestres/Periodo4/CG/PyOpenGL/MiniTPS/TP1/tp1.py","file_name":"tp1.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8684269146","text":"import json\nfrom src.adapter_transportation import MIAdapter\nfrom src.enhancer_microservice import Enhancer\nfrom src.model_training import TrainModel\nimport joblib\n\n# When opening the file - make sure to adjust the path to your own location of the tnm.json file\n# f = open('/Users/rasmushenriksen/Desktop/BACHELOR-SOFTWARE/femte_og_sjette/femte_semester/Project/Python/src/Data_set/tnm.json')\ndef train_model_on_TNM(tnm):\n    a = MIAdapter(tnm)\n\n    # Convert TNM to Pandas dataframe\n    print(\" -------------------- CONVERTING TO DATA FRAME --------------------\\n\")\n    df = a.from_json()\n    \n\n    # Enhance the DataFrame\n    print(\" 
-------------------- ENHANCING DATA FRAME --------------------\\n\")\n e = Enhancer(a.meta_data)\n df = e.remove_column(df,\"daily_trucks\")\n # Load the model\n \n\n print(\" -------------------- PREDICTING --------------------\\n\")\n\n\n # Make predictions\n predicted_weights = model.predict(df)\n print(\" -------------------- DONE PREDICTING --------------------\\n\")\n if len(predicted_weights) != df.shape[0]:\n print(\"ERROR!!!!\")\n exit(0)\n\n numbers = {}\n for (index, row), weight in zip(df.iterrows(), predicted_weights): \n numbers[str(row['id'])] = weight\n\n # Add weights to TNM\n print(\" -------------------- ADDING WEIGHTS TO TNM --------------------\\n\")\n updated_tnm = a.to_json(numbers)\n\n # Creates an updated TNM file with edge weights\n print(\" -------------------- CREATING UPDATED TNM FILE --------------------\\n\")\n with open(\"updated_tnm.json\", \"w\") as outfile:\n json.dump(updated_tnm, outfile, indent=4, sort_keys=False)\n\n return updated_tnm\n\ndef translate(tnm):\n parsed = json.loads(tnm,strict = False)\n with open(\"new_tnm.json\", \"w\") as outfile:\n json.dump(parsed, outfile, indent=4, sort_keys=False)\n return \"json file created\"","repo_name":"saxjax/TNMWeighterDailyTrucksCount","sub_path":"src/heavy_weighter.py","file_name":"heavy_weighter.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40275338541","text":"from discord.ext import commands\n\nadmins = []\n\n\ndef is_staff():\n async def predicate(ctx):\n for id in admins:\n if ctx.author.id == id:\n print('Parsed that admin! Matched ' + str(ctx.author.id) + ' to ' + str(id))\n return True\n return False\n\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n def __init__(self, bot, provided_admins):\n self.bot = bot\n global admins\n admins = provided_admins\n\n @commands.command(\n name='stop',\n aliases=['shutdown'],\n description='This is a staff only command to stop the bot'\n )\n @is_staff()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send('Oh, alright... I\\'ll just shutup I guess.. 
:wave:')\n await self.bot.close()\n","repo_name":"iBlitzkriegi/League-Bot","sub_path":"commands/StaffCommands.py","file_name":"StaffCommands.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70974761225","text":"import pygame, sys\n\npygame.init()\nDISPLAY_SURFACE = pygame.display.set_mode((400, 300))\npygame.display.set_caption('Hello World')\n\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 128)\n\nfontObj = pygame.font.Font('freesansbold.ttf', 32)\ntextSurfaceObj = fontObj.render('Hello World!', True, GREEN, BLUE)\ntextRectObj = textSurfaceObj.get_rect()\ntextRectObj.center = (200, 150)\n\n# run the game loop\nwhile True:\n DISPLAY_SURFACE.fill(WHITE)\n DISPLAY_SURFACE.blit(textSurfaceObj, textRectObj)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n pygame.display.update()\n","repo_name":"huangyiqianlin/PythonGames","sub_path":"Start/FontText.py","file_name":"FontText.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1012658058","text":"import os\nimport shutil\nimport unittest\n\nfrom dotenv import load_dotenv\nfrom reposcorer.scorer import score_repository\n\n\nclass AttributesTestCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n load_dotenv()\n cls.path_to_repo = os.path.join('test_data', 'ANXS', 'postgresql')\n\n def test_score_repository(self):\n scores = score_repository(\n path_to_repo=self.path_to_repo,\n host='github',\n full_name='ANXS/postgresql',\n calculate_comments_ratio=True,\n calculate_commit_frequency=True,\n calculate_core_contributors=True,\n calculate_has_ci=True,\n calculate_has_license=True,\n calculate_iac_ratio=True,\n calculate_issue_frequency=False,\n calculate_repository_size=True\n )\n self.assertAlmostEqual(scores['commit_frequency'], 6.5, 0)\n # self.assertEqual(scores['issue_frequency'], 0)\n self.assertEqual(scores['core_contributors'], 14)\n self.assertTrue(scores['has_ci'])\n self.assertTrue(scores['has_license'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"radon-h2020/radon-repository-scorer","sub_path":"tests/test_scorer.py","file_name":"test_scorer.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"6880188193","text":"# -*- coding: utf-8 -*-\nimport tensorflow.compat.v1 as tf\nimport numpy as np\nimport cv2\n\ndef LossL2(tensor1, tensor2):\n return tf.reduce_mean(tf.square(tensor1 - tensor2))\n\ndef LossL1(tensor1, tensor2):\n return tf.reduce_mean(tf.abs(tensor1 - tensor2))\n\ndef img_grad(inputs, name='img_grad'):\n with tf.variable_scope(name):\n inputs = inputs / 1000 * 0.0192\n\n kernel_hor_init = np.reshape(np.array([[-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1], ], dtype=np.float32),\n [3, 3, 1, 1])\n\n kernel_ver_init = np.reshape(np.array([[-1, -2, -1],\n [0, 0, 0],\n [1, 2, 1], ], dtype=np.float32),\n [3, 3, 1, 1])\n\n kernel_hor = tf.concat([kernel_hor_init] * inputs.shape[-1], 2)\n kernel_ver = tf.concat([kernel_ver_init] * inputs.shape[-1], 2)\n\n grad_h = tf.nn.depthwise_conv2d(inputs, kernel_hor, [1, 1, 1, 1], 'SAME')\n grad_v = tf.nn.depthwise_conv2d(inputs, kernel_ver, [1, 1, 1, 1], 'SAME')\n\n grad = 1 / tf.rsqrt(tf.square(grad_h) + tf.square(grad_v))\n\n # grad = tf.clip_by_value(grad, 100, 1300, name + '_clip')\n\n # mask = 
tf.ones_like(grad)\n # mask = tf.where(grad < 100, 0 * mask, mask)\n # # mask = tf.where(grad > 500, 0 * mask, mask)\n # grad = grad * mask\n #\n # grad = tf.clip_by_value(grad, 0, 250, name + '_clip')\n\n return grad\n\ndef LossGrad(tensor1, tensor2):\n return LossL1(img_grad(tensor1), img_grad(tensor2))\n\ndef _tf_fspecial_gauss(size, sigma):\n \"\"\"Function to mimic the 'fspecial' gaussian MATLAB function\n \"\"\"\n x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]\n\n x_data = np.expand_dims(x_data, axis=-1)\n x_data = np.expand_dims(x_data, axis=-1)\n\n y_data = np.expand_dims(y_data, axis=-1)\n y_data = np.expand_dims(y_data, axis=-1)\n\n x = tf.constant(x_data, dtype=tf.float32)\n y = tf.constant(y_data, dtype=tf.float32)\n\n g = tf.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n return g / tf.reduce_sum(g)\n\ndef tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5):\n window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]\n max_temp = tf.reduce_max(img2)\n img1 = img1 / max_temp\n img2 = img2 / max_temp\n K1 = 0.01\n K2 = 0.03\n L = 1 # depth of image (255 in case the image has a differnt scale)\n C1 = (K1 * L) ** 2\n C2 = (K2 * L) ** 2\n mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')\n mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')\n mu1_sq = mu1 * mu1\n mu2_sq = mu2 * mu2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = tf.nn.conv2d(img1 * img1, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_sq\n sigma2_sq = tf.nn.conv2d(img2 * img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu2_sq\n sigma12 = tf.nn.conv2d(img1 * img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_mu2\n if cs_map:\n value = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2)),\n (2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))\n else:\n value = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2))\n\n if mean_metric:\n value = tf.reduce_mean(value)\n return value\n\ndef tf_ms_ssim(img1, img2, mean_metric=True, level=5):\n weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)\n mssim = []\n mcs = []\n for l in range(level):\n ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)\n mssim.append(tf.reduce_mean(ssim_map))\n mcs.append(tf.reduce_mean(cs_map))\n filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')\n img1 = filtered_im1\n img2 = filtered_im2\n\n # list to tensor of dim D+1\n mssim = tf.stack(mssim, axis=0)\n mcs = tf.stack(mcs, axis=0)\n\n value = (tf.reduce_prod(mcs[0:level - 1] ** weight[0:level - 1]) *\n (mssim[level - 1] ** weight[level - 1]))\n\n if mean_metric:\n value = tf.reduce_mean(value)\n return value\n\ndef GaussianSmooth(input, kernelsize=11, sigma=3.5):\n # inputs: batch, width, height, channel\n f = np.multiply(cv2.getGaussianKernel(kernelsize, sigma), np.transpose(cv2.getGaussianKernel(kernelsize, sigma)))\n # kernel = tf.reshape(tf.float32(f), [kernelsize, kernelsize, 1, 1], 'kernel')\n\n # f = np.array([[0.0025, 0.0125, 0.0200, 0.0125, 0.0025],\n # [0.0125, 0.0625, 0.1000, 0.0625, 0.0125],\n # [0.0200, 0.1000, 0.1600, 0.1000, 0.0200],\n # [0.0125, 0.0625, 0.1000, 0.0625, 0.0125],\n # [0.0025, 0.0125, 0.0200, 0.0125, 0.0025]], dtype=np.float32)\n kernel = tf.reshape(np.float32(f), [kernelsize, kernelsize, 1, 
1], 'kernel')\n\n low = tf.nn.conv2d(input, kernel, strides=[1, 1, 1, 1], name='f1', padding='SAME')\n high = input - low\n return high","repo_name":"YKZhangSEU/TIME-Net","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6241969730","text":"import socket, struct, time\nimport sys\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n host = input(\"Enter host IP address: \")\n port = input(\"Enter port: \")\n sock.connect((host, int(port)))\n\n ping_count = input(\"Enter ping count: \")\n seq_num = 0\n while seq_num != int(ping_count):\n try:\n seq_num += 1\n pdata = struct.pack(\"!Hd\", seq_num, time.time())\n sock.send(pdata)\n data = sock.recv(1024)\n current_time = time.time()\n (seq, timestamp) = struct.unpack(\"!Hd\", data)\n dif_time = (current_time - timestamp)*1000\n print(\"seg_number = %u, diff = %.3f ms\" % (seq, dif_time))\n time.sleep(1)\n except Exception:\n e = sys.exc_info()[1]\n print(e.args[0])","repo_name":"miron50/python_ping","sub_path":"client_part.py","file_name":"client_part.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11219502598","text":"import json\nimport sys\nimport pip\nimport platform\nimport time\nimport os\n\nLocalMachineOS = platform.system()\npythonVersion = sys.version_info\nif LocalMachineOS == \"Darwin\" or LocalMachineOS == \"Linux\":\n os.system(\"clear\")\nelif (\n LocalMachineOS == \"win32\"\n or LocalMachineOS == \"win64\"\n or LocalMachineOS == \"Windows\"\n):\n os.system(\"cls\")\n\ndef updateDirectory(dir):\n try:\n with open(dir + \"codes.json\") as f:\n codes = json.load(f)\n with open(dir + \"users.json\") as f:\n userData = json.load(f)\n with open(dir + \"bot.json\") as f:\n botToken = json.load(f)\n with open(dir + \"economy.json\") as f:\n economy = json.load(f)\n with open(\"codes.json\") as f:\n codesNew = json.load(f)\n with open(\"users.json\") as f:\n userDataNew = json.load(f)\n with open(\"bot.json\") as f:\n botTokenNew = json.load(f)\n with open(\"economy.json\") as f:\n economyNew = json.load(f)\n with open('main.py', encoding=\"utf8\") as f: \n mainNew = f.read()\n\n # exchange conversion center\n\n # bot\n if testIfVariableExists(botToken, \"Token\"):\n botTokenNew[\"Token\"] = botToken[\"Token\"]\n if testIfVariableExists(botToken, \"MainChannelId\"):\n botTokenNew[\"NotificationChannelId\"] = botToken[\"MainChannelId\"]\n if testIfVariableExists(botToken, \"NotificationChannelId\"):\n botTokenNew[\"NotificationChannelId\"] = botToken[\"NotificationChannelId\"]\n\n if testIfVariableExists(botToken, \"OwnerId\"):\n if isinstance(botToken[\"OwnerId\"], int):\n botTokenNew[\"Admins\"] = [botToken[\"OwnerId\"]]\n else:\n botTokenNew[\"Admins\"] = botToken[\"OwnerId\"]\n\n if testIfVariableExists(botToken, \"Admins\"):\n if isinstance(botToken[\"Admins\"], int):\n botTokenNew[\"Admins\"] = [botToken[\"Admins\"]]\n else:\n botTokenNew[\"Admins\"] = botToken[\"Admins\"]\n\n if testIfVariableExists(botToken, \"ServerID\"):\n botTokenNew[\"ServerID\"] = botToken[\"ServerID\"]\n if testIfVariableExists(botToken, \"DisableDiscordLogging\"):\n botTokenNew[\"DisableDiscordLogging\"] = botToken[\"DisableDiscordLogging\"]\n if testIfVariableExists(botToken, \"ShowMessageGlobal\"):\n botTokenNew[\"ShowMessageGlobal\"] = botToken[\"ShowMessageGlobal\"]\n if 
testIfVariableExists(botToken, \"SlashCommandsOnly\"):\n botTokenNew[\"SlashCommandsOnly\"] = botToken[\"SlashCommandsOnly\"]\n if testIfVariableExists(botToken, \"BlacklistedUsers\"):\n botTokenNew[\"BlacklistedUsers\"] = botToken[\"BlacklistedUsers\"]\n\n # codes\n\n for code in codes.keys():\n real_code = codes[code]\n if not real_code.get(\"Role\"):\n codes[code][\"Role\"] = 0\n if not real_code.get(\"DisputesEconomyCash\"):\n codes[code][\"DisputesEconomyCash\"] = False\n if real_code.get(\"reward\"):\n reward = codes[code][\"reward\"]\n codes[code].pop('reward', None)\n codes[code][\"Reward\"] = reward\n codesNew = codes\n\n # users\n\n userDataNew = userData\n\n # economy\n\n if testIfVariableExists(economy, \"Enabled\"):\n economyNew[\"Enabled\"] = economy[\"Enabled\"]\n if testIfVariableExists(economy, \"UserData\"):\n economyNew[\"UserData\"] = economy[\"UserData\"]\n if testIfVariableExists(economy, \"StoreInventory\"):\n economyNew[\"StoreInventory\"] = economy[\"StoreInventory\"]\n if testIfVariableExists(economy, \"EconomyName\"):\n economyNew[\"EconomyName\"] = economy[\"EconomyName\"]\n if testIfVariableExists(economy, \"Commands\"):\n economyNew[\"Commands\"] = economy[\"Commands\"]\n if testIfVariableExists(economy, \"SuccessRate\"):\n economyNew[\"SuccessRate\"] = economy[\"SuccessRate\"]\n if testIfVariableExists(economy, \"JobList\"):\n economyNew[\"JobList\"] = economy[\"JobList\"]\n if testIfVariableExists(economy, \"RoleMultiplier\"):\n economyNew[\"RoleMultiplier\"] = economy[\"RoleMultiplier\"]\n if testIfVariableExists(economy, \"InventoryLimit\"):\n economyNew[\"InventoryLimit\"] = economy[\"InventoryLimit\"]\n if testIfVariableExists(economy, \"GreatReset\"):\n economyNew[\"GreatReset\"] = economy[\"GreatReset\"]\n if testIfVariableExists(economy, \"AllowedMultipliers\"):\n economyNew[\"AllowedMultipliers\"] = economy[\"AllowedMultipliers\"]\n\n # end of conversion\n\n with open(dir + \"bot.json\", \"w\") as outfile:\n json.dump(botTokenNew, outfile, indent=4)\n with open(dir + \"users.json\", \"w\") as outfile:\n json.dump(userDataNew, outfile, indent=4)\n with open(dir + \"codes.json\", \"w\") as outfile:\n json.dump(codesNew, outfile, indent=4)\n with open(dir + \"economy.json\", \"w\") as outfile:\n json.dump(economyNew, outfile, indent=4)\n with open(dir + \"main.py\", \"w\", encoding=\"utf8\") as outfile:\n outfile.write(mainNew)\n return {\n \"success\": True,\n \"message\": \"Successfully updated!\"\n }\n except Exception as e:\n return {\n \"success\": False,\n \"message\": str(e)\n }\n\nif __name__ == \"__main__\":\n def testIfVariableExists(tablee, variablee):\n if variablee in tablee:\n return True\n else:\n return False\n \n def printSystemMessage(message):\n print(f\"\\x1b[38;2;255;75;0m{message}\\033[38;5;231m\")\n\n def printMainMessage(mes):\n print(f\"\\x1b[38;2;255;255;255m{mes}\\033[38;5;231m\")\n\n def printErrorMessage(mes):\n print(f\"\\x1b[38;2;255;0;0m{mes}\\033[38;5;231m\")\n\n def printSuccessMessage(mes):\n print(f\"\\x1b[38;2;0;255;0m{mes}\\033[38;5;231m\")\n\n def argumentHandler(args):\n mode = args[1]\n if mode == \"-directory\":\n dir = args[2]\n res = updateDirectory(dir)\n if res[\"success\"]:\n printSuccessMessage(\"Successfully updated Merch Code System Directory \" + dir + \"! 
You may use it!\")\n else:\n printErrorMessage(\"Failed to update Merch Code System Directory: \" + dir)\n\n if len(sys.argv) > 2:\n argumentHandler(sys.argv)\n else:\n printSystemMessage(\"\")\n printSystemMessage(\"███████╗███████╗░█████╗░███████╗██╗░██████╗  ███╗░░░███╗███████╗██████╗░░█████╗░██╗░░██╗\")\n printSystemMessage(\"██╔════╝██╔════╝██╔══██╗╚════██║╚█║██╔════╝  ████╗░████║██╔════╝██╔══██╗██╔══██╗██║░░██║\")\n printSystemMessage(\"█████╗░░█████╗░░███████║░░███╔═╝░╚╝╚█████╗░  ██╔████╔██║█████╗░░██████╔╝██║░░╚═╝███████║\")\n printSystemMessage(\"██╔══╝░░██╔══╝░░██╔══██║██╔══╝░░░░░░╚═══██╗  ██║╚██╔╝██║██╔══╝░░██╔══██╗██║░░██╗██╔══██║\")\n printSystemMessage(\"███████╗██║░░░░░██║░░██║███████╗░░░██████╔╝  ██║░╚═╝░██║███████╗██║░░██║╚█████╔╝██║░░██║\")\n printSystemMessage(\"╚══════╝╚═╝░░░░░╚═╝░░╚═╝╚══════╝░░░╚═════╝░  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝\")\n printSystemMessage(\"\")\n printSystemMessage(\"░█████╗░░█████╗░██████╗░███████╗  ░██████╗██╗░░░██╗░██████╗████████╗███████╗███╗░░░███╗\")\n printSystemMessage(\"██╔══██╗██╔══██╗██╔══██╗██╔════╝  ██╔════╝╚██╗░██╔╝██╔════╝╚══██╔══╝██╔════╝████╗░████║\")\n printSystemMessage(\"██║░░╚═╝██║░░██║██║░░██║█████╗░░  ╚█████╗░░╚████╔╝░╚█████╗░░░░██║░░░█████╗░░██╔████╔██║\")\n printSystemMessage(\"██║░░██╗██║░░██║██║░░██║██╔══╝░░  ░╚═══██╗░░╚██╔╝░░░╚═══██╗░░░██║░░░██╔══╝░░██║╚██╔╝██║\")\n printSystemMessage(\"╚█████╔╝╚█████╔╝██████╔╝███████╗  ██████╔╝░░░██║░░░██████╔╝░░░██║░░░███████╗██║░╚═╝░██║\")\n printSystemMessage(\"░╚════╝░░╚════╝░╚═════╝░╚══════╝  ╚═════╝░░░░╚═╝░░░╚═════╝░░░░╚═╝░░░╚══════╝╚═╝░░░░░╚═╝\")\n printSystemMessage(\"\")\n printSystemMessage(\"Welcome to Efaz's Discord Merch Code System Setup\")\n printSystemMessage(\"To set up your JSONs, please continue from here.\")\n printSystemMessage(\"Script Version v1.7.0\")\n\n checkpip = input(\"Do you want to install modules if haven't already? (y/n):\")\n if checkpip.lower() == \"y\":\n printMainMessage(\"Awaiting pip...\")\n time.sleep(2.1)\n pip.main(['install', \"discord.py\"])\n pip.main(['install', \"asyncio\"])\n printSuccessMessage(\"Finished running pip, continuing setup..\")\n\n alreadyexists = input(\n \"Do you have an existing Merch Code System installation or no? (y/n):\"\n )\n if alreadyexists.lower() == \"y\":\n directory = input(\"Enter the directory of the old installation: \")\n newDirect = directory.replace(\"'\", \"\")\n if LocalMachineOS == \"Darwin\" or LocalMachineOS == \"Linux\":\n if not newDirect.endswith(\"/\"):\n newDirect = newDirect + \"/\"\n elif (\n LocalMachineOS == \"win32\"\n or LocalMachineOS == \"win64\"\n or LocalMachineOS == \"Windows\"\n ):\n if not newDirect.endswith(\"\\\\\"):\n newDirect = newDirect + \"\\\\\"\n printMainMessage(\"Reviewing Directory...\")\n printMainMessage(\"New directory:\")\n printMainMessage(newDirect)\n confirm = input(\"Is this directory correct? (y/n): \")\n if confirm.lower() == \"y\":\n res = updateDirectory(newDirect)\n if res[\"success\"]:\n printSuccessMessage(\"Successfully updated Merch Code System Directory \" + newDirect + \"! 
You may use it!\")\n else:\n printErrorMessage(\"Failed to update Merch Code System Directory: \" + newDirect)\n enter = input(\"Press Enter to finish setup: \")\n else:\n print(\"Ending process..\")\n exit()\n else:\n printMainMessage(\"-- BOT SECTION --\")\n botInfo1 = input(\n \"Your Discord Bot token (Get from your Discord Developer Portal): \"\n )\n botInfo2 = int(input(\"Your Owner ID (your user Id): \"))\n botInfo3 = int(\n input(\n \"Your Main Channel (the channel id you would like messages to be sent to for messages from bot): \"\n )\n )\n botInfo4 = int(input(\"Your Guild or Discord Server ID: \"))\n botInfo5 = input(\"Slash Commands Only? (y/n)\")\n if botInfo5.lower() == \"y\":\n botInfo5 = True\n else:\n botInfo5 = False\n botInfo6 = input(\"Disable Discord Bot Logging? (y/n)\")\n if botInfo6.lower() == \"y\":\n botInfo6 = True\n else:\n botInfo6 = False\n printMainMessage(\"-- CODES SECTION --\")\n codeInfo1 = input(\"Would you like to add an perm code? (Optional) (y/n)\")\n codeInfo2 = None\n if codeInfo1.lower() == \"y\":\n codeInfo2 = input(\"Code?\")\n codeInfo3 = input(\"Reward?\")\n\n printMainMessage(\"-- ECONOMY SECTION --\")\n economy1 = input(\"Do you want to enable economy commands? (y/n):\")\n if economy1.lower() == \"y\":\n economy1 = True\n economy2 = input(\"Economy Name?\")\n economy3 = input(\"Success Rate? (1 / ): \")\n if int(economy3):\n economy3 = int(economy3)\n else:\n economy3 = 4\n economy4 = input(\"Daily Amount?\")\n if int(economy4):\n economy4 = int(economy4)\n else:\n economy4 = 100\n economy5 = input(\"Inventory Limit?\")\n if int(economy5):\n economy5 = int(economy5)\n else:\n economy5 = 20\n\n if input(\"Do you want to enable Great Reset and Season commands? (y/n): \").lower() == \"y\":\n economy6 = True\n economy7 = input(\"Cash King Role ID? (): \")\n if int(economy7):\n economy7 = int(economy7)\n else:\n economy7 = 0\n\n economy8 = input(\"Reset Cash when the great reset starts? (y/n): \")\n if economy8.lower() == \"y\":\n economy8 = True\n else:\n economy8 = False\n economy9 = input(\"Reset Items when the great reset starts? (y/n): \")\n if economy9.lower() == \"y\":\n economy9 = True\n else:\n economy9 = False\n economy10 = input(\"Reset Roles with multipilers when the great reset starts? (y/n): \")\n if economy10.lower() == \"y\":\n economy10 = True\n economy11 = input(\"One role to reset when great reset? 
(): \")\n if int(economy11):\n economy11 = int(economy11)\n else:\n economy11 = 0\n else:\n economy10 = False\n economy11 = 0\n else:\n economy6 = False\n economy7 = 0\n economy8 = False\n economy9 = False\n economy10 = False\n economy11 = 0\n else:\n economy1 = False\n\n printMainMessage(\"-- Finished Questions --\")\n printMainMessage(\"Preparing to generate JSONs\")\n botJSON = {\n \"Token\": botInfo1,\n \"Admins\": [botInfo2],\n \"NotificationChannelId\": botInfo3,\n \"ServerID\": botInfo4,\n \"SlashCommandsOnly\": botInfo5,\n \"BlacklistedUsers\": {},\n \"DisableDiscordLogging\": botInfo6,\n \"ShowMessageGlobal\": True\n }\n if economy1 == True:\n economyJSON = {\n \"Enabled\": True,\n \"UserData\": {},\n \"StoreInventory\": {},\n \"EconomyName\": economy2,\n \"Commands\": {\n \"Daily\": economy4\n },\n \"SuccessRate\": economy3,\n \"JobList\": [\n {\n \"name\": \"Mayor\",\n \"amount\": 5000\n },\n {\n \"name\": \"District Worker\",\n \"amount\": 1000\n },\n {\n \"name\": \"Principal\",\n \"amount\": 500\n },\n {\n \"name\": \"Teacher\",\n \"amount\": 100\n }\n ],\n \"GreatReset\": {\n \"Enabled\": economy6,\n \"RolesToReset\": [economy11],\n \"SeasonNumber\": 1,\n \"DataToReset\": {\n \"Cash\": economy8,\n \"Roles\": economy9,\n \"Items\": economy10\n },\n \"CashKingRoleID\": economy7,\n \"ItemDataWhenRestock\": [\n {\n \"stock\": 1,\n \"price\": 10000000,\n \"name\": \"Cash King\",\n \"role\": economy7,\n \"multiplierEnabled\": True\n }\n ]\n },\n \"AllowedMultipliers\": {\n \"Weekends\": True,\n \"Roles\": True,\n \"Seasons\": True\n },\n \"RoleMultiplier\": [],\n \"InventoryLimit\": economy5\n }\n with open(\"economy.json\", \"w\") as outfile:\n json.dump(economyJSON, outfile, indent=4)\n if codeInfo1.lower() == \"y\":\n codeJSON = {\n codeInfo2: {\n \"Reward\": codeInfo3,\n \"OneUserOnly\": False,\n \"Role\": 0,\n \"DisputesEconomyCash\": False\n }\n }\n with open(\"codes.json\", \"w\") as outfile:\n json.dump(codeJSON, outfile, indent=4)\n printSuccessMessage(\"JSONs Ready\")\n enter = input(\"Press Enter to finish setup: \")\n with open(\"bot.json\", \"w\") as outfile:\n json.dump(botJSON, outfile, indent=4)\n printSuccessMessage(\"Setup is finished!\")","repo_name":"EfazDev/merch-code-system","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":19276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14194260709","text":"import torch\nimport joblib\nfrom app.models import NeuralNet\n\ndef predict_model(data):\n\n loaded_scaler = joblib.load('./models/minmax_scaler.pkl' )\n data_scaled = loaded_scaler.transform(data)\n data_tensor = torch.FloatTensor(data_scaled)\n \n model = NeuralNet(5,2)\n model.load_state_dict(torch.load('./models/predict_model.pth', map_location=torch.device('cpu')))\n model.eval()\n\n with torch.no_grad():\n output = model(data_tensor)\n\n _, predicted_idx = torch.max(output, 1)\n predicted_label = predicted_idx.item()\n return predicted_label","repo_name":"junRepository/Predict_CIR","sub_path":"app/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17215348598","text":"import argparse\n\n\ndef _add_parser_args(parser: argparse.ArgumentParser):\n parser.add_argument(\"--token\", required=True, type=str, help=\"validation token\")\n parser.add_argument(\"--project_folder\", required=True, type=str, help=\"dbt project folder\")\n 
parser.add_argument(\"--target_folder\", required=False, type=str, help=\"target folder for the dbt run\")\n parser.add_argument(\"--logs_folder\", required=False, type=str, help=\"log folder from the dbt run\")\n parser.add_argument(\"--ignore-errors\", required=False, action='store_true', default=False,\n help=\"when set, the client will exit with exitCode 0 even if an error occurs\")\n\n\ndef parse_args_legacy(argv):\n parser = argparse.ArgumentParser(description=\"revefi dbt cli\")\n subparsers = parser.add_subparsers(dest='command')\n dbt_parser = subparsers.add_parser('dbt')\n _add_parser_args(dbt_parser)\n args = parser.parse_args(argv)\n return args\n\n\ndef parse_args_v2(argv):\n parser = argparse.ArgumentParser(description=\"revefi dbt cli\")\n _add_parser_args(parser)\n args = parser.parse_args(argv)\n return args\n","repo_name":"revefi/dbt_python_client","sub_path":"src/revefi_dbt_client/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"4771050736","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 17 11:01:55 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 13 09:12:13 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport socket\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nimport AcqData\r\nimport logging\r\n\r\n\r\nFileCCF=\"CCF4.csv\"\r\nfilename=\"RFISpectrum\"\r\nPathCCF = \"D:/Geomarr/Spectrum/\"\r\n#filename=\"IQWerte_100.0MHz.npy\"\r\n#filename=\"Spectrum_110.0MHz.npy\"\r\n\r\nAcqBW = 40e6\r\nk = 1.38064852e-23 # Boltzmann constant\r\nbandwidth = AcqBW # Hz\r\nStartFreq = 1000e6\r\nStopFreq = 1040e6\r\nZ0 = 119.9169832 * np.pi # Impedance of freespace\r\n\r\nG_LNA= 20 #dB gain of the LNA\r\nLcable = -1 #dB cable losses\r\nantennaEfficiency = 0.75 \r\n\r\nr = 1.0 # Distance DUT to receiving antenna\r\npath = \"D:/Geomarr/Spectrum/ChangeIntegrationTime\"+\"_gLNA\"+str(G_LNA)+\"_Lcable\"+str(Lcable)+\"_EffAnt\"+str(antennaEfficiency)+\"/ \"\r\nresolution = 1\r\nDataPath = \"D:/Geomarr/Spectrum/FullSpectrum\" #Path to save the spectra\r\nintegrationTime = 1 #integration time in sec\r\ndisplayResolution = 1 # times 107Hz\r\nusecase = 0 # 0 = plain data taking; 1 = calibrate data; 2 = acquire calibration data; 3 = start RFI data; 4 = acquire background data\r\nGPU_integration_time = 3\r\ncolor = ['y','hotpink','olive','coral','darkkhaki','orchid','lightblue','navy','rosybrown','cornflowerblue','lavenderblush','cadetblue','hotpink','olive','coral','darkkhaki','orchid','lightblue','navy','aliceblue','r','b','m','c','g']\r\n\r\ndef readIQDatafileCCF(path, filename):\r\n data = []\r\n with open(path+filename, \"r\") as filedata:\r\n csvdata = csv.reader(filedata, delimiter = ',')\r\n for row in csvdata:\r\n data.append(row)\r\n filedata.close()\r\n data =np.array(data, dtype = 'float') \r\n return data\r\n \r\ndef readIQDatafile(path, filename):\r\n data = []\r\n filedata = open(path+filename, \"r\")\r\n csvdata = csv.reader(filedata)\r\n for row in csvdata:\r\n data.append(row)\r\n filedata.close()\r\n data =np.array(data, dtype=float)\r\n return data\r\n \r\ndef readIQDataBin(path,filename):\r\n arraydata =np.load(path + filename)\r\n return arraydata.T\r\n\r\ndef readSpectralData(path,filename):\r\n arraydata = np.load(path + filename)\r\n return arraydata\r\n \r\n\r\ndef convert2Complex(iqdata):\r\n return [(x + 1j*y) for x, y in iqdata]\r\n\r\ndef 
creatFreqScale(centerFreq, bandwidth, sample):\r\n freqList = []\r\n bandstart = centerFreq-bandwidth/2\r\n freqResolution = bandwidth/sample\r\n for i in range(sample):\r\n freqList.append(float((freqResolution*i+bandstart)))\r\n return np.array(freqList, dtype=float)\r\n\r\n# genertes a list of standart center frequencies \r\ndef generate_CFlist(startfreq, stopfreq):\r\n lowerlimit = 80e6 # Hz Defines the lower frequency limit\r\n upperlimit = 6000e6 # Hz Defines the upper freqzency limit\r\n if startfreq > stopfreq: \r\n temp = startfreq\r\n startfreq = stopfreq\r\n stopfreq = temp\r\n if lowerlimit >= startfreq and startfreq <= upperlimit: startfreq = lowerlimit\r\n if lowerlimit >= stopfreq and stopfreq <= upperlimit: stopfreq = upperlimit\r\n if startfreq > stopfreq: \r\n temp = startfreq\r\n startfreq = stopfreq\r\n stopfreq = temp\r\n \r\n cfreq_tabelle = list(range(int(round((stopfreq/1e6-80)/40+0.499999))-int((startfreq/1e6-80)/40)))\r\n for i in range(len(cfreq_tabelle)):\r\n cfreq_tabelle[i]= ((i+int((startfreq/1e6-80)/40))*40+100)*1e6\r\n return cfreq_tabelle\r\n \r\ndef change_freq_channel(spectrum, factor):\r\n outputChannel = int(len(spectrum[0])/factor)\r\n outputfreqlist = np.zeros(outputChannel)\r\n outputspeclist = np.zeros(outputChannel)\r\n for i in range(outputChannel):\r\n outputfreqlist[i] = np.mean(spectrum[0][i*factor:(i*factor+factor)])\r\n outputspeclist[i] = np.mean(spectrum[1][i*factor:(i*factor+factor)])\r\n return outputfreqlist, outputspeclist\r\n \r\ndef to_decibels(x):\r\n calfactor = 1000/50/523392/2 \r\n return 10*np.log10(x*x*calfactor)\r\n \r\ndef trim_spectrum(spectrum):\r\n final_sample= 373852\r\n specsize=len(spectrum[0][:])\r\n AcqData._log.info(\"Usable number of Samples: %i \"%(final_sample))\r\n AcqData._log.info(\"Spec length: %i \"%(specsize))\r\n starttrim = int((specsize-final_sample)/2) \r\n stoptrim = int(specsize-(specsize-final_sample)/2)\r\n freq = np.array(spectrum[0])\r\n fft= np.array(spectrum[1])\r\n return freq[starttrim:stoptrim],fft[starttrim:stoptrim]\r\n \r\ndef plot_stiched_spectrum(spectrum, c, resfact = 1):\r\n trimspec = np.array(change_freq_channel(trim_spectrum(spectrum),resfact))\r\n #trimspec = np.array(trim_spectrum(spectrum))\r\n print(c)\r\n spec = to_decibels(trimspec[1][:])\r\n resolution = 0.1069943751528491*resfact\r\n #_log.info(\"Generating plots (%.2f to %.2f MHz)\"%(lower/1e6,upper/1e6))\r\n plt.plot(trimspec[0][:]/1e6,spec, c)\r\n #plt.ylim(-80,0)\r\n plt.ylabel(\"Electric field strenght [dBu/m]\")\r\n plt.xlabel(\"Frequency (MHz) (resolution %.3f kHz)\"%resolution)\r\n\r\ndef freq_scale(true_bw,buffer_size,cfreq):\r\n bw = true_bw\r\n resolution = bw/buffer_size\r\n lower = cfreq-bw/2\r\n upper = cfreq+bw/2\r\n return np.linspace(lower,upper,buffer_size)\r\n \r\ndef applyWindow(complexIQdata):\r\n window = np.blackman(len(complexIQdata))\r\n return complexIQdata*window\r\n\r\ndef calCompFFT(complexIQdata):\r\n tempSpec = np.fft.fft(complexIQdata)\r\n complexSpec = np.fft.fftshift(tempSpec)\r\n # Create the frequency scale\r\n freqscale = creatFreqScale(centerFreq, bandwidth, len(complexIQdata))\r\n return freqscale, complexSpec\r\n\r\ndef convertFFT2powerSpectrum(spectrum):\r\n x = np.abs(spectrum[1])\r\n calfactor = 1000/50/len(spectrum[1])/2\r\n \r\n return spectrum[0], 10*np.log10(x*x*calfactor) \r\n\r\ndef calCCF(spectrum, CCF, r, Lcable, G_LNA, antennaEfficiency, BW, centreFreq): # returns in [dBuV/m]\r\n spectrum[0,1] = 0 \r\n tempSpec = spectrum\r\n BW = BW*1e6\r\n # upperfreq = centreFreq + BW/2 
\r\n    #lowerfreq = centreFreq - BW/2 \r\n   # CCFnew = CCF[(int(lowerfreq/1e6)-100):1:(int(upperfreq/1e6)-100)]\r\n    for i in range(len(CCF[:,0])):\r\n        for n,x in enumerate(spectrum[:,0]):\r\n            #if x <= CCF[i,0] and x >= CCF[i,0]:\r\n            tempSpec[n,1] = -G_LNA - Lcable - (10.0 * np.log10(antennaEfficiency)) - CCF[i,1] + (10.0 * np.log10(Z0 / (4.0 * np.pi * (r *r)))) + 90.0\r\n                #tempSpec.append(temp)\r\n    #tempSpec = np.array(tempSpec, dtype = 'float') \r\n\r\n   # tempSpec[n] = np.nan\r\n    #for i in range((int(lowerfreq/1e6)-100), (int(upperfreq/1e6)-100),1):\r\n    #    for j in range (int(len(spectrum[:,0])/56 ) ):\r\n    #        temp = -G_LNA - Lcable - (10.0 * np.log10(antennaEfficiency)) - CCF[i,1] + (10.0 * np.log10(Z0 / (4.0 * np.pi * (r *r)))) + 90.0\r\n    #        tempSpec.append(temp)\r\n    #tempSpec = np.array(tempSpec, dtype = 'float')\r\n    return spectrum[:,0], spectrum[:,1] #+ tempSpec[:,1]\r\n    \r\ndef Temp_Sys(NF_rsa, Lcable, G_LNA): # returns in [dBuV/m]\r\n    NF_dB = NF_rsa + Lcable - G_LNA\r\n    Tsys = (10**(NF_dB/10))/(k*AcqBW)  # ** is exponentiation (^ would be bitwise XOR in Python)\r\n    return Tsys\r\n    \r\ndef Simga(TSys, L, N):\r\n    # L is the length of the FFT (samples/sec)\r\n    # N number of FFT being average (the size)\r\n    Sigma = (k*TSys*AcqBW)/(L*np.sqrt(N))\r\n    return 10*np.log10(Sigma)\r\n\r\ndef RFI_to_Simga(spectrum, CCF, r, Lcable, G_LNA, antennaEfficiency, BW, centreFreq): # returns in [dBuV/m]\r\n    spectrum[0,1] = 0 \r\n    tempSpec = spectrum\r\n    BW = BW*1e6\r\n   # upperfreq = centreFreq + BW/2 \r\n    #lowerfreq = centreFreq - BW/2 \r\n   # CCFnew = CCF[(int(lowerfreq/1e6)-100):1:(int(upperfreq/1e6)-100)]\r\n    for i in range(len(CCF[:,0])):\r\n        for n,x in enumerate(spectrum[:,0]):\r\n            #if x <= CCF[i,0] and x >= CCF[i,0]:\r\n            tempSpec[n,1] = -G_LNA - Lcable - (10.0 * np.log10(antennaEfficiency)) - CCF[i,1] + (10.0 * np.log10(Z0 / (4.0 * np.pi * (r *r)))) + 90.0\r\n                #tempSpec.append(temp)\r\n    #tempSpec = np.array(tempSpec, dtype = 'float') \r\n\r\n   # tempSpec[n] = np.nan\r\n    #for i in range((int(lowerfreq/1e6)-100), (int(upperfreq/1e6)-100),1):\r\n    #    for j in range (int(len(spectrum[:,0])/56 ) ):\r\n    #        temp = -G_LNA - Lcable - (10.0 * np.log10(antennaEfficiency)) - CCF[i,1] + (10.0 * np.log10(Z0 / (4.0 * np.pi * (r *r)))) + 90.0\r\n    #        tempSpec.append(temp)\r\n    #tempSpec = np.array(tempSpec, dtype = 'float')\r\n    return spectrum[:,0], spectrum[:,1] #+ tempSpec[:,1]\r\n    \r\ndef mean_NoiseFloor(NF):\r\n    meanNF = 0\r\n    for x in range(len(NF)):\r\n        meanNF = NF[x] + meanNF\r\n    return meanNF/len(NF)\r\n    \r\nif __name__ == '__main__':\r\n    CCFdata = readIQDatafileCCF(PathCCF,FileCCF) \r\n    Freqlist = generate_CFlist(StartFreq,StopFreq)\r\n    sample = 523852 \r\n    BW = 40*1e6\r\n    centreFreq = StartFreq+BW/2\r\n    tempSpec = []\r\n    CCF = []\r\n    Length_freq = len(Freqlist) \r\n    # set the CCF freq range\r\n    upperfreq = centreFreq + BW/2 \r\n    lowerfreq = centreFreq - BW/2\r\n    for i in range((int(lowerfreq/1e6)-100), (int(upperfreq/1e6)-100),1):\r\n        CCF.append(CCFdata[i])\r\n    CCF = np.array(CCF, dtype = 'float') \r\n    print()\r\n    fileName = filename+str(int(Freqlist[0]/1e6))+\"MHz_IntegrationTime_\"+str(integrationTime)+\".npy\"\r\n    ReadFile = readIQDataBin(path,fileName)\r\n    Spec = calCCF(ReadFile, CCF, r, Lcable, G_LNA, antennaEfficiency, BW, Freqlist[0])\r\n    plot_stiched_spectrum(Spec, color[0])\r\n    #plt.plot(tempSpec[i,0,:]/1e6,tempSpec[i,1,:])\r\n    #print(CCF.shape)\r\n   # spec = calCCF(spec, CCF[], r, Lcable, G_LNA, antennaEfficiency)\r\n    #print(len(spec))\r\n    #plt.plot(creatFreqScale(centerFreq,bandwidth,len(r)),convertFFT2powerSpectrum(spec))\r\n    
#plt.plot(spec[0],convertFFT2powerSpectrum(spec[1]))\r\n   # plt.plot(Spec[0]/1e6,Spec[1])\r\n    #plt.ylabel(\"Power (dB)\")\r\n    #plt.xlabel(\"Frequency (MHz) (resolution %.3f kHz)\"%resolution)\r\n    plt.show()\r\n    ","repo_name":"Geomarr2/set-up-GUI","sub_path":"RFIChamberMeasure/SNR/CalData2.py","file_name":"CalData2.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34657391986","text":"from random import randint\nfrom bitarray import bitarray\n\ndef findDuplicates(arr):\n    bitVector = bitarray(32*(2**10))\n    bitVector.setall(False)  # a fresh bitarray is uninitialized, so clear it first\n    for index, num in enumerate(arr):\n        if not bitVector[num]:\n            bitVector[num] = True\n        else:\n            print(num)\n\ndef createList():\n    arr = []\n    for i in range(1000):\n        arr.append(randint(0, 32000))\n    return arr\n\n\nif __name__ == \"__main__\":\n    arr = createList()\n    findDuplicates(arr)\n","repo_name":"redixhumayun/ctci","sub_path":"Sorting_Searching/duplicate.py","file_name":"duplicate.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71343444105","text":"#!/usr/bin/env python\nimport rospy\nfrom numpy import *\n\nimport locosigns_msgs.msg\nimport nav_msgs.msg\nfrom geometry_msgs.msg import TwistStamped as MsgTwist\n\nclass SpeedometerNode():\n\n    def sendMessage(self):\n        if(self.velocity is None or self.velocity_n is None):\n            return\n        # Publish messages\n        true_msg = MsgTwist()\n        true_msg.header.stamp = rospy.Time.now()\n        true_msg.twist.linear.x = self.velocity\n        self.true_publisher.publish(true_msg)\n        noisy_msg = MsgTwist()\n        noisy_msg.header.stamp = rospy.Time.now()\n        noisy_msg.twist.linear.x = self.velocity_n\n        self.noisy_publisher.publish(noisy_msg)\n        return\n\n    def callback(self, msg):\n        # Message\n        time = msg.header.stamp\n        position = msg.pose.pose.position\n        # Initial condition\n        if(self.position is None):\n            self.position = position\n            self.time = time\n            return\n        # Measure\n        time_offset = float((time - self.time).to_sec())\n        distance = sqrt(\n            (position.x - self.position.x) ** 2.0 +\n            (position.y - self.position.y) ** 2.0 +\n            (position.z - self.position.z) ** 2.0\n        )\n        # Groundtruth\n        self.velocity = distance/time_offset\n        # Noise\n        noise = ( 0.3 * self.velocity ) * (2.0 * random.rand() - 1.0)\n        self.velocity_n = self.velocity + noise\n        # Update\n        self.position = position\n        self.time = time\n        return\n\n    def loop(self):\n        loop_timer = rospy.Rate(self.update_rate)\n        while(not rospy.is_shutdown()):\n            self.sendMessage()\n            loop_timer.sleep()\n        return\n\n    def __init__(self):\n        # Inner vars initialization\n        self.position = None\n        self.update_rate = 5 # Hertz\n        self.velocity = None\n        self.velocity_n = None\n        # Publishers\n        self.true_publisher = rospy.Publisher(\"/vehicle/state/groundtruth/velocity\", MsgTwist ,queue_size=1)\n        self.noisy_publisher = rospy.Publisher(\"/vehicle/sensor/speedometer\", MsgTwist ,queue_size=1)\n        # Subscription topics\n        rospy.Subscriber(\"/base_pose_ground_truth\", nav_msgs.msg.Odometry, self.callback)\n        # Loop\n        self.loop()\n        return\n\nif __name__ == \"__main__\":\n    # Node initialization\n    rospy.init_node('sim_speedometer_node')\n    SpeedometerNode()","repo_name":"cabraile/locosigns-ws","sub_path":"locosigns_simulator/scripts/sim_speedometer_node.py","file_name":"sim_speedometer_node.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"32262264318","text":"import brownie\nfrom brownie import Contract\nimport pytest\n\n\ndef test_sweep_token_1(\n gov,\n token_1_vault,\n token_1_strategy,\n token_1,\n user,\n token_1_amount,\n toke_token,\n toke_whale,\n):\n sweep(\n gov,\n token_1_vault,\n token_1_strategy,\n token_1,\n user,\n token_1_amount,\n toke_token,\n toke_whale,\n )\n\n\ndef test_sweep_token_2(\n gov,\n token_2_vault,\n token_2_strategy,\n token_2,\n user,\n token_2_amount,\n toke_token,\n toke_whale,\n):\n sweep(\n gov,\n token_2_vault,\n token_2_strategy,\n token_2,\n user,\n token_2_amount,\n toke_token,\n toke_whale,\n )\n\n\ndef sweep(gov, vault, strategy, token, user, amount, toke_token, toke_whale):\n # Strategy want token doesn't work\n token.transfer(strategy, amount, {\"from\": user})\n assert token.address == strategy.want()\n assert token.balanceOf(strategy) > 0\n with brownie.reverts(\"!want\"):\n strategy.sweep(token, {\"from\": gov})\n\n # Vault share token doesn't work\n with brownie.reverts(\"!shares\"):\n strategy.sweep(vault.address, {\"from\": gov})\n\n toke_amount = 10 * (10 ** 18)\n\n before_balance = toke_token.balanceOf(gov)\n toke_token.transfer(strategy, toke_amount, {\"from\": toke_whale})\n assert toke_whale.address != strategy.want()\n assert toke_token.balanceOf(user) == 0\n strategy.sweep(toke_token, {\"from\": gov})\n assert toke_token.balanceOf(gov) == toke_amount + before_balance\n","repo_name":"charlesndalton/generic-tokemak-strat","sub_path":"tests/test_sweep.py","file_name":"test_sweep.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11514689749","text":"# -*- coding: utf-8 -*-\nimport random\nimport tkinter as tk\nfrom tkinter import messagebox\n\nclass DiceRollingGame:\n def __init__(self, root):\n self.root = root\n self.root.title(\"Dice Rolling Game\")\n self.root.geometry(\"500x400\")\n\n self.players = []\n self.num_players = 2\n self.target_rolls = 10\n self.num_sides = 6\n self.total_rounds = 0\n self.num_rolls = 0\n\n self.create_widgets()\n\n def create_widgets(self):\n # Create labels, entry, and buttons for the GUI\n self.title_label = tk.Label(self.root, text=\"Dice Rolling Game\", font=(\"Helvetica\", 20, \"bold\"))\n self.title_label.pack(pady=10)\n\n self.players_label = tk.Label(self.root, text=\"Enter number of players:\")\n self.players_label.pack()\n self.players_entry = tk.Entry(self.root, width=5)\n self.players_entry.pack()\n\n self.rolls_label = tk.Label(self.root, text=\"Enter number of rolls per player:\")\n self.rolls_label.pack()\n self.rolls_entry = tk.Entry(self.root, width=5)\n self.rolls_entry.pack()\n\n self.start_button = tk.Button(self.root, text=\"Start Game\", command=self.start_game)\n self.start_button.pack(pady=10)\n\n self.roll_frame = tk.Frame(self.root)\n self.roll_frame.pack()\n\n self.result_label = tk.Label(self.root, text=\"\", font=(\"Arial\", 16))\n self.result_label.pack(pady=20)\n\n def start_game(self):\n # Start the game with specified number of players and target rolls\n try:\n self.num_players = int(self.players_entry.get())\n self.target_rolls = int(self.rolls_entry.get())\n if self.num_players <= 0 or self.target_rolls <= 0:\n raise ValueError\n except ValueError:\n messagebox.showerror(\"Error\", \"Please enter valid numbers for players and rolls.\")\n return\n\n # Create player list with initial scores and highest scores\n self.players = [{\"name\": f\"Player {i+1}\", \"score\": 0, \"highest_score\": 
0} for i in range(self.num_players)]\n self.total_rounds = 0\n self.num_rolls = 0\n\n # Remove input elements and start button after starting the game\n self.players_label.pack_forget()\n self.players_entry.pack_forget()\n self.rolls_label.pack_forget()\n self.rolls_entry.pack_forget()\n self.start_button.pack_forget()\n\n # Create \"Roll Dice\" buttons for each player\n for player in self.players:\n self.create_roll_button(player[\"name\"])\n\n def create_roll_button(self, player_name):\n # Create a \"Roll Dice\" button for the given player\n roll_button = tk.Button(self.roll_frame, text=f\"Roll Dice for {player_name}\", command=lambda: self.roll_dice(player_name))\n roll_button.pack(pady=5)\n\n def roll_dice(self, player_name):\n # Roll the dice for the given player and update scores\n roll_result = random.randint(1, self.num_sides)\n self.result_label.config(text=f\"{player_name} rolled a {roll_result}!\")\n\n for player in self.players:\n if player[\"name\"] == player_name:\n if roll_result == self.num_sides:\n player[\"score\"] += self.num_sides\n self.result_label.config(text=f\"Congratulations, {player_name}! You win this round!\")\n\n self.num_rolls += 1\n if self.num_rolls == self.target_rolls:\n self.total_rounds += 1\n self.num_rolls = 0\n if self.total_rounds == self.num_players * self.target_rolls:\n self.end_game()\n break\n\n def end_game(self):\n # Display the winner(s) of the game\n self.roll_frame.pack_forget()\n max_score = max(player[\"score\"] for player in self.players)\n winners = [player[\"name\"] for player in self.players if player[\"score\"] == max_score]\n\n if len(winners) == 1:\n self.result_label.config(text=f\"Congratulations, {winners[0]}! You are the winner!\")\n else:\n self.result_label.config(text=\"It's a tie! The winners are:\\n\" + \"\\n\".join(winners))\n\ndef main():\n root = tk.Tk()\n game = DiceRollingGame(root)\n root.mainloop()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ammad994/Dice_roll_game","sub_path":"Dice_roll_game.py","file_name":"Dice_roll_game.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40062957657","text":"import random\nimport numpy as np\nimport pdb\n\nfrom ddpo import utils\nfrom ddpo.utils import imagenet\nimport inflect\n\ninflect_engine = inflect.engine()\n\n# --------------------------------- general api --------------------------------#\n\n\ndef batchify(prompt_fn, batch_size, **kwargs):\n inference_prompts, training_prompts, prompt_metadata = zip(\n *[prompt_fn(**kwargs) for _ in range(batch_size)]\n )\n return list(inference_prompts), training_prompts, prompt_metadata\n\n\ndef batchify_identical(prompt_fn, batch_size, **kwargs):\n inference_prompt, training_prompts, prompt_metadata = prompt_fn(**kwargs)\n inference_batch = [inference_prompt for _ in range(batch_size)]\n training_batch = [training_prompts for _ in range(batch_size)]\n metadata_batch = [prompt_metadata for _ in range(batch_size)]\n return inference_batch, training_batch, metadata_batch\n\n\ndef make_prompts(fn_name, batch_size, identical_batch=False, **kwargs):\n prompt_fn = globals()[fn_name]\n if identical_batch:\n return batchify_identical(prompt_fn, batch_size, **kwargs)\n else:\n return batchify(prompt_fn, batch_size, **kwargs)\n\n\n# ---------------------------- specific experiments ----------------------------#\n\n\ndef person_pet(evaluate=False):\n training_prompts = [\"a photo of a person with their pet\"]\n inference_prompt = 
random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef consistent_animals(evaluate=False):\n inference_prompt = \"a husky and a shoebill stork on the beach in a single image\"\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef get_random_class(idx=None, low=None, high=None):\n if idx is not None:\n label = imagenet.classes[idx]\n elif low is not None and high is not None:\n idx = random.randint(low, high)\n label = imagenet.classes[idx]\n else:\n label = random.choice(imagenet.classes)\n # if ',' in label:\n # label = label.split(',')[0]\n return label\n\n\ndef consistent_imagenet_animals(colors=False):\n class1 = get_random_class()\n class2 = get_random_class()\n if colors:\n inference_prompt = (\n f\"a realistic photo of a {random.choice(imagenet.colors)} {class1} and \"\n f\"a {random.choice(imagenet.colors)} {class2}\"\n )\n else:\n inference_prompt = f\"a realistic photo of a {class1} and a {class2}\"\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef consistent_imagenet_animals_3(colors=False):\n class1 = get_random_class()\n class2 = get_random_class()\n class3 = get_random_class()\n if colors:\n inference_prompt = (\n f\"a realistic photo of a {random.choice(imagenet.colors)} {class1}, \"\n f\"a {random.choice(imagenet.colors)} {class2}, and \"\n f\"a {random.choice(imagenet.colors)} {class3}\"\n )\n else:\n inference_prompt = (\n f\"a realistic photo of a {class1}, a {class2}, and a {class3}\"\n )\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef n_fingers(evaluate=False):\n n = random.randint(1, 4)\n inference_prompt = f'a photo of a hand holding up {n} finger{\"s\" if n > 1 else \"\"}'\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef imagenet_single(evaluate=False, idx=None):\n class1 = get_random_class(idx=idx)\n inference_prompt = f\"a realistic photo of a {class1}\"\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef imagenet_aesthetic(evaluate=False):\n class1 = get_random_class()\n training_prompts = [f\"a realistic photo of a {class1}\"]\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef imagenet_simple(evaluate=False, idx=None):\n class1 = get_random_class(idx=idx)\n inference_prompt = f\"a {class1}\"\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef imagenet_dogs(evaluate=False, idx=None):\n class1 = get_random_class(idx=idx, low=151, high=268)\n training_prompts = [f\"{class1}\"]\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef animal_debug(evaluate=False, idx=None):\n training_prompts = [\"a peacock\"]\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef simple_dogs(evaluate=False, idx=None):\n class1 = get_random_class(idx=idx, low=151, high=268)\n training_prompts = [f\"{class1}\"]\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef imagenet_animals(evaluate=False, idx=None):\n class1 = get_random_class(idx=idx, low=0, high=397)\n training_prompts = [f\"{class1}\"]\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef from_file(loadpath, evaluate=False, idx=None):\n prompts 
= utils.load_lines(loadpath)\n if idx is not None:\n inference_prompt = prompts[idx]\n else:\n inference_prompt = random.choice(prompts)\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef vqa_dataset(loadpath, max_samples=None, evaluate=False):\n dataset = utils.load_general_prompts(loadpath)\n entry = random.choice(dataset)\n training_prompts = [entry[\"prompt\"]]\n inference_prompt = entry[\"prompt\"]\n metadata = entry\n return inference_prompt, training_prompts, metadata\n\n\ndef manual(prompts, evaluate=False):\n training_prompts = prompts\n inference_prompt = random.choice(training_prompts)\n return inference_prompt, training_prompts, {}\n\n\ndef nouns_activities(nouns_path, activities_path, evaluate=False):\n nouns = utils.load_lines(nouns_path)\n activities = utils.load_lines(activities_path)\n inference_prompt = (\n f\"{inflect_engine.a(random.choice(nouns))} {random.choice(activities)}\"\n )\n training_prompts = [inference_prompt]\n return inference_prompt, training_prompts, {}\n\n\ndef counting(nouns_path, number_range, evaluate=False):\n nouns = utils.load_lines(nouns_path)\n number = inflect_engine.number_to_words(random.randint(*number_range))\n noun = random.choice(nouns)\n plural_noun = inflect_engine.plural(noun)\n inference_prompt = f\"{number} {plural_noun}\"\n training_prompts = [inference_prompt]\n metadata = {\n \"questions\": [\n f\"How many {plural_noun} are there in this image?\",\n f\"What animal is in this image?\",\n ],\n \"answers\": [\n number,\n noun,\n ],\n }\n return inference_prompt, training_prompts, metadata\n","repo_name":"jannerm/ddpo","sub_path":"ddpo/training/prompts.py","file_name":"prompts.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"81"} +{"seq_id":"2387546563","text":"\"\"\"OJ URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom users.views import MainPageView, UserFormView, LoginPageView, LogoutPageView\nfrom submissions.views import EditorView\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom blog.views import BlogDisplay\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n #Home page url\n url(r'^$', MainPageView.as_view(),name='home'),\n\n # user registration url\n url(r'^register/$', UserFormView.as_view(),name='register'),\n\n #Login page url\n url(r'^login/$', LoginPageView.as_view(),name='login'),\n\n #Logout page url\n url(r'^logout/$', LogoutPageView.as_view(), name='logout'),\n\n # users app\n url(r'^users/',include('users.urls')),\n\n #submissions app\n url(r'^submissions/',include('submissions.urls')),\n\n #ide\n url(r'^ide/$', EditorView.as_view(), name='ide'),\n\n #blog\n url(r'^blog/$',BlogDisplay.as_view(),name='blog'),\n\n #problemset\n url(r'^problemset/',include('problemset.urls')),\n\n #zinnias blog\n url(r'^weblog/', include('zinnia.urls')),\n url(r'^comments/', include('django_comments.urls')),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"nishantnahata/OJ","sub_path":"OJ/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"9824638159","text":"from typing import Any, Generator, Type, TypeVar\nfrom datetime import date, datetime\n\nfrom json_codec.types import (\n ParseProcessResult,\n ParseProcessYield,\n TypeDecoder,\n ValidationError,\n)\n\nT = TypeVar(\"T\")\n\n\nclass DateTypeDecoder(TypeDecoder[date]):\n def parse(\n self, value: Any, *types: Type[Any]\n ) -> Generator[\n ParseProcessYield[Any], ParseProcessResult[Any], ParseProcessResult[date]\n ]:\n if not isinstance(value, str):\n return self._failure(ValidationError(f\"Expected string, got {value}\"))\n\n try:\n return self._success(datetime.strptime(value, \"%Y-%m-%d\").date())\n except ValueError:\n return self._failure(\n ValidationError(\n f\"Expected date in format YYYY-MM-DD, but {value} is not a valid value\"\n )\n )\n yield\n\n\ndef serialize_date(value: date) -> Any:\n return value.strftime(\"%Y-%m-%d\")\n","repo_name":"LuscasLeo/json_codec","sub_path":"json_codec/codecs/date_codec.py","file_name":"date_codec.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"42711970219","text":"from difflib import ndiff\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\ndef levenshtein_distance(str1, str2):\r\n counter = {\"+\": 0, \"-\": 0}\r\n distance = 0\r\n for edit_code, *_ in ndiff(str1, str2):\r\n if edit_code == \" \":\r\n distance += max(counter.values())\r\n counter = {\"+\": 0, \"-\": 0}\r\n else:\r\n counter[edit_code] += 1\r\n distance += max(counter.values())\r\n return distance\r\n\r\n\r\ndef get_similarity(a, b):\r\n return (len(a) - levenshtein_distance(b, a)) / len(a)\r\n\r\n\r\ndef get_nearest_date_from_day(day: int) -> datetime:\r\n now = datetime.now()\r\n weekday = now.weekday()\r\n\r\n nearest_date = now + timedelta(days=day - weekday + (day < weekday and 7 or 0))\r\n return 
nearest_date\r\n","repo_name":"Chiam2222/AppDev-Project","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43365502852","text":"import config\nfrom utils import save_checkpoint, load_checkpoint, save_some_examples\nfrom dataset import MapDataset\nfrom generator import Generator\nfrom discriminator import Discriminator\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\n\ndef train_epoch(generator, discriminator, opt_gen, opt_disc, gen_scaler, disc_scaler, l1_loss, bce_loss, loader):\n loop = tqdm(loader, leave=True)\n\n for idx, (x, y) in enumerate(loop):\n x, y = x.to(config.DEVICE), y.to(config.DEVICE)\n\n # Train discriminator\n with torch.cuda.amp.autocast_mode.autocast():\n y_fake = generator(x)\n\n disc_real = discriminator(x, y)\n disc_fake = discriminator(x, y_fake)\n disc_real_loss = bce_loss(disc_real, torch.ones_like(disc_real))\n disc_fake_loss = bce_loss(disc_fake, torch.zeros_like(disc_fake))\n disc_loss = (disc_real_loss + disc_fake_loss) / 2\n \n opt_disc.zero_grad()\n disc_scaler.scale(disc_loss).backward(retain_graph=True)\n disc_scaler.step(opt_disc)\n disc_scaler.update()\n\n # Train generator\n with torch.cuda.amp.autocast_mode.autocast():\n disc_fake = discriminator(x, y_fake)\n gen_fake_loss = bce_loss(disc_fake, torch.ones_like(disc_fake))\n gen_l1_loss = l1_loss(y_fake, y) * config.L1_LAMBDA\n gen_loss = gen_fake_loss + gen_l1_loss\n \n opt_gen.zero_grad()\n gen_scaler.scale(gen_loss).backward(retain_graph=True)\n gen_scaler.step(opt_gen)\n gen_scaler.update()\n\ndef main():\n discriminator = Discriminator().to(config.DEVICE)\n generator = Generator().to(config.DEVICE)\n\n opt_disc = optim.Adam(discriminator.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))\n opt_gen = optim.Adam(generator.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))\n\n bce_loss = nn.BCEWithLogitsLoss()\n l1_loss = nn.L1Loss()\n\n if config.LOAD_MODEL:\n load_checkpoint(config.CHECKPOINT_GEN, generator, opt_gen, config.LEARNING_RATE)\n load_checkpoint(config.CHECKPOINT_DISC, discriminator, opt_gen, config.LEARNING_RATE)\n \n train_dataset = MapDataset('data/maps/train')\n train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=config.NUM_WORKERS)\n val_dataset = MapDataset('data/maps/val')\n val_loader = DataLoader(val_dataset, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=config.NUM_WORKERS)\n\n gen_scaler = torch.cuda.amp.grad_scaler.GradScaler()\n disc_scaler = torch.cuda.amp.grad_scaler.GradScaler()\n\n for epoch in range(config.NUM_EPOCHS):\n train_epoch(generator, discriminator, opt_gen, opt_disc, gen_scaler, disc_scaler, l1_loss, bce_loss, train_loader)\n\n if config.SAVE_MODEL:\n save_checkpoint(generator, opt_gen, filename=config.CHECKPOINT_GEN)\n save_checkpoint(discriminator, opt_gen, filename=config.CHECKPOINT_DISC)\n \n save_some_examples(generator, val_loader, epoch, folder='eval')\n\nif __name__ == '__main__':\n main()\n","repo_name":"i59korotkov/pytorch_gans","sub_path":"Pix2Pix/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22280701901","text":"import socket\r\nimport threading\r\nimport time\r\n\r\n\r\nclass UDP_Server:\r\n def 
_get_local_ip_addr(self):\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('www.baidu.com', 80))\r\n ip_addr = s.getsockname()[0]\r\n finally:\r\n s.close()\r\n return ip_addr\r\n\r\n def __init__(self, port=1234):\r\n self.local_ip = self._get_local_ip_addr()\r\n self.port = port\r\n self.client_ip = None\r\n self.send_socket = None\r\n self.monitor_socket = None\r\n self.monitor_thread = None\r\n\r\n def _thread_monitor(self):\r\n while True:\r\n try:\r\n data, _ = self.monitor_socket.recvfrom(\r\n 1024, socket.MSG_DONTWAIT)\r\n if data.decode() == '__bye__':\r\n self.client_ip = None\r\n self.monitor_socket.close()\r\n self.monitor_socket = None\r\n self.send_socket.close()\r\n self.send_socket = None\r\n print(\"PC Disconnected\")\r\n return\r\n except BlockingIOError:\r\n time.sleep(1)\r\n\r\n def is_connect(self):\r\n return (self.client_ip is not None)\r\n\r\n def connect(self, repeat=10, interval=1):\r\n connected = False\r\n # Set socket for broadcast\r\n net_addr_idx = self.local_ip.rindex('.')\r\n broadcast_addr = self.local_ip[:net_addr_idx] + '.255'\r\n s_broadcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s_broadcast.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST |\r\n socket.SO_REUSEADDR, True)\r\n # Set socket for listening (blocking with timeout)\r\n s_listen = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s_listen.bind(('', self.port + 1))\r\n s_listen.setblocking(True)\r\n s_listen.settimeout(1)\r\n # Broadcast Jetson's ipv4 addr for #repeat times\r\n for _ in range(repeat):\r\n s_broadcast.sendto('__search__'.encode(),\r\n (broadcast_addr, self.port))\r\n # Try to get the PC's addr response each broadcast\r\n try:\r\n data, s_addr = s_listen.recvfrom(1024)\r\n if data.decode() == '__connect__':\r\n s_listen.close()\r\n connected = True\r\n self.client_ip = s_addr[0]\r\n break\r\n except socket.timeout:\r\n time.sleep(interval)\r\n s_broadcast.close()\r\n if connected:\r\n self.monitor_socket = socket.socket(\r\n socket.AF_INET, socket.SOCK_DGRAM)\r\n self.monitor_socket.bind(('', self.port+1))\r\n self.monitor_socket.setblocking(False) # non-blocking socket\r\n self.monitor_thread = threading.Thread(target=self._thread_monitor)\r\n self.monitor_thread.setDaemon(True)\r\n self.monitor_thread.start()\r\n self.send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n return self.client_ip if connected else None\r\n\r\n def send_msg(self, msg):\r\n if self.send_socket is None:\r\n return False\r\n self.send_socket.sendto(msg.encode(), (self.client_ip, self.port))\r\n return True\r\n","repo_name":"zjuyzj/jetson-gesture-control","sub_path":"server/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2213829137","text":"\"\"\"\r\n@author: Ashwin Srinivasan\r\n\"\"\"\r\n#importing header files and data preparation\r\nimport numpy as np\r\nimport scipy.io\r\nimport math\r\nfrom copy import deepcopy\r\nimport sys\r\nimport warnings\r\nfrom matplotlib import pyplot as plt\r\nplt.rcParams['figure.figsize'] = (16, 9)\r\nplt.style.use('ggplot')\r\n\r\ndata = scipy.io.loadmat('AllSamples.mat')\r\nX = list(list(data.items())[3][1])\r\ntX = []\r\ntY = []\r\nfor i in range(0,len(X)):\r\n tX.append(X[i][0])\r\n tY.append(X[i][1])\r\nX1 = np.array(list(zip(tX,tY)))\r\n\"\"\"\r\nfunction to compute the objective function to find optimal k value\r\n\"\"\"\r\ndef objectFunc(data, mean , di):\r\n 
s_err = 0\r\n    for i in range(0, len(data)):\r\n        s_err = s_err + errorRate(data[i][0], data[i][1],\r\n                          mean[int(di[i])][0],\r\n                          mean[int(di[i])][1])\r\n    return s_err\r\n\"\"\"\r\nfunction to calculate the total error\r\n\"\"\"\r\ndef errorRate(x1, y1, x2, y2):\r\n    power1 = math.pow((x1-x2), 2)\r\n    power2 = math.pow((y1-y2), 2)\r\n    totalPower = power1 + power2\r\n    return totalPower\r\n\r\n\"\"\"\r\nFunction to estimate the Euclidean distance\r\n\"\"\"\r\ndef euclideanDist(x, y, ax=1):\r\n    return np.linalg.norm(x - y, axis=ax)\r\n\r\n\"\"\"\r\nCentroid initialization for strategy 1 - Random Initialization\r\n\"\"\"\r\ndef initialize_random(k):\r\n    Xc = np.random.randint(0, np.max(X1), size=k)\r\n    Xy = np.random.randint(0, np.max(X1), size=k)\r\n    Centroid = np.array(list(zip(Xc, Xy)), dtype=int)\r\n    return Centroid\r\n\r\n\"\"\"\r\nCentroid initialization for strategy 2 - kmeans++\r\n\"\"\"\r\ndef initialize_optimal(data, k):\r\n    C = []\r\n    C.append(data[np.random.randint(\r\n            data.shape[0]), :])\r\n    for iterate in range(k - 1):\r\n        distance = []\r\n        for i in range(data.shape[0]):\r\n            p = data[i, :]\r\n            d1 = sys.maxsize\r\n            for j in range(len(C)):\r\n                temp_dist = euclideanDist(p, C[j], None)\r\n                d1 = min(d1, temp_dist)\r\n            distance.append(d1)\r\n        distance = np.array(distance)\r\n        NC = data[np.argmax(distance), :]\r\n        C.append(NC)\r\n        distance = []\r\n    return C\r\n\"\"\"\r\nFunction for the k-means algorithm.\r\nIterates until two consecutive centroid sets are identical,\r\ni.e. until the error value becomes 0 [till convergence]\r\n\"\"\"\r\ndef kmeans(K, strategy):\r\n    k = K\r\n    if strategy == 1:\r\n        Centroid = np.array(initialize_random(k))\r\n    if strategy == 2:\r\n        Centroid = np.array(initialize_optimal(X1,k))\r\n    Centroid_old = np.zeros(Centroid.shape)\r\n    cluster = np.zeros(len(X1))\r\n    err = euclideanDist(Centroid, Centroid_old, None)\r\n    while err != 0:\r\n        for i in range(len(X1)):\r\n            distances = euclideanDist(X1[i], Centroid)\r\n            clus = np.argmin(distances)\r\n            cluster[i] = clus\r\n        Centroid_old = deepcopy(Centroid)\r\n        for i in range(k):\r\n            points = [X1[j] for j in range(len(X1)) if cluster[j] == i]\r\n            Centroid[i] = np.mean(points, axis=0) \r\n        err = euclideanDist(Centroid, Centroid_old, None)\r\n    return Centroid,cluster\r\n\"\"\"\r\nFunction for plotting the graph\r\n\"\"\"\r\ndef plot_graph(strategy):\r\n    k_c = []\r\n    obj_func = []\r\n    k_c1 = []\r\n    obj_func1 = []\r\n    if strategy == 1:\r\n        for k in range(2, 11):\r\n            C,clusters = kmeans(k,1)\r\n            k_c.append(k)\r\n            obj_func.append(objectFunc(X1,C,clusters))\r\n        plt.plot(k_c, obj_func)\r\n        plt.scatter(k_c, obj_func)\r\n        plt.xlabel('K Value')\r\n        plt.ylabel('Objective Function')\r\n        plt.title('Plot to find Optimal K value')\r\n        plt.show()\r\n    if strategy == 2:\r\n        for k1 in range(2, 11):\r\n            C1,clusters1 = kmeans(k1,2)\r\n            k_c1.append(k1)\r\n            obj_func1.append(objectFunc(X1,C1,clusters1))\r\n            if k1 == 10:\r\n                plt.scatter(X1[:, 0], X1[:, 1], marker = '.',\r\n                    color = 'chartreuse', label = 'Data')\r\n                plt.scatter(C1[:-1, 0], C1[:-1, 1],marker = '*',\r\n                        color = 'red')\r\n                plt.scatter(C1[-1, 0], C1[-1, 1],marker = '*',\r\n                        color = 'red')\r\n                plt.legend()\r\n                plt.xlim(-2, 12)\r\n                plt.ylim(-2, 15)\r\n                plt.show()\r\n        plt.plot(k_c1, obj_func1)\r\n        plt.scatter(k_c1, obj_func1)\r\n        plt.xlabel('K Value')\r\n        plt.ylabel('Objective Function')\r\n        plt.title('Plot to find Optimal K value')\r\n        plt.show()\r\n\"\"\"\r\nMain Function\r\n\"\"\"\r\ndef main():\r\n    warnings.filterwarnings(\"ignore\")\r\n    print(\"K-means 
Using Strategy 1\")\r\n    print(\"Random Initialization - 1\")\r\n    plot_graph(1)\r\n    print(\"Random Initialization - 2\")\r\n    plot_graph(1)\r\n    print(\"K-means Using Strategy 2\")\r\n    print(\"Initialization - 1\")\r\n    print(\"Plot to represent K = 10\")\r\n    plot_graph(2)\r\n    print(\"Initialization - 2\")\r\n    print(\"Plot to represent K = 10\")\r\n    plot_graph(2)\r\n\r\n\"\"\"\r\nInvoking main function\r\n\"\"\"\r\nmain()\r\n","repo_name":"asrini56/Kmeans-Clustering","sub_path":"kmeans-clustering/project2Kmeans-main.py","file_name":"project2Kmeans-main.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19086259568","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef y(t, A, gamma, k, m):\n    return A*np.exp(-gamma*t)*np.cos(np.sqrt(k/m)*t)\n\nn = 101\n\nt_array_a = np.zeros(n)\ny_array_a = np.zeros(n)\ndt = 25 / (n-1)\nfor i in range(n):\n    t_array_a[i] = i * dt\n    y_array_a[i] = y(t_array_a[i], k=4, gamma=0.15, m=9, A=-0.3)\n\nt_array_b = np.linspace(0, 25, n)\ny_array_b = y(t_array_b, k=4, gamma=0.15, m=9, A=-0.3)\n\nplt.plot(t_array_a, y_array_a, color=\"red\")\nplt.plot(t_array_b, y_array_b, \"-.\")\nplt.grid()\nplt.xlim(0, 25)\nplt.legend([\"y(t) for-loop\", \"y(t) vectorized\"])\nplt.xlabel(\"time (seconds)\")\nplt.ylabel(\"height (m)\")\nplt.show()\n\n\"\"\"\nWe see that both sets of arrays plot the same line.\n\"\"\"","repo_name":"willidu/IN1900","sub_path":"Obliger/Uke 39/oscillating_spring.py","file_name":"oscillating_spring.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9620103917","text":"from django.urls import path\nfrom . import views\n\napp_name = \"food\"\n\nurlpatterns = [\n    path(\"create/\", views.CreateFood.as_view(), name=\"create\"),\n    path(\"detail/<int:pk>/\", views.FoodDetail.as_view(), name=\"detail\"),\n    path(\"list/\", views.FoodList.as_view(), name=\"list\"),\n    path(\"update/<int:pk>/\", views.UpdateFood.as_view(), name=\"update\"),\n    path(\"delete/<int:pk>/\", views.DeleteFood.as_view(), name=\"delete\"),\n    path(\"delete/success.html\", views.success_delete_food, name=\"success_delete\"),\n\n    path(\"deposit/<int:pk>/\", views.IncreaseReserves.as_view(), name='increase_reserves'),\n    path(\"deposit/success/<int:pk>/\", views.success_increase_reserves, name='success_increase_reserve'),\n\n    path('unit/list/', views.DistributionUnitList.as_view(), name='unit_list'),\n    path('unit/detail/<int:pk>/', views.DistributionUnitDetail.as_view(), name='unit_detail'),\n    path('unit/create/', views.CreateDistributionUnit.as_view(), name='unit_create'),\n    path('unit/update/<int:pk>/', views.UpdateDistributionUnit.as_view(), name='unit_update'),\n    path('unit/delete/<int:pk>/', views.DeleteDistributionUnit.as_view(), name='unit_delete'),\n    path('unit/delete/success.html', views.success_delete_food_unit, name='success_delete_unit')\n]","repo_name":"jerryshikanga/refugee_system","sub_path":"food/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30603187991","text":"import pytest\nimport json\nfrom entities import Event\nfrom services import Proxy\n\nfrom settings import E2EMONITORING_SERVICE, E2EMONITORING_DOWN, E2EMONITORING_UP\n\nfrom . 
import load_up, load_down\n\nproxy = Proxy()\n\n\n@pytest.fixture\ndef up():\n return load_up()\n\n\n@pytest.fixture\ndef down():\n return load_down()\n\n\ndef test_to_json_up(up):\n assert json.loads(proxy.event_to_json(Event.create_event(up))) == {\n \"u_business_service\": E2EMONITORING_SERVICE,\n \"u_priority\": E2EMONITORING_UP,\n \"u_short_description\": \"unittest is Up\",\n \"u_description\": \"unittest is Up (for testing purpose). It was down for 0:01:39.\",\n }\n\n\ndef test_to_json_down(down):\n assert json.loads(proxy.event_to_json(Event.create_event(down))) == {\n \"u_business_service\": E2EMONITORING_SERVICE,\n \"u_priority\": E2EMONITORING_DOWN,\n \"u_short_description\": \"unittest is Down\",\n \"u_description\": \"unittest is Down: for testing purpose.\",\n }\n","repo_name":"ebreton/uptimerobot-proxy","sub_path":"test/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71716249545","text":"def numberOfRightTriangles(p):\n count = 0\n for o in range(1, int(p//2)):\n num = p**2 - 2*p*o\n den = 2*p-2*o\n a = num//den\n if a < o:\n break\n if (num%den==0):\n count+=1 \n return count\n\nmax_value = 0\nmax_count = 0\nfor p in range(3,1001):\n n = numberOfRightTriangles(p)\n if n>max_count:\n max_count = numberOfRightTriangles(p)\n max_value = p\nprint(max_count, max_value)","repo_name":"benfetch/Project-Euler-Python","sub_path":"problem39.py","file_name":"problem39.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15971218215","text":"from PIL import Image\nfrom tqdm import tqdm\nfrom copy import copy\nimport hashlib\nimport os\nimport json\nimport importlib\nimport sys\n\nsys.path.append(\"./src/\")\nimport models\n\n### Parameters #########################\nalgorithm = \"sha256\"\nhash_dict_name = \"./converted/hash_dict\"\nconverted_dir = \"./converted/\"\npantie_dir = \"./dream/\"\ncheckflags = {\n \"default\",\n \"with_bra\",\n \"is_lace\",\n \"add_sign\",\n \"stitch_correction\",\n \"is_frill\",\n \"use_ribbon_mesh\",\n \"is_4k\",\n \"is_knee\",\n \"with_garter\",\n}\n########################################\n\n\ndef read_hash(fname, algorithm=\"sha256\"):\n try:\n with open(fname, \"r\", encoding=\"UTF-8\") as f:\n hash = hashlib.new(algorithm)\n tmp = f.read()\n hash.update(tmp.encode(\"utf-8\"))\n except:\n return False\n return hash.hexdigest()\n\n\ndef make_hash_dict(checklist, algorithm=\"sha256\"):\n hash_dict = {\"algorithm\": algorithm}\n for f in checklist:\n hash_dict[f.split(\"/\")[-1]] = read_hash(f + \".py\", algorithm)\n return hash_dict\n\n\ndef write_hash_dict(fname, hash_dict):\n try:\n with open(fname, \"w\", encoding=\"utf-8\") as f:\n json.dump(hash_dict, f)\n except:\n return False\n return True\n\n\ndef read_hash_dict(fname):\n try:\n with open(fname, \"r\", encoding=\"utf-8\") as f:\n hash_dict = json.load(f)\n except:\n return None\n return hash_dict\n\n\nchecklist = models.models_namelist\nchecklist = [\"./src/models/\" + f for f in checklist]\nchecklist.append(\"./src/utils/imgproc\")\n\n# Check hash of the models, then make an updated model list\nlatest_hash = make_hash_dict(checklist, algorithm)\nkeys = latest_hash.keys()\nupdated = []\nif os.path.exists(hash_dict_name):\n previous_hash = read_hash_dict(hash_dict_name)\n for key in keys:\n try:\n if previous_hash[key] != latest_hash[key]:\n 
updated.append(key)\n except: # new model is available\n updated.append(key)\n if \"imgproc\" in updated:\n updated = list(keys)[1:-1] # remove algorithm and imgproc\nelse:\n updated = list(keys)[1:-1]\n\n# Update panties\nf = open(\"./webapp.json\", mode=\"r\")\noptions = json.load(f)\npanties = set(os.listdir(pantie_dir))\nfor model in models.models_namelist:\n print(f\"{model} is updating...\")\n module = importlib.import_module(\"models.\" + model)\n try:\n available_options = checkflags & set(options[model])\n except:\n available_options = set()\n available_options.add(\"default\")\n for option in available_options:\n print(f\"Process: {option} of {model}...\")\n fdir = f\"{converted_dir}{model}/{option}/\"\n os.makedirs(fdir, exist_ok=True)\n # os.makedirs(f'{converted_dir}{model}/default/', exist_ok=True)\n if model in updated:\n nonexists = panties\n else:\n # nonexists = sorted(panties - set(os.listdir(f'{converted_dir}{model}/default/')))\n nonexists = sorted(panties - set(os.listdir(fdir)))\n setup = copy(options[\"all\"])\n if option != \"default\":\n setup[option] = not setup[option]\n patcher = module.patcher(options=setup)\n for pantie in tqdm(sorted(nonexists)):\n if hasattr(patcher, \"noribbon\"):\n patched = patcher.patch(Image.open(pantie_dir[:-1] + \"_noribbon/\" + pantie), transparent=True)\n else:\n patched = patcher.patch(Image.open(pantie_dir + pantie), transparent=True)\n patcher.save(patched, f\"{fdir}{pantie}\")\n\n# Update hash dictionary\nwrite_hash_dict(hash_dict_name, latest_hash)\n","repo_name":"TenteEEEE/quiche_pantie_patch","sub_path":"update_all.py","file_name":"update_all.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"81"} +{"seq_id":"40165080733","text":"import os\nimport os.path\nimport shutil\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\nfrom hamcrest import assert_that, string_contains_in_order\n\nfrom testing_utils import DeviceStub, FIXTURE_DIR, change_dir, run_invoke_cmd\nfrom vznncv.stlink.tools.wrapper._cli import main\n\n\n@pytest.fixture\ndef dummy_usb_devices():\n with patch('usb.core.find', autospec=True) as find_mock:\n find_mock.return_value = [\n DeviceStub(idVendor=0x0BDA, idProduct=0x0411, serial_number=None),\n DeviceStub(idVendor=0x0483, idProduct=0x374e, serial_number='002F003D3438510B34313939')\n ]\n yield\n\n\n@pytest.fixture\ndef demo_project_path(tmp_path: Path):\n project_dir = tmp_path / 'stm_project'\n shutil.copytree(os.path.join(FIXTURE_DIR, 'stm_project_stub'), project_dir)\n yield project_dir\n\n\n@pytest.fixture\ndef tmp_bin_dir(tmp_path: Path):\n tmp_bin = tmp_path / 'bin'\n os.makedirs(tmp_bin, exist_ok=True)\n\n original_environ = os.environ.copy()\n path_var = f\"{tmp_bin}{os.pathsep}{os.environ.get('PATH', '')}\"\n try:\n os.environ['PATH'] = path_var\n yield tmp_bin\n finally:\n os.environ.clear()\n os.environ.update(original_environ)\n\n\n@pytest.fixture\ndef openocd_stub_path(tmp_bin_dir):\n openocd_path = tmp_bin_dir.joinpath('openocd')\n openocd_path.write_text(r'''\n#!/bin/sh\necho \"OpenOCD stub\" 1>&2\necho \"OpenOCD args: $@\" 1>&2\n'''.lstrip())\n openocd_path.chmod(0o777)\n yield openocd_path\n\n\n@pytest.fixture\ndef pyocd_stub_path(tmp_bin_dir):\n openocd_path = tmp_bin_dir.joinpath('pyocd')\n openocd_path.write_text(r'''\n#!/bin/sh\necho \"PyOCD stub\" 1>&2\necho \"PyOCD args: $@\" 1>&2\n'''.lstrip())\n openocd_path.chmod(0o777)\n yield openocd_path\n\n\ndef 
test_openocd_usage(demo_project_path: Path, openocd_stub_path: Path, dummy_usb_devices, capfd):\n with change_dir(demo_project_path):\n exit_code = run_invoke_cmd(main, ['upload-app', '--backend', 'openocd', '--elf-file', 'build'])\n\n assert exit_code == 0\n out_result = capfd.readouterr()\n assert_that(out_result.err, string_contains_in_order(\n 'Target elf file ', 'build/demo.elf',\n 'Target ST-Link device: ST-Link V3E',\n 'Upload backend: \"openocd\"',\n 'Run command', 'openocd', '--file', 'openocd_stm.cfg', '--command', 'program', 'demo.elf', 'verify reset exit',\n 'OpenOCD stub',\n 'OpenOCD args', '--file', 'openocd_stm.cfg', '--command', 'program', 'demo.elf', 'verify reset exit',\n 'Complete',\n ))\n\n\ndef test_pyocd_usage(demo_project_path: Path, pyocd_stub_path: Path, dummy_usb_devices, capfd):\n with change_dir(demo_project_path):\n exit_code = run_invoke_cmd(main, ['upload-app', '--backend', 'pyocd', '--elf-file', 'build', '--pyocd-target',\n 'stm32f411ce'])\n\n assert exit_code == 0\n out_result = capfd.readouterr()\n assert_that(out_result.err, string_contains_in_order(\n 'Target elf file ', 'build/demo.elf',\n 'Target ST-Link device: ST-Link V3E',\n 'Upload backend: \"pyocd\"',\n 'Run command', 'pyocd', 'flash', '--target', 'stm32f411ce', '--format', 'elf', 'demo.elf',\n 'PyOCD stub',\n 'PyOCD args', 'flash', '--target', 'stm32f411ce', '--format', 'elf', 'demo.elf',\n 'Complete',\n ))\n","repo_name":"vznncv/vznncv-stlink-tools-wrapper","sub_path":"tests/test_upload_app.py","file_name":"test_upload_app.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1324055067","text":"# -*- coding: utf-8 -*-\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg1= cv2.imread(r\"E:\\data\\dog1.jpg\")\r\nimg1 =cv2.resize(img1,(400,500))\r\n\r\n\r\n#creating i mage border\r\n#parameters(img,border_width(4-sides),bordertype,val_border)\r\n#top,bottom,right,left\r\nimg1= cv2.copyMakeBorder(img1,10,10,15,15,cv2.BORDER_CONSTANT,value=[255,0,125])\r\n\r\n\r\n\r\ncv2.imshow(\"res\",img1)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"ANUSHRAV01/spyder-project","sub_path":"spyder projects/demo8_imageborder.py","file_name":"demo8_imageborder.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28268843740","text":"import numpy as np\nimport torch as th\n\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam, SGD\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR\n\nfrom torch.nn import MSELoss, L1Loss\n\nimport os\n\nfrom training_procedure import training_procedure, load_model\nfrom networks.MSGNet import MSGNet\n\nfrom utils.dataset_loader import SintelCropped_hf, Sintel_FullTraining\nfrom utils.arg_parser import parse_args_joint_train as parse_args, \\\n get_exp_name, log_criterion\nfrom utils.losses import compose_criterion\n\n\n'''\nThis is the executable (via the terminal) file, \nit runs all the training with model loading/saving, data processing, training procedure logging.\n'''\n\n# First of all, all parameters of training must be collected. \n# For clarifications, it is elaborated in arg_parser.py file.\nopt = parse_args()\n\n# Let's fix anything we can to make experiments reproducible. \nth.manual_seed(1234)\nth.cuda.manual_seed(1234)\n\n# It is assumed by default that all the training is run via GPU. 
\n# Unfortunately, to change training processor to CPU, all the code must be rewritten\n# (In fact, only \".cuda()\" phrases should be discarded).\ngpu_id = opt.gpu_id # 0, by default\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=str(gpu_id)\n\ndef initialize_training(opt):\n '''\n Network, Optimizer (maybe, with Scheduler) and Criterion initialization. \n '''\n model = MSGNet(m=opt.m, nnl_type=opt.nnl_type, \n training_type =opt.training_type,\n num_channels =opt.num_channels,\n num_channels_y1=opt.num_channels_y1,\n num_channels_d1=opt.num_channels_d1,\n num_channelsT =opt.num_channelsT,\n ).cuda()\n\n lr0 = float(opt.lr0)\n optimizer = Adam(model.parameters(), lr=lr0)\n milestones = list(map(int, opt.milestones.strip('[]').split(','))) \\\n if opt.milestones != '' else []\n scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=0.1) # it was not used at all.\n criterion = compose_criterion(opt)\n\n return model, lr0, optimizer, milestones, scheduler, criterion\n\ndef initialize_network(opt, model, optimizer, scheduler, save_path):\n '''\n Loads the model, prepares (creates or loads) losses arrays.\n Creates a folder for experiment if there is no one.\n\n \"start_epoch\" will load the model saved at this epoch. \n '''\n start_epoch = opt.start_epoch\n\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n train_losses = []\n val_losses = []\n np.save(save_path+'train_losses.npy', train_losses)\n np.save(save_path+'val_losses.npy', val_losses)\n np.save(save_path+'training_time.npy', 0)\n start_epoch = 1\n else:\n if start_epoch is None:\n train_losses = []\n val_losses = []\n np.save(save_path+'train_losses.npy', train_losses)\n np.save(save_path+'val_losses.npy', val_losses)\n start_epoch = 1\n else:\n last_saved_epoch = int(np.load(save_path+'last_saved_epoch.npy'))\n if start_epoch > last_saved_epoch:\n start_epoch = last_saved_epoch\n\n train_losses = list(np.load(save_path+'train_losses.npy'))\n val_losses = list(np.load(save_path+'val_losses.npy'))\n \n train_div, train_mod = opt.num_images_train//opt.train_batchsize,\\\n opt.num_images_train%opt.train_batchsize\n val_div, val_mod = opt.num_images_val//opt.val_batchsize,\\\n opt.num_images_val%opt.val_batchsize \n \n train_num_batches = train_div if train_mod == 0 else train_div + 1\n val_num_batches = val_div if val_mod == 0 else val_div + 1\n\n np.save(save_path+'train_losses.npy', \n train_losses[:train_num_batches])\n np.save(save_path+'val_losses.npy', \n val_losses[:val_num_batches])\n\n model, optimizer, scheduler = load_model(start_epoch,\n model, optimizer, scheduler, save_path)\n\n\ndef initialize_datasets(opt):\n '''\n Initializes data processing. 
Returns data loaders.\n\n To use the dataset, another from Sintel, new functions must be written.\n '''\n th.manual_seed(0)\n\n set_train = Sintel_FullTraining(mode='train', \n num_images=opt.num_images_train,\n training_type=opt.training_type)\n\n train_loader = DataLoader(set_train, batch_size=opt.train_batchsize,\n shuffle=True, num_workers=5) \n\n set_val = Sintel_FullTraining(mode='val', \n num_images=opt.num_images_val,\n training_type='ordinary')\n\n val_loader = DataLoader(set_val, batch_size=opt.val_batchsize, \n shuffle=False, num_workers=5)\n\n return train_loader, val_loader\n\n#######################################################################\n#######################################################################\n#######################################################################\n\nif __name__ == '__main__':\n model, lr0, optimizer, milestones, scheduler, criterion = \\\n initialize_training(opt)\n \n exp_name = get_exp_name(opt)\n\n save_path = './saved_models/MSGNet/'+exp_name+'/'\n start_epoch = 1 if opt.start_epoch is None else opt.start_epoch\n\n initialize_network(opt, model, optimizer, scheduler, save_path)\n\n train_div, train_mod = opt.num_images_train//opt.train_batchsize,\\\n opt.num_images_train%opt.train_batchsize\n val_div, val_mod = opt.num_images_val//opt.val_batchsize,\\\n opt.num_images_val%opt.val_batchsize \n \n train_num_batches = train_div if train_mod == 0 else train_div + 1\n val_num_batches = val_div if val_mod == 0 else val_div + 1\n\n np.save(save_path+'exp_params.npy', {\n 'num_images_train':opt.num_images_train, \n 'num_images_val':opt.num_images_val,\n 'train_batchsize':opt.train_batchsize,\n 'val_batchsize':opt.val_batchsize,\n 'train_num_batches':train_num_batches,\n 'val_num_batches':val_num_batches,\n 'save_path':save_path,\n 'lr0':lr0, 'milestones':milestones,\n 'num_epochs':opt.num_epochs,\n 'save_each_epoch':opt.save_each_epoch,\n 'm':opt.m,\n 'num_channels' :opt.num_channels,\n 'num_channels_y1':opt.num_channels_y1,\n 'num_channels_d1':opt.num_channels_d1,\n 'num_channelsT' :opt.num_channelsT,\n 'training_type':opt.training_type,\n 'nnl_type':opt.nnl_type,\n 'pad_type':opt.pad_type,\n 'criterion':log_criterion(opt)})\n\n train_loader, val_loader = initialize_datasets(opt)\n\n # Runs the training...\n training_procedure(start_epoch, opt.num_epochs, model, optimizer, scheduler, \n criterion, train_loader, val_loader, \n save_path, opt.save_each_epoch, opt.no_val)","repo_name":"AndreiDavydov/Guided-Depth-Super-Resolution","sub_path":"train_MSGNet.py","file_name":"train_MSGNet.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37602003179","text":"\"\"\"\nThis is the (unofficial) Python API for EZTV.it\n\nUsing this code, you can manage to get the information regarding any TV Show\nwhich is listed on EZTV.it See how to use it thanks to the file \"APIExample.py\"\n\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\n\nURL = \"https://eztv.ag\"\nQUALITY_PREF = \"720p\"\n\n\nclass EztvException(Exception):\n \"\"\"\n Base exception for this API\n \"\"\"\n\n def __init__(self, message, errors):\n \"\"\"\n Class constructor\n \"\"\"\n\n # Call the base class constructor with the parameters it needs\n Exception.__init__(self, message)\n self.errors = errors\n\n\nclass TVShowNotFound(EztvException):\n \"\"\"\n TV Show Not Found Exception\n \"\"\"\n\n\nclass SeasonNotFound(EztvException):\n \"\"\"\n Season Not Found 
Exception\r\n    \"\"\"\r\n\r\n\r\nclass EpisodeNotFound(EztvException):\r\n    \"\"\"\r\n    Episode Not Found Exception\r\n    \"\"\"\r\n\r\n\r\nclass EztvAPI(object):\r\n    \"\"\"\r\n    EztvAPI Main Handler\r\n    \"\"\"\r\n\r\n    _instance = None\r\n    _id_tv_show = None\r\n    _season_and_episode = {}\r\n    _patterns = [\r\n        r\"S(\\d+)E(\\d+)\",  # Matches SXXEYY (e.g. S01E10)\r\n        r\"(\\d+)x(\\d+)\",  # Matches SSxYY (e.g. 01x10)\r\n    ]\r\n\r\n    def __new__(cls, *args, **kwargs):\r\n        \"\"\"\r\n        __new__ builtin\r\n        \"\"\"\r\n        if not cls._instance:\r\n            cls._instance = super(EztvAPI, cls).__new__(\r\n                cls, *args, **kwargs)\r\n        return cls._instance\r\n\r\n    def _match_pattern(self, pattern, episode):\r\n        regex = re.search(pattern, episode)\r\n        if regex is None:  # Yeah, I try to be a positive person.\r\n            return\r\n\r\n        season_tv_show = regex.group(1)\r\n        episode_tv_show = regex.group(2)\r\n        regex = re.search(r\"href=\\\"(.*)\\\" \", episode)\r\n        magnet_link = regex.group(1)\r\n\r\n        return (season_tv_show, episode_tv_show, magnet_link.split('\"')[0])\r\n\r\n    def tv_show(self, name):\r\n        \"\"\"\r\n        Fetches the show matching $name and returns a $self instance.\r\n        Might raise a TVShowNotFound exception\r\n        \"\"\"\r\n        # all strings are in lowercase\r\n        name = name.lower()\r\n        data = {\r\n            'SearchString': '',\r\n            'SearchString1': name,\r\n            'search': 'search'\r\n        }\r\n\r\n        req = requests.post(URL + \"/search/\", data=data, timeout=5)\r\n        self.content = requests.get(req.url, timeout=5).content\r\n\r\n        # load the tv show data\r\n        self.load_tv_show_data()\r\n        return self._instance\r\n\r\n    def load_tv_show_data(self):\r\n        \"\"\"\r\n        load the data, create a dictionary structure with all seasons,\r\n        episodes, magnet.\r\n        \"\"\"\r\n        soup = BeautifulSoup(self.content, 'html.parser')\r\n\r\n        self._season_and_episode = {}\r\n        episodes = str(soup('a', {'class': 'magnet'})).split('</a>')\r\n        for epi in episodes:\r\n            for pat in self._patterns:\r\n                data = self._match_pattern(pat, epi)\r\n                if data is None:\r\n                    continue\r\n                self.add_season_and_episode(data[0], data[1], data[2])\r\n        return self._instance\r\n\r\n    def add_season_and_episode(self, num_season, num_episode, magnet_link):\r\n        \"\"\"\r\n        insert into the dictionary the season and the episode with the\r\n        specific magnet link \r\n        but also consider quality preference (QUALITY_PREF)\r\n        \"\"\"\r\n        num_season = int(num_season)\r\n        num_episode = int(num_episode)\r\n        magnet_link = magnet_link.replace('&amp;', '&')\r\n\r\n        if (num_season not in self._season_and_episode):\r\n            self._season_and_episode[num_season] = {}\r\n\r\n        if (num_episode not in self._season_and_episode[num_season]):\r\n            self._season_and_episode[num_season][num_episode] = magnet_link\r\n        elif (QUALITY_PREF in magnet_link):\r\n            self._season_and_episode[num_season][num_episode] = magnet_link\r\n\r\n        return self._instance\r\n\r\n    def episode(self, num_season=None, num_episode=None):\r\n        \"\"\"\r\n        specific episode\r\n        return magnet link of episode\r\n        might raise SeasonNotFound or EpisodeNotFound exceptions\r\n        \"\"\"\r\n        # specific episode\r\n        if (num_season is not None and num_episode is not None):\r\n            # verifying the season exists\r\n            if (num_season not in self._season_and_episode):\r\n                raise SeasonNotFound(\r\n                    'The season %s does not exist.' % num_season, None)\r\n\r\n            # verifying the episode exists\r\n            if (num_episode not in self._season_and_episode[num_season]):\r\n                raise EpisodeNotFound(\r\n                    'The episode %s does not exist.' 
% num_episode, None)\r\n\r\n        return self._season_and_episode[num_season][num_episode]\r\n\r\n    def season(self, num_season=None):\r\n        \"\"\"\r\n        specific season\r\n        return data structure (dictionary)\r\n        might raise SeasonNotFound exceptions\r\n        \"\"\"\r\n        # specific season, all episodes\r\n        if (num_season is not None):\r\n            # verifying the season exists\r\n            if (num_season not in self._season_and_episode):\r\n                raise SeasonNotFound(\r\n                    'The season %s does not exist.' % num_season, None)\r\n\r\n            return self._season_and_episode[num_season]\r\n\r\n        # all seasons\r\n        else:\r\n            return self._season_and_episode\r\n\r\n    def seasons(self):\r\n        \"\"\"\r\n        all seasons\r\n        \"\"\"\r\n        return self._season_and_episode\r\n\r\n    def update(self):\r\n        \"\"\"\r\n        load the data, create a dictionary structure with all seasons,\r\n        episodes, magnet.\r\n        \"\"\"\r\n        return self.load_tv_show_data()\r\n","repo_name":"PaulSec/API-EZTV.it","sub_path":"eztv_api.py","file_name":"eztv_api.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"81"} +{"seq_id":"19469947890","text":"from flask import Flask, render_template, request\r\nfrom werkzeug.utils import secure_filename\r\nfrom flask import jsonify\r\nimport sys\r\nimport requests\r\nimport os\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['ALLOWED_EXTENSIONS'] = set(['pdf', 'png', 'jpg', 'jpeg'])\r\n\r\ndef allowed_file(filename):\r\n    return '.' in filename and \\\r\n           filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']\r\n\r\n@app.route('/')\r\ndef upload():\r\n    return render_template('Predict.html')\r\n\r\n@app.route('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file():\r\n    if request.method == 'POST':\r\n        files = request.files.getlist(\"pic[]\")\r\n        ##for f in files:\r\n            ##f.replace(' ', '_')\r\n        print(files)\r\n        str1 = {}\r\n        for file in files:\r\n            tmp = file.filename\r\n            # strip any spaces out of the uploaded filename\r\n            fname = tmp.replace(' ', '')\r\n            fname1 = fname.lower()\r\n            print(fname1)\r\n            if file and allowed_file(fname1):\r\n                ##file.filename.replace(' ', '-')\r\n                ##print(file.filename)\r\n                #dir_path = os.path.dirname(os.path.realpath(file.filename))\r\n                #print(dir_path)\r\n                #file.save(dir_path, name='Newname')\r\n                file.save(secure_filename(fname1))\r\n                f = {'file': open(fname1,'rb')}\r\n                r = requests.post(\"https://predictapp.azurewebsites.net/predict\", files=f)\r\n                print(r.text)\r\n                ##dic_tmp = {}\r\n                dic_tmp = r.json()\r\n                if dic_tmp.get('predictionHealthy') > dic_tmp.get('predictionUnhealthy'):\r\n                    str1[file.filename] = str(dic_tmp) + '\\n\\n\\n\\nFinal result: The cow is healthy'\r\n                else:\r\n                    str1[file.filename] = str(dic_tmp) + '\\n\\n\\n\\nFinal result: The cow is unhealthy'\r\n            else:\r\n                str1[file.filename] = 'file not supported'\r\n        print(str1)\r\n        return render_template('Result.html', result = str1)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug = True)\r\n","repo_name":"GengweiChen/CC_project","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27348589270","text":"from linguappt.vocab_ppt import VocabPPT\nfrom linguappt.es.vocab_meta import SpanishVocabMeta\nimport os\nimport json\n\nclass SpanishVocabPPT(VocabPPT):\n    \"\"\"Create Vocabulary PPT for Spanish study\n\n    Attributes:\n        content (list of dict): read from csv file\n        word_distrubtion (dict): key is PoS, e.g., ``noun``, ``verb``, ``adj``, value is list of vocabularies\n    \"\"\"\n\n    _template_dir = os.path.dirname(__file__)\n    _templates 
= {\n \"classic\": os.path.join(_template_dir, 'templates/vocab_spanish_classic.pptx'),\n }\n lang = 'es'\n\n content_keys = ['word', 'meaning', 'dict_pos', 'from', 'extension', 'variations', 'examples']\n\n _metainfo = SpanishVocabMeta\n\n ALLOWED_POSES = ['noun', 'adj', 'verb']\n\n def __init__(self, sourcefile, title=\"\", genre=\"classic\"):\n super().__init__(sourcefile, title, genre)\n\n def _create_noun_with_extension_A(self, v):\n layout = self._prs.slide_layouts.get_by_name(\"Noun with extension A\")\n slide = self._prs.slides.add_slide(layout)\n holders = slide.shapes.placeholders\n\n pos = v[\"dict_pos\"]\n\n pos_holder = holders[10]\n pos_holder.text_frame.text = pos \n noun, meaning = holders[11], holders[12]\n noun.text_frame.text = v[\"word\"]\n ms = v[\"meaning\"].split(\",\")\n if len(ms) > 4:\n ms = ms[:4]\n meaning.text_frame.text = \"\\n\".join(ms) \n\n\n s_def = holders[13]\n s_undef = holders[15]\n pl_def = holders[17]\n pl_undef = holders[19]\n\n arts = self.__class__._metainfo.article_variation[pos]\n\n s_def.text_frame.text = arts[0] \n s_undef.text_frame.text = arts[1]\n pl_def.text_frame.text = arts[2]\n pl_undef.text_frame.text = arts[3]\n\n extension = json.loads(v[\"extension\"])\n if \"pl.\" in pos:\n s, pl = extension[arts[4]], v[\"word\"]\n else:\n s, pl = v[\"word\"], extension[arts[4]] \n\n holders[14].text_frame.text = s\n holders[16].text_frame.text = s\n holders[18].text_frame.text = pl \n holders[20].text_frame.text = pl \n\n note = slide.notes_slide\n note.notes_text_frame.text = v[\"word\"]\n\n def _create_noun(self, v):\n if v[\"extension\"] != \"\":\n self._create_noun_with_extension_A(v)\n else:\n self._create_default_word(v)\n\n def _create_adj_with_extension_A(self, v):\n layout = self._prs.slide_layouts.get_by_name(\"Adj with extension A\")\n slide = self._prs.slides.add_slide(layout)\n holders = slide.shapes.placeholders\n\n pos_holder = holders[10]\n adj, meaning = holders[11], holders[12]\n adj.text_frame.text = v[\"word\"]\n ms = v[\"meaning\"].split(\",\")\n if len(ms) > 4:\n ms = ms[:4]\n\n meaning.text_frame.text = \"\\n\".join(ms)\n\n s_m, s_f, pl_m, pl_f = holders[13], holders[14], holders[15], holders[16]\n\n extension = json.loads(v[\"extension\"])\n\n s_m.text_frame.text = extension[\"m\"]\n s_f.text_frame.text = extension[\"f\"]\n pl_m.text_frame.text = extension[\"mpl\"]\n pl_f.text_frame.text = extension[\"fpl\"]\n\n note = slide.notes_slide\n note.notes_text_frame.text = v[\"word\"]\n\n def _create_adj(self, v):\n if v[\"extension\"] != \"\":\n self._create_adj_with_extension_A(v)\n else:\n self._create_default_word(v)\n\n def _create_verb_with_extension_A(self, v):\n layout = self._prs.slide_layouts.get_by_name(\"Verb with extension A\")\n slide = self._prs.slides.add_slide(layout)\n holders = slide.shapes.placeholders\n \n variations = json.loads(v[\"variations\"])\n\n pos_holder = holders[10]\n pos_holder.text_frame.text = v[\"dict_pos\"]\n\n original, word, meaning = holders[11], holders[12], holders[13]\n original.text_frame.text = variations[\"original\"]\n word.text_frame.text = v[\"word\"]\n\n ms = v[\"meaning\"].split(\",\")\n if len(ms) > 4:\n ms = ms[:4]\n\n meaning.text_frame.text = \"\\n\".join(ms)\n\n sign = variations[\"formats\"][0]\n tense = sign[\"tense\"]\n person = sign[\"person\"]\n\n extension = json.loads(v[\"extension\"])[tense]\n\n holders[14].text_frame.text = extension[\"yo\"] if extension[\"yo\"] != \"\" else \" \"\n holders[15].text_frame.text = extension[\"tú\"]\n holders[16].text_frame.text = 
extension[\"él/ella/Usted\"]\n holders[17].text_frame.text = extension[\"nosotros\"]\n holders[18].text_frame.text = extension[\"vosotros\"]\n holders[19].text_frame.text = extension[\"ellos/ellas/Ustedes\"]\n\n holders[20].text_frame.text = self.__class__._metainfo.tense_info[tense]\n holders[21].text_frame.text = \" \".join([\"人称\", person, \"的变位\"])\n\n note = slide.notes_slide\n note.notes_text_frame.text = v[\"word\"]\n\n\n def _create_verb_with_extension_B(self, v):\n layout = self._prs.slide_layouts.get_by_name(\"Verb with extension B\")\n slide = self._prs.slides.add_slide(layout)\n holders = slide.shapes.placeholders\n \n variations = json.loads(v[\"variations\"])\n\n pos_holder = holders[10]\n pos_holder.text_frame.text = v[\"dict_pos\"]\n\n original, word, meaning = holders[11], holders[12], holders[13]\n original.text_frame.text = variations[\"original\"]\n word.text_frame.text = v[\"word\"]\n\n ms = v[\"meaning\"].split(\",\")\n if len(ms) > 4:\n ms = ms[:4]\n\n meaning.text_frame.text = \"\\n\".join(ms)\n \n formats = variations[\"formats\"]\n holders[14].text_frame.text = \"\\n\".join([self.__class__._metainfo.tense_info[f[\"tense\"]] if \"tense\" in f.keys() else self.__class__._metainfo.tense_info[f[\"format\"]] for f in formats])\n holders[15].text_frame.text = \"\\n\".join([\" \".join([f[\"person\"], \"的变位\"]) if \"person\" in f.keys() else \"\" for f in formats])\n\n note = slide.notes_slide\n note.notes_text_frame.text = v[\"word\"]\n\n def _create_verb_with_extension_C(self, v):\n layout = self._prs.slide_layouts.get_by_name(\"Verb with extension C\")\n slide = self._prs.slides.add_slide(layout)\n holders = slide.shapes.placeholders\n \n variations = json.loads(v[\"variations\"])\n\n pos_holder = holders[10]\n pos_holder.text_frame.text = v[\"dict_pos\"]\n original, word, meaning = holders[11], holders[12], holders[13]\n original.text_frame.text = variations[\"original\"]\n word.text_frame.text = v[\"word\"]\n\n ms = v[\"meaning\"].split(\",\")\n if len(ms) > 4:\n ms = ms[:4]\n\n meaning.text_frame.text = \"\\n\".join(ms)\n \n sign = variations[\"formats\"][0]\n\n holders[14].text_frame.text = self.__class__._metainfo.tense_info[sign[\"format\"]]\n\n note = slide.notes_slide\n note.notes_text_frame.text = v[\"word\"]\n\n def _create_verb(self, v):\n if v[\"variations\"] == \"\":\n self._create_default_word(v)\n else:\n variations = json.loads(v[\"variations\"])\n if \"formats\" in variations.keys():\n formats = variations[\"formats\"]\n if len(formats) > 1:\n self._create_verb_with_extension_B(v)\n elif \"tense\" in formats[0].keys():\n self._create_verb_with_extension_A(v)\n elif \"format\" in formats[0].keys():\n self._create_verb_with_extension_C(v)\n else:\n self._create_default_word(v)\n else:\n self._create_default_word(v)\n\n","repo_name":"qishe-nlp/linguappt","sub_path":"linguappt/es/vocab_summary.py","file_name":"vocab_summary.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30778040766","text":"exp_name = 'ailut_ppr10k'\n\ncustom_imports=dict(\n imports=['adaint'],\n allow_failed_imports=False)\n\n# model settings\nmodel = dict(\n type='AiLUT',\n n_ranks=5,\n n_vertices=33,\n en_adaint=True,\n en_adaint_share=False,\n backbone='res18', # 'tpami'\n pretrained=True,\n n_colors=3,\n sparse_factor=0.0001,\n smooth_factor=0,\n monotonicity_factor=10.0,\n recons_loss=dict(type='MSELoss', loss_weight=1.0, reduction='mean'))\n# model training and testing 
settings\ntrain_cfg = dict(n_fix_iters=3329*5)\ntest_cfg = dict(metrics=['PSNR'], crop_border=0)\n\n# dataset settings\ntrain_dataset_type = 'PPR10K'\nval_dataset_type = 'PPR10K'\n\ntrain_pipeline = [\n dict(\n type='LoadImageFromFile',\n io_backend='disk',\n key='lq',\n backend='cv2',\n flag='unchanged'),\n dict(type='FlipChannels', keys=['lq']), # BGR->RGB\n dict(\n type='LoadImageFromFile',\n io_backend='disk',\n key='gt',\n backend='pillow',\n channel_order='rgb'),\n dict(type='RandomRatioCrop', keys=['lq', 'gt'], crop_ratio=(0.6, 1.0)),\n dict(type='Resize', keys=['lq', 'gt'], scale=(448, 448), backend='cv2', interpolation='bilinear'),\n dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='horizontal'),\n dict(type='FlexibleRescaleToZeroOne', keys=['lq', 'gt'], precision=32),\n dict(type='ImageToTensor', keys=['lq', 'gt']),\n dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])\n]\n\ntest_pipeline = [\n dict(\n type='LoadImageFromFile',\n io_backend='disk',\n key='lq',\n backend='cv2',\n flag='unchanged'),\n dict(type='FlipChannels', keys=['lq']), # BGR->RGB\n dict(\n type='LoadImageFromFile',\n io_backend='disk',\n key='gt',\n backend='pillow',\n channel_order='rgb'),\n dict(type='FlexibleRescaleToZeroOne', keys=['lq', 'gt'], precision=32),\n dict(type='ImageToTensor', keys=['lq', 'gt']),\n dict(\n type='Collect',\n keys=['lq', 'gt'],\n meta_keys=['lq_path', 'gt_path'])\n]\n\ntarget = 'a' # change this line (a/b/c) to use other groundtruths\n\ndata = dict(\n workers_per_gpu=8,\n train_dataloader=dict(samples_per_gpu=16),\n val_dataloader=dict(samples_per_gpu=1),\n test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),\n\n # train\n train=dict(\n type=train_dataset_type,\n dir_lq='data/PPR10K/source_aug_6',\n dir_gt=f'data/PPR10K/target_{target}',\n ann_file='data/PPR10K/train_aug.txt',\n pipeline=train_pipeline,\n test_mode=False,\n filetmpl_lq='{}.tif',\n filetmpl_gt='{}.tif'),\n # val\n val=dict(\n type=val_dataset_type,\n dir_lq='data/PPR10K/source',\n dir_gt=f'data/PPR10K/target_{target}',\n ann_file='data/PPR10K/test.txt',\n pipeline=test_pipeline,\n test_mode=True,\n filetmpl_lq='{}.tif',\n filetmpl_gt='{}.tif'),\n # test\n test=dict(\n type=val_dataset_type,\n dir_lq='data/PPR10K/source',\n dir_gt=f'data/PPR10K/target_{target}',\n ann_file='data/PPR10K/test.txt',\n pipeline=test_pipeline,\n test_mode=True,\n filetmpl_lq='{}.tif',\n filetmpl_gt='{}.tif'),\n)\n\n# optimizer\noptimizers = dict(\n type='Adam',\n lr=1e-4,\n weight_decay=0,\n betas=(0.9, 0.999),\n eps=1e-8,\n paramwise_cfg=dict(custom_keys={'adaint': dict(lr_mult=0.1)}))\nlr_config = None\n\n# learning policy\ntotal_iters = 3329*200\n\ncheckpoint_config = dict(interval=3329, save_optimizer=True, by_epoch=False)\nevaluation = dict(interval=3329, save_image=False)\nlog_config = dict(\n interval=100,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n ])\nvisual_config = None\n\n# runtime settings\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = f'./work_dirs/{exp_name}'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\nfind_unused_parameters = True\n","repo_name":"ImCharlesY/AdaInt","sub_path":"adaint/configs/ppr10k.py","file_name":"ppr10k.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"81"} +{"seq_id":"16122667816","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\nimport re\nfrom urllib.parse import urljoin, urldefrag, urlsplit, 
urlparse\n\nstop_words = ['the', 'that', 'to', 'as', 'there', 'has', 'and', 'or', 'is', 'not', 'a', 'of', 'but', 'in', 'by', 'on', 'are', 'it', 'if','an']\nelements = ['var','ul','li','px','div','script','inline','tr']\n\nclass Density():\n\n def __init__(self,keyword,body):\n body = body.decode('utf-8')\n body = self.clean_page(body)\n result = self.get_density(keyword[0],body)\n self.content_length = result['content_length']\n self.density = result['density']\n self.keyword_occurence = result['keyword_mentions']\n\n\n @classmethod\n def clean_page(self,body):\n master = []\n final = []\n FINAL =[]\n # Strip out the sequency of script tags\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', body)\n # Remove blank spaces and split by percieved word\n new = [line for line in cleantext.split('\\n') if line.strip() != '']\n new = list(filter(None, new))\n for i in new:\n word = i.split(' ')\n master.extend(word)\n # Cleans empty values to return a list of words\n for i in master:\n if i != '':\n final.append(i)\n for a in final:\n # Matches words to regex and if it still contains script parameters get's rid of them\n extract = re.compile('[a-zA-Z\\'-]+')\n clean = re.sub(extract,'',a)\n if len(clean) == 0 and a not in elements:\n FINAL.append(a)\n return FINAL\n\n @classmethod\n def get_density(self,keyword,body):\n data = {}\n print(keyword)\n ngram = []\n clean = keyword.split(' ')\n lengthKeyword = len(clean)\n data['content_length'] = len(body)\n counter = 0\n for i in range(len(body)):\n ngram.append(body[counter:counter + lengthKeyword])\n counter = counter + 1\n wordList = []\n for i in ngram:\n new = ' '.join(i)\n wordList.append(new)\n count = 0\n for a in wordList:\n if a.lower() == keyword:\n count += 1\n else:\n continue\n try:\n Density = count / len(body)\n data['density'] = Density\n except:\n Density = None\n data['density'] = Density\n data['keyword_mentions'] = count\n return data\n\n\nclass META():\n data = {}\n\n def __init__(self, url, body):\n print(url)\n soup = BeautifulSoup(body)\n self.url = url\n self.now = time.strftime('%Y-%m-%d %H:%M')\n try:\n gen = soup.find('meta', attrs={'name': 'generator'})\n self.type1 = 'none'\n if gen['content'] is not None:\n self.type1 = gen['content']\n self.now = time.strftime('%Y-%m-%d %H:%M')\n except:\n self.type1 = 'None'\n try:\n self.meta_title = soup.title.text\n self.title_length = len(self.meta_title)\n except:\n self.meta_title = 'N/A'\n self.title_length = 'N/A'\n try:\n meta_description = soup.find('meta', attrs={'name': 'description'})\n self.meta_description = meta_description['content']\n self.meta_description_length = len(meta_description)\n except:\n self.meta_description = 'N/A'\n self.meta_description_length = 'N/A'\n try:\n canonical = soup.find('link', attrs={'rel': 'canonical'})\n self.canonical_count = len(soup.findAll('link', attrs={'rel': 'canonical'}))\n self.canonical = canonical['href']\n except:\n self.canonical = 'N/A'\n self.canonical_count = 0\n try:\n robots = soup.find('meta', attrs={'name': 'robots'})\n self.robots = robots['content']\n except:\n self.robots = 'N/A'\n try:\n self.H1 = soup.find('h1').text\n except:\n self.H1 = 'None'\n","repo_name":"saiyancode/Local-monster","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40215343464","text":"if __name__ == '__main__':\n\n # i don't need this line\n input()\n M = set( input().split() )\n # i 
don't need this line too\n input()\n N = set( input().split() )\n\n # difference between the sets, this function make\n # a.difference(b) and b.difference(a).\n # and M^N also works\n difference = set( M.symmetric_difference(N) )\n\n # transform the set to a list of integers\n difference = list( map(int, difference) )\n\n # print the list sorted\n print( *sorted(difference) , sep = '\\n' )\n","repo_name":"AlexandreVelloso/Hackerrank","sub_path":"Python/Sets/#01 Symmetric Difference.py","file_name":"#01 Symmetric Difference.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14099341283","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom .views import *\n\n# --- api routes ---\nrouter = routers.DefaultRouter()\nrouter.register(r'students', StudentViewSet, basename='students')\nrouter.register(r'enrolled', EnrolledViewSet, basename='enrolled')\nrouter.register(r'majors', MajorViewSet, basename='majors')\nrouter.register(r'categories', CategoryViewSet, basename='categories')\nrouter.register(r'subcategories', SubCategoryViewSet, basename='subcategories')\nrouter.register(r'requirements', RequirementViewSet, basename='requirements')\nrouter.register(r'courses', CourseViewSet, basename='courses')\nrouter.register(r'prereqs', PrereqViewSet, basename='prereqs')\nrouter.register(r'apcredits', ApCreditViewSet, basename='apcredits')\n\n\nurlpatterns = [\n # automatic API URL routing.\n path('', include(router.urls)),\n # login URLs for the browsable API.\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n # get json list data\n path(\"majorlist//\", requestMajorList),\n path(\"categorylist///\", requestCategoriesList),\n path(\"subcategorylist////\", requestSubcategoriesList),\n path(\"requirementlist/////\", requestRequirementsList),\n path(\"courselist//////\", requestCoursesList),\n path(\"prereqlist///////\", requestPrereqList),\n path(\"aplist///////\", requestApList),\n path(\"specificsubcategory/////\", requestSubcategoryNote),\n path(\"specificrequirement//////\", requestRequirementCredit),\n path(\"specificcourse///////\", requestCoursesCredit),\n # form operation create/delete\n path(\"createstudent\", addStudent, name='addStudent'),\n path(\"deletestudent\", deleteStudent, name='deleteStudent'),\n path(\"createenrollkey\", addEnroll, name='addEnrollment'),\n path(\"deleteenrollkey\", deleteEnroll, name='deleteEnrollment'),\n path(\"createmajor\", addMajor, name='addMajor'),\n path(\"deletemajor\", deleteMajor, name='deleteMajor'),\n\n path(\"createcategory\", createCategory, name='createCategory'),\n path(\"addcategory\", addCategory, name='addCategory'),\n path(\"deletecategory\", deleteCategory, name='deleteCategory'),\n\n path(\"createsubcategory\", createSubCategory, name='createSubcategory'),\n path(\"addsubcategory\", addSubCategory, name='addSubcategory'),\n path(\"deletesubcategory\", deleteSubCategory, name='deleteSubcategory'),\n \n path(\"createrequirement\", createRequirement, name='createRequirement'),\n path(\"addrequirement\", addRequirement, name='addRequirement'),\n path(\"deleterequirement\", deleteRequirement, name='deleteRequirement'),\n\n path(\"createcourse\", createCourse, name='createCourse'),\n path(\"addcourse\", addCourse, name='addCourse'),\n path(\"deletecourse\", deleteCourse, name='deleteCourse'),\n\n path(\"addprereq\", addPrereq, name='addPrereq'),\n path(\"deleteprereq\", deletePrereq, 
name='deletePrereq'),\n\n path(\"createap\", createAp, name='createAp'),\n path(\"addap\", addAp, name='addAp'),\n path(\"deleteap\", deleteAp, name='deleteAp'),\n]\n","repo_name":"chrisgliu/CourseHelper","sub_path":"mysite/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12303556379","text":"from .. import graph, using, style\nfrom flask_babel import gettext as _l\nimport pygal\n\n\n@graph(_l('Operating system'))\n@using('account_activity')\ndef device_usage(data):\n acc_act = data['account_activity']\n os = acc_act.os.value_counts()\n os = os / os.sum() * 100\n os = os.round(2)\n\n pie_chart = pygal.Pie(style=style, inner_radius=.4)\n for os, count in zip(os.index, os):\n pie_chart.add(os, count)\n\n return pie_chart\n","repo_name":"klima7/Social-Insight","sub_path":"analytics/administration/os.py","file_name":"os.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34090853780","text":"# acronym.py\n\n\ndef main():\n # get user input of a phrase\n phrase = input(\"Enter a phrase: \")\n ph = phrase.upper()\n phSplit = ph.split()\n print(phSplit)\n\n acronym = []\n # get the first letter of each word\n for p in phSplit:\n letter = p[0]\n acronym.append(letter)\n\n # print new acronym\n fa = \"\".join(acronym)\n print(\"This is the new acronym: {0}\".format(fa))\n\n\nmain()","repo_name":"justinta89/Work","sub_path":"PythonProgramming/Chapter 5/Exercises/acronym.py","file_name":"acronym.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21761553416","text":"class Solution:\n def edgesToParentsLeaves(n,edges):\n parents = [None]*n\n hasParent = set()\n hasParent.add(0)\n i = 0\n leaves = set(range(0,n))\n edgeIndices = set(range(0,n))\n while len(hasParent) < n:\n if edges[i][0] in hasParent:\n leaves.discard(edges[i][0])\n parents[edges[i][1]] = edges[i][0]\n hasParent.add(edges[i][1])\n edgeIndices.remove(i)\n elif edges[i][1] in hasParent:\n leaves.discard(edges[i][1])\n parents[edges[i][0]] = edges[i][1]\n hasParent.add(edges[i][0])\n edgeIndices.remove(i)\n i = list(edgeIndices)[1]\n return parents, leaves\n def edgesToParentsLeavesAdj(n,edges):\n adj = {}\n for i in range(n):\n adj[i] = set()\n for edge in edges:\n adj[edge[0]].add(edge[1])\n adj[edge[1]].add(edge[0])\n q = [(0,None)]\n visited = set()\n parents = [None] * n\n while len(q) > 0:\n num, parent = q.pop()\n parents[num] = parent\n if num not in visited:\n visited.add(num)\n q.extend([(child,num) for child in adj[num] if child not in visited])\n return parents, set([ key for key in adj if len(adj[key]) == 1])\n\n def countSubTrees(n, edges, labels):\n from datetime import datetime, timedelta\n start = datetime.now()\n # find parents of all nodes (?)\n # iterate through unvisited, going up to root\n # keep track of label counts until you hit a visited,\n # then add counts to already visited\n parents, leaves = Solution.edgesToParentsLeavesAdj(n, edges)\n afterParents = datetime.now()\n print('parents time:', afterParents - start)\n ans = [1]*n\n\n\n unvisited = set(range(1,n))\n while len(unvisited) > 0:\n counts = {}\n current = leaves.pop()\n\n while current in unvisited:\n counts[labels[current]] = counts[labels[current]] + 1 if labels[current] in counts else 1\n ans[current] = 
counts[labels[current]]\n unvisited.remove(current)\n current = parents[current]\n while current is not None:\n if labels[current] in counts:\n ans[current] += counts[labels[current]]\n current = parents[current]\n print('end:', datetime.now() - afterParents)\n return ans\n\nprint(Solution.countSubTrees(n=7, edges=[[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]], labels=\"abaedcd\"))\nprint(Solution.countSubTrees(n = 4, edges = [[0,1],[1,2],[0,3]], labels = \"bbbb\"))\nprint(Solution.countSubTrees(n=5, edges=[[0,1],[0,2],[1,3],[0,4]], labels=\"aabab\"))\n","repo_name":"jrchew15/leetcode","sub_path":"countSubTreeLabels.py","file_name":"countSubTreeLabels.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5048930344","text":"import json\nimport shutil\nimport tarfile\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport seisbench\nimport seisbench.util\n\nfrom .base import BenchmarkDataset\n\n# Conversion from earth radius\nDEG2KM = 2 * np.pi * 6371 / 360\n\n\nclass NEIC(BenchmarkDataset):\n \"\"\"\n NEIC dataset from Yeck and Patton\n \"\"\"\n\n def __init__(self, **kwargs):\n citation = (\n \"Yeck, W.L., and Patton, J., 2020, Waveform Data and Metadata used to \"\n \"National Earthquake Information Center Deep-Learning Models: \"\n \"U.S. Geological Survey data release, https://doi.org/10.5066/P9OHF4WL.\"\n )\n super().__init__(citation=citation, repository_lookup=True, **kwargs)\n\n def _download_dataset(self, writer, cleanup=True, blocksize=2**14):\n \"\"\"\n Downloads and converts the dataset from the original publication\n\n :param writer: WaveformWriter\n :param cleanup: If true, delete the original and temporary data files after conversion. Defaults to true.\n :param blocksize: Number of waveform samples to read from disk at once\n :return:\n \"\"\"\n seisbench.logger.warning(\n \"Converting this catalog from source will require ~250 GB disk storage. \"\n \"The resulting catalog has ~75GB. \"\n \"Please ensure that the storage is available on your disk.\"\n )\n\n path = self.path\n path_original = path / \"original\"\n path_original.mkdir(parents=True, exist_ok=True)\n path_meta = path_original / \"meta.json\"\n\n # Download metadata in json format to extract download links for the data\n seisbench.util.download_http(\n \"https://www.sciencebase.gov/catalog/item/5ed528ff82ce2832f047eee6?format=json\",\n path_meta,\n progress_bar=False,\n )\n\n # Load metadata\n with open(path_meta, \"r\") as fmeta:\n meta = json.load(fmeta)\n\n # Download data files\n for f in meta[\"files\"]:\n # Uses callback_if_uncached only to be able to utilize the cache mechanism.\n # Concurrent accesses are anyhow already controlled by the callback_if_uncached\n # call wrapping _download_dataset.\n # It's therefore considered save to set force=True.\n def callback_download_original(path):\n seisbench.util.download_http(\n f[\"url\"],\n path,\n desc=f\"Downloading file {f['name']}\",\n )\n\n seisbench.util.callback_if_uncached(\n path_original / f[\"name\"], callback_download_original, force=True\n )\n\n # Note: The following lines could also each be wrapped into a callback_if_uncached\n # However, concatenating and unpacking do not take too long and this way the code is easier.\n\n # Concatenate partitioned files\n seisbench.logger.warning(\n \"Concatenating partitioned tar.gz archives. 
This might take a few minutes.\"\n )\n partitioned_files = [\"PWF_Test.tar.gz\", \"PWF_Train.tar.gz\", \"SWF_Train.tar.gz\"]\n for partitioned_file in partitioned_files:\n members = sorted(\n [\n x\n for x in path_original.iterdir()\n if x.name.startswith(partitioned_file)\n and not x.name == partitioned_file\n ]\n )\n with open(path_original / partitioned_file, \"wb\") as fout:\n for file in members:\n with open(file, \"rb\") as fin:\n data = fin.read(1000000) # Read 1MB parts\n while len(data) > 0:\n fout.write(data)\n data = fin.read(1000000) # Read 1MB parts\n\n # Unpack files\n seisbench.logger.warning(\n \"Unpacking tar.gz archives. This might take a few minutes.\"\n )\n path_unpacked = path / \"unpacked\"\n path_unpacked.mkdir(parents=True, exist_ok=True)\n\n tar_files = [x for x in path_original.iterdir() if x.name.endswith(\".tar.gz\")]\n for tar_path in tar_files:\n with tarfile.open(tar_path, \"r:gz\") as file:\n seisbench.util.safe_extract_tar(file, path_unpacked)\n\n groups = [(\"P\", \"Train\"), (\"S\", \"Train\"), (\"P\", \"Test\"), (\"S\", \"Test\")]\n\n total_samples = (\n np.load(path_unpacked / f\"PAzi_Train.npy\").shape[0]\n + np.load(path_unpacked / f\"SAzi_Train.npy\").shape[0]\n + np.load(path_unpacked / f\"PAzi_Test.npy\").shape[0]\n + np.load(path_unpacked / f\"SAzi_Test.npy\").shape[0]\n )\n\n # Select 10 percent of the training events for development\n # As the train test split is random, but event wise, a similar strategy is employed here\n train_ids = np.concatenate(\n [\n np.load(path_unpacked / f\"PEID_Train.npy\"),\n np.load(path_unpacked / f\"SEID_Train.npy\"),\n ]\n )\n train_ids = np.unique(train_ids)\n dev_ids = set(train_ids[::10])\n\n writer.set_total(total_samples)\n\n # TODO: Verify that these are unrestituted counts\n writer.data_format = {\n \"dimension_order\": \"CW\",\n \"component_order\": \"ZNE\",\n \"measurement\": \"velocity\",\n \"sampling_rate\": 40,\n \"unit\": \"counts\",\n \"instrument_response\": \"not restituted\",\n }\n\n eq_counts = defaultdict(lambda: 0)\n\n for wavetype, split in groups:\n # Flush cache after train/dev is complete\n if wavetype == \"P\" and split == \"Test\":\n writer.flush_hdf5()\n\n azimuth = np.load(path_unpacked / f\"{wavetype}Azi_{split}.npy\")\n distance = np.load(path_unpacked / f\"{wavetype}Dist_{split}.npy\")\n event_id = np.load(path_unpacked / f\"{wavetype}EID_{split}.npy\")\n magnitude = np.load(path_unpacked / f\"{wavetype}Mag_{split}.npy\")\n\n p = 0\n while p < azimuth.shape[0]:\n # Recreate memmap each epoch to avoid memory \"leak\"\n # For details see\n # https://stackoverflow.com/questions/45132940/numpy-memmap-memory-usage-want-to-iterate-once\n waveforms = np.load(\n path_unpacked / f\"{wavetype}WF_{split}.npy\", mmap_mode=\"r\"\n )\n\n block_azimuth = azimuth[p : p + blocksize]\n block_distance = distance[p : p + blocksize]\n block_event_id = event_id[p : p + blocksize]\n block_magnitude = magnitude[p : p + blocksize]\n block_waveforms = waveforms[\n p : p + blocksize\n ].copy() # Copy causes the load into memory\n\n for azi, dist, eid, mag, wf in zip(\n block_azimuth,\n block_distance,\n block_event_id,\n block_magnitude,\n block_waveforms,\n ):\n trace_station_id = eq_counts[eid]\n eq_counts[eid] += 1\n trace_name = f\"{eid}_st{trace_station_id}\"\n\n trace_split = split.lower()\n if eid in dev_ids:\n trace_split = \"dev\"\n\n metadata = {\n \"trace_name\": trace_name,\n \"trace_category\": \"earthquake\",\n f\"trace_{wavetype.lower()}_arrival_sample\": 1200,\n 
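                        # the fixed arrival sample of 1200 corresponds to a 30 s pre-pick window at the 40 Hz rate declared in writer.data_format above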
f\"trace_{wavetype.lower()}_status\": \"manual\",\n \"source_magnitude\": mag,\n \"source_id\": eid,\n \"path_ep_distance_km\": dist * DEG2KM,\n \"path_back_azimuth_deg\": azi,\n \"split\": trace_split,\n }\n\n writer.add_trace(metadata, wf)\n\n p += blocksize\n\n if cleanup:\n seisbench.logger.warning(\n \"Cleaning up source and temporary files. This might take a few minutes.\"\n )\n shutil.rmtree(path_unpacked)\n shutil.rmtree(path_original)\n\n\nclass MLAAPDE(BenchmarkDataset):\n \"\"\"\n MLAAPDE dataset from Cole et al. (2023)\n\n Note that the SeisBench version is not identical to the precompiled version\n distributed directly through USGS but uses a different data selection.\n In addition, custom versions of MLAAPDE can be compiled with the software\n provided by the original authors. These datasets can be exported in\n SeisBench format.\n \"\"\"\n\n def __init__(self, **kwargs):\n citation = (\n \"Cole, H. M., Yeck, W. L., & Benz, H. M. (2023). \"\n \"MLAAPDE: A Machine Learning Dataset for Determining \"\n \"Global Earthquake Source Parameters. \"\n \"Seismological Research Letters, 94(5), 2489-2499. \"\n \"https://doi.org/10.1785/0220230021\"\n \"\\n\\n\"\n \"Cole H. M. and W. L. Yeck, 2022, \"\n \"Global Earthquake Machine Learning Dataset: \"\n \"Machine Learning Asset Aggregation of the PDE (MLAAPDE): \"\n \"U.S. Geological Survey data release, doi:10.5066/P96FABIB\"\n )\n license = \"MLAAPDE code under CC0 1.0 Universal, data licenses dependent on the underlying networks\"\n\n super().__init__(\n citation=citation, license=license, repository_lookup=True, **kwargs\n )\n\n def _download_dataset(self, writer, **kwargs):\n pass\n\n @staticmethod\n def available_chunks(*args, **kwargs):\n return [\n \"_201307\",\n \"_201308\",\n \"_201309\",\n \"_201310\",\n \"_201311\",\n \"_201312\",\n \"_201401\",\n \"_201402\",\n \"_201403\",\n \"_201404\",\n \"_201405\",\n \"_201406\",\n \"_201407\",\n \"_201408\",\n \"_201409\",\n \"_201410\",\n \"_201411\",\n \"_201412\",\n \"_201501\",\n \"_201502\",\n \"_201503\",\n \"_201504\",\n \"_201505\",\n \"_201506\",\n \"_201507\",\n \"_201508\",\n \"_201509\",\n \"_201510\",\n \"_201511\",\n \"_201512\",\n \"_201601\",\n \"_201602\",\n \"_201603\",\n \"_201604\",\n \"_201605\",\n \"_201606\",\n \"_201607\",\n \"_201608\",\n \"_201609\",\n \"_201610\",\n \"_201611\",\n \"_201612\",\n \"_201701\",\n \"_201702\",\n \"_201703\",\n \"_201704\",\n \"_201705\",\n \"_201706\",\n \"_201707\",\n \"_201708\",\n \"_201709\",\n \"_201710\",\n \"_201711\",\n \"_201712\",\n \"_201901\",\n \"_201902\",\n \"_201903\",\n \"_201904\",\n \"_201905\",\n \"_201906\",\n \"_201907\",\n \"_201908\",\n \"_201909\",\n \"_201910\",\n \"_201911\",\n \"_201912\",\n \"_202001\",\n \"_202002\",\n \"_202003\",\n \"_202004\",\n \"_202005\",\n \"_202006\",\n \"_202007\",\n \"_202008\",\n \"_202009\",\n \"_202010\",\n \"_202011\",\n \"_202012\",\n \"_202101\",\n \"_202102\",\n \"_202103\",\n \"_202104\",\n \"_202105\",\n \"_202106\",\n \"_202107\",\n \"_202108\",\n \"_202109\",\n \"_202110\",\n \"_202111\",\n \"_202112\",\n \"_201801\",\n \"_201802\",\n \"_201803\",\n \"_201804\",\n \"_201805\",\n \"_201806\",\n \"_201807\",\n \"_201808\",\n \"_201809\",\n \"_201811\",\n \"_201810\",\n \"_201812\",\n \"_202201\",\n \"_202202\",\n \"_202203\",\n \"_202204\",\n ]\n\n def _write_chunk_file(self):\n \"\"\"\n Write out the chunk file\n\n :return: None\n \"\"\"\n chunks_path = self.path / \"chunks\"\n\n if chunks_path.is_file():\n return\n\n chunks = self.available_chunks()\n chunks_str 
= \"\\n\".join(chunks) + \"\\n\"\n\n self.path.mkdir(exist_ok=True, parents=True)\n with open(chunks_path, \"w\") as f:\n f.write(chunks_str)\n","repo_name":"seisbench/seisbench","sub_path":"seisbench/data/neic.py","file_name":"neic.py","file_ext":"py","file_size_in_byte":12568,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"81"} +{"seq_id":"8628295454","text":"import math\n\ndef solve(x):\n y = math.floor((2 * x + 0.25) ** 0.5 - 0.5)\n return y\n\nif __name__ == \"__main__\":\n n = int(input())\n for _ in range(n):\n print(solve(int(input())))\n","repo_name":"Ochirgarid/uhunt","sub_path":"solutions/1-introduction/starred/11614.py","file_name":"11614.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2013316538","text":"import time, sys, os\n#from PIL import Image\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\n\nURL = 'https://www.journohq.com/journo/europe-in-6-19909'\ndriver = webdriver.Firefox()\ndriver.get(URL)\n\nentries = driver.find_elements(By.CLASS_NAME, 'entry-box')\ntotal_entries = len(entries)\nfor i in range(total_entries):\n driver.execute_script(\"var c = document.getElementsByClassName('entry-box'); for (var i=c.length;i--;i>=0) { if (i!==parseInt(arguments[0])) c[i].remove(); }\", i)\n\n # click all comment links to expand them\n more_links = driver.find_elements(By.CLASS_NAME, 'more-less-btn')\n for ml in more_links:\n ml.click()\n\n driver.execute_script(\"var el = document.getElementsByClassName('lazy'); for(var i=0; i 0:', str(\"{:,}\".format(math.ceil(y_values[-1])))+'m/s -', str(cPerc)+'% speed of light') #%s kinda funky\r\n if y_values[-1] >= 299792458: \r\n cPerc = math.ceil(((y_values[-1]-299792458)/299792458)*100)\r\n plt.suptitle('VALUE_ERROR: Final Velcity Exceeds Speed of Light by: '+str(\"{:,}\".format(y_values[-1]-299792458))+'m/s - '+str(cPerc)+'%', fontsize=7, color='r')\r\n plt.subplots_adjust(bottom=0.15)\r\n plt.plot(x_values, y_values, marker=',', mfc='r', color=lineColor)\r\n return\r\n\r\n # Calling Functions\r\n plot(x_values, y_values, pltType)\r\n return x, y\r\n\r\n# Force-Distance Build\r\ndef forceBuild(d): # Returns Force Array, provided Distance(d) \r\n def forceEqn(r, d): #Equation for Integration\r\n # function f(d)\r\n x = 2*((d/r)*(k*((abs(q1*(q2/h)))/r**2)))\r\n computationsNum[0] = computationsNum[0]+1\r\n return x\r\n\r\n def forceIntgrl(d): #Itegration of Force Equation\r\n limUpper = (math.sqrt(((d)**2)+((h/2)**2)))\r\n computationsNum[0] = computationsNum[0]+1\r\n # integration over r\r\n i, err = quad(forceEqn, d, limUpper, args=(d))\r\n absErr.append(err)\r\n computationsNum[1] = computationsNum[1]+1\r\n return(i)\r\n\r\n # Calling Functions\r\n forceExperienced = forceIntgrl(d)\r\n return forceExperienced\r\n\r\n# # Acceleration-Distance Build\r\ndef accelerationBuild(forceArray): # Returns acceleration value array, provided force array\r\n def acceleration(forceArray): \r\n # f(d) -> a(d)\r\n accelerationArray = []\r\n print('Acceleration Build Time:')\r\n with alive_bar(len(forceArray)) as bar:\r\n for x in forceArray:\r\n accelerationArray.append(x/m)\r\n computationsNum[0] = computationsNum[0]+1\r\n bar()\r\n return accelerationArray\r\n \r\n # Calling Functions\r\n accelerationExperienced = acceleration(forceArray)\r\n return accelerationExperienced\r\n# Velocity Distance 
Build\r\ndef velocityBuild(x_values, accelerationArray): # Returns velocity experienced, provided distance\r\n    def velocity(x_values, accelerationArray):\r\n        velocityArray = []\r\n        print('Velocity Build Time:')\r\n        with alive_bar(len(x_values)) as bar:\r\n            for x in x_values:\r\n                # Distance must be divisible by n\r\n                limUpper = ((s-x)/n)\r\n                computationsNum[0] = computationsNum[0]+1\r\n                sum = summation(limUpper, 0, accelerationArray)\r\n                sum = math.sqrt(sum)\r\n                computationsNum[0] = computationsNum[0]+1\r\n                velocityArray.append(sum) \r\n                bar()\r\n        return velocityArray\r\n    # Calling Functions\r\n    velocityArray = velocity(x_values, accelerationArray)\r\n    return velocityArray\r\n\r\n# Summation Func\r\ndef summation(limUpper, limLower, accelerationArray): # Returns sum total, provided limits\r\n    def summation(limUpper, limLower, accelerationArray): # summation in loop form\r\n        sum = 0\r\n        # PROBLEM: Ceil func\r\n        iterations = math.ceil(limUpper-limLower)\r\n        computationsNum[0] = computationsNum[0]+1\r\n        for x in range(0, iterations):\r\n            sum = sum+sumFunc(x, accelerationArray)\r\n            computationsNum[0] = computationsNum[0]+1\r\n            # incorrect order - low dist = high iterate, not inverse\r\n        return sum\r\n\r\n    def sumFunc(x, accelerationArray): # function for summation\r\n        index = round(s-x*n, len(str(step).split(\".\")[1]))\r\n        index = int(index*(10**(len(str(step).split(\".\")[1]))))\r\n        a = accelerationArray[-index]\r\n        # acceleration index was hitting inverse indices, negative is a band-aid fix\r\n        x = (2*a*n)\r\n        computationsNum[0] = computationsNum[0]+3\r\n        return x\r\n    \r\n    # Calling Functions\r\n    sumTotal = summation(limUpper, limLower, accelerationArray)\r\n    return sumTotal\r\n\r\ndef processingInfo(computationsNum):\r\n    computationsNum[3] = time.time()\r\n    timeElapsed = round(computationsNum[3]-computationsNum[2], 2)\r\n    print(f'{Fore.GREEN}Processing Info:{Style.RESET_ALL}')\r\n    print(' Time Elapsed:', str(timeElapsed)+' seconds')\r\n    print(' Average Absolute Error:', '('+str(((mean(absErr))*100))+')%')\r\n    print(' Total Integral Computations:', (\"{:,}\".format(computationsNum[1])))\r\n    print(' Total Linear Computations:', (\"{:,}\".format(computationsNum[0])))\r\n\r\nx, y = distanceItteration(forceBuild)\r\ngraphingFunc(x, y, 'f(d)')\r\ny = accelerationBuild(y)\r\ngraphingFunc(x, y, 'a(d)')\r\ny = velocityBuild(x, y)\r\ngraphingFunc(x, y, 'v(d)')\r\nprocessingInfo(computationsNum)\r\nplt.show()\r\n\r\n","repo_name":"JustinD-T/Charged-Particle-Velocity-Simulator","sub_path":"Version 2.py","file_name":"Version 2.py","file_ext":"py","file_size_in_byte":9709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3223730462","text":"'''Game guess the number by computer !!!'''\n\n\nimport numpy as np\n\n\ndef random_predict(number: int = 1) -> int:\n    \"\"\"Randomly crack the number\n\n    Args:\n        number (int): Given number\n    \n    Returns:\n        int: Number of attempts\n    \"\"\"\n    count = 0\n    min_num = 1\n    max_num = 100 # Setting the limits of the selection of the number\n    \n    while True:\n        count += 1\n        predict_number = np.random.randint(min_num, max_num + 1)\n        \n        if predict_number > number: # Search condition for a new number\n            max_num = predict_number\n        elif predict_number < number:\n            min_num = predict_number\n        else:\n            break # Exit the loop if guessed right\n    \n    return count\n\n\ndef score_games(random_predict) -> int:\n    \"\"\"Average number of attempts over 1000 games\n\n    Args:\n        random_predict ([type]): Guessing function\n    \n    Returns:\n        int: Average number 
of attempts\n    \"\"\"\n    count_lst = []\n    np.random.seed(1) # Fix it for reproducibility\n    random_array = np.random.randint(1, 101, size=1000) # Made a list of numbers\n    \n    for number in random_array:\n        count_lst.append(random_predict(number))\n    \n    score = int(np.mean(count_lst))\n    print(f'On average we guess the number in {score} attempts')\n    return score\n\n    \nif __name__ == '__main__':\n    # Run \n    score_games(random_predict)\n","repo_name":"StartrexII/DataScienceProjects","sub_path":"project_0/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"15679084218","text":"from ray import TempRec, Ray\nfrom material import *\nimport Utilities\n\n\n# Light Objects\nclass Light:\n    def __init__(self, pos, color, intensity):\n        self.pos = pos  # Light Position\n        self.color = color  # Light Color\n        self.intensity = intensity  # Light Intensity\n\n    def scatter(self, rec, cam):\n        return Color(0, 0, 0)  # Radiance by Light\n\n\nclass DirectionLight(Light):\n    def __init__(self, pos, direction, color, intensity):\n        super().__init__(pos, color, intensity)\n        self.direction = direction\n\n    def scatter(self, rec, cam):\n        return Blinn(self, rec, cam, self.direction)\n\n\nclass PointLight(Light):\n    def __init__(self, pos, color, intensity):\n        super().__init__(pos, color, intensity)\n\n    def scatter(self, rec, cam):\n        return Blinn(self, rec, cam, rec.p - self.pos) / (Vector.dist(self.pos, rec.p) ** 2)\n\n\nclass SpotLight(Light):\n    def __init__(self, pos, direction, totalwidth, falloffwidth, color, intensity):\n        super().__init__(pos, color, intensity)\n        self.direction = direction\n        self.totalwidth = totalwidth\n        self.falloffwidth = falloffwidth\n\n    def scatter(self, rec, cam):\n        point_dir = rec.p - self.pos\n        cos_theta = Vector.dot(point_dir, self.direction) / (point_dir.mag() * self.direction.mag())\n        theta = 2 * math.acos(cos_theta)\n        falloff = 0\n        if theta > self.totalwidth:\n            falloff = 0\n        elif theta < self.falloffwidth:\n            falloff = 1\n        else:\n            falloff = (self.totalwidth - theta) / (self.totalwidth - self.falloffwidth)\n        return (Blinn(self, rec, cam, self.direction) * falloff) / (Vector.dist(self.pos, rec.p) ** 2)\n\n\nclass LightObjects:\n    def __init__(self, soft_shadows):\n        self.lights = []\n        self.soft_shadows = soft_shadows  # Soft Shadows\n\n    def add(self, light):\n        self.lights.append(light)  # Add Lights to the list\n\n    def lightColor(self, rec, objects, cam):\n        lightColor = Color(0, 0, 0)\n        for light in self.lights:\n            rec2 = TempRec()\n            for obj in objects:\n                rec2 = obj.intersect(Ray(rec.p, light.pos - rec.p + Utilities.RandomInSphere() * self.soft_shadows), rec2)\n            s = 1 # Shadow Term\n            if rec2.hashit:\n                s = 0\n            lightColor += s * (light.scatter(rec, cam))\n        return lightColor\n","repo_name":"shreyaggarwal219/Ray-Tracer-in-Python","sub_path":"light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"14824629924","text":"import random\nfrom common.socket_id import *\nfrom common.common import *\nfrom common.constants import *\nfrom common.server_process import server\n\n\ndef get_npc_in_scene(pid, map_id=0):\n    if not map_id:\n        map_id = rget(pid, CHAR, '地图')\n    npcs = []\n\n    # script-based approach (NPCs defined in code)\n    # for npc in NPCS:\n    #     if npc.map_id == map_id:\n    #         npc_data = {\n    #             'npc_id': npc.npc_id,\n    #             '名称': npc.name,\n    #             '称谓': npc.title,\n    #             '模型': npc.model,\n    #             'mx': npc.mx,\n    #             
npc.my,\n # '方向': npc.direction,\n # '地图': npc.map_id,\n # 'NPC类型': npc.npc_id\n # }\n\n # csv方式\n i = 0\n for npc in NPCS.values():\n if int(npc['地图']) == map_id:\n i += 1\n npc_data = {\n 'id': i,\n '名称': npc['名称'],\n '称谓': npc['称谓'],\n '模型': npc['模型'],\n 'mx': npc['x'],\n 'my': npc['y'],\n '方向': npc['方向'],\n '地图': npc['地图'],\n 'NPC类型': npc['类型']\n }\n npcs.append(npc_data)\n return npcs\n\n\ndef player_enter_scene(pid, map_id):\n # 取其他玩家数据\n for _pid in get_players_in_scene(pid, map_id):\n player_data = rget(_pid, CHAR)\n send2pid(pid, S_添加玩家, player_data)\n # 通知其他玩家进入新场景\n my_data = rget(pid, CHAR)\n for _pid in get_players_in_scene(pid, map_id):\n print('通知进入:', pid, _pid)\n send2pid(_pid, S_添加玩家, my_data)\n\n\ndef player_leave_scene(pid):\n map_id = rget(pid, CHAR, '地图')\n # 取其他玩家数据\n for _pid in get_players_in_scene(pid, map_id):\n player_data = rget(_pid, CHAR)\n send2pid(pid, S_添加玩家, player_data)\n # 通知其他玩家离开原场景\n for _pid in get_players_in_scene(pid, map_id):\n print('通知离开:', pid, _pid)\n send2pid(_pid, S_删除玩家, dict(玩家=pid))\n\n\ndef player_set_path_request(pid, path: list):\n \"\"\"\n 玩家有移动路径时, 判断是否能移动\n :param pid:\n :param path:\n :return:\n \"\"\"\n # TODO\n send2pid(pid, S_发送路径, dict(路径=path))\n # 如果移动(path非空), 则广播给同场景玩家\n if path:\n for _pid in get_players_in_scene(pid, None):\n send2pid(_pid, S_玩家寻路, dict(玩家=pid, 路径=path))\n\n\ndef player_speak(pid, ch, text):\n map_id = rget(pid, CHAR, '地图')\n if ch == '当前':\n print('发言给:', get_players_in_scene(pid, map_id, True))\n for _pid in get_players_in_scene(pid, map_id, True):\n send2pid(_pid, S_频道发言, dict(频道=ch, 内容=text, 名称=rget(pid, CHAR, '名称')))\n send2pid(_pid, S_角色发言显示, dict(player=pid, 内容=text))\n\n\ndef scene_transfer(pid, map_id, x, y):\n player_leave_scene(pid)\n rset(pid, CHAR, map_id, '地图')\n rset(pid, CHAR, x, 'mx')\n rset(pid, CHAR, y, 'my')\n send_data = dict(map_id=map_id, x=x, y=y)\n send2pid(pid, S_地图传送, send_data)\n player_enter_scene(pid, map_id)\n","repo_name":"1992leiting/pygame_nt","sub_path":"server_redis/scene/scene_handler.py","file_name":"scene_handler.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7908228269","text":"from pymongo import MongoClient\nimport pymongo\nfrom pymongo import Connection\nimport json\nfrom bson import BSON\nfrom bson import json_util\n\nclass MongoConnection():\n def __init__ (self, host=\"localhost\",port=27017, db_name='indexer'):\n self.host = host\n self.port = port\n self.client = MongoClient(self.host, self.port)\n self.db = self.client[db_name]\n\n def create_table(self, table_name, index=None):\n self.db[table_name].create_index( [(index, pymongo.DESCENDING), ('unique',True)] )\n\n\n def get_one(self,table_name,conditions={}):\n single_doc = self.db[table_name].find_one(conditions)\n json_doc = json.dumps(single_doc,default=json_util.default)\n json_doc = json_doc.replace(\"$oid\", \"id\")\n json_doc = json_doc.replace(\"_id\", \"uid\")\n return json.loads(json_doc)\n\n def get_all(self,table_name,conditions={}, sort_index ='_id'):\n all_doc = self.db[table_name].find(conditions).sort(sort_index, pymongo.DESCENDING).limit(8)\n json_doc = json.dumps(list(all_doc),default=json_util.default)\n json_doc = json_doc.replace(\"$oid\", \"id\")\n json_doc = json_doc.replace(\"_id\", \"uid\")\n return json.loads(json_doc)\n \n def insert_one(self, table_name, value):\n self.db[table_name].insert(value)\n\n def update(self, table_name, where, what):\n 
self.db[table_name].update(where,{\"$set\":what},upsert=False)\n\n\n ","repo_name":"eddowding/foodtrade","sub_path":"mainapp/MongoConnection.py","file_name":"MongoConnection.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42974196705","text":"import sys\r\nfrom collections import Counter\r\nimport time as time\r\nimport numpy as np\r\nimport math\r\nimport string\r\nimport spacy\r\nfrom spacy.lang.en import English\r\nfrom spacy.lang.es import Spanish\r\nfrom spacy.lang.es.examples import sentences\r\n\r\n#tokenizer, takes about max 0.5 second just to eliminate stop-words and have unique words for 1MB of text\r\n\r\ndef main(): \r\n nlp = Spanish()\r\n nlp.Defaults.stop_words|= {\"-\",\"--\",\"!\",\"'\",\"&\",\"$\",}\r\n inputTextFilePath = sys.argv[1]\r\n content = open(inputTextFilePath, \"r\", encoding = 'utf-8')\r\n entireFileContent = content.read()\r\n setofuniqueselectedwords = set()\r\n selected_words = []\r\n\r\n #starting timer\r\n startingTime = time.time()\r\n totalFileLength = len(entireFileContent)\r\n numNLPIteration = math.modf(totalFileLength/1000000)\r\n remainder = numNLPIteration[0]\r\n numNLPIteration = numNLPIteration[1]\r\n i = 0\r\n while (i < numNLPIteration):\r\n subFileContent = entireFileContent[(i*1000000):((i+1)*1000000)]\r\n token = nlp(subFileContent, disable=[\"parser\", \"tagger\", \"ner\"]) #simply just tokenize the given text\r\n for word in token:\r\n if word.is_stop is False and word.is_punct is False and word.is_quote is False and word.is_bracket is False and word.is_digit is False:\r\n selected_words.append(word.text)\r\n i += 1 \r\n\r\n if remainder > 0:\r\n subFileContent = entireFileContent[(i*1000000):]\r\n token = nlp(subFileContent, disable=[\"parser\", \"tagger\", \"ner\"])\r\n for word in token:\r\n if word.is_stop is False and word.is_punct is False and word.is_quote is False and word.is_bracket is False and word.is_digit is False:\r\n selected_words.append(word.text)\r\n\r\n setofuniqueselectedwords = set(selected_words)\r\n print(time.time() - startingTime)\r\n keywords = ' '.join(sorted(setofuniqueselectedwords))\r\n sys.stdout.buffer.write(keywords.encode('utf-8'))\r\n\r\nif __name__=='__main__':\r\n main()\r\n\r\n","repo_name":"Joyliu290/KeyPhraseExtractionApp","sub_path":"POC_spacy_tokenization.py","file_name":"POC_spacy_tokenization.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"27189260660","text":"def printMatrix(m):\r\n for row in m:\r\n print(row)\r\n\r\ndef chainMatrix(dims):\r\n # Create the empty 2-D table\r\n n = len(dims)-1\r\n m = [[None for i in range(n)] for j in range(n)]\r\n traceback = [[None for i in range(n)] for j in range(n)]\r\n\r\n # Fill in the base case values\r\n for i in range(n):\r\n m[i][i] = 0\r\n traceback[i][i] = 0\r\n\r\n # Fill in the rest of the table diagonal by diagonal\r\n for chainLength in range(2,n+1):\r\n for i in range(n+1-chainLength):\r\n j = i + chainLength - 1\r\n # Fill in m[i][j] with the best of the recursive options\r\n m[i][j] = float(\"inf\")\r\n for k in range(i,j):\r\n # Two previous table values plus\r\n # what it cost to mult the resulting matrices\r\n q = m[i][k]+m[k+1][j]+dims[i]*dims[k+1]*dims[j+1]\r\n if q < m[i][j]:\r\n m[i][j] = q\r\n traceback[i][j] = k\r\n\r\n printMatrix(m)\r\n parentStr(traceback, i, j)\r\n return m[0][n-1]\r\n\r\ndef parentStr(traceback, 
start, end):\r\n    # Add parentheses until start and end are equivalent\r\n    if start == end:\r\n        print(\"A{}\".format(start), end = '')\r\n    else:\r\n        k = traceback[start][end]\r\n        print('(', end = '')\r\n        parentStr(traceback, start, k)\r\n        parentStr(traceback, k+1, end)\r\n        print(')', end = '')\r\n\r\n\r\ndims = [30,35,15,5,10,20,25]\r\nchainMatrix(dims)\r\n","repo_name":"kminde/OtherProjects","sub_path":"Katrina_Wheeler_lab8.py","file_name":"Katrina_Wheeler_lab8.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"21142043644","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom datetime import datetime\nfrom .AlpacaFunctions import get_historical_dataframe\n\n\n# %%\n# Constructing stock data api call\ndef get_stock_data(stock_tickers, numDays):\n    symbols = list(stock_tickers)\n    st.write(symbols)\n    today = dt.date.today()\n    start = pd.to_datetime(today - dt.timedelta(days=numDays))\n    yesterday = pd.to_datetime(today - dt.timedelta(days=1))\n    end = yesterday\n    timeframe = '1Day'\n    limit = 5000\n    stocks_df = pd.DataFrame()\n    stocks_close_df = pd.DataFrame()\n\n    \n    # Iterating through tickers to isolate and concat close data \n    for symbol in symbols: \n        \n        symbol_df = get_historical_dataframe(symbol=symbol, start=start, end=end, timeframe=timeframe, limit = limit)\n        ticker_close_df = pd.DataFrame(symbol_df['close'])\n        ticker_close_df.index = ticker_close_df.index.droplevel(0)\n        ticker_close_df.columns = [symbol]\n        if stocks_close_df.empty:\n            stocks_close_df = ticker_close_df\n        else:\n            # Merge the close data into a single dataframe\n            stocks_close_df = pd.merge(stocks_close_df, ticker_close_df, left_index=True, right_index=True)\n        # Concatenating all stock data\n        stocks_df = pd.concat([stocks_df, symbol_df], axis=1)\n    \n    # %%\n    # Drop n/a values by columns, we don't want to skew our data if stocks do not have enough historical data\n    stocks_df.dropna(axis=1, inplace=True)\n\n    # %%\n    # Eliminating any duplicate columns\n    new_stocks_df = stocks_df.copy().loc[:,~stocks_df.columns.duplicated()]\n    st.dataframe(stocks_close_df)\n\n    \n    \n    # Normalize the stock dataframe index if dataframe is not empty\n    # Convert index to datetime if not already\n    \n    stocks_close_df.index = pd.to_datetime(stocks_close_df.index)\n    stocks_close_df.index = stocks_close_df.index.normalize()\n    \n    \n    return stocks_close_df","repo_name":"sjufan84/esg_risk_parity","sub_path":"utils/get_stock_data.py","file_name":"get_stock_data.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"74303308743","text":"def meth(ll):\n    firstlargest = 0\n    secondlargest = 0\n    for x in ll:\n        if x > secondlargest:\n            if x > firstlargest:\n                # the previous maximum becomes the new runner-up\n                secondlargest = firstlargest\n                firstlargest = x\n            else:\n                secondlargest = x\n\n\n    return secondlargest \n\n\n\ndef main():\n    ll = [2,4,1,9,4,7,0,1,7,8,21,15,0,1]\n    print(meth(ll))\n\nif __name__ == '__main__':\n    main() \n","repo_name":"devx359/pythonInterview","sub_path":"scripts/2ndlargest.py","file_name":"2ndlargest.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38772427467","text":"# Parse an .md file and convert footnotes to the correct form\n# (superscript index with hover-text and footer).\n\nimport sys\nimport re\n\nlast_index = 0\nnotes = []\n\n\ndef make_footnote(match):\n    global 
last_index, notes\n    last_index += 1\n    notes.append(match.group(1))\n\n    # superscript reference: index linked to the footer entry, note text shown on hover\n    res = '<sup id=\"sref%d\">%d</sup><span class=\"fn-hover\">%s</span>'\n\n    return res % (last_index, last_index, match.group(1))\n\n\ndef main():\n    text = ''.join(sys.stdin.readlines())\n    res = re.sub(' ?{([^{]*)}', make_footnote, text)\n\n    note = '<li id=\"fn%d\">%d: %s</li>'\n    footer = '\\n'.join([note % (i + 1, i + 1, s) for i, s in\n                        enumerate(notes)])\n\n    print(res)\n    if last_index > 0:\n        print('\\n- - -\\n<b>Footnotes:</b>
\\n')\n print(footer)\n\nif __name__ == '__main__':\n main()\n","repo_name":"dd0/bridge","sub_path":"footnote.py","file_name":"footnote.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15465186088","text":"from setuptools import setup\n\npackage_name = 'prototype'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='vehshanaan',\n maintainer_email='vehshanaan@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n\n \"NodeInstance = prototype.node:main\",\n # ros2 run pkg 指令名 = 包名。对应python文件去掉py结尾:main\n \"OOPNode = prototype.node_oop:main\",\n \"Talker = prototype.talker:main\",\n \"Listener = prototype.listener:main\",\n \"customMsg = prototype.customMsg:main\",\n \"customServer = prototype.customServer:main\",\n \"customClient = prototype.customClient:main\",\n \"parameter = prototype.parameters:main\",\n \"customActionServer = prototype.customActionServer:main\",\n \"customActionClient = prototype.customActionClient:main\",\n ],\n },\n)\n","repo_name":"Vehshanaan/Dissertation2022","sub_path":"codes/ros_learn/src/prototype/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11326704476","text":"from io import BytesIO\nfrom zipfile import ZipFile\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n\nclass ImageMapper:\n\n @staticmethod\n def from_api(file):\n \"\"\"\n Maps an Image from buffer to opencv-Image\n :param file: File as Buffer\n :return: opencv-image (Numpy Array)\n \"\"\"\n image = Image.open(file.stream)\n image = np.array(image)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n return image\n\n @staticmethod\n def from_image(image):\n \"\"\"\n Maps an opencv-image to buffer\n :param image: opencv-image (Numpy Array)\n :return: File as Buffer\n \"\"\"\n is_success, buffer = cv2.imencode(\".png\", image)\n return BytesIO(buffer)\n\n @staticmethod\n def from_images(images):\n \"\"\"\n Maps multiple opencv-images to zip file as buffer\n :param images: list of opencv-images\n :return: Zip file as buffer\n \"\"\"\n stream = BytesIO()\n with ZipFile(stream, 'w') as zf:\n for i, image in enumerate(images):\n is_success, buffer = cv2.imencode(\".png\", image)\n zf.writestr(f'sudoku_solution_{i}.png', buffer)\n stream.seek(0)\n return stream\n","repo_name":"niklastanner/puzzle-solver","sub_path":"src/api/mappers/ImageMapper.py","file_name":"ImageMapper.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11779958635","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\ndef can_go(r, c):\n if r >= n or r <= -1 or c >= n or c <= -1 or visited[r][c]:\n return False\n return True\n\nn = int(input())\nr1, c1, r2, c2 = map(int, input().split())\n\nvisited = [[0 for _ in range(n)] for _ in range(n)]\nQ = deque()\ndr = [-2, -2, 0, 0, 2, 2]\ndc = [-1, 1, -2, 2, -1, 1]\n\nfor zr, zc in zip(dr, dc):\n r = r1 + zr\n c = c1 + zc\n if not can_go(r, c):\n continue\n visited[r][c] = 
1\n Q.append((r, c, 1))\nvisited[r1][c1] = 1 # mark the start square as visited too, so the knight cannot loop back onto it\n\nfound = (r1 == r2 and c1 == c2)\nif found: # already standing on the target\n print(0)\n\nwhile Q and not found:\n qr, qc, cnt = Q.popleft()\n if qr == r2 and qc == c2:\n print(cnt)\n found = True\n break\n for zr, zc in zip(dr, dc):\n r = qr + zr\n c = qc + zc\n if not can_go(r, c):\n continue\n visited[r][c] = 1\n Q.append((r, c, cnt+1))\n\nif not found: # the original 'if not Q' could also fire after the target was found on the last queued node\n print(-1)","repo_name":"SeHeon-Park/Study_Algorithm","sub_path":"알고리즘 스터디/16948번 데스 나이트.py","file_name":"16948번 데스 나이트.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"27924079712","text":"# Bhaskara's Formula\r\nimport math\r\n\r\n\r\ndef return_delta(a, b, c):\r\n return round(float(math.pow(b, 2)-4*a*c),5)\r\n\r\n\r\ndef bhaskara_formula(a, b, delta, root):\r\n assert (root in('x1','x2')),\"Invalid value for root\"\r\n if root == 'x1':\r\n x1 = ((-1*b) + math.sqrt(delta))/(2*a)\r\n return x1\r\n else:\r\n x2 = ((-1*b) - math.sqrt(delta))/(2*a)\r\n return x2\r\n\r\n\r\nvalues = input().split(' ')\r\n\r\na, b, c = values\r\n\r\na = float(a)\r\nb = float(b)\r\nc = float(c)\r\n\r\ntry:\r\n delta = return_delta(a, b, c)\r\n x1 = bhaskara_formula(a,b,delta,'x1')\r\n x2 = bhaskara_formula(a,b,delta,'x2')\r\n\r\n print(\"R1 = %.5f\" % x1)\r\n print(\"R2 = %.5f\" % x2)\r\nexcept ZeroDivisionError:\r\n print(\"Impossivel calcular\") # kept in Portuguese: the online judge expects this exact output string\r\nexcept ValueError:\r\n print(\"Impossivel calcular\")\r\n","repo_name":"rendersonjunior/UriOnlineJudge-Python","sub_path":"1036_Bhaskara's_Formula.py","file_name":"1036_Bhaskara's_Formula.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"45458521996","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab\n\nfrom __future__ import unicode_literals, division, absolute_import, print_function\n\nimport os\nimport sys\nimport math\n\nfrom utilities import UpdateChecker, tuple_version, ismacos, iswindows\n\ntry:\n from PySide2.QtCore import Qt, QByteArray, QCoreApplication, QLibraryInfo, QPoint, QRect, QSize, QTimer, QMargins, qVersion\n from PySide2.QtWidgets import QAction, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox\n from PySide2.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QLineEdit, QMessageBox, QPushButton\n from PySide2.QtWidgets import QStyleFactory, QTextEdit, QMainWindow, QWidget, QLayout, QSizePolicy\n from PySide2.QtGui import QColor, QFont, QIcon, QPalette, QPixmap, QImage\n print('Pyside2')\nexcept ImportError:\n from PyQt5.QtCore import Qt, QByteArray, QCoreApplication, QLibraryInfo, QPoint, QRect, QSize, QTimer, QMargins, qVersion\n from PyQt5.QtWidgets import QAction, QApplication, QCheckBox, QComboBox, QDialog, QDialogButtonBox\n from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QLineEdit, QMessageBox, QPushButton\n from PyQt5.QtWidgets import QStyleFactory, QTextEdit, QMainWindow, QWidget, QLayout, QSizePolicy\n from PyQt5.QtGui import QColor, QFont, QIcon, QPalette, QPixmap, QImage\n print('PyQt5')\n# QPoint, QRect, QSize and QSizePolicy were missing from the original imports,\n# although the FlowLayout class further down uses all four.\n\n\ndef launch_gui(bk, prefs):\n if not ismacos:\n try:\n setup_highdpi(bk._w.highdpi)\n except Exception:\n pass\n try:\n setup_ui_font(bk._w.uifont)\n except Exception:\n pass\n if not ismacos and not iswindows:\n # Qt 5.10.1 on Linux resets the global font on first event loop tick.\n # So workaround it by setting the font once again in a timer.\n try:\n QTimer.singleShot(0, lambda : setup_ui_font(bk._w.uifont))\n except Exception:\n pass\n app = QApplication([])\n icon = os.path.join(bk._w.plugin_dir, bk._w.plugin_name, 'plugin.png')\n app.setWindowIcon(QIcon(icon))\n 
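 # Added note: Qt.AA_DisableWindowContextHelpButton only exists from Qt 5.10 on,
 # which is why the call below is gated on tuple_version(qVersion()).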
\n if tuple_version(qVersion()) >= (5, 10, 0):\n app.setAttribute(Qt.AA_DisableWindowContextHelpButton)\n \n # Make plugin match Sigil's light/dark theme\n dark_palette(bk, app)\n \n win = guiMain(bk, prefs)\n app.exec_()\n\ndef dark_palette(bk, app):\n if not (bk.launcher_version() >= 20200117):\n return\n if bk.colorMode() != \"dark\":\n return\n\n p = QPalette()\n sigil_colors = bk.color\n dark_color = QColor(sigil_colors(\"Window\"))\n disabled_color = QColor(127,127,127)\n dark_link_color = QColor(108, 180, 238)\n text_color = QColor(sigil_colors(\"Text\"))\n p.setColor(p.Window, dark_color)\n p.setColor(p.WindowText, text_color)\n p.setColor(p.Base, QColor(sigil_colors(\"Base\")))\n p.setColor(p.AlternateBase, dark_color)\n p.setColor(p.ToolTipBase, dark_color)\n p.setColor(p.ToolTipText, text_color)\n p.setColor(p.Text, text_color)\n p.setColor(p.Disabled, p.Text, disabled_color)\n p.setColor(p.Button, dark_color)\n p.setColor(p.ButtonText, text_color)\n p.setColor(p.Disabled, p.ButtonText, disabled_color)\n p.setColor(p.BrightText, Qt.red)\n p.setColor(p.Link, dark_link_color)\n p.setColor(p.Highlight, QColor(sigil_colors(\"Highlight\")))\n p.setColor(p.HighlightedText, QColor(sigil_colors(\"HighlightedText\")))\n p.setColor(p.Disabled, p.HighlightedText, disabled_color)\n\n app.setStyle(QStyleFactory.create(\"Fusion\"))\n app.setPalette(p)\n\ndef setup_highdpi(highdpi):\n has_env_setting = False\n env_vars = ('QT_AUTO_SCREEN_SCALE_FACTOR', 'QT_SCALE_FACTOR', 'QT_SCREEN_SCALE_FACTORS', 'QT_DEVICE_PIXEL_RATIO')\n for v in env_vars:\n if os.environ.get(v):\n has_env_setting = True\n break\n if highdpi == 'on' or (highdpi == 'detect' and not has_env_setting):\n QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)\n elif highdpi == 'off':\n QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, False)\n for p in env_vars:\n os.environ.pop(p, None)\n\ndef setup_ui_font(font_str):\n font = QFont()\n font.fromString(font_str)\n QApplication.setFont(font)\n\n\nclass guiMain(QMainWindow):\n def __init__(self, bk, prefs):\n super(guiMain, self).__init__()\n # Edit Plugin container object\n self.bk = bk\n self.prefs = prefs\n \n self._ok_to_close = False\n self.setWindowTitle('Remove Duplicates')\n self.setup_ui()\n \n def setup_ui(self):\n app = QApplication.instance()\n p = app.palette()\n link_color = p.color(p.Active, p.Link).name()\n \n layout = QVBoxLayout()\n \n widget = QWidget()\n widget.setLayout(layout)\n self.setCentralWidget(widget)\n \n button_layout = QHBoxLayout()\n layout.addLayout(button_layout)\n self.process_button = QPushButton('Remove Duplicates', self)\n self.process_button.clicked.connect(self._remove_duplicates)\n button_layout.addWidget(self.process_button)\n \n button_layout.addStretch(-1)\n \n self.abort_button = QPushButton('Abort', self)\n self.abort_button.clicked.connect(self._abort_clicked)\n button_layout.addWidget(self.abort_button)\n \n \n self.quit_button = QPushButton('Quit', self)\n self.quit_button.clicked.connect(self._quit_clicked)\n button_layout.addWidget(self.quit_button)\n \n layout.addStretch(-1)\n \n images_layout = FlowLayout(self)\n layout.addLayout(images_layout)\n \n for id, path, minetype in self.bk.image_iter():\n images_layout.addWidget(ImageCheck(self.bk.id_to_bookpath(id), self.bk.readfile(id)))\n \n \n if 'windowGeometry' in self.prefs:\n try:\n self.restoreGeometry(QByteArray.fromHex(self.prefs['windowGeometry'].encode('ascii')))\n except Exception:\n pass\n \n self.show()\n \n \n def _remove_duplicates(self):\n 
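 # Added sketch (hypothetical, not the plugin's actual logic): the method below is
 # still a stub; a working implementation could hash each image and keep only the
 # first occurrence of each digest, e.g.:
 # import hashlib
 # seen = {}
 # for id, path, minetype in self.bk.image_iter():
 #     digest = hashlib.sha256(self.bk.readfile(id)).hexdigest()
 #     seen.setdefault(digest, []).append(id)
 # every id after the first in each seen[digest] list is then a duplicate.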
print('_remove_duplicates()')\n print('Remove Duplicates')\n \n def _quit_clicked(self):\n self._ok_to_close = True\n \n \n self.close()\n \n def _abort_clicked(self):\n self._ok_to_close = True\n self.close()\n \n def closeEvent(self, event):\n self.prefs['windowGeometry'] = self.saveGeometry().toHex().data().decode('ascii')\n if self._ok_to_close:\n event.accept()\n else:\n self._abort_clicked()\n\n\nclass FlowLayout(QLayout):\n def __init__(self, parent=None):\n super().__init__(parent)\n if parent is not None:\n self.setContentsMargins(QMargins(0, 0, 0, 0))\n \n self._item_list = []\n \n def __del__(self):\n item = self.takeAt(0)\n while item:\n item = self.takeAt(0)\n \n def addItem(self, item):\n self._item_list.append(item)\n \n def count(self):\n return len(self._item_list)\n \n def itemAt(self, index):\n if 0 <= index < len(self._item_list):\n return self._item_list[index]\n \n return None\n \n def takeAt(self, index):\n if 0 <= index < len(self._item_list):\n return self._item_list.pop(index)\n \n return None\n \n def expandingDirections(self):\n return Qt.Orientation(0)\n \n def hasHeightForWidth(self):\n return True\n \n def heightForWidth(self, width):\n height = self._do_layout(QRect(0, 0, width, 0), True)\n return height\n \n def setGeometry(self, rect):\n super(FlowLayout, self).setGeometry(rect)\n self._do_layout(rect, False)\n \n def sizeHint(self):\n return self.minimumSize()\n \n def minimumSize(self):\n size = QSize()\n \n for item in self._item_list:\n size = size.expandedTo(item.minimumSize())\n \n size += QSize(2 * self.contentsMargins().top(), 2 * self.contentsMargins().top())\n return size\n \n def _do_layout(self, rect, test_only):\n x = rect.x()\n y = rect.y()\n line_height = 0\n spacing = self.spacing()\n \n for item in self._item_list:\n style = item.widget().style()\n layout_spacing_x = style.layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Horizontal)\n layout_spacing_y = style.layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Vertical)\n space_x = spacing + layout_spacing_x\n space_y = spacing + layout_spacing_y\n next_x = x + item.sizeHint().width() + space_x\n if next_x - space_x > rect.right() and line_height > 0:\n x = rect.x()\n y = y + line_height + space_y\n next_x = x + item.sizeHint().width() + space_x\n line_height = 0\n \n if not test_only:\n item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))\n \n x = next_x\n line_height = max(line_height, item.sizeHint().height())\n \n return y + line_height - rect.y()\n\n\nclass ImageCheck(QWidget):\n def __init__(self, image_path, bytes):\n QWidget.__init__(self)\n self.setStyleSheet(\"border: 1px solid black\")\n \n layout = QVBoxLayout()\n self.setLayout(layout)\n \n self.image_name = os.path.basename(image_path)\n self.check = QCheckBox(self.image_name, self)\n layout.addWidget(self.check) # the checkbox was created but never placed in the layout\n \n image = QPixmap()\n image.loadFromData(bytes)\n thumbnail = image.scaled(100, 100, aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatioByExpanding, transformMode=Qt.TransformationMode.FastTransformation) # scaled() returns a new pixmap; the original call discarded the result\n label = QLabel(self)\n label.setPixmap(thumbnail)\n layout.addWidget(label)\n \n info_layout = QHBoxLayout()\n layout.addLayout(info_layout)\n \n size = str(image.width()) +'x'+ str(image.height()) # report the original (unscaled) dimensions\n info_layout.addWidget(QLabel(size))\n \n info_layout.addStretch(-1)\n \n size = f'{len(bytes) / 1024:.1f}Kb'\n info_layout.addWidget(QLabel(size))\n \n def checkState(self):\n return self.check.checkState()\n\n\ndef main():\n return -1\n\nif __name__ == \"__main__\":\n 
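 # Added note: main() above is a stub that returns -1, so direct execution exits
 # non-zero; the real entry point is launch_gui(bk, prefs), presumably invoked by
 # the host application's plugin loader.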
sys.exit(main())\n","repo_name":"un-pogaz/sigil-plugins","sub_path":"RemoveDuplicates/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"21272164328","text":"from collections import OrderedDict as od\n\ndef solve():\n to_find = \"hackerrank\"\n s = list(input())\n unique = list(od.fromkeys(s)) # first-appearance order; do not overwrite s, or every count below becomes 1\n for alpha in unique:\n print((alpha, s.count(alpha)), end = ' ')\n\ndef main():\n for i in range(int(input())):\n solve()\n \nmain()","repo_name":"Ni9Logic/Competitive-Programming","sub_path":"HackerRank/src/hacker.py","file_name":"hacker.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18932854240","text":"import os\nimport sys\nimport time\nimport psycopg2\nfrom tqdm import tqdm\n\nconnection = psycopg2.connect(database=sys.argv[1])\ncursor = connection.cursor()\ncmd = \"insert into oban_jobs (state,queue,worker,args) values (%s,%s,%s,%s)\"\nwith open(\"jobs.backup\", \"r\") as f, open(\"/tmp/failed.backup\", \"w\") as g:\n for i in tqdm(f):\n data = [x.strip() for x in i.split(\"|\")]\n try:\n cursor.execute(cmd, (\"available\", data[2], data[3], data[4]))\n connection.commit()\n except Exception as ex:\n print(ex)\n g.write(i)\n time.sleep(0.2)\n","repo_name":"FloatingGhost/pleroma-oban-respooler","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"71690476746","text":"import pygame\nimport random\nimport time\nfrom pokemon import *\nfrom player import *\nfrom pokemon_list import *\npygame.init()\n\n\n#Specific Pokemon Variables\npokemon_caught = 0\npokemon_on_screen = 0\n\n\n#Game & character variables\nscore = 0\nseconds = 60\nfont = pygame.font.SysFont(\"comicsans\", 30, True)\nrun = False\nwin = pygame.display.set_mode((500,500))\npygame.display.set_caption(\"PokeCatcher\")\nclock = pygame.time.Clock()\nbg = pygame.image.load('bg.png')\nimage = pygame.image.load('main_character/D1.png')\nlogo = pygame.image.load('logo.png')\nlogo = pygame.transform.scale(logo, (300,150))\nscreen = pygame.image.load('screen.png')\nscreen = pygame.transform.scale(screen, (500,500))\n\n\n\ndef getPokemon():\n if(pokemon_on_screen < 3 and len(pokemon_list) > 0):\n # pokemon_on_screen = pokemon_on_screen + 1\n pokemon_num = random.randint(0, len(pokemon_list)-1)\n x = random.randint(90 , 360)\n y =random.randint(90, 360)\n sprite = pokemon_list[pokemon_num]\n new_poke = pokemon(x, y , sprite)\n pokemon_list.pop(pokemon_num)\n return new_poke\n\ndef updateTimer():\n text = font.render(\"Time: \" + str(seconds//50 * 1), 1, (255,255,255))\n win.blit(text, (350,00))\n\n\n\ndef redrawGameWindow():\n global walkCount\n win.blit(bg , (0 ,0))\n updateTimer()\n red.draw(win)\n text = font.render(\"Score: \" + str(score), 1, (255,255,255))\n win.blit(text, (0, 0))\n poke.draw(win)\n poke2.draw(win)\n poke3.draw(win)\n pygame.display.update()\n\nwhile(run == False):\n for event in pygame.event.get():\n if(event.type == pygame.QUIT):\n run = False\n win.blit(screen , ( 0,0))\n win.blit(image, (220, 300))\n win.blit(logo, (100, 100))\n keys = pygame.key.get_pressed()\n pygame.display.update()\n if keys[pygame.K_RETURN]:\n run = True\n\n\nred = player(random.randint(90 , 360),random.randint(90 , 360), 64, 64)\nwin.blit(image, (red.x, red.y))\npoke = 
getPokemon()\npoke2 = getPokemon()\npoke3 = getPokemon()\npygame.display.update()\nwhile (run and (seconds//50*1 != 30)):\n clock.tick(27)\n for event in pygame.event.get():\n if(event.type == pygame.QUIT):\n run = False\n\n\n if((poke.x -32 < red.x and red.x < poke.x + 32) or (poke.x + 32 < red.x and red.x < poke.x-32)):\n if((poke.y -32 < red.y and red.y < poke.y + 32) or (poke.y + 32 < red.y and red.y < poke.y-32)):\n score += 10\n pokemon_caught += 1\n pokemon_on_screen -= 1\n poke.update()\n\n\n if((poke2.x -32 < red.x and red.x < poke2.x + 32) or (poke2.x + 32 < red.x and red.x < poke2.x-32)):\n if((poke2.y -32 < red.y and red.y < poke2.y + 32) or (poke2.y + 32 < red.y and red.y < poke2.y-32)):\n score += 10\n pokemon_caught += 1\n pokemon_on_screen -= 1\n poke2.update()\n\n\n if((poke3.x -32 < red.x and red.x < poke3.x + 32) or (poke3.x + 32 < red.x and red.x < poke3.x-32)):\n if((poke3.y -32 < red.y and red.y < poke3.y + 32) or (poke3.y + 32 < red.y and red.y < poke3.y-32)):\n score += 10\n pokemon_caught += 1\n pokemon_on_screen -= 1\n poke3.update()\n\n\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n if(red.x > 30):\n red.x -= red.vel\n red.left = True\n red.right = False\n red.standing = False\n elif keys[pygame.K_RIGHT]:\n if(red.x < (500 - 90)):\n red.x += red.vel\n red.right = True\n red.left = False\n red.standing = False\n elif keys[pygame.K_UP]:\n if(red.y > 30):\n red.y -= red.vel\n red.up = True\n red.right = False\n red.left = False\n red.down = False\n red.standing = False\n elif keys[pygame.K_DOWN]:\n if(red.y < (500 - 150)):\n red.y += red.vel\n red.up = False\n red.right = False\n red.left = False\n red.down = True\n red.standing = False\n else:\n red.standing = True\n\n redrawGameWindow()\n seconds += 1\n pokemon_on_screen += 1\n\nendScreen = True\nwhile endScreen:\n for event in pygame.event.get():\n if(event.type == pygame.QUIT):\n endScreen = False\n win.blit(screen , (0 ,0))\n text = font.render(\"Final Score: \" + str(score), 1, (0,0,0))\n win.blit(text, (125, 200))\n caught = font.render(\"Total Pokemon Caught: \" + str(pokemon_caught), 1, (0,0,0))\n win.blit(caught, (125, 250))\n pygame.display.update()\n\n\n\n\nprint(\"Congratulations!\")\nprint(\"Score: \" + str(score))\nprint(\"Pokemon Caught: \" + str(pokemon_caught))\npygame.quit()\n","repo_name":"kylejava/PickHacks-2021","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24052766215","text":"from xml.etree.ElementTree import Element, tostring\n\ndef dict_to_xml(tag, d):\n '''\n Turn a simple dict of key/value pairs into XML\n '''\n elem = Element(tag)\n for key, val in d.items():\n child = Element(key)\n child.text = str(val)\n elem.append(child)\n return elem\n\ndef serialize_xml(data, alias='root', element='element'):\n alunos = Element(alias)\n for item in data:\n alunos.append( dict_to_xml(element, item) )\n return tostring(alunos)","repo_name":"nenodias/anev-webservice","sub_path":"xml_utils.py","file_name":"xml_utils.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30550967016","text":"#MQTT Publish\nfrom random import randint\nimport paho.mqtt.publish as publish\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\ndelayT = .1\nvalue = 0 #LDR Value\nmod = 'auto'\nldr = 7 # LDR pin number\n\nhost = 
\"5.196.95.208\"\n\ndef publishMod():\n f = open(\"mod\", \"r\")\n mod = f.read()\n f.close()\n publish.single(\"iotSmartHouse001/ldr/mod/current\", str(mod), hostname=host)\n\ndef rc_time(ldr):\n count = 0\n\n GPIO.setup(ldr, GPIO.OUT)\n GPIO.output(ldr, False)\n time.sleep(delayT)\n\n GPIO.setup(ldr, GPIO.IN)\n\n while(GPIO.input(ldr) == 0):\n count += 1\n\n return count\n\nprint(\"Server Started.\")\ntry:\n while True:\n value = rc_time(ldr)\n publishMod()\n if(int(value) <= 400000):\n publish.single(\"iotSmartHouse001/lightDecision\", str(value) + \",OFF\", hostname=host)\n if(int(value) > 400000):\n publish.single(\"iotSmartHouse001/lightDecision\", str(value) + \",ON\", hostname=host)\n\nexcept KeyboardInterrupt:\n pass\nfinally:\n GPIO.cleanup()\n","repo_name":"NipunEranda/Smart-House-IOT","sub_path":"webapp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9068726636","text":"\nimport torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport random\nimport numpy as np\n\n# --------------------------------------- SUMMARY ---------------------------------------\n'''\nLet's say 4 examples, block_size = 8:\n\nXb\t\t\t(4, 8)\t\t\t4 words, 8 letters each\nC[Xb]\t\t(4, 8, 10)\t\tEmbeds each letter into 10 dimensions\nFlatten\t\t(4, 80)\t\t\tFlattens char and dim layers\nLinear\t\t(4, 200)\t\tPuts 80 dimensions into 200 hidden neurons (4, 80) @ (80, 200) + (200)\n\nIn matrix mult, torch treats first dimensions as batch dimensions, doesn't change\n(4, 5, 6, 7, 80) @ (80, 200) = (4, 5, 6, 7, 200)\n\nProblem with makemore is that it squishes eight characters down to one layer.\nWe want to group them in bigrams (20 char-dims instead of 80).\nDon't want 80 flattened char-dims to come in, but separate (4, 4, 20) (4 groups of 2 characters * 10 features)\nWant (4, 4, 20) @ (20, 200) + (200) = (4, 4, 200)\n\nChanges:\n\tLinear layer shouldn't expect 80 inputs, but 20\n\tFlatten layer shouldn't flatten all the way to 80, but to 20\n\n\nFlatten\n\tInput: e = (4, 8, 10)\n\tPerforms: e.view(4, 80)\n\tWant: torch.cat([e[:, ::2, :], e[:, 1::2, :]], dim = 2)\n\n\nEmbedding : (4, 8, 10)\nFlattenConsecutive : (4, 4, 20)\nLinear : (4, 4, 200)\nBatchNorm1d : (4, 4, 200)\nTanh : (4, 4, 200)\nFlattenConsecutive : (4, 2, 400)\nLinear : (4, 2, 200)\nBatchNorm1d : (4, 2, 200)\nTanh : (4, 2, 200)\nFlattenConsecutive : (4, 400)\nLinear : (4, 200)\nBatchNorm1d : (4, 200)\nTanh : (4, 200)\nLinear : (4, 27)\n\n\n'''\nblock_size = 8\n\n\nn_embd = 10\nn_hidden = 100\n\nmax_steps = 50000\nbatch_size = 32\n\nn_samples = 30\n\n# --------------------------------------- BUILDING DATASET ---------------------------------------\n\ng = torch.Generator().manual_seed(2147483647)\nwords = open('names.txt', 'r').read().splitlines()\n\nchars = sorted(list(set(''.join(words))))\nstoi = {s:i+1 for i,s in enumerate(chars)}\nstoi['.'] = 0\n\n\nitos = {i:s for s,i in stoi.items()}\n\nvocab_size = len(itos) \t# 27\n\n#random.seed(42)\nrandom.shuffle(words)\n\ndef build_dataset(words):\n\tX, Y = [], []\n\n\tfor w in words:\n\n\t\tcontext = [0] * block_size\n\n\t\tfor ch in w + '.':\n\t\t\tix = stoi[ch]\n\t\t\tX.append(context) # input: three chars\n\t\t\tY.append(ix) # output: index of next char\n\t\t\t#print(''.join(itos[i] for i in context), '--->', itos[ix])\n\t\t\tcontext = context[1:] + [ix] # crop and append\n\n\tX = torch.tensor(X)\n\tY = torch.tensor(Y)\n\n\treturn X, 
Y\n\n\nn1 = int(0.8*len(words))\nn2 = int(0.9*len(words))\n\nX_tr, Y_tr = build_dataset(words[:n1])\nX_dev, Y_dev = build_dataset(words[n1:n2])\nX_te, Y_te = build_dataset(words[n2:])\n\n\n\n# --------------------------------------- NETWORK CLASSES ---------------------------------------\n\n\nclass Linear:\n\n\tdef __init__(self, fan_in, fan_out, bias=True):\n\n\t\t# Squash W1 to void killing neurons and saturated tanh.\n\t\tself.weight = torch.randn((fan_in, fan_out), generator=g) / fan_in**0.5\n\t\tself.bias = torch.zeros(fan_out) if bias else None\n\n\n\tdef __call__(self, x):\n\t\tself.out = x @ self.weight\n\t\tif self.bias is not None:\n\t\t\tself.out += self.bias\n\n\t\treturn self.out\n\n\tdef parameters(self):\n\t\treturn [self.weight] + ([] if self.bias is None else [self.bias])\n\n\n# Problem: only works for 1 dimension\n# (32, 4, 68) --> (1, 4, 68); want to normalize 32*4 numbers\nclass BatchNorm1d:\n\n\tdef __init__(self, dim, eps=1e-5, momentum=0.1):\n\t\tself.eps = eps\n\t\tself.momentum = momentum\n\t\tself.training = True\n\n\t\t# Parameters trained with backprop\n\t\tself.gamma = torch.ones(dim)\t# Batch Norm Gain\t\n\t\tself.beta = torch.zeros(dim)\t# Batch Norm Bias\n\n\t\t# Buffers\n\t\tself.running_mean = torch.zeros(dim)\n\t\tself.running_var = torch.ones(dim)\n\n\tdef __call__(self, x):\n\n\t\t# Forward Pass\n\t\tif self.training:\n\n\n\t\t\tif x.ndim == 2: # (32, 100) --> (1, 100)\n\t\t\t\tdim = 0\n\t\t\telif x.ndim == 3: # (32, 4, 68) --> (1, 1, 68) instead of (1, 4, 68)\n\t\t\t\tdim = (0, 1)\n\t\t\t\t# Departure from pytorch: torch would batchnorm over (0th and 2nd layers, not 0th and 1st)\n\n\t\t\txmean = x.mean(dim, keepdim=True) \t\t\t\t# Batch mean\n\t\t\txvar = x.var(dim, keepdim=True, unbiased=True) \t# Batch variance\n\t\telse:\n\t\t\txmean = self.running_mean\n\t\t\txvar = self.running_var\n\n\t\txhat = (x - xmean) / torch.sqrt(xvar + self.eps)\t# Normalize to unit variance, avoid div by 0 with eps.\n\t\tself.out = self.gamma * xhat + self.beta\n\n\t\t# Update buffers (running mean and var).\n\t\tif self.training:\n\t\t\twith torch.no_grad():\n\t\t\t\tself.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * xmean\n\t\t\t\tself.running_var = (1 - self.momentum) * self.running_var + self.momentum * xvar\n\n\t\treturn self.out\n\n\n\tdef parameters(self):\n\t\treturn [self.gamma, self.beta] # Gain and bias\n\n\nclass Tanh:\n\n\tdef __call__(self, x):\n\t\tself.out = torch.tanh(x)\n\t\treturn self.out\n\n\tdef parameters(self):\n\t\treturn []\n\n# C\nclass Embedding:\n\n\tdef __init__(self, num_embeddings, embedding_dim):\n\t\tself.weight = torch.randn((num_embeddings, embedding_dim))\n\n\tdef __call__(self, IX):\n\t\tself.out = self.weight[IX]\n\t\treturn self.out\n\n\tdef parameters(self):\n\t\treturn [self.weight]\n\n\nclass FlattenConsecutive:\n\n\tdef __init__(self, n):\n\t\tself.n = n \t\t# Number of elements to flatten and concatenate in last dimension\n\n\tdef __call__(self, x):\n\n\t\tB, T, C = x.shape\t\t# (4, 8, 10)\n\t\t# self.out = x.view(x.shape[0], -1) \told\n\n\t\t# Input: (4, 8, 10); Output: (4, 4, 20)\n\t\t# First tensor: takes all even dimensions from first dimension (4, 4, 10)\n\t\t# Second tensor: takes all odd dimensions from first dimension (4, 4, 10)\n\t\t# Concat along second dim (10s): (4, 4, 20)\n\n\t\t#self.out = torch.cat([x[:, ::2, :], x[:, 1::2, :]], dim = 2)\t# Explicit\n\t\tx = x.view(B, T//self.n, C*self.n)\n\n\t\t# Can happen if n is something like 3: superious dimension\n\t\tif x.shape[1] == 1:\n\t\t\tx 
= x.squeeze(1) \t# Will return (B, C*n)\n\n\t\tself.out = x\n\t\treturn self.out\n\n\tdef parameters(self):\n\t\treturn []\n\n\nclass Sequential:\n\n\tdef __init__(self, layers):\n\t\tself.layers = layers\n\n\tdef __call__(self, x):\n\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x)\n\t\tself.out = x\n\t\treturn self.out\n\n\tdef parameters(self):\n\t\treturn [p for layer in self.layers for p in layer.parameters()]\n\n\tdef print_layers(self):\n\t\tfor layer in self.layers:\n\t\t\tprint(layer.__class__.__name__, ':', tuple(layer.out.shape))\n\n\n# --------------------------------------- NETWORK ARCHITECTURE ---------------------------------------\n\n#torch.manual_seed(42)\n\n\n# If you don't have the Tanh layers, your activations will explode.\n# Also, your whole network will be one linear function.\nmodel = Sequential([\n\tEmbedding(vocab_size, n_embd), \n\tFlattenConsecutive(2), Linear(n_embd * 2, n_hidden, bias=False), BatchNorm1d(n_hidden), Tanh(),\n\tFlattenConsecutive(2), Linear(n_hidden * 2, n_hidden, bias=False), BatchNorm1d(n_hidden), Tanh(),\n\tFlattenConsecutive(2), Linear(n_hidden * 2, n_hidden, bias=False), BatchNorm1d(n_hidden), Tanh(),\n\tLinear(n_hidden, vocab_size)\n])\n\n\nwith torch.no_grad():\n\t# Last layer: make it less confident\n\tif isinstance(model.layers[-1], Linear):\n\t\tmodel.layers[-1].weight *= 0.1\n\n\nparameters = model.parameters()\nprint(\"Number of parameters: \", sum(p.nelement() for p in parameters))\nfor p in parameters:\n\tp.requires_grad = True\n\n\n# --------------------------------------- OPTIMIZATION ---------------------------------------\n\n\nlossi = []\n\n\nfor i in range(max_steps):\n\n\t# Minibatch construction\n\tix = torch.randint(0, X_tr.shape[0], (batch_size, ))\n\tXb, Yb = X_tr[ix], Y_tr[ix]\t\t# Batch X and Y\n\n\t# Forward pass\n\tlogits = model(Xb)\n\tloss = F.cross_entropy(logits, Yb)\t\t# loss function\n\n\n\t# Backward pass\n\tfor p in parameters:\n\t\tp.grad = None\n\tloss.backward()\n\n\n\t# Update\n\tlr = 0.1 if i < (0.9 * max_steps) else 0.01 # Step learning rate decay\n\tfor p in parameters:\n\t\tp.data += -lr * p.grad\n\n\n\t# Track stats\n\tif i % (max_steps * 0.1) == 0:\n\t\tprint(f'{i:7d}/{max_steps:7d}: {loss.item():.4f}')\n\tlossi.append(loss.log10().item())\n\n\n#model.print_layers()\n\n# Split lossi into 200 rows of 1000 samples, take the mean of each row\nlossi_avg = torch.tensor(lossi).view(-1, 1000).mean(1)\t\n# plt.plot(lossi_avg)\n# plt.savefig('lossi_avg.png')\n# --------------------------------------- VALIDATION ---------------------------------------\n\nfor layer in model.layers:\n\tlayer.training = False\n\n@torch.no_grad()\ndef split_loss(split):\n\tx, y = {\n\t'train': (X_tr, Y_tr),\n\t'val': (X_dev, Y_dev),\n\t'test': (X_te, Y_te),\n\t}[split]\n\n\tlogits = model(x)\n\tloss = F.cross_entropy(logits, y)\n\tprint(split, loss.item())\n\nsplit_loss('train')\nsplit_loss('val')\nsplit_loss('test')\n\n\n# --------------------------------------- SAMPLING ---------------------------------------\n\n\nfor _ in range(n_samples):\n\n\tout = []\n\tcontext = [0] * block_size\n\twhile True:\n\n\t\t# Forward\n\t\tx = torch.tensor([context])\n\t\tlogits = model(x)\n\t\tprobs = F.softmax(logits, dim=1)\n\n\t\t# Sample\n\t\tix = torch.multinomial(probs, num_samples=1).item()\n\n\t\t# shift context window and track samples\n\t\tcontext = context[1:] + [ix]\n\t\tout.append(ix)\n\n\t\t# break if we sample the end token\n\t\tif ix == 0:\n\t\t\tbreak\n\n\tprint(''.join(itos[i] for i in out))\n\n\ntorch.save(model.parameters(), 
'parameters.txt')\n\n\np = torch.load('parameters.txt')\n","repo_name":"mxrtin-beep/karpathy","sub_path":"makemore-wavenet.py","file_name":"makemore-wavenet.py","file_ext":"py","file_size_in_byte":8984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9753963790","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Read the data\ndata = pd.read_csv('../today_province_2022_06_16.csv')\n# Draw the histogram\nplt.rcParams['font.sans-serif'] = ['SimHei']\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 8))\nd = 5000 # bin width\nx = data.total_heal\nnum_bins = (max(x) - min(x)) // d\nax.hist(x, bins=num_bins)\n# Set the x-axis ticks\nplt.xticks(range(min(x), max(x) + d, d))\nplt.title('总治愈人数的分布直方图') # 'Distribution histogram of total recoveries'\nplt.savefig('直方图.svg', dpi=600)\nplt.show()\n","repo_name":"LCDedc/DataAnalysis","sub_path":"可视化/直方图.py","file_name":"直方图.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"458095730","text":"# Step 1: Import the database\r\n\r\nimport pandas as pd # alias\r\nimport plotly.express as px\r\n\r\n\r\ntabela = pd.read_csv(r\"C:\\Users\\felip\\OneDrive\\Área de Trabalho\\Prog\\Intensivão de Python\\Aula 2\\telecom_users.csv\")\r\n\r\n# information that doesn't help you only gets in the way\r\n\r\n\r\n\r\n# Step 2: View the database\r\n# Understand the information you have available\r\n# Spot the problems in the database\r\n\r\n\r\n# drop the useless column\r\n# tabela = tabela.drop(\"name\", axis) \r\n# axis = 0 -> row axis\r\n# axis = 1 -> column axis\r\ntabela = tabela.drop(\"Unnamed: 0\", axis=1) \r\n\r\n\r\n# Step 3: Data cleaning (fix the problems in the database)\r\n# correctly typed values - fix TotalGasto\r\ntabela[\"TotalGasto\"] = pd.to_numeric(tabela[\"TotalGasto\"], errors=\"coerce\")\r\n\r\n# empty values\r\n\r\n# axis = 0 -> row axis\r\n# axis = 1 -> column axis\r\n# completely empty columns -> drop\r\ntabela = tabela.dropna(how=\"all\", axis=1)\r\n\r\n# rows with any empty value -> drop\r\ntabela = tabela.dropna(how=\"any\", axis=0)\r\n\r\n\r\n\r\n# Step 4: Initial data analysis\r\n# how are the cancellations? 
26%\r\nprint(tabela[\"Churn\"].value_counts())\r\nprint(tabela[\"Churn\"].value_counts(normalize=True))\r\n\r\n\r\n\r\n# Step 5: Find the reasons for the cancellations\r\n\r\nfor coluna in tabela.columns:\r\n\r\n # stage 1: create the chart\r\n grafico = px.histogram(tabela, x=coluna, color=\"Churn\")\r\n # stage 2: show the chart\r\n grafico.show()","repo_name":"Trevisan1/Automation-Python","sub_path":"Aula 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"28162214826","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import Setting\nfrom setting.forms import SettingForm\nfrom common.models import Common\n\n\n@login_required(login_url=\"common:login\")\ndef inbody(request):\n if request.method == 'POST':\n form = SettingForm(request.POST)\n if form.is_valid():\n Set = Setting() # create a model instance\n Set.weight = form.cleaned_data['weight'] # read the value from the form\n Set.fat = form.cleaned_data['fat']\n Set.muscle = form.cleaned_data['muscle']\n Set.target_weight = form.cleaned_data['target_weight']\n Set.user = request.user # id of the currently logged-in user\n Set.save()\n return HttpResponseRedirect('/food/food/') # go to the list page after saving\n else:\n return render(request, 'setting/inbody.html')\n else:\n form = SettingForm()\n return render(request, 'setting/inbody.html', {'form': form})\n\n\n# @login_required(login_url=\"common:login\")\n# def inbody_update(request):\n# Set = Setting.objects.get(user=request.user)\n# if request.method == \"POST\":\n# Set.weight = request.POST.get('weight')\n# Set.muscle = request.POST.get('muscle')\n# Set.fat = request.POST.get('fat')\n# Set.target_weight = request.POST.get('target_weight')\n# Set.user = request.user\n# Set.save()\n# return redirect('/food/food/')\n# else:\n# settingForm = SettingForm(instance = Set)\n# return render(request, 'setting/inbody_update.html', {'settingForm':settingForm})\n#\n\n@login_required(login_url=\"common:login\")\ndef inbody_list(request):\n sets = Setting.objects.all().order_by('-id')\n return render(request, 'setting/inbody_list.html', {'sets': sets})\n\n","repo_name":"KIMCHAEYEONGKK/capstone-project2","sub_path":"setting/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16404641781","text":"from django.contrib import admin\n\nfrom .models import Tweets, Tweetcounts\n\nclass TweetsSettingsAllFields(admin.ModelAdmin):\n list_display = [field.name for field in Tweets._meta.get_fields()]\n\nclass CountsSettingsAllFields(admin.ModelAdmin):\n list_display = [field.name for field in Tweetcounts._meta.get_fields()]\n\nadmin.site.register(Tweets, TweetsSettingsAllFields)\nadmin.site.register(Tweetcounts, CountsSettingsAllFields)","repo_name":"chinchillabrains/cryptoracle","sub_path":"cryptoracle/twittersearch/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17625223810","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom behave import given, when, then\nimport time\n\n\n@given(\"the user is on the “expenses” page\")\ndef step_impl(context):\n 
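 # Added note (assumption): webdriver.Chrome() with no arguments needs a
 # chromedriver binary on PATH (Selenium 4.6+ can fetch one via Selenium Manager),
 # and the localhost:5000 URL below is a placeholder for the app under test.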
context.browser = webdriver.Chrome() # or another browser driver\n context.browser.get(\n \"http://localhost:5000/expenses\"\n ) # Replace with your app's expenses URL\n\n\n@when(\"the user inputs all required expense details\")\ndef step_impl(context):\n # Replace the below IDs with the actual IDs from your form\n context.browser.find_element(By.ID, \"category-dropdown\").send_keys(\"Transport\")\n context.browser.find_element(By.ID, \"name\").send_keys(\"Uber\")\n context.browser.find_element(By.ID, \"amount\").send_keys(\"15\")\n context.browser.find_element(By.ID, \"currency\").send_keys(\"USD\")\n context.browser.find_element(By.ID, \"expenseDate\").send_keys(\"2023-03-15\")\n context.browser.find_element(\n By.ID, \"no\"\n ).click() # Assuming \"No\" is selected for recurring\n\n\n@when('clicks the \"Submit\" button')\ndef step_impl(context):\n submit_button = context.browser.find_element(By.XPATH, '//input[@value=\"Submit\"]')\n submit_button.click()\n time.sleep(2) # Wait for the form to submit and page to update\n\n\n@then(\n 'the system adds the expense to the \"Per category history\" section under the chosen category'\n)\ndef step_impl(context):\n # This step would require checking the \"Per category history\" section for the new entry\n # Assuming the new entry would be the last row in the table\n rows = context.browser.find_elements(By.CSS_SELECTOR, \"#expense-table tbody tr\")\n last_row = rows[-1]\n assert (\n \"Uber\" in last_row.text and \"15\" in last_row.text and \"dollar\" in last_row.text\n )\n","repo_name":"JoachimBaumann/InteractiveSystemsEngineering-SDUGroup12","sub_path":"features/steps/adding_expense_steps.py","file_name":"adding_expense_steps.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31266727597","text":"import argparse\nimport os\nimport subprocess\nimport time\nfrom os.path import exists, isdir\n\nfrom Constants import FOLDER_EXPORT_ASSEMBLY, get_path_header, get_path_footer\nfrom util.ConditionalExecutionCode import ConditionalExecutionCode\nfrom util.Processor import Processor\nfrom util.Stack import Stack\n\n\ndef existsDirectory(path):\n if exists(path):\n return isdir(path)\n else:\n return False\n\n\ndef createDirectoryIfNew(directory):\n if not exists(directory):\n command = \"mkdir -p \" + directory\n subprocess.run(command, shell=True)\n\n\ndef getFilesInDir(directory):\n lst = os.listdir(directory) # your directory path\n return len(lst)\n\n\nif __name__ == '__main__':\n start_time = time.time()\n parser = argparse.ArgumentParser()\n\n required = parser.add_argument_group('Program Modes')\n required.add_argument(\"-b\", \"--singleInstruction\",\n help=\"Test Single Instruction. 
In combination with complete instruciton, will only generate all variants for this single instruction.\",\n default=\"\")\n required.add_argument(\"--basicInstruction\",\n help=\"Instructions for all basic instructions, don't execute special case code.\",\n action=\"store_true\")\n required.add_argument(\"-c\", \"--completeInstruction\",\n help=\"complete single Instruction Test (going through every possible option one file per instruction)\",\n action=\"store_true\")\n required.add_argument(\"-i\", \"--interleaving\", help=\"interleaving Test.\", action=\"store_true\")\n required.add_argument(\"-s\", \"--sequence\",\n help=\"if set an extra instruction path with only plain instructions gets created and compares it results with the normal instruction path.\",\n action=\"store_true\")\n\n optional = parser.add_argument_group('optional arguments')\n optional.add_argument(\"-a\", \"--architecture\",\n help=\"Architecture description folder in sources/ARCHITECTURE. Defaults to rv32imc.\",\n default=\"rv32imc\")\n optional.add_argument(\"-l\", \"--level\", help=\"Number of interleaving stacks in sequence. Defaults to 1.\",\n default=\"1\")\n optional.add_argument(\"--immediate\",\n help=\"Probability of immediate generated. Range from 0.0 - 1.0. Defaults to 0.5.\",\n default=\"0.5\")\n optional.add_argument(\"--switch\",\n help=\"Probability of instructions with switched focusReg and randValue generated. Range from 0.0 - 1.0. Defaults to 0.5.\",\n default=\"0.5\")\n optional.add_argument(\"-r\", \"--repetitions\", help=\"Repetitions of complex tests to create. Defaults to 1.\",\n default=\"1\")\n optional.add_argument(\"--simulator\", help=\"modify for simulation\", action=\"store_true\")\n optional.add_argument(\"-f\", \"--forwarding\",\n help=\"Test Sequence with FORWARDING instruction inside of the sequence. Defaults to 0.\",\n default=0)\n optional.add_argument(\"--specialImmediates\", help=\"Create Sequences which test Special immediates. Defaults to 0.\",\n default=\"0.0\")\n optional.add_argument(\"--newMemoryBlock\",\n help=\"Probability of starting a new memory block region. Defaults to 0.0.\", default=\"0.0\")\n optional.add_argument(\"--dcacheMiss\", help=\"Probability of incuring Data Cache misses. Defaults to 0.5.\",\n default=\"0.5\")\n optional.add_argument(\"--icacheMiss\",\n help=\"Probability of incuring Instruction Cache miss. Defaults to 0.5. Not supported on sequences!\",\n default=\"0.5\")\n optional.add_argument(\"--sequenceLength\",\n help=\"Give the list of Sequences to iterate through. Defaults to 4. Only works on sequences!\",\n default=4)\n optional.add_argument(\"--sequenceStall\",\n help=\"Probability to stall the pipeline between base instructions with forwarding holes. 
Defaults to 0.5\",\n default=0.5)\n\n\n\n args = parser.parse_args()\n\n dCacheMisses = float(args.dcacheMiss)\n newMemoryBlock = float(args.newMemoryBlock)\n\n Processor(architecture=args.architecture, cacheMiss=float(dCacheMisses),\n newMemoryBlock=float(newMemoryBlock))\n\n sequenceLength = int(args.sequenceLength)\n\n switchProbability = float(args.switch)\n depth = int(args.level)\n useSimulator = args.simulator\n immediateProbability = float(args.immediate)\n files = int(args.repetitions)\n singleInstruction = args.singleInstruction\n stack = Stack(args.architecture, args.icacheMiss)\n v = ConditionalExecutionCode(args.architecture)\n # prepare Export folder\n createDirectoryIfNew(FOLDER_EXPORT_ASSEMBLY)\n forwardingHole = int(args.forwarding)\n specialImmediates = float(args.specialImmediates)\n forwardingStallProb = float(args.sequenceStall)\n\n # import Constant files\n headerFile = open(get_path_header(args.architecture, simulator=useSimulator), 'r')\n header = headerFile.read()\n headerFile.close()\n footerFile = open(get_path_footer(args.architecture, simulator=useSimulator), 'r')\n footer = footerFile.read()\n footerFile.close()\n\n # liste = []\n # liste.append(copy.deepcopy(DataBank(args).getInstruction(\"sw\")))\n # liste.append(copy.deepcopy(DataBank(args).getInstruction(\"rem\")))\n # liste.append(copy.deepcopy(DataBank(args).getInstruction(\"lw\")))\n # liste.append(copy.deepcopy(DataBank(args).getInstruction(\"sra\")))\n #\n # code, testInstr, instrCount = stack.createSequenceInstructions(liste, immediateProbability=immediateProbability,\n # switchProbability=switchProbability, forwarding=0,\n # specialImmediates=specialImmediates)\n #\n # file = open(os.path.join(FOLDER_EXPORT_ASSEMBLY, \"test.S\"), \"w\")\n # file.write(header)\n # file.write(code)\n # file.write(footer)\n # file.close()\n # exit()\n runID = str(int(time.time()))\n numberTestInstructions = 0\n totalInstructions = 0\n fileCount =0\n original_fileCount = getFilesInDir(FOLDER_EXPORT_ASSEMBLY)\n\n if singleInstruction:\n singleInstructionTests, testcases, instrCount = stack.createSingleInstructionTest(immediateProbability,\n switchProbability,\n singleInstruction)\n numberTestInstructions += testcases\n totalInstructions += instrCount\n for [instructionName, code] in singleInstructionTests:\n filestring = \"singleInstr_\" + instructionName + \"_switch_\" + str(\n int(switchProbability * 100)) +\"_\"+ runID+ \".\" + Processor().get_assembler_ending()\n exportFile = os.path.join(FOLDER_EXPORT_ASSEMBLY, filestring)\n rawfile = open(exportFile, 'w')\n\n rawfile.write(header)\n rawfile.write(code)\n rawfile.write(footer)\n rawfile.close()\n\n fileCount += 1\n\n if args.basicInstruction:\n # for i in range(files):\n i = 0\n singleInstructionTests, testcases, instrCount = stack.createBasicSingleInstructionTest(singleInstruction)\n numberTestInstructions += testcases\n totalInstructions += instrCount\n for instructionName, code in singleInstructionTests.items():\n filestring = \"basic_\" + instructionName + \"_\" + str(i) +\"_\"+ runID+ \".\" + Processor().get_assembler_ending()\n exportFile = os.path.join(FOLDER_EXPORT_ASSEMBLY, filestring)\n\n rawfile = open(exportFile, 'w')\n rawfile.write(header)\n rawfile.write(code)\n rawfile.write(footer)\n rawfile.close()\n\n fileCount += 1\n\n if args.completeInstruction:\n\n # for i in range(files):\n i = 0\n singleInstructionTests, testcases, instrCount = stack.createCompleteSingleInstructionTest(singleInstruction)\n numberTestInstructions += testcases\n 
totalInstructions += instrCount\n for instructionName, code in singleInstructionTests.items():\n filestring = \"complete_\" + instructionName + \"_\" + str(i)+\"_\"+ runID + \".\" + Processor().get_assembler_ending()\n exportFile = os.path.join(FOLDER_EXPORT_ASSEMBLY, filestring)\n\n rawfile = open(exportFile, 'w')\n rawfile.write(header)\n rawfile.write(code)\n rawfile.write(footer)\n rawfile.close()\n\n fileCount += 1\n\n if args.sequence:\n\n for i in range(files):\n instructions = stack.generateSequenceInstructionsList(sequenceLength)\n for instructionList in instructions:\n code, testInstr, instrCount = stack.createSequenceInstructions(instructionList, immediateProbability,\n switchProbability, forwardingHole,\n specialImmediates,\n forwardingStallProb=forwardingStallProb)\n numberTestInstructions += testInstr\n totalInstructions += instrCount\n # export file\n filestring = \"sequence\"\n filestring += \"_switch_\" + str(int(switchProbability * 100))\n filestring += \"_forwarding_\" + str(forwardingHole)\n filestring += \"_forwardStall_\" + str(int(forwardingStallProb * 100))\n filestring += \"_specialImms_\" + str(int(specialImmediates * 100))\n filestring += \"_dcache_\" + str(int(dCacheMisses * 100))\n filestring += \"_newMemBlock_\" + str(int(newMemoryBlock * 100))\n filestring += \"_\" + str(i)\n filestring += \"_\" + str(fileCount)\n filestring += \"_\"+ runID\n filestring += \".\" + Processor().get_assembler_ending()\n exportFile = os.path.join(FOLDER_EXPORT_ASSEMBLY, filestring)\n\n rawfile = open(exportFile, 'w')\n rawfile.write(header)\n rawfile.write(code)\n rawfile.write(footer)\n rawfile.close()\n\n fileCount += 1\n\n if args.interleaving:\n for i in range(files):\n filestring = \"interleaving\"\n filestring += \"_switch_\" + str(int(switchProbability * 100))\n filestring += \"_specialImms_\" + str(int(specialImmediates * 100))\n filestring += \"_dcache_\" + str(int(dCacheMisses * 100))\n filestring += \"_newMemBlock_\" + str(int(newMemoryBlock * 100))\n filestring += \"_\" + str(i)\n filestring += \"_\" + str(fileCount)\n filestring += \"_\" + runID\n filestring += \".\" + Processor().get_assembler_ending()\n\n code, testInstr, instrCount = stack.createInterleavingInstructions(depth, immediateProbability,\n switchProbability,\n specialImmediates=specialImmediates)\n\n numberTestInstructions += testInstr\n totalInstructions += instrCount\n\n exportFile = os.path.join(FOLDER_EXPORT_ASSEMBLY, filestring)\n rawfile = open(exportFile, 'w')\n rawfile.write(header)\n rawfile.write(code)\n rawfile.write(footer)\n rawfile.close()\n\n fileCount += 1\n\n\n\n\n print(str(fileCount) + \" Assembly Files successfully generated. 
Total files in folder: \" + str(original_fileCount + fileCount))\n print(\"Test Instructions created: \" + str(numberTestInstructions))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"tubs-eis/PATARA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12222,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"7342862819","text":"# coding: utf-8\n\nimport sys\nimport codecs\nimport csv\nimport xlutils.copy\nimport xlrd\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nregister_language = ['iOS KVN','Android KVN','页面','备注','zh-Hans','zh-Hant','en','ko','ja']\n\ndef main():\n\n\tmode = int(raw_input(\"\\n请选择脚本工作模式\\n\\n1.载入 iOS Localizable.string 文件然后导出 language.cvs.\\n2.导出 Localozable.string For iOS.\\n3.纠正比对\\n\"))\n\tif mode == 1:\n\t\tload_localizable_dump_cvs()\n\telif mode == 2:\n\t\tdump_localizable_for_ios()\n\telif mode == 3:\n\t\tcorrect_sth()\n\telse:\n\t\tprint(\"错误:指令有误\")\n\t\texit(0)\n\ndef load_localizable_dump_cvs():\n\tprint(\"\\n注意事项:\\n1.请将Localizable.strings放置于当前脚本所在目录下\\n2.请确认Locallizable.string文件中键值对的形式均为 \\\"key\\\" = \\\"value\\\";的形式 以英文双引号开头\\n,且键值对中不出现'='3.每行只有一个键值对\\n\\n\\n\\n\")\n\tprint(\"开始读取文件\")\n\tlzs = None\n\ttry:\n\t\tlzs = file('Localizable.strings')\n\texcept Exception as e:\n\t\tprint('找不到Localizable.string文件')\n\t\texit(0)\n\n\tprint(\"加载文件成功\")\n\tfilter_file(lzs)\n\ndef dump_localizable_for_ios():\n\tprint(\"\\n注意事项:\\n1.请将language.cvs文件置于脚本所在目录下\\n\")\n\tprint(\"开始读取文件\")\n\t# csvfile = file('language.csv', 'rb')\n\t# reader = csv.reader(csvfile)\n\tdata = xlrd.open_workbook(\"language.xls\")\n\ttable = data.sheets()[0]\n\treader = table.nrows\n\tfor i in range(len(register_language)-4):\n\t\twith open(register_language[4+i]+'_Localizable.strings','w') as f:\n\n\t\t\tfor j in range(1,reader):\n\t\t\t\tcell_key = table.cell(j,0).value\n\t\t\t\tcell_value = table.cell(j,4+i).value\n\t\t\t\tf.write(\"\\\"%s\\\" = \\\"%s\\\";\\n\"%(cell_key,cell_value))\n\t\t\tf.close()\n\t\t\tprint(register_language[4+i]+'_Localizable.strings')\ndef correct_sth():\n\t#1 加载\n\tdata = xlrd.open_workbook('android.xls')\n\ttable = data.sheets()[0]\n\tdic = {}\n\tfor i in range(table.nrows):\n\t\tcell_text = table.cell(i,5).value\n\t\trr = dic.get(cell_text)\n\n\t\tif rr == None:\n\t\t\tdic[cell_text] = table.cell(i,8).value\n\t\telse:\n\t\t\tprint(\"重复\" + rr)\n\tprint(\"共有:\"+str(len(dic)))\n\n\tdata2 = xlrd.open_workbook('language.xls')\n\ttable2 = data2.sheets()[0]\n\twb = xlutils.copy.copy(data2)\n\tws = wb.get_sheet(0)\n\tfor i in range(table2.nrows):\n\t\tcell_text = table2.cell(i,4).value\n\t\tk = dic.get(cell_text)\n\t\tprint(k)\n\t\tif k!=None:\n\t\t\tws.write(i, 7, k)\n\twb.save('language.xls')\n\n\ndef filter_file(language_file):\n\tfile_lines = language_file.readlines()\n\ttrue_lines = []\n\tfor line in file_lines:\n\t\tresult = filter_line(line)\n\t\tif result[0] == 0:\n\t\t\tpass\n\t\t\t#完全错误格式\n\t\telif result[0] == 1:\n\t\t\tpass\n\t\t\t#类正确格式错误\n\t\telif result[0] == 2:\n\t\t\ttrue_line = result[1]\n\t\t\ttrue_lines.append(true_line)\n\t\t\t#类正确格式正确\n\t\telif result[0] == 3:\n\t\t\texit(0)\n\tif len(true_lines)==0:\n\t\tprint(\"加载文件失败,可用行为空\")\n\t\texit(0)\n\telse:\n\n\t\tcsv_file = file('language.csv','wb')\n\t\t\n\t\tcsv_file.write(codecs.BOM_UTF8)\n\t\twriter = csv.writer(csv_file)\n\t\twriter.writerow(register_language)\n\t\tfor item in 
true_lines:\n\t\t\twriter.writerow(item)\n\t\tcsv_file.close()\n\t\tprint(\"写入完成,共有%d个键值对生成\"%(len(true_lines)))\t\n\n\ndef filter_line(line):\n\tclip = line.split('=')\n\tis_kv_line = False\n\tfor char in clip[0]:\n\t\tif char == ' ':\n\t\t\tcontinue\n\t\telif char == '\\\"':\n\t\t\tis_kv_line = True\n\t\t\tbreak\n\t\telse:\n\t\t\tis_kv_line = False\n\t\t\treturn (0,None)\n\tif len(clip) == 2:\n\t\tkey = get_info_inside_kv(clip[0])\n\t\tvalue = get_info_inside_kv(clip[1])\n\t\treturn (2,[key,\"\",\"\",\"\",value,\"\" ,\"\",\"\",\"\"])\n\n\telse:\n\t\tprint(\"键值内对中不该出现 '=' \")\ndef get_info_inside_kv(kv):\n\tstart = 0\n\tend = 0\n\n\tfor i in range(len(kv)):\n\t\tchar = kv[0+i:1+i]\n\t\tif char == ' ' or char == '\\n':\n\t\t\tcontinue\n\t\telif char == '\\\"':\n\t\t\tstart = i\n\t\t\tbreak\n\tfor i in range(len(kv)):\n\t\tlength = len(kv)\n\t\tchar = kv[length-1-i:length-i]\n\n\t\tif char == ' ' or char == '\\n' or char == ';':\n\t\t\tcontinue\n\t\telif char == '\\\"':\n\t\t\tend = len(kv) - i\n\t\t\tbreak\n\treturn kv[start+1:end-1]\n\nif __name__ == '__main__':\n\tmain()","repo_name":"naxiemolv/WYPy-internationalization","sub_path":"genstring.py","file_name":"genstring.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40793795607","text":"\"\"\" Display messages in terminal \"\"\"\n\nimport numpy as np\nfrom ..config import config\n\ndef disp_best_sim_params(sim_num, run_info):\n \"\"\"\n Show info. from the simulation with max. value of the objective function\n \"\"\"\n pos = np.zeros(sim_num)\n val = np.zeros(sim_num)\n \n for n in range(1,sim_num+1):\n pos[n-1] = n\n val[n-1] = run_info['run'+str(n)]['opt_fval']\n \n max_val_ind = np.argmax(val)\n\n print(f\"Max. 
value ( {val[max_val_ind]} ) found in Run Number : {max_val_ind+1}\")\n\n #print the values of parameters\n for v in config.var_names:\n print(f\"{v} : {run_info['run'+str(max_val_ind+1)][v]} \")\n","repo_name":"TAUSystems/TauOpt","sub_path":"src/util/disp_message.py","file_name":"disp_message.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74760778825","text":"import requests\nimport random\nimport os\nfrom lxml import etree\nfrom multiprocessing.dummy import Pool\n\n# Goal: crawl the video data from pearvideo.com\nheaders = {\n 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\"\n}\n\nif not os.path.exists('./pearVideo'):\n os.mkdir('./pearVideo')\n\n# Principle: the thread pool should only handle the blocking, time-consuming operations\n\n# Request the URL below and parse out each video's detail-page URL and title\nurl = \"https://www.pearvideo.com/category_5\"\npage_text = requests.get(url=url, headers=headers).text\n\ntree = etree.HTML(page_text)\nli_list = tree.xpath('//ul[@id=\"listvideoListUl\"]/li')\nurls = []\n\nfor li in li_list:\n detail_url = \"https://www.pearvideo.com/\" + li.xpath('./div/a/@href')[0]\n name = li.xpath('./div/a/div[2]/text()')[0] + '.mp4'\n video_id = detail_url.split('/')[-1].split('_')[-1]\n\n # Request the detail-page URL\n detail_page_text = requests.get(url=detail_url, headers=headers).text\n\n # Parse the video address (URL) out of the detail page\n o_headers = {\n 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\",\n 'Referer': detail_url\n }\n get_video_url = \"https://www.pearvideo.com/videoStatus.jsp\"\n #\n params = {\n 'contId': video_id,\n 'mrd': random.random()\n }\n\n video_details = requests.get(url=get_video_url, params=params, headers=o_headers).json()\n video_url = video_details['videoInfo']['videos']['srcUrl']\n\n head_url = video_url.rsplit('/', 1)[0] + '/cont-'\n ass_url = video_id + '-' + video_url.rsplit('/', 1)[1].split('-', 1)[1]\n video_real_url = head_url + ass_url\n\n # print(video_real_url)\n dic = {\n 'name': name,\n 'url': video_real_url\n }\n\n urls.append(dic)\n\n\ndef get_video_data(dic):\n p_url = dic['url']\n print(dic['name'], 'downloading...')\n data = requests.get(url=p_url, headers=headers).content\n video_path = './pearVideo/' + dic['name']\n # persist to disk\n with open(video_path, 'wb') as fp:\n fp.write(data)\n print(dic['name'], 'download finished')\n\n\n# Use the thread pool to fetch the video data (the slow, blocking part)\npool = Pool(4)\npool.map(get_video_data, urls)\n\npool.close()\npool.join()\n","repo_name":"gotha331/python-learning-more","sub_path":"19_爬虫-高性能异步爬虫/dm_03_线程池在爬虫案例中的应用.py","file_name":"dm_03_线程池在爬虫案例中的应用.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"27517408204","text":"import logging\n\nimport pandas as pd\n\nfrom stock.models import IncomeStatement\nfrom stock.models import MyStock\nfrom yahooquery import Ticker\n\nlogger = logging.getLogger(\"stock\")\n\nM = 10 ** 6\nB = 10 ** 9\n\n\nclass MyIncomeStatement:\n def __init__(self, symbol):\n self.stock = MyStock.objects.get(symbol=symbol)\n\n def get(self):\n s = Ticker(self.stock.symbol, timeout=15)\n\n # all numbers are converted to millions\n df = s.income_statement(frequency=\"q\")\n if \"unavailable\" in df or \"error\" in df:\n logger.error(\"{}: {}\".format(self.stock.symbol, df))\n return\n\n # DB doesn't like NaN\n df = df.where(pd.notnull(df), 0)\n\n mapping = {\n \"basic_eps\": \"BasicEPS\",\n \"ebit\": \"EBIT\",\n 
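 # Added note: keys are IncomeStatement model fields, values are the column
 # names yahooquery returns; a column missing for a symbol falls back to 0
 # via the AttributeError handler further down.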
\"net_income\": \"NetIncome\",\n \"normalized_ebitda\": \"NormalizedEBITDA\",\n \"operating_expense\": \"OperatingExpense\",\n \"operating_income\": \"OperatingIncome\",\n \"operating_revenue\": \"OperatingRevenue\",\n \"pretax_income\": \"PretaxIncome\",\n \"selling_general_and_administration\": \"SellingGeneralAndAdministration\",\n \"total_expenses\": \"TotalExpenses\",\n \"total_revenue\": \"TotalRevenue\",\n \"tax_rate\": \"TaxRateForCalcs\",\n \"gross_profit\": \"GrossProfit\",\n \"general_and_administrative_expense\": \"GeneralAndAdministrativeExpense\",\n \"research_and_development\": \"ResearchAndDevelopment\",\n \"selling_and_marketing_expense\": \"SellingAndMarketingExpense\",\n \"total_operating_income_as_reported\": \"TotalOperatingIncomeAsReported\",\n \"reconciled_cost_of_revenue\": \"ReconciledCostOfRevenue\",\n \"cost_of_revenue\": \"CostOfRevenue\",\n \"interest_expense_non_operating\": \"InterestExpenseNonOperating\",\n \"interest_income_non_operating\": \"InterestIncomeNonOperating\",\n \"other_income_expense\": \"OtherIncomeExpense\",\n \"other_non_operating_income_expenses\": \"OtherNonOperatingIncomeExpenses\",\n \"tax_provision\": \"TaxProvision\",\n \"net_income_common_stockholders\": \"NetIncomeCommonStockholders\",\n \"net_income_from_continuing_and_discontinued_operation\": \"NetIncomeFromContinuingAndDiscontinuedOperation\",\n \"interest_income\": \"InterestIncome\",\n \"interest_expense\": \"InterestExpense\",\n \"net_interest_income\": \"NetInterestIncome\",\n \"ebitda\": \"EBITDA\",\n \"reconciled_depreciation\": \"ReconciledDepreciation\",\n \"net_income_from_continuing_operation_net_minority_interest\": \"NetIncomeFromContinuingOperationNetMinorityInterest\",\n }\n # enumerate data frame\n for row in df.itertuples(index=False):\n i, created = IncomeStatement.objects.get_or_create(\n stock=self.stock, on=row.asOfDate.date()\n )\n\n for key, val in mapping.items():\n try:\n tmp = float(getattr(row, val))\n except AttributeError:\n tmp = 0\n\n # if tmp is a large number, it's unlikely a rate,\n # eg. 
tax rate, thus convert it to B.\n                if abs(tmp) > M:\n                    tmp = tmp / B\n\n                # set value\n                setattr(i, key, tmp)\n\n            i.save()\n","repo_name":"fengxia41103/stock","sub_path":"backend/stock/workers/get_income_statement.py","file_name":"get_income_statement.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"27347478300","text":"from spacy.matcher import Matcher, PhraseMatcher, DependencyMatcher\nfrom kg_detective.lib import merge\n\ndef search_out(doc, nlp):\n  \"\"\"Search for German superlative adjectives (attributive ADJA, or predicative ADJD preceded by 'am').\n\n  Args:\n    doc (spacy.tokens.Doc): doc to be analyzed\n    nlp (spacy.language.Language): context language\n\n  Returns:\n    list: list of spacy.tokens.Span\n  \"\"\"\n  result = []\n\n  dep_matcher = DependencyMatcher(nlp.vocab)\n  dep_patterns = [\n    [\n      {\n        \"RIGHT_ID\": \"adj\",\n        \"RIGHT_ATTRS\": {\"TAG\": {\"IN\": [\"ADJA\"]}, \"MORPH\": {\"IS_SUPERSET\": [\"Degree=Sup\"]}}\n      },\n    ],\n    [\n      {\n        \"RIGHT_ID\": \"adj\",\n        \"RIGHT_ATTRS\": {\"TAG\": {\"IN\": [\"ADJD\"]}, \"MORPH\": {\"IS_SUPERSET\": [\"Degree=Sup\"]}}\n      },\n      {\n        \"LEFT_ID\": \"adj\",\n        \"REL_OP\": \">\",\n        \"RIGHT_ID\": \"am\",\n        \"RIGHT_ATTRS\": {\"DEP\": \"pm\", \"POS\": \"PART\", \"TAG\": \"PTKA\", \"LOWER\": \"am\"}\n      },\n    ],\n  ]\n  dep_matcher.add(\"adj_superlativ\", dep_patterns)\n  matches = dep_matcher(doc)\n\n  for _, ids in matches:\n    span_ids = ids\n\n    sorted_span_ids = sorted(span_ids)\n    span_text = \" \".join([doc[e].text for e in sorted_span_ids])\n    result.append({\"text\": span_text})\n\n  return result\n","repo_name":"qishe-nlp/kg-detective","sub_path":"kg_detective/de/rules/adj_superlativ.py","file_name":"adj_superlativ.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16598969891","text":"a = \"Incrível\"\nprint(\"Python é \" + a)\n\nnome = \"Robson \"\nsobreNome = \"Bento\"\nnomeCompleto = nome + sobreNome\n\nprint(nomeCompleto)\n\nx = 5\ny = 10\n\nprint(x + y)\n\nnum = 20\ntexto = \" Carros\"\n\nprint(str(num) + texto)","repo_name":"robsonjava/Python-projects-for-portfolio","sub_path":"Noções Basicas/valoresDeSaida.py","file_name":"valoresDeSaida.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11056773652","text":"from occultence import *\nimport glob\nimport pickle\n\n\nif __name__ == '__main__':\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument('i')\n    args = parser.parse_args()\n\n    clean_kw = {'dust_removal': False, 'bad_weather_removal': False, 'cosmics_removal': True, 'cosmic_boxsize': 0.08,\n                'cosmic_nsigma': 3,\n                'threshold_removal': True, 'thresholds': {'airmass': 2.0, 'fwhm': 6, 'bkg': 1000},\n                'threshold_operators': {\"airmass\": \">\", \"fwhm\": \">\", \"bkg\": \">\"}}\n    gp_kw = {'do_first_sigma_clip': True, 'do_second_sigma_clip': True,\n             'running_mean_boxsize': 0.08, 'nsigma': 3, 'plot': False}\n    bls_kw = {\"minimum_period\": 0.5, \"maximum_period\": 10,\n              'transit_durations': np.linspace(0.01, 0.1, 10), 'plot': False}\n    bls_bin = 7.5 * u.minute\n    recovery_kw = {'condition_on_epoch': 1 * u.hour}\n    gp_bin = 20 * u.minute\n    plot = False\n    verbose = False\n\n    dirname = \"/Users/catrionamurray/Library/CloudStorage/OneDrive-UCB-O365/SPECULOOS/Sp2049+3336/injection_recovery/3000_injected_planets\"\n    self = pickle.load(open(f\"{dirname}/lc_without_planet.pkl\", 'rb'))\n    lcs_with_transits = pickle.load(open(f\"{dirname}/lcs_injected_planets.pkl\", 'rb'))\n    planets = pd.read_csv(f\"{dirname}/injected_planets_df.csv\")\n\n    clean_lcs, gp_lcs, bls_lcs = [], [], []\n\n    for i in range((int(args.i)-1)*300, int(args.i)*300):\n        print(f\"{i + 1}/{int(args.i) * 300}...\")\n        clean_targ, gp_targ, bls_targ, planets = self.single_injection_recovery(lcs_with_transits[i],\n                                                                                planets,\n                                                                                i,\n                                                                                clean_kw,\n                                                                                gp_bin,\n                                                                                gp_kw,\n                                                                                bls_kw,\n                                                                                bls_bin,\n                                                                                recovery_kw,\n                                                                                plot,\n                                                                                verbose)\n        clean_lcs.append(clean_targ)\n        gp_lcs.append(gp_targ)\n        bls_lcs.append(bls_targ)\n\n    planets[(int(args.i)-1)*300:int(args.i)*300].to_csv(f\"{dirname}/injected_planets_df_{int(args.i)}.csv\", index=False)\n    pickle.dump(clean_lcs, open(f\"{dirname}/lcs_clean_{int(args.i)}.pkl\", 'wb'))\n    pickle.dump(gp_lcs, open(f\"{dirname}/lcs_gp_{int(args.i)}.pkl\", 'wb'))\n    pickle.dump(bls_lcs, open(f\"{dirname}/lc_bls_{int(args.i)}.pkl\", 'wb'))","repo_name":"catrionamurray/occultence","sub_path":"occultence/run_inj_rec.py","file_name":"run_inj_rec.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"19418130327","text":"import numpy as np\nimport torch\nfrom . import filters\nfrom . import padding\n\nclass Scattering:\n    def __init__(self, shape, J, L, m, padding_mode='none', precision='double'):\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        if len(shape) != 2:\n            raise ValueError(f'\\'shape\\' must have length 2, not {len(shape)}')\n        if not isinstance(shape[0], int) or not isinstance(shape[1], int):\n            raise ValueError('\\'shape\\' must have integer elements')\n        self.shape = np.array(shape, dtype=int)\n\n        if J != int(J):\n            raise ValueError('\\'J\\' must be an integer')\n        self.J = int(J)\n        if self.J < 1:\n            raise ValueError('\\'J\\' must be greater or equal to 1')\n        if 2**self.J > min(shape[0], shape[1]):\n            raise ValueError('The smallest dimension in \\'shape\\' must be larger or equal than 2^J')\n\n        if not isinstance(L, int):\n            raise ValueError('\\'L\\' must be an integer')\n        self.L = int(L)\n        if self.L < 1:\n            raise ValueError('\\'L\\' must be greater or equal to 1')\n\n        self.padding = padding_mode\n        self.padded_shape = padding.padded_shape(self.padding, self.shape, self.J)\n\n        if m not in [0, 1, 2]:\n            raise ValueError(f'\\'m\\' must be 0 <= m <= 2, not {m}')\n        self.m = m\n\n        if precision not in {'single', 'double'}:\n            raise ValueError(f'\\'precision\\' must be either \\'single\\' or \\'double\\', not {precision}')\n        if precision == 'single':\n            self.dtype = torch.complex64\n        elif precision == 'double':\n            self.dtype = torch.complex128\n\n        self.filters = filters.filters(self.padded_shape, self.J, self.L, self.dtype, self.device)\n\n    def get_filters(self):\n        # note: an instance attribute named 'filters' is set in __init__, so this\n        # accessor needs a different name to remain callable\n        return self.filters\n\n    from ._coefficients import coefficients\n    from ._synthesis import synthesis","repo_name":"mariaprat/scattering","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18763467638","text":"from simple_emulator import CongestionControl\nfrom simple_emulator import BlockSelection\nfrom simple_emulator import constant\nfrom simple_emulator import objects\nfrom objects.sender import Sender\nfrom objects.packet import Packet\nimport math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n#from random_process import OrnsteinUhlenbeckProcess\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nEVENT_TYPE_FINISHED='F'\nEVENT_TYPE_DROP='D'\nEVENT_TYPE_TEMP='T'\n\n# Hyperparameters\nEPISODE = 20\n######################################## DDPG ################################\n\nMEMORY_CAPACITY = 3000\n\ndef fanin_init(size, fanin=None):\n    fanin = fanin or size[0]\n    v = 1. / np.sqrt(fanin)\n    return torch.Tensor(size).uniform_(-v, v)\n\nclass Actor(nn.Module):\n    def __init__(self, nb_states, nb_actions, hidden1=40, hidden2=30, init_w=3e-3):\n        super(Actor, self).__init__()\n        self.fc1 = nn.Linear(nb_states, hidden1)\n        self.fc2 = nn.Linear(hidden1, hidden2)\n        self.fc3 = nn.Linear(hidden2, 3)\n        self.relu = nn.ReLU()\n        self.tanh = nn.Tanh()\n        self.init_weights(init_w)\n\n    def init_weights(self, init_w):\n        self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())\n        self.fc2.weight.data = fanin_init(self.fc2.weight.data.size())\n        self.fc3.weight.data.uniform_(-init_w, init_w)\n\n    def forward(self, x):\n        out = self.fc1(x)\n        out = self.relu(out)\n        out = self.fc2(out)\n        out = self.relu(out)\n        out = F.softmax(self.fc3(out), dim=-1)\n        #out = self.tanh(out)\n        #out = F.log_softmax(out, dim=1)\n        return out\n\n\nclass Critic(nn.Module):\n#class ValueNetwork(nn.Module):\n    def __init__(self, num_inputs, num_actions, hidden_size=30, init_w=3e-3):\n        super(Critic, self).__init__()\n\n        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)\n        self.linear2 = nn.Linear(hidden_size, hidden_size)\n        self.linear3 = nn.Linear(hidden_size, 1)\n\n        self.linear3.weight.data.uniform_(-init_w, init_w)\n        self.linear3.bias.data.uniform_(-init_w, init_w)\n\n    def forward(self, state, action):\n        x = torch.cat([state, action], 1)\n        x = F.relu(self.linear1(x))\n        x = F.relu(self.linear2(x))\n        x = self.linear3(x)\n\n        return x\n    # the classic actor-critic pair ends here\n\nclass Agent(object):\n    def __init__(self, **kwargs):\n        for key, value in kwargs.items():\n            setattr(self, key, value)\n\n        s_dim = 2  # state dimension: 2\n        a_dim = 3  # action dimension: 3\n        self.s_dim = s_dim\n        self.a_dim = a_dim\n        self.actor = Actor(s_dim, a_dim)  # actor network\n        self.actor_target = Actor(s_dim, a_dim)  # actor target network\n        self.critic = Critic(s_dim, a_dim)  # critic network\n        self.critic_target = Critic(s_dim, a_dim)  # critic target network\n        self.actor_optim = optim.Adam(self.actor.parameters(), lr=self.actor_lr)  # optimizer: parameters to optimize, learning rate\n        self.critic_optim = optim.Adam(self.critic.parameters(), lr=self.critic_lr)\n        self.buffer = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)\n        self.pointer = 0\n        self.actor_target.load_state_dict(self.actor.state_dict())\n        self.critic_target.load_state_dict(self.critic.state_dict())\n\n    def policy_action(self, s0):\n        s0 = torch.tensor(s0, dtype=torch.float).reshape((2,))\n        a0 = self.actor(s0).detach().numpy()  #+ np.random.uniform(-0.05,0.05,1)\n\n        return a0\n\n    def random_action(self):\n        action = np.random.randint(0, 3)\n        return action\n\n    def put(self, s0, a0, r1, s1):\n        transition = np.hstack((np.array(s0).reshape(-1), np.array(a0).reshape(-1), np.array(r1).reshape(-1), np.array(s1).reshape(-1)))\n        index = self.pointer % MEMORY_CAPACITY\n        self.buffer[index, :] = transition\n        self.pointer += 1\n\n    def replay(self):  # sample a random batch from the replay buffer\n        indices = np.random.choice(MEMORY_CAPACITY, size=self.batch_size)\n        batch = self.buffer[indices, :]\n        s0_batch = torch.FloatTensor(batch[:, :self.s_dim])\n        a0_batch = torch.FloatTensor(batch[:, self.s_dim: self.s_dim + self.a_dim])\n        r_batch = torch.FloatTensor(batch[:, -self.s_dim - 1: -self.s_dim])\n        s1_batch = torch.FloatTensor(batch[:, -self.s_dim:])\n        return s0_batch, a0_batch, r_batch, s1_batch\n\n    def learn(self):  # update the critic and the actor\n        # NOTE: the original body of this method and the class header below were\n        # lost in the source; what follows is the standard DDPG update, and\n        # gamma/tau are assumed to arrive through **kwargs like actor_lr above.\n        if self.pointer < self.batch_size:\n            return\n        s0, a0, r1, s1 = self.replay()\n\n        # critic: regress Q(s0, a0) onto the one-step TD target\n        a1 = self.actor_target(s1).detach()\n        y_true = r1 + self.gamma * self.critic_target(s1, a1).detach()\n        y_pred = self.critic(s0, a0)\n        critic_loss = nn.MSELoss()(y_pred, y_true)\n        self.critic_optim.zero_grad()\n        critic_loss.backward()\n        self.critic_optim.step()\n\n        # actor: ascend the critic's estimate of Q(s0, actor(s0))\n        actor_loss = -torch.mean(self.critic(s0, self.actor(s0)))\n        self.actor_optim.zero_grad()\n        actor_loss.backward()\n        self.actor_optim.step()\n\n        # soft-update both target networks\n        for target, source in ((self.actor_target, self.actor), (self.critic_target, self.critic)):\n            for tp, sp in zip(target.parameters(), source.parameters()):\n                tp.data.copy_((1.0 - self.tau) * tp.data + self.tau * sp.data)\n\n\nclass MySolution(CongestionControl, BlockSelection):  # class name and bases assumed\n\n    def select_block(self, cur_time, block_queue):\n        \"\"\"\n        The part of the solution that selects the block to send next: prefer blocks\n        that can still meet their deadline.\n        \"\"\"\n        def is_better(block):\n            best_block_create_time = best_block.block_info[\"Create time\"]\n            cur_block_create_time = block.block_info[\"Create time\"]\n            # a block that has already missed its deadline is never preferred\n            if (cur_time - cur_block_create_time) >= block.block_info[\"Deadline\"]:\n                return False\n            if (cur_time - best_block_create_time) >= best_block.block_info[\"Deadline\"]:\n                return True\n            if best_block_create_time != cur_block_create_time:\n                return best_block_create_time > cur_block_create_time\n            return (cur_time - best_block_create_time) * best_block.block_info[\"Deadline\"] > \\\n                   (cur_time - cur_block_create_time) * block.block_info[\"Deadline\"]\n\n        best_block_idx = -1\n        best_block = None\n        for idx, item in enumerate(block_queue):\n            if best_block is None or is_better(item):\n                best_block_idx = idx\n                best_block = item\n\n        return best_block_idx\n\n    def on_packet_sent(self, cur_time):\n        \"\"\"\n        The part of the solution that updates the state of the algorithm when the sender needs to send a packet.\n        \"\"\"\n        return super().on_packet_sent(cur_time)\n\n    def cc_trigger(self, cur_time, event_info):\n        \"\"\"\n        The part of the algorithm that performs congestion control; it is called when the sender gets an acknowledge or loss event from the receiver.\n        See more at https://github.com/AItransCompetition/simple_emulator/tree/master#congestion_control_algorithmpy.\n        \"\"\"\n        # estimate the bandwidth\n        super().estimate_bandwidth(cur_time, event_info)\n\n        # set cwnd or sending rate in sender\n        return {\n            \"cwnd\": self.cwnd,\n            \"send_rate\": self.send_rate,\n        }","repo_name":"WMlab2021/code","sub_path":"毕设代码/DDPG+torch+自己写出来训练的/solution+torch+question.py","file_name":"solution+torch+question.py","file_ext":"py","file_size_in_byte":11354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"74358382346","text":"# https://leetcode.com/problems/unique-paths/\n\nclass Solution:\n    def __init__(self):\n        self.cache = {}\n        self.cache['1_1'] = 1\n\n    def uniquePaths(self, m: int, n: int) -> int:\n        k = str(m)+'_'+str(n)\n\n        if k in self.cache:\n            return self.cache[k]\n\n        from_top = 0 if m - 1 < 0 else self.uniquePaths(m - 1, n)\n        from_left = 0 if n - 1 < 0 else self.uniquePaths(m, n - 1)\n        result = from_top + from_left\n\n        self.cache[k] = result\n\n        return self.cache[k]\n\n\n\n\ns = Solution()\nresult = s.uniquePaths(3,3)\n\nprint(result)","repo_name":"SergeySatunin/leetcode","sub_path":"dynamic_programming/unique_paths.py","file_name":"unique_paths.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73183829704","text":"import requests\nfrom PIL import Image\nimport base64\nfrom io import BytesIO\nimport csv\nimport json\nImage.MAX_IMAGE_PIXELS = None\n\ndef polys(row):\n\traw = row.split(\",\")\n\traw[0] = raw[0].strip(raw[0][:raw[0].find('(')] + '(((')\n\traw[len(raw)-1] = raw[len(raw)-1].strip(')))')\n\txPoints = []\n\tyPoints = []\n\tfor i in range(0, len(raw)):\n\t xPoints.append(float(raw[i].split(\" \")[0]))\n\t yPoints.append(float(raw[i].split(\" \")[1]))\n\n\treturn (xPoints, yPoints)\n\n#URL = 'http://192.168.1.65:8082'\nURL = 'http://127.0.0.1:5000'\nannotations = ['../ODYSSEY/Images/LRM/PNPG.csv', '../ODYSSEY/Images/LRM/Arcos.csv']\n#DTM = 'Arcos.tif'\npath = ['../ODYSSEY/Images/LRM/PNPG.tif', '../ODYSSEY/Images/LRM/Arcos.tif']#, '../ODYSSEY/Images/LRM/Viana.tif', '../ODYSSEY/Images/LRM/Coura.tif']\n# 
bbox_converted = [xmin, ymin, xmax, ymax]\n#coords = (-61809,236336,-2247, 245758)\ncoords = (0,258000,6000, 266000)\n\n#image1 = base64.b64encode(open(LRM,'rb').read()).decode('ascii')\n#image2 = base64.b64encode(open(DTM,'rb').read()).decode('ascii')\n#images = {}\n#images['LRM'] = image1\n#images['DTM'] = image2\n\ndata = {}\nclasses = ['mamoa', 'outro']\n\nx = 0\nfor annotation in annotations:\n\twith open(annotation) as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\t# This considers that polygons are under the column name \"WKT\" and labels are under the column name \"Id\"\n\t\tpolygons = \"MULTIPOLYGON (((\"\n\t\tcount = 0\n\t\tfor row in reader:\n\t\t\txPoints, yPoints = polys(row['WKT'])\n\n\t\t\tif count != 0:\n\t\t\t\tpolygons += ', (('\n\n\t\t\tfor i in range(len(xPoints)):\n\t\t\t\tif i != len(xPoints)-1:\n\t\t\t\t\tpolygons += str(xPoints[i]) + \" \" + str(yPoints[i]) + ','\n\t\t\t\telse:\n\t\t\t\t\tpolygons += str(xPoints[i]) + \" \" + str(yPoints[i]) + '))'\n\n\t\t\tcount += 1\n\t\tpolygons += ')'\n\n\tdata[classes[x]] = polygons\n\tx+=1\n\tprint(count)\n\npurpose = 'training' # training/inference\n\n\n\ntypes = ['LRM']\nmultipleFiles = [('annotations', json.dumps(data)), ('geotiff', json.dumps(path)), ('coords', json.dumps(coords)), ('purpose', purpose)]\n\nreceived = requests.post(URL, data=multipleFiles)\n\nprint(received.text)","repo_name":"ddcanedo/odyssey_ws","sub_path":"testWs.py","file_name":"testWs.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10155128697","text":"import argparse\nimport feedparser\nimport requests\nimport logging\nfrom .cli_package.library import ping_mobile_app, cve_feed, app_version, health_check, send_message, docker_sdk\n# Allow request for self signed https certificates\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport os\n\nprint(os.listdir())\n\n# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\ndef cli():\n # Initialize the parser\n parser = argparse.ArgumentParser(description='Gnakrydev-cli app')\n # Create a subparser\n subparser = parser.add_subparsers(dest='command')\n # Create a command\n version = subparser.add_parser(\n 'version', help=\"Show the app version\")\n health = subparser.add_parser(\n 'health', help=\"Perform a health-check of the endpoints listed on the YML file\")\n message = subparser.add_parser(\n 'message', help=\"Send a notification to the Gnakrydev Mobile-APP\")\n cve = subparser.add_parser('cve', help=\"List CERT-FR rss recents vulns\")\n ping = subparser.add_parser(\n 'ping', help=\"Send a ping message to the Gnakrydev Mobile-APP\")\n\n docker = subparser.add_parser(\n 'docker', help=\"Docker command SDK\")\n\n # Create arguments for the command\n\n # GnakryDev YML config file\n health.add_argument('--config', type=str,\n required=True, metavar=\"\", help=\"YAML config file path\")\n\n # ApiKey available on the mobile-app\n health.add_argument('--apikey', type=str, required=True, metavar=\"\",\n help=\"apiKey available on the mobile-app\")\n # ApiKey available on the mobile-app\n ping.add_argument('--apikey', type=str, required=True, metavar=\"\",\n help=\"apiKey available on the mobile-app\")\n #\n health.add_argument('--verbose', action='store_true',\n help=\"Show request details in stdout\")\n #\n message.add_argument('--apikey', type=str, 
required=True, metavar=\"\",\n help=\"apiKey available on the mobile-app\")\n #\n message.add_argument('--id', type=str, metavar=\"\",\n help=\"Message ID, Default= ramdom uuid\")\n #\n message.add_argument('--title', type=str, metavar=\"\",\n required=True, help=\"Message title\")\n\n # Message content\n message.add_argument('--content', type=str, metavar=\"\",\n required=True, help=\"Message content\")\n\n # Message type\n message.add_argument('--type', type=str, metavar=\"\",\n help=\"Message type: info, warning, success, error. Default= info\")\n\n # ApiKey available on the mobile-app\n docker.add_argument('--apikey', type=str, required=True, metavar=\"\",\n help=\"apiKey available on the mobile-app\")\n\n docker.add_argument('--c_status', action='store_true',\n help=\"Show and send containers status\")\n docker.add_argument('--info', action='store_true',\n help=\"docker host infos\")\n docker.add_argument('--compose_scan', action='store_true',\n help=\"docker-compose scan\")\n docker.add_argument('--gen_dockerfile', action='store_true',\n help=\"docker-compose scan\")\n\n \n\n # Load all arguments from the CLI\n args = parser.parse_args()\n\n # Check\n if args.command == 'version':\n app_version()\n elif args.command == 'health':\n health_check(args)\n elif args.command == 'message':\n send_message(args)\n elif args.command == 'cve':\n cve_feed()\n elif args.command == 'ping':\n ping_mobile_app(args)\n elif args.command == 'docker':\n docker_sdk(args)\n","repo_name":"GnakryDev/gnakrydev-cli","sub_path":"src/gnakrydev/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13526763932","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .DBHelper import DBHelper\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\ndef ReportListAllInvoices(request):\n db = DBHelper()\n data, columns = db.fetch ('SELECT i.invoice_no as \"Invoice No\", i.invoice_date as \"Date\" '\n ' , i.customer_code as \"Customer Code\", c.customer_name as \"Customer Name\" '\n ' , i.due_date as \"Due Date\", i.total as \"Total\", i.vat as \"VAT\", i.amount_due as \"Amount Due\" '\n ' , ili.product_code as \"Product Code\", p.name as \"Product Name\" '\n ' , ili.quantity as \"Quantity\", ili.unit_price as \"Unit Price\", ili.product_total as \"Extended Price\" '\n ' FROM invoice i JOIN customer c ON i.customer_code = c.customer_code '\n ' JOIN invoice_line_item ili ON i.invoice_no = ili.invoice_no '\n ' JOIN product p ON ili.product_code = p.code '\n ' ')\n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n return render(request, 'report_list_all_invoices.html', data_report)\n\ndef ReportProductsSold(request):\n db = DBHelper()\n data, columns = db.fetch ('SELECT ili.product_code as \"Product Code\", p.name as \"Product Name\" '\n ' , SUM(ili.quantity) as \"Total Quantity Sold\", SUM(ili.product_total) as \"Total Value Sold\" '\n \n ' FROM invoice i JOIN invoice_line_item ili ON i.invoice_no = ili.invoice_no '\n ' JOIN product p ON ili.product_code = p.code '\n ' GROUP BY p.code, ili.product_code, p.name ')\n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n return render(request, 'report_products_sold.html', data_report)\n\ndef ReportListAllProducts(request):\n db = DBHelper()\n data, 
columns = db.fetch ('SELECT code as \"Code\", name as \"Name\", units as \"Units\" FROM product ')\n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n return render(request, 'report_list_all_products.html', data_report)\n\ndef CursorToDict(data,columns):\n result = []\n fieldnames = [name.replace(\" \", \"_\").lower() for name in columns]\n for row in data:\n rowset = []\n for field in zip(fieldnames, row):\n rowset.append(field)\n result.append(dict(rowset))\n return result\n\ndef ReportListAllReceipts(request):\n db = DBHelper()\n data, columns = db.fetch ('SELECT r.receipt_no as \"Receipt No\", r.receipt_date as \"Date\" '\n ' , r.customer_code as \"Customer Code\", c.customer_name as \"Customer Name\" '\n ' , pp.payment_method as \"Payment Name\",r.payment_reference as \"Payment Reference\"'\n ' , r.remarks as \"Remarks\", r.total_receipt as \"Total Received\" '\n ' , rli.invoice_no as \"Invoice No\",i.invoice_date as \"Invoice Date\"'\n ' ,i.amount_due as \"Invoice Full Amount\",(i.amount_due - rli.amount_paid_here) as \"Invoice Amount Remain\" '\n ' ,rli.amount_paid_here as \"Amount Paid Here\"'\n\n ' FROM receipt r JOIN customer c ON r.customer_code = c.customer_code '\n ' JOIN receipt_line_item rli ON r.receipt_no = rli.receipt_no '\n ' JOIN payment_method pp ON r.payment_method = pp.payment_method'\n ' JOIN invoice i ON i.invoice_no = rli.invoice_no')\n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n return render(request, 'report_list_all_receipts.html', data_report)\n\n\ndef ReportUnpaidInvoices(request):\n db = DBHelper()\n data, columns = db.fetch ('select i.invoice_no as \"Invoice Number\" , i.invoice_date as \"Date\" , i.customer_code as \"Customer Code\"'\n ', c.customer_name as \"Customer Name\" , i.amount_due as \"Amount Due\" '\n ', sum(rli.amount_paid_here) as \"Amount Paid Here\" '\n ', (i.amount_due - sum(rli.amount_paid_here)) as \"Amount Unpaid\" '\n\n 'FROM receipt r JOIN receipt_line_item rli ON rli.receipt_no = r.receipt_no '\n 'JOIN invoice i ON i.invoice_no = rli.invoice_no '\n 'JOIN customer c ON c.customer_code = i.customer_code '\n 'Group by i.invoice_no, c.customer_name , i.amount_due; ')\n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n data2, columns2 = db.fetch (' select count(\"Invoice Amount Not Paid\") as \"Number of Invoices Not Paid\", sum(\"Invoice Amount Not Paid\") as \"Total Invoice Amount Not Paid\" '\n ', sum(\"Amount Paid Here\") as \"Total Invoice Amount Received\" '\n\n ' from (SELECT rli.\"invoice_no\" as \"Invoice No\", i.invoice_date as \"Date\", c.customer_name as \"Customer Name\" , '\n ' i.amount_due as \"Amount Received\", SUM(rli.amount_paid_here) as \"Amount Paid Here\", '\n ' (i.amount_due - sum(rli.amount_paid_here)) as \"Invoice Amount Not Paid\" '\n ' FROM receipt r JOIN receipt_line_item rli ON r.\"receipt_no\" = rli.\"receipt_no\" '\n ' JOIN invoice i ON i.\"invoice_no\" = rli.\"invoice_no\" '\n ' JOIN customer c ON c.\"customer_code\" = i.\"customer_code\" '\n ' GROUP BY rli.\"invoice_no\" ,i.\"invoice_date\", c.\"customer_name\",i.\"amount_due\") as total_un_re; ')\n\n data_report['data2'] = CursorToDict (data2,columns2)\n data_report['column_name2'] = columns2\n\n\n return render(request, 'report_unpaid_invoices.html', data_report)\n\n\ndef ReportGroupBy(request):\n db = DBHelper()\n data, columns = db.fetch ('SELECT 
ili.product_code as \"Product Code\", p.name as \"Product Name\" '\n ', SUM(ili.quantity) as \"Total Quantity Sold\", SUM(ili.product_total) as \"Total Value Sold\" '\n ',c.customer_code as \"Customer Code\" '\n ',p.units as \"Units\" '\n\n 'FROM invoice i JOIN invoice_line_item ili ON i.invoice_no = ili.invoice_no '\n 'JOIN product p ON ili.product_code = p.code '\n ' JOIN customer c ON c.customer_code = i.customer_code '\n ' WHERE i.invoice_date between \\'' + '2021-01-01' + '\\' and \\'' + '2021-01-31' + '\\' '\n 'GROUP BY p.code,c.customer_code, ili.product_code, p.name ')\n \n data_report = dict()\n data_report['data'] = CursorToDict (data,columns)\n data_report['column_name'] = columns\n\n return render(request, 'report_group_by.html', data_report)\n\n\n","repo_name":"yuii88/231_lab4","sub_path":"report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24776451705","text":"tree = {\r\n '/': {\r\n 'a': {\r\n 'e': {\r\n 'i': 584\r\n },\r\n 'f': 29116,\r\n 'g': 2557,\r\n 'h.lst': 62596\r\n },\r\n 'b.txt': 14848514,\r\n 'c.dat': 8504156,\r\n 'd': {\r\n 'j': 4060174,\r\n 'd.log': 8033020,\r\n 'd.ext': 5626152,\r\n 'k': 7214296\r\n }\r\n }\r\n}\r\n\r\ndef get_total_size(tree, directory):\r\n total_size = 0\r\n for item in tree[directory]:\r\n if isinstance(tree[directory][item], dict):\r\n # The item is a directory, so get its total size.\r\n total_size += get_total_size(tree, tree[directory][item])\r\n else:\r\n # The item is a file, so add its size to the total.\r\n total_size += tree[directory][item]\r\n return total_size\r\n\r\ndirectories = []\r\nfor directory in tree:\r\n total_size = get_total_size(tree, directory)\r\n if total_size <= 100000:\r\n directories.append(directory)\r\n\r\n# Calculate the sum of the total sizes of the directories found above.\r\ntotal_sum = 0\r\nfor directory in directories:\r\n total_sum += get_total_size(tree, directory)\r\n\r\n# Print the result.\r\nprint(total_sum)","repo_name":"IumoInfinium/cp","sub_path":"adventOfCode_2022/day7/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24344494194","text":"from pathlib import Path\nfrom freelancer.items import FreelancerItem\n\nimport scrapy\nimport os\n\n\n# class FreelancerSpider(scrapy.Spider):\n# name = \"freelancer\"\n# start_urls = [\"https://www.freelancer.com/\"]\n\n# def parse(self, response):\n# directory = f\"templates/\"\n# filename = os.path.join(directory, f\"freelancer.html\")\n# Path(filename).write_bytes(response.body)\n# self.log(f\"Saved file {filename}\")\n\n\nclass FreelancerSpider(scrapy.Spider):\n name = \"freelancer\"\n start_urls = [\"https://www.freelancer.com/jobs/python_django_web-scraping/\"]\n\n # Define a callback function \"parse\" for processing the initial page\n def parse(self, response):\n # Iterate through each project on the page\n for project in response.css(\"div.JobSearchCard-primary-heading\"):\n # Extract the URL for each project\n url = project.css(\"a.JobSearchCard-primary-heading-link::attr(href)\").get()\n url = \"https://www.freelancer.com\" + url\n # Create a Scrapy Request to visit the project's URL and parse it\n yield scrapy.Request(url, callback=self.parse_project)\n\n # Extract pagination links and follow the \"next\" link if available\n pagination_links = response.css(\n 
\"div.ProjectSearch-footer-pagination a.btn.Pagination-item\"\n )\n for link in pagination_links:\n if link.attrib.get(\"rel\") == \"next\":\n next_page_url = link.attrib[\"href\"]\n # Follow the \"next\" link and continue parsing the next page\n yield response.follow(next_page_url, self.parse)\n\n # Define a callback function \"parse_project\" for processing individual project pages\n def parse_project(self, response):\n # Extract project details from the project page\n title = response.css(\"h1::text\").get()\n price = response.css(\"p.PageProjectViewLogout-projectInfo-byLine::text\").get()\n detail = response.css(\"div.PageProjectViewLogout-detail p::text\").get()\n tags = response.css(\"p.PageProjectViewLogout-detail-tags a::text\").getall()\n client = response.css(\n \"div.PageProjectViewLogout-detail-reputation-employerInfo span::text\"\n ).getall()\n\n # Create an instance of the \"FreelancerItem\" to store the extracted data\n item = FreelancerItem()\n item[\"title\"] = title\n item[\"price\"] = price\n item[\"detail\"] = detail\n item[\"tags\"] = [obj.strip() for obj in tags]\n item[\"client\"] = [obj.strip() for obj in client if len(obj.strip()) > 1]\n\n # Yield the item to be processed and stored by Scrapy\n yield item\n","repo_name":"Amirhamidi2001/Freelancer-Scrapy","sub_path":"freelancer/freelancer/spiders/freelancer_spider.py","file_name":"freelancer_spider.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75136353865","text":"from functools import cmp_to_key\n\nclass Solution:\n def earliestFullBloom(self, plantTime: List[int], growTime: List[int]) -> int:\n #done using exchange argument algorithm\n #sort the list in such way that they are compare on the basis of total grow time (kinda confusing)\n #then just normally iterate through it and update latest and curr\n #latest is max time or ans, curr is the max plant time\n def compare(a,b):\n p1,g1=a\n p2,g2=b\n return (p1+max(g1,p2+g2))-(p2+max(g2,p1+g1))\n \n times=list(zip(plantTime,growTime))\n times.sort(key=cmp_to_key(compare))\n \n latest,curr=0,0\n for p,g in times:\n latest=max(latest,p+g+curr)\n curr+=p\n \n return latest\n ","repo_name":"bamblebam/competitive-programming","sub_path":"2022/1_January_22/10-1-22/earliestpossibledayoffullbloom.py","file_name":"earliestpossibledayoffullbloom.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31936056002","text":"import pygame as pg\nimport random\n\n\nvec2 = pg.math.Vector2\n\nRES = WIDTH, HEIGHT = 1600, 900\nNUM_STARS = 120\n\nCOLORS = [(0, 255, 0), (0, 80, 0)]\n\nALPHA = 35\n\n\nclass Block:\n def __init__(self, app):\n self.screen = app.screen\n\n self.vel = random.uniform(0.05, 0.25)\n self.color = random.choice(COLORS)\n self.screen_pos = vec2(0, 0)\n self.size = 15\n self.pos2d = self.get_pos2d(self.size)\n\n self.speed = random.randrange(7, 10)\n\n\n def get_pos2d(self, size):\n\n count_col = int(WIDTH / size)\n col = [i for i in range(count_col)]\n x = col[random.randrange(len(col))] * size\n y = random.randrange(0, 700)\n return vec2(x, y)\n\n def update(self):\n mouse_pos = pg.mouse.get_pos()\n\n\n self.pos2d = self.get_pos2d(self.size) if self.pos2d.y > HEIGHT else self.pos2d\n\n\n\n self.pos2d.y = self.pos2d.y + self.speed\n self.screen_pos = vec2(self.pos2d.x, self.pos2d.y)\n\n def draw(self):\n s = self.size\n if (-s < self.screen_pos.x < WIDTH + s) 
and (-s < self.screen_pos.y < HEIGHT + s):\n pg.draw.rect(self.screen, self.color, (*self.screen_pos, self.size, self.size))\n\n\nclass Blockfield:\n def __init__(self, app):\n self.stars = [Block(app) for i in range(NUM_STARS)]\n\n def run(self):\n [star.update() for star in self.stars]\n\n [star.draw() for star in self.stars]\n\n\nclass App:\n def __init__(self):\n self.screen = pg.display.set_mode(RES)\n\n self.alpha_surface = pg.Surface(RES)\n\n self.alpha_surface.set_alpha(ALPHA)\n self.clock = pg.time.Clock()\n self.starfield = Blockfield(self)\n\n def run(self):\n while True:\n\n\n self.screen.blit(self.alpha_surface, (0, 0))\n self.starfield.run()\n\n pg.display.flip()\n [exit() for i in pg.event.get() if i.type == pg.QUIT]\n self.clock.tick(60)\n\n\nif __name__ == '__main__':\n app = App()\n app.run()\n","repo_name":"MelmotWanderer/Matrix","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34841831666","text":"import torch\nfrom torch import nn\nfrom torchvision.models.resnet import resnet34\nimport numpy as np\n\n\nclass LocationModel():\n def __init__(self):\n self.resnet = resnet34(pretrained=True)\n self.linear = nn.Linear(1000, 3)\n self.optimiser = torch.optim.Adam(list(self.resnet.parameters()) + list(self.linear.parameters()), lr=.0003, eps=1e-6)\n\n def predict(self, obses):\n outs = self.linear(self.resnet(obses)) # shape (batch, 3)\n locations, angles = outs[:, :2], outs[:, 2:]\n return locations, angles\n\n def train(self, obses, locations, angles):\n labels = np.concatenate([locations, angles], axis=-1)\n res_out = self.linear(self.resnet(obses)) # shape (batch, 3)\n loss = ((res_out - labels) ** 2).mean()\n self.resnet.zero_grad()\n self.linear.zero_grad()\n loss.backward()\n self.optimiser.step()\n\n","repo_name":"albertwujj/minerl","sub_path":"item_locator/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19060771818","text":"import csv\n\n\ndef importer_csv(path_to_file):\n if not path_to_file.endswith('.csv'):\n raise FileNotFoundError(f\"Extensão inválida: {path_to_file}\")\n\n try:\n with open(path_to_file) as file:\n return format_file(file)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Arquivo inexistente: {path_to_file}\")\n\n\ndef format_file(file):\n requests = dict()\n\n for i in csv.reader(file):\n name, food, day = i[0], i[1], i[2]\n if name not in requests.keys():\n requests[name] = {food: {day: 1}}\n elif food not in requests[name].keys():\n requests[name][food] = {day: 1}\n elif day not in requests[name][food].keys():\n requests[name][food][day] = 1\n else:\n requests[name][food][day] += 1\n\n return requests\n\n\ndef most_requested_dish_by_maria(path_to_file, costumer):\n requests = importer_csv(path_to_file)\n\n frequency = {}\n\n for i in requests[costumer].keys():\n for j in requests[costumer][i]:\n frequency[i] = requests[costumer][i][j]\n\n return max(frequency, key=frequency.get)\n\n\ndef how_many_hamburgers_did_arnaldo_order(path_to_file):\n requests = importer_csv(path_to_file)\n\n hamburgers = 0\n\n for i in requests['arnaldo']['hamburguer']:\n hamburgers += requests['arnaldo']['hamburguer'][i]\n\n return hamburgers\n\n\ndef how_many_dishes_did_joao_ever_order(path_to_file):\n requests = importer_csv(path_to_file)\n\n all_foods = set()\n foods_joao = 
set()\n\n for i in requests:\n for j in requests[i]:\n all_foods.add(j)\n\n for foods in requests['joao']:\n foods_joao.add(foods)\n\n return all_foods - foods_joao\n\n\ndef all_days(requests):\n all_days = set()\n\n for name in requests:\n for food in requests[name]:\n for days in requests[name][food]:\n all_days.add(days)\n\n return all_days\n\n\ndef days_off_joao(path_to_file):\n requests = importer_csv(path_to_file)\n\n days_joao = set()\n\n for food in requests['joao']:\n for days in requests['joao'][food]:\n days_joao.add(days)\n\n return all_days(requests) - days_joao\n\n\ndef analyze_log(path_to_file):\n a = most_requested_dish_by_maria(path_to_file, 'maria')\n b = how_many_hamburgers_did_arnaldo_order(path_to_file)\n c = how_many_dishes_did_joao_ever_order(path_to_file)\n d = days_off_joao(path_to_file)\n\n resposta = f'{a}\\n{b}\\n{c}\\n{d}'\n with open('data/mkt_campaign.txt', 'w') as f:\n f.write(resposta)\n\n\nif __name__ == '__main__':\n print(analyze_log('data/orders_1.csv'))\n","repo_name":"igorhamzi/restaurant_orders","sub_path":"src/analyze_log.py","file_name":"analyze_log.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13382416015","text":"# Program validating user response\n\nfrom validator_collection import validators\n\n\ndef main():\n email = validator(input(\"What's your email address? \"))\n print(email)\n\n\ndef validator(email):\n try:\n validators.email(email)\n return \"Valid\"\n except:\n return \"Invalid\"\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rubxcube/cs50code","sub_path":"response/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7204836226","text":"import os\nimport time\nfrom glob import glob\nimport numpy as np\nimport SimpleITK as sitk\nimport torch\n\n\ndef reshape_by_padding_upper_coords(image, new_shape, pad_value=None):\n shape = tuple(list(image.shape))\n new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2,len(shape))), axis=0))\n if pad_value is None:\n if len(shape)==2:\n pad_value = image[0,0]\n elif len(shape)==3:\n pad_value = image[0, 0, 0]\n else:\n raise ValueError(\"Image must be either 2 or 3 dimensional\")\n res = np.ones(list(new_shape), dtype=image.dtype) * pad_value\n if len(shape) == 2:\n res[0:0+int(shape[0]), 0:0+int(shape[1])] = image\n elif len(shape) == 3:\n res[0:0+int(shape[0]), 0:0+int(shape[1]), 0:0+int(shape[2])] = image\n return res\n\ndef random_crop_3D_image_batched(img, crop_size):\n if type(crop_size) not in (tuple, list):\n crop_size = [crop_size] * (len(img.shape) - 2)\n else:\n assert len(crop_size) == (len(img.shape) - 2), (\"If you provide a list/tuple \"\n \"as center crop make sure it has the same len as your data has dims (3d)\")\n\n if crop_size[0] < img.shape[2]:\n lb_x = np.random.randint(0, img.shape[2] - crop_size[0])\n elif crop_size[0] == img.shape[2]:\n lb_x = 0\n else:\n raise ValueError(\"crop_size[0] must be smaller or equal to the images x dimension\")\n\n if crop_size[1] < img.shape[3]:\n lb_y = np.random.randint(0, img.shape[3] - crop_size[1])\n elif crop_size[1] == img.shape[3]:\n lb_y = 0\n else:\n raise ValueError(\"crop_size[1] must be smaller or equal to the images y dimension\")\n\n if crop_size[2] < img.shape[4]:\n lb_z = np.random.randint(0, img.shape[4] - crop_size[2])\n elif crop_size[2] == 
img.shape[4]:\n        lb_z = 0\n    else:\n        raise ValueError(\"crop_size[2] must be smaller or equal to the images z dimension\")\n    img_sub = img[:, :, lb_x:lb_x+crop_size[0], lb_y:lb_y+crop_size[1], lb_z:lb_z+crop_size[2]]\n    return img_sub\n\ndef random_data_argument(x, y):\n    i = np.random.choice(range(10), 1)[0]\n    if i == 0:\n        return x*0.9, y\n    elif i == 1:\n        return x*1.1, y\n    elif i in [2,3,4]:\n        x = np.flip(x, axis=i).copy()\n        y = np.flip(y, axis=i).copy()\n        return x, y\n    else:\n        return x, y\n\ndef data_view(x, y, view_flip):\n    view, bool_flip = view_flip.split('_')\n    if y is None:\n        y = np.ones([1,1,1,1,1])\n    if view == 'axial':\n        pass\n    elif view == 'saggital':\n        x = np.transpose(x, [0,1,3,4,2])\n        y = np.transpose(y, [0,1,3,4,2])\n    elif view == 'coronal':\n        x = np.transpose(x, [0,1,4,2,3])\n        y = np.transpose(y, [0,1,4,2,3])\n    if bool_flip.lower() == 'flip':\n        x = np.flip(x, 2).copy()\n        y = np.flip(y, 2).copy()\n    return x, y\n\ndef data_view_inverted(x, y, view_flip):\n    'Inverse of data_view.'\n    view, bool_flip = view_flip.split('_')\n    if y is None:\n        y = np.ones([1,1,1,1,1])\n    if bool_flip.lower() == 'flip':\n        x = np.flip(x, 2).copy()\n        y = np.flip(y, 2).copy()\n    if view == 'axial':\n        pass\n    elif view == 'saggital':\n        x = np.transpose(x, [0,1,4,2,3])\n        y = np.transpose(y, [0,1,4,2,3])\n    elif view == 'coronal':\n        x = np.transpose(x, [0,1,3,4,2])\n        y = np.transpose(y, [0,1,3,4,2])\n    return x, y\n\ndef save_probmap(prob, save_path):\n    save_dir = os.path.dirname(save_path)\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir)\n    prob_sitk = sitk.GetImageFromArray(prob.astype(np.float32))\n    sitk.WriteImage(prob_sitk, save_path)\n\ndef save_image(image, save_path, save_original=False):\n    '''Save the image to 'save_path' ('.nii.gz' is appended automatically if missing).\n    The image will be padded into the shape [155,240,240].\n    '''\n    save_dir = os.path.dirname(save_path)\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir)\n    if not save_path.endswith('.nii.gz'):\n        save_path += \".nii.gz\"\n    image_shape = np.array(image.shape)\n    assert(len(image_shape) == 3), f\"The image shape is {image_shape}. 
It must be 3-dimension\"\n if np.sum([155,240,240] - image_shape) > 0:\n d0, h0, w0 = (0.5*([155,240,240] - image_shape)).astype(np.int)\n d, h, w = image_shape\n label_like = np.zeros([155,240,240])\n label_like[d0:d0+d,h0:h0+h,w0:w0+w] = image\n else:\n label_like = image\n if save_original:\n image_sitk = sitk.GetImageFromArray(label_like)\n else:\n label_like = label_convert(label_like, [1,2,3], [2,1,4])\n image_sitk = sitk.GetImageFromArray(label_like.astype(np.uint8))\n sitk.WriteImage(image_sitk, save_path)\n\ndef label_convert(label, label_ls=[1,2,3], target_ls=[2,1,4]):\n \"Convert the label value from 'label_ls' to 'target_ls'.\"\n label_like = np.zeros_like(label)\n for i,j in zip(label_ls, target_ls):\n label_like[label==i] = j\n return label_like\n\ndef label_split_to_channels(inputs, label_ls=[1,2,3], axis=1, remain_self=False):\n 'get one hot map concatenated on dimension 1.'\n if remain_self:\n outs = [inputs]\n else:\n outs = []\n for i in label_ls:\n outs.append(np.array(inputs==i, dtype=np.int32))\n outs = np.concatenate(outs, axis=axis)\n return outs\n\ndef label_split_to_region(inputs, label_ls=[1,2,3], axis=1):\n 'get one hot map concatenated on dimension 1.'\n outs = []\n for i in label_ls:\n outs.append(np.array(inputs >= i, dtype=np.uint8))\n outs = np.concatenate(outs, axis=axis)\n return outs\n\ndef datestr():\n 'Get the real-time string.'\n tl = time.localtime()\n t_str = ('{t.tm_year:04}-{t.tm_mon:02}-{t.tm_mday:02} '\n '{t.tm_hour:02}:{t.tm_min:02}:{t.tm_sec:02}').format(t=tl)\n return t_str\n\ndef get_cross_idxs(txtname, i):\n 'split the i-th cross folder from whole idxs'\n with open(txtname, 'r') as ftxt:\n lines = ftxt.readlines()\n val_idxs = lines[i].split()\n val_idxs = [int(x) for x in val_idxs]\n train_idxs = lines[:i]+lines[i+1:]\n train_idxs = ' '.join(train_idxs)\n train_idxs = train_idxs.split()\n train_idxs = [int(x) for x in train_idxs]\n return train_idxs, val_idxs\n\ndef chkdir(dir):\n if os.path.exists(dir):\n return dir\n else:\n os.makedirs(dir)\n return dir\n\ndef random_restore_pretrain(model, pretrain_dir, pre_folder_ls, model_xy, T=0.5):\n 'Where the random map > T restore the saved params.'\n if pre_folder_ls is None:\n return model\n def get_latest(folder):\n paths = sorted(glob(f\"{pretrain_dir}/{folder}/{model_xy}*\"))\n return paths[-1]\n def get_pre_dict(net, folder):\n saved_state_dict = torch.load(get_latest(folder))\n new_params_dict = net.state_dict().copy()\n for name, param in new_params_dict.items():\n print(name, end='')\n if name in saved_state_dict and param.size() == saved_state_dict[name].size():\n # if name in saved_state_dict and ('1.0' in name or '2.0' in name):\n\n random_dict = torch.where(torch.rand_like(param) > T, \n saved_state_dict[name].cpu(), new_params_dict[name])\n new_params_dict[name].copy_(random_dict)\n print('\\t*** copy {}'.format(name))\n else:\n print()\n return new_params_dict\n model.net_0.load_state_dict(get_pre_dict(model.net_0, pre_folder_ls[0]))\n model.net_1.load_state_dict(get_pre_dict(model.net_1, pre_folder_ls[1]))\n model.net_2.load_state_dict(get_pre_dict(model.net_2, pre_folder_ls[2]))\n return model\n","repo_name":"lixiaopang221/2m_re1","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43073358268","text":"from setuptools import setup, find_packages\n\n\nclassifiers = [\n # 'Development Status :: 5 - Production/Stable',\n 'Development 
Status :: 4 - Beta',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: BSD License',\n]\n\ninstall_requires = [\n 'ansimarkup',\n 'prompt-toolkit>=0.60',\n 'pyserial>=3.0',\n 'pyserial-asyncio'\n]\n\nextras_require = {\n 'tests': [\n 'tox >= 2.6.0',\n 'pytest >= 3.0.3',\n 'pytest-cov >= 2.3.1',\n ],\n 'devel': [\n 'bumpversion >= 0.5.2',\n 'check-manifest >= 0.35',\n 'readme-renderer >= 16.0',\n 'flake8',\n 'pep8-naming',\n ]\n}\n\nkw = {\n 'name': 'grblcom',\n 'version': '0.0.0',\n\n 'description': 'Rich serial-console client for GRBL',\n 'long_description': open('README.rst').read(),\n\n 'author': 'Georgi Valkov',\n 'author_email': 'georgi.t.valkov@gmail.com',\n 'license': 'Revised BSD License',\n 'keywords': 'grbl',\n 'url': 'https://github.com/gvalkov/grblcom',\n 'classifiers': classifiers,\n 'install_requires': install_requires,\n 'extras_require': extras_require,\n 'packages': find_packages(),\n 'zip_safe': True,\n 'entry_points': {\n 'console_scripts': [\n 'grblcom = grblcom.__main__:main'\n ]\n }\n}\n\n\nif __name__ == '__main__':\n setup(**kw)\n","repo_name":"gvalkov/grblcom","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17795102777","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom .import views\n\napp_name='students'\nurlpatterns=[\n\t#url(r'^$',views.view_student,name=\"list\"),\n\turl(r'^profile/$',views.students, name=\"students_profile\"),\n\turl(r'^upload_document/$',views.simple_upload,name=\"upload_document\"),\n\turl(r'^dayAttendance/$',views.dayAttendance,name=\"dayAttendance\"),\n\turl(r'^monthlyAttendance/$',views.monthlyAttendance,name=\"monthlyAttendance\"),\n\turl(r'^yearlyAttendance/$',views.yearlyAttendance,name=\"yearlyAttendance\"),\n\turl(r'^totalAttendance/$',views.totalAttendance,name=\"totalAttendance\"),\n\turl(r'^viewAttendance/$',views.viewAttendance,name='viewAttendance'),\n\turl(r'^Listofsubject/$',views.Listofsubject,name='Listofsubject'),\n\turl(r'^ListofAssignments/$',views.viewAssignment,name='Assignment'),\n]\n","repo_name":"surbhijha17/school_management_system","sub_path":"students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5299883137","text":"import cv2\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport csv\ndef process_graph(cleared_final_detections , frame, car_len):\n frameset_ratios = []\n for detection_set in cleared_final_detections:\n frame_row, ratio = compute_ratio(detection_set, frame, car_len)\n if ratio > 0 and frame_row > 40:\n frameset_ratios.append([frame_row, ratio]) # actually now (frame_row, car_pixel_length)\n print(frameset_ratios)\n return frameset_ratios\n\ndef find_center(detection):\n x1, y1, x2, y2, obj_id = detection\n w = x2 - x1\n h = y2 - y1\n center_x = x1 + w/2\n center_y = y1 + h/2\n return [center_x,center_y]\n\ndef compute_ratio(detection_set,frame, car_len):\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n centroid_list = []\n for det in detection_set:\n centroid_list.append(find_center(det))\n np_centroid_list = np.array(centroid_list)\n [vx, vy, x, y] = 
cv2.fitLine(np_centroid_list, cv2.DIST_L2, 0, 0.01, 0.01)\n lefty = int((-x * vy / vx) + y)\n righty = int(((frame.shape[1] - x) * vy / vx) + y)\n cv2.line(frame, (frame.shape[1] - 1, righty), (0, lefty), 255, 2)\n pixel_row, pixel_len = median_intersection_length(detection_set, vx, vy, lefty)\n if pixel_len>0:\n # ratio = car_len/pixel_len # now giving the pixel length\n ratio = pixel_len\n return pixel_row, ratio\n else:\n return pixel_row, -1\n\ndef median_intersection_length(detection_set,vx,vy,lefty):\n length = len(detection_set)\n middle = int(length/2) + 1\n m = vy / vx\n x1, y1, x2, y2, obj_id = detection_set[middle-1] # from here onwards working on one bounding box\n upper_x = (y1-lefty) / m\n upper = [upper_x, y1]\n lower_x = (y2-lefty) / m\n lower = [lower_x, y2]\n median_frm_center = find_center(detection_set[middle-1])\n if x1 str:\n \"\"\"\n Converts CamelCase to CAMEL_CASE.\n\n Examples::\n\n ENUMName -> ENUM_NAME\n EnumName1 -> ENUM_NAME1\n ENUM_NAME -> ENUM_NAME\n ENUM_NAME1 -> ENUM_NAME1\n ENUM_Name2 -> ENUM_NAME2\n ENUM24_Name -> ENUM24_NAME\n \"\"\"\n c_fun_str = c_name(value, False)\n if value.isupper():\n return c_fun_str\n\n new_name = ''\n length = len(c_fun_str)\n for i in range(length):\n char = c_fun_str[i]\n # When char is upper case and no '_' appears before, do more checks\n if char.isupper() and (i > 0) and c_fun_str[i - 1] != '_':\n if i < length - 1 and c_fun_str[i + 1].islower():\n new_name += '_'\n elif c_fun_str[i - 1].isdigit():\n new_name += '_'\n new_name += char\n return new_name.lstrip('_').upper()\n\n\ndef c_enum_const(type_name: str,\n const_name: str,\n prefix: Optional[str] = None) -> str:\n \"\"\"\n Generate a C enumeration constant name.\n\n :param type_name: The name of the enumeration.\n :param const_name: The name of this constant.\n :param prefix: Optional, prefix that overrides the type_name.\n \"\"\"\n if prefix is not None:\n type_name = prefix\n return camel_to_upper(type_name) + '_' + c_name(const_name, False).upper()\n\n\ndef c_name(name: str, protect: bool = True) -> str:\n \"\"\"\n Map ``name`` to a valid C identifier.\n\n Used for converting 'name' from a 'name':'type' qapi definition\n into a generated struct member, as well as converting type names\n into substrings of a generated C function name.\n\n '__a.b_c' -> '__a_b_c', 'x-foo' -> 'x_foo'\n protect=True: 'int' -> 'q_int'; protect=False: 'int' -> 'int'\n\n :param name: The name to map.\n :param protect: If true, avoid returning certain ticklish identifiers\n (like C keywords) by prepending ``q_``.\n \"\"\"\n # ANSI X3J11/88-090, 3.1.1\n c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',\n 'default', 'do', 'double', 'else', 'enum', 'extern',\n 'float', 'for', 'goto', 'if', 'int', 'long', 'register',\n 'return', 'short', 'signed', 'sizeof', 'static',\n 'struct', 'switch', 'typedef', 'union', 'unsigned',\n 'void', 'volatile', 'while'])\n # ISO/IEC 9899:1999, 6.4.1\n c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])\n # ISO/IEC 9899:2011, 6.4.1\n c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic',\n '_Noreturn', '_Static_assert', '_Thread_local'])\n # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html\n # excluding _.*\n gcc_words = set(['asm', 'typeof'])\n # C++ ISO/IEC 14882:2003 2.11\n cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',\n 'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',\n 'namespace', 'new', 'operator', 'private', 'protected',\n 'public', 'reinterpret_cast', 
'static_cast', 'template',\n 'this', 'throw', 'true', 'try', 'typeid', 'typename',\n 'using', 'virtual', 'wchar_t',\n # alternative representations\n 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',\n 'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])\n # namespace pollution:\n polluted_words = set(['unix', 'errno', 'mips', 'sparc', 'i386', 'linux'])\n name = re.sub(r'[^A-Za-z0-9_]', '_', name)\n if protect and (name in (c89_words | c99_words | c11_words | gcc_words\n | cpp_words | polluted_words)\n or name[0].isdigit()):\n return 'q_' + name\n return name\n\n\nclass Indentation:\n \"\"\"\n Indentation level management.\n\n :param initial: Initial number of spaces, default 0.\n \"\"\"\n def __init__(self, initial: int = 0) -> None:\n self._level = initial\n\n def __repr__(self) -> str:\n return \"{}({:d})\".format(type(self).__name__, self._level)\n\n def __str__(self) -> str:\n \"\"\"Return the current indentation as a string of spaces.\"\"\"\n return ' ' * self._level\n\n def increase(self, amount: int = 4) -> None:\n \"\"\"Increase the indentation level by ``amount``, default 4.\"\"\"\n self._level += amount\n\n def decrease(self, amount: int = 4) -> None:\n \"\"\"Decrease the indentation level by ``amount``, default 4.\"\"\"\n assert amount <= self._level\n self._level -= amount\n\n\n#: Global, current indent level for code generation.\nindent = Indentation()\n\n\ndef cgen(code: str, **kwds: object) -> str:\n \"\"\"\n Generate ``code`` with ``kwds`` interpolated.\n\n Obey `indent`, and strip `EATSPACE`.\n \"\"\"\n raw = code % kwds\n pfx = str(indent)\n if pfx:\n raw = re.sub(r'^(?!(#|$))', pfx, raw, flags=re.MULTILINE)\n return re.sub(re.escape(EATSPACE) + r' *', '', raw)\n\n\ndef mcgen(code: str, **kwds: object) -> str:\n if code[0] == '\\n':\n code = code[1:]\n return cgen(code, **kwds)\n\n\ndef c_fname(filename: str) -> str:\n return re.sub(r'[^A-Za-z0-9_]', '_', filename)\n\n\ndef guardstart(name: str) -> str:\n return mcgen('''\n#ifndef %(name)s\n#define %(name)s\n\n''',\n name=c_fname(name).upper())\n\n\ndef guardend(name: str) -> str:\n return mcgen('''\n\n#endif /* %(name)s */\n''',\n name=c_fname(name).upper())\n\n\ndef gen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]],\n cond_fmt: str, not_fmt: str,\n all_operator: str, any_operator: str) -> str:\n\n def do_gen(ifcond: Union[str, Dict[str, Any]],\n need_parens: bool) -> str:\n if isinstance(ifcond, str):\n return cond_fmt % ifcond\n assert isinstance(ifcond, dict) and len(ifcond) == 1\n if 'not' in ifcond:\n return not_fmt % do_gen(ifcond['not'], True)\n if 'all' in ifcond:\n gen = gen_infix(all_operator, ifcond['all'])\n else:\n gen = gen_infix(any_operator, ifcond['any'])\n if need_parens:\n gen = '(' + gen + ')'\n return gen\n\n def gen_infix(operator: str, operands: Sequence[Any]) -> str:\n return operator.join([do_gen(o, True) for o in operands])\n\n if not ifcond:\n return ''\n return do_gen(ifcond, False)\n\n\ndef cgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:\n return gen_ifcond(ifcond, 'defined(%s)', '!%s', ' && ', ' || ')\n\n\ndef docgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:\n # TODO Doc generated for conditions needs polish\n return gen_ifcond(ifcond, '%s', 'not %s', ' and ', ' or ')\n\n\ndef gen_if(cond: str) -> str:\n if not cond:\n return ''\n return mcgen('''\n#if %(cond)s\n''', cond=cond)\n\n\ndef gen_endif(cond: str) -> str:\n if not cond:\n return ''\n return mcgen('''\n#endif /* %(cond)s */\n''', cond=cond)\n\n\ndef must_match(pattern: str, string: str) -> 
Match[str]:\n match = re.match(pattern, string)\n assert match is not None\n return match\n","repo_name":"qemu/qemu","sub_path":"scripts/qapi/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","stars":8597,"dataset":"github-code","pt":"81"} +{"seq_id":"503451735","text":"from nltk import sent_tokenize\n\nfrom googletrans import Translator\n\ntranslator = Translator()\n\ndata = \"shut up\"\n\ntoken = sent_tokenize(data)\n\nfor tt in token:\n translatedText = translator.translate(tt, dest=\"en\")\n print(translatedText.text)\n\n\nimport pyttsx3\n\nsynthesizer = pyttsx3.init()\ni=1\nwhile( i<10):\n synthesizer.say(translatedText.text) \n i=i+1\nsynthesizer.runAndWait() \nsynthesizer.stop()\n","repo_name":"composureR3j3c/NTLK","sub_path":"ref/6_googletranslater.py","file_name":"6_googletranslater.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70203653704","text":"import os\nimport uuid\nimport logging\nimport aiofiles\n\nfrom fastapi import UploadFile, File\nfrom typing import List\n\nfrom core.devicemgr_config import (FILE_SAVED_FOLDER)\n\n\n# Setting Logger\nLOGGER = logging.getLogger(__name__)\n\n\ndef generate_hex_uuid() -> str:\n \"\"\" Create unique uuid for using \"\"\"\n return uuid.uuid4().hex\n\n\nasync def save_file(file_name: str, in_file: UploadFile = File(...)):\n \"\"\" Async saving file in to images folder \"\"\"\n LOGGER.info(f\"Saved image name: {file_name}\")\n\n save_file_path = f\"{FILE_SAVED_FOLDER}{file_name}\"\n\n async with aiofiles.open(save_file_path, 'wb') as out_file:\n content = await in_file.read() # async read\n await out_file.write(content) # async write\n\n return\n\n\nasync def delete_file(file_name: str):\n \"\"\" Async delete file from images folder \"\"\"\n LOGGER.info(f\"Delete image name: {file_name}\")\n\n delete_file_path = f\"{FILE_SAVED_FOLDER}{file_name}\"\n\n if os.path.exists(delete_file_path):\n # File exist and delete\n await aiofiles.os.remove(delete_file_path)\n else:\n LOGGER.warning(f\"File Not exist in ({delete_file_path})\")\n pass\n\n return\n\n\nasync def save_files(in_files: List[UploadFile] = File(...)):\n \"\"\" Async saving file in to images folder \"\"\"\n LOGGER.info(f\"Save image in {FILE_SAVED_FOLDER}\")\n\n if not os.path.exists(FILE_SAVED_FOLDER):\n os.mkdir(FILE_SAVED_FOLDER)\n\n res_data = []\n for file in in_files:\n # Split input filename and extention and generate new unique file name with uuid\n new_uuid = generate_hex_uuid()\n _, ext = os.path.splitext(file.filename)\n new_filename = new_uuid + ext\n await save_file(new_filename, file)\n res_data.append(new_filename)\n\n return res_data\n","repo_name":"umingbrightben/test","sub_path":"app_lib/file_utility.py","file_name":"file_utility.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39603898315","text":"class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n res = list(s)\n indices = []\n for i, c in enumerate(res):\n if c == '(':\n indices.append(i)\n elif c == ')':\n if indices: indices.pop()\n else: res[i] = ''\n while indices:\n res[indices.pop()] = ''\n return 
\"\".join(res)\n","repo_name":"allenhyp/LeetCodePractice","sub_path":"1249_Minimum_Remove_to_Make_Valid_Parentheses.py","file_name":"1249_Minimum_Remove_to_Make_Valid_Parentheses.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72439087944","text":"import boto3\nfrom botocore.exceptions import ClientError\n\n# https://docs.aws.amazon.com/en_pv/ses/latest/DeveloperGuide/send-using-sdk-python.html\n\nSENDER = \"Dustin Test \"\nRECIPIENT = \"djefford@lilly.com\"\n\nAWS_REGION = \"us-east-1\"\n\nSUBJECT = \"Amazon SES Test (SDK for Python)\"\nBODY_TEXT = (\"Amazon SES Test (Python)\\r\\n\"\n \"This email was sent with Amazon SES using the \"\n \"AWS SDK for Python (Boto).\"\n )\n\n# BODY_HTML = \"\"\"\n# \n# \n#

Amazon SES Test (SDK for Python)

\n#

This email was sent with\n# Amazon SES using the\n# \n# AWS SDK for Python (Boto).

\n# \n# \n# \"\"\"\n\nBODY_HTML = \"\"\"\n

Landing Zone Account Notification
\n---------------------------------------------

\n

Account: ######

\n

Region: ######

\n

The following resources in the account are not tagged properly. Please log into the Landing Zone account and take corrective action on the resources listed below.
\nFor a complete list, you can find Compliance results in the AWS Config Console (please ensure you are logged into the account specified above).

\n

Amazon SES

\n\n

Resource List\n
-----------------

\n

ResourceID:

\n

ResourceID:

\n

-----------------

\n

For questions about this communication, please submit an email to the Landing Zone team:
\nFakeList@fake.com

\n\"\"\"\n\nCHARSET = \"UTF-8\"\n\n# Create a new SES resource and specify a region.\nclient = boto3.client('ses',region_name=AWS_REGION)\n\ntry:\n response = client.send_email(\n Destination={\n 'ToAddresses': [\n RECIPIENT,\n ],\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': CHARSET,\n 'Data': BODY_HTML,\n },\n 'Text': {\n 'Charset': CHARSET,\n 'Data': BODY_TEXT,\n },\n },\n 'Subject': {\n 'Charset': CHARSET,\n 'Data': SUBJECT,\n },\n },\n Source=SENDER,\n )\n\nexcept ClientError as e:\n print(e.response['Error']['Message'])\nelse:\n print(\"Email sent! Message ID:\"),\n print(response['MessageId']) \n\n","repo_name":"djefford/hacking","sub_path":"AWS/amazon-ses-sample.py","file_name":"amazon-ses-sample.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25040200657","text":"import mingus.containers.Note as Note\nimport mingus.containers.NoteContainer as NoteContainer\nimport mingus.containers.Bar as Bar\nimport mingus.containers.Track as Track\nimport mingus.containers.Composition as Composition\nfrom mingus.containers.Instrument import MidiInstrument\nimport mingus.core.notes as notes\nimport mingus.core.intervals as intervals\nimport binascii\n\n\ndef MIDI_to_Composition(file):\n \"\"\"Converts a MIDI file to a mingus.containers.Composition and returns it in a \\\ntuple with the last used tempo in beats per minute (this will change in the \\\nfuture). This function can raise all kinds of exceptions (IOError, \\\nHeaderError, TimeDivisionError, FormatError), so be sure to try and catch.\"\"\"\n\n m = MidiFile()\n return m.MIDI_to_Composition(file)\n\n\nclass HeaderError(Exception):\n\n pass\n\n\nclass TimeDivisionError(Exception):\n\n pass\n\n\nclass FormatError(Exception):\n\n pass\n\n\nclass MidiFile:\n\n \"\"\"This class parses a MIDI file.\"\"\"\n\n bpm = 120\n meter = (4, 4)\n bytes_read = 0\n\n def MIDI_to_Composition(self, file):\n (header, track_data) = self.parse_midi_file(file)\n c = Composition()\n if header[2]['fps']:\n print(\"Don't know how to parse this yet\")\n return c\n ticks_per_beat = header[2]['ticks_per_beat']\n\n for track in track_data:\n # this loop will gather data for all notes,\n # set up keys and time signatures for all bars\n # and set the tempo and instrument for the track.\n\n metronome = 1 # Tick once every quarter note\n thirtyseconds = 8 # 8 thirtyseconds in a quarter note\n step = 256.0 # WARNING: Assumes our smallest desired quantization step is a 256th note.\n\n meter = (4, 4)\n key = 'C'\n bar = 0\n beat = 0\n now = (bar, beat)\n b = None\n\n started_notes = {}\n finished_notes = {}\n b = Bar(key=key, meter=meter)\n bars = [b]\n\n bpm = None\n instrument = None\n track_name = None\n\n for deltatime, event in track:\n if deltatime != 0:\n duration = (ticks_per_beat * 4.0) / float(deltatime)\n\n dur_q = int(round(step/duration))\n length_q = int(b.length * step)\n\n o_bar = bar\n c_beat = beat + dur_q\n bar += int(c_beat / length_q)\n beat = c_beat % length_q\n\n while o_bar < bar:\n o_bar += 1\n o_key = b.key\n b = Bar(key=key, meter=meter)\n b.key = o_key\n bars.append(b)\n\n now = (bar, beat)\n\n if event['event'] == 8:\n # note off\n channel = event['channel']\n note_int = event['param1']\n velocity = event['param2']\n note_name = notes.int_to_note(note_int % 12)\n octave = note_int / 12 - 1\n\n note = Note(note_name, octave)\n note.channel = channel\n note.velocity = velocity\n\n x = (channel, note_int)\n start_time = 
started_notes[x]\n del started_notes[x]\n end_time = now\n\n y = (start_time, end_time)\n if y not in finished_notes:\n finished_notes[y] = []\n\n finished_notes[y].append(note)\n\n elif event['event'] == 9:\n # note on\n channel = event['channel']\n note_int = event['param1']\n velocity = event['param2']\n x = (channel, note_int)\n\n # add the note to the current NoteContainer\n started_notes[x] = now\n\n elif event['event'] == 10:\n # note aftertouch\n pass\n\n elif event['event'] == 11:\n # controller select\n pass\n\n elif event['event'] == 12:\n # program change\n # WARNING: only the last change in instrument will get saved.\n i = MidiInstrument()\n i.instrument_nr = event['param1']\n instrument = i\n\n elif event['event'] == 0x0f:\n # meta event Text\n if event['meta_event'] == 1:\n pass\n\n elif event['meta_event'] == 3:\n # Track name\n track_name = event['data']\n\n elif event['meta_event'] == 6:\n # Marker\n pass\n\n elif event['meta_event'] == 7:\n # Cue Point\n pass\n\n elif event['meta_event'] == 47:\n # End of Track\n pass\n\n elif event['meta_event'] == 81:\n # Set tempo\n # WARNING: Only the last change in bpm will get saved\n mpqn = self.bytes_to_int(event['data'])\n bpm_o = bpm\n bpm = 60000000 / mpqn\n\n elif event['meta_event'] == 88:\n # Time Signature\n d = event['data']\n thirtyseconds = self.bytes_to_int(d[3])\n metronome = self.bytes_to_int(d[2]) / 24.0\n denom = 2 ** self.bytes_to_int(d[1])\n numer = self.bytes_to_int(d[0])\n meter = (numer, denom)\n b.set_meter(meter)\n\n elif event['meta_event'] == 89:\n # Key Signature\n d = event['data']\n sharps = self.bytes_to_int(d[0])\n minor = self.bytes_to_int(d[0])\n if minor:\n key = 'A'\n else:\n key = 'C'\n for i in range(abs(sharps)):\n if sharps < 0:\n key = intervals.major_fourth(key)\n else:\n key = intervals.major_fifth(key)\n b.key = Note(key)\n\n else:\n print('Unsupported META event', event['meta_event'])\n\n else:\n print('Unsupported MIDI event', event)\n\n t = Track(instrument)\n t.name = track_name\n\n sorted_notes = {}\n\n # sort the notes (so they are added to the bars in order)\n # this loop will also split up notes that span more than one bar.\n for x in finished_notes:\n (start_bar, start_beat), (end_bar, end_beat) = x\n if end_beat == 0:\n end_bar -= 1\n end_beat = int(bars[end_bar].length * step)\n\n while start_bar <= end_bar:\n nc = NoteContainer(finished_notes[x])\n b = bars[start_bar]\n\n if start_bar < end_bar:\n # only executes when note spans more than one bar.\n length_q = int(b.length * step)\n dur = int(step/(length_q - start_beat))\n else:\n # always executes - add the final section of this note.\n dur = int(step/(end_beat-start_beat))\n\n if start_beat != 0:\n at = float(start_beat)/step\n else:\n at = 0.0\n\n if start_bar not in sorted_notes:\n sorted_notes[start_bar] = {}\n if at not in sorted_notes[start_bar]:\n sorted_notes[start_bar][at] = (dur, nc)\n\n # set our offsets for the next loop\n start_beat = 0\n start_bar += 1\n\n # add all notes to all bars in order.\n for start_bar in sorted(sorted_notes.keys()):\n for at in sorted(sorted_notes[start_bar].keys()):\n dur, nc = sorted_notes[start_bar][at]\n bars[start_bar].place_notes_at(nc, dur, at)\n\n # add the bars to the track, in order\n for b in bars:\n b.fill_with_rests()\n t + b\n\n # add the track to the composition\n c.tracks.append(t)\n\n return (c, bpm)\n\n def parse_midi_file_header(self, fp):\n \"\"\"Reads the header of a MIDI file and returns a touple containing the \\\nformat type, number of tracks and parsed time 
division information\"\"\"\n\n # Check header\n\n try:\n if fp.read(4) != 'MThd':\n raise HeaderError('Not a valid MIDI file header. Byte %d.'\\\n % self.bytes_read)\n self.bytes_read += 4\n except:\n raise IOError(\"Couldn't read from file.\")\n\n # Parse chunk size\n\n try:\n chunk_size = self.bytes_to_int(fp.read(4))\n self.bytes_read += 4\n except:\n raise IOError(\"Couldn't read chunk size from file. Byte %d.\"\\\n % self.bytes_read)\n\n # Expect chunk size to be at least 6\n\n if chunk_size < 6:\n return False\n try:\n format_type = self.bytes_to_int(fp.read(2))\n self.bytes_read += 2\n if format_type not in [0, 1, 2]:\n raise FormatError('%d is not a valid MIDI format.'\\\n % format_type)\n except:\n raise IOError(\"Couldn't read format type from file.\")\n try:\n number_of_tracks = self.bytes_to_int(fp.read(2))\n time_division = self.parse_time_division(fp.read(2))\n self.bytes_read += 4\n except:\n raise IOError(\"Couldn't read number of tracks and/or time division from tracks.\")\n\n chunk_size -= 6\n if chunk_size % 2 == 1:\n raise FormatError(\"Won't parse this.\")\n fp.read(chunk_size / 2)\n self.bytes_read += chunk_size / 2\n return (format_type, number_of_tracks, time_division)\n\n def bytes_to_int(self, bytes):\n return int(binascii.b2a_hex(bytes), 16)\n\n def parse_time_division(self, bytes):\n \"\"\"Parses the time division found in the header of a MIDI file and returns \\\na dictionairy with the boolean fps set to indicate whether to use frames \\\nper second or ticks per beat. If fps is True, the values SMPTE_frames \\\nand clock_ticks will also be set. If fps is False, ticks_per_beat will \\\nhold the value.\"\"\"\n\n # If highest bit is set, time division is set in frames per second\n # otherwise in ticks_per_beat\n\n value = self.bytes_to_int(bytes)\n if not value & 0x8000:\n return {'fps': False, 'ticks_per_beat': value & 0x7FFF}\n else:\n SMPTE_frames = (value & 0x7F00) >> 2\n if SMPTE_frames not in [24, 25, 29, 30]:\n raise TimeDivisionError(\"'%d' is not a valid value for the number of SMPTE frames\"\\\n % SMPTE_frames)\n clock_ticks = (value & 0x00FF) >> 2\n return {'fps': True, 'SMPTE_frames': SMPTE_frames,\n 'clock_ticks': clock_ticks}\n\n def parse_track(self, fp):\n \"\"\"Parses a MIDI track from its header to its events. And returns a list of \\\nevents and the number of bytes that were read.\"\"\"\n\n events = []\n chunk_size = self.parse_track_header(fp)\n bytes = chunk_size\n while chunk_size > 0:\n (delta_time, chunk_delta) = self.parse_varbyte_as_int(fp)\n chunk_size -= chunk_delta\n (event, chunk_delta) = self.parse_midi_event(fp)\n chunk_size -= chunk_delta\n events.append([delta_time, event])\n if chunk_size < 0:\n print('yikes.', self.bytes_read, chunk_size)\n return events\n\n def parse_midi_event(self, fp):\n \"\"\"Parses a MIDI event. Returns a dictionary and a the number of bytes \\\nread.\"\"\"\n\n chunk_size = 0\n try:\n ec = self.bytes_to_int(fp.read(1))\n chunk_size += 1\n self.bytes_read += 1\n except:\n raise IOError(\"Couldn't read event type and channel data from file.\")\n\n # Get the nibbles\n\n event_type = (ec & 0xf0) >> 4\n channel = ec & 0x0f\n\n # I don't know what these events are supposed to do, but I keep finding\n # them. The parser ignores them.\n\n if event_type < 8:\n raise FormatError('Unknown event type %d. Byte %d.' 
% (event_type,\n                self.bytes_read))\n\n        # Meta events can have strings of variable length\n\n        if event_type == 0x0f:\n            try:\n                meta_event = self.bytes_to_int(fp.read(1))\n                (length, chunk_delta) = self.parse_varbyte_as_int(fp)\n                data = fp.read(length)\n                chunk_size += 1 + chunk_delta + length\n                self.bytes_read += 1 + length\n            except:\n                raise IOError(\"Couldn't read meta event from file.\")\n            return ({'event': event_type, 'meta_event': meta_event, 'data'\n                    : data}, chunk_size)\n        elif event_type in [12, 13]:\n\n            # Program change and Channel aftertouch events only have one parameter\n\n            try:\n                param1 = fp.read(1)\n                chunk_size += 1\n                self.bytes_read += 1\n            except:\n                raise IOError(\"Couldn't read MIDI event parameters from file.\")\n            param1 = self.bytes_to_int(param1)\n            return ({'event': event_type, 'channel': channel, 'param1'\n                    : param1}, chunk_size)\n        else:\n            try:\n                param1 = fp.read(1)\n                param2 = fp.read(1)\n                chunk_size += 2\n                self.bytes_read += 2\n            except:\n                raise IOError(\"Couldn't read MIDI event parameters from file.\")\n            param1 = self.bytes_to_int(param1)\n            param2 = self.bytes_to_int(param2)\n            return ({\n                'event': event_type,\n                'channel': channel,\n                'param1': param1,\n                'param2': param2,\n                }, chunk_size)\n\n    def parse_track_header(self, fp):\n        \"\"\"Returns the size of the track chunk.\"\"\"\n\n        # Check the header\n\n        try:\n            h = fp.read(4)\n            self.bytes_read += 4\n        except:\n            raise IOError(\"Couldn't read track header from file. Byte %d.\"\\\n                % self.bytes_read)\n        if h != 'MTrk':\n            raise HeaderError('Not a valid Track header. Byte %d.'\\\n                % self.bytes_read)\n\n        # Parse the size of the header\n\n        try:\n            chunk_size = fp.read(4)\n            self.bytes_read += 4\n        except:\n            raise IOError(\"Couldn't read track chunk size from file.\")\n        chunk_size = self.bytes_to_int(chunk_size)\n        return chunk_size\n\n    def parse_midi_file(self, file):\n        \"\"\"Parses a MIDI file. Returns the header -as a tuple containing \\\nrespectively the MIDI format, the number of tracks and the time \\\ndivision-, the parsed track data and the number of bytes read\"\"\"\n\n        try:\n            f = open(file, 'r')\n        except:\n            raise IOError('File not found')\n        self.bytes_read = 0\n        header = self.parse_midi_file_header(f)\n        tracks = header[1]\n        result = []\n        while tracks > 0:\n            events = self.parse_track(f)\n            result.append(events)\n            tracks -= 1\n        f.close()\n        return (header, result)\n\n    def parse_varbyte_as_int(self, fp, return_bytes_read=True):\n        \"\"\"Reads a variable length byte from the file and returns the corresponding \\\ninteger.\"\"\"\n\n        result = 0\n        bytes_read = 0\n        r = 0x80\n        while r & 0x80:\n            try:\n                r = self.bytes_to_int(fp.read(1))\n                self.bytes_read += 1\n            except:\n                raise IOError(\"Couldn't read variable length byte from file.\")\n            if r & 0x80:\n                result = (result << 7) + (r & 0x7F)\n            else:\n                result = (result << 7) + r\n            bytes_read += 1\n        if not return_bytes_read:\n            return result\n        else:\n            return (result, bytes_read)\n\n\nif __name__ == '__main__':\n    from sys import argv\n    from . import fluidsynth\n    from . 
import MidiFileOut\n fluidsynth.init()\n (m, bpm) = MIDI_to_Composition(argv[1])\n MidiFileOut.write_Composition('test.mid', m, bpm)\n\n","repo_name":"anthonyt/mingus-counterpoint","sub_path":"mingus/midi/MidiFileIn.py","file_name":"MidiFileIn.py","file_ext":"py","file_size_in_byte":17037,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"7423974470","text":"def searchInsert(nums, target):\r\n if target < nums[0]:\r\n return 0\r\n if target > nums[len(nums)-1]:\r\n return len(nums)\r\n a = 0\r\n b = len(nums)-1\r\n c = a+b //2\r\n while (b-a > 1):\r\n if (target > nums[c]):\r\n a = c\r\n else:\r\n b = c\r\n c = (a+b)//2\r\n if (nums[a] == target):\r\n return a\r\n return b\r\nN = int(input())\r\nclct = []\r\ntmp = input()\r\ntmp = tmp.split()\r\nfor elem in tmp:\r\n clct.append(int(elem))\r\nclct = list(set(clct))\r\nclct = sorted(clct)\r\nK = int(input())\r\nif N == 0:\r\n for _ in range(K):\r\n print(0)\r\nlst = []\r\ntmp = input()\r\ntmp = tmp.split()\r\nfor elem in tmp:\r\n elem = int(elem)\r\n x = searchInsert(clct, elem)\r\n print(x)\r\n\r\n","repo_name":"DrozdovVladimir1/yandex_algo","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1030917780","text":"import collections\nimport sys\nsys.setrecursionlimit(10000)\n\nN, M = map(int, sys.stdin.readline().split())\ngraph = collections.defaultdict(list)\ndiscovered=[]\nfor _ in range(M):\n a, b = map(int, sys.stdin.readline().split())\n graph[a].append(b)\n graph[b].append(a)\n \ndef stack_dfs(v):\n stack = [v]\n while stack:\n w = stack.pop()\n if not w in discovered:\n discovered.append(w)\n for x in graph[w]:\n stack.append(x)\n\ncnt = 0\nfor i in range(1, N+1):\n if not i in discovered:\n stack_dfs(i)\n cnt += 1\nprint(cnt)\n","repo_name":"sawol/algorithm","sub_path":"Baekjoon/연결 요소의 개수.py","file_name":"연결 요소의 개수.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14250769452","text":"import numpy as np\nfrom itertools import combinations\n\n\ndef slope(p1, p2):\n slope_x, slope_y = p1 - p2\n if slope_x == 0.:\n return float('inf')\n else:\n return slope_y / slope_x\n\n\ndef convex_hull(pts):\n \"\"\"Graham scan\"\"\"\n points = pts.copy()\n h = []\n\n # Sort points by smallest value of x\n points = points[np.lexsort(points.T[::-1])] # curve_points[np.lexsort((curve_points[:,1], curve_points[:,0]))]\n\n # The first point definitely belongs to the convex hull - take it out\n h.append(points[0])\n points = np.delete(points, 0, axis=0)\n\n # Sort remaining points in counterclockwise order by their slope (note: will be shown as clockwise in matplotlib bc we flipped the y axis)\n points = np.array(sorted(points, key=lambda pt: (slope(pt, h[0]), -pt[1], pt[0])))\n\n for p in points:\n h.append(p)\n # cross product of three points = direction of rotation\n # cross > 0: rotating left = the corner (2nd pt) in polygon formed by last three points is convex and lies on the outside\n # cross < 0: rotating right => middle point lies within the hull\n # cross == 0: three points are collinear\n while len(h) > 2 and np.cross(h[-2] - h[-3], h[-1] - h[-3]) <= 0:\n h.pop(-2)\n\n h.append(h[0]) # add first point to close the polygon\n return np.array(h)\n\n\ndef polygon_area(contour):\n \"\"\" Area of the polygon formed by given vertices (shoelace formula) 
\"\"\"\n n = len(contour) # of corners\n area = 0.0\n for i in range(n):\n j = (i + 1) % n\n area += contour[i][0] * contour[j][1]\n area -= contour[j][0] * contour[i][1]\n area = abs(area) / 2.0\n return area\n\n\ndef euclidean(p1, p2):\n # Euclidean distance is the L2 norm\n return np.linalg.norm(p2 - p1)\n\n\ndef euclidean_all(data_points):\n # compute the deltas from vectorized points\n d = np.diff(data_points, axis=0)\n # np.hypot to compute the lengths\n return np.hypot(d[:, 0], d[:, 1])\n\n\ndef get_perimeter(data_points):\n # contour perimeter, or arc length\n return np.sum(euclidean_all(data_points))\n\n\ndef get_curvature(points): # p1,p2,p3):\n # Calculating length of all three sides\n sides = [euclidean(*p_) for p_ in combinations(points, 2)]\n area = polygon_area(points)\n\n curvature = (4 * area) / np.prod(sides)\n\n return curvature, area\n\n\ndef perpendicular_distance(pt, start, end):\n \"\"\" Calculate perpendicular distance between vectors -> vector rejection of a on b \"\"\"\n b = end - start # vector AB = b - a\n a = end - pt # vector AC = c - a # a = end - pt\n\n # distance can be expressed via cross-product ||a x b|| = ||a|| ||b|| ||sin(theta)||\n # easier than first calculating delta y or delta x since we don't need to make theta-based distinction of axes etc etc\n num = np.linalg.norm(np.cross(b, a)) # cross-product of vectors = magnitude\n den = np.linalg.norm(b)\n return num / den\n","repo_name":"mshishki/amaglyph","sub_path":"geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28562282388","text":"import sys\n\ndef distance(p1,p2):\n d = (p1[0] - p2[0])**2 + (p1[1] - p2[1])**2\n return d\n\ndef dnq(start,end):\n length = end - start\n if length == 2:\n return distance(points[start],points[start+1])\n if length == 3:\n return min(distance(points[start],points[start+1]),distance(points[start],points[start+2]),distance(points[start+2],points[start+1]))\n else:\n mid = (start + end)//2\n dis = min(dnq(start,mid),dnq(mid,end))\n candi = []\n midx = points[mid][0]\n \n for i in range(start,end):\n if (points[i][0] - midx)**2 <= dis:\n candi.append(points[i])\n c_len = len(candi)\n if c_len >= 2:\n candi.sort(key = lambda x : x[1])\n for i in range(c_len -1):\n for j in range(i+1,c_len):\n if (candi[i][1] - candi[j][1])**2 > dis:\n break\n elif candi[i][0] < midx and candi[j][0] < midx:\n continue\n elif candi[i][0] >= midx and candi[j][0] >= midx:\n continue\n dis = min(dis,distance(candi[i],candi[j]))\n \n return dis\n \nn = int(input())\n\npoints = []\n\nfor i in range(n):\n \n x,y = map(int,input().split())\n \n points.append((x,y))\n\npoints = list(set(points)) \npoints.sort()\n\n\nif n != len(points):\n print(0)\nelse:\n print(dnq(0,n))","repo_name":"kjh000/Algorithm","sub_path":"algo/가장 가까운 두점(분할정복).py","file_name":"가장 가까운 두점(분할정복).py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32403220319","text":"# name of the model to train save and test\nmodel_savename = \"t5-base-frame-finetuned\"\n\n# name of the model to start training from. 
You can use local names to continue training from previously\n# saved checkpoints or choose 't5-base'/'t5-small' to start from pretrained t5.\npretrained_name = \"t5-base\"\n\n# hyperparameters\nepochs = 5\nbatchsize = 6\nlearning_rate=1e-4\ntrain_min_class_count=5\ntest_split=0.2\nval_split=0.2\n\n# default value of tries for the test.py script\nno_tries = 3\n\n# default location of dataset\ndata_csv_file = \"data/Webis-argument-framing.csv\"\ndata_path = 'data'\n\n# location of saved test set\ntestset_name = 'args_test.pkl'\n\n# default location of trained models\nmodels_path = 'trained_models'\n","repo_name":"Oliver-Tautz/T5-frame-identification","sub_path":"defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36444625128","text":"from django.urls import path\nfrom .views import BlogList, BlogCreate, BlogDelete, BlogDetail, BlogUpdate, BlogMyList\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\napp_name = \"blog\"\nurlpatterns = [\n path(\"create/\", BlogCreate.as_view(), name='create'),\n path(\"delete//\", BlogDelete.as_view(), name='delete'),\n path(\"update//\", BlogUpdate.as_view(), name='update'),\n path(\"detail//\", BlogDetail.as_view(), name='detail'),\n path(\"\", BlogList.as_view(), name='index'),\n path(\"mylist/\", BlogMyList.as_view(), name='mylist')\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n# 미디어 파일 제공하는 URL 패턴 수정 static 함수가 첫 번째 인자로 setting.py 에 설정된 MEDIA_URL 을 가져오고,\n# 키워드 인자로 미디어 파일이 위치한 경로를 전달함. - 이미지가 오류로 보이는 것을 막아 줌.\n","repo_name":"zerochobo/salstagram","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74145785865","text":"import serial\nimport time\nfrom datetime import datetime\n\ns = serial.Serial( \n port='/dev/ttyUSB0',\n baudrate = 2400,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1)\n\npost=False\nprint (s.name )\n\n#0xEE 0x00 0x00 0x10 0x03 0xFD\ns.flushInput ()\ns.flushOutput ()\ns.write (b\"\\x02\")\nnow=datetime.now()\nprint(now.strftime(\"%d %H:%M:%S.%f\") + \" --> STX\")\n\nmessage=[]\n\ntry:\n while True:\n time.sleep(0.1)\n time.sleep(0.1)\n time.sleep(0.1)\n time.sleep(0.1)\n if s.inWaiting ():\n now=datetime.now()\n #print (now.strftime(\"%d %H:%M:%S.%f\") + \" <-X- %3.2X\"% s.inWaiting (),)\n while s.in_waiting:\n char= s.read ()\n message.append(\"%2.2X\"% ord(char))\n print (now.strftime(\"%d %H:%M:%S.%f\") + \" <-- Mess \",message)\n \n if len (char):\n #print (now.strftime(\"%d %H:%M:%S.%f\") + \" <--- %2.2X\"% ord (char),)\n if ord (char)== 0x10:\n #s.write (b\"\\xEE\\x00\\x00\\x10\\x03\\xFD\")\n post=True\n elif ord (char)== 0x02:\n if post:\n \n #s.write (b\"\\x10\")\n #print (now.strftime(\"%d %H:%M:%S.%f\") + \" --> DLE\")\n a=1\n elif not post:\n \n #s.write (b\"\\x02\")\n #print (now.strftime(\"%d %H:%M:%S.%f\") + \" --> STX\")\n a=2\n if len(message) == 1:\n if message[0] == '02':\n s.write (b\"\\x10\")\n print (now.strftime(\"%d %H:%M:%S.%f\") + \" --> DLE2\")\n if len(message) >=2:\n s.write (b\"\\x10\")\n now=datetime.now()\n print (now.strftime(\"%d %H:%M:%S.%f\") + \" --> DLE3\")\n message.clear()\n\nexcept KeyboardInterrupt as e:\n 
logging.info(\"Stopping...\")\n","repo_name":"KMatuszewski/myHome","sub_path":"myBuderus/s.Logamatic2107.1.py","file_name":"s.Logamatic2107.1.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13793757190","text":"import cv2\nimport mediapipe as mp\nimport time \n\ncapture = cv2.VideoCapture(0, )\n\nmpHands = mp.solutions.hands\nhands = mpHands.Hands()\n\nmpDraw = mp.solutions.drawing_utils\n\npTime = 0\ncTime = 0\n\nwhile True:\n success, img = capture.read()\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n results = hands.process(imgRGB)\n #print(results.multi_hand_landmarks)\n\n h, w, c = img.shape\n\n if results.multi_hand_landmarks:\n for handLandMrks in results.multi_hand_landmarks:\n for id, landm in enumerate(handLandMrks.landmark):\n cx, cy = int(landm.x*w), int(landm.y*h)\n print(id, cx, cy)\n if id == 4:\n cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)\n\n mpDraw.draw_landmarks(img, handLandMrks, mpHands.HAND_CONNECTIONS)\n\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n\n cv2.putText(img, str(int(fps)), (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 4)\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n\n","repo_name":"pavelkochkin1/hand-tracking","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31052221618","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nReadme!\n\nRun from terminal/command line by:\n cd to inside the directory where this file is located\n python -i CreateReport.py\n >>>CreateReport(logpath,daterange)\nlogpath should be relative (if the log is called \"log.txt\" and is in folder B \nwhich is in Folder A where this file is, then the path should be 'B/log.txt')\n\nTested on the supplied logs (examplelog 1-3) in Ubuntu 16.04.\n\n\"\"\"\nimport pandas as pd\nfrom datetime import datetime as dt\n\n#Helper function to convert string to datetime object and then to unix time \ndef ToDT(datetimestring):\n if(len(datetimestring) == 22):\n return dt.strptime(datetimestring, '%Y-%m-%d %H:%M:%S%Z').timestamp()\n else:\n return dt.strptime(datetimestring, '%Y-%m-%d %H:%M:%S').timestamp()\n\n\n#Report creation function\ndef CreateReport(logpath, daterange):\n log = pd.read_csv(logpath)\n\n reportdf = pd.DataFrame(columns=['url','page views','visitors']) #The actual report\n if len(log) == 0 or daterange is None:\n return reportdf #Nothing to show, return empty report with the column titles only\n else:\n reqtimestart = ToDT(daterange[:19])\n reqtimeend = ToDT(daterange[:11]+daterange[22:])\n \n #Dropping the rows with timestamps outside the requested date-range \n for l in log['timestamp']:\n if ToDT(l) < reqtimestart or ToDT(l) > reqtimeend:\n idx = log[log['timestamp']==l].index\n log.drop(idx, inplace=True) \n \n urlarr = list(set(log['url'])) #set of unique url values from the log\n\n reportdf = pd.DataFrame(columns=['url','page views','visitors']) #The actual report\n reportdf['url'] = urlarr #urls column\n reportdf.fillna(0)\n\n #Finding number of page views and adding to the report\n for r in reportdf['url']:\n idx = reportdf[reportdf['url'] == r].index\n reportdf.loc[idx,'page views'] = log.url.value_counts()[r]\n \n \n #Finding number of unique visitors for each page and adding to the report\n for r in reportdf['url']:\n uni = log.loc[log['url']==r].userid.nunique() #Number 
of unique visitors for the rows containing a particular url\n idx = reportdf[reportdf['url'] == r].index\n reportdf.loc[idx,'visitors'] = uni\n \n\n return reportdf\n\n","repo_name":"Nuculais/tracking-pixel-report-system","sub_path":"CreateReport.py","file_name":"CreateReport.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26446183129","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Curso de visualizacion de Datos \n\n# **Estudiante : Richard Douglas Grijalba**\n# \n# **Modalidad : virtual**\n\n# ## Este coresponde a un curso de la especialidad de Ciencia Datos con python. Grow up\n\n# In[1]:\n\n\n1+1\n\n\n# In[2]:\n\n\n2*6\n\n\n# In[3]:\n\n\n5/8\n\n\n# In[4]:\n\n\nimport os\n\n\n# In[5]:\n\n\nos.getcwd()\n\n\n# In[6]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime as dt\nimport matplotlib.ticker as ticker\n\npath = r'C:\\Users\\Rdouglas\\Documents\\Python Scripts\\curso Grow Up\\Visualizacion Datos'\nlist_orders = pd.read_csv(path+'\\List of Orders.csv', sep=';',header=0, index_col=False,\n encoding='latin-1', engine = 'python')\norders_details = pd.read_csv(path+'\\Order Details.csv', sep=',', header=0, index_col=False,\n encoding='latin-1',engine='python')\n\n\n# In[7]:\n\n\nlist_orders.head()\n\n\n# In[8]:\n\n\norders_details.head()\n\n\n# ## Transformacion de Datos\n\n# In[9]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime as dt\n\n\n# In[10]:\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[11]:\n\n\nlist_orders['Order Date'] = pd.to_datetime(list_orders['Order Date']) # para cambiar a formato de fecha\n\n\n# In[12]:\n\n\n# unir las bases\ndf = pd.merge(orders_details, list_orders,\n left_on= 'Order ID',\n right_on= 'Order ID')\n\n\n# In[13]:\n\n\n# Creamos las columas de Mes y Año\ndf['Mes'] = df['Order Date'].dt.month\ndf['Anio'] = df['Order Date'].dt.year\ndf.head()\n\n\n# In[14]:\n\n\n# **Crear una agrupacion**\ndf_mes = pd.DataFrame(df.groupby(['Mes'])[['Amount','Quantity']].sum()).reset_index()\n\n\n# In[15]:\n\n\nprint(df_mes)\n\n\n# ## 2.4 Crear una carcaza en donde se ubicaran los gráficos\n\n# In[16]:\n\n\nfig, ax = plt.subplots()\nplt.show;\n\n\n# In[17]:\n\n\n# Este corresponde a la creación del primer gráfico\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'])\nplt.show()\n\n\n# **Probar tipos de markers**\n\n# In[18]:\n\n\n#Crear un graico de lineas \n# se va a trabajar los marcadores 'o', 'v', tipo de linea '--', 'None', color = 'r'\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='o')\nplt.show()\n\n\n# In[19]:\n\n\n#Crear un graico de lineas \n# se va a trabajar los marcadores 'o', 'v', tipo de linea '--', 'None', color = 'r'\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='v')\nplt.show()\n\n\n# In[20]:\n\n\n#Crear un graico de lineas \n# se va a trabajar los marcadores 'o', 'v', tipo de linea '--', 'None', color = 'r'\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='.')\nplt.show()\n\n\n# In[21]:\n\n\n#Crear un graico de lineas \n# se va a trabajar los marcadores 'o', 'v', tipo de linea '--', 'None', color = 'r'\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='x')\nplt.show()\n\n\n# In[22]:\n\n\n#Crear un graico de lineas \n# se va a trabajar los marcadores 'o', 
'v', tipo de linea '--', 'None', color = 'r'\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h')\nplt.show()\n\n\n# **Probar tipos de lineas**\n\n# In[23]:\n\n\n# probar varios tipos de lineas\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = '--')\nplt.show()\n\n\n# In[24]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = '-.')\nplt.show()\n\n\n# In[25]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':')\nplt.show()\n\n\n# In[26]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':', color = 'r')\nplt.show()\n\n\n# In[27]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':', color = 'b')\nplt.show()\n\n\n# In[28]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':', color = 'g')\nplt.show()\n\n\n# In[29]:\n\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':', color = 'g')\nplt.show()\n\n\n# ### Personalizar el grafico de lineas\n\n# In[30]:\n\n\n# agregando un eje secundario\n\nfig, ax = plt.subplots()\nax.plot(df_mes['Mes'],df_mes['Amount'],\n marker='h', linestyle = ':', color = 'b')\nax.plot(df_mes['Mes'],df_mes['Quantity'],\n marker='v', color= 'g')\nax.set_xlabel ('Mes') # nmbre del eje x\nax.set_ylabel ('Venta vs cantidad') # nombre del eje y\nax.set_title('Analisis de Venta y Cantidad por Mes') # agrega el titulo\nplt.show()\n\n\n# ### matplotlib.colors\n# b: blue\n# \n# g: green\n# \n# r: red\n# #\n# c: cyan\n# \n# m: magenta\n# \n# y: yellow\n# \n# k: black\n# \n# w: white # https://matplotlib.org/2.0.1/api/colors_api.html\n\n# ## Varios graficos en una Figura\n\n# In[31]:\n\n\n# se desea agregar diferentes graficos en un espacio\nfig, ax = plt.subplots(2,1)\nplt.show;\n\n\n# In[32]:\n\n\nfig, ax = plt.subplots(2,1, sharex = True)\n\n#sharex o sharey = True esto indica que los eje son iguales o compartidos\n\nax[0].plot(df_mes['Mes'], df_mes['Amount'],\n marker= '.', linestyle = '-.', color ='b')\n\nax[1].plot(df_mes['Mes'], df_mes['Quantity'],\n marker='v', linestyle= None, color = 'g')\n\nax[0].set_ylabel('Sales')\nax[1].set_ylabel('Quantity')\nax[1].set_xlabel('Mes')\nplt.show;\n\n\n# ## 3.5 graficos de dos eje y diferentes escalas\n\n# In[33]:\n\n\n# los parametros correspondientes a cada uno de los ejes\n# invertir ejes , cambiar escalas, cambiar valor de inicio de la escala\n# limites de la escala\n\nfig, ax = plt.subplots()\n\nax.plot(df_mes['Mes'], df_mes['Amount'], color = 'blue')\nax.set_xlabel('Mes')\nax.set_ylabel('Ventas', color= 'blue')\nax.tick_params('y', colors = 'blue')\nax2 = ax.twinx()\nax2.plot(df_mes['Mes'], df_mes['Quantity'], color = 'magenta')\nax2.set_ylabel('Cantidad Unidades', color= 'magenta')\nax2.tick_params('y', colors = 'magenta')\nplt.show()\n\n\n# In[34]:\n\n\n# invertir los ejes y modificar la escala \n\nfig, ax = plt.subplots()\n\nax.plot(df_mes['Mes'], df_mes['Amount'], color = 'blue')\nax.set_xlabel('Mes')\nax.set_ylabel('Ventas', color= 'blue')\nax.tick_params('y', colors = 'blue')\nax2 = ax.twinx()\nax2.plot(df_mes['Mes'], df_mes['Quantity'], color = 'magenta')\nax2.set_ylabel('Cantidad Unidades', color= 'magenta')\nax2.tick_params('y', colors = 'magenta')\nax.invert_yaxis()\nax.set_ylim(0,70000) # parametro d eescala\nax2.set_ylim(0,750)\nplt.show()\n\n\n# ## 4.1 Grafico Barras 
Horizontales\n\n# In[35]:\n\n\n# Agrupamos categora - ventas - cantidad\n\ndf_categoria = pd.DataFrame(df.groupby(['Category'])[['Amount','Quantity']].sum())\ndf_categoria.head()\n\n\n# In[36]:\n\n\n# Graficos de barras horizontales (barras)\n\nfig, ax = plt.subplots()\n\nax.barh(df_categoria.index, df_categoria['Quantity'])\nplt.show()\n\n\n# ## Grafico Barras Verticales\n\n# In[37]:\n\n\nfig, ax = plt.subplots()\n\nax.bar(df_categoria.index, df_categoria['Amount'], label = 'Sales')\nax.legend()\nax.set_ylabel('Ventas por Categoria')\nplt.show()\n\n\n# ## Grafico de Barras Apiladas\n\n# In[38]:\n\n\nfig, ax = plt.subplots()\nax.bar(df_categoria.index, df_categoria['Amount'], label = 'Sales')\nax.bar(df_categoria.index, df_categoria['Quantity'],\n bottom= df_categoria['Amount'], label = 'Quantity')\nax.legend()\nax.set_ylabel('Ventas por Categoria')\nplt.show()\n\n\n# In[39]:\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[40]:\n\n\nfig, ax = plt.subplots()\nax.bar(df_categoria.index, df_categoria['Amount'], label = 'Sales')\nax.bar(df_categoria.index, df_categoria['Quantity'],\n bottom= df_categoria['Amount'], label = 'Quantity')\nax.legend()\nax.set_xticklabels(df_categoria.index, rotation = 45)\nax.set_ylabel('Ventas por Categoria')\nfig.savefig('grafico_bar1.png', dpi = 200)\nplt.show()\n\n\n# ## Graficos de Barras con Anotaciones\n\n# In[41]:\n\n\n# grafico de barras con anotaciones\n\nwidth = 0.35 # el ancho de las barras\nfig, ax = plt.subplots()\n\nx=np.arange(len(df_categoria.index))\n\nrects1= ax.bar(df_categoria.index,df_categoria['Amount'], width, label = 'Sales')\nrects2= ax.bar(df_categoria.index,df_categoria['Quantity'], width, label = 'Quantity')\nax.legend()\nax.set_ylabel('Ventas')\nax.set_xlabel('Categorias')\nax.set_title('Grafico Apilado de Ventas y Unidades')\nax.set_xticklabels(df_categoria.index, rotation = 45)\nax.set_xticks(x)\n\ndef autolabel(rects):\n \"\"\"Funcion para agregar una etiqueta con el valor de cada barra \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy= (rect.get_x() + rect.get_width()/2, height),\n xytext = (0,5), \n textcoords= 'offset points',\n ha = 'center', va = 'bottom')\n\n#añadir las etiquetas para cada barra\nautolabel(rects1)\nautolabel(rects2)\nfig.tight_layout()\nplt.show()\n\n\n# In[42]:\n\n\n# darle un color distinto a cada barra segun la categoria \nfig, ax = plt.subplots()\n\nax.bar(df_categoria.index, df_categoria['Amount'], label = 'Sales',color=['blue','red','green'])\nax.set_ylabel('Ventas por Categoria')\n\nplt.show()\n\n\n# ## Grafico de dispersion\n\n# In[43]:\n\n\ndf.head()\n\n\n# In[44]:\n\n\n# segmentar el dataframe en 2 años\ndf_fecha = pd.DataFrame(df.groupby(['Order Date','Anio'])[['Amount','Quantity']].sum()).reset_index()\n\nper_19= df_fecha[df_fecha['Anio']==2019]\nper_18= df_fecha[df_fecha['Anio']==2018]\ndf_fecha.head()\n\n\n# In[45]:\n\n\n# grafio de dispersion\n\nfig, ax = plt.subplots()\n\nax.scatter(per_18['Quantity'],per_18['Amount'],\n color= 'red', label='2018')\nax.scatter(per_19['Quantity'],per_19['Amount'],\n color= 'blue', label='2019')\nplt.show()\n\n\n# ## Crear Anotaciones en el Grafico de Dispersion\n\n# In[46]:\n\n\n# grafio de dispersion\n\nfig, ax = plt.subplots()\n\nax.scatter(per_18['Quantity'],per_18['Amount'],\n color= 'red', label='2018')\nax.scatter(per_19['Quantity'],per_19['Amount'],\n color= 'blue', label='2019')\nax.annotate('Venta Max', xy = (130,14000), xycoords = 'data',\n xytext = (0.8,0.95), textcoords = 'axes 
fraction',\n arrowprops= dict(facecolor ='black', shrink = 0.05),\n horizontalalignment= 'right', verticalalignment = 'top') # esto modifica la ubicacion de la flecha\nax.set_xlabel('Cantidad')\nax.set_ylabel('Ventas')\nax.legend() # esto agrega la leyenda de los años \nplt.show()\n\n\n# In[47]:\n\n\n# grafio de dispersion\n\n# cambiar el fondo del grafico\n\nplt.style.use('default')\nplt.style.use('ggplot') # esto agrega el estilo de grafico fondo tipo ggplot\nfig, ax = plt.subplots()\n\nax.scatter(per_18['Quantity'],per_18['Amount'],\n color= 'red', label='2018')\nax.scatter(per_19['Quantity'],per_19['Amount'],\n color= 'blue', label='2019')\nax.annotate('Venta Max', xy = (130,14000), xycoords = 'data',\n xytext = (0.8,0.95), textcoords = 'axes fraction',\n arrowprops= dict(facecolor ='black', shrink = 0.05),\n horizontalalignment= 'right', verticalalignment = 'top')\nax.set_xlabel('Cantidad')\nax.set_ylabel('Ventas')\nax.legend()\nplt.show()\n\n\n# ## Graficos Estilo Boxplot\n\n# In[48]:\n\n\nplt.style.use('default')\n\nfig, ax = plt.subplots()\n\nax.boxplot(per_19['Quantity'])\nplt.show()\n\n\n# ## Introduccion a Seaborn\n\n# **Iniciar** con la importacion de un dataset corresponde a estadisticas de los pokemon\n\n# In[49]:\n\n\nimport os\nos.getcwd()\n\n\n# In[50]:\n\n\n\npath = r'C:\\Users\\Rdouglas\\Documents\\Python Scripts\\curso Grow Up\\Visualizacion Datos\\Video_Games.csv'\nvideo_games= pd.read_csv(path,sep=';',header=0, index_col=False, encoding='latin-1',\n engine ='python')\n\npath1 = r'C:\\Users\\Rdouglas\\Documents\\Python Scripts\\curso Grow Up\\Visualizacion Datos\\Pokemon.csv'\npokemon= pd.read_csv(path1, sep=';',header=0, index_col=False, encoding='latin-1', engine = 'python')\n\n\n# In[51]:\n\n\n# eplorar el dataset\n#video games\nvideo_games.head()\n\n\n# In[52]:\n\n\nvideo_games.info()\n\n\n# In[53]:\n\n\nvideo_games.describe()\n\n\n# In[54]:\n\n\n# Pokemon\npokemon.head()\n\n\n# In[55]:\n\n\npokemon.info()\n\n\n# In[56]:\n\n\npokemon.describe()\n\n\n# ### Los histogramas\n# Los histogramas muestran la forma de sus datos. El eje horizontal muestra sus valores de datos, \n# con cada barra correspondiendo a un rango de valores. El eje vertical muestra cuántos puntos de\n# datos tienen valores en el rango de cada barra\n\n# **Una equeña guía de como interpretar los histogramas**\n# \n# Paso 1: Evaluar las características clave : Identifique los picos, que son los conglomerados más altos de las barras. Los picos representan los valores más comunes. Evalúe la dispersión de su muestra para entender qué tanto varían sus datos.\n# \n# Paso 2: Buscar indicadores de datos inusuales o no normales : Los datos asimétricos y los datos multimodales indican que los datos podrían ser no normales. Los valores atípicos pueden indicar otras condiciones en sus datos.\n# \n# Paso 3: Evaluar el ajuste de una distribución: Si su histograma tiene una línea de distribución ajustada, evalúe que tan cerca siguen las alturas de las barras la forma de la línea. 
Si las barras siguen de cerca la línea de distribución ajustada, entonces los datos se ajustan adecuadamente a la distribución.\n# \n# Paso 4: Evaluar y comparar los grupos: Si su histograma tiene grupos, evalúe y compare el centro y la dispersión de los grupos.\n# \n# https://support.minitab.com/es-mx/minitab/19/help-and-how-to/graphs/histogram/interpret-the-results/key-results/\n\n# ![image.png](attachment:image.png)\n\n# In[57]:\n\n\n# Ver distribuciones , primero crear un histograma \nsns.histplot(video_games['Critic_Score'], kde= False, bins= 15, fill= False, element = 'step')\nplt.show()\n\n#fill= False quita el color o relleno de las barras\n# element = 'step' quita las lineas divisorias de las barras\n\n\n# In[58]:\n\n\nsns.histplot(video_games['Critic_Score'], kde= True) # con la opcion kde= true permite ver la linea\nplt.show()\n\n\n# In[59]:\n\n\nsns.histplot(pokemon['Attack'], kde= True, color = 'r') # con la opcion kde= true permite ver la linea\nplt.show()\n\n\n# In[60]:\n\n\nsns.histplot(pokemon['Speed'], kde= True, color = 'gold') # con la opcion kde= true permite ver la linea\nplt.show()\n\n\n# ### Graficos de Densidad\n\n# Un gráfico de densidad visualiza la distribución de datos en un intervalo continuo.\n# todo aquello que se dibuje debajo de los curva es el 100% de los datos\n\n# In[61]:\n\n\nfig, ax = plt.subplots()\nsns.distplot(video_games['Critic_Score'])\nax.set(xlabel= 'Puntaje Critico', \n xlim=(20,100),\n title= 'Video Juegos')\nax.set(ylabel= 'Densidad')\nplt.show()\n\n# esta version de grafico será remobida en este paquete y se tendrá que generar de forma diferente\n\n\n# In[62]:\n\n\n# para crear el grafico con paquetes recientes\n# visualizar la frecuencia de la informacion \n\nsns.displot(data= video_games['Critic_Score'], kind= 'kde', fill = True)\nplt.show()\n\n\n# In[63]:\n\n\nsns.displot(data= video_games['Critic_Score'], kind= 'kde', fill = True, color= 'dimgray')\nplt.show()\n\n\n# In[64]:\n\n\nsns.displot(data= video_games['Critic_Score'], kind= 'kde', fill = True, color= 'springgreen')\nplt.show()\n\n\n# ### Grafico de Enjambre\n\n# In[65]:\n\n\npokemon.head()\n\n\n# In[66]:\n\n\n#Un gráfico de enjambre es un tipo de gráfico de dispersión que se utiliza para representar valores categóricos\n# permite ver la distribucion de los datos\n\nsns.swarmplot(data = pokemon, \n x= 'Type 1',\n y='Total')\nplt.show()\n\n\n# In[67]:\n\n\nsns.swarmplot(data = pokemon, \n x= 'Type 1',\n y='Total',\n s= 3) # agregando el criterio s=3 esto baja la cantidad de los puntos por defaul genera en 5\nplt.show()\n\n\n# ### Grafico de Banda\n\n# In[68]:\n\n\n# este grafico muestra la distribucion de los datos y pemrite ver los datos\n# atipicos\n\nsns.stripplot(data = pokemon,\n x= 'Type 1',\n y= 'Total',\n jitter = True)\nplt.show()\n\n\n# ### Crear Boxplots con SEABORN\n\n# Para ver las paletas predefinidas \n# https://matplotlib.org/stable/gallery/color/named_colors.html\n\n# In[69]:\n\n\nsns.boxenplot(data = pokemon,\n x= 'Type 1',\n y= 'Total',\n palette= 'Paired')\nplt.show()\n\n\n# In[70]:\n\n\nsns.boxenplot(data = pokemon,\n x= 'Type 1',\n y= 'Total',\n palette= 'Accent') # aplicar una variacion a la paleta\nplt.show()\n\n\n# In[71]:\n\n\nsns.boxenplot(data = pokemon,\n x= 'Type 1',\n y= 'Total',\n palette= 'tab10') # aplicar una variacion a la paleta\nplt.show()\n\n\n# In[72]:\n\n\n# boxplot\n\nsns.boxplot(data= pokemon,\n x='Type 1',\n y='Total')\n\nplt.show()\nplt.clf() # esto permite observar la calidad de la imagen del grafico\n\n\n# ### Grafico de Violin\n\n# 
In[73]:\n\n\nsns.violinplot(data= pokemon,\n x='Type 1',\n y='Total')\nplt.show()\n\n\n# In[74]:\n\n\nsns.violinplot(data= pokemon,\n x='Type 1',\n y='Total',\n bw = 0.25) # este criterio altera los anchos de los violines\nplt.show()\n\n\n# ## 6.1. Graficos Dinamicos\n\n# In[75]:\n\n\n## verificar la instalacion de los paquetes necesarios\n## pip install bar_chart_race\n\nimport bar_chart_race as bcr\nfrom IPython.display import HTML\npath = r'C:\\Users\\Rdouglas\\Documents\\Python Scripts\\curso Grow Up\\Visualizacion Datos'\nunits_sales = pd.read_csv(path+'\\Dinamico.csv', sep=';',header=0, index_col=False,\n encoding='latin-1', engine = 'python')\n\nunits_sales.set_index('Order Date', inplace=True) # definimos Order Date como indice\nunits_sales_acum = units_sales.cumsum(axis=0) # se acumulan las ventas por region\nunits_sales.head()\n\n\n# In[76]:\n\n\nunits_sales_acum\n\n\n# In[77]:\n\n\n# Grafico Dinamico\nbcr.bar_chart_race(df= units_sales_acum, filename = None,\n figsize = (3.5,3), title = 'Venta Acumulada Enero 2014')\n\n\n# ## Informacion Util \n\n# ![image.png](attachment:image.png)\n\n# Para ver las paletas predefinidas https://matplotlib.org/stable/gallery/color/named_colors.html\n\n# ![image.png](attachment:image.png)\n\n# ![image.png](attachment:image.png)\n","repo_name":"dougnow/-Portfolio","sub_path":"Python/Curso-Visualizacion Datos Python/Introduccion Visualizacion de Datos.py","file_name":"Introduccion Visualizacion de Datos.py","file_ext":"py","file_size_in_byte":17896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39448045096","text":"#!/bin/env python\nfrom ROOT import *\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\n\nfrom plots.common.utils import *\nimport sys\nfrom array import array\nfrom copy import copy\n\nimport argparse\nparser = argparse.ArgumentParser(\n description='Add the MVA friend tree with BDT values'\n)\nparser.add_argument(\n '-d',\n '--debug', required=False, default=False, action='store_true', dest='debug',\n help='Enable debug printout'\n)\nparser.add_argument(\n \"-f\",\n \"--file\", type=str, required=True, dest=\"fname\", default=None,\n help=\"the root file to use\"\n)\nparser.add_argument(\n \"-c\",\n \"--channel\", type=str, required=False, choices=[\"mu\", \"ele\"], dest=\"channel\", default=None,\n help=\"the lepton channel to use\"\n)\nargs = parser.parse_args()\nif not args.channel:\n if \"/mu/\" in args.fname:\n args.channel = \"mu\"\n elif \"/ele/\" in args.fname:\n args.channel = \"ele\"\n else:\n raise ValueError(\"channel not specified: %\", args.fname)\n\nlogger = logging.getLogger('addMVAasFriend.py')\n\nif args.debug:\n logger.setLevel(logging.DEBUG)\n\nproc = args.channel\nfname = args.fname\n\nfrom sampleList import varRank\n\n# Create reader and relate variables\nreader = {}\nvarlist = varRank[proc]\nvars={}\nmvalist=[]\nvlist=[]\nvlistMva={}\nbase='BDT_with'\nfor v in varlist:\n vars[v] = array('f',[0])\n base+='_%s' % v\n mvalist+=[base]\n vlist+=[v]\n vlistMva[base]=copy(vlist)\n\n# Book the MVA's\nmva={}\nfor m in mvalist:\n reader[m] = TMVA.Reader()\n mva[m] = array('f',[0])\n for v in vlistMva[m]:\n reader[m].AddVariable(v,vars[v])\n reader[m].BookMVA(m,\"weights/stop_\"+proc+\"_\"+m+\".weights.xml\")\n\n# Run over files and add all the MVA's to the trees\nlogger.info(\"Starting: %s\" % fname)\ntf=TFile(fname,'UPDATE')\nt=tf.Get(\"trees/Events\")\ntf.cd('trees')\nmt=TTree(\"MVA\",\"MVA\")\nt.SetBranchStatus(\"*\",0)\nbranch={}\nfor v in 
varlist:\n t.SetBranchStatus(v,1)\n t.SetBranchAddress(v,vars[v])\nfor m in mvalist:\n branch[m]=mt.Branch('mva_'+m,mva[m],'mva_'+m+'/F')\nfor i in range(t.GetEntries()):\n t.GetEntry(i)\n for m in mvalist:\n calc = True\n for v in varlist:\n if not vars[v][0] == vars[v][0]:\n calc = False\n if calc:\n mva[m][0] = reader[m].EvaluateMVA(m)\n else:\n mva[m][0] = float('nan')\n logger.debug('i: %d, mva: %1.3f, vars: %s' % (i,mva[m][0],str(vars)))\n mt.Fill()\nmt.Write('',TObject.kOverwrite)\ntf.Close()\n\n","repo_name":"HEP-KBFI/stpol","sub_path":"mvatools/addMVAasFriend.py","file_name":"addMVAasFriend.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24903414157","text":"import traceback\r\nfrom rply import ParserGenerator\r\nfrom logic4py.formula import AtomFormula, PredicateFormula, NegationFormula, BinaryFormula, AndFormula, OrFormula, ImplicationFormula, BiImplicationFormula, QuantifierFormula, ExistentialFormula, UniversalFormula\r\nfrom logic4py.lexer import Lexer\r\n\r\n# PARSER of a Formula\r\nclass ParserFormula():\r\n def __init__(self, state):\r\n self.state = state\r\n self.pg = ParserGenerator(\r\n # A list of all token names accepted by the parser.\r\n ['COMMA', 'OPEN_PAREN', 'CLOSE_PAREN', 'NOT',\r\n 'AND', 'OR', 'BOTTOM','ATOM', 'IMPLIE', 'IFF',\r\n 'VAR','EXT','ALL' ],\r\n #The precedence $\\lnot,\\forall,\\exists,\\land,\\lor,\\rightarrow,\\leftrightarrow$\r\n precedence=[\r\n ('right', ['IFF']),\r\n ('right', ['IMPLIE']),\r\n ('right', ['OR']),\r\n ('right', ['AND']),\r\n ('right', ['EXT']),\r\n ('right', ['ALL']),\r\n ('right', ['NOT']),\r\n ]\r\n )\r\n\r\n def parse(self):\r\n @self.pg.production('program : formula')\r\n def program(p):\r\n rule_info = p[0]\r\n return p[0][1]\r\n\r\n @self.pg.production('formula : EXT formula')\r\n @self.pg.production('formula : ALL formula')\r\n @self.pg.production('formula : formula OR formula')\r\n @self.pg.production('formula : formula AND formula')\r\n @self.pg.production('formula : formula IMPLIE formula')\r\n @self.pg.production('formula : formula IFF formula')\r\n @self.pg.production('formula : NOT formula')\r\n @self.pg.production('formula : ATOM OPEN_PAREN variableslist CLOSE_PAREN')\r\n @self.pg.production('formula : ATOM')\r\n @self.pg.production('formula : BOTTOM')\r\n def formula(p):\r\n #print(p)\r\n if len(p) < 3:\r\n if p[0].gettokentype() == 'ATOM':\r\n return p[0], AtomFormula(key=p[0].value)\r\n elif p[0].gettokentype() == 'BOTTOM':\r\n return p[0], AtomFormula(key=p[0].value)\r\n elif p[0].gettokentype() == 'NOT':\r\n result = p[1]\r\n return p[0], NegationFormula(formula=result[1]) \r\n elif( not type(p[0]) is tuple):\r\n result1 = p[0]\r\n result2 = p[1]\r\n # Universal Formula\r\n if p[0].gettokentype() == 'EXT': \r\n var = p[0].value.split('E')[1]\r\n return p[0], ExistentialFormula(variable=var, formula=p[1][1])\r\n elif p[0].gettokentype() == 'ALL': \r\n var = p[0].value.split('A')[1]\r\n return p[0], UniversalFormula(variable=var, formula=p[1][1])\r\n elif len(p)==4:\r\n # Predicate Formula\r\n name = p[0]\r\n varlist = p[2]\r\n return p[0], PredicateFormula(name=p[0].value,variables=varlist[1]) \r\n elif len(p) == 3:\r\n # Binary Formula\r\n result1 = p[0]\r\n result2 = p[2]\r\n if(p[1].value=='&'):\r\n return result1[0], AndFormula(left=result1[1], right=result2[1])\r\n elif(p[1].value=='|'):\r\n return result1[0], OrFormula(left=result1[1], right=result2[1])\r\n elif(p[1].value=='->'):\r\n return 
result1[0], ImplicationFormula(left=result1[1], right=result2[1])\r\n elif(p[1].value=='<->'):\r\n return result1[0], BiImplicationFormula(left=result1[1], right=result2[1])\r\n else:\r\n return result1[0], BinaryFormula(key=p[1].value, left=result1[1], right=result2[1])\r\n\r\n @self.pg.production('formula : OPEN_PAREN formula CLOSE_PAREN')\r\n def paren_formula(p):\r\n result = p[1]\r\n return p[0], result[1]\r\n\r\n @self.pg.production('variableslist : VAR')\r\n @self.pg.production('variableslist : VAR COMMA variableslist')\r\n def variablesList(p):\r\n if len(p) == 1:\r\n return p[0], [p[0].value]\r\n else:\r\n result = p[2]\r\n return p[0], [p[0].value] + result[1]\r\n\r\n\r\n @self.pg.error\r\n def error_handle(token):\r\n productions = self.state.splitlines()\r\n error = '' \r\n\r\n if(productions == ['']):\r\n error = 'None formula was submitted.'\r\n if token.gettokentype() == '$end':\r\n error = 'None formula was submitted.'\r\n else:\r\n source_position = token.getsourcepos()\r\n error = 'The formula definition is not correct, check that all rules were applied correctly.\\nRemember that a formula is defined by the following BNF:\\nF :== P | ~ P | Q&A | P | Q | P -> Q | P <-> Q | (P), where P,Q are atoms.\\n'\r\n error += \"Sintax error:\\n\"\r\n error += productions[source_position.lineno - 1]\r\n string = '\\n'\r\n for i in range(source_position.colno -1):\r\n string += ' '\r\n string += '^'\r\n if token.gettokentype() == 'OUT':\r\n string += ' Symbol does not belong to the language.'\r\n error += string\r\n \r\n raise ValueError(\"@@\"+error)\r\n\r\n def get_error(self, type_error, token_error, rule):\r\n productions = self.state.splitlines()\r\n column_error = token_error.getsourcepos().colno\r\n erro = \"Syntax error in line {}:\\n\".format(token_error.getsourcepos().lineno)\r\n erro += productions[token_error.getsourcepos().lineno-1] + \"\\n\"\r\n for i in range(column_error-1):\r\n erro += ' '\r\n \r\n return erro\r\n \r\n def get_parser(self):\r\n return self.pg.build()\r\n @staticmethod\r\n def getFormula(input_text=''):\r\n lexer = Lexer().get_lexer()\r\n tokens = lexer.lex(input_text)\r\n\r\n pg = ParserFormula(state=input_text)\r\n pg.parse()\r\n parser = pg.get_parser()\r\n result = parser.parse(tokens)\r\n return result\r\n\r\n\r\ndef get_formula(input_formula=''):\r\n try:\r\n return ParserFormula.getFormula(input_formula)\r\n except ValueError:\r\n #s = traceback.format_exc()\r\n return None\r\n else:\r\n return None\r\n pass\r\n","repo_name":"daviromero/logic4py","sub_path":"src/logic4py/parser_formula.py","file_name":"parser_formula.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"34023014493","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\nclass SinglyLinkedListNode:\n def __init__(self, node_data):\n self.data = node_data\n self.next = None\n\n\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def insert_node(self, node_data):\n node = SinglyLinkedListNode(node_data)\n\n if not self.head:\n self.head = node\n else:\n self.tail.next = node\n\n self.tail = node\n\n\ndef print_singly_linked_list(node, sep, fptr):\n while node:\n fptr.write(str(node.data))\n\n node = node.next\n\n if node:\n fptr.write(sep)\n\n\n#\n# Complete the 'distinct' function below.\n#\n# The function is expected to return an INTEGER_SINGLY_LINKED_LIST.\n# The function accepts INTEGER_SINGLY_LINKED_LIST 
head as parameter.\n#\n\n#\n# For your reference:\n#\n# SinglyLinkedListNode:\n# int data\n# SinglyLinkedListNode next\n#\n#\n\ndef distinct(head):\n\t# Write your code here\n\tnode = head\n\tprevious = None\n\tvalue_tracker = {}\n\twhile node is not None:\n\t\tif node.data in value_tracker:\n\t\t\tnode = node.next\n\t\t\t# cast away the duplicate\n\t\t\tprevious.next = node\n\t\telse:\n\t\t\tvalue_tracker[node.data] = True\n\t\t\tprevious = node\n\t\t\tnode = node.next\n\treturn head\n\n\nif __name__ == '__main__':\n\tif os.environ.get('OUTPUT_PATH') is not None:\n\t\tfptr = open(os.environ['OUTPUT_PATH'], 'w')\n\telse:\n\t\tfptr = sys.stdout\n\n\thead_count = int(input().strip())\n\n\thead = SinglyLinkedList()\n\n\tfor _ in range(head_count):\n\t\thead_item = int(input().strip())\n\t\thead.insert_node(head_item)\n\n\tresult = distinct(head.head)\n\n\tprint_singly_linked_list(result, '\\n', fptr)\n\tfptr.write('\\n')\n\n\tfptr.close()\n","repo_name":"btruhand/prep-questions","sub_path":"hackerrank/kulani-1/linked-list-redundancy.py","file_name":"linked-list-redundancy.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27124853895","text":"from gin_rummy import GinRummy\nfrom player import Bot\n\nif __name__ == '__main__':\n # Create a Gin Rummy game instance\n game = GinRummy()\n\n # Create a bot player and add it to the game\n bot = Bot(game)\n game.set_bot(bot)\n\n # Start the game\n game.play()\n","repo_name":"rawbeen248/Gin-Rummy-AI-vs-Human","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73219831306","text":"'''\nLoad K-folding models from groups and evaluate performance\n\nauthor : obanmarcos\n'''\nimport os\nimport os, sys\nfrom config import * \n\nsys.path.append(where_am_i())\n\nimport pytorch_lightning as pl\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom utilities import dataloading_utilities as dlutils\nfrom utilities.folders import *\n\nfrom training import train_utilities as trutils\n\nfrom models.models_system import MoDLReconstructor\nimport torch\n\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms as T\nfrom pytorch_msssim import SSIM\n# from torchmetrics import StructuralSimilarityIndexMeasure as SSIM\nfrom torchmetrics import MultiScaleStructuralSimilarityIndexMeasure as MSSSIM\nimport wandb\nfrom pathlib import Path\nimport pandas as pd\n\ngroup_name = ''\n\nuse_default_model_dict = True\nuse_default_dataloader_dict = True\nuse_default_trainer_dict = True\n\nacceleration_factor = 22\ndata_transform = None\n\nif use_default_model_dict == True:\n # ResNet dictionary parameters\n resnet_options_dict = {'number_layers': 8,\n 'kernel_size':3,\n 'features':64,\n 'in_channels':1,\n 'out_channels':1,\n 'stride':1, \n 'use_batch_norm': False,\n 'init_method': 'xavier'}\n\n # Model parameters\n modl_dict = {'use_torch_radon': False,\n 'number_layers': 8,\n 'K_iterations' : 8,\n 'number_projections_total' : 720,\n 'number_projections_undersampled' : 720//acceleration_factor, \n 'acceleration_factor': acceleration_factor,\n 'image_size': 100,\n 'lambda': 0.05,\n 'use_shared_weights': True,\n 'denoiser_method': 'resnet',\n 'resnet_options': 
resnet_options_dict,\n 'in_channels': 1,\n 'out_channels': 1}\n \n admm_dictionary = {'number_projections': modl_dict['number_projections_undersampled'],\n 'alpha': 0.005, \n 'delta': 2, \n 'max_iter': 30, \n 'tol': 10e-7, \n 'use_invert': 0,\n 'use_warm_init' : 1,\n 'verbose': True}\n\n twist_dictionary = {'number_projections': modl_dict['number_projections_undersampled'], \n 'lambda': 1e-4, \n 'tolerance':1e-4,\n 'stop_criterion':1, \n 'verbose':0,\n 'initialization':0,\n 'max_iter':10000, \n 'gpu':0,\n 'tau': 0.02}\n \n # Training parameters\n loss_dict = {'loss_name': 'psnr',\n 'psnr_loss': torch.nn.MSELoss(reduction = 'mean'),\n 'ssim_loss': SSIM(data_range=1, size_average=True, channel=1),\n 'msssim_loss': MSSSIM(kernel_size = 1)}\n\n # Optimizer parameters\n optimizer_dict = {'optimizer_name': 'Adam+Tanh',\n 'lr': 1e-4}\n\n # System parameters\n model_system_dict = {'optimizer_dict': optimizer_dict,\n 'kw_dictionary_modl': modl_dict,\n 'loss_dict': loss_dict, \n 'method':'unet', \n 'track_train': True,\n 'track_val': True,\n 'track_test': True,\n 'max_epochs':40, \n 'tv_iters': 40,\n 'track_alternating_admm': True,\n 'admm_dictionary': admm_dictionary,\n 'track_alternating_twist': True,\n 'twist_dictionary': twist_dictionary}\n\n# PL Trainer and W&B logger dictionaries\nif use_default_trainer_dict == True:\n\n\n logger_dict = {'project':'deepopt',\n 'entity': 'omarcos', \n 'log_model': True}\n\n lightning_trainer_dict = {'max_epochs': 40,\n 'log_every_n_steps': 10,\n 'check_val_every_n_epoch': 1,\n 'gradient_clip_val' : 0.5,\n 'accelerator' : 'gpu', \n 'devices' : 1,\n 'fast_dev_run' : False,\n 'default_root_dir': model_folder}\n\n profiler = None\n # profiler = SimpleProfiler(dirpath = './logs/', filename = 'Test_training_profile_pytorch')\n # profiler = PyTorchProfiler(dirpath = './logs/', filename = 'Test_training_profile_pytorch')\n\n trainer_dict = {'lightning_trainer_dict': lightning_trainer_dict,\n 'use_k_folding': True, \n 'track_checkpoints': False,\n 'epoch_number_checkpoint': 10,\n 'use_swa' : False,\n 'use_accumulate_batches': False,\n 'k_fold_number_datasets': 3,\n 'use_logger' : True,\n 'resume':'allow',\n 'logger_dict': logger_dict,\n 'track_default_checkpoints' : False,\n 'use_auto_lr_find': False,\n 'batch_accumulate_number': 3,\n 'use_mixed_precision': False,\n 'batch_accumulation_start_epoch': 0, \n 'profiler': profiler,\n 'restore_fold': False,\n 'fold_number_restore': 2,\n 'acc_factor_restore': 22}\n\n# Dataloader dictionary\nif use_default_dataloader_dict == True:\n \n # data_transform = T.Compose([T.ToTensor()])\n data_transform = None \n \n dataloader_dict = {'datasets_folder': datasets_folder,\n 'number_volumes' : 0,\n 'experiment_name': 'Bassi',\n 'img_resize': 100,\n 'load_shifts': True,\n 'save_shifts':False,\n 'number_projections_total': 720,\n 'number_projections_undersampled': 720//acceleration_factor,\n 'acceleration_factor':acceleration_factor,\n 'train_factor' : 0.8, \n 'val_factor' : 0.2,\n 'test_factor' : 0.2, \n 'batch_size' : 8, \n 'sampling_method' : 'equispaced-linear',\n 'shuffle_data' : True,\n 'data_transform' : data_transform,\n 'num_workers' : 8}\n\nartifact_names_x26_psnr = [\n'model-32wj43mf:v0', 'model-3kmtjdm4:v0' ,'model-3l028zex:v0', 'model-2jnmr8t0:v0']\nartifact_names_x22_psnr = ['model-3dp1wex6:v0', 'model-2jwf0rwa:v0', 'model-1qtf5f8u:v0', 'model-2nxos558:v0']\n\ndataset_list_x22 = ['140315_3dpf_head_22', '140114_5dpf_head_22', '140519_5dpf_head_22', '140117_3dpf_body_22', '140114_5dpf_upper tail_22', '140315_1dpf_head_22', 
'140114_5dpf_lower tail_22', '140714_5dpf_head_22', '140117_3dpf_head_22', '140117_3dpf_lower tail_22', '140117_3dpf_upper tail_22', '140114_5dpf_body_22']\n\nif __name__ == '__main__':\n \n artifact_names = artifact_names_x22_psnr\n testing_name_group = 'x{}_histogram'.format(acceleration_factor)\n\n run_name = 'test_metrics_histogram_x{}'.format(acceleration_factor)\n metric = 'psnr'\n dataset_list = dataset_list_x22 \n df_path = 'logs/test_dataframe_x22_normalization.pkl'\n\n user_project_name = 'omarcos/deepopt/'\n\n trainer_system = trutils.TrainerSystem(trainer_dict, dataloader_dict,model_system_dict)\n \n run = wandb.init(project = 'deepopt', reinit = True, group = testing_name_group, job_type = 'Dataset Evaluation', name = run_name)\n\n dataframe = pd.DataFrame(columns = ['test/psnr', 'test/ssim','test/psnr_admm', 'test/ssim_admm', 'test/psnr_fbp', 'test/ssim_fbp', 'fish_part', 'fish_dpf', 'datacode'])\n # dataframe = pd.read_pickle(df_path)\n \n for k_fold, artifact_name in enumerate(artifact_names):\n \n artifact = run.use_artifact(user_project_name+artifact_name, type='model')\n artifact_dir = artifact.download()\n \n model = MoDLReconstructor.load_from_checkpoint(Path(artifact_dir) / \"model.ckpt\", kw_dictionary_model_system = model_system_dict) \n\n trainer = trainer_system.create_trainer()\n\n test_datasets_folders = [datasets_folder+'x{}/'.format(acceleration_factor)+x for x in dataset_list[-3:]]\n\n print('Testing folders:\\n', test_datasets_folders)\n\n # Run testing over slices/everything\n for i, test_dataset_folder in enumerate(test_datasets_folders):\n \n # idx = k_fold*len(test_datasets_folders)+i\n # print(idx)\n model.create_test_metric() # Creo el logger para cada dataset\n\n datacode = test_dataset_folder.split('_')[-4].split('/')[-1]\n fish_part = test_dataset_folder.split('_')[-2]\n fish_dpf = test_dataset_folder.split('_')[-3]\n \n dataset_dict = {'root_folder' : test_dataset_folder, \n 'acceleration_factor' : acceleration_factor,\n 'transform' : None}\n\n test_dataset = dlutils.ReconstructionDataset(**dataset_dict) \n\n test_dataloader = DataLoader(test_dataset, \n batch_size = 1,\n shuffle = False,\n num_workers = 8)\n\n test_dict = trainer.test(model = model, dataloaders = test_dataloader)[0]\n\n row = {'test/psnr_admm': model.test_metric['test/psnr_admm'], 'test/ssim_admm': model.test_metric['test/ssim_admm'], 'test/psnr':model.test_metric['test/psnr'], 'test/ssim':model.test_metric['test/ssim'] ,'test/psnr_fbp':model.test_metric['test/psnr_fbp'], 'test/ssim_fbp': model.test_metric['test/ssim_fbp'], 'fish_part': fish_part, 'fish_dpf': fish_dpf, 'datacode':datacode}\n \n # row = {'test/psnr':model.test_metric['test/psnr'], 'test/ssim':model.test_metric['test/ssim'] ,'test/psnr_fbp':model.test_metric['test/psnr_fbp'], 'test/ssim_fbp': model.test_metric['test/ssim_fbp'], 'fish_part': fish_part, 'fish_dpf': fish_dpf, 'datacode':datacode}\n\n dataframe = dataframe.append(row, ignore_index=True)\n # Rotate\n trainer_system.rotate_list(dataset_list, 3)\n \n dataframe.to_pickle(df_path)\n \n\n\n","repo_name":"marcoso96/ToMoDL","sub_path":"ToMoDL/scripts/12-Histogram-Per_Slice.py","file_name":"12-Histogram-Per_Slice.py","file_ext":"py","file_size_in_byte":10709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26383543700","text":"from nltk.tokenize import sent_tokenize\nfrom classification.ChatGPTClassifier import ChatGPTClassifier\n\nimport dbInit\n\nfrom flask import Flask, request, jsonify\nfrom 
flask_cors import CORS\n\nfrom sqlalchemy.orm import scoped_session\nfrom database import SessionLocal\n\nfrom decorators.postData import data_required\n\nfrom config import currentConfig\n\nfrom models import Selector\n\nDEFAULT_CONTENT = ''\n\ndef createApp(config):\n app = Flask(__name__)\n\n CORS(app, resources={r'/api/*': {'origins': '*'}})\n\n app.session = scoped_session(SessionLocal)\n app.config['JSON_AS_ASCII'] = config.JSON_AS_ASCII\n\n return app\n\napp = createApp(currentConfig)\ndbInit.initializeDb(app.session)\n\n@app.route('/api/v0/selectors/', methods=['GET'])\ndef getSelectors():\n selectors = [{'url': selector.url, 'selector': selector.selector}\n for selector in app.session.query(Selector).all()]\n\n return jsonify(selectors), 200\n\n@app.route('/api/v0/selectors/', methods=['POST'])\n@data_required(['url', 'selector'])\ndef addSelector():\n json = request.get_json(force=True)\n\n selector = Selector(\n url=json['url'],\n selector=json['selector'])\n\n app.session.add(selector)\n app.session.commit()\n\n return DEFAULT_CONTENT, 200\n\n@app.route('/api/v0/classify/', methods=['POST'])\n@data_required(['newsText'])\ndef classifyNewsText():\n json = request.get_json(force=True)\n\n newsSentences = sent_tokenize(json['newsText'])\n\n classifier = ChatGPTClassifier()\n classifications = classifier.classifySentences(newsSentences)\n\n result = [{'sentenceLength': len(sentence),\n 'opinionLevel': classification}\n for sentence, classification\n in zip(newsSentences, classifications)]\n\n return jsonify(list(result)), 200\n\n\n@app.teardown_appcontext\ndef removeSession(*args, **kwargs):\n app.session.remove()\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"lucasescucha/Opinologo.API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72177651785","text":"#!/usr/bin/env python3\n\"\"\"\nScript to post [Grafana Annotations](https://grafana.com/docs/grafana/latest/dashboards/annotations/)\n\nThis is primarily used to annotate deployments in Grafana,\nwhich can be very useful when displayed alongside various graphs.\n\nThis script requires:\n\n- An environment variable GRAFANA_API_KEY with a grafana\n [API Key](https://grafana.com/docs/grafana/latest/http_api/auth/#create-api-token)\n with at least Editor permissions\n- The requests library\n\"\"\"\nimport argparse\nimport os\nimport time\n\nimport requests\n\n\ndef create_annotation(grafana_url, grafana_api_key, tags, text):\n \"\"\"\n Create annotation in a grafana instance.\n \"\"\"\n return requests.post(\n grafana_url + \"/api/annotations\",\n json={\n \"tags\": tags,\n \"text\": text,\n \"time\": int(time.time() * 1000),\n \"isRegion\": False,\n },\n headers={\"Authorization\": f\"Bearer {grafana_api_key}\"},\n ).text\n\n\ndef main():\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"--grafana-url\", help=\"URL of the grafana instance to use\")\n\n argparser.add_argument(\n \"--tag\",\n help=\"Tags to add to the annotation\",\n default=[],\n action=\"append\",\n dest=\"tags\",\n )\n\n argparser.add_argument(\"text\", help=\"Text to use for the annotation\")\n\n args = argparser.parse_args()\n print(\n create_annotation(\n args.grafana_url, os.environ[\"GRAFANA_API_KEY\"], args.tags, args.text\n )\n )\n\n\nif __name__ == \"__main__\":\n 
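# Example invocation (the URL, tag and annotation text below are illustrative placeholders):\n    #   export GRAFANA_API_KEY=xxxx\n    #   python3 post-grafana-annotation.py --grafana-url https://grafana.example.org --tag deployment \"deployed new release\"\n    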
main()\n","repo_name":"jupyterhub/mybinder.org-deploy","sub_path":"post-grafana-annotation.py","file_name":"post-grafana-annotation.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"81"} +{"seq_id":"12450476184","text":"import tkinter as tk\r\nimport random\r\nsize = 500\r\nrows = 15\r\nclassNum = 15\r\nclasses = set()\r\nwhile len(classes) 1:\n eprint(output)\n raise Exception(f'Package {package_name} found multiple versions, aborting!')\n\n name, version, *stuff = map(str.strip, matching[0].split('|'))\n\n eprint(f'found {package_name}={version}')\n return version\n\n@dataclasses.dataclass(frozen=True)\nclass Dep:\n dep_name: str\n package_name: str\n dist: str\n\nDEPS: List[Dep] = [\n Dep('WAYLAND_VERSION', 'wayland', 'lunar/main'),\n Dep('LIBDRM_VERSION', 'libdrm', 'lunar/main'),\n Dep('WAYLAND_PROTOCOLS_VERSION', 'wayland-protocols', 'lunar/main'),\n Dep('HWDATA_VERSION', 'hwdata', 'lunar/main'),\n Dep('VULKAN_LOADER_VERSION', 'vulkan-loader', 'lunar/main'),\n Dep('LIBSDL2_VERSION', 'libsdl2', 'lunar/main')\n]\n\nfor dep in DEPS:\n version = get_available_version(dep.package_name, dep.dist)\n print(f'{dep.dep_name}={version}')\n","repo_name":"akdor1154/gamescope-pkg","sub_path":"ubuntu_22.04/01-deps/query-deps.py","file_name":"query-deps.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"72266374664","text":"\nfrom glob import glob\nimport numpy as np \nimport math\nfrom pydub import AudioSegment\nimport soundfile as sf\nfrom utils import read_xml, make_labels\n\n#SPECIFY VARIABLES\ndataset=['A','B','C','D']\npath_dataset=\"D:/PRP Project/Audio Processing/Dataset/MIVIA/\"\ndestination_path=\"D:/Audio_final_results/Training/\"\nwindow_len=3\nshift=0.3\n \nfor i in range(len(dataset)):\n Xml_dir=glob(path_dataset+dataset[i]+\"/**/*.xml\", recursive=True)\n final_label=[]\n file_name=[]\n A=0\n Wav_dir=glob(path_dataset+dataset[i]+\"/v2/**/*.wav\", recursive=True)\n for j in range(len(Xml_dir)):\n f = sf.SoundFile(Wav_dir[j])\n duration=(len(f) / f.samplerate)\n\n total_windows=math.floor(((duration-window_len)/shift)+1) \n start=np.linspace(0,(total_windows-1)*shift, total_windows)\n end=np.linspace(window_len,(total_windows-1)*shift+window_len, total_windows)\n newAudio = AudioSegment.from_wav(Wav_dir[j])\n my_table=read_xml(Xml_dir[j])\n ########Load model\n label=make_labels(my_table, window_len, shift,total_windows)\n \n \n for k in range(len(start)):\n if label[k]!=5:\n sampleaudio = newAudio[start[k]*1000:end[k]*1000]\n path=destination_path +dataset[i]+'/'+str(A)+'.wav'\n A=A+1\n file_name.append(str(j)+str(k))\n sampleaudio.export(path, format=\"wav\") #Exports to a wav file in the current path.\n AA=np.where(label==5) \n new_a=np.delete(label,AA) \n final_label.append(new_a)\n np_label=np.hstack(final_label[:]) \n np.savetxt(destination_path+dataset[i]+'/labels.csv', np_label, delimiter=\",\")\n \n\n\n","repo_name":"majidfy/Audio-based-vehicle-crash-classifier","sub_path":"codes for Git/audio_segment_MIVIA.py","file_name":"audio_segment_MIVIA.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71604866504","text":"class Persona:\n nombre = \"\"\n edad = 0\n pais = \"\"\n\n def __init__(self, nombre, edad, pais):\n self.nombre= nombre\n self.edad=edad\n self.pais=pais\n\n\n def 
saludar (self):\n        print(\"hello, my name is: {}\" .format(self.nombre))\n\n\n\n\n\n    def despedir(self):\n        print('see you soon, I will be back: {}' .format(self.nombre))\n\n\n    def comprar(self):\n        print('I can buy something')\n\n\n\n    #create an instance of the Persona class\njonatan = Persona('jonatan mideros', '31' , 'ecuador')\n\nprint(jonatan.nombre)\nprint(jonatan.edad)\nprint(jonatan.pais)\njonatan.saludar()\njonatan.comprar()\njonatan.despedir()\n\n\n\nclass Estudiante(Persona):\n    colegio=\"\"\n    def __init__(self, nombre, edad, pais, colegio):\n        Persona.__init__(self, nombre,edad,pais)\n        self.colegio = colegio\n\n    def get_colegio(self):\n        print('their school is: {}' .format(self.colegio))\n\nandrea = Estudiante('andrea', 25, 'chile', 'seminario')\nandrea.saludar()\nandrea.comprar()\nandrea.despedir()\nandrea.get_colegio()\n\n\n\n\nclass Universidad(Estudiante):\n    programa = \"\"\n    def __init__(self, nombre, edad, pais, colegio, programa):\n        Estudiante.__init__(self, nombre, edad, pais, colegio )\n        self.programa = programa\n\n    def get_programa(self):\n        print('their program is: {}' .format(self.programa))\n\n\ncesmag = Universidad('carlos', 41, 'Guayana', 'escuela del rock', 'grafico ')\n\ncesmag.saludar()\ncesmag.comprar()\ncesmag.despedir()\ncesmag.get_colegio()\ncesmag.get_programa()\n\n\nclass Cargo:\n    cargo = \"\"\n\n    def __init__(self, cargo):\n        self.cargo = cargo\n\n\n    def get_cargo(self):\n        print('their position is: {}' .format(self.cargo))\n\n\n\nclass Trabajador(Persona, Cargo):\n    sueldo = 0\n\n    def __init__(self, nombre, edad, pais, cargo, sueldo):\n        Persona.__init__(self, nombre, edad, pais)\n        Cargo.__init__(self,cargo)\n        self.sueldo = sueldo\n\n\n    def get_sueldo(self):\n        print('their salary is: {}' .format(self.sueldo))\n\ndiana = Trabajador('diana' , 32, 'chile', 'albañil', 25000000)\ndiana.saludar()\ndiana.comprar()\ndiana.despedir()","repo_name":"cristian93/clase5","sub_path":"clases.py","file_name":"clases.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"70333734024","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:LPE\n# datetime:2020-02-27 10:52\n# description: wraps the log output helpers\nfrom config.varConfig import parentDirPath\nimport logging\nimport time\nimport os\n\nclass Log(object):\n\n    def __init__(self):\n        # build the log directory path\n        log_path = os.path.join(parentDirPath, 'logs')\n        # if the logs folder does not exist, create it automatically\n        if os.path.exists(log_path) and os.path.isdir(log_path):\n            pass\n        else:\n            os.mkdir(log_path)\n        # name the log file by date\n        self.log_name = os.path.join(log_path, '%s.log' % time.strftime('%Y_%m_%d'))\n        # create a logger\n        self.logger = logging.getLogger()\n        self.logger.setLevel(logging.DEBUG)\n        # define the log output format\n        self.formatter = logging.Formatter('[%(asctime)s] - %(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S')\n        # self.formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')\n\n    def __console(self, level, message):\n        # create a FileHandler for writing logs to a local file\n        fh = logging.FileHandler(self.log_name, encoding='utf-8')\n        fh.setLevel(logging.DEBUG) # log levels: DEBUG -> INFO -> WARNING -> ERROR\n        # attach the formatter to the handler\n        fh.setFormatter(self.formatter)\n        # attach the handler to the logger\n        self.logger.addHandler(fh)\n\n        # create a StreamHandler for console output\n        ch = logging.StreamHandler()\n        ch.setLevel(logging.DEBUG)\n        # attach the formatter to the handler\n        ch.setFormatter(self.formatter)\n        # attach the handler to the logger\n        self.logger.addHandler(ch)\n\n        if level == 'debug':\n            self.logger.debug(message)\n        elif level == 'info':\n            
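# emit at INFO level through the file and console handlers attached above\n            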
self.logger.info(message)\n        elif level == 'warning':\n            self.logger.warning(message)\n        elif level == 'error':\n            self.logger.error(message)\n        # these two lines avoid duplicated log output\n        self.logger.removeHandler(ch)\n        self.logger.removeHandler(fh)\n        # close the opened file\n        fh.close()\n\n    @staticmethod\n    def debug(message):\n        Log().__console('debug', message)\n\n    @staticmethod\n    def info(message):\n        Log().__console('info', message)\n\n    @staticmethod\n    def warning(message):\n        Log().__console('warning', message)\n\n    @staticmethod\n    def error(message):\n        Log().__console('error', message)\n\n\nif __name__ == '__main__':\n    Log.debug(\"this is debug\")\n    Log.info(\"this is info\")\n    Log.warning(\"this is warning\")\n    Log.error(\"this is error\")\n","repo_name":"linpeie/ZYSF","sub_path":"HIS13ZYSF/util/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"15959282594","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n    Author: qinLess\n    File: post_gre_sql_pool.py\n    Time: 2021/4/10 4:49 PM\n-------------------------------------------------\n    Change Activity: 2021/4/10 4:49 PM\n-------------------------------------------------\n    Desc: \n\"\"\"\nimport psycopg2\nfrom DBUtils.PooledDB import PooledDB\n\n\nclass PostGreHandle(object):\n    __instance = {}\n    __init = {}\n\n    def __new__(cls, *args, **kwargs):\n        config = kwargs['config']\n        name = config.get('name', 'post_gre')\n\n        if not cls.__instance.get(name):\n            cls.__instance[name] = super().__new__(cls)\n\n        return cls.__instance[name]\n\n    def __init__(self, config, spider):\n        name = config.get('name', 'post_gre')\n        if PostGreHandle.__init.get(name):\n            return\n        PostGreHandle.__init[name] = True\n\n        self.log = spider.logger\n        self.config = config\n\n        self.pool = PooledDB(\n            creator=psycopg2,\n            maxconnections=0,\n            mincached=5,\n            maxcached=5,\n            maxshared=3,\n            blocking=True,\n            maxusage=None,\n            setsession=[],\n            ping=0,\n            host=self.config['host'],\n            port=self.config['port'],\n            user=self.config['user'],\n            password=self.config['password'],\n            database=self.config['db']\n        )\n\n    def get_pool(self):\n        conn = self.pool.connection()\n        cur = conn.cursor()\n        return conn, cur\n\n    def execute(self, sql, info_data=None):\n        conn, cur = self.get_pool()\n        try:\n            if isinstance(info_data, dict):\n                cur.execute(sql, info_data)\n            elif isinstance(info_data, list):\n                cur.executemany(sql, info_data)\n            else:\n                cur.execute(sql)\n            conn.commit()\n            return True\n\n        except Exception as e:\n            self.log.info(f'sql db: {e}')\n            self.log.info(f\"execute failed: {sql}\")\n            return False\n\n        finally:\n            cur.close()\n            conn.close()\n\n    def insert_conflict_list(self, table_name, info_list, indexes=None):\n        keys = list(info_list[0].keys())\n        fs = ', '.join(keys)\n        vs = ', '.join(list(map(lambda x: '%(' + x + ')s', keys)))\n\n        sql = f\"insert into {table_name} ({fs}) values ({vs}) on conflict ({indexes}) do nothing;\"\n\n        try:\n            return self.execute(sql, info_list)\n        except Exception as e:\n            self.log.exception(f'insert_conflict_list.sql db: {e}')\n            return False\n\n    def insert_conflict_dict(self, table_name, info_dict, indexes=None):\n        fs = ', '.join(list(info_dict.keys()))\n        vs = ', '.join(list(map(lambda x: '%(' + x + ')s', [*info_dict.keys()])))\n        sql = f\"insert into {table_name} ({fs}) values ({vs}) on conflict ({indexes}) do nothing;\"\n\n        try:\n            return self.execute(sql, info_dict)\n        except Exception as e:\n            self.log.exception(f'insert_conflict_dict.sql db: {e}')\n            
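# also record the failing statement and its values, to make the error easier to replay\n            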
self.log.error(\"insert_conflict_dict.failed: \" + sql + \"\\t\" + str(info_dict.values()))\n return False\n\n def select(self, sql):\n conn, cur = self.get_pool()\n\n try:\n cur.execute(sql)\n result = cur.fetchall()\n\n finally:\n conn.close()\n cur.close()\n return result\n\n def close_pool(self):\n self.pool.close()\n","repo_name":"qinLess/magical","sub_path":"magical/sync_spider/databases/post_gre_sql_pool.py","file_name":"post_gre_sql_pool.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"42324811796","text":"# 플로이드 워셜 알고리즘\n\nINF = int(1e9)\n\n# 노드의 개수 및 간선의 개수를 입력받기\nn = int(input())\nm = int(input())\n\n# 2차원 리스트(그래프 표현)을 만들고, 모든 값을 무한으로 초기화\ngraph = [[INF] * (n + 1) for _ in range(n + 1)]\n\n# 자기 자신의 거리는 0으로 초기화\nfor a in range(1, n + 1):\n for b in range(1, n + 1):\n if a == b:\n graph[a][b] = 0\n\n# 각 간선에 대한 정보를 입력받아, 그 값으로 초기화\nfor _ in range(m):\n # a에서 b로가는 비용은 c라고 설정\n a, b, c = map(int, input().split())\n graph[a][b] = c\n\n\n# 수행된 결과를 출력\ndef printer():\n for a in range(1, n + 1):\n for b in range(1, n + 1):\n # 도달할 수 없는 경우, 무한(INFINITY)이라고 출력\n if graph[a][b] == INF:\n print(\"INFINITY\", end = \" \")\n else:\n print(graph[a][b], end = \" \")\n print()\n\n# 점화식에 따라 플로이드 워셜 알고리즘을 수행\nfor k in range(1, n + 1):\n for a in range(1, n + 1):\n for b in range(1, n + 1):\n # 기존의 a와 b의 간선 값\n old = graph[a][b]\n # a에서 k를 경유해 b로 가는 경로의 값\n new = graph[a][k] + graph[k][b]\n graph[a][b] = min(old, new)\n\n\nprinter()","repo_name":"iksflow/coding-test-with-python","sub_path":"shortestpath/9-3.py","file_name":"9-3.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72238234185","text":"from .pyqt_screen_widget import ScreenWidget\n\n#################################\n# values only widget\n#################################\n\n\nclass ValuesWidget(ScreenWidget):\n def set_minimum_size(self):\n super().set_minimum_size()\n h = int(self.height() / (self.max_height + 1))\n for i in range(self.max_height + 1):\n self.layout.setRowMinimumHeight(i, h)\n","repo_name":"hishizuka/pizero_bikecomputer","sub_path":"modules/pyqt/pyqt_values_widget.py","file_name":"pyqt_values_widget.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":633,"dataset":"github-code","pt":"81"} +{"seq_id":"13144100757","text":"#Inputting the height and weight values\r\nheight = input(\"enter your height in m: \")\r\nweight = input(\"enter your weight in kg: \")\r\n#Converting height and weight values into float and string\r\nweight_in_float= int(weight)\r\nheight_in_float= float(height)\r\n#Calculating BMI by dividing weight by height and multiplying the number with 2\r\nBMI= weight_in_float/height_in_float ** 2\r\n#Converting the BMI value into integer\r\nBMI_in_float=int(BMI)\r\n#printing the value of calculated BMI\r\nprint(BMI_in_float)\r\n","repo_name":"ajithm804/BMI-Calculator","sub_path":"BMI Calculator.py","file_name":"BMI Calculator.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70788918026","text":"class Solution:\n def canPartition(self, nums: List[int]) -> bool:\n # DP\n # 1. Calculate target sum\n # 2. Iterate through each num\n # 3. 
Iterate backwards from target to 0\n # to avoid reusing coins\n\n s = sum(nums)\n target = int(sum(nums) / 2)\n # Can't split odd sum\n if s % 2 == 1:\n return False\n\n dp = [False] * (target + 1)\n dp[0] = True\n\n # Iterate through nums one at a time to\n # not reuse coins\n for num in nums:\n # Iterate backwards to avoid reuse\n for amount in range(target, num - 1, -1):\n dp[amount] = dp[amount - num] or dp[amount]\n return dp[target]\n","repo_name":"Drblessing/leetcode","sub_path":"Dynamic Programming/416. Partition Equal Subset Sum.py","file_name":"416. Partition Equal Subset Sum.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23675711605","text":"import torch as torch\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom global_config import AU_INTENSITY_COLS\n\n\nclass LogisticRegression(torch.nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LogisticRegression, self).__init__()\n self.linear = torch.nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n output = torch.sigmoid(self.linear(x))\n return output\n\n\nclass Trainer:\n epochs = 20000\n learning_rate = 0.01\n\n def __init__(self, x, y, n_features, n_classes):\n if not torch.is_tensor(x):\n x = torch.tensor(x)\n if not torch.is_tensor(y):\n y = torch.tensor(y)\n\n self.model = LogisticRegression(n_features, n_classes)\n self.criterion = torch.nn.BCELoss()\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)\n self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(x, y)\n\n def train(self):\n for epoch in range(self.epochs):\n self.model.train()\n self.optimizer.zero_grad() # Setting our stored gradients equal to zero\n output = self.model(self.x_train.float())\n\n loss = self.criterion(torch.squeeze(output).float(), self.y_train.float())\n loss.backward() # Computes the gradient of the given tensor w.r.t. 
graph leaves\n\n            self.optimizer.step()  # Updates weights and biases with the optimizer (SGD)\n\n            if epoch % 100 == 0:\n                self.model.eval()\n                train_acc = self.get_accuracy(self.x_train, self.y_train)\n                valid_acc = self.get_accuracy(self.x_test, self.y_test)\n\n                print(\"Loss: {}\".format(loss.item()))\n                #print(\"Train Accuracy: {}\".format(train_acc))\n                #print(\"Validation Accuracy: {}\".format(valid_acc))\n\n    def get_accuracy(self, x, y):\n        correct, total = 0, 0\n        out = self.model(x.float())\n        predictions = np.round(out.detach()).squeeze()\n        total += y.size(0)\n        correct = (predictions == y).sum()\n        acc = correct / total\n        return acc\n\n\ndef main():\n    df = pd.read_csv(\n        '/files/tests/out/video/video_data_time_series_ang_sad.csv')\n\n    x = []\n    y = []\n    for _, group in df.groupby('filename'):\n        x_arr = torch.tensor(group[AU_INTENSITY_COLS].values)\n        if group[['emotion_1_id']].values[0] == 12:\n            y.append(1)\n        else:\n            y.append(0)\n        x.append(x_arr)\n\n    x_pad = pad_sequence(x, batch_first=True)\n    y_tensor = torch.tensor(y)\n    x_tensor = torch.reshape(x_pad, (x_pad.shape[0], x_pad.shape[1] * 17))\n\n    trainer = Trainer(x_tensor, y_tensor, x_pad.shape[1] * 17, 1)\n    trainer.train()\n\n\n#if __name__ == \"__main__\":\n    # main()\n\n","repo_name":"timlac/quan-machine-learning","sub_path":"src/analysis/supervised_learning/classification/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"15045543310","text":"import sys\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow\r\nfrom PyQt5.QtGui import QPalette, QColor\r\nfrom PyQt5.QtGui import *\r\n\r\nclass Color(QWidget):\r\n    def __init__(self, color):\r\n        super(Color, self).__init__()\r\n        self.setAutoFillBackground(True)\r\n\r\n        palette = self.palette()\r\n        palette.setColor(QPalette.ColorRole.Window, QColor(color)) # ColorRole is missing in BTK (24.7) !!!!!\r\n        self.setPalette(palette)\r\n\r\nclass MainWindow(QMainWindow):\r\n    def __init__(self):\r\n        super(MainWindow,self).__init__()\r\n        self.setGeometry(100,100,500,500)\r\n\r\n        # hlayout1 = QtWidgets.QHBoxLayout()\r\n        \r\n        # hlayout1.addWidget(Color('red'))\r\n        # hlayout1.addWidget(Color('blue'))\r\n        # hlayout1.addWidget(Color('green'))\r\n        # hlayout1.setContentsMargins(30,20,0,3) # leaves margins based on the pixel values given for left, top, right and bottom.\r\n        # hlayout1.setSpacing(50) # leaves 50 pixels of space between the widgets in the layout.\r\n\r\n        # hlayout2 = QtWidgets.QHBoxLayout()\r\n        \r\n        # hlayout2.addWidget(Color('red'))\r\n        # hlayout2.addWidget(Color('green'))\r\n\r\n        # vlayout = QtWidgets.QVBoxLayout()\r\n        # vlayout.addLayout(hlayout1)\r\n        # vlayout.addLayout(hlayout2)\r\n\r\n        layout = QtWidgets.QGridLayout()\r\n        layout.addWidget(Color('red'),0,0)\r\n        layout.addWidget(Color('blue'),1,0)\r\n        layout.addWidget(Color('green'),0,1)\r\n        layout.addWidget(Color('yellow'),1,1)\r\n        \r\n\r\n        widget = QWidget()\r\n        widget.setLayout(layout)\r\n\r\n        # widget = Color('blue')\r\n        self.setCentralWidget(widget)\r\n\r\ndef app():\r\n    app = QApplication(sys.argv)\r\n    win = MainWindow()\r\n    win.show()\r\n    sys.exit(app.exec_())\r\n\r\napp()\r\n","repo_name":"ilkerozmen/python-works","sub_path":"pyqt5-layouts.py","file_name":"pyqt5-layouts.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} {"seq_id":"9319528787","text":"keys = ['byr', 'iyr', 
'eyr', 'hgt', 'hcl', 'ecl', 'pid'] # , 'cid'\n\ntot = 0\nx = 0\nwith open('input') as f:\n    for l in f:\n        d = {x.split(':')[0]: x.split(':')[1] for x in l.split()}\n        for key in keys:\n            if key not in d:\n                break\n        else:\n            tot += 1\n\n\nprint(tot)\n","repo_name":"r-udd/adventofcode2020","sub_path":"day4/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"23305753294","text":"# Python Program To Display A Histogram Showing The Number Of Employees In Specific Age Groups\r\n\r\n'''\r\nFunction Name : Display A Histogram Showing The Number Of Employees In Specific Age Groups\r\nFunction Date : 8 Oct 2020\r\nFunction Author : Prasad Dangare\r\nInput : In This Program We Show A Bar-Graph Of Particular Age Groups By ID No\r\nOutput : It Displays A Bar-Graph On The Python 3.8 Console\r\n'''\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# Take Individual Employee Ages And Range Of Ages\r\n\r\nemp_ages = [22, 45, 30, 59, 58, 56, 57, 45, 43, 43, 50, 40, 34, 33, 25, 19]\r\nbins = [0, 10, 20, 30, 40, 50, 60]\r\n\r\n# Create Histogram Of Bar Type\r\n\r\nplt.hist(emp_ages, bins, histtype='bar', rwidth=0.8, color='cyan')\r\n\r\n# Set Labels\r\n\r\nplt.xlabel('Employee Ages')\r\nplt.ylabel('No Of Employees')\r\nplt.title('MICROSOFT CORP')\r\nplt.legend()\r\n\r\n# Draw The Histogram\r\n\r\nplt.show()\r\n","repo_name":"PRASAD-DANGARE/PYTHON","sub_path":"datascience3.py","file_name":"datascience3.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"14410396824","text":"#! /usr/bin/python3\nimport os\nimport time\nimport json\n\ndef recv():\n\tout = os.popen('./dragino_lora_app recv')\n\t\n\ttry:\n\t\tmessage = out.read()\t\n\t\treturn message\n\texcept:\n\t\tprint(\"LoRa receive error\")\n\t\n\t\t\ndef getGps_time():\t\t\t\t\t\t\n\tcmd = \"sudo python3 gps_time.py\"\n\t\t\t\t\n\tos.system(cmd)\n\n\n\t\t\ndef make_JSON(mess):\n\t\n\ttry:\n\t\tdicT = json.loads(mess)\n\texcept json.decoder.JSONDecodeError:\n\t\tdicT = None\n\tfinally:\n\t\treturn dicT \n\ndef main():\n\tprint(\"Started\")\n\tdevice_id = \"0x00\"\n\ttoDevice = \"0xff\"\n\t#os.system(cmd)\n\tsync = \"false\"\n\tprop = 0\n\tpyNode1= False\n\tpyNode2= False\n\trpiNode1= False\n\tgetGps_time()\n\twhile True:\n\t\t# reset dicT so a failed or empty receive is not reprocessed on the next pass\n\t\tdicT = None\n\t\tmessage = recv()\n\t\tif message:\n\t\t\tdicT = make_JSON(message)\n\t\n\t\tif dicT:\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\t\t\n\t\t\tif dicT['To'] == device_id and dicT['Sync'] == False:\n\t\t\t\t\n\t\t\t\trcvTme = dicT['MyTime']\n\t\t\t\tif dicT[\"From\"] == \"0x01\":\n\t\t\t\t\tprint(\"Device Unsynced = Pycom1 @ \" + str(time.mktime(time.localtime()))+ \", Device Time = \" + str(rcvTme)+\", Sending time now\")\n\t\t\t\t\ttoDevice = \"0x01\"\n\t\t\t\t\tsync = \"false\"\n\t\t\t\t\tpyNode1= False\n\t\t\t\n\t\t\t\t\tfor i in range (0,12):\n\t\t\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\t\t\tsend_str = \"{\\\"To\\\":\\\"\"+ toDevice +\"\\\",\\\"From\\\":\\\"\" + device_id + \"\\\",\\\"MyTime\\\":\"+ str(tme) + \",\\\"Sync\\\":\" + sync + \"}\" \n\t\t\t\t\t\t\n\t\t\t\t\t\tsend_str = json.dumps(send_str)\n\t\t\t\t\t\t#print(len(send_str))\n\t\t\t\t\t\tcmd = './dragino_lora_app sender ' + str(send_str) \n\t\t\t\t\n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\ti+=1\n\t\t\t\telif dicT[\"From\"] == \"0x02\":\n\t\t\t\t\tprint(\"Device Unsynced = Pycom2 @ \" + str(time.mktime(time.localtime())) + \", Device Time = \" + str(rcvTme) 
+ \", Sending time now\")\n\t\t\t\t\ttoDevice = \"0x02\"\n\t\t\t\t\tsync = \"false\"\n\t\n\t\t\t\t\tpyNode2= False\n\t\t\n\t\t\t\t\tfor i in range (0,12):\n\t\t\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\t\t\tsend_str = \"{\\\"To\\\":\\\"\"+ toDevice +\"\\\",\\\"From\\\":\\\"\" + device_id + \"\\\",\\\"MyTime\\\":\"+ str(tme) + \",\\\"Sync\\\":\" + sync + \"}\" \n\t\t\t\t\t\t\n\t\t\t\t\t\tsend_str = json.dumps(send_str)\n\t\t\t\t\t\t#print(len(send_str))\n\t\t\t\t\t\tcmd = './dragino_lora_app sender ' + str(send_str) \n\t\t\t\t\n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\ti+=1\n\t\t\t\t\t\t\n\t\t\t\telif dicT[\"From\"] == \"0x03\":\n\t\t\t\t\tprint(\"Device Unsynced = RPi_node @ \" + str(time.mktime(time.localtime()))+ \", Device Time = \" + str(rcvTme)+\", Sending time now\")\n\t\t\t\t\ttoDevice = \"0x03\"\n\t\t\t\t\tsync = \"false\"\n\t\t\t\t\n\n\t\t\t\t\trpiNode1= False\t\t\t\n\t\t\t\t\tfor i in range (0,12):\n\t\t\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\t\t\tsend_str = \"{\\\"To\\\":\\\"\"+ toDevice +\"\\\",\\\"From\\\":\\\"\" + device_id + \"\\\",\\\"MyTime\\\":\"+ str(tme) + \",\\\"Sync\\\":\" + sync + \"}\" \n\t\t\t\t\t\t\n\t\t\t\t\t\tsend_str = json.dumps(send_str)\n\t\t\t\t\t\t#print(len(send_str))\n\t\t\t\t\t\tcmd = './dragino_lora_app sender ' + str(send_str) \n\t\t\t\t\n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\ti+=1\n\n\n\t\t\telif dicT['To'] == device_id and dicT['Sync'] == True:\n\t\t\t\tprint(dicT[\"From\"])\n\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\tprint(\"---------------------------------------------------------\")\n\t\t\t\trcvTme = dicT['MyTime']\n\t\t\t\n\t\t\t\tprop = tme - rcvTme\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif rcvTme == tme:\n\t\t\t\t\tif dicT[\"From\"] == \"0x01\":\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\tprint(\"Device Synced = Pycom1 @ \" + str(time.mktime(time.localtime())))\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\ttoDevice = \"0x01\"\n\t\t\t\t\t\tpyNode1= True\n\n\t\t\t\t\telif dicT[\"From\"] == \"0x02\":\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\tprint(\"Device Synced = Pycom2 @ \" + str(time.mktime(time.localtime())))\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\ttoDevice = \"0x02\"\n\t\t\t\t\t\tpyNode2= True\n\t\n\t\t\t\t\telif dicT[\"From\"] == \"0x03\":\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\tprint(\"Device Synced = RPi_node @ \" + str(time.mktime(time.localtime())))\n\t\t\t\t\t\tprint(\"****************************************************************\")\n\t\t\t\t\t\ttoDevice = \"0x03\"\n\t\t\t\t\t\trpiNode1= True\n\t\t\t\t\tsync = \"true\"\n\t\t\t\t\tfor i in range (0,12):\n\t\t\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\t\t\ttme = tme + prop\n\t\t\t\t\t\tsend_str = \"{\\\"To\\\":\\\"\"+ toDevice +\"\\\",\\\"From\\\":\\\"\" + device_id + \"\\\",\\\"MyTime\\\":\"+ str(prop) + \",\\\"Sync\\\":\" + sync + \"}\" \n\t\t\t\t\t\tsend_str = json.dumps(send_str)\n\t\t\t\t\t\tcmd = './dragino_lora_app sender ' + str(send_str) \n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\ti+=1\n\t\t\t\t\tprint(\"---------------------------------------------------------\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Difference = \" + str(prop))\n\t\t\t\t\tsync = \"false\"\n\t\t\t\t\tif dicT[\"From\"] == \"0x01\":\n\t\t\t\t\t\tprint(\"Sending Difference to 
Pycom1\")\n\t\t\t\t\t\tpyNode1= False\n\n\t\t\t\t\telif dicT[\"From\"] == \"0x02\":\n\t\t\t\t\t\tprint(\"Sending Difference to Pycom2\")\n\t\t\t\t\t\tpyNode2= False\n\t\n\t\t\t\t\telif dicT[\"From\"] == \"0x03\":\n\t\t\t\t\t\tprint(\"Sending Difference to RPi_node\")\n\t\t\t\t\n\t\t\t\t\t\trpiNode1= False\n\t\t\t\t\t\n\t\t\t\t\tfor i in range (0,20):\n\t\t\t\t\t\ttme = time.mktime(time.localtime())\n\t\t\t\t\t\ttme = tme + prop\n\t\t\t\t\t\tsend_str = \"{\\\"To\\\":\\\"\"+ dicT[\"From\"] +\"\\\",\\\"From\\\":\\\"\" + device_id + \"\\\",\\\"MyTime\\\":\"+ str(prop) + \",\\\"Sync\\\":\" + sync + \"}\" \n\t\t\t\t\t\tsend_str = json.dumps(send_str)\n\t\t\t\t\t\tcmd = './dragino_lora_app sender ' + str(send_str) \n\t\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\ti+=1\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\tif pyNode1 == True and pyNode2 == True and \trpiNode1 == True:\n\t\t\t\tprint(\"!!!!!!!!!!!!!!!!!!!!\")\n\t\t\t\tprint(\"!!System Synced!!!!!\")\n\t\t\t\tprint(\"!!!!!!!!!!!!!!!!!!!!\")\n\t\t\t\tgetGps_time()\n\n\n\n\t\t\n\t\n\t\t\t\t\n\n\n\t\t\t\t\t\t\n\t\t\t\t\nmain()\n","repo_name":"BricBoy218/LoRa_Time_Sync","sub_path":"RPi_Master_Node/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6422498319","text":"# Download BMS executable from https://cs-people.bu.edu/jmzhang/BMS/BMS.html\n# and update BMS_EXE before running this script.\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport imageio\nimport scipy.io as sio\n\nBMS_EXE = \"/home/czhao/Downloads/bms/BMS_CODE/BMS\"\n\ninput_path = sys.argv[1] + \"/\"\noutput_path = sys.argv[2]\n\nSAMPLE_STEP_SIZE = 8\nMAX_DIM = 405\nDILATION_WIDTH_1 = max(round(7 * MAX_DIM / 400), 1)\nDILATION_WIDTH_2 = max(round(9 * MAX_DIM / 400), 1)\nBLUR_STD = round(9 * MAX_DIM / 400)\nCOLOR_SPACE = 2\nWHITENING = 1\n\nwith tempfile.TemporaryDirectory() as tmpdir:\n print(\"Running BMS...\")\n args = [str(arg) for arg in [BMS_EXE, input_path, tmpdir + \"/\", SAMPLE_STEP_SIZE, DILATION_WIDTH_1, DILATION_WIDTH_2, BLUR_STD, COLOR_SPACE, WHITENING, MAX_DIM]]\n subprocess.run(args, check=True)\n\n print(\"Converting maps...\")\n for name in os.listdir(tmpdir):\n num = int(name[name.index(\"r_\")+2:-4])\n infile = os.path.join(tmpdir, name)\n subdir = os.path.join(output_path, f\"img{num}/\")\n if not os.path.exists(subdir):\n os.mkdir(subdir)\n outfile = os.path.join(subdir, f\"img{num}_method_bms.mat\")\n the_map = imageio.imread(infile)\n data = {\"bms\": the_map}\n sio.savemat(outfile, data)\n","repo_name":"czhao39/neurips-attention","sub_path":"get_bms_saliency.py","file_name":"get_bms_saliency.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"1018220893","text":"from Rooms import Room\n\ndef test_room():\n gold = Room(\"GoldRoom\", \n \"\"\"This room has gold in it you can grab, \n There is a door to the north,\"\"\")\n\n assert gold.name == \"GoldRoom\"\n assert gold.paths == {}\n\ndef test_add_paths():\n pass\n center_room = Room(\"Center\", \"Test room in the center\")\n north_room = Room(\"North\", \"Test room in the north\")\n south_room = Room(\"South\", \"Test room in the south\")\n\n north_room.add_paths({'south': south_room})\n center_room.add_paths({'north': north_room})\n center_room.add_paths({'south': south_room})\n south_room.add_paths({'north': center_room})\n\n\n my_rooms = north_room.go('south')\n assert 
my_rooms.name == 'South'\n my_rooms = my_rooms.go('north')\n assert my_rooms.name == 'Center'\n my_rooms = my_rooms.go('north')\n assert my_rooms.name == 'North'\n \n","repo_name":"ulfsjodin/ex47","sub_path":"test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25895265570","text":"import open3d as o3d\nimport numpy as np\nimport os\n\ndef get_file(path):\n files = os.listdir(path)\n names = []\n for file in files:\n name, _ = file.split('.')\n names.append(name)\n return names\n\nif __name__ == '__main__':\n txtpath = '/Users/jinxuanchen/Files_Local/Point_image_fusion/kittidata/odometry/sequences/02/0_out/5_full_cluster/'\n savepath = '/Users/jinxuanchen/Files_Local/Point_image_fusion/kittidata/odometry/sequences/02/0_out/6_center.txt'\n \n filenames = get_file(txtpath)\n center_list = []\n for filename in filenames:\n path = txtpath + filename + '.txt'\n \n try:\n txt = np.loadtxt(path)\n points = txt[:,0:3]\n ids = txt[:,4:5]\n except:\n pass\n \n \n # txt = np.loadtxt(path)\n # points = txt[:,0:3]\n # ids = txt[:,4:5]\n \n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(points)\n \n center = pcd.get_center()\n center = np.array(center)\n id = ids[0]\n id = np.array(id)\n center = np.concatenate((center,id),axis=0).reshape(1,4)\n center = center.tolist()\n \n center_list.extend(center)\n \n # np.savetxt(savepath,np.array(center_list))\n \n # center = np.mean(points, axis=0)\n # center = np.array(center)\n # id = ids[0]\n # id = np.array(id)\n # center = np.concatenate((center,id),axis=0).reshape(1,4)\n # center = center.tolist()\n \n # center_list.extend(center)\n np.savetxt(savepath,np.array(center_list),fmt='%.08f')\n \n ","repo_name":"ooooohhhhhxxx/Point_image_fusion","sub_path":"fusion/center.py","file_name":"center.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42866720445","text":"import sys\r\nfrom collections import deque\r\n\r\ninput = sys.stdin.readline\r\n\r\ndef push(q, x):\r\n q.append(x)\r\ndef pop(q):\r\n if q:\r\n print(q.popleft())\r\n else:\r\n print(-1)\r\ndef size(q):\r\n print(len(q))\r\ndef empty(q):\r\n if q:\r\n print(0)\r\n else:\r\n print(1)\r\ndef front(q):\r\n if q:\r\n print(q[0])\r\n else:\r\n print(-1)\r\ndef back(q):\r\n if q:\r\n print(q[-1])\r\n else:\r\n print(-1)\r\n\r\nN = int(input())\r\nq = deque()\r\n\r\nfor _ in range(N):\r\n a, *x = input().split()\r\n if a == 'push':\r\n push(q, int(*x))\r\n elif a == 'pop':\r\n pop(q)\r\n elif a == 'size':\r\n size(q)\r\n elif a == 'empty':\r\n empty(q)\r\n elif a == 'front':\r\n front(q)\r\n elif a == 'back':\r\n back(q)\r\n","repo_name":"iblug/Baekjoon","sub_path":"백준/Silver/10845. 
큐/큐.py","file_name":"큐.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10665776771","text":"import optuna\nfrom .cfg import CFG\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression, PassiveAggressiveClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\n\ndef without_compare():\n model_params = {\n \"SVM\": {\n \"model\": SVC(gamma=\"auto\"),\n \"params\":{\n \"C\": [1, 10, 20],\n \"kernel\": [\"rbf\", \"linear\"]\n }\n },\n \"RandomForest\": {\n \"model\": RandomForestClassifier(),\n \"params\": {\n \"n_estimators\": [1, 5, 10]\n }\n },\n \"LogisticRegression\":{\n \"model\": LogisticRegression(solver=\"liblinear\", multi_class=\"auto\"),\n \"params\": {\n \"C\": [1, 10, 20]\n }\n },\n \"ExtraTree\": {\n \"model\": ExtraTreesClassifier(),\n \"params\": {\n \"n_estimators\" : [1, 5, 10]\n }\n },\n \"DecisionTree\": {\n \"model\": DecisionTreeClassifier(),\n \"params\": {\n \"criterion\": [\"gini\", \"entropy\"]\n }\n },\n }\n return model_params\n\ndef get_params(trial, model_config):\n if model_config[\"model_name\"] == \"ExtraTree\":\n params = {\n \"n_estimators\": trial.suggest_categorical(\"n_estimators\", [7000, 15000, 20000]),\n \"criterion\": trial.suggest_categorical(\"criterion\", [\"gini\", \"entropy\"]),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 9),\n \"early_stopping_rounds\": trial.suggest_int(\"early_stopping_rounds\", 100, 500),\n \"max_features\": trial.suggest_categorical(\"max_features\", [\"auto\", \"sqrt\", \"log2\"]),\n }\n\n elif model_config[\"model_name\"] == \"xgb\":\n params = {\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-2, 0.25, log=True),\n \"reg_lambda\": trial.suggest_float(\"reg_lambda\", 1e-8, 100.0, log=True),\n \"reg_alpha\": trial.suggest_float(\"reg_alpha\", 1e-8, 100.0, log=True),\n \"subsample\": trial.suggest_float(\"subsample\", 0.1, 1.0),\n \"colsample_bytree\": trial.suggest_float(\"colsample_bytree\", 0.1, 1.0),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 9),\n \"early_stopping_rounds\": trial.suggest_int(\"early_stopping_rounds\", 100, 500),\n \"n_estimators\": trial.suggest_categorical(\"n_estimators\", [7000, 15000, 20000]),\n }\n if model_config[\"use_gpu\"]:\n params[\"tree_method\"] = \"gpu_hist\"\n params[\"gpu_id\"] = 0\n params[\"predictor\"] = \"gpu_predictor\"\n else:\n pass\n # params[\"tree_method\"] = trial.suggest_categorical(\"tree_method\", [\"exact\", \"approx\", \"hist\"])\n # params[\"booster\"] = trial.suggest_categorical(\"booster\", [\"gbtree\", \"gblinear\"])\n # if params[\"booster\"] == \"gbtree\":\n # params[\"gamma\"] = trial.suggest_float(\"gamma\", 1e-8, 1.0, log=True)\n # params[\"grow_policy\"] = trial.suggest_categorical(\"grow_policy\", [\"depthwise\", \"lossguide\"])\n \n elif model_config[\"model_name\"] == \"lgb\":\n params = {\n \"verbosity\": -1,\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.01, 0.3),\n \"num_leaves\": trial.suggest_int('num_leaves', 20, 3000, step=20),\n \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12),\n \"min_data_in_leaf\": trial.suggest_int(\"min_data_in_leaf\", 200, 10000, step=100),\n \"max_bin\": 
trial.suggest_int(\"max_bin\", 200, 300),\n \"lambda_l1\": trial.suggest_int(\"lambda_l1\", 0, 100, step=5),\n \"lambda_l2\": trial.suggest_int(\"lambda_l2\", 0, 100, step=5),\n \"min_gain_to_split\": trial.suggest_float(\"min_gain_to_split\", 0, 15),\n \"bagging_fraction\": trial.suggest_float(\n \"bagging_fraction\", 0.2, 0.95, step=0.1\n ),\n \"bagging_freq\": trial.suggest_categorical(\"bagging_freq\", [1]),\n \"feature_fraction\": trial.suggest_float(\n \"feature_fraction\", 0.2, 0.95, step=0.1\n ),\n \"early_stopping_rounds\": trial.suggest_int(\"early_stopping_rounds\", 100, 500),\n }\n\n if model_config[\"use_gpu\"] == True:\n params[\"device_type\"] = \"gpu\"\n return params","repo_name":"Aditta-das/autotrainer","sub_path":"trainme/src/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3676403574","text":"from osbot_utils.utils.Files import Files\nfrom osbot_utils.utils.Json import Json\n\n\nclass Links:\n def __init__(self, file_system):\n self.file_system = file_system\n self.link_pairs = {\n 'has role' : 'role is assigned to' , 'role is assigned to' : 'has role' ,\n 'reports to' : 'is manager of' , 'is manager of' : 'reports to' ,\n 'is funded by' : 'funds' , 'funds' : 'is funded by' ,\n 'is function within' : 'has function' , 'has function' : 'is function within' ,\n 'is owned by' : 'owns' , 'owns' : 'is owned by' ,\n 'is user account for' : 'has user account' , 'has user account' : 'is user account for' ,\n 'is admin account for' : 'has admin account' , 'has admin account': 'is admin account for' ,\n 'has detection' : 'is detection logged against' , 'is detection logged against' : 'has detection' ,\n 'detected by' : 'registered detection' , 'registered detection' : 'detected by' ,\n 'delivers' : 'is delivered by' , 'is delivered by' : 'delivers' ,\n 'role is assigned to' : 'has role' , 'has role ': 'role is assigned to' ,\n 'can escalate to' : 'can occur due to' , 'can occur due to' : 'can escalate to' ,\n 'is supported by' : 'supports' , 'supports' : 'is supported by' ,\n 'indicates failure of' : 'failure indicated by' , 'failure indicated by' : 'indicates failure of' ,\n 'causes' : 'is caused by' , 'is caused by' : 'causes' ,\n 'establishes' : 'is established by' , 'is established by': 'establishes' ,\n 'requires decision about': 'decision is required due to' , 'decision is required due to' : 'requires decision about',\n 'owned by' : 'owns' , 'owns' : 'owned by' ,\n\n 'can arise due to exploit of' : 'exploit can lead to' ,\n 'exploit can lead to' : 'can arise due to exploit of' ,\n 'represents critical business asset' : 'is made up of sub asset' ,\n 'is made up of sub asset' : 'represents critical business asset' ,\n\n }\n\n def all(self):\n path = self.path_links()\n if Files.exists(path):\n return Json.load_file(path)\n return []\n\n def add(self,from_key, link_type, to_key):\n link = [from_key.strip(), link_type.strip(), to_key.strip()]\n links = self.all()\n if link in links:\n return {'status': 'error', 'data' : 'link already existed: {0}'.format(link)}\n links.append(link)\n self.save(links)\n return {'status': 'ok', 'data': 'link added: {0}'.format(link)}\n\n def add_pair(self, from_key, link_type, to_key):\n link_pair = self.link_pairs.get(link_type.strip())\n if link_pair is None: link_pair = \"(opposite of) {0}\".format(link_type)\n self.add(from_key,link_type, to_key)\n self.add(to_key, link_pair, from_key)\n return self\n\n def 
delete(self,from_key, link_type, to_key):\n        link = [from_key, link_type, to_key]\n        links = self.all()\n        if link not in links:\n            return {'status': 'error', 'data' : 'link not found: {0}'.format(link)}\n        links.remove(link)\n        self.save(links)\n        return {'status': 'ok', 'data': 'link deleted: {0}'.format(link)}\n\n    def save(self,links):\n        path = self.path_links()\n        Json.save_file_pretty(path, links)\n        return self\n    def path_links(self):\n        return Files.path_combine(self.file_system.folder_data,'links.json')","repo_name":"owasp-sbot/OSBot-Jira","sub_path":"OSBot-GraphDB/osbot_graphsv/api/Links.py","file_name":"Links.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} {"seq_id":"5295043823","text":"'''\n\"Programmers\" and \"This Is a Coding Test\" p355\nMoving Blocks\nProblem description\nRobot developer \"Muji\" is preparing a robot to enter the \"Kakao Cup Robot Contest\", which is one month away. The robot being prepared is a 2 x 1 robot, and \"Muji\" wants to program it so that the 2 x 1 robot can be moved to position (N, N) on an N x N map made of \"0\"s and \"1\"s. On the map the robot moves on, the top-left coordinate is (1, 1); a \"0\" on the map marks an empty cell and a \"1\" marks a wall. The robot cannot move onto a wall cell or off the map. The robot starts lying horizontally at coordinate (1, 1) as in the figure below, and it can move without distinguishing front from back.\n\n블럭이동-1.jpg\n\nWhen the robot moves, it keeps its current orientation. For example, in the figure above, moving one cell to the right it would occupy the two cells (1, 2) and (1, 3), and moving down it would occupy (2, 1) and (2, 2). The robot is done as soon as either of the two cells it occupies reaches position (N, N).\n\nThe robot can also rotate, subject to the following conditions.\n\n블럭이동-2.jpg\n\nAs shown in the figure above, the robot can rotate in 90-degree steps. Either of the two cells the robot occupies can act as the pivot, but the cell in the direction of rotation (the cell diagonal from the pivot) must not contain a wall. Moving one cell or rotating 90 degrees takes exactly 1 second.\n\nGiven board, a map made of \"0\"s and \"1\"s, complete the solution function so that it returns the minimum time the robot needs to reach position (N, N).\n\nConstraints\nThe side length of board is between 5 and 100 inclusive.\nEach element of board is 0 or 1.\nThe cells (1, 1) and (1, 2) where the robot initially lies are always given as 0.\nOnly inputs for which the robot can always reach the destination are given.\nExample input/output\nboard\tresult\n[[0, 0, 0, 1, 1],[0, 0, 0, 1, 0],[0, 1, 0, 1, 1],[1, 1, 0, 0, 1],[0, 0, 0, 0, 0]]\t7\nExplanation of the example\nSame as the example given in the problem.\nAfter moving one cell to the right, the robot rotates 90 degrees counter-clockwise around cell (1, 3). Moving down another 3 cells, the robot occupies the two cells (4, 3) and (5, 3). Now, after rotating 90 degrees clockwise around (5, 3) and moving one cell to the right, it arrives at (N, N). Therefore, it takes at least 7 seconds to reach the destination.\n'''\nfrom collections import deque\n\ndef solution(board):\n    n = len(board)\n    visited = dict()\n    \n    # dx dy list\n    offset = [(0, 1, 0, 1), (0, -1, 0, -1), (1, 0, 1, 0), (-1, 0, -1, 0)]\n    # rotations when the robot lies horizontally\n    h_rotation = [(0, 0, 1, -1, 1, 1), (0, 0, -1, -1, -1, 1), (1, 1, 0, 0, 1, 0), (-1, 1, 0, 0, -1, 0)]\n    # rotations when the robot stands vertically\n    v_rotation = [(0, 0, -1, 1, 1, 1), (0, 0, -1, -1, 1, -1), (1, 1, 0, 0, 0, 1), (1, -1, 0, 0, 0, -1)] \n    \n    start = (0, 0, (0, 0, 0, 1))\n    \n    q = deque() \n    q.append(start)\n    \n    answer = 0\n    \n    while q:\n        d, cnt, pos = q.popleft()\n        s_x, s_y, e_x, e_y = pos\n        \n        if (s_x, s_y, e_x, e_y) in visited:\n            continue\n        \n        visited[(s_x, s_y, e_x, e_y)] = True\n        \n        if (s_x == n - 1 and s_y == n - 1) or (e_x == n - 1 and e_y == n - 1):\n            answer = cnt\n            break\n        \n        for sdx, sdy, edx, edy in offset:\n            new_s_x = s_x + sdx\n            new_s_y = s_y + sdy\n            new_e_x = e_x + edx\n            new_e_y = e_y + edy\n            \n            if not (0 <= new_s_x < n and 0 <= new_s_y < n and 0 <= new_e_x < n and 0 <= new_e_y < n):\n                continue\n            \n            if board[new_s_x][new_s_y] == 1 or board[new_e_x][new_e_y] == 1:\n                continue\n            \n            if (new_s_x, new_s_y, new_e_x, new_e_y) in visited:\n                continue\n            \n            q.append((d, cnt + 1, (new_s_x, new_s_y, new_e_x, new_e_y)))\n        \n        \n        if d == 0:\n            for sdx, sdy, edx, edy, cdx, cdy in h_rotation:\n                new_s_x = s_x + sdx\n                new_s_y = s_y + sdy\n                new_e_x = e_x + edx\n                new_e_y = e_y + edy\n                new_c_x = s_x + cdx\n                new_c_y = s_y + cdy \n                \n                if not (0 <= new_s_x < n and 0 <= new_s_y < n and 0 <= new_e_x < n and 0 <= new_e_y < n and\n                        0 <= new_c_x < n and 0 <= new_c_y < n ):\n                    continue\n\n                if board[new_s_x][new_s_y] == 1 or board[new_e_x][new_e_y] == 1 or board[new_c_x][new_c_y] == 1:\n                    continue\n\n                # keep the two cells in a canonical order \n                if new_s_x > new_e_x or (new_s_x == new_e_x and new_s_y > new_e_y):\n                    (new_s_x, new_s_y), (new_e_x, new_e_y) = (new_e_x, new_e_y), (new_s_x, new_s_y) \n                \n                if (new_s_x, new_s_y, new_e_x, new_e_y) in visited:\n                    continue\n\n                q.append((1, cnt + 1, (new_s_x, new_s_y, new_e_x, new_e_y)))\n        else:\n            for sdx, sdy, edx, edy, cdx, cdy in v_rotation:\n                new_s_x = s_x + sdx\n                new_s_y = s_y + sdy\n                new_e_x = e_x + edx\n                new_e_y = e_y + edy\n                new_c_x = s_x + cdx\n                new_c_y = s_y + cdy \n                \n                if not (0 <= new_s_x < n and 0 <= new_s_y < n and 0 <= new_e_x < n and 0 <= new_e_y < n and\n                        0 <= new_c_x < n and 0 <= new_c_y < n ):\n                    continue\n\n                if board[new_s_x][new_s_y] == 1 or board[new_e_x][new_e_y] == 1 or board[new_c_x][new_c_y] == 1:\n                    continue\n\n                # keep the two cells in a canonical order \n                if new_s_x > new_e_x or (new_s_x == new_e_x and new_s_y > new_e_y):\n                    (new_s_x, new_s_y), (new_e_x, new_e_y) = (new_e_x, new_e_y), (new_s_x, new_s_y)\n                \n                if (new_s_x, new_s_y, new_e_x, new_e_y) in visited:\n                    continue\n\n                q.append((0, cnt + 1, (new_s_x, new_s_y, new_e_x, new_e_y)))\n    \n    return answer","repo_name":"KyuSahm/problems-solving","sub_path":"DFS and BFS/python/move_blocks.py","file_name":"move_blocks.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"31492889347","text":"\"\"\"\n4. Automobile Costs\nWrite a program that asks the user to enter the monthly costs for the following expenses\nincurred from operating his or her automobile: loan payment, insurance, gas, oil, tires, and\nmaintenance. 
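For example (illustrative figures): monthly costs of $250 for the loan, $100 for insurance, $150 for gas, $25 for oil, $30 for tires, and $45 for maintenance add up to $600 per month, or $7,200 per year. 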
The program should then display the total monthly cost of these expenses,\nand the total annual cost of these expenses.\n\n@author Sharaf Qeshta\n\"\"\"\n\nmonthly_loan_payment = float(input(\"enter your monthly loan payment:\"))\nmonthly_insurance = float(input(\"enter your monthly insurance:\"))\nmonthly_gas = float(input(\"enter your monthly gas:\"))\nmonthly_oil = float(input(\"enter your monthly oil:\"))\nmonthly_tires = float(input(\"enter your monthly tires:\"))\nmonthly_maintenance = float(input(\"enter your monthly maintenance:\"))\n\ntotal_monthly = monthly_loan_payment + monthly_insurance + monthly_gas + monthly_oil + monthly_tires + monthly_maintenance\ntotal_annual = total_monthly * 12\n\nprint(f\"your total payment in a month is ${total_monthly}\")\nprint(f\"your total payment in a year is ${total_annual}\")\n","repo_name":"sharaf-qeshta/starting_out_with_python_exercises_solutions","sub_path":"Chapter_05/Exercise_05_04/Exercise_05_04.py","file_name":"Exercise_05_04.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"74727351624","text":"# these are helper functions that are only used in the lib folder\n\nfrom eunomia.arch.wasm.memory import (insert_symbolic_memory,\n                                      lookup_symbolic_memory_data_section)\nfrom z3 import BitVecVal, is_bv, is_bv_value\n\ndef _extract_params(param_str, state):\n    assert 0\n    param_cnt = len(param_str.split(\" \"))\n    params = []\n    for _ in range(param_cnt):\n        params.append(state.symbolic_stack.pop())\n\n    # concretize\n    params_result = []\n    for i in params:\n        if is_bv_value(i):\n            params_result.append(i.as_long())\n        else:\n            params_result.append(i)\n\n    return params_result\n\ndef sgx_extract_params(param_str, state):\n    \"\"\"\n    Return a list of elements, which are the arguments of the given import function.\n    Note that the order will be reversed.\n    For example, if the signature of function foo is: foo (a, b), the returned arguments will be [b, a]\n    \"\"\"\n    param_cnt = len([x for x in param_str.split(\" \") if x]) \n    params = []\n    shadow_params = []\n    for _ in range(param_cnt):\n        params.append(state.symbolic_stack.pop())\n        shadow_params.append(state.shadow_stack.pop())\n\n\n\n\n    return params,shadow_params\n\n\ndef _storeN(state, dest, val, len_in_bytes):\n    if not is_bv(val):\n        state.symbolic_memory = insert_symbolic_memory(\n            state.symbolic_memory, dest, len_in_bytes,\n            BitVecVal(val, len_in_bytes * 8))\n    else:\n        state.symbolic_memory = insert_symbolic_memory(\n            state.symbolic_memory, dest, len_in_bytes, val)\n\n\ndef _loadN(state, data_section, dest, len_in_bytes):\n    val = lookup_symbolic_memory_data_section(\n        state.symbolic_memory, data_section, dest, len_in_bytes)\n    if is_bv_value(val):\n        val = val.as_long()\n    return val\n","repo_name":"PKU-ASAL/WASEM","sub_path":"eunomia/arch/wasm/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"11744765541","text":"#Plot a vector in 2D\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nprint(\"\")\r\nprint(\"-----------------VECTORS----------------\")\r\nq = int(input(\"How many vectors do you want to plot: \"))\r\n\r\nfor i in range(q):\r\n\tprint(\"--------------------VECTOR \"+ str(i+1) +\"-------------\")\r\n\tp1 = eval(input(\"Coordinates of point 1 (x,y): \"))\r\n\tp2 = eval(input(\"Coordinates of point 2 (x,y): \"))\r\n\tprint(\"-----------------------------------------------------\")\r\n\tx1 = np.linspace(p1[0], p2[0], 100)\r\n\ty1 = np.linspace(p1[1], p2[1], 100)\r\n\tplt.plot(x1,y1, lw=2, label=\"Vector \"+ str(i+1))\r\nplt.legend()\r\nplt.show()","repo_name":"icleveston/Python-Codes","sub_path":"Linear-Algebra/Trabalho_1/Plotar_Vetor_2D.py","file_name":"Plotar_Vetor_2D.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"20634152902","text":"import matplotlib.pyplot as plt\r\nfrom sys import argv\r\nimport numpy as np\r\nfrom data import *\r\nfrom net import *\r\nimport math, copy\r\n\r\ndef plot1D(X, y, label):\r\n\tx = np.array(X).T.tolist()[0]\r\n\tplt.plot(x, y, label = label)\r\n\r\ndef cal2DError(net, net1, testX, testY, testY1):\r\n\tresults = net.forward(testX)\r\n\tresults1 = net1.forward(testX)\r\n\t\r\n\tcount = 0\r\n\tfor i in range(len(testX)):\r\n\t\tcount = count + ((results[i] - testY[i]) ** 2 + (results1[i] - testY1[i]) ** 2) ** 0.5\r\n\t\t\r\n\treturn count\r\n\r\ndef plotPoints1(X):\r\n\tx = []\r\n\ty = []\r\n\tfor i in range(len(X)):\r\n\t\tx.append(X[i][0])\r\n\t\ty.append(X[i][1])\r\n\tplt.plot(x, y, 'o', alpha = 0.8, markersize = 20)\r\n\r\ndef plotPoints(X):\r\n\tx = []\r\n\ty = []\r\n\tfor i in range(len(X)):\r\n\t\tx.append(X[i][0])\r\n\t\ty.append(X[i][1])\r\n\tplt.plot(x, y, 'o')\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\t# For 3.1\r\n\tif argv[1] == '0':\r\n\t\tkind = 0\r\n\t\ttrainX, trainY = generate(kind=kind, noise = 0)\r\n\t\ttestX, testY = generate(kind=kind, st=0.05)\r\n\r\n\t\t# for sigma in range(3):\r\n\t\tfor sigma in [10, 1.5, 1, 0.5, 0.2]:\r\n\t\t\terrors = []\r\n\t\t\terrors_t = []\r\n\t\t\tx = []\r\n\t\t\tN = len(trainX)\r\n\t\t\tfor n in range(5, 50):\r\n\t\t\t# for n in [N]:\r\n\t\t\t\tnet = network(n)\r\n\t\t\t\tfor i in range(n):\r\n\t\t\t\t\tnet.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t\t\t\t# net.nodes[i].param[0] = trainX[i]\r\n\t\t\t\t\t# net.nodes[i].param[1] = 2 ** (- sigma)\r\n\t\t\t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t\t\tnet.leastSquares(trainX, trainY)\r\n\t\t\t\t# net.deltaRule(trainX, trainY, batch=1, maxIter=5000, lr=0.05)\r\n\t\t\t\tx.append(n)\r\n\t\t\t\terrors.append(net.calError(testX, testY))\r\n\t\t\t\tprint(net.calError(trainX, trainY))\r\n\t\t\t\terrors_t.append(net.calError(testX, testY))\r\n\r\n\t\t\tplt.plot(x, errors, label=sigma)\r\n\t\t\t# plt.plot(x, errors_t, 'k:', label=sigma)\r\n\t\tplt.legend()\r\n\t\tplt.xlabel(\"Number of units\")\r\n\t\tplt.ylabel(\"Error\")\r\n\t\t# plt.ylim((0, 0.2))\r\n\t\tplt.yscale('log')\r\n\t\tplt.show()\r\n\r\n\t\t'''\r\n\t\t# For visualize one case\r\n\t\tn = 7\r\n\t\tnet = network(n)\r\n\r\n\t\tfor i in range(n):\r\n\t\t\tnet.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t\t#net.nodes[i].param[1] = 0.5\r\n\r\n\t\t# net.deltaRule(trainX, trainY)\r\n\t\tnet.leastSquares(trainX, trainY)\r\n\t\tplot1D(testX, testY, 'data')\r\n\t\tplot1D(testX, net.squareForward(testX), 'transform', marker='.')\r\n\t\tplot1D(testX, net.forward(testX), 'output', marker='g')\r\n\t\tplt.legend()\r\n\t\tplt.show()\r\n\t\t'''\r\n\t\t\r\n\t# For 3.2\r\n\tif argv[1] == '1':\r\n\t\tkind = 0\r\n\t\ttrainX, trainY = generate(kind=kind, noise=0.1)\r\n\t\t# testX, testY = generate(kind=kind, st=0.05)\r\n\r\n\t\t# for sigma in [1, 0.5, 0.2]:\r\n\t\t# \terrors = []\r\n\t\t# \tx = []\r\n\t\t#\r\n\t\t# \tfor n in [5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20]:\r\n\t\t# \t\tnet = network(n)\r\n\t\t# 
\t\tfor i in range(n):\r\n\t\t# \t\t\tnet.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t# \t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t# \t\t# net.leastSquares(trainX, trainY)\r\n\t\t# \t\tnet.deltaRule(trainX, trainY, batch=1, maxIter=5000, lr=0.1)\r\n\t\t# \t\tx.append(n)\r\n\t\t# \t\terrors.append(net.calError(trainX, trainY))\r\n\t\t#\r\n\t\t#\r\n\t\t# \t# random distribution of RBF positioning\r\n\t\t# \t'''\r\n\t\t# \tfor n in [5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20]:\r\n\t\t# \t\terrors_sum = 0\r\n\t\t# \t\tfor j in range(5):\r\n\t\t# \t\t\tnet = network(n)\r\n\t\t# \t\t\tfor i in range(n):\r\n\t\t# \t\t\t\t# net.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t# \t\t\t\tnet.nodes[i].param[0] = [random.uniform(0, 2 * math.pi)]\r\n\t\t# \t\t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t# \t\t\tnet.leastSquares(trainX, trainY)\r\n\t\t# \t\t\t# net.deltaRule(trainX, trainY, batch=1, maxIter=5000, lr=0.05)\r\n\t\t# \t\t\terrors_sum += net.calError(trainX, trainY)\r\n\t\t# \t\tx.append(n)\r\n\t\t# \t\terrors.append(errors_sum)\r\n\t\t# \t'''\r\n\r\n\t\t# change eta (learning rate)\r\n\t\tfor lr in [0.3, 0.1, 0.05, 0.01]:\r\n\t\t\tsigma = 0.5\r\n\t\t\tn = 10\r\n\t\t\terrors = []\r\n\t\t\tx = []\r\n\r\n\t\t\tfor iters in range(0, 3001, 50):\r\n\t\t\t\tnet = network(n)\r\n\t\t\t\tfor i in range(n):\r\n\t\t\t\t\tnet.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t\t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t\t\t# net.leastSquares(trainX, trainY)\r\n\t\t\t\tnet.deltaRule(trainX, trainY, batch=1, maxIter=iters, lr=lr)\r\n\t\t\t\tx.append(iters)\r\n\t\t\t\terrors.append(net.calError(trainX, trainY))\r\n\r\n\t\t\tplt.plot(x, errors, label=lr)\r\n\t\tplt.legend()\r\n\t\tplt.xlabel(\"Number of iters\")\r\n\t\tplt.ylabel(\"Error\")\r\n\t\t# plt.yscale('log')\r\n\t\tplt.show()\r\n\r\n\t\t# For visualize one case\r\n\t\t'''\r\n\t\tn = 150\r\n\t\tnet = network(n)\r\n\r\n\t\tfor i in range(n):\r\n\t\t\tnet.nodes[i].param[0] = [2 * math.pi / (n - 1) * i]\r\n\t\t\t#net.nodes[i].param[1] = 0.5\r\n\r\n\t\tnet.deltaRule(trainX, trainY, lr = 0.001, maxIter = 20000)\r\n\r\n\t\tplot1D(testX, testY, 'test')\r\n\t\t#plot1D(testX, net.squareForward(testX), 'normalized')\r\n\t\tplot1D(testX, net.forward(testX), 'network')\r\n\t\tprint (\"absolute residual error =\", net.calError(testX, testY))\r\n\t\tplt.legend()\r\n\t\tplt.show()\r\n\t\t'''\r\n\t\t\r\n\tif argv[1] == '2': # For 3.3.1 Competitive Learning\r\n\t\tkind = 0\r\n\t\ttrainX, trainY = generate(kind=kind, noise = 0.0)\r\n\t\ttestX, testY = generate(kind=kind, st=0.05)\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\ttestN = 10\r\n\t\tnet = network(testN)\r\n\t\tnet.CL(trainX, trainY, deadNode = False)\r\n\t\tfor i in range(testN):\r\n\t\t\tplt.plot([net.nodes[i].param[0][0]], [0], 'ro')\r\n\t\tplot1D(trainX, trainY, 'train')\r\n\t\tplt.show()\r\n\t\t'''\r\n\t\t\r\n\t\t# for sigma in range(3):\r\n\t\tfor sigma in [1, 0.5, 0.2]:\r\n\t\t\terrors = []\r\n\t\t\terrors_t = []\r\n\t\t\tx = []\r\n\t\t\tN = len(trainX)\r\n\t\t\tfor n in range(5, 13):\r\n\t\t\t# for n in [N]:\r\n\t\t\t\tnet = network(n)\r\n\t\t\t\tfor i in range(n):\r\n\t\t\t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t\t\tnet.CL(trainX, trainY, deadNode = True)\r\n\t\t\t\tnet.leastSquares(trainX, trainY)\r\n\t\t\t\t# net.deltaRule(trainX, trainY, batch=1, maxIter=5000, lr=0.05)\r\n\t\t\t\tx.append(n)\r\n\t\t\t\terrors.append(net.calError(testX, testY))\r\n\t\t\t\tprint(net.calError(trainX, trainY))\r\n\t\t\t\terrors_t.append(net.calError(testX, 
testY))\r\n\r\n\t\t\tplt.plot(x, errors_t, label=sigma)\r\n\t\t\t# plt.plot(x, errors_t, 'k:', label=sigma)\r\n\t\tplt.legend()\r\n\t\tplt.xlabel(\"Number of units\")\r\n\t\tplt.ylabel(\"Error\")\r\n\t\t# plt.ylim((0, 0.2))\r\n\t\tplt.yscale('log')\r\n\t\tplt.show()\r\n\t\t'''\r\n\tif argv[1] == '3':\r\n\t\tkind = 2\r\n\t\ttrainX, trainY, trainY1 = generate(kind = kind)\r\n\t\ttestX, testY, testY1 = generate(kind = kind, test = 1)\r\n\t\tnode.dim = 2\r\n\t\t\r\n\t\tplotPoints(trainX)\r\n\r\n\t\t\r\n\t\ttestN = 60\r\n\t\tnet = network(testN)\r\n\t\tnet.CL(trainX, trainY, deadNode = True)\r\n\t\tnetNodes = []\r\n\t\tfor i in range(testN):\r\n\t\t\tnetNodes.append(copy.copy(net.nodes[i].param[0]))\r\n\t\tplotPoints1(netNodes)\r\n\t\tplt.show()\r\n\t\t\r\n\t\t\r\n\t\t# for sigma in range(3):\r\n\t\tfor sigma in [1.0, 0.5, 0.2]:\r\n\t\t\terrors = []\r\n\t\t\terrors_t = []\r\n\t\t\tx = []\r\n\t\t\tN = len(trainX)\r\n\t\t\tfor n in range(50, 100):\r\n\t\t\t# for n in [N]:\r\n\t\t\t\tnet = network(n)\r\n\t\t\t\tfor i in range(n):\r\n\t\t\t\t\tnet.nodes[i].param[1] = sigma\r\n\t\t\t\tnet.CL(trainX, trainY, deadNode = True)\r\n\t\t\t\tnet1 = network(n)\r\n\t\t\t\tfor i in range(n):\r\n\t\t\t\t\tnet1.nodes[i].param[0] = copy.copy(net.nodes[i].param[0])\r\n\t\t\t\t\tnet1.nodes[i].param[1] = sigma\t\t\t\t\r\n\r\n\t\t\t\tnet.leastSquares(trainX, trainY)\r\n\t\t\t\tnet1.leastSquares(trainX, trainY1)\t\t\t\t\r\n\t\t\t\t# net.deltaRule(trainX, trainY, batch=1, maxIter=5000, lr=0.05)\r\n\t\t\t\tx.append(n)\r\n\t\t\t\terrors.append(cal2DError(net, net1, trainX, trainY, trainY1))\r\n\t\t\t\t# print(net.calError(trainX, trainY))\r\n\t\t\t\terrors_t.append(cal2DError(net, net1, testX, testY, testY1))\r\n\r\n\t\t\tplt.plot(x, errors, label= \"train\" + str(sigma))\r\n\t\t\tplt.plot(x, errors_t, '--', label= \"test\" + str(sigma))\t\t\t\r\n\t\t\t# plt.plot(x, errors_t, 'k:', label=sigma)\r\n\t\tplt.legend()\r\n\t\tplt.xlabel(\"Number of units\")\r\n\t\tplt.ylabel(\"Error\")\r\n\t\t# plt.ylim((0, 0.2))\r\n\t\tplt.yscale('log')\r\n\t\tplt.show()\t\t\r\n\t\t\r\n\t\t\r\n","repo_name":"WangZesen/DD2437-Lab2","sub_path":"part1/3.3.4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37609034747","text":"# File: TestLinkedList.py\n\n# Description: A program to utilize the data structure linked list in various situations\n\n# Student's Name: Stephen Rauner\n\n# Student's UT EID: STR428\n\n# Course Name: CS 313E \n\n# Unique Number: 50945\n\n# Date Created: 4/3/2016\n\n# Date Last Modified: 4/4/2016\n\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n\nclass Link(object):\n\tdef __init__ (self, data, next = None):\n\t\tself.data = data\n\t\tself.next = next\n\n\tdef __str__(self):\n\t\treturn str(self.data)\n\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n\nclass LinkedList(object):\n\tdef __init__(self):\n\t\tself.first = None\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# get number of links\n\tdef getNumLinks(self):\n\t\tcount = 
1\n\t\tcurrent = self.first\n\n\t\tif (current == None):\n\t\t\treturn 0\n\n\t\twhile (current.next != None):\n\t\t\tcount += 1\n\t\t\tcurrent = current.next\n\n\t\treturn count\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# add data at the front of a list\n\tdef addFirst(self, data):\n\n\t\tnewLink = Link(data)\n\t\tnewLink.next = self.first\n\t\tself.first = newLink\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# add data at the end of a list\n\tdef addLast(self, data):\n\n\t\tnewLink = Link(data)\n\t\tcurrent = self.first\n\n\t\tif (current == None):\n\t\t\tself.first = newLink\n\t\t\treturn\n\n\t\twhile (current.next != None):\n\t\t\tcurrent = current.next\n\n\t\tcurrent.next = newLink\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# add data in an ordered list in ascending order\n\tdef addInOrder(self, data):\n\n\t\tnewLink = Link(data)\n\t\tprevious = self.first\n\t\tcurrent = self.first\n\n\t\tif (current == None):\n\t\t\tnewLink.next = None\n\t\t\tself.first = newLink\n\t\t\treturn\n\n\t\twhile (newLink.data > current.data):\n\t\t\tprevious = current\n\t\t\tif (current.next == None):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcurrent = current.next\n\n\t\tif (current == self.first):\n\t\t\tself.first = newLink\n\t\t\tnewLink.next = current\n\t\telif (current.next == None):\n\t\t\tprevious.next = newLink\n\t\t\tnewLink.next = None\n\t\telse:\n\t\t\tprevious.next = newLink\n\t\t\tnewLink.next = current\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# search in an unordered list, return None if not found\n\tdef findUnordered(self, data):\n\n\t\tcurrent = self.first\n\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current.data != data):\n\t\t\tif (current.next == None):\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tcurrent = current.next\n\n\t\treturn current\n\t\t\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# search in an ordered list, return None if not found\n\tdef findOrdered(self, data):\n\n\t\tcurrent = self.first\n\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current.data < data):\n\t\t\tif (current.next == None):\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tcurrent = current.next\n\t\treturn current\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# delete and return link from an unordered list or None if not found\n\tdef delete(self, data):\n\n\t\tcurrent = self.first\n\t\tprevious = self.first\n\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current.data != data):\n\t\t\tif (current.next == None):\n\t\t\t\treturn None\n\n\t\t\telse:\n\t\t\t\tprevious = current\n\t\t\t\tcurrent = current.next\n\n\t\tif (current == self.first):\n\t\t\tself.first = self.first.next\n\n\t\telse:\n\t\t\tprevious.next = current.next\n\t\t\n\t\treturn current\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# string representation of data 10 items to a line, 2 spaces between data\n\tdef __str__(self):\n\t\tlength = self.getNumLinks()\n\t\tcurrent = self.first\n\t\tcount = 0\n\n\t\tif (current == None):\n\t\t\treturn \"List is Empty.\"\n\n\t\tst = \"\"\n\n\t\twhile (current != None):\n\n\t\t\tif (count % 10 == 9):\n\t\t\t\tst += \"{}\\n\".format(str(current.data))\n\n\t\t\telse:\n\t\t\t\tst += \"{:<4}\".format(current.data)\n\n\t\t\tcurrent = current.next\n\t\t\tcount += 
1\n\n\t\treturn st\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# copy the contents of a list and return new list\n\tdef copyList(self):\n\n\t\tnew_list = LinkedList()\n\t\tcurrent = self.first\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current != None):\n\t\t\tdata = current.data\n\t\t\tnew_list.addLast(data)\n\t\t\tcurrent = current.next\n\n\t\treturn new_list\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# reverse the contents of a list and return new list\n\tdef reverseList(self):\n\t\tcurrent = self.first\n\t\tnew = LinkedList()\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current != None):\n\t\t\tdata = current.data\n\t\t\tnew.addFirst(data)\n\t\t\tcurrent = current.next\n\n\t\treturn new\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# sort the contents of a list in ascending order and return new list\n\tdef sortList(self):\n\n\t\tcurrent = self.first\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\tnew = LinkedList()\n\t\tnew.addFirst(current.data)\n\t\tcurrent = current.next\n\t\twhile (current != None):\n\t\t\tnew.addInOrder(current.data)\n\t\t\tcurrent = current.next\n\n\t\treturn new\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# return True if a list is sorted in ascending order or False otherwise\n\tdef isSorted(self):\n\n\t\tisSorted = True\n\t\tcurrent = self.first\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\twhile (current.next != None):\n\t\t\tisSorted = isSorted and (current.data <= current.next.data)\n\t\t\tcurrent = current.next\n\t\treturn isSorted\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# return True if a list is empty or False otherwise\n\tdef isEmpty(self):\n\n\t\treturn (self.first == None)\n\t\t\t\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# Merge two sorted lists and return new list in ascending order\n\tdef mergeList(self, b):\n\n\t\tnewList = LinkedList()\n\t\ts_current = self.first\n\t\tb_current = b.first\n\n\t\tif (s_current == None) and (b_current == None):\n\t\t\treturn None\n\t\telif (s_current == None):\n\t\t\treturn b\n\t\telif (b_current == None):\n\t\t\treturn self\n\n\t\twhile (s_current != None) and (b_current != None):\n\t\t\tif (s_current.data <= b_current.data):\n\t\t\t\tnewList.addLast(s_current.data)\n\t\t\t\ts_current = s_current.next\n\t\t\telse:\n\t\t\t\tnewList.addLast(b_current.data)\n\t\t\t\tb_current = b_current.next\n\n\t\t# append the unmerged tail of whichever list is longer\n\t\twhile (s_current != None):\n\t\t\tnewList.addLast(s_current.data)\n\t\t\ts_current = s_current.next\n\t\twhile (b_current != None):\n\t\t\tnewList.addLast(b_current.data)\n\t\t\tb_current = b_current.next\n\n\t\treturn newList\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# test if two lists are equal, item by item and return True if so\n\tdef isEqual(self, b):\n\n\t\tif (self.getNumLinks() != b.getNumLinks()):\n\t\t\treturn False\n\t\tnumLinks = self.getNumLinks()\n\t\ts_current = self.first\n\t\tb_current = b.first\n\t\tfor i in range (numLinks):\n\t\t\tif (s_current.data != b_current.data):\n\t\t\t\treturn False\n\t\t\telif (s_current.next == None):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\ts_current = s_current.next\n\t\t\t\tb_current = b_current.next\n\t\treturn True\n\n# /////////////////////////////////////////////////////////////////////////////////\n\n\t# return a new list, keeping only the first occurrence of an element \n\t# and removing all duplicates. 
Do not change the order of the elements.\n\tdef removeDuplicates(self):\n\t\tcurrent = self.first\n\t\tif (current == None):\n\t\t\treturn None\n\n\t\tnew_list = LinkedList()\n\t\tseen = set()\n\t\twhile (current != None):\n\t\t\tif (current.data not in seen):\n\t\t\t\tseen.add(current.data)\n\t\t\t\tnew_list.addLast(current.data)\n\t\t\tcurrent = current.next\n\n\t\treturn new_list\n\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n# /////////////////////////////////////////////////////////////////////////////////\n\ndef main():\n\n\tdata_ordered = [2, 3, 4, 5, 6, 7, 7, 8, 10, 11, 12, 13, 15, 20, 21, 23]\n\t\n\t# test methods addFirst() and __str__() by adding more than\n\t# 10 items to a list and printing it\n\tdata_unordered = [7, 6, 5, 4, 3, 2, 3, 4, 5, 6, 4, 8, 9, 6, 4] \n\tlists = []\n\n\tlist1 = LinkedList()\n\tlists.append(list1)\n\n\tfor el in data_ordered:\n\t\tlist1.addFirst(el)\n\n\tprint (\"\\n-----ADDFIRST TEST-------------\\n\")\n\tprint (\"Data used for list: \\n\", data_ordered)\n\tprint (list1)\n\n\t# test method addLast()\n\tlist2 = LinkedList()\n\tlists.append(list2)\n\n\tfor el in data_unordered:\n\t\tlist2.addLast(el)\n\tprint (\"\\n-----ADDLAST TEST-----------\\n\")\n\tprint (\"Data used for list: \\n\", data_unordered)\n\tprint (list2)\n\n\t# test method addInOrder()\n\tprint (\"\\n-----ADDINORDER TEST-----------\\n\")\n\n\tlist3 = list2.copyList()\n\tlists.append(list3)\n\n\n\tlist3.addInOrder(9)\n\tlist3.addInOrder(1)\n\tlist3.addInOrder(24)\n\n\tprint (list3)\n\n\t# test method getNumLinks()\n\tprint (\"-----GETNUMLINKS TEST-----------\\n\")\n\n\tfor i in range(len(lists)):\n\t\tprint (\"List {}: {} links\".format(i + 1, lists[i].getNumLinks()))\n\n\n\t# test method findUnordered()\n\t# consider two cases - item is there, item is not there\n\tprint (\"-----FINDUNORDERED TEST-----------\\n\")\n\tprint(\"Searching for 7 and 1 in list2:\")\n\tprint(\"list2 = \", list2)\n\n\tlist2.findUnordered(7)\n\tlist2.findUnordered(1)\n\n\tprint(\"list3 is sorted:\", list3.isSorted())\n\n\t# test method findOrdered()\n\t# consider two cases - item is there, item is not there\n\tlist1.findUnordered(7)\n\tlist1.findUnordered(1)\n\n\t# test method delete()\n\t# consider two cases - item is there, item is not there\n\n\n\t# test method copyList()\n\n\t# test method reverseList()\n\n\t# test method sortList()\n\n\t# test method isSorted()\n\t# consider two cases - list is/is not sorted\n\n\t# test method isEmpty()\n\n\t# test method mergeList()\n\n\t# test method isEqual()\n\t# consider two cases - lists are/aren't equal\n\n\t# test removeDuplicates()\n\n\n\n\nmain()","repo_name":"stOracle/Migrate","sub_path":"Programming/CS313E/TestLinkedList.py","file_name":"TestLinkedList.py","file_ext":"py","file_size_in_byte":9762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29080018615","text":"import unittest\nimport os\n\nfrom qtpy import QtWidgets\n\nfrom tests.utility import QtTest\n\nfrom t_rax.model.DiamondModel import DiamondModel\n\nunittest_path = os.path.dirname(__file__)\nunittest_files_path = os.path.join(unittest_path, '..', 'test_files')\ntest_file = os.path.join(unittest_files_path, 'temper_009.spe')\n\n\nclass DiamondModelTest(QtTest):\n    def setUp(self):\n        self.model = DiamondModel()\n\n    def test_get_pressure(self):\n        self.model.reference_position = 1334.\n        self.model.sample_position = 1335.\n        
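# the sample line sits above the 1334 reference position, so the computed pressure should be positive\n        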
self.assertGreater(self.model.get_pressure(), 0)\n\n def test_change_reference_position(self):\n self.model.sample_position = 1350\n p1 = self.model.get_pressure()\n self.model.reference_position = 1338\n self.assertLess(self.model.get_pressure(), p1)\n","repo_name":"CPrescher/T-rax","sub_path":"tests/model_test/test_DiamondModel.py","file_name":"test_DiamondModel.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"42623625147","text":"import datetime\nimport re\n\n\ndef test1():\n\tinput = '0: 3\\n1: 2\\n4: 4\\n6: 4'\n\t#with open('day13.txt', 'r') as content_file: input = content_file.read()\n\td = dict()\n\tfor line in input.splitlines():\n\t\tln = list(map(lambda x: int(x), re.sub('\\W+',' ', line).split()))\n\t\t#print(ln)\n\t\td[ln[0]] = ln[1]\n\n\tsev = 0\n\tlun = max(d.keys())\n\tfor layer in range(lun+1):\n\t\trng = d.get(layer, 0)\n\t\tstatus = -1 if rng == 0 else 0\n\t\tstatus = layer % (rng-1) if rng > 0 and layer > 0 else status\n\t\tif rng == 2:\n\t\t\tstatus = 0 if layer %2 == 0 else 1\n\n\t\tprint('layer', layer, 'range', rng, 'status', status)\n\t\tsev += layer * rng if status == 0 else 0\n\treturn sev\n\n\n\n\nclass layer:\n\n\tdef __init__(self, range):\n\t\tself.range = range\n\t\t\n\tdef hit(self, time):\n\t\tb = time % (self.range * 2 - 2) == 0\n\t\treturn b\n\n\tdef severity(self, i):\n\t\ts = i * self.range if i % (self.range * 2 - 2) == 0 else 0\n\t\treturn s\n\n\ndef scan(firewall, time):\n\tfor i in range(max(firewall.keys())+1):\n\t\tif i in firewall and firewall[i].hit(time + i):\n\t\t\treturn False\n\treturn True\n\ndef severity(firewall):\n\tsev = 0\n\tfor i in range(max(firewall.keys())+1):\n\t\tif i in firewall:\n\t\t sev += firewall[i].severity(i)\n\treturn sev\n\n\ndef test2():\n\tinput = '0: 3\\n1: 2\\n4: 4\\n6: 4'\n\twith open('day13.txt', 'r') as content_file: input = content_file.read()\n\t\n\tfirewall = {}\n\t\n\tfor line in input.splitlines():\n\t\ti, rng = map(int, re.sub('\\W+',' ', line).split())\n\t\tfirewall[i] = layer(rng)\n\n\tprint('severity', severity(firewall))\n\n\tfound = False\n\ttime = 0\n\twhile not found:\n\t\tfound = scan(firewall, time)\n\t\tif time % 20000 == 0:\n\t\t print('found', found, 'time', time)\t\t\n\t\ttime += 1\n\t\t\n\treturn time -1\n\nprint(datetime.datetime.now())\nprint(test2(),datetime.datetime.now())\n","repo_name":"yomodev/AdventOfCode","sub_path":"AdventCode2017_Python/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15636861804","text":"from typing import Any\nimport torch\nfrom torch import Tensor\nfrom torch import nn\n\n\nclass InceptionV4(nn.Module):\n\n def __init__(\n self,\n k: int = 192,\n l: int = 224,\n m: int = 256,\n n: int = 384,\n num_classes: int = 1000,\n ) -> None:\n super(InceptionV4, self).__init__()\n self.features = nn.Sequential(\n InceptionV4Stem(3),\n InceptionA(384),\n InceptionA(384),\n InceptionA(384),\n InceptionA(384),\n ReductionA(384, k, l, m, n),\n InceptionB(1024),\n InceptionB(1024),\n InceptionB(1024),\n InceptionB(1024),\n InceptionB(1024),\n InceptionB(1024),\n InceptionB(1024),\n ReductionB(1024),\n InceptionC(1536),\n InceptionC(1536),\n InceptionC(1536),\n )\n\n self.global_average_pooling = nn.AdaptiveAvgPool2d((1, 1))\n\n self.linear = nn.Linear(1536, num_classes)\n\n # Initialize neural network weights\n 
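# truncated-normal initialization; layers fall back to stddev 0.1 unless they define their own\n        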
self._initialize_weights()\n\n def forward(self, x: Tensor) -> Tensor:\n out = self._forward_impl(x)\n\n return out\n\n # Support torch.script function\n def _forward_impl(self, x: Tensor) -> Tensor:\n out = self.features(x)\n out = self.global_average_pooling(out)\n out = torch.flatten(out, 1)\n out = self.linear(out)\n\n return out\n\n def _initialize_weights(self) -> None:\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n stddev = float(module.stddev) if hasattr(module, \"stddev\") else 0.1\n torch.nn.init.trunc_normal_(module.weight, mean=0.0, std=stddev, a=-2, b=2)\n elif isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n self.relu = nn.ReLU(True)\n\n def forward(self, x: Tensor) -> Tensor:\n out = self.conv(x)\n out = self.bn(out)\n out = self.relu(out)\n\n return out\n\nclass InceptionV4Stem(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(InceptionV4Stem, self).__init__()\n self.conv2d_1a_3x3 = BasicConv2d(in_channels, 32, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0))\n\n self.conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0))\n self.conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n self.mixed_3a_branch_0 = nn.MaxPool2d((3, 3), (2, 2))\n self.mixed_3a_branch_1 = BasicConv2d(64, 96, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0))\n\n self.mixed_4a_branch_0 = nn.Sequential(\n BasicConv2d(160, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0)),\n )\n self.mixed_4a_branch_1 = nn.Sequential(\n BasicConv2d(160, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(64, 64, kernel_size=(1, 7), stride=(1, 1), padding=(0, 3)),\n BasicConv2d(64, 64, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0)),\n BasicConv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0))\n )\n\n self.mixed_5a_branch_0 = BasicConv2d(192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0))\n self.mixed_5a_branch_1 = nn.MaxPool2d((3, 3), (2, 2))\n\n def forward(self, x: Tensor) -> Tensor:\n out = self.conv2d_1a_3x3(x)\n out = self.conv2d_2a_3x3(out)\n out = self.conv2d_2b_3x3(out)\n\n mixed_3a_branch_0 = self.mixed_3a_branch_0(out)\n mixed_3a_branch_1 = self.mixed_3a_branch_1(out)\n mixed_3a_out = torch.cat([mixed_3a_branch_0, mixed_3a_branch_1], 1)\n\n mixed_4a_branch_0 = self.mixed_4a_branch_0(mixed_3a_out)\n mixed_4a_branch_1 = self.mixed_4a_branch_1(mixed_3a_out)\n mixed_4a_out = torch.cat([mixed_4a_branch_0, mixed_4a_branch_1], 1)\n\n mixed_5a_branch_0 = self.mixed_5a_branch_0(mixed_4a_out)\n mixed_5a_branch_1 = self.mixed_5a_branch_1(mixed_4a_out)\n mixed_5a_out = torch.cat([mixed_5a_branch_0, mixed_5a_branch_1], 1)\n\n return mixed_5a_out\n\nclass InceptionV4ResNetStem(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(InceptionV4ResNetStem, self).__init__()\n self.features = nn.Sequential(\n BasicConv2d(in_channels, 32, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0)),\n BasicConv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(32, 64, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1)),\n nn.MaxPool2d((3, 3), (2, 2)),\n BasicConv2d(64, 80, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(80, 192, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0)),\n nn.MaxPool2d((3, 3), (2, 2)),\n )\n self.branch_0 = BasicConv2d(192, 96, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n self.branch_1 = nn.Sequential(\n BasicConv2d(192, 48, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(48, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)),\n )\n self.branch_2 = nn.Sequential(\n BasicConv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n BasicConv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n )\n self.branch_3 = nn.Sequential(\n nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),\n BasicConv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n )\n\n def forward(self, x):\n features = self.features(x)\n branch_0 = self.branch_0(features)\n branch_1 = self.branch_1(features)\n branch_2 = self.branch_2(features)\n branch_3 = self.branch_3(features)\n\n out = torch.cat([branch_0, branch_1, branch_2, branch_3], 1)\n\n return out\n\nclass InceptionA(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(InceptionA, self).__init__()\n self.branch_0 = BasicConv2d(in_channels, 96, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n self.branch_1 = nn.Sequential(\n BasicConv2d(in_channels, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n )\n self.branch_2 = nn.Sequential(\n BasicConv2d(in_channels, 64, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n BasicConv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n )\n self.brance_3 = nn.Sequential(\n nn.AvgPool2d((3, 3), (1, 1), (1, 1), count_include_pad=False),\n BasicConv2d(384, 96, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n )\n\n def forward(self, x: Tensor) -> Tensor:\n branch_0 = self.branch_0(x)\n branch_1 = self.branch_1(x)\n branch_2 = self.branch_2(x)\n brance_3 = self.brance_3(x)\n\n out = torch.cat([branch_0, branch_1, branch_2, brance_3], 1)\n\n return out\n\nclass ReductionA(nn.Module):\n def __init__(\n self,\n in_channels: int,\n k: int,\n l: int,\n m: int,\n n: int,\n ) -> None:\n super(ReductionA, self).__init__()\n self.branch_0 = BasicConv2d(in_channels, n, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0))\n self.branch_1 = nn.Sequential(\n BasicConv2d(in_channels, k, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(k, l, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n BasicConv2d(l, m, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0)),\n )\n self.branch_2 = nn.MaxPool2d((3, 3), (2, 2))\n\n def forward(self, x: Tensor) -> Tensor:\n branch_0 = self.branch_0(x)\n branch_1 = self.branch_1(x)\n branch_2 = self.branch_2(x)\n\n out = torch.cat([branch_0, branch_1, branch_2], 1)\n\n return out\n\nclass InceptionB(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(InceptionB, self).__init__()\n self.branch_0 = BasicConv2d(in_channels, 384, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n self.branch_1 = nn.Sequential(\n BasicConv2d(in_channels, 192, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(192, 224, kernel_size=(1, 7), stride=(1, 1), padding=(0, 3)),\n 
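# the 1x7 followed by 7x1 pair factorizes a 7x7 convolution at lower cost\n            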
BasicConv2d(224, 256, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0)),\n )\n self.branch_2 = nn.Sequential(\n BasicConv2d(in_channels, 192, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(192, 192, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0)),\n BasicConv2d(192, 224, kernel_size=(1, 7), stride=(1, 1), padding=(0, 3)),\n BasicConv2d(224, 224, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0)),\n BasicConv2d(224, 256, kernel_size=(1, 7), stride=(1, 1), padding=(0, 3)),\n )\n self.branch_3 = nn.Sequential(\n nn.AvgPool2d((3, 3), (1, 1), (1, 1), count_include_pad=False),\n BasicConv2d(in_channels, 128, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n )\n\n def forward(self, x: Tensor) -> Tensor:\n branch_0 = self.branch_0(x)\n branch_1 = self.branch_1(x)\n branch_2 = self.branch_2(x)\n branch_3 = self.branch_3(x)\n\n out = torch.cat([branch_0, branch_1, branch_2, branch_3], 1)\n\n return out\n\nclass ReductionB(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(ReductionB, self).__init__()\n self.branch_0 = nn.Sequential(\n BasicConv2d(in_channels, 192, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0)),\n )\n self.branch_1 = nn.Sequential(\n BasicConv2d(in_channels, 256, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(256, 256, kernel_size=(1, 7), stride=(1, 1), padding=(0, 3)),\n BasicConv2d(256, 320, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0)),\n BasicConv2d(320, 320, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0)),\n )\n self.branch_2 = nn.MaxPool2d((3, 3), (2, 2))\n\n def forward(self, x: Tensor) -> Tensor:\n branch_0 = self.branch_0(x)\n branch_1 = self.branch_1(x)\n branch_2 = self.branch_2(x)\n\n out = torch.cat([branch_0, branch_1, branch_2], 1)\n\n return out\n\nclass InceptionC(nn.Module):\n def __init__(\n self,\n in_channels: int,\n ) -> None:\n super(InceptionC, self).__init__()\n self.branch_0 = BasicConv2d(in_channels, 256, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n\n self.branch_1 = BasicConv2d(in_channels, 384, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n self.branch_1_1 = BasicConv2d(384, 256, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))\n self.branch_1_2 = BasicConv2d(384, 256, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))\n\n self.branch_2 = nn.Sequential(\n BasicConv2d(in_channels, 384, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n BasicConv2d(384, 448, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0)),\n BasicConv2d(448, 512, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1)),\n )\n self.branch_2_1 = BasicConv2d(512, 256, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))\n self.branch_2_2 = BasicConv2d(512, 256, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))\n\n self.branch_3 = nn.Sequential(\n nn.AvgPool2d((3, 3), (1, 1), (1, 1)),\n BasicConv2d(in_channels, 256, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))\n )\n\n def forward(self, x: Tensor) -> Tensor:\n branch_0 = self.branch_0(x)\n branch_1 = self.branch_1(x)\n\n branch_1_1 = self.branch_1_1(branch_1)\n branch_1_2 = self.branch_1_2(branch_1)\n x1 = torch.cat([branch_1_1, branch_1_2], 1)\n\n branch_2 = self.branch_2(x)\n branch_2_1 = self.branch_2_1(branch_2)\n branch_2_2 = self.branch_2_2(branch_2)\n x2 = torch.cat([branch_2_1, branch_2_2], 1)\n\n x3 = self.branch_3(x)\n\n out = torch.cat([branch_0, x1, x2, x3], 1)\n\n return 
out\n\n","repo_name":"sidsid84-kor/xeception","sub_path":"googlenetv4.py","file_name":"googlenetv4.py","file_ext":"py","file_size_in_byte":13210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7887391444","text":"from sys import stdin, stdout\n\n# 4 7 9 [2] [3]\n# 3 4 7 9 [2]\n# 2 3 4 7 9\n# 4 7 9 [2 3]\n\n# [4 7 9] 2 3\n# [4] 7 9 2 3\n# [7] 9 2 3 4\n# [9] 2 3 4 7\n# 2 3 4 7 9\n\n# 4 [7 9] 3 3 10\n# 1 2 3 0 0 4\n# 1 2 3 4\n# find longest subseq that already sorted\n\n# 1\n# 1 2\n# 1 2 3\n# 10, 9, 1, 7, 0, 8, 0, 7, 3, 6, 2, 5, 4, 5, 11, 6, 7, 12, 0, 6\n# 5 5 6 6\n\n# 2 2 3 3\n# dp:\n# dp[i,0]: counts of a[i], ex 3 [3]\n# dp[i,1]: still has a[i], ex 2 2 [3]\n# dp[i,2]: no a[i] ex 2 2 3 [3]\n\ndef flying_sort(n, a):\n\n b = sorted(a)\n\n dic = {}\n seq = 1\n\n dic[b[0]] = seq\n\n num = [0 for i in range(n + 1)]\n head = [0 for i in range(n + 1)]\n tail = [-1 for i in range(n+1)]\n pos = [0 for i in range(n+1)]\n\n for i in range(1, len(b)):\n if b[i] != b[i-1]:\n seq += 1\n dic[b[i]] = seq\n\n for i in range(len(a)):\n a[i] = dic[a[i]]\n num[a[i]] += 1\n tail[a[i]] = i+1\n if head[a[i]] == 0:\n head[a[i]] = i+1\n\n # find longest subseq\n dp = [[0, 0, 0] for i in range(n+1)]\n maxseq = 0\n\n #print(a)\n\n for i in range(1, n+1):\n\n v = a[i-1]\n dp[i][0] = dp[pos[v]][0] + 1\n\n #print(v)\n\n dp[i][1] = max(dp[pos[v]][1] + 1, dp[pos[v - 1]][0] + 1, dp[pos[v - 1]][2] + 1)\n\n if tail[v] == i:\n dp[i][2] = dp[head[v]][1] + num[v] - 1\n\n pos[v] = i\n\n for k in range(3):\n maxseq = max(maxseq, dp[i][k])\n\n #for i in range(len(dp)):\n # print(a[i-1])\n # print(dp[i])\n\n res = len(a) - maxseq\n return res\n\n\nif __name__ == '__main__':\n t = int(stdin.readline())\n\n for i in range(t):\n n = int(stdin.readline())\n a = list(map(int, stdin.readline().split()))\n stdout.write(str(flying_sort(n, a)) + '\\n')\n","repo_name":"tycyd/codeforces","sub_path":"dp/1367F Flying Sort.py","file_name":"1367F Flying Sort.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23752016232","text":"\"\"\"\nFinding line slope and intercept\n19.08.2018 00:59\n\"\"\"\n\nprint(\"This program will compute a line equation y=mx+c, slope and intercept, given two points\")\n\n# Define new function for keyboard input\n\n\ndef arguments(prompt_msg):\n while True: # create loop for error check\n try:\n variable = float(input(prompt_msg)) # convert user input to float\n except ValueError:\n # if input is not a number, retry\n print(\"invalid number, please try again:\")\n continue\n else:\n # user input was valid, end\n break\n return variable\n\n\nprint(\"\\nDefine points coordinates:\\n\")\npx1 = arguments(\"Please input x1 value: \")\npy1 = arguments(\"Please input y1 value: \")\npx2 = arguments(\"Please input x2 value: \")\npy2 = arguments(\"Please input y2 value: \")\n\n\ndef slope(x1, y1, x2, y2):\n m = (y2 - y1) / (x2-x1)\n return m\n\n\ndef interceptx(x1, y1, x2, y2):\n m = slope(x1, y1, x2, y2)\n x = (m*x1-y1) / m\n return x\n\n\ndef intercepty(x1, y1, x2, y2):\n m = slope(x1, y1, x2, y2)\n y = -m*x1+y1\n return y\n\n\nprint(\"\\nSlope of line:\", slope(px1, py1, px2, py2))\nprint(\"X intercept:\", interceptx(px1, py1, px2, py2))\nprint(\"Y intercept:\", intercepty(px1, py1, px2, py2))\n","repo_name":"pBogey/hello-world","sub_path":"04. Functions/03. Slope-Intercept.py","file_name":"03. 
Slope-Intercept.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4285411592","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport re\nimport sys\n\nimport django\n\nsys.path[0:0] = [\n os.path.abspath('.'),\n os.path.abspath('../../'),\n]\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'app.settings.djangotest'\ndjango.setup()\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Enquiry Management'\ncopyright = '2020, FIXME, provide author names'\nauthor = 'FIXME, provide author names'\n\n# The full version, including alpha/beta/rc tags\nrelease = '1.0.0'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"celery.contrib.sphinx\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n # 'canonical_url': '',\n # 'logo_only': False,\n # 'display_version': True,\n # 'prev_next_buttons_location': 'bottom',\n # 'style_external_links': False,\n # 'vcs_pageview_mode': '',\n # 'style_nav_header_background': 'white',\n # Toc options\n # 'collapse_navigation': True,\n # 'sticky_navigation': True,\n 'navigation_depth': 4,\n 'includehidden': True,\n 'titles_only': False\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The link substitutions, are defined in the README for it to work on GitHub,\n# we extract everything after the \".. rst_prolog\" comment from the README\n# so the substitutions work everywhere, including docstrings. 
Due to this,\n# the doc compilation shows \"Duplicate substitution definition\" warnings.\nrst_prolog = re.search(r'rst_prolog.*?$(.*)',\n open('../../README.rst').read(),\n re.MULTILINE | re.DOTALL).group(1)\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.8\", None),\n \"django\": (\n \"https://docs.djangoproject.com/en/dev/\",\n \"https://docs.djangoproject.com/en/dev/_objects/\",\n ),\n \"requests\": (\n \"https://requests.readthedocs.io/en/master/\",\n \"https://requests.readthedocs.io/en/master/objects.inv\",\n ),\n \"mohawk\": (\n \"https://mohawk.readthedocs.io/en/latest/\",\n \"https://mohawk.readthedocs.io/en/latest/objects.inv\",\n ),\n}\n\nautodoc_inherit_docstrings = False\n","repo_name":"uktrade/enquiry-mgmt-tool","sub_path":"doc/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"2424977918","text":"import torch\nimport torch.nn as nn\nfrom zmq import device\n\nfrom transfromer import Transformer\n\nif __name__ == \"__main__\":\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n src = torch.randint(0, 10, (1, 10)).to(device)\n tgt = torch.randint(0, 10, (1, 10)).to(device)\n\n src_pad_idx = 0\n tgt_pad_idx = 0\n\n src_vocab_size = 10\n tgt_vocab_size = 10\n\n model = Transformer(src_vocab_size, tgt_vocab_size, src_pad_idx, tgt_pad_idx).to(device)\n\n output = model(src, tgt[:, :-1])\n print(output.shape)\n print(output)\n\n","repo_name":"Vasanthengineer4949/TFS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"41263136047","text":"# -*- coding: utf-8 -*-\n# Created on Tue Jul 03 2018 12:33:47\n# Author: WuLC\n# EMail: liangchaowu5@gmail.com\n\n# hashmap, simple solution\nclass Solution(object):\n def lemonadeChange(self, bills):\n \"\"\"\n :type bills: List[int]\n :rtype: bool\n \"\"\"\n count = {5:0, 10:0, 20:0}\n for i in xrange(len(bills)):\n if bills[i] == 5:\n count[5] += 1\n elif bills[i] == 10:\n if count[5] == 0:\n return False\n count[5] -= 1\n count[10] += 1\n elif bills[i] == 20:\n if count[10] > 0 and count[5] > 0:\n count[10] -= 1\n count[5] -= 1\n elif count[5] >= 3:\n count[5] -= 3\n else:\n return False\n return True","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/860. Lemonade Change.py","file_name":"860. 
Lemonade Change.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"36870581416","text":"from mcpi.minecraft import Minecraft\nmc=Minecraft.create()\nimport random\n\nx,y,z=mc.player.getPos()\n\ndef randomblocklocation(blocktype,repeat):\n x,y,z=mc.player.getPos()\n for i in range(repeat):\n x=x+random.randrange(-20,20)\n z=z+random.randrange(-20,20)\n mc.setBlock(x,y,z,blocktype)\n \nrandomblocklocation(103,7)\nrandomblocklocation(56,7)\nrandomblocklocation(98,7) ","repo_name":"newsteinking/workspace_backup","sub_path":"workspace_K/workspace_jihyung/python/python_minecraft/Learn to Program with Minecraft Code실습/chapter8-functions/test11.py","file_name":"test11.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73741299464","text":"from __future__ import annotations\n\nimport os\nimport platform\nimport subprocess\n\nfrom click import UsageError\n\nimport plagdef.gui.main as main\nfrom plagdef.app import write_doc_pair_matches_to_json, read_doc_pair_matches_from_json\nfrom plagdef.config import settings\nfrom plagdef.gui.model import DocumentPairMatches\nfrom plagdef.gui.views import HomeView, LoadingView, NoResultsView, ErrorView, ResultView, \\\n FileDialog, MatchesDialog, MessageDialog, SettingsDialog\nfrom plagdef.model import models\n\n\nclass HomeController:\n def __init__(self):\n self.view = HomeView()\n self.settings_controller = SettingsController()\n self.archive_dir_dialog = FileDialog()\n self.docs_dir_dialog = FileDialog()\n self.common_dir_dialog = FileDialog()\n self._connect_slots()\n\n def _connect_slots(self):\n self.view.register_for_signals(select_lang=self.on_select_lang,\n open_report_dir=self.on_open_click,\n select_archive_dir=self._on_select_archive_dir,\n rm_archive_dir=self._on_remove_archive_dir,\n select_docs_dir=self._on_select_docs_dir,\n rm_docs_dir=self._on_remove_docs_dir,\n select_common_dir=self._on_select_common_dir,\n rm_common_dir=self._on_remove_common_dir,\n detect=self._on_detect,\n settings=self._on_settings_click)\n\n def on_select_lang(self):\n settings.update({'lang': self.view.lang})\n\n def on_open_click(self):\n dialog = FileDialog()\n if dialog.open():\n matches = read_doc_pair_matches_from_json(dialog.selected_dir)\n if len(matches):\n main.app.window.switch_to(ResultView, matches)\n else:\n MessageDialog('The selected folder contains no match files.')\n\n def _on_settings_click(self):\n self.settings_controller.view.open()\n settings.update({'ocr': self.settings_controller.view.ocr,\n 'download_path': self.settings_controller.view.download_path,\n 'transl': self.settings_controller.view.translate,\n 'min_cos_sim': self.settings_controller.view.similarity_threshold,\n 'min_dice_sim': self.settings_controller.view.similarity_threshold,\n 'min_cluster_cos_sim': self.settings_controller.view.similarity_threshold})\n\n def _on_select_archive_dir(self):\n if self.archive_dir_dialog.open():\n folder_name = self.archive_dir_dialog.selected_dir[self.archive_dir_dialog.selected_dir.rfind(\"/\"):]\n self.view.archive_dir_selected(folder_name)\n\n def _on_remove_archive_dir(self):\n self.archive_dir_dialog.selected_dir = None\n self.view.archive_dir_removed()\n\n def _on_select_docs_dir(self):\n if self.docs_dir_dialog.open():\n folder_name = self.docs_dir_dialog.selected_dir[self.docs_dir_dialog.selected_dir.rfind(\"/\"):]\n 
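# show only the last path component of the selected folder in the UI\n            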
self.view.docs_dir_selected(folder_name)\n\n def _on_remove_docs_dir(self):\n self.docs_dir_dialog.selected_dir = None\n self.view.docs_dir_removed()\n\n def _on_select_common_dir(self):\n if self.common_dir_dialog.open():\n folder_name = self.common_dir_dialog.selected_dir[self.common_dir_dialog.selected_dir.rfind(\"/\"):]\n self.view.common_dir_selected(folder_name)\n\n def _on_remove_common_dir(self):\n self.common_dir_dialog.selected_dir = None\n self.view.common_dir_removed()\n\n def _on_detect(self):\n doc_dir = (self.docs_dir_dialog.selected_dir, self.view.docs_rec) if self.docs_dir_dialog.selected_dir else None\n archive_dir = (self.archive_dir_dialog.selected_dir, self.view.archive_rec) \\\n if self.archive_dir_dialog.selected_dir else None\n common_dir = (self.common_dir_dialog.selected_dir, self.view.common_rec) \\\n if self.common_dir_dialog.selected_dir else None\n main.app.find_matches(doc_dir, archive_dir, common_dir, self._on_detect_success, self._on_detect_error)\n main.app.window.switch_to(LoadingView)\n self.archive_dir_dialog.selected_dir = self.common_dir_dialog.selected_dir \\\n = self.docs_dir_dialog.selected_dir = None\n\n def _on_detect_success(self, matches: list[models.DocumentPairMatches]):\n if matches:\n main.app.window.switch_to(ResultView, matches)\n else:\n main.app.window.switch_to(NoResultsView)\n\n def _on_detect_error(self, error: (type, Exception)):\n if error[0] == UsageError:\n main.app.window.switch_to(ErrorView, str(error[1]))\n else:\n main.app.window.switch_to(ErrorView,\n 'An error occurred. Please refer to the command line for more details.')\n raise error[1]\n\n\nclass SettingsController:\n def __init__(self):\n self.view = SettingsDialog()\n self._download_dir_dialog = FileDialog()\n self.view.register_for_signals(self._on_download_path_select)\n\n def _on_download_path_select(self):\n if self.view.download_path:\n self.view.download_dir_selected(None)\n elif self._download_dir_dialog.open():\n self.view.download_dir_selected(self._download_dir_dialog.selected_dir)\n\n\nclass LoadingController:\n def __init__(self):\n self.view = LoadingView()\n\n\nclass NoResultsController:\n def __init__(self):\n self.view = NoResultsView()\n self._connect_slots()\n\n def _connect_slots(self):\n self.view.register_for_signals(self._on_again)\n\n def _on_again(self):\n main.app.window.switch_to(HomeView)\n\n\nclass ErrorController:\n def __init__(self):\n self.view = ErrorView()\n self._connect_slots()\n\n def _connect_slots(self):\n self.view.register_for_signals(self._on_again)\n\n def _on_again(self):\n main.app.window.switch_to(HomeView)\n\n\nclass ResultController:\n def __init__(self):\n self.view = ResultView()\n self.matches_dialog = MatchesDialog()\n self._connect_slots()\n\n def _connect_slots(self):\n self.view.register_for_signals(self.on_export, self._on_again, self.on_select_pair)\n self.matches_dialog.register_for_signals(self.on_prev_match, self.on_next_match, self.on_doc_name_click,\n self.on_reanalyze_click)\n\n def on_export(self):\n selected_matches = self.view.selected_matches\n if not selected_matches:\n MessageDialog(f\"Please select matches first!\")\n else:\n dialog = FileDialog()\n if dialog.open():\n write_doc_pair_matches_to_json(selected_matches, dialog.selected_dir)\n MessageDialog(f\"Successfully generated {len(selected_matches)} JSON report(s).\")\n\n def _on_again(self):\n main.app.window.switch_to(HomeView)\n\n def on_select_pair(self, doc_pair_matches: DocumentPairMatches):\n self.matches_dialog.open(doc_pair_matches)\n\n 
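# pager callbacks for stepping through matches in the dialog\n    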
def on_prev_match(self):\n self.matches_dialog.prev_match()\n\n def on_next_match(self):\n self.matches_dialog.next_match()\n\n def on_doc_name_click(self, path: str):\n if platform.system() == 'Darwin': # macOS\n subprocess.call(('open', path))\n elif platform.system() == 'Windows': # Windows\n os.startfile(path)\n else: # linux variants\n subprocess.call(('xdg-open', path))\n\n def on_reanalyze_click(self):\n self.matches_dialog.reanalyzing(True)\n main.app.reanalyze_pair(self.matches_dialog.doc1, self.matches_dialog.doc2,\n self.matches_dialog.sim_threshold, self._on_reanalyze_success, self._on_reanalyze_error)\n\n def _on_reanalyze_success(self, matches: list[models.DocumentPairMatches]):\n self.matches_dialog.reanalyzing(False)\n if matches:\n self.matches_dialog.set_data(DocumentPairMatches.from_model(matches[0], self.matches_dialog.match_type))\n else:\n MessageDialog(\"No matches found.\")\n\n def _on_reanalyze_error(self, error: (type, Exception)):\n self.matches_dialog.reanalyzing(False)\n if error[0] == UsageError:\n MessageDialog(str(error[1]))\n else:\n MessageDialog('An error occurred. Please refer to the command line for more details.')\n raise error[1]\n","repo_name":"devWhyqueue/plagdef","sub_path":"plagdef/gui/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38896439077","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n#%matplotlib notebook \nimport math # First line is to enable inline animation in jupyter notebook\nimport numpy as np # The rest are just python libraries that I need for certain steps below\nimport random\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\n\n\n# In[3]:\n\n\ndef initialize_array_lattice(lattice): # Calling this function randomly populates the lattice \n for i in range(0,size): # with 1's and -1's, it was initially just zeros\n for j in range(0,size):\n if random.random() < 0.5: # random.random() is a function that generates random numbers\n lattice[i,j] = int(1) # between 0 and 1, using this to randomly decide 1 or -1\n else:\n lattice[i,j] = int(-1)\n return lattice # nan = not a number, part of the math library. 
I'm \n # basically returning nothing, the function just creates # the 2-D array\n\ndef deltaE(i,j,lattice): # Function to calculate differrence in energy when I flip\n if i == 0: # a dipole (see Metropolis Algorithm)\n up = lattice[size-1,j]\n else:\n up = lattice[i-1,j] # The reason for all the if statement is to implement\n if i == size-1: # periodic boundary condition\n down = lattice[0,j]\n else:\n down = lattice[i+1,j]\n if j == 0:\n left = lattice[i,size-1] # Formula for difference in energy is:\n else: # 2*(spin of main dipole)*(sum of neighboring spins)\n left = lattice[i,j-1]\n if j == size-1:\n right = lattice[i,0]\n else:\n right = lattice[i,j+1] \n return 2*lattice[i,j]*(up+down+left+right)\n\ndef metropolis(lattice): \n lattice = initialize_array_lattice(lattice) \n \n M = np.zeros(mc_step+1)\n M[0] = sum(sum(lattice)) # loop to run through 100*size^(2) so each dipole\n \n for n in range(0, mc_step): # has the chance to be flipped 100 times.\n i = random.randint(0,size-1) \n j = random.randint(0,size-1) # Picking a random dipole between 0 and size\n \n E_diff = deltaE(i,j,lattice) # Calulating Energy Difference\n if E_diff <= 0:\n lattice[i,j] *= -1 # Flip if E <= 0 (See Metropolis Algorithm)\n M[n+1] = M[n] + 2*lattice[i,j]\n else:\n if np.random.uniform() < math.exp(-E_diff/T): # If E > 0, flip using Boltzmann weight\n lattice[i,j] *= -1\n M[n+1] = M[n] + 2*lattice[i,j]\n else:\n M[n+1] = M[n]\n continue\n return lattice, M\n\n\nsize = 6 # np.zeros creates a size x size array with zeros.\nT = 0.1 # Calling the initialize array function.\nmc_step = 1000*size**2\n\n# Create empty lattice\nemptylattice1 = np.zeros([size,size], dtype=int)\nemptylattice2 = np.zeros([size,size], dtype=int)\n\n# Feed empty lattice into the Metropolis function and it outputs \"lattice1\" and \"M1\", both are arrays\nlattice1, M1 = metropolis(emptylattice1)\nlattice2, M2 = metropolis(emptylattice2)\n\nprint('Total M for lattice1:',M1[-1]/size**2)\nprint('Total M for lattice2:',M2[-1]/size**2)\nprint('Sum of lattice1:',sum(sum(lattice1)))\nprint('Sum of lattice2:',sum(sum(lattice2)))\n\n\n# Show image of lattices\nfig1 = plt.figure(figsize=(8,4))\nplt.subplot(1,2,1)\ncmap = mpl.colors.ListedColormap(['darkred','blue']) # This just creates the colormap needed for plotting\nplt.imshow(lattice1,cmap=cmap,aspect='equal') # imshow displays data as image, first argument is \n # your data, in this case, the lattice array \nplt.subplot(1,2,2)\nplt.imshow(lattice2,cmap=cmap,aspect='equal') \n\n\n\n# Graphing the magnetization, simply create x-axis (time) and plot M1 M2 given above\n\nfig3 = plt.figure(figsize=(30,8))\nax = fig3.add_subplot(121)\nt = np.linspace(0,mc_step+1,mc_step+1)\nplt.plot(t/size**2, M1/size**2, '-',label='lattice1')\nplt.plot(t/size**2, M2/size**2, '-',label='lattice2')\nplt.xlabel(\"Time (MC step per lattice site)\",fontsize=18)\nplt.ylabel(\"Magnetization M\",fontsize=18)\nax.tick_params(labelsize=15)\n#plt.legend(fontsize=15)\n\n\n# In[2]:\n\n\n# This is the code to animate the Ising model. 
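Each animation frame performs a single Metropolis spin-flip attempt. 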
Note that this section of code is a standalone, does not require\n# anything from above other than the imported libraries\n\ndef deltaE(i,j):\n s = lattice[(i+1) % l,j] + lattice[i, (j+1) % l] + lattice[(i-1) % l, j] + lattice[i,(j-1) % l]\n return 2*lattice[i,j]*s\n \ndef mcstep(*args):\n i = random.randint(0,l-1) ## random.randint(0, l-1) 0 to l-1 inclusive\n j = np.random.randint(l) ## np.random.randint(l), l exclusive, same as above, different function\n E_diff = deltaE(i,j)\n if E_diff <= 0:\n lattice[i,j] *= -1\n else:\n if random.random() < math.exp(-E_diff/T):\n lattice[i,j] *= -1\n grid.set_array(lattice.ravel()) # Set_array is how colormap updates using the array in the argument\n return grid, # the ravel function flattens the array into 1D, it's just a requirement\n # \"grid,\" needs a comma because it need to return a tuple, why a tuple\n # exactly? Not sure. Could also use notation \"[grid]\"\n \ndef init_spin_config(opt):\n if opt == 'inf':\n lattice = np.random.randint(0,2,size=(l,l)) # lxl lattice with random spin configuration\n lattice[lattice==0] = -1\n return lattice\n \n if opt == 'zero': ## lxl lattice with +1 \n lattice = np.full( (l,l), 1, dtype=int) ## argument: shape, value, value_type\n return lattice\n\n\nl = 25 #Lattice dimension\nJ = 0.3 #Interaction strength\nT = 3.5 #Temperature, be sure to change it depending on initial lattice temperature\nN = 20000 #Number of iterations of MC step\nopt = 'inf' \n \nlattice = init_spin_config(opt)\n# print(sum(sum(lattice)))\n\nfig = plt.figure(figsize=(8,8),dpi=80) # figsize in inches, dpi = dots (pixels) per inch\nfig.suptitle(\"Ising Model\", fontsize=50) # suptitle = super title, for when you have multi-subplots\n#X, Y = np.meshgrid(range(l), range(l)) # Creating my domain and range of grid\ngrid = plt.pcolormesh(range(l),range(l),lattice,shading='nearest',cmap = \"gray\", vmin=-1, vmax=1) \n # Create grid color plot with dimensions X,Y and values from lattice\nani = animation.FuncAnimation(fig, mcstep, frames = N, interval = 5, blit = True, repeat = False)\n # Arugments: figure object, function to call for each frame of animation, # of frames/# of times\n # to call the animate function, delay between frames in milliseconds, enable blit so funcAnimation\n # won't redraw the whole thing every frame, it'll just update it.\n \n # Keeping the interval too low (i.e. 
< 1) might cause the animation to lag\n \n# if you're running animation in jupyter, make sure %matplotlib notebook is uncommented up top\n\n\n# In[ ]:\n\n\n# Check what writers you have\n\n# import matplotlib\n# print(matplotlib.animation.writers.list())\n\n\n# In[13]:\n\n\n# Save the animation as a gif, the single flip dynamic is really highlighted in gif format\n\n# writergif = animation.PillowWriter(fps=150)\n# ani.save('filename.gif',writer=writergif)\n\n","repo_name":"kaihu01/sjsu_spacetime","sub_path":"1-D Ising.py","file_name":"1-D Ising.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40064784167","text":"import itertools\nimport numpy as np\nimport deeppy as dp\nimport deeppy.expr as ex\nfrom deeppy.misc.test import (eps, BPropableSource, approx_fprime, gradclose,\n graph_funs)\n\n\nshapes = [(1, 4), (1, 7), (2, 6), (3, 3), (3, 4), (5, 1)]\n\n\ndef test_activations():\n ops = [\n ex.nnet.LeakyReLU, ex.nnet.ReLU, ex.nnet.Sigmoid, ex.nnet.Softmax,\n ex.nnet.Softplus,\n ]\n confs = itertools.product(shapes, ops)\n for shape, op in confs:\n print('%s shape=%s' % (op.__name__.ljust(20), shape))\n src_array = np.random.normal(size=shape).astype(dp.float_)\n\n if op is ex.nnet.LeakyReLU:\n src_array[np.fabs(src_array) < eps] = eps\n if op is ex.nnet.Softmax:\n src_array *= 100\n\n src = BPropableSource(src_array)\n sink = op()(src)\n f, f_grad = graph_funs(src, sink)\n g_approx = approx_fprime(src_array, f)\n g_true = f_grad(src_array)\n assert gradclose(g_true, g_approx)\n","repo_name":"andersbll/deeppy","sub_path":"test/unit/expr/nnet/test_activation.py","file_name":"test_activation.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1370,"dataset":"github-code","pt":"81"} +{"seq_id":"7780854079","text":"class Node:\n def __init__(self, val, next=None):\n self.value = val\n self.next = next\n\n\nclass LinkedList:\n \"\"\"This is a linked list implementation\"\"\"\n size = 0\n def __init__(self):\n self.head = None\n \n\n def push_front(self, val):\n \"\"\"Add a new element to the front of the linked list. O(1)\"\"\"\n # None\n # (value: a, next: None)\n # (value: b, next:(value: a, next: None))\n node = Node(val)\n if (self.size==0):\n self.head=node\n self.size+=1\n else:\n node.next=self.head\n self.head=node\n self.size+=1\n\n def get_element(self, index):\n \"\"\"Returns the value of the element at the provided index. O(n)\"\"\"\n if (index >= self.size):\n raise IndexError\n i = 0\n node = self.head\n while i < index:\n i += 1\n node = node.next\n return node.value\n\n def count(self):\n \"\"\"Returns the number of elements in the list. O(1)\"\"\"\n return self.size\n\n def pop_front(self):\n \"\"\"Removes the val from the front of the list and returns the value\n of that val. O(1)\"\"\"\n # node= head head.next:None , return head.value\n node = self.head\n self.head=node.next\n node.next=None\n self.size-=1\n return node.value\n\n def insert_after(self, index, val):\n \"\"\"Inserts an val in the list after the provided index. 
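Raises IndexError when the index is out of range. 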
O(n)\"\"\"\n #crear un nodo con un valor, darle como nodo siguiente el apuntador del nodo anterior y luego apuntar ese nodo al nuevo que inserte.\n i = 0\n if(index>=self.size):\n raise IndexError\n node = self.head\n while i < index:\n i += 1\n node = node.next\n node1 = Node(val, node.next)\n node.next = node1\n self.size+=1\n \n def remove_element(self, index):\n \"\"\"Removes element at the provided index. Returns the removed\n element. O(n)\"\"\"\n i = 0\n node = self.head\n while i < index-1:\n i += 1\n node = node.next\n apuntador=node.next\n node.next=apuntador.next\n apuntador.next= None\n self.size-=1\n return apuntador.value\n\n \n\n def reverse(self):\n node = self.head\n nodeb= self.head.next\n nodec = nodeb.next\n while nodeb.next!=None:\n nodeb.next = node\n self.head = nodeb\n node=nodeb\n nodeb = nodec\n if (nodec!=None): \n nodec=nodec.next\n nodeb.next = node\n self.head = nodeb\n \"\"\"Reverses the direction of the linked list. O(n)\"\"\"","repo_name":"nathaliamg-44/LinkedList-in-Python","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8291900199","text":"import plotly.graph_objects as go\n\nclass Graph:\n \"\"\"\n General Graph class for making visualizations easily\n \"\"\"\n\n def __init__(self):\n self.fig = go.Figure()\n\n def show_chart(self):\n \"\"\"\n Method that exports the chart\n \"\"\"\n self.fig.show()\n\n def update_layout(self, title, x_title=\"\", y_title=\"\", x_suffix=\"\", y_suffix=\"\", paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)'):\n \"\"\"\n Updates layout\n\n Attributes:\n title (string): Title of the chart\n x_title (string): X-axis label\n y_title (string): Y-axis label\n x_suffix (string): X-axis suffix, e.g. %\n y_suffix (string): Y-axis suffix, e.g. %\n paper_bgcolor (string): Sets the color of paper where the graph is drawn, e.g. \"white\"\n plot_bgcolor (string): Sets the background color of plotly area, e.g. 
\"white\"\n \"\"\"\n self.title = title\n self.x_suffix = x_suffix\n self.y_suffix = y_suffix\n self.paper_bgcolor = paper_bgcolor\n self.plot_bgcolor = plot_bgcolor\n\n self.fig.update_layout(\n title={'text': self.title},\n yaxis=dict(ticksuffix=self.y_suffix, title=y_title),\n xaxis=dict(ticksuffix=self.x_suffix, title=x_title),\n paper_bgcolor=self.paper_bgcolor,\n plot_bgcolor=self.plot_bgcolor)\n\n\n\n","repo_name":"vProto/Data-Science-Portfolio","sub_path":"easy_plotly - Python Package/easy_plotly/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19852000426","text":"# -*- coding: utf-8 -\n#\n# This file is part of http-parser released under the MIT license.\n\n# See the NOTICE for more information.\n\nfrom io import DEFAULT_BUFFER_SIZE, RawIOBase\n\nfrom http_parser.util import StringIO\n\nimport types\n\nclass HttpBodyReader(RawIOBase):\n \"\"\" Raw implementation to stream http body \"\"\"\n\n def __init__(self, http_stream):\n self.http_stream = http_stream\n self.eof = False\n\n def readinto(self, b):\n if self.http_stream.parser.is_message_complete() or self.eof:\n if self.http_stream.parser.is_partial_body():\n return self.http_stream.parser.recv_body_into(b)\n return 0\n\n self._checkReadable()\n try:\n self._checkClosed()\n except AttributeError:\n pass\n\n while True:\n buf = bytearray(DEFAULT_BUFFER_SIZE)\n recved = self.http_stream.stream.readinto(buf)\n if recved is None:\n break\n\n del buf[recved:]\n nparsed = self.http_stream.parser.execute(bytes(buf), recved)\n if nparsed != recved:\n return None\n\n if self.http_stream.parser.is_partial_body() or recved == 0:\n break\n elif self.http_stream.parser.is_message_complete():\n break\n\n if not self.http_stream.parser.is_partial_body():\n self.eof = True\n b = b''\n return len(b'')\n\n return self.http_stream.parser.recv_body_into(b)\n\n def readable(self):\n return not self.closed or self.http_stream.parser.is_partial_body()\n\n def close(self):\n if self.closed:\n return\n RawIOBase.close(self)\n self.http_stream = None\n\nclass IterReader(RawIOBase):\n \"\"\" A raw reader implementation for iterable \"\"\"\n def __init__(self, iterable):\n self.iter = iter(iterable)\n self._buffer = \"\"\n\n def readinto(self, b):\n self._checkClosed()\n self._checkReadable()\n\n l = len(b)\n try:\n chunk = self.iter.next()\n self._buffer += chunk\n m = min(len(self._buffer), l)\n data, self._buffer = self._buffer[:m], self._buffer[m:]\n b[0:m] = data\n return len(data)\n except StopIteration:\n del b[0:]\n return 0\n\n def readable(self):\n return not self.closed\n\n def close(self):\n if self.closed:\n return\n RawIOBase.close(self)\n self.iter = None\n\nclass StringReader(IterReader):\n \"\"\" a raw reader for strings or StringIO.StringIO,\n cStringIO.StringIO objects \"\"\"\n\n def __init__(self, string):\n if isinstance(string, types.StringTypes):\n iterable = StringIO(string)\n else:\n iterable = string\n IterReader.__init__(self, iterable)\n\nfrom http_parser._socketio import SocketIO\n\nclass SocketReader(SocketIO):\n def __init__(self, sock):\n super(SocketReader, self).__init__(sock, mode='rb')\n","repo_name":"benoitc/http-parser","sub_path":"http_parser/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"81"} +{"seq_id":"36482271848","text":"from tkinter import ttk\nfrom 
tkinter.messagebox import showinfo\nfrom tkinter.messagebox import askokcancel\nfrom tkinter.messagebox import askyesnocancel\nimport tkinter as tk\nimport os\nimport sys\nimport datetime\nfrom tkinter.filedialog import askopenfilename\nfrom merge_Sort import mergeSort\nfrom find_items import find_item1\nfrom find_items import find_exist\n\n# 由相对路径得到绝对路径 ;在程序打包的时候会用到\ndef get_resources_path(relative_path):\n if getattr(sys, \"frozen\", False):\n base_path = sys._MEIPASS\n else:\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)\n\n\nIMAGE_PATH_login = get_resources_path(os.path.join(\"resources\", \"login.png\"))\nLOGO_PATH = get_resources_path(os.path.join(\"resources\", \"plane.ico\"))\nIMAGE_PATH = get_resources_path(os.path.join(\"resources\", \"plane_cr.png\"))\n\nclass LoginWindow:\n def __init__(self):\n self.root_login = tk.Tk()\n self.root_login.title(\"登入窗口\")\n self.root_login.geometry(\"600x420+420+150\")\n self.root_login.iconbitmap(LOGO_PATH)\n self.root_login.resizable(False, False)\n self.root_login[\"background\"] = \"white\"\n ## widget\n self.style_login = ttk.Style()\n self.style_login.configure(\"login.TPanedwindow\", bg=\"Azure\")\n ## login_label(登入图片)\n self.login_image = tk.PhotoImage(file=IMAGE_PATH_login)\n self.login_label = tk.Label(self.root_login, image=self.login_image)\n self.login_label.place(x=3, y=10)\n\n ## 定义下方容器,登入标签以及文本框,按钮\n\n self.botton_pane = ttk.Panedwindow(self.root_login, style=\"login.TPanedwindow\", width=600, height=80)\n self.botton_pane.place(x=0, y=340)\n self.label_user_name = tk.Label(self.botton_pane, text=\"用户名:\", font=(\"华文黑体\", 13, \"bold\"), bg=\"white\",\n fg=\"black\")\n self.label_user_name.place(x=6, y=25)\n self.entry_user = tk.Entry(self.botton_pane, width=15)\n self.entry_user.place(x=80, y=25)\n self.label_user_key = tk.Label(self.botton_pane, text=\"密码:\", font=(\"华文黑体\", 13, \"bold\"), bg=\"white\",\n fg=\"black\")\n self.label_user_key.place(x=200, y=25)\n self.entry_key = tk.Entry(self.botton_pane, width=15,show=\"*\")\n self.entry_key.place(x=270, y=25)\n\n self.login_button = tk.Button(self.botton_pane, text=\"登录\", font=(\"华文黑体\", 13, \"bold\"))\n self.login_button.place(x=500, y=20)\n self.login_button.bind(\"<Button-1>\", self.login_handle)\n\n self.root_login.mainloop()\n\n # 登入控制函数\n def login_handle(self, event):\n account = {}\n with open(\"account.txt\",mode=\"r\",encoding=\"utf-8\") as ff: # 读取account文件中的账户\n line_ff = ff.readline()\n while line_ff:\n username = line_ff.strip().split(',')[0]\n password = line_ff.strip().split(',')[1]\n account[username] = password\n line_ff = ff.readline()\n if self.entry_user.get() in account.keys() and self.entry_key.get() == account[self.entry_user.get()]:\n login_user = self.entry_user.get()\n self.root_login.destroy()\n MainWindow(login_user)\n else:\n showinfo(title=\"温馨提示\",message=\"账号或密码错误\")\n\nclass MainWindow:\n\n def __init__(self, name):\n # 自定义的变量\n\n self.user = name\n self.cur = datetime.datetime.now()\n # 加载gui\n self.root = tk.Tk()\n self.root.title(\"飞机订票系统\")\n self.root.geometry(\"900x640+275+80\")\n self.root.iconbitmap(LOGO_PATH)\n self.root.resizable(0, 0)\n self.root[\"bg\"] = \"skyblue\"\n self.root.protocol(\"WM_DELETE_WINDOW\", self.close_handle)\n self.all_fight_list = [] # 当前Treeview1中的所有信息,时刻随着“增删改查”进行动态的变化\n self.all_seek = [] # Treeview1 当前查找待购的航班记录\n self.find_texts = {} # Treeview1 得到待购航班查询信息的内容,利用字典,方便进行查询\n self.all_seek2 = [] # Treeview2 当前查找已购的航班记录\n self.find_texts2 = {} # Treeview2 
得到已购航班查询信息的内容,利用字典,方便进行查询\n self.all_selected = [] # 当前Treeview2中的所有信息,时刻随着“增删改查”进行动态的变化\n self.file_path = \"\" # 在打开文件时用到的路径\n self.setup_UI()\n self.load_info()\n self.root.mainloop()\n\n\n # 窗口关闭协议\n def close_handle(self):\n ret = askyesnocancel(title=\"退出提示\", message=\"是否保存文件后退出\")\n if ret == True:\n self.save()\n self.root.destroy()\n elif ret == False:\n self.root.destroy()\n else:\n pass\n\n def add_item(self):\n self.top_add = tk.Tk()\n self.top_add.title(\"添加信息\")\n\n # self.top_add = tk.Toplevel()\n # self.top_add.title(\"添加航线信息\")\n item_all = [[], [], [], [], [], [], [], []]\n item_all[0].append(tk.Label(self.top_add, text=\"航班号\"))\n item_all[1].append(tk.Label(self.top_add, text=\"出发时间\"))\n item_all[2].append(tk.Label(self.top_add, text=\"到站时间\"))\n item_all[3].append(tk.Label(self.top_add, text=\"起点\"))\n item_all[4].append(tk.Label(self.top_add, text=\"终点\"))\n item_all[5].append(tk.Label(self.top_add, text=\"票价\"))\n item_all[6].append(tk.Label(self.top_add, text=\"余额\"))\n item_all[7].append(tk.Label(self.top_add, text=\"时长\"))\n t1 = tk.Entry(self.top_add)\n t2 = tk.Entry(self.top_add)\n t3 = tk.Entry(self.top_add)\n t4 = tk.Entry(self.top_add)\n t5 = tk.Entry(self.top_add)\n t6 = tk.Entry(self.top_add)\n t7 = tk.Entry(self.top_add)\n t8 = tk.Entry(self.top_add)\n item_all[0].append(t1)\n item_all[1].append(t2)\n item_all[2].append(t3)\n item_all[3].append(t4)\n item_all[4].append(t5)\n item_all[5].append(t6)\n item_all[6].append(t7)\n item_all[7].append(t8)\n add_row = 0\n for group in item_all:\n add_column = 0\n for it in group:\n it.grid(row=add_row, column=add_column, padx=3, pady=2)\n add_column += 1\n add_row += 1\n bnt_add = tk.Button(self.top_add, text=\"确定\",\n command=lambda: self.tree_add(t1.get(), t2.get(), t3.get(), t4.get()\n , t5.get(), t6.get(), t7.get(), t8.get()))\n bnt_add.grid(row=8, column=0, columnspan=2)\n\n self.top_add.mainloop()\n\n def tree_add(self, t1, t2, t3, t4, t5, t6, t7, t8):\n\n for list_one in self.all_fight_list:\n if t1 == list_one[0]:\n showinfo(title='警告', message='不允许有相同的航班号')\n return\n\n if t1 == '' or t2 == '' or t3 == '' or t4 == '' or t5 == '' or t6 == '' or t7 == '' or t8 == '':\n showinfo(title='警告', message='不允许相关信息为空')\n return\n self.Tree.insert(\"\", index=tk.END, values=(t1, t2, t3, t4, t5, t6, t7, t8))\n self.all_fight_list.append([t1, t2, t3, t4, t5, t6, t7, t8])\n self.top_add.destroy()\n\n def delete_item(self):\n ret = askokcancel(title=\"温馨提示\", message='确定进行数据的删除')\n if ret:\n id = self.Tree.selection()\n flight_one = list(self.Tree.item(id, 'values'))\n self.all_fight_list.remove(flight_one)\n self.Tree.delete(id)\n else:\n return\n\n\n def alter_item(self):\n x = self.Tree.selection()\n content = self.Tree.item(x, \"values\")\n self.top_alter = tk.Tk()\n self.top_alter.title(\"修改信息\")\n item_all = [[], [], [], [], [], [], [], []]\n item_all[0].append(tk.Label(self.top_alter, text=\"航班号\"))\n item_all[1].append(tk.Label(self.top_alter, text=\"出发时间\"))\n item_all[2].append(tk.Label(self.top_alter, text=\"到站时间\"))\n item_all[3].append(tk.Label(self.top_alter, text=\"起点\"))\n item_all[4].append(tk.Label(self.top_alter, text=\"终点\"))\n item_all[5].append(tk.Label(self.top_alter, text=\"票价\"))\n item_all[6].append(tk.Label(self.top_alter, text=\"余额\"))\n item_all[7].append(tk.Label(self.top_alter, text=\"时长\"))\n t1 = tk.Entry(self.top_alter)\n t1.insert(0, content[0])\n t2 = tk.Entry(self.top_alter)\n t2.insert(0, content[1])\n t3 = tk.Entry(self.top_alter)\n t3.insert(0, content[2])\n t4 = 
tk.Entry(self.top_alter)\n t4.insert(0, content[3])\n t5 = tk.Entry(self.top_alter)\n t5.insert(0, content[4])\n t6 = tk.Entry(self.top_alter)\n t6.insert(0, content[5])\n t7 = tk.Entry(self.top_alter)\n t7.insert(0, content[6])\n t8 = tk.Entry(self.top_alter)\n t8.insert(0, content[7])\n item_all[0].append(t1)\n item_all[1].append(t2)\n item_all[2].append(t3)\n item_all[3].append(t4)\n item_all[4].append(t5)\n item_all[5].append(t6)\n item_all[6].append(t7)\n item_all[7].append(t8)\n add_row = 0\n for group in item_all:\n add_column = 0\n for it in group:\n it.grid(row=add_row, column=add_column, padx=3, pady=2)\n add_column += 1\n add_row += 1\n bnt_add = tk.Button(self.top_alter, text=\"确定\",\n command=lambda: self.tree_alter(t1.get(), t2.get(), t3.get(), t4.get()\n , t5.get(), t6.get(), t7.get(), t8.get()))\n bnt_add.grid(row=8, column=0, columnspan=2)\n self.top_alter.mainloop()\n\n def tree_alter(self, t1, t2, t3, t4, t5, t6, t7, t8):\n if t1 == '' or t2 == '' or t3 == '' or t4 == '' or t5 == '' or t6 == '' or t7 == '' or t8 == '':\n showinfo(title='警告', message='不允许相关信息为空')\n return\n # 这里的逻辑有点不好想,先除去要改的航班号,方便下面的判断,以及添加\n x = self.Tree.selection()\n x_content = list(self.Tree.item(x, 'values'))\n index_before = self.all_fight_list.index(x_content)\n self.all_fight_list.remove(x_content)\n for list_one in self.all_fight_list:\n if t1 == list_one[0]:\n showinfo(title='警告', message=\"不允许有相同的航班号\")\n self.all_fight_list.insert(index_before,x_content)\n return\n\n x_alter = [t1, t2, t3, t4, t5, t6, t7, t8]\n self.Tree.item(x, values=(t1, t2, t3, t4, t5, t6, t7, t8))\n self.all_fight_list.insert(index_before,x_alter)\n self.top_alter.destroy()\n\n # Treeview1 找到进行查询的条件,这里用到了字典,这里使用字典可以实现多条件查询\n def find_text(self):\n self.find_texts = {}\n sno = self.Entry_sno.get()\n if (sno != ''):\n self.find_texts['0'] = sno # '0'是航班号在列表中的下标\n start = self.Entry_start.get()\n if (start != ''):\n self.find_texts['3'] = start # '3'是出发时间在列表中的下标\n end = self.Entry_end.get()\n if (end != ''):\n self.find_texts['4'] = end # '4'是到达时间在列表中的下标\n return len(self.find_texts)\n\n # 这里多条件的查询是使用迭代方法进行的\n def find_item(self):\n number = self.find_text()\n if (number == 0):\n return\n else:\n self.all_seek = []\n all_item_cur = []\n for it in self.Tree.get_children():\n all_item_cur.append(self.Tree.item(it, 'values'))\n self.all_seek = all_item_cur\n for index in self.find_texts.keys():\n self.all_seek = find_item1(self.all_seek, int(index), self.find_texts[index])\n\n # 如果没有查询到结果,进行相应的提示\n if (len(self.all_seek) == 0):\n showinfo(title=\"提示\", message=\"无查询结果,请适当修改查询条件\")\n # 查询到结果,进行数据的显示\n else:\n self.load_treeview(self.all_seek)\n\n def find_text2(self):\n self.find_texts2 = {}\n sno = self.Entry_sno_2.get()\n if (sno != ''):\n self.find_texts2['0'] = sno\n start = self.Entry_start_2.get()\n if (start != ''):\n self.find_texts2['3'] = start\n end = self.Entry_end_2.get()\n if (end != ''):\n self.find_texts2['4'] = end\n return len(self.find_texts2)\n\n def find_item2(self):\n number = self.find_text2()\n if (number == 0):\n return\n else:\n self.all_seek2 = []\n all_item_cur = []\n for it in self.Tree2.get_children():\n all_item_cur.append(self.Tree2.item(it, 'values'))\n self.all_seek2 = all_item_cur\n for index in self.find_texts2.keys():\n self.all_seek2 = find_item1(self.all_seek2, int(index), self.find_texts2[index])\n\n if (len(self.all_seek2) == 0):\n showinfo(title=\"提示\", message=\"无查询结果,请适当修改查询条件\")\n else:\n self.load_treeview2(self.all_seek2)\n\n def treeview_sort_column(self, tv, col, reverse): # 
Treeview、列名、排列方式\n l = [(tv.set(k, col), k) for k in tv.get_children('')]\n l.sort(reverse=reverse) # 排序方式\n # rearrange items in sorted positions\n for index, (val, k) in enumerate(l): # 根据排序后索引移动\n tv.move(k, '', index) # 将“k\"ID的记录移动到下表为index的位置\n tv.heading(col, command=lambda: self.treeview_sort_column(tv, col, not reverse)) # 重写标题,使之成为再点倒序的标题\n\n def treeview_mergesort_column(self, tv, col, reverse):\n l = [(tv.set(k, col), k) for k in tv.get_children('')]\n l = mergeSort(l, reverse) # 排序方式\n # rearrange items in sorted positions\n for index, (val, k) in enumerate(l): # 根据排序后索引移动\n tv.move(k, '', index)\n tv.heading(col, command=lambda: self.treeview_mergesort_column(tv, col, not reverse)) # 重写标题,使之成为再点倒序的标题\n\n\n def select_item(self):\n\n self.top_number = tk.Tk()\n self.top_number.title(\"select\")\n label_number = tk.Label(self.top_number, text=\"请输入票数\")\n entry_number = tk.Entry(self.top_number)\n button_number = tk.Button(self.top_number, text=\"确定\", command=lambda: self.select_confirm(entry_number.get()))\n label_number.grid(row=0, column=0)\n entry_number.grid(row=0, column=1)\n button_number.grid(row=0, column=2)\n\n def select_confirm(self, num):\n\n num = int(num)\n x = self.Tree.selection()\n x_content = list(self.Tree.item(x, 'values'))\n if num > int(x_content[6]):\n showinfo(title=\"温馨提示\", message=\"超过了可选的最大票数\")\n return\n elif num == int(x_content[6]):\n\n # 对待购的treeview以及相应的列表更新\n self.all_fight_list.remove(x_content)\n self.load_treeview(self.all_fight_list)\n # 进行已购航班的查询,若已有相应航班号的记录,只需修改次数即可,如果没有进行相应记录的添加\n index_x_2 = find_exist(self.all_selected, x_content[0])\n if index_x_2 != -1:\n self.all_selected[index_x_2][6] = str(int(self.all_selected[index_x_2][6]) + num) # 如果已经存在,只需改变相应的记录的次数\n self.load_treeview2(self.all_selected)\n else:\n x_content[6] = str(num)\n self.all_selected.append(x_content)\n self.load_treeview2(self.all_selected)\n else:\n\n # 对待购的treeview以及相应的列表更新\n index_x = self.all_fight_list.index(x_content)\n self.all_fight_list[index_x][6] = str(int(self.all_fight_list[index_x][6]) - num)\n x_content[6] = str(int(x_content[6]) - num)\n self.Tree.item(x, values=x_content)\n\n # 进行已购航班的查询,若已有相应航班号的记录,只需修改次数即可,如果没有进行相应记录的添加\n index_x_2 = find_exist(self.all_selected, x_content[0])\n if index_x_2 != -1:\n self.all_selected[index_x_2][6] = str(int(self.all_selected[index_x_2][6]) + num) # 如果已经存在,只需改变相应的记录的次数\n self.load_treeview2(self.all_selected)\n else:\n x_content[6] = str(num)\n self.all_selected.append(x_content)\n self.load_treeview2(self.all_selected)\n self.top_number.destroy()\n\n\n def cancel_selected(self):\n\n self.top_number2 = tk.Tk()\n self.top_number2.title(\"cancel\")\n label_number = tk.Label(self.top_number2, text=\"请输入票数\")\n entry_number = tk.Entry(self.top_number2)\n button_number = tk.Button(self.top_number2, text=\"确定\", command=lambda: self.cancel_confirm(entry_number.get()))\n label_number.grid(row=0, column=0)\n entry_number.grid(row=0, column=1)\n button_number.grid(row=0, column=2)\n\n def cancel_confirm(self, num):\n num = int(num)\n x = self.Tree2.selection()\n x_content = list(self.Tree2.item(x, 'values'))\n if num > int(x_content[6]):\n showinfo(title=\"温馨提示\", message=\"超过了可退的最大票数\")\n return\n elif num == int(x_content[6]):\n # 对已购的treeview以及相应的列表更新\n self.all_selected.remove(x_content)\n self.load_treeview2(self.all_selected)\n # 进行代购航班的查询,若已有相应航班号的记录,只需修改次数即可,如果没有进行相应记录的添加\n index_x_2 = find_exist(self.all_fight_list, x_content[0])\n if index_x_2 != -1:\n self.all_fight_list[index_x_2][6] = str(\n 
int(self.all_fight_list[index_x_2][6]) + num) # 如果已经存在,只需改变相应的记录的次数\n self.load_treeview(self.all_fight_list)\n else:\n x_content[6] = str(num)\n self.all_fight_list.append(x_content)\n self.load_treeview(self.all_fight_list)\n else:\n # 对已购的treeview以及相应的列表更新\n index_x = self.all_selected.index(x_content)\n self.all_selected[index_x][6] = str(int(self.all_selected[index_x][6]) - num)\n x_content[6] = str(int(x_content[6]) - num)\n self.Tree2.item(x, values=x_content)\n\n # 进行代购航班的查询,若已有相应航班号的记录,只需修改次数即可,如果没有进行相应记录的添加\n index_x_2 = find_exist(self.all_fight_list, x_content[0])\n if index_x_2 != -1:\n self.all_fight_list[index_x_2][6] = str(\n int(self.all_fight_list[index_x_2][6]) + num) # 如果已经存在,只需改变相应的记录的次数\n self.load_treeview(self.all_fight_list)\n else:\n x_content[6] = str(num)\n self.all_fight_list.append(x_content)\n self.load_treeview(self.all_fight_list)\n\n self.top_number2.destroy()\n\n\n def save(self):\n\n with open(\"待购航班情况.txt\", mode='w', encoding='utf-8', newline='') as f:\n for line in self.all_fight_list:\n f.writelines(\",\".join(line) + '\\n')\n\n with open(\"已购航班情况.txt\", mode=\"w\", encoding='utf-8', newline='') as f2:\n for line2 in self.all_selected:\n f2.writelines(\",\".join(line2) + '\\n')\n\n def setup_UI(self):\n\n # # 设定Style\n # self.Style01 = ttk.Style()\n # self.Style01.configure(\"left.TPanedwindow\",background = \"Azure\")\n # self.Style01.configure(\"right.TPanedwindow\", background=\"Azure\")\n # self.Style01.configure(\"TButton\",width = 10,font = (\"华文黑体\",15,\"bold\"))\n\n # Top_banner\n self.start_image = tk.PhotoImage(file=IMAGE_PATH)\n self.Lable_image = tk.Label(self.root, image=self.start_image)\n self.Lable_image.pack()\n self.user_label = tk.Label(self.root, text=\"当前用户:%s\" % self.user, font=(\"华文黑体\", 15, \"bold\"), bg=\"white\",\n fg=\"black\")\n self.user_label.place(x=650, y=35)\n self.date = tk.Label(self.root, text=\"当前日期:{}\".format(self.cur.date()), font=(\"华文黑体\", 15, \"bold\"), bg=\"white\",\n fg=\"black\")\n self.date.place(x=650, y=60)\n # 设定选项卡\n self.notebook = ttk.Notebook(self.root)\n self.frame1 = tk.Frame()\n self.frame2 = tk.Frame()\n self.frame3 = tk.Frame()\n\n # frame1:左边容器及按钮组件\n self.Pane_left = tk.PanedWindow(self.frame1, width=162, bg=\"Azure\")\n self.Pane_left.pack(fill=tk.Y, side=tk.LEFT, padx=2, pady=2)\n\n self.Button_add = tk.Button(self.Pane_left, text=\"信息添加\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.add_item)\n self.Button_add.place(x=5, y=10)\n self.Button_alter = tk.Button(self.Pane_left, text=\"信息修改\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.alter_item)\n self.Button_alter.place(x=5, y=40)\n self.Button_delete = tk.Button(self.Pane_left, text=\"信息删除\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.delete_item)\n self.Button_delete.place(x=5, y=70)\n self.Button_select = tk.Button(self.Pane_left, text=\"选票\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.select_item)\n self.Button_select.place(x=5, y=100)\n self.Button_modify = tk.Button(self.Pane_left, text=\"导入文件\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.openfile)\n self.Button_modify.place(x=5, y=420)\n self.Button_save = tk.Button(self.Pane_left, text=\"保存数据\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.save)\n self.Button_save.place(x=5, y=450)\n\n self.notebook.add(self.frame1, text=\"可选余票\")\n self.notebook.add(self.frame2, text=\"已选购\")\n self.notebook.add(self.frame3, text=\"客户信息\")\n self.notebook.pack(padx=3, pady=3, fill=tk.BOTH, expand=True)\n\n # frame1右边: 
容器,查询、TreeView\n\n self.Pane_right = tk.PanedWindow(self.frame1, width=720, bg=\"Azure\")\n self.Pane_right.pack(fill=tk.Y, side=tk.LEFT, padx=2, pady=2)\n\n # LabelFrame\n self.LabelFrame_query = tk.LabelFrame(self.Pane_right, text=\"航班信息查询\", width=720, height=70)\n self.LabelFrame_query.place(x=2, y=2)\n # 添加控件\n self.Label_sno = tk.Label(self.LabelFrame_query, text=\"航班号:\")\n self.Label_sno.place(x=5, y=13)\n self.Entry_sno = tk.Entry(self.LabelFrame_query, width=12)\n self.Entry_sno.place(x=50, y=10)\n\n self.Label_start = tk.Label(self.LabelFrame_query, text=\"起点:\")\n self.Label_start.place(x=160, y=13)\n self.Entry_start = tk.Entry(self.LabelFrame_query, width=12)\n self.Entry_start.place(x=200, y=10)\n\n self.Label_end = tk.Label(self.LabelFrame_query, text=\"终点:\")\n self.Label_end.place(x=300, y=13)\n self.Entry_end = tk.Entry(self.LabelFrame_query, width=12)\n self.Entry_end.place(x=340, y=10)\n\n self.Button_query = tk.Button(self.LabelFrame_query, text=\"查询\", width=4, command=self.find_item)\n self.Button_query.place(x=520, y=10)\n self.Button_all = tk.Button(self.LabelFrame_query, text=\"显示全部\", width=8,\n command=lambda: self.load_treeview(self.all_fight_list))\n self.Button_all.place(x=590, y=10)\n\n # 添加TreeView控件\n self.Tree = ttk.Treeview(self.Pane_right, columns=(\"sno\", \"st_time\",\n \"fi_time\", \"origin\", \"destination\", \"price\", \"remaining\",\n \"time\"),\n show=\"headings\", height=20)\n\n # 设置每一个列的宽度和对齐的方式\n self.Tree.column(\"sno\", width=90, anchor=\"center\")\n self.Tree.column(\"st_time\", width=100, anchor=\"center\")\n self.Tree.column(\"fi_time\", width=100, anchor=\"center\")\n self.Tree.column(\"origin\", width=100, anchor=\"center\")\n self.Tree.column(\"destination\", width=100, anchor=\"center\")\n self.Tree.column(\"price\", width=70, anchor=\"center\")\n self.Tree.column(\"remaining\", width=80, anchor=\"center\")\n self.Tree.column(\"time\", width=70, anchor=\"center\")\n # 设置每个列的标题\n self.Tree.heading(\"sno\", text=\"航班号\", command=lambda: self.treeview_sort_column(self.Tree, 'sno', False))\n self.Tree.heading(\"st_time\", text=\"出发时间\",\n command=lambda: self.treeview_sort_column(self.Tree, 'st_time', False))\n self.Tree.heading(\"fi_time\", text=\"到站时间\",\n command=lambda: self.treeview_sort_column(self.Tree, 'fi_time', False))\n self.Tree.heading(\"origin\", text=\"起点\",\n command=lambda: self.treeview_sort_column(self.Tree, 'origin', False))\n self.Tree.heading(\"destination\", text=\"终点\",\n command=lambda: self.treeview_sort_column(self.Tree, 'destination', False))\n self.Tree.heading(\"price\", text=\"票价\", command=lambda: self.treeview_mergesort_column(self.Tree, 'price', False))\n self.Tree.heading(\"remaining\", text=\"余额\",\n command=lambda: self.treeview_mergesort_column(self.Tree, 'remaining', False))\n self.Tree.heading(\"time\", text=\"时长\", command=lambda: self.treeview_sort_column(self.Tree, 'time', False))\n\n self.Tree.place(x=2, y=72)\n self.VScroll1 = tk.Scrollbar(self.Pane_right, orient='vertical', command=self.Tree.yview)\n self.VScroll1.place(relx=0.980, rely=0.14, relwidth=0.024, relheight=0.86)\n # 给treeview添加配置\n self.Tree.configure(yscrollcommand=self.VScroll1.set)\n\n # frame2 左边组件:\n self.Pane_left_2 = tk.PanedWindow(self.frame2, width=162, bg=\"Azure\")\n self.Pane_left_2.pack(fill=tk.Y, side=tk.LEFT, padx=2, pady=2)\n self.Button_cancel = tk.Button(self.Pane_left_2, text=\"退票\", width=13, font=(\"华文黑体\", 12, \"bold\"),\n command=self.cancel_selected)\n self.Button_cancel.place(x=5, y=10)\n\n # frame2 
右边组件:\n self.Pane_right_2 = tk.PanedWindow(self.frame2, width=720, bg=\"Azure\")\n self.Pane_right_2.pack(fill=tk.Y, side=tk.LEFT, padx=2, pady=2)\n\n self.Tree2 = ttk.Treeview(self.Pane_right_2, columns=(\"sno\", \"st_time\",\n \"fi_time\", \"origin\", \"destination\", \"price\", \"remaining\",\n \"time\"),\n show=\"headings\", height=20)\n\n # 设置每一个列的宽度和对齐的方式\n self.Tree2.column(\"sno\", width=90, anchor=\"center\")\n self.Tree2.column(\"st_time\", width=100, anchor=\"center\")\n self.Tree2.column(\"fi_time\", width=100, anchor=\"center\")\n self.Tree2.column(\"origin\", width=100, anchor=\"center\")\n self.Tree2.column(\"destination\", width=100, anchor=\"center\")\n self.Tree2.column(\"price\", width=70, anchor=\"center\")\n self.Tree2.column(\"remaining\", width=80, anchor=\"center\")\n self.Tree2.column(\"time\", width=70, anchor=\"center\")\n # 设置每个列的标题\n self.Tree2.heading(\"sno\", text=\"航班号\", command=lambda: self.treeview_sort_column(self.Tree2, 'sno', False))\n self.Tree2.heading(\"st_time\", text=\"出发时间\",\n command=lambda: self.treeview_sort_column(self.Tree2, 'st_time', False))\n self.Tree2.heading(\"fi_time\", text=\"到站时间\",\n command=lambda: self.treeview_sort_column(self.Tree2, 'fi_time', False))\n self.Tree2.heading(\"origin\", text=\"起点\",\n command=lambda: self.treeview_sort_column(self.Tree2, 'origin', False))\n self.Tree2.heading(\"destination\", text=\"终点\",\n command=lambda: self.treeview_sort_column(self.Tree2, 'destination', False))\n self.Tree2.heading(\"price\", text=\"票价\",\n command=lambda: self.treeview_mergesort_column(self.Tree2, 'price', False))\n self.Tree2.heading(\"remaining\", text=\"余额\",\n command=lambda: self.treeview_mergesort_column(self.Tree2, 'remaining', False))\n self.Tree2.heading(\"time\", text=\"时长\", command=lambda: self.treeview_sort_column(self.Tree2, 'time', False))\n\n self.Tree2.place(x=2, y=72)\n self.VScroll1_2 = tk.Scrollbar(self.Pane_right_2, orient='vertical', command=self.Tree2.yview)\n self.VScroll1_2.place(relx=0.980, rely=0.14, relwidth=0.024, relheight=0.86)\n # 给treeview添加配置\n self.Tree2.configure(yscrollcommand=self.VScroll1_2.set)\n\n # LabelFrame\n self.LabelFrame_query_2 = tk.LabelFrame(self.Pane_right_2, text=\"航班信息查询\", width=720, height=70)\n self.LabelFrame_query_2.place(x=2, y=2)\n # 添加控件\n self.Label_sno_2 = tk.Label(self.LabelFrame_query_2, text=\"航班号:\")\n self.Label_sno_2.place(x=5, y=13)\n self.Entry_sno_2 = tk.Entry(self.LabelFrame_query_2, width=12)\n self.Entry_sno_2.place(x=50, y=10)\n\n self.Label_start_2 = tk.Label(self.LabelFrame_query_2, text=\"起点:\")\n self.Label_start_2.place(x=160, y=13)\n self.Entry_start_2 = tk.Entry(self.LabelFrame_query_2, width=12)\n self.Entry_start_2.place(x=200, y=10)\n\n self.Label_end_2 = tk.Label(self.LabelFrame_query_2, text=\"终点:\")\n self.Label_end_2.place(x=300, y=13)\n self.Entry_end_2 = tk.Entry(self.LabelFrame_query_2, width=12)\n self.Entry_end_2.place(x=340, y=10)\n\n self.Button_query_2 = tk.Button(self.LabelFrame_query_2, text=\"查询\", width=4, command=self.find_item2)\n self.Button_query_2.place(x=520, y=10)\n self.Button_all_2 = tk.Button(self.LabelFrame_query_2, text=\"显示全部\", width=8,\n command=lambda: self.load_treeview2(self.all_selected))\n self.Button_all_2.place(x=590, y=10)\n\n def load_info(self):\n try:\n self.all_fight_list = []\n with open(file=\"待购航班情况.txt\", mode=\"r\", encoding=\"utf-8\") as fd:\n # 一次读一行\n current_line = fd.readline()\n while current_line:\n temp_list = current_line.strip().split(\",\") # 长字符串分割层三个\n 
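# (hedged note, added: each row is one flight record - flight number, departure time,\n # arrival time, origin, destination, price, seats remaining, duration - i.e. 8 comma-separated fields)\n 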
self.all_fight_list.append(temp_list)\n # 读取下一行,读完了循环就结束了\n current_line = fd.readline()\n self.all_selected = []\n with open(file=\"已购航班情况.txt\", mode=\"r\", encoding=\"utf-8\") as f:\n # 一次读一行\n current_line = f.readline()\n while current_line:\n temp_list = current_line.strip().split(\",\") # 长字符串分割层三个\n self.all_selected.append(temp_list)\n # 读取下一行,读完了循环就结束了\n current_line = f.readline()\n except:\n showinfo(\"系统消息\", \"文件读取出现异常!\")\n self.load_treeview(self.all_fight_list)\n self.load_treeview2(self.all_selected)\n\n def openfile(self):\n self.file_path = askopenfilename()\n if self.file_path == \"\":\n return\n self.load_file_flight_info()\n self.load_treeview(self.all_fight_list)\n # 重新导入文件,刷新Tree2和其对应的列表\n delete_all_tree2 = self.Tree2.get_children()\n for id in delete_all_tree2:\n self.Tree2.delete(id)\n self.all_selected = []\n\n\n def load_file_flight_info(self):\n if not os.path.exists(self.file_path):\n showinfo(\"系统消息\", \"提供的文件名不存在!\")\n else:\n try:\n self.all_fight_list = []\n with open(file=self.file_path, mode=\"r\", encoding=\"utf-8\") as fd:\n # 一次读一行\n current_line = fd.readline()\n while current_line:\n temp_list = current_line.strip().split(\",\") # 长字符串分割层三个\n self.all_fight_list.append(temp_list)\n # 读取下一行,读完了循环就结束了\n current_line = fd.readline()\n except:\n showinfo(\"系统消息\", \"文件读取出现异常!\")\n\n def load_treeview(self, current_list: list):\n # 每次加载前先进行数据的清除\n for row in self.Tree.get_children():\n self.Tree.delete(row)\n # 判断是否有数据:\n if len(current_list) == 0:\n showinfo(\"系统消息\", \"没有数据加载\")\n else:\n for index in range(len(current_list)):\n self.Tree.insert(\"\", index, values=(current_list[index][0], current_list[index][1],\n current_list[index][2], current_list[index][3],\n current_list[index][4], current_list[index][5],\n current_list[index][6], current_list[index][7]))\n\n def load_treeview2(self, current_list: list):\n # 每次加载前先进行数据的清除\n for row in self.Tree2.get_children():\n self.Tree2.delete(row)\n # 判断是否有数据:\n if len(current_list) == 0:\n showinfo(\"系统消息\", \"没有数据加载\")\n else:\n for index in range(len(current_list)):\n self.Tree2.insert(\"\", index, values=(current_list[index][0], current_list[index][1],\n current_list[index][2], current_list[index][3],\n current_list[index][4], current_list[index][5],\n current_list[index][6], current_list[index][7]))\n\n\nif __name__ == '__main__':\n LoginWindow()\n","repo_name":"Rocklis/-python-","sub_path":"main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":35634,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"23173835790","text":"import unittest\nfrom processes import *\nfrom models import *\n\nclass TestPortfolio(unittest.TestCase):\n\n def test_create_portfolio(self):\n position = Position(100, 0, 1, 0, 1, 0)\n participant = Participant(\"Tim\", 9900, 1)\n index = Index(\"xci\", \"Xsauce Culture Index\", 150, \"12/12/2022\", \"12:02:02\")\n\n expected = \"Funds: {}\\n\" \\\n \"Short Shares: {} \\n\"\\\n \"Long Shares: {} \\n\" \\\n \"Short: {} \\n\"\\\n \"Long: {} \\n\" \\\n \"Avg Buy Price Short: {} \\n\"\\\n \"Avg Buy Price Long: {} \\n\" \\\n \"PNL: {}\\n\" \\\n \"Total Trades: {}\".format(9900, 0, 1 ,0, 150.0, 0, 100.0, 50.0, 1)\n\n portfolio_info = portfolio.determine_portfolio_by_index(position, participant, index)\n\n self.assertEqual(expected, repr(portfolio_info))\n\n def test_calculate_index_pnl(self):\n long_shares = 5\n short_shares = 0\n avg_buy_price_long = 10\n avg_buy_price_short = 10\n index_price = 20\n\n 
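# Worked expectation (added note, assuming calculate_long_position returns shares * current price):\n # 5 long shares bought at 10 cost 50 and are worth 5 * 20 = 100 at the index price,\n # so the profit and loss is 100 - 50 = 50, which is the 'expected' value below.\n\n 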
initial_long = long_shares * avg_buy_price_long\n initial_short = short_shares * avg_buy_price_short\n\n Long = portfolio.calculate_long_position(\n long_shares, avg_buy_price_long, index_price)\n\n Short = portfolio.calculate_short_position(\n short_shares, avg_buy_price_short, index_price)\n\n expected = 50\n\n pnl = round(portfolio.calculate_profit_and_loss(initial_long, initial_short, Long, Short))\n\n self.assertEqual(expected, pnl)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"m3elabs/Xchange-TG","sub_path":"test/test_portfolio.py","file_name":"test_portfolio.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
+{"seq_id":"11359027439","text":"# plotting of the experimental trace together with the channellab fit\n# input files are the concatenated columns of .fit files\n\nimport pyabf\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nimport pandas as pd\nimport seaborn as sns\n\n\n#sim_fit_5_traces = pd.read_csv('traces_5.csv', sep=',')\n#sim_fit_5_traces = sim_fit_5_traces.melt(id_vars='t', var_name='type', value_name='popen')\n#print(sim_fit_5_traces)\n\nsim_fit_30_traces = pd.read_csv('traces_30.csv', sep=',')\nsim_fit_30_traces = sim_fit_30_traces.melt(id_vars='t', var_name='type', value_name='popen')\nprint(sim_fit_30_traces)\n\n#sim_fit_1000_traces = pd.read_csv('traces_1000.csv')\n#sim_fit_1000_traces = sim_fit_1000_traces.melt(id_vars='t', var_name='type', value_name='popen')\n#print(sim_fit_1000_traces)\n\n'''\n#VERY OLD#\n\nsim_30_traces = ['WT_30_final.abf', 'G258V_30_final.abf', 'L296V_30_final.abf']\nsim_30_titles = ['WT', 'G258V', 'L296V']\nsim_30_colors = [(0, 0, 0), (0.88, 0.54, 0.31), (0.8, 0.74, 0.43)]\n\nsim_30_LC_traces = ['WT_LC_30_final.abf', 'G254V_LC_30_final.abf']\nsim_30_LC_titles = ['WT_LC', 'G254V']\nsim_30_LC_colors = [(0, 0, 0), (0.77, 0.20, 0.25)]\n\nsim_500_traces = ['WT_500_final.abf', 'L300V_500_final.abf']\nsim_500_titles = ['WT_500', 'L300V']\nsim_500_colors = [(0, 0, 0), (0.60, 0.75, 0.75)]\n'''\n\nsns.set_style()\nsns.set_context(\"paper\")\n\ndef plot_traces5(sim_fit_5_traces, sim_fit_colors, simt_fit_types, file):\n\n for_plot = sim_fit_5_traces.loc[sim_fit_5_traces['type'].isin(simt_fit_types)]\n\n sns.relplot(x='t', y='popen', hue='type', kind=\"line\", data=for_plot,\n palette=sim_fit_colors,\n #style='type',\n height=2, aspect=1.,\n #linewidth=3,\n #scale=2,\n size='type',\n sizes=(0.5, 1)\n )\n plt.savefig(file + '.png', dpi=600)\n #plt.show()\n\ndef plot_traces30(sim_fit_30_traces, sim_fit_colors, simt_fit_types, file):\n\n for_plot = sim_fit_30_traces.loc[sim_fit_30_traces['type'].isin(simt_fit_types)]\n\n sns.relplot(x='t', y='popen', hue='type', kind=\"line\", data=for_plot,\n palette=sim_fit_colors,\n #style='type',\n height=2, aspect=1.,\n #linewidth=3,\n #scale=2,\n size='type',\n sizes=(0.5, 1)\n )\n plt.savefig(file + '.png', dpi=600)\n #plt.show()\n\ndef plot_traces1000(sim_fit_1000_traces, sim_fit_colors, simt_fit_types, file):\n\n for_plot = sim_fit_1000_traces.loc[sim_fit_1000_traces['type'].isin(simt_fit_types)]\n\n sns.relplot(x='t', y='popen', hue='type', kind=\"line\", data=for_plot,\n palette=sim_fit_colors,\n #style='type',\n height=2, aspect=2.,\n #linewidth=3,\n size='type',\n sizes=(0.5, 1)\n )\n plt.savefig(file + '.png', dpi=600)\n #plt.show()\n\n\n#plot_traces5(sim_fit_5_traces, [(0.57, 0.55, 0.55), 'black'], ['WT_2D_exp', 'WT_2D_fit'], 'WT_2D')\n#plot_traces5(sim_fit_5_traces, [(0.40, 0.67, 0.67), 'black'], 
['L300V_exp', 'L300V_fit'], 'L300V')\n\n\nplot_traces30(sim_fit_30_traces, [(0.57, 0.55, 0.55), 'black'], ['wt_exp', 'wt_fit'], 'WT')\n#plot_traces30(sim_fit_30_traces, [(0.57, 0.55, 0.55), 'black'], ['WT_exp2', 'WT_fit2'], 'WT2')\n#plot_traces30(sim_fit_30_traces, [(0.57, 0.55, 0.55), 'black'], ['WT_LC_exp', 'WT_LC_fit'], 'WT_LC')\nplot_traces30(sim_fit_30_traces, [(0.88, 0.54, 0.31), 'black'], ['k_exp', 'k_fit'], 'P277K')\nplot_traces30(sim_fit_30_traces, [(0.70, 0.62, 0.17), 'black'], ['h_exp', 'h_fit'], 'P277H')\n#plot_traces30(sim_fit_30_traces, [(0.77, 0.20, 0.25), 'black'], ['G254V_exp', 'G254V_fit'], 'G254V')\n\n\n#plot_traces1000(sim_fit_1000_traces, ['grey', 'black'], ['WT_exp', 'WT_fit'], 'WT_1000')\n#plot_traces1000(sim_fit_1000_traces, [(0.40, 0.67, 0.67), 'black'], ['L300V_exp', 'L300V_fit'], 'L300V_1000')\n\n\n#plot_traces(sim_30_LC_traces, sim_30_LC_titles, sim_30_LC_colors, (0.04, 0.08))\n#plot_traces(sim_500_traces, sim_500_titles, sim_500_colors, (0, 1))","repo_name":"michal2am/bioscripts","sub_path":"multi_channel/fit_trace_aligned_plot.py","file_name":"fit_trace_aligned_plot.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"42926484096","text":"import argparse\nimport os\nimport gzip\nimport shutil\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\nimport pandas as pd\nfrom pathlib import Path\nfrom bs4 import BeautifulSoup\n\nsys.path.append(str(Path(__file__).absolute().parent.parent))\n\nfrom scripts import build_and_run\n\nurl = \"https://ftp.ncbi.nlm.nih.gov/pubchem/Compound/CURRENT-Full/SDF/\"\nprocessed_list_path = \"not initialized path\"\nsdf_dir_path = \"not initialized path\"\ndest_dir_path = \"not initialized path\"\nparse_mode = \"not initialized mode\"\n\ntarget = \"parse_sdf\"\n\n\ndef create_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sdf_dir_path\", type=str, required=True)\n parser.add_argument(\"--dest_dir_path\", type=str, required=True)\n parser.add_argument(\"--parse_mode\", type=str, required=True)\n parser.add_argument(\"--processed_list_path\", type=str, required=True)\n return parser\n\n\ndef get_urls(url, ext):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, features='lxml')\n all_links = [link.get(\"href\") for link in soup(\"a\")]\n return pd.Series(filter(lambda x: x.endswith(ext), all_links))\n\n\ndef tag_processed(file_name):\n with open(processed_list_path, 'a') as f:\n f.write(str(file_name) + '\\n')\n print(f'processed: {file_name}', file=sys.stderr)\n\n\ndef check_processed(file_name, processed):\n for processed_name in processed:\n if file_name in processed_name:\n return True\n return False\n\n\ndef remove_unprocessed():\n with processed_list_path.open('r') as f:\n processed = f.read().split()\n for file in os.listdir(sdf_dir_path):\n if check_processed(file, processed):\n continue\n if (sdf_dir_path / file).is_dir():\n shutil.rmtree(sdf_dir_path / file)\n else:\n os.remove(sdf_dir_path / file)\n print('remove:', sdf_dir_path / file, file=sys.stderr)\n\n\ndef download_zip(file_name, where_to):\n if file_name in processed_before:\n print(f\"skipped downloading of {file_name}, because it's already processed\", file=sys.stderr)\n return\n print(f\"start download {file_name}\", file=sys.stderr)\n with requests.get(url + file_name, stream=True) as r:\n with open(where_to / file_name, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n 
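# Added note: the NCBI dump arrives gzip-compressed, so the block below unpacks\n # file_name.sdf.gz to a plain .sdf next to it and then deletes the archive.\n 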
out_file_path = where_to / file_name[:-3]\n with gzip.open(where_to / file_name, 'rb') as f_in:\n with open(out_file_path, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(where_to / file_name)\n return out_file_path\n\n\ndef process(file_name):\n where_to = sdf_dir_path / str(file_name).split('.')[0]\n where_to.mkdir(exist_ok=True)\n sdf_file = download_zip(file_name, where_to)\n build_and_run.run(target, [f\"--source_dir_path={sdf_file.parent}\",\n f\"--dest_dir_path={dest_dir_path}\",\n f\"--parse_mode={parse_mode}\"])\n os.remove(sdf_file)\n os.rmdir(where_to)\n tag_processed(file_name)\n\n\nif __name__ == \"__main__\":\n arguments_parser = create_parser()\n args = arguments_parser.parse_args()\n print(args, file=sys.stderr)\n build_and_run.build(target)\n\n sdf_dir_path = Path(args.sdf_dir_path)\n dest_dir_path = Path(args.dest_dir_path)\n parse_mode = args.parse_mode\n processed_list_path = Path(args.processed_list_path)\n build_and_run.run_command(f'touch {processed_list_path}')\n\n remove_unprocessed()\n\n dest_dir_path.mkdir(parents=True, exist_ok=True)\n sdf_dir_path.mkdir(parents=True, exist_ok=True)\n with open(processed_list_path, 'r') as f:\n processed_before = set(f.read().split())\n sdfs = get_urls(url, \".sdf.gz\")\n file_names = pd.Series(list(sorted(sdfs)))\n\n ThreadPoolExecutor().map(process, file_names)\n","repo_name":"Seva-Vaskin/qtr-fingerprint","sub_path":"python/scripts/parse_sdf.py","file_name":"parse_sdf.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"18403107250","text":"import tensorflow as tf \nimport numpy as np \nimport viewer\n\ninput_size = 784\n\n\n\n\"\"\"\nIf you want to save weights, edit this to use tf.train.Saver (look for \"Session initializer!\"); feel free to do so.\nviewer is a custom viewer built with matplotlib.\n\"\"\"\ndef normalizer(data):\n mean = np.mean(data)\n std = np.std(data)\n data = (data - mean) / std\n return data\n\"\"\"\ndef normalizer(x_data):\n return (x_data/255.)\n\"\"\"\n\ndef unnormalizer(x_data):\n return x_data*255.\n \n\nclass Autoencoder():\n\n def __init__(self, input_size, n_hidden, batch_size=256, epochs=20, learning_rate = 0.01):\n self.input_size = input_size\n self.n_hidden = n_hidden\n self.batch_size = batch_size\n self.epochs = epochs\n self.learning_rate = learning_rate\n\n def inference_flatted(self, x):\n def weight_var(shape, name):\n #weight = tf.Variable(tf.truncated_normal(shape, stddev = 0.01))\n weight = tf.get_variable(name = 'W_'+name, shape=shape, initializer = tf.contrib.layers.xavier_initializer())\n #weight = tf.reshape(weight, [1]+shape)\n return weight\n\n def bias_var(shape, name):\n #bias = tf.Variable(tf.zeros(shape))\n bias = tf.get_variable(name = 'b_'+name, shape=shape, initializer = tf.contrib.layers.xavier_initializer())\n return bias\n\n def hidden(x, n_hidden, func_name):\n layer = None\n for number in range(0, len(n_hidden) - 1) :\n w = weight_var(n_hidden[number:number+2], str(number)+func_name)\n b = bias_var(n_hidden[number+1], str(number)+func_name)\n\n if number == 0:\n layer = tf.nn.relu(tf.add(tf.matmul(x,w), b))\n elif number == len(n_hidden)-2 and func_name == 'decoder':\n layer = tf.add(tf.matmul(layer, w), b)\n else:\n layer =tf.nn.relu(tf.add(tf.matmul(layer,w), b)) \n return layer\n self.x = x\n encoder = hidden(self.x, self.n_hidden, 'encoder')\n reversed_hidden = [x for x in self.n_hidden[::-1]]\n self.decoder = hidden(encoder, reversed_hidden, 'decoder')\n\n 
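# Added note: the decoder reuses the encoder's layer sizes in reverse (n_hidden\n # reversed), so the reconstruction ends up with the input's dimensionality.\n 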
return self.decoder\n\n\n def inference_normal(self, x):\n def weight_var(shape, name):\n #weight = tf.Variable(tf.truncated_normal(shape, stddev = 0.01))\n weight = tf.get_variable(name = 'W_'+name, shape=shape, initializer = tf.contrib.layers.xavier_initializer())\n #weight = tf.reshape(weight, [1]+shape)\n return weight\n\n def bias_var(shape, name):\n #bias = tf.Variable(tf.zeros(shape))\n bias = tf.get_variable(name = 'b_'+name, shape=shape, initializer = tf.contrib.layers.xavier_initializer())\n return bias\n\n def hidden(x, n_hidden, func_name):\n layer = None\n for number in range(0, len(n_hidden) - 1) :\n w = weight_var(n_hidden[number:number+2], str(number)+func_name)\n b = bias_var(n_hidden[number+1], str(number)+func_name)\n\n if number == 0:\n layer = tf.nn.relu(tf.add(tf.einsum(\"ijk,kh->ijh\",x,w), b))\n elif number == len(n_hidden)-2 and func_name == 'decoder':\n layer = tf.add(tf.einsum(\"ijk,kh->ijh\",layer, w), b)\n else:\n layer =tf.nn.relu(tf.add(tf.einsum(\"ijk,kh->ijh\", layer,w), b)) \n return layer\n self.x = x\n encoder = hidden(self.x, self.n_hidden, 'encoder')\n reversed_hidden = [x for x in self.n_hidden[::-1]]\n self.decoder = hidden(encoder, reversed_hidden, 'decoder')\n\n return self.decoder\n\n #cost function\n def cost_func(self, x_data, decoder):\n #self.cost = tf.reduce_mean(tf.pow(x_data-decoder, 2))\n self.cost = tf.reduce_mean(tf.reduce_sum(tf.square(x_data - decoder), axis = -1))\n return self.cost\n\n def training(self, cost, lr = 0.01):\n self.train = tf.train.AdamOptimizer(learning_rate = lr)\n self.train = self.train.minimize(cost)\n return self.train\n\n def fit(self, x_data, sess):\n print(\"==start training==\\n\")\n for epoch in range(self.epochs):\n np.random.shuffle(x_data)\n for batch in range(len(x_data)//self.batch_size):\n start = batch*self.batch_size\n end = start+self.batch_size\n _, loss = sess.run([self.train, self.cost], feed_dict = {self.x:x_data[start:end]})\n print(\"epochs : \",epoch, \"batch : \", end,\"/\",len(x_data)//self.batch_size, \"loss : \", loss, end = '\\r')\n print(\"epochs : \",epoch, \"batch : \", end,\"/\",len(x_data)//self.batch_size, \"loss : \", loss)\n\n print(\"===end training===\")\n\n \"\"\"\n def eval_cost(self, x_data, sess):\n co = sess.run(self.cost, feed_dict = { self.x : x_data })\n return co\n \"\"\"\n\n def test(self, test_data, sess):\n print(\"test\")\n decode_data, loss = sess.run([self.decoder, self.cost], feed_dict={self.x:test_data})\n print(loss)\n #decode_data = unnormalizer(decode_data)\n test_data = viewer.rebuild_image(test_data)\n decode_data = viewer.rebuild_image(decode_data)\n viewer.viewer(test_data[0:7], decode_data[0:7])\n\n \n\n\n\n\n\n\n\n \n\n\n\n","repo_name":"fabyday/MNIST_AutoEncoder","sub_path":"mnist_autoencoder.py","file_name":"mnist_autoencoder.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"19228224315","text":"r=5\nwhile r>0 :\n\tc=5\n\tresult=''\n\twhile c>0 :\n\t\tif r==5 or c==5 :\n\t\t\tresult+='5'\n\t\telif r==4 or c==4 :\n\t\t\tresult+='4'\n\t\telif r==3 or c==3 :\n\t\t\tresult+='3'\n\t\telif r==2 or c==2 :\n\t\t\tresult+='2'\n\t\telif r==1 or c==1 :\n\t\t\tresult+='1'\n\t\tc-=1\n\tprint(result)\n\tr-=1","repo_name":"TarunKumar38/Python-practice-codes","sub_path":"Patterns/n14.py","file_name":"n14.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"32233648102","text":"import openpmd_api as api\nimport time\nimport os\nimport h5py\nimport Shadow\nimport numpy\n\ndef loadShadowOpenPMD(filename):\n '''\n Parameters\n ----------\n filename: str\n\n Output\n ------\n Shadow.Beam object\n '''\n f = h5py.File(filename,'r')\n\n x = f[\"/data/0/particles/rays/position\"][0,:]\n y = f[\"/data/0/particles/rays/position\"][1,:]\n z = f[\"/data/0/particles/rays/position\"][2,:]\n\n vx = f[\"/data/0/particles/rays/direction\"][0,:]\n vy = f[\"/data/0/particles/rays/direction\"][1,:]\n vz = f[\"/data/0/particles/rays/direction\"][2,:]\n\n Es_x = f[\"/data/0/particles/rays/photonSPolarizationAmplitude\"][0,:]\n Es_y = f[\"/data/0/particles/rays/photonSPolarizationAmplitude\"][1,:]\n Es_z = f[\"/data/0/particles/rays/photonSPolarizationAmplitude\"][2,:]\n\n Ep_x = f[\"/data/0/particles/rays/photonPPolarizationAmplitude\"][0,:]\n Ep_y = f[\"/data/0/particles/rays/photonPPolarizationAmplitude\"][1,:]\n Ep_z = f[\"/data/0/particles/rays/photonPPolarizationAmplitude\"][2,:]\n\n id = f[\"/data/0/particles/rays/id\"][:]\n lostRay = f[\"/data/0/particles/rays/particleStatus\"][:]\n phaseS = f[\"/data/0/particles/rays/photonSPolarizationPhase\"][:]\n phaseP = f[\"/data/0/particles/rays/photonPPolarizationPhase\"][:]\n wavelength = f[\"/data/0/particles/rays/wavelength\"][:]\n\n f.close()\n\n shadow_beam = Shadow.Beam(N=x.size)\n rays = shadow_beam.rays\n\n rays[:, 0] = x\n rays[:, 1] = y\n rays[:, 2] = z\n\n rays[:, 0 + 4 - 1] = vx\n rays[:, 1 + 4 - 1] = vy\n rays[:, 2 + 4 - 1] = vz\n\n rays[:, 0 + 7 - 1] = Es_x\n rays[:, 1 + 7 - 1] = Es_y\n rays[:, 2 + 7 - 1] = Es_z\n\n rays[:, 0 + 16 - 1] = Ep_x\n rays[:, 1 + 16 - 1] = Ep_y\n rays[:, 2 + 16 - 1] = Ep_z\n\n rays[:, 12 - 1] = id\n rays[:, 10 - 1] = lostRay\n rays[:, 14 - 1] = phaseS\n rays[:, 15 - 1] = phaseP\n rays[:, 11 - 1] = 2 * numpy.pi / (wavelength * 1e-7) # wavenumber in cm^-1\n\n print(wavelength)\n\n return shadow_beam\n\n# NEW VERSION\n\ndef saveShadowToHDF(oasysRaysObject, filename='ShadowOutput.h5', workspace_units_to_cm=100.0):\n '''\n Parameters\n ----------\n oasysRaysObject: Shadow.Beam object from Oasys\n\n filename: str\n\n workspace_units_to_cm: default is 100., otherwise take Oasys variable\n\n Output\n ------\n hdf file according to the openPMD specification for raytracing\n\n Description\n -----------\n Beam.getshonecol(colNo)\n Extract a column from a shadow file (eg. begin.dat) or a Shadow.Beam instance.\n The column are numbered in the fortran convention, i.e. 
starting from 1.\n It returns a numpy.array filled with the values of the chosen column.\n '''\n\n SCALAR = api.Mesh_Record_Component.SCALAR\n oasysRays = oasysRaysObject\n try:\n unit = workspace_units_to_cm # Conversion to cm\n except:\n unit = 100.\n\n # Unit_Dimension: length L, mass M, time T, electric current I, thermodynamic temperature theta, amount of substance N, luminous intensity J\n\n series = api.Series(filename, api.Access_Type.create)\n\n # get date\n dateNow = time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())\n\n # default series settings\n print(\"Default settings:\")\n print(\"basePath: \", series.base_path)\n print(\"openPMD version: \", series.openPMD)\n print(\"iteration format: \", series.iteration_format)\n\n # openPMD standard\n series.set_openPMD(\"1.1.0\")\n series.set_openPMD_extension(0)\n series.set_author(\"Aljosa Hafner \")\n\n series.set_date(dateNow)\n series.set_software(\"OASYS\", \"1.2\")\n series.set_comment(\"Example output from ShadowOui widget in OASYS.\")\n\n # series.set_particles_path(\"rays\")\n\n n_iter = [0]\n # new iteration\n for n in n_iter:\n cur_it = series.iterations[n]\n\n nRays = oasysRays.nrays()\n\n rays = cur_it.particles['rays']\n\n # id\n id = oasysRays.getshonecol(12)\n d = api.Dataset(id.dtype, id.shape)\n\n rays[\"id\"][SCALAR].reset_dataset(d)\n rays[\"id\"][SCALAR].store_chunk(id)\n\n # - position: m\n # - direction: unitless\n # - photonSPolarizationAmplitude: have to check\n # - photonSPolarizationPhase: unitless\n # - wavelength: nm\n\n # Position\n position = numpy.vstack((oasysRays.getshonecol(1),\n oasysRays.getshonecol(2),\n oasysRays.getshonecol(3))) # 3xN\n\n d = api.Dataset(position.dtype, position.shape)\n rays[\"position\"][SCALAR].reset_dataset(d)\n rays[\"position\"][SCALAR].set_unit_SI(unit / 1e2) # <--------- recheck this.\n rays[\"position\"].set_unit_dimension({api.Unit_Dimension.L: 1.}) # m\n rays[\"position\"][SCALAR].store_chunk(position)\n\n # Direction\n direction = numpy.vstack((oasysRays.getshonecol(4),\n oasysRays.getshonecol(5),\n oasysRays.getshonecol(6))) # 3xN\n d = api.Dataset(direction.dtype, direction.shape)\n rays[\"direction\"][SCALAR].reset_dataset(d)\n rays[\"direction\"][SCALAR].set_unit_SI(unit / 1e2)\n rays[\"direction\"][SCALAR].store_chunk(direction)\n\n # Polarization of E-field, S-polarization\n photonSPolarizationAmplitude = numpy.vstack((oasysRays.getshonecol(7),\n oasysRays.getshonecol(8),\n oasysRays.getshonecol(9))) # 3xN\n d = api.Dataset(photonSPolarizationAmplitude.dtype, photonSPolarizationAmplitude.shape)\n rays[\"photonSPolarizationAmplitude\"][SCALAR].reset_dataset(d)\n rays[\"photonSPolarizationAmplitude\"][SCALAR].set_unit_SI(unit / 1e2)\n rays[\"photonSPolarizationAmplitude\"][SCALAR].store_chunk(photonSPolarizationAmplitude)\n\n # Polarization of E-field, P-polarization\n photonPPolarizationAmplitude = numpy.vstack((oasysRays.getshonecol(16),\n oasysRays.getshonecol(17),\n oasysRays.getshonecol(18))) # 3xN\n d = api.Dataset(photonPPolarizationAmplitude.dtype, photonPPolarizationAmplitude.shape)\n rays[\"photonPPolarizationAmplitude\"][SCALAR].reset_dataset(d)\n rays[\"photonPPolarizationAmplitude\"][SCALAR].set_unit_SI(unit / 1e2)\n rays[\"photonPPolarizationAmplitude\"][SCALAR].store_chunk(photonPPolarizationAmplitude)\n\n # Photon energy [1.602176634e−19 eV = J = kg m^2 s^-2]\n\n wavelength = 1240 / oasysRays.getshonecol(11)\n\n d = api.Dataset(wavelength.dtype, wavelength.shape)\n rays[\"wavelength\"][SCALAR].reset_dataset(d)\n 
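# Added note: wavelength above is 1240 / E[eV] (hc in eV*nm), i.e. values in\n # nanometres; the 1e-9 SI factor on the next line converts them to metres.\n 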
rays[\"wavelength\"][SCALAR].set_unit_SI(1e-9)\n rays[\"wavelength\"].set_unit_dimension({api.Unit_Dimension.L: 1.})\n rays[\"wavelength\"][SCALAR].store_chunk(wavelength)\n\n # Phase for S-polarized and P-polarized photons\n photonSPolarizationPhase = oasysRays.getshonecol(14)\n photonPPolarizationPhase = oasysRays.getshonecol(15)\n\n d = api.Dataset(photonSPolarizationPhase.dtype, photonSPolarizationPhase.shape)\n rays[\"photonSPolarizationPhase\"][SCALAR].reset_dataset(d)\n rays[\"photonSPolarizationPhase\"][SCALAR].set_unit_SI(1.0)\n rays[\"photonPPolarizationPhase\"][SCALAR].reset_dataset(d)\n rays[\"photonPPolarizationPhase\"][SCALAR].set_unit_SI(1.0)\n rays[\"photonSPolarizationPhase\"][SCALAR].store_chunk(photonSPolarizationPhase)\n rays[\"photonPPolarizationPhase\"][SCALAR].store_chunk(photonPPolarizationPhase)\n\n # Lost rays\n particleStatus = oasysRays.getshonecol(10)\n d = api.Dataset(particleStatus.dtype, particleStatus.shape)\n rays[\"particleStatus\"][SCALAR].reset_dataset(d)\n rays[\"particleStatus\"][SCALAR].set_unit_SI(1.0)\n rays[\"particleStatus\"][SCALAR].store_chunk(particleStatus)\n\n series.flush()\n\n del series\n\n# OLD VERSION\n\n# def saveShadowToHDF(oasysRaysObject, filename='raytracing_out.h5', workspace_units_to_cm=100.0 ):\n# '''\n# Beam.getshonecol(colNo)\n# Extract a column from a shadow file (eg. begin.dat) or a Shadow.Beam instance.\n# The column are numbered in the fortran convention, i.e. starting from 1.\n# It returns a numpy.array filled with the values of the chosen column.\n#\n# Inumpy.ts:\n# beam : str instance with the name of the shadow file to be loaded. OR\n# Shadow.Beam initialized instance.\n# col : int for the chosen columns.\n#\n# Outputs:\n# numpy.array 1-D with length numpy.INT.\n#\n# Error:\n# if an error occurs an ArgsError is raised.\n#\n# Possible choice for col are:\n# 1 X spatial coordinate [user's unit]\n# 2 Y spatial coordinate [user's unit]\n# 3 Z spatial coordinate [user's unit]\n# 4 Xp direction or divergence [rads]\n# 5 Yp direction or divergence [rads]\n# 6 Zp direction or divergence [rads]\n# 7 X component of the electromagnetic vector (s-polariz)\n# 8 Y component of the electromagnetic vector (s-polariz)\n# 9 Z component of the electromagnetic vector (s-polariz)\n# 10 Lost ray flag\n# 11 Energy [eV]\n# 12 Ray index\n# 13 Optical path length\n# 14 Phase (s-polarization) in rad\n# 15 Phase (p-polarization) in rad\n# 16 X component of the electromagnetic vector (p-polariz)\n# 17 Y component of the electromagnetic vector (p-polariz)\n# 18 Z component of the electromagnetic vector (p-polariz)\n# 19 Wavelength [A]\n# 20 R= SQRT(X^2+Y^2+Z^2)\n# 21 angle from Y axis\n# 22 the magnitude of the Electromagnetic vector\n# 23 |E|^2 (total intensity)\n# 24 total intensity for s-polarization\n# 25 total intensity for p-polarization\n# 26 K = 2 pi / lambda [A^-1]\n# 27 K = 2 pi / lambda * col4 [A^-1]\n# 28 K = 2 pi / lambda * col5 [A^-1]\n# 29 K = 2 pi / lambda * col6 [A^-1]\n# 30 S0-stokes = |Es|^2 + |Ep|^2\n# 31 S1-stokes = |Es|^2 - |Ep|^2\n# 32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)\n# 33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)\n# 34 Power = intensity(col 23) * energy (col 11)\n# 35 Angle-X with Y: |arcsin(X')|\n# 36 Angle-Z with Y: |arcsin(Z')|\n# 37 Angle-X with Y: |arcsin(X') - mean(arcsin(X'))|\n# 38 Angle-Z with Y: |arcsin(Z') - mean(arcsin(Z'))|\n# '''\n#\n# SCALAR = api.Mesh_Record_Component.SCALAR\n# oasysRays = oasysRaysObject\n# try:\n# unit = workspace_units_to_cm # Conversion to cm\n# except:\n# unit = 
100.\n#\n# # Unit_Dimension: length L, mass M, time T, electric current I, thermodynamic temperature theta, amount of substance N, luminous intensity J\n#\n# try:\n# os.remove(filename)\n# except:\n# pass\n# series = api.Series(filename, api.Access_Type.create)\n#\n# # get date\n# dateNow = time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())\n#\n# # default series settings\n# print(\"Default settings:\")\n# print(\"basePath: \", series.base_path)\n# print(\"openPMD version: \", series.openPMD)\n# print(\"iteration format: \", series.iteration_format)\n#\n# # openPMD standard\n# series.set_openPMD(\"1.1.0\")\n# series.set_openPMD_extension(0)\n# series.set_author(\"Aljosa Hafner \")\n#\n# series.set_date(dateNow)\n# series.set_software(\"OASYS\", \"1.2\")\n# series.set_comment(\"Example output from ShadowOui widget in OASYS.\")\n#\n# # series.set_particles_path(\"rays\")\n#\n# # new iteration\n# cur_it = series.iterations[0]\n#\n# nRays = oasysRays.nrays()\n#\n# rays = cur_it.particles['rays']\n#\n# # id\n# id = oasysRays.getshonecol(12)\n# d = api.Dataset(id.dtype, id.shape)\n# rays[\"id\"][SCALAR].reset_dataset(d)\n# rays[\"id\"][SCALAR].store_chunk(id)\n#\n# # Position\n# position_x = oasysRays.getshonecol(1)\n# position_y = oasysRays.getshonecol(2)\n# position_z = oasysRays.getshonecol(3)\n#\n# # Position in [m]\n# d = api.Dataset(position_x.dtype, position_x.shape)\n# rays[\"position\"][\"x\"].reset_dataset(d)\n# rays[\"position\"][\"x\"].set_unit_SI(unit / 1e2)\n# rays[\"position\"][\"y\"].reset_dataset(d)\n# rays[\"position\"][\"y\"].set_unit_SI(unit / 1e2)\n# rays[\"position\"][\"z\"].reset_dataset(d)\n# rays[\"position\"][\"z\"].set_unit_SI(unit / 1e2)\n# rays[\"position\"].set_unit_dimension({api.Unit_Dimension.L: 1.0})\n# rays[\"position\"][\"x\"].store_chunk(position_x)\n# rays[\"position\"][\"y\"].store_chunk(position_y)\n# rays[\"position\"][\"z\"].store_chunk(position_z)\n#\n# # Direction\n# direction_x = oasysRays.getshonecol(4)\n# direction_y = oasysRays.getshonecol(5)\n# direction_z = oasysRays.getshonecol(6)\n#\n# # Direction in [rad]\n# d = api.Dataset(direction_x.dtype, direction_x.shape)\n# rays[\"direction\"][\"x\"].reset_dataset(d)\n# rays[\"direction\"][\"x\"].set_unit_SI(1.0)\n# rays[\"direction\"][\"y\"].reset_dataset(d)\n# rays[\"direction\"][\"y\"].set_unit_SI(1.0)\n# rays[\"direction\"][\"z\"].reset_dataset(d)\n# rays[\"direction\"][\"z\"].set_unit_SI(1.0)\n# rays[\"direction\"][\"x\"].store_chunk(direction_x)\n# rays[\"direction\"][\"y\"].store_chunk(direction_y)\n# rays[\"direction\"][\"z\"].store_chunk(direction_z)\n#\n# # Polarization of E-field, S-polarization\n# sPol_x = oasysRays.getshonecol(7)\n# sPol_y = oasysRays.getshonecol(8)\n# sPol_z = oasysRays.getshonecol(9)\n#\n# # S-polarization in [unitless]\n# d = api.Dataset(sPol_x.dtype, sPol_x.shape)\n# rays[\"eFieldSPolarisation\"][\"x\"].reset_dataset(d)\n# rays[\"eFieldSPolarisation\"][\"x\"].set_unit_SI(1.0)\n# rays[\"eFieldSPolarisation\"][\"y\"].reset_dataset(d)\n# rays[\"eFieldSPolarisation\"][\"y\"].set_unit_SI(1.0)\n# rays[\"eFieldSPolarisation\"][\"z\"].reset_dataset(d)\n# rays[\"eFieldSPolarisation\"][\"z\"].set_unit_SI(1.0)\n# rays[\"eFieldSPolarisation\"][\"x\"].store_chunk(sPol_x)\n# rays[\"eFieldSPolarisation\"][\"y\"].store_chunk(sPol_y)\n# rays[\"eFieldSPolarisation\"][\"z\"].store_chunk(sPol_z)\n#\n# # Polarization of E-field, P-polarization\n# pPol_x = oasysRays.getshonecol(16)\n# pPol_y = oasysRays.getshonecol(17)\n# pPol_z = oasysRays.getshonecol(18)\n#\n# # P-polarization in 
[unitless]\n# d = api.Dataset(pPol_x.dtype, pPol_x.shape)\n# rays[\"eFieldPPolarisation\"][\"x\"].reset_dataset(d)\n# rays[\"eFieldPPolarisation\"][\"x\"].set_unit_SI(1.0)\n# rays[\"eFieldPPolarisation\"][\"y\"].reset_dataset(d)\n# rays[\"eFieldPPolarisation\"][\"y\"].set_unit_SI(1.0)\n# rays[\"eFieldPPolarisation\"][\"z\"].reset_dataset(d)\n# rays[\"eFieldPPolarisation\"][\"z\"].set_unit_SI(1.0)\n# rays[\"eFieldPPolarisation\"][\"x\"].store_chunk(pPol_x)\n# rays[\"eFieldPPolarisation\"][\"y\"].store_chunk(pPol_y)\n# rays[\"eFieldPPolarisation\"][\"z\"].store_chunk(pPol_z)\n#\n# #TODO: remove, information already in wavelength\n# # Photon energy [1.602176634e−19 eV = J = kg m^2 s^-2]\n# energy = oasysRays.getshonecol(11)\n# d = api.Dataset(energy.dtype, energy.shape)\n# rays[\"photonEnergy\"][SCALAR].reset_dataset(d)\n# rays[\"photonEnergy\"][SCALAR].set_unit_SI(1.602176634e-19)\n# rays[\"photonEnergy\"].set_unit_dimension({api.Unit_Dimension.L: 2.,\n# api.Unit_Dimension.M: 1.,\n# api.Unit_Dimension.T: -2.})\n# rays[\"photonEnergy\"][SCALAR].store_chunk(energy)\n#\n# # Photon wavelength [A]\n# wavelength = oasysRays.getshonecol(19)\n# d = api.Dataset(wavelength.dtype, wavelength.shape)\n# rays[\"photonWavelength\"][SCALAR].reset_dataset(d)\n# rays[\"photonWavelength\"][SCALAR].set_unit_SI(1e-10)\n# rays[\"photonWavelength\"].set_unit_dimension({api.Unit_Dimension.L: 1.})\n# rays[\"photonWavelength\"][SCALAR].store_chunk(wavelength)\n#\n# #TODO: check is open PMD wants angle (radiants) as in SHADOW or exp(i * angle)\n# # Phase for S-polarized and P-polarized photons\n# phase_sPol_r = oasysRays.getshonecol(14)\n# phase_pPol_r = oasysRays.getshonecol(15)\n#\n# # Phase [rad]\n# d = api.Dataset(phase_sPol_r.dtype, phase_sPol_r.shape)\n# rays[\"phase\"][\"sPol_r\"].reset_dataset(d)\n# rays[\"phase\"][\"sPol_r\"].set_unit_SI(1.0)\n# rays[\"phase\"][\"pPol_r\"].reset_dataset(d)\n# rays[\"phase\"][\"pPol_r\"].set_unit_SI(1.0)\n# rays[\"phase\"][\"sPol_r\"].store_chunk(phase_sPol_r)\n# rays[\"phase\"][\"pPol_r\"].store_chunk(phase_pPol_r)\n#\n# # TODO: remove? Info in |Es|**2 + {Ep|**2\n# # Total intensity [unitless]\n# intensity = oasysRays.getshonecol(23)\n# d = api.Dataset(intensity.dtype, intensity.shape)\n# rays[\"totalIntensity\"][SCALAR].reset_dataset(d)\n# rays[\"totalIntensity\"][SCALAR].set_unit_SI(1.0)\n# rays[\"totalIntensity\"][SCALAR].store_chunk(intensity)\n#\n# # Lost rays\n# lost_ray = oasysRays.getshonecol(10)\n# d = api.Dataset(intensity.dtype, intensity.shape)\n# rays[\"lostRay\"][SCALAR].reset_dataset(d)\n# rays[\"lostRay\"][SCALAR].set_unit_SI(1.0)\n# rays[\"lostRay\"][SCALAR].store_chunk(lost_ray)\n#\n# # TODO missing SHADOW optical path (column 13)\n# series.flush()\n#\n# del series\n\n\n\nif __name__ == \"__main__\":\n\n def run_shadow(iwrite=0):\n\n #\n # initialize shadow3 source (oe0) and beam\n #\n beam = Shadow.Beam()\n oe0 = Shadow.Source()\n oe1 = Shadow.OE()\n oe2 = Shadow.OE()\n oe3 = Shadow.OE()\n oe4 = Shadow.OE()\n oe5 = Shadow.OE()\n\n #\n # Define variables. 
See meaning of variables in:\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n #\n\n oe0.FDISTR = 3\n oe0.F_COLOR = 3\n oe0.F_PHOT = 0\n oe0.F_POLAR = 0\n oe0.HDIV1 = 0.0\n oe0.HDIV2 = 0.0\n oe0.IDO_VX = 0\n oe0.IDO_VZ = 0\n oe0.IDO_X_S = 0\n oe0.IDO_Y_S = 0\n oe0.IDO_Z_S = 0\n oe0.ISTAR1 = 5676561\n oe0.NPOINT = 500000\n oe0.PH1 = 14100.0\n oe0.PH2 = 14300.0\n oe0.SIGDIX = 9.5e-05\n oe0.SIGDIZ = 3.1e-06\n oe0.SIGMAX = 6.1e-05\n oe0.SIGMAZ = 9e-06\n oe0.VDIV1 = 0.0\n oe0.VDIV2 = 0.0\n\n oe1.DUMMY = 100.0\n oe1.FWRITE = 3\n oe1.F_REFRAC = 2\n oe1.F_SCREEN = 1\n oe1.N_SCREEN = 1\n oe1.T_IMAGE = 0.0\n oe1.T_INCIDENCE = 0.0\n oe1.T_REFLECTION = 180.0\n oe1.T_SOURCE = 28.2\n\n oe2.DUMMY = 100.0\n oe2.FHIT_C = 1\n oe2.FILE_REFL = b'/home/aljosa/Oasys/development_sprint/si5_15.111'\n oe2.FWRITE = 1\n oe2.F_CENTRAL = 1\n oe2.F_CRYSTAL = 1\n oe2.PHOT_CENT = 14200.0\n oe2.RLEN1 = 0.02\n oe2.RLEN2 = 0.02\n oe2.RWIDX1 = 0.15\n oe2.RWIDX2 = 0.15\n oe2.R_LAMBDA = 5000.0\n oe2.T_IMAGE = 0.0\n oe2.T_INCIDENCE = 81.9952066442\n oe2.T_REFLECTION = 81.9952066442\n oe2.T_SOURCE = 1.8\n\n oe3.DUMMY = 100.0\n oe3.FWRITE = 3\n oe3.F_REFRAC = 2\n oe3.F_SCREEN = 1\n oe3.I_SLIT = numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n oe3.N_SCREEN = 1\n oe3.RX_SLIT = numpy.array([0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe3.RZ_SLIT = numpy.array([0.0008, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n oe3.T_IMAGE = 0.0\n oe3.T_INCIDENCE = 0.0\n oe3.T_REFLECTION = 180.0\n oe3.T_SOURCE = 10.9\n\n oe4.DUMMY = 100.0\n oe4.FCYL = 1\n oe4.FHIT_C = 1\n oe4.FMIRR = 2\n oe4.FWRITE = 3\n oe4.F_DEFAULT = 0\n oe4.RLEN1 = 0.15\n oe4.RLEN2 = 0.15\n oe4.RWIDX1 = 0.02\n oe4.RWIDX2 = 0.02\n oe4.SIMAG = 2.1\n oe4.SSOUR = 43.15\n oe4.THETA = 89.77654645989898\n oe4.T_IMAGE = 0.1\n oe4.T_INCIDENCE = 89.77654645989898\n oe4.T_REFLECTION = 89.77654645989898\n oe4.T_SOURCE = 2.25\n\n oe5.ALPHA = 90.0\n oe5.DUMMY = 100.0\n oe5.FCYL = 1\n oe5.FHIT_C = 1\n oe5.FMIRR = 2\n oe5.FWRITE = 3\n oe5.F_DEFAULT = 0\n oe5.RLEN1 = 0.15\n oe5.RLEN2 = 0.15\n oe5.RWIDX1 = 0.02\n oe5.RWIDX2 = 0.02\n oe5.SIMAG = 1.9\n oe5.SSOUR = 43.35\n oe5.THETA = 89.77654645989898\n oe5.T_IMAGE = 1.9\n oe5.T_INCIDENCE = 89.77654645989898\n oe5.T_REFLECTION = 89.77654645989898\n oe5.T_SOURCE = 0.1\n\n # Run SHADOW to create the source\n\n if iwrite:\n oe0.write(\"start.00\")\n\n beam.genSource(oe0)\n\n if iwrite:\n oe0.write(\"end.00\")\n beam.write(\"begin.dat\")\n\n #\n # run optical element 1\n #\n print(\" Running optical element: %d\" % (1))\n if iwrite:\n oe1.write(\"start.01\")\n\n beam.traceOE(oe1, 1)\n\n if iwrite:\n oe1.write(\"end.01\")\n beam.write(\"star.01\")\n\n #\n # run optical element 2\n #\n print(\" Running optical element: %d\" % (2))\n if iwrite:\n oe2.write(\"start.02\")\n\n beam.traceOE(oe2, 2)\n\n if iwrite:\n oe2.write(\"end.02\")\n beam.write(\"star.02\")\n\n #\n # run optical element 3\n #\n print(\" Running optical element: %d\" % (3))\n if iwrite:\n oe3.write(\"start.03\")\n\n beam.traceOE(oe3, 3)\n\n if iwrite:\n oe3.write(\"end.03\")\n beam.write(\"star.03\")\n\n #\n # run optical element 4\n #\n print(\" Running optical element: %d\" % (4))\n if iwrite:\n oe4.write(\"start.04\")\n\n beam.traceOE(oe4, 4)\n\n if iwrite:\n oe4.write(\"end.04\")\n beam.write(\"star.04\")\n\n #\n # run optical element 5\n #\n print(\" Running optical element: %d\" % (5))\n if iwrite:\n oe5.write(\"start.05\")\n\n beam.traceOE(oe5, 5)\n\n if iwrite:\n 
oe5.write(\"end.05\")\n beam.write(\"star.05\")\n\n return beam\n\n\n def compare_rays(beamnew, beam, user_unit_to_m=1.0, do_plot=True, do_assert=False):\n\n from srxraylib.plot.gol import plot_scatter\n\n rays = beam.rays\n raysnew = beamnew.rays\n\n if do_plot:\n plot_scatter(rays[:, 3], rays[:, 5], title=\"Divergences\", show=False)\n plot_scatter(raysnew[:, 3], raysnew[:, 5], title=\"Divergences new\")\n\n plot_scatter(rays[:, 0], rays[:, 2], title=\"Real Space\", show=False)\n plot_scatter(raysnew[:, 0], raysnew[:, 2], title=\"Real Space new\")\n\n Shadow.ShadowTools.histo1(beam, 11, ref=23, nolost=1)\n Shadow.ShadowTools.histo1(beamnew, 11, ref=23, nolost=1)\n\n\n\n beam = run_shadow()\n beam.write(\"star.05\")\n\n\n saveShadowToHDF(oasysRaysObject=beam, filename=\"/home/aljosa/Oasys/development_sprint_2/tmp.h5\")#, workspace_units_to_cm=1e-2)\n\n beam2 = loadShadowOpenPMD(\"/home/aljosa/Oasys/development_sprint_2/tmp.h5\")\n\n compare_rays(beam2,beam)\n # Shadow.ShadowTools.plotxy(beam, 1, 3, nbins=101, nolost=1, title=\"Real space\")\n # Shadow.ShadowTools.plotxy(beam2, 1, 3, nbins=101, nolost=1, title=\"RELOADED : Real space\")\n # Shadow.ShadowTools.histo1(beam, 11, nbins=101, ref=23, nolost=1 )\n # Shadow.ShadowTools.histo1(beam2,11, nbins=101, ref=23, nolost=1 )\n","repo_name":"PaNOSC-ViNYL/OASYS1-PaNOSC","sub_path":"orangecontrib/panosc/shadow/util/openPMD.py","file_name":"openPMD.py","file_ext":"py","file_size_in_byte":23693,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41310149177","text":"import os\nimport logging\nimport requests\nimport yaml\nfrom keycloak import KeycloakAdmin, KeycloakOpenID\n\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"keycloak_bootstrap\")\nlogger.setLevel(logging.DEBUG)\n\n# This is the only URL this file should ever need -\n# The URL stuff inside the cluster (aka this bootstrap job) will use to resolve keycloak (private, non-browser clients)\nkc_internal_url = os.getenv(\n \"KEYCLOAK_INTERNAL_URL\", \"http://keycloak-http/auth\"\n).rstrip(\"/\")\n\n\ndef createAttributes(attribute_host, preloaded_attributes, authToken):\n loc = f\"{attribute_host}/definitions/attributes\"\n for definition in preloaded_attributes:\n q_params = {\"name\": definition[\"name\"], \"authority\": definition[\"authority\"]}\n # Look up existing attrs by guaranteed-unique NS/authority combo\n # to decide if we should POST (not there) or PUT (already there)\n get_response = requests.get(\n loc, headers={\"Authorization\": f\"Bearer {authToken}\"}, params=q_params\n )\n # POST - add new\n if get_response.status_code == 404 or (\n get_response.status_code == 200 and not get_response.json()\n ):\n logger.info(f\"Adding attribute definition {definition}\")\n http_call = requests.post\n\n # PUT - update existing\n elif get_response.status_code < 400:\n logger.info(f\"Updating attribute definition {definition}\")\n http_call = requests.put\n\n else:\n # catch and weird codes from attribtues service\n logger.error(\n \"Unexpected code [%s] from attributes service when attempting to GET attribute definition! [%s]\",\n get_response.status_code,\n get_response.text,\n exc_info=True,\n )\n exit(1)\n\n logger.debug(\"Using auth JWT: [%s]\", authToken)\n response = http_call(\n loc,\n json=definition,\n headers={\"Authorization\": f\"Bearer {authToken}\"},\n )\n if response.status_code != 200:\n logger.error(\n \"Unexpected code [%s] from attributes service! 
[%s]\",\n response.status_code,\n response.text,\n exc_info=True,\n )\n exit(1)\n else:\n logger.info(\"Attribute created/updated successfully\")\n\n\ndef createAuthorities(attribute_host, preloaded_authorities, authToken):\n loc = f\"{attribute_host}/authorities\"\n get_response = requests.get(loc, headers={\"Authorization\": f\"Bearer {authToken}\"})\n for authority in preloaded_authorities:\n if authority not in get_response.json():\n logger.info(f\"Adding authority {authority}\")\n logger.debug(\"Using auth JWT: [%s]\", authToken)\n\n response = requests.post(\n loc,\n json={\"authority\": authority},\n headers={\"Authorization\": f\"Bearer {authToken}\"},\n )\n if response.status_code != 200:\n logger.error(\n \"Unexpected code [%s] from attributes service when attempting to create authority! [%s]\",\n response.status_code,\n response.text,\n exc_info=True,\n )\n exit(1)\n\n\ndef createPreloaded(\n keycloak_admin,\n realm,\n keycloak_auth_url,\n preloaded_authorities,\n preloaded_attributes,\n):\n attribute_clientid = os.getenv(\"ATTRIBUTES_CLIENT_ID\")\n attribute_username = os.getenv(\"ATTRIBUTES_USERNAME\")\n attribute_password = os.getenv(\"ATTRIBUTES_PASSWORD\")\n attribute_host = os.getenv(\n \"ATTRIBUTE_AUTHORITY_HOST\", \"http://attributes:4020\"\n ).rstrip(\"/\")\n\n keycloak_openid = KeycloakOpenID(\n server_url=keycloak_auth_url,\n client_id=attribute_clientid,\n realm_name=realm,\n )\n\n logger.debug(\n \"Connecting to realm [%s] on [%s] with user [%s] for client [%s]\",\n realm,\n keycloak_auth_url,\n attribute_username,\n attribute_clientid,\n )\n authToken = keycloak_openid.token(attribute_username, attribute_password)\n\n # Create authorities\n if preloaded_authorities is not None:\n createAuthorities(\n attribute_host, preloaded_authorities, authToken[\"access_token\"]\n )\n # Create attributes\n if preloaded_attributes is not None:\n createAttributes(\n attribute_host, preloaded_attributes, authToken[\"access_token\"]\n )\n\n\ndef attributes_bootstrap():\n logger.info(\"Running Attributes/PGSQL bootstrap\")\n username = os.getenv(\"keycloak_admin_username\")\n password = os.getenv(\"keycloak_admin_password\")\n keycloak_auth_url = kc_internal_url + \"/\"\n\n # Preloaded authorities\n try:\n with open(\"/etc/virtru-config/authorities.yaml\") as f:\n preloaded_authorities = yaml.safe_load(f)\n except FileNotFoundError:\n logger.warning(\"Not found: /etc/virtru-config/authorities.yaml\", exc_info=1)\n preloaded_authorities = None\n\n # Preloaded attributes\n try:\n with open(\"/etc/virtru-config/attributes.yaml\") as f:\n preloaded_attributes = yaml.safe_load(f)\n except FileNotFoundError:\n logger.warning(\"Not found: /etc/virtru-config/attributes.yaml\", exc_info=1)\n preloaded_attributes = None\n\n if not preloaded_attributes and not preloaded_authorities:\n return\n\n attribute_realm = os.getenv(\"ATTRIBUTES_REALM\")\n\n keycloak_admin = KeycloakAdmin(\n server_url=keycloak_auth_url,\n username=username,\n password=password,\n realm_name=attribute_realm,\n user_realm_name=\"master\",\n )\n\n # TDF\n createPreloaded(\n keycloak_admin,\n attribute_realm,\n keycloak_auth_url,\n preloaded_authorities,\n preloaded_attributes,\n )\n","repo_name":"opentdf/backend","sub_path":"containers/keycloak-bootstrap/attributes_bootstrap.py","file_name":"attributes_bootstrap.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"42199651555","text":"from PIL import Image\nimport numpy 
as np\nfrom chainer import cuda\nimport chainer\ntry:\n    import cupy\nexcept ImportError:\n    pass\nimport os\n\nfrom .image_processing import copy_to_cpu, postprocessing_tanh, postprocessing_sigmoid\n\n\ndef save_single_image(img, path, using_tanh=True):\n    img = copy_to_cpu(img)\n    if using_tanh:\n        img = postprocessing_tanh(img)\n    else:\n        img = postprocessing_sigmoid(img)\n    #ch, w, h = img.shape\n    img = img.transpose((1, 2, 0))\n    pilImg = Image.fromarray(img)\n    pilImg.save(path, \"JPEG\")\n    # cv2.imwrite(path, img)\n\ndef create_image_grid(imgs, grid_w=4, grid_h=4,\n                      using_tanh=True, transposed=False, bgr2rgb=False):\n\n    imgs = copy_to_cpu(imgs)\n\n    if using_tanh:\n        imgs = postprocessing_tanh(imgs)\n    else:\n        imgs = postprocessing_sigmoid(imgs)\n\n    b, ch, w, h = imgs.shape\n    assert b == grid_w*grid_h\n\n    if bgr2rgb:\n        imgs2 = imgs.copy()\n        imgs[:,0,:,:], imgs[:,2,:,:] = imgs2[:,2,:,:], imgs2[:,0,:,:]\n\n    imgs = imgs.reshape((grid_w, grid_h, ch, w, h))\n    imgs = imgs.transpose(0, 1, 3, 4, 2)\n\n    if transposed:\n        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(1, 2, 0, 3, 4).reshape((grid_h*w, grid_w*h, ch))\n    else:\n        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(0, 2, 1, 3, 4).reshape((grid_w*w, grid_h*h, ch))\n\n    # Automatically compress grayscale\n    if ch==1:\n        imgs = imgs.reshape((grid_w*w, grid_h*h))\n\n    return imgs\n\ndef save_images_grid(imgs, path, grid_w=4, grid_h=4,\n                     using_tanh=True, transposed=False, bgr2rgb=False):\n\n    imgs = copy_to_cpu(imgs)\n\n    if using_tanh:\n        imgs = postprocessing_tanh(imgs)\n    else:\n        imgs = postprocessing_sigmoid(imgs)\n\n\n    b, ch, w, h = imgs.shape\n    assert b == grid_w*grid_h\n\n    imgs = imgs.reshape((grid_w, grid_h, ch, w, h))\n    imgs = imgs.transpose(0, 1, 3, 4, 2)\n\n    if transposed:\n        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(1, 2, 0, 3, 4).reshape((grid_h*w, grid_w*h, ch))\n    else:\n        imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(0, 2, 1, 3, 4).reshape((grid_w*w, grid_h*h, ch))\n\n    if ch==1:\n        imgs = imgs.reshape((grid_w*w, grid_h*h))\n\n\n    pilImg = Image.fromarray(imgs)\n    pilImg.save(path, \"JPEG\")\n    # if bgr2rgb:\n    #     imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)\n    # cv2.imwrite(path, imgs)\n\ndef convert_batch_images(x, rows, cols):\n    x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)\n    _, _, H, W = x.shape\n    x = x.reshape((rows, cols, 3, H, W))\n    x = x.transpose(0, 3, 1, 4, 2)\n    x = x.reshape((rows * H, cols * W, 3))\n    return x\n","repo_name":"pfnet-research/chainer-stylegan","sub_path":"src/common/utils/save_images.py","file_name":"save_images.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"81"} +{"seq_id":"74394993225","text":"import random\n\nrandom_number = random.randint(1, 10)\nguess = None\n\nwhile True:  # loop until the player chooses to quit\n    guess = int(input(\"Guess a number between 1 and 10. \"))\n    if guess < random_number:\n        print(\"Too low. Try again. \")\n    elif(guess > random_number):\n        print(\"Too high. Try again. \")\n    else:\n        print(f\"That is correct. The number was {random_number}.\")\n        keep_playing = input(\"Would you like to keep playing? 
(y/n): \")\n if keep_playing == \"y\":\n random_number = random.randint(1, 10)\n guess = None\n else:\n print(\"Thanks for playing\")\n break\n","repo_name":"theranbrig/python_tuts","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32344285022","text":"S = input()\nL = [\"A\", \"C\", \"G\", \"T\"]\n\narr = []\nst = \"\"\n\nfor s in S:\n if st and s not in L:\n st = \"\"\n if s in L:\n st += s\n arr.append(st)\n\nprint(max(map(len, arr)) if arr else 0)\n","repo_name":"tanaka0325/atcoder","sub_path":"abc122/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5421557300","text":"import eventlet\nimport time\nimport re\nimport pymysql\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as expected_conditions\n\nfrom config import *\n\neventlet.monkey_patch()\n\nprint(\"---------------init-------------\")\nchrome_options = Options()\n# chrome_options.add_argument('--headless') # 使用无头谷歌浏览器模式,如你要调试请注释\nchrome_options.add_argument('--disable-gpu')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument(\"--proxy-server=\" + proxy_server)\ndriver = webdriver.Chrome(options=chrome_options)\n\nbase_url = 'https://www.pinterest.com/'\n\n\n# 插入图片src到数据库\ndef insert_user_info(search_text, img_src):\n print(\"插入图片src到数据库\")\n try:\n cue.execute(\"insert ignore into pinterest_img \"\n \"(search_text, img_src) \"\n \"values (%s,%s)\",\n [search_text, img_src])\n except Exception as e:\n print('Insert error:', e)\n con.rollback()\n else:\n con.commit()\n\n\n# 搜索\ndef search(search_text):\n print(\"-------search-------\")\n # 点击登陆按钮后页面可能一时半会没加载出来\n # 如果规定时间没这个元素则会提示报错:selenium.common.exceptions.TimeoutException: Message:\n element = WebDriverWait(driver, 15).until(expected_conditions.presence_of_element_located((By.XPATH, '//input[@data-test-id=\"search-box-input\"]')))\n element.send_keys(search_text)\n driver.find_element_by_xpath('//input[@data-test-id=\"search-box-input\"]').send_keys(Keys.ENTER)\n count_scroll = 0\n print(\"睡眠3s等待页面加载完图片数据\")\n time.sleep(3)\n while count_scroll < 3: # 限制滑动次数\n # 滑倒底\n obtainPhotoSrc()\n js = \"window.scrollTo(0,document.body.scrollHeight)\"\n print('当前滚动,次数为:', count_scroll)\n driver.execute_script(js)\n count_scroll += 1\n print('结束滚动,次数为:', count_scroll)\n time.sleep(3)\n print(\"停止滚动了,结束\")\n\ndef obtainPhotoSrc():\n # 获取所有图片链接 这里报错:说我递归太深\n print(\"进入获取图片方法\")\n lst_img = [img.get_attribute('src') for img in driver.find_elements_by_xpath(\"//div[@class='vbI XiG']//div[@data-test-id='pin']//img\")]\n lst_img = [re.sub('236x', '564x', i) for i in lst_img] # 换成大图\n count = len(lst_img)\n print(search_text, 'unique img count:', count)\n for i in range(count):\n img_src = lst_img[i]\n print(search_text, i, '/', count, img_src)\n # if img_src != None:\n # insert_user_info(search_text, img_src)\n\n\nif __name__ == '__main__':\n con = pymysql.connect(host=host, user=user, passwd=psd, db=db, charset=c, port=port)\n cue = con.cursor()\n\n print(\"数据库链接上\")\n # 登录\n driver.get(base_url)\n\n try:\n # 
        # First find the login button; clicking it opens the login dialog, then enter the credentials to finish logging in\n        elem = driver.find_element_by_xpath(\"//div[@data-test-id='simple-login-button']\")\n        elem.click()  # click the login button\n        driver.find_element_by_id('email').send_keys(LOGIN_EMAIL)\n        driver.find_element_by_id('password').send_keys(LOGIN_PASSWORD)\n        driver.find_element_by_xpath('//button[@class=\"red SignupButton active\"]').click()  # log in via the popup\n        if TYPE_DB_OR_IMG == 1:\n            for search_text in lst_search_text:\n                # while True:\n                search(search_text)\n    finally:\n        print(\"finally\")\n        driver.quit()\n        cue.close()\n","repo_name":"jkhumm/SpiderSSS","sub_path":"Pinterest/p_search_img_humm.py","file_name":"p_search_img_humm.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31678101143","text":"pcbnew = __import__('pcbnew')\nimport kicad\nfrom kicad.pcbnew import layer\nfrom kicad.point import Point\nfrom kicad import units\nfrom kicad.pcbnew.item import HasPosition\n\nclass Via(HasPosition, object):\n    def __init__(self, coord, layer_pair, diameter, drill, board=None):\n        self._obj = pcbnew.VIA(board and board.native_obj)\n        self.diameter = diameter\n        coord_point = Point.build_from(coord)\n        self._obj.SetEnd(coord_point.native_obj)\n        self._obj.SetStart(coord_point.native_obj)\n        if board:\n            self._obj.SetLayerPair(board.get_layer(layer_pair[0]),\n                                   board.get_layer(layer_pair[1]))\n        else:\n            self._obj.SetLayerPair(layer.get_std_layer(layer_pair[0]),\n                                   layer.get_std_layer(layer_pair[1]))\n\n        self.drill = drill\n\n    @property\n    def native_obj(self):\n        return self._obj\n\n    @staticmethod\n    def wrap(instance):\n        \"\"\"Wraps a C++ api VIA object, and returns a `Via`.\"\"\"\n        return kicad.new(Via, instance)\n\n    @property\n    def drill(self):\n        \"\"\"Via drill diameter\"\"\"\n        return float(self._obj.GetDrill()) / units.DEFAULT_UNIT_IUS\n\n    @drill.setter\n    def drill(self, value):\n        self._obj.SetDrill(int(value * units.DEFAULT_UNIT_IUS))\n\n    @property\n    def diameter(self):\n        \"\"\"Via diameter\"\"\"\n        return float(self._obj.GetWidth()) / units.DEFAULT_UNIT_IUS\n\n    @diameter.setter\n    def diameter(self, value):\n        self._obj.SetWidth(int(value * units.DEFAULT_UNIT_IUS))\n","repo_name":"cculpepper/kicad-python","sub_path":"kicad/pcbnew/via.py","file_name":"via.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"25210426714","text":"from superdesk.resource import Resource\n\nTODAY_DATE = \"now/d\"\nWEEK_DATE = \"now/w\"\n\nallowed_times = [\"now-{0}h\".format(hour) for hour in range(1, 25)]\nallowed_times.append(TODAY_DATE)\nallowed_times.append(WEEK_DATE)\n\n\nclass HighlightsResource(Resource):\n    \"\"\"Highlights schema\"\"\"\n\n    schema = {\n        \"name\": {\"type\": \"string\", \"iunique\": True, \"required\": True},\n        \"template\": Resource.rel(\"content_templates\", nullable=True),\n        \"desks\": {\"type\": \"list\", \"schema\": Resource.rel(\"desks\", True)},\n        \"auto_insert\": {\n            \"type\": \"string\",\n            \"allowed\": allowed_times,\n            \"default\": TODAY_DATE,\n        },\n        \"groups\": {\"type\": \"list\", \"schema\": {\"type\": \"string\"}},\n    }\n    privileges = {\"POST\": \"highlights\", \"PATCH\": \"highlights\", \"DELETE\": \"highlights\"}\n\n    mongo_indexes = {\n        \"name_1\": ([(\"name\", 1)], {\"unique\": True}),\n    }\n\n\nclass MarkedForHighlightsResource(Resource):\n    \"\"\"Marked for highlights Schema\"\"\"\n\n    schema = {\"highlights\": {\"type\": \"list\", \"required\": True}, \"marked_item\": {\"type\": \"string\", 
\"required\": True}}\n privileges = {\"POST\": \"mark_for_highlights\"}\n","repo_name":"superdesk/superdesk-core","sub_path":"apps/highlights/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"33575947255","text":"from nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\n__all__ = [\n \"clean\",\n \"ner_preprocessing\",\n]\n\n\ndef clean(a):\n noise = stopwords.words(\"english\") \n new = []\n\n for x in word_tokenize(a):\n if x not in noise:\n new.append(x)\n \n return new\n\ndef ner_preprocessing(a):\n tokens = word_tokenize(a)\n prepositions = [\"visit\", \"to\", \"in\", \"near\", \"by\", \"at\", \"for\"]\n \n for prep in prepositions:\n idxs = [i for i, x in enumerate(tokens) if x == prep]\n for i in idxs:\n if i+1 < len(tokens) and tokens[i+1] not in prepositions:\n tokens[i+1] = tokens[i+1][0].upper() + tokens[i+1][1:]\n\n return ' '.join(tokens)\n","repo_name":"VIVelev/NLBooking","sub_path":"nlbooking/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42787569889","text":"''' An example that runs and renders a pendulum agent.\n'''\nimport autograd.numpy as np\nimport matplotlib.pyplot as plt\nfrom ncortex.envs import DoubleIntegrator\nfrom ncortex.optimization.ddp import DDP\n\n\ndef main():\n ''' Run an example DDP on the Pendulum environment.\n '''\n # Initialize the environment and solve with DDP.\n x_init = np.random.random((101, 2))\n u_init = np.zeros((100, 1))\n env = DoubleIntegrator(dt=0.05, x_0=x_init[0, :], use_tf=False)\n ddp = DDP(env, x_init, u_init)\n info = ddp.solve(max_iter=10)\n\n # Plot the solution in phase space.\n plt.plot(ddp.x[:, 0], ddp.x[:, 1])\n plt.figure()\n plt.plot(info['cost'])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pvarin/ncortex","sub_path":"ncortex/examples/run_double_integrator_ddp.py","file_name":"run_double_integrator_ddp.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14896366761","text":"class Solution:\n def findClosest(self, hourses, stores):\n if len(hourses) == 0 or len(stores) == 0:\n return []\n stores = sorted(stores)\n print(stores)\n hourseDis = {}\n res = []\n for index, hourse in enumerate(hourses):\n if hourse in hourseDis:\n res.append(hourseDis[hourse])\n continue\n else:\n hourseDis[hourse] = self.find(hourse, stores)\n res.append(hourseDis[hourse])\n return res\n\n def find(self, hourse, stores):\n start, end = 0, len(stores) - 1\n\n while start + 1 < end:\n mid = (start + end) // 2\n if stores[mid] < hourse:\n start = mid\n else:\n end = mid\n print(stores[start], stores[end], hourse)\n if abs(stores[start] - hourse) <= abs(stores[end] - hourse):\n \n return stores[start]\n else:\n return stores[end]\n\n\nhourses = [5, 10, 17]\nstores = [1, 5, 3, 7, 13, 20, 11, 12,10,16]\ns = Solution()\nr = s.findClosest(hourses, stores)\nprint(r)\n","repo_name":"shaheming/leecode","sub_path":"google/house_store.py","file_name":"house_store.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6255702009","text":"import discord\nfrom discord.ext import commands\nfrom .utils.dataIO import dataIO, 
fileIO\nimport os\nfrom cogs.utils import checks\nimport re\n\ntags_path = \"data/crtools/tags.json\"\nclans_path = \"data/crtools/clans.json\"\n\ntags_bs_path = \"data/crtools/tags_bs.json\"\nclubs_path = \"data/crtools/clubs.json\"\n\nauth_path = \"data/crtools/auth.json\"\nconstants_path = \"data/crtools/constants.json\"\n\nBOTCOMMANDER_ROLES = [\"Family Representative\", \"Clan Manager\",\n                      \"Club Manager\", \"Club Deputy\", \"Vice President\",\n                      \"Clan Deputy\", \"Co-Leader\", \"Hub Officer\", \"admin\"]\n\ndefault_clans = {'default': {'tag': '9PJYVVL2', 'role': 'everyone', 'name': 'default',\n                             'nickname': 'default', 'discord': None, 'waiting': [], 'members': {},\n                             'bonustitle': '', 'personalbest': 0, 'warTrophies': 0, 'approval': False,\n                             'log_channel': None, 'warlog_channel': None, 'emoji': '',\n                             'cwr': {\"legend\": 0, \"gold\": 0, \"silver\": 0, \"bronze\": 0}}}\n\ndefault_clubs = {'default': {'tag': 'VUYG8U2', 'role': 'everyone', 'name': 'default',\n                             'nickname': 'default', 'discord': None, 'waiting': [], 'members': {},\n                             'bonustitle': '', 'personalbest': 0, 'approval': False,\n                             'log_channel': None, 'emoji': ''}}\n\n\nclass constants:\n    \"\"\"constants Management\"\"\"\n\n    def __init__(self):\n        self.constants = dataIO.load_json(constants_path)\n        self.images = 'https://royaleapi.github.io/cr-api-assets/'\n\n    async def card_to_key(self, name):\n        \"\"\"Card key to decklink id.\"\"\"\n        for card in self.constants[\"cards\"]:\n            if name == card[\"name\"]:\n                return str(card[\"id\"])\n        return None\n\n    async def card_to_rarity(self, name):\n        \"\"\"Card name to rarity.\"\"\"\n        for card in self.constants[\"cards\"]:\n            if name == card[\"name\"]:\n                return card[\"rarity\"]\n        return None\n\n    async def get_new_level(self, card):\n        \"\"\"Convert the old card levels to the new ones\"\"\"\n        newLevel = card.level\n        if card.max_level == 11:\n            newLevel = card.level + 2\n        elif card.max_level == 8:\n            newLevel = card.level + 5\n        elif card.max_level == 5:\n            newLevel = card.level + 8\n\n        return newLevel\n\n    async def get_region_key(self, num):\n        \"\"\"Get a region's key name.\"\"\"\n        for region in self.constants[\"regions\"]:\n            if num == region[\"id\"]:\n                return region[\"key\"].lower()\n        return None\n\n    async def decklink_url(self, deck, war=False):\n        \"\"\"Decklink URL.\"\"\"\n        ids = []\n        for card in deck:\n            ids.append(await self.card_to_key(card[\"name\"]))\n        url = 'https://link.clashroyale.com/deck/en?deck=' + ';'.join(ids)\n        if war:\n            url += '&ID=CRRYRPCC&war=1'\n        return url\n\n    async def get_clan_image(self, p):\n        \"\"\"Get clan badge URL from badge ID\"\"\"\n        try:\n            badge_id = p.clan.badge_id\n        except AttributeError:\n            try:\n                badge_id = p.badge_id\n            except AttributeError:\n                return 'https://i.imgur.com/Y3uXsgj.png'\n\n        if badge_id is None:\n            return 'https://i.imgur.com/Y3uXsgj.png'\n\n        for i in self.constants[\"alliance_badges\"]:\n            if i[\"id\"] == badge_id:\n                return self.images + 'badges/' + i[\"name\"] + '.png'\n\n\nclass tags:\n    \"\"\"Tags Management\"\"\"\n\n    def __init__(self):\n        self.tags = dataIO.load_json(tags_path)\n        self.tags_bs = dataIO.load_json(tags_bs_path)\n\n    async def verifyTag(self, tag):\n        \"\"\"Check if a player/clan tag is valid\"\"\"\n        check = ['P', 'Y', 'L', 'Q', 'G', 'R', 'J', 'C', 'U', 'V', '0', '2', '8', '9']\n\n        if any(i not in check for i in tag):\n            return False\n\n        return True\n\n    async def formatTag(self, tag):\n        \"\"\"Sanitize and format a CR tag\"\"\"\n        return tag.strip('#').upper().replace('O', '0')\n
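\n    # For instance (hypothetical tag), formatTag strips the '#', upper-cases, and maps\n    # the letter O to zero, since real tags never contain O:\n    #   await tags().formatTag('#2opp0gyq')  ->  '20PP0GYQ'\n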
Name\"\"\"\n p = re.sub(r'(.*)<\\/c>', r'\\1', name)\n return p or name\n\n async def linkTagCR(self, tag, userID):\n \"\"\"Link a CR player tag to a discord User\"\"\"\n tag = await self.formatTag(tag)\n\n self.tags.update({userID: {'tag': tag}})\n dataIO.save_json(tags_path, self.tags)\n\n async def unlinkTagCR(self, userID):\n \"\"\"Unlink a CR player tag to a discord User\"\"\"\n if self.tags.pop(str(userID), None):\n dataIO.save_json(tags_path, self.tags)\n return True\n return False\n\n async def getTagCR(self, userID):\n \"\"\"Get a user's CR Tag\"\"\"\n return self.tags[userID]['tag']\n\n async def linkTagBS(self, tag, userID):\n \"\"\"Link a BS player tag to a discord User\"\"\"\n tag = await self.formatTag(tag)\n\n self.tags_bs.update({userID: {'tag': tag}})\n dataIO.save_json(tags_bs_path, self.tags_bs)\n\n async def unlinkTagBS(self, userID):\n \"\"\"Unlink a BS player tag to a discord User\"\"\"\n if self.tags_bs.pop(str(userID), None):\n dataIO.save_json(tags_bs_path, self.tags_bs)\n return True\n return False\n\n async def getTagBS(self, userID):\n \"\"\"Get a user's BS Tag\"\"\"\n return self.tags_bs[userID]['tag']\n\n async def getUserCR(self, serverUsers, tag):\n \"\"\"Get User from CR Tag\"\"\"\n for user in serverUsers:\n if user.id in self.tags:\n player_tag = self.tags[user.id]['tag']\n if player_tag == await self.formatTag(tag):\n return user\n return None\n\n async def getUserBS(self, serverUsers, tag):\n \"\"\"Get User from BS Tag\"\"\"\n for user in serverUsers:\n if user.id in self.tags_bs:\n player_tag = self.tags_bs[user.id]['tag']\n if player_tag == await self.formatTag(tag):\n return user\n return None\n\n\nclass auth:\n \"\"\"RoyaleAPI key management\"\"\"\n\n def __init__(self):\n self.auth = dataIO.load_json(auth_path)\n\n async def addToken(self, key):\n \"\"\"Add a RoyaleAPI Token\"\"\"\n self.auth['RoyaleAPI'] = key\n dataIO.save_json(auth_path, self.auth)\n\n async def addTokenBS(self, key):\n \"\"\"Add a BrawlAPI.cf Token\"\"\"\n self.auth['BrawlAPI'] = key\n dataIO.save_json(auth_path, self.auth)\n\n async def addTokenOfficial(self, key):\n \"\"\"Add a api.clashroyal.com Token\"\"\"\n self.auth['OfficialAPI'] = key\n dataIO.save_json(auth_path, self.auth)\n\n def getToken(self):\n \"\"\"Get RoyaleAPI Token\"\"\"\n return self.auth['RoyaleAPI']\n\n def getOfficialToken(self):\n \"\"\"Get OfficialAPI Token\"\"\"\n return self.auth['OfficialAPI']\n\n def getBSToken(self):\n \"\"\"Get brawlstars-api Token\"\"\"\n return self.auth['BrawlAPI']\n\n\nclass clans:\n \"\"\"CR Clan Family Management\"\"\"\n\n def __init__(self):\n self.clans = dataIO.load_json(clans_path)\n\n async def getClans(self):\n \"\"\"Return clan array\"\"\"\n return self.clans\n\n async def getClanData(self, clankey, data):\n \"\"\"Return clan array\"\"\"\n return self.clans[clankey][data]\n\n async def getClanMemberData(self, clankey, memberkey, data):\n \"\"\"Return clan member's dict\"\"\"\n return self.clans[clankey]['members'][memberkey][data]\n\n async def numClans(self):\n \"\"\"Return the number of clans\"\"\"\n return len(self.clans.keys())\n\n def keysClans(self):\n \"\"\"Get keys of all the clans\"\"\"\n return self.clans.keys()\n\n def keysClanMembers(self, clankey):\n \"\"\"Get keys of all the clan members\"\"\"\n return self.clans[clankey]['members'].keys()\n\n async def namesClans(self):\n \"\"\"Get name of all the clans\"\"\"\n return \", \".join(key for key in self.keysClans())\n\n async def tagsClans(self):\n \"\"\"Get tags of all the clans\"\"\"\n return 
[self.clans[clan][\"tag\"] for clan in self.clans]\n\n async def rolesClans(self):\n \"\"\"Get roles of all the clans\"\"\"\n roles = [\"Member\"]\n for x in self.clans:\n roles.append(self.clans[x]['role'])\n return roles\n\n async def verifyMembership(self, clantag):\n \"\"\"Check if a clan is part of the family\"\"\"\n for clankey in self.keysClans():\n if self.clans[clankey]['tag'] == clantag:\n return True\n return False\n\n async def getClanKey(self, clantag):\n \"\"\"Get a clan key from a clan tag.\"\"\"\n for clankey in self.keysClans():\n if self.clans[clankey]['tag'] == clantag:\n return clankey\n return None\n\n async def numWaiting(self, clankey):\n \"\"\"Get a clan's wating list length from a clan key.\"\"\"\n return len(self.clans[clankey]['waiting'])\n\n async def setWarTrophies(self, clankey, trophies):\n \"\"\"Set a clan's wartrophies\"\"\"\n self.clans[clankey]['warTrophies'] = trophies\n dataIO.save_json(clans_path, self.clans)\n\n async def setWarstats(self, clankey, tag, trophies, cards):\n \"\"\"Set a clan member's wins and cards\"\"\"\n self.clans[clankey]['members'][tag]['WarDayWins'] = trophies\n self.clans[clankey]['members'][tag]['cardsEarned'] = cards\n dataIO.save_json(clans_path, self.clans)\n\n async def getMemberWins(self, clankey, tag):\n \"\"\"Get a member's war day wins from the week\"\"\"\n try:\n return self.clans[clankey]['members'][tag]['WarDayWins']\n except KeyError:\n return 0\n\n async def getMemberCards(self, clankey, tag):\n \"\"\"Get a member's cardsEarned from the week\"\"\"\n try:\n return self.clans[clankey]['members'][tag]['cardsEarned']\n except KeyError:\n return 0\n\n async def getClanCWR(self, clankey, league):\n \"\"\"Get a clan's CWR\"\"\"\n try:\n return self.clans[clankey]['cwr'][league]\n except KeyError:\n return 0\n\n async def addWaitingMember(self, clankey, memberID):\n \"\"\"Add a user to a clan's waiting list\"\"\"\n if memberID not in self.clans[clankey]['waiting']:\n self.clans[clankey]['waiting'].append(memberID)\n dataIO.save_json(clans_path, self.clans)\n return True\n else:\n return False\n\n async def delWaitingMember(self, clankey, memberID):\n \"\"\"Remove a user to a clan's waiting list\"\"\"\n if memberID in self.clans[clankey]['waiting']:\n self.clans[clankey]['waiting'].remove(memberID)\n dataIO.save_json(clans_path, self.clans)\n\n return True\n else:\n return False\n\n async def checkWaitingMember(self, clankey, memberID):\n \"\"\"check if a user is in a waiting list\"\"\"\n return memberID in self.clans[clankey]['waiting']\n\n async def getWaitingIndex(self, clankey, memberID):\n \"\"\"Get the waiting position from a clan's waiting list\"\"\"\n return self.clans[clankey]['waiting'].index(memberID)\n\n async def delClan(self, clankey):\n \"\"\"delete a clan from the family\"\"\"\n if self.clans.pop(clankey, None):\n dataIO.save_json(clans_path, self.clans)\n return True\n return False\n\n async def setPBTrophies(self, clankey, trophies):\n \"\"\"Set a clan's PB Trohies\"\"\"\n self.clans[clankey]['personalbest'] = trophies\n dataIO.save_json(clans_path, self.clans)\n\n async def setCWR(self, clankey, league, cwr):\n \"\"\"Set a clan's CWR\"\"\"\n self.clans[clankey]['cwr'][league] = cwr\n dataIO.save_json(clans_path, self.clans)\n\n async def setBonus(self, clankey, bonus):\n \"\"\"Set a clan's Bonus Statement\"\"\"\n self.clans[clankey]['bonustitle'] = bonus\n dataIO.save_json(clans_path, self.clans)\n\n async def setLogChannel(self, clankey, channel):\n \"\"\"Set a clan's log channel\"\"\"\n 
    async def setLogChannel(self, clankey, channel):\n        \"\"\"Set a clan's log channel\"\"\"\n        self.clans[clankey]['log_channel'] = channel\n        dataIO.save_json(clans_path, self.clans)\n\n    async def setWarLogChannel(self, clankey, channel):\n        \"\"\"Set a clan's warlog channel\"\"\"\n        self.clans[clankey]['warlog_channel'] = channel\n        dataIO.save_json(clans_path, self.clans)\n\n    async def addMember(self, clankey, name, tag):\n        \"\"\"Add a member to the clan\"\"\"\n        self.clans[clankey]['members'][tag] = {}\n        self.clans[clankey]['members'][tag][\"tag\"] = tag\n        self.clans[clankey]['members'][tag][\"name\"] = name\n        self.clans[clankey]['members'][tag][\"WarDayWins\"] = 0\n        self.clans[clankey]['members'][tag][\"cardsEarned\"] = 0\n        dataIO.save_json(clans_path, self.clans)\n\n    async def delMember(self, clankey, tag):\n        \"\"\"Remove a member from the clan\"\"\"\n        self.clans[clankey]['members'].pop(tag, None)\n        dataIO.save_json(clans_path, self.clans)\n\n    async def togglePrivate(self, clankey):\n        \"\"\"Toggle Private approval of new recruits\"\"\"\n        self.clans[clankey]['approval'] = not self.clans[clankey]['approval']\n        dataIO.save_json(clans_path, self.clans)\n\n        return self.clans[clankey]['approval']\n\n\nclass clubs:\n    \"\"\"BS Club Family Management\"\"\"\n\n    def __init__(self):\n        self.clubs = dataIO.load_json(clubs_path)\n\n    async def getClubs(self):\n        \"\"\"Return club array\"\"\"\n        return self.clubs\n\n    async def getClubData(self, clubkey, data):\n        \"\"\"Return one field of a club's dict\"\"\"\n        return self.clubs[clubkey][data]\n\n    async def getClubMemberData(self, clubkey, memberkey, data):\n        \"\"\"Return club member's dict\"\"\"\n        return self.clubs[clubkey]['members'][memberkey][data]\n\n    async def numClubs(self):\n        \"\"\"Return the number of clubs\"\"\"\n        return len(self.clubs.keys())\n\n    def keysClubs(self):\n        \"\"\"Get keys of all the clubs\"\"\"\n        return self.clubs.keys()\n\n    def keysClubMembers(self, clubkey):\n        \"\"\"Get keys of all the club members\"\"\"\n        return self.clubs[clubkey]['members'].keys()\n\n    async def namesClubs(self):\n        \"\"\"Get name of all the clubs\"\"\"\n        return \", \".join(key for key in self.keysClubs())\n\n    async def tagsClubs(self):\n        \"\"\"Get tags of all the clubs\"\"\"\n        return [self.clubs[club][\"tag\"] for club in self.clubs]\n\n    async def rolesClubs(self):\n        \"\"\"Get roles of all the clubs\"\"\"\n        roles = [\"Member\"]\n        for x in self.clubs:\n            roles.append(self.clubs[x]['role'])\n        return roles\n\n    async def verifyMembership(self, clubtag):\n        \"\"\"Check if a club is part of the family\"\"\"\n        for clubkey in self.keysClubs():\n            if self.clubs[clubkey]['tag'] == clubtag:\n                return True\n        return False\n\n    async def getClubKey(self, clubtag):\n        \"\"\"Get a club key from a club tag.\"\"\"\n        for clubkey in self.keysClubs():\n            if self.clubs[clubkey]['tag'] == clubtag:\n                return clubkey\n        return None\n\n    async def numWaiting(self, clubkey):\n        \"\"\"Get a club's waiting list length from a club key.\"\"\"\n        return len(self.clubs[clubkey]['waiting'])\n\n    async def addWaitingMember(self, clubkey, memberID):\n        \"\"\"Add a user to a club's waiting list\"\"\"\n        if memberID not in self.clubs[clubkey]['waiting']:\n            self.clubs[clubkey]['waiting'].append(memberID)\n            dataIO.save_json(clubs_path, self.clubs)\n            return True\n        else:\n            return False\n\n    async def delWaitingMember(self, clubkey, memberID):\n        \"\"\"Remove a user from a club's waiting list\"\"\"\n        if memberID in self.clubs[clubkey]['waiting']:\n            self.clubs[clubkey]['waiting'].remove(memberID)\n            dataIO.save_json(clubs_path, self.clubs)\n\n            return True\n        else:\n            return False\n\n    async def checkWaitingMember(self, clubkey, memberID):\n        
\"\"\"check if a user is in a waiting list\"\"\"\n return memberID in self.clubs[clubkey]['waiting']\n\n async def getWaitingIndex(self, clubkey, memberID):\n \"\"\"Get the waiting position from a club's waiting list\"\"\"\n return self.clubs[clubkey]['waiting'].index(memberID)\n\n async def delClub(self, clubkey):\n \"\"\"delete a club from the family\"\"\"\n if self.clubs.pop(clubkey, None):\n dataIO.save_json(clubs_path, self.clubs)\n return True\n return False\n\n async def setPBTrophies(self, clubkey, trophies):\n \"\"\"Set a club's PB Trohies\"\"\"\n self.clubs[clubkey]['personalbest'] = trophies\n dataIO.save_json(clubs_path, self.clubs)\n\n async def setBonus(self, clubkey, bonus):\n \"\"\"Set a club's Bonus Statement\"\"\"\n self.clubs[clubkey]['bonustitle'] = bonus\n dataIO.save_json(clubs_path, self.clubs)\n\n async def setLogChannel(self, clubkey, channel):\n \"\"\"Set a club's log channel\"\"\"\n self.clubs[clubkey]['log_channel'] = channel\n dataIO.save_json(clubs_path, self.clubs)\n\n async def addMember(self, clubkey, name, tag):\n \"\"\"Add a member to the club\"\"\"\n self.clubs[clubkey]['members'][tag] = {}\n self.clubs[clubkey]['members'][tag][\"tag\"] = tag\n self.clubs[clubkey]['members'][tag][\"name\"] = name\n dataIO.save_json(clubs_path, self.clubs)\n\n async def delMember(self, clubkey, tag):\n \"\"\"Remove a member to the club\"\"\"\n self.clubs[clubkey]['members'].pop(tag, None)\n dataIO.save_json(clubs_path, self.clubs)\n\n async def togglePrivate(self, clubkey):\n \"\"\"oggle Private approval of new recruits\"\"\"\n self.clubs[clubkey]['approval'] = not self.clubs[clubkey]['approval']\n dataIO.save_json(clubs_path, self.clubs)\n\n return self.clubs[clubkey]['approval']\n\n\nclass crtools:\n \"\"\"Clash Royale Tools\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.tags = tags()\n self.clans = clans()\n self.clubs = clubs()\n self.auth = auth()\n self.constants = constants()\n\n @commands.command()\n @checks.mod_or_permissions(administrator=True)\n async def settoken(self, *, key):\n \"\"\"Input your Clash Royale API Token from RoyaleAPI.com\"\"\"\n await self.auth.addToken(key)\n await self.bot.say(\"RoyaleAPI Token set\")\n\n @commands.command()\n @checks.mod_or_permissions(administrator=True)\n async def settokenbs(self, *, key):\n \"\"\"Input your BrawlStars API Token\"\"\"\n await self.auth.addTokenBS(key)\n await self.bot.say(\"BrawlAPI Token set\")\n\n @commands.command()\n @checks.mod_or_permissions(administrator=True)\n async def settokencr(self, *, key):\n \"\"\"Input your Official CR API Token\"\"\"\n await self.auth.addTokenOfficial(key)\n await self.bot.say(\"OfficialAPI Token set\")\n\n @commands.group(pass_context=True, name=\"clans\")\n @commands.has_any_role(*BOTCOMMANDER_ROLES)\n async def _clans(self, ctx):\n \"\"\"Base command for managing clash royale clans. 
[p]help clans for details\"\"\"\n if ctx.invoked_subcommand is None:\n await self.bot.send_cmd_help(ctx)\n\n @_clans.command(pass_context=True, name=\"delete\")\n @checks.is_owner()\n async def clans_delete(self, ctx, clankey):\n \"\"\"Remove a clan from tracking\"\"\"\n clankey = clankey.lower()\n if await self.clans.delClan(clankey):\n return await self.bot.say(\"Success\")\n else:\n await self.bot.say(\"Failed\")\n\n @_clans.command(pass_context=True, name=\"pb\")\n async def clans_pb(self, ctx, clankey, pb: int):\n \"\"\"Set a Personal Best requirement for a clan\"\"\"\n clankey = clankey.lower()\n try:\n await self.clans.setPBTrophies(clankey, pb)\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n\n await self.bot.say(\"Success\")\n\n @_clans.command(pass_context=True, name=\"cwr\")\n async def clans_cwr(self, ctx, clankey, league, percent: int):\n \"\"\"Set a CWR requirement for a clan\"\"\"\n clankey = clankey.lower()\n league = league.lower()\n\n leagueNames = [\"legend\",\"gold\",\"silver\",\"bronze\"]\n if league not in leagueNames:\n return await self.bot.say(\"Please use a valid league name: {}\".format(\", \".join(key for key in leagueNames)))\n\n if percent < 0 or percent > 100:\n return await self.bot.say(\"Error, percent value must be between 1-99.\")\n \n try:\n cwrDict = await self.clans.getClanData(clankey, 'cwr')\n count = sum(1 for i in cwrDict.values() if i > 0)\n\n if percent > 0 and count >= 2:\n if cwrDict[league] == 0:\n return await self.bot.say(\"Error, You can only set CWR for 2 different leagues.\")\n await self.clans.setCWR(clankey, league, percent)\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n\n await self.bot.say(\"Success\")\n\n @_clans.command(pass_context=True, name=\"bonus\")\n async def clans_bonus(self, ctx, clankey, *bonus):\n \"\"\"Add bonus information to title of clan (i.e. Age: 21+)\"\"\"\n clankey = clankey.lower()\n try:\n await self.clans.setBonus(clankey, \" \".join(bonus))\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n\n await self.bot.say(\"Success\")\n\n @_clans.command(pass_context=True, name=\"log\")\n async def clans_log(self, ctx, clankey, channel: discord.Channel):\n \"\"\"Set Clan's Log channel to track in's and outs\"\"\"\n clankey = clankey.lower()\n try:\n server = ctx.message.server\n\n if not server.get_member(self.bot.user.id).permissions_in(channel).send_messages:\n return await self.bot.say(\"I do not have permissions to send messages to {0.mention}\".format(channel))\n\n if channel is None:\n await self.bot.say(\"I can't find the specified channel. 
It might have been deleted.\")\n\n await self.clans.setLogChannel(clankey, channel.id)\n\n await self.bot.send_message(channel, \"I will now send log messages to {0.mention}\".format(channel))\n await self.bot.say(\"Clash log channel for {} is now set to {}\".format(clankey, channel))\n\n except KeyError:\n await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n return\n except discord.errors.Forbidden:\n await self.bot.say(\"No permission to send messages to that channel\")\n\n @_clans.command(pass_context=True, name=\"war\")\n async def clans_warlog(self, ctx, clankey, channel: discord.Channel):\n \"\"\"Set Clan's War Log channel to track wins\"\"\"\n clankey = clankey.lower()\n try:\n server = ctx.message.server\n\n if not server.get_member(self.bot.user.id).permissions_in(channel).send_messages:\n return await self.bot.say(\"I do not have permissions to send messages to {0.mention}\".format(channel))\n\n if channel is None:\n await self.bot.say(\"I can't find the specified channel. It might have been deleted.\")\n\n await self.clans.setWarLogChannel(clankey, channel.id)\n\n await self.bot.send_message(channel, \"I will now send war log messages to {0.mention}\".format(channel))\n await self.bot.say(\"Clash war log channel for {} is now set to {}\".format(clankey, channel))\n\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n except discord.errors.Forbidden:\n await self.bot.say(\"No permission to send messages to that channel\")\n\n @_clans.command(pass_context=True, name=\"private\")\n async def clans_private(self, ctx, clankey):\n \"\"\"Toggle Private approval of new recruits\"\"\"\n clankey = clankey.lower()\n try:\n await self.bot.say(\"Private Approval now is set to \" + str(await self.clans.togglePrivate(clankey)))\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clans.namesClans()))\n\n @commands.group(pass_context=True, name=\"clubs\")\n @commands.has_any_role(*BOTCOMMANDER_ROLES)\n async def _clubs(self, ctx):\n \"\"\"Base command for managing clash royale clubs. [p]help clubs for details\"\"\"\n if ctx.invoked_subcommand is None:\n await self.bot.send_cmd_help(ctx)\n\n @_clubs.command(pass_context=True, name=\"delete\")\n @checks.is_owner()\n async def clubs_delete(self, ctx, clankey):\n \"\"\"Remove a clan from tracking\"\"\"\n clankey = clankey.lower()\n if await self.clubs.delClan(clankey):\n return await self.bot.say(\"Success\")\n else:\n await self.bot.say(\"Failed\")\n\n @_clubs.command(pass_context=True, name=\"pb\")\n async def clubs_pb(self, ctx, clankey, pb: int):\n \"\"\"Set a Personal Best requirement for a club\"\"\"\n clankey = clankey.lower()\n try:\n await self.clubs.setPBTrophies(clankey, pb)\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clubs.namesClubs()))\n\n await self.bot.say(\"Success\")\n\n @_clubs.command(pass_context=True, name=\"bonus\")\n async def clubs_bonus(self, ctx, clankey, *bonus):\n \"\"\"Add bonus information to title of clan (i.e. 
Age: 21+)\"\"\"\n clankey = clankey.lower()\n try:\n await self.clubs.setBonus(clankey, \" \".join(bonus))\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clubs.namesClubs()))\n\n await self.bot.say(\"Success\")\n\n @_clubs.command(pass_context=True, name=\"log\")\n async def clubs_log(self, ctx, clankey, channel: discord.Channel):\n \"\"\"Set Clan's Log channel to track in's and outs\"\"\"\n clankey = clankey.lower()\n try:\n server = ctx.message.server\n\n if not server.get_member(self.bot.user.id).permissions_in(channel).send_messages:\n return await self.bot.say(\"I do not have permissions to send messages to {0.mention}\".format(channel))\n\n if channel is None:\n await self.bot.say(\"I can't find the specified channel. It might have been deleted.\")\n\n await self.clubs.setLogChannel(clankey, channel.id)\n\n await self.bot.send_message(channel, \"I will now send log messages to {0.mention}\".format(channel))\n await self.bot.say(\"Clash log channel for {} is now set to {}\".format(clankey, channel))\n\n except KeyError:\n await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clubs.namesClubs()))\n return\n except discord.errors.Forbidden:\n await self.bot.say(\"No permission to send messages to that channel\")\n\n @_clubs.command(pass_context=True, name=\"private\")\n async def clubs_private(self, ctx, clankey):\n \"\"\"Toggle Private approval of new recruits\"\"\"\n clankey = clankey.lower()\n try:\n await self.bot.say(\"Private Approval now is set to \" + str(await self.clubs.togglePrivate(clankey)))\n except KeyError:\n return await self.bot.say(\"Please use a valid clanname: {}\".format(await self.clubs.namesClubs()))\n\n\ndef check_folders():\n if not os.path.exists(\"data/crtools\"):\n print(\"Creating data/crtools folder...\")\n os.makedirs(\"data/crtools\")\n\n\ndef check_files():\n if not fileIO(tags_path, \"check\"):\n print(\"Creating empty tags.json...\")\n fileIO(tags_path, \"save\", {\"0\": {\"tag\": \"DONOTREMOVE\"}})\n\n if not fileIO(tags_bs_path, \"check\"):\n print(\"Creating empty tags_bs.json...\")\n fileIO(tags_bs_path, \"save\", {\"0\": {\"tag\": \"DONOTREMOVE\"}})\n\n if not fileIO(auth_path, \"check\"):\n print(\"enter your RoyaleAPI token in data/crtools/auth.json...\")\n fileIO(auth_path, \"save\", {\"OfficialAPI\": \"enter your OfficialAPI token here!\"})\n\n if not fileIO(clans_path, \"check\"):\n print(\"Creating empty clans.json...\")\n fileIO(clans_path, \"save\", default_clans)\n\n if not fileIO(clubs_path, \"check\"):\n print(\"Creating empty clubs.json...\")\n fileIO(clubs_path, \"save\", default_clubs)\n\n\ndef check_auth():\n c = dataIO.load_json(auth_path)\n if 'RoyaleAPI' not in c:\n c['RoyaleAPI'] = \"enter your RoyaleAPI token here!\"\n c = dataIO.load_json(auth_path)\n if 'OfficialAPI' not in c:\n c['OfficialAPI'] = \"enter your OfficialAPI token here!\"\n c = dataIO.load_json(auth_path)\n if 'BrawlAPI' not in c:\n c['BrawlAPI'] = \"enter your BrawlAPI token here!\"\n dataIO.save_json(auth_path, c)\n\n\ndef setup(bot):\n check_folders()\n check_files()\n check_auth()\n bot.add_cog(crtools(bot))\n","repo_name":"Gr8z/Legend-Cogs","sub_path":"crtools/crtools.py","file_name":"crtools.py","file_ext":"py","file_size_in_byte":29047,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"38987622174","text":"#coding: utf-8\nimport re\n\ndef cookiesniffing(html):\n\tresult = 0\n\n\tif ( 
bool(re.search('document\\.cookie',html, re.IGNORECASE|re.DOTALL)) == True ): # in JavaScript, cookies can be accessed via the document.cookie object\n\t\tif ( bool(re.search('new\\s?XMLHttpRequest',html, re.IGNORECASE|re.DOTALL)) == True ): # object whose methods and properties are used to send data\n\t\t\tif html.find('post') != -1 or html.find('get') != -1:\n\t\t\t\tresult = 1\n\treturn result\n\ndef cors(html):\n\tresult = 0\n\tscriptRegExp = re.compile('<script.*?>.*?</script>', re.DOTALL)\n\t# search for <script> blocks\n\tscriptList = scriptRegExp.findall(html)\n\tfor script in scriptList:\n\t\t# search for withCredentials\n\t\tif(bool(re.search('withCredentials', script)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef crossdocumentmessaging(html):\n\tresult = 0\n\tscriptRegExp = re.compile('<script.*?>.*?</script>', re.DOTALL)\n\tscriptList = scriptRegExp.findall(html)\n\tfor script in scriptList:\n\t\tif (bool(re.search('\\.postMessage', script, re.IGNORECASE)) == True ):\n\t\t\tif (bool(re.search('\\.origin', script, re.IGNORECASE)) == False):\n\t\t\t\tresult = 1\n\treturn result\n\ndef csrf(html):\n\tresult = 0\n\tiframeRegExp = re.compile('<iframe.*?>.*?</iframe>', re.DOTALL)\n\timgRegExp = re.compile('<img.*?>')\n\tformRegExp = re.compile('<form.*?>.*?</form>', re.DOTALL)\n\t# [1] CSRF Attack iframe Tag\n\t# search for <iframe> tags\n\tiframeList = iframeRegExp.findall(html)\n\tfor iframe in iframeList:\n\t\tif(bool(re.search('.*\\.php|.*\\.asp|.*\\.html', iframe)) == True):\n\t\t\t# print 'No safety Zone, iframe Tag Threat!!'\n\t\t\tresult = 1\n\t\tif(bool(re.search('visibility:\\s?hidden', iframe)) == True):\n\t\t\t# print \"Display-None >> Threat!!\"\n\t\t\tresult = 1\n\t# [2] CSRF Attack Img links\n\timgList = imgRegExp.findall(html)\n\tfor img in imgList:\n\t\tif (bool(re.search('.*\\.gif|.*\\.jpg|.*\\.bmp', img)) == True):\n\t\t\t# print 'No safety Zone, IMG Tag Threat!!'\n\t\t\tresult = 1\n\t# [3] if you found form, check => {% csrf_token %}\n\tformList = formRegExp.findall(html)\n\tfor form in formList:\n\t\tif ((bool(re.search('post', form, re.IGNORECASE)) == True) and (bool(re.search('csrf\\.token|token', form)) == True)):\n\t\t\t# print 'No safety Zone, Form Tag Threat!!'\n\t\t\tresult = 1\n\treturn result\n\ndef fileapi(html):\n\tresult = 0\n\t# Warning! opacity = 0\n\tif(bool(re.search('opacity:0',html)) == True):\n\t\tresult = 1\n\t# Warning! 
Using FileAPI\n\tif (bool(re.search('FileReader', html)) == True):\n\t\tresult = 1\n\n\treturn result\n\ndef filedownload(html):\n\tahrefRegExp = re.compile('<.*?>', re.DOTALL)\n\tresult = 0\n\tahrefList = ahrefRegExp.findall(html)\n\tfor ahref in ahrefList:\n\t\tif(bool(re.search('(file)(path|down|download)?\\s*=', ahref, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\t\tif (bool(re.search('(path|download)\\s*=', ahref, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef geolocationapi(html):\n\tresult = 0\n\tif (bool(re.search('(getCurrentPosition\\(.*?\\))|(watchPosition\\(.*?\\))|(coords)|(maximumAge\\s:\\s0)', html, re.IGNORECASE|re.DOTALL)) == True):\n\t\tresult = 1\n\treturn result\n\n\n\ndef localdb(html):\n\tresult = 0\n\tscriptRegExp = re.compile('<script.*?>.*?</script>', re.DOTALL)\n\t# search for <script> blocks\n\tscriptList = scriptRegExp.findall(html)\n\tfor script in scriptList:\n\t\tif ( re.search('openDatabase', script, re.IGNORECASE) and re.search('executeSql', script, re.IGNORECASE) ):\n\t\t\tresult = 1\n\treturn result\n\ndef newtagabusing(html):\n\tresult = 0\n\ttagList = ['audio', 'video', 'source', 'canvas', 'embed']\n\tfor i in range(len(tagList)):\n\t\tif(bool(re.search('<' + tagList[i] + '.*?>.*?</' + tagList[i] + '>', html, re.IGNORECASE|re.DOTALL)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef protocolScheme(html):\n\tresult = 0\n\tscriptRegExp = re.compile('<script.*?>.*?</script>', re.DOTALL)\n\tscriptList = scriptRegExp.findall(html)\n\tfor script in scriptList:\n\t\tif (bool(re.search('\\.registerProtocolHandler\\(.*?\\)', script, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef scriptddos(html):\n\tresult = 0\n\twhileRegExp = re.compile('while\\(.*?\\)\\s?\\{.*\\}', re.DOTALL)\n\twhileList = whileRegExp.findall(html)\n\tfor while_ in whileList:\n\t\t# XMLHttpSendCheck\n\t\tif (bool(re.search('\\.send\\(.*?\\)', while_, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\t\t# XMLHttpOpen Check\n\t\tif (bool(re.search('\\.open\\(.*?\\)', while_, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\t\t# XMLHttpRequest Check\n\t\tif (bool(re.search('new\\s?xdomainrequest\\(.*?\\)|new\\s?xmlhttprequest\\(.*?\\)', while_, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\t\t# Image src check\n\t\tif (bool(re.search('\\.src\\(.*?\\)', while_, re.IGNORECASE)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef websocket(html):\n\tresult = 0\n\tif (bool(re.search('\\.onopen|\\.onmessage', html, re.IGNORECASE|re.DOTALL)) == True):\n\t\tresult = 1\n\treturn result\n\ndef webstorage(html):\n\tresult = 0\n\tAPIList = ['length', 'key', 'getitem', 'setitem', 'removeitem', 'clear']\n\n\tfor i in range(len(APIList)):\n\t\tif (bool(re.search('storage\\.'+ APIList[i], html, re.IGNORECASE|re.DOTALL)) == True):\n\t\t\tresult = 1\n\treturn result\n\ndef webworker(html):\n\tresult = 0\n\tif (bool(re.search('new\\s?Worker\\(.*?\\)', html, re.IGNORECASE|re.DOTALL)) == True):\n\t\tresult = 1\n\treturn result\n\ndef scan(html):\n\tfunctions = [cookiesniffing, cors, crossdocumentmessaging, csrf, fileapi, filedownload, geolocationapi, localdb,\n\t\t\t\t newtagabusing, protocolScheme, scriptddos, websocket, webstorage, webworker]\n\tresult = []\n\tfor function in functions:\n\t\tresult.append(function(html))\n\treturn result\n\n\n","repo_name":"hksecurity/html5-scanner-3rd","sub_path":"scanner_server/jsunpack-n-master/script_scanner.py","file_name":"script_scanner.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"2748514973","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom constants import *\nfrom utils import *\n\nimport os\nfrom io import open\nfrom tqdm import tqdm\nfrom pathlib import Path\n\n\npaths = [str(x) for x in Path(CORPUS.all_clean_files).glob(\"**/*.txt\")]\n\n\npart = [x.split(\"/\")[-2] for x in paths]\nfrom collections import Counter\nCounter(part)\n\n\ntokenizer_save_dir = TOK.out\nif not os.path.exists(tokenizer_save_dir):\n os.makedirs(tokenizer_save_dir)\nprint(tokenizer_save_dir)\n\n\ntokenizer_args = {\n \"lowercase\": TOK.lower,\n \"cls_token\": \"[CLS]\",\n \"pad_token\": \"[PAD]\",\n \"sep_token\": \"[SEP]\",\n \"unk_token\": \"[UNK]\",\n \"mask_token\": \"[MASK]\",\n}\nfast_tokenizer_args = tokenizer_args.copy()\nfast_tokenizer_args[\"do_lower_case\"] = TOK.lower\n\ntokenizer = TOK.tok_class(**tokenizer_args)\n\ntokenizer_args[\"vocab_file\"] = f\"{tokenizer_save_dir}/vocab.txt\"\n\nsave_json(tokenizer_args, TOK.tokenizer_args)\nsave_json(fast_tokenizer_args, TOK.fast_tokenizer_args)\n\ntokenizer.train(\n files=paths, vocab_size=TOK.vocab_size, min_frequency=TOK.min_frequency,\n special_tokens=[\"[CLS]\",\"[PAD]\",\"[SEP]\",\"[UNK]\",\"[MASK]\",],\n)\nprint(tokenizer.get_vocab_size())\n\n\ntokenizer.save(tokenizer_save_dir)\n\n\nprint(\"ORIGINAL\")\nprint(tokenizer)\nprint(tokenizer.get_vocab_size())\nprint(tokenizer.encode(\"This is a sample text\").tokens)\n\ntokenizer_args = load_json(TOK.tokenizer_args)\ntok2 = TOK.tok_class(**tokenizer_args)\nprint()\nprint(\"RELOADED\")\nprint(tok2)\nprint(tok2.get_vocab_size())\nprint(tok2.encode(\"This is a sample text\").tokens)\n\n\nfrom transformers import BertTokenizerFast\nfast_tokenizer_args = load_json(TOK.fast_tokenizer_args)\ntok = BertTokenizerFast.from_pretrained(TOK.out, **fast_tokenizer_args)\nprint()\nprint(\"FAST TOKENIZER (transformers)\")\nprint(tok, tok.unk_token, tok.sep_token, tok.cls_token, tok.pad_token, tok.mask_token)\nprint(tok.vocab_size)\nprint(tok.tokenize(\"This is a sample text\", add_special_tokens=True))\n\n","repo_name":"KevinRoitero/dilbert","sub_path":"01_build_tokenizer.py","file_name":"01_build_tokenizer.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"25987607896","text":"#!/usr/bin/python3\nimport sys\nimport math\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import LaserScan\nfrom ackermann_msgs.msg import AckermannDriveStamped\n\nclass ReactiveGapFollow(Node):\n def __init__(self):\n super().__init__('reactive_gap_follow')\n self.create_subscription(LaserScan, '/scan2', self.lidar_callback, 10)\n self.drive_pub = self.create_publisher(AckermannDriveStamped, '/drive', 10)\n\n def lidar_callback(self, data):\n proc_ranges = self.preprocess_lidar(\n data.ranges, data.angle_min, data.angle_increment)\n min_distance = self.find_min_distance(proc_ranges)\n proc_ranges = self.safety_bubble(\n safety_bubble_radius=20, ranges=proc_ranges, min_distance_idx=proc_ranges.index(min_distance))\n max_gap = self.find_max_gap(proc_ranges)\n best_point = self.find_best_point(max_gap)\n angle = (data.angle_min +\n (proc_ranges.index(best_point) * data.angle_increment))\n\n if abs(angle) > 1.5:\n speed = 1.0\n elif 0.5 < abs(angle) <= 1.5:\n speed = 1.5\n else:\n speed = 4.0\n self.get_logger().info('Publishing: angle: %f, speed: %f' % (angle, speed))\n \n self.publish_drive_msg(angle, speed)\n\n def preprocess_lidar(self, ranges, angle_min, 
angle_increment):\n self.a_idx = int((math.radians(70) - angle_min) / angle_increment)\n self.b_idx = int((math.radians(-70) - angle_min) / angle_increment)\n\n proc_ranges = list(ranges)\n for i in range(len(proc_ranges)):\n if proc_ranges[i] > 3:\n proc_ranges[i] = 0\n return proc_ranges\n\n def safety_bubble(self, safety_bubble_radius, ranges, min_distance_idx):\n while safety_bubble_radius > 0:\n ranges[int(min_distance_idx - (safety_bubble_radius))] = 0\n safety_bubble_radius -= 2\n return ranges\n\n def find_min_distance(self, ranges):\n try:\n return min([i for i in ranges[self.b_idx:self.a_idx] if i != 0])\n except ValueError:\n return 0\n\n def find_max_gap(self, free_space_ranges):\n temp = []\n max_gap = []\n max_gap_len = -np.inf\n for i in range(self.b_idx, self.a_idx):\n if free_space_ranges[i] > 2:\n temp.append(free_space_ranges[i])\n if (len(temp) > max_gap_len):\n max_gap = temp\n max_gap_len = len(temp)\n else:\n temp = []\n return max_gap\n\n def find_best_point(self, max_gap):\n try:\n return (max_gap[int(len(max_gap) / 2)])\n except IndexError:\n return 0\n\n def publish_drive_msg(self, angle, speed):\n msg = AckermannDriveStamped()\n msg.header.stamp = self.get_clock().now().to_msg()\n msg.header.frame_id = \"laser\"\n msg.drive.steering_angle = angle\n msg.drive.speed = speed\n self.drive_pub.publish(msg)\n\ndef main(args=None):\n rclpy.init(args=args)\n node = ReactiveGapFollow()\n rclpy.spin(node)\n node.destroy_node()\n\n","repo_name":"ibrahimsel/f1tenth_ros2_algorithms","sub_path":"gap_follower/gap_follower/gap_follower_node.py","file_name":"gap_follower_node.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2632462995","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'artgallery'\n\nurlpatterns = [\n path('', views.art_main_page, name='art_list'),\n path('update-art/',views.ajax_change_post, name='update_art_ajax')\n\n]","repo_name":"cmiles33/bigBlog","sub_path":"artgallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12408049422","text":"import disnake as discord\r\nfrom disnake.ext import commands\r\nimport difflib\r\n\r\n\r\ndef get_command_info(cmd):\r\n if isinstance(cmd, commands.InvokableSlashCommand):\r\n return cmd.name, cmd.description\r\n elif isinstance(cmd, commands.SubCommand):\r\n return cmd.qualified_name, cmd.option.description\r\n\r\n\r\nclass HelpMenu(discord.ui.View):\r\n def __init__(self, cmds, *, per_page):\r\n super().__init__()\r\n self.cmds = list(cmds)\r\n self.per_page = per_page\r\n self.page = 0\r\n self.pages = (len(self.cmds) + per_page - 1) // per_page\r\n\r\n self.add_item(discord.ui.Button(label=\"Wiki\", emoji=\"📚\", url=\"https://github.com/Pawl-Patrol/Dynamic-Voice-Channels/wiki\"))\r\n self.add_item(discord.ui.Button(label=\"Issues\", url=\"https://github.com/Pawl-Patrol/Dynamic-Voice-Channels/issues\"))\r\n self.add_item(discord.ui.Button(label=\"Source\", url=\"https://github.com/Pawl-Patrol/Dynamic-Voice-Channels\"))\r\n\r\n def format_page(self, page):\r\n embed = discord.Embed(\r\n title=\"Help\",\r\n description=\"Use `/help ` for more info on a command.\"\r\n )\r\n idx = self.page * self.per_page\r\n for cmd in self.cmds[idx:(min(idx + self.per_page, len(self.cmds)))]:\r\n name, description = get_command_info(cmd)\r\n embed.add_field(\r\n name=f\"/{name}\",\r\n value=description or \"No description\",\r\n inline=False\r\n )\r\n embed.set_footer(text=f\"Page {self.page + 1} of {self.pages}\")\r\n return embed\r\n\r\n def update(self):\r\n self.previous.disabled = self.page == 0\r\n self.first.disabled = self.page < 2\r\n self.next.disabled = self.page == (self.pages - 1)\r\n self.last.disabled = self.page > (self.pages - 3)\r\n\r\n return self.format_page(self.page)\r\n\r\n async def send_initial(self, ctx):\r\n await ctx.response.send_message(embed=self.update(), view=self)\r\n\r\n @discord.ui.button(emoji=\"⏮️\", style=discord.ButtonStyle.secondary)\r\n async def first(self, button, ctx):\r\n self.page = 0\r\n await ctx.response.edit_message(embed=self.update(), view=self)\r\n\r\n @discord.ui.button(emoji=\"◀️\", style=discord.ButtonStyle.secondary)\r\n async def previous(self, button, ctx):\r\n self.page -= 1\r\n await ctx.response.edit_message(embed=self.update(), view=self)\r\n\r\n @discord.ui.button(emoji=\"⏹️\", style=discord.ButtonStyle.secondary)\r\n async def close(self, button, ctx):\r\n self.clear_items()\r\n await ctx.response.edit_message(embed=self.update(), view=self)\r\n self.stop()\r\n\r\n @discord.ui.button(emoji=\"▶️\", style=discord.ButtonStyle.secondary)\r\n async def next(self, button, ctx):\r\n self.page += 1\r\n await ctx.response.edit_message(embed=self.update(), view=self)\r\n\r\n @discord.ui.button(emoji=\"⏭️\", style=discord.ButtonStyle.secondary)\r\n async def last(self, button, ctx):\r\n self.page = self.pages - 1\r\n await ctx.response.edit_message(embed=self.update(), view=self)\r\n\r\n\r\nasync def get_commands(ctx):\r\n cmds = []\r\n for cmd in ctx.bot.slash_commands:\r\n if not cmd.children:\r\n cmds.append(cmd)\r\n continue\r\n for subcmd in cmd.children.values():\r\n cmds.append(subcmd)\r\n return 
cmds\r\n\r\n\r\nasync def auto_complete(ctx, arg):\r\n cmds = [get_command_info(cmd)[0] for cmd in await get_commands(ctx)]\r\n results = difflib.get_close_matches(arg, cmds)\r\n for cmd in cmds:\r\n if arg in cmd and cmd not in results:\r\n results.append(cmd)\r\n return results\r\n\r\n\r\n@commands.slash_command(name=\"help\")\r\nasync def help_command(ctx, command: str = commands.Param(default=None, autocomplete=auto_complete)):\r\n \"\"\"Shows you this\"\"\"\r\n if command is None:\r\n menu = HelpMenu(await get_commands(ctx), per_page=4)\r\n await menu.send_initial(ctx)\r\n else:\r\n cmd = discord.utils.find(lambda c: get_command_info(c)[0] == command, await get_commands(ctx))\r\n if not cmd:\r\n await ctx.send(\"Command not found.\", ephemeral=True)\r\n else:\r\n name, description = get_command_info(cmd)\r\n await ctx.send(embed=discord.Embed(\r\n title=f\"/{name}\",\r\n description=description\r\n ))\r\n\r\n\r\ndef setup(bot):\r\n bot.add_slash_command(help_command)\r\n","repo_name":"Pawl-Patrol/Dynamic-Voice-Channels","sub_path":"bot/ext/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"26043285595","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/mgdal_env\n# @Time : 2023/11/22 10:19\n# @Author : Hexk\n# @Descript :\n\nimport os\nimport matplotlib\nimport numpy as np\nfrom datetime import datetime\nfrom datetime import timedelta\nimport netCDF4 as nc\nimport glob\nimport json\nimport matplotlib.pyplot as plt\nfrom osgeo import gdal\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import ndimage\nfrom nansat import Nansat\nimport common_lib as clib\n\nos.environ['PROJ_LIB'] = 'D:/Mambaforge/envs/mgdal_env/Library/share/proj'\nos.environ['GDAL_DATA'] = 'D:/Mambaforge/envs/mgdal_env/Library/share'\nif __name__ == '__main__':\n\n file_path = R\"E:\\Glacier_DEM_Register\\Tanggula_FourYear_Data\\Test_Final_20231018\\1_Cartography\\3_Analysis\\3_Analysis_Data\\20231122_1_ERA5Wind\\ERA5_Wind_00-22.nc\"\n\n ERA5_BASE_TIME = datetime(year=1900, month=1, day=1)\n\n # extract the relevant variables from the NC file\n wind_data = nc.Dataset(file_path)\n\n lon_arr = wind_data.variables['longitude'][:]\n lat_arr = wind_data.variables['latitude'][:]\n wind_time = wind_data.variables['time'][:]\n u10 = wind_data.variables['u10'][:]\n v10 = wind_data.variables['v10'][:]\n si10 = wind_data.variables['si10'][:]\n\n # convert the lon/lat arrays to a grid and use flatten() to turn them from 2D into 1D\n lon_grid, lat_grid = np.meshgrid(lon_arr, lat_arr)\n lon_grid = lon_grid.flatten()\n lat_grid = lat_grid.flatten()\n\n # read the lon/lat file of the study area \"/mnt/g/aaa/gate_shape/MIZ_lon_lat_arr.csv\"\n # miz_extent_data = np.loadtxt(\"/mnt/g/aaa/gate_shape/MIZ_lon_lat_arr.csv\", skiprows=1, delimiter=\",\")\n # lon_arr_miz = miz_extent_data[:, 0]\n # lat_arr_miz = miz_extent_data[:, 1]\n lon_arr_miz = [90, 91, 92]\n lat_arr_miz = [33, 34]\n\n # loop over the study area points and compute the index of the nearest lon/lat on the NC grid, so that index can be used to extract the study area data.\n for i in range(len(lon_arr_miz)):\n lon = lon_arr_miz[i]\n lat = lat_arr_miz[i]\n distance = (lon_grid - lon) ** 2 + (lat_grid - lat) ** 2\n index = np.argmin(distance)\n pass\n\n sub_u10 = []\n sub_v10 = []\n sub_si10 = []\n monthly_time = []\n for i in range(len(wind_time)):\n key = ERA5_BASE_TIME + timedelta(hours=int(wind_time[i]))\n monthly_time.append(key)\n sub_u10.append(u10[i].flatten()[index])\n sub_v10.append(v10[i].flatten()[index])\n sub_si10.append(si10[i].flatten()[index])\n\n sub_lon, sub_lat = lon_grid[index], lat_grid[index]\n\n # now compute the wind direction\n deg = 
180.0 / np.pi\n monthly_time_p = []\n sub_si10_p = []\n wdir = []\n\n # loop to collect each December wind speed into sub_si10_p[] and append each December mean wind direction to wdir[]\n for i, t in enumerate(monthly_time):\n if t.month == 12:\n monthly_time_p.append(t)\n sub_si10_p.append(np.nanmean(sub_si10[i]))\n # compute the wind direction from the u/v components\n wdir.append(180.0 + np.arctan2(np.nanmean(sub_u10[i]), np.nanmean(sub_v10[i])) * deg)\n\n # write the results to csv\n dataframe = pd.DataFrame({\"time\": monthly_time_p, \"wspd\": sub_si10_p, \"wdir\": wdir})\n dataframe.to_csv(f\"wind_0220.csv\", index=False)\n","repo_name":"QuentinHe/GDALRapidFunc","sub_path":"RasterCalculate/Example/P20231122_1_ERA5Wind.py","file_name":"P20231122_1_ERA5Wind.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"2921917841","text":"# Imports\nimport logging\n\n\nclass Logger:\n\n def __init__(self, user, log_name=\"log.log\", level=\"debug\"):\n # Vars;\n self.user = user\n self.log_name = log_name\n self.level = level\n\n def print_logger(self, message_type, message):\n # Objects;\n logger = logging.getLogger(self.user)\n handler = logging.FileHandler(f\"files/logs/{self.log_name}\")\n\n # Verify levels;\n if self.level == \"debug\":\n logger.setLevel(logging.DEBUG)\n elif self.level == \"info\":\n logger.setLevel(logging.INFO)\n elif self.level == \"warning\":\n logger.setLevel(logging.WARNING)\n elif self.level == \"error\":\n logger.setLevel(logging.ERROR)\n elif self.level == \"critical\":\n logger.setLevel(logging.CRITICAL)\n else:\n logger.setLevel(logging.INFO)\n\n # Configure logging;\n log_format = \"%(asctime)s - [ %(name)s ] - ( %(levelname)s ): %(message)s\"\n logging.basicConfig(format=log_format,\n datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n # Configure handler;\n format_logging = logging.Formatter(log_format)\n handler.setFormatter(format_logging)\n logger.addHandler(handler)\n\n # Print logger;\n if message_type == \"debug\":\n logger.debug(message)\n elif message_type == \"info\":\n logger.info(message)\n elif message_type == \"warning\":\n logger.warning(message)\n elif message_type == \"error\":\n logger.error(message)\n elif message_type == \"critical\":\n logger.critical(message)\n else:\n logger.info(message)\n","repo_name":"bitaiir/core-bitaiir","sub_path":"tools/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"43159650775","text":"\"\"\"\nThis file shows an example grid search for the lstm model\n\"\"\"\n\nfrom src.data.dataset_generator import DatasetGenerator\nfrom src.data.data_splitter import train_test_split\nfrom src.data.data_transformer import DataTransformer\nfrom src.grid_searcher import GridSearcher\n\n\nif __name__ == '__main__':\n # define a parameter grid for each model\n lstm_param_grid = {\n 'model_name': ['lstm'],\n 'window_size': [168, 2*168, 4*168],\n 'gap': [0, 23, 167], # Prediction horizons (hour, day, week)\n 'num_layers': [1, 2],\n 'hidden_layer_size': [128, 256],\n 'epochs': [100],\n 'regularization': [0, 0.001]\n }\n\n trivial_param_grid = {\n 'model_name': ['trivial'] # Add trivial model as baseline\n }\n\n # list of dicts containing the parameter grid for every model\n all_model_param_grids = [lstm_param_grid,\n trivial_param_grid]\n\n # CREATE TRAIN AND TEST DATASET #\n dg = DatasetGenerator(['all'])\n dataset = dg.get_dataset('2016-01-01', '2021-08-15', 'T23')\n train, test = train_test_split(dataset, 0.1)\n\n # 
Start the grid search\n    grid_search = GridSearcher(all_model_param_grids)\n    grid_search.run(train, test)\n\n","repo_name":"kerstinforster/electricity-price-forecasting","sub_path":"experiments/grid_search_lstm.py","file_name":"grid_search_lstm.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31271023529","text":"from flask import current_app, jsonify\nfrom flask import g\nfrom flask import request\nfrom flask import session\nfrom info import constants\n\nfrom info.models import User, News, Category\nfrom info.modules.index import index_blu\nfrom flask import render_template\n\nfrom info.utils.response_code import RET\nfrom info.utils.common import user_login_data\n\n\n@index_blu.route('/')\n@user_login_data\ndef index():\n # # if the user is logged in, pass the current user's data to the template for display\n # # get the id of the currently logged-in user\n # user_id = session.get(\"user_id\", None)\n # # fetch the user info by id\n # user = None\n # if user_id:\n # try:\n # user = User.query.get(user_id)\n # except Exception as e:\n # current_app.logger.error(e)\n user = g.user\n new_list = []\n categories = []\n try:\n cate = Category.query.all()\n new_list = News.query.order_by(News.clicks.desc()).limit(6)\n for category in cate:\n categories.append(category.name)\n print(categories, user)\n except Exception as e:\n current_app.logger.error(e)\n new_dict_li = list()\n for news in new_list:\n new_dict_li.append(news.to_basic_dict())\n data = {\n \"user\": user.to_dict() if user else None,\n \"new_dict_li\": new_dict_li,\n \"categories\": categories\n }\n return render_template(\"news/index.html\", data=data)\n\n\n@index_blu.route(\"/newslist\")\ndef get_news_list():\n \"\"\"\n Get the parameters\n Validate the parameters\n Query the data\n Return the data\n \"\"\"\n # get the parameters\n args_dict = request.args\n print(args_dict)\n page = args_dict.get(\"page\", \"1\")\n per_page = args_dict.get(\"per_page\", constants.HOME_PAGE_MAX_NEWS)\n category_id = args_dict.get(\"cid\", \"1\")\n\n # validate the parameters\n try:\n category_id = int(category_id)\n page = int(page)\n per_page = int(per_page)\n except Exception as res:\n current_app.logger.error(res)\n return jsonify(errno=RET.PARAMERR, errmsg=\"Parameter error\")\n\n # query the data and paginate it\n filterr = [News.status == 0]\n # if the category id is not 1, add a filter on the category id\n if category_id != 0:\n filterr.append(News.category_id == category_id+1)\n print(type(category_id))\n\n try:\n paginates = News.query.filter(*filterr).order_by(News.create_time.desc())\n print(paginates.first())\n paginates = paginates.paginate(page, per_page, False)\n # get the queried records\n #\n items = paginates.items\n # get the total number of pages\n total_page = paginates.pages\n current_page = paginates.page\n except Exception as res:\n current_app.logger.error(res)\n return jsonify(errno=RET.DBERR, errmsg=\"Database query failed\")\n news_li = []\n print(\"before news loop\")\n print(items)\n for news in items:\n news_li.append(news.to_basic_dict())\n # return the data\n print(total_page, current_page)\n return jsonify(errno=RET.OK, errmsg=\"OK\",\n totalPage=total_page,\n currentPage=current_page,\n newsList=news_li,\n cid=category_id)\n\n\n@index_blu.route('/favicon.ico')\ndef favicon():\n return current_app.send_static_file(\"news/favicon.ico\")\n","repo_name":"moonbria/test1","sub_path":"info/modules/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13299348643","text":"import time\n\nfrom app.data import create_tables, insert_many_to_record\nfrom app.es.client import init, write\nfrom 
app.metric.helper import list_realtime_metrics\nfrom app.util.logger import log\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom config import APP_SETTINGS, ParameterIllegalException\n\nscheduler = BlockingScheduler()\n\nstore = APP_SETTINGS.prop('dmonitor.store')\n\n\n@scheduler.scheduled_job('cron', second='*/5', max_instances=5)\ndef request_update_status():\n log.info('Starting job %s', int(time.time()))\n do_write_metrics()\n log.info('Finish job %s', int(time.time()))\n\n\ndef do_write_metrics():\n if store == 'ES':\n write()\n elif store == 'DB':\n data = list_realtime_metrics()\n log.info('inserting')\n insert_many_to_record(data)\n log.info('inserted')\n\n\ndef start():\n if store == 'ES':\n init()\n elif store == 'DB':\n pass\n else:\n raise ParameterIllegalException('Not support store type %s' % store)\n create_tables()\n scheduler.start()\n\n\ndef shutdown():\n scheduler.shutdown()\n","repo_name":"danielpine/dmonitor","sub_path":"app/cron/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71253193546","text":"from threading import *\nfrom socket import error as SocketError\nimport errno\n\n\nclass Server(Thread):\n def __init__(self, socket, address):\n Thread.__init__(self)\n self.sock = socket\n self.addr = address\n self.start()\n\n def run(self):\n while 1:\n print('2\\n')\n try:\n _received_data = self.sock.recv(1024).decode()\n print('Client sent:', _received_data)\n self.sock.send(b'you sent something to me')\n #self.sock.close()\n break\n except SocketError as e:\n if e.errno != errno.ECONNRESET:\n raise\n else:\n pass\n","repo_name":"harveyspec1245/tcpClient","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38735891978","text":"\"\"\"empty message\n\nRevision ID: 3187042f56e1\nRevises: \nCreate Date: 2023-02-16 15:38:13.244995\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.sql import text\n\n# revision identifiers, used by Alembic.\nrevision = '3187042f56e1'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n connection = op.get_bind()\n connection.execute(text('CREATE SCHEMA IF NOT EXISTS auth;'))\n\n op.create_table('roles',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id'),\n sa.UniqueConstraint('name'),\n schema='auth'\n )\n op.create_table('users',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('login', sa.String(length=100), nullable=False),\n sa.Column('password_hash', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id'),\n sa.UniqueConstraint('login'),\n schema='auth'\n )\n op.create_table('user_login_history',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=False),\n sa.Column('user_agent', sa.TEXT(), nullable=False),\n sa.Column('auth_date', postgresql.TIMESTAMP(), nullable=False),\n sa.Column('user_device_type', sa.TEXT(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['auth.users.id'], ),\n sa.PrimaryKeyConstraint('id', 'user_device_type'),\n sa.UniqueConstraint('id', 'user_device_type'),\n schema='auth',\n postgresql_partition_by='LIST (user_device_type)',\n )\n\n connection = op.get_bind()\n connection.execute(text(\"\"\"\n CREATE TABLE IF NOT EXISTS \"user_login_history_smart\" PARTITION OF \"user_login_history\" FOR VALUES IN ('smart');\n \"\"\"))\n connection.execute(text(\"\"\"\n CREATE TABLE IF NOT EXISTS \"user_login_history_mobile\" PARTITION OF \"user_login_history\" FOR VALUES IN ('mobile')\n \"\"\"))\n connection.execute(text(\"\"\"\n CREATE TABLE IF NOT EXISTS \"user_login_history_web\" PARTITION OF \"user_login_history\" FOR VALUES IN ('web')\n \"\"\"))\n\n op.create_table('user_profile',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=False),\n sa.Column('last_name', sa.String(length=256), nullable=False),\n sa.Column('first_name', sa.String(length=256), nullable=False),\n sa.Column('email', sa.String(length=256), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['auth.users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('id'),\n sa.UniqueConstraint('user_id'),\n schema='auth'\n )\n op.create_table('user_refresh_token',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=True),\n sa.Column('access_token', sa.TEXT(), nullable=False),\n sa.Column('refresh_token', sa.TEXT(), nullable=False),\n sa.Column('expires', postgresql.TIMESTAMP(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['auth.users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id'),\n sa.UniqueConstraint('refresh_token'),\n schema='auth'\n )\n op.create_table('roles_parents',\n sa.Column('role_id', sa.UUID(), nullable=True),\n sa.Column('parent_id', sa.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['parent_id'], ['auth.roles.id'], ),\n sa.ForeignKeyConstraint(['role_id'], ['auth.roles.id'], )\n )\n op.create_table('users_roles',\n sa.Column('id', sa.UUID(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=True),\n sa.Column('role_id', sa.UUID(), nullable=True),\n sa.ForeignKeyConstraint(['role_id'], ['auth.roles.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['auth.users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('users_roles')\n op.drop_table('roles_parents')\n op.drop_table('user_refresh_token', schema='auth')\n op.drop_table('user_profile', schema='auth')\n op.drop_table('user_login_history', schema='auth')\n op.drop_table('users', schema='auth')\n op.drop_table('roles', schema='auth')\n # ### end Alembic commands ###\n","repo_name":"Shuich1/yp_cinema_auth","sub_path":"auth_service/src/migrations/versions/3187042f56e1_init_db.py","file_name":"3187042f56e1_init_db.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41189462493","text":"from django.contrib.postgres.search import SearchVector\nfrom django.db.models import F\nfrom django.db.models.fields.json import KeyTextTransform\n\n# Imported from\n# https://github.com/postgres/postgres/blob/REL_14_STABLE/src/bin/initdb/initdb.c#L659\nTSEARCH_CONFIG_LANGUAGES = {\n \"ar\": \"arabic\",\n \"ca\": \"catalan\",\n \"da\": \"danish\",\n \"de\": \"german\",\n \"el\": \"greek\",\n \"en\": \"english\",\n \"es\": \"spanish\",\n \"eu\": \"basque\",\n \"fi\": \"finnish\",\n \"fr\": \"french\",\n \"ga\": \"irish\",\n \"hi\": \"hindi\",\n \"hu\": \"hungarian\",\n \"hy\": \"armenian\",\n \"id\": \"indonesian\",\n \"it\": \"italian\",\n \"lt\": \"lithuanian\",\n \"ne\": \"nepali\",\n \"nl\": \"dutch\",\n \"no\": \"norwegian\",\n \"pt\": \"portuguese\",\n \"ro\": \"romanian\",\n \"ru\": \"russian\",\n \"sr\": \"serbian\",\n \"sv\": \"swedish\",\n \"ta\": \"tamil\",\n \"tr\": \"turkish\",\n \"yi\": \"yiddish\",\n}\n\n# Imported from\n# https://github.com/postgres/postgres/blob/REL_14_STABLE/src/bin/initdb/initdb.c#L2557\nDEFAULT_TEXT_SEARCH_CONFIG = \"simple\"\n\nDOCUMENT_SEARCH_VECTOR = (\n SearchVector(\"title\", weight=\"A\", config=F(\"config\"))\n + SearchVector(KeyTextTransform(\"slug\", \"metadata\"), weight=\"A\", config=F(\"config\"))\n + SearchVector(KeyTextTransform(\"toc\", \"metadata\"), weight=\"B\", config=F(\"config\"))\n + SearchVector(KeyTextTransform(\"body\", \"metadata\"), weight=\"C\", config=F(\"config\"))\n + SearchVector(\n KeyTextTransform(\"parents\", \"metadata\"), weight=\"D\", config=F(\"config\")\n )\n)\n","repo_name":"django/djangoproject.com","sub_path":"docs/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":1791,"dataset":"github-code","pt":"81"} +{"seq_id":"33902424484","text":"import datetime\nimport dateutil.tz\n\nclass CommonWriter:\n # pylint: disable=too-few-public-methods\n\n def __init__(self, all_media, main_title, max_media_per_page, years_prior_are_approximate,\n extra_header, version_label):\n # pylint: disable=too-many-arguments\n\n self.all_media = all_media\n self.main_title = main_title\n self.max_media_per_page = max_media_per_page\n self.years_prior_are_approximate = years_prior_are_approximate\n self.extra_header = extra_header\n self.version_label = version_label\n self.generated_at = datetime.datetime.now(dateutil.tz.tzlocal()) \\\n .strftime(\"%B %-d, %Y %H:%M:%S %Z\")\n\n def _get_date_parts(self, timestamp):\n date = datetime.datetime.fromtimestamp(timestamp)\n if self.years_prior_are_approximate and date.year < int(self.years_prior_are_approximate):\n return {\"year\": str(date.year), \"month\": None}\n\n return {\"year\": str(date.year), \"month\": date.strftime(\"%b\"), \"day\": str(date.day),\n \"weekday\": str(date.weekday()), \"hour\": 12 if date.hour == 0 else date.hour,\n \"minute\": 
date.minute, \"am_pm\": \"pm\" if date.hour >= 12 else \"am\"}\n\n WEEKDAYS = {\"0\": \"Mon\", \"1\": \"Tue\", \"2\": \"Wed\", \"3\": \"Thu\", \"4\": \"Fri\", \"5\": \"Sat\", \"6\": \"Sun\"}\n\n def _get_date_string(self, date_parts, include_more):\n if not date_parts[\"month\"]:\n return date_parts[\"year\"]\n\n ret = ''\n if include_more:\n ret += self.WEEKDAYS[date_parts[\"weekday\"]] + \" \"\n\n ret += \"%s %s, %s\" % (date_parts[\"month\"], date_parts[\"day\"], date_parts[\"year\"])\n\n has_approx_time = date_parts[\"minute\"] == 0 and date_parts[\"hour\"] in (0, 12)\n if include_more and not has_approx_time:\n hour = date_parts[\"hour\"] - 12 if date_parts[\"hour\"] > 12 else date_parts[\"hour\"]\n ret += \" %d:%02d%s\" % (hour, date_parts[\"minute\"], date_parts[\"am_pm\"])\n\n return ret\n\n def _get_date_range(self, min_timestamp, max_timestamp):\n if min_timestamp is None:\n return None\n\n min_parts = self._get_date_parts(min_timestamp)\n max_parts = self._get_date_parts(max_timestamp)\n\n if min_parts != max_parts and min_parts[\"month\"]:\n if min_parts[\"year\"] == max_parts[\"year\"] and \\\n min_parts[\"month\"] == max_parts[\"month\"] and \\\n min_parts[\"day\"] == max_parts[\"day\"]:\n return \"%s %s, %s\" % \\\n (min_parts[\"month\"], min_parts[\"day\"], min_parts[\"year\"])\n\n if min_parts[\"year\"] == max_parts[\"year\"] and min_parts[\"month\"] == max_parts[\"month\"]:\n return \"%s %s-%s, %s\" % \\\n (min_parts[\"month\"], min_parts[\"day\"], max_parts[\"day\"], min_parts[\"year\"])\n\n if min_parts[\"year\"] == max_parts[\"year\"]:\n return \"%s %s-%s %s, %s\" % \\\n (min_parts[\"month\"], min_parts[\"day\"], max_parts[\"month\"], max_parts[\"day\"],\n min_parts[\"year\"])\n\n min_str = self._get_date_string(min_parts, False)\n max_str = self._get_date_string(max_parts, False)\n if min_str == max_str:\n return min_str\n\n return \"%s to %s\" % (min_str, max_str)\n\n def _cleanup_tags(self, taglist):\n # Cleanup nested tags. 
For example, ['/Places', '/Places/WV'] becomes ['WV']\n tags_to_remove = set([])\n all_tags = set(taglist)\n tag_name_to_id = {}\n\n for tag_id in all_tags:\n tag_name = self.all_media[\"tags_by_id\"][tag_id][\"full_title\"]\n if not tag_name.startswith(\"/\"):\n continue\n\n tag_name_to_id[tag_name] = tag_id\n tag_parts = tag_name.split(\"/\")\n if len(tag_parts) == 2:\n continue\n\n tags_to_remove.add(\"/\".join(tag_parts[0:-1]))\n\n for tag_name in tags_to_remove:\n tag_id = tag_name_to_id[tag_name]\n all_tags.remove(tag_id)\n\n ret = [(tag_id, self.all_media[\"tags_by_id\"][tag_id][\"title\"]) for tag_id in all_tags]\n ret.sort(key=lambda tag: tag[1])\n\n return ret\n\n def _generate_media_index(self, all_media, media_indexer, media_index_config):\n if not all_media:\n return\n\n media_chunks = list(self._split_media_list_into_chunks(all_media))\n for index, media_on_page in enumerate(media_chunks):\n media_indexer(media_index_config, index + 1, media_on_page)\n\n def _split_media_list_into_chunks(self, media):\n for i in range(0, len(media), self.max_media_per_page):\n yield media[i:i + self.max_media_per_page]\n\n def _all_media_indexer(self, config, page_number, media_on_page):\n for media in media_on_page:\n if \"exposure_time\" not in media[\"media\"] or media[\"media\"][\"exposure_time\"] == 0:\n continue\n\n year = self._get_date_parts(media[\"media\"][\"exposure_time\"])[\"year\"]\n if year not in config[\"year\"]:\n config[\"year\"][year] = {\"page\": page_number}\n\n if \"event_id\" in media[\"media\"] and media[\"media\"][\"event_id\"] not in config[\"event\"]:\n config[\"event\"][media[\"media\"][\"event_id\"]] = {\"page\": page_number}\n\n config[\"media\"][media[\"media\"][\"media_id\"]] = {\"page\": page_number}\n","repo_name":"masneyb/shotwell-site-generator","sub_path":"media_writer_common.py","file_name":"media_writer_common.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"863949564","text":"\r\n# ca_wizard_mechanical, version 0.2\r\n# Allows the generation of comm-files for simple 3D structural analyses in code_aster with an interactive GUI\r\n#\r\n# This work is licensed under the terms and conditions of the GNU General Public License version 3\r\n# Copyright (C) 2017 Dominik Lechleitner\r\n# Contact: kaktus018(at)gmail.com\r\n# GitHub repository: https://github.com/kaktus018/ca_wizard_mechanical\r\n#\r\n# This file is part of ca_wizard_mechanical.\r\n# \r\n# ca_wizard_mechanical is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# ca_wizard_mechanical is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with ca_wizard_mechanical. 
If not, see <http://www.gnu.org/licenses/>.\r\n\r\nimport xml.etree.ElementTree as ET\r\n\r\nfrom keyword import iskeyword\r\nfrom copy import deepcopy\r\n\r\nls = \"\\n\"\r\n\r\ndef setMatLibPath(p):\r\n global matLibPath\r\n matLibPath = p\r\n\r\ndef setVersion(v):\r\n global cawmVersion\r\n cawmVersion = v\r\n\r\ndef isNumeric(*args):\r\n for s in args:\r\n try:\r\n float(s)\r\n except ValueError:\r\n return False\r\n return True\r\n \r\ndef isInteger(*args):\r\n for s in args:\r\n try:\r\n int(s)\r\n except ValueError:\r\n return False\r\n return True\r\n\r\ndef hasFunction(functionList,*args):\r\n for f in args:\r\n if f in [functionList[i].funcName for i in range(len(functionList))]:\r\n return True\r\n return False\r\n \r\ndef hasConstant(functionList,*args):\r\n for c in args:\r\n if c not in [functionList[i].funcName for i in range(len(functionList))] and c:\r\n return True\r\n return False\r\n\r\n# check if string is either empty, a function or numeric - in which case: return True\r\ndef checkValidEntry(functionList,*args):\r\n for el in args:\r\n if el and not hasFunction(functionList,el) and not isNumeric(el):\r\n return False\r\n return True\r\n \r\nclass cawmInst:\r\n \r\n def __init__(self,solverSet,names,workingDir,studyName):\r\n self.solverSet = solverSet\r\n self.names = names\r\n self.workingDir = workingDir\r\n self.studyName = studyName\r\n self.cawmVersion = cawmVersion\r\n\r\nclass PyFunction:\r\n \r\n def __init__(self,funcName,funcText):\r\n self.funcName = funcName\r\n self.funcText = funcText\r\n \r\n # verify function name and see if the interpreter raises an exception\r\n def verify(self,functionList,checkFuncDefi):\r\n msgs = []\r\n if not self.funcName.isidentifier() or iskeyword(self.funcName):\r\n msgs.append(self.funcName + \" is not a valid function name. 
The function will not be checked for further errors.\")\r\n elif checkFuncDefi:\r\n try:\r\n exec(\"def \" + self.funcName + \"(x,y,z,t):\\n\" + self.funcText)\r\n except Exception as e:\r\n msgs.append(\"While trying to evaluate the Python function \" + self.funcName + \" the Python 3 interpreter raised the following exception:\\n\" + str(e))\r\n return msgs\r\n\r\nclass Material:\r\n \r\n def __init__(self,matName):\r\n root = ET.parse(matLibPath).getroot()\r\n for child in root:\r\n if child.attrib[\"name\"] == matName:\r\n self.matName = matName\r\n self.matNum = child.find(\"materialNumber\").text\r\n self.matCat = child.find(\"category\").text\r\n self.youngsModulus = child.find(\"YoungsModulus\").text\r\n self.poissonRatio = child.find(\"PoissonRatio\").text\r\n self.alpha = child.find(\"alpha\").text\r\n self.density = child.find(\"density\").text\r\n return\r\n\r\nclass MaterialSet:\r\n \r\n def __init__(self,assiName,nodalGroupName,materialName):\r\n self.assiName = assiName\r\n self.nodalGroupName = nodalGroupName\r\n self.material = Material(materialName)\r\n \r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if not isNumeric(self.material.youngsModulus,self.material.poissonRatio):\r\n msgs.append(self.assiName + \": Young's modulus or Poisson's ratio is not numeric.\")\r\n if not [self.nodalGroupName, \"Volume\"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == \"whole mesh\":\r\n msgs.append(self.assiName + \": Material is not assigned to a valid node group.\")\r\n return msgs\r\n\r\nclass NodeJointSet:\r\n \r\n def __init__(self,assiName,jointGroupName,nodeName,cX,cY,cZ,cPhiX,cPhiY,cPhiZ):\r\n self.assiName = assiName\r\n self.jointGroupName = jointGroupName\r\n self.nodeName = nodeName\r\n self.cX = cX\r\n self.cY = cY\r\n self.cZ = cZ\r\n self.cPhiX = cPhiX\r\n self.cPhiY = cPhiY\r\n self.cPhiZ = cPhiZ\r\n \r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if not isNumeric(self.cX, self.cY, self.cZ, self.cPhiX, self.cPhiY, self.cPhiZ):\r\n msgs.append(self.assiName + \": At least one stiffness value is not numeric.\")\r\n if not [self.jointGroupName, \"Node joint group\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Node group name for the node joint group is not valid.\")\r\n if not [self.nodeName, \"Vertex/Node\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Node group name for the node is not valid.\")\r\n return msgs\r\n \r\nclass RestraintSet:\r\n \r\n def __init__(self,assiName,nodalGroupName,rotMatViaPython,deltaX,deltaY,deltaZ,deltaPhiX,deltaPhiY,deltaPhiZ,xTrans,yTrans,zTrans,rotX,rotY,rotZ,reacMX,reacMY,reacMZ):\r\n self.assiName = assiName\r\n self.nodalGroupName = nodalGroupName\r\n self.rotMatViaPython = rotMatViaPython\r\n self.deltaX = deltaX\r\n self.deltaY = deltaY\r\n self.deltaZ = deltaZ\r\n self.deltaPhiX = deltaPhiX\r\n self.deltaPhiY = deltaPhiY\r\n self.deltaPhiZ = deltaPhiZ\r\n self.xTrans = xTrans\r\n self.yTrans = yTrans\r\n self.zTrans = zTrans\r\n self.rotX = rotX\r\n self.rotY = rotY\r\n self.rotZ = rotZ\r\n self.reacMX = reacMX\r\n self.reacMY = reacMY\r\n self.reacMZ = reacMZ\r\n \r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if self.rotMatViaPython:\r\n if 
hasFunction(functionList,self.deltaX,self.deltaY,self.deltaZ,self.deltaPhiX,self.deltaPhiY,self.deltaPhiZ):\r\n raise ValueError(self.assiName + \": When using the provided function for the rotation matrix the entries for the restraints can not be a function.\")\r\n if (self.rotX and not (self.deltaX and self.deltaPhiY and self.deltaPhiZ)) or (self.rotY and not(self.deltaY and self.deltaPhiX and self.deltaPhiZ)) or \\\r\n (self.rotZ and not (self.deltaZ and self.deltaPhiX and self.deltaPhiY)):\r\n raise ValueError(self.assiName + \": When using the provided function for the rotation matrix the translational DoFs for all axes to which the rotation is applied to have to be restrained.\")\r\n if not isNumeric(self.deltaPhiX, self.deltaPhiY, self.deltaPhiZ, self.xTrans, self.yTrans, self.zTrans):\r\n msgs.append(self.assiName + \": Inputs for the rotational DoFs and the coordinates of the rotation center have to be numeric. (All rotational DoFs have to be restrained).\")\r\n if not checkValidEntry(functionList,self.deltaX,self.deltaY,self.deltaZ,self.deltaPhiX,self.deltaPhiY,self.deltaPhiZ):\r\n msgs.append(self.assiName + \": At least one input for translation or rotation is neither a function nor numeric. If this is related to the rotational DoFs and the restraint is not assigned to \" + \\\r\n \"a node of a node joint group you can ignore this warning.\")\r\n if not isNumeric(self.reacMX, self.reacMY, self.reacMZ):\r\n msgs.append(self.assiName + \": At least one input for the coordinates for the computation of the torsional reactions is not numeric.\")\r\n if not [self.nodalGroupName, \"Surface\"] in [names[i] for i in range(len(names))] and not [self.nodalGroupName, \"Edge\"] in [names[i] for i in range(len(names))] and \\\r\n not [self.nodalGroupName, \"Vertex/Node\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Restraint is not assigned to a valid node group.\")\r\n return msgs\r\n \r\n \r\nclass LoadSet:\r\n \r\n def __init__(self,assiName,nodalGroupName,loadType,FX,FY,FZ,MX,MY,MZ,p,gX,gY,gZ,omega,centerX,centerY,centerZ,axisX,axisY,axisZ):\r\n self.assiName = assiName\r\n self.nodalGroupName = nodalGroupName\r\n self.loadType = loadType\r\n self.FX = FX\r\n self.FY = FY\r\n self.FZ = FZ\r\n self.MX = MX\r\n self.MY = MY\r\n self.MZ = MZ\r\n self.p = p\r\n self.gX = gX\r\n self.gY = gY\r\n self.gZ = gZ\r\n self.omega = omega\r\n self.centerX = centerX\r\n self.centerY = centerY\r\n self.centerZ = centerZ\r\n self.axisX = axisX\r\n self.axisY = axisY\r\n self.axisZ = axisZ\r\n \r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if self.loadType == \"Gravity\":\r\n if not isNumeric(self.gX, self.gY, self.gZ):\r\n msgs.append(self.assiName + \": At least one input for the gravity vector is not numeric.\")\r\n if not [self.nodalGroupName, \"Volume\"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == \"whole mesh\":\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if self.loadType == \"Centrifugal force\":\r\n if not isNumeric(self.omega, self.centerX, self.centerY, self.centerZ, self.axisX, self.axisY, self.axisZ):\r\n msgs.append(self.assiName + \": At least one input for the rotation is not numeric.\")\r\n if not [self.nodalGroupName, \"Volume\"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == \"whole mesh\":\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if 
self.loadType == \"Force on volume\":\r\n if not checkValidEntry(functionList, self.FX, self.FY, self.FZ):\r\n msgs.append(self.assiName + \": At least one input for the force vector is neither a function nor numeric.\")\r\n if not [self.nodalGroupName, \"Volume\"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == \"whole mesh\":\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if self.loadType == \"Force on face\":\r\n if not checkValidEntry(functionList, self.FX, self.FY, self.FZ):\r\n msgs.append(self.assiName + \": At least one input for the force vector is neither a function nor numeric.\")\r\n if not [self.nodalGroupName, \"Surface\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if self.loadType == \"Force on edge\":\r\n if not checkValidEntry(functionList, self.FX, self.FY, self.FZ):\r\n msgs.append(self.assiName + \": At least one input for the force vector is neither a function nor numeric.\")\r\n if not [self.nodalGroupName, \"Edge\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if self.loadType == \"Force on node\":\r\n if not checkValidEntry(functionList, self.FX, self.FY, self.FZ, self.MX, self.MY, self.MZ):\r\n msgs.append(self.assiName + \": At least one input for the force or torque vector is neither a function nor numeric (if this message relates to the torque and the node\" + \\\r\n \"is not assigned to a node joint group, you can disregard this message).\")\r\n if not self.nodalGroupName in [names[i][0] for i in range(len(names))] and not [self.nodalGroupName, \"Node joint group\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n if self.loadType == \"Pressure\":\r\n if not checkValidEntry(functionList, self.p) or not self.p:\r\n msgs.append(self.assiName + \": Input for the pressure is neither a function nor numeric.\")\r\n if not [self.nodalGroupName, \"Surface\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Load is not assigned to a valid node group.\")\r\n return msgs\r\n \r\nclass ContactGlobalSetting:\r\n \r\n def __init__(self,formulationType,frictionModel,contactAlgo,frictionAlgo):\r\n self.formulationType = formulationType\r\n self.frictionModel = frictionModel\r\n self.contactAlgo = contactAlgo\r\n self.frictionAlgo = frictionAlgo\r\n \r\nclass ContactSet:\r\n \r\n def __init__(self,assiName,masterName,slaveName,fricCoeff,contactAlgo,E_N,E_T,globalSettings):\r\n self.assiName = assiName\r\n self.masterName = masterName\r\n self.slaveName = slaveName\r\n self.fricCoeff = fricCoeff\r\n self.contactAlgo = contactAlgo\r\n self.E_N = E_N\r\n self.E_T = E_T\r\n self.globalSettings = globalSettings\r\n \r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if self.globalSettings.formulationType == \"discrete\":\r\n if self.contactAlgo == \"PENALISATION\":\r\n if not isNumeric(self.E_N):\r\n msgs.append(self.assiName + \": E_N is not numeric.\")\r\n if self.globalSettings.frictionModel == \"Coulomb\":\r\n if not isNumeric(self.E_T):\r\n msgs.append(self.assiName + \": E_T is not numeric.\")\r\n if not isNumeric(self.fricCoeff):\r\n msgs.append(self.assiName + \": Friction coefficient is not numeric.\")\r\n else:\r\n if self.globalSettings.frictionModel == \"Coulomb\":\r\n if 
not isNumeric(self.fricCoeff):\r\n msgs.append(self.assiName + \": Friction coefficient is not numeric.\")\r\n if not [self.masterName, \"Surface\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Master is not assigned to a valid node group.\")\r\n if not [self.slaveName, \"Surface\"] in [names[i] for i in range(len(names))]:\r\n msgs.append(self.assiName + \": Slave is not assigned to a valid node group.\")\r\n return msgs\r\n \r\nclass ThermalSet:\r\n \r\n def __init__(self,assiName,nodalGroupName,assiType,deltaT,unite,T0,funStr):\r\n self.assiName = assiName\r\n self.nodalGroupName = nodalGroupName\r\n self.assiType = assiType\r\n self.deltaT = deltaT\r\n self.unite = unite\r\n self.T0 = T0\r\n self.funStr = funStr\r\n\r\n # verify datatype of properties and node group name\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if self.assiType == \"const\":\r\n if not checkValidEntry(functionList, self.deltaT):\r\n msgs.append(self.assiName + \": \\u0394T is neither a function nor numeric.\")\r\n else:\r\n if not isNumeric(self.unite, self.T0):\r\n msgs.append(self.assiName + \": UNITE or T0 is not numeric.\")\r\n if not [self.nodalGroupName, \"Volume\"] in [names[i] for i in range(len(names))] and not self.nodalGroupName == \"whole mesh\":\r\n msgs.append(self.assiName + \": Temp. field is not assigned to a valid node group.\")\r\n return msgs\r\n \r\nclass OutputSet:\r\n \r\n def __init__(self,nodalGroupName,SIGM,SIEQ,EPS,REAC,ERME,TEMP):\r\n self.nodalGroupName = nodalGroupName\r\n self.SIGM = SIGM\r\n self.SIEQ = SIEQ\r\n self.EPS = EPS\r\n self.REAC = REAC\r\n self.ERME = ERME\r\n self.TEMP = TEMP\r\n \r\nclass SolverSet:\r\n \r\n def __init__(self,analysisType,timeSteps,endTime,timeRampUp,timeRampDown,timeRampFunc,strainModel,method,resi,maxIter,functions,checkFuncDefis,materialSets,nodeJointSets,restraintSets,loadSets,contactSets,\r\n thermalSets,outputSet):\r\n self.analysisType = analysisType\r\n self.timeSteps = timeSteps\r\n self.endTime = endTime\r\n self.timeRampUp = timeRampUp\r\n self.timeRampDown = timeRampDown\r\n self.timeRampFunc = timeRampFunc\r\n self.strainModel = strainModel\r\n self.method = method\r\n self.resi = resi\r\n self.maxIter = maxIter\r\n self.functions = functions\r\n self.checkFuncDefis = checkFuncDefis\r\n self.materialSets = materialSets\r\n self.nodeJointSets = nodeJointSets\r\n self.restraintSets = restraintSets\r\n self.loadSets = loadSets\r\n self.contactSets = contactSets\r\n self.thermalSets = thermalSets\r\n self.outputSet = outputSet\r\n \r\n # this method will check if relevant inputs are numeric and all assignments to node groups are valid. 
It will NOT check in anyway if the resulting comm-file will run in code_aster!\r\n def verify(self,names,functionList):\r\n msgs = []\r\n if len(self.materialSets) == 0 or len(self.restraintSets) == 0:\r\n msgs.extend([\"The current setup has no material assignments and/or no restraint assignments.\"])\r\n for el in self.functions:\r\n msgs.extend(el.verify(functionList,self.checkFuncDefis))\r\n for el in self.materialSets + self.nodeJointSets + self.restraintSets + self.loadSets + self.contactSets + self.thermalSets:\r\n msgs.extend(el.verify(names,functionList))\r\n if not isInteger(self.timeSteps):\r\n raise ValueError(\"The number of time steps is not of type integer.\")\r\n if not isNumeric(self.endTime):\r\n msgs.extend([\"The simulation end time is not numeric.\"])\r\n if self.analysisType == \"non-linear static\":\r\n if not isInteger(self.maxIter):\r\n msgs.extend([\"The number of max. iterations has to be of type integer.\"])\r\n if not isNumeric(self.resi):\r\n msgs.extend([\"Max. relative global residual is not numeric.\"])\r\n if int(self.timeSteps) < 1:\r\n msgs.extend([\"A non-linear analysis requires at least one time step.\"])\r\n if self.timeRampUp and self.timeRampDown and not int(self.timeSteps) % 2 == 0:\r\n msgs.extend([\"Ramping loads and restraints up AND down requires an even amount of time steps. Otherwise a computation with their max. values will not happen.\"])\r\n if self.outputSet.ERME and len(self.nodeJointSets) > 0:\r\n msgs.extend([\"Calculation of the error a posteriori (ERME) with code_aster version <= 13.2 can only be performed on the whole mesh. This will not work with the discrete element\" + \\\r\n \" of a node joint (MODELISATION='DIS_TR').\"])\r\n return msgs\r\n \r\n # generate string for comm-file\r\n def assembleCOMM(self):\r\n \r\n def getFormuleName(funcName):\r\n for el in formules:\r\n if el[1] == funcName:\r\n return el[0]\r\n return None\r\n \r\n pythonFuns = \"\"\r\n # If any restraints require the application of the roational matrix, add generic translation functions\r\n if sum([self.restraintSets[i].rotMatViaPython for i in range(len(self.restraintSets))]) > 0:\r\n pythonFuns = \"# Generic translation functions:\" + ls + \"def translate_X(deltaX,phiY,phiZ,XTrans,YTrans,ZTrans,X,Y,Z):\" + ls + \\\r\n \" return deltaX+(X-XTrans)*cos(phiY)+(Z-ZTrans)*sin(phiY)+(X-XTrans)*cos(phiZ)-(Y-YTrans)*sin(phiZ)-2*(X-XTrans)\" + ls + ls + \\\r\n \"def translate_Y(deltaY,phiX,phiZ,XTrans,YTrans,ZTrans,X,Y,Z):\" + ls + \\\r\n \" return deltaY+(Y-YTrans)*cos(phiX)-(Z-ZTrans)*sin(phiX)+(Y-YTrans)*cos(phiZ)+(X-XTrans)*sin(phiZ)-2*(Y-YTrans)\" + ls + ls + \\\r\n \"def translate_Z(deltaZ,phiX,phiY,XTrans,YTrans,ZTrans,X,Y,Z):\" + ls + \\\r\n \" return deltaZ+(Z-ZTrans)*cos(phiX)+(Y-YTrans)*sin(phiX)+(Z-ZTrans)*cos(phiY)-(X-XTrans)*sin(phiY)-2*(Z-ZTrans)\" + ls + ls\r\n \r\n # For restraints that use the generic translation functions defined above, add wrapper functions to the functions list\r\n restraintSetsLocal = deepcopy(self.restraintSets) # allow local modification of the restraint sets without compromising the original data\r\n functionsLocal = deepcopy(self.functions) # allow local modification of the functions list without compromising the original data\r\n for el in restraintSetsLocal:\r\n if el.rotMatViaPython:\r\n if not el.deltaX == \"\" or el.rotX:\r\n if el.rotX:\r\n phiY = str(float(el.deltaPhiY))\r\n phiZ = str(float(el.deltaPhiZ))\r\n else:\r\n phiY = \"0.0\"\r\n phiZ = \"0.0\"\r\n functionsLocal.append(PyFunction(\"DX_\" + el.assiName, \" 
return translate_X(\"+str(float(el.deltaX))+\",\"+phiY+\",\"+phiZ+\",\"+ \\\r\n str(float(el.xTrans))+\",\"+str(float(el.yTrans))+\",\"+str(float(el.zTrans))+\",x,y,z)\"))\r\n el.deltaX = \"DX_\" + el.assiName\r\n if not el.deltaY == \"\" or el.rotY:\r\n if el.rotY:\r\n phiX = str(float(el.deltaPhiX))\r\n phiZ = str(float(el.deltaPhiZ))\r\n else:\r\n phiX = \"0.0\"\r\n phiZ = \"0.0\"\r\n functionsLocal.append(PyFunction(\"DY_\" + el.assiName, \" return translate_Y(\"+str(float(el.deltaY))+\",\"+phiX+\",\"+phiZ+\",\"+ \\\r\n str(float(el.xTrans))+\",\"+str(float(el.yTrans))+\",\"+str(float(el.zTrans))+\",x,y,z)\"))\r\n el.deltaY = \"DY_\" + el.assiName\r\n if not el.deltaZ == \"\" or el.rotZ:\r\n if el.rotZ:\r\n phiX = str(float(el.deltaPhiX))\r\n phiY = str(float(el.deltaPhiY))\r\n else:\r\n phiX = \"0.0\"\r\n phiY = \"0.0\"\r\n functionsLocal.append(PyFunction(\"DZ_\" + el.assiName, \" return translate_Z(\"+str(float(el.deltaZ))+\",\"+phiX+\",\"+phiY+\",\"+ \\\r\n str(float(el.xTrans))+\",\"+str(float(el.yTrans))+\",\"+str(float(el.zTrans))+\",x,y,z)\"))\r\n el.deltaZ = \"DZ_\" + el.assiName\r\n\r\n # Add all Python functions in the functions list to the comm-file\r\n if len(functionsLocal) > 0:\r\n pythonFuns = pythonFuns + \"# User defined Python functions and wrappers for the generic translation functions\" + ls + ls\r\n for el in functionsLocal:\r\n pythonFuns = pythonFuns + \"def \" + el.funcName + \"(x,y,z,t):\" + ls + el.funcText + ls + ls\r\n\r\n # DEBUT statement\r\n debutStr = ls + \"# Start of code_aster commands\" + ls + \"DEBUT();\" + ls + ls\r\n \r\n # list of time steps\r\n if int(self.timeSteps) > 0:\r\n tListStr = \"# list of time steps\" + ls + \"TLIST=DEFI_LIST_REEL(DEBUT=0.0,INTERVALLE=_F(JUSQU_A=\" + self.endTime + \",NOMBRE=\" + self.timeSteps + \",),);\" + ls + ls\r\n \"SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(\"\r\n if self.timeRampUp == 1 and self.timeRampDown == 1:\r\n tListStr = tListStr + \"SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 0.0, 0.5, 1.0 ,1.0 ,0.0,),);\" + ls + ls\r\n elif self.timeRampUp:\r\n tListStr = tListStr + \"SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 0.0, 1.0, 1.0,),);\" + ls + ls\r\n elif self.timeRampDown:\r\n tListStr = tListStr + \"SF=DEFI_FONCTION(NOM_PARA='INST',VALE=(0.0, 1.0, 1.0, 0.0,),);\" + ls + ls \r\n else:\r\n tListStr = \"\"\r\n \r\n # Bind all Python functions to corresponding code_aster formules\r\n formuleStr = \"\"\r\n formules = []\r\n for el in functionsLocal:\r\n if formuleStr == \"\":\r\n formuleStr = \"# Assign formules\" + ls\r\n # store all identifiers for the formules in a list (max. 
8 characters allowed for identifier -> can not use the actual function name)\r\n # formule name at index 0, function name at index 1\r\n formules.append([\"F\" + str(len(formules)),el.funcName])\r\n formuleStr = formuleStr + formules[-1][0] + \"=FORMULE(VALE='\" + el.funcName + \"(X,Y,Z,INST)',NOM_PARA=('X','Y','Z','INST',),);\" + ls\r\n if not formuleStr == \"\":\r\n formuleStr = formuleStr + ls\r\n \r\n # material definitions\r\n matDefiStr = \"# Material definitions\" + ls\r\n matDefiNames = [] # same here as with the formules - use short identifiers for the material definitions\r\n for el in self.materialSets:\r\n matDefiNames.append(\"MA\"+str(len(matDefiNames)))\r\n matDefiStr = matDefiStr + matDefiNames[-1] + \"=\" + \"DEFI_MATERIAU(ELAS=_F(E=\" + el.material.youngsModulus + \",NU=\" + el.material.poissonRatio + \\\r\n \",RHO=\" + el.material.density + \",ALPHA=\" + el.material.alpha + \",),);\" + ls\r\n matDefiStr = matDefiStr + ls\r\n \r\n # reading/modifying the mesh\r\n meshName = \"MAIL0\"\r\n # reading\r\n meshStr = \"# reading/modifying the mesh\" + ls + meshName + \"=LIRE_MAILLAGE(FORMAT='MED',);\" + ls\r\n # create points for node joints\r\n if len(self.nodeJointSets) > 0:\r\n meshName = \"MAIL1\"\r\n meshStr = meshStr + meshName + \"=CREA_MAILLAGE(MAILLAGE=MAIL0,CREA_POI1=(\"\r\n for el in self.nodeJointSets:\r\n meshStr = meshStr + \"_F(NOM_GROUP_MA='\" + el.nodeName + \"',GROUP_NO='\" + el.nodeName + \"',),\" + ls\r\n meshStr = meshStr + \"),);\" + ls\r\n # mesh adaption for pressure loads and contacts\r\n groupMAStr = \"\"\r\n groupMAList = []\r\n for el in self.loadSets:\r\n if el.loadType == \"Pressure\" and not el.nodalGroupName in groupMAList:\r\n groupMAStr = groupMAStr + \"'\" + el.nodalGroupName + \"',\"\r\n groupMAList.append(el.nodalGroupName)\r\n if self.analysisType == \"non-linear static\":\r\n for el in self.contactSets:\r\n if not el.masterName in groupMAList:\r\n groupMAStr = groupMAStr + \"'\" + el.masterName + \"',\"\r\n groupMAList.append(el.masterName)\r\n if not el.slaveName in groupMAList:\r\n groupMAStr = groupMAStr + \"'\" + el.slaveName + \"',\"\r\n groupMAList.append(el.slaveName)\r\n if not groupMAStr == \"\":\r\n meshStr = meshStr + meshName + \"=MODI_MAILLAGE(reuse=\" + meshName + \",MAILLAGE=\" + meshName + \",ORIE_PEAU_3D=_F(GROUP_MA=(\" + ls + \\\r\n groupMAStr + \"),),);\" + ls\r\n meshStr = meshStr + ls\r\n \r\n # create model\r\n modelStr = \"# create model\" + ls + \"MODE=AFFE_MODELE(MAILLAGE=\" + meshName + \",AFFE=(_F(TOUT='OUI',PHENOMENE='MECANIQUE',MODELISATION='3D',),\" + ls\r\n groupMAStr = \"\"\r\n for el in self.nodeJointSets:\r\n groupMAStr = groupMAStr + ls + \"'\" + el.nodeName + \"',\"\r\n if not groupMAStr == \"\":\r\n modelStr = modelStr + \"_F(GROUP_MA=(\" + groupMAStr + \"),\" + ls + \"PHENOMENE='MECANIQUE',MODELISATION='DIS_TR',),\"\r\n modelStr = modelStr + \"),);\" + ls + ls\r\n\r\n # create temperature fields from constant or function\r\n tempFieldStr = \"\"\r\n tempFieldNames = []\r\n if sum([self.thermalSets[i].assiType == \"const\" for i in range(len(self.thermalSets))]) > 0:\r\n tempFieldStr = \"# Create temperature fields\" + ls\r\n for el in self.thermalSets:\r\n if el.assiType == \"const\":\r\n tempFieldNames.append(\"TFld\" + str(len(tempFieldNames)))\r\n tempFieldStr = tempFieldStr + tempFieldNames[-1] + \"=CREA_CHAMP(TYPE_CHAM='NOEU_TEMP_\"\r\n if hasFunction(functionsLocal,el.deltaT):\r\n tempFieldStr = tempFieldStr + \"F\"\r\n else:\r\n tempFieldStr = tempFieldStr + \"R\"\r\n tempFieldStr = tempFieldStr 
+ \"',OPERATION='AFFE',MODELE=MODE,AFFE=(_F(\"\r\n if el.nodalGroupName == \"whole mesh\":\r\n tempFieldStr = tempFieldStr + \"TOUT='OUI'\"\r\n else:\r\n tempFieldStr = tempFieldStr + \"GROUP_MA='\" + el.nodalGroupName + \"'\"\r\n tempFieldStr = tempFieldStr + \",NOM_CMP='TEMP'\"\r\n if hasFunction(functionsLocal,el.deltaT):\r\n tempFieldStr = tempFieldStr + \",VALE_F=\" + getFormuleName(el.deltaT)\r\n else:\r\n tempFieldStr = tempFieldStr + \",VALE=\" + el.deltaT\r\n tempFieldStr = tempFieldStr + \",),),);\" + ls\r\n if not tempFieldStr == \"\":\r\n tempFieldStr = tempFieldStr + ls\r\n \r\n # create a code_aster-result for all temp. fields\r\n tempResStr = \"\"\r\n tempResNames = []\r\n if len(tempFieldNames) > 0:\r\n tempResStr = \"# Create results for all temperature fields\" + ls\r\n for el in tempFieldNames:\r\n tempResNames.append(\"TRes\" + str(len(tempResNames)))\r\n tempResStr = tempResStr + tempResNames[-1] + \"=CREA_RESU(OPERATION='AFFE',TYPE_RESU='EVOL_THER',NOM_CHAM='TEMP',AFFE=_F(CHAM_GD=\" + el + \",\"\r\n if int(self.timeSteps) > 0:\r\n tempResStr = tempResStr + \"LIST_INST=TLIST\"\r\n else:\r\n tempResStr = tempResStr + \"INST=0.0\"\r\n tempResStr = tempResStr + \",),);\" + ls\r\n if not tempResStr == \"\":\r\n tempResStr = tempResStr + ls\r\n \r\n # create a code_aster-result for the temp. field from a med-file\r\n if sum([self.thermalSets[i].assiType == \"file\" for i in range(len(self.thermalSets))]) > 0:\r\n tempResStr = tempResStr + \"# create result for the temperature field from a med-files\"\r\n for el in self.thermalSets:\r\n if el.assiType == \"file\":\r\n tempResNames.append(\"TRes\" + str(len(tempResNames)))\r\n tempResStr = tempResStr + tempResNames[-1] + \"=LIRE_RESU(TYPE_RESU='EVOL_THER',FORMAT='MED',\" + \\\r\n \"MAILLAGE=\" + meshName + \",\" + ls + \"UNITE=\" + el.unite + \",\" + \\\r\n \"FORMAT_MED=_F(NOM_CHAM='TEMP',NOM_CHAM_MED='TEMP____TEMP',),TOUT_ORDRE='OUI',);\" + ls\r\n if sum([self.thermalSets[i].assiType == \"file\" for i in range(len(self.thermalSets))]) > 0:\r\n tempResStr = tempResStr + ls\r\n \r\n # assign materials and temperature results\r\n matTempAssiStr = \"# Assign materials and temp. 
results\" + ls + \"MATE=AFFE_MATERIAU(MAILLAGE=\" + meshName + \",AFFE=(\" + ls\r\n i=0\r\n for el in self.materialSets:\r\n matTempAssiStr = matTempAssiStr + \"_F(\" \r\n if el.nodalGroupName == \"whole mesh\":\r\n matTempAssiStr = matTempAssiStr + \"TOUT='OUI',\"\r\n else:\r\n matTempAssiStr = matTempAssiStr + \"GROUP_MA='\" + el.nodalGroupName + \"',\"\r\n matTempAssiStr = matTempAssiStr + \"MATER=\" + matDefiNames[i] + \",),\" + ls\r\n i = i+1\r\n matTempAssiStr = matTempAssiStr + \"),\"\r\n i = 0\r\n if len(self.thermalSets) > 0:\r\n matTempAssiStr = matTempAssiStr + \"AFFE_VARC=(\" + ls\r\n for el in self.thermalSets:\r\n matTempAssiStr = matTempAssiStr + \"_F(\"\r\n if el.nodalGroupName == \"whole mesh\":\r\n matTempAssiStr = matTempAssiStr + \"TOUT='OUI'\"\r\n else:\r\n matTempAssiStr = matTempAssiStr + \"GROUP_MA='\" + el.nodalGroupName + \"'\"\r\n matTempAssiStr = matTempAssiStr + \",NOM_VARC='TEMP',EVOL=\" + tempResNames[i] + \",\"\r\n if el.assiType == \"file\":\r\n matTempAssiStr = matTempAssiStr + \"VALE_REF=\" + str(float(el.T0))\r\n else:\r\n matTempAssiStr = matTempAssiStr + \"VALE_REF=0.0\"\r\n matTempAssiStr = matTempAssiStr + \",),\" + ls\r\n i = i+1\r\n if len(self.thermalSets) > 0:\r\n matTempAssiStr = matTempAssiStr + \"),\"\r\n matTempAssiStr = matTempAssiStr + \");\" + ls + ls\r\n \r\n # assign properties for node joints\r\n caraStr = \"\"\r\n for el in self.nodeJointSets:\r\n if caraStr == \"\":\r\n caraStr = \"# assign properties for node joints\" + ls + \"CARA=AFFE_CARA_ELEM(MODELE=MODE,DISCRET=(\"\r\n caraStr = caraStr + \"_F(CARA='K_TR_D_N',GROUP_MA='\" + el.nodeName + \"',VALE=(\" + el.cX + \",\" + el.cY + \",\" + el.cZ + \\\r\n \",\" + el.cPhiX + \",\" + el.cPhiY + \",\" + el.cPhiZ + \",),),\" + ls\r\n if not caraStr == \"\":\r\n caraStr = caraStr + \"),);\" + ls + ls\r\n \r\n # assign restraints/loads via formules\r\n affeCharMecaFStr = \"\"\r\n # restraints\r\n for el in restraintSetsLocal:\r\n hasFormulesTrans = hasFunction(functionsLocal,el.deltaX, el.deltaY, el.deltaZ) # at least one delta is not numeric (is a function)\r\n hasFormulesRot = 0\r\n if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # restraint applied to a node of a node joint -> rotational DOFs\r\n hasFormulesRot = hasFunction(functionsLocal,el.deltaPhiX, el.deltaPhiY, el.deltaPhiZ)\r\n if hasFormulesTrans or hasFormulesRot: # restraint uses at least one function\r\n affeCharMecaFStr = affeCharMecaFStr + \"_F(GROUP_NO='\" + el.nodalGroupName + \"',\"\r\n if hasFunction(functionsLocal,el.deltaX):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DX=\" + getFormuleName(el.deltaX) + \",\"\r\n if hasFunction(functionsLocal,el.deltaY):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DY=\" + getFormuleName(el.deltaY) + \",\"\r\n if hasFunction(functionsLocal,el.deltaZ):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DZ=\" + getFormuleName(el.deltaZ) + \",\"\r\n if hasFormulesRot:\r\n if hasFunction(functionsLocal,el.deltaPhiX):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DRX=\" + getFormuleName(el.deltaPhiX) + \",\"\r\n if hasFunction(functionsLocal,el.deltaPhiY):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DRY=\" + getFormuleName(el.deltaPhiY) + \",\"\r\n if hasFunction(functionsLocal,el.deltaPhiZ):\r\n affeCharMecaFStr = affeCharMecaFStr + \"DRZ=\" + getFormuleName(el.deltaPhiZ) + \",\"\r\n affeCharMecaFStr = affeCharMecaFStr + \"),\" + ls\r\n if not affeCharMecaFStr == \"\":\r\n affeCharMecaFStr = \"DDL_IMPO=(\" + ls + affeCharMecaFStr + \"),\" + ls\r\n # 
loads\r\n forceOnVolumeStr = \"\"\r\n forceOnFaceStr = \"\"\r\n forceOnEdgeStr = \"\"\r\n forceOnNodeStr = \"\"\r\n pressureStr = \"\"\r\n for el in self.loadSets:\r\n # forces/torques\r\n if el.loadType in [\"Force on volume\",\"Force on face\",\"Force on edge\",\"Force on node\"]:\r\n hasFormulesForce = hasFunction(functionsLocal,el.FX,el.FY,el.FZ)\r\n hasFormulesTorque = 0\r\n if el.loadType == \"Force on node\":\r\n if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # load applied to a node of a node joint -> torque assignment possible\r\n hasFormulesTorque = hasFunction(functionsLocal,el.MX,el.MY,el.MZ)\r\n if hasFormulesForce or hasFormulesTorque:\r\n if el.nodalGroupName == \"whole mesh\":\r\n assiStr = \"TOUT='OUI',\"\r\n elif el.loadType == \"Force on node\":\r\n assiStr = \"GROUP_NO='\" + el.nodalGroupName + \"',\"\r\n else:\r\n assiStr = \"GROUP_MA='\" + el.nodalGroupName + \"',\"\r\n tempStr = \"_F(\" + assiStr\r\n if hasFunction(functionsLocal,el.FX):\r\n tempStr = tempStr + \"FX=\" + getFormuleName(el.FX) + \",\"\r\n if hasFunction(functionsLocal,el.FY):\r\n tempStr = tempStr + \"FY=\" + getFormuleName(el.FY) + \",\"\r\n if hasFunction(functionsLocal,el.FZ):\r\n tempStr = tempStr + \"FZ=\" + getFormuleName(el.FZ) + \",\"\r\n if hasFormulesTorque:\r\n if hasFunction(functionsLocal,el.MX):\r\n tempStr = tempStr + \"MX=\" + getFormuleName(el.MX) + \",\"\r\n if hasFunction(functionsLocal,el.MY):\r\n tempStr = tempStr + \"MY=\" + getFormuleName(el.MY) + \",\"\r\n if hasFunction(functionsLocal,el.MZ):\r\n tempStr = tempStr + \"MZ=\" + getFormuleName(el.MZ) + \",\"\r\n tempStr = tempStr + \"),\" + ls\r\n if el.loadType == \"Force on volume\":\r\n forceOnVolumeStr = forceOnVolumeStr + tempStr\r\n elif el.loadType == \"Force on face\":\r\n forceOnFaceStr = forceOnFaceStr + tempStr\r\n elif el.loadType == \"Force on edge\":\r\n forceOnEdgeStr = forceOnEdgeStr + tempStr\r\n elif el.loadType == \"Force on node\":\r\n forceOnNodeStr = forceOnNodeStr + tempStr\r\n # pressures\r\n if el.loadType == \"Pressure\":\r\n if hasFunction(functionsLocal,el.p):\r\n pressureStr = pressureStr + \"_F(GROUP_MA='\" + el.nodalGroupName + \"',PRES=\" + getFormuleName(el.p) + \",),\" + ls\r\n if not forceOnVolumeStr == \"\":\r\n affeCharMecaFStr = affeCharMecaFStr + \"FORCE_INTERNE=(\" + ls + forceOnVolumeStr + \"),\" + ls\r\n if not forceOnFaceStr == \"\":\r\n affeCharMecaFStr = affeCharMecaFStr + \"FORCE_FACE=(\" + ls + forceOnFaceStr + \"),\" + ls\r\n if not forceOnEdgeStr == \"\":\r\n affeCharMecaFStr = affeCharMecaFStr + \"FORCE_ARETE=(\" + ls + forceOnEdgeStr + \"),\" + ls\r\n if not forceOnNodeStr == \"\":\r\n affeCharMecaFStr = affeCharMecaFStr + \"FORCE_NODALE=(\" + ls + forceOnNodeStr + \"),\" + ls\r\n if not pressureStr == \"\":\r\n affeCharMecaFStr = affeCharMecaFStr + \"PRES_REP=(\" + ls + pressureStr + \"),\" + ls\r\n if not affeCharMecaFStr == \"\":\r\n affeCharMecaFStr = \"# assign restraints/loads via formules\" + ls + \"CHARF=AFFE_CHAR_MECA_F(MODELE=MODE,\" + ls + affeCharMecaFStr + \");\" + ls + ls\r\n\r\n # assign remaining restraints, node joints and loads\r\n affeCharMecaStr = \"\"\r\n # restraints\r\n for el in restraintSetsLocal:\r\n hasConstantsTrans = hasConstant(functionsLocal,el.deltaX, el.deltaY, el.deltaZ) # at least one delta is not a function\r\n hasConstantsRot = 0\r\n if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # restraint applied to a node of a node joint -> rotational 
DOFs\r\n                hasConstantsRot = hasConstant(functionsLocal,el.deltaPhiX, el.deltaPhiY, el.deltaPhiZ)\r\n            if hasConstantsTrans or hasConstantsRot: # restraint uses at least one constant\r\n                if not el.rotMatViaPython:\r\n                    affeCharMecaStr = affeCharMecaStr + \"_F(GROUP_NO='\" + el.nodalGroupName + \"',\"\r\n                    if hasConstant(functionsLocal,el.deltaX):\r\n                        affeCharMecaStr = affeCharMecaStr + \"DX=\" + el.deltaX + \",\"\r\n                    if hasConstant(functionsLocal,el.deltaY):\r\n                        affeCharMecaStr = affeCharMecaStr + \"DY=\" + el.deltaY + \",\"\r\n                    if hasConstant(functionsLocal,el.deltaZ):\r\n                        affeCharMecaStr = affeCharMecaStr + \"DZ=\" + el.deltaZ + \",\"\r\n                    if hasConstantsRot:\r\n                        if hasConstant(functionsLocal,el.deltaPhiX):\r\n                            affeCharMecaStr = affeCharMecaStr + \"DRX=\" + el.deltaPhiX + \",\"\r\n                        if hasConstant(functionsLocal,el.deltaPhiY):\r\n                            affeCharMecaStr = affeCharMecaStr + \"DRY=\" + el.deltaPhiY + \",\"\r\n                        if hasConstant(functionsLocal,el.deltaPhiZ):\r\n                            affeCharMecaStr = affeCharMecaStr + \"DRZ=\" + el.deltaPhiZ + \",\"\r\n                    affeCharMecaStr = affeCharMecaStr + \"),\" + ls\r\n        if not affeCharMecaStr == \"\":\r\n            affeCharMecaStr = \"DDL_IMPO=(\" + ls + affeCharMecaStr + \"),\" + ls\r\n        # node joints\r\n        nodeJointsStr = \"\"\r\n        for el in self.nodeJointSets:\r\n            nodeJointsStr = nodeJointsStr + \"_F(GROUP_NO='\" + el.jointGroupName + \"',),\" + ls\r\n        # loads\r\n        forceOnVolumeStr = \"\"\r\n        forceOnFaceStr = \"\"\r\n        forceOnEdgeStr = \"\"\r\n        forceOnNodeStr = \"\"\r\n        pressureStr = \"\"\r\n        gravityStr = \"\"\r\n        centrifugalForceStr = \"\"\r\n        for el in self.loadSets:\r\n            # forces/torques\r\n            if el.loadType in [\"Force on volume\",\"Force on face\",\"Force on edge\",\"Force on node\"]:\r\n                hasConstantsForce = hasConstant(functionsLocal,el.FX,el.FY,el.FZ)\r\n                hasConstantsTorque = 0\r\n                if el.loadType == \"Force on node\":\r\n                    if el.nodalGroupName in [self.nodeJointSets[i].nodeName for i in range(len(self.nodeJointSets))]: # load applied to a node of a node joint -> torque assignment possible\r\n                        hasConstantsTorque = hasConstant(functionsLocal,el.MX,el.MY,el.MZ)\r\n                if hasConstantsForce or hasConstantsTorque:\r\n                    if el.nodalGroupName == \"whole mesh\":\r\n                        assiStr = \"TOUT='OUI',\"\r\n                    elif el.loadType == \"Force on node\":\r\n                        assiStr = \"GROUP_NO='\" + el.nodalGroupName + \"',\"\r\n                    else:\r\n                        assiStr = \"GROUP_MA='\" + el.nodalGroupName + \"',\"\r\n                    tempStr = \"_F(\" + assiStr\r\n                    if hasConstant(functionsLocal,el.FX):\r\n                        tempStr = tempStr + \"FX=\" + el.FX + \",\"\r\n                    if hasConstant(functionsLocal,el.FY):\r\n                        tempStr = tempStr + \"FY=\" + el.FY + \",\"\r\n                    if hasConstant(functionsLocal,el.FZ):\r\n                        tempStr = tempStr + \"FZ=\" + el.FZ + \",\"\r\n                    if hasConstantsTorque:\r\n                        if hasConstant(functionsLocal,el.MX):\r\n                            tempStr = tempStr + \"MX=\" + el.MX + \",\"\r\n                        if hasConstant(functionsLocal,el.MY):\r\n                            tempStr = tempStr + \"MY=\" + el.MY + \",\"\r\n                        if hasConstant(functionsLocal,el.MZ):\r\n                            tempStr = tempStr + \"MZ=\" + el.MZ + \",\"\r\n                    tempStr = tempStr + \"),\" + ls\r\n                    if el.loadType == \"Force on volume\":\r\n                        forceOnVolumeStr = forceOnVolumeStr + tempStr\r\n                    elif el.loadType == \"Force on face\":\r\n                        forceOnFaceStr = forceOnFaceStr + tempStr\r\n                    elif el.loadType == \"Force on edge\":\r\n                        forceOnEdgeStr = forceOnEdgeStr + tempStr\r\n                    elif el.loadType == \"Force on node\":\r\n                        forceOnNodeStr = forceOnNodeStr + tempStr\r\n            # pressures\r\n            if el.loadType == \"Pressure\":\r\n                if hasConstant(functionsLocal,el.p):\r\n                    pressureStr = pressureStr + \"_F(GROUP_MA='\" + el.nodalGroupName + \"',PRES=\" + el.p + 
\",),\" + ls\r\n # gravity\r\n if el.loadType == \"Gravity\":\r\n g = (float(el.gX)**2+float(el.gY)**2+float(el.gZ)**2)**0.5\r\n dirStr = \"(\" + str(float(el.gX)/g) + \",\" + str(float(el.gY)/g) + \",\" + str(float(el.gZ)/g) + \",)\"\r\n if el.nodalGroupName == \"whole mesh\":\r\n assiStr = \"\"\r\n else:\r\n assiStr = \"GROUP_MA='\" + el.nodalGroupName + \"',\"\r\n gravityStr = gravityStr + \"_F(\" + assiStr + \"GRAVITE=\" + str(g) + \",DIRECTION=\" + dirStr + \",),\" + ls\r\n # centrifugal forces\r\n if el.loadType == \"Centrifugal force\":\r\n if el.nodalGroupName == \"whole mesh\":\r\n assiStr = \"TOUT='OUI',\"\r\n else:\r\n assiStr = \"GROUP_MA='\" + el.nodalGroupName + \"',\"\r\n centrifugalForceStr = centrifugalForceStr + \"_F(\" + assiStr + \"VITESSE=\" + el.omega + \",AXE=(\" + \\\r\n el.axisX + \",\" + el.axisY + \",\" + el.axisZ + \",),CENTRE=(\" + el.centerX + \",\" + el.centerY + \",\" + el.centerZ + \",),),\" + ls\r\n if not nodeJointsStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"LIAISON_SOLIDE=(\" + ls + nodeJointsStr + \"),\" + ls\r\n if not forceOnVolumeStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"FORCE_INTERNE=(\" + ls + forceOnVolumeStr + \"),\" + ls\r\n if not forceOnFaceStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"FORCE_FACE=(\" + ls + forceOnFaceStr + \"),\" + ls\r\n if not forceOnEdgeStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"FORCE_ARETE=(\" + ls + forceOnEdgeStr + \"),\" + ls\r\n if not forceOnNodeStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"FORCE_NODALE=(\" + ls + forceOnNodeStr + \"),\" + ls\r\n if not pressureStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"PRES_REP=(\" + ls + pressureStr + \"),\" + ls\r\n if not gravityStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"PESANTEUR=(\" + ls + gravityStr + \"),\" + ls\r\n if not centrifugalForceStr == \"\":\r\n affeCharMecaStr = affeCharMecaStr + \"ROTATION=(\" + ls + centrifugalForceStr + \"),\" + ls\r\n if not affeCharMecaStr == \"\":\r\n affeCharMecaStr = \"# assign constant restraints/loads and node joints\" + ls + \"CHAR=AFFE_CHAR_MECA(MODELE=MODE,\" + ls + affeCharMecaStr + \");\" + ls + ls\r\n \r\n # contact definition\r\n contactStr = \"\"\r\n if self.analysisType == \"non-linear static\" and len(self.contactSets) > 0:\r\n contactStr = \"# contact definition\" + ls +\"CONT=DEFI_CONTACT(MODELE=MODE,\"\r\n if self.contactSets[0].globalSettings.formulationType == \"discrete\":\r\n contactStr = contactStr + \"FORMULATION='DISCRETE',\"\r\n else:\r\n contactStr = contactStr + \"FORMULATION='CONTINUE',ALGO_RESO_CONT='\" + self.contactSets[0].globalSettings.contactAlgo + \"',\"\r\n if self.contactSets[0].globalSettings.frictionModel == \"Coulomb\":\r\n contactStr = contactStr + \"FROTTEMENT='COULOMB',\"\r\n if self.contactSets[0].globalSettings.formulationType == \"continuous\":\r\n contactStr = contactStr + \"ALGO_RESO_FROT='\" + self.contactSets[0].globalSettings.frictionAlgo + \"',\"\r\n else:\r\n contactStr = contactStr + \"FROTTEMENT='SANS',\"\r\n contactStr = contactStr + \"ZONE=\"\r\n for el in self.contactSets:\r\n contactStr = contactStr + ls + \"_F(GROUP_MA_MAIT='\" + el.masterName + \"',GROUP_MA_ESCL='\" + el.slaveName + \"',\"\r\n if el.globalSettings.formulationType == \"discrete\":\r\n contactStr = contactStr + \"ALGO_CONT='\" + el.contactAlgo + \"',\"\r\n if el.contactAlgo == \"PENALISATION\":\r\n contactStr = contactStr + \"E_N=\" + el.E_N + \",\"\r\n if el.globalSettings.frictionModel == \"Coulomb\":\r\n contactStr = contactStr + 
\"COULOMB=\" + el.fricCoeff + \",\" + \"ALGO_FROT='PENALISATION',E_T=\" + el.E_T + \",\"\r\n else:\r\n if el.globalSettings.frictionModel == \"Coulomb\":\r\n contactStr = contactStr + \"COULOMB=\" + el.fricCoeff + \",\"\r\n contactStr = contactStr + \"),\"\r\n contactStr = contactStr + \");\" + ls + ls\r\n\r\n # setting up and calling the solver\r\n if self.analysisType == \"linear static\":\r\n # MECA_STATIQUE\r\n solverStr = \"# calling MECA_STATIQUE\" + ls + \"RESU=MECA_STATIQUE(MODELE=MODE,CHAM_MATER=MATE,\"\r\n if not caraStr == \"\":\r\n solverStr = solverStr + \"CARA_ELEM=CARA,\"\r\n solverStr = solverStr + \"EXCIT=(\"\r\n if not affeCharMecaStr == \"\":\r\n solverStr = solverStr + \"_F(CHARGE=CHAR,\"\r\n if not tListStr == \"\" and (self.timeRampUp or self.timeRampDown):\r\n solverStr = solverStr + \"FONC_MULT=SF,),\"\r\n else:\r\n solverStr = solverStr + \"),\"\r\n if not affeCharMecaFStr == \"\":\r\n solverStr = solverStr + \"_F(CHARGE=CHARF,\"\r\n if not tListStr == \"\" and self.timeRampFunc and (self.timeRampUp or self.timeRampDown):\r\n solverStr = solverStr + \"FONC_MULT=SF,),\"\r\n else:\r\n solverStr = solverStr + \"),\"\r\n solverStr = solverStr + \"),\"\r\n if not tListStr == \"\":\r\n solverStr = solverStr + \"LIST_INST=TLIST,\"\r\n solverStr = solverStr + \"SOLVEUR=_F(METHODE='\" + self.method + \"',),);\" + ls + ls\r\n else:\r\n # STAT_NON_LINE\r\n solverStr = \"# calling STAT_NON_LINE\" + ls + \"RESU=STAT_NON_LINE(MODELE=MODE,CHAM_MATER=MATE,\"\r\n if not caraStr == \"\":\r\n solverStr = solverStr + \"CARA_ELEM=CARA,\"\r\n solverStr = solverStr + \"EXCIT=(\"\r\n if not affeCharMecaStr == \"\":\r\n solverStr = solverStr + \"_F(CHARGE=CHAR,\"\r\n if (self.timeRampUp or self.timeRampDown):\r\n solverStr = solverStr + \"FONC_MULT=SF,),\"\r\n else:\r\n solverStr = solverStr + \"),\"\r\n if not affeCharMecaFStr == \"\":\r\n solverStr = solverStr + \"_F(CHARGE=CHARF,\"\r\n if self.timeRampFunc and (self.timeRampUp or self.timeRampDown):\r\n solverStr = solverStr + \"FONC_MULT=SF,),\"\r\n else:\r\n solverStr = solverStr + \"),\"\r\n solverStr = solverStr + \"),\"\r\n if not contactStr == \"\":\r\n solverStr = solverStr + \"CONTACT=CONT,\" + ls\r\n if self.strainModel == \"Green-Lagrange\":\r\n solverStr = solverStr + \"COMPORTEMENT=_F(RELATION='ELAS',DEFORMATION='GROT_GDEP',),\" + ls\r\n solverStr = solverStr + \"NEWTON=_F(REAC_ITER=1,),INCREMENT=_F(LIST_INST=TLIST,),CONVERGENCE=_F(ITER_GLOB_MAXI=\" + self.maxIter + \",RESI_GLOB_RELA=\" + self.resi + \\\r\n \"),SOLVEUR=_F(METHODE='\" + self.method + \"',),);\" + ls + ls\r\n \r\n # compute quantities from result\r\n calcChampStr = \"\"\r\n if self.outputSet.SIGM + self.outputSet.EPS + self.outputSet.SIEQ + self.outputSet.REAC > 0:\r\n calcChampStr = \"# compute output quantities\" + ls + \"RESU=CALC_CHAMP(reuse =RESU,RESULTAT=RESU,\" + ls\r\n if self.outputSet.SIGM:\r\n calcChampStr = calcChampStr + \"CONTRAINTE=('SIGM_NOEU',),\" + ls\r\n if self.outputSet.EPS:\r\n if self.strainModel == \"Green-Lagrange\" and self.analysisType == \"non-linear static\":\r\n calcChampStr = calcChampStr + \"DEFORMATION=('EPSG_NOEU',),\" + ls\r\n else:\r\n calcChampStr = calcChampStr + \"DEFORMATION=('EPSI_NOEU',),\" + ls\r\n if self.outputSet.SIEQ:\r\n calcChampStr = calcChampStr + \"CRITERES=('SIEQ_NOEU',),\" + ls\r\n if self.outputSet.REAC:\r\n calcChampStr = calcChampStr + \"FORCE=('REAC_NODA',),\" + ls\r\n calcChampStr = calcChampStr + \");\" + ls + ls\r\n \r\n # estimate error\r\n erreurStr = \"\"\r\n if self.outputSet.ERME:\r\n erreurStr 
= \"# error estimation a posteriori \" + ls + \"RESU=CALC_ERREUR(reuse=RESU,RESULTAT=RESU,OPTION=('ERME_ELEM',),);\" + ls + ls\r\n \r\n # compute reactions at restraints\r\n reacStr = \"\"\r\n if self.outputSet.REAC and len(restraintSetsLocal) > 0:\r\n reacStr = \"# integrate reactions at restraints\" + ls + \"Reac_Sum=POST_RELEVE_T(ACTION=(\"\r\n for el in restraintSetsLocal:\r\n reacStr = reacStr + \"_F(OPERATION='EXTRACTION',INTITULE='\" + el.nodalGroupName + \\\r\n \"',RESULTAT=RESU,NOM_CHAM='REAC_NODA',GROUP_NO=('\" + el.nodalGroupName + \"',),RESULTANTE=('DX','DY','DZ',),MOMENT=('DRX','DRY','DRY',),\" + \\\r\n \"POINT=(\" + el.reacMX + \",\" + el.reacMY + \",\" + el.reacMZ + \",),),\"\r\n reacStr = reacStr + \"),);\" + ls + ls + \"IMPR_TABLE(TABLE=Reac_Sum,);\" + ls + ls\r\n \r\n # write the results to file\r\n writeStr = \"# write result to file (mechanical quantities)\" + ls + \"IMPR_RESU(FORMAT='MED',RESU=_F(RESULTAT=RESU,NOM_CHAM=('DEPL',\"\r\n if self.outputSet.SIGM:\r\n writeStr = writeStr + \"'SIGM_NOEU',\"\r\n if self.outputSet.SIEQ:\r\n writeStr = writeStr + \"'SIEQ_NOEU',\"\r\n if self.outputSet.EPS:\r\n if self.strainModel == \"Green-Lagrange\" and self.analysisType == \"non-linear static\":\r\n writeStr = writeStr + \"'EPSG_NOEU',\"\r\n else:\r\n writeStr = writeStr + \"'EPSI_NOEU',\"\r\n if self.outputSet.REAC:\r\n writeStr = writeStr + \"'REAC_NODA',\"\r\n if self.outputSet.ERME:\r\n writeStr = writeStr + \"'ERME_ELEM',\"\r\n writeStr = writeStr + \"),\"\r\n if self.outputSet.nodalGroupName == \"whole mesh\":\r\n writeStr = writeStr + \"TOUT='OUI',),);\" + ls + ls\r\n else:\r\n writeStr = writeStr + \"GROUP_MA='\" + self.outputSet.nodalGroupName + \"',),);\" + ls + ls\r\n if self.outputSet.TEMP and len(self.thermalSets) > 0:\r\n writeStr = writeStr + \"# write result to file (temperature)\" + ls \r\n for el in tempResNames:\r\n writeStr = writeStr + \"IMPR_RESU(FORMAT='MED',RESU=_F(RESULTAT=\" + el + \",NOM_CHAM='TEMP',TOUT='OUI',),);\" + ls\r\n writeStr = writeStr + ls\r\n\r\n # FIN statement\r\n finStr = \"FIN();\"\r\n \r\n # assemble everything\r\n commStr = pythonFuns + debutStr + tListStr + formuleStr + matDefiStr + meshStr + modelStr + tempFieldStr + tempResStr + matTempAssiStr + caraStr + affeCharMecaFStr + \\\r\n affeCharMecaStr + contactStr + solverStr + calcChampStr + erreurStr + reacStr + writeStr + finStr\r\n\r\n return commStr\r\n","repo_name":"kaktus018/ca_wizard_mechanical","sub_path":"cawm_classes.py","file_name":"cawm_classes.py","file_ext":"py","file_size_in_byte":55264,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"35376528345","text":"import os\nimport subprocess\n\nroot = \"/workspace\"\nwebui_path = os.path.join(root, \"stable-diffusion-webui\")\n\n# 列出需要检查的库\nlibraries = [\"libcudnn8=8.9.2.26-1+cuda11.8\", \"libcudnn8-dev=8.9.2.26-1+cuda11.8\"]\n\n# 检查每个库是否已经安装\nfor lib in libraries:\n package_name, package_version = lib.split('=')\n \n # 检查当前已安装版本\n result = subprocess.run([\"apt-cache\", \"policy\", package_name], capture_output=True, text=True)\n installed_line = [line for line in result.stdout.split('\\n') if 'Installed:' in line][0]\n installed_version = installed_line.split(':')[1].strip()\n \n # 如果未安装或版本不匹配\n if installed_version == '(none)' or installed_version != package_version:\n print(f\"Installing {lib}...\")\n subprocess.run([\"apt\", \"install\", \"-y\", lib, \"--allow-change-held-packages\"], check=True)\n else:\n print(f\"{lib} is already installed.\")\n\n# launch 
webui\nsubprocess.run([\"./webui.sh\", \"-f\"], cwd=webui_path, check=True)","repo_name":"aaronfang/RunPod_Notes","sub_path":"webui_scripts/launch_webui.py","file_name":"launch_webui.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35926843251","text":"# PROBLEM STATEMENT:\n# Compute and display the volume of an oil can,\n# using the formula VOLUME ← 3.14159 * R ↑ 2 * ALTURA.\n\nimport math\nPI = math.pi\n\nraio_input = float(input(\"Enter the radius of the oil can: \"))\naltura_input = float(input(\"Enter the height of the oil can: \"))\n\n\ndef volume_lata(raio, altura):\n    volume = PI * (raio**2) * altura\n    return round(volume, 2)\n\n\nvolume_output = volume_lata(raio_input, altura_input)\n\n\nprint(f'The volume of the oil can is {volume_output} cubic units')\n","repo_name":"BrunoNogueiraBruno/algoritmos-python","sub_path":"semana1/volume_lata.py","file_name":"volume_lata.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27345799740","text":"import matplotlib.pyplot as plt\n\n\n# Plot a handwritten digit as a grayscale image\ndef show_image(data, index):\n    plt.imshow(data.reshape((28, 28)), cmap='gray')\n    plt.savefig(f'./images/{index}.jpg')\n\n\n# Plot the loss curve\ndef plot_loss(path, loss_train, loss_test):\n    plt.figure(dpi=150)\n    plt.title('Loss Curve')\n    plt.plot(loss_train)\n    plt.plot(loss_test)\n    plt.legend(['train', 'test'])\n    plt.savefig(path+'LossCurve.jpg')\n\n\n# Plot the accuracy curve\ndef plot_acc(path, acc):\n    plt.figure(dpi=150)\n    plt.title('Accuracy Curve')\n    plt.plot(acc)\n    plt.savefig(path+'AccCurve.jpg')\n","repo_name":"qishi21/NeuralNetwork","sub_path":"common/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27743838950","text":"#!/usr/bin/env python3\n\"\"\"Program that creates the class LSTMCell that represents an LSTM unit\"\"\"\nimport numpy as np\n\n\nclass LSTMCell():\n    \"\"\"class LSTMCell\"\"\"\n    def __init__(self, i, h, o):\n        \"\"\"class constructor\"\"\"\n        self.Wf = np.random.normal(size=(i + h, h))\n        self.Wu = np.random.normal(size=(i + h, h))\n        self.Wc = np.random.normal(size=(i + h, h))\n        self.Wo = np.random.normal(size=(i + h, h))\n        self.Wy = np.random.normal(size=(h, o))\n        self.bf = np.zeros((1, h))\n        self.bu = np.zeros((1, h))\n        self.bc = np.zeros((1, h))\n        self.bo = np.zeros((1, h))\n        self.by = np.zeros((1, o))\n\n    def softmax(self, x):\n        \"\"\"Function that performs softmax\"\"\"\n        return np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)\n\n    def sigmoid(self, x):\n        \"\"\"Function that performs Sigmoid\"\"\"\n        return (1 / (1 + np.exp(-x)))\n\n    def forward(self, h_prev, c_prev, x_t):\n        \"\"\"Function that performs forward propagation for one time step\"\"\"\n        U = np.hstack((h_prev, x_t))\n        f = self.sigmoid(np.dot(U, self.Wf) + self.bf)\n        u = self.sigmoid(np.dot(U, self.Wu) + self.bu)\n        c_bar = np.tanh(np.dot(U, self.Wc) + self.bc)\n        c_next = f * c_prev + u * c_bar\n        o = self.sigmoid(np.dot(U, self.Wo) + self.bo)\n        h_next = o * np.tanh(c_next)\n        y = self.softmax(np.dot(h_next, self.Wy) + self.by)\n        return (h_next, c_next, 
y)\n","repo_name":"oscarmrt/holbertonschool-machine_learning","sub_path":"supervised_learning/0x0D-RNNs/3-lstm_cell.py","file_name":"3-lstm_cell.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3289455141","text":"#A typracer game!\n#This variation pulls out the first paragraph from a random article from Wikipedia(EN)\n\nimport time\nimport random\nfrom bs4 import BeautifulSoup as bs\nimport requests as req\ndef text_generator():\n\tURL = \"https://en.wikipedia.org/wiki/Special:Random\"\n\tr = req.get(URL)\n\tsoup = bs(r.content, 'html.parser')\n\tpara = soup.find_all('p')\n\ti=0\n\tfor i in range(len(para)):\n\t\tif para[i].text and para[i].text.strip():\n\t\t\treturn(para[i].text)\n\t\t\tbreak\n\n\n\nn = 5\ntext = text_generator()\nword_number = (int(text.count(' ')) + 1)\n\nprint(text)\n\nfor i in range(0,5):\n\tprint(n-i)\n\ttime.sleep(1)\n\nprint('Go')\nstart = time.time()\nuser_in = input()\nend = time.time()\n\nl_user_in = user_in.split()\nl_text = text.split()\n\nerror = 0\ntry:\n\tfor index, item in enumerate(l_text):\n\t\tif l_text[index] != l_user_in[index]:\n\t\t\terror += 1\n\n\twpm = round(((word_number/(end-start))*60) - error)\n\tprint(f'Your speed was {wpm} wpm and you made {error} errors!')\n\nexcept IndexError:\n\tprint('You did not complete the text!')\n","repo_name":"CupidOfDeath/FruitBasket","sub_path":"TypingTest - Rohan.py","file_name":"TypingTest - Rohan.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7179259786","text":"#! /usr/env python3\n#\n# Credit for the majority of code in this script goes to the spilo project:\n# Ref: https://github.com/zalando/spilo/blob/master/postgres-appliance/bootstrap/clone_with_wale.py\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom collections import namedtuple\nfrom dateutil.parser import parse\n\nlogging.basicConfig(format=\"%(asctime)s %(levelname)s: %(message)s\", level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef read_configuration():\n parser = argparse.ArgumentParser(\n description=\"Script to clone from S3 with support for point-in-time-recovery\"\n )\n parser.add_argument(\"--scope\", required=True, help=\"target cluster name\")\n parser.add_argument(\n \"--datadir\", required=True, help=\"target cluster postgres data directory\"\n )\n parser.add_argument(\n \"--recovery-target-time\",\n help=\"the timestamp up to which recovery will proceed (including time zone)\",\n dest=\"recovery_target_time_string\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"find a matching backup and build the wal-g \"\n \"command to fetch that backup without running it\",\n )\n args = parser.parse_args()\n\n options = namedtuple(\"Options\", \"name datadir recovery_target_time dry_run\")\n if args.recovery_target_time_string:\n recovery_target_time = parse(args.recovery_target_time_string)\n if recovery_target_time.tzinfo is None:\n raise Exception(\"recovery target time must contain a timezone\")\n else:\n recovery_target_time = None\n\n return options(args.scope, args.datadir, recovery_target_time, args.dry_run)\n\n\ndef build_walg_command(command, datadir=None, backup=None):\n cmd = [\"wal-g\"] + [command]\n if command == \"backup-fetch\":\n if datadir is None or backup is None:\n raise Exception(\"backup-fetch requires 
datadir and backup arguments\")\n cmd.extend([datadir, backup])\n elif command != \"backup-list\":\n raise Exception(\"invalid {0} command {1}\".format(cmd[0], command))\n return cmd\n\n\ndef fix_output(output):\n \"\"\"WAL-G is using spaces instead of tabs and writes some garbage before the actual header\"\"\"\n\n started = None\n for line in output.decode(\"utf-8\").splitlines():\n if not started:\n started = re.match(r\"^name\\s+last_modified\\s+\", line) or re.match(\n r\"^name\\s+modified\\s+\", line\n )\n if started:\n line = line.replace(\" modified \", \" last_modified \")\n if started:\n yield \"\\t\".join(line.split())\n\n\ndef choose_backup(backup_list, recovery_target_time):\n \"\"\"Pick up the latest backup file starting before time recovery_target_time\"\"\"\n\n match_timestamp = match = None\n for backup in backup_list:\n last_modified = parse(backup[\"last_modified\"])\n if last_modified < recovery_target_time:\n if match is None or last_modified > match_timestamp:\n match = backup\n match_timestamp = last_modified\n if match is not None:\n return match[\"name\"]\n\n\ndef list_backups(env):\n backup_list_cmd = build_walg_command(\"backup-list\")\n output = subprocess.check_output(backup_list_cmd, env=env)\n reader = csv.DictReader(fix_output(output), dialect=\"excel-tab\")\n return list(reader)\n\n\ndef find_backup(recovery_target_time, env):\n backup_list = list_backups(env)\n if backup_list:\n if recovery_target_time:\n backup = choose_backup(backup_list, recovery_target_time)\n if backup:\n return backup\n else:\n return \"LATEST\"\n if recovery_target_time:\n raise Exception(\n \"Could not find any backups prior to the point in time {0}\".format(\n recovery_target_time\n )\n )\n raise Exception(\"Could not find any backups\")\n\n\ndef run_clone_from_s3(options):\n env = os.environ.copy()\n\n backup_name = find_backup(options.recovery_target_time, env)\n\n backup_fetch_cmd = build_walg_command(\"backup-fetch\", options.datadir, backup_name)\n logger.info(\n \"clone-with-walg: cloning cluster %s using %s\",\n options.name,\n \" \".join(backup_fetch_cmd),\n )\n logger.info(\"clone-with-walg: called with %s\", sys.argv[1:])\n if not options.dry_run:\n ret = subprocess.call(backup_fetch_cmd, env=env)\n if ret != 0:\n raise Exception(\"wal-g backup-fetch exited with exit code {0}\".format(ret))\n return 0\n\n\ndef main():\n options = read_configuration()\n try:\n run_clone_from_s3(options)\n except Exception:\n logger.exception(\"clone-with-walg: clone failed\")\n return 1\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"input-output-hk/bitte-cells","sub_path":"cells/patroni/packages/clone-with-walg.py","file_name":"clone-with-walg.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"14266904067","text":"from PySide6.QtWidgets import QMessageBox\n\n\ndef pop_up(title, txt, icon='information'):\n icons = {'information': QMessageBox.Information,\n 'warning': QMessageBox.Warning,\n 'critical': QMessageBox.Critical,\n 'question': QMessageBox.Question,\n 'no_icon': QMessageBox.NoIcon}\n\n msg = QMessageBox()\n msg.setWindowTitle(title)\n msg.setIcon(icons[icon])\n msg.setText(txt)\n msg.setStyleSheet('color: #f8f8f2; background-color: #282a36;')\n 
msg.exec()\n","repo_name":"sandenbergmelo/Gerador-de-senhas-python","sub_path":"utils/pop_up.py","file_name":"pop_up.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13800724296","text":"\"\"\"\nWS 2021/22 - TH Brandenburg - OOS - Prof. Dr. Thomas Preuss\nSemester project Python - Pygame - Sidescroller Hulahu\n@author: Annemie Berning, Carolin Jacob\n@version: 1.0\n@date: 01.12.2021\n\"\"\"\n\nimport pygame\nfrom pygame.locals import *\nimport sys\nfrom Endscreen import Endscreen\nfrom Player import Player\nfrom Game import Game\nfrom Background import Background\nfrom Startscreen import Startscreen\nfrom Button import Button\n\nclass Hulahu():\n    \"\"\"\n    initializes the game with the main loop\n\n    Attributes\n    ----------\n    W, H : int - width and height\n    screen : Display - display module\n    runner : Player - initialize player\n    moving_speed : int - background scrolling speed\n    buttonList, buttonListEndScreen : list - list of Buttons used in Start- and Endscreen \n    game : Game - initialize game\n    clock : Clock - initialize clock module\n    back_ground : Background - initialize Background\n    final_screen : Endscreen - initialize Endscreen\n    start_screen : Startscreen - initialize Startscreen\n    speed : int - max Framerate\n    startScreen : Boolean - activates startscreen\n    levelRunning : Boolean - activates running mode \n    endScreen : Boolean - activates endscreen\n    levelTime : int - millisec until running mode finishes \n    castleTime : int - millisec until castle is drawn\n    showCastle : Boolean - activates castle\n    owl, win : Sound - game sound\n    channel1, channel2 : Channel - sound channel\n\n    Methods\n    -------\n    initGame() : set caption, add user events\n    newLevel(level, obstacleSpeed) : restarts level with given level and speed\n    mainLoop() : game loop to run game with running mode, start- and endscreen\n    \"\"\"\n    def __init__(self):\n        self.W, self.H = 800, 447\n        self.screen = pygame.display.set_mode(\n            (self.W, self.H))  # setting screen\n        self.screen.fill((255, 255, 255))\n        self.runner = Player(200, 330, 40, 55)\n        self.moving_speed = 5\n        self.buttonList = [Button(10, 390, 107, 45, 'leicht'), Button(\n            132, 390, 107, 45, 'schwer'), Button(254, 390, 107, 45, 'ende')]\n        self.buttonListEndScreen = [Button(410, 390, 107, 45, 'leicht'), Button(\n            532, 390, 107, 45, 'schwer'), Button(654, 390, 107, 45, 'ende')]\n        self.game = Game(1, self.runner, 0)\n        self.clock = pygame.time.Clock()\n        self.back_ground = Background(self.moving_speed, self.game)\n        self.final_screen = Endscreen(self.game, self.screen)\n        self.start_screen = Startscreen(self.screen)\n        self.speed = 30  # Max Framerate\n        self.startScreen = True\n        self.levelRunning = False\n        self.endScreen = False\n        self.levelTime = 20000\n        self.castleTime = 14500\n        self.showCastle = False\n        self.owl = pygame.mixer.Sound('Sound/owl.wav')\n        self.win = pygame.mixer.Sound('Sound/win.wav')\n        self.channel1 = pygame.mixer.Channel(0)\n        self.channel2 = pygame.mixer.Channel(1)\n\n    def initGame(self):\n        \"\"\"\n        set caption, add user events\n        \"\"\"\n        pygame.display.set_caption('Hulahu auf Reisen')  # setting game title\n\n        # Adding User events\n        self.add_obstacle = pygame.USEREVENT + 1\n        pygame.time.set_timer(\n            self.add_obstacle, self.game.generate_random_time(self.game.level), loops=1)\n\n        self.level_end_timer = pygame.USEREVENT + 2\n        pygame.time.set_timer(self.level_end_timer, self.levelTime, loops=1)\n\n        self.show_castle_timer = pygame.USEREVENT + 3\n        
pygame.time.set_timer(self.show_castle_timer, self.castleTime, loops=1)\n\n    def newLevel(self, level, obstacleSpeed):\n        \"\"\"\n        restarts level with given level and speed\n        @param level: int\n        @param obstacleSpeed: int\n        \"\"\"\n        # initialize new game and new Background with new HitUi\n        self.game = Game(level, self.runner, obstacleSpeed)\n        self.back_ground = Background(self.moving_speed, self.game)\n\n        # set needed event-timers\n        pygame.time.set_timer(\n            self.show_castle_timer, self.castleTime, loops=1)\n        pygame.time.set_timer(\n            self.add_obstacle, self.game.generate_random_time(self.game.level), loops=1)\n        pygame.time.set_timer(\n            self.level_end_timer, self.levelTime, loops=1)\n\n        # set switches\n        self.startScreen = False\n        self.showCastle = False\n        self.endScreen = False\n        self.levelRunning = True\n\n        # start sound\n        self.channel2.play(self.owl)\n\n    def mainLoop(self):\n        \"\"\"\n        game loop to run game with running mode, start- and endscreen\n        \"\"\"\n        while True:\n            # Main loop\n            while self.levelRunning:\n                # Eventhandler: Cycles through all occurring events\n                for event in pygame.event.get():\n                    if event.type == self.add_obstacle:\n                        self.game.add_random_obstacle(self.showCastle)\n                        pygame.time.set_timer(\n                            self.add_obstacle, self.game.generate_random_time(self.game.level), loops=1)\n                    if event.type == self.show_castle_timer:\n                        self.showCastle = True\n                    if event.type == self.level_end_timer:\n                        self.channel1.play(self.win)\n                        self.endScreen = True\n                        self.levelRunning = False\n\n                    keys = pygame.key.get_pressed()\n                    if keys[pygame.K_SPACE] or keys[pygame.K_UP]:\n                        if not(self.runner.jumping):\n                            self.runner.jumping = True\n                    if keys[pygame.K_h]:\n                        self.game.show_hitboxes = not self.game.show_hitboxes\n                    if keys[pygame.K_e] or keys[pygame.K_RIGHT]:\n                        if not(self.runner.fading):\n                            self.runner.fading = True\n\n                    if event.type == QUIT:\n                        pygame.quit()\n                        sys.exit()\n\n                self.back_ground.update(self.screen, self.showCastle)\n                self.runner.update(self.screen)\n                self.game.update_obstacles(self.screen, self.showCastle)\n                pygame.display.update()\n                self.clock.tick(self.speed)\n\n            while self.startScreen:\n                for event in pygame.event.get():\n                    keys = pygame.key.get_pressed()\n                    if event.type == MOUSEBUTTONDOWN:\n                        if self.start_screen.collsionDetection(self.buttonList, pygame.mouse.get_pos()) == 'leicht':\n                            self.newLevel(1,7)\n\n                        elif self.start_screen.collsionDetection(self.buttonList, pygame.mouse.get_pos()) == 'schwer':\n                            self.newLevel(2,10)\n\n                        elif self.start_screen.collsionDetection(self.buttonList, pygame.mouse.get_pos()) == 'ende':\n                            pygame.quit()\n                            sys.exit()\n\n                    if event.type == QUIT:\n                        pygame.quit()\n                        sys.exit()\n\n                self.start_screen.update(self.screen, self.buttonList)\n                pygame.display.update()\n\n            while self.endScreen:\n                for event in pygame.event.get():\n                    keys = pygame.key.get_pressed()\n                    if event.type == MOUSEBUTTONDOWN:\n                        if self.final_screen.collsionDetection(self.buttonListEndScreen, pygame.mouse.get_pos()) == 'leicht':\n                            self.newLevel(1,7)\n\n                        elif self.final_screen.collsionDetection(self.buttonListEndScreen, pygame.mouse.get_pos()) == 'schwer':\n                            self.newLevel(2,10)\n\n                        elif self.final_screen.collsionDetection(self.buttonListEndScreen, pygame.mouse.get_pos()) == 'ende':\n                            pygame.quit()\n                            sys.exit()\n\n                    if event.type == QUIT:\n                        pygame.quit()\n                        sys.exit()\n\n                self.final_screen.update(self.screen, self.game, self.buttonListEndScreen, self.endScreen)\n                pygame.display.update()\n\n\nif __name__ == '__main__':\n    pygame.init()\n    hulahu = Hulahu()\n    hulahu.initGame()\n    
hulahu.mainLoop()\n","repo_name":"jacocaro/OOSWS2122","sub_path":"Hulahu.py","file_name":"Hulahu.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11779748815","text":"from bisect import bisect_left, bisect_right\n\nn = int(input())\nA = []\ncount=0\nfor i in range(n):\n    A.append(int(input()))\n\nA.sort() # sort the list\n\nfor i in range(n-2): \n    for j in range(i+1, n-1): # pick a and b\n        r = 3*A[j]-2*A[i]\n        l = 2*A[j]-A[i]\n        count += bisect_right(A[j+1:], r) - bisect_left(A[j+1:], l) # add the number of possible values for c\n\nprint(count)\n\n# Sort the input values to satisfy the first condition. Rearranging the second condition gives 2b-a<=c<=3b-2a.\n# Pick a and b from the sorted list with the for loops, and find the values of c satisfying 2b-a<=c<=3b-2a via binary search.\n\n# bisect_right(a, x): binary-searches list a and, if x is in a, returns the position after it. \n# bisect_left(a, x): binary-searches list a and, if x is in a, returns its position.\n# If x is not in list a, both functions return the position where x would be inserted in ascending order.\n# Therefore the difference between the indices returned by the two functions is the number of possible values for c.\n\n# Running time analysis\n# Picking a and b in the two for loops costs T(n) = (n-2)+(n-3)+(n-4)...+1 ==>O(n^2), and each binary search is always O(log(n)), so the total is O(n^2logn).","repo_name":"SeHeon-Park/Study_Algorithm","sub_path":"고급문제해결/1번 (딩__동__댕)/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"26475477058","text":"from typing import *\n\nclass Solution:\n    def findBall_recursion(self, grid: List[List[int]]) -> List[int]:\n        m = len(grid)\n        n = len(grid[0])\n        res = []\n\n        def findDropColumn(row, col):\n            if row == m:\n                return col\n\n            nextCol = col + grid[row][col]\n            if nextCol >= 0 and nextCol < n \\\n                    and grid[row][nextCol] == grid[row][col]:\n                return findDropColumn(row+1, nextCol)\n            else:\n                return -1\n\n        for startpos in range(n):\n            res.append(findDropColumn(0, startpos))\n\n        return res\n\n    def findBall_dp(self, grid: List[List[int]]) -> List[int]:\n        m = len(grid)\n        n = len(grid[0])\n        table = [[-1 for _ in range(n)] for _ in range(m+1)]\n\n        for row in range(m, -1, -1):\n            for col in range(n):\n                if row == m:\n                    table[row][col] = col\n                    continue\n\n                nextCol = col + grid[row][col]\n                if nextCol >= 0 and nextCol < n and grid[row][col] == grid[row][nextCol]:\n                    table[row][col] = table[row+1][nextCol]\n                else:\n                    table[row][col] = -1\n\n        # for row in table:\n        #     print(row)\n        # print()\n        return table[0]\n\n\n    def findBall_ballbyball(self, grid: List[List[int]]) -> List[int]:\n        m = len(grid)\n        n = len(grid[0])\n        i = j = 0\n        res = []\n\n        for s in range(n):\n            i = 0\n            j = s\n            while i >= 0 and i < m and j >= 0 and j < n:\n                adjacent = j + grid[i][j]\n                if adjacent >= 0 \\\n                        and adjacent < n \\\n                        and grid[i][j] == grid[i][adjacent]:\n                    j += grid[i][j]\n                    i += 1\n                else:\n                    break\n\n            if i == m:\n                res.append(j)\n            else:\n                res.append(-1)\n\n        return res\n\nif __name__ == '__main__':\n    g1 = [[1, -1]]\n    g2 = [[1, 1]]\n    g3 = [[-1, -1]]\n    g4 = [[1, 1, 1, 1, 1, 1], [-1, -1, -1, -1, -1, -1],\n          [1, 1, 1, 1, 1, 1], [-1, -1, -1, -1, -1, -1]]\n\n\n    print(Solution().findBall_dp(g1))\n    print(Solution().findBall_dp(g2))\n    print(Solution().findBall_dp(g3))\n    print(Solution().findBall_dp(g4))","repo_name":"sleepycat233/leetcoding","sub_path":"leetcode_src/src/WhereWillTheBallFall.py","file_name":"WhereWillTheBallFall.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72482755786","text":"# You must open() the file you want to create and store it in a variable.\n# When calling open(): r : read / w : write (+overwrites) / a : append\n# f = open('file name to create', 'mode')\n\nf = 
open('ssafy.txt', 'w')\n\nfor i in range(10):\n    f.write(f'This is line {i+1}.\\n')\nf.close()\n\n# with statement (context manager)\nwith open('with_ssafy.txt', 'w') as f:\n    for i in range(10):\n        f.write(f'This is line {i+1}.\\n')\n    \n\n# writelines() : given a list, each element is written as one line.\nwith open('ssafy.txt', 'w') as f:\n    f.writelines(['0\\n', '1\\n', '2\\n', '3\\n'])\n\n\n# Escape characters\n# \\n : newline (moves to the next line)\n# \\t : tab character\n# \\\\ : prints \\\n# \\' : single quote\n# \\\" : double quote","repo_name":"baambox5/TIL","sub_path":"00_startcamp/03_day/txt_write.py","file_name":"txt_write.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20026196757","text":"#!/usr/bin/python\n\nimport sys, getopt\n\nimport image_scrape\nimport stats_scrape\nimport message\n\ndef main(argv):\n    \n    try:\n        command = argv[0]\n        args1 = argv[1]\n        args2 = argv[2]\n\n        if command == 'image' or command == 'i':\n            image_scrape.scrape(args1, args2)\n        elif command == 'stats' or command == 's':\n            stats_scrape.scrape(args1, args2)\n\n    except IndexError:\n        message.invalid_command()\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","repo_name":"restaadiputra/pokedb-scraper","sub_path":"poke_scraper.py","file_name":"poke_scraper.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16109753128","text":"\n\"\"\" \nSingle figure and axes with two items\n=======================================\n\nOnly the pressure q[0] is plotted. \n\nIn this example the line and points are plotted in different colors by\nspecifying a second item on the same axes.\n\"\"\" \n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n    \n    \"\"\" \n    Specify what is to be plotted at each frame.\n    Input:  plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n    Output: a modified version of plotdata.\n    \n    \"\"\" \n\n    plotdata.clearfigures()  # clear any old figures,axes,items data\n\n    # Figure for q[0]\n    plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n\n    # Set up for axes in this figure:\n    plotaxes = plotfigure.new_plotaxes()\n    plotaxes.xlimits = 'auto'\n    plotaxes.ylimits = [-.5,1.1]\n    plotaxes.title = 'Pressure'\n\n    # Set up for item on these axes:\n    plotitem = plotaxes.new_plotitem(name='line', plot_type='1d')\n    plotitem.plot_var = 0\n    plotitem.plotstyle = '-'\n    plotitem.color = 'b'\n\n    # Set up for item on these axes:\n    plotitem = plotaxes.new_plotitem(name='points', plot_type='1d')\n    plotitem.plot_var = 0\n    plotitem.plotstyle = 'o'\n    plotitem.color = '#ff00ff'   # any color supported by matplotlib\n    \n    # Parameters used only when creating html and/or latex hardcopy\n    # e.g., via pyclaw.plotters.frametools.printframes:\n\n    plotdata.printfigs = True                # print figures\n    plotdata.print_format = 'png'            # file format\n    plotdata.print_framenos = 'all'          # list of frames to print\n    plotdata.print_fignos = 'all'            # list of figures to print\n    plotdata.html = True                     # create html files of plots?\n    plotdata.html_homelink = '../README.html'# pointer for index page\n    plotdata.latex = True                    # create latex file of plots?\n    plotdata.latex_figsperline = 1           # layout of plots\n    plotdata.latex_framesperline = 2         # layout of plots\n    plotdata.latex_makepdf = True            # also run pdflatex?\n\n    return plotdata\n\n    
\n","repo_name":"geoflows/D-Claw","sub_path":"doc/sphinx/example-acoustics-1d/setplot_2.py","file_name":"setplot_2.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"10285790060","text":"import os\r\nimport pdb\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.metrics import accuracy_score, roc_auc_score\r\nimport torch\r\nfrom torch import nn\r\nfrom torch_geometric.nn.models import LightGCN\r\nimport wandb\r\nimport yaml\r\nfrom .utils import get_logger, logging_conf\r\n\r\n\r\nlogger = get_logger(logger_conf=logging_conf)\r\n\r\n\r\ndef build(n_node: int, weight: str = None, **kwargs):\r\n model = LightGCN(num_nodes=n_node, **kwargs)\r\n if weight:\r\n if not os.path.isfile(path=weight):\r\n logger.fatal(\"Model Weight File Not Exist\")\r\n logger.info(\"Load model\")\r\n state = torch.load(f=weight)[\"model\"]\r\n model.load_state_dict(state)\r\n return model\r\n else:\r\n logger.info(\"No load model\")\r\n return model\r\n\r\n\r\ndef run(\r\n args,\r\n model: nn.Module,\r\n train_data: dict,\r\n valid_data: dict = None,\r\n n_epochs: int = 100,\r\n learning_rate: float = 0.01,\r\n model_dir: str = None,\r\n):\r\n model.train()\r\n\r\n optimizer = torch.optim.Adam(params=model.parameters(), lr=learning_rate)\r\n\r\n os.makedirs(name=model_dir, exist_ok=True)\r\n\r\n if valid_data is None:\r\n eids = np.arange(len(train_data[\"label\"]))\r\n eids = np.random.permutation(eids)[:1000]\r\n edge, label = train_data[\"edge\"], train_data[\"label\"]\r\n label = label.to(\"cpu\").detach().numpy()\r\n pdb.set_trace()\r\n valid_data = dict(edge=edge[:, eids], label=label[eids])\r\n\r\n logger.info(f\"Training Started : n_epochs={n_epochs}\")\r\n best_auc, best_epoch = 0, -1\r\n\r\n for e in range(n_epochs):\r\n logger.info(\"Epoch: %s\", e)\r\n # TRAIN\r\n train_auc, train_acc, train_loss = train(train_data=train_data, model=model, optimizer=optimizer)\r\n \r\n # VALID\r\n auc, acc = validate(valid_data=valid_data, model=model)\r\n wandb.log(dict(train_loss_epoch=train_loss,\r\n train_acc_epoch=train_acc,\r\n train_auc_epoch=train_auc,\r\n valid_acc_epoch=acc,\r\n valid_auc_epoch=auc))\r\n\r\n if auc > best_auc:\r\n logger.info(\"Best model updated AUC from %.4f to %.4f\", best_auc, auc)\r\n best_auc, best_epoch = auc, e\r\n best_acc = acc\r\n torch.save(obj= {\"model\": model.state_dict(), \"epoch\": e + 1},\r\n f=os.path.join(model_dir, f\"lgcn_best_model.pt\"))\r\n torch.save(obj={\"model\": model.state_dict(), \"epoch\": e + 1},\r\n f=os.path.join(model_dir, f\"last_model.pt\"))\r\n logger.info(f\"Best Weight Confirmed : {best_epoch+1}'th epoch\")\r\n\r\n if args.sweep_run:\r\n wandb.log({\r\n 'val_auc': best_auc,\r\n 'val_acc': best_acc,\r\n })\r\n \r\n curr_dir = __file__[:__file__.rfind('/')+1]\r\n with open(curr_dir + '../sweep_best_auc.yaml') as file:\r\n output = yaml.load(file, Loader=yaml.FullLoader)\r\n file.close()\r\n \r\n if output[args.model.lower()]['best_auc'] < best_auc:\r\n output[args.model.lower()]['best_auc'] = float(best_auc)\r\n output[args.model.lower()]['parameter'] = dict(zip(dict(wandb.config).keys(),map(lambda x: x if type(x) == str else float(x) , dict(wandb.config).values())))\r\n \r\n with open(curr_dir + '../sweep_best_auc.yaml', 'w') as file:\r\n yaml.dump(output, file, default_flow_style=False)\r\n file.close()\r\n \r\n return model.get_embedding(train_data['edge']).detach().cpu().numpy()\r\n\r\n\r\n\r\n\r\ndef run_kfold(\r\n args,\r\n 
train_data: dict,\r\n n_node: int,\r\n device: str,\r\n n_epochs: int = 100,\r\n learning_rate: float = 0.01,\r\n model_dir: str = None,\r\n):\r\n \r\n folds_weights = []\r\n\r\n for i in range(args.n_folds):\r\n model = build(\r\n n_node=n_node,\r\n embedding_dim=args.hidden_dim,\r\n num_layers=args.n_layers,\r\n alpha=args.alpha\r\n )\r\n\r\n model = model.to(device)\r\n eids = np.arange(len(train_data[\"label\"]))\r\n eids = np.random.permutation(eids)[1000*i:1000*(i+1)]\r\n edge, label = train_data[\"edge\"], train_data[\"label\"]\r\n label = label.to(\"cpu\").detach().numpy()\r\n valid_data = dict(edge=edge[:, eids], label=label[eids])\r\n\r\n model.train()\r\n\r\n optimizer = torch.optim.Adam(params=model.parameters(), lr=learning_rate)\r\n\r\n os.makedirs(name=model_dir, exist_ok=True)\r\n\r\n logger.info(f\"Training Started : n_epochs={n_epochs}\")\r\n best_auc, best_epoch = 0, -1\r\n\r\n for e in range(n_epochs):\r\n logger.info(\"Epoch: %s\", e)\r\n # TRAIN\r\n train_auc, train_acc, train_loss = train(train_data=train_data, model=model, optimizer=optimizer)\r\n \r\n # VALID\r\n auc, acc = validate(valid_data=valid_data, model=model)\r\n wandb.log(dict(train_loss_epoch=train_loss,\r\n train_acc_epoch=train_acc,\r\n train_auc_epoch=train_auc,\r\n valid_acc_epoch=acc,\r\n valid_auc_epoch=auc))\r\n\r\n if auc > best_auc:\r\n logger.info(\"Best model updated AUC from %.4f to %.4f\", best_auc, auc)\r\n best_auc, best_epoch = auc, e\r\n best_acc = acc\r\n best_model = model.state_dict()\r\n \r\n folds_weights.append(best_model)\r\n logger.info(f\"Best Weight Confirmed : {best_epoch+1}'th epoch\")\r\n\r\n average_weights = {}\r\n for key in folds_weights[0].keys():\r\n average_weights[key] = torch.stack([fold[key].float() for fold in folds_weights], dim=0).mean(dim=0)\r\n for key in average_weights: average_weights[key].to(dtype = torch.int64)\r\n\r\n model = LightGCN(num_nodes=n_node)\r\n model.load_state_dict(average_weights)\r\n torch.save(obj= {\"model\": model.state_dict()},\r\n f=os.path.join(model_dir, f\"lgcn_best_model_kfold.pt\"))\r\n return \r\n\r\n\r\n\r\n\r\ndef train(model: nn.Module, train_data: dict, optimizer: torch.optim.Optimizer):\r\n pred = model(train_data[\"edge\"])\r\n loss = model.link_pred_loss(pred=pred, edge_label=train_data[\"label\"])\r\n \r\n prob = model.predict_link(edge_index=train_data[\"edge\"], prob=True)\r\n prob = prob.detach().cpu().numpy()\r\n\r\n label = train_data[\"label\"].cpu().numpy()\r\n acc = accuracy_score(y_true=label, y_pred=prob > 0.5)\r\n auc = roc_auc_score(y_true=label, y_score=prob)\r\n\r\n # backward\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n logger.info(\"TRAIN AUC : %.4f ACC : %.4f LOSS : %.4f\", auc, acc, loss.item())\r\n return auc, acc, loss\r\n\r\n\r\ndef validate(valid_data: dict, model: nn.Module):\r\n with torch.no_grad():\r\n prob = model.predict_link(edge_index=valid_data[\"edge\"], prob=True)\r\n prob = prob.detach().cpu().numpy()\r\n \r\n label = valid_data[\"label\"]\r\n acc = accuracy_score(y_true=label, y_pred=prob > 0.5)\r\n auc = roc_auc_score(y_true=label, y_score=prob)\r\n logger.info(\"VALID AUC : %.4f ACC : %.4f\", auc, acc)\r\n return auc, acc\r\n\r\n\r\ndef inference(model: nn.Module, data: dict, output_dir: str):\r\n model.eval()\r\n with torch.no_grad():\r\n pred = model.predict_link(edge_index=data[\"edge\"], prob=True)\r\n \r\n logger.info(\"Saving Result ...\")\r\n pred = pred.detach().cpu().numpy()\r\n os.makedirs(name=output_dir, exist_ok=True)\r\n write_path = 
os.path.join(output_dir, \"submission.csv\")\r\n pd.DataFrame({\"prediction\": pred}).to_csv(path_or_buf=write_path, index_label=\"id\")\r\n logger.info(\"Successfully saved submission as %s\", write_path)\r\n\r\n","repo_name":"boostcampaitech5/level2_dkt-recsys-02","sub_path":"code/lightgcn/lightgcn/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72184861386","text":"import numpy as np\nfrom numpy import linalg as LA\nimport tensorflow as tf\n# import matplotlib.pyplot as plt\nimport itertools\n\n\ndef nn_run(k_squared, encoder_init_weights, decoder_init_weights,\n\tlearning_rates, optimizers, encoder_activations, decoder_activations, init_weights_function, \n\tinit_bias_function, num_units_list, m, train_batch_size, mc_batch_size, num_epochs, x_stddev): \n\t'''\n\tk_squared: k_squared value for cost function\n\tencoder_init_weights: a list of initial weights for encoder. \n\tdecoder_init_weights: a list of initial weights for decoder. \n\tlearning_rates: list of two learning rates (for encoder and decoder)\n\toptimizers: list of two optimizers (for encoder and decoder)\n\tencoder_activations: a list of activation functions (variable length)\n\tdecoder_activations: a list of activation functions (variable length)\n\tinit_weights_function: Weight initialization function. \n\tinit_bias_function: Bias initialization function\n\tnum_units_list: list of unit numbers, of the form [m, ..., m] where m is dimension.\n\tm: Dimension of input/output \n\ttrain_batch_size: Number of samples in a training batch \n\tmc_batch_size: Number of samples in a Monte Carlo batch (for testing)\n\tnum_epochs: Number of epoch steps to take in training\n\tx_stddev: The standard deviation of the x0 input \n\t'''\n\tif not(k_squared): \n\t\tk_squared = 0.04\n\t\n\tg1 = tf.Graph()\n\n\tx0 = tf.placeholder(tf.float32, [None, 1])\n\tz = tf.placeholder(tf.float32, [None, 1])\n\t\n\tnum_encoder_layers = len(encoder_activations)\n\tnum_decoder_layers = len(decoder_activations)\n\n\t# xavier_init = tf.glorot_uniform_initializer()\n\n\t#declare encoder\n\tencoder_params = []\n\tfor i in range(num_encoder_layers): \n\t\tw_name = 'W' + str(i + 1)\n\t\tb_name = 'b' + str(i + 1)\n\t\tif encoder_init_weights: \n\t\t\tassert len(encoder_init_weights) == 2 * num_encoder_layers, 'Wrong number of initial weights!'\n\t\t\tinit_weight, init_bias = encoder_init_weights[2 * i], encoder_init_weights[1 + (2 * i)]\n\t\t\tencoder_params.append(tf.Variable(initial_value=init_weight, name=w_name))\n\t\t\tencoder_params.append(tf.Variable(initial_value=init_bias, name=b_name))\n\t\telse: \n\t\t\tfan_in, fan_out = num_units_list[i], num_units_list[i + 1]\n\t\t\tencoder_params.append(tf.Variable(init_weights_function([fan_in, fan_out]), name=w_name))\n\t\t\tencoder_params.append(tf.Variable(init_bias_function([fan_out]), name=b_name))\n\n\t#declare decoder\n\tdecoder_params = []\n\tfor j in range(num_decoder_layers): \n\t\ttotal_index = j + num_encoder_layers\n\t\tw_name = 'W' + str(total_index + 1)\n\t\tb_name = 'b' + str(total_index + 1)\n\t\tif decoder_init_weights: \n\t\t\tassert len(decoder_init_weights) == 2 * num_decoder_layers, 'Wrong number of initial weights!'\n\t\t\tinit_weight, init_bias = decoder_init_weights[2 * j], decoder_init_weights[1 + (2 * j)]\n\t\t\tdecoder_params.append(tf.Variable(initial_value=init_weight, name=w_name))\n\t\t\tdecoder_params.append(tf.Variable(initial_value=init_bias, 
name=b_name))\n\t\telse: \n\t\t\tfan_in, fan_out = num_units_list[total_index], num_units_list[total_index + 1]\n\t\t\tdecoder_params.append(tf.Variable(init_weights_function([fan_in, fan_out]), name=w_name))\n\t\t\tdecoder_params.append(tf.Variable(init_bias_function([fan_out]), name=b_name))\n\n\t#Encoder forward pass \n\tcurrent_hidden = x0 \n\tfor i in range(num_encoder_layers): \n\t\tcurrent_weight, current_bias = encoder_params[2 * i], encoder_params[1 + (2 * i)]\n\t\taffine_forward = tf.add(tf.matmul(current_hidden, current_weight), current_bias)\n\t\tcurrent_hidden = encoder_activations[i](affine_forward)\n\n\tu1 = current_hidden\n\tu1_cost = k_squared * tf.reduce_mean(tf.reduce_sum((u1)**2, axis=1))\n\n\tx1 = u1 + x0\n\tx1_noise = x1 + z\n\tcurrent_hidden = x1_noise\n\t#Decoder forward pass \n\tfor j in range(num_decoder_layers): \n\t\tcurrent_weight, current_bias = decoder_params[2 * j], decoder_params[1 + (2 * j)]\n\t\taffine_forward = tf.add(tf.matmul(current_hidden, current_weight), current_bias)\n\t\tcurrent_hidden = decoder_activations[j](affine_forward)\n\n\tu2 = current_hidden\n\tx2 = x1 - u2\n\tx2_cost = tf.reduce_mean(tf.reduce_sum((x2)**2, axis=1))\n\n\twits_cost = x2_cost + u1_cost\n\n\t# Define gradients and optimizers \n\tencoder_lr, decoder_lr = learning_rates[0], learning_rates[1]\n\n\tencoder_opt = optimizers[0](learning_rate = encoder_lr)\n\tdecoder_opt = optimizers[1](learning_rate = decoder_lr)\n\n\tgrads = tf.gradients(wits_cost, encoder_params + decoder_params)\n\tgrads1 = grads[:len(encoder_params)]\n\tgrads2 = grads[len(encoder_params):]\n\n\ttrain_op1 = encoder_opt.apply_gradients(zip(grads1, encoder_params))\n\ttrain_op2 = decoder_opt.apply_gradients(zip(grads2, decoder_params))\n\ttrain_op = tf.group(train_op1, train_op2)\n\n\t# Training here. 
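\n\t# The fixed Monte Carlo (MC) evaluation batch sampled below is reused at every\n\t# logging step, so the reported MC cost stays comparable across epochs; fresh\n\t# training batches are drawn inside the epoch loop.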
\n\n\tmc_x_batch = np.random.normal(size=(mc_batch_size, m), scale = x_stddev)\n\tmc_z_batch = np.random.normal(size=(mc_batch_size, m), scale = 1.0)\n\tmc_losses = []\n\n\tepoch_step = int(num_epochs/20)\n\n\tprint('Beginning Training....')\n\tprint('Training Batch Size: {}, MC Batch Size: {}'.format(train_batch_size, mc_batch_size))\n\n\t#declare testing stuf\n\tnum_x0_points = 100\n\ttest_averaging = 50\n\tx0_test = np.linspace(-3 * x_stddev, 3 * x_stddev, num=num_x0_points)\n\tz_test = np.random.normal(scale=1, size=num_x0_points)\n\tu1_test, u2_test, y2_test = np.zeros((1, num_x0_points)), np.zeros((1, num_x0_points)), np.zeros((1, num_x0_points))\n\n\twith tf.Session() as sess: \n\t\tsess.run(tf.global_variables_initializer())\n\t\tfor epoch in range(num_epochs): \n\t\t\tx_batch = np.random.normal(size=(train_batch_size, m), scale = x_stddev)\n\t\t\tz_batch = np.random.normal(size=(train_batch_size, m), scale = 1.0)\n\n\t\t\t_, train_cost = sess.run([train_op, wits_cost], feed_dict = {x0: x_batch, z: z_batch})\n\n\t\t\n\t\t\tif epoch % epoch_step == 0: \n\t\t\t\tmc_cost = sess.run([wits_cost], feed_dict={x0: mc_x_batch, z: mc_z_batch})\n\t\t\t\tprint('Epoch {}, Cost {}, MC Cost: {}'.format(epoch, train_cost, mc_cost[0]))\n\t\tfinal_mc_cost = mc_cost[0]\n\t\tprint('Epoch {}, Cost {}, MC Cost: {}'.format(epoch, train_cost, final_mc_cost))\n\n\t\t\n\n\t\t# print('Beginning testing....')\n\t\t# for i in range(num_x0_points):\n\t\t# \tu1t, u2t, y2t = 0, 0, 0\n\n\t\t# \t#vignesh says: don't pass in y2 values\n\t\t# \tfor _ in range(test_averaging):\n\t\t# \t\tu1tmp, u2tmp, y2tmp, x1tmp = sess.run(\n\t\t# \t\t\t[u1, u2, x1_noise, x1],\n\t\t# \t\t\tfeed_dict={x0: x0_test[i].reshape((1, 1)),\n\t\t# \t\t\tz: np.array(np.random.normal(scale=1)).reshape((1, 1))}) #generate z on the fly.\n\t\t# \t\tu1t += u1tmp\n\t\t# \t\tu2t += u2tmp\n\t\t# \t\ty2t += y2tmp\n\n\t\t# \tu1_test[0, i] = u1t / test_averaging\n\t\t# \tu2_test[0, i] = u2t / test_averaging\n\t\t# \ty2_test[0, i] = y2t / test_averaging\n\n\treturn final_mc_cost\n\t\t\t\t\t\n\n\ndef cartesian_product(*arrays): \n return itertools.product(*arrays)\n\n\nif __name__ == \"__main__\":\n\tk_squared_vals = [0.04]\n\tencoder_init_weights_lists = [[]]\n\tdecoder_init_weights_lists = [[]]\n\tlearning_rates_lists = [[5e-4, 5e-4], [5e-4, 0]]\n\toptimizers_lists = [[tf.train.GradientDescentOptimizer, tf.train.GradientDescentOptimizer]]\n\tencoder_activations_lists = [[tf.nn.sigmoid, tf.identity], [tf.nn.tanh, tf.identity]]\n\tdecoder_activations_lists = [[tf.nn.sigmoid, tf.identity]]\n\tinit_weights_functions = [tf.glorot_normal_initializer()]\n\tinit_bias_functions = [tf.zeros_initializer()]\n\tnum_units_lists = [[1, 10, 1, 10, 1]]\n\tm_list = [1]\n\ttrain_batch_sizes = [1000]\n\tmc_batch_sizes = [1000]\n\tnum_epochs_list = [2000]\n\tx_stddev_list = [5]\n\t\n\t# train_net(200, 500, 100, 5)\n\n\tall_hyperparam_tuples = cartesian_product(k_squared_vals,\n\t\tencoder_init_weights_lists,\n\t\tdecoder_init_weights_lists,\n\t\tlearning_rates_lists,\n\t\toptimizers_lists,\n\t\tencoder_activations_lists,\n\t\tdecoder_activations_lists,\n\t\tinit_weights_functions,\n\t\tinit_bias_functions,\n\t\tnum_units_lists,\n\t\tm_list,\n\t\ttrain_batch_sizes,\n\t\tmc_batch_sizes,\n\t\tnum_epochs_list,\n\t\tx_stddev_list)\n\n\trun_num = 1\n\t# seed = 85\n\n\tgood_seeds = []\n\tgood_losses = []\n\tfor tup in all_hyperparam_tuples: \n\t\t#Unroll the huge tuple of hyperparameters.\n\t\tk_squared, encoder_init_weights, decoder_init_weights, learning_rates, optimizers, 
encoder_activations, decoder_activations, init_weights_function, init_bias_function, num_units_list, m, train_batch_size, mc_batch_size, num_epochs, x_stddev = tup\n\n\t\t#Seed for reproducibility. Change seed every time. \n\t\t# do we need a seed for the random seed generator? whoa...\n\t\tseed = np.random.randint(low=5, high=200)\n\t\tnp.random.seed(seed)\n\t\ttf.set_random_seed(seed)\n\n\t\tprint('RUN NUMBER {}'.format(run_num))\n\t\tprint('Numpy/TF random seed {}'.format(seed)) #prints the seed here. \n\t\tprint('HYPERPARAMETERS ARE: ')\n\t\tprint('k_squared, encoder_init_weights, decoder_init_weights,' +\n\t\t\t'learning_rates, optimizers, encoder_activations, decoder_activations, init_weights_function, ' + \n\t\t\t'init_bias_function, num_units_list, m, train_batch_size, mc_batch_size, num_epochs, x_stddev')\n\t\tprint(tup)\n\t\tprint('-----------------------------------------------\\n')\n\t\tfinal_cost = nn_run(k_squared, encoder_init_weights, decoder_init_weights,\n\t\t\tlearning_rates, optimizers, encoder_activations, decoder_activations, init_weights_function, \n\t\t\tinit_bias_function, num_units_list, m, train_batch_size, mc_batch_size, num_epochs, x_stddev)\n\t\tprint('-----------------------------------------------\\n')\n\t\trun_num += 1","repo_name":"laurabrink13/Witsenhausen-Counterexample-with-NNs","sub_path":"simple_1d_script.py","file_name":"simple_1d_script.py","file_ext":"py","file_size_in_byte":9105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30010282311","text":"import os\nimport pytest\nimport tempfile\n\nfrom xcalar.compute.util.cluster import DevCluster\n\nfrom xcalar.external.LegacyApi.XcalarApi import XcalarApiStatusException\nfrom xcalar.external.exceptions import XDPException\nfrom xcalar.external.client import Client\n\nfrom xcalar.compute.localtypes.Connectors_pb2 import RemoveFileRequest\nfrom xcalar.compute.localtypes.Workbook_pb2 import WorkbookScope\n\npytestmark = pytest.mark.last(\n \"Execute this test as late as possible since it manages its own cluster\")\n\n# Don't throw on failure to find env var.\nXcalarQaDatasetPath = os.environ.get('XcalarQaDatasetPath', None)\n\n# Target tests\nshared_qa_target_name = \"QA shared target\"\nunsharedSymmTargetName = \"QA unshared symmetric\"\n\ntargets = [(shared_qa_target_name, \"shared\", {\n \"mountpoint\": XcalarQaDatasetPath\n}),\n (unsharedSymmTargetName, \"sharednothingsymm\", {\n \"mountpoint\": XcalarQaDatasetPath\n })]\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef one_node_cluster():\n with DevCluster(num_nodes=1):\n yield\n\n\n@pytest.fixture(scope=\"module\")\ndef client(one_node_cluster):\n client = Client()\n for target_name, target_type, target_args in targets:\n client.add_data_target(target_name, target_type, target_args)\n\n yield client\n\n for target_name, target_type, target_args in targets:\n client.get_data_target(target_name).delete()\n\n\n@pytest.mark.parametrize(\"target_name\", [\n (shared_qa_target_name),\n (unsharedSymmTargetName),\n])\ndef test_delete_file(client, target_name):\n target = client.get_data_target(target_name)\n temp_dir_prefix = os.path.join(target.params[\"mountpoint\"], \"tmp\")\n with tempfile.TemporaryDirectory(prefix=temp_dir_prefix) as outdir:\n # write out a file to delete using delete api\n actual_path = os.path.join(outdir, \"test_file\")\n with open(actual_path, 'w') as fp:\n fp.write(\"contents\")\n\n # we pass paths to list_files and remove apis the relative path from 
mountpoint\n        relative_path = os.path.relpath(actual_path,\n                                        target.params[\"mountpoint\"])\n        resp = target.list_files(relative_path)\n        files = resp['files']\n        assert len(files) == 1\n        delete_files(client, relative_path, target_name)\n        with pytest.raises(XDPException) as ex:\n            target.list_files(relative_path)\n\n\n@pytest.mark.parametrize(\"target_name\", [\n    (shared_qa_target_name),\n    (unsharedSymmTargetName),\n])\ndef test_delete_dir(client, target_name):\n    target = client.get_data_target(target_name)\n    temp_dir_prefix = os.path.join(target.params[\"mountpoint\"], \"tmp\")\n    with tempfile.TemporaryDirectory(prefix=temp_dir_prefix) as outdir:\n        # write out a directory and two files in it, to delete using delete api\n        actual_path = os.path.join(outdir, \"test_dir\")\n        os.makedirs(actual_path)\n        assert os.path.isdir(actual_path)\n        file1 = os.path.join(actual_path, \"file1\")\n        file2 = os.path.join(actual_path, \"file2\")\n        with open(file1, 'w') as fp1, open(file2, 'w') as fp2:\n            fp1.write(\"contents1\")\n            fp2.write(\"contents2\")\n\n        # we pass paths to list_files and remove apis the relative path from mountpoint\n        relative_path = os.path.relpath(actual_path,\n                                        target.params[\"mountpoint\"])\n        resp = target.list_files(relative_path)\n        files = resp['files']\n        assert len(files) == 2    # two files written in the directory\n        assert all([not file[\"isDir\"] for file in files])\n        delete_files(client, relative_path, target_name)\n        with pytest.raises(XDPException) as ex:\n            target.list_files(relative_path)\n\n\ndef delete_files(client, path, target_name):\n    scope = WorkbookScope()\n    scope.workbook.name.username = client.username\n\n    req = RemoveFileRequest()\n    req.path = path\n    req.target_name = target_name\n    req.scope.CopyFrom(scope)\n\n    client._connectors_service.removeFile(req)\n","repo_name":"varlogtim/xcalar","sub_path":"src/bin/tests/pyTestNew/io/test_remove.py","file_name":"test_remove.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"74971333386","text":"#Write an algorithm that reads a vector of 30 numbers. After that, read another number, \r\n    #then count and print how many times that number appears in the vector.\r\n\r\n\r\nnumeros = [10,20,30,40,50,60,70,80,90,77,1,2,3,4,5,6,77,8,9,10,11,12,13,14,15,16,17,18,19,77]\r\nnumero = int(input('Enter a number: '))\r\ncount = 0\r\nfor num in numeros:\r\n    if num == numero:\r\n        count += 1\r\nprint(f'{numero} appeared {count} times')","repo_name":"JeiAlbuquerque089/uniesp_introducao_promacao","sub_path":"ted 02.py","file_name":"ted 02.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39420986554","text":"# https://leetcode.com/problems/merge-two-sorted-lists/submissions/\r\nfrom typing import Optional\r\n\r\nfrom tester import Tester\r\nfrom ListNode import ListNode, list_to_nodes as ltn\r\n\r\n\r\nclass Solution:\r\n    def mergeTwoLists(\r\n        self, l1: Optional[ListNode], l2: Optional[ListNode]\r\n    ) -> Optional[ListNode]:\r\n        head = ListNode()\r\n        tail = head\r\n        while l1 and l2:\r\n            if l1.val > l2.val:\r\n                tail.next = l2\r\n                l2 = l2.next\r\n            else:\r\n                tail.next = l1\r\n                l1 = l1.next\r\n            tail = tail.next\r\n        tail.next = l1 or l2\r\n        return head.next\r\n\r\n\r\nt = Tester(Solution())\r\n\r\nt.test(ltn(\"[1,1,2,3,4,4]\"), ltn(\"[1,2,4]\"), ltn(\"[1,3,4]\"))\r\n\r\nt.report()\r\n","repo_name":"thinhntr/cp","sub_path":"leetcode/Merge Two Sorted Lists.py","file_name":"Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"23382654931","text":"import streamlit as st\nfrom numpy import load\nfrom numpy import expand_dims\nfrom PIL import Image, ImageDraw, ImageFont\nfrom streamlit_drawable_canvas import st_canvas\nimport numpy as np\nimport os\nimport requests\nimport io\n\n# first option: upload a paint file:\nst.markdown(\"🏠 🏠 🏠\",\n            unsafe_allow_html=True)\nst.markdown(\"Generate facade images using cGAN\",\n            unsafe_allow_html=True)\n\n# navigation\nst.sidebar.write(\"# Navigation\")\nnav = st.sidebar.radio(\"Go to:\", [\"file\", \"drawing\"])\n\nif nav == \"file\":\n\n    st.markdown(\n        \"Choose any paint image and get the corresponding facade:
\",\n unsafe_allow_html=True)\n\n st.set_option(\"deprecation.showfileUploaderEncoding\", False)\n uploaded_file = st.file_uploader(\"Choose an image\", type=[\"png\",\"jpeg\", \"jpg\"])\n\n col1, col2 = st.columns(2)\n\n if uploaded_file is not None:\n\n # uploaded_file.__dict__\n # uploaded_file\n\n in_image = Image.open(uploaded_file)\n #st.image(image, caption=\"Uploaded image\", use_column_width=True)\n\n col1.header(\"Input Image\")\n col1.image(in_image, use_column_width=True)\n\n # convert image to bytes\n img_byte_arr = io.BytesIO()\n in_image.save(img_byte_arr, format='PNG')\n img_byte_arr = img_byte_arr.getvalue()\n\n # with open(\"image.jpg\", \"wb\") as f:\n # f.write(img_byte_arr)\n\n # api call\n if st.button('generate'):\n url = \"https://pix2pix-dttdfzxwga-ew.a.run.app/predict\"\n files = {\"file\": img_byte_arr}\n\n response = requests.post(url, files=files)\n\n print(response)\n\n response\n\n if response.status_code == 200:\n resp = response.content\n\n with open(\"sample_image.png\", \"wb\") as file:\n file.write(response.content)\n\n image = Image.open(io.BytesIO(response.content))\n col2.header(\"Generated Image\")\n #st.image(image, caption='prediction', use_column_width=False)\n col2.image(image,use_column_width=True)\n\n else:\n \"😬 api error 🤖\"\n\nelif nav == \"drawing\":\n\n #second option: draw a paint\n st.markdown(\n \"
Draw an awesome one yourself and generate its facade:
\",\n unsafe_allow_html=True)\n\n element = st.sidebar.radio(\"Select facade element :\", [\n \"Background\",\n \"Wall\",\n \"Door\",\n \"Window\",\n \"Window still\",\n \"Window head\",\n \"Shutter\",\n \"Balcony\",\n \"Trim\",\n \"Cornice\",\n \"Column\",\n \"Entrance\"])\n\n element_color = {\n \"Background\": \"rgba(1, 6, 216, 1)\",\n \"Wall\": \"rgba(13, 61, 250, 1)\",\n \"Door\": \"rgba(165, 4, 3, 1)\",\n \"Window\": \"rgba(4, 117, 254, 1)\",\n \"Window still\": \"rgba(104, 248, 152, 1)\",\n \"Window head\": \"rgba(26, 253, 221, 1)\",\n \"Shutter\": \"rgba(238, 237, 40, 1)\",\n \"Balcony\": \"rgba(184, 253, 56, 1)\",\n \"Trim\": \"rgba(254, 146, 5, 1)\",\n \"Cornice\": \"rgba(253, 68, 3, 1)\",\n \"Column\": \"rgba(246, 2, 4, 1)\",\n \"Entrance\": \"rgba(11, 201, 253, 1)\"\n }\n\n color = element_color.get(element)\n\n\n tool = st.sidebar.selectbox(\"Drawing tool:\",\n (\"rect\", \"line\", \"circle\", \"freedraw\",\n \"transform\"))\n\n\n # realtime_update = st.sidebar.checkbox(\"Update in realtime\", True)\n canvas_result = st_canvas(\n fill_color=color,\n stroke_width=1,\n stroke_color=color,\n background_color=\"rgba(0, 0, 0, 0)\",\n background_image=None,\n update_streamlit=True, # realtime_update\n width=512,\n height=512,\n drawing_mode=tool,\n key=\"canvas\")\n\n col1, col2 = st.columns(2)\n\n def save_image():\n\n dr_img = Image.fromarray(canvas_result.image_data.astype(\"uint8\"), 'RGBA')\n # dr_img.save(\"drawing.png\")\n st.session_state.dr_image = dr_img\n\n col1.header(\"Input Image\")\n col1.image(dr_img, use_column_width=True)\n\n if st.button(\"save\"):\n\n save_image()\n\n # api call\n def call_api():\n\n url = \"https://pix2pix-dttdfzxwga-ew.a.run.app/predict\"\n\n dr_img = st.session_state.dr_image\n img_byte_arr = io.BytesIO()\n dr_img.save(img_byte_arr, format='PNG')\n the_bytes = img_byte_arr.getvalue()\n\n response = requests.post(url, files= {\"file\": the_bytes})\n\n print(response)\n\n response\n\n if response.status_code == 200:\n resp = response.content\n\n #with open(\"sample_image.png\", \"wb\") as file:\n #file.write(response.content)\n\n gen_img = Image.open(io.BytesIO(response.content))\n col2.header(\"Generated Image\")\n #st.image(image, caption='prediction', use_column_width=False)\n col2.image(gen_img, use_column_width=True)\n\n else:\n \"😬 api error 🤖\"\n\n if st.button('generate'):\n save_image()\n call_api()\n","repo_name":"Oumnia-Sdn/pix2pixwebapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39483381017","text":"\nname = \"Caroline\"\ngreeting= f\"hello , {name}\"\n\nprint(greeting)\n\nname=\"Oner\"\ngreeting=\"Hello, {}\"\nwithName = greeting.format(name) \n#format permet de definir la variable a rajouter a l'accolade\nwithName1 =greeting.format(\"Ali et Nimet\")\nprint(withName)\nprint(withName1)\n\nphrase=\"Salut, {}. 
aujourd'hui nous sommes {} \"\ninserer = phrase.format(\"Ebru\", \"Samedi\") \n\nprint(inserer)","repo_name":"OnerBerk/b-f-Python-flask","sub_path":"2_string-formatting/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"30954991568","text":"from typing import Dict\nimport streamlit as st\nfrom datetime import datetime, timedelta, timezone\nfrom hydralit.loading_app import LoadingApp\nimport hydralit_components as hc\nfrom hydralit.wrapper_class import Templateapp\n\n\nclass HydraApp(object):\n    \"\"\"\n    Class to create a host application for combining multiple streamlit applications.\n    \"\"\"\n\n    def __init__(self,\n                 title='Hydralit Apps',\n                 nav_container=None,\n                 nav_horizontal=True,\n                 layout='wide',\n                 favicon=\"🧊\",\n                 use_navbar=True,\n                 navbar_theme=None,\n                 navbar_sticky=True,\n                 navbar_mode='pinned',\n                 use_loader=True,\n                 use_cookie_cache=True,\n                 sidebar_state='auto',\n                 navbar_animation=True,\n                 allow_url_nav=False,\n                 hide_streamlit_markers=False,\n                 use_banner_images=None,\n                 banner_spacing=None,\n                 clear_cross_app_sessions=True,\n                 session_params=None):\n        \"\"\"\n        A class to create a multi-app Streamlit application. This class will be the host application for multiple applications that are added after instancing.\n        The secret sauce to making the different apps work together comes from the use of a global session store that is shared with any HydraHeadApp that is added to the parent HydraApp.\n        The session store is created when this class is instanced, by default the store contains the following variables that are used across the child apps:\n        - previous_app\n        - selected_app\n        - preserve_state\n        - allow_access\n        - current_user\n        More global values can be added by passing in a Dict when instancing the class, the dict needs to provide a name and default value that will be added to the global session store.\n        Parameters\n        -----------\n        title: str, 'Streamlit MultiApp'\n            The title of the parent app, this name will be used as the application (web tab) name.\n        nav_container: Streamlit.container, None\n            A container in which to populate the navigation buttons for each attached HydraHeadApp. Default will be a horizontally aligned banner style over the child applications. If the Streamlit sidebar is the target container, the navigation items will appear at the top and the default state of the sidebar will be expanded.\n        nav_horizontal: bool, True\n            To align the navigation buttons horizontally within the navigation container, if False, the items will be aligned vertically.\n        layout: str, 'wide'\n            The layout format to be used for all app pages (HydraHeadApps), same as the layout variable used in `set_page_config <https://docs.streamlit.io/en/stable/api.html#streamlit.set_page_config>`.\n        favicon: str\n            An inline favicon image to be used as the application favicon.\n        allow_url_nav: bool, False\n            Enable navigation using url parameters; this allows for bookmarking and using internal links for navigation.\n        use_navbar: bool, True\n            Use the Hydralit Navbar component or internal Streamlit components to create the nav menu. 
Currently Hydralit Navbar doesn't support dropdown menus.\n        navbar_theme: Dict, None\n            Override the Hydralit Navbar theme; you can override only the part you wish or the entire theme by only providing details of the changes.\n             - txc_inactive: Inactive Menu Item Text color\n             - menu_background: Menu Background Color\n             - txc_active: Active Menu Item Text Color\n             - option_active: Active Menu Item Color \n            example, navbar_theme = {'txc_inactive': '#FFFFFF','menu_background':'red','txc_active':'yellow','option_active':'blue'}\n        navbar_sticky: bool, True\n            Set navbar to be sticky and fixed to the top of the window.\n        use_loader: bool, True\n            Set whether to use the app loader with auto transition spinners or to load directly.\n        navbar_animation: bool, True\n            Set whether navbar menu transitions should be animated.\n        sidebar_state: str, 'auto'\n            The starting state of the sidebar, same as the variable used in `set_page_config <https://docs.streamlit.io/en/stable/api.html#streamlit.set_page_config>`.\n        hide_streamlit_markers: bool, False\n            A flag to hide the default Streamlit menu hamburger button and the footer watermark.\n        use_banner_images: str or Array, None\n            A path to the image file to use as a banner above the menu or an array of paths to use multiple images spaced using the ratios from the banner_spacing parameter.\n        banner_spacing: Array, None\n            An array to specify the alignment of the banner images, this is the same as the array spec used by Streamlit Columns; if you want centered with 20% padding either side -> banner_spacing = [20,60,20]\n        clear_cross_app_sessions: bool, True\n            A flag to indicate if the local session store values within individual apps should be cleared when moving to another app; if set to False, there may be a difference between the expected and selected values when loading sidebar controls.\n        session_params: Dict\n            A Dict of parameter name and default values that will be added to the global session store, these parameters will be available to all child applications and they can get/set values from the store during execution.\n\n        \"\"\"\n\n        self._apps = {}\n        self._nav_pointers = {}\n        self._navbar_pointers = {}\n        self._login_app = None\n        self._unsecure_app = None\n        self._home_app = None\n        self._home_label = None\n        self._home_id = 'Home'\n        self._complex_nav = None\n        self._navbar_mode = navbar_mode\n        self._navbar_active_index = 0\n        self._allow_url_nav = allow_url_nav\n        self._navbar_animation = navbar_animation\n        self._navbar_sticky = navbar_sticky\n        self._nav_item_count = 0\n        self._use_navbar = use_navbar\n        self._navbar_theme = navbar_theme\n        self._banners = use_banner_images\n        self._banner_spacing = banner_spacing\n        self._hide_streamlit_markers = hide_streamlit_markers\n        self._loader_app = LoadingApp()\n        self._user_loader = use_loader\n        self._use_cookie_cache = use_cookie_cache\n        self._cookie_manager = None\n        self._logout_label = None\n        self._logout_id = 'Logout'\n        self._logout_callback = None\n        self._login_callback = None\n        self._session_attrs = {}\n        self._call_queue = []\n        self._other_nav = None\n        self._guest_name = 'guest'\n        self._guest_access = 1\n        self._hydralit_url_hash = 'hYDRALIT|-HaShing==seCr8t'\n        self._no_access_level = 0\n\n        self._user_session_params = session_params\n\n        try:\n            st.set_page_config(page_title=title, page_icon=favicon,\n                               layout=layout, initial_sidebar_state=sidebar_state,)\n        except:\n            pass\n\n        self._nav_horizontal = nav_horizontal\n\n        if self._banners is not None:\n            self._banner_container = st.container()\n\n        if nav_container is None:\n            self._nav_container = st.container()\n        else:\n            # hack to stop the beta containers from running set_page_config 
before HydraApp gets a chance to.\n            # if we have a beta_columns container, the instance is delayed until the run() method is called, beta components, who knew!\n            if nav_container.__name__ in ['container']:\n                self._nav_container = nav_container()\n            else:\n                self._nav_container = nav_container\n\n        self.cross_session_clear = clear_cross_app_sessions\n\n        if clear_cross_app_sessions:\n            preserve_state = 0\n        else:\n            preserve_state = 1\n\n        self._session_attrs = {'previous_app': None, 'selected_app': None, 'other_nav_app': None,\n                               'preserve_state': preserve_state, 'allow_access': self._no_access_level, 'logged_in': False, 'access_hash': None}\n        self.session_state = st.session_state\n\n        if isinstance(self._user_session_params, Dict):\n            self._session_attrs |= self._user_session_params\n\n        for key, item in self._session_attrs.items():\n            if not hasattr(self.session_state, key):\n                self.session_state[key] = item\n\n\n    # def _encode_hyauth(self):\n    #     user_access_level, username = self.check_access()\n    #     payload = {\"exp\": datetime.now(timezone.utc) + timedelta(days=1), \"userid\": username,\"user_level\":user_access_level}\n    #     return jwt.encode(payload, self._hydralit_url_hash, algorithm=\"HS256\")\n\n    # def _decode_hyauth(self,token):\n    #     return jwt.decode(token, self._hydralit_url_hash, algorithms=[\"HS256\"])\n\n    def add_loader_app(self, loader_app):\n        \"\"\"\n        To improve the transition between HydraHeadApps, a loader app is used to quickly clear the window during loading; you can supply a custom loader app, and if you include an app that takes a long time to initialise, that is when this app will be seen by the user.\n        NOTE: make sure any items displayed are removed once the target app loading is complete, or the items from this app will remain on top of the target app display.\n        Parameters\n        ------------\n        loader_app: HydraHeadApp:`~Hydralit.HydraHeadApp`\n            The loader app; this app must implement a modified run method that will receive the target app to be loaded. Within the loader run method, the run() method of the target app must be called, or nothing will happen and it will stay in the loader app.\n        \"\"\"\n\n        if loader_app:\n            self._loader_app = loader_app\n            self._user_loader = True\n        else:\n            self._loader_app = None\n            self._user_loader = False\n\n    def add_app(self, title, app, icon=None, is_login=False, is_home=False, logout_label=None, is_unsecure=False):\n        \"\"\"\n        Adds a new application to this HydraApp\n\n        Parameters\n        ----------\n        title: str\n            The title of the app. 
This is the name that will appear on the menu item for this app.\n app: :HydraHeadApp:`~Hydralit.HydraHeadApp`\n The app class representing the app to include, it must implement inherit from HydraHeadApp classmethod.\n icon: str\n The icon to use on the navigation button, this will be appended to the title to be used on the navigation control.\n is_login: bool, False\n Is this app used to login to the family of apps, this app will provide request response to gateway access to the other apps within the HydraApp.\n is_home: bool, False\n Is this the first 'page' that will be loaded, if a login app is provided, this is the page that will be kicked to upon successful login.\n is_unsecure: bool, False\n An app that can be run other than the login if using security, this is typically a sign-up app that can be run and then kick back to the login.\n \"\"\"\n\n # don't add special apps to list\n if self._use_navbar and not is_login and not is_home:\n self._navbar_pointers[title] = [title, icon]\n\n # if icon is None and not is_login and not is_home:\n # self._nav_pointers[title] = title\n # else:\n # self._nav_pointers[title] = '{} {}'.format(icon,title)\n\n if is_unsecure:\n self._unsecure_app = app\n\n if is_login:\n self._login_app = app\n self._logout_label = [title, icon]\n\n elif is_home:\n self._home_app = app\n self._home_label = [title, icon]\n else:\n self._apps[title] = app\n\n self._nav_item_count = int(\n self._login_app is not None) + len(self._apps.keys())\n app.assign_session(self.session_state, self)\n\n def _run_selected(self):\n try:\n if self.session_state.selected_app is None:\n self.session_state.other_nav_app = None\n self.session_state.previous_app = None\n self.session_state.selected_app = self._home_id\n\n # can disable loader\n if self._user_loader:\n self._loader_app.run(self._home_app)\n else:\n self._home_app.run()\n\n # st.experimental_set_query_params(selected=self._home_app)\n else:\n\n if self.session_state.other_nav_app is not None:\n self.session_state.previous_app = self.session_state.selected_app\n self.session_state.selected_app = self.session_state.other_nav_app\n self.session_state.other_nav_app = None\n\n if self.session_state.selected_app == self._home_id:\n if self._user_loader:\n self._loader_app.run(self._home_app)\n else:\n self._home_app.run()\n else:\n if self._user_loader:\n self._loader_app.run(\n self._apps[self.session_state.selected_app])\n else:\n self._apps[self.session_state.selected_app].run()\n else:\n if self.session_state.selected_app == self._home_id:\n if self._user_loader:\n self._loader_app.run(self._home_app)\n else:\n self._home_app.run()\n else:\n if self._user_loader:\n self._loader_app.run(\n self._apps[self.session_state.selected_app])\n else:\n self._apps[self.session_state.selected_app].run()\n # st.experimental_set_query_params(selected=self.session_state.selected_app)\n\n except Exception as e:\n st.error(\n '😭 Error triggered from app: **{}**'.format(self.session_state.selected_app))\n st.error('Details: {}'.format(e))\n\n def _clear_session_values(self):\n for key in st.session_state:\n del st.session_state[key]\n\n def set_guest(self, guest_name):\n \"\"\"\n Set the name to be used for guest access.\n Parameters\n -----------\n guest_name: str\n The value to use when allowing guest logins.\n \"\"\"\n\n if guest_name is not None:\n self._guest_name = guest_name\n\n def set_noaccess_level(self, no_access_level: int):\n \"\"\"\n Set the access level integer value to be used to indicate no access, default is zero.\n 
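For example, calling app.set_noaccess_level(-1) makes -1 the blocked level, so a user whose access level is 0 is already treated as having access.\n        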
Parameters\n -----------\n no_access_level: int\n The value to use to block access, all other values will have some level of access\n \"\"\"\n\n if no_access_level is not None:\n self._no_access_level = int(no_access_level)\n\n def set_access(self, allow_access=0, access_user='', cache_access=False):\n \"\"\"\n Set the access permission and the assigned username for that access during the current session.\n Parameters\n -----------\n allow_access: int, 0\n Value indicating if access has been granted, can be used to create levels of permission.\n access_user: str, None\n The username the access has been granted to for this session.\n cache_access: bool, False\n Save these access details to a browser cookie so the user will auto login when they visit next time.\n \"\"\"\n\n # Set the global access flag\n self.session_state.allow_access = allow_access\n\n # Also, who are we letting in..\n self.session_state.current_user = access_user\n\n def check_access(self):\n \"\"\"\n Check the access permission and the assigned user for the running session.\n Returns\n ---------\n tuple: access_level, username\n \"\"\"\n username = None\n\n if hasattr(self.session_state, 'current_user'):\n username = str(self.session_state.current_user)\n\n return int(self.session_state.allow_access), username\n\n def get_nav_transition(self):\n \"\"\"\n Check the previous and current app names the user has navigated between\n Returns\n ---------\n tuple: previous_app, current_app\n \"\"\"\n\n return str(self.session_state.previous_app), str(self.session_state.selected_app)\n\n def get_user_session_params(self):\n \"\"\"\n Return a dictionary of the keys and current values of the user defined session parameters.\n Returns\n ---------\n dict\n \"\"\"\n user_session_params = {}\n\n if self._user_session_params is not None:\n for k in self._user_session_params.keys():\n if hasattr(self.session_state, k):\n user_session_params[k] = getattr(self.session_state, k)\n\n return user_session_params\n\n def _do_logout(self):\n self.session_state.allow_access = self._no_access_level\n self._logged_in = False\n # self._delete_cookie_cache()\n if callable(self._logout_callback):\n self._logout_callback()\n\n st.experimental_rerun()\n\n def _run_navbar(self, menu_data):\n\n if hasattr(hc, '__version__'):\n\n if hc.__version__ >= 104:\n login_nav = None\n home_nav = None\n\n if self._login_app:\n login_nav = {\n 'id': self._logout_id, 'label': self._logout_label[0], 'icon': self._logout_label[1], 'ttip': 'Logout'}\n\n if self._home_app:\n home_nav = {\n 'id': self._home_id, 'label': self._home_label[0], 'icon': self._home_label[1], 'ttip': 'Home'}\n\n self.session_state.selected_app = hc.nav_bar(menu_definition=menu_data, key=\"mainHydralitMenuComplex\", home_name=home_nav, override_theme=self._navbar_theme,\n login_name=login_nav, use_animation=self._navbar_animation, hide_streamlit_markers=self._hide_streamlit_markers)\n else:\n self.session_state.selected_app = hc.nav_bar(menu_definition=menu_data, key=\"mainHydralitMenuComplex\",\n home_name=self._home_app, override_theme=self._navbar_theme, login_name=self._logout_label)\n\n # if nav_selected is not None:\n # if nav_selected != self.session_state.previous_app and self.session_state.selected_app != nav_selected:\n # self.session_state.selected_app = nav_selected\n\n if self.cross_session_clear and self.session_state.preserve_state:\n self._clear_session_values()\n\n def _build_nav_menu(self):\n\n if self._complex_nav is None:\n number_of_sections = self._nav_item_count\n else:\n 
number_of_sections = int(\n self._login_app is not None) + len(self._complex_nav.keys())\n\n if self._nav_horizontal:\n if hasattr(self._nav_container, 'columns'):\n nav_slots = self._nav_container.columns(number_of_sections)\n elif self._nav_container.__name__ in ['columns']:\n nav_slots = self._nav_container(number_of_sections)\n else:\n nav_slots = self._nav_container\n else:\n if self._nav_container.__name__ in ['columns']:\n # columns within columns currently not supported\n nav_slots = st\n else:\n nav_slots = self._nav_container\n\n # actually build the menu\n if self._complex_nav is None:\n if self._use_navbar:\n menu_data = [{'label': self._navbar_pointers[app_name][0], 'id':app_name,\n 'icon': self._navbar_pointers[app_name][1]} for app_name in self._apps.keys()]\n\n # Add logout button and kick to login action\n if self._login_app is not None:\n # if self.session_state.current_user is not None:\n # self._logout_label = '{} : {}'.format(self.session_state.current_user.capitalize(),self._logout_label)\n\n with self._nav_container:\n self._run_navbar(menu_data)\n\n # user clicked logout\n if self.session_state.selected_app == self._logout_label:\n self._do_logout()\n else:\n with self._nav_container:\n self._run_navbar(menu_data)\n else:\n for i, app_name in enumerate(self._apps.keys()):\n if self._nav_horizontal:\n nav_section_root = nav_slots[i]\n else:\n nav_section_root = nav_slots\n\n if nav_section_root.button(label=self._nav_pointers[app_name]):\n self.session_state.previous_app = self.session_state.selected_app\n self.session_state.selected_app = app_name\n\n if self.cross_session_clear and self.session_state.previous_app != self.session_state.selected_app and not self.session_state.preserve_state:\n self._clear_session_values()\n\n # Add logout button and kick to login action\n if self._login_app is not None:\n # if self.session_state.current_user is not None:\n # self._logout_label = '{} : {}'.format(self.session_state.current_user.capitalize(),self._logout_label)\n\n if self._nav_horizontal:\n if nav_slots[-1].button(label=self._logout_label):\n self._do_logout()\n else:\n if nav_slots.button(label=self._logout_label):\n self._do_logout()\n else:\n if self._use_navbar:\n menu_data = []\n for i, nav_section_name in enumerate(self._complex_nav.keys()):\n menu_item = None\n if nav_section_name not in [self._home_id, self._logout_id]:\n if len(self._complex_nav[nav_section_name]) == 1:\n # if (self._complex_nav[nav_section_name][0] != self._home_app and self._complex_nav[nav_section_name][0] != self._logout_label):\n menu_item = {'label': self._navbar_pointers[self._complex_nav[nav_section_name][0]][0], 'id': self._complex_nav[\n nav_section_name][0], 'icon': self._navbar_pointers[self._complex_nav[nav_section_name][0]][1]}\n else:\n submenu_items = []\n for nav_item in self._complex_nav[nav_section_name]:\n # if (nav_item != self._home_app and nav_item != self._logout_label):\n menu_item = {\n 'label': self._navbar_pointers[nav_item][0], 'id': nav_item, 'icon': self._navbar_pointers[nav_item][1]}\n submenu_items.append(menu_item)\n\n if len(submenu_items) > 0:\n menu_item = {\n 'label': nav_section_name, 'id': nav_section_name, 'submenu': submenu_items}\n\n if menu_item is not None:\n menu_data.append(menu_item)\n\n # Add logout button and kick to login action\n if self._login_app is not None:\n # if self.session_state.current_user is not None:\n # self._logout_label = '{} : {}'.format(self.session_state.current_user.capitalize(),self._logout_label)\n\n with 
self._nav_container:\n self._run_navbar(menu_data)\n\n # user clicked logout\n if self.session_state.selected_app == self._logout_id:\n self._do_logout()\n else:\n #self.session_state.previous_app = self.session_state.selected_app\n with self._nav_container:\n self._run_navbar(menu_data)\n\n else:\n\n for i, nav_section_name in enumerate(self._complex_nav.keys()):\n if nav_section_name not in [self._home_id, self._logout_id]:\n if self._nav_horizontal:\n nav_section_root = nav_slots[i]\n else:\n nav_section_root = nav_slots\n\n if len(self._complex_nav[nav_section_name]) == 1:\n nav_section = nav_section_root.container()\n else:\n nav_section = nav_section_root.expander(\n label=nav_section_name, expanded=False)\n\n for nav_item in self._complex_nav[nav_section_name]:\n if nav_section.button(label=self._nav_pointers[nav_item]):\n self.session_state.previous_app = self.session_state.selected_app\n self.session_state.selected_app = nav_item\n\n if self.cross_session_clear and self.session_state.previous_app != self.session_state.selected_app and not self.session_state.preserve_state:\n self._clear_session_values()\n\n # Add logout button and kick to login action\n if self._login_app is not None:\n # if self.session_state.current_user is not None:\n # self._logout_label = '{} : {}'.format(self.session_state.current_user.capitalize(),self._logout_label)\n\n if self._nav_horizontal:\n if nav_slots[-1].button(label=self._logout_label[0]):\n self._do_logout()\n else:\n if nav_slots.button(label=self._logout_label[0]):\n self._do_logout()\n\n def _do_url_params(self):\n if self._allow_url_nav:\n\n query_params = st.experimental_get_query_params()\n if 'selected' in query_params:\n # and (query_params['selected'])[0] != self.session_state.previous_app:\n if (query_params['selected'])[0] != 'None' and (query_params['selected'])[0] != self.session_state.selected_app:\n self.session_state.other_nav_app = (\n query_params['selected'])[0]\n\n def enable_guest_access(self, guest_access_level=1, guest_username='guest'):\n \"\"\"\n This method will auto login a guest user when the app is secured with a login app, this will allow fora guest user to by-pass the login app and gain access to the other apps that the assigned access level will allow.\n\n ------------\n guest_access_level: int, 1\n Set the access level to assign to an auto logged in guest user.\n guest_username: str, guest\n Set the username to assign to an auto logged in guest user.\n \"\"\"\n\n user_access_level, username = self.check_access()\n if user_access_level == 0 and username is None:\n self.set_access(guest_access_level, guest_username)\n\n # def get_cookie_manager(self):\n # if self._use_cookie_cache and self._cookie_manager is not None:\n # return self._cookie_manager\n # else:\n # return None\n\n # def _delete_cookie_cache(self):\n # if self._use_cookie_cache and self._cookie_manager is not None:\n # username_cache = self._cookie_manager.get('hyusername')\n # accesslevel_cache = self._cookie_manager.get('hyaccesslevel')\n\n # if username_cache is not None:\n # self._cookie_manager.delete('hyusername')\n\n # if accesslevel_cache is not None:\n # self._cookie_manager.delete('hyaccesslevel')\n\n # def _write_cookie_cache(self,hyaccesslevel,hyusername):\n # if self._use_cookie_cache and self._cookie_manager is not None:\n # if hyaccesslevel is not None and hyusername is not None:\n # self._cookie_manager.set('hyusername',hyusername)\n # self._cookie_manager.set('hyaccesslevel',hyaccesslevel)\n\n # def _read_cookie_cache(self):\n # if 
self._use_cookie_cache and self._cookie_manager is not None:\n # username_cache = self._cookie_manager.get('hyusername')\n # accesslevel_cache = self._cookie_manager.get('hyaccesslevel')\n\n # if username_cache is not None and accesslevel_cache is not None:\n # self.set_access(int(accesslevel_cache), str(username_cache))\n\n def run(self, complex_nav=None):\n \"\"\"\n This method is the entry point for the HydraApp, just like a single Streamlit app, you simply setup the additional apps and then call this method to begin.\n Parameters\n ------------\n complex_nav: Dict\n A dictionary that indicates how the nav items should be structured, each key will be a section title and the value will be a list or array of the names of the apps (as registered with the add_app method). The sections with only a single item will be displayed directly, the sections with more than one will be wrapped in an exapnder for cleaner layout.\n \"\"\"\n # process url navigation parameters\n # self._do_url_params()\n\n self._complex_nav = complex_nav\n # A hack to hide the hamburger button and Streamlit footer\n # if self._hide_streamlit_markings is not None:\n # st.markdown(self._hide_streamlit_markings, unsafe_allow_html=True)\n\n if self._banners is not None:\n if isinstance(self._banners, str):\n self._banners = [self._banners]\n\n if self._banner_spacing is not None and len(self._banner_spacing) == len(self._banners):\n cols = self._banner_container.columns(self._banner_spacing)\n for idx, im in enumerate(self._banners):\n if im is not None:\n if isinstance(im, Dict):\n cols[idx].markdown(\n next(iter(im.values())), unsafe_allow_html=True)\n else:\n cols[idx].image(im)\n else:\n if self._banner_spacing is not None and len(self._banner_spacing) != len(self._banners):\n print(\n 'WARNING: Banner spacing spec is a different length to the number of banners supplied, using even spacing for each banner.')\n\n cols = self._banner_container.columns([1]*len(self._banners))\n for idx, im in enumerate(self._banners):\n if im is not None:\n if isinstance(im, Dict):\n cols[idx].markdown(\n next(iter(im.values())), unsafe_allow_html=True)\n else:\n cols[idx].image(im)\n\n if self.session_state.allow_access > self._no_access_level or self._login_app is None:\n if callable(self._login_callback):\n if not self.session_state.logged_in:\n self.session_state.logged_in = True\n self._login_callback()\n\n if self._nav_item_count == 0:\n self._default()\n else:\n self._build_nav_menu()\n self._run_selected()\n elif self.session_state.allow_access < self._no_access_level:\n self.session_state.current_user = self._guest_name\n self._unsecure_app.run()\n else:\n self.session_state.logged_in = False\n self.session_state.current_user = None\n self.session_state.access_hash = None\n self._login_app.run()\n\n def _default(self):\n st.header('Welcome to Hydralit')\n st.write('Thank you for your enthusiasum and looking to run the HydraApp as quickly as possible, for maximum effect, please add a child app by one of the methods below.')\n\n st.write(\n 'For more information, please see the instructions on the home page [Hydralit Home Page](https://github.com/TangleSpace/hydralit)')\n\n st.write('Method 1 (easiest)')\n\n st.code(\"\"\"\n#when we import hydralit, we automatically get all of Streamlit\nimport hydralit as hy\n\napp = hy.HydraApp(title='Simple Multi-Page App')\n\n@app.addapp()\ndef my_cool_function():\n hy.info('Hello from app 1')\n \"\"\"\n )\n\n st.write('Method 2 (more fun)')\n\n st.code(\"\"\"\nfrom hydralit import 
HydraHeadApp\nimport streamlit as st\n\n\n#create a child app wrapped in a class with all your code inside the run() method.\nclass CoolApp(HydraHeadApp):\n\n def run(self):\n st.info('Hello from cool app 1')\n\n\n\n#when we import hydralit, we automatically get all of Streamlit\nimport hydralit as hy\n\napp = hy.HydraApp(title='Simple Multi-Page App')\n\napp.add_app(\"My Cool App\", icon=\"📚\", app=CoolApp(title=\"Cool App\"))\n \"\"\"\n )\n\n st.write(\n 'Once we have added atleast one child application, we just run the parent app!')\n\n st.code(\"\"\"\napp.run()\n \"\"\")\n\n st.write(\n 'For example you get can going super quick with a couple of functions and a call to Hydralit App run().')\n\n st.code(\"\"\"\n #when we import hydralit, we automatically get all of Streamlit\n import hydralit as hy\n\n app = hy.HydraApp(title='Simple Multi-Page App')\n\n @app.addapp(is_home=True)\n def my_home():\n hy.info('Hello from Home!')\n\n @app.addapp()\n def app2():\n hy.info('Hello from app 2')\n\n @app.addapp(title='The Best', icon=\"🥰\")\n def app3():\n hy.info('Hello from app 3, A.K.A, The Best 🥰')\n\n #Run the whole lot, we get navbar, state management and app isolation, all with this tiny amount of work.\n app.run()\n \"\"\")\n\n def logout_callback(self, func):\n \"\"\"\n This is a decorate to add a function to be run when a user is logged out.\n\n \"\"\"\n\n def my_wrap(*args, **kwargs):\n return func(*args, **kwargs)\n\n self._logout_callback = my_wrap\n return my_wrap\n\n def login_callback(self, func):\n \"\"\"\n This is a decorate to add a function to be run when a user is first logged in.\n\n \"\"\"\n\n def my_wrap(*args, **kwargs):\n return func(*args, **kwargs)\n\n self._login_callback = my_wrap\n return my_wrap\n\n def addapp(self, title=None, icon=None, is_home=False):\n \"\"\"\n This is a decorator to quickly add a function as a child app in a style like a Flask route.\n\n You can do everything you can normally do when adding a class based HydraApp to the parent, except you can not add a login or unsecure app using this method, as\n those types of apps require functions provided from inheriting from HydraAppTemplate.\n\n Parameters\n ----------\n title: str\n The title of the app. 
This is the name that will appear on the menu item for this app.\n icon: str\n The icon to use on the navigation button, this will be appended to the title to be used on the navigation control.\n is_home: bool, False\n Is this the first 'page' that will be loaded, if a login app is provided, this is the page that will be kicked to upon successful login.\n \"\"\"\n\n def decorator(func):\n\n wrapped_app = Templateapp(mtitle=title, run_method=func)\n app_title = wrapped_app.title\n app_icon = icon\n\n if is_home and title is None and icon is None:\n app_title = None\n app_icon = \"fa fa-home\"\n\n self.add_app(title=app_title, app=wrapped_app,\n icon=app_icon, is_home=is_home)\n\n return func\n\n return decorator\n","repo_name":"TangleSpace/hydralit","sub_path":"hydralit/hydra_app.py","file_name":"hydra_app.py","file_ext":"py","file_size_in_byte":36535,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"81"} +{"seq_id":"73369138505","text":"\"\"\"add instructions_template to Bot\n\nRevision ID: 6c1c3f3dbb92\nRevises: 38f0bc56a565\nCreate Date: 2023-04-04 11:53:40.587362\n\n\"\"\"\nimport sqlalchemy as sa\nimport sqlmodel\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"6c1c3f3dbb92\"\ndown_revision = \"38f0bc56a565\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.add_column(\n \"bot_config\",\n sa.Column(\n \"instructions_template\", sqlmodel.sql.sqltypes.AutoString(), nullable=True\n ),\n )\n\n\ndef downgrade() -> None:\n op.drop_column(\"bot_config\", \"instructions_template\")\n","repo_name":"rehmatsg/StarfishGPT","sub_path":"venv/lib/python3.10/site-packages/marvin/infra/alembic/versions/2023_04_04_1153-6c1c3f3dbb92_add_instructions_template_to_bot.py","file_name":"2023_04_04_1153-6c1c3f3dbb92_add_instructions_template_to_bot.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16764945757","text":"import collections\nimport logging\nfrom datetime import date, datetime, timedelta\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django_countries import countries\n\nfrom .forms import TrainingForm\nfrom .galaxy import (add_group_user, authenticate, create_group, create_role,\n get_groups, get_jobs, get_roles, get_users,\n get_workflow_invocations)\nfrom .models import Training\n\nlogger = logging.getLogger(__name__)\n\n\ndef register(request):\n if request.method == \"POST\":\n # create a form instance and populate it with data from the request:\n form = TrainingForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n form.save()\n\n # TODO: refactor this into forms:\n identifier = form.cleaned_data[\"training_identifier\"]\n if settings.TIAAS_SEND_EMAIL_TO:\n send_mail(\n f\"New TIaaS Request ({identifier})\",\n (\n \"A new TIaaS request has been received. 
View it in the\"\n \" admin dashboard: \"\n f\"{settings.GALAXY_URL}/tiaas/admin/training/training/\"\n \"?processed__exact=UN\"\n ),\n settings.TIAAS_SEND_EMAIL_FROM,\n [settings.TIAAS_SEND_EMAIL_TO],\n fail_silently=True, # TODO should handle and log\n )\n if settings.TIAAS_SEND_EMAIL_TO_REQUESTER:\n send_mail(\n f\"TIaaS Request confirmation: ({identifier})\",\n (\n f'Dear {form.cleaned_data[\"name\"]},\\n\\n'\n \"Thanks for requesting a new TIaaS allocation.\\n\"\n \"We will contact you to let you know when your request\"\n \" has been reviewed.\\n\\n\"\n f\"Regards,\\nThe {settings.TIAAS_OWNER} team\"\n ),\n settings.TIAAS_SEND_EMAIL_FROM,\n [form.cleaned_data[\"email\"]],\n fail_silently=True, # TODO should handle and log\n )\n\n return HttpResponseRedirect(reverse(\"thanks\"))\n\n # Form was invalid\n logger.warning(f\"Location: {form.data['location']}\")\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = TrainingForm(initial=request.GET.dict())\n\n return render(request, \"training/register.html\", {\"form\": form})\n\n\ndef about(request):\n return render(request, \"training/about.html\")\n\n\ndef thanks(request):\n return render(request, \"training/thanks.html\")\n\n\ndef dashboard_example(request):\n \"\"\"Show the example training dashboard page.\"\"\"\n return render(request, \"training/dashboard-example.html\")\n\n\ndef stats_csv(request):\n data = \"name,code,pop\\n\"\n trainings = Training.objects.exclude(training_identifier=\"test\").filter(\n processed=\"AP\"\n )\n locations = collections.Counter()\n codes = {}\n\n for t in trainings:\n for loc in t.location:\n locations[loc.alpha3] += 1\n codes[loc.alpha3] = loc.name\n\n for k, v in locations.items():\n data += f\"{codes[k]},{k},{v}\\n\"\n\n return HttpResponse(data, content_type=\"text/csv\")\n\n\ndef numbers_csv(request):\n data = \"id,start,end,location,use_gtn,attendance\\n\"\n\n trainings = Training.objects.exclude(training_identifier=\"test\").filter(\n processed=\"AP\"\n )\n for t in trainings:\n countries = [x.code for x in t.location]\n data += (\n \",\".join(\n map(\n str,\n [\n t.id,\n t.start,\n t.end,\n \"|\".join(countries),\n t.use_gtn,\n t.attendance,\n ],\n )\n )\n + \"\\n\"\n )\n\n return HttpResponse(data, content_type=\"text/csv\")\n\n\ndef trainings_for(trainings, year, month, day):\n # find trainings including this given day.\n if day == 0:\n return 0\n if year == 2020 and month == 1:\n print(day, [x for x in trainings if x.start <= date(year, month, day) <= x.end])\n\n return len([x for x in trainings if x.start <= date(year, month, day) <= x.end])\n\n\ndef calendar_view(request):\n \"\"\"Display scheduled events in an interactive calendar view.\"\"\"\n approved_trainings = (\n Training.objects.all()\n .exclude(training_identifier=\"test\")\n .filter(processed=\"AP\")\n .order_by(\"start\")\n )\n return render(\n request,\n \"training/calendar.html\",\n {\n \"events\": approved_trainings,\n \"admin_user\": request.user.is_staff,\n \"n_events\": approved_trainings.count(),\n },\n )\n\n\ndef calendar_api(request):\n events = (\n Training.objects.all().exclude(training_identifier=\"test\").order_by(\"start\")\n )\n\n results = {\"events\": []}\n is_super = request.user.is_superuser\n\n colors = [\n \"red\",\n \"orange\",\n \"pink\",\n \"green\",\n \"blue\",\n \"purple\",\n \"grey\",\n \"brown\",\n \"black\",\n ]\n\n for i, event in enumerate(events):\n event_data = {\n \"name\": \"\",\n \"start\": event.start.strftime(\"%Y-%m-%d\"),\n \"end\": 
event.end.strftime(\"%Y-%m-%d\"),\n \"color\": \"blue\",\n }\n\n if is_super:\n event_data.update(\n {\n \"title\": event.title,\n \"color\": colors[i % len(colors)],\n \"organiser\": event.name,\n \"email\": event.email,\n \"description\": event.description,\n \"website\": event.website,\n \"location\": event.str_locations,\n \"use_gtn\": event.use_gtn,\n \"attendance\": event.attendance,\n \"identifier\": event.training_identifier,\n \"other_requests\": event.other_requests,\n }\n )\n\n results[\"events\"].append(event_data)\n return JsonResponse(results)\n\n\ndef stats(request):\n trainings = Training.objects.exclude(\n training_identifier=\"test\"\n ) # Exclude the 'test' group from showing up in calendar\n\n approved = trainings.filter(processed=\"AP\")\n\n waiting = trainings.filter(processed=\"UN\")\n\n if approved:\n days = sum(\n (end - start).days for end, start in approved.values_list(\"end\", \"start\")\n )\n students = sum(approved.values_list(\"attendance\", flat=True))\n today = date.today()\n current = approved.filter(start__lte=today, end__gte=today)\n earliest = min(approved.values_list(\"start\", flat=True))\n countries_lookup = dict(countries)\n locations = collections.Counter()\n for locs in approved.values_list(\"location\", flat=True):\n for loc in locs.split(\",\"):\n locations[countries_lookup[loc]] += 1\n\n data = {\n \"trainings\": trainings,\n \"waiting\": waiting.count(),\n \"approved\": approved.count(),\n \"days\": days,\n \"students\": students,\n \"locations\": dict(locations.items()),\n \"current_trainings\": current.count(),\n \"earliest\": earliest,\n }\n else:\n data = {\n \"trainings\": trainings,\n \"waiting\": waiting.count(),\n \"approved\": 0,\n \"days\": 0,\n \"students\": 0,\n \"locations\": {},\n \"current_trainings\": 0,\n \"earliest\": None,\n }\n\n return render(request, \"training/stats.html\", data)\n\n\ndef join(request, training_id):\n training_id = training_id.lower()\n trainings = Training.objects.filter(\n training_identifier__iexact=training_id, processed=\"AP\",\n )\n\n # If we don't know this training, reject\n if not trainings.count():\n return render(\n request,\n \"training/error.html\",\n {\n \"message\": (\n \"An approved Training event with ID\"\n f' \"{training_id}\" was not found.'\n ),\n },\n )\n\n event = trainings.first()\n\n # If the event has not yet started, return \"come back soon\"\n tz_flexible_now = (\n datetime.now()\n + timedelta(hours=settings.TIAAS_JOIN_TRAINING_FLEX_HOURS)\n )\n if event.start > tz_flexible_now.date():\n return render(\n request,\n \"training/early.html\",\n {\n \"start_date\": event.start.strftime(\"%d-%m-%Y\"),\n \"timezone\": settings.TIME_ZONE,\n \"host\": request.META.get(\"HTTP_HOST\", None),\n },\n )\n\n # If the event has already finished, reject request\n tz_flexible_now = (\n datetime.now()\n - timedelta(hours=settings.TIAAS_JOIN_TRAINING_FLEX_HOURS)\n )\n if event.end < tz_flexible_now.date():\n return render(\n request,\n \"training/error.html\",\n {\n \"message\": (\n \"Sorry, this event finished on\"\n f\" {event.end.strftime('%Y-%m-%d')}.\"\n \" If you think this is a mistake, please contact Galaxy\"\n \" support.\"\n ),\n \"host\": request.META.get(\"HTTP_HOST\", None),\n },\n )\n\n user = authenticate(request)\n if not user:\n return render(\n request,\n \"training/error.html\",\n {\n \"message\": \"Please login to Galaxy first!\",\n \"host\": request.META.get(\"HTTP_HOST\", None),\n },\n )\n\n training_role_name = \"training-%s\" % training_id\n # Otherwise, training is OK + 
they are a valid user.\n # We need to add them to the role\n\n ################\n # BEGIN UNSAFE #\n ################\n # Create role if need to.\n current_roles = list(get_roles())\n role_exists = any([training_role_name == x[\"name\"] for x in current_roles])\n\n if not role_exists:\n role_id = create_role(training_role_name)\n else:\n role_id = [x for x in current_roles if training_role_name == x[\"name\"]][0][\"id\"]\n\n # Create group if need to\n current_groups = list(get_groups())\n group_exists = any([training_role_name == x[\"name\"] for x in current_groups])\n if not group_exists:\n group_id = create_group(training_role_name, role_id)\n else:\n group_id = [x for x in current_groups if training_role_name == x[\"name\"]][0][\n \"id\"\n ]\n\n ################\n # END UNSAFE #\n ################\n\n add_group_user(group_id, user)\n\n return render(\n request,\n \"training/join.html\",\n {\"training\": event, \"host\": request.META.get(\"HTTP_HOST\", None)},\n )\n\n\ndef _summarize(d):\n state_summary = {}\n for item in d:\n if item[\"state\"] not in state_summary:\n state_summary[item[\"state\"]] = 0\n if \"__total__\" not in state_summary:\n # div 0\n state_summary[\"__total__\"] = 1\n\n state_summary[item[\"state\"]] += 1\n state_summary[\"__total__\"] += 1\n return state_summary\n\n\ndef status(request, training_id):\n training_id = training_id.lower()\n\n trainings = Training.objects.all().filter(training_identifier__iexact=training_id)\n any_approved = any([t.processed == \"AP\" for t in trainings])\n\n if len(trainings) == 0 or not any_approved:\n return render(\n request,\n \"training/error.html\",\n {\n \"message\": \"Training does not exist\",\n \"host\": request.META.get(\"HTTP_HOST\", None),\n },\n )\n\n refresh = request.GET.get(\"refresh\", False) is not False\n # hours param\n hours = int(request.GET.get(\"hours\", 3))\n if hours > 64:\n hours = 64\n elif hours < 1:\n hours = 1\n\n jobs = list(get_jobs(training_id, hours))\n wfs = list(get_workflow_invocations(training_id, hours))\n users = list(get_users(training_id))\n jobs_overview = {}\n for job in jobs:\n tool_id = job[\"tool_id\"]\n if tool_id not in jobs_overview:\n jobs_overview[tool_id] = {\n \"ok\": 0,\n \"new\": 0,\n \"error\": 0,\n \"queued\": 0,\n \"running\": 0,\n # prevent div 0\n \"__total__\": 1,\n }\n\n if job[\"state\"] in (\"ok\", \"new\", \"error\", \"queued\", \"running\"):\n jobs_overview[tool_id][job[\"state\"]] += 1\n jobs_overview[tool_id][\"__total__\"] += 1\n\n state_summary = _summarize(jobs)\n wf_state_summary = _summarize(wfs)\n\n for job, data in jobs_overview.items():\n data[\"ok_percent\"] = data[\"ok\"] / len(jobs)\n data[\"new_percent\"] = data[\"new\"] / len(jobs)\n data[\"error_percent\"] = data[\"error\"] / len(jobs)\n data[\"queued_percent\"] = data[\"queued\"] / len(jobs)\n data[\"running_percent\"] = data[\"running\"] / len(jobs)\n\n return render(\n request,\n \"training/status.html\",\n {\n \"training\": trainings[0],\n \"jobs\": jobs,\n \"wfs\": wfs,\n \"jobs_overview\": jobs_overview,\n \"users\": users,\n \"state\": state_summary,\n \"wf_state\": wf_state_summary,\n \"refresh\": refresh,\n },\n )\n","repo_name":"galaxyproject/tiaas2","sub_path":"training/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13684,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"34595177491","text":"from dash import Dash, html, dcc, Input, Output, State, dash_table\r\nimport plotly.express as px\r\nimport 
plotly.graph_objects as go\r\nimport pandas as pd\r\nimport dash_bootstrap_components as dbc\r\nfrom dash_bootstrap_templates import ThemeSwitchAIO\r\nimport os\r\nimport locale\r\n\r\nlocale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')\r\n\r\napp = Dash(__name__)\r\napp.scripts.config.serve_locally = True\r\nserver = app.server\r\n\r\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\r\n\r\n# ===== style ======= #\r\n\r\ntab_card = {'height': '100%'}\r\n\r\n# MAIN CONFIG serve para configurar de maneira geral todos os graficos de uma só vez!\r\n\r\nmain_config = {\r\n \"hovermode\": \"x unified\",\r\n \"legend\": {\"yanchor\": \"top\",\r\n \"y\": 0.9,\r\n \"xanchor\": \"left\",\r\n \"x\": 0.1,\r\n \"title\": {\"text\": None},\r\n \"font\": {\"color\": \"white\"},\r\n \"bgcolor\": \"rgba(0,0,0,0.5)\"},\r\n \"margin\": {\"l\": 10, \"r\": 10, \"t\": 10, \"b\": 10}\r\n}\r\n\r\nconfig_graph = {\"displayModeBar\": False, \"showTips\": False}\r\n\r\ntemplate_theme1 = \"flatly\"\r\ntemplate_theme2 = \"darkly\"\r\nurl_theme1 = dbc.themes.FLATLY\r\nurl_theme2 = dbc.themes.DARKLY\r\n\r\n# ============================ IMPORTANDO OS DADOS ============================ #\r\ndados = pd.read_excel(f'{current_dir}\\\\empenhos em aberto.xlsx', sheet_name='SET2023, Saldo - R$ (Conta Con')\r\ncred_disponivel = pd.read_excel(f'{current_dir}\\\\Crédito disponivel UGR.xlsx')\r\n\r\n# ============================ TRATAMENTO DE DADOS ============================ #\r\n\r\ndados = dados.drop(0, axis = 0).reset_index(drop = True)\r\ndados.columns = dados.iloc[0, :]\r\ndados = dados.drop(0, axis = 0).reset_index(drop = True)\r\ndados = dados.rename(columns={'631510000' : '631510000 RPNP A LIQ BLOQUEADOS P/DECRETO_93872/86'})\r\ndados = dados.rename(columns={'631100000' : '631100000 RP NAO PROCESSADOS A LIQUIDAR'})\r\ndados = dados.rename(columns={'622920101' : '622920101 EMPENHOS A LIQUIDAR'})\r\ndados = dados.drop(0, axis = 0)\r\nempenhos = dados.loc[dados['UG Responsável'] == 'GRUPAMENTO DE FUZILEIROS NAVAIS DO RIO GRANDE', :]\r\nempenhos = empenhos.loc[empenhos['PI'] != 'B44101002DD' ,:]\r\nempenhos = empenhos.fillna(0)\r\n\r\nindex_comeco = cred_disponivel.loc[cred_disponivel['Crédito disponivel UGR'] == 'Mês Lançamento: SET/2023'].index\r\nindex_comeco = int(index_comeco.values[0])\r\ncred_disponivel = cred_disponivel[index_comeco:]\r\ncred_disponivel['Unnamed: 8'] = cred_disponivel['Unnamed: 8'].fillna(0)\r\ncred_disponivel['Unnamed: 8'] = cred_disponivel['Unnamed: 8'].astype(int)\r\ncred_disponivel['Unnamed: 8'] = cred_disponivel['Unnamed: 8'].astype(str)\r\ncred_disponivel['Unnamed: 1'] = cred_disponivel['Unnamed: 1'] + ' - ' + cred_disponivel['Unnamed: 2']\r\ncred_disponivel['Unnamed: 3'] = cred_disponivel['Unnamed: 3'] + ' - ' + cred_disponivel['Unnamed: 4']\r\ncred_disponivel['Unnamed: 10'] = cred_disponivel['Unnamed: 10'] + ' - ' + cred_disponivel['Unnamed: 11']\r\ncred_disponivel['Unnamed: 12'] = cred_disponivel['Unnamed: 12'] + ' - ' + cred_disponivel['Unnamed: 13']\r\ncred_disponivel['Unnamed: 6'] = cred_disponivel['Unnamed: 6'] + cred_disponivel['Unnamed: 7'] + '000' + cred_disponivel['Unnamed: 8'] + ' - ' + cred_disponivel['Unnamed: 9']\r\ncred_disponivel = cred_disponivel.drop(['Unnamed: 2', 'Unnamed: 4', 'Unnamed: 11', 'Unnamed: 13', 'Unnamed: 7', 'Unnamed: 8', 'Unnamed: 9'], axis = 1).reset_index(drop = True)\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 1' : 'Unidade Orçamentária'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 3' : 'UG 
Responsável'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 5' : 'PTRES'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 6' : 'Plano Orçamentário'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 10' : 'PI'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 12' : 'Natureza de Despesa'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Unnamed: 14' : 'Crédito Disponível'})\r\ncred_disponivel = cred_disponivel.rename(columns={'Crédito disponivel UGR' : 'Conta Corrente'})\r\ncred_disponivel = cred_disponivel.drop(0, axis=0).reset_index(drop= True)\r\ncred_disponivel = cred_disponivel.loc[cred_disponivel['UG Responsável'] == '785200 - GRUPAMENTO DE FUZILEIROS NAVAIS DO RIO GRANDE'].reset_index(drop= True)\r\ncred_disponivel = cred_disponivel.loc[cred_disponivel['UG Responsável'] == '785200 - GRUPAMENTO DE FUZILEIROS NAVAIS DO RIO GRANDE'].reset_index(drop= True)\r\ncred_disponivel['Crédito Disponível'] = cred_disponivel['Crédito Disponível'].astype(float)\r\ncred_por_natureza = cred_disponivel.groupby(['Conta Corrente', 'PI', 'Natureza de Despesa']).sum().reset_index()\r\n\r\n# ============================ PLOTANDO OS GRÁFICOS ============================ #\r\n\r\ndf_grafico1 = cred_por_natureza\r\ndf_grafico1['PI'] = df_grafico1['PI'].str[:11]\r\ndf_grafico1['Natureza de Despesa'] = df_grafico1['Natureza de Despesa'].str[7 + 1:]\r\ngrafico1 = px.bar(x=df_grafico1['PI'] + ' ' + df_grafico1['Natureza de Despesa'],\r\n y=df_grafico1['Crédito Disponível'])\r\ngrafico1.update_layout(xaxis_title = 'Conta corrente',\r\n yaxis_title = 'Crédito Disponível',\r\n xaxis_ticktext=cred_por_natureza['PI'].str[:11]\r\n )\r\n\r\n\r\n# ========= Layout =========== #\r\napp.layout = dbc.Container(children=[\r\n\r\n # ========== Row 1 ============\r\n\r\n dbc.Row([\r\n\r\n # Gráfico 1\r\n dbc.Col([\r\n dbc.Card([\r\n dbc.CardBody([\r\n dbc.Row([\r\n dbc.Col([\r\n html.Legend(\"Gestão de crédito - GPTFNRG\")\r\n ], sm=10),\r\n dbc.Col([\r\n html.I(className='fas fa-chart-bar',\r\n style={'font-size': '300%'})\r\n ], sm=4, align='center')\r\n ]),\r\n dbc.Row([\r\n dbc.Col([\r\n ThemeSwitchAIO(aio_id=\"theme\", themes=[\r\n url_theme1, url_theme2]),\r\n html.Legend(\"Controle das contas correntes\")\r\n ])\r\n ], style={'margin-top': '10px'}),\r\n ])\r\n ], style=tab_card),\r\n ], sm=4, lg=2),\r\n\r\n # Gráfico 2\r\n dbc.Col([\r\n dbc.Card([\r\n dbc.CardBody([\r\n dbc.Row(\r\n dbc.Col(\r\n html.Legend('Saldo disponível em cada conta corrente')\r\n )\r\n ),\r\n dbc.Row([\r\n dbc.Col([\r\n dcc.Graph(id='grafico1', className='dbc', config=config_graph, figure=grafico1)\r\n ], sm=12, md=12)\r\n ])\r\n ])\r\n ], style=tab_card)\r\n ], sm=12, lg=10)\r\n\r\n ], className='g-2 my-auto', style={'margin-top': '7px'}),\r\n\r\n # ========== Row 2 ============\r\n\r\n dbc.Row([\r\n\r\n dbc.Col([\r\n dbc.Card(\r\n [\r\n dbc.CardBody(\r\n [\r\n dash_table.DataTable(\r\n style_data={\r\n 'whiteSpace': 'normal',\r\n 'height': 'auto',\r\n },\r\n data=cred_disponivel.to_dict('records'),\r\n columns=[{'id': c, 'name': c} for c in cred_disponivel.columns]),\r\n ]\r\n )\r\n ],\r\n style=tab_card, className='w-100')\r\n ], sm=12, lg=12)\r\n\r\n ], className='g-2 my-auto', style={'margin-top': '7px'})\r\n\r\n], fluid=True, style={'height': '100vh'})\r\n\r\n# =========== callbacks ===========\r\n\r\n\r\nif __name__ == '__main__':\r\n 
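# Editor's note (assumption, not from the source): debug=True is meant for local development.\r\n    # Since the module already exposes server = app.server, a production deployment could use a\r\n    # WSGI server instead, e.g.: waitress-serve --listen=0.0.0.0:8050 dashboardgerentecredito:server\r\n    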
app.run_server(debug=True)","repo_name":"PedroRezende21/exemplos_dashboard","sub_path":"dashboardgerentecredito.py","file_name":"dashboardgerentecredito.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31060003790","text":"import os\r\nimport argparse\r\nimport numpy as np\r\n\r\nimport cv2\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport torch.backends.cudnn as cudnn\r\nimport torchvision.utils as vutils\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom utils import *\r\nfrom model import define_G\r\nfrom data1 import Dataset \r\nfrom perceptual_network_loss import perceptual_loss, extract_embedding\r\n\r\nfrom app import main_matcher\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument('--gpu_ids', default='0', type=str)\r\nparser.add_argument('--workers', default=8, type=int)\r\nparser.add_argument('--lr', default=0.0002, type=float) #used 0.0002 usar depois 0,1\r\nparser.add_argument('--batch_size', default=8, type=int) #testar com 128\r\nparser.add_argument('--epochs', default=300, type=int)\r\nparser.add_argument('--print_iter', default=20, type=int, help='print frequency')\r\nparser.add_argument('--save_epoch', default=1, type=int)\r\nparser.add_argument('--output_path', default='./results', type=str)\r\nparser.add_argument('--output1_path', default='./test', type=str)\r\n\r\n\r\nparser.add_argument('--img_root_train', default='C:\\\\Users\\\\ana_l\\\\Desktop\\\\TCC-Fingerprint\\\\2002\\\\DB1_A-opticalsensor', type=str)\r\nparser.add_argument('--img_root_test', default='C:\\\\Users\\\\ana_l\\\\Desktop\\\\TCC-Fingerprint\\\\2002\\\\DB1_B-opticalsensor', type=str)\r\n\r\n\r\ndef main():\r\n global args\r\n args = parser.parse_args()\r\n #print(args)\r\n \r\n minloss = 100\r\n minepoch = 0\r\n \r\n graph_loss = []\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\r\n cudnn.benchmark = True\r\n\r\n if not os.path.exists(args.output_path):\r\n os.makedirs(args.output_path)\r\n \r\n if not os.path.exists(args.output1_path):\r\n os.makedirs(args.output1_path)\r\n\r\n # generator\r\n encoder, decoder = define_G(input_dim=1, output_dim=1, ndf=32)\r\n\r\n\r\n # dataset\r\n train_loader = torch.utils.data.DataLoader(\r\n Dataset(args, \"train\"), batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)\r\n test_loader = torch.utils.data.DataLoader(\r\n Dataset(args, \"test\"), batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)\r\n # optimizer\r\n optimizer = optim.Adam(list(encoder.parameters()) +\r\n list(decoder.parameters()), lr=args.lr, betas=(0.5, 0.999))\r\n\r\n # criterion\r\n criterionPix = torch.nn.L1Loss().cuda()\r\n criterionL2 = torch.nn.MSELoss().cuda()\r\n\r\n # train\r\n for epoch in range(args.epochs):\r\n\r\n # creat random index\r\n arange = torch.arange(args.batch_size).cuda()\r\n idx = torch.randperm(args.batch_size).cuda()\r\n while 0.0 in (idx - arange):\r\n idx = torch.randperm(args.batch_size).cuda()\r\n\r\n for iteration, data in enumerate(train_loader, start=1):\r\n # get data\r\n img = Variable(data[\"img\"].cuda())\r\n batch_size = img.size(0)\r\n if batch_size < args.batch_size:\r\n continue\r\n\r\n \r\n noise = torch.zeros(batch_size, 256).normal_(0, 1).cuda()\r\n z_img = encoder(img, \"enc\")\r\n\r\n style_img = encoder(z_img, \"style\")\r\n 
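# (editor's note) assign_adain_params, presumably provided by utils, writes the per-image\r\n            # style vector into the decoder's AdaIN (adaptive instance normalization) layers before decoding.\r\n            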
assign_adain_params(style_img, decoder)\r\n            fake_img = decoder(torch.cat([noise, z_img], dim=1))\r\n            # NOTE (editor's fix, assumption): embedding_orig/embedding_out were referenced below\r\n            # but never defined (a NameError at runtime). They are computed here with\r\n            # extract_embedding from perceptual_network_loss, which the imports suggest was the intent.\r\n            embedding_orig = extract_embedding(img)\r\n            embedding_out = extract_embedding(fake_img)\r\n            \r\n            # all losses\r\n            loss_pix = 100 * (criterionPix(fake_img, img))\r\n            loss_MSE = criterionL2(fake_img, img)\r\n            loss_perceptual = perceptual_loss(embedding_orig, embedding_out)\r\n            \r\n            loss = loss_pix + loss_MSE + loss_perceptual\r\n            \r\n\r\n            optimizer.zero_grad()\r\n            loss.backward()\r\n            optimizer.step()\r\n            \r\n            \r\n            info = \"====> Epoch[{}][{}/{}] | \".format(epoch, iteration, len(train_loader))\r\n            #info += \"Loss pix: {:4.2f}, Total Loss: {:4.2f}\".format(loss_pix.item(), loss.item())\r\n            print(info)\r\n            '''\r\n            if epoch>200:\r\n                vutils.save_image(torch.cat([img, fake_img], dim=0).data,\r\n                            \"{}/Epoch_{:03d}_Iter_{:06d}_img.tif\".format(args.output_path, epoch, iteration), nrow=batch_size)\r\n            \r\n            if epoch % args.save_epoch == 0:\r\n                save_checkpoint(encoder, epoch, \"encoder\")\r\n                save_checkpoint(decoder, epoch, \"decoder\")\r\n            '''    \r\n            '''\r\n            if iteration % args.print_iter == 0:\r\n                info = \"====> Epoch[{}][{}/{}] | \".format(epoch, iteration, len(train_loader))\r\n                #info += \"Loss: pix: {:4.2f} ort: {:4.2f} | Ang-real rec: {:4.2f} pair: {:4.2f} | Ang-fake rec: {:4.2f} pair: {:4.2f}\".format(\r\n                #    loss_pix.item(), loss_ort.item(), real_ang_rec.item(), real_ang_pair.item(), fake_ang_rec.item(), fake_ang_pair.item())\r\n                info += \"Loss pix: {:4.2f}, Total Loss: {:4.2f}\".format(loss_pix.item(), loss.item())\r\n                print(info)\r\n\r\n            # save images\r\n            if iteration % 500 == 0:\r\n                vutils.save_image(torch.cat([img, fake_img], dim=0).data,\r\n                            \"{}/Epoch_{:03d}_Iter_{:06d}_img.tif\".format(args.output_path, epoch, iteration), nrow=batch_size)\r\n            '''\r\n            vutils.save_image(img[0].data, \r\n                             \"{}/orig_Epoch_{:03d}_Iter_{:06d}_img.tif\".format(args.output_path, epoch, iteration), nrow = batch_size)\r\n            vutils.save_image(fake_img[0].data,\r\n                             \"{}/fake_Epoch_{:03d}_Iter_{:06d}_img.tif\".format(args.output_path, epoch, iteration), nrow = batch_size)\r\n            #vutils.save_image(torch.cat([img, fake_img], dim=0).data,\r\n            #                 \"{}/Epoch_{:03d}_Iter_{:06d}_img.tif\".format(args.output_path, epoch, iteration), nrow=batch_size)\r\n        \r\n        if epoch % args.save_epoch == 0:\r\n            save_checkpoint(encoder, epoch, \"encoder\")\r\n            save_checkpoint(decoder, epoch, \"decoder\")\r\n        \r\n\r\n        if loss.item() < minloss:\r\n            minloss = loss.item()\r\n            minepoch = epoch\r\n        \r\n        \r\n        \r\n        graph_loss.append(loss.item())    \r\n    # save model\r\n    \r\n    \r\n    with torch.no_grad():\r\n        for iteration, data in enumerate(test_loader, start=1):\r\n            \r\n            img = Variable(data[\"img\"].cuda())\r\n            batch_size = img.size(0)\r\n            if batch_size < args.batch_size:\r\n                continue\r\n            \r\n            # changes\r\n            noise = torch.zeros(batch_size, 256).normal_(0, 1).cuda()\r\n            \r\n            z_img = encoder(img, \"enc\")\r\n            \r\n            fake_img = decoder(torch.cat([noise, z_img], dim=1))\r\n            #fake_img = decoder(z_img)\r\n            for j in range(batch_size):\r\n                \r\n                vutils.save_image(img[j].data,\r\n                                \"{}/Original-{}-Test_Iter_{:06d}_img.tif\".format(args.output1_path, j, iteration), nrow=batch_size)\r\n                vutils.save_image(fake_img[j].data,\r\n                                \"{}/Fake-{}-Test_Iter_{:06d}_img.tif\".format(args.output1_path, j, iteration), nrow=batch_size)\r\n                #vutils.save_image(torch.cat([img, fake_img], dim=0).data,\r\n                #                \"{}/Test_Iter_{:06d}_img.tif\".format(args.output1_path, iteration), nrow=batch_size)\r\n            \r\n    print(\"Loss minimum {:4.2f} at {:03d}\".format(minloss, minepoch))\r\n    print(graph_loss)\r\n    \r\n    \r\n    plt.figure()\r\n    plt.plot(graph_loss)\r\n    plt.title('Model Train Loss')\r\n    
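# Optional sketch (editor's addition, not in the source): the raw loss history could also be\r\n    # persisted for later analysis, e.g. np.savetxt('trainloss.csv', np.asarray(graph_loss), delimiter=',')\r\n    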
plt.ylabel('Loss')\r\n plt.xlabel('Epoch')\r\n plt.legend('train', loc='upper left')\r\n plt.savefig('trainloss.png')\r\n plt.show()\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n","repo_name":"analeticiagarcez/Autoencoder-based-methodology-for-spoofing-fingerprints-generation","sub_path":"main_perceptual.py","file_name":"main_perceptual.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21087953301","text":"from typing import List\n\nfrom db.database import execute_query, fetch_data\nfrom model.reaction import Reaction\n\n\ndef send_reaction(room_id: int,\n player_id: int,\n reaction_id: int):\n query = (\n \"INSERT INTO send_reaction (room_id, player_id, reaction_id) \"\n \"VALUES (%s, %s, %s)\"\n )\n execute_query(query, (room_id, player_id, reaction_id))\n\n\ndef fetch_reaction(reaction_id: int, player_id: int) -> Reaction:\n query = (\n \"SELECT id, name \"\n \"FROM reaction \"\n \"WHERE id = %s\"\n )\n result = fetch_data(query, (reaction_id,))\n\n if result:\n reaction = Reaction(result[0][0], result[0][1], player_id)\n return reaction\n else:\n return None\n","repo_name":"KITA-DS12/koi-jan","sub_path":"server/repository/db/reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26837037434","text":"\n\nclass AssertMethod:\n\n def __init__(self, actual_result, hope_result):\n self.actual_result = str(actual_result)\n self.result = self.asserts = None\n print('hope_result:', hope_result)\n if ',' in hope_result:\n self.asserts = hope_result.split(',')\n else:\n try:\n self.assertmethod, self.hoperesult = hope_result.split(':', 1)\n except Exception as e:\n print('期望结果异常:', e)\n self.assertmethod = '包含'\n self.hoperesult = 'ewedew@$!!##$@#$!@'\n\n # print(\"self.assertmethod: %s, self.hoperesult: %s\" % (self.assertmethod, self.hoperesult))\n # self.actual_result = str(self.actual_result)\n # print('self.actual_result: ', self.actual_result)\n\n def assert_method(self):\n try:\n if self.asserts:\n for _assert in self.asserts:\n assertmethod, hoperesult = _assert.split(':', 1)\n if '包含' in assertmethod:\n result = self.assert_in(hoperesult)\n elif '等于' in assertmethod:\n result = self.assert_eq(hoperesult)\n else:\n result = ''\n if result == '测试失败':\n return '测试失败'\n return '测试成功'\n else:\n\n if '不包含' in self.assertmethod:\n result = self.not_assert_in(self.hoperesult)\n elif '不等于' in self.assertmethod:\n result = self.not_assert_eq(self.hoperesult)\n elif '等于' in self.assertmethod:\n result = self.assert_eq(self.hoperesult)\n elif '包含' in self.assertmethod:\n result = self.assert_in(self.hoperesult)\n elif '大于' in self.assertmethod:\n result = self.assert_gt(self.hoperesult)\n else:\n result = ''\n return result\n except Exception as e:\n print('期望结果异常:', e)\n return '期望结果解析错误'\n\n def assert_eq(self, hoperesult):\n if self.actual_result == hoperesult: # 返回结果与期望结果相等\n self.result = '测试成功'\n else:\n self.result = '测试失败'\n return self.result\n\n def assert_in(self, hoperesult):\n if hoperesult in self.actual_result: # 期望结果在返回结果中\n self.result = '测试成功'\n else:\n self.result = '测试失败'\n print('self.result: ', self.result)\n return self.result\n\n def not_assert_in(self, hoperesult):\n if hoperesult not in self.actual_result: # 期望结果不在返回结果中\n self.result = '测试成功'\n else:\n self.result = '测试失败'\n print('self.result: ', self.result)\n return 
self.result\n\n def not_assert_eq(self, hoperesult):\n if self.actual_result != hoperesult: # 返回结果与期望结果相等\n self.result = '测试成功'\n else:\n self.result = '测试失败'\n return self.result\n\n def assert_gt(self, hoperesult):\n if self.actual_result > hoperesult: # 返回结果与期望结果相等\n self.result = '测试成功'\n else:\n self.result = '测试失败'\n return self.result\n","repo_name":"yangleiqing0/MyVueFlask","sub_path":"common/com/assert_method.py","file_name":"assert_method.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9818534550","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport collections\n\n\nclass SoftPooling3D(torch.nn.Module):\n def __init__(self,kernel_size,strides=None,padding=0,ceil_mode = False,count_include_pad = True,divisor_override = None):\n super(SoftPooling3D, self).__init__()\n self.avgpool = torch.nn.AvgPool3d(kernel_size,strides,padding,ceil_mode,count_include_pad,divisor_override)\n def forward(self, x):\n x_exp = torch.exp(x)\n x_exp_pool = self.avgpool(x_exp)\n x = self.avgpool(x_exp*x)\n return x/x_exp_pool\n\n\nclass Mish(nn.Module):\n def __init__(self):\n super(Mish, self).__init__()\n\n def forward(self, x):\n return x * torch.tanh(F.softplus(x))\n\n\nclass ConvLayer(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel=3, stride=1, dropout=0.1):\n super().__init__()\n self.add_module('conv', nn.Conv3d(in_channels, out_channels, kernel_size=kernel,\n stride=stride, padding=kernel//2, bias = False))\n self.add_module('norm', nn.BatchNorm3d(out_channels))\n self.add_module('relu', Mish())\n\n #print(kernel, 'x', kernel, 'x', in_channels, 'x', out_channels)\n\n def forward(self, x):\n return super().forward(x)\n\n\nclass HarDBlock(nn.Module):\n def get_link(self, layer, base_ch, growth_rate, grmul):\n if layer == 0:\n return base_ch, 0, []\n out_channels = growth_rate\n link = []\n for i in range(10):\n dv = 2 ** i\n if layer % dv == 0:\n k = layer - dv\n link.append(k)\n if i > 0:\n out_channels *= grmul\n out_channels = int(int(out_channels + 1) / 2) * 2\n in_channels = 0\n for i in link:\n ch,_,_ = self.get_link(i, base_ch, growth_rate, grmul)\n in_channels += ch\n return out_channels, in_channels, link\n\n def get_out_ch(self):\n return self.out_channels\n \n def __init__(self, in_channels, growth_rate, grmul, n_layers, keepBase=False, residual_out=False):\n super().__init__()\n self.in_channels = in_channels\n self.growth_rate = growth_rate\n self.grmul = grmul\n self.n_layers = n_layers\n self.keepBase = keepBase\n self.links = []\n layers_ = []\n self.out_channels = 0 # if upsample else in_channels\n for i in range(n_layers):\n outch, inch, link = self.get_link(i+1, in_channels, growth_rate, grmul)\n self.links.append(link)\n use_relu = residual_out\n layers_.append(ConvLayer(inch, outch))\n if (i % 2 == 0) or (i == n_layers - 1):\n self.out_channels += outch\n #print(\"Blk out =\",self.out_channels)\n self.layers = nn.ModuleList(layers_)\n\n\n def forward(self, x):\n layers_ = [x]\n for layer in range(len(self.layers)):\n link = self.links[layer]\n tin = []\n for i in link:\n tin.append(layers_[i])\n if len(tin) > 1:\n x = torch.cat(tin, 1)\n else:\n x = tin[0]\n out = self.layers[layer](x)\n layers_.append(out)\n t = len(layers_)\n out_ = []\n for i in range(t):\n if (i == 0 and self.keepBase) or \\\n (i == t-1) or (i%2 == 1):\n out_.append(layers_[i])\n out = torch.cat(out_, 1)\n return 
out\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):\n super(BasicConv2d, self).__init__()\n \n \n self.conv = nn.Conv3d(in_planes, out_planes,\n kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=False)\n self.bn = nn.BatchNorm3d(out_planes)\n self.relu = Mish()\n #nn.ReLU(inplace=True)\n \n\n def forward(self, x):\n \n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass TransitionUp(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n #print(\"upsample\",in_channels, out_channels)\n\n def forward(self, x, skip, concat=True):\n \n out = F.interpolate(\n x,\n size=(skip.size(2), skip.size(3), skip.size(4)),\n mode=\"trilinear\",\n align_corners=True,\n )\n if concat: \n out = torch.cat([out, skip], 1)\n \n return out\n\nclass hardnet4(nn.Module):\n def __init__(self, n_classes=3, deep_supervision=True):\n super(hardnet4, self).__init__()\n self.deep_supervision = deep_supervision\n #'''\n first_ch = [16,32,32,64]\n ch_list = [ 128, 256, 320, 320, 1024]\n grmul = 1.7\n gr = [ 14, 16, 20, 20,160]\n n_layers = [ 8, 16, 16 ,16]#, 4]\n \n blks = len(n_layers) \n self.shortcut_layers = []\n\n self.base = nn.ModuleList([])\n self.base.append (ConvLayer(in_channels=4, out_channels=first_ch[2], kernel=3,stride=2) )\n self.base.append ( ConvLayer(first_ch[2], first_ch[3], kernel=3) )\n skip_connection_channel_counts = []\n ch = first_ch[3]\n for i in range(blks):\n \n blk = HarDBlock(ch, gr[i], grmul, n_layers[i])\n ch = blk.get_out_ch()\n skip_connection_channel_counts.append(ch)\n self.base.append ( blk )\n if i < blks-1:\n self.shortcut_layers.append(len(self.base)-1)\n\n self.base.append ( ConvLayer(ch, ch_list[i], kernel=1) )\n ch = ch_list[i]\n #######################################\n if i < blks-1: \n self.base.append ( SoftPooling3D(kernel_size=2, strides=2) )\n #self.base.append ( nn.AvgPool3d(kernel_size=2, stride=2) ) \n\n cur_channels_count = ch\n prev_block_channels = ch\n n_blocks = blks-1\n self.n_blocks = n_blocks\n \n ######################\n #deep_sup\n ######################\n #'''\n if self.deep_supervision:\n self.deep_bottom = nn.Sequential(\n nn.Conv3d(320, n_classes, kernel_size=1, stride=1, bias=True),\n nn.Upsample(scale_factor=16, mode=\"trilinear\", align_corners=True))\n\n\n self.deep_bottom2 = nn.Sequential(\n nn.Conv3d(328, n_classes, kernel_size=1, stride=1, bias=True),\n nn.Upsample(scale_factor=8, mode=\"trilinear\", align_corners=True))\n \n\n self.deep3 = nn.Sequential(\n nn.Conv3d(262, n_classes, kernel_size=1, stride=1, bias=True),\n nn.Upsample(scale_factor=4, mode=\"trilinear\", align_corners=True))\n \n\n self.deep2 = nn.Sequential(\n nn.Conv3d(124, n_classes, kernel_size=1, stride=1, bias=True),\n nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True))\n \n #'''\n #######################\n # Upsampling path #\n #######################\n\n self.transUpBlocks = nn.ModuleList([])\n self.denseBlocksUp = nn.ModuleList([])\n self.conv1x1_up = nn.ModuleList([])\n \n for i in range(n_blocks-1,-1,-1):\n ##################################\n \n self.transUpBlocks.append(TransitionUp(prev_block_channels, prev_block_channels))\n cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]\n self.conv1x1_up.append(ConvLayer(cur_channels_count, cur_channels_count//2, kernel=1))\n cur_channels_count = cur_channels_count//2\n\n blk = HarDBlock(cur_channels_count, gr[i], grmul, 
n_layers[i])\n \n self.denseBlocksUp.append(blk)\n prev_block_channels = blk.get_out_ch()\n cur_channels_count = prev_block_channels\n\n\n self.finalConv = nn.Conv3d(in_channels=cur_channels_count,\n out_channels=n_classes, kernel_size=1, stride=1,\n padding=0, bias=True)\n\n def forward(self, x):\n skip_connections = []\n size_in = x.size()\n \n for i in range(len(self.base)):\n x = self.base[i](x)\n \n if i in self.shortcut_layers:\n skip_connections.append(x)\n out = x\n\n ##################################\n #skip = skip_connections.pop() \n ##################################\n for i in range(self.n_blocks):\n \n skip = skip_connections.pop()\n if i ==0:\n x4 = out;\n \n out = self.transUpBlocks[i](out, skip, True)\n out = self.conv1x1_up[i](out)\n out = self.denseBlocksUp[i](out)\n if i ==0:\n x3= out;\n elif i ==1:\n x2 = out;\n elif i ==2:\n x1 = out;\n \n out = self.finalConv(out)\n out = F.interpolate(\n out,\n size=(size_in[2], size_in[3],size_in[4]),\n mode=\"trilinear\",\n align_corners=True)\n \n #####################\n \n if self.deep_supervision:\n deeps = []\n for seg, deep in zip(\n [x4, x3, x2, x1],\n [self.deep_bottom, self.deep_bottom2, self.deep3, self.deep2]):\n deeps.append(deep(seg))\n \n return out, deeps\n \n \n \n return out \n\n\n# +\n#'''\nimport time\n\nif __name__ == \"__main__\":\n model = hardnet4()\n total_params = sum(p.numel() for p in model.parameters())\n #print('Parameters: ', total_params )\n device = torch.device(\"cuda\")\n model = model.to(device)\n model.eval()\n model.to(device)\n total_time = 0\n start_time = 0\n time_all = 0 \n #images = tor\n print(model)\n for i in range(100):\n images = torch.randn((1, 4, 128, 128,128)).to(\"cuda\")\n \n if i == 0:\n with torch.no_grad():\n output = model(images)\n else:\n torch.cuda.synchronize()\n start_time = time.perf_counter()\n\n with torch.no_grad():\n outputs = model(images)\n torch.cuda.synchronize()\n elapsed_time = time.perf_counter() - start_time\n\n\n print(\n \"Inference time \\\n (iter {0:5d}): {1:3.5f} fps\".format(\n i + 1, 1 / elapsed_time\n )\n )\n total_time += 1/elapsed_time\n print(total_time/100)\n#'''\n# -\n\n\n","repo_name":"HungYu-Wu/HarDNet-BTS","sub_path":"src/models/HarDNet_BTS.py","file_name":"HarDNet_BTS.py","file_ext":"py","file_size_in_byte":10937,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74511025544","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv('/Users/Noa/Documents/train.csv')\ndf.keys()\ndf['question_text'].head(10)\n\ndf[df['target']==1].shape[0]\n\n####imbalanced data!!!\n\n#import pandas_ml as pdml\nimport random\nfrom pandas import Series,DataFrame\n \nd_class0 = df[df['target'] == 0]\nd_class1 = df[df['target'] == 1]\n \nnumRows_class0 = len(d_class0.index)\nnumRows_class1 = len(d_class1.index)\n \n# downsample the class 0\n \nd_class0_downsampled = d_class0.sample(n=numRows_class1,replace=False, random_state=42)\n \n# new output data frame containing 1:1 class ratios\n \ndata_set = DataFrame()\ndata_set = data_set.append(d_class0_downsampled)\ndata_set = data_set.append(d_class1)\n \n# shuffle the rows\n \nnumRows_data_set =len(data_set.index)\ndata_set = data_set.sample(n=numRows_data_set, replace= 
False)\n","repo_name":"guypy/BusinessAnalyticsSeminar","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"121415242","text":"from django.urls import path\n\nfrom goodgames import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('register/', views.register, name='register'),\n path('login/', views.myLogin, name='login'),\n path('logout/', views.myLogout, name='logout'),\n path('tournament/', views.tournament, name='tournament'),\n path('tournament/create/', views.create_tournament, name='create_tournament'),\n path('tournament//', views.tournament_info, name='tournament_info'),\n path('tournament//bracket', views.tournament_bracket, name='tournament_bracket'),\n path('tournament//manage', views.manage_tournament, name='manage_tournament'),\n path('team//', views.team_info, name='team_info'),\n path('team//match/', views.match_team, name='match_team'),\n path('team/create/', views.create_team, name='create_team'),\n path('ranking/', views.ranking, name='ranking'),\n path('team/', views.team, name='team'),\n path('team//manage/', views.manage_team, name='manage_team'),\n path('manage_player/', views.manage_player, name='manage_player'),\n path('team//match//', views.notice_result, name='notice_result'),\n path('404/', views.error404, name='error'),\n]\n","repo_name":"Nuengnakhap/GoodGames","sub_path":"goodgames/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35669819130","text":"\"\"\"foodgram URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.contrib.flatpages import views as flatpages_views\n\nfrom foodgram import views\nfrom recipe.views import index\n\n\nhandler404 = views.page_not_found\nhandler500 = views.server_error\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"about/\", include(\"django.contrib.flatpages.urls\")),\n path(\"auth/\", include(\"users.urls\")),\n path(\"auth/\", include(\"django.contrib.auth.urls\")),\n path(\"recipes/\", include(\"recipe.urls\")),\n path(\"api/\", include(\"api_v1.urls\")),\n path(\"\", index, name=\"index\"),\n]\n\nurlpatterns += [\n path(\n \"about-author/\",\n flatpages_views.flatpage,\n {\"url\": \"/about-author/\"},\n name=\"about-author\",\n ),\n path(\n \"about-project/\",\n flatpages_views.flatpage,\n {\"url\": \"/about-project/\"},\n name=\"about-project\",\n ),\n path(\n \"technologies/\",\n flatpages_views.flatpage,\n {\"url\": \"/technologies/\"},\n name=\"technologies\",\n ),\n]\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n\n urlpatterns += static(\n settings.STATIC_URL, document_root=settings.STATIC_ROOT\n )\n","repo_name":"Irina-Nazarova/foodgram-project","sub_path":"foodgram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25989899776","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nclass analysis(object):\r\n\r\n # 统计各地段区域的占比\r\n # 以饼图展现\r\n # series:pandas的series元素,即列对象\r\n # 返回 img对象:生成的图片(location_percent.jpg)\r\n # 返回 series对象\r\n # 格式为元组(series, img)\r\n def show_pie(self, series, flag=0, img_name=''):\r\n # 统计各元素出现的次数,返回一个Series对象\r\n result_location = series.value_counts()\r\n x_than_flag = result_location[result_location >= flag] # 选取数值大于等于20的,返回Series\r\n x_little_flag = result_location[result_location < flag] # 选取数值小于20的,返回Series\r\n x_other = pd.Series({'其他': x_little_flag.sum()}) # 创建一个Series,表示数值少于20的所有之和\r\n x_new = x_than_flag.append(x_other) # 将x_other Series加到x_than_20之后,形成新的Series\r\n # 设置字体\r\n plt.rcParams['font.sans-serif'] = ['SimHei']\r\n # 解决保存图像是负号'-'显示为方块的问题\r\n plt.rcParams['axes.unicode_minus'] = False\r\n # 对结果进行可视化处理\r\n # 指定图像大小\r\n img = plt.figure(figsize=(12, 5.5))\r\n img.suptitle(img_name + \" 统计情况\", fontsize=15)\r\n # 子图:即在一张图片中显示多个子图\r\n # subplot(numRows, numCols, plotNum)\r\n # 图表的整个绘图区域被分成numRows行和numCols列,plotNum参数指定创建的Axes对象所在的区域,如何理解呢?\r\n # 如果numRows = 3,numCols = 2,那整个绘制图表样式为3X2的图片区域,用坐标表示为(1,1),(1,2),(1,3),(2,1),(2,2),(2,3)。\r\n # 这时,当plotNum = 1时,表示的坐标为(1,3),即第一行第一列的子图;\r\n # 放在子图的第一个位置:第1行第1列\r\n ax1 = plt.subplot(1, 2, 1)\r\n explode = [0] * len(x_new) # 生成一个列表,长度(元素个数)为len(x_new),其元素全为数值0\r\n explode[0] = 0.05 # 将第一个元素改为0.5\r\n\r\n # 将x_new用“饼图”展示\r\n ax1.pie(\r\n x=x_new, # 指定绘图的数据\r\n labels=x_new.index, # (每一块)饼图外侧显示的说明文字\r\n autopct='%1.1f%%', # 控制饼图内百分比设置\r\n pctdistance=0.8, # 指定autopct的位置刻度\r\n startangle=0, # 指定起始角度\r\n labeldistance=1.03, # 指定label的位置刻度\r\n explode=explode # (每一块)离开中心距离,必须是列表,长度与x长度要一致\r\n )\r\n ax1.set_title('数量不低于 %d 的区域' %flag) # 指定图片标题\r\n # 放在子图的第二个位置::第1行第2列\r\n ax2 = plt.subplot(1, 2, 2)\r\n # 将x_little_20用“饼图”展示\r\n ax2.pie(\r\n x=x_little_flag,\r\n labels=x_little_flag.index,\r\n 
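# (editor's note) autopct formats each wedge's percentage label to one decimal place;\r\n            # pctdistance and labeldistance set how far from the center those labels are drawn\r\n            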
autopct='%1.1f%%',\r\n pctdistance=0.8,\r\n startangle=90,\r\n labeldistance=1.03,\r\n radius=0.8 # 控制饼图半径\r\n )\r\n ax2.set_title('数量低于 %d 的区域(其他)' %flag) # 指定图片标题\r\n # legend : 显示图示labeldistance\r\n # plt.legend()\r\n # 显示出我们创建的所有绘图对象。\r\n plt.show()\r\n # 保存图片\r\n #img.savefig('location_percent.jpg')\r\n\r\n return (series, img)\r\n\r\n\r\n # 统计每一个区域中,在售的小区信息\r\n # dataframe pandas dataframe对象\r\n # name 小区名称\r\n def show_name_percent(self, dataframe, name=''):\r\n # 通过name的值,在dataframe中找到其所在的行号\r\n low_index_list = dataframe[dataframe.location == name].index.tolist()\r\n # 通过行号,找到小区名称\r\n # 从'name列取值,行号为low_index_list。存放到列表\r\n low_name = []\r\n for i in low_index_list:\r\n a = dataframe['name'][i]\r\n low_name.append(a)\r\n # 存放为series对象\r\n name_series = pd.Series(low_name, index=low_index_list)\r\n # 调用数据分析方法-饼图显示\r\n series, img = self.show_pie(name_series,4,name)\r\n pass\r\n\r\n\r\n # 数据清洗过程\r\n # 返回DataFrame对象\r\n # location: 指定区域\r\n # name:指定小区名称\r\n # flag: 总价阀值,清除比该值大的数据\r\n def wash_data(self, dataframe, location='', name='', flag=10000):\r\n # 选取列名为location,返回DataFrame对象\r\n df_location = dataframe[dataframe.location == location]\r\n # 选取行名为name,返回DataFrame对象\r\n df_name = df_location[df_location.name ==name]\r\n # 数据清洗:去除总价大于flag的数据\r\n df_name = df_name[df_name.total_price <= flag]\r\n # 将清洗后的房源数据返回\r\n return df_name\r\n\r\n\r\n\r\n # 绘制详细信息\r\n # DataFrame 为清洗之后的数据对象\r\n def show_info(self, DataFrame):\r\n # 取得area数据列表\r\n area_list = DataFrame['area'].values\r\n # 取得总价数据列表\r\n total_price_list = DataFrame['total_price'].values\r\n # 取得单价数据列表\r\n price_list = DataFrame['price'].values\r\n # 创制一个图形对象\r\n fig = plt.figure(figsize=(12, 6), dpi=120)\r\n # 设置字体\r\n plt.rcParams['font.sans-serif'] = ['SimHei']\r\n # 解决保存图像是负号'-'显示为方块的问题\r\n plt.rcParams['axes.unicode_minus'] = False\r\n # 增加总价格-面积 图表\r\n total_price_area_ax = fig.add_subplot(2,1,1)\r\n # 在价格图表中右边增加面积y坐标\r\n area_ax = total_price_area_ax.twinx()\r\n # 左边y轴标签\r\n total_price_area_ax.set_ylabel('总价格 / 万元', color='b')\r\n # 右边y轴标签\r\n area_ax.set_ylabel('面积 / 平方米', color='r')\r\n # x轴标签\r\n a = DataFrame['name'].values[0]\r\n b = DataFrame['location'].values[0]\r\n # 图形名称\r\n fig.suptitle('[' + b + ' - ' + a +'] 小区价格图', fontsize=15)\r\n # 绘制总价格线plot\r\n total_price_area_ax.plot(total_price_list, '-o', ms=5, lw=2, mfc='orange', label = \"总价格\")\r\n # 绘制面积线plot\r\n area_ax.plot(area_list, '--*',color='r', alpha=0.8, label = \"面积\")\r\n # 显示标签\r\n total_price_area_ax.legend(loc=2)\r\n area_ax.legend(loc=1)\r\n # 显示网格\r\n total_price_area_ax.grid(color='b', alpha=0.3, linestyle=\"-\")\r\n area_ax.grid(color='r', alpha=0.3,linestyle=\"-.\")\r\n\r\n # 增加单价图表\r\n price_ax = fig.add_subplot(2, 1, 2)\r\n # 设置x轴标签\r\n price_ax.set_ylabel('单价 元/平方米', color='orange')\r\n # 绘制单价线plot\r\n price_ax.plot(price_list, '-', mfc='orange', label = \"单价\", color='orange')\r\n # 显示网格\r\n price_ax.grid(color='orange',alpha=0.3, linestyle=\"-.\")\r\n # 显示数值标签\r\n for x,y in zip(price_list,range(len(price_list))):\r\n price_ax.text(y, x, x, bbox=dict(facecolor='white', alpha=0.5), ha='left', va= 'bottom',fontsize=7)\r\n\r\n # 绘制平均单价线\r\n # 均值\r\n value = sum(price_list) / len(price_list)\r\n # 增加均值线\r\n price_ax.plot([value]*(len(price_list)), '--', label = \"均价\", color='g',lw=1)\r\n # 显示标签\r\n price_ax.legend(loc=3)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n fig.show()\r\n\r\n\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n path = 'data.csv'\r\n path2 = 'data_analysis.xlsx'\r\n # 读入文件\r\n # df:任意的Pandas 
DataFrame对象\r\n # s:任意的Pandas Series对象\r\n df = pd.read_csv(path)\r\n a = analysis()\r\n a.show_pie(df['location'], 10)\r\n a.show_name_percent(df, '增城 新塘南')\r\n a.show_info(a.wash_data(df,location='增城 新塘南', name='金地香山湖', flag=300))","repo_name":"huily/scrapy-FangTX","sub_path":"FangTX/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40225433571","text":"import os\r\nfrom flask import *\r\nimport jwt\r\nimport datetime\r\nfrom functools import wraps\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = 'iniAdalahSecretKey'\r\n\r\ndef token_required(f):\r\n @wraps(f)\r\n def decorated(*args, **kwargs):\r\n token = request.args.get('token')\r\n\r\n if not token:\r\n return jsonify({'message' : 'Token is missing!'}), 403\r\n \r\n try:\r\n data = jwt.decode(token, app.config['SECRET_KEY'])\r\n except:\r\n return jsonify({'message' : 'Token is invalid!'}), 403\r\n \r\n return f(*args, **kwargs)\r\n return decorated\r\n\r\n\r\n@app.route('/login')\r\ndef login():\r\n auth = request.authorization\r\n\r\n if auth and auth.username == 'user' and auth.password == '1234':\r\n # generate token\r\n token = jwt.encode({'user' : auth.username, \r\n 'exp' : datetime.datetime.utcnow() + \r\n datetime.timedelta(seconds=30)}, \r\n app.config['SECRET_KEY'])\r\n\r\n return jsonify({'token' : token.decode('UTF-8')})\r\n return make_response('Could not verify!', 401, {'WWW-Authenticate' : 'Basic realm=\"Login Required\"'})\r\n\r\n@app.route('/protected', methods=['GET', 'POST'])\r\n@token_required\r\ndef protected():\r\n if request.method == 'GET':\r\n session.permanent = True\r\n token = request.args.get('token')\r\n session[\"main\"] = token\r\n return redirect(url_for(\"main\"))\r\n else:\r\n if \"main\" in session:\r\n return redirect(url_for(\"login\"))\r\n\r\n\r\n@app.route('/main', methods=['GET', 'POST'])\r\ndef main():\r\n if \"main\" in session:\r\n if request.method == 'POST':\r\n \r\n \r\n\r\n weight = request.form['weight']\r\n height = request.form['height']\r\n height = int(height)/100\r\n \r\n #count BMI\r\n bmi = float(weight) / (height*height)\r\n \r\n #categorization\r\n if bmi <= 18.4:\r\n return '
<h2>You are underweight.</h2>'  # NOTE (editor): HTML tags in these responses were stripped during extraction; minimal markup reconstructed\r\n            elif bmi > 18.4 and bmi < 25.0:\r\n                return '<h2>You have a normal weight</h2>'\r\n            elif bmi >= 25.0:\r\n                weightMin = 18.5 * height * height\r\n                weightMax = 24.9 * height * height\r\n                kuranginMax = int(weight) - weightMin\r\n                kuranginMin = int(weight) - weightMax \r\n                return(redirect(url_for(\"count\", min=round(kuranginMin))))\r\n
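            # Worked example (editor's note, not in the source): weight=85 kg, height=175 cm gives\r\n            # bmi = 85 / 1.75**2 ≈ 27.8, which lands in this branch and redirects to count()\r\n            # with round(85 - 24.9 * 1.75**2) = 9 kg to lose.\r\n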
        else:\r\n            # NOTE (editor): the form markup below is reconstructed; the original tags were\r\n            # stripped during extraction. Field names match the request.form lookups above.\r\n            return '''<form method=\"post\">\r\n                <label>Body Weight (kg)</label>\r\n                <input type=\"number\" name=\"weight\">\r\n                <label>Body Height (cm)</label>\r\n                <input type=\"number\" name=\"height\">\r\n                <input type=\"submit\" value=\"Calculate\">\r\n            </form>''' \r\n    else:\r\n        return redirect(url_for(\"login\"))\r\n\r\n@app.route(\"/<int:min>\")  # NOTE (editor): the '<int:min>' converter was stripped as an HTML-like tag; reconstructed from url_for(\"count\", min=...) above\r\ndef count(min):\r\n    return jsonify({'weightToLose' : f'{min}'})\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n    session.pop(\"main\", None)\r\n    return '''<h2>Logged Out</h2>
\r\n '''\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\r\n","repo_name":"celiii26/Tubes-TST-api-SmartIdealBMI","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13535151436","text":"import datetime\r\ndef printTimeStamp(name):\r\n print('Овчаренко Иван Кожушко Андрей: ' + name)\r\n print('Час компіляції: ' + str(datetime.datetime.now()))\r\n\r\n\r\n\r\ndef GCD(x, y):\r\n if x == y:\r\n return x\r\n elif x > y:\r\n return GCD(x - y, y)\r\n elif x < y:\r\n return GCD(x, y - x)\r\n\r\nprint(GCD(135, 20))\r\n\r\n","repo_name":"Heonesuch/Ovcharenko","sub_path":"день3(зад.12).py","file_name":"день3(зад.12).py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25472177079","text":"from copy import copy\nfrom lukefi.metsi.data.enums.mela import (\n MelaOwnerCategory, \n MelaSiteTypeCategory, \n MelaSoilAndPeatlandCategory, \n MelaTreeSpecies, \n MelaLandUseCategory,\n MelaDrainageCategory\n )\nfrom lukefi.metsi.data.enums.internal import (\n SiteType, \n SoilPeatlandCategory, \n TreeSpecies, \n OwnerCategory, \n LandUseCategory,\n DrainageCategory\n )\nfrom lukefi.metsi.data.conversion.util import apply_mappers\n# TODO: can we find a way to resolve the circular import introduced by trying to use these classes just for typing?\n# Even using the iffing below, pytest fails during top_level_collect\n# if typing.TYPE_CHECKING:\n# from forestdatamodel.model import ForestStand, TreeStratum, ReferenceTree\n\n\nspecies_map = {\n TreeSpecies.PINE: MelaTreeSpecies.SCOTS_PINE,\n TreeSpecies.SPRUCE: MelaTreeSpecies.NORWAY_SPRUCE,\n TreeSpecies.SILVER_BIRCH: MelaTreeSpecies.SILVER_BIRCH,\n TreeSpecies.DOWNY_BIRCH: MelaTreeSpecies.DOWNY_BIRCH,\n TreeSpecies.ASPEN: MelaTreeSpecies.ASPEN,\n TreeSpecies.GREY_ALDER: MelaTreeSpecies.ALDER,\n TreeSpecies.COMMON_ALDER: MelaTreeSpecies.ALDER,\n TreeSpecies.OTHER_CONIFEROUS: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.OTHER_DECIDUOUS: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.DOUGLAS_FIR: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.JUNIPER: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.SHORE_PINE: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.EUROPEAN_WHITE_ELM: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.LARCH: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.SMALL_LEAVED_LIME: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.BLACK_SPRUCE: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.WILLOW: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.MOUNTAIN_ASH: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.ABIES: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.GOAT_WILLOW: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.COMMON_ASH: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.KEDAR: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.SERBIAN_SPRUCE: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.OAK: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.BIRD_CHERRY: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.MAPLE: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.CURLY_BIRCH: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.WYCH_ELM: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.UNKNOWN_CONIFEROUS: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.UNKNOWN_DECIDUOUS: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.OTHER_PINE: 
MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.OTHER_SPRUCE: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.THUJA: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.YEW: MelaTreeSpecies.OTHER_CONIFEROUS,\n TreeSpecies.BAY_WILLOW: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.POPLAR: MelaTreeSpecies.OTHER_DECIDUOUS,\n TreeSpecies.HAZEL: MelaTreeSpecies.OTHER_DECIDUOUS\n}\n\n\nland_use_map = {\n LandUseCategory.FOREST: MelaLandUseCategory.FOREST_LAND,\n LandUseCategory.SCRUB_LAND: MelaLandUseCategory.SCRUB_LAND,\n LandUseCategory.WASTE_LAND: MelaLandUseCategory.WASTE_LAND,\n LandUseCategory.OTHER_FOREST: MelaLandUseCategory.OTHER,\n LandUseCategory.AGRICULTURAL: MelaLandUseCategory.AGRICULTURAL_LAND,\n LandUseCategory.BUILT_LAND: MelaLandUseCategory.BUILT_UP_LAND,\n LandUseCategory.ROAD: MelaLandUseCategory.ROADS_OR_ELECTRIC_LINES,\n LandUseCategory.ENERGY_TRANSMISSION_LINE: MelaLandUseCategory.ROADS_OR_ELECTRIC_LINES,\n LandUseCategory.FRESHWATER: MelaLandUseCategory.LAKES_AND_RIVERS,\n LandUseCategory.SEA: MelaLandUseCategory.SEA,\n LandUseCategory.REAL_ESTATE: MelaLandUseCategory.BUILT_UP_LAND,\n LandUseCategory.OTHER_LAND: MelaLandUseCategory.ROADS_OR_ELECTRIC_LINES,\n LandUseCategory.WATER_BODY: MelaLandUseCategory.LAKES_AND_RIVERS\n}\n\n\nowner_map = {\n OwnerCategory.UNKNOWN: MelaOwnerCategory.PRIVATE,\n OwnerCategory.PRIVATE: MelaOwnerCategory.PRIVATE,\n OwnerCategory.FOREST_INDUSTRY: MelaOwnerCategory.ENTERPRISE,\n OwnerCategory.OTHER_ENTERPRISE: MelaOwnerCategory.ENTERPRISE,\n OwnerCategory.METSAHALLITUS: MelaOwnerCategory.STATE,\n OwnerCategory.OTHER_STATE_AGENCY: MelaOwnerCategory.STATE,\n OwnerCategory.FOREST_COOP: MelaOwnerCategory.COMMUNITY,\n OwnerCategory.MUNICIPALITY: MelaOwnerCategory.MUNICIPALITY,\n OwnerCategory.CONGREGATION: MelaOwnerCategory.COMMUNITY,\n OwnerCategory.OTHER_COMMUNITY: MelaOwnerCategory.COMMUNITY,\n OwnerCategory.UNDIVIDED: MelaOwnerCategory.COMMUNITY\n}\n\n\n_site_type_map = {\n SiteType.VERY_RICH_SITE: MelaSiteTypeCategory.VERY_RICH_SITE,\n SiteType.RICH_SITE: MelaSiteTypeCategory.RICH_SITE,\n SiteType.DAMP_SITE: MelaSiteTypeCategory.DAMP_SITE,\n SiteType.SUB_DRY_SITE: MelaSiteTypeCategory.SUB_DRY_SITE,\n SiteType.DRY_SITE: MelaSiteTypeCategory.DRY_SITE,\n SiteType.BARREN_SITE: MelaSiteTypeCategory.BARREN_SITE,\n SiteType.ROCKY_OR_SANDY_AREA: MelaSiteTypeCategory.ROCKY_OR_SANDY_AREA,\n SiteType.OPEN_MOUNTAINS: MelaSiteTypeCategory.OPEN_MOUNTAINS,\n SiteType.TUNTURIKOIVIKKO: MelaSiteTypeCategory.OPEN_MOUNTAINS,\n SiteType.LAKIMETSA_TAI_TUNTURIHAVUMETSA: MelaSiteTypeCategory.OPEN_MOUNTAINS\n}\n\n\n#this doesn't have a mapping for TREELESS_MIRE, as its mapping to MELA values is determined by the SiteType category. 
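\n# Worked example (editor's illustration, not in the source): a stand with\n# soil_peatland_category == SoilPeatlandCategory.TREELESS_MIRE and site_type_category ==\n# SiteType.RICH_SITE is resolved by soil_peatland_mapper below to\n# MelaSoilAndPeatlandCategory.PEATLAND_RICH_TREELESS_MIRE, since RICH_SITE is in _rich_mire_types.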
\n_soil_peatland_map = {\n SoilPeatlandCategory.MINERAL_SOIL: MelaSoilAndPeatlandCategory.MINERAL_SOIL,\n SoilPeatlandCategory.SPRUCE_MIRE: MelaSoilAndPeatlandCategory.PEATLAND_SPRUCE_MIRE,\n SoilPeatlandCategory.PINE_MIRE: MelaSoilAndPeatlandCategory.PEATLAND_PINE_MIRE,\n}\n\n\n_rich_mire_types = [\n SiteType.VERY_RICH_SITE,\n SiteType.RICH_SITE,\n SiteType.DAMP_SITE\n]\n\n\ndef site_type_mapper(target):\n target.site_type_category = _site_type_map.get(target.site_type_category)\n return target\n\n\ndef drainage_category_mapper(target):\n if target.drainage_category == DrainageCategory.UNDRAINED_MINERAL_SOIL_OR_MIRE:\n if target.soil_peatland_category == SoilPeatlandCategory.MINERAL_SOIL:\n target.drainage_category = MelaDrainageCategory.UNDRAINED_MINERAL_SOIL\n else:\n target.drainage_category = MelaDrainageCategory.UNDRAINED_MIRE\n elif target.drainage_category == DrainageCategory.DITCHED_MINERAL_SOIL:\n target.drainage_category = MelaDrainageCategory.DITCHED_MINERAL_SOIL\n elif target.drainage_category == DrainageCategory.DITCHED_MIRE:\n target.drainage_category = MelaDrainageCategory.DITCHED_MIRE\n elif target.drainage_category == DrainageCategory.TRANSFORMING_MIRE:\n target.drainage_category = MelaDrainageCategory.TRANSFORMING_MIRE\n elif target.drainage_category == DrainageCategory.TRANSFORMED_MIRE:\n target.drainage_category = MelaDrainageCategory.TRANSFORMED_MIRE\n else:\n target.drainage_category = MelaDrainageCategory.UNDRAINED_MINERAL_SOIL\n return target\n\n\ndef soil_peatland_mapper(target):\n \"\"\"If the internal SoilPeatlandCategory is TREELESS_MIRE, determining the soil or peatland type for MELA requires knowing the site type (fertility type).\n Make sure to set it first, because otherwise this method is unable to determine soil_peatland_category and sets it to None.\n \"\"\"\n\n if target.soil_peatland_category == SoilPeatlandCategory.TREELESS_MIRE:\n if target.site_type_category is None:\n target.soil_peatland_category = None\n\n elif target.site_type_category in _rich_mire_types:\n target.soil_peatland_category = MelaSoilAndPeatlandCategory.PEATLAND_RICH_TREELESS_MIRE\n else:\n target.soil_peatland_category = MelaSoilAndPeatlandCategory.PEATLAND_BARREN_TREELESS_MIRE\n else: \n target.soil_peatland_category = _soil_peatland_map.get(target.soil_peatland_category)\n \n return target\n \n\ndef land_use_mapper(target):\n \"\"\"in-place mapping from internal LandUseCategory to MelaLandUseCategory\"\"\"\n target.land_use_category = land_use_map.get(target.land_use_category)\n return target\n\n\ndef owner_mapper(target):\n \"\"\"in-place mapping from internal land owner category to mela owner category\"\"\"\n target.owner_category = owner_map.get(target.owner_category)\n return target\n\n\ndef species_mapper(target):\n \"\"\"in-place mapping from internal tree species to mela tree species\"\"\"\n target.species = species_map.get(target.species, MelaTreeSpecies.OTHER_DECIDUOUS)\n return target\n\n\ndef stand_location_converter(target):\n \"\"\"\n in-place conversion of ForestStand geolocation to kilometer precision,\n and to YKJ/KKJ3 with band prefix 3 removed for EPSG:2393\n \"\"\"\n if target.geo_location[3] == 'EPSG:3067':\n lat, lon = (target.geo_location[0] / 1000, target.geo_location[1] / 1000)\n elif target.geo_location[3] == 'EPSG:2393':\n lat, lon = (target.geo_location[0] / 1000, target.geo_location[1] / 1000 - 3000)\n else:\n raise Exception(\"Unsupported CRS {} for stand {}\".format(target.geo_location[3], target.identifier))\n\n target.geo_location = (\n lat,\n 
lon,\n target.geo_location[2],\n target.geo_location[3])\n return target\n\n\ndef stand_area_converter(target):\n \"\"\" in-place conversion to Mela value space for area related matters \"\"\"\n if target.is_auxiliary():\n target.area = 0.0\n return target\n\n\ndef mela_stratum(stratum):\n \"\"\"Convert a TreeStratum so that enumerated category variables are converted to Mela value space\"\"\"\n result = copy(stratum)\n result.stand_origin_relative_position = copy(stratum.stand_origin_relative_position)\n return apply_mappers(result, *default_mela_stratum_mappers)\n\n\ndef mela_tree(tree):\n \"\"\"Convert a ReferenceTree so that enumerated category variables are converted to Mela value space\"\"\"\n result = copy(tree)\n result.stand_origin_relative_position = copy(tree.stand_origin_relative_position)\n return apply_mappers(result, *default_mela_tree_mappers)\n\n\ndef mela_stand(stand):\n \"\"\"Convert a ForestStand so that enumerated category variables are converted to Mela value space\"\"\"\n result = copy(stand)\n result.geo_location = copy(stand.geo_location)\n result.area_weight_factors = copy(stand.area_weight_factors)\n result = apply_mappers(result, *default_mela_stand_mappers)\n result.reference_trees = list(map(mela_tree, result.reference_trees))\n for tree in result.reference_trees:\n tree.stand = result\n result.tree_strata = list(map(mela_stratum, result.tree_strata))\n for stratum in result.tree_strata:\n stratum.stand = result\n return result\n\n\ndefault_mela_tree_mappers = [species_mapper]\ndefault_mela_stratum_mappers = [species_mapper]\ndefault_mela_stand_mappers = [stand_location_converter,\n stand_area_converter,\n owner_mapper, \n land_use_mapper, \n site_type_mapper, \n soil_peatland_mapper,\n drainage_category_mapper]\n","repo_name":"lukefi/metsi","sub_path":"lukefi/metsi/data/conversion/internal2mela.py","file_name":"internal2mela.py","file_ext":"py","file_size_in_byte":11055,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"21114715939","text":"import numpy as onp\nimport jax\nimport jax.numpy as jnp\nimport haiku as hk\nimport gin\nimport chex\nfrom typing import Optional\n\nfrom learned_optimization.tasks.datasets import image\nfrom learned_optimization.tasks import base\nfrom learned_optimization.tasks import es_wrapper\nimport functools\n\n\ndef _fc_ae_loss_fn(hidden_units, activation):\n \"\"\"Build a fully connected autoencoder loss.\"\"\"\n\n def _fn(batch):\n net = hk.Flatten()(batch[\"image\"])\n feats = net.shape[-1]\n logits = hk.nets.MLP(hidden_units + [feats], activation=activation)(net)\n\n loss_vec = jnp.mean(jnp.square(net - jax.nn.sigmoid(logits)), [1])\n return jnp.mean(loss_vec)\n\n return _fn\n\n\ndef _make_task(hk_fn, datasets) -> base.Task:\n \"\"\"Make a Task subclass for the haiku loss and datasets.\"\"\"\n init_net, apply_net = hk.transform(hk_fn)\n\n class _Task(base.Task):\n \"\"\"Annonomous task object with corresponding loss and datasets.\"\"\"\n\n def __init__(self):\n self.datasets = datasets\n\n def init(self, key: chex.PRNGKey) -> base.Params:\n batch = next(datasets.train)\n return init_net(key, batch)\n\n def loss(self, params, key, data):\n return apply_net(params, key, data)\n\n def loss_with_state_and_aux(self, params, state, key, data):\n return self.loss(params, key, data), state, {}\n\n return _Task()\n\n\ndef ScalingTasks_Imagenet16AE(hidden_size, layers, activation=jax.nn.relu):\n base_model_fn = _fc_ae_loss_fn([hidden_size] * layers, activation)\n datasets = 
image.imagenet16_datasets(128, (16, 16))\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(2, 15)]:\n name = \"ScalingTasks_Imagenet16AE_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Imagenet16AE, size, 3), name)\n del name\n\n\nclass LinearStack(hk.Module):\n\n def __init__(self, splits, feats, name: Optional[str] = None):\n super().__init__(name=name)\n self.splits = splits\n self.feats = feats\n\n def __call__(self, x):\n assert len(x.shape) == 3\n batch_size, split_size, input_size = x.shape\n stddev = 1. / onp.sqrt(input_size)\n w_init = hk.initializers.TruncatedNormal(stddev=stddev)\n w = hk.get_parameter(\n \"w\", [split_size, input_size, self.feats], jnp.float32, init=w_init)\n\n b_init = hk.initializers.Constant(0.)\n b = hk.get_parameter(\n \"b\", [split_size, self.feats], jnp.float32, init=b_init)\n\n out = jax.vmap(jnp.dot, in_axes=(1, 0), out_axes=1)(x, w)\n return out + b\n\n\ndef _permute(val, seed):\n bs, spl, feat = val.shape\n key = jax.random.PRNGKey(seed)\n val = jnp.reshape(val, [bs, spl * feat])\n val = jax.random.permutation(key, val, axis=1)\n return jnp.reshape(val, [bs, spl, feat])\n\n\ndef _split_fc_ae_loss_fn(hidden_units, activation, splits=4):\n\n def _fn(batch):\n net = hk.Flatten()(batch[\"image\"])\n batch_size, num_feats = net.shape\n assert num_feats % splits == 0\n split_feats = jnp.reshape(net, [batch_size, splits, num_feats // splits])\n\n for si, size in enumerate(hidden_units):\n assert size % splits == 0\n split_feats = LinearStack(\n splits=splits, feats=size // splits)(\n split_feats)\n split_feats = activation(split_feats)\n split_feats = _permute(split_feats, seed=si)\n\n feats = jnp.reshape(split_feats, [batch_size, -1])\n logits = hk.Linear(num_feats)(feats)\n loss_vec = jnp.mean(jnp.square(net - jax.nn.sigmoid(logits)), [1])\n return jnp.mean(loss_vec)\n\n return _fn\n\n\ndef ScalingTasks_Imagenet16SplitAE(hidden_size,\n layers,\n splits=4,\n activation=jax.nn.relu):\n base_model_fn = _split_fc_ae_loss_fn(\n [hidden_size] * layers, activation, splits=splits)\n datasets = image.imagenet16_datasets(128, (16, 16))\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(3, 17)]:\n name = \"ScalingTasks_Imagenet16Split8AE_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Imagenet16SplitAE, size, 3, splits=8),\n name)\n del name\n\n\n# Now for some classification!\ndef _fc_loss_fn(hidden_units, activation, num_clases=1000):\n\n def _fn(batch):\n # Center the image.\n inp = (batch[\"image\"] - 0.5) * 2\n inp = jnp.reshape(inp, [inp.shape[0], -1])\n sizes = hidden_units + [num_clases]\n logits = hk.nets.MLP(sizes, activation=activation)(inp)\n print(logits.shape, batch[\"label\"].shape)\n loss_vec = base.softmax_cross_entropy(\n logits=logits, labels=jax.nn.one_hot(batch[\"label\"], num_clases))\n return jnp.mean(loss_vec)\n\n return _fn\n\n\ndef ScalingTasks_Imagenet16FC(hidden_size, layers, activation=jax.nn.relu):\n base_model_fn = _fc_loss_fn([hidden_size] * layers, activation)\n datasets = image.imagenet16_datasets(128, (16, 16))\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(2, 17)]:\n name = \"ScalingTasks_Imagenet16FC_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Imagenet16FC, size, 3), name)\n del name\n\n\ndef _make(base_name, e):\n return es_wrapper.ESTask(globals()[base_name](), 
n_pairs=e)\n\n\nfor size in [2**i for i in range(2, 17)]:\n for e in [2, 8]:\n name = f\"ScalingTasks_ES{e}_Imagenet16FC_3layer_{size}size\"\n base_name = f\"ScalingTasks_Imagenet16FC_3layer_{size}size\"\n locals()[name] = gin.external_configurable(\n functools.partial(_make, base_name, e), name)\n del name, base_name\n\n\ndef ScalingTasks_Cifar10FC(hidden_size, layers, activation=jax.nn.relu):\n base_model_fn = _fc_loss_fn([hidden_size] * layers, activation)\n datasets = image.cifar10_datasets(128)\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(2, 17)]:\n name = \"ScalingTasks_Cifar10FC_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Cifar10FC, size, 3), name)\n del name\n\n\ndef _make_cifar(base_name, e):\n return es_wrapper.ESTask(globals()[base_name](), n_pairs=e)\n\n\nfor size in [2**i for i in range(2, 17)]:\n for e in [2, 8]:\n name = f\"ScalingTasks_ES{e}_Cifar10FC_3layer_{size}size\"\n base_name = f\"ScalingTasks_Cifar10FC_3layer_{size}size\"\n locals()[name] = gin.external_configurable(\n functools.partial(_make_cifar, base_name, e), name)\n del name, base_name\n\n\ndef _split_fc_loss_fn(hidden_units, activation, num_clases=1000, splits=4):\n\n def _fn(batch):\n # Center the image.\n inp = (batch[\"image\"] - 0.5) * 2\n net = hk.Flatten()(inp)\n batch_size, num_feats = net.shape\n assert num_feats % splits == 0\n split_feats = jnp.reshape(net, [batch_size, splits, num_feats // splits])\n\n for si, size in enumerate(hidden_units):\n assert size % splits == 0\n split_feats = LinearStack(\n splits=splits, feats=size // splits)(\n split_feats)\n split_feats = activation(split_feats)\n split_feats = _permute(split_feats, seed=si)\n\n feats = jnp.reshape(split_feats, [batch_size, -1])\n logits = hk.Linear(num_clases)(feats)\n loss_vec = base.softmax_cross_entropy(\n logits=logits, labels=jax.nn.one_hot(batch[\"label\"], num_clases))\n return jnp.mean(loss_vec)\n\n return _fn\n\n\ndef ScalingTasks_Imagenet16SplitFC(hidden_size, layers, activation=jax.nn.relu):\n base_model_fn = _split_fc_loss_fn(\n [hidden_size] * layers, activation, splits=4)\n datasets = image.imagenet16_datasets(128, (16, 16))\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(2, 17)]:\n name = \"ScalingTasks_Imagenet16SplitFC_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Imagenet16SplitFC, size, 3), name)\n del name\n\n\ndef ScalingTasks_Imagenet16Split8FC(hidden_size,\n layers,\n activation=jax.nn.relu):\n base_model_fn = _split_fc_loss_fn(\n [hidden_size] * layers, activation, splits=8)\n datasets = image.imagenet16_datasets(128, (16, 16))\n return _make_task(base_model_fn, datasets)\n\n\nfor size in [2**i for i in range(2, 17)]:\n name = \"ScalingTasks_Imagenet16Split8FC_3layer_%dsize\" % size\n locals()[name] = gin.external_configurable(\n functools.partial(ScalingTasks_Imagenet16Split8FC, size, 3), name)\n del name\n","repo_name":"google/learned_optimization","sub_path":"learned_optimization/research/scaling/scaling_tasks.py","file_name":"scaling_tasks.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"81"}
{"seq_id":"72515540424","text":"import numpy as np\nfrom gaze.utils import LOGGER\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport torch\nimport datetime\nimport json\n\nfrom sklearn.multioutput import 
MultiOutputRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_absolute_error\n\n\nclass Prober():\n def __init__(self, d, feature_max, output_dir):\n self.d = d\n self.probing_dataset = None\n self.feature_max = feature_max\n self.output_dir = output_dir\n\n def create_probing_dataset(self, model, mean=False):\n LOGGER.info(f\"Creating datasets, Mean = {mean} ...\")\n\n probing_dataset = defaultdict(list)\n\n print(model.config.num_hidden_layers)\n\n LOGGER.info(f\"Start creating dataset...\")\n\n for text_input, target, mask in tqdm(list(zip(self.d.text_inputs, self.d.targets, self.d.masks))):\n\n # getting the id of the last token\n last_token_id = (len(mask) - 1 - mask.tolist()[::-1].index(1)) - 1\n\n with torch.no_grad():\n model_output = model(input_ids=torch.as_tensor([text_input]), attention_mask=torch.as_tensor([mask]))\n \n for layer in range(model.config.num_hidden_layers):\n\n hidden_state = model_output.hidden_states[layer].numpy()\n\n non_masked_els = np.multiply.reduce(target != -1, 1) > 0\n\n if not mean:\n # take only the first subword embedding for a given word\n probe_input = hidden_state[0, non_masked_els, :]\n else:\n # take the mean of the subword embeddings for a given word\n # the split indices are the ids of the words' starts\n probe_input = [np.mean(split_, 0) for split_ in np.split(hidden_state[0], np.where(non_masked_els)[0], 0)[1:-1]]\n # the last entry averages the vectors from the last non-masked position to the sep token (the sep token is the last 1 in mask)\n last_mean = np.mean(hidden_state[0, np.where(non_masked_els)[0][-1] : last_token_id, :], 0)\n probe_input.append(last_mean)\n\n probe_input = np.array(probe_input)\n\n output = target[non_masked_els, :]\n\n # take elements token-wise\n for i in range(probe_input.shape[0]):\n probing_dataset[layer].append((probe_input[i], output[i]))\n \n LOGGER.info(\"Retrieving done, postprocess...\")\n \n # collect the inputs and outputs into parallel lists\n for layer in range(model.config.num_hidden_layers):\n input_list = []\n output_list = []\n \n for probe_input, output in probing_dataset[layer]:\n input_list.append(probe_input)\n output_list.append(output)\n\n probing_dataset[layer] = (input_list, output_list)\n\n self.probing_dataset = probing_dataset\n \n return probing_dataset\n\n\n def _apply_model(self, inputs, targets, linear = True, k_folds=10):\n # do cross-validation\n\n l = len(inputs)\n l_ts = l//k_folds\n\n loss_tr_mean = None\n loss_ts_mean = None\n\n for k in tqdm(range(k_folds)):\n # cycle over folds; for every fold create train_d, test_d\n if k != k_folds-1: # exclude the k-th part from the validation\n train_inputs = inputs[:(k)*l_ts] + inputs[(k+1)*l_ts:]\n train_targets = targets[:(k)*l_ts] + targets[(k+1)*l_ts:]\n test_inputs = inputs[k*l_ts:(k+1)*l_ts]\n test_targets = targets[k*l_ts:(k+1)*l_ts]\n\n else: # last fold clause\n train_inputs = inputs[:k*l_ts]\n train_targets = targets[:k*l_ts]\n test_inputs = inputs[k*l_ts:]\n test_targets = targets[k*l_ts:]\n\n # min-max scale the targets\n scaler = MinMaxScaler(feature_range=[0, self.feature_max])\n scaler.fit(train_targets)\n train_targets = scaler.transform(train_targets)\n test_targets = scaler.transform(test_targets)\n\n # apply a model for each feature\n predicted_train = None\n predicted_test = None\n # learn a model for each feature, then concatenate the predictions\n for feat_i in range(train_targets.shape[1]):\n if linear:\n # 
regr = SVR(kernel=\"linear\", degree=1).fit(train_inputs, train_targets[:, feat_i])\n regr = Ridge().fit(train_inputs, train_targets[:, feat_i])\n else:\n regr = MLPRegressor().fit(train_inputs, train_targets[:, feat_i])\n\n if predicted_train is None:\n predicted_train = np.expand_dims(regr.predict(train_inputs), axis=0)\n predicted_test = np.expand_dims(regr.predict(test_inputs), axis=0)\n else:\n predicted_train = np.concatenate((predicted_train, np.expand_dims(regr.predict(train_inputs), axis=0)), axis=0)\n predicted_test = np.concatenate((predicted_test, np.expand_dims(regr.predict(test_inputs), axis=0)), axis=0)\n\n predicted_train = predicted_train.T\n predicted_test = predicted_test.T\n\n # Train errors\n loss_tr = np.concatenate((([mean_absolute_error(train_targets, predicted_train)], mean_absolute_error(train_targets, predicted_train, multioutput='raw_values'))), axis=0)\n\n if not loss_tr_mean is None:\n loss_tr_mean += loss_tr\n else:\n loss_tr_mean = loss_tr\n\n # Test errors\n loss_ts = np.concatenate(([mean_absolute_error(test_targets, predicted_test)], mean_absolute_error(test_targets, predicted_test, multioutput='raw_values')), axis=0)\n\n if not loss_ts_mean is None:\n loss_ts_mean += loss_ts\n else:\n loss_ts_mean = loss_ts\n\n loss_tr_mean /= k_folds\n loss_ts_mean /= k_folds\n\n return loss_tr_mean, loss_ts_mean\n\n\n def probe(self, linear, k_folds):\n LOGGER.info(f\"Starting probe, Linear = {linear} ...\")\n metrics = dict()\n\n metrics[\"linear\"] = linear\n\n for layer, dataset in self.probing_dataset.items():\n LOGGER.info(f\"Cross Validation layer : {layer} ...\")\n\n inputs, targets = dataset\n\n score_train, score_test = self._apply_model(inputs, targets, linear, k_folds)\n\n metrics[layer] = {\n \"score_train\" : score_train.tolist(),\n \"score_test\" : score_test.tolist()\n }\n\n LOGGER.info(f\"Scores layer - {layer} :\")\n LOGGER.info(f\"Train: {score_train.tolist()}\")\n LOGGER.info(f\"Test: {score_test.tolist()}\")\n LOGGER.info(f\"done!!!\")\n\n with open(f\"{self.output_dir}/probe_results.json\", 'w') as f:\n json.dump(metrics, f)","repo_name":"Andrew-Wyn/multilingual-gaze-MECO","sub_path":"gaze/prober.py","file_name":"prober.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8001506709","text":"import numpy as np\nimport cv2\nimport pyrealsense2 as rs\nimport open3d as o3d\n\npipe = rs.pipeline()\ncfg = pipe.start()\n\nfor x in range(5):\n pipe.wait_for_frames()\n\n\n\nprofile = cfg.get_stream(rs.stream.depth)\nintr = profile.as_video_stream_profile().get_intrinsics()\nw = []\nalign_to = rs.stream.color\nalign = rs.align(align_to)\n\ndef mouse_callback(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n print(f\"x: {x}, y: {y}\")\n res = rs.rs2_deproject_pixel_to_point(intr, [x, y], w[y, x])\n print(res[2], -res[0], -res[1])\n\ncv2.namedWindow('image')\ncv2.setMouseCallback('image', mouse_callback)\npcd = o3d.geometry.PointCloud()\n\nwhile True:\n pipe.wait_for_frames()\n f = pipe.wait_for_frames()\n f = align.process(f)\n depth = f.get_depth_frame()\n color = f.get_color_frame()\n intr = depth.profile.as_video_stream_profile().intrinsics\n w = np.asanyarray(depth.get_data())\n a = np.asanyarray(color.get_data())\n# arr = [rs.rs2_deproject_pixel_to_point(intr, [j, i], w[i, j]) for j in range(640) for i in range(480)]\n# pcd.points = o3d.utility.Vector3dVector(arr)\n\n cv2.imshow('image', a)\n cv2.resizeWindow('image', 640, 480)\n\n k = 
cv2.waitKey(1) & 0xFF\n if k == 27:\n print(\"ESC key pressed\")\n break\n\ncv2.destroyAllWindows()\n\npipe.stop()\n\n","repo_name":"paull04/ground_projection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5227078368","text":"# Script to store the entire elasticsearch search result into a dataframe for further analysis.\n# Change the below before running this script,\n# localhost = the elastic search ip\n# port = the elasticsearch port\n# query = your search query\n# index = elasticsearch index name\n# doc_type = the type in index\n\n# import the required libraries.\nimport pandas as pd\nfrom elasticsearch import Elasticsearch, helpers\n\n# elasticsearch object.\nes = Elasticsearch('localhost', port=9200)\n\n# define the python generator object.\nres = helpers.scan(\n client = es, \n scroll = '2m',\n query = {\"_source\": [\"id\", \"timestamp\", \"value\"],\n \"query\":{'match_all':{}}\n }, \n index = \"ec2_cpu\",\n doc_type= \"cpu_data\")\n\n# define an empty dataframe to store the search result.\ndf = pd.DataFrame()\n\n# the search results are returned in dictionary format, so create a list of result dictionaries, which is 'l' here.\nl=[]\nfor i in res:\n #print((i['_source']))\n l.append(i['_source'])\n\n# Convert the list of result dictionaries to dataframe \ndf = pd.DataFrame.from_dict(l)\n\n# Check some rows of dataframe\ndf.head()\n","repo_name":"snbhanja/python_elasticsearch_R","sub_path":"es_to_pandas_df.py","file_name":"es_to_pandas_df.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"3647690517","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# lr for VBPR, AMR [0.01,1e-4,1e-3]\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Run Recommender Model.\")\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--dataset', nargs='?', default='tradesy',\n help='dataset path')\nparser.add_argument('--experiment_name', nargs='?',\n default='original',\n help='original, fgsm_***, cw_***, pgd_***')\nparser.add_argument('--model', nargs='?', default='DVBPR',\n help='recommender models: VBPR, DVBPR')\nparser.add_argument('--emb1_K', type=int, default=64, help='size of embeddings')\nparser.add_argument('--layers_component', type=list, default=[64, 1], help='list component level layers for ACF')\nparser.add_argument('--layers_item', type=list, default=[64, 1], help='list item level layers for ACF')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size')\nparser.add_argument('--lr', nargs='+', type=float, default=[1e-4], help='learning rate')\nparser.add_argument('--verbose', type=int, default=1, help='verbosity and Checkpoint epoch')\nparser.add_argument('--epoch', type=int, default=2, help='epochs')\n# parser.add_argument('--regs', nargs='?', default='[1e-1,1e-3,0]', help='lambdas for regularization')\nparser.add_argument('--regs', nargs='+', type=float, default=[1, 0.001], help='lambdas for regularization')\nparser.add_argument('--lmd', type=float, default=0.1,\n help='lambda for balance the common loss and adversarial loss')\nparser.add_argument('--keep_prob', type=float, default=0.6, help='keep probability of dropout layers')\nparser.add_argument('--adv', type=int, default=0, help='adversarial training')\nparser.add_argument('--adv_type', nargs='?', default='grad', 
help='adversarial training type: grad, rand')\nparser.add_argument('--cnn', nargs='?', default='resnet', help='cnn type: resnet50')\nparser.add_argument('--epsilon', type=float, default=1, help='epsilon for adversarial')\nparser.add_argument('--weight_dir', nargs='?', default='rec_model_weights', help='directory to store the weights')\nparser.add_argument('--result_dir', nargs='?', default='rec_results', help='directory to store the predictions')\n\nparser.add_argument('--topk', type=int, default=100,\n help='top k predictions to store before the evaluation')\n\nargs = parser.parse_args()\n\nimport tensorflow as tf\nif args.model in ['DVBPR', 'ACF']:\n tf.compat.v1.enable_eager_execution()\n\nfrom recommendation.recommender_utils.Solver import Solver\nfrom time import time\n\nif __name__ == '__main__':\n\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n print('Device gpu: {0}'.format(os.environ['CUDA_VISIBLE_DEVICES']))\n solver = Solver(args)\n\n print(args)\n\n start_time = time()\n\n print('START Training of the Recommender Model at {0}.'.format(start_time))\n if args.model in ['DVBPR', 'ACF']:\n solver.train_tf2()\n else:\n solver.train()\n print('END Training of the Recommender Model in {0} secs.'.format(time() - start_time))\n\n","repo_name":"sisinflab/Visual-Adversarial-Recommendation","sub_path":"src/rec_generator.py","file_name":"rec_generator.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"23081829630","text":"from flask import request, current_app\n\nfrom servicex.models import TransformRequest, TransformationResult, DatasetFile, db\nfrom servicex.resources.servicex_resource import ServiceXResource\n\n\nclass TransformerFileComplete(ServiceXResource):\n @classmethod\n def make_api(cls, transformer_manager, elasticsearch_adapter):\n cls.transformer_manager = transformer_manager\n cls.elasticsearch_adapter = elasticsearch_adapter\n return cls\n\n def put(self, request_id):\n info = request.get_json()\n submitted_request = TransformRequest.return_request(request_id)\n dataset_file = DatasetFile.get_by_id(info['file-id'])\n\n rec = TransformationResult(\n did=submitted_request.did,\n file_id=dataset_file.id,\n request_id=request_id,\n file_path=info['file-path'],\n transform_status=info['status'],\n transform_time=info['total-time'],\n total_bytes=info['total-bytes'],\n total_events=info['total-events'],\n avg_rate=info['avg-rate'],\n messages=info['num-messages']\n )\n rec.save_to_db()\n\n if self.elasticsearch_adapter:\n self.elasticsearch_adapter.create_update_path(\n dataset_file.get_path_id(),\n self._generate_file_status_record(dataset_file, info['status']))\n\n self.elasticsearch_adapter.create_update_request(\n request_id,\n self._generate_transformation_record(submitted_request, 'transforming'))\n\n files_remaining = TransformRequest.files_remaining(request_id)\n if files_remaining is not None and files_remaining <= 0:\n namespace = current_app.config['TRANSFORMER_NAMESPACE']\n print(\"Job is all done... 
shutting down transformers\")\n self.transformer_manager.shutdown_transformer_job(request_id, namespace)\n\n if self.elasticsearch_adapter:\n self.elasticsearch_adapter.create_update_request(\n request_id,\n self._generate_transformation_record(submitted_request, 'complete'))\n\n print(info)\n db.session.commit()\n\n return \"Ok\"\n","repo_name":"AndrewEckart/ServiceX_App","sub_path":"servicex/resources/transformer_file_complete.py","file_name":"transformer_file_complete.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"7423993300","text":"A = input()\r\nB = input()\r\nC = input()\r\nA_list = A.split(':')\r\nB_list = B.split(':')\r\nC_list = C.split(':')\r\ntime_diff = 0\r\nif int(C_list[0] < A_list[0]):\r\n time_diff= 24*3600\r\ntime_diff += (int(C_list[0]) - int(A_list[0]))*3600 + (int(C_list[1]) - int(A_list[1]))*60 + (int(C_list[2]) - int(A_list[2]))\r\n\r\ntime_diff = time_diff//2 + time_diff %2\r\nhour_diff = time_diff // 3600\r\nminute_diff = time_diff//60 - hour_diff*60\r\nseconds_diff = time_diff - minute_diff*60 - hour_diff*3600\r\n\r\nhour_final = int(B_list[0])\r\nminutes_final = int(B_list[1])\r\nseconds_final = int(B_list[2])\r\n\r\nhour_final +=hour_diff\r\nhour_final %=24\r\nminutes_final += minute_diff\r\nif minutes_final >=60:\r\n hour_final += minutes_final//60\r\n minutes_final%=60\r\nseconds_final+=seconds_diff\r\nif seconds_final >=60:\r\n minutes_final+= seconds_final//60\r\n seconds_final%=60\r\n\r\nchar_1 = hour_final\r\nif hour_final <10:\r\n char_1 = '0' + str(hour_final)\r\nchar_2 = minutes_final\r\nif minutes_final <10:\r\n char_2 = '0' + str(minutes_final)\r\nchar_3 = seconds_final\r\nif seconds_final <10:\r\n char_3 = '0' + str(seconds_final)\r\nprint(f\"{char_1}:{char_2}:{char_3}\")\r\n","repo_name":"DrozdovVladimir1/yandex_algo","sub_path":"SNTP.py","file_name":"SNTP.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15048784760","text":"# 加密记事本程序\nimport json\nimport time\nfrom os.path import exists, join\n\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.lang.builder import Builder\nfrom kivy.properties import BooleanProperty, StringProperty, NumericProperty, ListProperty, \\\n AliasProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition\n\n__version__ = \"1.0.0\"\n\nBuilder.load_string(\"\"\"\n:\n # 普通标签\n Label:\n id: w_label\n pos: root.pos\n text: root.text\n # 输入框,双击标签时激活\n TextInput:\n id: w_textinput\n pos: root.pos\n text: root.text\n multiline: root.multiline\n on_focus: root.check_focus_and_view(self)\n\n:\n # Rst文档 \n RstDocument:\n id: w_label\n pos: root.pos\n text: root.text\n # 输入框,双击标签时激活\n TextInput:\n id: w_textinput\n pos: root.pos\n text: root.text\n multiline: root.multiline\n on_focus: root.check_focus_and_view(self)\n\n:\n # 记事本查看/编辑\n on_note_content: app.set_note_content(self.note_index, self.note_content)\n on_note_title: app.set_note_title(self.note_index, self.note_title)\n\n BoxLayout:\n\n orientation: 'vertical'\n\n BoxLayout:\n\n orientation: 'horizontal'\n size_hint_y: None\n height: '48dp'\n padding: '5dp'\n # 背景色\n canvas:\n Color:\n rgb: .3, .3, .3\n Rectangle:\n pos: self.pos\n size: self.size\n # 返回按钮,如果是新建且内容为空,则先删除再返回\n Button:\n text: '<'\n size_hint_x: None\n width: self.height\n 
on_release: \n if root.note_title == 'New note' and root.note_content == '' : \\\n app.del_note(root.note_index) \n app.go_notes()\n # 标题输入\n MutableLabelTextInput:\n text: root.note_title\n font_size: '16sp'\n multiline: False\n on_text: root.note_title = self.text\n # 删除按钮\n Button:\n text: 'X'\n size_hint_x: None\n width: self.height\n on_release: app.del_note(root.note_index)\n # 内容输入\n MutableLabelTextInput:\n text: root.note_content\n on_text: root.note_content = self.text\n\n:\n # 记事本列表项\n on_note_mtime: root.update_mtime(root.note_mtime)\n height: '48sp'\n size_hint_y: None\n\n canvas:\n Color:\n rgb: .3, .3, .3\n Rectangle:\n pos: self.pos\n size: self.width, 1\n\n BoxLayout:\n\n padding: '5dp'\n # 标题\n Label:\n text: root.note_title\n # 修改时间\n Label:\n id: label_mtime\n text: \"1982-07-07\"\n # 编写按钮\n Button:\n text: '>'\n size_hint_x: None\n width: self.height\n on_release: app.edit_note(root.note_index)\n \n:\n # 记事本\n BoxLayout:\n\n orientation: 'vertical'\n\n BoxLayout:\n\n orientation: 'horizontal'\n size_hint_y: None\n height: '48dp'\n padding: '5dp'\n\n canvas:\n Color:\n rgb: .3, .3, .3\n Rectangle:\n pos: self.pos\n size: self.size\n # 图标\n Image:\n source: 'icon.png'\n mipmap: True\n size_hint_x: None\n width: self.height\n # 标题\n Label:\n text: 'Notes'\n font_size: '16sp'\n # 新建按钮\n Button:\n text: '+'\n size_hint_x: None\n width: self.height\n on_release: app.add_note()\n # 记事本标题列表\n RecycleView:\n data: root.data_for_widgets\n viewclass: 'NoteListItem'\n RecycleBoxLayout:\n default_size: None, dp(56)\n default_size_hint: 1, None\n size_hint_y: None\n height: self.minimum_height\n orientation: 'vertical'\n spacing: dp(2)\n\n\"\"\")\n\n\nclass MutableTextInput(FloatLayout):\n \"\"\"可变文本输入框\"\"\"\n text = StringProperty()\n multiline = BooleanProperty(True)\n\n def __init__(self, **kwargs):\n \"\"\"初始化\"\"\"\n super(MutableTextInput, self).__init__(**kwargs)\n Clock.schedule_once(self.prepare, 0)\n\n def prepare(self, *args):\n \"\"\"设置显示和输入\"\"\"\n self.w_textinput = self.ids.w_textinput.__self__\n self.w_label = self.ids.w_label.__self__\n self.view()\n\n def on_touch_down(self, touch):\n \"\"\"双击标签编辑\"\"\"\n if self.collide_point(*touch.pos) and touch.is_double_tap:\n self.edit()\n return super(MutableTextInput, self).on_touch_down(touch)\n\n def edit(self):\n \"\"\"编辑内容\"\"\"\n self.clear_widgets()\n self.add_widget(self.w_textinput)\n self.w_textinput.focus = True\n\n def view(self):\n self.clear_widgets()\n if not self.text:\n self.w_label.text = \"Double tap/click to edit\"\n self.add_widget(self.w_label)\n\n def check_focus_and_view(self, textinput):\n \"\"\"当输入框失去焦点时显示内容\"\"\"\n if not textinput.focus:\n self.text = textinput.text\n self.view()\n\n\nclass NoteView(Screen):\n \"\"\"记事本编辑/查看\"\"\"\n note_index = NumericProperty()\n note_title = StringProperty()\n note_content = StringProperty()\n note_mtime = NumericProperty()\n\n\nclass NoteListItem(BoxLayout):\n \"\"\"记事本列表\"\"\"\n note_content = StringProperty()\n note_title = StringProperty()\n note_index = NumericProperty()\n note_mtime = NumericProperty()\n\n def update_mtime(self, mtime):\n \"\"\"更新修改时间\"\"\"\n self.ids.label_mtime.text = time.strftime(\"%Y-%m-%d\\n%H:%M:%S\", time.localtime(mtime))\n\n\nclass Notes(Screen):\n \"\"\"记事本类\"\"\"\n data = ListProperty()\n\n def _get_data_for_widgets(self):\n return [{\n 'note_index': index,\n 'note_content': item['content'],\n 'note_title': item['title'],\n 'note_mtime': item['mtime']}\n for index, item in enumerate(self.data)]\n\n data_for_widgets = 
AliasProperty(_get_data_for_widgets, bind=['data'])\n\n\nclass NoteApp(App):\n \"\"\"Notepad application\"\"\"\n def build(self):\n self.notes = Notes(name='notes')\n self.load_notes() # load saved notes\n self.transition = SlideTransition(duration=.35)\n root = ScreenManager(transition=self.transition)\n root.add_widget(self.notes)\n return root\n\n def load_notes(self):\n \"\"\"Load the saved notes\"\"\"\n if not exists(self.notes_fn):\n self.notes.data = []\n return\n with open(self.notes_fn) as fd:\n data = json.load(fd)\n self.notes.data = data\n\n def add_note(self):\n \"\"\"Add a note\"\"\"\n self.notes.data.append({'title': 'New note',\n 'content': '',\n 'mtime': time.time()})\n note_index = len(self.notes.data) - 1\n self.edit_note(note_index)\n\n def edit_note(self, note_index):\n \"\"\"Edit a note\"\"\"\n note = self.notes.data[note_index]\n name = 'note{}'.format(note_index)\n\n if self.root.has_screen(name):\n self.root.remove_widget(self.root.get_screen(name))\n\n view = NoteView(\n name=name,\n note_index=note_index,\n note_title=note.get('title'),\n note_content=note.get('content'),\n note_mtime=note.get('mtime'))\n self.root.add_widget(view)\n self.transition.direction = 'left'\n self.root.current = view.name\n\n def del_note(self, note_index):\n \"\"\"Delete a note\"\"\"\n del self.notes.data[note_index]\n self.save_notes()\n self.refresh_notes()\n self.go_notes()\n\n def set_note_content(self, note_index, note_content):\n \"\"\"Set a note's content\"\"\"\n self.notes.data[note_index]['content'] = note_content\n self.notes.data[note_index]['mtime'] = time.time()\n data = self.notes.data\n self.notes.data = []\n self.notes.data = data\n self.save_notes()\n self.refresh_notes()\n\n def set_note_title(self, note_index, note_title):\n \"\"\"Set a note's title\"\"\"\n self.notes.data[note_index]['title'] = note_title\n self.notes.data[note_index]['mtime'] = time.time()\n self.save_notes()\n self.refresh_notes()\n\n def refresh_notes(self):\n \"\"\"Refresh the note list\"\"\"\n data = self.notes.data\n self.notes.data = []\n self.notes.data = data\n\n def save_notes(self):\n \"\"\"Save the notes\"\"\"\n with open(self.notes_fn, 'w') as fd:\n json.dump(self.notes.data, fd)\n\n def go_notes(self):\n \"\"\"Go back to the note list\"\"\"\n self.transition.direction = 'right'\n self.root.current = 'notes'\n\n @property\n def notes_fn(self):\n return join('./', 'notes.json')\n\n\nif __name__ == '__main__':\n NoteApp().run()\n","repo_name":"babylco0/python3_examples","sub_path":"Note/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"32604486209","text":"#before running the program please install the following packages\r\n# for beautifulsoup4 in terminal write the following command-\r\n# pip install beautifulsoup4\r\n# for urllib.request-\r\n# pip install urllib3\r\n#for requests-\r\n# pip install requests\r\nimport requests\r\nimport os\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.request\r\n\r\n# for returning beautifulsoup\r\ndef html(pageurl):\r\n url = pageurl\r\n code = requests.get(url)\r\n plaintext = code.text\r\n soup= BeautifulSoup(plaintext, 'html.parser')\r\n return soup\r\n\r\ndef category_search(pageurl):\r\n soup=html(pageurl)\r\n f=open('category.txt','w')\r\n for head in soup.findAll('div',{\"class\": \"heading\"}): #finds divisions having class:heading\r\n f.write(head.text) #writes it in a file\r\n f.close()\r\n\r\ndef course_title(pageurl):\r\n soup=html(pageurl)\r\n f=open('course title.txt','w')\r\n for title in soup.findAll('h2',{\"class\": 
\"channel-overview-title\"}): #finds all headings having class:channel-overview-title\r\n f.write(title.text+'\\n') #writes it in a file\r\n f.close()\r\n\r\ndef cover_image(pageurl):\r\n soup=html(pageurl)\r\n path = \"cover images\"\r\n if not os.path.exists(path):\r\n os.makedirs(path) #makes a directory named cover images\r\n for image in soup.findAll('div',{\"class\": \"channel-overview-img\"}):\r\n src='https://see.stanford.edu/'+image.find('img').get('src')\r\n name=path+'/'+image.find('img').get('alt')+'.jpg'\r\n urllib.request.urlretrieve(src,name) #downloads the image and stores it the directory\r\n\r\n\r\ndef course_number(pageurl):\r\n soup=html(pageurl)\r\n f=open('course number.txt','w')\r\n for ul in soup.findAll('ul',{\"class\": \"channel-overview-details\"}): #finds all ul tags having class:channel-overview-details\r\n f.write(ul.find('li').text+'\\n') #writes the first li content of the ul\r\n f.close()\r\n\r\ndef instructor(pageurl):\r\n soup=html(pageurl)\r\n path = \"instructor details with photo\"\r\n if not os.path.exists(path):\r\n os.makedirs(path) #creates the directory 'instructor details with photo'\r\n for ul in soup.findAll('ul',{\"class\": \"channel-overview-details\"}):\r\n soup=html(pageurl+'/'+ul.find('li').text) #goes to the individual courses\r\n innerpath=path +'/'+ul.find('li').text\r\n if not os.path.exists(innerpath):\r\n os.makedirs(innerpath) #makes directory for each course\r\n for div in soup.findAll('div',{'class':'panel-content instructor-bio-panel'}):\r\n fullpath = os.path.join(innerpath, div.find('p').text + '.txt')\r\n f = open(fullpath, 'w')\r\n f.write(div.text) #writes instructor detail\r\n src = 'https://see.stanford.edu/' + div.find('img').get('src')\r\n name=innerpath+'/'+div.find('p').text+'.jpg'\r\n urllib.request.urlretrieve(src,name) #downloads the image\r\n f.close()\r\n\r\ndef sessions(pageurl):\r\n soup=html(pageurl)\r\n path='Number of course sessions'\r\n if not os.path.exists(path):\r\n os.makedirs(path) #makes directory\r\n for ul in soup.findAll('ul',{\"class\": \"channel-overview-details\"}):\r\n soup=html(pageurl + '/' + ul.find('li').text)\r\n innerpath=path+'/'+ul.find('li').text+'.txt'\r\n f=open(innerpath,'w')\r\n for head in soup.findAll('h2',{'class':'pull-left'}):\r\n if head.find('span')!=None: #checks for the specific h2 having a span tag\r\n session=head.find('span').text\r\n f.write(''.join(c for c in session if c in '0123456789')) #writes only the number part from the string\r\n\r\ndef description(pageurl):\r\n soup=html(pageurl)\r\n path = 'description'\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n for ul in soup.findAll('ul', {\"class\": \"channel-overview-details\"}):\r\n soup=html(pageurl + '/' + ul.find('li').text)\r\n innerpath=path+'/'+ul.find('li').text+'.txt'\r\n f=open(innerpath,'w')\r\n div=soup.find('div',{'class':'panel-content'})\r\n desc=div.find('p')\r\n f.write(desc.text) #writes the description\r\n f.close()\r\n\r\ndef exams(pageurl):\r\n soup=html(pageurl)\r\n path = 'Exam details'\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n for ul in soup.findAll('ul', {\"class\": \"channel-overview-details\"}):\r\n soup=html(pageurl + '/' + ul.find('li').text)\r\n for div in soup.findAll('div',{'class':'panel-heading'}):\r\n if div.find('h2').text=='Exams':\r\n innerpath = path + '/' + ul.find('li').text\r\n if not os.path.exists(innerpath):\r\n os.makedirs(innerpath)\r\n nextdiv = div.findNext('div') #returns the nextsibling of the div tag having heading as Exams\r\n for tr in 
nextdiv.findAll('tr'): #crates sub-directory for each exam type and stores all the pdf of that type\r\n for td in tr.findAll('td'):\r\n if not td.find('a'):\r\n fullpath = innerpath + '/' + td.text\r\n if not os.path.exists(fullpath):\r\n os.makedirs(fullpath)\r\n else:\r\n url='https://see.stanford.edu/'+td.find('a').get('href')\r\n name=fullpath+'/'+td.text+'.pdf'\r\n urllib.request.urlretrieve(url,name)\r\n\r\nurl='https://see.stanford.edu/Course'\r\ncategory_search(url)\r\ncourse_title(url)\r\ncover_image(url)\r\ncourse_number(url)\r\ninstructor(url)\r\nsessions(url)\r\ndescription(url)\r\nexams(url)","repo_name":"rathorekishan0/Web-Scraping","sub_path":"stanford.py","file_name":"stanford.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16795740784","text":"\"\"\"\nSearches the titles, descriptions and transcripts of Rob Miles videos, to find keywords/phrases\n\"\"\"\n\nimport re\nimport os\nfrom modules.module import Module, Response\nfrom config import subs_dir\n\n\nclass VideoSearch(Module):\n \"\"\"\n A module that searches the titles, descriptions and transcripts of videos, to find keywords/phrases\n \"\"\"\n\n NOT_FOUND_MESSAGE = \"No matches found\"\n\n def __init__(self):\n super().__init__()\n self.re_search = re.compile(\n r\"\"\"((([Ww]hich|[Ww]hat) vid(eo)? (is|was) (it|that))|\n?([Ii]n )?([Ww]hich|[Ww]hat)('?s| is| was| are| were)? ?(it|that|the|they|those)? ?vid(eo)?s? ?(where|in which|which)?|\n?[Vv]id(eo)? ?[Ss]earch) (?P.+)\"\"\"\n )\n self.subsdir = subs_dir\n self.videos = []\n self.load_videos()\n\n class Video:\n def __init__(self, title, stub, text=\"\", description=\"\"):\n\n self.title = title\n self.stub = stub\n self.text = text\n self.description = description\n\n self.url = \"http://youtu.be/%s\" % self.stub\n\n self.score = 0\n\n def __repr__(self):\n return '
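The VideoSearch record above is cut off mid-definition, but the technique it describes is recognizable: match a query against each video's title, description, and transcript, and rank the hits. A self-contained sketch under that reading; the scoring weights and the exact Video fields beyond those shown are assumptions, not the module's actual implementation:

import re

class Video:
    def __init__(self, title, stub, text="", description=""):
        # Fields mirror the truncated record; url is derived from the stub.
        self.title = title
        self.stub = stub
        self.text = text              # transcript
        self.description = description
        self.url = "http://youtu.be/%s" % self.stub
        self.score = 0

def search(videos, query):
    # Case-insensitive literal match; hypothetical weights favor title hits.
    pattern = re.compile(re.escape(query), re.IGNORECASE)
    hits = []
    for v in videos:
        v.score = 0
        if pattern.search(v.title):
            v.score += 3
        if pattern.search(v.description):
            v.score += 2
        if pattern.search(v.text):
            v.score += 1
        if v.score:
            hits.append(v)
    return sorted(hits, key=lambda v: v.score, reverse=True)

videos = [Video("Intro to Reward Hacking", "abc123", text="we discuss specification gaming")]
print([v.url for v in search(videos, "reward hacking")])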