diff --git "a/4114.jsonl" "b/4114.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4114.jsonl"
@@ -0,0 +1,635 @@
+{"seq_id":"631888591","text":"#Time Complexity : O(N)\n#Space Complexity: O(N)\n#Yes it ran on leetcode\n\n\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n global dic\n dic = {}\n global idx\n idx = 0\n if len(preorder) == 0 and len(inorder) == 0:\n return None\n for i in range(len(inorder)):\n dic[inorder[i]] = i\n return self.helper(preorder, inorder, 0, len(inorder) - 1)\n\n def helper(self, preorder, inorder, start, end):\n global dic\n global idx\n # base\n if idx == len(preorder) or start > end:\n return None\n\n # logic\n\n rootIdx = dic[preorder[idx]]\n root = TreeNode(preorder[idx])\n idx += 1\n root.left = self.helper(preorder, inorder, start, rootIdx - 1)\n root.right = self.helper(preorder, inorder, rootIdx + 1, end)\n\n return root","sub_path":"Construct_Binary_Tree.py","file_name":"Construct_Binary_Tree.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"225415147","text":"#!/usr/bin/env python3\n\nimport urllib.request\nimport iterm2\n\n\nasync def main(connection):\n component = iterm2.StatusBarComponent(\n short_description=\"Show PublicIP\",\n detailed_description=\"Show Public IP Address\",\n knobs=[],\n exemplar=\"[Public IP]\",\n update_cadence=30,\n identifier=\"koh-sh.iterm2-statusbar-scripts.publicip\"\n )\n\n @iterm2.StatusBarRPC\n async def showpublicip(knobs):\n url = 'http://checkip.amazonaws.com/'\n try:\n req = urllib.request.Request(url)\n with urllib.request.urlopen(req) as res:\n body = res.read()\n return \"PublicIP: \" + str(body.decode()).replace(\"\\n\", \"\")\n except Exception:\n return \"No Connection\"\n\n await component.async_register(connection, showpublicip)\n\niterm2.run_forever(main)\n","sub_path":"publicip.py","file_name":"publicip.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"380277425","text":"\"\"\"Test the yig driver by looping back:\n note-cannot use YIG_*atten because it goes below ground\n YIG_1_tune to Analog_In_1 5:1 voltage divider\n YIG_1_tune to Analog_In_2 5:1 voltage divider\n YIG_2_tune to Analog_In_5 5:1 voltage divider\n YIG_2_tune to Analog_In_6 5:1 voltage divider\n P0_0 to P1_0\n P0_1 to P1_1\n P0_2 to P1_2\n\"\"\"\n\nimport time\nimport curses\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom time import sleep\nexec(compile(open(\"./io_board_subs.py\", \"rb\").read(), \"./io_board_subs.py\", 'exec'))\nGPIO.setwarnings(False)\ndef main(win):\n global stdscr\n stdscr = win\n curses.initscr()\n curses.nl()\n curses.noecho()\n#Instantiate the devices and objects we need\n dio = Dio()\n adc0 = Adc(0)\n dac0 = Dac(0)\n adc1 = Adc(1)\n dac1 = Dac(1)\n adc0.setOneShotMode()\n adc1.setOneShotMode()\n stdscr.clear\n stdscr.move(1,20)\n stdscr.addstr(\"YIG DRIVER LOOPBACK\")\n stdscr.move(2,1)\n stdscr.addstr(\"count: \")\n stdscr.move(4,1)\n stdscr.addstr(\"ANALOG_IN_1: \")\n stdscr.move(5,1)\n stdscr.addstr(\"ANALOG_IN_2: \")\n stdscr.move(6,1)\n stdscr.addstr(\"ANALOG_IN_5: \")\n stdscr.move(7,1)\n stdscr.addstr(\"ANALOG_IN_6: \")\n stdscr.move(9,1)\n stdscr.addstr(\"DIO OUT\")\n stdscr.move(9,12)\n stdscr.addstr(\"DIO IN\")\n stdscr.move(10,1)\n stdscr.addstr(\"0: \")\n stdscr.move(11,1)\n stdscr.addstr(\"1: \")\n stdscr.move(12,1)\n stdscr.addstr(\"2: \")\n stdscr.move(13,1)\n stdscr.addstr(\"4: \")\n stdscr.refresh()\n def check_db():\n dac0.write(2,32767) # U3-7 V_OUTC Analog_Out_3\n dac1.write(2,32767) # U6-7 V_OUTC Analog_Out_7\n i = 0\n while True:\n i = i+1\n stdscr.move(2,15)\n stdscr.clrtoeol()\n stdscr.addstr(str(i))\n dac0.write(0,i) # U3-1 V_OUTA Analog_Out_1 YIG_1_tune\n# dac1.write(0,i) # U6-1 V_OUTA Analog_Out_5 YIG_2_tune\n# dac0.write(1,i) # U3-2 V_OUTB Analog_Out_2 YIG_1_atten\n# dac0.write(3,i) # U6-2 V_OUTB Analog_Out_6 YIG_2_atten\n# Analog_In_1 = adc0.read(0)\n stdscr.move(4,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_1))\n# Analog_In_2 = adc0.read(1)\n stdscr.move(5,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_2))\n# Analog_In_5 = adc1.read(0)\n stdscr.move(6,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_5))\n# Analog_In_6 = adc1.read(1)\n stdscr.move(7,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_6))\n# dio.write(0)\n# readback = dio.read()\n stdscr.move(10,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(1)\n# readback = dio.read()\n stdscr.move(11,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(2)\n# readback = dio.read()\n stdscr.move(12,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(4)\n# readback = dio.read()\n stdscr.move(13,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n stdscr.refresh()\n sleep(0.05)\n if i >= 65535: \n i = 0\n check_db()\n curses.nocbreak(); \n stdscr.keypad(0); \n curses.echo()\ncurses.wrapper(main)\n","sub_path":"look_at_i2c_buss.py","file_name":"look_at_i2c_buss.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"653502721","text":"import numpy as np\nimport matplotlib\n\n# sudo apt install python3-tk\n# sudo -H pip3 install PyQt5\n# print('matplotlib all backends are:')\n# backends = sorted(matplotlib.rcsetup.all_backends, key=str.lower)\n# for backend in backends:\n# \tprint(' ', backend)\n# print('matplotlib default backend is:', matplotlib.get_backend())\n# matplotlib.use('Qt5Agg')\n# print('matplotlib current backend is:', matplotlib.get_backend())\nimport matplotlib.pyplot as plt\n\ndef mandelbrot(h, w, maxit=20):\n\t\"\"\"Returns an image of the Mandelbrot fractal of size (h,w).\"\"\"\n\ty, x = np.ogrid[-1.4:1.4:h * 1j, -2:0.8:w * 1j]\n\tc = x + y * 1j\n\tz = c\n\tdivtime = maxit + np.zeros(z.shape, dtype=int)\n\n\tfor i in range(maxit):\n\t\tz = z ** 2 + c\n\t\tdiverge = z * np.conj(z) > 2 ** 2 # who is diverging\n\t\tdiv_now = diverge & (divtime == maxit) # who is diverging now\n\t\tdivtime[div_now] = i # note when\n\t\tz[diverge] = 2 # avoid diverging too much\n\n\treturn divtime\n\ndef plot_histogram1():\n\t# Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2\n\tmu, sigma = 2, 0.5\n\tv = np.random.normal(mu, sigma, 10000)\n\t# Plot a normalized histogram with 50 bins\n\tplt.hist(v, bins=50, normed=1) # matplotlib version (plot)\n\tplt.show()\n\ndef plot_histogram2():\n\tmu, sigma = 2, 0.5\n\tv = np.random.normal(mu, sigma, 10000)\n\t# Compute the histogram with numpy and then plot it\n\t(n, bins) = np.histogram(v, bins=50, normed=True) # NumPy version (no plot)\n\tplt.plot(.5 * (bins[1:] + bins[:-1]), n)\n\tplt.show()\n\ndef main():\n\tplt.imshow(mandelbrot(400, 400))\n\tplt.show()\n\t# plt.savefig('foo.png')\n\n\tplot_histogram1()\n\n\tplot_histogram2()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"toys/numpy_mandelbrot.py","file_name":"numpy_mandelbrot.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"238680884","text":"import cv2\nimport numpy as np\n\n# https://www.youtube.com/watch?v=WQeoO7MI0Bs\n\nimg = cv2.imread(\"resources/Lena.png\")\nkernel = np.ones((5, 5), np.uint8)\n\n# making an image gray\nimgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# cv2.imshow(\"gray image\", imgGray)\n\n# making an image blur\nimgBlur = cv2.GaussianBlur(imgGray, (7, 7), 0) # kernel need to be odd numbers\n# cv2.imshow(\"blur image\", imgBlur)\n\n# edge detection\nimgCanny = cv2.Canny(img, 150, 200)\n# cv2.imshow(\"canny\", imgCanny)\n\n# dialation\nimgDialation = cv2.dilate(imgCanny, kernel, iterations=1)\n# cv2.imshow(\"dialation\", imgDialation)\n\n# thinner image\nimgEroded = cv2.erode(imgDialation, kernel, iterations=1)\n# cv2.imshow(\"eroded\", imgEroded)\n\ncv2.waitKey(0)\n","sub_path":"Chapter2.py","file_name":"Chapter2.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"187254924","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n############\n# Standard #\n############\nimport os\nimport logging\n\n###############\n# Third Party #\n###############\nimport cv2\nimport numpy as np\nimport simplejson as sjson\nfrom pyadplugin import ADPluginServer, ADPluginFunction\n\n##########\n# Module #\n##########\nfrom .statistics import contouring_pipeline\n\nlogger = logging.getLogger(__name__)\n\ndef contouring_plugin(ad_prefix, plugin_prefix=\"\", plugin_suffix=\"\", \n save_image=False, image_dir=None, save_json=False, \n json_path=None, min_cbtime=2, stream=\"IMAGE2\",\n enable_callbacks=True, resize=1.0, kernel=(11,11),\n description=\"\", threshold_factor=2):\n \"\"\"\n Runs a pyadplugin that uses the contouring pipeline.\n \"\"\"\n # Set the image saving path\n if save_image:\n save_frequency = 0.2\n if image_dir is None:\n image_dir = Path(os.path.dirname(os.path.abspath(__file__)) / \n \"{0}_images_{1}\".format(\n plugin_prefix, plugin_suffix))\n else:\n image_dir = Path(str(image_dir))\n # Check that the path exists, create it if not\n if not image_dir.exists():\n image_dir.mkdir(parents=True)\n\n # Set the json saving path\n if save_json:\n if json_path is None:\n json_path = Path(os.path.dirname(os.path.abspath(__file__)) / \n \"{0}_data{1}.json\".format(\n plugin_prefix, plugin_suffix))\n else:\n json_path = Path(str(json_path))\n # Check the file and its parents exist, making them if they don't\n if not json_path.exists():\n json_path.parent.mkdir(parents=True)\n json_path.touch()\n\n # Description to be passed on as a PV\n if not description:\n description = \"PyADPlugin '{0}{1}': Pipeline to output beam statitics.\"\n\n # Define the ADPluginFunction\n def pyad_contouring_plugin(array, height=None, width=None):\n return contouring_pipeline(\n array, height=height, width=width, resize=resize, kernel=kernel,\n prefix=plugin_prefix, suffix=plugin_suffix, save=save_frequency,\n description=description, json_path=json_path, save_image=save_image,\n image_dir=image_dir, thresh_factor=threshold_factor)\n\n # Define the default values for the pv dictionary\n output_dict = {\n \"{0}:DESC{1}\".format(plugin_prefix, plugin_suffix): description,\n \"{0}:BEAM{1}\".format(plugin_prefix, plugin_suffix) : False, \n \"{0}:CENT:X{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:CENT:Y{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:LENGTH{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:WIDTH{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:AREA{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:MATCH{1}\".format(plugin_prefix, plugin_suffix) : -1,\n \"{0}:M{1}\".format(plugin_prefix, plugin_suffix) : np.zeros((24))-1,\n }\n\n logger.info(\"Running '{0}{1}' server for '{2}'.\".format(\n plugin_prefix, plugin_suffix, ad_prefix))\n\n try:\n # Set up the server\n pyad_server = ADPluginServer(\n prefix = prefix,\n ad_prefix = ad_prefix,\n stream = stream,\n min_cbtime = min_cbtime,\n enable_callbacks = enable_callbacks,\n )\n\n # Define the function\n pyad_function = ADPluginFunction(\n \"{0}{1}\".format(plugin_prefix, plugin_suffix), \n output_dict,\n pyad_contouring_plugin,\n pyad_server,\n )\n \n # Log any exceptions we run into\n except Exception as e:\n logger.error(\"Exception raised by pyad server/function:\\n{0}\".format(e))\n raise\n\n \n \n\n \n \n \n \n 
\n","sub_path":"psbeam/pyadplugins/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"277501941","text":"# library imports\nimport cv2.cv2 as cv\nimport numpy as np\n\n# Chargement d'une image\nimg = cv.imread('../images/bgr.png')\ncv.imshow('Input', img)\n\n# Récupération de la longueur et la largueur\nwidth = img.shape[0]\nheight = img.shape[1]\n\n# Récupération des canaux de couleurs\nblues, greens, reds = cv.split(img)\n\n# display the image with OpenCV imshow()\n# cv.imshow('(B)lues', blues)\n# cv.imshow('(G)reens', greens)\n# cv.imshow('(R)eds ', reds)\n\n# Création d'une matrice vide avec convertion du depth\nzero = np.zeros((width, height))\nzero = np.uint8(zero)\n\nRG = cv.merge([zero, greens, reds])\nBR = cv.merge((blues, zero, reds))\nBG = cv.merge((blues, greens, zero))\nB = cv.merge((blues, zero, zero))\nG = cv.merge((zero, greens, zero))\nR = cv.merge((zero, zero, reds))\ncv.imshow('R+G', RG)\ncv.imshow('R+B', BR)\ncv.imshow('B+G', BG)\ncv.imshow('B', B)\ncv.imshow('G', G)\ncv.imshow('R', R)\n\n# OpenCV waitKey() is a required keyboard binding function after imwshow()\ncv.waitKey(0)\n# destroy all windows command\ncv.destroyAllWindows()","sub_path":"Exercices/exercice2.py","file_name":"exercice2.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"492740033","text":"import copy\nimport logging\n\nfrom pgdrive.constants import TerminationState\nfrom pgdrive.envs.pgdrive_env_v2 import PGDriveEnvV2\nfrom pgdrive.scene_creator.blocks.first_block import FirstBlock\nfrom pgdrive.scene_creator.road.road import Road\nfrom pgdrive.scene_manager.spawn_manager import SpawnManager\nfrom pgdrive.utils import setup_logger, get_np_random, PGConfig\nfrom pgdrive.utils.pg_config import merge_dicts\n\nMULTI_AGENT_PGDRIVE_DEFAULT_CONFIG = dict(\n # ===== Multi-agent =====\n is_multi_agent=True,\n num_agents=2, # If num_agents is set to None, then endless vehicles will be added only the empty spawn points exist\n\n # Whether to terminate a vehicle if it crash with others. Since in MA env the crash is extremely dense, so\n # frequently done might not be a good idea.\n crash_done=False,\n out_of_road_done=True,\n delay_done=25, # Wait for 5 seconds in real world.\n\n # Whether the vehicle can rejoin the episode\n allow_respawn=True,\n\n # The maximum length of the episode. If allow respawn, then this is the maximum step that respawn can happen. After\n # that, the episode won't terminate until all existing vehicles reach their horizon or done. The vehicle specified\n # horizon is also this value.\n horizon=1000,\n\n # ===== Vehicle Setting =====\n vehicle_config=dict(lidar=dict(num_lasers=72, distance=40, num_others=0)),\n target_vehicle_configs=dict(),\n\n # ===== New Reward Setting =====\n out_of_road_penalty=10,\n crash_vehicle_penalty=10,\n crash_object_penalty=10,\n crash_vehicle_cost=1,\n crash_object_cost=1,\n out_of_road_cost=0, # Do not count out of road into cost!\n\n # ===== Environmental Setting =====\n top_down_camera_initial_x=0,\n top_down_camera_initial_y=0,\n top_down_camera_initial_z=120, # height\n traffic_density=0.,\n auto_termination=False,\n camera_height=4,\n)\n\n\nclass MultiAgentPGDrive(PGDriveEnvV2):\n \"\"\"\n This serve as the base class for Multi-agent PGDrive!\n \"\"\"\n\n # A list of road instances denoting which roads afford spawn points. 
If not set, then search for all\n # possible roads and spawn new agents in them if possible.\n spawn_roads = [\n # Road(FirstBlock.NODE_1, FirstBlock.NODE_2),\n Road(FirstBlock.NODE_2, FirstBlock.NODE_3)\n ]\n\n @staticmethod\n def default_config() -> PGConfig:\n config = PGDriveEnvV2.default_config()\n config.update(MULTI_AGENT_PGDRIVE_DEFAULT_CONFIG)\n return config\n\n def __init__(self, config=None):\n self._raw_input_config = copy.deepcopy(config)\n super(MultiAgentPGDrive, self).__init__(config)\n self._top_down_renderer = None\n\n def _process_extra_config(self, config) -> \"PGConfig\":\n ret_config = self.default_config().update(\n config, allow_overwrite=False, stop_recursive_update=[\"target_vehicle_configs\"]\n )\n if not ret_config[\"crash_done\"] and ret_config[\"crash_vehicle_penalty\"] > 2:\n logging.warning(\n \"Are you sure you wish to set crash_vehicle_penalty={} when crash_done=False?\".format(\n ret_config[\"crash_vehicle_penalty\"]\n )\n )\n if ret_config[\"use_render\"] and ret_config[\"fast\"]:\n logging.warning(\"Turn fast=False can accelerate Multi-agent rendering performance!\")\n\n # Workaround\n if ret_config[\"target_vehicle_configs\"]:\n for k, v in ret_config[\"target_vehicle_configs\"].items():\n old = ret_config[\"vehicle_config\"].copy()\n new = old.update(v)\n ret_config[\"target_vehicle_configs\"][k] = new\n\n self._spawn_manager = SpawnManager(\n exit_length=ret_config[\"map_config\"][\"exit_length\"],\n lane_num=ret_config[\"map_config\"][\"lane_num\"],\n num_agents=ret_config[\"num_agents\"],\n vehicle_config=ret_config[\"vehicle_config\"],\n target_vehicle_configs=ret_config[\"target_vehicle_configs\"],\n seed=self._DEBUG_RANDOM_SEED\n )\n\n self._spawn_manager.set_spawn_roads(self.spawn_roads)\n\n ret_config = self._update_agent_pos_configs(ret_config)\n return ret_config\n\n def _update_agent_pos_configs(self, config):\n config[\"target_vehicle_configs\"] = self._spawn_manager.get_target_vehicle_configs(seed=self._DEBUG_RANDOM_SEED)\n return config\n\n def done_function(self, vehicle_id):\n done, done_info = super(MultiAgentPGDrive, self).done_function(vehicle_id)\n if done_info[TerminationState.CRASH] and (not self.config[\"crash_done\"]):\n assert done_info[TerminationState.CRASH_VEHICLE] or \\\n done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]\n if not (done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]):\n # Does not revert done if high-priority termination happens!\n done = False\n\n if done_info[TerminationState.OUT_OF_ROAD] and (not self.config[\"out_of_road_done\"]):\n assert done_info[TerminationState.CRASH_VEHICLE] or \\\n done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]\n if not done_info[TerminationState.SUCCESS]:\n done = False\n\n return done, done_info\n\n def step(self, actions):\n o, r, d, i = super(MultiAgentPGDrive, self).step(actions)\n o, r, d, i = self._after_vehicle_done(o, r, d, i)\n\n # Update respawn manager\n if self.episode_steps >= self.config[\"horizon\"] or self.scene_manager.replay_system is not None:\n self.agent_manager.set_allow_respawn(False)\n self._spawn_manager.step()\n new_obs_dict = self._respawn_vehicles(randomize_position=self.config[\"random_traffic\"])\n if new_obs_dict:\n for new_id, new_obs in new_obs_dict.items():\n o[new_id] = new_obs\n r[new_id] = 0.0\n i[new_id] = {}\n d[new_id] = False\n\n # Update __all__\n d[\"__all__\"] = (\n ((self.episode_steps >= self.config[\"horizon\"]) and (all(d.values()))) or 
(len(self.vehicles) == 0)\n or (self.episode_steps >= 5 * self.config[\"horizon\"])\n )\n if d[\"__all__\"]:\n for k in d.keys():\n d[k] = True\n\n return o, r, d, i\n\n def reset(self, *args, **kwargs):\n self.config = self._update_agent_pos_configs(self.config)\n ret = super(MultiAgentPGDrive, self).reset(*args, **kwargs)\n assert (len(self.vehicles) == self.num_agents) or (self.num_agents == -1)\n return ret\n\n def _reset_agents(self):\n # update config (for new possible spawn places)\n for v_id, v in self.vehicles.items():\n if v_id in self.config[\"target_vehicle_configs\"]:\n v.vehicle_config = self._get_single_vehicle_config(self.config[\"target_vehicle_configs\"][v_id])\n super(MultiAgentPGDrive, self)._reset_agents() # Update config before actually resetting!\n for v_id, _ in self.vehicles.items():\n self._update_destination_for(v_id)\n\n def _after_vehicle_done(self, obs=None, reward=None, dones: dict = None, info=None):\n if self.scene_manager.replay_system is not None:\n return obs, reward, dones, info\n for v_id, v_info in info.items():\n if v_info.get(\"episode_length\", 0) >= self.config[\"horizon\"]:\n if dones[v_id] is not None:\n info[v_id][TerminationState.MAX_STEP] = True\n dones[v_id] = True\n self.dones[v_id] = True\n for dead_vehicle_id, done in dones.items():\n if done:\n self.agent_manager.finish(\n dead_vehicle_id, ignore_delay_done=info[dead_vehicle_id].get(TerminationState.SUCCESS, False)\n )\n self._update_camera_after_finish(dead_vehicle_id)\n return obs, reward, dones, info\n\n def _update_camera_after_finish(self, dead_vehicle_id):\n if self.main_camera is not None and dead_vehicle_id == self.agent_manager.object_to_agent(\n self.current_track_vehicle.name) \\\n and self.pg_world.taskMgr.hasTaskNamed(self.main_camera.CHASE_TASK_NAME):\n self.chase_another_v()\n\n def _get_target_vehicle_config(self):\n return {\n name: self._get_single_vehicle_config(new_config)\n for name, new_config in self.config[\"target_vehicle_configs\"].items()\n }\n\n def _get_observations(self):\n return {\n name: self.get_single_observation(self._get_single_vehicle_config(new_config))\n for name, new_config in self.config[\"target_vehicle_configs\"].items()\n }\n\n def _get_single_vehicle_config(self, extra_config: dict):\n \"\"\"\n Newly introduce method\n \"\"\"\n vehicle_config = merge_dicts(self.config[\"vehicle_config\"], extra_config, allow_new_keys=False)\n return PGConfig(vehicle_config)\n\n def _after_lazy_init(self):\n super(MultiAgentPGDrive, self)._after_lazy_init()\n\n # Use top-down view by default\n if hasattr(self, \"main_camera\") and self.main_camera is not None:\n top_down_camera_height = self.config[\"top_down_camera_initial_z\"]\n self.main_camera.camera.setPos(0, 0, top_down_camera_height)\n self.main_camera.top_down_camera_height = top_down_camera_height\n self.main_camera.stop_track(self.pg_world, self.current_track_vehicle)\n self.main_camera.camera_x += self.config[\"top_down_camera_initial_x\"]\n self.main_camera.camera_y += self.config[\"top_down_camera_initial_y\"]\n\n def _respawn_vehicles(self, randomize_position=False):\n new_obs_dict = {}\n if not self.agent_manager.has_pending_objects():\n return new_obs_dict\n while True:\n new_id, new_obs = self._respawn_single_vehicle(randomize_position=randomize_position)\n if new_obs is not None:\n new_obs_dict[new_id] = new_obs\n else:\n break\n return new_obs_dict\n\n def _force_respawn(self, agent_name, randomize_position=False):\n \"\"\"\n This function can force a given vehicle to respawn!\n \"\"\"\n 
self.agent_manager.finish(agent_name, ignore_delay_done=True)\n self._update_camera_after_finish(agent_name)\n new_id, new_obs = self._respawn_single_vehicle(randomize_position=randomize_position)\n return new_id, new_obs\n\n def _respawn_single_vehicle(self, randomize_position=False):\n \"\"\"\n Arbitrary insert a new vehicle to a new spawn place if possible.\n \"\"\"\n safe_places_dict = self._spawn_manager.get_available_respawn_places(\n self.pg_world, self.current_map, randomize=randomize_position\n )\n if len(safe_places_dict) == 0 or not self.agent_manager.allow_respawn:\n # No more run, just wait!\n return None, None\n assert len(safe_places_dict) > 0\n bp_index = get_np_random(self._DEBUG_RANDOM_SEED).choice(list(safe_places_dict.keys()), 1)[0]\n new_spawn_place = safe_places_dict[bp_index]\n\n if new_spawn_place[self._spawn_manager.FORCE_AGENT_NAME] is not None:\n if new_spawn_place[self._spawn_manager.FORCE_AGENT_NAME] != self.agent_manager.next_agent_id():\n return None, None\n\n new_agent_id, vehicle = self.agent_manager.propose_new_vehicle()\n new_spawn_place_config = new_spawn_place[\"config\"]\n vehicle.vehicle_config.update(new_spawn_place_config)\n vehicle.reset(self.current_map)\n self._update_destination_for(new_agent_id)\n vehicle.update_state(detector_mask=None)\n self.dones[new_agent_id] = False # Put it in the internal dead-tracking dict.\n\n new_obs = self.observations[new_agent_id].observe(vehicle)\n return new_agent_id, new_obs\n\n def _update_destination_for(self, vehicle_id):\n pass\n\n # when agent re-joined to the game, call this to set the new route to destination\n # end_road = -get_np_random(self._DEBUG_RANDOM_SEED).choice(self.spawn_roads) # Use negative road!\n # vehicle.routing_localization.set_route(vehicle.lane_index[0], end_road.end_node)\n\n def render(self, mode='human', text=None, *args, **kwargs):\n if mode == \"top_down\":\n ret = self._render_topdown(*args, **kwargs)\n else:\n ret = super(MultiAgentPGDrive, self).render(mode=mode, text=text)\n return ret\n\n def _render_topdown(self, *args, **kwargs):\n if self._top_down_renderer is None:\n from pgdrive.obs.top_down_renderer import TopDownRenderer\n self._top_down_renderer = TopDownRenderer(self.current_map, *args, **kwargs)\n self._top_down_renderer.render(list(self.vehicles.values()))\n\n def close_and_reset_num_agents(self, num_agents):\n config = copy.deepcopy(self._raw_input_config)\n self.close()\n config[\"num_agents\"] = num_agents\n super(MultiAgentPGDrive, self).__init__(config)\n\n\ndef _test():\n setup_logger(True)\n env = MultiAgentPGDrive(\n {\n \"num_agents\": 12,\n \"allow_respawn\": False,\n \"use_render\": True,\n \"debug\": False,\n \"fast\": True,\n \"manual_control\": True,\n \"pg_world_config\": {\n \"pstats\": False\n },\n }\n )\n o = env.reset()\n total_r = 0\n for i in range(1, 100000):\n # o, r, d, info = env.step(env.action_space.sample())\n o, r, d, info = env.step({v_id: [0, 1] for v_id in env.vehicles.keys()})\n for r_ in r.values():\n total_r += r_\n # o, r, d, info = env.step([0,1])\n d.update({\"total_r\": total_r})\n # env.render(text=d)\n if len(env.vehicles) == 0:\n total_r = 0\n print(\"Reset\")\n env.reset()\n env.close()\n\n\ndef _vis():\n setup_logger(True)\n env = MultiAgentPGDrive(\n {\n # \"use_render\": True,\n # \"fast\": True,\n \"num_agents\": 12,\n \"allow_respawn\": False,\n \"manual_control\": True,\n \"pg_world_config\": {\n \"pstats\": False\n },\n }\n )\n o = env.reset()\n total_r = 0\n for i in range(1, 100000):\n # o, r, d, info = 
env.step(env.action_space.sample())\n o, r, d, info = env.step({v_id: [0.0, 0.0] for v_id in env.vehicles.keys()})\n for r_ in r.values():\n total_r += r_\n # o, r, d, info = env.step([0,1])\n # d.update({\"total_r\": total_r})\n env.render(mode=\"top_down\")\n if len(env.vehicles) == 0:\n total_r = 0\n print(\"Reset\")\n env.reset()\n env.close()\n\n\ndef pygame_replay(name, env_class, save=True, other_ckpt=None):\n import copy\n import json\n import pygame\n env = env_class({\"use_topdown\": True})\n ckpt = \"metasvodist_{}_best.json\".format(name) if other_ckpt is None else other_ckpt\n with open(ckpt, \"r\") as f:\n traj = json.load(f)\n o = env.reset(copy.deepcopy(traj))\n frame_count = 0\n while True:\n o, r, d, i = env.step(env.action_space.sample())\n env.pg_world.force_fps.toggle()\n env.render(mode=\"top_down\", num_stack=50, film_size=(4000, 4000), history_smooth=0)\n if save:\n pygame.image.save(env._top_down_renderer._runtime, \"{}_{}.png\".format(name, frame_count))\n frame_count += 1\n if len(env.scene_manager.replay_system.restore_episode_info) == 0:\n env.close()\n\n\nif __name__ == '__main__':\n _vis()\n","sub_path":"pgdrive/envs/multi_agent_pgdrive.py","file_name":"multi_agent_pgdrive.py","file_ext":"py","file_size_in_byte":15730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"43254788","text":"'''\nCreated on Jan 19, 2013\n\n@author: joshandrews\n'''\nimport math\nimport sys\nsys.path.append(\"..\")\nfrom datetime import datetime\nimport control.logic.standardcalc as standardcalc\nimport control.GlobalVars as gVars\nimport control.StaticVars as sVars\nimport thread\nimport control.sailbotlogger as SBLogger\nfrom control.logic import coresailinglogic\n\ndef setWayPtCoords(boxCoords): #sets the waypoints of the challenge\n wayPtCoords = [] #order = top face, right face, bottom face, left face\n if (boxCoords[0].lat == boxCoords[1].lat): #square\n wayPtCoords[0] = standardcalc.GPSDistAway(boxCoords[0], 20.0, 100.0)\n wayPtCoords[1] = standardcalc.GPSDistAway(boxCoords[1], 100.0, -20.0)\n wayPtCoords[2] = standardcalc.GPSDistAway(boxCoords[2], -20.0, -100.0)\n wayPtCoords[3] = standardcalc.GPSDistAway(boxCoords[3], -100.0, 20.0)\n elif (boxCoords[0].lat < boxCoords[1].lat): #diamond or tilted left square\n cAngle = standardcalc.angleBetweenTwoCoords(boxCoords[0],boxCoords[1])\n wayPntDist1 = 100.0*math.cos(cAngle)\n wayPntDist2 = 100.0*math.sin(cAngle)\n midDist1 = 20.0*math.cos(90 - cAngle)\n midDist2 = 20.0*math.sin(90 - cAngle)\n \n topMidpnt = standardcalc.GPSDistAway(boxCoords[0], midDist1, midDist2)\n rightMidpnt = standardcalc.GPSDistAway(boxCoords[1], midDist2, -midDist1)\n botMidpnt = standardcalc.GPSDistAway(boxCoords[2], -midDist1, -midDist2)\n leftMidpnt = standardcalc.GPSDistAway(boxCoords[3], -midDist2, midDist1)\n wayPtCoords[0] = standardcalc.GPSDistAway(topMidpnt, -wayPntDist1, wayPntDist2)\n wayPtCoords[1] = standardcalc.GPSDistAway(rightMidpnt, wayPntDist2, wayPntDist1)\n wayPtCoords[2] = standardcalc.GPSDistAway(botMidpnt, wayPntDist1, -wayPntDist2)\n wayPtCoords[3] = standardcalc.GPSDistAway(leftMidpnt, -wayPntDist2, -wayPntDist1)\n else: #right tilted square\n cAngle = 180 - standardcalc.angleBetweenTwoCoords(boxCoords[0],boxCoords[1])\n wayPntDist1 = 100.0*math.cos(cAngle)\n wayPntDist2 = 100.0*math.sin(cAngle)\n midDist1 = 20.0*math.cos(90 - cAngle)\n midDist2 = 20.0*math.sin(90 - cAngle)\n \n topMidpnt = standardcalc.GPSDistAway(boxCoords[0], midDist1, -midDist2)\n rightMidpnt = standardcalc.GPSDistAway(boxCoords[1], -midDist2, -midDist1)\n botMidpnt = standardcalc.GPSDistAway(boxCoords[2], -midDist1, midDist2)\n leftMidpnt = standardcalc.GPSDistAway(boxCoords[3], midDist2, midDist1)\n wayPtCoords[0] = standardcalc.GPSDistAway(topMidpnt, wayPntDist1, wayPntDist2)\n wayPtCoords[1] = standardcalc.GPSDistAway(rightMidpnt, wayPntDist2, -wayPntDist1)\n wayPtCoords[2] = standardcalc.GPSDistAway(botMidpnt, -wayPntDist1, -wayPntDist2)\n wayPtCoords[3] = standardcalc.GPSDistAway(leftMidpnt, -wayPntDist2, wayPntDist1)\n \n return wayPtCoords\n\n\ndef SKTimer():\n gVars.SKMinLeft = ((datetime.now() - gVars.taskStartTime ).seconds) / 60\n gVars.SKSecLeft = ((datetime.now() - gVars.taskStartTime ).seconds) - gVars.SKMinLeft*60\n gVars.SKMilliSecLeft = ((datetime.now() - gVars.taskStartTime).microseconds) / 1000\n\ndef getBoxDist(boxCoords):\n boxDistList = [] #top, right, bottom, left\n TL2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[0]) #top left to boat\n TR2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[1]) #top right to boat\n BR2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[2]) #bottom right to boat\n TL2TR = standardcalc.distBetweenTwoCoords(boxCoords[0], boxCoords[1]) #top left to top right\n TR2BR = 
standardcalc.distBetweenTwoCoords(boxCoords[1], boxCoords[2]) #top right to bottom right\n \n topLeftAngle = standardcalc.findCosLawAngle(TL2TR, TL2Boat, TR2Boat)\n rightTopAngle = standardcalc.findCosLawAngle(TR2BR, TR2Boat, BR2Boat)\n \n boxDistList[0] = TL2Boat * math.sin(topLeftAngle) #top dist\n boxDistList[1] = TR2Boat * math.sin(rightTopAngle) #right dist\n boxDistList[2] = 40 - boxDistList[0] #bottom dist\n boxDistList[3] = 40 - boxDistList[1] #left dist\n return boxDistList\n\ndef stationKeepInit(topLeftWaypnt, topRightWaypnt, botLeftWaypnt, botRightWaypnt):\n topLeftCoord = topLeftWaypnt.coordinate\n topRightCoord = topRightWaypnt.coordinate\n botLeftCoord = botLeftWaypnt.coordinate\n botRightCoord = botRightWaypnt.coordinate\n boxCoords = standardcalc.setBoxCoords(topLeftCoord, topRightCoord, botLeftCoord, botRightCoord) #boxCoords[0] = TL, boxCoords[1] = TR, boxCoords[2] = BR, boxCoords[3] = BL\n wayPtCoords = setWayPtCoords(boxCoords) #top, right, bottom, left\n spdList = [0.75]*10\n boxDistList = getBoxDist(boxCoords) #top, right, bottom, left\n meanSpd = 0.75 #from old arduino code\n arduino = gVars.arduino\n gVars.SKCurrentWaypnt = boxDistList.index(min(boxDistList))\n logger = SBLogger.logger()\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n logger.info(\"Station Keeping Initialization finished. Now running Station Keeping Challenge\")\n run(boxCoords, wayPtCoords, spdList, meanSpd, arduino, logger)\n return\n \ndef run(boxCoords, wayPtCoords, spdList, meanSpd, arduino, logger):\n exiting = 0\n while (((datetime.now() - gVars.taskStartTime).seconds < 300) and (gVars.kill_flagSK == 0)):\n secLeft = 300 - (datetime.now() - gVars.taskStartTime).seconds\n turning = 0\n SKTimer()\n boxDistList = getBoxDist(boxCoords)\n if (exiting == 0):\n if (standardcalc.isWPNoGo(gVars.currentData[sVars.AWA_INDEX],gVars.currentData[sVars.HOG_INDEX], gVars.SKCurrentWaypnt, gVars.currentData[sVars.SOG_INDEX], gVars.currentData[sVars.GPS_INDEX])):\n logger.info(\"The boat is sailing upwind. Changing current waypoint.\")\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 1) % 4\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n gVars.kill_flagPTP = 1\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n turning = 1\n if (boxDistList[gVars.SKCurrentWaypnt] < 5):\n logger.info(\"The boat is too close to an edge. Changing current waypoint.\")\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 2) % 4\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n gVars.kill_flagPTP = 1\n logger.info(\"Commencing gybe.\")\n if (gVars.currentData[sVars.AWA_INDEX] < 0):\n arduino.gybe(1)\n else:\n arduino.gybe(0)\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n turning = 1\n if (turning == 0):\n spdList = standardcalc.changeSpdList(spdList)\n meanSpd = standardcalc.meanOfList(spdList)\n logger.info(\"The mean speed of the boat is \" + meanSpd + \" metres per second.\")\n if (boxDistList[gVars.SKCurrentWaypnt] >= meanSpd*(secLeft+2)): #leeway of 2 seconds\n exiting = 1\n logger.info(\"Station Keeping event is about to end. 
Exiting to current waypoint.\")\n elif (boxDistList[(gVars.SKCurrentWaypnt + 2) % 4] >= meanSpd*(secLeft+2+4) ): #leeway of 2 seconds, 4 seconds for gybe\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 2) % 4\n gVars.kill_flagPTP = 1\n logger.info(\"Station Keeping event is about to end. Gybing and exiting to waypoint \" + gVars.SKCurrentWaypnt)\n if (gVars.currentData[sVars.AWA_INDEX] < 0):\n arduino.gybe(1)\n else:\n arduino.gybe(0)\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n exiting = 1\n if (gVars.kill_flagSK == 1):\n logger.info(\"Station Keeping Kill Flag initialized. Station Keeping Challenge has been stopped.\")\n else:\n logger.info(\"Station Keeping Challenge timer has ended.\")\n boxDistList = getBoxDist(boxCoords)\n gVars.SKMinLeft = 0\n gVars.SKSecLeft = 0\n gVars.SKMilliSecLeft = 0\n gVars.kill_flagSK = 0\n gVars.SKCurrentWaypnt = None\n \n return","sub_path":"control/challenge/stationkeeping.py","file_name":"stationkeeping.py","file_ext":"py","file_size_in_byte":8737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"20604800","text":"\"\"\"timetracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url, patterns\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n url(r'^$', 'timetracker.views.index', name=\"index\"),\n url(r'^activities/', include('timetrack.urls', namespace=\"timetrack\")),\n url(r'^login/$', 'timetracker.views.login_view', name=\"login\"),\n url(r'^loginuser/$', 'timetracker.views.login_user', name=\"login_user\"),\n url(r'^logout/$', 'timetracker.views.logout_user', name=\"logout_user\"),\n url(r'^admin/', include(admin.site.urls)),\n]\n\nurlpatterns += patterns(\n '',\n url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n)\n","sub_path":"timetracker/timetracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"129875483","text":"def intersection(arrays):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n result = []\n hash_table = {}\n count = len(arrays)\n\n for single_array in arrays:\n for number in single_array:\n if number not in hash_table:\n hash_table[number] = 1\n else:\n hash_table[number] += 1\n if hash_table[number] == len(arrays):\n result.append(number)\n # print(hash_table) \n return result\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"373308987","text":"# SCAR - Serverless Container-aware ARchitectures\n# Copyright (C) GRyCAP - I3M - UPV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport yaml\nimport os\n\nclass Function:\n def __init__(self, name, image):\n self.name = name\n self.image_id = image\n\nclass YamlParser(object):\n \n def __init__(self, args):\n file_path = args.conf_file\n self.func = args.func\n if os.path.isfile(file_path):\n with open(file_path) as cfg_file:\n self.__setattr__(\"yaml_data\", yaml.safe_load(cfg_file))\n \n def parse_arguments(self):\n functions = []\n for function in self.yaml_data['functions']:\n functions.append(self.parse_function(function, self.yaml_data['functions'][function]))\n return functions[0]\n \n def parse_function(self, function_name, function_data):\n args = {'func' : self.func }\n # Get function name\n args['name'] = function_name\n # Parse function information\n if 'image' in function_data:\n args['image_id'] = function_data['image']\n if 'image_file' in function_data:\n args['image_file'] = function_data['image_file']\n if 'time' in function_data:\n args['time'] = function_data['time']\n if 'memory' in function_data:\n args['memory'] = function_data['memory']\n if 'timeout_threshold' in function_data:\n args['timeout_threshold'] = function_data['timeout_threshold']\n if 'lambda_role' in function_data:\n args['lambda_role'] = function_data['lambda_role']\n if 'description' in function_data:\n args['description'] = function_data['description']\n if 'init_script' in function_data:\n args['init_script'] = function_data['init_script']\n if 'run_script' in function_data:\n args['run_script'] = function_data['run_script'] \n if 'extra_payload' in function_data:\n args['extra_payload'] = function_data['extra_payload']\n if 'log_level' in function_data:\n args['log_level'] = function_data['log_level']\n if 'environment' in function_data:\n variables = []\n for k,v in function_data['environment'].items():\n variables.append(str(k) + '=' + str(v))\n args['environment_variables'] = variables\n # LOG COMMANDS\n if 'log_stream_name' in function_data:\n args['log_stream_name'] = function_data['log_stream_name']\n if 'request_id' in function_data:\n args['request_id'] = function_data['request_id']\n \n if 'data_binary' in function_data:\n args['data_binary'] = function_data['data_binary']\n \n if 's3' in function_data:\n s3_data = function_data['s3']\n if 'deployment_bucket' in s3_data:\n args['deployment_bucket'] = s3_data['deployment_bucket']\n if 'input_bucket' in s3_data:\n args['input_bucket'] = s3_data['input_bucket']\n if 'input_folder' in s3_data:\n args['input_folder'] = s3_data['input_folder']\n if 'output_bucket' in s3_data:\n args['output_bucket'] = s3_data['output_bucket']\n if 'output_folder' in s3_data:\n args['output_folder'] = s3_data['output_folder']\n if 'api_gateway' in function_data:\n api_data = function_data['api_gateway']\n if 'name' in api_data:\n args['api_gateway_name'] = api_data['name']\n if 'parameters' in api_data:\n args['parameters'] 
= api_data['parameters']\n return args\n ","sub_path":"src/parser/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"599726622","text":"# coding=utf-8\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom faker import Factory\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.utils import check_random_state\n\n# The default number of sample returned by generators\nNB_SAMPLE = 10\n\n\nclass TestGenerator(TransformerMixin, BaseEstimator):\n \"\"\"Generate random data in the form of a DataFrame for test purpose.\n\n Can generate:\n * categorical data from a sample\n * numeric data between a range\n * time series data as the index at a given start date and with a given frequency\n\n Parameters\n ----------\n freq: string (default='D')\n Frequency compliant with the pandas frequency aliases.\n start_date: string or datetime-like, (default='today')\n The first date of the index.\n categ_sample: list (default=('foo', 'bar'))\n The list of values used to generate categorical data.\n num_sample: list (default=('foo', 'bar'))\n The list of values used to generate categorical data.\n nb_sample: int (default=NB_SAMPLE)\n The number of sample (row) to generate.\n random_state : RandomState or an int seed (default=0)\n A random number generator instance to define the state of the random permutations generator.\n Returns\n -------\n df_converted\n The DataFrame with normalized columns.\n \"\"\"\n\n def __init__(self, freq='D', start_date='today', categ_sample=('foo', 'bar'), num_sample=(0, 100),\n nb_sample=NB_SAMPLE, random_state=0):\n self.freq = freq\n self.start_date = start_date\n self.categ_sample = categ_sample\n self.num_sample = num_sample\n self.nb_sample = nb_sample\n self.random_state = random_state\n\n def transform(self, X, **transform_params):\n random_state = check_random_state(self.random_state)\n # Generating a sample and selecting a subset to randomize them\n df = pd.DataFrame({'categ': random_state.choice(self.categ_sample, self.nb_sample),\n 'number': random_state.randint(self.num_sample[0], self.num_sample[1], self.nb_sample)\n },\n index=random_state.choice(\n pd.date_range(start=pd.to_datetime(self.start_date), periods=self.nb_sample * 3,\n freq=self.freq),\n self.nb_sample))\n # DateTimeIndex shall be sorted\n df.sort_index(inplace=True)\n return df\n\n def fit(self, X, y=None, **fit_params):\n # Does nothing\n return self\n\n\nclass FakeGenerator(TransformerMixin, BaseEstimator):\n \"\"\"A Fake generator wrapping the Faker data generator http://fake-factory.readthedocs.org/en/stable/.\n\n Parameters\n ----------\n fakes: list (default=('name', 'address', 'text']))\n The list of fake to generate, a column by fake will be generated\n locale: string (default=None)\n The locale to use, see the Faker documentation.\n nb_sample: int (default=NB_SAMPLE)\n The number of sample (row) to generate.\n random_state : RandomState or an int seed (default=0)\n A random number generator instance to define the state of the random permutations generator.\n\n Returns\n -------\n DataFrame\n Containing the fake values (a column by fake and `nb_sample` rows) with a default index.\n\n Raises\n ------\n ValueError\n If a fake is not supported.\n \"\"\"\n\n def __init__(self, fakes=('name', 'address', 'text'), locale=None, nb_sample=NB_SAMPLE, random_state=0):\n self.fakes = fakes\n self.locale = locale\n self.nb_sample = nb_sample\n self.random_state = random_state\n\n def transform(self, X, **transform_params):\n _data = defaultdict(list)\n faker = Factory.create(self.locale)\n random_state = check_random_state(self.random_state)\n faker.seed(self.random_state)\n for 
sample in range(self.nb_sample):\n for fk in self.fakes:\n try:\n _data[fk].append(getattr(faker, fk)())\n except AttributeError as e:\n _msg = 'Fake [%s] not supported' % fk\n raise Exception(_msg)\n return pd.DataFrame.from_dict(_data)\n\n def fit(self, X, y=None, **fit_params):\n # Does nothing\n return self","sub_path":"pyranha/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"452251196","text":"import cv2\r\nimport numpy as np\r\n\r\ncam=cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n k=cv2.waitKey(1)\r\n if k & 0xFF==ord('q'):\r\n break\r\n _,frame=cam.read()\r\n img=cv2.inRange(frame,np.array([0,0,0]),np.array([180,255,30]))\r\n im2,contours,hierarchy = cv2.findContours(img, 1, 2)\r\n for i in range(len(contours)):\r\n cnt = contours[i]\r\n area = cv2.contourArea(cnt)\r\n if area>200:\r\n print(area)\r\n cv2.imshow('Image',img)\r\n","sub_path":"Focal_length_calculation/fl_C.py","file_name":"fl_C.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"613355007","text":"\"\"\"FormScribe meta classes.\"\"\"\n\n\nfrom formscribe.error import InvalidFieldError\n\n\nclass MetaField(type):\n \"\"\"Field metaclass.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n instance = object.__new__(cls, *args, **kwargs)\n\n regex_attributes = [getattr(instance, attribute) for attribute in\n ('regex_group', 'regex_group_key', 'regex_key')]\n if any(regex_attributes) and not all(regex_attributes):\n raise InvalidFieldError('The following attributes are required:'\n ' regex_group, regex_group_key,'\n ' regex_key.')\n\n if instance.regex_key and instance.key:\n raise InvalidFieldError('The following attributes are incompatible:'\n ' regex_key, key.')\n\n if not instance.key and not all(regex_attributes):\n raise InvalidFieldError('Field must be either key-based or'\n ' regex-based.')\n\n instance.__init__()\n\n try:\n automatically_validate = kwargs['automatically_validate']\n except KeyError:\n try:\n automatically_validate = args[1]\n except IndexError:\n automatically_validate = True\n\n if automatically_validate:\n try:\n return instance.validate(kwargs['value'])\n except KeyError:\n try:\n return instance.validate(args[0])\n except IndexError:\n pass\n\n return instance\n","sub_path":"formscribe/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"613101452","text":"import numpy as np\nfrom mlp.activation_functions import Sigmoid\nfrom sklearn.metrics import accuracy_score\nfrom mlp import NeuralNet\nfrom mlp.util import BSSF\nfrom rnn import util\n\n\nclass BPTT(NeuralNet):\n def __init__(self, features=6, hidden=60, classes=7,\n u_back=(0, 20), u_forward=(-21, -1), v_range=(10, 50), k_back=1, k_forward=1,\n learning_rate=0.9, a_func=Sigmoid, max_epochs=1000, patience=20,\n validation_set=None, multi_vsets=False, classification=True):\n self.H = np.arange(hidden)\n # get correct indexes\n self._k = k_back\n self._j = k_forward\n # setup extra matrices and values\n self._hb = u_back\n self._hf = u_forward\n self._v = v_range\n # recurrent matrices\n self.V = self.input_matrix(features, *v_range)\n if u_back and k_back > 0:\n self.Ub = self.recurrent_matrix(*u_back)\n self.δb = self.delta_vecs(u_back, k_back)\n self.Zb = self.Z_vecs(hidden, k_back)\n self.Zin_b = self.Z_vecs(features, k_back)\n if u_forward and k_forward > 0:\n self.Uf = self.recurrent_matrix(*u_forward)\n self.δf = self.delta_vecs(u_forward, k_forward)\n self.Zf = self.Z_vecs(hidden, k_forward)\n self.Zin_f = self.Z_vecs(features, k_forward)\n super().__init__(features, hidden, classes, learning_rate, a_func, max_epochs, patience, validation_set,\n multi_vsets, classification)\n # overwrite W so there's only one\n self.W = np.random.randn(hidden, classes)\n print(\"BPTT!\")\n\n def fit(self, X, Y, multi_sets=False):\n epoch = 0\n Δp = 0\n bssf = BSSF(self.W, self.b, 0)\n if not multi_sets:\n X = [X]\n Y = [Y]\n while epoch < self._max_epochs and Δp < self._patience:\n idx = util.get_indices(X, multi_sets, self._k, self._j)\n for i, j in idx:\n self._forward_prop_tt(X[i], j)\n self._back_prop(Y[i][j])\n epoch += 1\n # Do validation check\n if self._VS:\n score = self.score(self._VS[0], self._VS[1], multi_sets=self._multi_vsets)\n if score > bssf.score:\n bssf = BSSF(self.W, self.b, score)\n Δp = 0\n else:\n Δp += 1\n # if training stopped because of patience, use bssf instead\n if self._VS and Δp >= self._patience:\n self.W = bssf.W\n self.b = bssf.b\n return epoch\n\n # region Predict and Score\n def predict(self, X, multi_sets=False):\n out = []\n if not multi_sets:\n X = [X]\n idx = util.get_indices(X, multi_sets, self._k, self._j, shuffle=False)\n for i, j in idx:\n z = self._forward_prop_tt(X[i], j)\n if self._classification:\n q = np.zeros(z.shape)\n q[z.argmax()] = 1.\n out.append(q)\n else:\n out.append(z)\n return np.array(out)\n\n def score(self, X, y, sample_weight=None, multi_sets=False):\n y2 = []\n if not multi_sets:\n y = [y]\n idx = util.get_indices(y, multi_sets, self._k, self._j, shuffle=False)\n for i, j in idx:\n y2.append(y[i][j])\n y2 = np.array(y2)\n predicted = self.predict(X, multi_sets)\n return accuracy_score(y2, predicted, sample_weight=sample_weight)\n # endregion\n\n def _forward_prop_tt(self, Xi, j):\n # initial activation of hidden layer\n self.Z[1] = np.ones(self.Z[1].shape)\n self.Z[1] *= .0001\n # backwards t\n if self.Ub is not None:\n t = self._k\n for i in range(self._k):\n x = Xi[j-t+i]\n xt = x.reshape(1, len(x))\n self.Zin_b[i] = xt\n self.Z[1][:,slice(*self._v)] += self.activation(xt.dot(self.V) + self.b[0][:, slice(*self._v)])\n self.Z[1][:,slice(*self._hb)] += self.activation(self.Z[1][:,slice(*self._hb)].dot(self.Ub) +\n self.b[0][:,slice(*self._hb)])\n self.Zb[i] = self.Z[1].copy()\n # t == 0\n x = Xi[j]\n self.x0 = x.reshape(1, len(x))\n self.Z[1][:,slice(*self._v)] += self.activation(self.x0.dot(self.V) + 
self.b[0][:,slice(*self._v)])\n # forwards t\n if self.Uf is not None:\n for i in range(self._j):\n x = Xi[j+i+1]\n xt = x.reshape(1, len(x))\n self.Z[1][:,slice(*self._v)] += self.activation(xt.dot(self.V) + self.b[0][:,slice(*self._v)])\n self.Z[1][:,slice(*self._hf)] += self.activation(self.Z[1][:,slice(*self._hf)].dot(self.Ub) +\n self.b[0][:,slice(*self._hf)])\n self.Zf[i] = self.Z[1].copy()\n # output layer\n self.Z[-1] = self.activation(self.Z[-2].dot(self.W) + self.b[-1])\n return self.Z[-1][0]\n\n def _back_prop(self, y):\n # output layer's delta: δ = (T-Z) * f'(net)\n self.δ[-1] = (y - self.Z[-1]) * self.f_prime(self.Z[-1])\n # compute deltas: δj = Σ[δk*Wjk] * f'(net)\n self.δ[0] = np.zeros(self.δ[0].shape) # initially clear\n # t backwards\n if self.Ub is not None:\n self.δb[-1] = np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._hb)] * self.f_prime(self.Zb[-1][:,slice(*self._hb)])\n for i in range(self._k-1, 0, -1):\n self.δb[i-1] = np.tensordot(self.δb[i], self.Ub, (1, 1)) * self.f_prime(self.Zb[i][:,slice(*self._hb)])\n # t == 0\n self.δ[0][:,slice(*self._v)] = np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._v)] * self.f_prime(self.Z[1][:,slice(*self._v)])\n # t forwards\n if self.Uf is not None:\n self.δf[-1] = np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._hf)] * self.f_prime(self.Zf[-1][:,slice(*self._hf)])\n for i in range(self._j-1, 0, -1):\n self.δf[i-1] = np.tensordot(self.δf[i], self.Uf, (1, 1)) * self.f_prime(self.Zf[i][:,slice(*self._hf)])\n\n # update weights: ΔWij = C*δj*Zi\n # output layer\n self.W += self.C * np.outer(self.Z[1], self.δ[-1])\n self.b[-1] += self.C * self.δ[-1]\n # recurrent layers\n ΔV = np.zeros(self.V.shape)\n nv = np.zeros(self.V.shape)\n Δb = np.zeros(self.b[0].shape)\n nb = np.zeros(self.b[0].shape)\n # backwards\n if self.Ub is not None:\n ΔUb = np.zeros(self.Ub.shape)\n for i in range(self._k):\n ΔUb += self.C * np.outer(self.Zb[i][:,slice(*self._hf)], self.δb[i])\n ΔV[:,slice(*self._hb)] += self.C * np.outer(self.Zin_b[i], self.δb[i])\n nv[:,slice(*self._hb)] += 1\n Δb[:,slice(*self._hb)] += self.C * self.δb[i]\n nb[:,slice(*self._hb)] += 1\n ΔUb /= self._k\n self.Ub += ΔUb\n # t == 0\n ΔV += self.C * np.outer(self.x0, self.δ[0][:,slice(*self._v)])\n nv += 1\n Δb[:,slice(*self._v)] += self.C * self.δ[0][:,slice(*self._v)]\n nb[:,slice(*self._v)] += 1\n # forwards\n if self.Uf is not None:\n ΔUf = np.zeros(self.Uf.shape)\n for i in range(self._j):\n ΔUf += self.C * np.outer(self.Zf[i][:,slice(*self._hf)], self.δf[i])\n ΔV[:,slice(*self._hf)] += self.C * np.outer(self.Zin_f[i], self.δf[i])\n nv[:,slice(*self._hf)] += 1\n Δb[:,slice(*self._hf)] += self.C * self.δf[i]\n nb[:,slice(*self._hf)] += 1\n ΔUf /= self._j\n self.Uf += ΔUf\n ΔV /= nv\n Δb = Δb / nb\n self.V += ΔV\n self.b[0] += Δb\n\n def recurrent_matrix(self, start, stop):\n _len = self.H[stop] - self.H[start]\n return np.random.randn(_len, _len)\n\n def input_matrix(self, f, start, stop):\n _len = self.H[stop] - self.H[start]\n return np.random.randn(f, _len)\n\n def delta_vecs(self, h, k):\n start, stop = h\n _len = self.H[stop] - self.H[start]\n δ = []\n for i in range(k):\n δ.append(np.zeros(_len))\n return δ\n\n def Z_vecs(self, hidden, k):\n _Z = []\n for i in range(k):\n _Z.append(np.zeros(hidden))\n return _Z","sub_path":"rnn/bptt.py","file_name":"bptt.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"74797797","text":"\n\nimport sqlite3\nimport os\n\"\"\"\n- How many total Characters are there?\n- How many of each specific subclass?\n- How many total Items?\n- How many of the Items are weapons? How many are not?\n- How many Items does each character have? (Return first 20 rows)\n- How many Weapons does each character have? (Return first 20 rows)\n- On average, how many Items does each Character have?\n- On average, how many Weapons does each character have?\n\n\"\"\"\n\ndef connect_to_db(db_name=\"rpg_db.sqlite3\"):\n return sqlite3.connect(db_name)\n\ndef execute_query(cursor, query):\n cursor.execute(query)\n return cursor.fetchall()\nGET_CHARACTERS = \"\"\"\nSELECT count(character_id) FROM charactercreator_character;\n-- how many total characters are there\n\"\"\"\nGET_CHARACTERS = \"\"\"\nSELECT count(character_id) FROM charactercreator_character;\n-- how each specific sub class\n\"\"\"\n\n\n\n\nif __name__ == \"__main__\":\n conn = connect_to_db()\n curs = conn.cursor()\n results = execute_query(curs, GET_CHARACTERS)\n print(results)\n\n\n","sub_path":"module1-introduction-to-sql/rpg_work.py","file_name":"rpg_work.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"628598270","text":"from models import DigitalDocumentModel\nfrom utilities import ObjectValidation\nfrom anuvaad_auditor.errorhandler import post_error\nfrom datetime import datetime\nimport uuid\nfrom utilities import AppContext\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nimport json\n\nvalidator=ObjectValidation()\n\nclass DigitalDocumentRepositories:\n \n def __init__(self):\n self.docModel=DigitalDocumentModel()\n\n def store(self, userID, recordID, files): \n try:\n for file in files:\n\n # recordID= recordID\n jobID= recordID.split('|')[0]\n fileID=file['file']['identifier']\n file_name=file['file']['name']\n locale=file['config']['language']\n file_type=file['file']['type']\n\n pages =file['pages']\n log_info(\"DigitalDocumentRepo save document for user: {}| record: {}| count of pages received: {}\".format(userID,recordID,str(len(pages))), AppContext.getContext())\n \n blocks=[] \n for page in pages:\n block=self.create_regions_from_page(userID,jobID,recordID,fileID,file_name,locale,file_type,page)\n if len(block.keys())>5:\n blocks.append(block)\n else:\n return block\n log_info('DigitalDocumentRepo page blocks created for insert, user_id:{}, record_id:{}, block length:{}'.format(userID, recordID,str(len(blocks))), AppContext.getContext())\n result=self.docModel.store_bulk_blocks(blocks)\n if result == False:\n return False \n except Exception as e:\n AppContext.addRecordID(recordID)\n log_exception('Exception on save document | DigitalDocumentRepo :{}'.format(str(e)), AppContext.getContext(), e)\n return post_error(\"Data Missing\",\"Failed to store doc since :{}\".format(str(e)),None)\n \n\n\n def update_words(self, user_id, words):\n\n for word in words:\n Validation= validator.update_word_validation(word)\n if Validation is not None:\n return Validation\n\n page=word['page_no']\n region_id=word['region_id']\n word_id=word['word_id']\n record_id=word['record_id']\n user_word = word['updated_word']\n\n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo update word request\", AppContext.getContext())#str(page)\n region_to_update= self.docModel.get_word_region(user_id,record_id,region_id,page)\n if region_to_update:\n if region_to_update['identifier']== region_id :\n region_to_update['updated']=True\n for data in region_to_update['regions']:\n for word in data['regions']:\n if word['identifier']==word_id:\n word['ocr_text']=word['text']\n word['text']=user_word\n break\n else:\n pass\n # return post_error(\"Data Missing\",\"No record with the given user_id,record_id and word_id\",None)\n else:\n return post_error(\"Data Missing\",\"No record with the given user_id,record_id and region_id\",None)\n \n \n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo update word region :{}\".format(str(region_to_update)), AppContext.getContext())\n print(region_to_update)\n if self.docModel.update_word(user_id,record_id,region_id,region_to_update,page) == False:\n return post_error(\"Data Missing\",\"Failed to update word since data is missing\",None)\n return True\n\n\n def get_pages(self, record_id, start_page=1, end_page=5):\n\n total_page_count = self.docModel.get_document_total_page_count(record_id)\n if start_page == 0 and end_page == 0:\n start_page = 1\n end_page = total_page_count\n \n if start_page == 0:\n start_page = 1\n if end_page == 0:\n end_page = 5\n if start_page > end_page:\n return False\n if start_page > total_page_count:\n return False\n\n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo 
fetching doc by pages for record_id:{}\".format(str(record_id)), AppContext.getContext())\n pages = []\n data = {}\n data_page = []\n for i in range(start_page, end_page+1):\n page_block = self.docModel.get_record_by_page(record_id, i)\n if page_block == False:\n return False\n else:\n data_page.append(page_block)\n \n \n pg_block_formated=self.format_page_data(data_page)\n\n data['pages'] = pg_block_formated\n data['start_page'] = start_page\n data['end_page'] = end_page\n data['total'] = total_page_count\n return data\n\n \n def create_regions_from_page(self,userID,jobID,recordID,fileID,file_name,locale,file_type,page):\n try:\n AppContext.addRecordID(recordID)\n log_info('DigitalDocumentRepo page blocks creation started for record_id:{}, page_number:{}'.format(recordID,str(page['page_no'])), AppContext.getContext())\n block_info = {}\n block_info['userID']=userID\n block_info['jobID']=jobID\n block_info['recordID']=recordID\n block_info['file_identifier']=fileID\n block_info['file_name']=file_name\n block_info['file_locale']=locale\n block_info['file_type']= file_type\n block_info['created_on']=datetime.utcnow()\n\n\n page_info = {}\n page_info['page_no'] = page['page_no'] + 1\n page_info['page_identifier'] = page['identifier']\n page_info['page_boundingBox'] = page['boundingBox']\n page_info['page_img_path'] = page['path']\n if 'resolution' in page.keys():\n page_info['page_resolution'] = page['resolution']\n\n block_info['page_info'] = page_info\n\n block_info['regions'] = page['regions']\n return block_info\n except Exception as e:\n AppContext.addRecordID(recordID)\n log_exception('Exception on save document | DigitalDocumentRepo :{}'.format(str(e)), AppContext.getContext(), e)\n return post_error(\"Data Missing\",\"Failed to store doc since data is missing\",None)\n\n\n def format_page_data(self,page_blocks):\n block1 = page_blocks[0]\n pages = {}\n file = {}\n if \"file_identifier\" in block1:\n file[\"identifier\"] = block1[\"file_identifier\"]\n file[\"name\"] = block1[\"file_name\"]\n file[\"type\"] = block1[\"file_type\"]\n config = {}\n config[\"language\"] = block1[\"file_locale\"]\n\n pages[\"file\"] = file\n pages[\"config\"] = config\n pages[\"pages\"] = []\n for block in page_blocks:\n if block == None:\n pages[\"pages\"].append(None)\n continue\n block_info = {}\n block_info[\"identifier\"]= block[\"page_info\"][\"page_identifier\"]\n block_info[\"resolution\"]= block[\"page_info\"][\"page_resolution\"]\n block_info[\"path\"] = block[\"page_info\"][\"page_img_path\"]\n block_info[\"boundingBox\"]= block[\"page_info\"][\"page_boundingBox\"]\n block_info[\"page_no\"] = block[\"page_info\"][\"page_no\"]\n block_info[\"regions\"] = block[\"regions\"]\n\n pages[\"pages\"].append(block_info)\n return pages\n\n \n\n\n\n\n","sub_path":"anuvaad-etl/anuvaad-extractor/ocr-content-handler/src/repositories/ocr_document.py","file_name":"ocr_document.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"406832584","text":"# -*- coding: utf-8 -*-\n'''\nГенерация word2vector моделей для слов и частей слов.\nИспользуется возможность gensim брать текст из генератора.\n'''\n\nfrom __future__ import print_function\nfrom gensim.models import word2vec\nimport logging\nimport os\nimport random\nfrom collections import Counter\n\n\n# ----------------------------------------------------------------------------\n\n# Будем генерировать корпус с частями слов на лету, читая по одному предложению\n# из исходного корпуса. Для каждого исходного предложения создается несколько\n# новых предложений, включая исходный вариант.\nclass WordPartsGenerator:\n '''\n fname - имя файла с исходным корпусом\n max_per_line - макс. число предложений с частями слов, генерируемых из одного исходного\n '''\n def __init__(self, fname, max_per_line, min_part_len, max_part_len, max_lines ):\n self.fname = fname\n self.max_per_line = max_per_line\n self.min_part_len = min_part_len\n self.max_part_len = max_part_len\n self.max_lines = max_lines\n self.line_buf = []\n self.ibuf= 0\n self.rdr = None\n self.total_lines = 0\n \n def fill_buffer(self):\n self.line_buf = []\n self.ibuf = 0\n\n line = self.rdr.readline().decode('utf-8').strip()\n if line==None:\n return\n\n n_generated=0\n nprobe=0\n words = line.split(' ')\n \n self.line_buf.append( words ) # исходное предложение добавляется обязательно\n self.total_lines += 1\n \n if len(words)>2:\n while n_generated - Page précèdente\")\n else:\n print(\"-\")\n print(\" Don\\'t have an account? Click here to register. Papers: {PaperCount}, Citations: {CitationCount} Papers: {PaperCount}, Citations: {CitationCount} Papers: {PaperCount}, Citations: {CitationCount} Papers: {PaperCount}, Citations: {CitationCount} Citations: {CitationCount} Papers: {PaperCount}, Citations: {CitationCount} - Page suivante\")\n print(\" - Accueil\")\n print(\"']\n gram_counter.update(temp)\n total_length += len(temp)\n # print(total_counter, gram_counter)\n gram_counter = dict(sorted(gram_counter.items()))\n\n with open(MODEL_FILE_NAME, 'w') as model_file:\n res = ''\n for word, count in gram_counter.items():\n temp = word + ' ' + str(float(count) / total_length) + '\\n'\n res += temp\n model_file.write(temp)\n\n print('Model training finished, written in {}.'.format(MODEL_FILE_NAME))\n return res\n print('Model training failed.')\n return -1\n\n\ndef test_unigram(test_file):\n with open(MODEL_FILE_NAME) as model_file:\n model = {}\n for line in model_file:\n temp = line.replace('\\n', '').split(' ')\n model[temp[0]] = float(temp[-1])\n model = collections.Counter(model)\n\n entropy = 0.0\n coverage = 0.0\n total_length = 0\n for line in test_file:\n words = line.replace('\\n', '').split(' ') + ['']\n total_length += len(words)\n\n # calc entropy\n for word in words:\n p = LAMBDA_1 * model[word] + LAMBDA_UNK / V\n entropy -= math.log(p, 2)\n\n coverage += 1 if model[word] > 0 else 0\n\n entropy /= total_length\n coverage /= total_length\n # print(entropy, coverage)\n return 'entropy = ' + str(entropy) + '\\n' + 'coverage = ' + str(coverage) + '\\n'\n\n\ndef test(result, answer):\n detla = 1e-5\n items_r = result.replace('\\n', ' ').split(' ')\n items_a = answer.replace('\\n', ' ').replace('\\t', ' ').split(' ')\n\n if len(items_a) != len(items_r):\n return False\n for item_r, item_a in zip(items_r, items_a):\n try:\n r = float(item_r)\n a = float(item_a)\n if abs(r - a) > detla:\n return False\n except:\n if item_r != item_a:\n return False\n return True\n\n\nif __name__ == '__main__':\n if 
len(sys.argv) >= 2:\n STATUS = sys.argv[1]\n\n # STATUS = 'test'\n print(STATUS, 'mode.')\n if STATUS == 'train':\n func = train_unigram\n else:\n func = test_unigram\n INPUT_FILE_NAME = '01-test-input.txt'\n ANSWER_FILE_NAME = '01-test-answer.txt'\n\n with open(PATH + INPUT_FILE_NAME) as file:\n res = func(file)\n\n ans = ''\n with open(PATH + ANSWER_FILE_NAME) as ans_file:\n for line in ans_file:\n if len(line) > 1 and '#' not in line:\n ans += line\n if test(res, ans):\n print('Accept')\n else:\n print('Wrong Answer')\n\n","sub_path":"zzz/tutorial01/tutorial01.py","file_name":"tutorial01.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"108099616","text":"import urllib.request, json\n\nSEARCH_URL = 'https://api.flickr.com/services/rest/?method=flickr.photos.search'\n\nAPI_PARAM = '&api_key='\nLAT_PARAM = '&lat='\nLON_PARAM = '&lon='\nPER_PAGE_PARAM = '&per_page='\nPAGE_PARAM = '&page='\nFORMAT_PARAM = '&format='\n\nAPI_KEY = 'b8b5ee365c39800a925944f298b44e92'\nPER_PAGE = '5'\nPAGE = '1'\nFORMAT = 'json'\n\n\n# This method is responsible to retrieve 5 photo ids\n# from flickr API and return it as an array of string.\n\n# latitude: the latitude of the user (String)\n# longitude: the longitude of the user (String)\n\n# returns: an array of string containing 5 ids of photos\n# nearby the given lat and lon.\n\ndef getPhotoId(latitude, longitude):\n # Build the url.\n url = SEARCH_URL + API_PARAM + API_KEY + LAT_PARAM + latitude + LON_PARAM + longitude + PER_PAGE_PARAM + PER_PAGE + PAGE_PARAM + PAGE + FORMAT_PARAM + FORMAT\n\n # Send a GET request to the url, and retrieve the JSON.\n # It is advised to print this variable in order to understand it better.\n retrieved = urllib.request.urlopen(url).read()\n\n # Strip the outer part of json, because it has a header that wraps the json.\n retrieved = retrieved[14:-1] # [14:-1] means that we perform a substring to the retrieved json\n # from index 14 (inclusive) to the last index (exclusive).\n # Read python substring docs for more info.\n\n\n # The actual retrieved json was just a string; therefore, it needs to be parsed into a real json.\n data = json.loads(retrieved)\n\n # Get the values of 'photos' from the json.\n # It is advised to print this variable in order to understand it better\n page = data['photos']\n\n # Get the array of photo from 'photos'.\n # It is advised to print this variable in order to understand it better\n photos = page['photo']\n\n # This for loop will get the id of each photo in the array,\n # and put it inside a new array named photo_id\n photo_id = []\n for photo in photos:\n photo_id.append(photo['id'])\n\n return photo_id\n","sub_path":"flickrcontroller.py","file_name":"flickrcontroller.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"364848462","text":"\"\"\"\nProblem: #1.7\n\nRotate Matrix: Given an image represented by an NxN matrix, where each pixel in the image is 4 \nbytes, write a method to rotate the image by 90 degrees. Can you do this in place?\n\"\"\"\ndef swap_rows(matrix):\n n = len(matrix)\n for i in range(n // 2):\n matrix[i], matrix[n - i - 1] = matrix[n - i - 1], matrix[i]\n return matrix\n\ndef transpose_matrix(matrix):\n n = len(matrix)\n for i in range(n):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n return matrix\n\ndef rotate(matrix):\n return transpose_matrix(swap_rows(matrix))\n\nmatrix = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]]\nprint(rotate(matrix))\n","sub_path":"arrays-and-strings/rotate-matrix.py","file_name":"rotate-matrix.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"293185305","text":"# !usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom service import NoteService\nfrom model import Note\n\n\nclass ListWindow(QWidget):\n \"\"\"\n 查看用户列表/公开笔记列表/用户笔记列表的窗口\n \"\"\"\n ALLUSERSLIST = 0\n ALLNOTESLIST = 1\n MYNOTESLIST = 2\n def __init__(self, main_window, window_type = ALLUSERSLIST):\n super().__init__()\n self.main_window = main_window\n self.window_type = window_type\n self.initUI()\n\n \n def initUI(self):\n self.set_items() # 设置元件\n self.set_layout() # 设置布局\n \n def set_items(self):\n \"\"\"\n 设置一个按钮和一个文本框\n \"\"\"\n self.back_button = QPushButton('Back', self)\n self.back_button.setFixedSize(self.back_button.sizeHint())\n if self.window_type == self.ALLUSERSLIST:\n # 返回 ‘Note Manager’窗口\n self.back_button.clicked.connect(self.main_window.to_init_window)\n elif self.window_type == self.ALLNOTESLIST:\n # 返回 ‘Public Note’窗口\n self.back_button.clicked.connect(\n self.main_window.get_public_notes)\n else:\n # 返回 ‘My Note’窗口\n self.back_button.clicked.connect(\n self.main_window.deal_with_my_notes) \n self.text_browser = QTextBrowser()\n self.set_text()\n \n def set_layout(self):\n \"\"\" \n 设置布局 \n \"\"\"\n # 水平布局\n hbox = QHBoxLayout()\n hbox.addWidget(self.back_button, 0, Qt.AlignRight) # 使用了右对齐\n # 竖直布局\n vbox = QVBoxLayout()\n vbox.addWidget(self.text_browser, 0)\n # 竖直布局\n vbox1 = QVBoxLayout()\n vbox1.addLayout(hbox)\n vbox1.addLayout(vbox)\n \n self.setLayout(vbox1)\n \n def set_text(self):\n \"\"\"\n 设置显示在文本框的内容\n \"\"\"\n self.__noteservice = NoteService()\n if self.window_type == self.ALLUSERSLIST:\n text = self.all_users_list()\n elif self.window_type == self.ALLNOTESLIST:\n text = self.all_notes_list()\n else:\n text = self.my_notes_list()\n self.text_browser.setText(text)\n \n def all_users_list(self):\n # 获得所有用户名\n user_names = self.__noteservice.list_all_users()\n for i, user_name in enumerate(user_names):\n user_names[i] = '{:^33}{:<30}'.format(str(i), user_name)\n \n return '\\n'.join(user_names)\n\n def all_notes_list(self):\n # 获得所有公开笔记\n notes = self.__noteservice.list_all_notes()\n for i, note in enumerate(notes):\n if note.get_note_permission() == Note.PUBLIC:\n note_permission = 'PUBLIC'\n else:\n note_permission = 'PRIVATE'\n # 序号 笔记名 权限 by 创建者\n notes[i] = '{:^10}{:<30}{:<15} by {:<20}'.format(str(i), \\\n note.get_note_name(), \\\n note_permission, \\\n note.get_user_name())\n return '\\n'.join(notes)\n\n def my_notes_list(self):\n # 获得用户所有笔记\n notes = self.__noteservice.list_notes_by_user(\n self.main_window.user_name)\n for i, note in enumerate(notes):\n if int(note[0]) == Note.PUBLIC:\n note_permission = 'PUBLIC'\n else:\n note_permission = 'PRIVATE'\n # 序号 笔记名 权限\n notes[i] = ' {:<20}{:<30}{:<25}'.format(str(i), note[1], \\\n note_permission)\n \n return '\\n'.join(notes)","sub_path":"task10GUI/src/NoteUI/list_window.py","file_name":"list_window.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"439827106","text":"#!/usr/bin/env python3\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n# This script can be used to Mozilla-fy Mercurial templates.\n\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\n\n\nREMOVE_DIRS = {\n 'coal',\n 'monoblue',\n 'spartan',\n}\n\nREMOVE_FILES = {\n 'static/background.png',\n 'static/style-extra-coal.css',\n 'static/style-monoblue.css',\n}\n\nCOPY_FILES = {\n 'atom/pushlog.tmpl',\n 'atom/pushlogentry.tmpl',\n 'gitweb_mozilla/firefoxreleases.tmpl',\n 'gitweb_mozilla/pushlog.tmpl',\n 'gitweb_mozilla/repoinfo.tmpl',\n 'static/jquery-1.2.6.min.js',\n 'static/livemarks16.png',\n 'static/moz-logo-bw-rgb.svg',\n}\n\nREPLACEMENTS = [\n # Replace logo HTML.\n (b'\\nMercurial',\n b'\\n - Quitter\")\n\n product_choice = input(\"\\nEntrez votre choix : \")\n\n if product_choice == \"p\":\n if num_page > 1:\n header()\n display_ls_products(category, page_number - 1)\n else:\n header()\n display_ls_products(category, page_number=1)\n\n elif product_choice == \"s\":\n header()\n display_ls_products(category, page_number + 1)\n\n elif product_choice == \"a\":\n header()\n home()\n\n elif product_choice == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n try:\n product_choice = int(product_choice)\n except:\n error = (\n \"Veuillez saisir un nombre entre 1 et {}, 'n', 'q'\".format(len(page['products'])))\n header(error)\n display_ls_products(category)\n else:\n if product_choice > 0 and product_choice <= len(page['products']):\n code = (page['products'][product_choice-1]['code'])\n header()\n display_product(code, category)\n display_alter_product(code, category)\n\n else:\n error = (\n \" Veuillez saisir un nombre entre 1 et {}, 'n', 'q'\".format(len(page['products'])))\n header(error)\n display_ls_products(category, page_number)\n\n\ndef display_product(code, category):\n \"\"\"Displays the 'product sheet'\n\n Arguments:\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n category = category\n url = conf.url_product(code)\n r = requests.get(url).json()\n page_product = json.dumps(r)\n\n product = json.loads(page_product)\n line_1 = \"Code produit ...........: \"\n line_2 = \"Nom du produit .........: \"\n line_3 = \"Quantité ...............: \"\n line_4 = \"Fabriquant .............: \"\n line_5 = \"Suggestion de magasin ..: \"\n line_6 = \"Nutri-score ............: \"\n line_7 = \"Page OpenFoodFacts .....: \"\n\n print(f\"{line_1}{product['code']}\")\n print(f\"{line_2}{product['product']['product_name_fr']}\")\n print(f\"{line_3}{product['product']['quantity']}\")\n print(f\"{line_4}{product['product']['brands']}\")\n print(f\"{line_5}{product['product']['stores']}\")\n\n if product['product']['nutrition_grades'] == \"a\":\n print(line_6 + Back.GREEN + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"b\":\n print(line_6 + Back.GREEN + Fore.YELLOW + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"c\":\n print(line_6 + Back.YELLOW + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"d\":\n print(line_6 + Back.YELLOW + Fore.RED + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"e\":\n print(line_6 + 
Back.RED + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n\n print(f\"{line_7}https://fr.openfoodfacts.org/produit/\\\n{product['product']['code']}\")\n\n\ndef display_alter_product(code, category):\n \"\"\"Search alternative product\n\n Arguments:\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n category = category\n url = conf.url_product(code)\n r = requests.get(url).json()\n page_product = json.dumps(r)\n product = json.loads(page_product)\n print(\"*\" * 80)\n # print(product['product']['generic_name_fr'])\n # print(len(product['product']['generic_name_fr']))\n\n if len(product['product']['generic_name_fr']) == 0:\n keyword = conf.cleaner(product['product']['product_name_fr'])\n print(\"Produit de subsitution pour :\", keyword)\n else:\n keyword = conf.cleaner(product['product']['generic_name_fr'])\n print(\"Produit de sublistution pour :\", keyword)\n\n if product['product']['nutrition_grades'] != \"a\":\n url = conf.url_alt_product(keyword)\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'a':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n url = conf.url_alt_product(keyword, 'b')\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'b':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n url = conf.url_alt_product(keyword, 'c')\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'c':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n print(\"\\nOups ! Pas de produit de substition trouvé :-(\")\n\n else:\n print(\"\")\n\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\"
- Quitter\")\n\n alter_input = input(\"\\nEntrez votre choix : \")\n\n if alter_input.lower() == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n elif alter_input.lower() == \"e\":\n try:\n Dbase.insert_tbl_categories(code, category)\n except mysql.connector.Error as err:\n if err.errno == 1062:\n msg = (\"Produit déjà enregistré\")\n header(msg)\n display_ls_products(category)\n else:\n msg = (\"Produit enregistré\")\n header(msg)\n display_ls_products(category)\n\n elif alter_input.lower() == \"a\":\n header()\n home()\n\n elif alter_input.lower() == \"r\":\n header()\n display_ls_products(category)\n\n else:\n error = (\"Saisie incorrecte\")\n header(error)\n display_ls_products(category)\n\n\ndef display_all_records():\n \"\"\"Displays all records in the database\n \"\"\"\n print(\"Display all records\")\n cnx = Dbase.sql_connect()\n cur = cnx.cursor()\n cur.execute(\"USE purbeurre\")\n cur.execute(\"\"\"\n SELECT * FROM Records;\n \"\"\")\n number_line = 1\n for raw in cur:\n print(f\"{raw[0]} - {raw[2]} {raw[3]}\")\n number_line += 1\n\n print(\"\")\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\"
- Quitter\")\n ask = input(\">> \")\n\n if ask.lower() == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n elif ask.lower() == \"a\":\n header()\n home()\n\n try:\n ask = int(ask)\n except:\n error = (\"Entrée incorrecte\")\n header(error)\n display_all_records()\n else:\n header()\n display_record(ask, raw[1], raw[8])\n\n\ndef display_record(ask, code, category):\n \"\"\"Displays a product registered in the database\n\n Arguments:\n ask {str} -- User response\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n cnx = Dbase.sql_connect()\n cur = cnx.cursor()\n cur.execute(\"USE purbeurre\")\n\n cur.execute(\"\"\"\n SELECT * FROM Records WHERE id={}\n \"\"\".format(ask))\n product = (cur.fetchone())\n\n display_product(product[1], product[8])\n\n print(\"\")\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\"
- Quitter\")\n print(\"
=2000 and split[0]==\"pgdist\":\n\t\tdistant+=1\nOUT.write(\"%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n\"%(\"promoter\",p,\"body\",b,\"Co\",c,\"f\",f,\"distant\",distant))\n\nIN.close()\nOUT.close()\t\n","sub_path":"script/Sumpgv1.py","file_name":"Sumpgv1.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"266953353","text":"import board_manipulation as b_m\nfrom search_node import search_node\n\n# manhattan distance is distance between two points on a grid based on strictly horizontal and/or vertical path\ndef manhattan_distance(start, end):\n\n sx, sy = start\n ex, ey = end\n\n return abs(ex - sx) + abs(ey - sy)\n\n# generates and returns all children nodes of the node if it does not collide with a wall\ndef generate_all_successors(board, node):\n\n new_nodes = {}\n cell_categories = [\"w\", \"m\", \"f\", \"g\", \"r\", \".\", \"B\"]\n\n temp_location = list(node.location)\n\n if temp_location[1] < (len(board)-1): # checks if this is edge of map\n\n temp_location[1] += 1 # y value +1 (goes up)\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories: #checks if map value is not a wall\n\n #creates a node\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g , Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n #creates the nodes \"state\", what path it takes from start node\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n #repeats previous actions on left, right, down movement\n\n temp_location = list(node.location)\n\n if temp_location[0] < (len(board[0])-1):\n\n temp_location[0] += 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0],temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n temp_location = list(node.location)\n\n if temp_location[1]> 0:\n\n temp_location[1] -= 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n temp_location = list(node.location)\n\n if temp_location[0] > 0:\n\n temp_location[0] -= 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n return new_nodes\n\n# attaches child to a node that is now considered its best parent so far\ndef attach_and_eval(map_array, child, parent, goal_location):\n\n child.parent = parent\n child.g = parent.g + b_m.calculate_g(map_array, child)\n child.h = manhattan_distance(child.location, goal_location)\n child.f = int(child.g) # doesnt matter what f is in bfs\n\n# recurses through the children of a parent and other descendants if the new parent.g value makes the path better\ndef propagate_path_improvements(map_array, parent):\n\n for kid in parent.children:\n\n if ((parent.g) + b_m.calculate_g(map_array, kid)) < kid.g:\n\n kid.parent = parent\n kid.g = parent.g + b_m.calculate_g(map_array, kid)\n kid.f = kid.g # 
doesnt matter what f is in bfs","sub_path":"bfs/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
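+# The heuristic above in isolation (self-contained; the grid points are
+# arbitrary examples):
+def manhattan_distance(start, end):
+    (sx, sy), (ex, ey) = start, end
+    return abs(ex - sx) + abs(ey - sy)
+assert manhattan_distance((0, 0), (3, 4)) == 7
+assert manhattan_distance((2, 5), (2, 5)) == 0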
+{"seq_id":"260646019","text":"# Create your views here.\n\nfrom blog.models import Blog, Category\nfrom django.shortcuts import render_to_response, get_object_or_404\n\ndef index(request):\n \"\"\"render index.html\n \n Arguments:\n - `request`:\n \"\"\"\n context = {'categories': Category.objects.all(),\n 'posts':Blog.objects.all()[:5]}\n return render_to_response('index.html',\n context,)\n\n\ndef view_post(request, slug):\n \"\"\"render posts\n \n Arguments:\n - `request`:\n - `slug`:\n \"\"\"\n context = {'post':get_object_or_404(Blog, slug=slug)}\n render_to_response('view_post.html',\n context)\n\ndef view_category(request, slug):\n \"\"\"view categories\n \n Arguments:\n - `request`:\n - `slug`:\n \"\"\"\n category = get_object_or_404(Category, slug=slug)\n context = {'category':category,\n 'posts':Blog.objects.filter(category=category)[:5]}\n \n render_to_response('view_category.html', \n context)\n\n \n\n\n\n \n","sub_path":"djangorocks/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"459576174","text":"from flask import Flask, request, Response\nfrom pymysql import cursors\nimport json\n\ndef javaHashMapStrToJson(data):\n data = data.replace(',', '}, {')\n data = data.replace('=',',')\n data = data.replace(\"'\",'\"')\n data = '['+data+']'\n data = eval(data)\n data = sorted(data,key = lambda i:i['id'])\n return data\n\ndef mod(n):\n if n < 0:\n return -n\n return n\n\ndef answers(connection):\n with connection.cursor() as cursor:\n fbid = request.form.get(\"firebase_id\")\n cursor.execute(\"SELECT firebase_id FROM profile WHERE firebase_id = '{}'\".format(fbid))\n if cursor.rowcount == 0:\n return Response(json.dumps({\"status\": \"failure\", \"Reason\":\"Firebase ID doesnot exist\", \"status_code\": \"200\"}), mimetype=\"application/json\", status=200)\n data = request.form.get(\"answers\")\n try:\n data = javaHashMapStrToJson(data)\n except:\n return Response(json.dumps({\"status\":\"failure\", \"Reason\":\"Cant parse answers\", \"status_code\":\"200\"}),mimetype=\"application/json\",status = 200)\n score = 0\n id = 0\n query = \"select * from quiz where id = \".format(data[id][\"id\"])\n for i in range(len(data)):\n query += \" {} or id = \".format(data[id][\"id\"])\n id+=1\n id = 0\n query = query[:-9]\n cursor.execute(query)\n if cursor.rowcount==0:\n return Response(json.dumps({\"status\": \"failure\", \"status_code\": \"200\"}), mimetype=\"application/json\", status=200)\n ans = cursor.fetchall()\n for i in range(len(data)):\n qid = ans[id][\"id\"]\n if(int(ans[id][\"ans\"])==int(data[id][\"ans\"])):\n score+=1\n id+=1\n cursor.execute(\"SELECT quiz_rating FROM profile WHERE firebase_id = '{}'\".format(fbid))\n rating = cursor.fetchone()\n rating = rating[\"quiz_rating\"]\n newRating = rating+score\n cursor.execute(\"UPDATE profile SET quiz_rating = '{}', points = points + {} WHERE firebase_id = '{}'\".format(newRating,score,fbid))\n # connection.commit()\n return Response(json.dumps({\"status\": \"success\", \"status_code\": \"200\", \"score\": score}),mimetype = \"application/json\",status = 200)\n","sub_path":"quiz_answers.py","file_name":"quiz_answers.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"34488279","text":"from random import *\n\n\n# ETAPE 1\ndef crypter_char(char, decalage):\n actuel = ord(char)\n actuel += decalage\n return chr(actuel)\n\n\n# ETAPE 2\ndef decrypter_char(char, decalage):\n actuel = ord(char)\n actuel -= decalage\n return chr(actuel)\n\n\n# ETAPE 3\ndef crypter_phrase(phrase, decalage):\n nouvelle_phrase = \"\"\n for lettre in phrase:\n nouvelle_phrase += crypter_char(lettre, decalage)\n return nouvelle_phrase\n\n\n# ETAPE 4\ndef decrypter_phrase(phrase, decalage):\n nouvelle_phrase = \"\"\n for lettre in phrase:\n nouvelle_phrase += decrypter_char(lettre, decalage)\n return nouvelle_phrase\n\n\n# ETAPE 5\ndef cryptage(element):\n decalage = randrange(1, 10)\n phrase = crypter_phrase(element, decalage)\n return chr(decalage) + phrase\n\n\n# ETAPE 6\ndef decryptage(element):\n decalage = ord(element[0])\n phrase = element[1:]\n return decrypter_phrase(phrase, decalage)\n\n\n# ETAPE 7\ndef main():\n reponse = str(input('Voulez-vous crypter ou decrypter: '))\n while (reponse.lower() != 'crypter' and reponse.lower() != 'decrypter'):\n reponse = str(input('Voulez-vous crypter ou decrypter: '))\n\n if (reponse.lower() == 'crypter'):\n element = str(input('Element à crypter: '))\n print('Element crypter: ', cryptage(element))\n else:\n element = str(input('Element à decrypter: '))\n print('Element décrypter: ', decryptage(element))\n\n\n# ETAPE 8\nreponse = \"rien\" # pour rentrer dans le while\nwhile (reponse.lower() != 'non'):\n main()\n reponse = str(input('\\nVoulez-vous recommencer? ')) \n","sub_path":"exo9.6.py","file_name":"exo9.6.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"414210145","text":"import os, time, json, logging\n\nfrom initial_ML_module import Trainer\n\ntrain_folder = os.environ.get(\"TRAIN_DATA_FOLDER\",\"./train\")\nlog_folder = os.environ.get(\"LOGS_FOLDER\",\"./logs\")\n\nlogFile = log_folder + '/ml.log'\nlogger = logging.getLogger(__name__)\n\n# Create handlers\nf_handler = logging.FileHandler(logFile)\nf_handler.setLevel(logging.DEBUG)\n\n# Create formatters and add it to handler\nf_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nf_handler.setFormatter(f_format)\n\n# Add handlers to the logger\nlogger.addHandler(f_handler)\ndef start():\n logger.info(\"Training process started ...\")\n _json = None \n try:\n _file = open(train_folder + \"/train.data\",'r')\n _json = json.loads(_file.read())\n _file.close()\n except Exception as e:\n logger.info(\"An error occured\")\n logger.info(e)\n return False \n try:\n trainer = Trainer(_json[\"url_file\"],_json[\"target\"],_json[\"application\"],_json[\"features\"], _json['variant'])\n trainer.train()\n except Exception as e:\n print(e)\n logger.info(e)\n\nif __name__ == \"__main__\":\n start()","sub_path":"forecasting/morphemic/morphemic-performance-model/ml_code/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"388483145","text":"import math\nN = int(input())\n\ninput_list = [list(map(int, input().split())) for i in range(N)]\n \ndistance_list = []\nfor x1, y1 in input_list:\n for x2, y2 in input_list:\n dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n distance_list.append(dist)\nprint(round(max(distance_list), 6))","sub_path":"arc004/a/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"84697487","text":"from datetime import date\n\nfrom bidict import bidict\n\nfrom constants import DB\nfrom models import Vehicle, Company, DeliveryNote, DeliveryNoteItem\nfrom models import Settings\n\nITEM_TYPES = bidict(\n {\"HOURS\": \"Hours\",\n \"KMS\": \"Kms\",\n \"OTHERS\": \"Others\"}\n )\nEXPIRATION_DAYS = (30, 60, 90, 120, 180)\nPAYMENT_TYPES = bidict(\n {\"CONFIRMING\": \"Confirming\",\n \"BANK_TRANSFER\": \"Bank transfer\",\n \"PROMISSORY_NOTE\": \"Promissory note\",\n \"CASH\": \"Cash\", \"PAID\": \"Paid\",\n \"CHECK\": \"Check\"}\n )\n \n\ndef init_db():\n DB.connect()\n DB.drop_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n DB.create_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n\n\ndef drop_tables():\n DB.connect()\n DB.drop_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n\n\ndef populate_db():\n v12 = Vehicle(\n number=12, plate=\"1234ABC\", brand=\"My brand\",\n model=\"My model\", hour_price=27.32, km_price=8.99\n )\n v15 = Vehicle(\n number=15, plate=\"9876ZYX\", brand=\"My brand\",\n model=\"My model\", hour_price=34.02, km_price=4.00\n )\n v12.save()\n v15.save()\n\n c1 = Company(\n code=\"0001\", name=\"Company name\", nif=\"B12345678\",\n address=\"Rue st.\", city=\"Zaragoza\", state=\"Zaragoza\",\n zip_code=\"50000\", phone=\"123456789\", contact_person=\"Foolano\",\n alternative_phone=\"987654321\", fax=\"246813579\",\n email=\"foolano@bar.com\", iban=\"ES12345678901234567890123456789012\",\n bank_name=\"THE Bank\", payment_type=\"CASH\", expiration_days=30,\n first_payment_day=5, second_payment_day=15, third_payment_day=25\n )\n\n c2 = Company(\n code=\"0002\", name=\"Foo Inc.\", nif=\"B45678123\",\n address=\"Major st\", city=\"Zaragoza\", state=\"Zaragoza\",\n zip_code=\"50002\", email=\"foolano@bar.com\",\n iban=\"ES12345678901234567890123456789012\", bank_name=\"Minor Bank\",\n payment_type=\"BANK_TRANSFER\", expiration_days=45,\n first_payment_day=8\n )\n\n c1.save()\n c2.save()\n\n dn1 = DeliveryNote(\n code=\"11111111\", date=date(2016, 1, 3), company=c1, vehicle=v12,\n invoiced=False\n )\n dn2 = DeliveryNote(\n code=\"22222222\", date=date(2016, 1, 5), company=c1, vehicle=v15,\n invoiced=False\n )\n dn1.save()\n dn2.save()\n\n dni1 = DeliveryNoteItem(\n delivery_note=dn1, item_type=\"HOURS\", units=12,\n price=v12.hour_price, description=\"Working hard\"\n )\n dni2 = DeliveryNoteItem(\n delivery_note=dn2, item_type=\"HOURS\", units=7,\n price=21.00, description=\"We are working hard here\"\n )\n dni3 = DeliveryNoteItem(\n delivery_note=dn2, item_type=\"OTHERS\", units=1,\n price=327.86, description=\"Are you working hard?\"\n )\n dni1.save()\n dni2.save()\n dni3.save()\n\n Settings(vat=21.00, invoice_number=\"0000000001\").save()\n","sub_path":"sigg/util/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"313206570","text":"import socket#Get Required Modules\nimport sys\nfrom _thread import *\naddress=[]\nconnections=[]\n\ndef ReturnData(Type):\n if Type==\"Connections\":\n global address\n return address\n\ndef Message(Message,ConnN):\n #Message(Message , Connection Number)\n #Send Client(s) a message\n global connections#Get Connections List from Global Source\n if ConnN==\"*\":\n for connection in connections:#For Every Connection\n connection.sendall(Message)#Send Message To Current Connection\n else:\n connections[ConnN].sendall(Message)#Send Message To Selected Connection\n\ndef GetReply(Query,ConnN,Terminator):\n #GetReply(Question , Connection Num , End Character , Reply Message)\n #Ask Client(s) a question\n global connections#Get Connections List from Global Source\n if ConnN==\"*\":#If Target Connection is \"*\"\n for ConTrgt in connections:#Run GetReplyExec for each Connection\n Word=[]\n Word.append(GetReplyExec(Query,ConTrgt,Terminator))\n else:#Run GetReplyExec for Selected Connection\n print(connections)\n Word=GetReplyExec(Query,connections[ConnN],Terminator)\n print(Word)\n return Word\n exit()\n\ndef GetReplyExec(Query,ConTrgt,Terminator):\n #GetReply(Question , Connection Num , End Character , Reply Message)\n #Ask Client(s) a question\n ConTrgt.sendall(str.encode(Query))#Send Client Message\n Word=GetUsrInput(ConTrgt,Terminator)\n return Word\n\ndef GetUsrInput(ConTrgt,Terminator):\n Word=\"\"\n while True:\n c = ConTrgt.recv(2048000).decode(\"utf-8\")#Get Clients Character Input\n Word+=c#Add character to the word/scenetence\n\n if c==\"\b\":#if character is (Back Space)\n\n Word=Word[0:len(Word)-2]#Remove Delete Character And Previous Charcter\n\n if Terminator not in Word:#Check if Terminator Charcter in Word\n\n if not Word:\n break#End Function\n\n if Terminator in Word:#If Terminsator is in Word\n\n #print(Word[0:len(Word)-1])#Display Word\n return Word[0:len(Word)-1]\n break\n #End Function\n\ndef Server():\n #start_new_thread(Server,())\n #Start The Server\n\n import socket#Import Required Modules\n import sys\n\n host = ''#Set Connection Settings\n port = 1234\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:#Try To Bind To Port And IP\n s.bind((host, port))\n except socket.error as e:\n print(str(e))\n\n s.listen(5)\n #print('Waiting for a connection.')\n\n connections=[]#Create connections List\n address=[]\n global connections, address\n\n while True:\n\n connection, addr = s.accept()#Set connection and addr\n connections.append(connection)#Add connection to list of connections\n address.append(addr)\n print('connected to: '+addr[0]+':'+str(addr[1]))#Log the new connection\n\n#start_new_thread(Server,())\n","sub_path":"Server/Version 1/ServerModule.py","file_name":"ServerModule.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"475076779","text":"#\n# [347] Top K Frequent Elements\n#\n# https://leetcode.com/problems/top-k-frequent-elements/description/\n#\n# algorithms\n# Medium (50.13%)\n# Total Accepted: 113.4K\n# Total Submissions: 226.1K\n# Testcase Example: '[1,1,1,2,2,3]\\n2'\n#\n#\n# Given a non-empty array of integers, return the k most frequent elements.\n#\n# For example,\n# Given [1,1,1,2,2,3] and k = 2, return [1,2].\n#\n#\n# Note:\n#\n# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.\n# Your algorithm's time complexity must be better than O(n log n), where n is\n# the array's size.\n#\n\n\nclass Solution:\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n dic = dict()\n for n in nums:\n if n not in dic:\n dic[n] = 1\n else:\n dic[n] += 1\n\n # Bucket Sort using frequency number in dic\n bucket = [set() for x in range(len(nums)+1)]\n for n in nums:\n bucket[dic[n]].add(n)\n res, i = [], 0\n for b in bucket[::-1]:\n for n in b:\n res.append(n)\n i += 1\n if i == k:\n return res\n","sub_path":"347.top-k-frequent-elements.python3.py","file_name":"347.top-k-frequent-elements.python3.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"318200876","text":"#!/usr/bin/env python3\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import Int32MultiArray\nimport rrtstarconnect1\nimport numpy as np\n\ndef print1():\n print(\"hello from ros\")\n\ndef give():\n pub = rospy.Publisher('path_topic', Int32MultiArray, queue_size=10)\n rospy.init_node('path_node', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n path = Int32MultiArray()\n # path.data = rrtstarconnect1.p4\n\n while not rospy.is_shutdown():\n # path = rrtstarconnect1.p4\n # img = rrtstarconnect1.imre\n # rrtstarconnect1.show(img)\n p5 = np.array(rrtstarconnect1.p4)\n print(p5)\n # p5 = (p5/300)*11\n path.data = np.frombuffer(p5.tobytes(),'int32')\n print(path.data)\n pub.publish(path)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n rrtstarconnect1.rrtstarconnect()\n give()\n except rospy.ROSInterruptException:\n pass","sub_path":"task 3/codes/catkin_ws/src/turtle/src/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"266007195","text":"# Calcule o resultado da expressão A > b and C or D, utilizando os valores da tabela a seguir.\n\na = 1\nb = 2\nc = True\nD = False\n\nprint (a > b and c or D) # falso\n\na2 = 10\nb2 = 3\nc2 = False\nd2 = False\n\nprint (a2 > b2 and c2 or d2)\n\na3 = 5\nb3 = 1\nc3 = True\nd3 = True\n\nprint (a3 > b3 and c3 or d3)\n\n","sub_path":"exercicio 3.5.py","file_name":"exercicio 3.5.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"232332239","text":"#! /usr/bin/env python \n\nimport urllib2\nimport subprocess\nimport stat\nimport os\nimport time\nimport smtplib\nimport shlex\nimport socket\nimport fcntl\nimport sys\n\nSCRIPTS_PATH = '/root/scripts/'\nTOMCAT_URL = 'http://www.intranet.upsa.es:8180/prueba'\nWEB_URL = 'http://www.upsa.es/prueba'\nTOMCAT_PID_FILE = '/var/run/tomcat5.5.pid'\nWEB_PID_FILE = '/var/run/lighttpd.pid'\nLOCK_FILE = '/var/lock/check_services'\n\ndef test_url(url):\n try:\n uf = urllib2.urlopen(url)\n except:\n return False\n data = uf.read().strip()\n if data == 'OK':\n return True\n else:\n return False\n\ndef test_service(pid_file):\n if os.path.exists(pid_file):\n mode = os.stat(pid_file)\n if (mode[stat.ST_CTIME] + 300) > int(time.time()):\n return False\n return True\n\ndef test_web():\n if test_service(WEB_PID_FILE) and not test_url(WEB_URL):\n return False\n return True\n\ndef test_tomcat():\n if test_service(TOMCAT_PID_FILE) and not test_url(TOMCAT_URL):\n return False\n return True\n\n\ndef restart_service(service):\n msg = \"\"\"Subject: reinciando servicios\n\n Reiniando servicios: \n \"\"\" \n msg = msg + service\n server = smtplib.SMTP('edge.upsa.es')\n server.sendmail('webpro01@upsa.es', 'sistemas@upsa.es', msg)\n server.quit()\n cmd = '/etc/init.d/' + service + ' restart'\n args = shlex.split(cmd)\n subprocess.Popen(args)\n\ndef lock():\n f = open(LOCK_FILE, 'w')\n try:\n fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)\n except:\n return False\n return f\n\ndef unlock(f):\n fcntl.flock(f.fileno(), fcntl.LOCK_UN)\n f.close()\n os.remove(LOCK_FILE)\n return True\n\ndef main():\n\n f = lock()\n if not f:\n sys.exit()\n \n timeout = 5\n socket.setdefaulttimeout(timeout)\n \n if not test_tomcat() or not test_tomcat():\n restart_service('tomcat5.5')\n if not test_web() or not test_web():\n restart_service('lighttpd')\n \n unlock(f)\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_services.py","file_name":"check_services.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"36974809","text":"import inspect\n\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Iterable\n\nimport typing\nfrom typing import Dict, Any, NoReturn\n\nimport discord\nfrom discord.ext.commands import Bot, Context, BadArgument\n\nfrom pymongo import ReturnDocument\n\nimport logging\nlog = logging.getLogger('Penelope')\n\nclass CogConfig(ABC):\n \"\"\"Uses python type hints to autofill the guild config for a cog\"\"\"\n _bot: Bot\n\n def __new__(cls):\n self = super().__new__(cls)\n self.type_hints = typing.get_type_hints(cls)\n del self.type_hints['_bot'] # ehh\n return self\n\n\n def __getattr__(self, name):\n if name == 'guild':\n return self._bot.get_guild(self._guild_id)\n\n if name in self.type_hints:\n hint = self.type_hints[name]\n param_id = self.__getattribute__(self._serialize_param(name))\n\n if typing.get_origin(hint) is list:\n hint = typing.get_args(hint)[0]\n return [self._get_param(p_id, hint) for p_id in param_id]\n\n return self._get_param(param_id, hint)\n\n\n def _get_param(self, param_id, hint):\n if hint is discord.TextChannel:\n return self._bot.get_channel(param_id)\n\n elif hint is discord.User:\n return self._bot.get_user(param_id)\n\n elif hint is discord.Role:\n return self.guild.get_role(param_id)\n\n elif hint is discord.Message:\n if not param_id:\n return None\n param = list(map(int, param_id.split(':')))\n return self._bot.get_channel(param[0]).fetch_message(param[1])\n\n else:\n log.warning(f'{self.__class__.__name__} - {hint} not implemented in __getattr__')\n\n\n @property\n def _embed(self) -> discord.Embed:\n e = discord.Embed(color=0xD81B60)\n e.title = f'{self.__class__.__name__}'\n return e\n\n\n async def handle_command(self, ctx: Context, *args):\n args = list(args)\n if not any(args):\n await self._send_params(ctx)\n\n else:\n param = args.pop(0)\n try:\n if param not in self.type_hints:\n raise BadArgument(f'`{param}` is not a valid config option')\n\n hint = self.type_hints[param]\n origin = typing.get_origin(hint)\n\n if origin is list:\n action = args.pop(0).strip()\n hint = typing.get_args(self.type_hints[param])[0]\n\n singlearg = await self._convert_argument(ctx, hint, args, param)\n\n arg = getattr(self, param)\n\n if action == 'add':\n if not singlearg in arg:\n arg.append(singlearg)\n\n elif action == 'remove':\n arg.remove(singlearg)\n\n else:\n raise BadArgument(f'Must use \\'add\\' or \\'remove\\' for List parameter {param}')\n\n else:\n arg = await self._convert_argument(ctx, hint, args, param)\n\n await self._update_config(param, arg)\n\n await ctx.send(embed=await self._single_param_embed(param))\n\n except BadArgument as e:\n await ctx.send(e)\n\n async def _single_param_embed(self, param):\n e = self._embed\n e.description = await self._render_param(param)\n return e\n\n async def _render_hint(self, param) -> str:\n hint = self.type_hints[param]\n origin = typing.get_origin(hint)\n if origin and issubclass(origin, Iterable):\n return f'**{param}** ({origin.__name__.capitalize()}[{typing.get_args(hint)[0].__name__}]):\\n'\n\n else:\n return f'**{param}** ({hint.__name__}) = '\n\n def _render_val(self, val) -> str:\n if isinstance(val, (discord.abc.Messageable, discord.Role)):\n return val.mention\n elif isinstance(val, discord.Message):\n return f'[Message]({val.jump_url})'\n else:\n return val\n\n async def _render_arg(self, param) -> str:\n arg = getattr(self, param)\n\n if inspect.iscoroutine(arg):\n arg = await arg\n\n if isinstance(arg, Iterable):\n return '\\n'.join([f'- {self._render_val(s)}' 
for s in arg])\n else:\n return self._render_val(arg)\n\n async def _render_param(self, param):\n return await self._render_hint(param) \\\n + str(await self._render_arg(param)) \\\n + '\\n'\n\n async def _send_params(self, ctx):\n e = self._embed\n e.title += ' \\N{WHITE HEAVY CHECK MARK}' if self.check else ' \\N{CROSS MARK}'\n e.description = ''\n\n for param, hint in self.type_hints.items():\n e.description += await self._render_param(param)\n\n e.description += ''\n await ctx.send(embed=e)\n\n async def _convert_argument(self, ctx, converter, args, param) -> Any:\n if converter is discord.Message:\n converter = discord.TextChannel\n\n if converter is str:\n args = ' '.join(args)\n else:\n args = args[0]\n\n converted = await ctx.command._actual_conversion(ctx, converter, args, param)\n\n if converter is discord.Message:\n return await converted.fetch_message(args[1])\n\n return converted\n\n def _make_key(self, param, hint):\n if issubclass(hint, discord.abc.Snowflake):\n return f'{param}_id'\n\n return f'{param}'\n\n def _serialize_param(self, param) -> dict:\n hint = self.type_hints[param]\n\n if typing.get_origin(hint) is list:\n hint = typing.get_args(hint)[0]\n\n return f'{self._make_key(param, hint)}s'\n\n return self._make_key(param, hint)\n\n def _make_val(self, val):\n if isinstance(val, discord.Message):\n return f'{val.channel.id}:{val.id}'\n\n if issubclass(type(val), discord.abc.Snowflake):\n return val.id\n\n return val\n\n def _serialize_arg(self, val):\n if isinstance(val, list):\n return [self._make_val(v) for v in val]\n\n return self._make_val(val)\n\n async def _update_config(self, param, arg) -> NoReturn:\n data = {f'{self.name}.{self._serialize_param(param)}': self._serialize_arg(arg)}\n\n doc = await self._bot.db.guild_config.find_one_and_update(\n {\"id\": self._guild_id},\n {\"$set\": data},\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n\n self.from_doc(doc)\n\n\n @classmethod\n async def from_db(cls, guild_id, bot):\n self = cls()\n self._guild_id = guild_id\n self._bot = bot\n\n doc = await bot.guild_config(guild_id)\n self.from_doc(doc)\n\n log.debug(f'{self.__class__.__name__} - Loaded guild \"{self.guild.name}\" ({self.guild.id}) config from db')\n\n return self\n\n\n def from_doc(self, doc: Dict) -> NoReturn:\n doc = doc.get(self.name, {})\n for param, hint in self.type_hints.items():\n param_id = self._serialize_param(param)\n\n if hasattr(self, param):\n default = getattr(self, param)\n elif typing.get_origin(hint) is list:\n default = []\n else:\n default = None\n\n arg = doc.get(param_id, default)\n\n setattr(self, param_id, arg)\n\n\n def __repr__(self):\n return f'<{self.__class__.__name__} {\" \".join([f\"{p}={getattr(self, p)}\" for p, h in self.type_hints.items()])}>'\n\n @property\n @abstractmethod\n def check(self):\n return self.enabled\n","sub_path":"cogs/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"627356528","text":"from tensorflow.keras.datasets import mnist\nimport numpy as np\n\nclass DataLoader:\n def load(self, is_reshape=True, is_one_hot=False):\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n # convert\n x_train = self._normalize(x_train)\n x_test = self._normalize(x_test)\n if is_reshape:\n x_train = x_train.reshape(x_train.shape[0], -1)\n x_test = x_test.reshape(x_test.shape[0], -1)\n if is_one_hot:\n y_train = self._one_hot(y_train)\n y_test = self._one_hot(y_test)\n \n return (x_train, y_train), (x_test, y_test)\n\n def _one_hot(self, y_):\n one_hot = np.array([[int(i == y) for i in range(10)] for y in y_], dtype=float)\n return one_hot\n \n def _normalize(self, x_):\n return x_ / 255.0\n\n\n","sub_path":"mnist/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"542994355","text":"'''\r\nScript to compare files in two folders and copy those which are not overlapping.\r\n'''\r\nimport os\r\nimport shutil\r\nsource = \"Source Path\"\r\nfdir1 = os.listdir(\"folder1\")\r\nfdir2 = os.listdir(\"folder2\")\r\ndiff = list(set(fdir2) - set(fdir1))\r\nfor file in diff:\r\n shutil.copy(os.path.join(source, file), \"Destination Path\")\r\n","sub_path":"FileCompare.py","file_name":"FileCompare.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"247888445","text":"# -*- coding: utf-8 -*-\n\n'''\nName: Poojan Gajera\nStevens ID: 10432734\nCourse: FE 595\nHW: 01\nTitle: Python Refresher\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Taking values from 0 to 360 for the cycle of sine/cosine.\nperiod = np.arange(0,2*np.pi,0.01)\n\n# Sine, Cosine and tangent Graph\nsine = np.sin(period )\ncosine = np.cos(period )\ntan = np.tan(period)\n\n#plotting sin and cosine on the same axis\nplt.plot(period,sine,period,cosine,period,tan)\n\n##creating legends\nplt.subplot().legend(['Sine','Cosine','Tangent'])\n\nplt.subplot().axhline(y=0, color='k')\nplt.subplot().axvline(x=0, color='k')\nplt.savefig(\"graph.png\")\nplt.show()\n","sub_path":"10432734_HW_1_FE_595.py","file_name":"10432734_HW_1_FE_595.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"191831243","text":"#!/usr/bin/python\n#coding=utf-8\n# @hequan\n\nfrom scrapy.spiders import Spider, Rule\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nimport re\nimport requests\nfrom scrapy.spiders import CrawlSpider\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom opensource_spider.items import OpensourceSpiderItem\n\nclass opensource_httpd_spider(CrawlSpider):\n # 爬虫的识别名称,必须是唯一的,在不同的爬虫中你必须定义不同的名字\n name = \"httpd_spider\" # 设置爬虫名称\n\n # 搜索的域名范围,也就是爬虫的约束区域,规定爬虫只爬取这个域名下的网页\n # http://mirrors.aliyun.com/apache/httpd/\n allowed_domains = [\"mirrors.aliyun.com\"] # 设置允许的域名\n\n # 爬取的url列表,爬虫从这里开始抓取数据,所以,第一次下载的数据将会从这些urls开始,其他子url将会从这些起始url中继承性生成\n start_urls = [\n 'http://mirrors.aliyun.com/apache/httpd/',\n ]\n\n # 解析的方法,调用的时候传入从每一个url传回的response对象作为唯一参数,负责解析并获取抓取的数据(解析为item),跟踪更多的url\n def parse(self, response):\n sel = Selector(response)\n items = []\n lvs_lists = sel.xpath('//a/@href').extract()\n for v in lvs_lists:\n if v == '../' or v == 'tmp/' or v == 'Name' or v == 'Last modified' or v == 'Description' or v == 'Parent Directory' or v == 'ChangeLog' or v == 'Size':\n continue\n\n item = OpensourceSpiderItem()\n item['orginname'] = v\n item['downurl'] = response.url + v\n item['filesize'] = 0\n items.append(item)\n\n return items\n\n\n","sub_path":"opensource_spider/spiders/opensource_httpd_spider.py","file_name":"opensource_httpd_spider.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"497990757","text":"\nimport pathlib\nimport typing\n\nfrom openff.nagl._base.base import ImmutableModel\nfrom openff.nagl.nn.gcn._base import _GCNStackMeta\nfrom openff.nagl.nn.activation import ActivationFunction\nfrom openff.nagl.features.atoms import DiscriminatedAtomFeatureType\nfrom openff.nagl.features.bonds import DiscriminatedBondFeatureType\nfrom openff.nagl.utils._types import FromYamlMixin\n\nAggregatorType = typing.Literal[\"mean\", \"gcn\", \"pool\", \"lstm\", \"sum\"]\nPostprocessType = typing.Literal[\"readout\", \"compute_partial_charges\", \"regularized_compute_partial_charges\"]\n\ntry:\n from pydantic.v1 import Field, validator\nexcept ImportError:\n from pydantic import Field, validator\n\nclass BaseLayer(ImmutableModel):\n \"\"\"Base class for single layer in the neural network\"\"\"\n hidden_feature_size: int = Field(\n description=(\n \"The feature sizes to use for each hidden layer. \"\n \"Each hidden layer will have the shape \"\n \"`n_atoms` x `hidden_feature_sizes`.\"\n )\n )\n activation_function: ActivationFunction = Field(\n description=\"The activation function to apply for each layer\"\n )\n dropout: float = Field(\n default=0.0,\n description=\"The dropout to apply after each layer\"\n )\n\n @validator(\"activation_function\", pre=True)\n def _validate_activation_function(cls, v):\n return ActivationFunction._get_class(v)\n\n\nclass ConvolutionLayer(BaseLayer):\n \"\"\"Configuration for a single convolution layer\"\"\"\n aggregator_type: AggregatorType = Field(\n default=None,\n description=\"The aggregator function to apply after each convolution\"\n )\n\n\nclass ForwardLayer(BaseLayer):\n \"\"\"Configuration for a single feedforward layer\"\"\"\n\n\nclass ConvolutionModule(ImmutableModel):\n architecture:typing.Literal[\"SAGEConv\", \"GINConv\"] = Field(\n description=\"GCN architecture to use\"\n )\n layers: typing.List[ConvolutionLayer] = Field(\n description=\"Configuration for each layer\"\n )\n\n\nclass ReadoutModule(ImmutableModel):\n pooling: typing.Literal[\"atoms\", \"bonds\"]\n layers: typing.List[ForwardLayer] = Field(\n description=\"Configuration for each layer\"\n )\n postprocess: typing.Optional[PostprocessType] = Field(\n description=\"Optional post-processing layer for prediction\"\n )\n\n # @validator(\"postprocess\", pre=True)\n # def _validate_postprocess(cls, v):\n # from openff.nagl.nn.postprocess import _PostprocessLayerMeta\n # if v is None:\n # return None\n # return _PostprocessLayerMeta._get_object(v)\n\n\nclass ModelConfig(ImmutableModel, FromYamlMixin):\n version: typing.Literal[\"0.1\"]\n atom_features: typing.List[DiscriminatedAtomFeatureType] = Field(\n description=\"Atom features to use\"\n )\n bond_features: typing.List[DiscriminatedBondFeatureType] = Field(\n description=(\n \"Bond features to use. 
\"\n \"Not all architectures support bond features\"\n )\n )\n convolution: ConvolutionModule = Field(\n description=\"Convolution config to pass molecular graph through\"\n )\n readouts: typing.Dict[str, ReadoutModule] = Field(\n description=\"Readout configs to map convolution representation to output\"\n )\n\n # @validator(\"atom_features\", \"bond_features\", pre=True)\n # def _validate_atom_features(cls, v, field):\n # if isinstance(v, dict):\n # v = list(v.items())\n # all_v = []\n # for item in v:\n # if isinstance(item, dict):\n # all_v.extend(list(item.items()))\n # elif isinstance(item, (str, field.type_, type(field.type_))):\n # all_v.append((item, {}))\n # else:\n # all_v.append(item)\n\n # instantiated = []\n # for klass, args in all_v:\n # if isinstance(klass, (AtomFeature, BondFeature)):\n # instantiated.append(klass)\n # else:\n # klass = type(field.type_)._get_class(klass)\n # if not isinstance(args, dict):\n # item = klass._with_args(args)\n # else:\n # item = klass(**args)\n # instantiated.append(item)\n # return instantiated\n \n def to_simple_dict(self):\n \"\"\"\n Create a simple dictionary representation of the model config\n\n This simplifies the representation of atom and bond features\n \"\"\"\n dct = self.dict()\n dct[\"atom_features\"] = tuple(\n [\n {f.feature_name: f.dict(exclude={\"feature_name\"})}\n for f in self.atom_features\n ]\n )\n\n dct[\"bond_features\"] = tuple(\n [\n {f.feature_name: f.dict(exclude={\"feature_name\"})}\n for f in self.bond_features\n ]\n )\n new_dict = dict(dct)\n for k, v in dct.items():\n if isinstance(v, pathlib.Path):\n v = str(v.resolve())\n new_dict[k] = v\n return new_dict\n \n @property\n def n_atom_features(self) -> int:\n \"\"\"The number of features used to represent an atom\"\"\"\n lengths = [len(feature) for feature in self.atom_features]\n n_features = sum(lengths)\n return n_features","sub_path":"openff/nagl/config/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"396833070","text":"from __future__ import annotations\nfrom typing import List, Generator, Union\nfrom datetime import datetime, timedelta\nimport pvlib as pv\nimport pandas as pd\nimport dataclasses\nimport numpy as np\nimport abc\n\n\n@dataclasses.dataclass()\nclass Path:\n points: List[pv.location.Location]\n timestamps: List[Union[pd.Timestamp, datetime]]\n\n @classmethod\n @abc.abstractmethod\n def create(cls, *args, **kwargs) -> LinearPath:\n pass\n\n @property\n def lats_lons(self) -> (List[float], List[float]):\n lats = [point.latitude for point in self.points]\n lons = [point.longitude for point in self.points]\n\n return lats, lons\n\n def __iter__(self) -> Generator[(pd.Timestamp, pv.location.Location), None, None]:\n yield from zip(self.timestamps, self.points)\n\n\nclass LinearPath(Path):\n @classmethod\n def create(cls, start_loc: pv.location.Location, end_loc: pv.location.Location,\n start_time: datetime, end_time: datetime,\n npoints=None) -> LinearPath:\n \"\"\"\n\n :param start_loc:\n :param end_loc:\n :param start_time:\n :param end_time:\n :param npoints: Number of points on the path. Defaults to None, for auto.\n :return:\n \"\"\"\n\n time_range = end_time - start_time\n\n if npoints is None: # automatically determine appropriate number of points\n npoints = int(time_range / timedelta(minutes=60))\n\n lats = np.linspace(start_loc.latitude, end_loc.latitude, npoints)\n\n lons = np.linspace(start_loc.longitude, end_loc.longitude, npoints)\n\n points = [pv.location.Location(lat, lon) for lat, lon in zip(lats, lons)]\n\n delta = time_range / npoints\n times = [start_time + (delta * n) for n in range(npoints)]\n\n return cls(points=points, timestamps=times)\n\n\n@dataclasses.dataclass()\nclass SegmentedPath(Path):\n segments: List[LinearPath] = dataclasses.field(default_factory=list)\n\n @classmethod\n def create(cls, start_loc: pv.location.Location, start_time: datetime) -> SegmentedPath:\n instance = cls([], [])\n instance.points = [start_loc]\n instance.timestamps = [start_time]\n return instance\n\n def append_point(self, loc: pv.location.Location, time: datetime, npoints=None):\n previous_loc = self.points[-1]\n previous_time = self.timestamps[-1]\n line = LinearPath.create(previous_loc, loc, previous_time, time, npoints=npoints)\n self.points.extend(line.points[1:])\n self.timestamps.extend(line.timestamps[1:])\n self.segments.append(line)\n\n","sub_path":"gui_and_analytics/analytics/location/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"533228489","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom jaratoolbox import loadbehavior\nimport os\nimport facemapanalysis as fmap\nimport sys\n\ndef onset_values(signalArray): \n\n '''\n Helps to find onset start values of the sync singal in any given array: \n Args: \n SignalArray (np.array) = array that contains data of the sync signal\n Returns:\n onsetStartValues (np.array) = an array of the indices containing the start onset values of the sync signal.\n ''' \n firstIndexValue = 0 \n lastIndexValue = len(signalArray)-1 \n stepNumber = 2\n startIndicesValues = range(firstIndexValue, lastIndexValue, stepNumber)\n startIndicesVec = np.array(startIndicesValues)\n onsetStartValues = np.take(signalArray, startIndicesVec)\n return (onsetStartValues)\n \ndef eventlocked_signal(timeVec, signal, eventOnsetTimes, windowTimeRange):\n '''\n Make array of signal traces locked to an event.\n Args:\n timeVec (np.array): time of each sample in the signal.\n signal (np.array): samples of the signal to process.\n eventOnsetTimes (np.array): time of each event.\n windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n Returns: \n windowTimeVec (np.array): time of each sample in the window w.r.t. the event.\n lockedSignal (np.array): extracted windows of signal aligned to event. Size (nSamples,nEvents)\n '''\n if (eventOnsetTimes[0] + windowTimeRange[0]) < timeVec[0]:\n raise ValueError('Your first window falls outside the recorded data.')\n if (eventOnsetTimes[-1] + windowTimeRange[-1]) > timeVec[-1]:\n raise ValueError('Your last window falls outside the recorded data.')\n samplingRate = 1/(timeVec[1]-timeVec[0])\n windowSampleRange = samplingRate*np.array(windowTimeRange) # units: frames\n windowSampleVec = np.arange(*windowSampleRange, dtype=int) # units: frames\n windowTimeVec = windowSampleVec/samplingRate # Units: time\n nSamples = len(windowTimeVec) # time samples / trial\n nTrials = len(eventOnsetTimes) # number of times the sync light went off\n lockedSignal = np.empty((nSamples,nTrials))\n for inde,eventTime in enumerate(eventOnsetTimes):\n eventSample = np.searchsorted(timeVec, eventTime) # eventSample = index at which the synch turns on\n thiswin = windowSampleVec + eventSample # indexes of window\n lockedSignal[:,inde] = signal[thiswin]\n return (windowTimeVec, lockedSignal)\n\n\ndef find_valid_windows(timeVec, eventOnsetTimes, windowTimeRange):\n \"\"\"\n Find windows that lie within the timeVec.\n Args:\n timeVec (np.array): time of each sample in the signal.\n eventOnsetTimes (np.array): time of each event.\n windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n Returns: \n validWindows (np.array): array of booleans that is True if the window falls within.\n\n \"\"\"\n return (validWindows)\n\n\n\ndef eventlocked_signalold(timeVec, signal, eventOnsetTimes, windowTimeRange):\n '''\n Make array of signal traces locked to an event.\n Args:\n timeVec (np.array): time of each sample in the signal.\n signal (np.array): samples of the signal to process.\n eventOnsetTimes (np.array): time of each event.\n windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n Returns: \n windowTimeVec (np.array): time of each sample in the window w.r.t. the event.\n lockedSignal (np.array): extracted windows of signal aligned to event. 
Size (nSamples,nEvents)\n '''\n samplingRate = 1/(timeVec[1]-timeVec[0])\n windowSampleRange = samplingRate*np.array(windowTimeRange) # units: frames\n windowSampleVec = np.arange(*windowSampleRange, dtype=int) # units: frames\n windowTimeVec = windowSampleVec/samplingRate # Units: time\n nSamples = len(windowTimeVec) # time samples / trial\n nTrials = len(eventOnsetTimes) # number of times the sync light went off\n lockedSignal = np.empty((nSamples,nTrials))\n discards = [] #DB ADDED\n for inde,eventTime in enumerate(eventOnsetTimes):\n eventSample = np.searchsorted(timeVec, eventTime) # eventSample = index at which the synch turns on\n thiswin = windowSampleVec + eventSample # indexes of window\n if np.logical_and(np.min(thiswin) > 0, np.max(thiswin) < len(signal)): # DB ADDED\n lockedSignal[:,inde] = signal[thiswin] # DB ADDED\n else: # DB ADDED\n discards.append(inde) # DB ADDED\n lockedSignal_trim = np.delete(lockedSignal,discards,1) # DB ADDED\n return (windowTimeVec, lockedSignal_trim)\n \ndef find_prepost_values(timeArray, dataArray, preLimDown, preLimUp, postLimDown, postLimUp): \n \n ''' \n Obtain pupil data before and after stimulus \n Args: \n timeArray (np.array): array of the time window to evaluate pupil area obtained from event_locked \n dataArray (np.array): array of the pupil data obtained from event_locked function \n preLimDown (int or float): first number of the time interval to evaluate before stimulus onset \n preLimUp (int or float): second number of the time interval to evaluate before stimulus onset\n postLimDown (int or float): first number of the time interval to evaluate after stimulus onset \n postLimUp (int or float): second number of the time interval to evaluate after stimulus onset \n Returns: \n preData (np.array): array with the pupil data before stimulus \n postData (np.array): array with the pupil data after stimulus \n ''' \n preBool = np.logical_and(preLimDown <= timeArray, timeArray < preLimUp) \n postBool = np.logical_and(postLimDown <= timeArray, timeArray < postLimUp) \n preValuesIndices = np.argwhere(preBool == True) \n postValuesIndices = np.argwhere(postBool == True) \n preProcessedPreValues = dataArray[preValuesIndices] \n preProcessedPostValues = dataArray[postValuesIndices] \n preData = preProcessedPreValues.reshape(preValuesIndices.shape[0], dataArray.shape[1]) \n postData = preProcessedPostValues.reshape(postValuesIndices.shape[0], dataArray.shape[1]) \n return(preData, postData)\n\ndef freqs_and_meanParea(freqsArray, meanPareaVariable, freq1, freq2, freq3, freq4, freq5): \n '''\n Creates arrays containing the pupil area for each tested frequency\n Args:\n freqsArray (np.array): array containing the tested frequencies\n meanPareaVariable (np.array): array containing the average pupil size\n freq1..5 (int): frequencies tested\n \n returns:\n arrValues1..5 (np.array): one array per frequency tested (freq1..5) that contains the pupil size for the given frequency\n '''\n \n indicesFreq1 = np.argwhere(freq1 == freqsArray) \n indicesFreq2 = np.argwhere(freq2 == freqsArray)\n indicesFreq3 = np.argwhere(freq3 == freqsArray) \n indicesFreq4 = np.argwhere(freq4 == freqsArray) \n indicesFreq5 = np.argwhere(freq5 == freqsArray) \n newIndexArr1 = np.take(meanPareaVariable, indicesFreq1) \n newIndexArr2 = np.take(meanPareaVariable, indicesFreq2) \n newIndexArr3 = np.take(meanPareaVariable, indicesFreq3) \n newIndexArr4 = np.take(meanPareaVariable, indicesFreq4) \n newIndexArr5 = np.take(meanPareaVariable, indicesFreq5)\n arrValues1 = 
newIndexArr1.flatten()\n    arrValues2 = newIndexArr2.flatten() \n    arrValues3 = newIndexArr3.flatten() \n    arrValues4 = newIndexArr4.flatten() \n    arrValues5 = newIndexArr5.flatten()\n    return(arrValues1, arrValues2, arrValues3, arrValues4, arrValues5)\n\n\ndef normalize_data(pupilArea, valuesToNormalize): \n    minVal = np.amin(pupilArea) \n    maxVal = np.amax(pupilArea) \n    rangeValues = maxVal - minVal \n    listData = [] \n    for i in valuesToNormalize: \n        substractMin = i - minVal \n        newData = substractMin/rangeValues\n        listData.append(newData) \n    normalizedData = np.asarray(listData) \n    return(normalizedData)\n\n    \ndef comparison_plot(time, valuesData1, pVal): \n    ''' \n    Creates 1 figure with a single plot of the average pupil trace \n    Args: \n       time = vector values for x axis \n       valuesData1 (np.array) = vector values for y axis \n       pVal (float) = p-value shown in the plot label \n    returns: \n       plt.show() = 1 figure plotting the input data \n    '''  \n    labelsSize = 16\n    fig, subplt = plt.subplots(1,1)\n    fig.set_size_inches(9.5, 7.5, forward = True)\n    sp = np.round(pVal, decimals=17)\n    label1 = filename,'pval:',sp\n    \n    subplt.plot(time, valuesData1, color = 'g', label = label1, linewidth = 4)\n\n    subplt.set_xlabel('Time (s)', fontsize = labelsSize)\n    subplt.set_ylabel('Pupil Area', fontsize = labelsSize)\n    subplt.set_title('Pupil behavior: ' + filename, fontsize = labelsSize)\n    plt.suptitle('Mouse = pure013. Data Collected 2022-07-01.', fontsize = labelsSize)\n    plt.grid(b = True)\n    #plt.ylim([550, 650])\n    plt.xticks(fontsize = labelsSize)\n    plt.yticks(fontsize = labelsSize)\n#    plt.legend()\n    #plt.legend(prop ={\"size\":10}, bbox_to_anchor=(1.0, 0.8))\n    #plt.savefig('comparisonPure004Plot', format = 'pdf', dpi = 50)\n    plt.show() \n    return(plt.show())\n    \ndef barScat_plots(firstPlotMeanValues1, firstPlotMeanValues2, xlabel1, xlabel2, firstPlotStdData1, firstPlotStdData2, pVal):\n    '''\n    Plot bar plots\n    Args:\n        MeanValues (np.array): data whose mean gives each bar height\n        xlabel1 (string): name of the first condition to compare\n        xlabel2 (string): name of the second condition to compare\n        StdData (np.array): values to calculate the standard deviation from\n        pVal (float or int): p-value for each one of the animals\n    Returns:\n        plt.show(): a bar plot comparing the two conditions within one figure\n    '''\n    barLabelsFontSize = 14\n    meanPreSignal1 = firstPlotMeanValues1.mean(axis = 0) \n    meanPostSignal1 = firstPlotMeanValues2.mean(axis = 0)\n    preSignalStd1 = np.std(firstPlotStdData1) \n    postSignalStd1 = np.std(firstPlotStdData2) \n    barMeanValues1 = [meanPreSignal1, meanPostSignal1] \n    stdErrors1 = [preSignalStd1, postSignalStd1] \n    shortPval1 = np.round(pVal, decimals=3)\n    pValue1 = 'P-value:', shortPval1\n    dataPlot1 = [firstPlotMeanValues1, firstPlotMeanValues2] \n    \n    fig, barPlots = plt.subplots(1,1, constrained_layout = True, sharex = True, sharey = True)\n    fig.set_size_inches(9.5, 7.5) \n    barPlots.bar(xlabels, barMeanValues1, yerr = stdErrors1, color = 'g', label = pValue1) \n    barPlots.errorbar(xlabels, barMeanValues1, yerr = stdErrors1, fmt='none', capsize=5, alpha=0.5, ecolor = 'black') \n    barPlots.set_title(filename, fontsize = barLabelsFontSize)\n    barPlots.set_ylabel('Pupil area', fontsize = barLabelsFontSize)\n    barPlots.tick_params(axis='x', labelsize=barLabelsFontSize)\n    #plotcolors = firstPlotMeanValues1 - firstPlotMeanValues2\n    barPlots.plot(xlabels, dataPlot1, marker = 'o', c = 'k', alpha = 0.3, linewidth = 1)\n    
barPlots.legend(prop ={\"size\":10})\n \n #plt.ylim(250, 800)\n plt.suptitle('pupil behavior across trials', fontsize = barLabelsFontSize)\n #plt.xlabel(\"common X\", loc = 'center')\n #plt.savefig(scatBarDict['savedName'], format = 'pdf', dpi =50)\n plt.show() \n return(plt.show())\n \ndef pupilDilation_time(timeData1, plotData1, pvalue):\n shortPval = np.round(pvalue, decimals = 6)\n lab = 'p-value', shortPval \n plt.plot(timeData1,plotData1, label = lab)\n plt.title('pure004_20220110_2Sounds: average pupil behavior') \n plt.ylabel('Pupil Area', fontsize = 13)\n plt.xlabel('Time(s)', fontsize = 13)\n plt.legend()\n plt.show() \n return(plt.show())\n\ndef PDR_kHz_plot(freqsArray, arrFreq1, arrFreq2, arrFreq3, arrFreq4, arrFreq5):\n labelsSize = 16\n fig, freqplt = plt.subplots(1, 1)\n fig.set_size_inches(9.5, 7.5, forward = True)\n label1 = filename\n \n meanPoint1 = arrFreq1.mean(axis = 0)\n meanPoint2 = arrFreq2.mean(axis = 0) \n meanPoint3 = arrFreq3.mean(axis = 0) \n meanPoint4 = arrFreq4.mean(axis = 0) \n meanPoint5 = arrFreq5.mean(axis = 0) \n valuesPlot = [meanPoint1, meanPoint2, meanPoint3, meanPoint4, meanPoint5]\n \n freqplt.plot(freqsArray, valuesPlot, marker = 'o')\n freqplt.set_title('Pupil size for 5 different frequencies: pure011_20220331', fontsize = labelsSize)\n freqplt.set_ylabel('Mean pupil Area', fontsize = labelsSize)\n freqplt.set_xlabel('Frequencies (kHz)', fontsize = labelsSize)\n plt.grid(b = True)\n plt.xticks(fontsize = labelsSize)\n plt.yticks(fontsize = labelsSize)\n plt.show() \n return(plt.show())\n# ---------------------------------------------------------------------------------------------------------------\n \n \n \n#--- loading data ---\nfileloc = '/home/jarauser/Desktop/danny_datacollection/dbtest3_pure013_2022-07-01'\nfilename = 'pure013_detectiongonogo_20220701a_dbtest3_proc.npy'\nproc = fmap.load_data(os.path.join(fileloc, filename), runchecks=False)\n\n#--- obtain pupil data ---\npArea = fmap.extract_pupil(proc)\n\n#---calculate number of frames, frame rate, and time vector---\nnframes = len(pArea) # Number of frames.\nframeVec = np.arange(0, nframes, 1) # Vector of the total frames from the video.\nframerate = 30 # frame rate of video\ntimeVec = frameVec / framerate # Time Vector to calculate the length of the video.\n\n#--- obtain values where sync signal turns on ---\n_, syncOnsetValues, _, _ = fmap.extract_sync(proc)\ntimeOfSyncOnset = timeVec[syncOnsetValues] # Provides the time values in which the sync signal turns on.\n\n#--- Align trials to the event ---\ntimeRange = np.array([-0.5, 2.0]) # Range of time window\n# run function you're creating, to restrict trials to valid trials: timeofSyncOnset[bool] \nwindowTimeVec, windowed_signal = eventlocked_signal(timeVec, pArea, timeOfSyncOnset, timeRange)\n\n\n# TO SHOW SANTIAGO:\nprint('time of last sync light:')\nprint(timeOfSyncOnset[-1])\nprint('total time of recording:')\nprint(np.max(timeVec))\nprint('total number of sounds played (times sync light blinked):')\nprint(syncOnsetValues.shape)\nprint('total number of trials included in the analysis:')\nprint(windowed_signal.shape[1])\n\n\n#sys.exit() \n\n#--- Obtain pupil pre and post stimulus values, and average size ---\n#find_prepost_values(timeArray, dataArray, preLimDown, preLimUp, postLimDown, postLimUp)\npreSignal, postSignal = find_prepost_values(windowTimeVec, windowed_signal, -0.5, 0, 1.4, 2.0)\naveragePreSignal = preSignal.mean(axis = 0)\naveragePostSignal = postSignal.mean(axis = 0)\ndataToPlot = [averagePreSignal, 
averagePostSignal]\nxlabels = ['Pre signal', 'Post signal']\n\n#--- Wilcoxon test to obtain statistics ---\nwstat, pval = stats.wilcoxon(averagePreSignal, averagePostSignal)\nprint('Wilcoxon value config14_1', wstat,',', 'P-value config14_1', pval)\n\n#--- Defining the correct time range for pupil's relaxation (dilation) --- DB: SEEMS TO BE FOR PLOTTING, MAYBE WE SHOULD RENAME.\ntimeRangeForPupilDilation = np.array([-12, 12])\n#def eventlocked_signal(timeVec, signal, eventOnsetTimes, windowTimeRange)\npupilDilationTimeWindowVec, pAreaDilated = eventlocked_signal(timeVec, pArea, timeOfSyncOnset, timeRangeForPupilDilation)\npAreaDilatedMean = pAreaDilated.mean(axis = 1)\n\n#--- Plotting the results ---\nOverLapPlots = comparison_plot(pupilDilationTimeWindowVec, pAreaDilatedMean, pval)\nscattBar = barScat_plots(averagePreSignal, averagePostSignal, 'pre stimulus onset', 'post stimulus onset', preSignal, postSignal, pval)\n\n#--- Finding and plotting pupil area corresponding to each tested frequency ---\n#freqValues1, freqValues2, freqValues3, freqValues4, freqValues5 = freqs_and_meanParea(freqs, averagePostSignal, 2000, 4000, 8000, 16000, 32000) \n#pAreaFreqPlot = PDR_kHz_plot(frequenciesTested, freqValues1, freqValues2, freqValues3, freqValues4, freqValues5)\n\nprint('Data averaged over ', pAreaDilated.shape[1], ' trials')\n\n\n\n\n","sub_path":"dannybrown/interim/comparison1Plot_DBcompleted.py","file_name":"comparison1Plot_DBcompleted.py","file_ext":"py","file_size_in_byte":16360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"91935238","text":"# Write a program that computes the net amount of a bank account based a transaction log from console input. \n# The transaction log format is shown as following:\n# D 100 (D --> deposit)\n# W 200 (W --> withdrawal)\n\n# Input :\n# D 300\n# D 300\n# W 200\n# D 100\n# Output : 500\n\ndepo = withd = 0\nwhile True:\n\tdata = input()\n\tif not data:\n\t\tbreak\n\tv = data.split(' ')\n\tfor i in range(len(v)):\n\t\tif v[i] is 'D':\n\t\t\tdepo += int(v[i+1])\n\t\tif v[i] is 'W':\n\t\t\twithd += int(v[i+1])\nprint(depo-withd)\n\t","sub_path":"100_17_bank.py","file_name":"100_17_bank.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"480451370","text":"\r\nimport pandas as pd\r\n\r\ndef load_masterdictionary(file_path, stopwords_file, print_flag=False, f_log=None, get_other=False):\r\n _master_dictionary = {}\r\n _sentiment_categories = ['negative', 'positive', 'uncertainty', 'litigious', 'constraining',\r\n 'strong_modal', 'moderate_modal','weak_modal']\r\n\r\n\r\n _stopwords = pd.read_csv(stopwords_file, header=None)\r\n\r\n _stopwords = [val.lower() for sublist in _stopwords.values for val in sublist]\r\n\r\n #_stopwords = [word.lower() for word in _stopwords]\r\n\r\n\r\n with open(file_path) as f:\r\n _total_documents = 0\r\n _md_header = f.readline()\r\n for line in f:\r\n cols = line.split(',')\r\n\r\n _master_dictionary[cols[0].lower()] = MasterDictionary(cols, _stopwords)\r\n _total_documents += _master_dictionary[cols[0].lower()].doc_count\r\n if len(_master_dictionary) % 5000 == 0 and print_flag:\r\n print('\\r ...Loading Master Dictionary' + ' {}'.format(len(_master_dictionary)), end='', flush=True)\r\n\r\n if print_flag:\r\n print('\\r', end='') # clear line\r\n print('\\nMaster Dictionary loaded from file: \\n ' + file_path)\r\n print(' {0:,} words loaded in master_dictionary.'.format(len(_master_dictionary)) + '\\n')\r\n\r\n\r\n if f_log:\r\n try:\r\n f_log.write('\\n\\n load_masterdictionary log:')\r\n f_log.write('\\n Master Dictionary loaded from file: \\n ' + file_path)\r\n f_log.write('\\n {0:,} words loaded in master_dictionary.\\n'.format(len(_master_dictionary)))\r\n except Exception as e:\r\n print('Log file in load_masterdictionary is not available for writing')\r\n print('Error = {0}'.format(e))\r\n\r\n if get_other:\r\n return _master_dictionary, _md_header, _sentiment_categories, _stopwords\r\n else:\r\n return _master_dictionary\r\n\r\n\r\ndef create_sentimentdictionaries(_master_dictionary, _sentiment_categories, negation_list=None):\r\n\r\n _sentiment_dictionary = {}\r\n for category in _sentiment_categories:\r\n _sentiment_dictionary[category] = {}\r\n # Create dictionary of sentiment dictionaries with count set = 0\r\n for word in _master_dictionary.keys():\r\n for category in _sentiment_categories:\r\n if _master_dictionary[word].sentiment[category]:\r\n _sentiment_dictionary[category][word] = 0\r\n\r\n if negation_list is not None:\r\n _sentiment_dictionary['negation'] = {}\r\n\r\n for word in negation_list:\r\n _sentiment_dictionary['negation'][word] = 0\r\n\r\n return _sentiment_dictionary\r\n\r\n\r\nclass MasterDictionary:\r\n def __init__(self, cols, _stopwords):\r\n self.word = cols[0].lower()\r\n self.sequence_number = int(cols[1])\r\n self.word_count = int(cols[2])\r\n self.word_proportion = float(cols[3])\r\n self.average_proportion = float(cols[4])\r\n self.std_dev_prop = float(cols[5])\r\n self.doc_count = int(cols[6])\r\n self.negative = int(cols[7])\r\n self.positive = int(cols[8])\r\n self.uncertainty = int(cols[9])\r\n self.litigious = int(cols[10])\r\n self.constraining = int(cols[11])\r\n self.superfluous = int(cols[12])\r\n self.interesting = int(cols[13])\r\n self.modal_number = int(cols[14])\r\n self.strong_modal = False\r\n if int(cols[14]) == 1:\r\n self.strong_modal = True\r\n self.moderate_modal = False\r\n if int(cols[14]) == 2:\r\n self.moderate_modal = True\r\n self.weak_modal = False\r\n if int(cols[14]) == 3:\r\n self.weak_modal = True\r\n self.sentiment = {}\r\n self.sentiment['negative'] = bool(self.negative)\r\n self.sentiment['positive'] = bool(self.positive)\r\n self.sentiment['uncertainty'] = bool(self.uncertainty)\r\n 
self.sentiment['litigious'] = bool(self.litigious)\r\n self.sentiment['constraining'] = bool(self.constraining)\r\n self.sentiment['strong_modal'] = bool(self.strong_modal)\r\n self.sentiment['moderate_modal'] = bool(self.moderate_modal)\r\n self.sentiment['weak_modal'] = bool(self.weak_modal)\r\n self.irregular_verb = int(cols[15])\r\n self.harvard_iv = int(cols[16])\r\n self.syllables = int(cols[17])\r\n self.source = cols[18]\r\n\r\n if self.word in _stopwords:\r\n self.stopword = True\r\n else:\r\n self.stopword = False\r\n return\r\n","sub_path":"load_MasterDictionary.py","file_name":"load_MasterDictionary.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"190605724","text":"import tkinter\n\n\ndef parabola(par_x):\n return list(map(lambda x: (x ** 2) / 100, par_x))\n\n\ndef draw_axes(par_canvas):\n par_canvas.update()\n x_origin = par_canvas.winfo_width() / 2\n y_origin = par_canvas.winfo_height() / 2\n par_canvas.configure(scrollregion=(-x_origin, -y_origin, x_origin, y_origin))\n par_canvas.create_line(-x_origin, 0, x_origin, 0, fill=\"black\")\n par_canvas.create_line(0, -y_origin, 0, y_origin, fill=\"black\")\n\n\ndef draw_parabola(par_canvas):\n par_canvas.update()\n xes = range(-320, 320, 1)\n # ratio = parCanvas.winfo_width() / 100\n ratio = 1\n ratio2 = 100 / par_canvas.winfo_height()\n print(ratio)\n for i in range(len(xes) - 1):\n par_canvas.create_line(xes[i] * ratio, ratio2 * parabola(xes[i] * ratio), xes[i + 1] * ratio,\n ratio2 * parabola(xes[i + 1] * ratio), fill=\"black\")\n\n\ndef plot(par_canvas, par_x, par_y):\n par_canvas.update()\n for i in range(len(par_x) - 1):\n par_canvas.create_line(par_x[i], -par_y[i], par_x[i + 1],\n -par_y[i + 1], fill=\"black\")\n\n\nmainWindow = tkinter.Tk()\nmainWindow.title(\"Parabola\")\nmainWindow.geometry(\"640x480\")\n\ncanvas = tkinter.Canvas(mainWindow, width=320, height=480)\ncanvas.grid(row=0, column=0)\ncanvas2 = tkinter.Canvas(mainWindow, width=320, height=480)\ncanvas2.grid(row=0, column=1)\nprint(repr(canvas), repr(canvas2))\ndraw_axes(canvas)\ndraw_axes(canvas2)\nX = list(range(-100, 100, 1))\nprint(X)\nY = parabola(X)\nprint(Y)\nplot(canvas, X, Y)\n\nmainWindow.mainloop()\n","sub_path":"Section_11/95_lecture.py","file_name":"95_lecture.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"414715207","text":"import pandas as pd\r\nimport requests\r\nfrom lxml import html\r\nfrom pandas import ExcelWriter\r\nimport numpy as np\r\n\r\ndf = pd.DataFrame(columns=['Question','Answer'])\r\n# print (df)\r\n\r\nhp_link = \"https://www.gktoday.in/general-knowledge/\"\r\n\r\npage = requests.get(hp_link)\r\ndoc = html.fromstring(page.content)\r\n\r\nsections = np.array([])\r\n# topics = np.array([])\r\nfor i in range(1,20):\r\n try:\r\n x_path = '/html/body/div[1]/div[2]/div/aside/div[2]/ul/li[' + str(i) + ']/a'\r\n section = doc.xpath(x_path)[0].get(\"href\")\r\n sections = np.append(sections, section)\r\n # topics = np.append(topics, section.split('https://www.gktoday.in/quizbase/', 1)[-1])\r\n except (ValueError,IndexError):\r\n # print (i)\r\n continue\r\nsections = np.unique(sections)\r\n# print (sections)\r\n# print (topics)\r\n\r\n# section = sections[0]\r\n# print (section)\r\n\r\nindex = 0\r\nfor section in sections:\r\n page_link = section + '?pageno=1'\r\n page = requests.get(page_link)\r\n doc = html.fromstring(page.content)\r\n max_page = int(doc.xpath('/html/body/div[1]/div[2]/div/div/div[2]/article/div/ul/li[1]')[0].text_content().split('Page 1 of ', 1)[-1])\r\n # print (max_page)\r\n for page_no in range(1, max_page+1):\r\n page_link = section + '?pageno=' + str(page_no)\r\n # print (page_link)\r\n page = requests.get(page_link)\r\n doc = html.fromstring(page.content)\r\n for i in range(1,11):\r\n try:\r\n ques_x_path = '/html/body/div[1]/div[2]/div/div/div[2]/article/div/div[' + str(i) + ']/div[1]/p'\r\n ans_x_path = '/html/body/div[1]/div[2]/div/div/div[2]/article/div/div[' + str(i) + ']/div[3]/div[1]'\r\n ques = doc.xpath(ques_x_path)[0].text_content()\r\n ans = doc.xpath(ans_x_path)[0].text_content().split('[', 1)[-1].split(']', 1)[0]\r\n df.loc[index] = [ques, ans]\r\n index += 1\r\n except (ValueError,IndexError):\r\n # print (i)\r\n break\r\n print (section, page_no, max_page)\r\n\r\nwriter = ExcelWriter('gkques.xlsx')\r\ndf.to_excel(writer,'qna')\r\nwriter.save()\r\n\r\nprint (\"done\")","sub_path":"gktoday-gk-leecher.py","file_name":"gktoday-gk-leecher.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"394249379","text":"# -*- coding: utf-8 -*-\nimport queue\nimport random\n\nif __name__ == \"__main__\":\n q = queue.PriorityQueue()\n \n data = random.sample(range(1, 100), 5)\n print(\"Orignal data:\", data)\n \n for i in data:\n q.put(i)\n \n print(\"Priority queue:\")\n while not q.empty():\n print(q.get(), end=' ')\n print()","sub_path":"standard/010.queue/priority_queue.py","file_name":"priority_queue.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"65803651","text":"import neopixel\nimport board\nfrom random import randrange\nfrom time import sleep\n\n# definitions of functions\ndef rgb_to_grb_and_brightness(rgb, userbrightness):\n r, g, b = rgb\n ledpower = r * g * b # 255 255 255 would be 16,581,375\n\n if ledpower >= 3375000: # this is 150 150 150\n brightness = 0.3 * userbrightness\n else:\n brightness = 0.6 * userbrightness\n\n return (int(g * brightness), int(r * brightness), int(b * brightness))\n\nsettings_file = open(\"settings.txt\", \"rt\")\n\nbrightness_value_ = settings_file.readline()\nbrightness_value_ = brightness_value_.replace(\"brightness = \", \"\")\nbrightness_value_ = brightness_value_.replace(\"\\n\", \"\")\nbrightness_value = float(brightness_value_)\nbrightness_value = brightness_value / 100\n\ncolor1_ = settings_file.readline()\ncolor2_ = settings_file.readline()\ncolor3_ = settings_file.readline()\n\ncolor1_ = color1_.replace(\"color1 = \", \"\")\ncolor1_ = color1_.replace(\"\\n\", \"\")\ncolor2_ = color2_.replace(\"color2 = \", \"\")\ncolor2_ = color2_.replace(\"\\n\", \"\")\ncolor3_ = color3_.replace(\"color3 = \", \"\")\ncolor3_ = color3_.replace(\"\\n\", \"\")\n\ncolor1 = int(color1_)\ncolor2 = int(color2_)\ncolor3 = int(color3_)\n\ncolor_value = (color1, color2, color3)\n\nmode_ = settings_file.readline()\nmode_ = mode_.replace(\"mode = \", \"\")\nmode_ = mode_.replace(\"\\n\", \"\")\nmode = mode_\n\ndelay_ = settings_file.readline()\ndelay_ = delay_.replace(\"delayrainbow = \", \"\")\ndelay_ = delay_.replace(\"\\n\", \"\")\ndelay = int(delay_)\n\nprint(color_value)\nprint(brightness_value)\nprint(mode)\nprint(delay)\n\nboardleds = neopixel.NeoPixel(board.D4, 117)\n\nfor x in range(0, 10):\n if mode == \"rainbow\":\n while True:\n for pixel in range(0, 117):\n boardleds[pixel] = (int(randrange(0, 255) * brightness_value), int(randrange(0, 255) * brightness_value), int(randrange(0, 255) * brightness_value))\n sleep(float(delay)/1000)\n elif mode == \"setcolor\":\n while True:\n for pixel in range(0, 117):\n boardleds[pixel] = rgb_to_grb_and_brightness(color_value, brightness_value)\n else:\n mode = \"rainbow\"\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"368226418","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/3/13 10:32\n# @Author : Wowspring\n# @Site :\n# @File : BackgroundReplace.py\n# @Software: PyCharm\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimage = mpimg.imread(\"../img/certificate.jpg\")\nimage = np.copy(image)\nprint('this image is:', type(image), 'with dimensions:', image.shape)\n\ngray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\nplt.imshow(gray_image, cmap='gray')\nplt.show()\nlower_gray = np.array(235)\nupper_gray = np.array(255)\n\nmask = cv2.inRange(gray_image, lower_gray, upper_gray)\nplt.imshow(mask, cmap='gray')\nplt.show()\n\n# 腐蚀膨胀\nerode = cv2.erode(mask, None, iterations=1)\n# plt.imshow(erode)\n# plt.show()\ndilate = cv2.dilate(erode, None, iterations=1)\n# plt.imshow(dilate)\n# plt.show()\nmasked_img = np.copy(image)\nmasked_img[dilate != 0] = [0, 0, 0]\nplt.imshow(masked_img)\nplt.show()\n\n# Background img\nbackground_image = mpimg.imread('../img/sky2.jpeg')\ncrop_background_image = background_image[0:masked_img.shape[0], 0:masked_img.shape[1]]\nplt.imshow(crop_background_image)\nprint('Image dimensions:', crop_background_image.shape)\nplt.show()\n\ncrop_background = np.copy(crop_background_image)\ncrop_background[dilate == 0] = [0, 0, 0]\nplt.imshow(crop_background)\nplt.show()\n\nprint('Masked Image dimensions:', masked_img.shape)\nprint('background Image dimensions:', crop_background.shape)\n\ncomplete_image = masked_img + crop_background\nplt.imshow(complete_image)\nplt.show()\n\nmpimg.imsave('../img/certificateandsky.jpg', complete_image)\n","sub_path":"120181080223-姚鹏飞-作业2/src/BackgroundReplace.py","file_name":"BackgroundReplace.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"91176995","text":"import math, csv, operator, json\nfrom collections import defaultdict\nfrom random import shuffle\n\n### For splitting the continuous variables #################\ndef mean(data):\n return sum(x for x in data)/float(len(data))\n\ndef variance(data):\n m = mean(data)\n return sum((x-m)**2 for x in data)/float(len(data))\n\ndef sigma(data):\n return math.sqrt(variance(data))\n############################################\n \nclass Node:\n \"\"\" Data container for each node of the decision tree. Implements methods for computing entropy and information gain, as well as splitting the data. \"\"\"\n def __init__(self, data, attr=None, attr_value=None, parent=None):\n self.data = data\n self.attr = attr\n self.attr_value = attr_value\n #self.prev_split_idx = prev_split_idx\n self.parent = parent\n self.entropy = self.get_entropy()\n self.best_split_idx, self.best_split_gain = self.best_split()[0], self.best_split()[1]\n \n def get_entropy(self, attr=None):\n \"\"\" Computes the entropy of the entire node when attr=None or of the node created by selecting for attr. See split_data() for explanation of how continuous variables are handled. \"\"\"\n if attr:\n if len(attr) == 3:\n m = attr[2][0]\n s = attr[2][1]\n i = attr[0]\n attr = attr[1]\n counts = defaultdict(float)\n for row in self.data:\n if attr and attr not in ['under', 'over', 'within']:\n if row[i] == attr:\n counts[row[-1]] += 1 # row[-1] is the classification label or the training instance\n elif attr == 'under':\n if row[i] < m-s:\n counts[row[-1]] += 1\n elif attr == 'over':\n if row[i] > m+s:\n counts[row[-1]] += 1\n elif attr == 'within':\n if row[i] <= m+s and row[i] >= m-s:\n counts[row[-1]] += 1\n else:\n counts[row[-1]] += 1\n \n return -sum(count/sum(counts.values())*math.log(count/sum(counts.values()), 2) for count in counts.itervalues())\n \n def information_gain(self, field_index, c=None):\n \"\"\" Computes the information gain associated with splitting on field_index \"\"\"\n current_entropy = self.get_entropy()\n counts = defaultdict(float)\n if not c:\n for row in self.data:\n counts[row[field_index]] += 1\n else:\n m = c[0]\n s = c[1]\n for row in self.data:\n if row[field_index] < m-s:\n counts['under'] += 1\n elif row[field_index] > m+s:\n counts['over'] += 1\n else:\n counts['within'] += 1\n if not c:\n split_entropy = sum(count/sum(counts.values())*self.get_entropy((field_index, key)) for key, count in counts.iteritems())\n else:\n split_entropy = sum(count/sum(counts.values())*self.get_entropy((field_index, key, c)) for key, count in counts.iteritems())\n \n return current_entropy - split_entropy \n \n def best_split(self):\n \"\"\" Returns (index, information_gain) for the field that generates the highest information gain. \"\"\"\n gains = defaultdict(float)\n for i in range(0, len(self.data[0])-1):\n if type(self.data[0][i]) == str:\n gains[i] = self.information_gain(i)\n else:\n attrs = [row[i] for row in self.data]\n gains[i] = self.information_gain(i, (mean(attrs), sigma(attrs)) )\n return max(gains.iteritems(), key=operator.itemgetter(1)) \n \n def split_data(self):\n \"\"\" Splits the data on self.best_split_idx, which is the index returned by best_split(). Continuous variables are split into 3 bins: values under, within, or over a 2*sigma-wide interval centered on the mean of all values in the list self.data[self.best_split_idx]. 
\"\"\"\n best = self.best_split_idx\n new_data = defaultdict(list)\n if type(self.data[0][best]) == str:\n c = 'str'\n for row in self.data:\n new_data[row[best]].append(row)\n else:\n attrs = [row[best] for row in self.data]\n m = mean(attrs)\n s = sigma(attrs)\n c = (m, s)\n for row in self.data:\n if row[best] > m+s:\n new_data['over'].append(row)\n elif row[best] < m-s:\n new_data['under'].append(row)\n else:\n new_data['within'].append(row)\n return (new_data, c)\n \n def get_class_counts(self):\n \"\"\" Return the number of instances in each class within the node. \"\"\"\n counts = defaultdict(float)\n for row in self.data:\n counts[row[-1]] += 1\n return counts\n \ndef Tree(node, attr='root'):\n \"\"\" Builds the decision tree using Node class as a data container. \"\"\"\n name = attr\n node_info = {'name': name, 'parent': node.parent, 'node': node}\n \n children = []\n entropy = node.entropy\n if entropy < 0.45 or len(node.data) < 80: # Stop splitting. Values found via parameter sweep.\n node_info['leaf'] = True\n label = max(node.get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n node_info['children'] = label \n return node_info\n else: # Split node. \n node_info['children'] = defaultdict(Tree)\n node_info['leaf'] = False\n best = node.best_split_idx\n split_attr = node.split_data()\n split_attr, c = split_attr[0], split_attr[1]\n node_info['split_on'] = best\n if type(c) == str:\n node_info['split_mean'] = c\n else:\n node_info['split_mean'] = c[0]\n node_info['split_sigma'] = c[1]\n for key, data in split_attr.items():\n node_info['children'][key] = Tree(Node(data, best, key, name), key)\n return node_info\n \ndef classify(tree, instance):\n \"\"\" Recursively traverses tree along a path determined by instance attributes until a leaf is found. The value of tree['children'] is the class label. If a path does not match the instance attribute, max(count(class)) of all data in the current node of the tree is the prediction. 
\"\"\"\n if tree.get('leaf'):\n return tree.get('children')\n else:\n ind = tree.get('split_on')\n val = instance[ind]\n if type(val) == str:\n if val in tree.get('children').keys():\n next_node = tree.get('children').get(val)\n else:\n return max(tree.get('node').get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n else:\n m = tree.get('split_mean')\n s = tree.get('split_sigma')\n if val > m+s:\n next_node = tree.get('children').get('over')\n elif val < m-s:\n next_node = tree.get('children').get('under')\n else:\n next_node = tree.get('children').get('within')\n if next_node==None:\n return max(tree.get('node').get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n else:\n predict = classify(next_node, instance)\n return predict\n \n \ndef jsonTree(tree):\n \"\"\" Build a structure to output into a JSON file for debugging the algorithm \"\"\"\n node = {'name': tree['name'], 'parent': tree['parent']}\n children = tree['children']\n node['children'] = list()\n if type(children) is str:\n node['children'].append({'name': children})\n return node\n else:\n for child in children.itervalues():\n node['children'].append(jsonTree(child))\n return node\n \nif __name__ == \"__main__\":\n \n with open(\"hw4-task1-data.tsv\") as tsv:\n data = [list(line) for line in csv.reader(tsv, delimiter=\"\\t\")]\n \n int_ind = [0, 2, 4, 10, 11, 12]\n for row in data:\n for i, item in enumerate(row):\n if i in int_ind:\n row[i] = int(row[i])\n \n # Split training/test sets\n accuracy = []\n shuffle(data) # Mix it up\n K = 10\n chunk = len(data)/K\n for i in range(K):\n try:\n training_set = data[i*chunk:(i+1)*chunk]\n if i == 0:\n test_set = data[(i+1)*chunk:]\n elif i == 9:\n test_set = data[:len(data)-chunk]\n else:\n test_set = [item for sublist in [data[:i*chunk], data[(i+1)*chunk:]] for item in sublist]\n \n print('training on fold # ' + str(i))\n tree = Tree(Node(training_set))\n print ('fold # ' +str(i) + ' trained!')\n \n results = []\n for instance in test_set:\n result = classify(tree, instance)\n results.append(result == instance[-1])\n \n accuracy.append(float(results.count(True))/float(len(results)))\n except RuntimeError:\n print ('Failure at fold ' +str(i)+'. Maximum recursion depth exceeded.')\n print (\"Worst Accuracy:\\t %f\" % (min(accuracy)*100.0))\n print (\"Best Accuracy:\\t %f\" % (max(accuracy)*100.0))\n print (\"Avg Accuracy:\\t %f\" % (mean(accuracy)*100.0))\n avg_accuracy = mean(accuracy) # average accuracy across test sets from each fold\n \n # Writing results to a file \n f = open(\"result.txt\", \"w\")\n f.write(\"accuracy: %.4f\" % avg_accuracy)\n f.close()\n \n # json_data = jsonTree(tree)\n \n # with open('data.json', 'w') as outfile:\n # json.dump(json_data, outfile, sort_keys=True, indent=4, ensure_ascii=False)\n ","sub_path":"dtree.py","file_name":"dtree.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"17233648","text":"from datetime import timedelta\n\nfrom django.utils import timezone\nfrom django.core.mail import mail_managers\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom froide.helper.date_utils import format_seconds\n\n\ndef throttle(qs, throttle_config, date_param='first_message'):\n if throttle_config is None:\n return False\n\n # Return True if the next request would break any limit\n for count, seconds in throttle_config:\n f = {\n '%s__gte' % date_param: timezone.now() - timedelta(seconds=seconds)\n }\n if qs.filter(**f).count() + 1 > count:\n return (count, seconds)\n return False\n\n\ndef check_throttle(user, klass):\n if user.is_authenticated and not user.trusted():\n throttle_settings = settings.FROIDE_CONFIG.get('request_throttle', None)\n qs, date_param = klass.objects.get_throttle_filter(user)\n throttle_kind = throttle(qs, throttle_settings, date_param=date_param)\n if throttle_kind:\n mail_managers(_('User exceeded request limit'), user.pk)\n return _('You exceeded your request limit of {count} requests in {time}.'\n ).format(count=throttle_kind[0],\n time=format_seconds(throttle_kind[1])\n )\n","sub_path":"froide/foirequest/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"38450087","text":"'''\nHere we will learn to extract some frequently used properties of objects like \nSolidity, Equivalent Diameter, Mask image, Mean Intensity etc. More features \ncan be found at Matlab regionprops documentation.\n\n*(NB : Centroid, Area, Perimeter etc also belong to this category, but we have \nseen it in last chapter)*\n1. Aspect Ratio\n\nIt is the ratio of width to height of bounding rect of the object.\n'''\n\nAspectRatio=WidthHeight\nx,y,w,h = cv.boundingRect(cnt)\naspect_ratio = float(w)/h\n'''\n\n2. Extent\n\nExtent is the ratio of contour area to bounding rectangle area.\n'''\nExtent=ObjectAreaBoundingRectangleArea\narea = cv.contourArea(cnt)\nx,y,w,h = cv.boundingRect(cnt)\nrect_area = w*h\nextent = float(area)/rect_area\n\n'''\n3. Solidity\n\nSolidity is the ratio of contour area to its convex hull area.\n'''\nSolidity=ContourAreaConvexHullArea\narea = cv.contourArea(cnt)\nhull = cv.convexHull(cnt)\nhull_area = cv.contourArea(hull)\nsolidity = float(area)/hull_area\n\n'''\n4. Equivalent Diameter\n\nEquivalent Diameter is the diameter of the circle whose area is same as the contour area.\n\n'''\n\narea = cv.contourArea(cnt)\nequi_diameter = np.sqrt(4*area/np.pi)\n\n'''\n5. Orientation\n\nOrientation is the angle at which object is directed. Following method also gives the Major Axis and Minor Axis lengths.\n'''\n\n(x,y),(MA,ma),angle = cv.fitEllipse(cnt)\n\n'''\n\n6. Mask and Pixel Points\n\nIn some cases, we may need all the points which comprises that object. It can be done as follows:\n'''\n \nmask = np.zeros(imgray.shape,np.uint8)\ncv.drawContours(mask,[cnt],0,255,-1)\npixelpoints = np.transpose(np.nonzero(mask))\n#pixelpoints = cv.findNonZero(mask)\n\n'''\nHere, two methods, one using Numpy functions, next one using OpenCV function (last commented line) are given to do the same. Results are also same, but with a slight difference. Numpy gives coordinates in **(row, column)** format, while OpenCV gives coordinates in **(x,y)** format. So basically the answers will be interchanged. Note that, row = x and column = y.\n7. Maximum Value, Minimum Value and their locations\n\nWe can find these parameters using a mask image.\nmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(imgray,mask = mask)\n8. Mean Color or Mean Intensity\n\nHere, we can find the average color of an object. Or it can be average intensity of the object in grayscale mode. We again use the same mask to do it.\nmean_val = cv.mean(im,mask = mask)\n9. Extreme Points\n\nExtreme Points means topmost, bottommost, rightmost and leftmost points of the object.\n'''\n\nleftmost = tuple(cnt[cnt[:,:,0].argmin()][0])\nrightmost = tuple(cnt[cnt[:,:,0].argmax()][0])\ntopmost = tuple(cnt[cnt[:,:,1].argmin()][0])\nbottommost = tuple(cnt[cnt[:,:,1].argmax()][0])\n\n'''\nFor eg, if I apply it to an Bangladesh map, I get the following result :\n''' ","sub_path":"4. ImageProcessing/10.3_Contour_Properties.py","file_name":"10.3_Contour_Properties.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"219064679","text":"import os\nimport sys\nimport tqdm\nimport torch\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom types import SimpleNamespace\nfrom src.envs import all_envs\nfrom src.utils.logger import Logger\nfrom src.data.loaders import RolloutSequenceDataset\nfrom src.models.pytorch import EnvModel\nfrom src.models import all_envmodels, all_models, get_config\n\nclass Trainer():\n\tdef __init__(self, config):\n\t\tself.dataset_train = RolloutSequenceDataset(config, train=True)\n\t\tself.dataset_test = RolloutSequenceDataset(config, train=False)\n\t\tself.train_loader = torch.utils.data.DataLoader(self.dataset_train, batch_size=config.batch_size, shuffle=True, num_workers=config.nworkers)\n\t\tself.test_loader = torch.utils.data.DataLoader(self.dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.nworkers)\n\n\tdef train_loop(self, ep, envmodel, update=1):\n\t\tbatch_losses = []\n\t\tenvmodel.network.train()\n\t\twith tqdm.tqdm(total=len(self.dataset_train)) as pbar:\n\t\t\tpbar.set_description_str(f\"Train Ep: {ep}, \")\n\t\t\tfor i,(states, actions, next_states, rewards, dones) in enumerate(self.train_loader):\n\t\t\t\tloss = envmodel.network.optimize(states, actions, next_states, rewards, dones)\n\t\t\t\tif i%update == 0:\n\t\t\t\t\tpbar.set_postfix_str(f\"Loss: {loss:.4f}\")\n\t\t\t\t\tpbar.update(states.shape[0]*update)\n\t\t\t\tbatch_losses.append(loss)\n\t\treturn np.mean(batch_losses)\n\n\tdef test_loop(self, ep, envmodel):\n\t\tbatch_losses = []\n\t\tenvmodel.network.eval()\n\t\twith torch.no_grad():\n\t\t\tfor states, actions, next_states, rewards, dones in self.test_loader:\n\t\t\t\tloss = envmodel.network.get_loss(states, actions, next_states, rewards, dones).item()\n\t\t\t\tbatch_losses.append(loss)\n\t\treturn np.mean(batch_losses)\n\ndef train(make_env, config):\n\ttrainer = Trainer(config)\n\tenvmodel = EnvModel(config.state_size, config.action_size, config, load=\"\", gpu=True)\n\tcheckpoint = f\"{config.env_name}\"\n\tlogger = Logger(trainer, envmodel.network, config)\n\tep_train_losses = []\n\tep_test_losses = []\n\tfor ep in range(config.epochs):\n\t\ttrain_loss = trainer.train_loop(ep, envmodel)\n\t\ttest_loss = trainer.test_loop(ep, envmodel)\n\t\tep_train_losses.append(train_loss)\n\t\tep_test_losses.append(test_loss)\n\t\tenvmodel.network.schedule(test_loss)\n\t\tif ep_test_losses[-1] <= np.min(ep_test_losses): envmodel.network.save_model(checkpoint)\n\t\tlogger.log(f\"Step: {ep:7d}, Reward: {ep_test_losses[-1]:9.3f} [{ep_train_losses[-1]:8.3f}], Avg: {np.mean(ep_test_losses, axis=0):9.3f} ({1.0:.3f})\", envmodel.network.get_stats())\n\ndef parse_args(envs, models, envmodels):\n\tparser = argparse.ArgumentParser(description=\"MDRNN Trainer\")\n\tparser.add_argument(\"env_name\", type=str, choices=envs, help=\"Name of the environment to use. Allowed values are:\\n\"+', '.join(envs), metavar=\"env_name\")\n\tparser.add_argument(\"envmodel\", type=str, default=None, choices=envmodels, help=\"Which model to use as the dynamics. Allowed values are:\\n\"+', '.join(envmodels), metavar=\"envmodels\")\n\tparser.add_argument(\"--model\", type=str, default=None, choices=models, help=\"Which RL algorithm to use as the agent. 
Allowed values are:\\n\"+', '.join(models), metavar=\"model\")\n\tparser.add_argument(\"--nworkers\", type=int, default=0, help=\"Number of workers to use to load dataloader\")\n\tparser.add_argument(\"--epochs\", type=int, default=50, help=\"Number of epochs to train the envmodel\")\n\tparser.add_argument(\"--seq_len\", type=int, default=40, help=\"Length of sequence to train RNN\")\n\tparser.add_argument(\"--batch_size\", type=int, default=256, help=\"Size of batch to train RNN\")\n\tparser.add_argument(\"--train_prop\", type=float, default=0.9, help=\"Proportion of trajectories to use for training\")\n\treturn parser.parse_args()\n\nif __name__ == \"__main__\":\n\targs = parse_args(all_envs, list(all_models.values())[0].keys(), all_envmodels)\n\tmake_env, _, config = get_config(args.env_name, args.model)\n\tconfig.update(**args.__dict__)\n\ttrain(make_env=make_env, config=config)\n\t\t","sub_path":"train_envmodel.py","file_name":"train_envmodel.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"54185913","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'gbrva'\n\nfrom convert import *\nfrom interface import *\nimport sys\nimport os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\n\n# Номер збірки\nnumBuild = '01_210915'\n\n\nclass MyWin(QtWidgets.QMainWindow):\n mdlFlag = True\n\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.setWindowIcon(QIcon('main.ico'))\n self.ui = Ui_Main()\n self.ui.setupUi(self)\n\n\n # Встановлюємо початкові значення\n self.ui.pushButton.setEnabled(False)\n self.ui.progressBar.setProperty('value', 0)\n\n # Приєднуємо слоти\n self.ui.pushButton.clicked.connect(self.do_convert)\n self.ui.toolButton.clicked.connect(self.do_selectfile)\n self.ui.MoodleBtn.clicked.connect(self.do_moodleClick)\n self.ui.TextBtn.clicked.connect(self.do_textClick)\n\n def do_moodleClick(self):\n self.ui.MoodleBox.setChecked(True)\n self.ui.FotoBox.setChecked(False)\n self.ui.FotoBox.setEnabled(True)\n self.ui.GoogleBox.setChecked(False)\n return None\n\n def do_textClick(self):\n self.ui.MoodleBox.setChecked(True)\n self.ui.FotoBox.setChecked(False)\n self.ui.FotoBox.setEnabled(False)\n self.ui.GoogleBox.setChecked(False)\n\n pass\n\n def do_selectfile(self):\n fname = QtWidgets.QFileDialog.getOpenFileName()[0]\n self.ui.lineEdit.setText(str(fname))\n if fname != '':\n self.ui.pushButton.setEnabled(True)\n return None\n\n def do_convert(self):\n frname = self.ui.lineEdit.text()\n tmpname = os.path.split(frname)\n fwname = os.path.join(tmpname[0], 'mdl_' + tmpname[1])\n text1 = self.ui.outlabel.setText('Output file: ' + fwname)\n if self.ui.MoodleBtn.isChecked():\n Convert_1c(frname, fwname, self.ui.FotoBox.isChecked())\n else:\n Convert_Text(frname, fwname)\n # self.ui.progressBar.setProperty('value', newvalue)\n self.ui.lineEdit.setText('')\n self.ui.pushButton.setEnabled(False)\n return None\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n myapp = MyWin()\n myapp.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"502952583","text":"# -*- encoding: utf-8 -*-\r\nfrom flask import Blueprint\r\nfrom flask import jsonify\r\nfrom flask import request\r\nfrom flask_login import current_user\r\nfrom flask_login import login_required\r\nfrom sqlalchemy import func\r\n\r\nfrom lazyblacksmith.models import SolarSystem\r\nfrom lazyblacksmith.models import TokenScope\r\nfrom lazyblacksmith.models import db\r\n\r\nfrom . import logger\r\n\r\najax_account = Blueprint('ajax_account', __name__)\r\n\r\n\r\n@ajax_account.route('/scopes/
\")+\"
\\n
\\n\")\n for one_event in event_list:\n f.write(\"ID: %s
\\n\"%(one_event[0]))\n f.write(\"Type: %s
\\n\"%(one_event[1]))\n f.write(\"Anchor: %s
\\n\"%(one_event[2]))\n f.write(\"Extent: %s
\\n\"%(one_event[3]))\n f.write(\"
\\n\")\n f.write(\"\\n'\n b' \\n'\n b'
{DisplayName}
{DisplayName}
{DisplayName}
{DisplayName}
{OriginalTitle}
{DisplayName} (Level {Level})
Authors: {}
\".format(\", \".join(entity['data'][\"Authors\"]))\n entity['table-id'] = \"{}_{}\".format(data[i][1], entity['data'][id_helper_dict[data[i][1]]])\n data[i] = entity\n return flask.jsonify({'entities': data})\n\n\n\n@blueprint.route('/manualcache', methods=['POST'])\ndef manualcache():\n cache_dictionary = (json.loads(request.form.get('cache')))\n paper_action = request.form.get('paperAction')\n #saveNewBrowseCache(cache_dictionary)\n\n if paper_action == \"batch\":\n paper_ids = get_all_paper_ids(cache_dictionary[\"EntityIds\"])\n addToBatch(paper_ids)\n if paper_action == \"cache\":\n paper_ids = get_all_paper_ids(cache_dictionary[\"EntityIds\"])\n paper_info_db_check_multiquery(paper_ids)\n return flask.jsonify({})\n\n\n@blueprint.route('/submit/', methods=['GET', 'POST'])\ndef submit():\n pub_years = None\n cit_years = None\n self_citations = False\n coauthors = True\n if request.method == \"GET\":\n doc_id = request.args[\"id\"]\n ids, flower_name, curated_flag = url_decode_info(doc_id)\n author_ids = ids.author_ids\n affiliation_ids = ids.affiliation_ids\n conference_ids = ids.conference_series_ids\n fos_ids = ids.field_of_study_ids\n journal_ids = ids.journal_ids\n paper_ids = ids.paper_ids\n\n encoded_filters = request.args.get(\"filters\")\n if encoded_filters is not None:\n decoded_filters = decode_filters(encoded_filters)\n pub_years = decoded_filters.pub_years\n cit_years = decoded_filters.cit_years\n self_citations = decoded_filters.self_citations\n coauthors = decoded_filters.coauthors\n else:\n curated_flag = False\n data_str = request.form['data']\n data = json.loads(data_str)\n entities = data['entities']\n author_ids = list(map(int, entities['AuthorIds']))\n affiliation_ids = list(map(int, entities['AffiliationIds']))\n conference_ids = list(map(int, entities['ConferenceIds']))\n journal_ids = list(map(int, entities['JournalIds']))\n paper_ids = list(map(int, entities['PaperIds']))\n fos_ids = list(map(int, entities['FieldOfStudyIds']))\n\n flower_name = data.get('flower_name')\n doc_id = url_encode_info(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids, name=flower_name)\n\n if not flower_name:\n first_nonempty_id_list = (author_ids or affiliation_ids\n or conference_ids or journal_ids\n or paper_ids or fos_ids)\n if not first_nonempty_id_list:\n raise ValueError('no entities')\n name_lookup_f = {\n id(author_ids): author_name_query,\n id(affiliation_ids): affiliation_name_query,\n id(conference_ids): conference_name_query,\n id(journal_ids): journal_name_query,\n id(paper_ids): paper_name_query,\n id(fos_ids): fos_name_query}[id(first_nonempty_id_list)]\n flower_name = name_lookup_f([first_nonempty_id_list[0]])[0]\n total_entities = (len(author_ids) + len(affiliation_ids)\n + len(conference_ids) + len(journal_ids)\n + len(paper_ids) + len(fos_ids))\n if total_entities > 1:\n flower_name += f\" +{total_entities - 1} more\"\n\n flower = kb_client.get_flower(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids, pub_years=pub_years,\n cit_years=cit_years, coauthors=coauthors,\n self_citations=self_citations)\n\n stats = kb_client.get_stats(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids)\n\n url_base = 
f\"http://influencemap.ml/submit/?id={doc_id}\"\n\n session = dict(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_ids=conference_ids, journal_ids=journal_ids,\n fos_ids=fos_ids, paper_ids=paper_ids, flower_name=flower_name,\n url_base=url_base, icoauthor=coauthors, self_cite=self_citations)\n\n rdata = make_response_data(\n flower, stats, is_curated=curated_flag, flower_name=flower_name,\n session=session, selection=dict(\n pub_years=pub_years, cit_years=cit_years, coauthors=coauthors,\n self_citations=self_citations))\n return flask.render_template(\"flower.html\", **rdata)\n\n\n@blueprint.route('/resubmit/', methods=['POST'])\ndef resubmit():\n # option = request.form.get('option')\n # keyword = request.form.get('keyword')\n # flower_config['reference'] = request.form.get('cmp_ref') == 'true'\n # flower_config['num_leaves'] = int(request.form.get('numpetals'))\n # flower_config['order'] = request.form.get('petalorder')\n\n session = json.loads(request.form.get(\"session\"))\n flower_name = session['flower_name']\n author_ids = session['author_ids']\n affiliation_ids = session['affiliation_ids']\n conference_ids = session['conference_ids']\n journal_ids = session['journal_ids']\n fos_ids = session['fos_ids']\n paper_ids = session['paper_ids']\n\n self_citations = request.form.get('selfcite') == 'true'\n coauthors = request.form.get('coauthor') == 'true'\n pub_lower = int(request.form.get('from_pub_year'))\n pub_upper = int(request.form.get('to_pub_year'))\n cit_lower = int(request.form.get('from_cit_year'))\n cit_upper = int(request.form.get('to_cit_year'))\n\n flower = kb_client.get_flower(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids,\n pub_years=(pub_lower, pub_upper), cit_years=(cit_lower, cit_upper),\n coauthors=coauthors, self_citations=self_citations)\n\n rdata = make_response_data(\n flower, flower_name=flower_name, session=session)\n\n return flask.jsonify(rdata)\n\n\ndef conf_journ_to_display_names(papers):\n conf_journ_ids = {\"ConferenceSeriesIds\": [], \"JournalIds\": []}\n for paper in papers.values():\n if \"ConferenceSeriesId\" in paper: conf_journ_ids[\"ConferenceSeriesIds\"].append(paper[\"ConferenceSeriesId\"])\n if \"JournalId\" in paper: conf_journ_ids[\"JournalIds\"].append(paper[\"JournalId\"])\n conf_journ_display_names = get_conf_journ_display_names(conf_journ_ids)\n for paper in papers.values():\n if \"ConferenceSeriesId\" in paper:\n paper[\"ConferenceName\"] = conf_journ_display_names[\"Conference\"][paper[\"ConferenceSeriesId\"]]\n if \"JournalId\" in paper:\n paper[\"JournalName\"] = conf_journ_display_names[\"Journal\"][paper[\"JournalId\"]]\n return papers\n\n\n@blueprint.route('/get_publication_papers')\ndef get_publication_papers():\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n\n pub_year_min = int(request.form.get(\"pub_year_min\"))\n pub_year_max = int(request.form.get(\"pub_year_max\"))\n paper_ids = session['cache']\n papers = paper_info_db_check_multiquery(paper_ids)\n papers = [paper for paper in papers if (paper[\"Year\"] >= pub_year_min and paper[\"Year\"] <= pub_year_max)]\n papers = conf_journ_to_display_names({paper[\"PaperId\"]: paper for paper in papers})\n return flask.jsonify({\"papers\": papers, \"names\": session[\"entity_names\"]+ session[\"node_info\"]})\n\n\n@blueprint.route('/get_citation_papers')\ndef 
@blueprint.route('/get_citation_papers', methods=['POST'])\ndef get_citation_papers():\n # request should contain the ego author ids and the node author ids separately\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n\n cite_year_min = int(request.form.get(\"cite_year_min\"))\n cite_year_max = int(request.form.get(\"cite_year_max\"))\n pub_year_min = int(request.form.get(\"pub_year_min\"))\n pub_year_max = int(request.form.get(\"pub_year_max\"))\n paper_ids = session['cache']\n papers = paper_info_db_check_multiquery(paper_ids)\n cite_papers = [[citation for citation in paper[\"Citations\"]\n if cite_year_min <= citation[\"Year\"] <= cite_year_max]\n for paper in papers\n if pub_year_min <= paper[\"Year\"] <= pub_year_max]\n citations = sum(cite_papers, [])\n citations = conf_journ_to_display_names({paper[\"PaperId\"]: paper for paper in citations})\n\n return flask.jsonify({\"papers\": citations, \"names\": session[\"entity_names\"] + session[\"node_info\"], \"node_info\": session[\"node_information_store\"]})\n\n\ndef get_entities(paper):\n ''' Gets the entities of a paper\n '''\n authors = [author[\"AuthorName\"] for author in paper[\"Authors\"]]\n affiliations = [author[\"AffiliationName\"] for author in paper[\"Authors\"] if \"AffiliationName\" in author]\n conferences = [paper[\"ConferenceName\"]] if (\"ConferenceName\" in paper) else []\n journals = [paper[\"JournalName\"]] if (\"JournalName\" in paper) else []\n fieldsofstudy = [fos[\"FieldOfStudyName\"] for fos in paper[\"FieldsOfStudy\"] if fos[\"FieldOfStudyLevel\"] == 1] if (\"FieldsOfStudy\" in paper) else []\n\n return authors, affiliations, conferences, journals, fieldsofstudy\n\n\nNODE_INFO_FIELDS = [\"PaperTitle\", \"Authors\", \"PaperId\", \"Year\", \"ConferenceName\",\n \"ConferenceSeriesId\", \"JournalName\", \"JournalId\"]\n\n\ndef get_node_info_single(entity, entity_type, year_ranges):\n # Determine the citation range\n pub_lower = year_ranges[\"pub_lower\"]\n pub_upper = year_ranges[\"pub_upper\"]\n cit_lower = year_ranges[\"cit_lower\"]\n cit_upper = year_ranges[\"cit_upper\"]\n\n # Get paper to get information from\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n papers = paper_info_db_check_multiquery(session[\"cache\"])\n\n # Get coauthors list to filter\n if session['icoauthor'] == 'false':\n coauthors = session['coauthors']\n else:\n coauthors = list()\n\n # Get self_citation list to filter\n if session['self_cite'] == 'false':\n self = session['entity_names']\n else:\n self = list()\n\n # Results\n papers_to_send = dict()\n links = dict()\n\n for paper in papers:\n # Publication range filter\n if paper[\"Year\"] < pub_lower or paper[\"Year\"] > pub_upper:\n continue\n\n for link_type in [\"References\", \"Citations\"]:\n for rel_paper in paper[link_type]:\n # Citation range filter\n if link_type == \"Citations\" and \\\n (rel_paper[\"Year\"] < cit_lower or rel_paper[\"Year\"] > cit_upper):\n continue\n\n # Get fields\n auth, inst, conf, jour, fos = get_entities(rel_paper)\n fields = dict()\n fields['author'] = set(auth)\n fields['inst'] = set(inst)\n fields['conf'] = set(conf + jour)\n fields['fos'] = set(fos)\n\n check = dict()\n check['author'] = coauthors + self\n check['inst'] = coauthors + self\n check['conf'] = coauthors\n check['fos'] = list()\n\n # Skip related papers that mention a filtered-out name of the matching type\n skip = False\n for n_type, check_val in check.items():\n if not set(check_val).isdisjoint(fields[n_type]):\n skip = True\n break\n if skip:\n continue\n\n if entity not in fields[entity_type]:\n continue\n\n papers_to_send[paper[\"PaperId\"]] = {k: v for k, v in paper.items() if k in NODE_INFO_FIELDS}\n papers_to_send[paper[\"PaperId\"]] = add_author_order(papers_to_send[paper[\"PaperId\"]])\n\n papers_to_send[rel_paper[\"PaperId\"]] = {k: v for k, v in rel_paper.items() if k in NODE_INFO_FIELDS}\n papers_to_send[rel_paper[\"PaperId\"]] = add_author_order(papers_to_send[rel_paper[\"PaperId\"]])\n\n if link_type == \"Citations\":\n if paper[\"PaperId\"] in links:\n links[paper[\"PaperId\"]][\"reference\"].append(rel_paper[\"PaperId\"])\n else:\n links[paper[\"PaperId\"]] = {\"reference\": [rel_paper[\"PaperId\"]], \"citation\": list()}\n else:\n if paper[\"PaperId\"] in links:\n links[paper[\"PaperId\"]][\"citation\"].append(rel_paper[\"PaperId\"])\n else:\n links[paper[\"PaperId\"]] = {\"citation\": [rel_paper[\"PaperId\"]], \"reference\": list()}\n\n paper_sort_func = lambda x: -papers_to_send[x][\"Year\"]\n links = sorted([{\"citation\": sorted(link[\"citation\"], key=paper_sort_func),\n \"reference\": sorted(link[\"reference\"], key=paper_sort_func),\n \"ego_paper\": key}\n for key, link in links.items()],\n key=lambda x: paper_sort_func(x[\"ego_paper\"]))\n\n return {\"node_name\": entity, \"node_type\": entity_type, \"node_links\": links, \"paper_info\": papers_to_send}\n\n\n\n@blueprint.route('/get_node_info/', methods=['POST'])\ndef get_node_info():\n request_data = json.loads(request.form.get(\"data_string\"))\n node_name = request_data.get(\"name\")\n node_type = request_data.get(\"node_type\")\n session = request_data.get(\"session\")\n year_ranges = session[\"year_ranges\"]\n flower_name = session[\"flower_name\"]\n\n data = get_node_info_single(node_name, node_type, year_ranges)\n data[\"node_name\"] = node_name\n data[\"flower_name\"] = flower_name\n data[\"max_page\"] = math.ceil(len(data[\"node_links\"]) / 5)\n data[\"node_links\"] = data[\"node_links\"][:5]\n return flask.jsonify(data)\n\n\n\n@blueprint.route('/get_next_node_info_page/', methods=['POST'])\ndef get_next_node_info_page():\n request_data = json.loads(request.form.get(\"data_string\"))\n node_name = request_data.get(\"name\")\n node_type = request_data.get(\"node_type\")\n session = request_data.get(\"session\")\n year_ranges = session[\"year_ranges\"]\n page = int(request_data.get(\"page\"))\n\n node_info = get_node_info_single(node_name, node_type, year_ranges)\n page_length = 5\n page_info = {\"paper_info\": node_info[\"paper_info\"],\n \"node_links\": node_info[\"node_links\"][page_length * (page - 1):page_length * page]}\n return flask.jsonify(page_info)\n","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"601961593","text":"import pandas as pd\nimport pickle\n\n\n\ndef parser_datos(archivo,columna,tabla,atributos):\n\texcel_prueba = pd.read_excel(archivo+\".xlsx\")\n\tdiccionario = {}\n\tlargo_archivo = len(excel_prueba.index)\n\n\tfile_guardar = open(archivo+\".txt\",\"w+\")\n\n\tfor i in range(largo_archivo):\n\t\tprovincia_texto = excel_prueba.loc[[i]][columna][i]\n\t\tdiccionario[provincia_texto] = i\n\n\t\tfile_guardar.write(\"INSERT INTO %s(%s) VALUES(%d,\\\"%s\\\");\\n\" % (tabla,atributos,i,provincia_texto))\n\n\tf = open(archivo+\".pkl\",\"wb\")\n\tpickle.dump(diccionario,f)\n\tf.close()\n\tfile_guardar.close()\n\t#print(diccionario_distritos)\n\ndef abrir_diccionario(archivo):\n\tfile = open(archivo+\".pkl\", 'rb')\n\tdiccionario_distritos = pickle.load(file)\n\t#print(diccionario_distritos)\n\treturn diccionario_distritos\n\ndef parser_datos_totales(archivo):\n\tdiccionario_distrito = abrir_diccionario(\"distritos\")\n\tdiccionario_provincia = abrir_diccionario(\"provincias\")\n\tdiccionario_canton = abrir_diccionario(\"cantones\")\n\tdiccionario_anho = abrir_diccionario(\"anhos\")\n\tdiccionario_mes = abrir_diccionario(\"meses\")\n\tdiccionario_dia = abrir_diccionario(\"dias\")\n\tdiccionario_rol = abrir_diccionario(\"roles\")\n\tdiccionario_sexo = abrir_diccionario(\"sexos\")\n\tdiccionario_lesion = abrir_diccionario(\"lesiones\")\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\tfecha_incidente_guardar = open(\"fecha_incidente.txt\",\"w+\")\n\tlocalizacion_guardar = open(\"localizacion.txt\",\"w+\")\n\tincidente_guardar = open(\"incidente.txt\",\"w+\")\n\tafectado_guardar = open(\"afectado.txt\",\"w+\")\n\n\tfor i in range(largo_archivo):\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\n\t\tdia_numero_tabla = diccionario_dia[dia_excel]\n\t\tmes_numero_tabla = diccionario_mes[mes_excel]\n\t\tanho_numero_tabla = diccionario_anho[anho_excel]\n\n\t\tinsercion_fecha_incidente = \"INSERT INTO FechaIncidente(codigoFechaIncidente,codigoAnho,codigoMes,codigoDia) VALUES(%d,%d,%d,%d);\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\n\t\tfecha_incidente_guardar.write(insercion_fecha_incidente)\n\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\n\t\tprovincia_numero_tabla = diccionario_provincia[provincia_excel]\n\t\tcanton_numero_tabla = diccionario_canton[canton_excel]\n\t\tdistrito_numero_tabla = diccionario_distrito[distrito_excel]\n\n\t\tinsercion_localizacion = \"INSERT INTO Localizacion(codigoLocalizacion,codigoProvincia,codigoCanton,codigoDistrito) VALUES(%d,%d,%d,%d);\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\n\t\tlocalizacion_guardar.write(insercion_localizacion)\n\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\trol_numero_tabla = diccionario_rol[rol_excel]\n\t\tsexo_numero_tabla = diccionario_sexo[sexo_excel]\n\t\tlesion_numero_tabla = diccionario_lesion[lesion_excel]\n\n\t\tinsercion_afectado = \"INSERT INTO Afectado(codigoAfectado,codigoRol,codigoSexo,codigoLesion,edad,edadQuinquenal) VALUES(%d,%d,%d,%d,\\\"%s\\\",\\\"%s\\\");\\n\"%(i,rol_numero_tabla,sexo_numero_tabla,lesion_numero_tabla,edad_excel,edad_quinquenal_excel)\n\n\t\tafectado_guardar.write(insercion_afectado)\n\n\t\tinsercion_incidente = \"INSERT INTO Incidente(codigoRegistro,codigoLocalizacion,codigoFecha,codigoAfectado) VALUES(%d,%d,%d,%d);\\n\"%((i+1),i,i,i)\n\n\t\tincidente_guardar.write(insercion_incidente)\n\n\tfecha_incidente_guardar.close()\n\tlocalizacion_guardar.close()\n\tincidente_guardar.close()\n\tafectado_guardar.close()\n\ndef parser_datos_totales_forma2(archivo):\n\tdiccionario_distrito = abrir_diccionario(\"distritos\")\n\tdiccionario_provincia = abrir_diccionario(\"provincias\")\n\tdiccionario_canton = abrir_diccionario(\"cantones\")\n\tdiccionario_anho = abrir_diccionario(\"anhos\")\n\tdiccionario_mes = abrir_diccionario(\"meses\")\n\tdiccionario_dia = abrir_diccionario(\"dias\")\n\tdiccionario_rol = abrir_diccionario(\"roles\")\n\tdiccionario_sexo = abrir_diccionario(\"sexos\")\n\tdiccionario_lesion = abrir_diccionario(\"lesiones\")\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\tfecha_incidente_guardar = open(\"fecha_incidente.txt\",\"w+\")\n\tlocalizacion_guardar = open(\"localizacion.txt\",\"w+\")\n\tincidente_guardar = open(\"incidente.txt\",\"w+\")\n\tafectado_guardar = open(\"afectado.txt\",\"w+\")\n\n\tprimer_linea_fecha_incidente = \"INSERT INTO FechaIncidente(codigoFechaIncidente,codigoAnho,codigoMes,codigoDia) VALUES\"\n\tfecha_incidente_guardar.write(primer_linea_fecha_incidente)\n\n\tprimer_linea_localizacion = \"INSERT INTO Localizacion(codigoLocalizacion,codigoProvincia,codigoCanton,codigoDistrito) VALUES\"\n\tlocalizacion_guardar.write(primer_linea_localizacion)\n\n\tprimer_linea_afectado = \"INSERT INTO Afectado(codigoAfectado,codigoRol,codigoSexo,codigoLesion,edad,edadQuinquenal) VALUES\"\n\tafectado_guardar.write(primer_linea_afectado)\n\n\tprimer_linea_incidente = \"INSERT INTO Incidente(codigoRegistro,codigoLocalizacion,codigoFechaIncidente,codigoAfectado) VALUES\"\n\tincidente_guardar.write(primer_linea_incidente)\n\tfor i in range(largo_archivo):\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\n\t\tdia_numero_tabla = diccionario_dia[dia_excel]\n\t\tmes_numero_tabla = diccionario_mes[mes_excel]\n\t\tanho_numero_tabla = diccionario_anho[anho_excel]\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_fecha_incidente = \"(%d,%d,%d,%d);\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\t\telse:\n\t\t\tinsercion_fecha_incidente = \"(%d,%d,%d,%d),\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\n\t\tfecha_incidente_guardar.write(insercion_fecha_incidente)\n\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\n\t\tprovincia_numero_tabla = diccionario_provincia[provincia_excel]\n\t\tcanton_numero_tabla = diccionario_canton[canton_excel]\n\t\tdistrito_numero_tabla = diccionario_distrito[distrito_excel]\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_localizacion = \"(%d,%d,%d,%d);\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\t\telse:\n\t\t\tinsercion_localizacion = \"(%d,%d,%d,%d),\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\n\t\tlocalizacion_guardar.write(insercion_localizacion)\n\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\trol_numero_tabla = diccionario_rol[rol_excel]\n\t\tsexo_numero_tabla = diccionario_sexo[sexo_excel]\n\t\tlesion_numero_tabla = diccionario_lesion[lesion_excel]\n\n
\"(%d,%d,%d,%d,\\\"%s\\\",\\\"%s\\\"),\\n\"%(i,rol_numero_tabla,sexo_numero_tabla,lesion_numero_tabla,edad_excel,edad_quinquenal_excel)\n\n\t\tafectado_guardar.write(insercion_afectado)\t\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_incidente = \"(%d,%d,%d,%d);\\n\"%((i+1),i,i,i)\n\t\telse:\n\t\t\tinsercion_incidente = \"(%d,%d,%d,%d),\\n\"%((i+1),i,i,i)\n\n\t\tincidente_guardar.write(insercion_incidente)\n\n\tfecha_incidente_guardar.close()\n\tlocalizacion_guardar.close()\n\tincidente_guardar.close()\n\tafectado_guardar.close()\n\n\n\ndef parser_datos_tabla_completa(archivo):\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\ttabla_total_1_guardar= open(\"tabla_dios1.txt\",\"w+\")\n\ttabla_total_2_guardar= open(\"tabla_dios2.txt\",\"w+\")\n\n\tprimer_linea_tabla_total = \"INSERT INTO IncidenteCompleto(codigoRegistro,nombreProvincia,nombreCanton,nombreDistrito,nombreDia,nombreMes,nombreAnho,nombreRol,nombreSexo,nombreLesion,edad,edadQuinquenal) VALUES\"\n\ttabla_total_1_guardar.write(primer_linea_tabla_total)\n\ttabla_total_2_guardar.write(primer_linea_tabla_total)\n\n\tlargo_1 = largo_archivo//2\n\n\tlargo_2 = (largo_archivo//2)+(largo_archivo%2)\n\n\tcontador = 0\n\n\tfor i in range(largo_1):\n\t\t\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\t\n\t\tinsercion_tabla_zeus = \"(%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"),\\n\"%((contador+1),provincia_excel,canton_excel,distrito_excel,dia_excel,mes_excel,anho_excel,rol_excel,sexo_excel,lesion_excel,edad_excel,edad_quinquenal_excel)\n\n\t\ttabla_total_1_guardar.write(insercion_tabla_zeus)\n\n\t\tcontador +=1\n\n\tfor i in range(largo_2):\n\t\t\n\t\tprovincia_excel = excel_leer.loc[[i+contador]][\"Provincia\"][i+contador]\n\t\tcanton_excel = excel_leer.loc[[i+contador]][\"Cantón\"][i+contador]\n\t\tdistrito_excel = excel_leer.loc[[i+contador]][\"Distrito\"][i+contador]\n\t\tdia_excel = excel_leer.loc[[i+contador]][\"Día\"][i+contador]\n\t\tmes_excel = excel_leer.loc[[i+contador]][\"Mes\"][i+contador]\n\t\tanho_excel = excel_leer.loc[[i+contador]][\"Año\"][i+contador]\n\t\trol_excel = excel_leer.loc[[i+contador]][\"Rol\"][i+contador]\n\t\tsexo_excel = excel_leer.loc[[i+contador]][\"Sexo\"][i+contador]\n\t\tlesion_excel = excel_leer.loc[[i+contador]][\"Tipo de Lesión\"][i+contador]\n\t\tedad_excel = excel_leer.loc[[i+contador]][\"Edad\"][i+contador]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i+contador]][\"Edadquinquenal\"][i+contador]\n\n\t\t\n\t\tinsercion_tabla_zeus = 
\"(%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",%s,\\\"%s\\\"),\\n\"%((i+contador+1),provincia_excel,canton_excel,distrito_excel,dia_excel,mes_excel,anho_excel,rol_excel,sexo_excel,lesion_excel,edad_excel,edad_quinquenal_excel)\n\n\t\ttabla_total_2_guardar.write(insercion_tabla_zeus)\n\n\n\n\ttabla_total_1_guardar.close()\n\ttabla_total_2_guardar.close()\n\n\n\ndef divirdirArchivo(archivo):\n\tarchivo_divir = open(archivo, \"r\")\n\t\n\tline = archivo_divir.readline()\n\tcontador_parte = 0\n\n\tparte1 = open(\"parte1_division.txt\",\"w+\")\n\tparte2 = open(\"parte2_division.txt\",\"w+\")\n\tparte3 = open(\"parte3_division.txt\",\"w+\")\n\tparte4 = open(\"parte4_division.txt\",\"w+\")\n\n\tparte1.write(line)\n\tparte2.write(line)\n\tparte3.write(line)\n\tparte4.write(line)\n\n\tline = archivo_divir.readline()\n\n\twhile line:\n\t\tif (contador_parte == 0):\n\t\t\tparte1.write(line)\n\t\t\tcontador_parte+=1\n\t\telif(contador_parte == 1):\n\t\t\tparte2.write(line)\n\t\t\tcontador_parte+=1\n\t\telif(contador_parte == 2):\n\t\t\tparte3.write(line)\n\t\t\tcontador_parte+=1\n\t\telse:\n\t\t\tparte4.write(line)\n\t\t\tcontador_parte = 0\n\t\tline = archivo_divir.readline()\n\n\tparte1.close()\n\tparte2.close()\n\tparte3.close()\n\tparte4.close()\n\tarchivo_divir.close()\n\n \n\n\ndivirdirArchivo(\"tabla_dios2.txt\")\n #array = []\n #for line in ins:\n # array.append(line)'''\n#INSERT INTO Dia(codigoDia,nombreDia) VALUES(3,\"PRUEBA\")\n\n#print(excel_prueba[0])\n#abrir_diccionario(\"diccionario_distritos\")\n\n#parser_datos(\"distritos\",\"Distritos\",\"Distrito\",\"codigoDistrito,nombreDistrito\")\n#parser_datos(\"cantones\",\"Canton\",\"Canton\",\"codigoCanton,nombreCanton\")\n#parser_datos(\"dias\",\"Dia\",\"Dia\",\"codigoDia,nombreDia\")\n#parser_datos(\"meses\",\"Mes\",\"Mes\",\"codigoMes,nombreMes\")\n#parser_datos(\"anhos\",\"Anho\",\"Anho\",\"codigoAnho,nombreAnho\")\n#parser_datos(\"roles\",\"Rol\",\"Rol\",\"codigoRol,nombreRol\")\n#parser_datos(\"lesiones\",\"Lesion\",\"Lesion\",\"codigoLesion,nombreLesion\")\n#parser_datos(\"sexos\",\"Sexo\",\"Sexo\",\"codigoSexo,nombreSexo\")\n#parser_datos(\"provincias\",\"Provincia\",\"Provincia\",\"codigoProvincia,nombreProvincia\")\n\n#parser_datos_totales(\"acc1\")\n#parser_datos_totales_forma2(\"acc1\")\n\n#parser_datos_tabla_completa(\"acc1\")\n#divirdirArchivo(\"tabla_dios1.txt\")\n#print(\"Column headings:\")\n#print(excel_prueba.columns)\n","sub_path":"generacion_inserts_base_datos/prueba_excel.py","file_name":"prueba_excel.py","file_ext":"py","file_size_in_byte":12273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"46966840","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Quoc-Tuan Truong